Diffstat (limited to 'compiler')
252 files changed, 4536 insertions, 3473 deletions
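A recurring change throughout this diff is an API migration in `rustc_middle`: interner-style constructor methods on `TyCtxt` such as `tcx.mk_re_var(vid)`, `tcx.mk_re_placeholder(..)`, `tcx.mk_re_free(..)`, and `tcx.mk_re_late_bound(..)` are replaced by associated constructors on `ty::Region`, e.g. `ty::Region::new_var(tcx, vid)`. The sketch below only illustrates the shape of that refactoring with hypothetical `Ctx`/`Region` stand-ins; it is not the real rustc API.

```rust
// Hypothetical stand-ins for `TyCtxt` and `ty::Region`, used only to show the
// call-site change from a `mk_*` method on the context to an associated
// constructor that takes the context as its first argument.
#[derive(Debug, Clone, Copy)]
struct RegionVid(u32);

#[derive(Debug)]
struct Region {
    vid: RegionVid,
}

struct Ctx;

impl Ctx {
    // Old style, analogous to `tcx.mk_re_var(vid)`.
    fn mk_re_var(&self, vid: RegionVid) -> Region {
        Region { vid }
    }
}

impl Region {
    // New style, analogous to `ty::Region::new_var(tcx, vid)`.
    fn new_var(_cx: &Ctx, vid: RegionVid) -> Region {
        Region { vid }
    }
}

fn main() {
    let cx = Ctx;
    let before = cx.mk_re_var(RegionVid(0));
    let after = Region::new_var(&cx, RegionVid(0));
    println!("{before:?} -> {after:?}");
}
```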
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs index 7ef39f8026b..6646fa9446f 100644 --- a/compiler/rustc_ast/src/token.rs +++ b/compiler/rustc_ast/src/token.rs @@ -11,6 +11,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::Lrc; use rustc_macros::HashStable_Generic; use rustc_span::symbol::{kw, sym}; +#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))] use rustc_span::symbol::{Ident, Symbol}; use rustc_span::{self, edition::Edition, Span, DUMMY_SP}; use std::borrow::Cow; diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs index 941d3179587..d350498bc96 100644 --- a/compiler/rustc_ast_lowering/src/asm.rs +++ b/compiler/rustc_ast_lowering/src/asm.rs @@ -44,6 +44,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { | asm::InlineAsmArch::AArch64 | asm::InlineAsmArch::RiscV32 | asm::InlineAsmArch::RiscV64 + | asm::InlineAsmArch::LoongArch64 ); if !is_stable && !self.tcx.features().asm_experimental_arch { feature_err( diff --git a/compiler/rustc_borrowck/src/def_use.rs b/compiler/rustc_borrowck/src/def_use.rs index b775739fed2..b719a610e07 100644 --- a/compiler/rustc_borrowck/src/def_use.rs +++ b/compiler/rustc_borrowck/src/def_use.rs @@ -50,7 +50,6 @@ pub fn categorize(context: PlaceContext) -> Option<DefUse> { PlaceContext::MutatingUse(MutatingUseContext::Borrow) | PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) | PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) | - PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow) | // `PlaceMention` and `AscribeUserType` both evaluate the place, which must not // contain dangling references. diff --git a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs index f41795d60a0..cfcf31fce32 100644 --- a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs @@ -180,24 +180,25 @@ trait TypeOpInfo<'tcx> { return; }; - let placeholder_region = tcx.mk_re_placeholder(ty::Placeholder { - universe: adjusted_universe.into(), - bound: placeholder.bound, - }); - - let error_region = - if let RegionElement::PlaceholderRegion(error_placeholder) = error_element { - let adjusted_universe = - error_placeholder.universe.as_u32().checked_sub(base_universe.as_u32()); - adjusted_universe.map(|adjusted| { - tcx.mk_re_placeholder(ty::Placeholder { - universe: adjusted.into(), - bound: error_placeholder.bound, - }) - }) - } else { - None - }; + let placeholder_region = ty::Region::new_placeholder( + tcx, + ty::Placeholder { universe: adjusted_universe.into(), bound: placeholder.bound }, + ); + + let error_region = if let RegionElement::PlaceholderRegion(error_placeholder) = + error_element + { + let adjusted_universe = + error_placeholder.universe.as_u32().checked_sub(base_universe.as_u32()); + adjusted_universe.map(|adjusted| { + ty::Region::new_placeholder( + tcx, + ty::Placeholder { universe: adjusted.into(), bound: error_placeholder.bound }, + ) + }) + } else { + None + }; debug!(?placeholder_region); @@ -390,7 +391,7 @@ fn try_extract_error_from_fulfill_cx<'tcx>( error_region, ®ion_constraints, |vid| ocx.infcx.region_var_origin(vid), - |vid| ocx.infcx.universe_of_region(ocx.infcx.tcx.mk_re_var(vid)), + |vid| ocx.infcx.universe_of_region(ty::Region::new_var(ocx.infcx.tcx, vid)), ) } @@ -411,7 +412,7 @@ fn 
try_extract_error_from_region_constraints<'tcx>( } // FIXME: Should this check the universe of the var? Constraint::VarSubReg(vid, sup) if sup == placeholder_region => { - Some((infcx.tcx.mk_re_var(vid), cause.clone())) + Some((ty::Region::new_var(infcx.tcx, vid), cause.clone())) } _ => None, } diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs index 04b8174079a..15d73ed732f 100644 --- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs @@ -1635,34 +1635,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { }) } - /// Reports StorageDeadOrDrop of `place` conflicts with `borrow`. - /// - /// Depending on the origin of the StorageDeadOrDrop, this may be - /// reported as either a drop or an illegal mutation of a borrowed value. - /// The latter is preferred when the this is a drop triggered by a - /// reassignment, as it's more user friendly to report a problem with the - /// explicit assignment than the implicit drop. - #[instrument(level = "debug", skip(self))] - pub(crate) fn report_storage_dead_or_drop_of_borrowed( - &mut self, - location: Location, - place_span: (Place<'tcx>, Span), - borrow: &BorrowData<'tcx>, - ) { - // It's sufficient to check the last desugaring as Replace is the last - // one to be applied. - if let Some(DesugaringKind::Replace) = place_span.1.desugaring_kind() { - self.report_illegal_mutation_of_borrowed(location, place_span, borrow) - } else { - self.report_borrowed_value_does_not_live_long_enough( - location, - borrow, - place_span, - Some(WriteKind::StorageDeadOrDrop), - ) - } - } - /// This means that some data referenced by `borrow` needs to live /// past the point where the StorageDeadOrDrop of `place` occurs. 
/// This is usually interpreted as meaning that `place` has too diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs index 4bde372c847..d0e17bf5a08 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs @@ -641,13 +641,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { let Some(hir::Node::Item(item)) = node else { return; }; let hir::ItemKind::Fn(.., body_id) = item.kind else { return; }; let body = self.infcx.tcx.hir().body(body_id); - let mut assign_span = span; - // Drop desugaring is done at MIR build so it's not in the HIR - if let Some(DesugaringKind::Replace) = span.desugaring_kind() { - assign_span.remove_mark(); - } - let mut v = V { assign_span, err, ty, suggested: false }; + let mut v = V { assign_span: span, err, ty, suggested: false }; v.visit_body(body); if !v.suggested { err.help(format!( diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs index 036391d074d..b2ff25ecb96 100644 --- a/compiler/rustc_borrowck/src/invalidation.rs +++ b/compiler/rustc_borrowck/src/invalidation.rs @@ -112,11 +112,13 @@ impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> { TerminatorKind::SwitchInt { discr, targets: _ } => { self.consume_operand(location, discr); } - TerminatorKind::Drop { place: drop_place, target: _, unwind: _ } => { + TerminatorKind::Drop { place: drop_place, target: _, unwind: _, replace } => { + let write_kind = + if *replace { WriteKind::Replace } else { WriteKind::StorageDeadOrDrop }; self.access_place( location, *drop_place, - (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)), + (AccessDepth::Drop, Write(write_kind)), LocalMutationIsAllowed::Yes, ); } diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs index 9277a262f97..a53ea100c22 100644 --- a/compiler/rustc_borrowck/src/lib.rs +++ b/compiler/rustc_borrowck/src/lib.rs @@ -685,17 +685,19 @@ impl<'cx, 'tcx> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx> for MirBorrowckCtx TerminatorKind::SwitchInt { discr, targets: _ } => { self.consume_operand(loc, (discr, span), flow_state); } - TerminatorKind::Drop { place, target: _, unwind: _ } => { + TerminatorKind::Drop { place, target: _, unwind: _, replace } => { debug!( "visit_terminator_drop \ loc: {:?} term: {:?} place: {:?} span: {:?}", loc, term, place, span ); + let write_kind = + if *replace { WriteKind::Replace } else { WriteKind::StorageDeadOrDrop }; self.access_place( loc, (*place, span), - (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)), + (AccessDepth::Drop, Write(write_kind)), LocalMutationIsAllowed::Yes, flow_state, ); @@ -885,6 +887,7 @@ enum ReadKind { #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum WriteKind { StorageDeadOrDrop, + Replace, MutableBorrow(BorrowKind), Mutate, Move, @@ -1132,13 +1135,21 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { this.buffer_error(err); } WriteKind::StorageDeadOrDrop => this - .report_storage_dead_or_drop_of_borrowed(location, place_span, borrow), + .report_borrowed_value_does_not_live_long_enough( + location, + borrow, + place_span, + Some(WriteKind::StorageDeadOrDrop), + ), WriteKind::Mutate => { this.report_illegal_mutation_of_borrowed(location, place_span, borrow) } WriteKind::Move => { this.report_move_out_while_borrowed(location, place_span, borrow) } + WriteKind::Replace => { + this.report_illegal_mutation_of_borrowed(location, place_span, 
borrow) + } } Control::Break } @@ -1982,12 +1993,14 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { Reservation( WriteKind::Move + | WriteKind::Replace | WriteKind::StorageDeadOrDrop | WriteKind::MutableBorrow(BorrowKind::Shared) | WriteKind::MutableBorrow(BorrowKind::Shallow), ) | Write( WriteKind::Move + | WriteKind::Replace | WriteKind::StorageDeadOrDrop | WriteKind::MutableBorrow(BorrowKind::Shared) | WriteKind::MutableBorrow(BorrowKind::Shallow), diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs index 889acb3acbe..b5014a3f479 100644 --- a/compiler/rustc_borrowck/src/nll.rs +++ b/compiler/rustc_borrowck/src/nll.rs @@ -441,7 +441,7 @@ fn for_each_region_constraint<'tcx>( let subject = match req.subject { ClosureOutlivesSubject::Region(subject) => format!("{:?}", subject), ClosureOutlivesSubject::Ty(ty) => { - format!("{:?}", ty.instantiate(tcx, |vid| tcx.mk_re_var(vid))) + format!("{:?}", ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid))) } }; with_msg(format!("where {}: {:?}", subject, req.outlived_free_region,))?; diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index 50b246b1478..2a0cb49672b 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -1158,7 +1158,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { .universal_regions_outlived_by(r_scc) .filter(|&u_r| !self.universal_regions.is_local_free_region(u_r)) .find(|&u_r| self.eval_equal(u_r, r_vid)) - .map(|u_r| tcx.mk_re_var(u_r)) + .map(|u_r| ty::Region::new_var(tcx, u_r)) // In the case of a failure, use `ReErased`. We will eventually // return `None` in this case. .unwrap_or(tcx.lifetimes.re_erased) @@ -1355,7 +1355,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { let vid = self.to_region_vid(r); let scc = self.constraint_sccs.scc(vid); let repr = self.scc_representatives[scc]; - tcx.mk_re_var(repr) + ty::Region::new_var(tcx, repr) }) } @@ -1779,7 +1779,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } // If not, report an error. 
- let member_region = infcx.tcx.mk_re_var(member_region_vid); + let member_region = ty::Region::new_var(infcx.tcx, member_region_vid); errors_buffer.push(RegionErrorKind::UnexpectedHiddenRegion { span: m_c.definition_span, hidden_ty: m_c.hidden_ty, diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs index 725ff783ee9..13e346b86bc 100644 --- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs +++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs @@ -92,7 +92,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { } None => { subst_regions.push(vid); - infcx.tcx.mk_re_error_with_message( + ty::Region::new_error_with_message( + infcx.tcx, concrete_type.span, "opaque type with non-universal region substs", ) diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs index fd94ac86d7d..eb02604b9d9 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs @@ -3,9 +3,9 @@ use rustc_index::bit_set::HybridBitSet; use rustc_index::interval::IntervalSet; use rustc_infer::infer::canonical::QueryRegionConstraints; use rustc_middle::mir::{BasicBlock, Body, ConstraintCategory, Local, Location}; +use rustc_middle::traits::query::DropckOutlivesResult; use rustc_middle::ty::{Ty, TyCtxt, TypeVisitable, TypeVisitableExt}; use rustc_span::DUMMY_SP; -use rustc_trait_selection::traits::query::dropck_outlives::DropckOutlivesResult; use rustc_trait_selection::traits::query::type_op::outlives::DropckOutlives; use rustc_trait_selection::traits::query::type_op::{TypeOp, TypeOpOutput}; use std::rc::Rc; diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index b759a848bf5..908ff3da5ca 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -139,7 +139,7 @@ pub(crate) fn type_check<'mir, 'tcx>( upvars: &[Upvar<'tcx>], use_polonius: bool, ) -> MirTypeckResults<'tcx> { - let implicit_region_bound = infcx.tcx.mk_re_var(universal_regions.fr_fn_body); + let implicit_region_bound = ty::Region::new_var(infcx.tcx, universal_regions.fr_fn_body); let mut constraints = MirTypeckRegionConstraints { placeholder_indices: PlaceholderIndices::default(), placeholder_index_to_region: IndexVec::default(), @@ -763,8 +763,8 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { PlaceContext::MutatingUse(_) => ty::Invariant, PlaceContext::NonUse(StorageDead | StorageLive | VarDebugInfo) => ty::Invariant, PlaceContext::NonMutatingUse( - Inspect | Copy | Move | PlaceMention | SharedBorrow | ShallowBorrow | UniqueBorrow - | AddressOf | Projection, + Inspect | Copy | Move | PlaceMention | SharedBorrow | ShallowBorrow | AddressOf + | Projection, ) => ty::Covariant, PlaceContext::NonUse(AscribeUserTy(variance)) => variance, } diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs index 56f078f2da8..c871703429a 100644 --- a/compiler/rustc_borrowck/src/universal_regions.rs +++ b/compiler/rustc_borrowck/src/universal_regions.rs @@ -500,7 +500,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { .next_nll_region_var(FR, || RegionCtxt::Free(Symbol::intern("c-variadic"))) .as_var(); - let region = self.infcx.tcx.mk_re_var(reg_vid); + let region = ty::Region::new_var(self.infcx.tcx, reg_vid); let va_list_ty = 
self.infcx.tcx.type_of(va_list_did).subst(self.infcx.tcx, &[region.into()]); @@ -660,7 +660,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BrEnv, }; - let env_region = tcx.mk_re_late_bound(ty::INNERMOST, br); + let env_region = ty::Region::new_late_bound(tcx, ty::INNERMOST, br); let closure_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap(); // The "inputs" of the closure in the @@ -778,7 +778,8 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for BorrowckInferCtxt<'cx, 'tcx> { { let (value, _map) = self.tcx.replace_late_bound_regions(value, |br| { debug!(?br); - let liberated_region = self.tcx.mk_re_free(all_outlive_scope.to_def_id(), br.kind); + let liberated_region = + ty::Region::new_free(self.tcx, all_outlive_scope.to_def_id(), br.kind); let region_vid = { let name = match br.kind.get_name() { Some(name) => name, @@ -889,7 +890,7 @@ impl<'tcx> UniversalRegionIndices<'tcx> { where T: TypeFoldable<TyCtxt<'tcx>>, { - tcx.fold_regions(value, |region, _| tcx.mk_re_var(self.to_region_vid(region))) + tcx.fold_regions(value, |region, _| ty::Region::new_var(tcx, self.to_region_vid(region))) } } @@ -929,7 +930,7 @@ fn for_each_late_bound_region_in_item<'tcx>( for bound_var in tcx.late_bound_vars(tcx.hir().local_def_id_to_hir_id(mir_def_id)) { let ty::BoundVariableKind::Region(bound_region) = bound_var else { continue; }; - let liberated_region = tcx.mk_re_free(mir_def_id.to_def_id(), bound_region); + let liberated_region = ty::Region::new_free(tcx, mir_def_id.to_def_id(), bound_region); f(liberated_region); } } diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs index ea830a0ce60..b619e80e15f 100644 --- a/compiler/rustc_builtin_macros/src/assert/context.rs +++ b/compiler/rustc_builtin_macros/src/assert/context.rs @@ -233,10 +233,19 @@ impl<'cx, 'a> Context<'cx, 'a> { ExprKind::Cast(local_expr, _) => { self.manage_cond_expr(local_expr); } + ExprKind::If(local_expr, _, _) => { + self.manage_cond_expr(local_expr); + } ExprKind::Index(prefix, suffix) => { self.manage_cond_expr(prefix); self.manage_cond_expr(suffix); } + ExprKind::Let(_, local_expr, _) => { + self.manage_cond_expr(local_expr); + } + ExprKind::Match(local_expr, _) => { + self.manage_cond_expr(local_expr); + } ExprKind::MethodCall(call) => { for arg in &mut call.args { self.manage_cond_expr(arg); @@ -295,17 +304,14 @@ impl<'cx, 'a> Context<'cx, 'a> { | ExprKind::Continue(_) | ExprKind::Err | ExprKind::Field(_, _) - | ExprKind::FormatArgs(_) | ExprKind::ForLoop(_, _, _, _) - | ExprKind::If(_, _, _) + | ExprKind::FormatArgs(_) | ExprKind::IncludedBytes(..) 
| ExprKind::InlineAsm(_) - | ExprKind::OffsetOf(_, _) - | ExprKind::Let(_, _, _) | ExprKind::Lit(_) | ExprKind::Loop(_, _, _) | ExprKind::MacCall(_) - | ExprKind::Match(_, _) + | ExprKind::OffsetOf(_, _) | ExprKind::Path(_, _) | ExprKind::Ret(_) | ExprKind::Try(_) diff --git a/compiler/rustc_builtin_macros/src/compile_error.rs b/compiler/rustc_builtin_macros/src/compile_error.rs index aeb3bb80045..5efc5a4e3ee 100644 --- a/compiler/rustc_builtin_macros/src/compile_error.rs +++ b/compiler/rustc_builtin_macros/src/compile_error.rs @@ -18,7 +18,7 @@ pub fn expand_compile_error<'cx>( reason = "diagnostic message is specified by user" )] #[expect(rustc::untranslatable_diagnostic, reason = "diagnostic message is specified by user")] - cx.span_err(sp, var.as_str()); + cx.span_err(sp, var.to_string()); DummyResult::any(sp) } diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs index 9883563746e..9ba98d0a5d1 100644 --- a/compiler/rustc_builtin_macros/src/deriving/clone.rs +++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs @@ -68,7 +68,6 @@ pub fn expand_deriving_clone( _ => cx.span_bug(span, "`#[derive(Clone)]` on trait item or impl item"), } - let attrs = thin_vec![cx.attr_word(sym::inline, span)]; let trait_def = TraitDef { span, path: path_std!(clone::Clone), @@ -82,7 +81,7 @@ pub fn expand_deriving_clone( explicit_self: true, nonself_args: Vec::new(), ret_ty: Self_, - attributes: attrs, + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Default, combine_substructure: substructure, }], diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs index af971958680..c78a0eb04a0 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs @@ -18,11 +18,6 @@ pub fn expand_deriving_eq( is_const: bool, ) { let span = cx.with_def_site_ctxt(span); - let attrs = thin_vec![ - cx.attr_word(sym::inline, span), - cx.attr_nested_word(sym::doc, sym::hidden, span), - cx.attr_word(sym::no_coverage, span) - ]; let trait_def = TraitDef { span, path: path_std!(cmp::Eq), @@ -36,7 +31,11 @@ pub fn expand_deriving_eq( explicit_self: true, nonself_args: vec![], ret_ty: Unit, - attributes: attrs, + attributes: thin_vec![ + cx.attr_word(sym::inline, span), + cx.attr_nested_word(sym::doc, sym::hidden, span), + cx.attr_word(sym::no_coverage, span) + ], fieldless_variants_strategy: FieldlessVariantsStrategy::Unify, combine_substructure: combine_substructure(Box::new(|a, b, c| { cs_total_eq_assert(a, b, c) diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs index cfd36f030a1..4401cf8a9c5 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs @@ -15,7 +15,6 @@ pub fn expand_deriving_ord( push: &mut dyn FnMut(Annotatable), is_const: bool, ) { - let attrs = thin_vec![cx.attr_word(sym::inline, span)]; let trait_def = TraitDef { span, path: path_std!(cmp::Ord), @@ -29,7 +28,7 @@ pub fn expand_deriving_ord( explicit_self: true, nonself_args: vec![(self_ref(), sym::other)], ret_ty: Path(path_std!(cmp::Ordering)), - attributes: attrs, + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Unify, combine_substructure: combine_substructure(Box::new(|a, b, c| cs_cmp(a, b, c))), }], 
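The deriving hunks above and below only inline the `thin_vec![cx.attr_word(sym::inline, span)]` expression into each `MethodDef` instead of binding it to a local `attrs` first; the code these derives expand to is unchanged. For orientation, a hand-written approximation of what such a derive emits for a small struct is sketched below (illustrative only, not the literal macro expansion):

```rust
#[derive(Debug)]
struct Point {
    x: i32,
    y: i32,
}

// Roughly what `#[derive(PartialEq)]` generates: an `eq` method annotated with
// the `#[inline]` attribute that the builtin derive builds via
// `cx.attr_word(sym::inline, span)`.
impl PartialEq for Point {
    #[inline]
    fn eq(&self, other: &Point) -> bool {
        self.x == other.x && self.y == other.y
    }
}

fn main() {
    assert!(Point { x: 1, y: 2 } == Point { x: 1, y: 2 });
}
```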
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs index bad47db0de1..a71ecc5db7d 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs @@ -82,14 +82,13 @@ pub fn expand_deriving_partial_eq( // No need to generate `ne`, the default suffices, and not generating it is // faster. - let attrs = thin_vec![cx.attr_word(sym::inline, span)]; let methods = vec![MethodDef { name: sym::eq, generics: Bounds::empty(), explicit_self: true, nonself_args: vec![(self_ref(), sym::other)], ret_ty: Path(path_local!(bool)), - attributes: attrs, + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Unify, combine_substructure: combine_substructure(Box::new(|a, b, c| cs_eq(a, b, c))), }]; diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs index 9f46247908d..54b6cb7d713 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs @@ -19,8 +19,6 @@ pub fn expand_deriving_partial_ord( let ret_ty = Path(Path::new_(pathvec_std!(option::Option), vec![Box::new(ordering_ty)], PathKind::Std)); - let attrs = thin_vec![cx.attr_word(sym::inline, span)]; - // Order in which to perform matching let tag_then_data = if let Annotatable::Item(item) = item && let ItemKind::Enum(def, _) = &item.kind { @@ -48,7 +46,7 @@ pub fn expand_deriving_partial_ord( explicit_self: true, nonself_args: vec![(self_ref(), sym::other)], ret_ty, - attributes: attrs, + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Unify, combine_substructure: combine_substructure(Box::new(|cx, span, substr| { cs_partial_cmp(cx, span, substr, tag_then_data) diff --git a/compiler/rustc_builtin_macros/src/deriving/default.rs b/compiler/rustc_builtin_macros/src/deriving/default.rs index 33fe98b40e1..07b172bc757 100644 --- a/compiler/rustc_builtin_macros/src/deriving/default.rs +++ b/compiler/rustc_builtin_macros/src/deriving/default.rs @@ -20,7 +20,6 @@ pub fn expand_deriving_default( ) { item.visit_with(&mut DetectNonVariantDefaultAttr { cx }); - let attrs = thin_vec![cx.attr_word(sym::inline, span)]; let trait_def = TraitDef { span, path: Path::new(vec![kw::Default, sym::Default]), @@ -34,7 +33,7 @@ pub fn expand_deriving_default( explicit_self: false, nonself_args: Vec::new(), ret_ty: Self_, - attributes: attrs, + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Default, combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| { match substr.fields { diff --git a/compiler/rustc_builtin_macros/src/deriving/hash.rs b/compiler/rustc_builtin_macros/src/deriving/hash.rs index 4eee573db42..101401f9c85 100644 --- a/compiler/rustc_builtin_macros/src/deriving/hash.rs +++ b/compiler/rustc_builtin_macros/src/deriving/hash.rs @@ -1,7 +1,7 @@ use crate::deriving::generic::ty::*; use crate::deriving::generic::*; use crate::deriving::{path_std, pathvec_std}; -use rustc_ast::{AttrVec, MetaItem, Mutability}; +use rustc_ast::{MetaItem, Mutability}; use rustc_expand::base::{Annotatable, ExtCtxt}; use rustc_span::symbol::sym; use rustc_span::Span; @@ -33,7 +33,7 @@ pub fn expand_deriving_hash( explicit_self: true, nonself_args: 
vec![(Ref(Box::new(Path(arg)), Mutability::Mut), sym::state)], ret_ty: Unit, - attributes: AttrVec::new(), + attributes: thin_vec![cx.attr_word(sym::inline, span)], fieldless_variants_strategy: FieldlessVariantsStrategy::Unify, combine_substructure: combine_substructure(Box::new(|a, b, c| { hash_substructure(a, b, c) diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs index d0d78646009..f1ab279daba 100644 --- a/compiler/rustc_builtin_macros/src/errors.rs +++ b/compiler/rustc_builtin_macros/src/errors.rs @@ -377,7 +377,7 @@ impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for EnvNotDefined { rustc::untranslatable_diagnostic, reason = "cannot translate user-provided messages" )] - handler.struct_diagnostic(msg.as_str()) + handler.struct_diagnostic(msg.to_string()) } else { handler.struct_diagnostic(crate::fluent_generated::builtin_macros_env_not_defined) }; diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md index c5222982aa7..26dccf309e1 100644 --- a/compiler/rustc_codegen_cranelift/Readme.md +++ b/compiler/rustc_codegen_cranelift/Readme.md @@ -42,6 +42,32 @@ This will build your project with rustc_codegen_cranelift instead of the usual L For additional ways to use rustc_codegen_cranelift like the JIT mode see [usage.md](docs/usage.md). +## Building and testing with changes in rustc code + +This is useful when changing code in `rustc_codegen_cranelift` as part of changing the [main Rust repository](https://github.com/rust-lang/rust/). +This can happen, for example, when you are implementing a new compiler intrinsic. + +The instructions below use `$RustCheckoutDir` as a substitute for whatever folder you cloned the Rust repository into. + +You need to follow these steps to successfully compile and use the cranelift backend with your changes to the rustc code: + +1. `cd $RustCheckoutDir` 2. Run `python x.py setup` and choose the option for the compiler (`b`). 3. Build the compiler and necessary tools: `python x.py build --stage=2 compiler library/std src/tools/rustdoc src/tools/rustfmt` * (Optional) You can also build cargo by adding `src/tools/cargo` to the previous command. 4. Copy executable files from `./build/host/stage2-tools/<your hostname triple>/release` +to `./build/host/stage2/bin/`. Note that you will need to do this every time you rebuild the `rust` repository. 5. Copy cargo from another toolchain: `cp $(rustup which cargo) .build/<your hostname triple>/stage2/bin/cargo` * Another option is to build it at step 3 and copy it with the other executables at step 4. 6. Link your new `rustc` as a toolchain: `rustup toolchain link stage2 ./build/host/stage2/`. 7. (Windows only) Compile y.rs: `rustc +stage2 -O y.rs`. 8. You need to prefix every `./y.rs` (or `y` if you built `y.rs`) command with `rustup run stage2` so that cg_clif uses your local changes to rustc. + + * `rustup run stage2 ./y.rs prepare` + * `rustup run stage2 ./y.rs build` + * (Optional) run tests: `rustup run stage2 ./y.rs test` +9. Now you can use your cg_clif build to compile other Rust programs, e.g. you can open any Rust crate and run commands like `$RustCheckoutDir/compiler/rustc_codegen_cranelift/dist/cargo-clif build --release`. 
+ ## Configuration See the documentation on the `BackendConfig` struct in [config.rs](src/config.rs) for all diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index 9c6a0fae327..fcfa0b862d4 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -473,7 +473,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) { | TerminatorKind::GeneratorDrop => { bug!("shouldn't exist at codegen {:?}", bb_data.terminator()); } - TerminatorKind::Drop { place, target, unwind: _ } => { + TerminatorKind::Drop { place, target, unwind: _, replace: _ } => { let drop_place = codegen_place(fx, *place); crate::abi::codegen_drop(fx, source_info, drop_place); diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs index 5eaa988dd09..67fd6d793e0 100644 --- a/compiler/rustc_codegen_cranelift/src/common.rs +++ b/compiler/rustc_codegen_cranelift/src/common.rs @@ -361,7 +361,7 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> { self.instance.subst_mir_and_normalize_erasing_regions( self.tcx, ty::ParamEnv::reveal_all(), - ty::EarlyBinder(value), + ty::EarlyBinder::bind(value), ) } diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 869344ce92d..f9ea0f00456 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -758,7 +758,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { - return OperandRef::new_zst(self, place.layout); + return OperandRef::zero_sized(place.layout); } fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index ac04b61a306..bad87db4732 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -1,17 +1,15 @@ use gccjit::LValue; use gccjit::{RValue, Type, ToRValue}; -use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ BaseTypeMethods, ConstMethods, - DerivedTypeMethods, MiscMethods, StaticMethods, }; use rustc_middle::mir::Mutability; -use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; +use rustc_middle::ty::layout::{LayoutOf}; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_target::abi::{self, HasDataLayout, Pointer, Size}; +use rustc_target::abi::{self, HasDataLayout, Pointer}; use crate::consts::const_alloc_to_gcc; use crate::context::CodegenCx; @@ -240,28 +238,26 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { const_alloc_to_gcc(self, alloc) } - fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { - assert_eq!(alloc.inner().align, layout.align.abi); - let ty = self.type_ptr_to(layout.gcc_type(self)); - let value = - if layout.size == Size::ZERO { - let value = self.const_usize(alloc.inner().align.bytes()); - self.const_bitcast(value, ty) - } - else { - let init = const_alloc_to_gcc(self, alloc); - let base_addr = self.static_addr_of(init, alloc.inner().align, None); - - let array = self.const_bitcast(base_addr, self.type_i8p()); - let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None); - 
self.const_bitcast(value, ty) - }; - PlaceRef::new_sized(value, layout) - } - fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { self.context.new_cast(None, val, ty) } + + fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { + if value.get_type() == self.bool_type.make_pointer() { + if let Some(pointee) = typ.get_pointee() { + if pointee.dyncast_vector().is_some() { + panic!() + } + } + } + // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some + // SIMD builtins require a constant value. + self.bitcast_if_needed(value, typ) + } + + fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value { + self.context.new_array_access(None, base_addr, self.const_usize(offset.bytes())).get_address(None) + } } pub trait SignType<'gcc, 'tcx> { diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index 792ab8f890d..873f652e6f1 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -1,6 +1,6 @@ #[cfg(feature = "master")] use gccjit::FnAttribute; -use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type}; +use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; use rustc_middle::span_bug; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; @@ -16,21 +16,6 @@ use crate::context::CodegenCx; use crate::errors::InvalidMinimumAlignment; use crate::type_of::LayoutGccExt; -impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { - if value.get_type() == self.bool_type.make_pointer() { - if let Some(pointee) = typ.get_pointee() { - if pointee.dyncast_vector().is_some() { - panic!() - } - } - } - // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some - // SIMD builtins require a constant value. - self.bitcast_if_needed(value, typ) - } -} - fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) { // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 5df8c1a209d..30a3fe67b85 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -159,8 +159,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { match self.abi { Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) => false, - Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(), + Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. 
} => false, } } diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 651d644ebb6..6d00464e0a0 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -88,6 +88,9 @@ pub fn sanitize_attrs<'ll>( attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx)); } + if enabled.contains(SanitizerSet::SAFESTACK) { + attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx)); + } attrs } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 4d0bcd53d15..5968e70b1cc 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -486,7 +486,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { - return OperandRef::new_zst(self, place.layout); + return OperandRef::zero_sized(place.layout); } #[instrument(level = "trace", skip(bx))] diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 9127fba388b..a3910fef954 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -8,16 +8,15 @@ use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_ast::Mutability; -use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher}; use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::TyCtxt; use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType}; -use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size}; +use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer}; use rustc_target::spec::Target; use libc::{c_char, c_uint}; @@ -307,38 +306,24 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { const_alloc_to_llvm(self, alloc) } - fn from_const_alloc( - &self, - layout: TyAndLayout<'tcx>, - alloc: ConstAllocation<'tcx>, - offset: Size, - ) -> PlaceRef<'tcx, &'ll Value> { - let alloc_align = alloc.inner().align; - assert_eq!(alloc_align, layout.align.abi); - let llty = self.type_ptr_to(layout.llvm_type(self)); - let llval = if layout.size == Size::ZERO { - let llval = self.const_usize(alloc_align.bytes()); - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - let init = const_alloc_to_llvm(self, alloc); - let base_addr = self.static_addr_of(init, alloc_align, None); - - let llval = unsafe { - llvm::LLVMRustConstInBoundsGEP2( - self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p()), - &self.const_usize(offset.bytes()), - 1, - ) - }; - self.const_bitcast(llval, llty) - }; - PlaceRef::new_sized(llval, layout) - } - fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { consts::ptrcast(val, ty) } + + fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.const_bitcast(val, ty) + } + + fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value { + unsafe { + llvm::LLVMRustConstInBoundsGEP2( + self.type_i8(), + self.const_bitcast(base_addr, self.type_i8p()), + &self.const_usize(offset.bytes()), + 1, + ) + } + } } /// Get the [LLVM 
type][Type] of a [`Value`]. diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs index 3fff112a020..64961baf272 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs @@ -93,7 +93,7 @@ fn make_mir_scope<'ll, 'tcx>( let callee = cx.tcx.subst_and_normalize_erasing_regions( instance.substs, ty::ParamEnv::reveal_all(), - ty::EarlyBinder(callee), + ty::EarlyBinder::bind(callee), ); let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty()); cx.dbg_scope_fn(callee, callee_fn_abi, None) diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index de93a64c0d6..6ef3418cc5f 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -196,6 +196,7 @@ pub enum AttributeKind { AllocSize = 37, AllocatedPointer = 38, AllocAlign = 39, + SanitizeSafeStack = 40, } /// LLVMIntPredicate diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index e264ce78f0d..a493c9c0548 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -198,8 +198,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.abi { Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) => false, - Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(), + Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false, } } diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index 8a00c42a0e8..f8bb9bf2bb5 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -23,7 +23,7 @@ use rustc_session::utils::NativeLibKind; use rustc_session::{filesearch, Session}; use rustc_span::symbol::Symbol; use rustc_target::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault}; -use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy}; +use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, Lld, PanicStrategy}; use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo}; use super::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; @@ -893,7 +893,7 @@ fn link_natively<'a>( linker_path: &linker_path, exit_status: prog.status, command: &cmd, - escaped_output: &escaped_output, + escaped_output, }; sess.diagnostic().emit_err(err); // If MSVC's `link.exe` was expected but the return code @@ -1188,6 +1188,9 @@ fn add_sanitizer_libraries(sess: &Session, crate_type: CrateType, linker: &mut d if sanitizer.contains(SanitizerSet::HWADDRESS) { link_sanitizer_runtime(sess, linker, "hwasan"); } + if sanitizer.contains(SanitizerSet::SAFESTACK) { + link_sanitizer_runtime(sess, linker, "safestack"); + } } fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) { @@ -1299,44 +1302,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { sess.emit_fatal(errors::LinkerFileStem); }); - - // Remove any version postfix. - let stem = stem - .rsplit_once('-') - .and_then(|(lhs, rhs)| rhs.chars().all(char::is_numeric).then_some(lhs)) - .unwrap_or(stem); - - // GCC/Clang can have an optional target prefix. 
- let flavor = if stem == "emcc" { - LinkerFlavor::EmCc - } else if stem == "gcc" - || stem.ends_with("-gcc") - || stem == "g++" - || stem.ends_with("-g++") - || stem == "clang" - || stem.ends_with("-clang") - || stem == "clang++" - || stem.ends_with("-clang++") - { - LinkerFlavor::from_cli(LinkerFlavorCli::Gcc, &sess.target) - } else if stem == "wasm-ld" || stem.ends_with("-wasm-ld") { - LinkerFlavor::WasmLld(Cc::No) - } else if stem == "ld" || stem.ends_with("-ld") { - LinkerFlavor::from_cli(LinkerFlavorCli::Ld, &sess.target) - } else if stem == "ld.lld" { - LinkerFlavor::Gnu(Cc::No, Lld::Yes) - } else if stem == "link" { - LinkerFlavor::Msvc(Lld::No) - } else if stem == "lld-link" { - LinkerFlavor::Msvc(Lld::Yes) - } else if stem == "lld" || stem == "rust-lld" { - let lld_flavor = sess.target.linker_flavor.lld_flavor(); - LinkerFlavor::from_cli(LinkerFlavorCli::Lld(lld_flavor), &sess.target) - } else { - // fall back to the value in the target spec - sess.target.linker_flavor - }; - + let flavor = sess.target.linker_flavor.with_linker_hints(stem); Some((linker, flavor)) } (None, None) => None, @@ -1346,7 +1312,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { // linker and linker flavor specified via command line have precedence over what the target // specification specifies let linker_flavor = - sess.opts.cg.linker_flavor.map(|flavor| LinkerFlavor::from_cli(flavor, &sess.target)); + sess.opts.cg.linker_flavor.map(|flavor| sess.target.linker_flavor.with_cli_hints(flavor)); if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), linker_flavor) { return ret; } diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index c323372bda4..10e9e5588f6 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -1800,7 +1800,7 @@ impl SharedEmitterMain { handler.emit_diagnostic(&mut d); } Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => { - let msg = msg.strip_prefix("error: ").unwrap_or(&msg); + let msg = msg.strip_prefix("error: ").unwrap_or(&msg).to_string(); let mut err = match level { Level::Error { lint: false } => sess.struct_err(msg).forget_guarantee(), diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 15c7847155d..242d209b684 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -295,7 +295,7 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let (base, info) = match bx.load_operand(src).val { OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)), OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None), - OperandValue::Ref(..) => bug!(), + OperandValue::Ref(..) 
| OperandValue::ZeroSized => bug!(), }; OperandValue::Pair(base, info).store(bx, dst); } diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index cf4893b8226..bf37ac69f2d 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -336,7 +336,7 @@ pub struct LinkingFailed<'a> { pub linker_path: &'a PathBuf, pub exit_status: ExitStatus, pub command: &'a Command, - pub escaped_output: &'a str, + pub escaped_output: String, } impl IntoDiagnostic<'_> for LinkingFailed<'_> { @@ -345,11 +345,13 @@ impl IntoDiagnostic<'_> for LinkingFailed<'_> { diag.set_arg("linker_path", format!("{}", self.linker_path.display())); diag.set_arg("exit_status", format!("{}", self.exit_status)); - diag.note(format!("{:?}", self.command)).note(self.escaped_output); + let contains_undefined_ref = self.escaped_output.contains("undefined reference to"); + + diag.note(format!("{:?}", self.command)).note(self.escaped_output.to_string()); // Trying to match an error from OS linkers // which by now we have no way to translate. - if self.escaped_output.contains("undefined reference to") { + if contains_undefined_ref { diag.note(fluent::codegen_ssa_extern_funcs_not_found) .note(fluent::codegen_ssa_specify_libraries_to_link) .note(fluent::codegen_ssa_use_cargo_directive); diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs index 835074806e9..22c1f05974d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs +++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs @@ -234,7 +234,6 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> | PlaceContext::NonMutatingUse( NonMutatingUseContext::Inspect | NonMutatingUseContext::SharedBorrow - | NonMutatingUseContext::UniqueBorrow | NonMutatingUseContext::ShallowBorrow | NonMutatingUseContext::AddressOf | NonMutatingUseContext::Projection, diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index d516ac4ebb7..e0cb26d3ba8 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1,5 +1,5 @@ use super::operand::OperandRef; -use super::operand::OperandValue::{Immediate, Pair, Ref}; +use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized}; use super::place::PlaceRef; use super::{CachedLlbb, FunctionCx, LocalRef}; @@ -427,6 +427,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { assert_eq!(align, op.layout.align.abi, "return place is unaligned!"); llval } + ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"), }; let ty = bx.cast_backend_type(cast_ty); let addr = bx.pointercast(llslot, bx.type_ptr_to(ty)); @@ -1256,7 +1257,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { MergingSucc::False } - mir::TerminatorKind::Drop { place, target, unwind } => { + mir::TerminatorKind::Drop { place, target, unwind, replace: _ } => { self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ()) } @@ -1386,6 +1387,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { (llval, align, true) } } + ZeroSized => match arg.mode { + PassMode::Indirect { .. } => { + // Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass + // a pointer for `repr(C)` structs even when empty, so get + // one from an `alloca` (which can be left uninitialized). 
+ let scratch = PlaceRef::alloca(bx, arg.layout); + (scratch.llval, scratch.align, true) + } + _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"), + }, }; if by_ref && !arg.is_indirect() { diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index bba2800fb05..4f79c6a3d82 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -352,6 +352,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.set_var_name(a, &(name.clone() + ".0")); bx.set_var_name(b, &(name.clone() + ".1")); } + OperandValue::ZeroSized => { + // These never have a value to talk about + } }, LocalRef::PendingOperand => {} } diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 1204c99e533..2809ec2deb5 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -111,7 +111,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.instance.subst_mir_and_normalize_erasing_regions( self.cx.tcx(), ty::ParamEnv::reveal_all(), - ty::EarlyBinder(value), + ty::EarlyBinder::bind(value), ) } } @@ -129,16 +129,13 @@ enum LocalRef<'tcx, V> { PendingOperand, } -impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> { - fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>( - bx: &mut Bx, - layout: TyAndLayout<'tcx>, - ) -> LocalRef<'tcx, V> { +impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand(layout: TyAndLayout<'tcx>) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. - LocalRef::Operand(OperandRef::new_zst(bx, layout)) + LocalRef::Operand(OperandRef::zero_sized(layout)) } else { LocalRef::PendingOperand } @@ -249,7 +246,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } } else { debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(&mut start_bx, layout) + LocalRef::new_operand(layout) } }; @@ -355,7 +352,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let local = |op| LocalRef::Operand(op); match arg.mode { PassMode::Ignore => { - return local(OperandRef::new_zst(bx, arg.layout)); + return local(OperandRef::zero_sized(arg.layout)); } PassMode::Direct(_) => { let llarg = bx.get_param(llarg_idx); diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 2301c3ef13e..31c293d7c29 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -8,10 +8,10 @@ use crate::traits::*; use crate::MemFlags; use rustc_middle::mir; -use rustc_middle::mir::interpret::{ConstValue, Pointer, Scalar}; +use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::Ty; -use rustc_target::abi::{Abi, Align, Size}; +use rustc_target::abi::{self, Abi, Align, Size}; use std::fmt; @@ -45,6 +45,14 @@ pub enum OperandValue<V> { /// as returned by [`LayoutTypeMethods::scalar_pair_element_backend_type`] /// with `immediate: true`. Pair(V, V), + /// A value taking no bytes, and which therefore needs no LLVM value at all. + /// + /// If you ever need a `V` to pass to something, get a fresh poison value + /// from [`ConstMethods::const_poison`]. 
+ /// + /// An `OperandValue` *must* be this variant for any type for which + /// `is_zst` on its `Layout` returns `true`. + ZeroSized, } /// An `OperandRef` is an "SSA" reference to a Rust value, along with @@ -71,15 +79,9 @@ impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> { } impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>( - bx: &mut Bx, - layout: TyAndLayout<'tcx>, - ) -> OperandRef<'tcx, V> { + pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> { assert!(layout.is_zst()); - OperandRef { - val: OperandValue::Immediate(bx.const_poison(bx.immediate_backend_type(layout))), - layout, - } + OperandRef { val: OperandValue::ZeroSized, layout } } pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>( @@ -97,7 +99,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout)); OperandValue::Immediate(llval) } - ConstValue::ZeroSized => return OperandRef::new_zst(bx, layout), + ConstValue::ZeroSized => return OperandRef::zero_sized(layout), ConstValue::Slice { data, start, end } => { let Abi::ScalarPair(a_scalar, _) = layout.abi else { bug!("from_const: invalid ScalarPair layout: {:#?}", layout); @@ -115,13 +117,82 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { OperandValue::Pair(a_llval, b_llval) } ConstValue::ByRef { alloc, offset } => { - return bx.load_operand(bx.from_const_alloc(layout, alloc, offset)); + return Self::from_const_alloc(bx, layout, alloc, offset); } }; OperandRef { val, layout } } + fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>( + bx: &mut Bx, + layout: TyAndLayout<'tcx>, + alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>, + offset: Size, + ) -> Self { + let alloc_align = alloc.inner().align; + assert_eq!(alloc_align, layout.align.abi); + let ty = bx.type_ptr_to(bx.cx().backend_type(layout)); + + let read_scalar = |start, size, s: abi::Scalar, ty| { + let val = alloc + .0 + .read_scalar( + bx, + alloc_range(start, size), + /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)), + ) + .unwrap(); + bx.scalar_to_backend(val, s, ty) + }; + + // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point. + // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned -- + // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the + // case where some of the bytes are initialized and others are not. So, we need an extra + // check that walks over the type of `mplace` to make sure it is truly correct to treat this + // like a `Scalar` (or `ScalarPair`). + match layout.abi { + Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => { + let size = s.size(bx); + assert_eq!(size, layout.size, "abi::Scalar size does not match layout size"); + let val = read_scalar(Size::ZERO, size, s, ty); + OperandRef { val: OperandValue::Immediate(val), layout } + } + Abi::ScalarPair( + a @ abi::Scalar::Initialized { .. }, + b @ abi::Scalar::Initialized { .. 
}, + ) => { + let (a_size, b_size) = (a.size(bx), b.size(bx)); + let b_offset = a_size.align_to(b.align(bx).abi); + assert!(b_offset.bytes() > 0); + let a_val = read_scalar( + Size::ZERO, + a_size, + a, + bx.scalar_pair_element_backend_type(layout, 0, true), + ); + let b_val = read_scalar( + b_offset, + b_size, + b, + bx.scalar_pair_element_backend_type(layout, 1, true), + ); + OperandRef { val: OperandValue::Pair(a_val, b_val), layout } + } + _ if layout.is_zst() => OperandRef::zero_sized(layout), + _ => { + // Neither a scalar nor scalar pair. Load from a place + let init = bx.const_data_from_alloc(alloc); + let base_addr = bx.static_addr_of(init, alloc_align, None); + + let llval = bx.const_ptr_byte_offset(base_addr, offset); + let llval = bx.const_bitcast(llval, ty); + bx.load_operand(PlaceRef::new_sized(llval, layout)) + } + } + } + /// Asserts that this operand refers to a scalar and returns /// a reference to its value. pub fn immediate(self) -> V { @@ -147,6 +218,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { OperandValue::Immediate(llptr) => (llptr, None), OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)), OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self), + OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self), }; let layout = cx.layout_of(projected_ty); PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi } @@ -204,9 +276,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let mut val = match (self.val, self.layout.abi) { // If the field is ZST, it has no data. - _ if field.is_zst() => { - return OperandRef::new_zst(bx, field); - } + _ if field.is_zst() => OperandValue::ZeroSized, // Newtype of a scalar, scalar pair or vector. (OperandValue::Immediate(_) | OperandValue::Pair(..), _) @@ -237,6 +307,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { }; match (&mut val, field.abi) { + (OperandValue::ZeroSized, _) => {} ( OperandValue::Immediate(llval), Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. }, @@ -290,8 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { impl<'a, 'tcx, V: CodegenObject> OperandValue<V> { /// Returns an `OperandValue` that's generally UB to use in any way. /// - /// Depending on the `layout`, returns an `Immediate` or `Pair` containing - /// poison value(s), or a `Ref` containing a poison pointer. + /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or + /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer. /// /// Supports sized types only. pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>( @@ -299,7 +370,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> { layout: TyAndLayout<'tcx>, ) -> OperandValue<V> { assert!(layout.is_sized()); - if bx.cx().is_backend_immediate(layout) { + if layout.is_zst() { + OperandValue::ZeroSized + } else if bx.cx().is_backend_immediate(layout) { let ibty = bx.cx().immediate_backend_type(layout); OperandValue::Immediate(bx.const_poison(ibty)) } else if bx.cx().is_backend_scalar_pair(layout) { @@ -352,12 +425,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> { flags: MemFlags, ) { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); - // Avoid generating stores of zero-sized values, because the only way to have a zero-sized - // value is through `undef`, and store itself is useless. 
- if dest.layout.is_zst() { - return; - } match self { + OperandValue::ZeroSized => { + // Avoid generating stores of zero-sized values, because the only way to have a zero-sized + // value is through `undef`/`poison`, and the store itself is useless. + } OperandValue::Ref(r, None, source_align) => { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. @@ -458,7 +530,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // checks in `codegen_consume` and `extract_field`. let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - o = OperandRef::new_zst(bx, elem); + o = OperandRef::zero_sized(elem); } else { return None; } @@ -492,7 +564,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx, layout); + return OperandRef::zero_sized(layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) { diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 6e7065713b8..5241a5aee00 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -70,6 +70,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue"); } + OperandValue::ZeroSized => { + bug!("unsized coercion on a ZST rvalue"); + } } } @@ -165,11 +168,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } match src.val { - OperandValue::Ref(..) => { + OperandValue::Ref(..) | OperandValue::ZeroSized => { span_bug!( self.mir.span, "Operand path should have handled transmute \ - from `Ref` {src:?} to place {dst:?}" + from {src:?} to place {dst:?}" ); } OperandValue::Immediate(..) | OperandValue::Pair(..) => { @@ -220,17 +223,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align); Some(bx.load_operand(fake_place).val) } + OperandValue::ZeroSized => { + let OperandValueKind::ZeroSized = operand_kind else { + bug!("Found {operand_kind:?} for operand {operand:?}"); + }; + if let OperandValueKind::ZeroSized = cast_kind { + Some(OperandValue::ZeroSized) + } else { + None + } + } OperandValue::Immediate(imm) => { let OperandValueKind::Immediate(in_scalar) = operand_kind else { bug!("Found {operand_kind:?} for operand {operand:?}"); }; - if let OperandValueKind::Immediate(out_scalar) = cast_kind { - match (in_scalar, out_scalar) { - (ScalarOrZst::Zst, ScalarOrZst::Zst) => { - Some(OperandRef::new_zst(bx, cast).val) - } - (ScalarOrZst::Scalar(in_scalar), ScalarOrZst::Scalar(out_scalar)) - if in_scalar.size(self.cx) == out_scalar.size(self.cx) => + if let OperandValueKind::Immediate(out_scalar) = cast_kind + && in_scalar.size(self.cx) == out_scalar.size(self.cx) { let operand_bty = bx.backend_type(operand.layout); let cast_bty = bx.backend_type(cast); @@ -242,9 +250,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { out_scalar, cast_bty, ))) - } - _ => None, - } } else { None } @@ -457,6 +462,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValue::Ref(..) 
=> { bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand); } + OperandValue::ZeroSized => { + bug!("zero-sized operand {:?} in `codegen_rvalue_operand`", operand); + } }; let (lldata, llextra) = base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra); @@ -490,6 +498,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValue::Ref(_, _, _) => todo!(), OperandValue::Immediate(v) => (v, None), OperandValue::Pair(v, l) => (v, Some(l)), + OperandValue::ZeroSized => bug!("ZST -- which is not PointerLike -- in DynStar"), }; let (lldata, llextra) = base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra); @@ -668,11 +677,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::NullaryOp(ref null_op, ty) => { let ty = self.monomorphize(ty); - assert!(bx.cx().type_is_sized(ty)); let layout = bx.cx().layout_of(ty); let val = match null_op { - mir::NullOp::SizeOf => layout.size.bytes(), - mir::NullOp::AlignOf => layout.align.abi.bytes(), + mir::NullOp::SizeOf => { + assert!(bx.cx().type_is_sized(ty)); + layout.size.bytes() + } + mir::NullOp::AlignOf => { + assert!(bx.cx().type_is_sized(ty)); + layout.align.abi.bytes() + } mir::NullOp::OffsetOf(fields) => { layout.offset_of_subfield(bx.cx(), fields.iter().map(|f| f.index())).bytes() } @@ -713,7 +727,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.cx.tcx()); - OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty))) + OperandRef::zero_sized(self.cx.layout_of(self.monomorphize(ty))) } mir::Rvalue::ShallowInitBox(ref operand, content_ty) => { let operand = self.codegen_operand(bx, operand); @@ -931,6 +945,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Can always load from a pointer as needed (OperandValueKind::Ref, _) => true, + // ZST-to-ZST is the easiest thing ever + (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true, + + // But if only one of them is a ZST the sizes can't match + (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false, + // Need to generate an `alloc` to get a pointer from an immediate (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false, @@ -974,12 +994,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// Gets which variant of [`OperandValue`] is expected for a particular type. fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind { - if self.cx.is_backend_immediate(layout) { + if layout.is_zst() { + OperandValueKind::ZeroSized + } else if self.cx.is_backend_immediate(layout) { debug_assert!(!self.cx.is_backend_scalar_pair(layout)); OperandValueKind::Immediate(match layout.abi { - abi::Abi::Scalar(s) => ScalarOrZst::Scalar(s), - abi::Abi::Vector { element, .. } => ScalarOrZst::Scalar(element), - _ if layout.is_zst() => ScalarOrZst::Zst, + abi::Abi::Scalar(s) => s, + abi::Abi::Vector { element, .. 
} => element, x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"), }) } else if self.cx.is_backend_scalar_pair(layout) { @@ -1002,21 +1023,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { #[derive(Debug, Copy, Clone)] enum OperandValueKind { Ref, - Immediate(ScalarOrZst), + Immediate(abi::Scalar), Pair(abi::Scalar, abi::Scalar), -} - -#[derive(Debug, Copy, Clone)] -enum ScalarOrZst { - Zst, - Scalar(abi::Scalar), -} - -impl ScalarOrZst { - pub fn size(self, cx: &impl abi::HasDataLayout) -> abi::Size { - match self { - ScalarOrZst::Zst => abi::Size::ZERO, - ScalarOrZst::Scalar(s) => s.size(cx), - } - } + ZeroSized, } diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index 61906302779..dc2fc396480 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -1,8 +1,6 @@ use super::BackendTypes; -use crate::mir::place::PlaceRef; use rustc_middle::mir::interpret::{ConstAllocation, Scalar}; -use rustc_middle::ty::layout::TyAndLayout; -use rustc_target::abi::{self, Size}; +use rustc_target::abi; pub trait ConstMethods<'tcx>: BackendTypes { // Constant constructors @@ -30,12 +28,8 @@ pub trait ConstMethods<'tcx>: BackendTypes { fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value; fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value; - fn from_const_alloc( - &self, - layout: TyAndLayout<'tcx>, - alloc: ConstAllocation<'tcx>, - offset: Size, - ) -> PlaceRef<'tcx, Self::Value>; fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value; } diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs index 05be45fef13..b59ca8e2070 100644 --- a/compiler/rustc_const_eval/src/const_eval/mod.rs +++ b/compiler/rustc_const_eval/src/const_eval/mod.rs @@ -2,10 +2,8 @@ use crate::errors::MaxNumNodesInConstErr; use crate::interpret::{ - intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta, - Scalar, + intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, Scalar, }; -use rustc_hir::Mutability; use rustc_middle::mir; use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId}; use rustc_middle::ty::{self, TyCtxt}; @@ -131,38 +129,3 @@ pub(crate) fn try_destructure_mir_constant<'tcx>( Ok(mir::DestructuredConstant { variant, fields }) } - -#[instrument(skip(tcx), level = "debug")] -pub(crate) fn deref_mir_constant<'tcx>( - tcx: TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - val: mir::ConstantKind<'tcx>, -) -> mir::ConstantKind<'tcx> { - let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false); - let op = ecx.eval_mir_constant(&val, None, None).unwrap(); - let mplace = ecx.deref_operand(&op).unwrap(); - if let Some(alloc_id) = mplace.ptr.provenance { - assert_eq!( - tcx.global_alloc(alloc_id).unwrap_memory().0.0.mutability, - Mutability::Not, - "deref_mir_constant cannot be used with mutable allocations as \ - that could allow pattern matching to observe mutable statics", - ); - } - - let ty = match mplace.meta { - MemPlaceMeta::None => mplace.layout.ty, - // In case of unsized types, figure out the real type behind. 
- MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() { - ty::Str => bug!("there's no sized equivalent of a `str`"), - ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_target_usize(&tcx).unwrap()), - _ => bug!( - "type {} should not have metadata, but had {:?}", - mplace.layout.ty, - mplace.meta - ), - }, - }; - - mir::ConstantKind::Val(op_to_const(&ecx, &mplace.into()), ty) -} diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs index 7e94578003e..91ccdef7215 100644 --- a/compiler/rustc_const_eval/src/interpret/eval_context.rs +++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs @@ -497,7 +497,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .try_subst_mir_and_normalize_erasing_regions( *self.tcx, self.param_env, - ty::EarlyBinder(value), + ty::EarlyBinder::bind(value), ) .map_err(|_| err_inval!(TooGeneric)) } diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs index df387920010..586e8f063ee 100644 --- a/compiler/rustc_const_eval/src/interpret/terminator.rs +++ b/compiler/rustc_const_eval/src/interpret/terminator.rs @@ -114,7 +114,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } } - Drop { place, target, unwind } => { + Drop { place, target, unwind, replace: _ } => { let frame = self.frame(); let ty = place.ty(&frame.body.local_decls, *self.tcx).ty; let ty = self.subst_from_frame_and_normalize_erasing_regions(frame, ty)?; diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index c36282d5ed4..0c48d99915a 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -56,10 +56,6 @@ pub fn provide(providers: &mut Providers) { providers.valtree_to_const_val = |tcx, (ty, valtree)| { const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree) }; - providers.deref_mir_constant = |tcx, param_env_and_value| { - let (param_env, value) = param_env_and_value.into_parts(); - const_eval::deref_mir_constant(tcx, param_env, value) - }; providers.check_validity_requirement = |tcx, (init_kind, param_env_and_ty)| { util::check_validity_requirement(tcx, init_kind, param_env_and_ty) }; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs index 138bc3eb74a..57d939747aa 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs @@ -412,9 +412,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { BorrowKind::Shallow => { PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) } - BorrowKind::Unique => { - PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow) - } + BorrowKind::Unique => PlaceContext::MutatingUse(MutatingUseContext::Borrow), BorrowKind::Mut { .. 
} => { PlaceContext::MutatingUse(MutatingUseContext::Borrow) } diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs index 7ed70ba1e0f..40cbf14958e 100644 --- a/compiler/rustc_data_structures/src/sharded.rs +++ b/compiler/rustc_data_structures/src/sharded.rs @@ -1,4 +1,6 @@ use crate::fx::{FxHashMap, FxHasher}; +#[cfg(parallel_compiler)] +use crate::sync::is_dyn_thread_safe; use crate::sync::{CacheAligned, Lock, LockGuard}; use std::borrow::Borrow; use std::collections::hash_map::RawEntryMut; @@ -18,6 +20,11 @@ pub const SHARDS: usize = 1 << SHARD_BITS; /// An array of cache-line aligned inner locked structures with convenience methods. pub struct Sharded<T> { + /// This mask is used to ensure that accesses are inbounds of `shards`. + /// When dynamic thread safety is off, this field is set to 0 causing only + /// a single shard to be used for greater cache efficiency. + #[cfg(parallel_compiler)] + mask: usize, shards: [CacheAligned<Lock<T>>; SHARDS], } @@ -31,31 +38,54 @@ impl<T: Default> Default for Sharded<T> { impl<T> Sharded<T> { #[inline] pub fn new(mut value: impl FnMut() -> T) -> Self { - Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) } + Sharded { + #[cfg(parallel_compiler)] + mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 }, + shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))), + } + } + + #[inline(always)] + fn mask(&self) -> usize { + #[cfg(parallel_compiler)] + { + if SHARDS == 1 { 0 } else { self.mask } + } + #[cfg(not(parallel_compiler))] + { + 0 + } + } + + #[inline(always)] + fn count(&self) -> usize { + // `self.mask` is always one below the used shard count + self.mask() + 1 } /// The shard is selected by hashing `val` with `FxHasher`. #[inline] pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> { - if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) } + self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) }) } #[inline] pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> { - &self.shards[get_shard_index_by_hash(hash)].0 + self.get_shard_by_index(get_shard_hash(hash)) } #[inline] pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> { - &self.shards[i].0 + // SAFETY: The index get ANDed with the mask, ensuring it is always inbounds. + unsafe { &self.shards.get_unchecked(i & self.mask()).0 } } pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> { - (0..SHARDS).map(|i| self.shards[i].0.lock()).collect() + (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect() } pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> { - (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect() + (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect() } } @@ -136,11 +166,9 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 { /// `hash` can be computed with any hasher, so long as that hasher is used /// consistently for each `Sharded` instance. #[inline] -#[allow(clippy::modulo_one)] -pub fn get_shard_index_by_hash(hash: u64) -> usize { +fn get_shard_hash(hash: u64) -> usize { let hash_len = mem::size_of::<usize>(); // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits. 
// hashbrown also uses the lowest bits, so we can't use those - let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize; - bits % SHARDS + (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize } diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs index 6c3197d8ec2..25a08237346 100644 --- a/compiler/rustc_data_structures/src/sync.rs +++ b/compiler/rustc_data_structures/src/sync.rs @@ -139,9 +139,14 @@ cfg_if! { impl Atomic<bool> { pub fn fetch_or(&self, val: bool, _: Ordering) -> bool { - let result = self.0.get() | val; - self.0.set(val); - result + let old = self.0.get(); + self.0.set(val | old); + old + } + pub fn fetch_and(&self, val: bool, _: Ordering) -> bool { + let old = self.0.get(); + self.0.set(val & old); + old } } diff --git a/compiler/rustc_driver_impl/src/args.rs b/compiler/rustc_driver_impl/src/args.rs index a713affa099..eb92ccc17b2 100644 --- a/compiler/rustc_driver_impl/src/args.rs +++ b/compiler/rustc_driver_impl/src/args.rs @@ -18,6 +18,9 @@ fn arg_expand(arg: String) -> Result<Vec<String>, Error> { } } +/// **Note:** This function doesn't interpret argument 0 in any special way. +/// If this function is intended to be used with command line arguments, +/// `argv[0]` must be removed prior to calling it manually. pub fn arg_expand_all(at_args: &[String]) -> Vec<String> { let mut args = Vec::new(); for arg in at_args { diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 40aa69e5a41..14888cf4d75 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -250,6 +250,16 @@ fn run_compiler( Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>, >, ) -> interface::Result<()> { + // Throw away the first argument, the name of the binary. + // In case of at_args being empty, as might be the case by + // passing empty argument array to execve under some platforms, + // just use an empty slice. + // + // This situation was possible before due to arg_expand_all being + // called before removing the argument, enabling a crash by calling + // the compiler with @empty_file as argv[0] and no more arguments. + let at_args = at_args.get(1..).unwrap_or_default(); + let args = args::arg_expand_all(at_args); let Some(matches) = handle_options(&args) else { return Ok(()) }; @@ -1074,9 +1084,6 @@ fn print_flag_list<T>( /// So with all that in mind, the comments below have some more detail about the /// contortions done here to get things to work out correctly. pub fn handle_options(args: &[String]) -> Option<getopts::Matches> { - // Throw away the first argument, the name of the binary - let args = &args[1..]; - if args.is_empty() { // user did not write `-v` nor `-Z unstable-options`, so do not // include that extra information. 
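The two `rustc_driver_impl` hunks above replace the manual `&args[1..]` slice with `at_args.get(1..).unwrap_or_default()`. A minimal standalone sketch (plain Rust, not rustc code) of why that pattern is safer when argv can legitimately be empty:

```
// Sketch: skipping argv[0] without panicking on an empty argument list,
// e.g. a process started via `execve` with an empty argv.
fn skip_binary_name(args: &[String]) -> &[String] {
    // `get(1..)` yields `None` instead of panicking when `args` is empty;
    // `unwrap_or_default()` turns that `None` into an empty slice.
    args.get(1..).unwrap_or_default()
}

fn main() {
    let empty: Vec<String> = Vec::new();
    let normal = vec!["rustc".to_string(), "main.rs".to_string()];

    assert!(skip_binary_name(&empty).is_empty());
    assert_eq!(skip_binary_name(&normal), &["main.rs".to_string()]);

    // Indexing would panic on the empty case:
    // let _ = &empty[1..]; // range start index 1 out of range for slice of length 0
}
```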
@@ -1251,7 +1258,7 @@ pub fn install_ice_hook(bug_report_url: &'static str, extra_info: fn(&Handler)) if let Some(msg) = info.payload().downcast_ref::<String>() { if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)") { // the error code is already going to be reported when the panic unwinds up the stack - let _ = early_error_no_abort(ErrorOutputType::default(), msg.as_str()); + let _ = early_error_no_abort(ErrorOutputType::default(), msg.clone()); return; } }; diff --git a/compiler/rustc_error_codes/src/error_codes/E0133.md b/compiler/rustc_error_codes/src/error_codes/E0133.md index 1adbcc31356..8ca3f03ce15 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0133.md +++ b/compiler/rustc_error_codes/src/error_codes/E0133.md @@ -1,4 +1,4 @@ -Unsafe code was used outside of an unsafe function or block. +Unsafe code was used outside of an unsafe block. Erroneous code example: @@ -30,4 +30,21 @@ fn main() { See the [unsafe section][unsafe-section] of the Book for more details. +#### Unsafe code in functions + +Unsafe code is currently accepted in unsafe functions, but that is being phased +out in favor of requiring unsafe blocks here too. + +``` +unsafe fn f() { return; } + +unsafe fn g() { + f(); // Is accepted, but no longer recommended + unsafe { f(); } // Recommended way to write this +} +``` + +Linting against this is controlled via the `unsafe_op_in_unsafe_fn` lint, which +is `allow` by default but will be upgraded to `warn` in a future edition. + [unsafe-section]: https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs index 0accb4ab96f..2a97c4ff7ae 100644 --- a/compiler/rustc_error_messages/src/lib.rs +++ b/compiler/rustc_error_messages/src/lib.rs @@ -263,8 +263,7 @@ type FluentId = Cow<'static, str>; #[rustc_diagnostic_item = "SubdiagnosticMessage"] pub enum SubdiagnosticMessage { /// Non-translatable diagnostic message. - // FIXME(davidtwco): can a `Cow<'static, str>` be used here? - Str(String), + Str(Cow<'static, str>), /// Translatable message which has already been translated eagerly. /// /// Some diagnostics have repeated subdiagnostics where the same interpolated variables would @@ -275,8 +274,7 @@ pub enum SubdiagnosticMessage { /// incorrect diagnostics. Eager translation results in translation for a subdiagnostic /// happening immediately after the subdiagnostic derive's logic has been run. This variant /// stores messages which have been translated eagerly. - // FIXME(#100717): can a `Cow<'static, str>` be used here? - Eager(String), + Eager(Cow<'static, str>), /// Identifier of a Fluent message. Instances of this variant are generated by the /// `Subdiagnostic` derive. 
FluentIdentifier(FluentId), @@ -290,17 +288,17 @@ pub enum SubdiagnosticMessage { impl From<String> for SubdiagnosticMessage { fn from(s: String) -> Self { - SubdiagnosticMessage::Str(s) + SubdiagnosticMessage::Str(Cow::Owned(s)) } } -impl<'a> From<&'a str> for SubdiagnosticMessage { - fn from(s: &'a str) -> Self { - SubdiagnosticMessage::Str(s.to_string()) +impl From<&'static str> for SubdiagnosticMessage { + fn from(s: &'static str) -> Self { + SubdiagnosticMessage::Str(Cow::Borrowed(s)) } } impl From<Cow<'static, str>> for SubdiagnosticMessage { fn from(s: Cow<'static, str>) -> Self { - SubdiagnosticMessage::Str(s.to_string()) + SubdiagnosticMessage::Str(s) } } @@ -312,8 +310,7 @@ impl From<Cow<'static, str>> for SubdiagnosticMessage { #[rustc_diagnostic_item = "DiagnosticMessage"] pub enum DiagnosticMessage { /// Non-translatable diagnostic message. - // FIXME(#100717): can a `Cow<'static, str>` be used here? - Str(String), + Str(Cow<'static, str>), /// Translatable message which has already been translated eagerly. /// /// Some diagnostics have repeated subdiagnostics where the same interpolated variables would @@ -324,8 +321,7 @@ pub enum DiagnosticMessage { /// incorrect diagnostics. Eager translation results in translation for a subdiagnostic /// happening immediately after the subdiagnostic derive's logic has been run. This variant /// stores messages which have been translated eagerly. - // FIXME(#100717): can a `Cow<'static, str>` be used here? - Eager(String), + Eager(Cow<'static, str>), /// Identifier for a Fluent message (with optional attribute) corresponding to the diagnostic /// message. /// @@ -363,17 +359,17 @@ impl DiagnosticMessage { impl From<String> for DiagnosticMessage { fn from(s: String) -> Self { - DiagnosticMessage::Str(s) + DiagnosticMessage::Str(Cow::Owned(s)) } } -impl<'a> From<&'a str> for DiagnosticMessage { - fn from(s: &'a str) -> Self { - DiagnosticMessage::Str(s.to_string()) +impl From<&'static str> for DiagnosticMessage { + fn from(s: &'static str) -> Self { + DiagnosticMessage::Str(Cow::Borrowed(s)) } } impl From<Cow<'static, str>> for DiagnosticMessage { fn from(s: Cow<'static, str>) -> Self { - DiagnosticMessage::Str(s.to_string()) + DiagnosticMessage::Str(s) } } diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 29c692128bc..488f2d67ee5 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -352,14 +352,9 @@ impl Diagnostic { /// Labels all the given spans with the provided label. /// See [`Self::span_label()`] for more information. 
- pub fn span_labels( - &mut self, - spans: impl IntoIterator<Item = Span>, - label: impl AsRef<str>, - ) -> &mut Self { - let label = label.as_ref(); + pub fn span_labels(&mut self, spans: impl IntoIterator<Item = Span>, label: &str) -> &mut Self { for span in spans { - self.span_label(span, label); + self.span_label(span, label.to_string()); } self } @@ -394,17 +389,18 @@ impl Diagnostic { expected: DiagnosticStyledString, found: DiagnosticStyledString, ) -> &mut Self { - let mut msg: Vec<_> = vec![("required when trying to coerce from type `", Style::NoStyle)]; + let mut msg: Vec<_> = + vec![(Cow::from("required when trying to coerce from type `"), Style::NoStyle)]; msg.extend(expected.0.iter().map(|x| match *x { - StringPart::Normal(ref s) => (s.as_str(), Style::NoStyle), - StringPart::Highlighted(ref s) => (s.as_str(), Style::Highlight), + StringPart::Normal(ref s) => (Cow::from(s.clone()), Style::NoStyle), + StringPart::Highlighted(ref s) => (Cow::from(s.clone()), Style::Highlight), })); - msg.push(("` to type '", Style::NoStyle)); + msg.push((Cow::from("` to type '"), Style::NoStyle)); msg.extend(found.0.iter().map(|x| match *x { - StringPart::Normal(ref s) => (s.as_str(), Style::NoStyle), - StringPart::Highlighted(ref s) => (s.as_str(), Style::Highlight), + StringPart::Normal(ref s) => (Cow::from(s.clone()), Style::NoStyle), + StringPart::Highlighted(ref s) => (Cow::from(s.clone()), Style::Highlight), })); - msg.push(("`", Style::NoStyle)); + msg.push((Cow::from("`"), Style::NoStyle)); // For now, just attach these as notes self.highlighted_note(msg); diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs index db97d96fccd..7d9d0c76450 100644 --- a/compiler/rustc_errors/src/diagnostic_builder.rs +++ b/compiler/rustc_errors/src/diagnostic_builder.rs @@ -558,7 +558,7 @@ impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> { } // Take the `Diagnostic` by replacing it with a dummy. - let dummy = Diagnostic::new(Level::Allow, DiagnosticMessage::Str("".to_string())); + let dummy = Diagnostic::new(Level::Allow, DiagnosticMessage::from("")); let diagnostic = std::mem::replace(&mut *self.inner.diagnostic, dummy); // Disable the ICE on `Drop`. 
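The `rustc_error_messages` hunks above switch `SubdiagnosticMessage::Str`/`Eager` and `DiagnosticMessage::Str`/`Eager` from `String` to `Cow<'static, str>`. A small self-contained sketch (using a hypothetical `Message` enum, not the real rustc types) of the conversion pattern and of why the blanket `&'a str` impl has to become `&'static str`:

```
use std::borrow::Cow;

// Hypothetical stand-in for the diagnostic message enums above.
#[derive(Debug)]
enum Message {
    Str(Cow<'static, str>),
}

impl From<String> for Message {
    fn from(s: String) -> Self {
        Message::Str(Cow::Owned(s)) // formatted messages still allocate
    }
}

// Only `&'static str` can be stored borrowed: a `&'a str` with an arbitrary
// lifetime would have to be copied, which is why the old `From<&'a str>`
// impls are narrowed in the hunks above.
impl From<&'static str> for Message {
    fn from(s: &'static str) -> Self {
        Message::Str(Cow::Borrowed(s)) // string literals no longer allocate
    }
}

fn main() {
    let fixed = Message::from("the following error was constructed but not emitted");
    let formatted = Message::from(format!("{} warnings emitted", 3));
    println!("{fixed:?}\n{formatted:?}");
}
```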
@@ -627,7 +627,7 @@ impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> { pub fn span_labels( &mut self, spans: impl IntoIterator<Item = Span>, - label: impl AsRef<str>, + label: &str, ) -> &mut Self); forward!(pub fn note_expected_found( @@ -781,8 +781,8 @@ impl Drop for DiagnosticBuilderInner<'_> { if !panicking() { handler.emit_diagnostic(&mut Diagnostic::new( Level::Bug, - DiagnosticMessage::Str( - "the following error was constructed but not emitted".to_string(), + DiagnosticMessage::from( + "the following error was constructed but not emitted", ), )); handler.emit_diagnostic(&mut self.diagnostic); diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index e8cd7eaa60f..d8c997b49a1 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -367,7 +367,7 @@ pub trait Emitter: Translate { children.push(SubDiagnostic { level: Level::Note, - message: vec![(DiagnosticMessage::Str(msg), Style::NoStyle)], + message: vec![(DiagnosticMessage::from(msg), Style::NoStyle)], span: MultiSpan::new(), render_span: None, }); diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index 3dec0d9299c..6c5f3e62454 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -628,7 +628,7 @@ impl Handler { message: DiagnosticMessage, args: impl Iterator<Item = DiagnosticArg<'a, 'static>>, ) -> SubdiagnosticMessage { - SubdiagnosticMessage::Eager(self.eagerly_translate_to_string(message, args)) + SubdiagnosticMessage::Eager(Cow::from(self.eagerly_translate_to_string(message, args))) } /// Translate `message` eagerly with `args` to `String`. @@ -1450,14 +1450,14 @@ impl HandlerInner { self.emit_stashed_diagnostics(); let warnings = match self.deduplicated_warn_count { - 0 => String::new(), - 1 => "1 warning emitted".to_string(), - count => format!("{count} warnings emitted"), + 0 => Cow::from(""), + 1 => Cow::from("1 warning emitted"), + count => Cow::from(format!("{count} warnings emitted")), }; let errors = match self.deduplicated_err_count { - 0 => String::new(), - 1 => "aborting due to previous error".to_string(), - count => format!("aborting due to {count} previous errors"), + 0 => Cow::from(""), + 1 => Cow::from("aborting due to previous error"), + count => Cow::from(format!("aborting due to {count} previous errors")), }; if self.treat_err_as_bug() { return; diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index 4671adccc54..0d43b30474b 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -1154,7 +1154,7 @@ impl<'a> ExtCtxt<'a> { // Fixme: does this result in errors? self.expansions.clear(); } - pub fn bug(&self, msg: &str) -> ! { + pub fn bug(&self, msg: &'static str) -> ! { self.sess.parse_sess.span_diagnostic.bug(msg); } pub fn trace_macros(&self) -> bool { @@ -1224,7 +1224,7 @@ pub fn resolve_path( pub fn expr_to_spanned_string<'a>( cx: &'a mut ExtCtxt<'_>, expr: P<ast::Expr>, - err_msg: &str, + err_msg: &'static str, ) -> Result<(Symbol, ast::StrStyle, Span), Option<(DiagnosticBuilder<'a, ErrorGuaranteed>, bool)>> { // Perform eager expansion on the expression. // We want to be able to handle e.g., `concat!("foo", "bar")`. 
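Several call sites above, such as the `warnings emitted` summary strings, now build a `Cow<'static, str>` directly so only the count-dependent wording allocates. A rough sketch with a hypothetical `warning_summary` helper (not the real handler code) showing how the borrowed/owned split plays out:

```
use std::borrow::Cow;

// Hypothetical helper mirroring the summary strings above: fixed wordings
// stay borrowed (no allocation); only the count-dependent message is owned.
fn warning_summary(count: usize) -> Cow<'static, str> {
    match count {
        0 => Cow::from(""),
        1 => Cow::from("1 warning emitted"),
        n => Cow::from(format!("{n} warnings emitted")),
    }
}

fn main() {
    assert!(matches!(warning_summary(1), Cow::Borrowed(_)));
    assert!(matches!(warning_summary(7), Cow::Owned(_)));
    println!("{}", warning_summary(7));
}
```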
@@ -1262,7 +1262,7 @@ pub fn expr_to_spanned_string<'a>( pub fn expr_to_string( cx: &mut ExtCtxt<'_>, expr: P<ast::Expr>, - err_msg: &str, + err_msg: &'static str, ) -> Option<(Symbol, ast::StrStyle)> { expr_to_spanned_string(cx, expr, err_msg) .map_err(|err| { diff --git a/compiler/rustc_expand/src/mbe/diagnostics.rs b/compiler/rustc_expand/src/mbe/diagnostics.rs index cb8b4899e48..3593bed2d02 100644 --- a/compiler/rustc_expand/src/mbe/diagnostics.rs +++ b/compiler/rustc_expand/src/mbe/diagnostics.rs @@ -170,7 +170,7 @@ impl<'a, 'cx, 'matcher> Tracker<'matcher> for CollectTrackerAndEmitter<'a, 'cx, } Error(err_sp, msg) => { let span = err_sp.substitute_dummy(self.root_span); - self.cx.struct_span_err(span, msg.as_str()).emit(); + self.cx.struct_span_err(span, msg.clone()).emit(); self.result = Some(DummyResult::any(span)); } ErrorReported(_) => self.result = Some(DummyResult::any(self.root_span)), @@ -222,7 +222,7 @@ pub(super) fn emit_frag_parse_err( { let msg = &e.message[0]; e.message[0] = ( - DiagnosticMessage::Str(format!( + DiagnosticMessage::from(format!( "macro expansion ends with an incomplete expression: {}", message.replace(", found `<eof>`", ""), )), @@ -313,9 +313,9 @@ pub(super) fn annotate_doc_comment(err: &mut Diagnostic, sm: &SourceMap, span: S /// Generates an appropriate parsing failure message. For EOF, this is "unexpected end...". For /// other tokens, this is "unexpected token...". -pub(super) fn parse_failure_msg(tok: &Token) -> String { +pub(super) fn parse_failure_msg(tok: &Token) -> Cow<'static, str> { match tok.kind { - token::Eof => "unexpected end of macro invocation".to_string(), - _ => format!("no rules expected the token `{}`", pprust::token_to_string(tok),), + token::Eof => Cow::from("unexpected end of macro invocation"), + _ => Cow::from(format!("no rules expected the token `{}`", pprust::token_to_string(tok))), } } diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs index 5b2e4d15dfe..0170d52e82a 100644 --- a/compiler/rustc_feature/src/accepted.rs +++ b/compiler/rustc_feature/src/accepted.rs @@ -131,7 +131,7 @@ declare_features! ( /// Allows `crate` in paths. (accepted, crate_in_paths, "1.30.0", Some(45477), None), /// Allows using `#[debugger_visualizer]` attribute. - (accepted, debugger_visualizer, "CURRENT_RUSTC_VERSION", Some(95939), None), + (accepted, debugger_visualizer, "1.71.0", Some(95939), None), /// Allows rustc to inject a default alloc_error_handler (accepted, default_alloc_error_handler, "1.68.0", Some(66741), None), /// Allows using assigning a default type to type parameters in algebraic data type definitions. @@ -281,7 +281,7 @@ declare_features! ( /// Allows use of the postfix `?` operator in expressions. (accepted, question_mark, "1.13.0", Some(31436), None), /// Allows the use of raw-dylibs (RFC 2627). - (accepted, raw_dylib, "CURRENT_RUSTC_VERSION", Some(58713), None), + (accepted, raw_dylib, "1.71.0", Some(58713), None), /// Allows keywords to be escaped for use as identifiers. (accepted, raw_identifiers, "1.30.0", Some(48589), None), /// Allows relaxing the coherence rules such that diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs index 57e55752027..dde9890dfa5 100644 --- a/compiler/rustc_feature/src/active.rs +++ b/compiler/rustc_feature/src/active.rs @@ -165,7 +165,7 @@ declare_features! ( /// Allows the `multiple_supertrait_upcastable` lint. (active, multiple_supertrait_upcastable, "1.69.0", None, None), /// Allow negative trait bounds. 
This is an internal-only feature for testing the trait solver! - (incomplete, negative_bounds, "CURRENT_RUSTC_VERSION", None, None), + (incomplete, negative_bounds, "1.71.0", None, None), /// Allows using `#[omit_gdb_pretty_printer_section]`. (active, omit_gdb_pretty_printer_section, "1.5.0", None, None), /// Allows using `#[prelude_import]` on glob `use` items. @@ -314,15 +314,15 @@ declare_features! ( /// Allows async functions to be declared, implemented, and used in traits. (active, async_fn_in_trait, "1.66.0", Some(91611), None), /// Allows builtin # foo() syntax - (active, builtin_syntax, "CURRENT_RUSTC_VERSION", Some(110680), None), + (active, builtin_syntax, "1.71.0", Some(110680), None), /// Allows `c"foo"` literals. - (active, c_str_literals, "CURRENT_RUSTC_VERSION", Some(105723), None), + (active, c_str_literals, "1.71.0", Some(105723), None), /// Treat `extern "C"` function as nounwind. (active, c_unwind, "1.52.0", Some(74990), None), /// Allows using C-variadics. (active, c_variadic, "1.34.0", Some(44930), None), /// Allows the use of `#[cfg(overflow_checks)` to check if integer overflow behaviour. - (active, cfg_overflow_checks, "CURRENT_RUSTC_VERSION", Some(111466), None), + (active, cfg_overflow_checks, "1.71.0", Some(111466), None), /// Allows the use of `#[cfg(sanitize = "option")]`; set when -Zsanitizer is used. (active, cfg_sanitize, "1.41.0", Some(39699), None), /// Allows `cfg(target_abi = "...")`. @@ -338,7 +338,7 @@ declare_features! ( /// Allow conditional compilation depending on rust version (active, cfg_version, "1.45.0", Some(64796), None), /// Allows to use the `#[cfi_encoding = ""]` attribute. - (active, cfi_encoding, "CURRENT_RUSTC_VERSION", Some(89653), None), + (active, cfi_encoding, "1.71.0", Some(89653), None), /// Allows `for<...>` on closures and generators. (active, closure_lifetime_binder, "1.64.0", Some(97362), None), /// Allows `#[track_caller]` on closures and generators. diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs index 5fb06cf9465..3d78ea9aa9b 100644 --- a/compiler/rustc_hir_analysis/src/astconv/mod.rs +++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs @@ -239,7 +239,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { var: ty::BoundVar::from_u32(index), kind: ty::BrNamed(def_id, name), }; - tcx.mk_re_late_bound(debruijn, br) + ty::Region::new_late_bound(tcx, debruijn, br) } Some(rbv::ResolvedArg::EarlyBound(def_id)) => { @@ -247,12 +247,12 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local()); let generics = tcx.generics_of(item_def_id); let index = generics.param_def_id_to_index[&def_id]; - tcx.mk_re_early_bound(ty::EarlyBoundRegion { def_id, index, name }) + ty::Region::new_early_bound(tcx, ty::EarlyBoundRegion { def_id, index, name }) } Some(rbv::ResolvedArg::Free(scope, id)) => { let name = lifetime_name(id.expect_local()); - tcx.mk_re_free(scope, ty::BrNamed(id, name)) + ty::Region::new_free(tcx, scope, ty::BrNamed(id, name)) // (*) -- not late-bound, won't change } @@ -269,7 +269,8 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // elision. `resolve_lifetime` should have // reported an error in this case -- but if // not, let's error out. 
- tcx.mk_re_error_with_message( + ty::Region::new_error_with_message( + tcx, lifetime.ident.span, "unelided lifetime in signature", ) @@ -485,7 +486,8 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { debug!(?param, "unelided lifetime in signature"); // This indicates an illegal lifetime in a non-assoc-trait position - tcx.mk_re_error_with_message( + ty::Region::new_error_with_message( + tcx, self.span, "unelided lifetime in signature", ) @@ -1159,7 +1161,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // those that do. self.one_bound_for_assoc_type( || traits::supertraits(tcx, trait_ref), - trait_ref.print_only_trait_path(), + trait_ref.skip_binder().print_only_trait_name(), binding.item_name, path_span, match binding.kind { @@ -1219,15 +1221,15 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let substs = candidate.skip_binder().substs.extend_to(tcx, assoc_item.def_id, |param, _| { let subst = match param.kind { - GenericParamDefKind::Lifetime => tcx - .mk_re_late_bound( - ty::INNERMOST, - ty::BoundRegion { - var: ty::BoundVar::from_usize(num_bound_vars), - kind: ty::BoundRegionKind::BrNamed(param.def_id, param.name), - }, - ) - .into(), + GenericParamDefKind::Lifetime => ty::Region::new_late_bound( + tcx, + ty::INNERMOST, + ty::BoundRegion { + var: ty::BoundVar::from_usize(num_bound_vars), + kind: ty::BoundRegionKind::BrNamed(param.def_id, param.name), + }, + ) + .into(), GenericParamDefKind::Type { .. } => tcx .mk_bound( ty::INNERMOST, @@ -1278,7 +1280,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // params (and trait ref's late bound params). This logic is very similar to // `Predicate::subst_supertrait`, and it's no coincidence why. let shifted_output = tcx.shift_bound_var_indices(num_bound_vars, output); - let subst_output = ty::EarlyBinder(shifted_output).subst(tcx, substs); + let subst_output = ty::EarlyBinder::bind(shifted_output).subst(tcx, substs); let bound_vars = tcx.late_bound_vars(binding.hir_id); ty::Binder::bind_with_vars(subst_output, bound_vars) @@ -1804,7 +1806,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { } else { err.emit() }; - tcx.mk_re_error(e) + ty::Region::new_error(tcx, e) }) } }) diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs index 8bf1e0e84a4..31b89525f15 100644 --- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs +++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs @@ -25,6 +25,7 @@ use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _; use rustc_trait_selection::traits::{ self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal, }; +use std::borrow::Cow; use std::iter; /// Checks that a method from an impl conforms to the signature of @@ -471,7 +472,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for RemapLateBound<'_, 'tcx> { fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { if let ty::ReFree(fr) = *r { - self.tcx.mk_re_free( + ty::Region::new_free( + self.tcx, fr.scope, self.mapping.get(&fr.bound_region).copied().unwrap_or(fr.bound_region), ) @@ -684,7 +686,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>( &cause, hir.get_if_local(impl_m.def_id) .and_then(|node| node.fn_decl()) - .map(|decl| (decl.output.span(), "return type in trait".to_owned())), + .map(|decl| (decl.output.span(), Cow::from("return type in trait"))), Some(infer::ValuePairs::Terms(ExpectedFound { expected: trait_return_ty.into(), found: impl_return_ty.into(), @@ -785,23 +787,23 @@ pub(super) fn 
collect_return_position_impl_trait_in_trait_tys<'tcx>( } let Some(ty::ReEarlyBound(e)) = map.get(®ion.into()).map(|r| r.expect_region().kind()) else { - return tcx.mk_re_error_with_message(return_span, "expected ReFree to map to ReEarlyBound") + return ty::Region::new_error_with_message(tcx, return_span, "expected ReFree to map to ReEarlyBound") }; - tcx.mk_re_early_bound(ty::EarlyBoundRegion { + ty::Region::new_early_bound(tcx, ty::EarlyBoundRegion { def_id: e.def_id, name: e.name, index: (e.index as usize - num_trait_substs + num_impl_substs) as u32, }) }); debug!(%ty); - collected_tys.insert(def_id, ty::EarlyBinder(ty)); + collected_tys.insert(def_id, ty::EarlyBinder::bind(ty)); } Err(err) => { let reported = tcx.sess.delay_span_bug( return_span, format!("could not fully resolve: {ty} => {err:?}"), ); - collected_tys.insert(def_id, ty::EarlyBinder(tcx.ty_error(reported))); + collected_tys.insert(def_id, ty::EarlyBinder::bind(tcx.ty_error(reported))); } } } @@ -901,7 +903,7 @@ fn report_trait_method_mismatch<'tcx>( if trait_m.fn_has_self_parameter => { let ty = trait_sig.inputs()[0]; - let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty()) { + let sugg = match ExplicitSelf::determine(ty, |ty| ty == impl_trait_ref.self_ty()) { ExplicitSelf::ByValue => "self".to_owned(), ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(), ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(), @@ -963,7 +965,7 @@ fn report_trait_method_mismatch<'tcx>( infcx.err_ctxt().note_type_err( &mut diag, &cause, - trait_err_span.map(|sp| (sp, "type in trait".to_owned())), + trait_err_span.map(|sp| (sp, Cow::from("type in trait"))), Some(infer::ValuePairs::Sigs(ExpectedFound { expected: trait_sig, found: impl_sig })), terr, false, @@ -1731,7 +1733,7 @@ pub(super) fn compare_impl_const_raw( infcx.err_ctxt().note_type_err( &mut diag, &cause, - trait_c_span.map(|span| (span, "type in trait".to_owned())), + trait_c_span.map(|span| (span, Cow::from("type in trait"))), Some(infer::ValuePairs::Terms(ExpectedFound { expected: trait_ty.into(), found: impl_ty.into(), @@ -1932,7 +1934,8 @@ pub(super) fn check_type_bounds<'tcx>( let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name); let bound_var = ty::BoundVariableKind::Region(kind); bound_vars.push(bound_var); - tcx.mk_re_late_bound( + ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind }, ) diff --git a/compiler/rustc_hir_analysis/src/check/dropck.rs b/compiler/rustc_hir_analysis/src/check/dropck.rs index e0ba255cc06..13d1abe2a65 100644 --- a/compiler/rustc_hir_analysis/src/check/dropck.rs +++ b/compiler/rustc_hir_analysis/src/check/dropck.rs @@ -128,7 +128,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( // We don't need to normalize this param-env or anything, since we're only // substituting it with free params, so no additional param-env normalization // can occur on top of what has been done in the param_env query itself. 
- let param_env = ty::EarlyBinder(tcx.param_env(adt_def_id)) + let param_env = ty::EarlyBinder::bind(tcx.param_env(adt_def_id)) .subst(tcx, adt_to_impl_substs) .with_constness(tcx.constness(drop_impl_def_id)); @@ -183,7 +183,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( } RegionResolutionError::SubSupConflict(_, _, _, a, _, b, _) => format!("{b}: {a}"), RegionResolutionError::UpperBoundUniverseConflict(a, _, _, _, b) => { - format!("{b}: {a}", a = tcx.mk_re_var(a)) + format!("{b}: {a}", a = ty::Region::new_var(tcx, a)) } }; guar = Some( diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index e8785235c83..1f18017f00b 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -121,10 +121,11 @@ pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir if has_safe_attr != is_in_list { tcx.sess.struct_span_err( tcx.def_span(intrinsic_id), - DiagnosticMessage::Str(format!( - "intrinsic safety mismatch between list of intrinsics within the compiler and core library intrinsics for intrinsic `{}`", - tcx.item_name(intrinsic_id) - ))).emit(); + DiagnosticMessage::from(format!( + "intrinsic safety mismatch between list of intrinsics within the compiler and core library intrinsics for intrinsic `{}`", + tcx.item_name(intrinsic_id) + ) + )).emit(); } is_in_list @@ -144,11 +145,13 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { ]); let mk_va_list_ty = |mutbl| { tcx.lang_items().va_list().map(|did| { - let region = tcx.mk_re_late_bound( + let region = ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) }, ); - let env_region = tcx.mk_re_late_bound( + let env_region = ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrEnv }, ); @@ -392,7 +395,12 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) }; ( 1, - vec![tcx.mk_imm_ref(tcx.mk_re_late_bound(ty::INNERMOST, br), param(0))], + vec![ + tcx.mk_imm_ref( + ty::Region::new_late_bound(tcx, ty::INNERMOST, br), + param(0), + ), + ], tcx.mk_projection(discriminant_def_id, tcx.mk_substs(&[param(0).into()])), ) } @@ -442,7 +450,8 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { sym::raw_eq => { let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) }; - let param_ty = tcx.mk_imm_ref(tcx.mk_re_late_bound(ty::INNERMOST, br), param(0)); + let param_ty = + tcx.mk_imm_ref(ty::Region::new_late_bound(tcx, ty::INNERMOST, br), param(0)); (1, vec![param_ty; 2], tcx.types.bool) } diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index b403ee96b42..fff417fcb29 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -556,11 +556,14 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>( // Same for the region. In our example, 'a corresponds // to the 'me parameter. 
let region_param = gat_generics.param_at(*region_a_idx, tcx); - let region_param = tcx.mk_re_early_bound(ty::EarlyBoundRegion { - def_id: region_param.def_id, - index: region_param.index, - name: region_param.name, - }); + let region_param = ty::Region::new_early_bound( + tcx, + ty::EarlyBoundRegion { + def_id: region_param.def_id, + index: region_param.index, + name: region_param.name, + }, + ); // The predicate we expect to see. (In our example, // `Self: 'me`.) let clause = ty::PredicateKind::Clause(ty::Clause::TypeOutlives( @@ -593,18 +596,24 @@ fn gather_gat_bounds<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>( debug!("required clause: {region_a} must outlive {region_b}"); // Translate into the generic parameters of the GAT. let region_a_param = gat_generics.param_at(*region_a_idx, tcx); - let region_a_param = tcx.mk_re_early_bound(ty::EarlyBoundRegion { - def_id: region_a_param.def_id, - index: region_a_param.index, - name: region_a_param.name, - }); + let region_a_param = ty::Region::new_early_bound( + tcx, + ty::EarlyBoundRegion { + def_id: region_a_param.def_id, + index: region_a_param.index, + name: region_a_param.name, + }, + ); // Same for the region. let region_b_param = gat_generics.param_at(*region_b_idx, tcx); - let region_b_param = tcx.mk_re_early_bound(ty::EarlyBoundRegion { - def_id: region_b_param.def_id, - index: region_b_param.index, - name: region_b_param.name, - }); + let region_b_param = ty::Region::new_early_bound( + tcx, + ty::EarlyBoundRegion { + def_id: region_b_param.def_id, + index: region_b_param.index, + name: region_b_param.name, + }, + ); // The predicate we expect to see. let clause = ty::PredicateKind::Clause(ty::Clause::RegionOutlives( ty::OutlivesPredicate(region_a_param, region_b_param), @@ -1398,7 +1407,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id } let mut param_count = CountParams::default(); let has_region = pred.visit_with(&mut param_count).is_break(); - let substituted_pred = ty::EarlyBinder(pred).subst(tcx, substs); + let substituted_pred = ty::EarlyBinder::bind(pred).subst(tcx, substs); // Don't check non-defaulted params, dependent defaults (including lifetimes) // or preds with multiple params. 
if substituted_pred.has_non_region_param() || param_count.params.len() > 1 || has_region diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index ca0d5509c57..2f7d465839c 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -440,7 +440,7 @@ impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> { self.tcx.replace_late_bound_regions_uncached( poly_trait_ref, |_| { - self.tcx.mk_re_early_bound(ty::EarlyBoundRegion { + ty::Region::new_early_bound(self.tcx, ty::EarlyBoundRegion { def_id: item_def_id, index: 0, name: Symbol::intern(<_name), @@ -1124,7 +1124,7 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<ty::PolyFnSig< bug!("unexpected sort of node in fn_sig(): {:?}", x); } }; - ty::EarlyBinder(output) + ty::EarlyBinder::bind(output) } fn infer_return_ty_for_fn_sig<'tcx>( @@ -1312,7 +1312,7 @@ fn impl_trait_ref( check_impl_constness(tcx, impl_.constness, ast_trait_ref), ) }) - .map(ty::EarlyBinder) + .map(ty::EarlyBinder::bind) } fn check_impl_constness( diff --git a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs index 948b903e509..b92d561fb86 100644 --- a/compiler/rustc_hir_analysis/src/collect/item_bounds.rs +++ b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs @@ -86,7 +86,7 @@ pub(super) fn explicit_item_bounds( Some(ty::ImplTraitInTraitData::Trait { opaque_def_id, .. }) => { let item = tcx.hir().get_by_def_id(opaque_def_id.expect_local()).expect_item(); let opaque_ty = item.expect_opaque_ty(); - return ty::EarlyBinder(opaque_type_bounds( + return ty::EarlyBinder::bind(opaque_type_bounds( tcx, opaque_def_id.expect_local(), opaque_ty.bounds, @@ -124,7 +124,7 @@ pub(super) fn explicit_item_bounds( } _ => bug!("item_bounds called on {:?}", def_id), }; - ty::EarlyBinder(bounds) + ty::EarlyBinder::bind(bounds) } pub(super) fn item_bounds( diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs index e5b5dae551e..70d950eddd8 100644 --- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs @@ -306,11 +306,14 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen let Some(dup_index) = generics.param_def_id_to_index(tcx, dup_def) else { bug!() }; - let dup_region = tcx.mk_re_early_bound(ty::EarlyBoundRegion { - def_id: dup_def, - index: dup_index, - name: duplicate.name.ident().name, - }); + let dup_region = ty::Region::new_early_bound( + tcx, + ty::EarlyBoundRegion { + def_id: dup_def, + index: dup_index, + name: duplicate.name.ident().name, + }, + ); predicates.push(( ty::Binder::dummy(ty::PredicateKind::Clause(ty::Clause::RegionOutlives( ty::OutlivesPredicate(orig_region, dup_region), diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index 8e082d3c532..65ab00fda81 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -323,7 +323,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty return map[&assoc_item.trait_item_def_id.unwrap()]; } Err(_) => { - return ty::EarlyBinder(tcx.ty_error_with_message( + return ty::EarlyBinder::bind(tcx.ty_error_with_message( DUMMY_SP, "Could not collect return position impl trait in trait tys", )); @@ -497,7 +497,7 @@ pub(super) fn type_of(tcx: 
TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty bug!("unexpected sort of node in type_of(): {:?}", x); } }; - ty::EarlyBinder(output) + ty::EarlyBinder::bind(output) } fn infer_placeholder_type<'a>( diff --git a/compiler/rustc_hir_analysis/src/outlives/explicit.rs b/compiler/rustc_hir_analysis/src/outlives/explicit.rs index 357deb07b8f..7ce48fe1c01 100644 --- a/compiler/rustc_hir_analysis/src/outlives/explicit.rs +++ b/compiler/rustc_hir_analysis/src/outlives/explicit.rs @@ -68,7 +68,7 @@ impl<'tcx> ExplicitPredicatesMap<'tcx> { } } - ty::EarlyBinder(required_predicates) + ty::EarlyBinder::bind(required_predicates) }) } } diff --git a/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs index 0cd2fc1aa29..71dca918fc5 100644 --- a/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs +++ b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs @@ -68,12 +68,13 @@ pub(super) fn infer_predicates( // Therefore mark `predicates_added` as true and which will ensure // we walk the crates again and re-calculate predicates for all // items. - let item_predicates_len: usize = - global_inferred_outlives.get(&item_did.to_def_id()).map_or(0, |p| p.0.len()); + let item_predicates_len: usize = global_inferred_outlives + .get(&item_did.to_def_id()) + .map_or(0, |p| p.as_ref().skip_binder().len()); if item_required_predicates.len() > item_predicates_len { predicates_added = true; global_inferred_outlives - .insert(item_did.to_def_id(), ty::EarlyBinder(item_required_predicates)); + .insert(item_did.to_def_id(), ty::EarlyBinder::bind(item_required_predicates)); } } @@ -137,7 +138,9 @@ fn insert_required_predicates_to_be_wf<'tcx>( // 'a` holds for `Foo`. debug!("Adt"); if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did()) { - for (unsubstituted_predicate, &span) in &unsubstituted_predicates.0 { + for (unsubstituted_predicate, &span) in + unsubstituted_predicates.as_ref().skip_binder() + { // `unsubstituted_predicate` is `U: 'b` in the // example above. So apply the substitution to // get `T: 'a` (or `predicate`): @@ -251,7 +254,7 @@ fn check_explicit_predicates<'tcx>( ); let explicit_predicates = explicit_map.explicit_predicates_of(tcx, def_id); - for (outlives_predicate, &span) in &explicit_predicates.0 { + for (outlives_predicate, &span) in explicit_predicates.as_ref().skip_binder() { debug!("outlives_predicate = {:?}", &outlives_predicate); // Careful: If we are inferring the effects of a `dyn Trait<..>` diff --git a/compiler/rustc_hir_analysis/src/outlives/mod.rs b/compiler/rustc_hir_analysis/src/outlives/mod.rs index a8596c707f3..2106d6ff07d 100644 --- a/compiler/rustc_hir_analysis/src/outlives/mod.rs +++ b/compiler/rustc_hir_analysis/src/outlives/mod.rs @@ -98,24 +98,27 @@ fn inferred_outlives_crate(tcx: TyCtxt<'_>, (): ()) -> CratePredicatesMap<'_> { let predicates = global_inferred_outlives .iter() .map(|(&def_id, set)| { - let predicates = &*tcx.arena.alloc_from_iter(set.0.iter().filter_map( - |(ty::OutlivesPredicate(kind1, region2), &span)| { - match kind1.unpack() { - GenericArgKind::Type(ty1) => Some(( - ty::Clause::TypeOutlives(ty::OutlivesPredicate(ty1, *region2)), - span, - )), - GenericArgKind::Lifetime(region1) => Some(( - ty::Clause::RegionOutlives(ty::OutlivesPredicate(region1, *region2)), - span, - )), - GenericArgKind::Const(_) => { - // Generic consts don't impose any constraints. 
- None + let predicates = + &*tcx.arena.alloc_from_iter(set.as_ref().skip_binder().iter().filter_map( + |(ty::OutlivesPredicate(kind1, region2), &span)| { + match kind1.unpack() { + GenericArgKind::Type(ty1) => Some(( + ty::Clause::TypeOutlives(ty::OutlivesPredicate(ty1, *region2)), + span, + )), + GenericArgKind::Lifetime(region1) => Some(( + ty::Clause::RegionOutlives(ty::OutlivesPredicate( + region1, *region2, + )), + span, + )), + GenericArgKind::Const(_) => { + // Generic consts don't impose any constraints. + None + } } - } - }, - )); + }, + )); (def_id, predicates) }) .collect(); diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl index 9761b1d3fac..aab432eee57 100644 --- a/compiler/rustc_hir_typeck/messages.ftl +++ b/compiler/rustc_hir_typeck/messages.ftl @@ -25,6 +25,8 @@ hir_typeck_const_select_must_be_fn = this argument must be a function item hir_typeck_convert_to_str = try converting the passed type into a `&str` +hir_typeck_ctor_is_private = tuple struct constructor `{$def}` is private + hir_typeck_expected_default_return_type = expected `()` because of default return type hir_typeck_expected_return_type = expected `{$expected}` because of return type @@ -59,8 +61,8 @@ hir_typeck_lang_start_incorrect_param = parameter {$param_num} of the `start` la hir_typeck_lang_start_incorrect_ret_ty = the return type of the `start` lang item is incorrect .suggestion = change the type from `{$found_ty}` to `{$expected_ty}` -hir_typeck_method_call_on_unknown_type = - the type of this value must be known to call a method on a raw pointer on it +hir_typeck_method_call_on_unknown_raw_pointee = + cannot call a method on a raw pointer with an unknown pointee type hir_typeck_missing_parentheses_in_range = can't call method `{$method_name}` on type `{$ty_str}` diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs index 655ab94eb48..4389ad6ef26 100644 --- a/compiler/rustc_hir_typeck/src/callee.rs +++ b/compiler/rustc_hir_typeck/src/callee.rs @@ -420,20 +420,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .steal_diagnostic(segment.ident.span, StashKey::CallIntoMethod) { // Try suggesting `foo(a)` -> `a.foo()` if possible. - if let Some(ty) = - self.suggest_call_as_method( - &mut diag, - segment, - arg_exprs, - call_expr, - expected - ) - { - diag.emit(); - return ty; - } else { - diag.emit(); - } + self.suggest_call_as_method( + &mut diag, + segment, + arg_exprs, + call_expr, + expected + ); + diag.emit(); } let err = self.report_invalid_callee(call_expr, callee_expr, callee_ty, arg_exprs); @@ -496,9 +490,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { arg_exprs: &'tcx [hir::Expr<'tcx>], call_expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>, - ) -> Option<Ty<'tcx>> { + ) { if let [callee_expr, rest @ ..] = arg_exprs { - let callee_ty = self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr)?; + let Some(callee_ty) = self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr) else { + return; + }; // First, do a probe with `IsSuggestion(true)` to avoid emitting // any strange errors. 
If it's successful, then we'll do a true @@ -513,7 +509,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ProbeScope::AllTraits, expected.only_has_type(self), ) else { - return None; + return; }; let pick = self.confirm_method( @@ -525,7 +521,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { segment, ); if pick.illegal_sized_bound.is_some() { - return None; + return; } let up_to_rcvr_span = segment.ident.span.until(callee_expr.span); @@ -567,22 +563,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sugg, Applicability::MaybeIncorrect, ); - - // Let's check the method fully now - let return_ty = self.check_method_argument_types( - segment.ident.span, - call_expr, - Ok(pick.callee), - rest, - TupleArgumentsFlag::DontTupleArguments, - expected, - ); - - return Some(return_ty); } } - - None } fn report_invalid_callee( diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs index 08c4082e885..905781ec8f5 100644 --- a/compiler/rustc_hir_typeck/src/coercion.rs +++ b/compiler/rustc_hir_typeck/src/coercion.rs @@ -62,6 +62,7 @@ use rustc_span::{self, BytePos, DesugaringKind, Span}; use rustc_target::spec::abi::Abi; use rustc_trait_selection::infer::InferCtxtExt as _; use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _; +use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; use rustc_trait_selection::traits::{ self, NormalizeExt, ObligationCause, ObligationCauseCode, ObligationCtxt, }; @@ -144,12 +145,28 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { debug!("unify(a: {:?}, b: {:?}, use_lub: {})", a, b, self.use_lub); self.commit_if_ok(|_| { let at = self.at(&self.cause, self.fcx.param_env); - if self.use_lub { + + let res = if self.use_lub { at.lub(DefineOpaqueTypes::Yes, b, a) } else { at.sup(DefineOpaqueTypes::Yes, b, a) .map(|InferOk { value: (), obligations }| InferOk { value: a, obligations }) + }; + + // In the new solver, lazy norm may allow us to shallowly equate + // more types, but we emit possibly impossible-to-satisfy obligations. + // Filter these cases out to make sure our coercion is more accurate. + if self.tcx.trait_solver_next() { + if let Ok(res) = &res { + for obligation in &res.obligations { + if !self.predicate_may_hold(&obligation) { + return Err(TypeError::Mismatch); + } + } + } } + + res }) } @@ -791,6 +808,8 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>, { self.commit_if_ok(|snapshot| { + let outer_universe = self.infcx.universe(); + let result = if let ty::FnPtr(fn_ty_b) = b.kind() && let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) = (fn_ty_a.unsafety(), fn_ty_b.unsafety()) @@ -807,7 +826,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // want the coerced type to be the actual supertype of these two, // but for now, we want to just error to ensure we don't lock // ourselves into a specific behavior with NLL. 
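In the `suggest_call_as_method` hunk above, the function now returns `()` instead of an `Option`, so the old `expr_ty_adjusted_opt(callee_expr)?` can no longer use `?` and becomes a `let ... else { return; }` early exit. The same pattern in a self-contained form (the `HashMap` lookup is only an illustrative stand-in, not compiler code):

use std::collections::HashMap;

fn print_if_present(map: &HashMap<&str, i32>, key: &str) {
    // was (while the function still returned an Option): `let value = map.get(key)?;`
    let Some(value) = map.get(key) else {
        return;
    };
    println!("{key} = {value}");
}

fn main() {
    let map = HashMap::from([("a", 1)]);
    print_if_present(&map, "a");
    print_if_present(&map, "missing"); // returns early, nothing printed
}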
- self.leak_check(false, snapshot)?; + self.leak_check(outer_universe, Some(snapshot))?; result }) diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index 102a313067f..4222205c841 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -49,8 +49,8 @@ pub struct StructExprNonExhaustive { } #[derive(Diagnostic)] -#[diag(hir_typeck_method_call_on_unknown_type, code = "E0699")] -pub struct MethodCallOnUnknownType { +#[diag(hir_typeck_method_call_on_unknown_raw_pointee, code = "E0699")] +pub struct MethodCallOnUnknownRawPointee { #[primary_span] pub span: Span, } @@ -319,3 +319,11 @@ pub struct CandidateTraitNote { pub item_name: Ident, pub action_or_ty: String, } + +#[derive(Diagnostic)] +#[diag(hir_typeck_ctor_is_private, code = "E0603")] +pub struct CtorIsPrivate { + #[primary_span] + pub span: Span, + pub def: String, +} diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs index 2fdcd09b9a2..38ddb7e7604 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs @@ -1,4 +1,5 @@ use crate::callee::{self, DeferredCallResolution}; +use crate::errors::CtorIsPrivate; use crate::method::{self, MethodCallee, SelfSource}; use crate::rvalue_scopes; use crate::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy, RawTy}; @@ -1207,6 +1208,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { match ty.normalized.ty_adt_def() { Some(adt_def) if adt_def.has_ctor() => { let (ctor_kind, ctor_def_id) = adt_def.non_enum_variant().ctor.unwrap(); + // Check the visibility of the ctor. + let vis = tcx.visibility(ctor_def_id); + if !vis.is_accessible_from(tcx.parent_module(hir_id).to_def_id(), tcx) { + tcx.sess + .emit_err(CtorIsPrivate { span, def: tcx.def_path_str(adt_def.did()) }); + } let new_res = Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id); let user_substs = Self::user_substs_for_adt(ty); user_self_ty = user_substs.user_self_ty; @@ -1379,7 +1386,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // the referenced item. 
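The new `CtorIsPrivate` check above emits E0603 when a path type-checks to a tuple-struct constructor that is not visible from the current module. One plausible shape of such a case, inferred from the diagnostic text rather than taken from the patch, is a `Self(..)` call in an impl outside the struct's module, which the resolver's own privacy check does not see (compile-fail sketch):

mod inner {
    pub struct Wrapper(u8); // private field, so the constructor is private outside `inner`
}

trait Make {
    fn make() -> Self;
}

impl Make for inner::Wrapper {
    fn make() -> Self {
        // with this check: error[E0603]: tuple struct constructor is private
        Self(0)
    }
}

fn main() {}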
let ty = tcx.type_of(def_id); assert!(!substs.has_escaping_bound_vars()); - assert!(!ty.0.has_escaping_bound_vars()); + assert!(!ty.skip_binder().has_escaping_bound_vars()); let ty_substituted = self.normalize(span, ty.subst(tcx, substs)); if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { diff --git a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs index 019fb86f55c..fb28233bfb1 100644 --- a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs +++ b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs @@ -122,7 +122,7 @@ impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> { self.fcx .need_type_info_err_in_generator(self.kind, span, unresolved_term) - .span_note(yield_data.span, &*note) + .span_note(yield_data.span, note) .emit(); } } else { @@ -269,7 +269,7 @@ pub fn resolve_interior<'a, 'tcx>( }, _ => mk_bound_region(ty::BrAnon(None)), }; - let r = fcx.tcx.mk_re_late_bound(current_depth, br); + let r = ty::Region::new_late_bound(fcx.tcx, current_depth, br); r }); captured_tys.insert(ty).then(|| { @@ -295,7 +295,11 @@ pub fn resolve_interior<'a, 'tcx>( let var = ty::BoundVar::from_usize(bound_vars.len()); bound_vars.push(ty::BoundVariableKind::Region(kind)); counter += 1; - fcx.tcx.mk_re_late_bound(ty::INNERMOST, ty::BoundRegion { var, kind }) + ty::Region::new_late_bound( + fcx.tcx, + ty::INNERMOST, + ty::BoundRegion { var, kind }, + ) }, types: &mut |b| bug!("unexpected bound ty in binder: {b:?}"), consts: &mut |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"), @@ -686,7 +690,7 @@ fn check_must_not_suspend_def( // Add optional reason note if let Some(note) = attr.value_str() { // FIXME(guswynn): consider formatting this better - lint.span_note(data.source_span, note.as_str()); + lint.span_note(data.source_span, note.to_string()); } // Add some quick suggestions on what to do diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs index ba21edea30b..9f3d35a77dc 100644 --- a/compiler/rustc_hir_typeck/src/method/probe.rs +++ b/compiler/rustc_hir_typeck/src/method/probe.rs @@ -3,7 +3,7 @@ use super::CandidateSource; use super::MethodError; use super::NoMatchData; -use crate::errors::MethodCallOnUnknownType; +use crate::errors::MethodCallOnUnknownRawPointee; use crate::FnCtxt; use rustc_data_structures::fx::FxHashSet; use rustc_errors::Applicability; @@ -438,7 +438,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // so we do a future-compat lint here for the 2015 edition // (see https://github.com/rust-lang/rust/issues/46906) if self.tcx.sess.rust_2018() { - self.tcx.sess.emit_err(MethodCallOnUnknownType { span }); + self.tcx.sess.emit_err(MethodCallOnUnknownRawPointee { span }); } else { self.tcx.struct_span_lint_hir( lint::builtin::TYVAR_BEHIND_RAW_POINTER, diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index 8555c20204a..9ee967dc7a9 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -473,6 +473,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let mut custom_span_label = false; let static_candidates = &mut no_match_data.static_candidates; + + // `static_candidates` may have same candidates appended by + // inherent and extension, which may result in incorrect + // diagnostic. 
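For context on the E0699 rewording above ("cannot call a method on a raw pointer with an unknown pointee type"): the error fires when a method is probed on a raw pointer whose pointee is still an unresolved inference variable, as in this error-index style example (compile-fail):

fn main() {
    let foo = &1;
    let bar = foo as *const _; // pointee type left for inference
    if bar.is_null() {
        // error[E0699]: cannot call a method on a raw pointer with an unknown pointee type
    }
    // annotating the cast, e.g. `foo as *const i32`, resolves it
}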
+ static_candidates.dedup(); + if !static_candidates.is_empty() { err.note( "found the following associated functions; to be used as methods, \ diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs index 0c8854e962a..6b2dd0a2b4f 100644 --- a/compiler/rustc_infer/src/infer/at.rs +++ b/compiler/rustc_infer/src/infer/at.rs @@ -70,8 +70,8 @@ impl<'tcx> InferCtxt<'tcx> { tcx: self.tcx, defining_use_anchor: self.defining_use_anchor, considering_regions: self.considering_regions, + skip_leak_check: self.skip_leak_check, inner: self.inner.clone(), - skip_leak_check: self.skip_leak_check.clone(), lexical_region_resolutions: self.lexical_region_resolutions.clone(), selection_cache: self.selection_cache.clone(), evaluation_cache: self.evaluation_cache.clone(), diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs index 427d05c8b4d..bf53a73f398 100644 --- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs +++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs @@ -771,7 +771,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> { ) -> ty::Region<'tcx> { let var = self.canonical_var(info, r.into()); let br = ty::BoundRegion { var, kind: ty::BrAnon(None) }; - self.interner().mk_re_late_bound(self.binder_index, br) + ty::Region::new_late_bound(self.interner(), self.binder_index, br) } /// Given a type variable `ty_var` of the given kind, first check diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs index 2abdd5b0aec..c8c318c3f02 100644 --- a/compiler/rustc_infer/src/infer/canonical/mod.rs +++ b/compiler/rustc_infer/src/infer/canonical/mod.rs @@ -141,7 +141,7 @@ impl<'tcx> InferCtxt<'tcx> { CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, bound }) => { let universe_mapped = universe_map(universe); let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, bound }; - self.tcx.mk_re_placeholder(placeholder_mapped).into() + ty::Region::new_placeholder(self.tcx, placeholder_mapped).into() } CanonicalVarKind::Const(ui, ty) => self diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs index 88256c819f4..2cf8d8c702d 100644 --- a/compiler/rustc_infer/src/infer/canonical/query_response.rs +++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs @@ -668,14 +668,15 @@ pub fn make_query_region_constraints<'tcx>( let constraint = match *k { // Swap regions because we are going from sub (<=) to outlives // (>=). 
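Much of the churn through these `rustc_infer` hunks is one mechanical migration: region constructors move from interner methods (`tcx.mk_re_var`, `tcx.mk_re_placeholder`, `tcx.mk_re_late_bound`) to associated functions that take the interner as an explicit first argument (`ty::Region::new_var(tcx, ..)` and friends). A toy sketch of that API shape, with stand-in types rather than the real `TyCtxt`/`Region`:

struct Interner;                // stand-in for TyCtxt<'tcx>

#[derive(Debug, PartialEq)]
struct Region(u32);             // stand-in for ty::Region<'tcx>

impl Region {
    // new style: associated constructor that takes the interner explicitly
    fn new_var(_interner: &Interner, vid: u32) -> Region {
        Region(vid)
    }
}

fn main() {
    let tcx = Interner;
    // was: tcx.mk_re_var(vid)
    assert_eq!(Region::new_var(&tcx, 7), Region(7));
}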
- Constraint::VarSubVar(v1, v2) => { - ty::OutlivesPredicate(tcx.mk_re_var(v2).into(), tcx.mk_re_var(v1)) - } + Constraint::VarSubVar(v1, v2) => ty::OutlivesPredicate( + ty::Region::new_var(tcx, v2).into(), + ty::Region::new_var(tcx, v1), + ), Constraint::VarSubReg(v1, r2) => { - ty::OutlivesPredicate(r2.into(), tcx.mk_re_var(v1)) + ty::OutlivesPredicate(r2.into(), ty::Region::new_var(tcx, v1)) } Constraint::RegSubVar(r1, v2) => { - ty::OutlivesPredicate(tcx.mk_re_var(v2).into(), r1) + ty::OutlivesPredicate(ty::Region::new_var(tcx, v2).into(), r1) } Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1), }; @@ -719,7 +720,7 @@ impl<'tcx> TypeRelatingDelegate<'tcx> for QueryTypeRelatingDelegate<'_, 'tcx> { } fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx> { - self.infcx.tcx.mk_re_placeholder(placeholder) + ty::Region::new_placeholder(self.infcx.tcx, placeholder) } fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> { diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs index 35c05e80bad..f8a253c8949 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs @@ -76,6 +76,7 @@ use rustc_middle::ty::{ }; use rustc_span::{sym, symbol::kw, BytePos, DesugaringKind, Pos, Span}; use rustc_target::spec::abi; +use std::borrow::Cow; use std::ops::{ControlFlow, Deref}; use std::path::PathBuf; use std::{cmp, fmt, iter}; @@ -1470,7 +1471,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { &self, diag: &mut Diagnostic, cause: &ObligationCause<'tcx>, - secondary_span: Option<(Span, String)>, + secondary_span: Option<(Span, Cow<'static, str>)>, mut values: Option<ValuePairs<'tcx>>, terr: TypeError<'tcx>, swap_secondary_and_primary: bool, @@ -1629,7 +1630,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { } }; - let mut label_or_note = |span: Span, msg: &str| { + let mut label_or_note = |span: Span, msg: Cow<'static, str>| { if (prefer_label && is_simple_error) || &[span] == diag.span.primary_spans() { diag.span_label(span, msg); } else { @@ -1643,15 +1644,15 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { .. 
})) = values { - format!("expected this to be `{}`", expected) + Cow::from(format!("expected this to be `{}`", expected)) } else { - terr.to_string(self.tcx).to_string() + terr.to_string(self.tcx) }; - label_or_note(sp, &terr); - label_or_note(span, &msg); + label_or_note(sp, terr); + label_or_note(span, msg); } else { - label_or_note(span, &terr.to_string(self.tcx)); - label_or_note(sp, &msg); + label_or_note(span, terr.to_string(self.tcx)); + label_or_note(sp, msg); } } else { if let Some(values) = values @@ -1663,12 +1664,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { let expected = with_forced_trimmed_paths!(e.sort_string(self.tcx)); let found = with_forced_trimmed_paths!(f.sort_string(self.tcx)); if expected == found { - label_or_note(span, &terr.to_string(self.tcx)); + label_or_note(span, terr.to_string(self.tcx)); } else { - label_or_note(span, &format!("expected {expected}, found {found}")); + label_or_note(span, Cow::from(format!("expected {expected}, found {found}"))); } } else { - label_or_note(span, &terr.to_string(self.tcx)); + label_or_note(span, terr.to_string(self.tcx)); } } diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs index c9c1f0aeaac..0b3bc1ce6b3 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs @@ -79,7 +79,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> { sup_placeholder @ Region(Interned(RePlaceholder(_), _)), _, )) => self.try_report_trait_placeholder_mismatch( - Some(self.tcx().mk_re_var(*vid)), + Some(ty::Region::new_var(self.tcx(), *vid)), cause, Some(*sub_placeholder), Some(*sup_placeholder), @@ -95,7 +95,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> { _, _, )) => self.try_report_trait_placeholder_mismatch( - Some(self.tcx().mk_re_var(*vid)), + Some(ty::Region::new_var(self.tcx(), *vid)), cause, Some(*sub_placeholder), None, @@ -111,7 +111,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> { sup_placeholder @ Region(Interned(RePlaceholder(_), _)), _, )) => self.try_report_trait_placeholder_mismatch( - Some(self.tcx().mk_re_var(*vid)), + Some(ty::Region::new_var(self.tcx(), *vid)), cause, None, Some(*sup_placeholder), @@ -127,7 +127,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> { sup_placeholder @ Region(Interned(RePlaceholder(_), _)), _, )) => self.try_report_trait_placeholder_mismatch( - Some(self.tcx().mk_re_var(*vid)), + Some(ty::Region::new_var(self.tcx(), *vid)), cause, None, Some(*sup_placeholder), @@ -141,7 +141,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> { SubregionOrigin::Subtype(box TypeTrace { cause, values }), sup_placeholder @ Region(Interned(RePlaceholder(_), _)), )) => self.try_report_trait_placeholder_mismatch( - Some(self.tcx().mk_re_var(*vid)), + Some(ty::Region::new_var(self.tcx(), *vid)), cause, None, Some(*sup_placeholder), diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs index 421eb807a14..d1f110472c9 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs @@ -234,13 +234,13 @@ impl<T> Trait<T> for X { ); } (_, ty::Alias(ty::Projection | ty::Inherent, proj_ty)) if !tcx.is_impl_trait_in_trait(proj_ty.def_id) => { - let msg = format!( + let msg = || format!( "consider constraining the associated type 
`{}` to `{}`", values.found, values.expected, ); if !(self.suggest_constraining_opaque_associated_type( diag, - &msg, + msg, proj_ty, values.expected, ) || self.suggest_constraint( @@ -250,7 +250,7 @@ impl<T> Trait<T> for X { proj_ty, values.expected, )) { - diag.help(msg); + diag.help(msg()); diag.note( "for more information, visit \ https://doc.rust-lang.org/book/ch19-03-advanced-traits.html", @@ -308,7 +308,7 @@ impl<T> Trait<T> for X { fn suggest_constraint( &self, diag: &mut Diagnostic, - msg: &str, + msg: impl Fn() -> String, body_owner_def_id: DefId, proj_ty: &ty::AliasTy<'tcx>, ty: Ty<'tcx>, @@ -340,7 +340,7 @@ impl<T> Trait<T> for X { assoc, assoc_substs, ty, - msg, + &msg, false, ) { return true; @@ -374,10 +374,12 @@ impl<T> Trait<T> for X { ) { let tcx = self.tcx; - let msg = format!( - "consider constraining the associated type `{}` to `{}`", - values.expected, values.found - ); + let msg = || { + format!( + "consider constraining the associated type `{}` to `{}`", + values.expected, values.found + ) + }; let body_owner = tcx.hir().get_if_local(body_owner_def_id); let current_method_ident = body_owner.and_then(|n| n.ident()).map(|i| i.name); @@ -428,10 +430,11 @@ impl<T> Trait<T> for X { if callable_scope { diag.help(format!( "{} or calling a method that returns `{}`", - msg, values.expected + msg(), + values.expected )); } else { - diag.help(msg); + diag.help(msg()); } diag.note( "for more information, visit \ @@ -463,7 +466,7 @@ fn foo(&self) -> Self::T { String::new() } fn suggest_constraining_opaque_associated_type( &self, diag: &mut Diagnostic, - msg: &str, + msg: impl Fn() -> String, proj_ty: &ty::AliasTy<'tcx>, ty: Ty<'tcx>, ) -> bool { @@ -635,7 +638,7 @@ fn foo(&self) -> Self::T { String::new() } assoc: ty::AssocItem, assoc_substs: &[ty::GenericArg<'tcx>], ty: Ty<'tcx>, - msg: &str, + msg: impl Fn() -> String, is_bound_surely_present: bool, ) -> bool { // FIXME: we would want to call `resolve_vars_if_possible` on `ty` before suggesting. @@ -678,7 +681,7 @@ fn foo(&self) -> Self::T { String::new() } assoc: ty::AssocItem, assoc_substs: &[ty::GenericArg<'tcx>], ty: Ty<'tcx>, - msg: &str, + msg: impl Fn() -> String, ) -> bool { let tcx = self.tcx; @@ -693,7 +696,7 @@ fn foo(&self) -> Self::T { String::new() } let item_args = self.format_generic_args(assoc_substs); (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident(tcx), item_args, ty)) }; - diag.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect); + diag.span_suggestion_verbose(span, msg(), sugg, MaybeIncorrect); return true; } false diff --git a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs index c304cd25c9c..974bc2f1153 100644 --- a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs +++ b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs @@ -82,8 +82,10 @@ impl<'tcx> InferCtxt<'tcx> { let delegate = FnMutDelegate { regions: &mut |br: ty::BoundRegion| { - self.tcx - .mk_re_placeholder(ty::PlaceholderRegion { universe: next_universe, bound: br }) + ty::Region::new_placeholder( + self.tcx, + ty::PlaceholderRegion { universe: next_universe, bound: br }, + ) }, types: &mut |bound_ty: ty::BoundTy| { self.tcx.mk_placeholder(ty::PlaceholderType { @@ -103,13 +105,15 @@ impl<'tcx> InferCtxt<'tcx> { self.tcx.replace_bound_vars_uncached(binder, delegate) } - /// See [RegionConstraintCollector::leak_check][1]. + /// See [RegionConstraintCollector::leak_check][1]. We only check placeholder + /// leaking into `outer_universe`, i.e. 
placeholders which cannot be named by that + /// universe. /// /// [1]: crate::infer::region_constraints::RegionConstraintCollector::leak_check pub fn leak_check( &self, - overly_polymorphic: bool, - snapshot: &CombinedSnapshot<'tcx>, + outer_universe: ty::UniverseIndex, + only_consider_snapshot: Option<&CombinedSnapshot<'tcx>>, ) -> RelateResult<'tcx, ()> { // If the user gave `-Zno-leak-check`, or we have been // configured to skip the leak check, then skip the leak check @@ -117,15 +121,15 @@ impl<'tcx> InferCtxt<'tcx> { // subtyping errors that it would have caught will now be // caught later on, during region checking. However, we // continue to use it for a transition period. - if self.tcx.sess.opts.unstable_opts.no_leak_check || self.skip_leak_check.get() { + if self.tcx.sess.opts.unstable_opts.no_leak_check || self.skip_leak_check { return Ok(()); } self.inner.borrow_mut().unwrap_region_constraints().leak_check( self.tcx, - overly_polymorphic, + outer_universe, self.universe(), - snapshot, + only_consider_snapshot, ) } } diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs index 8482ae2aa38..485e34fe2bf 100644 --- a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs +++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs @@ -347,7 +347,7 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> { // name the placeholder, then the placeholder is // larger; otherwise, the only ancestor is `'static`. Err(placeholder) if empty_ui.can_name(placeholder.universe) => { - self.tcx().mk_re_placeholder(placeholder) + ty::Region::new_placeholder(self.tcx(), placeholder) } Err(_) => self.tcx().lifetimes.re_static, }; diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index cd99fc31212..447d4c9f84b 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -251,14 +251,13 @@ pub struct InferCtxt<'tcx> { /// solving is left to borrowck instead. pub considering_regions: bool, - pub inner: RefCell<InferCtxtInner<'tcx>>, - /// If set, this flag causes us to skip the 'leak check' during /// higher-ranked subtyping operations. This flag is a temporary one used /// to manage the removal of the leak-check: for the time being, we still run the - /// leak-check, but we issue warnings. This flag can only be set to true - /// when entering a snapshot. - skip_leak_check: Cell<bool>, + /// leak-check, but we issue warnings. + skip_leak_check: bool, + + pub inner: RefCell<InferCtxtInner<'tcx>>, /// Once region inference is done, the values for each variable. lexical_region_resolutions: RefCell<Option<LexicalRegionResolutions<'tcx>>>, @@ -543,6 +542,7 @@ pub struct InferCtxtBuilder<'tcx> { tcx: TyCtxt<'tcx>, defining_use_anchor: DefiningAnchor, considering_regions: bool, + skip_leak_check: bool, /// Whether we are in coherence mode. intercrate: bool, } @@ -557,6 +557,7 @@ impl<'tcx> TyCtxtInferExt<'tcx> for TyCtxt<'tcx> { tcx: self, defining_use_anchor: DefiningAnchor::Error, considering_regions: true, + skip_leak_check: false, intercrate: false, } } @@ -584,6 +585,11 @@ impl<'tcx> InferCtxtBuilder<'tcx> { self } + pub fn skip_leak_check(mut self, skip_leak_check: bool) -> Self { + self.skip_leak_check = skip_leak_check; + self + } + /// Given a canonical value `C` as a starting point, create an /// inference context that contains each of the bound values /// within instantiated as a fresh variable. 
The `f` closure is @@ -605,11 +611,18 @@ impl<'tcx> InferCtxtBuilder<'tcx> { } pub fn build(&mut self) -> InferCtxt<'tcx> { - let InferCtxtBuilder { tcx, defining_use_anchor, considering_regions, intercrate } = *self; + let InferCtxtBuilder { + tcx, + defining_use_anchor, + considering_regions, + skip_leak_check, + intercrate, + } = *self; InferCtxt { tcx, defining_use_anchor, considering_regions, + skip_leak_check, inner: RefCell::new(InferCtxtInner::new()), lexical_region_resolutions: RefCell::new(None), selection_cache: Default::default(), @@ -619,7 +632,6 @@ impl<'tcx> InferCtxtBuilder<'tcx> { tainted_by_errors: Cell::new(None), err_count_on_creation: tcx.sess.err_count(), in_snapshot: Cell::new(false), - skip_leak_check: Cell::new(false), universe: Cell::new(ty::UniverseIndex::ROOT), intercrate, } @@ -815,32 +827,9 @@ impl<'tcx> InferCtxt<'tcx> { r } - /// If `should_skip` is true, then execute `f` then unroll any bindings it creates. - #[instrument(skip(self, f), level = "debug")] - pub fn probe_maybe_skip_leak_check<R, F>(&self, should_skip: bool, f: F) -> R - where - F: FnOnce(&CombinedSnapshot<'tcx>) -> R, - { - let snapshot = self.start_snapshot(); - let was_skip_leak_check = self.skip_leak_check.get(); - if should_skip { - self.skip_leak_check.set(true); - } - let r = f(&snapshot); - self.rollback_to("probe", snapshot); - self.skip_leak_check.set(was_skip_leak_check); - r - } - - /// Scan the constraints produced since `snapshot` began and returns: - /// - /// - `None` -- if none of them involves "region outlives" constraints. - /// - `Some(true)` -- if there are `'a: 'b` constraints where `'a` or `'b` is a placeholder. - /// - `Some(false)` -- if there are `'a: 'b` constraints but none involve placeholders. - pub fn region_constraints_added_in_snapshot( - &self, - snapshot: &CombinedSnapshot<'tcx>, - ) -> Option<bool> { + /// Scan the constraints produced since `snapshot` and check whether + /// we added any region constraints. + pub fn region_constraints_added_in_snapshot(&self, snapshot: &CombinedSnapshot<'tcx>) -> bool { self.inner .borrow_mut() .unwrap_region_constraints() @@ -1065,7 +1054,7 @@ impl<'tcx> InferCtxt<'tcx> { ) -> ty::Region<'tcx> { let region_var = self.inner.borrow_mut().unwrap_region_constraints().new_region_var(universe, origin); - self.tcx.mk_re_var(region_var) + ty::Region::new_var(self.tcx, region_var) } /// Return the universe that the region `r` was created in. For diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs index 9d5ec228d82..105a3f08c82 100644 --- a/compiler/rustc_infer/src/infer/opaque_types.rs +++ b/compiler/rustc_infer/src/infer/opaque_types.rs @@ -533,17 +533,29 @@ impl<'tcx> InferCtxt<'tcx> { // these are the same span, but not in cases like `-> (impl // Foo, impl Bar)`. let span = cause.span; - let prev = self.inner.borrow_mut().opaque_types().register( - opaque_type_key, - OpaqueHiddenType { ty: hidden_ty, span }, - origin, - ); - let mut obligations = if let Some(prev) = prev { - self.at(&cause, param_env) - .eq_exp(DefineOpaqueTypes::Yes, a_is_expected, prev, hidden_ty)? - .obligations + let mut obligations = if self.intercrate { + // During intercrate we do not define opaque types but instead always + // force ambiguity unless the hidden type is known to not implement + // our trait. 
+ vec![traits::Obligation::new( + self.tcx, + cause.clone(), + param_env, + ty::PredicateKind::Ambiguous, + )] } else { - Vec::new() + let prev = self.inner.borrow_mut().opaque_types().register( + opaque_type_key, + OpaqueHiddenType { ty: hidden_ty, span }, + origin, + ); + if let Some(prev) = prev { + self.at(&cause, param_env) + .eq_exp(DefineOpaqueTypes::Yes, a_is_expected, prev, hidden_ty)? + .obligations + } else { + Vec::new() + } }; self.add_item_bounds_for_hidden_type( diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs index c2bf0f3db25..59ae2ce6c60 100644 --- a/compiler/rustc_infer/src/infer/outlives/verify.rs +++ b/compiler/rustc_infer/src/infer/outlives/verify.rs @@ -293,7 +293,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> { ) -> impl Iterator<Item = ty::Region<'tcx>> { let tcx = self.tcx; let bounds = tcx.item_bounds(alias_ty.def_id); - trace!("{:#?}", bounds.0); + trace!("{:#?}", bounds.skip_binder()); bounds .subst_iter(tcx, alias_ty.substs) .filter_map(|p| p.to_opt_type_outlives()) diff --git a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs index 89cfc9ea3d1..dd65f66ccd1 100644 --- a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs +++ b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs @@ -3,7 +3,6 @@ use crate::infer::CombinedSnapshot; use rustc_data_structures::{ fx::FxIndexMap, graph::{scc::Sccs, vec_graph::VecGraph}, - undo_log::UndoLogs, }; use rustc_index::Idx; use rustc_middle::ty::error::TypeError; @@ -13,7 +12,9 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { /// Searches new universes created during `snapshot`, looking for /// placeholders that may "leak" out from the universes they are contained /// in. If any leaking placeholders are found, then an `Err` is returned - /// (typically leading to the snapshot being reversed). + /// (typically leading to the snapshot being reversed). This algorithm + /// only looks at placeholders which cannot be named by `outer_universe`, + /// as this is the universe we're currently checking for a leak. /// /// The leak check *used* to be the only way we had to handle higher-ranked /// obligations. Now that we have integrated universes into the region @@ -55,6 +56,12 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { /// * if they must also be equal to a placeholder P, and U cannot name P, report an error, as that /// indicates `P: R` and `R` is in an incompatible universe /// + /// To improve performance and for the old trait solver caching to be sound, this takes + /// an optional snapshot in which case we only look at region constraints added in that + /// snapshot. If we were to not do that the `leak_check` during evaluation can rely on + /// region constraints added outside of that evaluation. As that is not reflected in the + /// cache key this would be unsound. 
+ /// /// # Historical note /// /// Older variants of the leak check used to report errors for these @@ -62,36 +69,21 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { /// /// * R: P1, even if R cannot name P1, because R = 'static is a valid sol'n /// * R: P1, R: P2, as above + #[instrument(level = "debug", skip(self, tcx, only_consider_snapshot), ret)] pub fn leak_check( &mut self, tcx: TyCtxt<'tcx>, - overly_polymorphic: bool, + outer_universe: ty::UniverseIndex, max_universe: ty::UniverseIndex, - snapshot: &CombinedSnapshot<'tcx>, + only_consider_snapshot: Option<&CombinedSnapshot<'tcx>>, ) -> RelateResult<'tcx, ()> { - debug!( - "leak_check(max_universe={:?}, snapshot.universe={:?}, overly_polymorphic={:?})", - max_universe, snapshot.universe, overly_polymorphic - ); - - assert!(UndoLogs::<super::UndoLog<'_>>::in_snapshot(&self.undo_log)); - - let universe_at_start_of_snapshot = snapshot.universe; - if universe_at_start_of_snapshot == max_universe { + if outer_universe == max_universe { return Ok(()); } - let mini_graph = - &MiniGraph::new(tcx, self.undo_log.region_constraints(), &self.storage.data.verifys); + let mini_graph = &MiniGraph::new(tcx, &self, only_consider_snapshot); - let mut leak_check = LeakCheck::new( - tcx, - universe_at_start_of_snapshot, - max_universe, - overly_polymorphic, - mini_graph, - self, - ); + let mut leak_check = LeakCheck::new(tcx, outer_universe, max_universe, mini_graph, self); leak_check.assign_placeholder_values()?; leak_check.propagate_scc_value()?; Ok(()) @@ -100,9 +92,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { struct LeakCheck<'me, 'tcx> { tcx: TyCtxt<'tcx>, - universe_at_start_of_snapshot: ty::UniverseIndex, - /// Only used when reporting region errors. - overly_polymorphic: bool, + outer_universe: ty::UniverseIndex, mini_graph: &'me MiniGraph<'tcx>, rcc: &'me RegionConstraintCollector<'me, 'tcx>, @@ -130,17 +120,15 @@ struct LeakCheck<'me, 'tcx> { impl<'me, 'tcx> LeakCheck<'me, 'tcx> { fn new( tcx: TyCtxt<'tcx>, - universe_at_start_of_snapshot: ty::UniverseIndex, + outer_universe: ty::UniverseIndex, max_universe: ty::UniverseIndex, - overly_polymorphic: bool, mini_graph: &'me MiniGraph<'tcx>, rcc: &'me RegionConstraintCollector<'me, 'tcx>, ) -> Self { let dummy_scc_universe = SccUniverse { universe: max_universe, region: None }; Self { tcx, - universe_at_start_of_snapshot, - overly_polymorphic, + outer_universe, mini_graph, rcc, scc_placeholders: IndexVec::from_elem_n(None, mini_graph.sccs.num_sccs()), @@ -165,7 +153,7 @@ impl<'me, 'tcx> LeakCheck<'me, 'tcx> { // Detect those SCCs that directly contain a placeholder if let ty::RePlaceholder(placeholder) = **region { - if self.universe_at_start_of_snapshot.cannot_name(placeholder.universe) { + if self.outer_universe.cannot_name(placeholder.universe) { self.assign_scc_value(scc, placeholder)?; } } @@ -280,7 +268,7 @@ impl<'me, 'tcx> LeakCheck<'me, 'tcx> { placeholder1: ty::PlaceholderRegion, placeholder2: ty::PlaceholderRegion, ) -> TypeError<'tcx> { - self.error(placeholder1, self.tcx.mk_re_placeholder(placeholder2)) + self.error(placeholder1, ty::Region::new_placeholder(self.tcx, placeholder2)) } fn error( @@ -289,11 +277,7 @@ impl<'me, 'tcx> LeakCheck<'me, 'tcx> { other_region: ty::Region<'tcx>, ) -> TypeError<'tcx> { debug!("error: placeholder={:?}, other_region={:?}", placeholder, other_region); - if self.overly_polymorphic { - TypeError::RegionsOverlyPolymorphic(placeholder.bound.kind, other_region) - } else { - TypeError::RegionsInsufficientlyPolymorphic(placeholder.bound.kind, 
other_region) - } + TypeError::RegionsInsufficientlyPolymorphic(placeholder.bound.kind, other_region) } } @@ -379,56 +363,70 @@ struct MiniGraph<'tcx> { } impl<'tcx> MiniGraph<'tcx> { - fn new<'a>( + fn new( tcx: TyCtxt<'tcx>, - undo_log: impl Iterator<Item = &'a UndoLog<'tcx>>, - verifys: &[Verify<'tcx>], - ) -> Self - where - 'tcx: 'a, - { + region_constraints: &RegionConstraintCollector<'_, 'tcx>, + only_consider_snapshot: Option<&CombinedSnapshot<'tcx>>, + ) -> Self { let mut nodes = FxIndexMap::default(); let mut edges = Vec::new(); // Note that if `R2: R1`, we get a callback `r1, r2`, so `target` is first parameter. - Self::iterate_undo_log(tcx, undo_log, verifys, |target, source| { - let source_node = Self::add_node(&mut nodes, source); - let target_node = Self::add_node(&mut nodes, target); - edges.push((source_node, target_node)); - }); + Self::iterate_region_constraints( + tcx, + region_constraints, + only_consider_snapshot, + |target, source| { + let source_node = Self::add_node(&mut nodes, source); + let target_node = Self::add_node(&mut nodes, target); + edges.push((source_node, target_node)); + }, + ); let graph = VecGraph::new(nodes.len(), edges); let sccs = Sccs::new(&graph); Self { nodes, sccs } } /// Invokes `each_edge(R1, R2)` for each edge where `R2: R1` - fn iterate_undo_log<'a>( + fn iterate_region_constraints( tcx: TyCtxt<'tcx>, - undo_log: impl Iterator<Item = &'a UndoLog<'tcx>>, - verifys: &[Verify<'tcx>], + region_constraints: &RegionConstraintCollector<'_, 'tcx>, + only_consider_snapshot: Option<&CombinedSnapshot<'tcx>>, mut each_edge: impl FnMut(ty::Region<'tcx>, ty::Region<'tcx>), - ) where - 'tcx: 'a, - { - for undo_entry in undo_log { - match undo_entry { - &AddConstraint(Constraint::VarSubVar(a, b)) => { - each_edge(tcx.mk_re_var(a), tcx.mk_re_var(b)); - } - &AddConstraint(Constraint::RegSubVar(a, b)) => { - each_edge(a, tcx.mk_re_var(b)); - } - &AddConstraint(Constraint::VarSubReg(a, b)) => { - each_edge(tcx.mk_re_var(a), b); - } - &AddConstraint(Constraint::RegSubReg(a, b)) => { - each_edge(a, b); + ) { + let mut each_constraint = |constraint| match constraint { + &Constraint::VarSubVar(a, b) => { + each_edge(ty::Region::new_var(tcx, a), ty::Region::new_var(tcx, b)); + } + &Constraint::RegSubVar(a, b) => { + each_edge(a, ty::Region::new_var(tcx, b)); + } + &Constraint::VarSubReg(a, b) => { + each_edge(ty::Region::new_var(tcx, a), b); + } + &Constraint::RegSubReg(a, b) => { + each_edge(a, b); + } + }; + + if let Some(snapshot) = only_consider_snapshot { + for undo_entry in + region_constraints.undo_log.region_constraints_in_snapshot(&snapshot.undo_snapshot) + { + match undo_entry { + AddConstraint(constraint) => { + each_constraint(constraint); + } + &AddVerify(i) => span_bug!( + region_constraints.data().verifys[i].origin.span(), + "we never add verifications while doing higher-ranked things", + ), + &AddCombination(..) | &AddVar(..) => {} } - &AddVerify(i) => span_bug!( - verifys[i].origin.span(), - "we never add verifications while doing higher-ranked things", - ), - &AddCombination(..) | &AddVar(..) 
=> {} + } + } else { + for (constraint, _origin) in ®ion_constraints.data().constraints { + each_constraint(constraint) } } } diff --git a/compiler/rustc_infer/src/infer/region_constraints/mod.rs b/compiler/rustc_infer/src/infer/region_constraints/mod.rs index c7a307b89e4..613da8a0b45 100644 --- a/compiler/rustc_infer/src/infer/region_constraints/mod.rs +++ b/compiler/rustc_infer/src/infer/region_constraints/mod.rs @@ -400,7 +400,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { data } - pub(super) fn data(&self) -> &RegionConstraintData<'tcx> { + pub fn data(&self) -> &RegionConstraintData<'tcx> { &self.data } @@ -610,13 +610,13 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { let resolved = ut .probe_value(root_vid) .get_value_ignoring_universes() - .unwrap_or_else(|| tcx.mk_re_var(root_vid)); + .unwrap_or_else(|| ty::Region::new_var(tcx, root_vid)); // Don't resolve a variable to a region that it cannot name. if self.var_universe(vid).can_name(self.universe(resolved)) { resolved } else { - tcx.mk_re_var(vid) + ty::Region::new_var(tcx, vid) } } @@ -637,7 +637,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { ) -> Region<'tcx> { let vars = TwoRegions { a, b }; if let Some(&c) = self.combine_map(t).get(&vars) { - return tcx.mk_re_var(c); + return ty::Region::new_var(tcx, c); } let a_universe = self.universe(a); let b_universe = self.universe(b); @@ -645,7 +645,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { let c = self.new_region_var(c_universe, MiscVariable(origin.span())); self.combine_map(t).insert(vars, c); self.undo_log.push(AddCombination(t, vars)); - let new_r = tcx.mk_re_var(c); + let new_r = ty::Region::new_var(tcx, c); for old_r in [a, b] { match t { Glb => self.make_subregion(origin.clone(), new_r, old_r), @@ -683,15 +683,10 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { } /// See `InferCtxt::region_constraints_added_in_snapshot`. - pub fn region_constraints_added_in_snapshot(&self, mark: &Snapshot<'tcx>) -> Option<bool> { + pub fn region_constraints_added_in_snapshot(&self, mark: &Snapshot<'tcx>) -> bool { self.undo_log .region_constraints_in_snapshot(mark) - .map(|&elt| match elt { - AddConstraint(constraint) => Some(constraint.involves_placeholders()), - _ => None, - }) - .max() - .unwrap_or(None) + .any(|&elt| matches!(elt, AddConstraint(_))) } #[inline] diff --git a/compiler/rustc_infer/src/infer/undo_log.rs b/compiler/rustc_infer/src/infer/undo_log.rs index 955c54e8515..25d06b21ec8 100644 --- a/compiler/rustc_infer/src/infer/undo_log.rs +++ b/compiler/rustc_infer/src/infer/undo_log.rs @@ -138,11 +138,9 @@ impl<'tcx> InferCtxtInner<'tcx> { } if self.undo_log.num_open_snapshots == 1 { - // The root snapshot. It's safe to clear the undo log because - // there's no snapshot further out that we might need to roll back - // to. + // After the root snapshot the undo log should be empty. assert!(snapshot.undo_len == 0); - self.undo_log.logs.clear(); + assert!(self.undo_log.logs.is_empty()); } self.undo_log.num_open_snapshots -= 1; @@ -183,15 +181,6 @@ impl<'tcx> InferCtxtUndoLogs<'tcx> { self.logs[s.undo_len..].iter().any(|log| matches!(log, UndoLog::OpaqueTypes(..))) } - pub(crate) fn region_constraints( - &self, - ) -> impl Iterator<Item = &'_ region_constraints::UndoLog<'tcx>> + Clone { - self.logs.iter().filter_map(|log| match log { - UndoLog::RegionConstraintCollector(log) => Some(log), - _ => None, - }) - } - fn assert_open_snapshot(&self, snapshot: &Snapshot<'tcx>) { // Failures here may indicate a failure to follow a stack discipline. 
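The `region_constraints_added_in_snapshot` hunk above collapses a three-valued `Option<bool>` result (no region constraints added / some added, none involving placeholders / some involving placeholders) into a plain "were any region constraints added?". A self-contained before/after sketch with a toy undo-log enum, not the rustc type:

enum UndoLog {
    AddConstraint { involves_placeholders: bool },
    Other,
}

// old shape: None / Some(false) / Some(true)
fn added_in_snapshot_old(log: &[UndoLog]) -> Option<bool> {
    log.iter()
        .map(|e| match e {
            UndoLog::AddConstraint { involves_placeholders } => Some(*involves_placeholders),
            _ => None,
        })
        .max()
        .unwrap_or(None)
}

// new shape: just "did the snapshot add any constraint at all?"
fn added_in_snapshot_new(log: &[UndoLog]) -> bool {
    log.iter().any(|e| matches!(e, UndoLog::AddConstraint { .. }))
}

fn main() {
    let log = [UndoLog::Other, UndoLog::AddConstraint { involves_placeholders: false }];
    assert_eq!(added_in_snapshot_old(&log), Some(false));
    assert!(added_in_snapshot_new(&log));
}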
assert!(self.logs.len() >= snapshot.undo_len); diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl index d34a3afcba5..98fe3821947 100644 --- a/compiler/rustc_lint/messages.ftl +++ b/compiler/rustc_lint/messages.ftl @@ -155,6 +155,8 @@ lint_builtin_unused_doc_comment = unused doc comment lint_builtin_while_true = denote infinite loops with `loop {"{"} ... {"}"}` .suggestion = use `loop` +lint_cast_ref_to_mut = casting `&T` to `&mut T` is undefined behavior, even if the reference is unused, consider instead using an `UnsafeCell` + lint_check_name_deprecated = lint name `{$lint_name}` is deprecated and does not have an effect anymore. Use: {$new_name} lint_check_name_unknown = unknown lint: `{$lint_name}` @@ -304,6 +306,14 @@ lint_improper_ctypes_union_layout_help = consider adding a `#[repr(C)]` or `#[re lint_improper_ctypes_union_layout_reason = this union has unspecified layout lint_improper_ctypes_union_non_exhaustive = this union is non-exhaustive +# FIXME: we should ordinalize $valid_up_to when we add support for doing so +lint_invalid_from_utf8_checked = calls to `{$method}` with a invalid literal always return an error + .label = the literal was valid UTF-8 up to the {$valid_up_to} bytes + +# FIXME: we should ordinalize $valid_up_to when we add support for doing so +lint_invalid_from_utf8_unchecked = calls to `{$method}` with a invalid literal are undefined behavior + .label = the literal was valid UTF-8 up to the {$valid_up_to} bytes + lint_lintpass_by_hand = implementing `LintPass` by hand .help = try using `declare_lint_pass!` or `impl_lint_pass!` instead diff --git a/compiler/rustc_lint/src/cast_ref_to_mut.rs b/compiler/rustc_lint/src/cast_ref_to_mut.rs new file mode 100644 index 00000000000..84308d48c10 --- /dev/null +++ b/compiler/rustc_lint/src/cast_ref_to_mut.rs @@ -0,0 +1,72 @@ +use rustc_ast::Mutability; +use rustc_hir::{Expr, ExprKind, MutTy, TyKind, UnOp}; +use rustc_middle::ty; +use rustc_span::sym; + +use crate::{lints::CastRefToMutDiag, LateContext, LateLintPass, LintContext}; + +declare_lint! { + /// The `cast_ref_to_mut` lint checks for casts of `&T` to `&mut T` + /// without using interior mutability. + /// + /// ### Example + /// + /// ```rust,compile_fail + /// fn x(r: &i32) { + /// unsafe { + /// *(r as *const i32 as *mut i32) += 1; + /// } + /// } + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// Casting `&T` to `&mut T` without using interior mutability is undefined behavior, + /// as it's a violation of Rust reference aliasing requirements. + /// + /// `UnsafeCell` is the only way to obtain aliasable data that is considered + /// mutable. + CAST_REF_TO_MUT, + Deny, + "casts of `&T` to `&mut T` without interior mutability" +} + +declare_lint_pass!(CastRefToMut => [CAST_REF_TO_MUT]); + +impl<'tcx> LateLintPass<'tcx> for CastRefToMut { + fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) { + let ExprKind::Unary(UnOp::Deref, e) = &expr.kind else { return; }; + + let e = e.peel_blocks(); + let e = if let ExprKind::Cast(e, t) = e.kind + && let TyKind::Ptr(MutTy { mutbl: Mutability::Mut, .. }) = t.kind { + e + } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind + && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id) + && cx.tcx.is_diagnostic_item(sym::ptr_cast_mut, def_id) { + expr + } else { + return; + }; + + let e = e.peel_blocks(); + let e = if let ExprKind::Cast(e, t) = e.kind + && let TyKind::Ptr(MutTy { mutbl: Mutability::Not, .. 
}) = t.kind { + e + } else if let ExprKind::Call(path, [arg]) = e.kind + && let ExprKind::Path(ref qpath) = path.kind + && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() + && cx.tcx.is_diagnostic_item(sym::ptr_from_ref, def_id) { + arg + } else { + return; + }; + + let e = e.peel_blocks(); + if let ty::Ref(..) = cx.typeck_results().node_type(e.hir_id).kind() { + cx.emit_spanned_lint(CAST_REF_TO_MUT, expr.span, CastRefToMutDiag); + } + } +} diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs index 1d0c43e95e0..947530a1b65 100644 --- a/compiler/rustc_lint/src/context.rs +++ b/compiler/rustc_lint/src/context.rs @@ -952,6 +952,10 @@ pub trait LintContext: Sized { db.span_label(first_reexport_span, format!("the name `{}` in the {} namespace is first re-exported here", name, namespace)); db.span_label(duplicate_reexport_span, format!("but the name `{}` in the {} namespace is also re-exported here", name, namespace)); } + BuiltinLintDiagnostics::HiddenGlobReexports { name, namespace, glob_reexport_span, private_item_span } => { + db.span_label(glob_reexport_span, format!("the name `{}` in the {} namespace is supposed to be publicly re-exported here", name, namespace)); + db.span_label(private_item_span, "but the private item here shadows it"); + } } // Rewrap `db`, and pass control to the user. decorate(db) diff --git a/compiler/rustc_lint/src/errors.rs b/compiler/rustc_lint/src/errors.rs index bbae3d368f4..68167487a1b 100644 --- a/compiler/rustc_lint/src/errors.rs +++ b/compiler/rustc_lint/src/errors.rs @@ -39,7 +39,7 @@ impl AddToDiagnostic for OverruledAttributeSub { diag.span_label(span, fluent::lint_node_source); if let Some(rationale) = reason { #[allow(rustc::untranslatable_diagnostic)] - diag.note(rationale.as_str()); + diag.note(rationale.to_string()); } } OverruledAttributeSub::CommandLineSource => { diff --git a/compiler/rustc_lint/src/invalid_from_utf8.rs b/compiler/rustc_lint/src/invalid_from_utf8.rs new file mode 100644 index 00000000000..3291286ad67 --- /dev/null +++ b/compiler/rustc_lint/src/invalid_from_utf8.rs @@ -0,0 +1,118 @@ +use std::str::Utf8Error; + +use rustc_ast::{BorrowKind, LitKind}; +use rustc_hir::{Expr, ExprKind}; +use rustc_span::source_map::Spanned; +use rustc_span::sym; + +use crate::lints::InvalidFromUtf8Diag; +use crate::{LateContext, LateLintPass, LintContext}; + +declare_lint! { + /// The `invalid_from_utf8_unchecked` lint checks for calls to + /// `std::str::from_utf8_unchecked` and `std::str::from_utf8_unchecked_mut` + /// with an invalid UTF-8 literal. + /// + /// ### Example + /// + /// ```rust,compile_fail + /// # #[allow(unused)] + /// unsafe { + /// std::str::from_utf8_unchecked(b"Ru\x82st"); + /// } + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// Creating such a `str` would result in undefined behavior as per documentation + /// for `std::str::from_utf8_unchecked` and `std::str::from_utf8_unchecked_mut`. + pub INVALID_FROM_UTF8_UNCHECKED, + Deny, + "using a non UTF-8 literal in `std::str::from_utf8_unchecked`" +} + +declare_lint! { + /// The `invalid_from_utf8` lint checks for calls to + /// `std::str::from_utf8` and `std::str::from_utf8_mut` + /// with an invalid UTF-8 literal. 
+ /// + /// ### Example + /// + /// ```rust + /// # #[allow(unused)] + /// std::str::from_utf8(b"Ru\x82st"); + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// Trying to create such a `str` would always return an error as per documentation + /// for `std::str::from_utf8` and `std::str::from_utf8_mut`. + pub INVALID_FROM_UTF8, + Warn, + "using a non UTF-8 literal in `std::str::from_utf8`" +} + +declare_lint_pass!(InvalidFromUtf8 => [INVALID_FROM_UTF8_UNCHECKED, INVALID_FROM_UTF8]); + +impl<'tcx> LateLintPass<'tcx> for InvalidFromUtf8 { + fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) { + if let ExprKind::Call(path, [arg]) = expr.kind + && let ExprKind::Path(ref qpath) = path.kind + && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() + && let Some(diag_item) = cx.tcx.get_diagnostic_name(def_id) + && [sym::str_from_utf8, sym::str_from_utf8_mut, + sym::str_from_utf8_unchecked, sym::str_from_utf8_unchecked_mut].contains(&diag_item) + { + let lint = |utf8_error: Utf8Error| { + let label = arg.span; + let method = diag_item.as_str().strip_prefix("str_").unwrap(); + let method = format!("std::str::{method}"); + let valid_up_to = utf8_error.valid_up_to(); + let is_unchecked_variant = diag_item.as_str().contains("unchecked"); + + cx.emit_spanned_lint( + if is_unchecked_variant { INVALID_FROM_UTF8_UNCHECKED } else { INVALID_FROM_UTF8 }, + expr.span, + if is_unchecked_variant { + InvalidFromUtf8Diag::Unchecked { method, valid_up_to, label } + } else { + InvalidFromUtf8Diag::Checked { method, valid_up_to, label } + } + ) + }; + + match &arg.kind { + ExprKind::Lit(Spanned { node: lit, .. }) => { + if let LitKind::ByteStr(bytes, _) = &lit + && let Err(utf8_error) = std::str::from_utf8(bytes) + { + lint(utf8_error); + } + }, + ExprKind::AddrOf(BorrowKind::Ref, _, Expr { kind: ExprKind::Array(args), .. }) => { + let elements = args.iter().map(|e|{ + match &e.kind { + ExprKind::Lit(Spanned { node: lit, .. }) => match lit { + LitKind::Byte(b) => Some(*b), + LitKind::Int(b, _) => Some(*b as u8), + _ => None + } + _ => None + } + }).collect::<Option<Vec<_>>>(); + + if let Some(elements) = elements + && let Err(utf8_error) = std::str::from_utf8(&elements) + { + lint(utf8_error); + } + } + _ => {} + } + } + } +} diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs index b92ed11f38a..8376835f52c 100644 --- a/compiler/rustc_lint/src/levels.rs +++ b/compiler/rustc_lint/src/levels.rs @@ -242,7 +242,9 @@ impl LintLevelsProvider for LintLevelQueryMap<'_> { struct QueryMapExpectationsWrapper<'tcx> { tcx: TyCtxt<'tcx>, + /// HirId of the currently investigated element. cur: HirId, + /// Level map for `cur`. specs: ShallowLintLevelMap, expectations: Vec<(LintExpectationId, LintExpectation)>, unstable_to_stable_ids: FxHashMap<LintExpectationId, LintExpectationId>, @@ -255,11 +257,11 @@ impl LintLevelsProvider for QueryMapExpectationsWrapper<'_> { self.specs.specs.get(&self.cur.local_id).unwrap_or(&self.empty) } fn insert(&mut self, id: LintId, lvl: LevelAndSource) { - let specs = self.specs.specs.get_mut_or_insert_default(self.cur.local_id); - specs.clear(); - specs.insert(id, lvl); + self.specs.specs.get_mut_or_insert_default(self.cur.local_id).insert(id, lvl); } fn get_lint_level(&self, lint: &'static Lint, _: &Session) -> LevelAndSource { + // We cannot use `tcx.lint_level_at_node` because we want to know in which order the + // attributes have been inserted, in particular whether an `expect` follows a `forbid`. 
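A usage note for the two UTF-8 lints introduced above: the checked `std::str::from_utf8`/`from_utf8_mut` calls only warn (`invalid_from_utf8`), since the call is sound and merely guaranteed to return `Err`, while the `*_unchecked` variants are deny-by-default (`invalid_from_utf8_unchecked`) because materializing the `str` is undefined behavior. For example:

fn main() {
    // `\x82` makes the literal invalid UTF-8, so this triggers the warn-level
    // `invalid_from_utf8` lint; it still compiles and simply returns Err at runtime.
    let res = std::str::from_utf8(b"Ru\x82st");
    assert!(res.is_err());

    // The unchecked variant would hit the deny-level `invalid_from_utf8_unchecked`
    // lint instead, because the resulting `str` would be undefined behavior:
    // let s = unsafe { std::str::from_utf8_unchecked(b"Ru\x82st") };
}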
self.specs.lint_level_id_at_node(self.tcx, LintId::of(lint), self.cur) } fn push_expectation(&mut self, id: LintExpectationId, expectation: LintExpectation) { @@ -355,7 +357,9 @@ impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, LintLevelQueryMap<'tcx>> { impl<'tcx> LintLevelsBuilder<'_, QueryMapExpectationsWrapper<'tcx>> { fn add_id(&mut self, hir_id: HirId) { + // Change both the `HirId` and the associated specs. self.provider.cur = hir_id; + self.provider.specs.specs.clear(); self.add(self.provider.tcx.hir().attrs(hir_id), hir_id == hir::CRATE_HIR_ID, Some(hir_id)); } } diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index dfddfe09ab3..5e3f057d428 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -50,6 +50,7 @@ extern crate tracing; mod array_into_iter; pub mod builtin; +mod cast_ref_to_mut; mod context; mod deref_into_dyn_supertrait; mod drop_forget_useless; @@ -60,6 +61,7 @@ mod expect; mod for_loops_over_fallibles; pub mod hidden_unicode_codepoints; mod internal; +mod invalid_from_utf8; mod late; mod let_underscore; mod levels; @@ -96,12 +98,14 @@ use rustc_span::Span; use array_into_iter::ArrayIntoIter; use builtin::*; +use cast_ref_to_mut::*; use deref_into_dyn_supertrait::*; use drop_forget_useless::*; use enum_intrinsics_non_enums::EnumIntrinsicsNonEnums; use for_loops_over_fallibles::*; use hidden_unicode_codepoints::*; use internal::*; +use invalid_from_utf8::*; use let_underscore::*; use map_unit_fn::*; use methods::*; @@ -207,10 +211,12 @@ late_lint_methods!( HardwiredLints: HardwiredLints, ImproperCTypesDeclarations: ImproperCTypesDeclarations, ImproperCTypesDefinitions: ImproperCTypesDefinitions, + InvalidFromUtf8: InvalidFromUtf8, VariantSizeDifferences: VariantSizeDifferences, BoxPointers: BoxPointers, PathStatements: PathStatements, LetUnderscore: LetUnderscore, + CastRefToMut: CastRefToMut, // Depends on referenced function signatures in expressions UnusedResults: UnusedResults, NonUpperCaseGlobals: NonUpperCaseGlobals, diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs index de1c2be2875..fd15f795202 100644 --- a/compiler/rustc_lint/src/lints.rs +++ b/compiler/rustc_lint/src/lints.rs @@ -699,6 +699,30 @@ pub struct ForgetCopyDiag<'a> { pub label: Span, } +// invalid_from_utf8.rs +#[derive(LintDiagnostic)] +pub enum InvalidFromUtf8Diag { + #[diag(lint_invalid_from_utf8_unchecked)] + Unchecked { + method: String, + valid_up_to: usize, + #[label] + label: Span, + }, + #[diag(lint_invalid_from_utf8_checked)] + Checked { + method: String, + valid_up_to: usize, + #[label] + label: Span, + }, +} + +// cast_ref_to_mut.rs +#[derive(LintDiagnostic)] +#[diag(lint_cast_ref_to_mut)] +pub struct CastRefToMutDiag; + // hidden_unicode_codepoints.rs #[derive(LintDiagnostic)] #[diag(lint_hidden_unicode_codepoints)] @@ -1527,7 +1551,7 @@ impl<'a> DecorateLint<'a, ()> for UnusedDef<'_, '_> { diag.set_arg("def", self.cx.tcx.def_path_str(self.def_id)); // check for #[must_use = "..."] if let Some(note) = self.note { - diag.note(note.as_str()); + diag.note(note.to_string()); } if let Some(sugg) = self.suggestion { diag.subdiagnostic(sugg); diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 6e9dc880a7d..1507087bdd4 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -3272,6 +3272,43 @@ declare_lint! { "ambiguous glob re-exports", } +declare_lint! 
{ + /// The `hidden_glob_reexports` lint detects cases where glob re-export items are shadowed by + /// private items. + /// + /// ### Example + /// + /// ```rust,compile_fail + /// #![deny(hidden_glob_reexports)] + /// + /// pub mod upstream { + /// mod inner { pub struct Foo {}; pub struct Bar {}; } + /// pub use self::inner::*; + /// struct Foo {} // private item shadows `inner::Foo` + /// } + /// + /// // mod downstream { + /// // fn test() { + /// // let _ = crate::upstream::Foo; // inaccessible + /// // } + /// // } + /// + /// pub fn main() {} + /// ``` + /// + /// {{produces}} + /// + /// ### Explanation + /// + /// This was previously accepted without any errors or warnings but it could silently break a + /// crate's downstream user code. If the `struct Foo` was added, `dep::inner::Foo` would + /// silently become inaccessible and trigger a "`struct `Foo` is private`" visibility error at + /// the downstream use site. + pub HIDDEN_GLOB_REEXPORTS, + Warn, + "name introduced by a private item shadows a name introduced by a public glob re-export", +} + declare_lint_pass! { /// Does nothing as a lint pass, but registers some `Lint`s /// that are used by other parts of the compiler. @@ -3304,6 +3341,7 @@ declare_lint_pass! { FORBIDDEN_LINT_GROUPS, FUNCTION_ITEM_REFERENCES, FUZZY_PROVENANCE_CASTS, + HIDDEN_GLOB_REEXPORTS, ILL_FORMED_ATTRIBUTE_INPUT, ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, IMPLIED_BOUNDS_ENTAILMENT, diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs index e27e322db88..5a5031b7919 100644 --- a/compiler/rustc_lint_defs/src/lib.rs +++ b/compiler/rustc_lint_defs/src/lib.rs @@ -540,6 +540,16 @@ pub enum BuiltinLintDiagnostics { /// Span where the same name is also re-exported. duplicate_reexport_span: Span, }, + HiddenGlobReexports { + /// The name of the local binding which shadows the glob re-export. + name: String, + /// The namespace for which the shadowing occurred in. + namespace: String, + /// The glob reexport that is shadowed by the local binding. + glob_reexport_span: Span, + /// The local binding that shadows the glob reexport. 
+ private_item_span: Span, + }, } /// Lints that are buffered up early on in the `Session` before the diff --git a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h index 05890628378..af6f4d5eaf9 100644 --- a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h +++ b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h @@ -96,6 +96,7 @@ enum LLVMRustAttribute { AllocatedPointer = 38, AllocAlign = 39, #endif + SanitizeSafeStack = 40, }; typedef struct OpaqueRustString *RustStringRef; diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index 49acd71b3e1..ea04899ab68 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -234,6 +234,8 @@ static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) { case AllocAlign: return Attribute::AllocAlign; #endif + case SanitizeSafeStack: + return Attribute::SafeStack; } report_fatal_error("bad AttributeKind"); } diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs index aaf72ab94e7..b3976d756eb 100644 --- a/compiler/rustc_metadata/src/creader.rs +++ b/compiler/rustc_metadata/src/creader.rs @@ -365,6 +365,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { lib: Library, dep_kind: CrateDepKind, name: Symbol, + private_dep: Option<bool>, ) -> Result<CrateNum, CrateError> { let _prof_timer = self.sess.prof.generic_activity("metadata_register_crate"); @@ -372,8 +373,13 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { let crate_root = metadata.get_root(); let host_hash = host_lib.as_ref().map(|lib| lib.metadata.get_root().hash()); - let private_dep = - self.sess.opts.externs.get(name.as_str()).is_some_and(|e| e.is_private_dep); + let private_dep = self + .sess + .opts + .externs + .get(name.as_str()) + .map_or(private_dep.unwrap_or(false), |e| e.is_private_dep) + && private_dep.unwrap_or(true); // Claim this crate number and cache it let cnum = self.cstore.intern_stable_crate_id(&crate_root)?; @@ -518,15 +524,16 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { if !name.as_str().is_ascii() { return Err(CrateError::NonAsciiName(name)); } - let (root, hash, host_hash, extra_filename, path_kind) = match dep { + let (root, hash, host_hash, extra_filename, path_kind, private_dep) = match dep { Some((root, dep)) => ( Some(root), Some(dep.hash), dep.host_hash, Some(&dep.extra_filename[..]), PathKind::Dependency, + Some(dep.is_private), ), - None => (None, None, None, None, PathKind::Crate), + None => (None, None, None, None, PathKind::Crate, None), }; let result = if let Some(cnum) = self.existing_match(name, hash, path_kind) { (LoadResult::Previous(cnum), None) @@ -562,10 +569,13 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { dep_kind = CrateDepKind::MacrosOnly; } data.update_dep_kind(|data_dep_kind| cmp::max(data_dep_kind, dep_kind)); + if let Some(private_dep) = private_dep { + data.update_and_private_dep(private_dep); + } Ok(cnum) } (LoadResult::Loaded(library), host_library) => { - self.register_crate(host_library, root, library, dep_kind, name) + self.register_crate(host_library, root, library, dep_kind, name, private_dep) } _ => panic!(), } diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs index ceb348f3469..a89d7b464e2 100644 --- a/compiler/rustc_metadata/src/locator.rs +++ b/compiler/rustc_metadata/src/locator.rs @@ -666,31 +666,30 @@ impl<'a> CrateLocator<'a> { return None; } - let root = metadata.get_root(); - if root.is_proc_macro_crate() != 
self.is_proc_macro { + let header = metadata.get_header(); + if header.is_proc_macro_crate != self.is_proc_macro { info!( "Rejecting via proc macro: expected {} got {}", - self.is_proc_macro, - root.is_proc_macro_crate(), + self.is_proc_macro, header.is_proc_macro_crate, ); return None; } - if self.exact_paths.is_empty() && self.crate_name != root.name() { + if self.exact_paths.is_empty() && self.crate_name != header.name { info!("Rejecting via crate name"); return None; } - if root.triple() != &self.triple { - info!("Rejecting via crate triple: expected {} got {}", self.triple, root.triple()); + if header.triple != self.triple { + info!("Rejecting via crate triple: expected {} got {}", self.triple, header.triple); self.crate_rejections.via_triple.push(CrateMismatch { path: libpath.to_path_buf(), - got: root.triple().to_string(), + got: header.triple.to_string(), }); return None; } - let hash = root.hash(); + let hash = header.hash; if let Some(expected_hash) = self.hash { if hash != expected_hash { info!("Rejecting via hash: expected {} got {}", expected_hash, hash); diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index cc4e60cf6ac..8f883bdcf12 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -9,7 +9,7 @@ use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::owned_slice::OwnedSlice; use rustc_data_structures::svh::Svh; -use rustc_data_structures::sync::{AppendOnlyVec, Lock, Lrc, OnceCell}; +use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc, OnceCell}; use rustc_data_structures::unhash::UnhashMap; use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind}; use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro}; @@ -40,6 +40,7 @@ use proc_macro::bridge::client::ProcMacro; use std::iter::TrustedLen; use std::num::NonZeroUsize; use std::path::Path; +use std::sync::atomic::Ordering; use std::{io, iter, mem}; pub(super) use cstore_impl::provide; @@ -74,6 +75,7 @@ pub(crate) struct CrateMetadata { blob: MetadataBlob, // --- Some data pre-decoded from the metadata blob, usually for performance --- + /// Data about the top-level items in a crate, as well as various crate-level metadata. root: CrateRoot, /// Trait impl data. /// FIXME: Used only from queries and can use query cache, @@ -111,9 +113,10 @@ pub(crate) struct CrateMetadata { dep_kind: Lock<CrateDepKind>, /// Filesystem location of this crate. source: Lrc<CrateSource>, - /// Whether or not this crate should be consider a private dependency - /// for purposes of the 'exported_private_dependencies' lint - private_dep: bool, + /// Whether or not this crate should be consider a private dependency. + /// Used by the 'exported_private_dependencies' lint, and for determining + /// whether to emit suggestions that reference this crate. + private_dep: AtomicBool, /// The hash for the host proc macro. Used to support `-Z dual-proc-macro`. 
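A standalone sketch of the accumulation behaviour behind the new `AtomicBool` field and `update_and_private_dep` (shown later in this hunk): the flag is combined with a logical AND, so once any dependency edge reaches the crate as a public dependency it stays non-private. The values below are illustrative.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    // A crate first discovered through a `--extern priv:...`-style edge.
    let private_dep = AtomicBool::new(true);

    // Another edge that also treats it as private leaves the flag set...
    private_dep.fetch_and(true, Ordering::SeqCst);
    assert!(private_dep.load(Ordering::Acquire));

    // ...but as soon as one edge reaches it as a public dependency,
    // the AND clears the flag and it can never become private again.
    private_dep.fetch_and(false, Ordering::SeqCst);
    private_dep.fetch_and(true, Ordering::SeqCst);
    assert!(!private_dep.load(Ordering::Acquire));
}
```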
host_hash: Option<Svh>, @@ -449,7 +452,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext { You need to explicitly pass `(crate_metadata_ref, tcx)` to `decode` instead of just `crate_metadata_ref`."); }; - let cname = cdata.root.name; + let cname = cdata.root.name(); rustc_span::hygiene::decode_syntax_context(decoder, &cdata.hygiene_context, |_, id| { debug!("SpecializedDecoder<SyntaxContext>: decoding {}", id); cdata @@ -564,7 +567,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { let cnum = u32::decode(decoder); panic!( "Decoding of crate {:?} tried to access proc-macro dep {:?}", - decoder.cdata().root.name, + decoder.cdata().root.header.name, cnum ); } @@ -671,6 +674,16 @@ impl MetadataBlob { .decode(self) } + pub(crate) fn get_header(&self) -> CrateHeader { + let slice = &self.blob()[..]; + let offset = METADATA_HEADER.len(); + + let pos_bytes = slice[offset..][..4].try_into().unwrap(); + let pos = u32::from_be_bytes(pos_bytes) as usize; + + LazyValue::<CrateHeader>::from_position(NonZeroUsize::new(pos).unwrap()).decode(self) + } + pub(crate) fn get_root(&self) -> CrateRoot { let slice = &self.blob()[..]; let offset = METADATA_HEADER.len(); @@ -684,18 +697,19 @@ impl MetadataBlob { pub(crate) fn list_crate_metadata(&self, out: &mut dyn io::Write) -> io::Result<()> { let root = self.get_root(); writeln!(out, "Crate info:")?; - writeln!(out, "name {}{}", root.name, root.extra_filename)?; - writeln!(out, "hash {} stable_crate_id {:?}", root.hash, root.stable_crate_id)?; + writeln!(out, "name {}{}", root.name(), root.extra_filename)?; + writeln!(out, "hash {} stable_crate_id {:?}", root.hash(), root.stable_crate_id)?; writeln!(out, "proc_macro {:?}", root.proc_macro_data.is_some())?; writeln!(out, "=External Dependencies=")?; for (i, dep) in root.crate_deps.decode(self).enumerate() { - let CrateDep { name, extra_filename, hash, host_hash, kind } = dep; + let CrateDep { name, extra_filename, hash, host_hash, kind, is_private } = dep; let number = i + 1; writeln!( out, - "{number} {name}{extra_filename} hash {hash} host_hash {host_hash:?} kind {kind:?}" + "{number} {name}{extra_filename} hash {hash} host_hash {host_hash:?} kind {kind:?} {privacy}", + privacy = if is_private { "private" } else { "public" } )?; } write!(out, "\n")?; @@ -709,21 +723,17 @@ impl CrateRoot { } pub(crate) fn name(&self) -> Symbol { - self.name + self.header.name } pub(crate) fn hash(&self) -> Svh { - self.hash + self.header.hash } pub(crate) fn stable_crate_id(&self) -> StableCrateId { self.stable_crate_id } - pub(crate) fn triple(&self) -> &TargetTriple { - &self.triple - } - pub(crate) fn decode_crate_deps<'a>( &self, metadata: &'a MetadataBlob, @@ -794,7 +804,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { bug!( "CrateMetadata::def_kind({:?}): id not found, in crate {:?} with number {}", item_id, - self.root.name, + self.root.name(), self.cnum, ) }) @@ -851,7 +861,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { } else { tcx.arena.alloc_from_iter(lazy.decode((self, tcx))) }; - ty::EarlyBinder(&*output) + ty::EarlyBinder::bind(&*output) } fn get_variant( @@ -1617,7 +1627,7 @@ impl CrateMetadata { dependencies, dep_kind: Lock::new(dep_kind), source: Lrc::new(source), - private_dep, + private_dep: AtomicBool::new(private_dep), host_hash, extern_crate: Lock::new(None), hygiene_context: Default::default(), @@ -1665,6 +1675,10 @@ impl CrateMetadata { self.dep_kind.with_lock(|dep_kind| *dep_kind = f(*dep_kind)) } + pub(crate) fn update_and_private_dep(&self, private_dep: bool) { + 
self.private_dep.fetch_and(private_dep, Ordering::SeqCst); + } + pub(crate) fn required_panic_strategy(&self) -> Option<PanicStrategy> { self.root.required_panic_strategy } @@ -1702,11 +1716,11 @@ impl CrateMetadata { } pub(crate) fn name(&self) -> Symbol { - self.root.name + self.root.header.name } pub(crate) fn hash(&self) -> Svh { - self.root.hash + self.root.header.hash } fn num_def_ids(&self) -> usize { diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index 7425963d30f..a15307e4345 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -285,7 +285,13 @@ provide! { tcx, def_id, other, cdata, is_ctfe_mir_available => { cdata.is_ctfe_mir_available(def_id.index) } dylib_dependency_formats => { cdata.get_dylib_dependency_formats(tcx) } - is_private_dep => { cdata.private_dep } + is_private_dep => { + // Parallel compiler needs to synchronize type checking and linting (which use this flag) + // so that they happen strictly crate loading. Otherwise, the full list of available + // impls aren't loaded yet. + use std::sync::atomic::Ordering; + cdata.private_dep.load(Ordering::Acquire) + } is_panic_runtime => { cdata.root.panic_runtime } is_compiler_builtins => { cdata.root.compiler_builtins } has_global_allocator => { cdata.root.has_global_allocator } @@ -317,9 +323,9 @@ provide! { tcx, def_id, other, cdata, } native_libraries => { cdata.get_native_libraries(tcx.sess).collect() } foreign_modules => { cdata.get_foreign_modules(tcx.sess).map(|m| (m.def_id, m)).collect() } - crate_hash => { cdata.root.hash } + crate_hash => { cdata.root.header.hash } crate_host_hash => { cdata.host_hash } - crate_name => { cdata.root.name } + crate_name => { cdata.root.header.name } extra_filename => { cdata.root.extra_filename.clone() } @@ -581,7 +587,7 @@ impl CrateStore for CStore { } fn crate_name(&self, cnum: CrateNum) -> Symbol { - self.get_crate_data(cnum).root.name + self.get_crate_data(cnum).root.header.name } fn stable_crate_id(&self, cnum: CrateNum) -> StableCrateId { diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index f067bca4b0b..6ceb61e793e 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -662,10 +662,13 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { let root = stat!("final", || { let attrs = tcx.hir().krate_attrs(); self.lazy(CrateRoot { - name: tcx.crate_name(LOCAL_CRATE), + header: CrateHeader { + name: tcx.crate_name(LOCAL_CRATE), + triple: tcx.sess.opts.target_triple.clone(), + hash: tcx.crate_hash(LOCAL_CRATE), + is_proc_macro_crate: proc_macro_data.is_some(), + }, extra_filename: tcx.sess.opts.cg.extra_filename.clone(), - triple: tcx.sess.opts.target_triple.clone(), - hash: tcx.crate_hash(LOCAL_CRATE), stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(), required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE), panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop, @@ -1727,7 +1730,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { ty::Closure(_, substs) => { let constness = self.tcx.constness(def_id.to_def_id()); self.tables.constness.set_some(def_id.to_def_id().index, constness); - record!(self.tables.fn_sig[def_id.to_def_id()] <- ty::EarlyBinder(substs.as_closure().sig())); + record!(self.tables.fn_sig[def_id.to_def_id()] <- 
ty::EarlyBinder::bind(substs.as_closure().sig())); } _ => bug!("closure that is neither generator nor closure"), @@ -1880,6 +1883,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { host_hash: self.tcx.crate_host_hash(cnum), kind: self.tcx.dep_kind(cnum), extra_filename: self.tcx.extra_filename(cnum).clone(), + is_private: self.tcx.is_private_dep(cnum), }; (cnum, dep) }) diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs index 97e67fcf8fd..2da888f4468 100644 --- a/compiler/rustc_metadata/src/rmeta/mod.rs +++ b/compiler/rustc_metadata/src/rmeta/mod.rs @@ -56,7 +56,7 @@ pub(crate) fn rustc_version(cfg_version: &'static str) -> String { /// Metadata encoding version. /// N.B., increment this if you change the format of metadata such that /// the rustc version can't be found to compare with `rustc_version()`. -const METADATA_VERSION: u8 = 7; +const METADATA_VERSION: u8 = 8; /// Metadata header which includes `METADATA_VERSION`. /// @@ -199,7 +199,27 @@ pub(crate) struct ProcMacroData { macros: LazyArray<DefIndex>, } -/// Serialized metadata for a crate. +/// Serialized crate metadata. +/// +/// This contains just enough information to determine if we should load the `CrateRoot` or not. +/// Prefer [`CrateRoot`] whenever possible to avoid ICEs when using `omit-git-hash` locally. +/// See #76720 for more details. +/// +/// If you do modify this struct, also bump the [`METADATA_VERSION`] constant. +#[derive(MetadataEncodable, MetadataDecodable)] +pub(crate) struct CrateHeader { + pub(crate) triple: TargetTriple, + pub(crate) hash: Svh, + pub(crate) name: Symbol, + /// Whether this is the header for a proc-macro crate. + /// + /// This is separate from [`ProcMacroData`] to avoid having to update [`METADATA_VERSION`] every + /// time ProcMacroData changes. + pub(crate) is_proc_macro_crate: bool, +} + +/// Serialized `.rmeta` data for a crate. +/// /// When compiling a proc-macro crate, we encode many of /// the `LazyArray<T>` fields as `Lazy::empty()`. This serves two purposes: /// @@ -217,10 +237,10 @@ pub(crate) struct ProcMacroData { /// to being unused. #[derive(MetadataEncodable, MetadataDecodable)] pub(crate) struct CrateRoot { - name: Symbol, - triple: TargetTriple, + /// A header used to detect if this is the right crate to load. + header: CrateHeader, + extra_filename: String, - hash: Svh, stable_crate_id: StableCrateId, required_panic_strategy: Option<PanicStrategy>, panic_in_drop_strategy: PanicStrategy, @@ -302,6 +322,7 @@ pub(crate) struct CrateDep { pub host_hash: Option<Svh>, pub kind: CrateDepKind, pub extra_filename: String, + pub is_private: bool, } #[derive(MetadataEncodable, MetadataDecodable)] @@ -465,6 +486,7 @@ trivially_parameterized_over_tcx! { RawDefId, TraitImpls, IncoherentImpls, + CrateHeader, CrateRoot, CrateDep, AttrFlags, diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs index dda30bce2c0..f002d7f97b9 100644 --- a/compiler/rustc_metadata/src/rmeta/table.rs +++ b/compiler/rustc_metadata/src/rmeta/table.rs @@ -439,7 +439,7 @@ where /// Given the metadata, extract out the value at a particular index (if any). 
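A rough sketch of the probe that the new `CrateHeader` enables: `MetadataBlob::get_header` (earlier in this diff) reads a big-endian `u32` position stored right after the metadata magic bytes and decodes only the small header, so the locator can reject mismatched candidates without decoding the full `CrateRoot`. The buffer layout below is illustrative, not the real rmeta encoding.

```rust
// Illustrative only: read a big-endian u32 offset stored after a fixed-size
// "magic" prefix, the same shape of access `get_header`/`get_root` perform.
fn header_position(blob: &[u8], magic_len: usize) -> usize {
    let pos_bytes: [u8; 4] = blob[magic_len..magic_len + 4].try_into().unwrap();
    u32::from_be_bytes(pos_bytes) as usize
}

fn main() {
    // 8 fake magic bytes followed by an offset of 42.
    let mut blob = vec![0u8; 8];
    blob.extend_from_slice(&42u32.to_be_bytes());
    assert_eq!(header_position(&blob, 8), 42);
}
```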
#[inline(never)] pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> T::Value<'tcx> { - debug!("LazyTable::lookup: index={:?} len={:?}", i, self.encoded_size); + trace!("LazyTable::lookup: index={:?} len={:?}", i, self.encoded_size); let start = self.position.get(); let bytes = &metadata.blob()[start..start + self.encoded_size]; diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs index 56171314944..29dae67bfca 100644 --- a/compiler/rustc_middle/src/infer/canonical.rs +++ b/compiler/rustc_middle/src/infer/canonical.rs @@ -415,7 +415,7 @@ impl<'tcx> CanonicalVarValues<'tcx> { var: ty::BoundVar::from_usize(i), kind: ty::BrAnon(None), }; - tcx.mk_re_late_bound(ty::INNERMOST, br).into() + ty::Region::new_late_bound(tcx, ty::INNERMOST, br).into() } CanonicalVarKind::Const(_, ty) | CanonicalVarKind::PlaceholderConst(_, ty) => tcx diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs index 14343ac1108..caf3fc26039 100644 --- a/compiler/rustc_middle/src/lint.rs +++ b/compiler/rustc_middle/src/lint.rs @@ -251,7 +251,7 @@ pub fn explain_lint_level_source( } LintLevelSource::Node { name: lint_attr_name, span, reason, .. } => { if let Some(rationale) = reason { - err.note(rationale.as_str()); + err.note(rationale.to_string()); } err.span_note_once(span, "the lint level is defined here"); if lint_attr_name.as_str() != name { diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 6354c0aabde..60844c17e47 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -104,7 +104,7 @@ pub fn report_unstable( suggestion: Option<(Span, String, String, Applicability)>, is_soft: bool, span: Span, - soft_handler: impl FnOnce(&'static Lint, Span, &str), + soft_handler: impl FnOnce(&'static Lint, Span, String), ) { let msg = match reason { Some(r) => format!("use of unstable library feature '{}': {}", feature, r), @@ -112,7 +112,7 @@ pub fn report_unstable( }; if is_soft { - soft_handler(SOFT_UNSTABLE, span, &msg) + soft_handler(SOFT_UNSTABLE, span, msg) } else { let mut err = feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), msg); @@ -225,7 +225,7 @@ pub fn deprecation_message_and_lint( pub fn early_report_deprecation( lint_buffer: &mut LintBuffer, - message: &str, + message: String, suggestion: Option<Symbol>, lint: &'static Lint, span: Span, @@ -241,7 +241,7 @@ pub fn early_report_deprecation( fn late_report_deprecation( tcx: TyCtxt<'_>, - message: &str, + message: String, suggestion: Option<Symbol>, lint: &'static Lint, span: Span, @@ -396,7 +396,7 @@ impl<'tcx> TyCtxt<'tcx> { late_report_deprecation( self, - &deprecation_message( + deprecation_message( is_in_effect, depr_attr.since, depr_attr.note, @@ -619,7 +619,7 @@ impl<'tcx> TyCtxt<'tcx> { allow_unstable: AllowUnstable, unmarked: impl FnOnce(Span, DefId), ) -> bool { - let soft_handler = |lint, span, msg: &_| { + let soft_handler = |lint, span, msg: String| { self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, msg, |lint| lint) }; let eval_result = diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index 5c71910a955..5c27bdec575 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -3,7 +3,7 @@ //! 
[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html use crate::mir::interpret::{ - AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, LitToConstInput, Scalar, + AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar, }; use crate::mir::visit::MirVisitable; use crate::ty::codec::{TyDecoder, TyEncoder}; @@ -476,7 +476,7 @@ impl<'tcx> Body<'tcx> { /// Returns the return type; it always return first element from `local_decls` array. #[inline] pub fn bound_return_ty(&self) -> ty::EarlyBinder<Ty<'tcx>> { - ty::EarlyBinder(self.local_decls[RETURN_PLACE].ty) + ty::EarlyBinder::bind(self.local_decls[RETURN_PLACE].ty) } /// Gets the location of the terminator for the given block. @@ -2461,51 +2461,6 @@ impl<'tcx> ConstantKind<'tcx> { Self::Val(val, ty) } - #[instrument(skip(tcx), level = "debug", ret)] - pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self { - let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); - let body_id = match tcx.hir().get(hir_id) { - hir::Node::AnonConst(ac) => ac.body, - _ => span_bug!( - tcx.def_span(def_id.to_def_id()), - "from_inline_const can only process anonymous constants" - ), - }; - let expr = &tcx.hir().body(body_id).value; - let ty = tcx.typeck(def_id).node_type(hir_id); - - let lit_input = match expr.kind { - hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }), - hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind { - hir::ExprKind::Lit(ref lit) => { - Some(LitToConstInput { lit: &lit.node, ty, neg: true }) - } - _ => None, - }, - _ => None, - }; - if let Some(lit_input) = lit_input { - // If an error occurred, ignore that it's a literal and leave reporting the error up to - // mir. - match tcx.at(expr.span).lit_to_mir_constant(lit_input) { - Ok(c) => return c, - Err(_) => {} - } - } - - let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()); - let parent_substs = - tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id)); - let substs = - ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty }) - .substs; - - let uneval = UnevaluatedConst { def: def_id.to_def_id(), substs, promoted: None }; - debug_assert!(!uneval.has_free_regions()); - - Self::Unevaluated(uneval, ty) - } - /// Literals are converted to `ConstantKindVal`, const generic parameters are eagerly /// converted to a constant, everything else becomes `Unevaluated`. #[instrument(skip(tcx), level = "debug", ret)] diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs index 53fd2dd23a7..a15c419da7a 100644 --- a/compiler/rustc_middle/src/mir/query.rs +++ b/compiler/rustc_middle/src/mir/query.rs @@ -413,7 +413,7 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> { ty::ReVar(vid) => { let br = ty::BoundRegion { var: ty::BoundVar::new(vid.index()), kind: ty::BrAnon(None) }; - tcx.mk_re_late_bound(depth, br) + ty::Region::new_late_bound(tcx, depth, br) } _ => bug!("unexpected region in ClosureOutlivesSubjectTy: {r:?}"), }); diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index 21faf1958e9..3e474c1d377 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -220,6 +220,11 @@ pub enum BorrowKind { /// immutable, but not aliasable. This solves the problem. For /// simplicity, we don't give users the way to express this /// borrow, it's just used when translating closures. 
+ /// + // FIXME(#112072): This is wrong. Unique borrows are mutable borrows except + // that they do not require their pointee to be marked as a mutable. + // They should still be treated as mutable borrows in every other way, + // e.g. for variance or overlap checking. Unique, /// Data is mutable and not aliasable. @@ -603,7 +608,11 @@ pub enum TerminatorKind<'tcx> { /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to /// > the place or one of its "parents" occurred more recently than a move out of it. This does not /// > consider indirect assignments. - Drop { place: Place<'tcx>, target: BasicBlock, unwind: UnwindAction }, + /// + /// The `replace` flag indicates whether this terminator was created as part of an assignment. + /// This should only be used for diagnostic purposes, and does not have any operational + /// meaning. + Drop { place: Place<'tcx>, target: BasicBlock, unwind: UnwindAction, replace: bool }, /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of /// the referred to function. The operand types must match the argument types of the function. diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs index 2c6126cdd29..561ef371b09 100644 --- a/compiler/rustc_middle/src/mir/terminator.rs +++ b/compiler/rustc_middle/src/mir/terminator.rs @@ -105,7 +105,7 @@ pub struct Terminator<'tcx> { pub kind: TerminatorKind<'tcx>, } -pub type Successors<'a> = impl Iterator<Item = BasicBlock> + 'a; +pub type Successors<'a> = impl DoubleEndedIterator<Item = BasicBlock> + 'a; pub type SuccessorsMut<'a> = iter::Chain<std::option::IntoIter<&'a mut BasicBlock>, slice::IterMut<'a, BasicBlock>>; diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs index 7d247eeb656..99ead14139a 100644 --- a/compiler/rustc_middle/src/mir/traversal.rs +++ b/compiler/rustc_middle/src/mir/traversal.rs @@ -149,7 +149,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // B C // | | // | | - // D | + // | D // \ / // \ / // E @@ -159,26 +159,26 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // // When the first call to `traverse_successor` happens, the following happens: // - // [(B, [D]), // `B` taken from the successors of `A`, pushed to the - // // top of the stack along with the successors of `B` - // (A, [C])] + // [(C, [D]), // `C` taken from the successors of `A`, pushed to the + // // top of the stack along with the successors of `C` + // (A, [B])] // - // [(D, [E]), // `D` taken from successors of `B`, pushed to stack - // (B, []), - // (A, [C])] + // [(D, [E]), // `D` taken from successors of `C`, pushed to stack + // (C, []), + // (A, [B])] // // [(E, []), // `E` taken from successors of `D`, pushed to stack // (D, []), - // (B, []), - // (A, [C])] + // (C, []), + // (A, [B])] // // Now that the top of the stack has no successors we can traverse, each item will - // be popped off during iteration until we get back to `A`. This yields [E, D, B]. + // be popped off during iteration until we get back to `A`. This yields [E, D, C]. // - // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but + // When we yield `C` and call `traverse_successor`, we push `B` to the stack, but // since we've already visited `E`, that child isn't added to the stack. 
The last - // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A] - while let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() && let Some(bb) = iter.next() { + // two iterations yield `B` and finally `A` for a final traversal of [E, D, C, B, A] + while let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() && let Some(bb) = iter.next_back() { if self.visited.insert(bb) { if let Some(term) = &self.basic_blocks[bb].terminator { self.visit_stack.push((bb, term.successors())); diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 596dd80bf48..8d44e929afd 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -504,6 +504,7 @@ macro_rules! make_mir_visitor { place, target: _, unwind: _, + replace: _, } => { self.visit_place( place, @@ -649,8 +650,8 @@ macro_rules! make_mir_visitor { BorrowKind::Shallow => PlaceContext::NonMutatingUse( NonMutatingUseContext::ShallowBorrow ), - BorrowKind::Unique => PlaceContext::NonMutatingUse( - NonMutatingUseContext::UniqueBorrow + BorrowKind::Unique => PlaceContext::MutatingUse( + MutatingUseContext::Borrow ), BorrowKind::Mut { .. } => PlaceContext::MutatingUse(MutatingUseContext::Borrow), @@ -1264,8 +1265,6 @@ pub enum NonMutatingUseContext { SharedBorrow, /// Shallow borrow. ShallowBorrow, - /// Unique borrow. - UniqueBorrow, /// AddressOf for *const pointer. AddressOf, /// PlaceMention statement. @@ -1344,9 +1343,7 @@ impl PlaceContext { matches!( self, PlaceContext::NonMutatingUse( - NonMutatingUseContext::SharedBorrow - | NonMutatingUseContext::ShallowBorrow - | NonMutatingUseContext::UniqueBorrow + NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::ShallowBorrow ) | PlaceContext::MutatingUse(MutatingUseContext::Borrow) ) } diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 1528be42f6a..0b31c9bbf81 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -1081,14 +1081,6 @@ rustc_queries! { desc { "destructuring MIR constant"} } - /// Dereference a constant reference or raw pointer and turn the result into a constant - /// again. - query deref_mir_constant( - key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>> - ) -> mir::ConstantKind<'tcx> { - desc { "dereferencing MIR constant" } - } - query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> { desc { "getting a &core::panic::Location referring to a span" } } @@ -1100,10 +1092,6 @@ rustc_queries! { desc { "converting literal to const" } } - query lit_to_mir_constant(key: LitToConstInput<'tcx>) -> Result<mir::ConstantKind<'tcx>, LitToConstError> { - desc { "converting literal to mir constant" } - } - query check_match(key: LocalDefId) -> Result<(), rustc_errors::ErrorGuaranteed> { desc { |tcx| "match-checking `{}`", tcx.def_path_str(key) } cache_on_disk_if { true } diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs index 7c5c030c276..e067d2a984f 100644 --- a/compiler/rustc_middle/src/ty/adt.rs +++ b/compiler/rustc_middle/src/ty/adt.rs @@ -573,7 +573,7 @@ impl<'tcx> AdtDef<'tcx> { /// Due to normalization being eager, this applies even if /// the associated type is behind a pointer (e.g., issue #31299). 
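The corrected traversal comment above is easier to follow next to a standalone postorder that, like the patched loop, takes successors from the back of each pending iterator via `next_back`. The graph matches the one in the comment (`A -> {B, C}`, `B -> E`, `C -> D`, `D -> E`); the implementation itself is an illustrative reimplementation, not the compiler's.

```rust
// Postorder over the comment's example CFG, taking successors from the back
// of each iterator as the patched `traverse_successor` now does.
fn postorder(succ: &[Vec<usize>], start: usize) -> Vec<usize> {
    let mut visited = vec![false; succ.len()];
    let mut stack: Vec<(usize, std::vec::IntoIter<usize>)> = Vec::new();
    let mut order = Vec::new();

    visited[start] = true;
    stack.push((start, succ[start].clone().into_iter()));

    loop {
        // Peek at the block on top of the stack and try its next successor.
        let next = match stack.last_mut() {
            Some((_, iter)) => iter.next_back(),
            None => break,
        };
        match next {
            Some(bb) if !visited[bb] => {
                visited[bb] = true;
                stack.push((bb, succ[bb].clone().into_iter()));
            }
            // Already-visited successor: nothing to do.
            Some(_) => {}
            // No successors left: the block is finished, yield it.
            None => order.push(stack.pop().unwrap().0),
        }
    }
    order
}

fn main() {
    // A = 0, B = 1, C = 2, D = 3, E = 4
    let succ = vec![vec![1, 2], vec![4], vec![3], vec![4], vec![]];
    assert_eq!(postorder(&succ, 0), vec![4, 3, 2, 1, 0]); // [E, D, C, B, A]
}
```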
pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> { - ty::EarlyBinder(tcx.adt_sized_constraint(self.did())) + ty::EarlyBinder::bind(tcx.adt_sized_constraint(self.did())) } } diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index 7fc75674da5..76f52bc34ed 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -264,7 +264,7 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for mir::Place<'tcx> { impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Region<'tcx> { fn decode(decoder: &mut D) -> Self { - decoder.interner().mk_region_from_kind(Decodable::decode(decoder)) + ty::Region::new_from_kind(decoder.interner(), Decodable::decode(decoder)) } } diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs index 1a4bd14815f..aecb46556b0 100644 --- a/compiler/rustc_middle/src/ty/consts.rs +++ b/compiler/rustc_middle/src/ty/consts.rs @@ -254,5 +254,5 @@ pub fn const_param_default(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBind "`const_param_default` expected a generic parameter with a constant" ), }; - ty::EarlyBinder(Const::from_anon_const(tcx, default_def_id)) + ty::EarlyBinder::bind(Const::from_anon_const(tcx, default_def_id)) } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 2bde55bc4fd..77725f0b3b6 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -115,6 +115,16 @@ impl<'tcx> Interner for TyCtxt<'tcx> { type FreeRegion = ty::FreeRegion; type RegionVid = ty::RegionVid; type PlaceholderRegion = ty::PlaceholderRegion; + + fn ty_and_mut_to_parts( + TypeAndMut { ty, mutbl }: TypeAndMut<'tcx>, + ) -> (Self::Ty, Self::Mutability) { + (ty, mutbl) + } + + fn mutability_is_mut(mutbl: Self::Mutability) -> bool { + mutbl.is_mut() + } } type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>; @@ -713,30 +723,6 @@ impl<'tcx> TyCtxt<'tcx> { self.mk_ty_from_kind(Error(reported)) } - /// Constructs a `RegionKind::ReError` lifetime. - #[track_caller] - pub fn mk_re_error(self, reported: ErrorGuaranteed) -> Region<'tcx> { - self.intern_region(ty::ReError(reported)) - } - - /// Constructs a `RegionKind::ReError` lifetime and registers a `delay_span_bug` to ensure it - /// gets used. - #[track_caller] - pub fn mk_re_error_misc(self) -> Region<'tcx> { - self.mk_re_error_with_message( - DUMMY_SP, - "RegionKind::ReError constructed but no error reported", - ) - } - - /// Constructs a `RegionKind::ReError` lifetime and registers a `delay_span_bug` with the given - /// `msg` to ensure it gets used. - #[track_caller] - pub fn mk_re_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Region<'tcx> { - let reported = self.sess.delay_span_bug(span, msg); - self.mk_re_error(reported) - } - /// Like [TyCtxt::ty_error] but for constants, with current `ErrorGuaranteed` #[track_caller] pub fn const_error(self, ty: Ty<'tcx>, reported: ErrorGuaranteed) -> Const<'tcx> { @@ -759,7 +745,7 @@ impl<'tcx> TyCtxt<'tcx> { self, ty: Ty<'tcx>, span: S, - msg: &str, + msg: &'static str, ) -> Const<'tcx> { let reported = self.sess.delay_span_bug(span, msg); self.mk_const(ty::ConstKind::Error(reported), ty) @@ -1515,9 +1501,9 @@ macro_rules! direct_interners { // Functions with a `mk_` prefix are intended for use outside this file and // crate. 
Functions with an `intern_` prefix are intended for use within this -// file only, and have a corresponding `mk_` function. +// crate only, and have a corresponding `mk_` function. direct_interners! { - region: intern_region(RegionKind<'tcx>): Region -> Region<'tcx>, + region: pub(crate) intern_region(RegionKind<'tcx>): Region -> Region<'tcx>, const_: intern_const(ConstData<'tcx>): Const -> Const<'tcx>, const_allocation: pub mk_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>, layout: pub mk_layout(LayoutS): Layout -> Layout<'tcx>, @@ -1992,7 +1978,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> GenericArg<'tcx> { match param.kind { GenericParamDefKind::Lifetime => { - self.mk_re_early_bound(param.to_early_bound_region_data()).into() + ty::Region::new_early_bound(self, param.to_early_bound_region_data()).into() } GenericParamDefKind::Type { .. } => self.mk_ty_param(param.index, param.name).into(), GenericParamDefKind::Const { .. } => self @@ -2032,65 +2018,6 @@ impl<'tcx> TyCtxt<'tcx> { self.mk_alias(ty::Opaque, self.mk_alias_ty(def_id, substs)) } - #[inline] - pub fn mk_re_early_bound(self, early_bound_region: ty::EarlyBoundRegion) -> Region<'tcx> { - self.intern_region(ty::ReEarlyBound(early_bound_region)) - } - - #[inline] - pub fn mk_re_late_bound( - self, - debruijn: ty::DebruijnIndex, - bound_region: ty::BoundRegion, - ) -> Region<'tcx> { - // Use a pre-interned one when possible. - if let ty::BoundRegion { var, kind: ty::BrAnon(None) } = bound_region - && let Some(inner) = self.lifetimes.re_late_bounds.get(debruijn.as_usize()) - && let Some(re) = inner.get(var.as_usize()).copied() - { - re - } else { - self.intern_region(ty::ReLateBound(debruijn, bound_region)) - } - } - - #[inline] - pub fn mk_re_free(self, scope: DefId, bound_region: ty::BoundRegionKind) -> Region<'tcx> { - self.intern_region(ty::ReFree(ty::FreeRegion { scope, bound_region })) - } - - #[inline] - pub fn mk_re_var(self, v: ty::RegionVid) -> Region<'tcx> { - // Use a pre-interned one when possible. - self.lifetimes - .re_vars - .get(v.as_usize()) - .copied() - .unwrap_or_else(|| self.intern_region(ty::ReVar(v))) - } - - #[inline] - pub fn mk_re_placeholder(self, placeholder: ty::PlaceholderRegion) -> Region<'tcx> { - self.intern_region(ty::RePlaceholder(placeholder)) - } - - // Avoid this in favour of more specific `mk_re_*` methods, where possible, - // to avoid the cost of the `match`. - pub fn mk_region_from_kind(self, kind: ty::RegionKind<'tcx>) -> Region<'tcx> { - match kind { - ty::ReEarlyBound(region) => self.mk_re_early_bound(region), - ty::ReLateBound(debruijn, region) => self.mk_re_late_bound(debruijn, region), - ty::ReFree(ty::FreeRegion { scope, bound_region }) => { - self.mk_re_free(scope, bound_region) - } - ty::ReStatic => self.lifetimes.re_static, - ty::ReVar(vid) => self.mk_re_var(vid), - ty::RePlaceholder(region) => self.mk_re_placeholder(region), - ty::ReErased => self.lifetimes.re_erased, - ty::ReError(reported) => self.mk_re_error(reported), - } - } - pub fn mk_place_field(self, place: Place<'tcx>, f: FieldIdx, ty: Ty<'tcx>) -> Place<'tcx> { self.mk_place_elem(place, PlaceElem::Field(f, ty)) } diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs index 6a29063b80d..9c91b778403 100644 --- a/compiler/rustc_middle/src/ty/diagnostics.rs +++ b/compiler/rustc_middle/src/ty/diagnostics.rs @@ -1,5 +1,6 @@ //! Diagnostics related methods for `Ty`. 
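The `Cow` import added below supports a pattern worth spelling out: suggestion messages that are sometimes static and sometimes formatted can share one `Cow<'static, str>` value instead of the earlier `let s; ... &s` workaround. A standalone sketch; the helper name is illustrative, while the message strings are the ones used in `suggest_constraining_type_params`.

```rust
use std::borrow::Cow;

// `restriction_message` is an illustrative helper, not a compiler function.
fn restriction_message(ty: Option<&str>) -> Cow<'static, str> {
    match ty {
        None => Cow::from("consider further restricting this bound"),
        Some(ty) => Cow::from(format!("consider restricting type parameter `{ty}`")),
    }
}

fn main() {
    assert_eq!(restriction_message(None), "consider further restricting this bound");
    assert_eq!(restriction_message(Some("T")), "consider restricting type parameter `T`");
}
```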
+use std::borrow::Cow; use std::ops::ControlFlow; use crate::ty::{ @@ -384,22 +385,18 @@ pub fn suggest_constraining_type_params<'a>( if suggestions.len() == 1 { let (span, suggestion, msg) = suggestions.pop().unwrap(); - - let s; let msg = match msg { SuggestChangingConstraintsMessage::RestrictBoundFurther => { - "consider further restricting this bound" + Cow::from("consider further restricting this bound") } SuggestChangingConstraintsMessage::RestrictType { ty } => { - s = format!("consider restricting type parameter `{}`", ty); - &s + Cow::from(format!("consider restricting type parameter `{}`", ty)) } SuggestChangingConstraintsMessage::RestrictTypeFurther { ty } => { - s = format!("consider further restricting type parameter `{}`", ty); - &s + Cow::from(format!("consider further restricting type parameter `{}`", ty)) } SuggestChangingConstraintsMessage::RemovingQSized => { - "consider removing the `?Sized` bound to make the type parameter `Sized`" + Cow::from("consider removing the `?Sized` bound to make the type parameter `Sized`") } }; diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 49ab9b79e96..66293f19eef 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -45,7 +45,6 @@ pub enum TypeError<'tcx> { RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>), RegionsInsufficientlyPolymorphic(BoundRegionKind, Region<'tcx>), - RegionsOverlyPolymorphic(BoundRegionKind, Region<'tcx>), RegionsPlaceholderMismatch, Sorts(ExpectedFound<Ty<'tcx>>), @@ -74,7 +73,6 @@ impl TypeError<'_> { match self { TypeError::RegionsDoesNotOutlive(_, _) | TypeError::RegionsInsufficientlyPolymorphic(_, _) - | TypeError::RegionsOverlyPolymorphic(_, _) | TypeError::RegionsPlaceholderMismatch => true, _ => false, } @@ -98,11 +96,6 @@ impl<'tcx> TypeError<'tcx> { } } - let br_string = |br: ty::BoundRegionKind| match br { - ty::BrNamed(_, name) => format!(" {}", name), - _ => String::new(), - }; - match self { CyclicTy(_) => "cyclic type of infinite size".into(), CyclicConst(_) => "encountered a self-referencing constant".into(), @@ -144,11 +137,6 @@ impl<'tcx> TypeError<'tcx> { RegionsInsufficientlyPolymorphic(..) => { "one type is more general than the other".into() } - RegionsOverlyPolymorphic(br, _) => format!( - "expected concrete lifetime, found bound lifetime parameter{}", - br_string(br) - ) - .into(), RegionsPlaceholderMismatch => "one type is more general than the other".into(), ArgumentSorts(values, _) | Sorts(values) => { let expected = values.expected.sort_string(tcx); @@ -228,7 +216,6 @@ impl<'tcx> TypeError<'tcx> { | FieldMisMatch(..) | RegionsDoesNotOutlive(..) | RegionsInsufficientlyPolymorphic(..) - | RegionsOverlyPolymorphic(..) | RegionsPlaceholderMismatch | Traits(_) | ProjectionMismatched(_) diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs index 25890eb15cd..149ce29b8d9 100644 --- a/compiler/rustc_middle/src/ty/fold.rs +++ b/compiler/rustc_middle/src/ty/fold.rs @@ -213,7 +213,7 @@ where // debruijn index. Then we adjust it to the // correct depth. 
assert_eq!(debruijn1, ty::INNERMOST); - self.tcx.mk_re_late_bound(debruijn, br) + ty::Region::new_late_bound(self.tcx, debruijn, br) } else { region } @@ -328,7 +328,7 @@ impl<'tcx> TyCtxt<'tcx> { T: TypeFoldable<TyCtxt<'tcx>>, { self.replace_late_bound_regions_uncached(value, |br| { - self.mk_re_free(all_outlive_scope, br.kind) + ty::Region::new_free(self, all_outlive_scope, br.kind) }) } @@ -341,7 +341,8 @@ impl<'tcx> TyCtxt<'tcx> { value, FnMutDelegate { regions: &mut |r: ty::BoundRegion| { - self.mk_re_late_bound( + ty::Region::new_late_bound( + self, ty::INNERMOST, ty::BoundRegion { var: shift_bv(r.var), kind: r.kind }, ) @@ -383,7 +384,7 @@ impl<'tcx> TyCtxt<'tcx> { .or_insert_with(|| ty::BoundVariableKind::Region(ty::BrAnon(None))) .expect_region(); let br = ty::BoundRegion { var, kind }; - self.tcx.mk_re_late_bound(ty::INNERMOST, br) + ty::Region::new_late_bound(self.tcx, ty::INNERMOST, br) } fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> { let entry = self.map.entry(bt.var); @@ -451,7 +452,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Shifter<'tcx> { match *r { ty::ReLateBound(debruijn, br) if debruijn >= self.current_index => { let debruijn = debruijn.shifted_in(self.amount); - self.tcx.mk_re_late_bound(debruijn, br) + ty::Region::new_late_bound(self.tcx, debruijn, br) } _ => r, } @@ -492,7 +493,7 @@ pub fn shift_region<'tcx>( ) -> ty::Region<'tcx> { match *region { ty::ReLateBound(debruijn, br) if amount > 0 => { - tcx.mk_re_late_bound(debruijn.shifted_in(amount), br) + ty::Region::new_late_bound(tcx, debruijn.shifted_in(amount), br) } _ => region, } diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs index b0ffe78299d..30f92b91cb7 100644 --- a/compiler/rustc_middle/src/ty/generics.rs +++ b/compiler/rustc_middle/src/ty/generics.rs @@ -100,7 +100,7 @@ impl GenericParamDef { preceding_substs: &[ty::GenericArg<'tcx>], ) -> ty::GenericArg<'tcx> { match &self.kind { - ty::GenericParamDefKind::Lifetime => tcx.mk_re_error_misc().into(), + ty::GenericParamDefKind::Lifetime => ty::Region::new_error_misc(tcx).into(), ty::GenericParamDefKind::Type { .. } => tcx.ty_error_misc().into(), ty::GenericParamDefKind::Const { .. 
} => { tcx.const_error_misc(tcx.type_of(self.def_id).subst(tcx, preceding_substs)).into() @@ -343,7 +343,7 @@ impl<'tcx> GenericPredicates<'tcx> { substs: SubstsRef<'tcx>, ) -> impl Iterator<Item = (Predicate<'tcx>, Span)> + DoubleEndedIterator + ExactSizeIterator { - EarlyBinder(self.predicates).subst_iter_copied(tcx, substs) + EarlyBinder::bind(self.predicates).subst_iter_copied(tcx, substs) } #[instrument(level = "debug", skip(self, tcx))] @@ -358,7 +358,7 @@ impl<'tcx> GenericPredicates<'tcx> { } instantiated .predicates - .extend(self.predicates.iter().map(|(p, _)| EarlyBinder(*p).subst(tcx, substs))); + .extend(self.predicates.iter().map(|(p, _)| EarlyBinder::bind(*p).subst(tcx, substs))); instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp)); } diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs index ac42d6e0510..d48672b2baa 100644 --- a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs +++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs @@ -158,7 +158,7 @@ impl<'tcx> InhabitedPredicate<'tcx> { fn subst_opt(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Option<Self> { match self { Self::ConstIsZero(c) => { - let c = ty::EarlyBinder(c).subst(tcx, substs); + let c = ty::EarlyBinder::bind(c).subst(tcx, substs); let pred = match c.kind().try_to_target_usize(tcx) { Some(0) => Self::True, Some(1..) => Self::False, @@ -167,7 +167,7 @@ impl<'tcx> InhabitedPredicate<'tcx> { Some(pred) } Self::GenericType(t) => { - Some(ty::EarlyBinder(t).subst(tcx, substs).inhabited_predicate(tcx)) + Some(ty::EarlyBinder::bind(t).subst(tcx, substs).inhabited_predicate(tcx)) } Self::And(&[a, b]) => match a.subst_opt(tcx, substs) { None => b.subst_opt(tcx, substs).map(|b| a.and(tcx, b)), diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index a8d0dca37ff..72341e656e2 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -53,7 +53,6 @@ use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{ExpnId, ExpnKind, Span}; use rustc_target::abi::{Align, FieldIdx, Integer, IntegerType, VariantIdx}; pub use rustc_target::abi::{ReprFlags, ReprOptions}; -use rustc_type_ir::WithCachedTypeInfo; pub use subst::*; pub use vtable::*; @@ -145,6 +144,7 @@ mod opaque_types; mod parameterized; mod rvalue_scopes; mod structural_impls; +#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))] mod sty; mod typeck_results; @@ -764,7 +764,7 @@ impl<'tcx> Predicate<'tcx> { let shifted_pred = tcx.shift_bound_var_indices(trait_bound_vars.len(), bound_pred.skip_binder()); // 2) Self: Bar1<'a, '^0.1> -> T: Bar1<'^0.0, '^0.1> - let new = EarlyBinder(shifted_pred).subst(tcx, trait_ref.skip_binder().substs); + let new = EarlyBinder::bind(shifted_pred).subst(tcx, trait_ref.skip_binder().substs); // 3) ['x] + ['b] -> ['x, 'b] let bound_vars = tcx.mk_bound_variable_kinds_from_iter(trait_bound_vars.iter().chain(pred_bound_vars)); @@ -1496,7 +1496,7 @@ impl<'tcx> OpaqueHiddenType<'tcx> { /// identified by both a universe, as well as a name residing within that universe. Distinct bound /// regions/types/consts within the same universe simply have an unknown relationship to one /// another. 
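The `Debug` derive removed just below is replaced by a manual impl (in `structural_impls.rs` later in this diff) so placeholders print compactly: `!bound` in the root universe and `!universe_bound` otherwise. A standalone stand-in showing that output shape; the struct here is not the compiler's generic `ty::Placeholder<T>`.

```rust
use std::fmt;

// Stand-in type: the real `ty::Placeholder<T>` is generic over the bound kind.
struct Placeholder {
    universe: u32,
    bound: u32,
}

impl fmt::Debug for Placeholder {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.universe == 0 {
            // Root universe: just the bound variable.
            write!(f, "!{}", self.bound)
        } else {
            write!(f, "!{}_{}", self.universe, self.bound)
        }
    }
}

fn main() {
    assert_eq!(format!("{:?}", Placeholder { universe: 0, bound: 3 }), "!3");
    assert_eq!(format!("{:?}", Placeholder { universe: 2, bound: 3 }), "!2_3");
}
```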
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[derive(HashStable, TyEncodable, TyDecodable)] pub struct Placeholder<T> { pub universe: UniverseIndex, diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs index 1b336b7bfc6..d1ed7be3d2e 100644 --- a/compiler/rustc_middle/src/ty/opaque_types.rs +++ b/compiler/rustc_middle/src/ty/opaque_types.rs @@ -141,7 +141,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> { ) .emit(); - self.interner().mk_re_error(e) + ty::Region::new_error(self.interner(), e) } } } diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index 64e7480e626..2de0a3f75dc 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -123,7 +123,7 @@ pub trait Printer<'tcx>: Sized { impl_trait_ref.map(|i| i.subst(self.tcx(), substs)), ) } else { - (self_ty.0, impl_trait_ref.map(|i| i.0)) + (self_ty.subst_identity(), impl_trait_ref.map(|i| i.subst_identity())) }; self.print_impl_path(def_id, substs, self_ty, impl_trait_ref) } diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index a064174e261..6156fdf7eac 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -184,7 +184,7 @@ impl<'tcx> RegionHighlightMode<'tcx> { /// Convenience wrapper for `highlighting_region`. pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) { - self.highlighting_region(self.tcx.mk_re_var(vid), number) + self.highlighting_region(ty::Region::new_var(self.tcx, vid), number) } /// Returns `Some(n)` with the number to use for the given region, if any. @@ -685,29 +685,30 @@ pub trait PrettyPrinter<'tcx>: } ty::FnPtr(ref bare_fn) => p!(print(bare_fn)), ty::Infer(infer_ty) => { - let verbose = self.should_print_verbose(); + if self.should_print_verbose() { + p!(write("{:?}", ty.kind())); + return Ok(self); + } + if let ty::TyVar(ty_vid) = infer_ty { if let Some(name) = self.ty_infer_name(ty_vid) { p!(write("{}", name)) } else { - if verbose { - p!(write("{:?}", infer_ty)) - } else { - p!(write("{}", infer_ty)) - } + p!(write("{}", infer_ty)) } } else { - if verbose { p!(write("{:?}", infer_ty)) } else { p!(write("{}", infer_ty)) } + p!(write("{}", infer_ty)) } } - ty::Error(_) => p!("[type error]"), + ty::Error(_) => p!("{{type error}}"), ty::Param(ref param_ty) => p!(print(param_ty)), ty::Bound(debruijn, bound_ty) => match bound_ty.kind { - ty::BoundTyKind::Anon => debug_bound_var(&mut self, debruijn, bound_ty.var)?, + ty::BoundTyKind::Anon => { + rustc_type_ir::debug_bound_var(&mut self, debruijn, bound_ty.var)? 
+ } ty::BoundTyKind::Param(_, s) => match self.should_print_verbose() { - true if debruijn == ty::INNERMOST => p!(write("^{}", s)), - true => p!(write("^{}_{}", debruijn.index(), s)), - false => p!(write("{}", s)), + true => p!(write("{:?}", ty.kind())), + false => p!(write("{s}")), }, }, ty::Adt(def, substs) => { @@ -740,10 +741,11 @@ pub trait PrettyPrinter<'tcx>: } } ty::Placeholder(placeholder) => match placeholder.bound.kind { - ty::BoundTyKind::Anon => { - debug_placeholder_var(&mut self, placeholder.universe, placeholder.bound.var)?; - } - ty::BoundTyKind::Param(_, name) => p!(write("{}", name)), + ty::BoundTyKind::Anon => p!(write("{placeholder:?}")), + ty::BoundTyKind::Param(_, name) => match self.should_print_verbose() { + true => p!(write("{:?}", ty.kind())), + false => p!(write("{name}")), + }, }, ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => { // We use verbose printing in 'NO_QUERIES' mode, to @@ -1372,15 +1374,13 @@ pub trait PrettyPrinter<'tcx>: } ty::ConstKind::Bound(debruijn, bound_var) => { - debug_bound_var(&mut self, debruijn, bound_var)? + rustc_type_ir::debug_bound_var(&mut self, debruijn, bound_var)? } - ty::ConstKind::Placeholder(placeholder) => { - debug_placeholder_var(&mut self, placeholder.universe, placeholder.bound)?; - }, + ty::ConstKind::Placeholder(placeholder) => p!(write("{placeholder:?}")), // FIXME(generic_const_exprs): // write out some legible representation of an abstract const? - ty::ConstKind::Expr(_) => p!("[const expr]"), - ty::ConstKind::Error(_) => p!("[const error]"), + ty::ConstKind::Expr(_) => p!("{{const expr}}"), + ty::ConstKind::Error(_) => p!("{{const error}}"), }; Ok(self) } @@ -2303,7 +2303,7 @@ impl<'a, 'tcx> ty::TypeFolder<TyCtxt<'tcx>> for RegionFolder<'a, 'tcx> { }; if let ty::ReLateBound(debruijn1, br) = *region { assert_eq!(debruijn1, ty::INNERMOST); - self.tcx.mk_re_late_bound(self.current_index, br) + ty::Region::new_late_bound(self.tcx, self.current_index, br) } else { region } @@ -2415,7 +2415,8 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { if let Some(lt_idx) = lifetime_idx { if lt_idx > binder_level_idx { let kind = ty::BrNamed(CRATE_DEF_ID.to_def_id(), name); - return tcx.mk_re_late_bound( + return ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: br.var, kind }, ); @@ -2430,7 +2431,8 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { if let Some(lt_idx) = lifetime_idx { if lt_idx > binder_level_idx { let kind = ty::BrNamed(def_id, name); - return tcx.mk_re_late_bound( + return ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: br.var, kind }, ); @@ -2443,7 +2445,8 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { if let Some(lt_idx) = lifetime_idx { if lt_idx > binder_level_idx { let kind = br.kind; - return tcx.mk_re_late_bound( + return ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: br.var, kind }, ); @@ -2458,7 +2461,11 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { start_or_continue(&mut self, "for<", ", "); do_continue(&mut self, name); } - tcx.mk_re_late_bound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind }) + ty::Region::new_late_bound( + tcx, + ty::INNERMOST, + ty::BoundRegion { var: br.var, kind }, + ) }; let mut folder = RegionFolder { tcx, @@ -3065,27 +3072,3 @@ pub struct OpaqueFnEntry<'tcx> { fn_trait_ref: Option<ty::PolyTraitRef<'tcx>>, return_ty: Option<ty::Binder<'tcx, Term<'tcx>>>, } - -pub fn debug_bound_var<T: std::fmt::Write>( - fmt: &mut T, - debruijn: ty::DebruijnIndex, - var: ty::BoundVar, -) -> Result<(), std::fmt::Error> { - if debruijn == 
ty::INNERMOST { - write!(fmt, "^{}", var.index()) - } else { - write!(fmt, "^{}_{}", debruijn.index(), var.index()) - } -} - -pub fn debug_placeholder_var<T: std::fmt::Write>( - fmt: &mut T, - universe: ty::UniverseIndex, - bound: ty::BoundVar, -) -> Result<(), std::fmt::Error> { - if universe == ty::UniverseIndex::ROOT { - write!(fmt, "!{}", bound.index()) - } else { - write!(fmt, "!{}_{}", universe.index(), bound.index()) - } -} diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 3bbe6a23b66..8bcae3d9ab7 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -589,17 +589,6 @@ pub fn structurally_relate_consts<'tcx, R: TypeRelation<'tcx>>( debug!("{}.structurally_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b); let tcx = relation.tcx(); - // HACK(const_generics): We still need to eagerly evaluate consts when - // relating them because during `normalize_param_env_or_error`, - // we may relate an evaluated constant in a obligation against - // an unnormalized (i.e. unevaluated) const in the param-env. - // FIXME(generic_const_exprs): Once we always lazily unify unevaluated constants - // these `eval` calls can be removed. - if !tcx.features().generic_const_exprs { - a = a.eval(tcx, relation.param_env()); - b = b.eval(tcx, relation.param_env()); - } - if tcx.features().generic_const_exprs { a = tcx.expand_abstract_consts(a); b = tcx.expand_abstract_consts(b); diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 16cb6c91046..a965450d27d 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -88,7 +88,35 @@ impl fmt::Debug for ty::FreeRegion { impl<'tcx> fmt::Debug for ty::FnSig<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "({:?}; c_variadic: {})->{:?}", self.inputs(), self.c_variadic, self.output()) + let ty::FnSig { inputs_and_output: _, c_variadic, unsafety, abi } = self; + + write!(f, "{}", unsafety.prefix_str())?; + match abi { + rustc_target::spec::abi::Abi::Rust => (), + abi => write!(f, "extern \"{abi:?}\" ")?, + }; + + write!(f, "fn(")?; + let inputs = self.inputs(); + match inputs.len() { + 0 if *c_variadic => write!(f, "...)")?, + 0 => write!(f, ")")?, + _ => { + for ty in &self.inputs()[0..(self.inputs().len() - 1)] { + write!(f, "{ty:?}, ")?; + } + write!(f, "{:?}", self.inputs().last().unwrap())?; + if *c_variadic { + write!(f, "...")?; + } + write!(f, ")")?; + } + } + + match self.output().kind() { + ty::Tuple(list) if list.is_empty() => Ok(()), + _ => write!(f, " -> {:?}", self.output()), + } } } @@ -216,20 +244,37 @@ impl<'tcx> fmt::Debug for ty::ConstKind<'tcx> { match self { Param(param) => write!(f, "{param:?}"), Infer(var) => write!(f, "{var:?}"), - Bound(debruijn, var) => ty::print::debug_bound_var(f, *debruijn, *var), - Placeholder(placeholder) => { - ty::print::debug_placeholder_var(f, placeholder.universe, placeholder.bound) - } + Bound(debruijn, var) => rustc_type_ir::debug_bound_var(f, *debruijn, *var), + Placeholder(placeholder) => write!(f, "{placeholder:?}"), Unevaluated(uv) => { f.debug_tuple("Unevaluated").field(&uv.substs).field(&uv.def).finish() } Value(valtree) => write!(f, "{valtree:?}"), - Error(_) => write!(f, "[const error]"), + Error(_) => write!(f, "{{const error}}"), Expr(expr) => write!(f, "{expr:?}"), } } } +impl fmt::Debug for ty::BoundTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + match self.kind { + ty::BoundTyKind::Anon => write!(f, "{:?}", self.var), + ty::BoundTyKind::Param(_, sym) => write!(f, "{sym:?}"), + } + } +} + +impl<T: fmt::Debug> fmt::Debug for ty::Placeholder<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.universe == ty::UniverseIndex::ROOT { + write!(f, "!{:?}", self.bound) + } else { + write!(f, "!{}_{:?}", self.universe.index(), self.bound) + } + } +} + /////////////////////////////////////////////////////////////////////////// // Atomic structs // @@ -294,6 +339,7 @@ TrivialTypeTraversalAndLiftImpls! { crate::ty::AliasRelationDirection, crate::ty::Placeholder<crate::ty::BoundRegion>, crate::ty::Placeholder<crate::ty::BoundTy>, + crate::ty::Placeholder<ty::BoundVar>, crate::ty::ClosureKind, crate::ty::FreeRegion, crate::ty::InferTy, @@ -310,7 +356,6 @@ TrivialTypeTraversalAndLiftImpls! { interpret::Scalar, rustc_target::abi::Size, ty::BoundVar, - ty::Placeholder<ty::BoundVar>, } TrivialTypeTraversalAndLiftImpls! { diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index e6d51c4ec97..d19a7bcde79 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -15,14 +15,14 @@ use hir::def::DefKind; use polonius_engine::Atom; use rustc_data_structures::captures::Captures; use rustc_data_structures::intern::Interned; -use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg}; +use rustc_errors::{DiagnosticArgValue, ErrorGuaranteed, IntoDiagnosticArg, MultiSpan}; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_hir::LangItem; use rustc_index::Idx; use rustc_macros::HashStable; use rustc_span::symbol::{kw, sym, Symbol}; -use rustc_span::Span; +use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT}; use rustc_target::spec::abi::{self, Abi}; use std::borrow::Cow; @@ -568,7 +568,7 @@ impl<'tcx> GeneratorSubsts<'tcx> { let layout = tcx.generator_layout(def_id).unwrap(); layout.variant_fields.iter().map(move |variant| { variant.iter().map(move |field| { - ty::EarlyBinder(layout.field_tys[*field].ty).subst(tcx, self.substs) + ty::EarlyBinder::bind(layout.field_tys[*field].ty).subst(tcx, self.substs) }) }) } @@ -1459,6 +1459,103 @@ impl ParamConst { #[rustc_pass_by_value] pub struct Region<'tcx>(pub Interned<'tcx, RegionKind<'tcx>>); +impl<'tcx> Region<'tcx> { + #[inline] + pub fn new_early_bound( + tcx: TyCtxt<'tcx>, + early_bound_region: ty::EarlyBoundRegion, + ) -> Region<'tcx> { + tcx.intern_region(ty::ReEarlyBound(early_bound_region)) + } + + #[inline] + pub fn new_late_bound( + tcx: TyCtxt<'tcx>, + debruijn: ty::DebruijnIndex, + bound_region: ty::BoundRegion, + ) -> Region<'tcx> { + // Use a pre-interned one when possible. + if let ty::BoundRegion { var, kind: ty::BrAnon(None) } = bound_region + && let Some(inner) = tcx.lifetimes.re_late_bounds.get(debruijn.as_usize()) + && let Some(re) = inner.get(var.as_usize()).copied() + { + re + } else { + tcx.intern_region(ty::ReLateBound(debruijn, bound_region)) + } + } + + #[inline] + pub fn new_free( + tcx: TyCtxt<'tcx>, + scope: DefId, + bound_region: ty::BoundRegionKind, + ) -> Region<'tcx> { + tcx.intern_region(ty::ReFree(ty::FreeRegion { scope, bound_region })) + } + + #[inline] + pub fn new_var(tcx: TyCtxt<'tcx>, v: ty::RegionVid) -> Region<'tcx> { + // Use a pre-interned one when possible. 
+ tcx.lifetimes + .re_vars + .get(v.as_usize()) + .copied() + .unwrap_or_else(|| tcx.intern_region(ty::ReVar(v))) + } + + #[inline] + pub fn new_placeholder(tcx: TyCtxt<'tcx>, placeholder: ty::PlaceholderRegion) -> Region<'tcx> { + tcx.intern_region(ty::RePlaceholder(placeholder)) + } + + /// Constructs a `RegionKind::ReError` region. + #[track_caller] + pub fn new_error(tcx: TyCtxt<'tcx>, reported: ErrorGuaranteed) -> Region<'tcx> { + tcx.intern_region(ty::ReError(reported)) + } + + /// Constructs a `RegionKind::ReError` region and registers a `delay_span_bug` to ensure it + /// gets used. + #[track_caller] + pub fn new_error_misc(tcx: TyCtxt<'tcx>) -> Region<'tcx> { + Region::new_error_with_message( + tcx, + DUMMY_SP, + "RegionKind::ReError constructed but no error reported", + ) + } + + /// Constructs a `RegionKind::ReError` region and registers a `delay_span_bug` with the given + /// `msg` to ensure it gets used. + #[track_caller] + pub fn new_error_with_message<S: Into<MultiSpan>>( + tcx: TyCtxt<'tcx>, + span: S, + msg: &'static str, + ) -> Region<'tcx> { + let reported = tcx.sess.delay_span_bug(span, msg); + Region::new_error(tcx, reported) + } + + /// Avoid this in favour of more specific `new_*` methods, where possible, + /// to avoid the cost of the `match`. + pub fn new_from_kind(tcx: TyCtxt<'tcx>, kind: RegionKind<'tcx>) -> Region<'tcx> { + match kind { + ty::ReEarlyBound(region) => Region::new_early_bound(tcx, region), + ty::ReLateBound(debruijn, region) => Region::new_late_bound(tcx, debruijn, region), + ty::ReFree(ty::FreeRegion { scope, bound_region }) => { + Region::new_free(tcx, scope, bound_region) + } + ty::ReStatic => tcx.lifetimes.re_static, + ty::ReVar(vid) => Region::new_var(tcx, vid), + ty::RePlaceholder(region) => Region::new_placeholder(tcx, region), + ty::ReErased => tcx.lifetimes.re_erased, + ty::ReError(reported) => Region::new_error(tcx, reported), + } + } +} + impl<'tcx> Deref for Region<'tcx> { type Target = RegionKind<'tcx>; @@ -1511,10 +1608,11 @@ impl Atom for RegionVid { rustc_index::newtype_index! { #[derive(HashStable)] + #[debug_format = "{}"] pub struct BoundVar {} } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)] #[derive(HashStable)] pub struct BoundTy { pub var: BoundVar, @@ -2366,7 +2464,7 @@ impl<'tcx> Ty<'tcx> { ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)), - ty::Adt(def, _substs) => def.sized_constraint(tcx).0.is_empty(), + ty::Adt(def, _substs) => def.sized_constraint(tcx).skip_binder().is_empty(), ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false, diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs index 43f95635ab0..8685a22d9ca 100644 --- a/compiler/rustc_middle/src/ty/subst.rs +++ b/compiler/rustc_middle/src/ty/subst.rs @@ -538,13 +538,17 @@ impl<'tcx, T: TypeVisitable<TyCtxt<'tcx>>> TypeVisitable<TyCtxt<'tcx>> for &'tcx /// [`subst_identity`](EarlyBinder::subst_identity) or [`skip_binder`](EarlyBinder::skip_binder). #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] #[derive(Encodable, Decodable, HashStable)] -pub struct EarlyBinder<T>(pub T); +pub struct EarlyBinder<T>(T); /// For early binders, you should first call `subst` before using any visitors. 
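The field of `EarlyBinder` becomes private in the `subst.rs` hunk below, so construction goes through the new `bind` and borrowing access through `as_ref().skip_binder()`, as the updated `skip_binder` docs note. A self-contained analogue of that API shape (not the compiler's type, which also carries the subst machinery):

```rust
// Simplified analogue of the `EarlyBinder` API shape after this change.
struct EarlyBinder<T>(T); // field private to the defining module

impl<T> EarlyBinder<T> {
    fn bind(inner: T) -> EarlyBinder<T> {
        EarlyBinder(inner)
    }
    fn as_ref(&self) -> EarlyBinder<&T> {
        EarlyBinder(&self.0)
    }
    fn skip_binder(self) -> T {
        self.0
    }
}

fn main() {
    let bound = EarlyBinder::bind(vec![1, 2, 3]);
    // `x.as_ref().skip_binder()` instead of reading `x.0` directly.
    assert_eq!(bound.as_ref().skip_binder().len(), 3);
    assert_eq!(bound.skip_binder(), vec![1, 2, 3]);
}
```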
impl<'tcx, T> !TypeFoldable<TyCtxt<'tcx>> for ty::EarlyBinder<T> {} impl<'tcx, T> !TypeVisitable<TyCtxt<'tcx>> for ty::EarlyBinder<T> {} impl<T> EarlyBinder<T> { + pub fn bind(inner: T) -> EarlyBinder<T> { + EarlyBinder(inner) + } + pub fn as_ref(&self) -> EarlyBinder<&T> { EarlyBinder(&self.0) } @@ -582,6 +586,9 @@ impl<T> EarlyBinder<T> { /// arguments of an `FnSig`). Otherwise, consider using /// [`subst_identity`](EarlyBinder::subst_identity). /// + /// To skip the binder on `x: &EarlyBinder<T>` to obtain `&T`, leverage + /// [`EarlyBinder::as_ref`](EarlyBinder::as_ref): `x.as_ref().skip_binder()`. + /// /// See also [`Binder::skip_binder`](super::Binder::skip_binder), which is /// the analogous operation on [`super::Binder`]. pub fn skip_binder(self) -> T { diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index ba05135638e..dce2f5545f5 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -15,7 +15,7 @@ use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher}; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; use rustc_hir::def::{CtorOf, DefKind, Res}; -use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; use rustc_index::bit_set::GrowableBitSet; use rustc_index::{Idx, IndexVec}; use rustc_macros::HashStable; @@ -709,7 +709,7 @@ impl<'tcx> TyCtxt<'tcx> { .as_ref() .map_or_else(|| [].iter(), |l| l.field_tys.iter()) .filter(|decl| !decl.ignore_for_traits) - .map(|decl| ty::EarlyBinder(decl.ty)) + .map(|decl| ty::EarlyBinder::bind(decl.ty)) } /// Normalizes all opaque types in the given value, replacing them @@ -857,6 +857,26 @@ impl<'tcx> TyCtxt<'tcx> { _ => def_kind.article(), } } + + /// Return `true` if the supplied `CrateNum` is "user-visible," meaning either a [public] + /// dependency, or a [direct] private dependency. This is used to decide whether the crate can + /// be shown in `impl` suggestions. + /// + /// [public]: TyCtxt::is_private_dep + /// [direct]: rustc_session::cstore::ExternCrate::is_direct + pub fn is_user_visible_dep(self, key: CrateNum) -> bool { + // | Private | Direct | Visible | | + // |---------|--------|---------|--------------------| + // | Yes | Yes | Yes | !true || true | + // | No | Yes | Yes | !false || true | + // | Yes | No | No | !true || false | + // | No | No | Yes | !false || false | + !self.is_private_dep(key) + // If `extern_crate` is `None`, then the crate was injected (e.g., by the allocator). + // Treat that kind of crate as "indirect", since it's an implementation detail of + // the language. 
+ || self.extern_crate(key.as_def_id()).map_or(false, |e| e.is_direct()) + } } struct OpaqueTypeExpander<'tcx> { diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs index c62c33d4dfc..a00c77ccbcd 100644 --- a/compiler/rustc_middle/src/values.rs +++ b/compiler/rustc_middle/src/values.rs @@ -96,13 +96,13 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability { impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<Ty<'_>> { fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self { - ty::EarlyBinder(Ty::from_cycle_error(tcx, cycle)) + ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle)) } } impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> { fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self { - ty::EarlyBinder(ty::Binder::from_cycle_error(tcx, cycle)) + ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle)) } } diff --git a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs index b74422708ce..ebf830cb9c1 100644 --- a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs +++ b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs @@ -57,6 +57,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> { place: self.parse_place(args[0])?, target: self.parse_block(args[1])?, unwind: UnwindAction::Continue, + replace: false, }) }, @call("mir_call", args) => { diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs index 4d99ab4b0ec..73d5eb62750 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs @@ -106,7 +106,7 @@ pub fn as_constant_inner<'tcx>( } #[instrument(skip(tcx, lit_input))] -pub(crate) fn lit_to_mir_constant<'tcx>( +fn lit_to_mir_constant<'tcx>( tcx: TyCtxt<'tcx>, lit_input: LitToConstInput<'tcx>, ) -> Result<ConstantKind<'tcx>, LitToConstError> { diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs index bcab4c0d24b..3742d640e3b 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs @@ -725,6 +725,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { place: to_drop, target: success, unwind: UnwindAction::Continue, + replace: false, }, ); this.diverge_from(block); diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs index 4e3e98b56e7..8f6a069a7db 100644 --- a/compiler/rustc_mir_build/src/build/mod.rs +++ b/compiler/rustc_mir_build/src/build/mod.rs @@ -1,4 +1,3 @@ -pub(crate) use crate::build::expr::as_constant::lit_to_mir_constant; use crate::build::expr::as_place::PlaceBuilder; use crate::build::scope::DropKind; use rustc_apfloat::ieee::{Double, Single}; diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs index 7331f8ecaa9..7c0fbc6f81c 100644 --- a/compiler/rustc_mir_build/src/build/scope.rs +++ b/compiler/rustc_mir_build/src/build/scope.rs @@ -91,7 +91,7 @@ use rustc_middle::middle::region; use rustc_middle::mir::*; use rustc_middle::thir::{Expr, LintLevel}; -use rustc_span::{DesugaringKind, Span, DUMMY_SP}; +use rustc_span::{Span, DUMMY_SP}; #[derive(Debug)] pub struct Scopes<'tcx> { @@ -371,6 +371,7 @@ impl DropTree { // The caller will handle this if needed. 
unwind: UnwindAction::Terminate, place: drop_data.0.local.into(), + replace: false, }; cfg.terminate(block, drop_data.0.source_info, terminator); } @@ -1128,9 +1129,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { place: Place<'tcx>, value: Rvalue<'tcx>, ) -> BlockAnd<()> { - let span = self.tcx.with_stable_hashing_context(|hcx| { - span.mark_with_reason(None, DesugaringKind::Replace, self.tcx.sess.edition(), hcx) - }); let source_info = self.source_info(span); // create the new block for the assignment @@ -1148,6 +1146,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { place, target: assign, unwind: UnwindAction::Cleanup(assign_unwind), + replace: true, }, ); self.diverge_from(block); @@ -1261,6 +1260,7 @@ fn build_scope_drops<'tcx>( place: local.into(), target: next, unwind: UnwindAction::Continue, + replace: false, }, ); block = next; diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs index c964e62c9d0..0eaab9b5703 100644 --- a/compiler/rustc_mir_build/src/lib.rs +++ b/compiler/rustc_mir_build/src/lib.rs @@ -32,7 +32,6 @@ fluent_messages! { "../messages.ftl" } pub fn provide(providers: &mut Providers) { providers.check_match = thir::pattern::check_match; providers.lit_to_const = thir::constant::lit_to_const; - providers.lit_to_mir_constant = build::lit_to_mir_constant; providers.mir_built = build::mir_built; providers.thir_check_unsafety = check_unsafety::thir_check_unsafety; providers.thir_body = thir::cx::thir_body; diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs index 57ae6a3652d..a7be8e3c903 100644 --- a/compiler/rustc_mir_build/src/thir/constant.rs +++ b/compiler/rustc_mir_build/src/thir/constant.rs @@ -3,6 +3,8 @@ use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput}; use rustc_middle::ty::{self, ParamEnv, ScalarInt, TyCtxt}; use rustc_span::DUMMY_SP; +use crate::build::parse_float_into_scalar; + pub(crate) fn lit_to_const<'tcx>( tcx: TyCtxt<'tcx>, lit_input: LitToConstInput<'tcx>, @@ -46,12 +48,28 @@ pub(crate) fn lit_to_const<'tcx>( (ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => { ty::ValTree::from_scalar_int((*n).into()) } + (ast::LitKind::CStr(data, _), ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Adt(def, _) if Some(def.did()) == tcx.lang_items().c_str()) => + { + let bytes = data as &[u8]; + ty::ValTree::from_raw_bytes(tcx, bytes) + } (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => { let scalar_int = trunc(if neg { (*n as i128).overflowing_neg().0 as u128 } else { *n })?; ty::ValTree::from_scalar_int(scalar_int) } (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()), + (ast::LitKind::Float(n, _), ty::Float(fty)) => { + let bits = parse_float_into_scalar(*n, *fty, neg) + .ok_or_else(|| { + LitToConstError::Reported(tcx.sess.delay_span_bug( + DUMMY_SP, + format!("couldn't parse float literal: {:?}", lit_input.lit), + )) + })? 
+ .assert_int(); + ty::ValTree::from_scalar_int(bits) + } (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()), (ast::LitKind::Err, _) => { return Err(LitToConstError::Reported( diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs index 463f639defe..d00fb754c64 100644 --- a/compiler/rustc_mir_build/src/thir/cx/mod.rs +++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs @@ -142,7 +142,7 @@ impl<'tcx> Cx<'tcx> { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BrEnv, }; - let env_region = self.tcx.mk_re_late_bound(ty::INNERMOST, br); + let env_region = ty::Region::new_late_bound(self.tcx, ty::INNERMOST, br); let closure_env_ty = self.tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap(); let liberated_closure_env_ty = self.tcx.erase_late_bound_regions( diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index b243f1dc8d0..7976b148f75 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -1,13 +1,14 @@ use rustc_hir as hir; +use rustc_hir::def_id::DefId; use rustc_index::Idx; use rustc_infer::infer::{InferCtxt, TyCtxtInferExt}; use rustc_infer::traits::Obligation; use rustc_middle::mir; use rustc_middle::thir::{FieldPat, Pat, PatKind}; -use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_middle::ty::{self, Ty, TyCtxt, ValTree}; use rustc_session::lint; use rustc_span::Span; -use rustc_target::abi::FieldIdx; +use rustc_target::abi::{FieldIdx, VariantIdx}; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; use rustc_trait_selection::traits::{self, ObligationCause}; @@ -29,11 +30,11 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { cv: mir::ConstantKind<'tcx>, id: hir::HirId, span: Span, - mir_structural_match_violation: bool, + check_body_for_struct_match_violation: Option<DefId>, ) -> Box<Pat<'tcx>> { let infcx = self.tcx.infer_ctxt().build(); let mut convert = ConstToPat::new(self, id, span, infcx); - convert.to_pat(cv, mir_structural_match_violation) + convert.to_pat(cv, check_body_for_struct_match_violation) } } @@ -104,7 +105,7 @@ impl<'tcx> ConstToPat<'tcx> { fn to_pat( &mut self, cv: mir::ConstantKind<'tcx>, - mir_structural_match_violation: bool, + check_body_for_struct_match_violation: Option<DefId>, ) -> Box<Pat<'tcx>> { trace!(self.treat_byte_string_as_slice); // This method is just a wrapper handling a validity check; the heavy lifting is @@ -114,14 +115,44 @@ impl<'tcx> ConstToPat<'tcx> { // once indirect_structural_match is a full fledged error, this // level of indirection can be eliminated - let inlined_const_as_pat = - self.recur(cv, mir_structural_match_violation).unwrap_or_else(|_| { - Box::new(Pat { - span: self.span, - ty: cv.ty(), - kind: PatKind::Constant { value: cv }, - }) - }); + let mir_structural_match_violation = check_body_for_struct_match_violation.map(|def_id| { + // `mir_const_qualif` must be called with the `DefId` of the item where the const is + // defined, not where it is declared. The difference is significant for associated + // constants. 
+ self.tcx().mir_const_qualif(def_id).custom_eq + }); + debug!(?check_body_for_struct_match_violation, ?mir_structural_match_violation); + + let inlined_const_as_pat = match cv { + mir::ConstantKind::Ty(c) => match c.kind() { + ty::ConstKind::Param(_) + | ty::ConstKind::Infer(_) + | ty::ConstKind::Bound(_, _) + | ty::ConstKind::Placeholder(_) + | ty::ConstKind::Unevaluated(_) + | ty::ConstKind::Error(_) + | ty::ConstKind::Expr(_) => { + span_bug!(self.span, "unexpected const in `to_pat`: {:?}", c.kind()) + } + ty::ConstKind::Value(valtree) => self + .recur(valtree, cv.ty(), mir_structural_match_violation.unwrap_or(false)) + .unwrap_or_else(|_| { + Box::new(Pat { + span: self.span, + ty: cv.ty(), + kind: PatKind::Constant { value: cv }, + }) + }), + }, + mir::ConstantKind::Unevaluated(_, _) => { + span_bug!(self.span, "unevaluated const in `to_pat`: {cv:?}") + } + mir::ConstantKind::Val(_, _) => Box::new(Pat { + span: self.span, + ty: cv.ty(), + kind: PatKind::Constant { value: cv }, + }), + }; if !self.saw_const_match_error.get() { // If we were able to successfully convert the const to some pat, @@ -141,29 +172,70 @@ impl<'tcx> ConstToPat<'tcx> { // // FIXME(#73448): Find a way to bring const qualification into parity with // `search_for_structural_match_violation`. - if structural.is_none() && mir_structural_match_violation { + if structural.is_none() && mir_structural_match_violation.unwrap_or(false) { warn!("MIR const-checker found novel structural match violation. See #73448."); return inlined_const_as_pat; } if let Some(non_sm_ty) = structural { if !self.type_may_have_partial_eq_impl(cv.ty()) { - // fatal avoids ICE from resolution of nonexistent method (rare case). - self.tcx() - .sess - .emit_fatal(TypeNotStructural { span: self.span, non_sm_ty: non_sm_ty }); - } else if mir_structural_match_violation && !self.saw_const_match_lint.get() { - self.tcx().emit_spanned_lint( - lint::builtin::INDIRECT_STRUCTURAL_MATCH, - self.id, - self.span, - IndirectStructuralMatch { non_sm_ty }, - ); - } else { - debug!( - "`search_for_structural_match_violation` found one, but `CustomEq` was \ - not in the qualifs for that `const`" - ); + if let ty::Adt(def, ..) = non_sm_ty.kind() { + if def.is_union() { + let err = UnionPattern { span: self.span }; + self.tcx().sess.emit_err(err); + } else { + // fatal avoids ICE from resolution of nonexistent method (rare case). + self.tcx() + .sess + .emit_fatal(TypeNotStructural { span: self.span, non_sm_ty }); + } + } else { + let err = InvalidPattern { span: self.span, non_sm_ty }; + self.tcx().sess.emit_err(err); + return Box::new(Pat { span: self.span, ty: cv.ty(), kind: PatKind::Wild }); + } + } else if !self.saw_const_match_lint.get() { + if let Some(mir_structural_match_violation) = mir_structural_match_violation { + match non_sm_ty.kind() { + ty::RawPtr(pointee) + if pointee.ty.is_sized(self.tcx(), self.param_env) => {} + ty::FnPtr(..) | ty::RawPtr(..) => { + self.tcx().emit_spanned_lint( + lint::builtin::POINTER_STRUCTURAL_MATCH, + self.id, + self.span, + PointerPattern, + ); + } + ty::Adt(..) 
if mir_structural_match_violation => { + self.tcx().emit_spanned_lint( + lint::builtin::INDIRECT_STRUCTURAL_MATCH, + self.id, + self.span, + IndirectStructuralMatch { non_sm_ty }, + ); + } + _ => { + debug!( + "`search_for_structural_match_violation` found one, but `CustomEq` was \ + not in the qualifs for that `const`" + ); + } + } + } + } + } else if !self.saw_const_match_lint.get() { + match cv.ty().kind() { + ty::RawPtr(pointee) if pointee.ty.is_sized(self.tcx(), self.param_env) => {} + ty::FnPtr(..) | ty::RawPtr(..) => { + self.tcx().emit_spanned_lint( + lint::builtin::POINTER_STRUCTURAL_MATCH, + self.id, + self.span, + PointerPattern, + ); + } + _ => {} } } } @@ -171,6 +243,7 @@ impl<'tcx> ConstToPat<'tcx> { inlined_const_as_pat } + #[instrument(level = "trace", skip(self), ret)] fn type_may_have_partial_eq_impl(&self, ty: Ty<'tcx>) -> bool { // double-check there even *is* a semantic `PartialEq` to dispatch to. // @@ -187,29 +260,19 @@ impl<'tcx> ConstToPat<'tcx> { ); // FIXME: should this call a `predicate_must_hold` variant instead? - let has_impl = self.infcx.predicate_may_hold(&partial_eq_obligation); - - // Note: To fix rust-lang/rust#65466, we could just remove this type - // walk hack for function pointers, and unconditionally error - // if `PartialEq` is not implemented. However, that breaks stable - // code at the moment, because types like `for <'a> fn(&'a ())` do - // not *yet* implement `PartialEq`. So for now we leave this here. - has_impl - || ty.walk().any(|t| match t.unpack() { - ty::subst::GenericArgKind::Lifetime(_) => false, - ty::subst::GenericArgKind::Type(t) => t.is_fn_ptr(), - ty::subst::GenericArgKind::Const(_) => false, - }) + self.infcx.predicate_may_hold(&partial_eq_obligation) } fn field_pats( &self, - vals: impl Iterator<Item = mir::ConstantKind<'tcx>>, + vals: impl Iterator<Item = (ValTree<'tcx>, Ty<'tcx>)>, ) -> Result<Vec<FieldPat<'tcx>>, FallbackToConstRef> { vals.enumerate() - .map(|(idx, val)| { + .map(|(idx, (val, ty))| { let field = FieldIdx::new(idx); - Ok(FieldPat { field, pattern: self.recur(val, false)? }) + // Patterns can only use monomorphic types. + let ty = self.tcx().normalize_erasing_regions(self.param_env, ty); + Ok(FieldPat { field, pattern: self.recur(val, ty, false)? }) }) .collect() } @@ -218,7 +281,8 @@ impl<'tcx> ConstToPat<'tcx> { #[instrument(skip(self), level = "debug")] fn recur( &self, - cv: mir::ConstantKind<'tcx>, + cv: ValTree<'tcx>, + ty: Ty<'tcx>, mir_structural_match_violation: bool, ) -> Result<Box<Pat<'tcx>>, FallbackToConstRef> { let id = self.id; @@ -226,8 +290,9 @@ impl<'tcx> ConstToPat<'tcx> { let tcx = self.tcx(); let param_env = self.param_env; - let kind = match cv.ty().kind() { + let kind = match ty.kind() { ty::Float(_) => { + self.saw_const_match_lint.set(true); tcx.emit_spanned_lint( lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, id, @@ -236,27 +301,6 @@ impl<'tcx> ConstToPat<'tcx> { ); return Err(FallbackToConstRef); } - ty::Adt(adt_def, _) if adt_def.is_union() => { - // Matching on union fields is unsafe, we can't hide it in constants - self.saw_const_match_error.set(true); - let err = UnionPattern { span }; - tcx.sess.emit_err(err); - PatKind::Wild - } - ty::Adt(..) - if !self.type_may_have_partial_eq_impl(cv.ty()) - // FIXME(#73448): Find a way to bring const qualification into parity with - // `search_for_structural_match_violation` and then remove this condition. - - // Obtain the actual type that isn't annotated. 
If we just looked at `cv.ty` we - // could get `Option<NonStructEq>`, even though `Option` is annotated with derive. - && let Some(non_sm_ty) = traits::search_for_structural_match_violation(span, tcx, cv.ty()) => - { - self.saw_const_match_error.set(true); - let err = TypeNotStructural { span, non_sm_ty }; - tcx.sess.emit_err(err); - PatKind::Wild - } // If the type is not structurally comparable, just emit the constant directly, // causing the pattern match code to treat it opaquely. // FIXME: This code doesn't emit errors itself, the caller emits the errors. @@ -266,16 +310,14 @@ impl<'tcx> ConstToPat<'tcx> { // details. // Backwards compatibility hack because we can't cause hard errors on these // types, so we compare them via `PartialEq::eq` at runtime. - ty::Adt(..) if !self.type_marked_structural(cv.ty()) && self.behind_reference.get() => { - if !self.saw_const_match_error.get() - && !self.saw_const_match_lint.get() - { + ty::Adt(..) if !self.type_marked_structural(ty) && self.behind_reference.get() => { + if !self.saw_const_match_error.get() && !self.saw_const_match_lint.get() { self.saw_const_match_lint.set(true); tcx.emit_spanned_lint( lint::builtin::INDIRECT_STRUCTURAL_MATCH, id, span, - IndirectStructuralMatch { non_sm_ty: cv.ty() }, + IndirectStructuralMatch { non_sm_ty: ty }, ); } // Since we are behind a reference, we can just bubble the error up so we get a @@ -283,77 +325,75 @@ impl<'tcx> ConstToPat<'tcx> { // `PartialEq::eq` on it. return Err(FallbackToConstRef); } - ty::Adt(adt_def, _) if !self.type_marked_structural(cv.ty()) => { - debug!( - "adt_def {:?} has !type_marked_structural for cv.ty: {:?}", - adt_def, - cv.ty() - ); + ty::Adt(adt_def, _) if !self.type_marked_structural(ty) => { + debug!("adt_def {:?} has !type_marked_structural for cv.ty: {:?}", adt_def, ty,); self.saw_const_match_error.set(true); - let err = TypeNotStructural { span, non_sm_ty: cv.ty() }; + let err = TypeNotStructural { span, non_sm_ty: ty }; tcx.sess.emit_err(err); PatKind::Wild } ty::Adt(adt_def, substs) if adt_def.is_enum() => { - let destructured = tcx.destructure_mir_constant(param_env, cv); - + let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap(); + let variant_index = + VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap()); PatKind::Variant { adt_def: *adt_def, substs, - variant_index: destructured - .variant - .expect("destructed const of adt without variant id"), - subpatterns: self.field_pats(destructured.fields.iter().copied())?, + variant_index, + subpatterns: self.field_pats( + fields.iter().copied().zip( + adt_def.variants()[variant_index] + .fields + .iter() + .map(|field| field.ty(self.tcx(), substs)), + ), + )?, } } - ty::Tuple(_) | ty::Adt(_, _) => { - let destructured = tcx.destructure_mir_constant(param_env, cv); - PatKind::Leaf { subpatterns: self.field_pats(destructured.fields.iter().copied())? } - } - ty::Array(..) 
=> PatKind::Array { - prefix: tcx - .destructure_mir_constant(param_env, cv) - .fields + ty::Tuple(fields) => PatKind::Leaf { + subpatterns: self + .field_pats(cv.unwrap_branch().iter().copied().zip(fields.iter()))?, + }, + ty::Adt(def, substs) => PatKind::Leaf { + subpatterns: self.field_pats(cv.unwrap_branch().iter().copied().zip( + def.non_enum_variant().fields.iter().map(|field| field.ty(self.tcx(), substs)), + ))?, + }, + ty::Array(elem_ty, _) => PatKind::Array { + prefix: cv + .unwrap_branch() .iter() - .map(|val| self.recur(*val, false)) + .map(|val| self.recur(*val, *elem_ty, false)) .collect::<Result<_, _>>()?, slice: None, suffix: Box::new([]), }, ty::Ref(_, pointee_ty, ..) => match *pointee_ty.kind() { - // These are not allowed and will error elsewhere anyway. - ty::Dynamic(..) => { - self.saw_const_match_error.set(true); - let err = InvalidPattern { span, non_sm_ty: cv.ty() }; - tcx.sess.emit_err(err); - PatKind::Wild - } - // `&str` is represented as `ConstValue::Slice`, let's keep using this + // `&str` is represented as a valtree, let's keep using this // optimization for now. - ty::Str => PatKind::Constant { value: cv }, + ty::Str => PatKind::Constant { value: mir::ConstantKind::Ty(tcx.mk_const(cv, ty)) }, // `b"foo"` produces a `&[u8; 3]`, but you can't use constants of array type when // matching against references, you can only use byte string literals. // The typechecker has a special case for byte string literals, by treating them // as slices. This means we turn `&[T; N]` constants into slice patterns, which // has no negative effects on pattern matching, even if we're actually matching on // arrays. - ty::Array(..) if !self.treat_byte_string_as_slice => { + ty::Array(elem_ty, _) if !self.treat_byte_string_as_slice => { let old = self.behind_reference.replace(true); - let array = tcx.deref_mir_constant(self.param_env.and(cv)); + // References have the same valtree representation as their pointee. + let array = cv; let val = PatKind::Deref { subpattern: Box::new(Pat { kind: PatKind::Array { - prefix: tcx - .destructure_mir_constant(param_env, array) - .fields + prefix: array.unwrap_branch() .iter() - .map(|val| self.recur(*val, false)) + .map(|val| self.recur(*val, elem_ty, false)) .collect::<Result<_, _>>()?, slice: None, suffix: Box::new([]), }, span, - ty: *pointee_ty, + ty: tcx.mk_slice(elem_ty), }), }; self.behind_reference.set(old); @@ -365,15 +405,14 @@ impl<'tcx> ConstToPat<'tcx> { // pattern. ty::Slice(elem_ty) => { let old = self.behind_reference.replace(true); - let array = tcx.deref_mir_constant(self.param_env.and(cv)); + // References have the same valtree representation as their pointee. + let array = cv; let val = PatKind::Deref { subpattern: Box::new(Pat { kind: PatKind::Slice { - prefix: tcx - .destructure_mir_constant(param_env, array) - .fields + prefix: array.unwrap_branch() .iter() - .map(|val| self.recur(*val, false)) + .map(|val| self.recur(*val, elem_ty, false)) .collect::<Result<_, _>>()?, slice: None, suffix: Box::new([]), @@ -418,48 +457,28 @@ impl<'tcx> ConstToPat<'tcx> { // deref pattern. _ => { if !pointee_ty.is_sized(tcx, param_env) { - // `tcx.deref_mir_constant()` below will ICE with an unsized type - // (except slices, which are handled in a separate arm above). - let err = UnsizedPattern { span, non_sm_ty: *pointee_ty }; tcx.sess.emit_err(err); + // FIXME: introduce PatKind::Error to silence follow up diagnostics due to unreachable patterns. 
PatKind::Wild } else { let old = self.behind_reference.replace(true); - let subpattern = self.recur(tcx.deref_mir_constant(self.param_env.and(cv)), false)?; + // References have the same valtree representation as their pointee. + let subpattern = self.recur(cv, *pointee_ty, false)?; self.behind_reference.set(old); PatKind::Deref { subpattern } } } }, ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::FnDef(..) => { - PatKind::Constant { value: cv } - } - ty::RawPtr(pointee) if pointee.ty.is_sized(tcx, param_env) => { - return Err(FallbackToConstRef); - } - // FIXME: these can have very surprising behaviour where optimization levels or other - // compilation choices change the runtime behaviour of the match. - // See https://github.com/rust-lang/rust/issues/70861 for examples. - ty::FnPtr(..) | ty::RawPtr(..) => { - if !self.saw_const_match_error.get() - && !self.saw_const_match_lint.get() - { - self.saw_const_match_lint.set(true); - tcx.emit_spanned_lint( - lint::builtin::POINTER_STRUCTURAL_MATCH, - id, - span, - PointerPattern - ); - } - return Err(FallbackToConstRef); + PatKind::Constant { value: mir::ConstantKind::Ty(tcx.mk_const(cv, ty)) } } + ty::FnPtr(..) | ty::RawPtr(..) => unreachable!(), _ => { self.saw_const_match_error.set(true); - let err = InvalidPattern { span, non_sm_ty: cv.ty() }; - tcx.sess.emit_err(err); + let err = InvalidPattern { span, non_sm_ty: ty }; + tcx.sess.emit_err(err); PatKind::Wild } }; @@ -472,7 +491,7 @@ impl<'tcx> ConstToPat<'tcx> { // Obtain the actual type that isn't annotated. If we just looked at `cv.ty` we // could get `Option<NonStructEq>`, even though `Option` is annotated with derive. - && let Some(non_sm_ty) = traits::search_for_structural_match_violation(span, tcx, cv.ty()) + && let Some(non_sm_ty) = traits::search_for_structural_match_violation(span, tcx, ty) { self.saw_const_match_lint.set(true); tcx.emit_spanned_lint( @@ -483,6 +502,6 @@ impl<'tcx> ConstToPat<'tcx> { ); } - Ok(Box::new(Pat { span, ty: cv.ty(), kind })) + Ok(Box::new(Pat { span, ty, kind })) } } diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs index 6a77146138b..9df6d2f43ad 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs @@ -53,11 +53,11 @@ use smallvec::{smallvec, SmallVec}; use rustc_data_structures::captures::Captures; use rustc_hir::{HirId, RangeEnd}; use rustc_index::Idx; +use rustc_middle::middle::stability::EvalResult; use rustc_middle::mir; use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange}; use rustc_middle::ty::layout::IntegerExt; use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef}; -use rustc_middle::{middle::stability::EvalResult, mir::interpret::ConstValue}; use rustc_session::lint; use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi::{FieldIdx, Integer, Size, VariantIdx, FIRST_VARIANT}; @@ -140,28 +140,17 @@ impl IntRange { value: mir::ConstantKind<'tcx>, ) -> Option<IntRange> { let ty = value.ty(); - if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, ty) { - let val = if let mir::ConstantKind::Val(ConstValue::Scalar(scalar), _) = value { - // For this specific pattern we can skip a lot of effort and go - // straight to the result, after doing a bit of checking. (We - // could remove this branch and just fall through, which - // is more general but much slower.) - scalar.to_bits_or_ptr_internal(target_size).unwrap().left()? 
- } else { - if let mir::ConstantKind::Ty(c) = value - && let ty::ConstKind::Value(_) = c.kind() - { - bug!("encountered ConstValue in mir::ConstantKind::Ty, whereas this is expected to be in ConstantKind::Val"); - } + let (target_size, bias) = Self::integral_size_and_signed_bias(tcx, ty)?; + let val = match value { + mir::ConstantKind::Ty(c) if let ty::ConstKind::Value(valtree) = c.kind() => { + valtree.unwrap_leaf().to_bits(target_size).ok() + }, + // This is a more general form of the previous case. + _ => value.try_eval_bits(tcx, param_env, ty), + }?; - // This is a more general form of the previous case. - value.try_eval_bits(tcx, param_env, ty)? - }; - let val = val ^ bias; - Some(IntRange { range: val..=val, bias }) - } else { - None - } + let val = val ^ bias; + Some(IntRange { range: val..=val, bias }) } #[inline] diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index 1cf2f7ec0ff..1bbe7b45c1e 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -18,14 +18,15 @@ use rustc_hir::pat_util::EnumerateAndAdjustIterator; use rustc_hir::RangeEnd; use rustc_index::Idx; use rustc_middle::mir::interpret::{ - ConstValue, ErrorHandled, LitToConstError, LitToConstInput, Scalar, + ConstValue, ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar, }; -use rustc_middle::mir::{self, UserTypeProjection}; +use rustc_middle::mir::{self, ConstantKind, UserTypeProjection}; use rustc_middle::mir::{BorrowKind, Mutability}; use rustc_middle::thir::{Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange}; use rustc_middle::ty::subst::{GenericArg, SubstsRef}; use rustc_middle::ty::CanonicalUserTypeAnnotation; -use rustc_middle::ty::{self, AdtDef, ConstKind, Region, Ty, TyCtxt, UserType}; +use rustc_middle::ty::TypeVisitableExt; +use rustc_middle::ty::{self, AdtDef, Region, Ty, TyCtxt, UserType}; use rustc_span::{Span, Symbol}; use rustc_target::abi::FieldIdx; @@ -518,16 +519,24 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { } }; - // `mir_const_qualif` must be called with the `DefId` of the item where the const is - // defined, not where it is declared. The difference is significant for associated - // constants. - let mir_structural_match_violation = self.tcx.mir_const_qualif(instance.def_id()).custom_eq; - debug!("mir_structural_match_violation({:?}) -> {}", qpath, mir_structural_match_violation); - - match self.tcx.const_eval_instance(param_env_reveal_all, instance, Some(span)) { - Ok(literal) => { - let const_ = mir::ConstantKind::Val(literal, ty); - let pattern = self.const_to_pat(const_, id, span, mir_structural_match_violation); + let cid = GlobalId { instance, promoted: None }; + // Prefer valtrees over opaque constants. 
+ let const_value = self + .tcx + .const_eval_global_id_for_typeck(param_env_reveal_all, cid, Some(span)) + .map(|val| match val { + Some(valtree) => mir::ConstantKind::Ty(self.tcx.mk_const(valtree, ty)), + None => mir::ConstantKind::Val( + self.tcx + .const_eval_global_id(param_env_reveal_all, cid, Some(span)) + .expect("const_eval_global_id_for_typeck should have already failed"), + ty, + ), + }); + + match const_value { + Ok(const_) => { + let pattern = self.const_to_pat(const_, id, span, Some(instance.def_id())); if !is_associated_const { return pattern; @@ -577,27 +586,69 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { id: hir::HirId, span: Span, ) -> PatKind<'tcx> { - let value = mir::ConstantKind::from_inline_const(self.tcx, anon_const.def_id); - - // Evaluate early like we do in `lower_path`. - let value = value.eval(self.tcx, self.param_env); - - match value { - mir::ConstantKind::Ty(c) => match c.kind() { - ConstKind::Param(_) => { - self.tcx.sess.emit_err(ConstParamInPattern { span }); - return PatKind::Wild; - } - ConstKind::Error(_) => { - return PatKind::Wild; + let tcx = self.tcx; + let def_id = anon_const.def_id; + let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); + let body_id = match tcx.hir().get(hir_id) { + hir::Node::AnonConst(ac) => ac.body, + _ => span_bug!( + tcx.def_span(def_id.to_def_id()), + "from_inline_const can only process anonymous constants" + ), + }; + let expr = &tcx.hir().body(body_id).value; + let ty = tcx.typeck(def_id).node_type(hir_id); + + // Special case inline consts that are just literals. This is solely + // a performance optimization, as we could also just go through the regular + // const eval path below. + // FIXME: investigate the performance impact of removing this. + let lit_input = match expr.kind { + hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }), + hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind { + hir::ExprKind::Lit(ref lit) => { + Some(LitToConstInput { lit: &lit.node, ty, neg: true }) } - _ => bug!("Expected ConstKind::Param"), + _ => None, }, - mir::ConstantKind::Val(_, _) => self.const_to_pat(value, id, span, false).kind, - mir::ConstantKind::Unevaluated(..) => { - // If we land here it means the const can't be evaluated because it's `TooGeneric`. - self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span }); - return PatKind::Wild; + _ => None, + }; + if let Some(lit_input) = lit_input { + match tcx.at(expr.span).lit_to_const(lit_input) { + Ok(c) => return self.const_to_pat(ConstantKind::Ty(c), id, span, None).kind, + // If an error occurred, ignore that it's a literal + // and leave reporting the error up to const eval of + // the unevaluated constant below. + Err(_) => {} + } + } + + let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()); + let parent_substs = + tcx.erase_regions(ty::InternalSubsts::identity_for_item(tcx, typeck_root_def_id)); + let substs = + ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty }) + .substs; + + let uneval = mir::UnevaluatedConst { def: def_id.to_def_id(), substs, promoted: None }; + debug_assert!(!substs.has_free_regions()); + + let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), substs: substs }; + // First try using a valtree in order to destructure the constant into a pattern. 
+ if let Ok(Some(valtree)) = + self.tcx.const_eval_resolve_for_typeck(self.param_env, ct, Some(span)) + { + self.const_to_pat(ConstantKind::Ty(self.tcx.mk_const(valtree, ty)), id, span, None).kind + } else { + // If that fails, convert it to an opaque constant pattern. + match tcx.const_eval_resolve(self.param_env, uneval, None) { + Ok(val) => self.const_to_pat(mir::ConstantKind::Val(val, ty), id, span, None).kind, + Err(ErrorHandled::TooGeneric) => { + // If we land here it means the const can't be evaluated because it's `TooGeneric`. + self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span }); + PatKind::Wild + } + Err(ErrorHandled::Reported(_)) => PatKind::Wild, } } } @@ -626,8 +677,10 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { let lit_input = LitToConstInput { lit: &lit.node, ty: self.typeck_results.expr_ty(expr), neg }; - match self.tcx.at(expr.span).lit_to_mir_constant(lit_input) { - Ok(constant) => self.const_to_pat(constant, expr.hir_id, lit.span, false).kind, + match self.tcx.at(expr.span).lit_to_const(lit_input) { + Ok(constant) => { + self.const_to_pat(ConstantKind::Ty(constant), expr.hir_id, lit.span, None).kind + } Err(LitToConstError::Reported(_)) => PatKind::Wild, Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"), } @@ -806,6 +859,9 @@ pub(crate) fn compare_const_vals<'tcx>( mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(a)), _a_ty), mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(b)), _b_ty), ) => return Some(a.cmp(&b)), + (mir::ConstantKind::Ty(a), mir::ConstantKind::Ty(b)) => { + return Some(a.kind().cmp(&b.kind())); + } _ => {} }, } diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs index 18895072c3b..d615c83d621 100644 --- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs +++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs @@ -237,6 +237,7 @@ where place: self.place, target: self.succ, unwind: self.unwind.into_action(), + replace: false, }, ); } @@ -719,6 +720,7 @@ where place: tcx.mk_place_deref(ptr), target: loop_block, unwind: unwind.into_action(), + replace: false, }, ); @@ -963,8 +965,12 @@ where } fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock { - let block = - TerminatorKind::Drop { place: self.place, target, unwind: unwind.into_action() }; + let block = TerminatorKind::Drop { + place: self.place, + target, + unwind: unwind.into_action(), + replace: false, + }; self.new_block(unwind, block) } diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs index c8fe1af6674..ba328e78040 100644 --- a/compiler/rustc_mir_dataflow/src/framework/direction.rs +++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs @@ -479,7 +479,7 @@ impl Direction for Forward { Goto { target } => propagate(target, exit_state), Assert { target, unwind, expected: _, msg: _, cond: _ } - | Drop { target, unwind, place: _ } + | Drop { target, unwind, place: _, replace: _ } | FalseUnwind { real_target: target, unwind } => { if let UnwindAction::Cleanup(unwind) = unwind { propagate(unwind, exit_state); diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs index aeca0073304..6ae6bdc17d5 100644 --- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs @@ -199,8 +199,7 @@ impl DefUse { | NonMutatingUseContext::Move | NonMutatingUseContext::PlaceMention | 
NonMutatingUseContext::ShallowBorrow - | NonMutatingUseContext::SharedBorrow - | NonMutatingUseContext::UniqueBorrow, + | NonMutatingUseContext::SharedBorrow, ) => Some(DefUse::Use), PlaceContext::MutatingUse(MutatingUseContext::Projection) diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs index b29ffcc70f9..ef2a0c790e9 100644 --- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs +++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs @@ -80,7 +80,7 @@ fn add_move_for_packed_drop<'tcx>( is_cleanup: bool, ) { debug!("add_move_for_packed_drop({:?} @ {:?})", terminator, loc); - let TerminatorKind::Drop { ref place, target, unwind } = terminator.kind else { + let TerminatorKind::Drop { ref place, target, unwind, replace } = terminator.kind else { unreachable!(); }; @@ -98,6 +98,11 @@ fn add_move_for_packed_drop<'tcx>( patch.add_assign(loc, Place::from(temp), Rvalue::Use(Operand::Move(*place))); patch.patch_terminator( loc.block, - TerminatorKind::Drop { place: Place::from(temp), target: storage_dead_block, unwind }, + TerminatorKind::Drop { + place: Place::from(temp), + target: storage_dead_block, + unwind, + replace, + }, ); } diff --git a/compiler/rustc_mir_transform/src/check_alignment.rs b/compiler/rustc_mir_transform/src/check_alignment.rs index d60184e0ebe..1fe8ea07892 100644 --- a/compiler/rustc_mir_transform/src/check_alignment.rs +++ b/compiler/rustc_mir_transform/src/check_alignment.rs @@ -75,6 +75,14 @@ struct PointerFinder<'tcx, 'a> { } impl<'tcx, 'a> Visitor<'tcx> for PointerFinder<'tcx, 'a> { + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + if let Rvalue::AddressOf(..) = rvalue { + // Ignore dereferences inside of an AddressOf + return; + } + self.super_rvalue(rvalue, location); + } + fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) { if let PlaceContext::NonUse(_) = context { return; diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs index a5d18fff89b..1ba1951afde 100644 --- a/compiler/rustc_mir_transform/src/const_prop.rs +++ b/compiler/rustc_mir_transform/src/const_prop.rs @@ -772,7 +772,6 @@ impl<'tcx> Visitor<'tcx> for CanConstProp { // mutation. | NonMutatingUse(NonMutatingUseContext::SharedBorrow) | NonMutatingUse(NonMutatingUseContext::ShallowBorrow) - | NonMutatingUse(NonMutatingUseContext::UniqueBorrow) | NonMutatingUse(NonMutatingUseContext::AddressOf) | MutatingUse(MutatingUseContext::Borrow) | MutatingUse(MutatingUseContext::AddressOf) => { diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs index 319f3a79705..3df459dfa79 100644 --- a/compiler/rustc_mir_transform/src/copy_prop.rs +++ b/compiler/rustc_mir_transform/src/copy_prop.rs @@ -130,7 +130,6 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> { PlaceContext::NonMutatingUse( NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::ShallowBorrow - | NonMutatingUseContext::UniqueBorrow | NonMutatingUseContext::AddressOf, ) => true, // For debuginfo, merging locals is ok. 
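Stepping back to the `const_to_pat` rewrite above (`thir/pattern/const_to_pat.rs`): instead of calling `destructure_mir_constant`, the new code walks the valtree directly, so an enum constant is a branch whose first element is a leaf holding the variant index and whose remaining elements are the variant's fields. A small self-contained sketch of that encoding, using an invented `ToyValTree` rather than rustc's `ty::ValTree`:

    /// Simplified stand-in for the valtree representation: a constant is either
    /// a scalar leaf or a branch of nested constants.
    enum ToyValTree {
        Leaf(u128),
        Branch(Vec<ToyValTree>),
    }

    /// Mirrors the enum arm of `ConstToPat::recur`: the first branch element is
    /// the variant index, the remaining elements are that variant's fields.
    fn destructure_enum(cv: &ToyValTree) -> (u32, &[ToyValTree]) {
        match cv {
            ToyValTree::Branch(elems) => {
                let (variant, fields) =
                    elems.split_first().expect("enum valtree is never empty");
                match variant {
                    ToyValTree::Leaf(idx) => (*idx as u32, fields),
                    _ => panic!("variant index must be a leaf"),
                }
            }
            ToyValTree::Leaf(_) => panic!("enums are encoded as branches"),
        }
    }

    fn main() {
        // Something like `Some(7u8)` in this toy encoding: variant 1, one field.
        let cv = ToyValTree::Branch(vec![ToyValTree::Leaf(1), ToyValTree::Leaf(7)]);
        let (variant, fields) = destructure_enum(&cv);
        assert_eq!(variant, 1);
        assert_eq!(fields.len(), 1);
    }

Tuples and non-enum ADTs use the same branch layout without the leading variant leaf, which is exactly how the `ty::Tuple` and plain `ty::Adt` arms above zip the branch elements with the field types.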
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index 98e7a519c20..fda0e1023f7 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -14,7 +14,7 @@ use rustc_mir_dataflow::un_derefer::UnDerefer; use rustc_mir_dataflow::MoveDataParamEnv; use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits}; use rustc_mir_dataflow::{Analysis, ResultsCursor}; -use rustc_span::{DesugaringKind, Span}; +use rustc_span::Span; use rustc_target::abi::{FieldIdx, VariantIdx}; use std::fmt; @@ -401,7 +401,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let terminator = data.terminator(); match terminator.kind { - TerminatorKind::Drop { mut place, target, unwind } => { + TerminatorKind::Drop { mut place, target, unwind, replace } => { if let Some(new_place) = self.un_derefer.derefer(place.as_ref(), self.body) { place = new_place; } @@ -434,10 +434,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { ) } LookupResult::Parent(..) => { - if !matches!( - terminator.source_info.span.desugaring_kind(), - Some(DesugaringKind::Replace), - ) { + if !replace { self.tcx.sess.delay_span_bug( terminator.source_info.span, format!("drop of untracked value {:?}", bb), diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs index 5989dbebf2d..b1c9c4acc40 100644 --- a/compiler/rustc_mir_transform/src/function_item_references.rs +++ b/compiler/rustc_mir_transform/src/function_item_references.rs @@ -83,7 +83,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { // If the inner type matches the type bound by `Pointer` if inner_ty == bound_ty { // Do a substitution using the parameters from the callsite - let subst_ty = EarlyBinder(inner_ty).subst(self.tcx, substs_ref); + let subst_ty = EarlyBinder::bind(inner_ty).subst(self.tcx, substs_ref); if let Some((fn_id, fn_substs)) = FunctionItemRefChecker::is_fn_ref(subst_ty) { diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs index 891e446942e..89567ed0ab8 100644 --- a/compiler/rustc_mir_transform/src/generator.rs +++ b/compiler/rustc_mir_transform/src/generator.rs @@ -1045,7 +1045,10 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { for (block, block_data) in body.basic_blocks.iter_enumerated() { let (target, unwind, source_info) = match block_data.terminator() { - Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => { + Terminator { + source_info, + kind: TerminatorKind::Drop { place, target, unwind, replace: _ }, + } => { if let Some(local) = place.as_local() { if local == SELF_ARG { (target, unwind, source_info) @@ -1304,6 +1307,7 @@ fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock { place: Place::from(SELF_ARG), target: return_block, unwind: UnwindAction::Continue, + replace: false, }; let source_info = SourceInfo::outermost(body.span); diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 1748b1bf4a0..55b9f084c39 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -192,7 +192,7 @@ impl<'tcx> Inliner<'tcx> { let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions( self.tcx, self.param_env, - ty::EarlyBinder(callee_body.clone()), + ty::EarlyBinder::bind(callee_body.clone()), ) else { return 
Err("failed to normalize callee body"); }; @@ -449,16 +449,16 @@ impl<'tcx> Inliner<'tcx> { checker.visit_basic_block_data(bb, blk); let term = blk.terminator(); - if let TerminatorKind::Drop { ref place, target, unwind } = term.kind { + if let TerminatorKind::Drop { ref place, target, unwind, replace: _ } = term.kind { work_list.push(target); // If the place doesn't actually need dropping, treat it like a regular goto. let ty = callsite .callee - .subst_mir(self.tcx, ty::EarlyBinder(&place.ty(callee_body, tcx).ty)); + .subst_mir(self.tcx, ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty)); if ty.needs_drop(tcx, self.param_env) && let UnwindAction::Cleanup(unwind) = unwind { - work_list.push(unwind); - } + work_list.push(unwind); + } } else if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set && matches!(term.kind, TerminatorKind::InlineAsm { .. }) { @@ -790,7 +790,7 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> { // If the place doesn't actually need dropping, treat it like a regular goto. let ty = self .instance - .subst_mir(tcx, ty::EarlyBinder(&place.ty(self.callee_body, tcx).ty)); + .subst_mir(tcx, ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty)); if ty.needs_drop(tcx, self.param_env) { self.cost += CALL_PENALTY; if let UnwindAction::Cleanup(_) = unwind { @@ -801,7 +801,7 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> { } } TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => { - let fn_ty = self.instance.subst_mir(tcx, ty::EarlyBinder(&f.literal.ty())); + let fn_ty = self.instance.subst_mir(tcx, ty::EarlyBinder::bind(&f.literal.ty())); self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) { // Don't give intrinsics the extra penalty for calls INSTR_COST diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs index 1ccf06f6153..8a10445f837 100644 --- a/compiler/rustc_mir_transform/src/inline/cycle.rs +++ b/compiler/rustc_mir_transform/src/inline/cycle.rs @@ -47,7 +47,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>( let Ok(substs) = caller.try_subst_mir_and_normalize_erasing_regions( tcx, param_env, - ty::EarlyBinder(substs), + ty::EarlyBinder::bind(substs), ) else { trace!(?caller, ?param_env, ?substs, "cannot normalize, skipping"); continue; diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 65864dc016f..54c138b6fbd 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -3,6 +3,7 @@ #![deny(rustc::diagnostic_outside_of_impl)] #![feature(box_patterns)] #![feature(drain_filter)] +#![feature(is_sorted)] #![feature(let_chains)] #![feature(map_try_insert)] #![feature(min_specialization)] @@ -84,6 +85,7 @@ mod match_branches; mod multiple_return_terminators; mod normalize_array_len; mod nrvo; +mod prettify; mod ref_prop; mod remove_noop_landing_pads; mod remove_storage_markers; @@ -581,6 +583,9 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &large_enums::EnumSizeOpt { discrepancy: 128 }, // Some cleanup necessary at least for LLVM and potentially other codegen backends. &add_call_guards::CriticalCallEdges, + // Cleanup for human readability, off by default. + &prettify::ReorderBasicBlocks, + &prettify::ReorderLocals, // Dump the end result for testing and debugging purposes. 
&dump_mir::Marker("PreCodegen"), ], diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 59942dc76f9..6eb48498274 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -41,7 +41,7 @@ pub struct MatchBranchSimplification; impl<'tcx> MirPass<'tcx> for MatchBranchSimplification { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - sess.mir_opt_level() >= 3 + sess.mir_opt_level() >= 1 } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { @@ -62,7 +62,12 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification { .. } if targets.iter().len() == 1 => { let (value, target) = targets.iter().next().unwrap(); - if target == targets.otherwise() { + // We require that this block and the two possible target blocks all be + // distinct. + if target == targets.otherwise() + || bb_idx == target + || bb_idx == targets.otherwise() + { continue; } (discr, value, target, targets.otherwise()) diff --git a/compiler/rustc_mir_transform/src/prettify.rs b/compiler/rustc_mir_transform/src/prettify.rs new file mode 100644 index 00000000000..6f46974ea00 --- /dev/null +++ b/compiler/rustc_mir_transform/src/prettify.rs @@ -0,0 +1,150 @@ +//! These two passes provide no value to the compiler, so are off at every level. +//! +//! However, they can be enabled on the command line +//! (`-Zmir-enable-passes=+ReorderBasicBlocks,+ReorderLocals`) +//! to make the MIR easier to read for humans. + +use crate::MirPass; +use rustc_index::{bit_set::BitSet, IndexSlice, IndexVec}; +use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; +use rustc_middle::mir::*; +use rustc_middle::ty::TyCtxt; +use rustc_session::Session; + +/// Rearranges the basic blocks into a *reverse post-order*. +/// +/// Thus after this pass, all the successors of a block are later than it in the +/// `IndexVec`, unless that successor is a back-edge (such as from a loop). +pub struct ReorderBasicBlocks; + +impl<'tcx> MirPass<'tcx> for ReorderBasicBlocks { + fn is_enabled(&self, _session: &Session) -> bool { + false + } + + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let rpo: IndexVec<BasicBlock, BasicBlock> = + body.basic_blocks.postorder().iter().copied().rev().collect(); + if rpo.iter().is_sorted() { + return; + } + + let mut updater = BasicBlockUpdater { map: rpo.invert_bijective_mapping(), tcx }; + debug_assert_eq!(updater.map[START_BLOCK], START_BLOCK); + updater.visit_body(body); + + permute(body.basic_blocks.as_mut(), &updater.map); + } +} + +/// Rearranges the locals into *use* order. +/// +/// Thus after this pass, a local with a smaller [`Location`] where it was first +/// assigned or referenced will have a smaller number. +/// +/// (Does not reorder arguments nor the [`RETURN_PLACE`].) 
+pub struct ReorderLocals; + +impl<'tcx> MirPass<'tcx> for ReorderLocals { + fn is_enabled(&self, _session: &Session) -> bool { + false + } + + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let mut finder = + LocalFinder { map: IndexVec::new(), seen: BitSet::new_empty(body.local_decls.len()) }; + + // We can't reorder the return place or the arguments + for local in (0..=body.arg_count).map(Local::from_usize) { + finder.track(local); + } + + for (bb, bbd) in body.basic_blocks.iter_enumerated() { + finder.visit_basic_block_data(bb, bbd); + } + + // track everything in case there are some locals that we never saw, + // such as in non-block things like debug info or in non-uses. + for local in body.local_decls.indices() { + finder.track(local); + } + + if finder.map.iter().is_sorted() { + return; + } + + let mut updater = LocalUpdater { map: finder.map.invert_bijective_mapping(), tcx }; + + for local in (0..=body.arg_count).map(Local::from_usize) { + debug_assert_eq!(updater.map[local], local); + } + + updater.visit_body_preserves_cfg(body); + + permute(&mut body.local_decls, &updater.map); + } +} + +fn permute<I: rustc_index::Idx + Ord, T>(data: &mut IndexVec<I, T>, map: &IndexSlice<I, I>) { + // FIXME: It would be nice to have a less-awkward way to apply permutations, + // but I don't know one that exists. `sort_by_cached_key` has logic for it + // internally, but not in a way that we're allowed to use here. + let mut enumerated: Vec<_> = std::mem::take(data).into_iter_enumerated().collect(); + enumerated.sort_by_key(|p| map[p.0]); + *data = enumerated.into_iter().map(|p| p.1).collect(); +} + +struct BasicBlockUpdater<'tcx> { + map: IndexVec<BasicBlock, BasicBlock>, + tcx: TyCtxt<'tcx>, +} + +impl<'tcx> MutVisitor<'tcx> for BasicBlockUpdater<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, _location: Location) { + for succ in terminator.successors_mut() { + *succ = self.map[*succ]; + } + } +} + +struct LocalFinder { + map: IndexVec<Local, Local>, + seen: BitSet<Local>, +} + +impl LocalFinder { + fn track(&mut self, l: Local) { + if self.seen.insert(l) { + self.map.push(l); + } + } +} + +impl<'tcx> Visitor<'tcx> for LocalFinder { + fn visit_local(&mut self, l: Local, context: PlaceContext, _location: Location) { + // Exclude non-uses to keep `StorageLive` from controlling where we put + // a `Local`, since it might not actually be assigned until much later. 
+ if context.is_use() { + self.track(l); + } + } +} + +struct LocalUpdater<'tcx> { + pub map: IndexVec<Local, Local>, + pub tcx: TyCtxt<'tcx>, +} + +impl<'tcx> MutVisitor<'tcx> for LocalUpdater<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn visit_local(&mut self, l: &mut Local, _: PlaceContext, _: Location) { + *l = self.map[*l]; + } +} diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs index 0ea8f2ba93f..243cb463560 100644 --- a/compiler/rustc_mir_transform/src/required_consts.rs +++ b/compiler/rustc_mir_transform/src/required_consts.rs @@ -17,8 +17,8 @@ impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> { let literal = constant.literal; match literal { ConstantKind::Ty(c) => match c.kind() { - ConstKind::Param(_) | ConstKind::Error(_) => {} - _ => bug!("only ConstKind::Param should be encountered here, got {:#?}", c), + ConstKind::Param(_) | ConstKind::Error(_) | ConstKind::Value(_) => {} + _ => bug!("only ConstKind::Param/Value should be encountered here, got {:#?}", c), }, ConstantKind::Unevaluated(..) => self.required_consts.push(*constant), ConstantKind::Val(..) => {} diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 7c47d8814db..ae726dea944 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -69,7 +69,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<' // of this function. Is this intentional? if let Some(ty::Generator(gen_def_id, substs, _)) = ty.map(Ty::kind) { let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap(); - let body = EarlyBinder(body.clone()).subst(tcx, substs); + let body = EarlyBinder::bind(body.clone()).subst(tcx, substs); debug!("make_shim({:?}) = {:?}", instance, body); return body; } @@ -544,6 +544,7 @@ impl<'tcx> CloneShimBuilder<'tcx> { place: dest_field, target: unwind, unwind: UnwindAction::Terminate, + replace: false, }, true, ); @@ -643,8 +644,11 @@ fn build_call_shim<'tcx>( let sig = sig.map_bound(|sig| tcx.erase_late_bound_regions(sig)); assert_eq!(sig_substs.is_some(), !instance.has_polymorphic_mir_body()); - let mut sig = - if let Some(sig_substs) = sig_substs { sig.subst(tcx, &sig_substs) } else { sig.0 }; + let mut sig = if let Some(sig_substs) = sig_substs { + sig.subst(tcx, &sig_substs) + } else { + sig.skip_binder() + }; if let CallKind::Indirect(fnty) = call_kind { // `sig` determines our local decls, and thus the callee type in the `Call` terminator. 
This @@ -800,6 +804,7 @@ fn build_call_shim<'tcx>( place: rcvr_place(), target: BasicBlock::new(2), unwind: UnwindAction::Continue, + replace: false, }, false, ); @@ -815,6 +820,7 @@ fn build_call_shim<'tcx>( place: rcvr_place(), target: BasicBlock::new(4), unwind: UnwindAction::Terminate, + replace: false, }, true, ); diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs index e8e4246b797..7a0d3a025f3 100644 --- a/compiler/rustc_mir_transform/src/ssa.rs +++ b/compiler/rustc_mir_transform/src/ssa.rs @@ -216,7 +216,6 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> { PlaceContext::NonMutatingUse( NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::ShallowBorrow - | NonMutatingUseContext::UniqueBorrow | NonMutatingUseContext::AddressOf, ) | PlaceContext::MutatingUse(_) => { diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index 35b154b7b34..cefa64d27ac 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -677,7 +677,7 @@ impl<'a, 'tcx> MirNeighborCollector<'a, 'tcx> { self.instance.subst_mir_and_normalize_erasing_regions( self.tcx, ty::ParamEnv::reveal_all(), - ty::EarlyBinder(value), + ty::EarlyBinder::bind(value), ) } } diff --git a/compiler/rustc_monomorphize/src/partitioning.rs b/compiler/rustc_monomorphize/src/partitioning.rs new file mode 100644 index 00000000000..be9c349c384 --- /dev/null +++ b/compiler/rustc_monomorphize/src/partitioning.rs @@ -0,0 +1,1182 @@ +//! Partitioning Codegen Units for Incremental Compilation +//! ====================================================== +//! +//! The task of this module is to take the complete set of monomorphizations of +//! a crate and produce a set of codegen units from it, where a codegen unit +//! is a named set of (mono-item, linkage) pairs. That is, this module +//! decides which monomorphization appears in which codegen units with which +//! linkage. The following paragraphs describe some of the background on the +//! partitioning scheme. +//! +//! The most important opportunity for saving on compilation time with +//! incremental compilation is to avoid re-codegenning and re-optimizing code. +//! Since the unit of codegen and optimization for LLVM is "modules" or, how +//! we call them "codegen units", the particulars of how much time can be saved +//! by incremental compilation are tightly linked to how the output program is +//! partitioned into these codegen units prior to passing it to LLVM -- +//! especially because we have to treat codegen units as opaque entities once +//! they are created: There is no way for us to incrementally update an existing +//! LLVM module and so we have to build any such module from scratch if it was +//! affected by some change in the source code. +//! +//! From that point of view it would make sense to maximize the number of +//! codegen units by, for example, putting each function into its own module. +//! That way only those modules would have to be re-compiled that were actually +//! affected by some change, minimizing the number of functions that could have +//! been re-used but just happened to be located in a module that is +//! re-compiled. +//! +//! However, since LLVM optimization does not work across module boundaries, +//! using such a highly granular partitioning would lead to very slow runtime +//! code since it would effectively prohibit inlining and other inter-procedure +//! optimizations. 
We want to avoid that as much as possible. +//! +//! Thus we end up with a trade-off: The bigger the codegen units, the better +//! LLVM's optimizer can do its work, but also the smaller the compilation time +//! reduction we get from incremental compilation. +//! +//! Ideally, we would create a partitioning such that there are few big codegen +//! units with few interdependencies between them. For now though, we use the +//! following heuristic to determine the partitioning: +//! +//! - There are two codegen units for every source-level module: +//! - One for "stable", that is non-generic, code +//! - One for more "volatile" code, i.e., monomorphized instances of functions +//! defined in that module +//! +//! In order to see why this heuristic makes sense, let's take a look at when a +//! codegen unit can get invalidated: +//! +//! 1. The most straightforward case is when the BODY of a function or global +//! changes. Then any codegen unit containing the code for that item has to be +//! re-compiled. Note that this includes all codegen units where the function +//! has been inlined. +//! +//! 2. The next case is when the SIGNATURE of a function or global changes. In +//! this case, all codegen units containing a REFERENCE to that item have to be +//! re-compiled. This is a superset of case 1. +//! +//! 3. The final and most subtle case is when a REFERENCE to a generic function +//! is added or removed somewhere. Even though the definition of the function +//! might be unchanged, a new REFERENCE might introduce a new monomorphized +//! instance of this function which has to be placed and compiled somewhere. +//! Conversely, when removing a REFERENCE, it might have been the last one with +//! that particular set of generic arguments and thus we have to remove it. +//! +//! From the above we see that just using one codegen unit per source-level +//! module is not such a good idea, since just adding a REFERENCE to some +//! generic item somewhere else would invalidate everything within the module +//! containing the generic item. The heuristic above reduces this detrimental +//! side-effect of references a little by at least not touching the non-generic +//! code of the module. +//! +//! A Note on Inlining +//! ------------------ +//! As briefly mentioned above, in order for LLVM to be able to inline a +//! function call, the body of the function has to be available in the LLVM +//! module where the call is made. This has a few consequences for partitioning: +//! +//! - The partitioning algorithm has to take care of placing functions into all +//! codegen units where they should be available for inlining. It also has to +//! decide on the correct linkage for these functions. +//! +//! - The partitioning algorithm has to know which functions are likely to get +//! inlined, so it can distribute function instantiations accordingly. Since +//! there is no way of knowing for sure which functions LLVM will decide to +//! inline in the end, we apply a heuristic here: Only functions marked with +//! `#[inline]` are considered for inlining by the partitioner. The current +//! implementation will not try to determine if a function is likely to be +//! inlined by looking at the functions definition. +//! +//! Note though that as a side-effect of creating a codegen units per +//! source-level module, functions from the same module will be available for +//! inlining, even when they are not marked `#[inline]`. 
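
A minimal sketch of the per-module heuristic described above, assuming toy stand-ins (a plain module-path string and a `ToyItem` struct) rather than the real `MonoItem` and `CodegenUnitNameBuilder` types: each item lands either in its module's "stable" CGU or, for monomorphized generics under an incremental build, in the module's "volatile" CGU. The `.volatile` suffix here is purely illustrative.

use std::collections::BTreeMap;

// Toy stand-in for a monomorphization: the defining module and whether it is
// a monomorphized instance of a generic function.
struct ToyItem {
    module_path: &'static str,
    is_generic_instance: bool,
    name: &'static str,
}

// Mirrors the heuristic: two CGUs per source-level module, one for stable
// (non-generic) code and one for volatile code in incremental builds.
fn toy_cgu_name(item: &ToyItem, is_incremental_build: bool) -> String {
    if is_incremental_build && item.is_generic_instance {
        format!("{}.volatile", item.module_path)
    } else {
        item.module_path.to_string()
    }
}

fn main() {
    let items = [
        ToyItem { module_path: "mycrate::util", is_generic_instance: false, name: "helper" },
        ToyItem { module_path: "mycrate::util", is_generic_instance: true, name: "map::<u32>" },
    ];
    let mut cgus: BTreeMap<String, Vec<&str>> = BTreeMap::new();
    for item in &items {
        cgus.entry(toy_cgu_name(item, true)).or_default().push(item.name);
    }
    // Two CGUs result: "mycrate::util" (stable) and "mycrate::util.volatile".
    for (cgu, members) in &cgus {
        println!("{cgu}: {members:?}");
    }
}
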
+ +use std::cmp; +use std::collections::hash_map::Entry; +use std::fs::{self, File}; +use std::io::{BufWriter, Write}; +use std::path::{Path, PathBuf}; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{DefId, DefIdSet, LOCAL_CRATE}; +use rustc_hir::definitions::DefPathDataName; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; +use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; +use rustc_middle::mir; +use rustc_middle::mir::mono::{ + CodegenUnit, CodegenUnitNameBuilder, InstantiationMode, Linkage, MonoItem, Visibility, +}; +use rustc_middle::query::Providers; +use rustc_middle::ty::print::{characteristic_def_id_of_type, with_no_trimmed_paths}; +use rustc_middle::ty::{self, visit::TypeVisitableExt, InstanceDef, TyCtxt}; +use rustc_session::config::{DumpMonoStatsFormat, SwitchWithOptPath}; +use rustc_span::symbol::Symbol; + +use crate::collector::InliningMap; +use crate::collector::{self, MonoItemCollectionMode}; +use crate::errors::{CouldntDumpMonoStats, SymbolAlreadyDefined, UnknownCguCollectionMode}; + +struct PartitioningCx<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + target_cgu_count: usize, + inlining_map: &'a InliningMap<'tcx>, +} + +struct PlacedRootMonoItems<'tcx> { + codegen_units: Vec<CodegenUnit<'tcx>>, + roots: FxHashSet<MonoItem<'tcx>>, + internalization_candidates: FxHashSet<MonoItem<'tcx>>, +} + +fn partition<'tcx, I>( + tcx: TyCtxt<'tcx>, + mono_items: &mut I, + max_cgu_count: usize, + inlining_map: &InliningMap<'tcx>, +) -> Vec<CodegenUnit<'tcx>> +where + I: Iterator<Item = MonoItem<'tcx>>, +{ + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning"); + + let cx = &PartitioningCx { tcx, target_cgu_count: max_cgu_count, inlining_map }; + // In the first step, we place all regular monomorphizations into their + // respective 'home' codegen unit. Regular monomorphizations are all + // functions and statics defined in the local crate. + let PlacedRootMonoItems { mut codegen_units, roots, internalization_candidates } = { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots"); + place_root_mono_items(cx, mono_items) + }; + + for cgu in &mut codegen_units { + cgu.create_size_estimate(tcx); + } + + debug_dump(tcx, "INITIAL PARTITIONING", &codegen_units); + + // Merge until we have at most `max_cgu_count` codegen units. + // `merge_codegen_units` is responsible for updating the CGU size + // estimates. + { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus"); + merge_codegen_units(cx, &mut codegen_units); + debug_dump(tcx, "POST MERGING", &codegen_units); + } + + // In the next step, we use the inlining map to determine which additional + // monomorphizations have to go into each codegen unit. These additional + // monomorphizations can be drop-glue, functions from external crates, and + // local functions the definition of which is marked with `#[inline]`. + let mono_item_placements = { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items"); + place_inlined_mono_items(cx, &mut codegen_units, roots) + }; + + for cgu in &mut codegen_units { + cgu.create_size_estimate(tcx); + } + + debug_dump(tcx, "POST INLINING", &codegen_units); + + // Next we try to make as many symbols "internal" as possible, so LLVM has + // more freedom to optimize. 
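
The rule that this internalization step enforces (implemented by `internalize_symbols` further down in this file) can be stated compactly: a candidate may be made `Internal` only if every item that references it was placed in the same, single CGU. A minimal std-only sketch of that rule, with toy string identifiers standing in for `MonoItem` and `CodegenUnit`:

use std::collections::{HashMap, HashSet};

// Toy model of the internalization decision: an item may be internalized
// only if all of its accessors live in the same (single) CGU as the item.
fn can_internalize(
    item: &str,
    home_cgu: &str,
    placement: &HashMap<&str, &str>,      // item -> CGU it was placed in
    accessors: &HashMap<&str, Vec<&str>>, // item -> items that reference it
    candidates: &HashSet<&str>,
) -> bool {
    if !candidates.contains(item) {
        return false;
    }
    accessors
        .get(item)
        .map(|users| users.iter().all(|u| placement.get(u) == Some(&home_cgu)))
        .unwrap_or(true) // no known accessors: nothing outside can see it
}

fn main() {
    let placement = HashMap::from([("f", "cgu_a"), ("g", "cgu_a"), ("h", "cgu_b")]);
    let accessors = HashMap::from([("f", vec!["g"]), ("g", vec!["h"])]);
    let candidates = HashSet::from(["f", "g"]);
    // `f` is only used from its own CGU, so it can be internalized.
    assert!(can_internalize("f", "cgu_a", &placement, &accessors, &candidates));
    // `g` is used from `cgu_b`, so it must stay externally visible.
    assert!(!can_internalize("g", "cgu_a", &placement, &accessors, &candidates));
    println!("internalization rule holds for the toy example");
}
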
+ if !tcx.sess.link_dead_code() { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols"); + internalize_symbols( + cx, + &mut codegen_units, + mono_item_placements, + internalization_candidates, + ); + } + + let instrument_dead_code = + tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions(); + + if instrument_dead_code { + assert!( + codegen_units.len() > 0, + "There must be at least one CGU that code coverage data can be generated in." + ); + + // Find the smallest CGU that has exported symbols and put the dead + // function stubs in that CGU. We look for exported symbols to increase + // the likelihood the linker won't throw away the dead functions. + // FIXME(#92165): In order to truly resolve this, we need to make sure + // the object file (CGU) containing the dead function stubs is included + // in the final binary. This will probably require forcing these + // function symbols to be included via `-u` or `/include` linker args. + let mut cgus: Vec<_> = codegen_units.iter_mut().collect(); + cgus.sort_by_key(|cgu| cgu.size_estimate()); + + let dead_code_cgu = + if let Some(cgu) = cgus.into_iter().rev().find(|cgu| { + cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External) + }) { + cgu + } else { + // If there are no CGUs that have externally linked items, + // then we just pick the first CGU as a fallback. + &mut codegen_units[0] + }; + dead_code_cgu.make_code_coverage_dead_code_cgu(); + } + + // Finally, sort by codegen unit name, so that we get deterministic results. + codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str())); + + debug_dump(tcx, "FINAL", &codegen_units); + + codegen_units +} + +fn place_root_mono_items<'tcx, I>( + cx: &PartitioningCx<'_, 'tcx>, + mono_items: &mut I, +) -> PlacedRootMonoItems<'tcx> +where + I: Iterator<Item = MonoItem<'tcx>>, +{ + let mut roots = FxHashSet::default(); + let mut codegen_units = FxHashMap::default(); + let is_incremental_build = cx.tcx.sess.opts.incremental.is_some(); + let mut internalization_candidates = FxHashSet::default(); + + // Determine if monomorphizations instantiated in this crate will be made + // available to downstream crates. This depends on whether we are in + // share-generics mode and whether the current crate can even have + // downstream crates. + let export_generics = + cx.tcx.sess.opts.share_generics() && cx.tcx.local_crate_exports_generics(); + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); + let cgu_name_cache = &mut FxHashMap::default(); + + for mono_item in mono_items { + match mono_item.instantiation_mode(cx.tcx) { + InstantiationMode::GloballyShared { .. 
} => {} + InstantiationMode::LocalCopy => continue, + } + + let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item); + let is_volatile = is_incremental_build && mono_item.is_generic_fn(); + + let codegen_unit_name = match characteristic_def_id { + Some(def_id) => compute_codegen_unit_name( + cx.tcx, + cgu_name_builder, + def_id, + is_volatile, + cgu_name_cache, + ), + None => fallback_cgu_name(cgu_name_builder), + }; + + let codegen_unit = codegen_units + .entry(codegen_unit_name) + .or_insert_with(|| CodegenUnit::new(codegen_unit_name)); + + let mut can_be_internalized = true; + let (linkage, visibility) = mono_item_linkage_and_visibility( + cx.tcx, + &mono_item, + &mut can_be_internalized, + export_generics, + ); + if visibility == Visibility::Hidden && can_be_internalized { + internalization_candidates.insert(mono_item); + } + + codegen_unit.items_mut().insert(mono_item, (linkage, visibility)); + roots.insert(mono_item); + } + + // Always ensure we have at least one CGU; otherwise, if we have a + // crate with just types (for example), we could wind up with no CGU. + if codegen_units.is_empty() { + let codegen_unit_name = fallback_cgu_name(cgu_name_builder); + codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name)); + } + + let codegen_units = codegen_units.into_values().collect(); + PlacedRootMonoItems { codegen_units, roots, internalization_candidates } +} + +fn merge_codegen_units<'tcx>( + cx: &PartitioningCx<'_, 'tcx>, + codegen_units: &mut Vec<CodegenUnit<'tcx>>, +) { + assert!(cx.target_cgu_count >= 1); + + // Note that at this point in time the `codegen_units` here may not be + // in a deterministic order (but we know they're deterministically the + // same set). We want this merging to produce a deterministic ordering + // of codegen units from the input. + // + // Due to basically how we've implemented the merging below (merge the + // two smallest into each other) we're sure to start off with a + // deterministic order (sorted by name). This'll mean that if two cgus + // have the same size the stable sort below will keep everything nice + // and deterministic. + codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str())); + + // This map keeps track of what got merged into what. + let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> = + codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect(); + + // Merge the two smallest codegen units until the target size is + // reached. + while codegen_units.len() > cx.target_cgu_count { + // Sort small cgus to the back + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + let mut smallest = codegen_units.pop().unwrap(); + let second_smallest = codegen_units.last_mut().unwrap(); + + // Move the mono-items from `smallest` to `second_smallest` + second_smallest.modify_size_estimate(smallest.size_estimate()); + for (k, v) in smallest.items_mut().drain() { + second_smallest.items_mut().insert(k, v); + } + + // Record that `second_smallest` now contains all the stuff that was + // in `smallest` before. 
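
A condensed, std-only model of this merge loop, assuming a toy `ToyCgu` (name plus size estimate) instead of the real `CodegenUnit`; it keeps the same kind of contents map so the renaming logic further down has something to work from:

use std::collections::HashMap;

// Toy CGU: a name and a size estimate.
#[derive(Debug)]
struct ToyCgu {
    name: String,
    size: usize,
}

// Repeatedly merge the smallest CGU into the second smallest until at most
// `target` CGUs remain, recording which original CGUs each survivor absorbed.
fn merge_smallest(
    mut cgus: Vec<ToyCgu>,
    target: usize,
) -> (Vec<ToyCgu>, HashMap<String, Vec<String>>) {
    assert!(target >= 1);
    // Start from a deterministic order, as the real code does.
    cgus.sort_by(|a, b| a.name.cmp(&b.name));
    let mut contents: HashMap<String, Vec<String>> =
        cgus.iter().map(|c| (c.name.clone(), vec![c.name.clone()])).collect();

    while cgus.len() > target {
        // Sort small CGUs to the back, pop the smallest, and fold it into the
        // new smallest (the previous second smallest).
        cgus.sort_by_key(|c| std::cmp::Reverse(c.size));
        let smallest = cgus.pop().unwrap();
        let second = cgus.last_mut().unwrap();
        second.size += smallest.size;
        let absorbed = contents.remove(&smallest.name).unwrap();
        contents.get_mut(&second.name).unwrap().extend(absorbed);
    }
    (cgus, contents)
}

fn main() {
    let cgus = vec![
        ToyCgu { name: "a".into(), size: 10 },
        ToyCgu { name: "b".into(), size: 1 },
        ToyCgu { name: "c".into(), size: 2 },
    ];
    let (merged, contents) = merge_smallest(cgus, 2);
    // "b" (the smallest) is merged into "c"; the survivor could later be
    // renamed from its contents in incremental mode, or given a numbered name.
    println!("{merged:?}");
    println!("{contents:?}");
}
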
+ let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap(); + cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names); + + debug!( + "CodegenUnit {} merged into CodegenUnit {}", + smallest.name(), + second_smallest.name() + ); + } + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); + + if cx.tcx.sess.opts.incremental.is_some() { + // If we are doing incremental compilation, we want CGU names to + // reflect the path of the source level module they correspond to. + // For CGUs that contain the code of multiple modules because of the + // merging done above, we use a concatenation of the names of all + // contained CGUs. + let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents + .into_iter() + // This `filter` makes sure we only update the name of CGUs that + // were actually modified by merging. + .filter(|(_, cgu_contents)| cgu_contents.len() > 1) + .map(|(current_cgu_name, cgu_contents)| { + let mut cgu_contents: Vec<&str> = cgu_contents.iter().map(|s| s.as_str()).collect(); + + // Sort the names, so things are deterministic and easy to + // predict. We are sorting primitive `&str`s here so we can + // use unstable sort. + cgu_contents.sort_unstable(); + + (current_cgu_name, cgu_contents.join("--")) + }) + .collect(); + + for cgu in codegen_units.iter_mut() { + if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) { + if cx.tcx.sess.opts.unstable_opts.human_readable_cgu_names { + cgu.set_name(Symbol::intern(&new_cgu_name)); + } else { + // If we don't require CGU names to be human-readable, + // we use a fixed length hash of the composite CGU name + // instead. + let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name); + cgu.set_name(Symbol::intern(&new_cgu_name)); + } + } + } + } else { + // If we are compiling non-incrementally we just generate simple CGU + // names containing an index. + for (index, cgu) in codegen_units.iter_mut().enumerate() { + let numbered_codegen_unit_name = + cgu_name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index)); + cgu.set_name(numbered_codegen_unit_name); + } + } +} + +/// For symbol internalization, we need to know whether a symbol/mono-item is +/// accessed from outside the codegen unit it is defined in. This type is used +/// to keep track of that. +#[derive(Clone, PartialEq, Eq, Debug)] +enum MonoItemPlacement { + SingleCgu { cgu_name: Symbol }, + MultipleCgus, +} + +fn place_inlined_mono_items<'tcx>( + cx: &PartitioningCx<'_, 'tcx>, + codegen_units: &mut [CodegenUnit<'tcx>], + roots: FxHashSet<MonoItem<'tcx>>, +) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement> { + let mut mono_item_placements = FxHashMap::default(); + + let single_codegen_unit = codegen_units.len() == 1; + + for old_codegen_unit in codegen_units.iter_mut() { + // Collect all items that need to be available in this codegen unit. + let mut reachable = FxHashSet::default(); + for root in old_codegen_unit.items().keys() { + follow_inlining(*root, cx.inlining_map, &mut reachable); + } + + let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name()); + + // Add all monomorphizations that are not already there. + for mono_item in reachable { + if let Some(linkage) = old_codegen_unit.items().get(&mono_item) { + // This is a root, just copy it over. + new_codegen_unit.items_mut().insert(mono_item, *linkage); + } else { + if roots.contains(&mono_item) { + bug!( + "GloballyShared mono-item inlined into other CGU: \ + {:?}", + mono_item + ); + } + + // This is a CGU-private copy. 
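
The `reachable` set consumed in this loop is the transitive closure that `follow_inlining` (defined at the bottom of this function) computes over the inlining map. A minimal sketch of that closure, assuming a toy map of item names in place of `InliningMap`; anything reached this way that is not a root receives the CGU-private `Internal` copy inserted just below:

use std::collections::{HashMap, HashSet};

// Toy inlining map: each item lists the items it may inline.
type ToyInliningMap<'a> = HashMap<&'a str, Vec<&'a str>>;

// Depth-first transitive closure: everything reachable through inlining must
// be present in the CGU that contains the root.
fn reachable_through_inlining<'a>(
    root: &'a str,
    map: &ToyInliningMap<'a>,
    visited: &mut HashSet<&'a str>,
) {
    if !visited.insert(root) {
        return;
    }
    for &callee in map.get(root).into_iter().flatten() {
        reachable_through_inlining(callee, map, visited);
    }
}

fn main() {
    let map: ToyInliningMap<'_> = HashMap::from([
        ("root_fn", vec!["small_helper"]),
        ("small_helper", vec!["tiny_leaf"]),
    ]);
    let mut reachable = HashSet::new();
    reachable_through_inlining("root_fn", &map, &mut reachable);
    // `small_helper` and `tiny_leaf` are not roots of this toy CGU, so a
    // partitioner following this scheme would add them as CGU-private copies.
    assert_eq!(reachable.len(), 3);
    println!("{reachable:?}");
}
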
+ new_codegen_unit + .items_mut() + .insert(mono_item, (Linkage::Internal, Visibility::Default)); + } + + if !single_codegen_unit { + // If there is more than one codegen unit, we need to keep track + // in which codegen units each monomorphization is placed. + match mono_item_placements.entry(mono_item) { + Entry::Occupied(e) => { + let placement = e.into_mut(); + debug_assert!(match *placement { + MonoItemPlacement::SingleCgu { cgu_name } => { + cgu_name != new_codegen_unit.name() + } + MonoItemPlacement::MultipleCgus => true, + }); + *placement = MonoItemPlacement::MultipleCgus; + } + Entry::Vacant(e) => { + e.insert(MonoItemPlacement::SingleCgu { + cgu_name: new_codegen_unit.name(), + }); + } + } + } + } + + *old_codegen_unit = new_codegen_unit; + } + + return mono_item_placements; + + fn follow_inlining<'tcx>( + mono_item: MonoItem<'tcx>, + inlining_map: &InliningMap<'tcx>, + visited: &mut FxHashSet<MonoItem<'tcx>>, + ) { + if !visited.insert(mono_item) { + return; + } + + inlining_map.with_inlining_candidates(mono_item, |target| { + follow_inlining(target, inlining_map, visited); + }); + } +} + +fn internalize_symbols<'tcx>( + cx: &PartitioningCx<'_, 'tcx>, + codegen_units: &mut [CodegenUnit<'tcx>], + mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>, + internalization_candidates: FxHashSet<MonoItem<'tcx>>, +) { + if codegen_units.len() == 1 { + // Fast path for when there is only one codegen unit. In this case we + // can internalize all candidates, since there is nowhere else they + // could be accessed from. + for cgu in codegen_units { + for candidate in &internalization_candidates { + cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default)); + } + } + + return; + } + + // Build a map from every monomorphization to all the monomorphizations that + // reference it. + let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default(); + cx.inlining_map.iter_accesses(|accessor, accessees| { + for accessee in accessees { + accessor_map.entry(*accessee).or_default().push(accessor); + } + }); + + // For each internalization candidates in each codegen unit, check if it is + // accessed from outside its defining codegen unit. + for cgu in codegen_units { + let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() }; + + for (accessee, linkage_and_visibility) in cgu.items_mut() { + if !internalization_candidates.contains(accessee) { + // This item is no candidate for internalizing, so skip it. + continue; + } + debug_assert_eq!(mono_item_placements[accessee], home_cgu); + + if let Some(accessors) = accessor_map.get(accessee) { + if accessors + .iter() + .filter_map(|accessor| { + // Some accessors might not have been + // instantiated. We can safely ignore those. + mono_item_placements.get(accessor) + }) + .any(|placement| *placement != home_cgu) + { + // Found an accessor from another CGU, so skip to the next + // item without marking this one as internal. + continue; + } + } + + // If we got here, we did not find any accesses from other CGUs, + // so it's fine to make this monomorphization internal. + *linkage_and_visibility = (Linkage::Internal, Visibility::Default); + } + } +} + +fn characteristic_def_id_of_mono_item<'tcx>( + tcx: TyCtxt<'tcx>, + mono_item: MonoItem<'tcx>, +) -> Option<DefId> { + match mono_item { + MonoItem::Fn(instance) => { + let def_id = match instance.def { + ty::InstanceDef::Item(def) => def, + ty::InstanceDef::VTableShim(..) + | ty::InstanceDef::ReifyShim(..) + | ty::InstanceDef::FnPtrShim(..) 
+ | ty::InstanceDef::ClosureOnceShim { .. } + | ty::InstanceDef::Intrinsic(..) + | ty::InstanceDef::DropGlue(..) + | ty::InstanceDef::Virtual(..) + | ty::InstanceDef::CloneShim(..) + | ty::InstanceDef::ThreadLocalShim(..) + | ty::InstanceDef::FnPtrAddrShim(..) => return None, + }; + + // If this is a method, we want to put it into the same module as + // its self-type. If the self-type does not provide a characteristic + // DefId, we use the location of the impl after all. + + if tcx.trait_of_item(def_id).is_some() { + let self_ty = instance.substs.type_at(0); + // This is a default implementation of a trait method. + return characteristic_def_id_of_type(self_ty).or(Some(def_id)); + } + + if let Some(impl_def_id) = tcx.impl_of_method(def_id) { + if tcx.sess.opts.incremental.is_some() + && tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait() + { + // Put `Drop::drop` into the same cgu as `drop_in_place` + // since `drop_in_place` is the only thing that can + // call it. + return None; + } + + // When polymorphization is enabled, methods which do not depend on their generic + // parameters, but the self-type of their impl block do will fail to normalize. + if !tcx.sess.opts.unstable_opts.polymorphize || !instance.has_param() { + // This is a method within an impl, find out what the self-type is: + let impl_self_ty = tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + tcx.type_of(impl_def_id), + ); + if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) { + return Some(def_id); + } + } + } + + Some(def_id) + } + MonoItem::Static(def_id) => Some(def_id), + MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.to_def_id()), + } +} + +fn compute_codegen_unit_name( + tcx: TyCtxt<'_>, + name_builder: &mut CodegenUnitNameBuilder<'_>, + def_id: DefId, + volatile: bool, + cache: &mut CguNameCache, +) -> Symbol { + // Find the innermost module that is not nested within a function. + let mut current_def_id = def_id; + let mut cgu_def_id = None; + // Walk backwards from the item we want to find the module for. + loop { + if current_def_id.is_crate_root() { + if cgu_def_id.is_none() { + // If we have not found a module yet, take the crate root. + cgu_def_id = Some(def_id.krate.as_def_id()); + } + break; + } else if tcx.def_kind(current_def_id) == DefKind::Mod { + if cgu_def_id.is_none() { + cgu_def_id = Some(current_def_id); + } + } else { + // If we encounter something that is not a module, throw away + // any module that we've found so far because we now know that + // it is nested within something else. + cgu_def_id = None; + } + + current_def_id = tcx.parent(current_def_id); + } + + let cgu_def_id = cgu_def_id.unwrap(); + + *cache.entry((cgu_def_id, volatile)).or_insert_with(|| { + let def_path = tcx.def_path(cgu_def_id); + + let components = def_path.data.iter().map(|part| match part.data.name() { + DefPathDataName::Named(name) => name, + DefPathDataName::Anon { .. } => unreachable!(), + }); + + let volatile_suffix = volatile.then_some("volatile"); + + name_builder.build_cgu_name(def_path.krate, components, volatile_suffix) + }) +} + +// Anything we can't find a proper codegen unit for goes into this. 
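
The walk in `compute_codegen_unit_name` above can be illustrated with a small self-contained model, assuming a toy parent table in place of `tcx.parent`/`tcx.def_kind`: keep the innermost module seen so far, discard it whenever a non-module ancestor shows it was nested inside a function, and fall back to the crate root if nothing survives.

use std::collections::HashMap;

#[derive(Clone, Copy)]
enum ToyDefKind { CrateRoot, Mod, Fn }

// Toy def tree: id -> (kind, parent id). The crate root is its own parent.
type DefTree = HashMap<&'static str, (ToyDefKind, &'static str)>;

// Walk from `def_id` to the crate root, returning the innermost module that
// is not nested inside a non-module item (falling back to the crate root).
fn toy_cgu_module(tree: &DefTree, def_id: &'static str) -> &'static str {
    let mut current = def_id;
    let mut candidate = None;
    loop {
        let (kind, parent) = tree[current];
        match kind {
            ToyDefKind::CrateRoot => return candidate.unwrap_or(current),
            ToyDefKind::Mod => candidate = candidate.or(Some(current)),
            // Anything that is not a module invalidates modules found so far.
            _ => candidate = None,
        }
        current = parent;
    }
}

fn main() {
    let tree: DefTree = HashMap::from([
        ("crate", (ToyDefKind::CrateRoot, "crate")),
        ("crate::m", (ToyDefKind::Mod, "crate")),
        ("crate::m::f", (ToyDefKind::Fn, "crate::m")),
        ("crate::m::f::inner_mod", (ToyDefKind::Mod, "crate::m::f")),
        ("crate::m::f::inner_mod::g", (ToyDefKind::Fn, "crate::m::f::inner_mod")),
    ]);
    // `g` lives in a module that is itself nested in a function, so its
    // characteristic module is the outer `crate::m`, not `inner_mod`.
    assert_eq!(toy_cgu_module(&tree, "crate::m::f::inner_mod::g"), "crate::m");
    println!("ok");
}
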
+fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol { + name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu")) +} + +fn mono_item_linkage_and_visibility<'tcx>( + tcx: TyCtxt<'tcx>, + mono_item: &MonoItem<'tcx>, + can_be_internalized: &mut bool, + export_generics: bool, +) -> (Linkage, Visibility) { + if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) { + return (explicit_linkage, Visibility::Default); + } + let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics); + (Linkage::External, vis) +} + +type CguNameCache = FxHashMap<(DefId, bool), Symbol>; + +fn static_visibility<'tcx>( + tcx: TyCtxt<'tcx>, + can_be_internalized: &mut bool, + def_id: DefId, +) -> Visibility { + if tcx.is_reachable_non_generic(def_id) { + *can_be_internalized = false; + default_visibility(tcx, def_id, false) + } else { + Visibility::Hidden + } +} + +fn mono_item_visibility<'tcx>( + tcx: TyCtxt<'tcx>, + mono_item: &MonoItem<'tcx>, + can_be_internalized: &mut bool, + export_generics: bool, +) -> Visibility { + let instance = match mono_item { + // This is pretty complicated; see below. + MonoItem::Fn(instance) => instance, + + // Misc handling for generics and such, but otherwise: + MonoItem::Static(def_id) => return static_visibility(tcx, can_be_internalized, *def_id), + MonoItem::GlobalAsm(item_id) => { + return static_visibility(tcx, can_be_internalized, item_id.owner_id.to_def_id()); + } + }; + + let def_id = match instance.def { + InstanceDef::Item(def_id) | InstanceDef::DropGlue(def_id, Some(_)) => def_id, + + // We match the visibility of statics here + InstanceDef::ThreadLocalShim(def_id) => { + return static_visibility(tcx, can_be_internalized, def_id); + } + + // These are all compiler glue and such, never exported, always hidden. + InstanceDef::VTableShim(..) + | InstanceDef::ReifyShim(..) + | InstanceDef::FnPtrShim(..) + | InstanceDef::Virtual(..) + | InstanceDef::Intrinsic(..) + | InstanceDef::ClosureOnceShim { .. } + | InstanceDef::DropGlue(..) + | InstanceDef::CloneShim(..) + | InstanceDef::FnPtrAddrShim(..) => return Visibility::Hidden, + }; + + // The `start_fn` lang item is actually a monomorphized instance of a + // function in the standard library, used for the `main` function. We don't + // want to export it so we tag it with `Hidden` visibility but this symbol + // is only referenced from the actual `main` symbol which we unfortunately + // don't know anything about during partitioning/collection. As a result we + // forcibly keep this symbol out of the `internalization_candidates` set. + // + // FIXME: eventually we don't want to always force this symbol to have + // hidden visibility, it should indeed be a candidate for + // internalization, but we have to understand that it's referenced + // from the `main` symbol we'll generate later. + // + // This may be fixable with a new `InstanceDef` perhaps? Unsure! + if tcx.lang_items().start_fn() == Some(def_id) { + *can_be_internalized = false; + return Visibility::Hidden; + } + + let is_generic = instance.substs.non_erasable_generics().next().is_some(); + + // Upstream `DefId` instances get different handling than local ones. + let Some(def_id) = def_id.as_local() else { + return if export_generics && is_generic { + // If it is an upstream monomorphization and we export generics, we must make + // it available to downstream crates. 
+ *can_be_internalized = false; + default_visibility(tcx, def_id, true) + } else { + Visibility::Hidden + }; + }; + + if is_generic { + if export_generics { + if tcx.is_unreachable_local_definition(def_id) { + // This instance cannot be used from another crate. + Visibility::Hidden + } else { + // This instance might be useful in a downstream crate. + *can_be_internalized = false; + default_visibility(tcx, def_id.to_def_id(), true) + } + } else { + // We are not exporting generics or the definition is not reachable + // for downstream crates, we can internalize its instantiations. + Visibility::Hidden + } + } else { + // If this isn't a generic function then we mark this a `Default` if + // this is a reachable item, meaning that it's a symbol other crates may + // access when they link to us. + if tcx.is_reachable_non_generic(def_id.to_def_id()) { + *can_be_internalized = false; + debug_assert!(!is_generic); + return default_visibility(tcx, def_id.to_def_id(), false); + } + + // If this isn't reachable then we're gonna tag this with `Hidden` + // visibility. In some situations though we'll want to prevent this + // symbol from being internalized. + // + // There's two categories of items here: + // + // * First is weak lang items. These are basically mechanisms for + // libcore to forward-reference symbols defined later in crates like + // the standard library or `#[panic_handler]` definitions. The + // definition of these weak lang items needs to be referencable by + // libcore, so we're no longer a candidate for internalization. + // Removal of these functions can't be done by LLVM but rather must be + // done by the linker as it's a non-local decision. + // + // * Second is "std internal symbols". Currently this is primarily used + // for allocator symbols. Allocators are a little weird in their + // implementation, but the idea is that the compiler, at the last + // minute, defines an allocator with an injected object file. The + // `alloc` crate references these symbols (`__rust_alloc`) and the + // definition doesn't get hooked up until a linked crate artifact is + // generated. + // + // The symbols synthesized by the compiler (`__rust_alloc`) are thin + // veneers around the actual implementation, some other symbol which + // implements the same ABI. These symbols (things like `__rg_alloc`, + // `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std + // internal symbols". + // + // The std-internal symbols here **should not show up in a dll as an + // exported interface**, so they return `false` from + // `is_reachable_non_generic` above and we'll give them `Hidden` + // visibility below. Like the weak lang items, though, we can't let + // LLVM internalize them as this decision is left up to the linker to + // omit them, so prevent them from being internalized. + let attrs = tcx.codegen_fn_attrs(def_id); + if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) { + *can_be_internalized = false; + } + + Visibility::Hidden + } +} + +fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility { + if !tcx.sess.target.default_hidden_visibility { + return Visibility::Default; + } + + // Generic functions never have export-level C. + if is_generic { + return Visibility::Hidden; + } + + // Things with export level C don't get instantiated in + // downstream crates. + if !id.is_local() { + return Visibility::Hidden; + } + + // C-export level items remain at `Default`, all other internal + // items become `Hidden`. 
+ match tcx.reachable_non_generics(id.krate).get(&id) { + Some(SymbolExportInfo { level: SymbolExportLevel::C, .. }) => Visibility::Default, + _ => Visibility::Hidden, + } +} +fn debug_dump<'a, 'tcx: 'a>(tcx: TyCtxt<'tcx>, label: &str, cgus: &[CodegenUnit<'tcx>]) { + let dump = move || { + use std::fmt::Write; + + let num_cgus = cgus.len(); + let max = cgus.iter().map(|cgu| cgu.size_estimate()).max().unwrap(); + let min = cgus.iter().map(|cgu| cgu.size_estimate()).min().unwrap(); + let ratio = max as f64 / min as f64; + + let s = &mut String::new(); + let _ = writeln!( + s, + "{label} ({num_cgus} CodegenUnits, max={max}, min={min}, max/min={ratio:.1}):" + ); + for cgu in cgus { + let _ = + writeln!(s, "CodegenUnit {} estimated size {}:", cgu.name(), cgu.size_estimate()); + + for (mono_item, linkage) in cgu.items() { + let symbol_name = mono_item.symbol_name(tcx).name; + let symbol_hash_start = symbol_name.rfind('h'); + let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]); + + let _ = with_no_trimmed_paths!(writeln!( + s, + " - {} [{:?}] [{}] estimated size {}", + mono_item, + linkage, + symbol_hash, + mono_item.size_estimate(tcx) + )); + } + + let _ = writeln!(s); + } + + std::mem::take(s) + }; + + debug!("{}", dump()); +} + +#[inline(never)] // give this a place in the profiler +fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I) +where + I: Iterator<Item = &'a MonoItem<'tcx>>, + 'tcx: 'a, +{ + let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct"); + + let mut symbols: Vec<_> = + mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect(); + + symbols.sort_by_key(|sym| sym.1); + + for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() { + if sym1 == sym2 { + let span1 = mono_item1.local_span(tcx); + let span2 = mono_item2.local_span(tcx); + + // Deterministically select one of the spans for error reporting + let span = match (span1, span2) { + (Some(span1), Some(span2)) => { + Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 }) + } + (span1, span2) => span1.or(span2), + }; + + tcx.sess.emit_fatal(SymbolAlreadyDefined { span, symbol: sym1.to_string() }); + } + } +} + +fn collect_and_partition_mono_items(tcx: TyCtxt<'_>, (): ()) -> (&DefIdSet, &[CodegenUnit<'_>]) { + let collection_mode = match tcx.sess.opts.unstable_opts.print_mono_items { + Some(ref s) => { + let mode = s.to_lowercase(); + let mode = mode.trim(); + if mode == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode != "lazy" { + tcx.sess.emit_warning(UnknownCguCollectionMode { mode }); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.link_dead_code() { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode); + + tcx.sess.abort_if_errors(); + + let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || { + sync::join( + || { + let mut codegen_units = partition( + tcx, + &mut items.iter().copied(), + tcx.sess.codegen_units(), + &inlining_map, + ); + codegen_units[0].make_primary(); + &*tcx.arena.alloc_from_iter(codegen_units) + }, + || assert_symbols_are_distinct(tcx, items.iter()), + ) + }); + + if tcx.prof.enabled() { + // Record CGU size estimates for self-profiling. 
+ for cgu in codegen_units { + tcx.prof.artifact_size( + "codegen_unit_size_estimate", + cgu.name().as_str(), + cgu.size_estimate() as u64, + ); + } + } + + let mono_items: DefIdSet = items + .iter() + .filter_map(|mono_item| match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + }) + .collect(); + + // Output monomorphization stats per def_id + if let SwitchWithOptPath::Enabled(ref path) = tcx.sess.opts.unstable_opts.dump_mono_stats { + if let Err(err) = + dump_mono_items_stats(tcx, &codegen_units, path, tcx.crate_name(LOCAL_CRATE)) + { + tcx.sess.emit_fatal(CouldntDumpMonoStats { error: err.to_string() }); + } + } + + if tcx.sess.opts.unstable_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); + + for cgu in codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = with_no_trimmed_paths!(i.to_string()); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.sort_by_key(|(name, _)| *name); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push(' '); + output.push_str(cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push('['); + output.push_str(linkage_abbrev); + output.push(']'); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {item}"); + } + } + + (tcx.arena.alloc(mono_items), codegen_units) +} + +/// Outputs stats about instantiation counts and estimated size, per `MonoItem`'s +/// def, to a file in the given output directory. 
+fn dump_mono_items_stats<'tcx>( + tcx: TyCtxt<'tcx>, + codegen_units: &[CodegenUnit<'tcx>], + output_directory: &Option<PathBuf>, + crate_name: Symbol, +) -> Result<(), Box<dyn std::error::Error>> { + let output_directory = if let Some(ref directory) = output_directory { + fs::create_dir_all(directory)?; + directory + } else { + Path::new(".") + }; + + let format = tcx.sess.opts.unstable_opts.dump_mono_stats_format; + let ext = format.extension(); + let filename = format!("{crate_name}.mono_items.{ext}"); + let output_path = output_directory.join(&filename); + let file = File::create(&output_path)?; + let mut file = BufWriter::new(file); + + // Gather instantiated mono items grouped by def_id + let mut items_per_def_id: FxHashMap<_, Vec<_>> = Default::default(); + for cgu in codegen_units { + for (&mono_item, _) in cgu.items() { + // Avoid variable-sized compiler-generated shims + if mono_item.is_user_defined() { + items_per_def_id.entry(mono_item.def_id()).or_default().push(mono_item); + } + } + } + + #[derive(serde::Serialize)] + struct MonoItem { + name: String, + instantiation_count: usize, + size_estimate: usize, + total_estimate: usize, + } + + // Output stats sorted by total instantiated size, from heaviest to lightest + let mut stats: Vec<_> = items_per_def_id + .into_iter() + .map(|(def_id, items)| { + let name = with_no_trimmed_paths!(tcx.def_path_str(def_id)); + let instantiation_count = items.len(); + let size_estimate = items[0].size_estimate(tcx); + let total_estimate = instantiation_count * size_estimate; + MonoItem { name, instantiation_count, size_estimate, total_estimate } + }) + .collect(); + stats.sort_unstable_by_key(|item| cmp::Reverse(item.total_estimate)); + + if !stats.is_empty() { + match format { + DumpMonoStatsFormat::Json => serde_json::to_writer(file, &stats)?, + DumpMonoStatsFormat::Markdown => { + writeln!( + file, + "| Item | Instantiation count | Estimated Cost Per Instantiation | Total Estimated Cost |" + )?; + writeln!(file, "| --- | ---: | ---: | ---: |")?; + + for MonoItem { name, instantiation_count, size_estimate, total_estimate } in stats { + writeln!( + file, + "| `{name}` | {instantiation_count} | {size_estimate} | {total_estimate} |" + )?; + } + } + } + } + + Ok(()) +} + +fn codegened_and_inlined_items(tcx: TyCtxt<'_>, (): ()) -> &DefIdSet { + let (items, cgus) = tcx.collect_and_partition_mono_items(()); + let mut visited = DefIdSet::default(); + let mut result = items.clone(); + + for cgu in cgus { + for (item, _) in cgu.items() { + if let MonoItem::Fn(ref instance) = item { + let did = instance.def_id(); + if !visited.insert(did) { + continue; + } + let body = tcx.instance_mir(instance.def); + for block in body.basic_blocks.iter() { + for statement in &block.statements { + let mir::StatementKind::Coverage(_) = statement.kind else { continue }; + let scope = statement.source_info.scope; + if let Some(inlined) = scope.inlined_instance(&body.source_scopes) { + result.insert(inlined.def_id()); + } + } + } + } + } + } + + tcx.arena.alloc(result) +} + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = collect_and_partition_mono_items; + providers.codegened_and_inlined_items = codegened_and_inlined_items; + + providers.is_codegened_item = |tcx, def_id| { + let (all_mono_items, _) = tcx.collect_and_partition_mono_items(()); + all_mono_items.contains(&def_id) + }; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_mono_items(()); + all.iter() + .find(|cgu| cgu.name() == name) + 
.unwrap_or_else(|| panic!("failed to find cgu with name {name:?}")) + }; +} diff --git a/compiler/rustc_monomorphize/src/partitioning/default.rs b/compiler/rustc_monomorphize/src/partitioning/default.rs deleted file mode 100644 index 603b3ddc106..00000000000 --- a/compiler/rustc_monomorphize/src/partitioning/default.rs +++ /dev/null @@ -1,644 +0,0 @@ -use std::cmp; -use std::collections::hash_map::Entry; - -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::{DefId, LOCAL_CRATE}; -use rustc_hir::definitions::DefPathDataName; -use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; -use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; -use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, Linkage, Visibility}; -use rustc_middle::mir::mono::{InstantiationMode, MonoItem}; -use rustc_middle::ty::print::characteristic_def_id_of_type; -use rustc_middle::ty::{self, visit::TypeVisitableExt, InstanceDef, TyCtxt}; -use rustc_span::symbol::Symbol; - -use super::PartitioningCx; -use crate::collector::InliningMap; -use crate::partitioning::{MonoItemPlacement, Partition, PlacedRootMonoItems}; - -pub struct DefaultPartitioning; - -impl<'tcx> Partition<'tcx> for DefaultPartitioning { - fn place_root_mono_items<I>( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - mono_items: &mut I, - ) -> PlacedRootMonoItems<'tcx> - where - I: Iterator<Item = MonoItem<'tcx>>, - { - let mut roots = FxHashSet::default(); - let mut codegen_units = FxHashMap::default(); - let is_incremental_build = cx.tcx.sess.opts.incremental.is_some(); - let mut internalization_candidates = FxHashSet::default(); - - // Determine if monomorphizations instantiated in this crate will be made - // available to downstream crates. This depends on whether we are in - // share-generics mode and whether the current crate can even have - // downstream crates. - let export_generics = - cx.tcx.sess.opts.share_generics() && cx.tcx.local_crate_exports_generics(); - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); - let cgu_name_cache = &mut FxHashMap::default(); - - for mono_item in mono_items { - match mono_item.instantiation_mode(cx.tcx) { - InstantiationMode::GloballyShared { .. } => {} - InstantiationMode::LocalCopy => continue, - } - - let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item); - let is_volatile = is_incremental_build && mono_item.is_generic_fn(); - - let codegen_unit_name = match characteristic_def_id { - Some(def_id) => compute_codegen_unit_name( - cx.tcx, - cgu_name_builder, - def_id, - is_volatile, - cgu_name_cache, - ), - None => fallback_cgu_name(cgu_name_builder), - }; - - let codegen_unit = codegen_units - .entry(codegen_unit_name) - .or_insert_with(|| CodegenUnit::new(codegen_unit_name)); - - let mut can_be_internalized = true; - let (linkage, visibility) = mono_item_linkage_and_visibility( - cx.tcx, - &mono_item, - &mut can_be_internalized, - export_generics, - ); - if visibility == Visibility::Hidden && can_be_internalized { - internalization_candidates.insert(mono_item); - } - - codegen_unit.items_mut().insert(mono_item, (linkage, visibility)); - roots.insert(mono_item); - } - - // Always ensure we have at least one CGU; otherwise, if we have a - // crate with just types (for example), we could wind up with no CGU. 
- if codegen_units.is_empty() { - let codegen_unit_name = fallback_cgu_name(cgu_name_builder); - codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name)); - } - - let codegen_units = codegen_units.into_values().collect(); - PlacedRootMonoItems { codegen_units, roots, internalization_candidates } - } - - fn merge_codegen_units( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut Vec<CodegenUnit<'tcx>>, - ) { - assert!(cx.target_cgu_count >= 1); - - // Note that at this point in time the `codegen_units` here may not be - // in a deterministic order (but we know they're deterministically the - // same set). We want this merging to produce a deterministic ordering - // of codegen units from the input. - // - // Due to basically how we've implemented the merging below (merge the - // two smallest into each other) we're sure to start off with a - // deterministic order (sorted by name). This'll mean that if two cgus - // have the same size the stable sort below will keep everything nice - // and deterministic. - codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str())); - - // This map keeps track of what got merged into what. - let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> = - codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect(); - - // Merge the two smallest codegen units until the target size is - // reached. - while codegen_units.len() > cx.target_cgu_count { - // Sort small cgus to the back - codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); - let mut smallest = codegen_units.pop().unwrap(); - let second_smallest = codegen_units.last_mut().unwrap(); - - // Move the mono-items from `smallest` to `second_smallest` - second_smallest.modify_size_estimate(smallest.size_estimate()); - for (k, v) in smallest.items_mut().drain() { - second_smallest.items_mut().insert(k, v); - } - - // Record that `second_smallest` now contains all the stuff that was - // in `smallest` before. - let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap(); - cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names); - - debug!( - "CodegenUnit {} merged into CodegenUnit {}", - smallest.name(), - second_smallest.name() - ); - } - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); - - if cx.tcx.sess.opts.incremental.is_some() { - // If we are doing incremental compilation, we want CGU names to - // reflect the path of the source level module they correspond to. - // For CGUs that contain the code of multiple modules because of the - // merging done above, we use a concatenation of the names of all - // contained CGUs. - let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents - .into_iter() - // This `filter` makes sure we only update the name of CGUs that - // were actually modified by merging. - .filter(|(_, cgu_contents)| cgu_contents.len() > 1) - .map(|(current_cgu_name, cgu_contents)| { - let mut cgu_contents: Vec<&str> = - cgu_contents.iter().map(|s| s.as_str()).collect(); - - // Sort the names, so things are deterministic and easy to - // predict. We are sorting primitive `&str`s here so we can - // use unstable sort. 
- cgu_contents.sort_unstable(); - - (current_cgu_name, cgu_contents.join("--")) - }) - .collect(); - - for cgu in codegen_units.iter_mut() { - if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) { - if cx.tcx.sess.opts.unstable_opts.human_readable_cgu_names { - cgu.set_name(Symbol::intern(&new_cgu_name)); - } else { - // If we don't require CGU names to be human-readable, - // we use a fixed length hash of the composite CGU name - // instead. - let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name); - cgu.set_name(Symbol::intern(&new_cgu_name)); - } - } - } - } else { - // If we are compiling non-incrementally we just generate simple CGU - // names containing an index. - for (index, cgu) in codegen_units.iter_mut().enumerate() { - let numbered_codegen_unit_name = - cgu_name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index)); - cgu.set_name(numbered_codegen_unit_name); - } - } - } - - fn place_inlined_mono_items( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - roots: FxHashSet<MonoItem<'tcx>>, - ) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement> { - let mut mono_item_placements = FxHashMap::default(); - - let single_codegen_unit = codegen_units.len() == 1; - - for old_codegen_unit in codegen_units.iter_mut() { - // Collect all items that need to be available in this codegen unit. - let mut reachable = FxHashSet::default(); - for root in old_codegen_unit.items().keys() { - follow_inlining(*root, cx.inlining_map, &mut reachable); - } - - let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name()); - - // Add all monomorphizations that are not already there. - for mono_item in reachable { - if let Some(linkage) = old_codegen_unit.items().get(&mono_item) { - // This is a root, just copy it over. - new_codegen_unit.items_mut().insert(mono_item, *linkage); - } else { - if roots.contains(&mono_item) { - bug!( - "GloballyShared mono-item inlined into other CGU: \ - {:?}", - mono_item - ); - } - - // This is a CGU-private copy. - new_codegen_unit - .items_mut() - .insert(mono_item, (Linkage::Internal, Visibility::Default)); - } - - if !single_codegen_unit { - // If there is more than one codegen unit, we need to keep track - // in which codegen units each monomorphization is placed. - match mono_item_placements.entry(mono_item) { - Entry::Occupied(e) => { - let placement = e.into_mut(); - debug_assert!(match *placement { - MonoItemPlacement::SingleCgu { cgu_name } => { - cgu_name != new_codegen_unit.name() - } - MonoItemPlacement::MultipleCgus => true, - }); - *placement = MonoItemPlacement::MultipleCgus; - } - Entry::Vacant(e) => { - e.insert(MonoItemPlacement::SingleCgu { - cgu_name: new_codegen_unit.name(), - }); - } - } - } - } - - *old_codegen_unit = new_codegen_unit; - } - - return mono_item_placements; - - fn follow_inlining<'tcx>( - mono_item: MonoItem<'tcx>, - inlining_map: &InliningMap<'tcx>, - visited: &mut FxHashSet<MonoItem<'tcx>>, - ) { - if !visited.insert(mono_item) { - return; - } - - inlining_map.with_inlining_candidates(mono_item, |target| { - follow_inlining(target, inlining_map, visited); - }); - } - } - - fn internalize_symbols( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>, - internalization_candidates: FxHashSet<MonoItem<'tcx>>, - ) { - if codegen_units.len() == 1 { - // Fast path for when there is only one codegen unit. 
In this case we - // can internalize all candidates, since there is nowhere else they - // could be accessed from. - for cgu in codegen_units { - for candidate in &internalization_candidates { - cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default)); - } - } - - return; - } - - // Build a map from every monomorphization to all the monomorphizations that - // reference it. - let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default(); - cx.inlining_map.iter_accesses(|accessor, accessees| { - for accessee in accessees { - accessor_map.entry(*accessee).or_default().push(accessor); - } - }); - - // For each internalization candidates in each codegen unit, check if it is - // accessed from outside its defining codegen unit. - for cgu in codegen_units { - let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() }; - - for (accessee, linkage_and_visibility) in cgu.items_mut() { - if !internalization_candidates.contains(accessee) { - // This item is no candidate for internalizing, so skip it. - continue; - } - debug_assert_eq!(mono_item_placements[accessee], home_cgu); - - if let Some(accessors) = accessor_map.get(accessee) { - if accessors - .iter() - .filter_map(|accessor| { - // Some accessors might not have been - // instantiated. We can safely ignore those. - mono_item_placements.get(accessor) - }) - .any(|placement| *placement != home_cgu) - { - // Found an accessor from another CGU, so skip to the next - // item without marking this one as internal. - continue; - } - } - - // If we got here, we did not find any accesses from other CGUs, - // so it's fine to make this monomorphization internal. - *linkage_and_visibility = (Linkage::Internal, Visibility::Default); - } - } - } -} - -fn characteristic_def_id_of_mono_item<'tcx>( - tcx: TyCtxt<'tcx>, - mono_item: MonoItem<'tcx>, -) -> Option<DefId> { - match mono_item { - MonoItem::Fn(instance) => { - let def_id = match instance.def { - ty::InstanceDef::Item(def) => def, - ty::InstanceDef::VTableShim(..) - | ty::InstanceDef::ReifyShim(..) - | ty::InstanceDef::FnPtrShim(..) - | ty::InstanceDef::ClosureOnceShim { .. } - | ty::InstanceDef::Intrinsic(..) - | ty::InstanceDef::DropGlue(..) - | ty::InstanceDef::Virtual(..) - | ty::InstanceDef::CloneShim(..) - | ty::InstanceDef::ThreadLocalShim(..) - | ty::InstanceDef::FnPtrAddrShim(..) => return None, - }; - - // If this is a method, we want to put it into the same module as - // its self-type. If the self-type does not provide a characteristic - // DefId, we use the location of the impl after all. - - if tcx.trait_of_item(def_id).is_some() { - let self_ty = instance.substs.type_at(0); - // This is a default implementation of a trait method. - return characteristic_def_id_of_type(self_ty).or(Some(def_id)); - } - - if let Some(impl_def_id) = tcx.impl_of_method(def_id) { - if tcx.sess.opts.incremental.is_some() - && tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait() - { - // Put `Drop::drop` into the same cgu as `drop_in_place` - // since `drop_in_place` is the only thing that can - // call it. - return None; - } - - // When polymorphization is enabled, methods which do not depend on their generic - // parameters, but the self-type of their impl block do will fail to normalize. 
- if !tcx.sess.opts.unstable_opts.polymorphize || !instance.has_param() { - // This is a method within an impl, find out what the self-type is: - let impl_self_ty = tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - tcx.type_of(impl_def_id), - ); - if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) { - return Some(def_id); - } - } - } - - Some(def_id) - } - MonoItem::Static(def_id) => Some(def_id), - MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.to_def_id()), - } -} - -fn compute_codegen_unit_name( - tcx: TyCtxt<'_>, - name_builder: &mut CodegenUnitNameBuilder<'_>, - def_id: DefId, - volatile: bool, - cache: &mut CguNameCache, -) -> Symbol { - // Find the innermost module that is not nested within a function. - let mut current_def_id = def_id; - let mut cgu_def_id = None; - // Walk backwards from the item we want to find the module for. - loop { - if current_def_id.is_crate_root() { - if cgu_def_id.is_none() { - // If we have not found a module yet, take the crate root. - cgu_def_id = Some(def_id.krate.as_def_id()); - } - break; - } else if tcx.def_kind(current_def_id) == DefKind::Mod { - if cgu_def_id.is_none() { - cgu_def_id = Some(current_def_id); - } - } else { - // If we encounter something that is not a module, throw away - // any module that we've found so far because we now know that - // it is nested within something else. - cgu_def_id = None; - } - - current_def_id = tcx.parent(current_def_id); - } - - let cgu_def_id = cgu_def_id.unwrap(); - - *cache.entry((cgu_def_id, volatile)).or_insert_with(|| { - let def_path = tcx.def_path(cgu_def_id); - - let components = def_path.data.iter().map(|part| match part.data.name() { - DefPathDataName::Named(name) => name, - DefPathDataName::Anon { .. } => unreachable!(), - }); - - let volatile_suffix = volatile.then_some("volatile"); - - name_builder.build_cgu_name(def_path.krate, components, volatile_suffix) - }) -} - -// Anything we can't find a proper codegen unit for goes into this. -fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol { - name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu")) -} - -fn mono_item_linkage_and_visibility<'tcx>( - tcx: TyCtxt<'tcx>, - mono_item: &MonoItem<'tcx>, - can_be_internalized: &mut bool, - export_generics: bool, -) -> (Linkage, Visibility) { - if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) { - return (explicit_linkage, Visibility::Default); - } - let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics); - (Linkage::External, vis) -} - -type CguNameCache = FxHashMap<(DefId, bool), Symbol>; - -fn static_visibility<'tcx>( - tcx: TyCtxt<'tcx>, - can_be_internalized: &mut bool, - def_id: DefId, -) -> Visibility { - if tcx.is_reachable_non_generic(def_id) { - *can_be_internalized = false; - default_visibility(tcx, def_id, false) - } else { - Visibility::Hidden - } -} - -fn mono_item_visibility<'tcx>( - tcx: TyCtxt<'tcx>, - mono_item: &MonoItem<'tcx>, - can_be_internalized: &mut bool, - export_generics: bool, -) -> Visibility { - let instance = match mono_item { - // This is pretty complicated; see below. 
- MonoItem::Fn(instance) => instance, - - // Misc handling for generics and such, but otherwise: - MonoItem::Static(def_id) => return static_visibility(tcx, can_be_internalized, *def_id), - MonoItem::GlobalAsm(item_id) => { - return static_visibility(tcx, can_be_internalized, item_id.owner_id.to_def_id()); - } - }; - - let def_id = match instance.def { - InstanceDef::Item(def_id) | InstanceDef::DropGlue(def_id, Some(_)) => def_id, - - // We match the visibility of statics here - InstanceDef::ThreadLocalShim(def_id) => { - return static_visibility(tcx, can_be_internalized, def_id); - } - - // These are all compiler glue and such, never exported, always hidden. - InstanceDef::VTableShim(..) - | InstanceDef::ReifyShim(..) - | InstanceDef::FnPtrShim(..) - | InstanceDef::Virtual(..) - | InstanceDef::Intrinsic(..) - | InstanceDef::ClosureOnceShim { .. } - | InstanceDef::DropGlue(..) - | InstanceDef::CloneShim(..) - | InstanceDef::FnPtrAddrShim(..) => return Visibility::Hidden, - }; - - // The `start_fn` lang item is actually a monomorphized instance of a - // function in the standard library, used for the `main` function. We don't - // want to export it so we tag it with `Hidden` visibility but this symbol - // is only referenced from the actual `main` symbol which we unfortunately - // don't know anything about during partitioning/collection. As a result we - // forcibly keep this symbol out of the `internalization_candidates` set. - // - // FIXME: eventually we don't want to always force this symbol to have - // hidden visibility, it should indeed be a candidate for - // internalization, but we have to understand that it's referenced - // from the `main` symbol we'll generate later. - // - // This may be fixable with a new `InstanceDef` perhaps? Unsure! - if tcx.lang_items().start_fn() == Some(def_id) { - *can_be_internalized = false; - return Visibility::Hidden; - } - - let is_generic = instance.substs.non_erasable_generics().next().is_some(); - - // Upstream `DefId` instances get different handling than local ones. - let Some(def_id) = def_id.as_local() else { - return if export_generics && is_generic { - // If it is an upstream monomorphization and we export generics, we must make - // it available to downstream crates. - *can_be_internalized = false; - default_visibility(tcx, def_id, true) - } else { - Visibility::Hidden - }; - }; - - if is_generic { - if export_generics { - if tcx.is_unreachable_local_definition(def_id) { - // This instance cannot be used from another crate. - Visibility::Hidden - } else { - // This instance might be useful in a downstream crate. - *can_be_internalized = false; - default_visibility(tcx, def_id.to_def_id(), true) - } - } else { - // We are not exporting generics or the definition is not reachable - // for downstream crates, we can internalize its instantiations. - Visibility::Hidden - } - } else { - // If this isn't a generic function then we mark this a `Default` if - // this is a reachable item, meaning that it's a symbol other crates may - // access when they link to us. - if tcx.is_reachable_non_generic(def_id.to_def_id()) { - *can_be_internalized = false; - debug_assert!(!is_generic); - return default_visibility(tcx, def_id.to_def_id(), false); - } - - // If this isn't reachable then we're gonna tag this with `Hidden` - // visibility. In some situations though we'll want to prevent this - // symbol from being internalized. - // - // There's two categories of items here: - // - // * First is weak lang items. 
These are basically mechanisms for - // libcore to forward-reference symbols defined later in crates like - // the standard library or `#[panic_handler]` definitions. The - // definition of these weak lang items needs to be referencable by - // libcore, so we're no longer a candidate for internalization. - // Removal of these functions can't be done by LLVM but rather must be - // done by the linker as it's a non-local decision. - // - // * Second is "std internal symbols". Currently this is primarily used - // for allocator symbols. Allocators are a little weird in their - // implementation, but the idea is that the compiler, at the last - // minute, defines an allocator with an injected object file. The - // `alloc` crate references these symbols (`__rust_alloc`) and the - // definition doesn't get hooked up until a linked crate artifact is - // generated. - // - // The symbols synthesized by the compiler (`__rust_alloc`) are thin - // veneers around the actual implementation, some other symbol which - // implements the same ABI. These symbols (things like `__rg_alloc`, - // `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std - // internal symbols". - // - // The std-internal symbols here **should not show up in a dll as an - // exported interface**, so they return `false` from - // `is_reachable_non_generic` above and we'll give them `Hidden` - // visibility below. Like the weak lang items, though, we can't let - // LLVM internalize them as this decision is left up to the linker to - // omit them, so prevent them from being internalized. - let attrs = tcx.codegen_fn_attrs(def_id); - if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) { - *can_be_internalized = false; - } - - Visibility::Hidden - } -} - -fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility { - if !tcx.sess.target.default_hidden_visibility { - return Visibility::Default; - } - - // Generic functions never have export-level C. - if is_generic { - return Visibility::Hidden; - } - - // Things with export level C don't get instantiated in - // downstream crates. - if !id.is_local() { - return Visibility::Hidden; - } - - // C-export level items remain at `Default`, all other internal - // items become `Hidden`. - match tcx.reachable_non_generics(id.krate).get(&id) { - Some(SymbolExportInfo { level: SymbolExportLevel::C, .. }) => Visibility::Default, - _ => Visibility::Hidden, - } -} diff --git a/compiler/rustc_monomorphize/src/partitioning/mod.rs b/compiler/rustc_monomorphize/src/partitioning/mod.rs deleted file mode 100644 index d0b23ca9ea4..00000000000 --- a/compiler/rustc_monomorphize/src/partitioning/mod.rs +++ /dev/null @@ -1,673 +0,0 @@ -//! Partitioning Codegen Units for Incremental Compilation -//! ====================================================== -//! -//! The task of this module is to take the complete set of monomorphizations of -//! a crate and produce a set of codegen units from it, where a codegen unit -//! is a named set of (mono-item, linkage) pairs. That is, this module -//! decides which monomorphization appears in which codegen units with which -//! linkage. The following paragraphs describe some of the background on the -//! partitioning scheme. -//! -//! The most important opportunity for saving on compilation time with -//! incremental compilation is to avoid re-codegenning and re-optimizing code. -//! Since the unit of codegen and optimization for LLVM is "modules" or, how -//! 
we call them "codegen units", the particulars of how much time can be saved -//! by incremental compilation are tightly linked to how the output program is -//! partitioned into these codegen units prior to passing it to LLVM -- -//! especially because we have to treat codegen units as opaque entities once -//! they are created: There is no way for us to incrementally update an existing -//! LLVM module and so we have to build any such module from scratch if it was -//! affected by some change in the source code. -//! -//! From that point of view it would make sense to maximize the number of -//! codegen units by, for example, putting each function into its own module. -//! That way only those modules would have to be re-compiled that were actually -//! affected by some change, minimizing the number of functions that could have -//! been re-used but just happened to be located in a module that is -//! re-compiled. -//! -//! However, since LLVM optimization does not work across module boundaries, -//! using such a highly granular partitioning would lead to very slow runtime -//! code since it would effectively prohibit inlining and other inter-procedure -//! optimizations. We want to avoid that as much as possible. -//! -//! Thus we end up with a trade-off: The bigger the codegen units, the better -//! LLVM's optimizer can do its work, but also the smaller the compilation time -//! reduction we get from incremental compilation. -//! -//! Ideally, we would create a partitioning such that there are few big codegen -//! units with few interdependencies between them. For now though, we use the -//! following heuristic to determine the partitioning: -//! -//! - There are two codegen units for every source-level module: -//! - One for "stable", that is non-generic, code -//! - One for more "volatile" code, i.e., monomorphized instances of functions -//! defined in that module -//! -//! In order to see why this heuristic makes sense, let's take a look at when a -//! codegen unit can get invalidated: -//! -//! 1. The most straightforward case is when the BODY of a function or global -//! changes. Then any codegen unit containing the code for that item has to be -//! re-compiled. Note that this includes all codegen units where the function -//! has been inlined. -//! -//! 2. The next case is when the SIGNATURE of a function or global changes. In -//! this case, all codegen units containing a REFERENCE to that item have to be -//! re-compiled. This is a superset of case 1. -//! -//! 3. The final and most subtle case is when a REFERENCE to a generic function -//! is added or removed somewhere. Even though the definition of the function -//! might be unchanged, a new REFERENCE might introduce a new monomorphized -//! instance of this function which has to be placed and compiled somewhere. -//! Conversely, when removing a REFERENCE, it might have been the last one with -//! that particular set of generic arguments and thus we have to remove it. -//! -//! From the above we see that just using one codegen unit per source-level -//! module is not such a good idea, since just adding a REFERENCE to some -//! generic item somewhere else would invalidate everything within the module -//! containing the generic item. The heuristic above reduces this detrimental -//! side-effect of references a little by at least not touching the non-generic -//! code of the module. -//! -//! A Note on Inlining -//! ------------------ -//! As briefly mentioned above, in order for LLVM to be able to inline a -//! 
function call, the body of the function has to be available in the LLVM -//! module where the call is made. This has a few consequences for partitioning: -//! -//! - The partitioning algorithm has to take care of placing functions into all -//! codegen units where they should be available for inlining. It also has to -//! decide on the correct linkage for these functions. -//! -//! - The partitioning algorithm has to know which functions are likely to get -//! inlined, so it can distribute function instantiations accordingly. Since -//! there is no way of knowing for sure which functions LLVM will decide to -//! inline in the end, we apply a heuristic here: Only functions marked with -//! `#[inline]` are considered for inlining by the partitioner. The current -//! implementation will not try to determine if a function is likely to be -//! inlined by looking at the functions definition. -//! -//! Note though that as a side-effect of creating a codegen units per -//! source-level module, functions from the same module will be available for -//! inlining, even when they are not marked `#[inline]`. - -mod default; - -use std::cmp; -use std::fs::{self, File}; -use std::io::{BufWriter, Write}; -use std::path::{Path, PathBuf}; - -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::sync; -use rustc_hir::def_id::{DefIdSet, LOCAL_CRATE}; -use rustc_middle::mir; -use rustc_middle::mir::mono::MonoItem; -use rustc_middle::mir::mono::{CodegenUnit, Linkage}; -use rustc_middle::query::Providers; -use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_middle::ty::TyCtxt; -use rustc_session::config::{DumpMonoStatsFormat, SwitchWithOptPath}; -use rustc_span::symbol::Symbol; - -use crate::collector::InliningMap; -use crate::collector::{self, MonoItemCollectionMode}; -use crate::errors::{ - CouldntDumpMonoStats, SymbolAlreadyDefined, UnknownCguCollectionMode, UnknownPartitionStrategy, -}; - -enum Partitioner { - Default(default::DefaultPartitioning), - // Other partitioning strategies can go here. 
- Unknown, -} - -impl<'tcx> Partition<'tcx> for Partitioner { - fn place_root_mono_items<I>( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - mono_items: &mut I, - ) -> PlacedRootMonoItems<'tcx> - where - I: Iterator<Item = MonoItem<'tcx>>, - { - match self { - Partitioner::Default(partitioner) => partitioner.place_root_mono_items(cx, mono_items), - Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy), - } - } - - fn merge_codegen_units( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut Vec<CodegenUnit<'tcx>>, - ) { - match self { - Partitioner::Default(partitioner) => partitioner.merge_codegen_units(cx, codegen_units), - Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy), - } - } - - fn place_inlined_mono_items( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - roots: FxHashSet<MonoItem<'tcx>>, - ) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement> { - match self { - Partitioner::Default(partitioner) => { - partitioner.place_inlined_mono_items(cx, codegen_units, roots) - } - Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy), - } - } - - fn internalize_symbols( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>, - internalization_candidates: FxHashSet<MonoItem<'tcx>>, - ) { - match self { - Partitioner::Default(partitioner) => partitioner.internalize_symbols( - cx, - codegen_units, - mono_item_placements, - internalization_candidates, - ), - Partitioner::Unknown => cx.tcx.sess.emit_fatal(UnknownPartitionStrategy), - } - } -} - -struct PartitioningCx<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - target_cgu_count: usize, - inlining_map: &'a InliningMap<'tcx>, -} - -pub struct PlacedRootMonoItems<'tcx> { - codegen_units: Vec<CodegenUnit<'tcx>>, - roots: FxHashSet<MonoItem<'tcx>>, - internalization_candidates: FxHashSet<MonoItem<'tcx>>, -} - -trait Partition<'tcx> { - fn place_root_mono_items<I>( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - mono_items: &mut I, - ) -> PlacedRootMonoItems<'tcx> - where - I: Iterator<Item = MonoItem<'tcx>>; - - fn merge_codegen_units( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut Vec<CodegenUnit<'tcx>>, - ); - - fn place_inlined_mono_items( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - roots: FxHashSet<MonoItem<'tcx>>, - ) -> FxHashMap<MonoItem<'tcx>, MonoItemPlacement>; - - fn internalize_symbols( - &mut self, - cx: &PartitioningCx<'_, 'tcx>, - codegen_units: &mut [CodegenUnit<'tcx>], - mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>, - internalization_candidates: FxHashSet<MonoItem<'tcx>>, - ); -} - -fn get_partitioner(tcx: TyCtxt<'_>) -> Partitioner { - let strategy = match &tcx.sess.opts.unstable_opts.cgu_partitioning_strategy { - None => "default", - Some(s) => &s[..], - }; - - match strategy { - "default" => Partitioner::Default(default::DefaultPartitioning), - _ => Partitioner::Unknown, - } -} - -fn partition<'tcx, I>( - tcx: TyCtxt<'tcx>, - mono_items: &mut I, - max_cgu_count: usize, - inlining_map: &InliningMap<'tcx>, -) -> Vec<CodegenUnit<'tcx>> -where - I: Iterator<Item = MonoItem<'tcx>>, -{ - let _prof_timer = tcx.prof.generic_activity("cgu_partitioning"); - - let mut partitioner = get_partitioner(tcx); - let cx = &PartitioningCx { tcx, target_cgu_count: max_cgu_count, inlining_map }; - // In the first step, we place all regular monomorphizations into 
their - // respective 'home' codegen unit. Regular monomorphizations are all - // functions and statics defined in the local crate. - let PlacedRootMonoItems { mut codegen_units, roots, internalization_candidates } = { - let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots"); - partitioner.place_root_mono_items(cx, mono_items) - }; - - for cgu in &mut codegen_units { - cgu.create_size_estimate(tcx); - } - - debug_dump(tcx, "INITIAL PARTITIONING", &codegen_units); - - // Merge until we have at most `max_cgu_count` codegen units. - // `merge_codegen_units` is responsible for updating the CGU size - // estimates. - { - let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus"); - partitioner.merge_codegen_units(cx, &mut codegen_units); - debug_dump(tcx, "POST MERGING", &codegen_units); - } - - // In the next step, we use the inlining map to determine which additional - // monomorphizations have to go into each codegen unit. These additional - // monomorphizations can be drop-glue, functions from external crates, and - // local functions the definition of which is marked with `#[inline]`. - let mono_item_placements = { - let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items"); - partitioner.place_inlined_mono_items(cx, &mut codegen_units, roots) - }; - - for cgu in &mut codegen_units { - cgu.create_size_estimate(tcx); - } - - debug_dump(tcx, "POST INLINING", &codegen_units); - - // Next we try to make as many symbols "internal" as possible, so LLVM has - // more freedom to optimize. - if !tcx.sess.link_dead_code() { - let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols"); - partitioner.internalize_symbols( - cx, - &mut codegen_units, - mono_item_placements, - internalization_candidates, - ); - } - - let instrument_dead_code = - tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions(); - - if instrument_dead_code { - assert!( - codegen_units.len() > 0, - "There must be at least one CGU that code coverage data can be generated in." - ); - - // Find the smallest CGU that has exported symbols and put the dead - // function stubs in that CGU. We look for exported symbols to increase - // the likelihood the linker won't throw away the dead functions. - // FIXME(#92165): In order to truly resolve this, we need to make sure - // the object file (CGU) containing the dead function stubs is included - // in the final binary. This will probably require forcing these - // function symbols to be included via `-u` or `/include` linker args. - let mut cgus: Vec<_> = codegen_units.iter_mut().collect(); - cgus.sort_by_key(|cgu| cgu.size_estimate()); - - let dead_code_cgu = - if let Some(cgu) = cgus.into_iter().rev().find(|cgu| { - cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External) - }) { - cgu - } else { - // If there are no CGUs that have externally linked items, - // then we just pick the first CGU as a fallback. - &mut codegen_units[0] - }; - dead_code_cgu.make_code_coverage_dead_code_cgu(); - } - - // Finally, sort by codegen unit name, so that we get deterministic results. - codegen_units.sort_by(|a, b| a.name().as_str().cmp(b.name().as_str())); - - debug_dump(tcx, "FINAL", &codegen_units); - - codegen_units -} - -/// For symbol internalization, we need to know whether a symbol/mono-item is -/// accessed from outside the codegen unit it is defined in. This type is used -/// to keep track of that. 
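To make the phase ordering in `partition` above easier to follow, here is a minimal, self-contained sketch of the same shape with invented toy types (`ToyCgu`, `merge_smallest`) instead of rustc's real `CodegenUnit`/`MonoItem` machinery; the merge step shown (fold the smallest unit into the next smallest until the target count is reached, then sort by name for determinism) is only an approximation of what the default partitioner does.

```rust
// Toy model of the merge-and-sort phases sketched above; all types and the
// merge heuristic are illustrative stand-ins, not rustc's implementation.
struct ToyCgu {
    name: String,
    items: Vec<(&'static str, usize)>, // (symbol, estimated size)
}

impl ToyCgu {
    fn size_estimate(&self) -> usize {
        self.items.iter().map(|(_, size)| size).sum()
    }
}

/// Repeatedly fold the smallest CGU into the next smallest until we are at or
/// below `max_cgus`, mirroring the "merge until max_cgu_count" phase.
fn merge_smallest(mut cgus: Vec<ToyCgu>, max_cgus: usize) -> Vec<ToyCgu> {
    while cgus.len() > max_cgus.max(1) {
        // Sort descending by size so the smallest unit is at the end.
        cgus.sort_by_key(|cgu| std::cmp::Reverse(cgu.size_estimate()));
        let smallest = cgus.pop().unwrap();
        let second_smallest = cgus.last_mut().unwrap();
        second_smallest.items.extend(smallest.items);
        second_smallest.name = format!("{}+{}", second_smallest.name, smallest.name);
    }
    // Deterministic output order, like the final sort by CGU name.
    cgus.sort_by(|a, b| a.name.cmp(&b.name));
    cgus
}

fn main() {
    let cgus = vec![
        ToyCgu { name: "crate.a".into(), items: vec![("fn_a", 40)] },
        ToyCgu { name: "crate.b".into(), items: vec![("fn_b", 5)] },
        ToyCgu { name: "crate.c".into(), items: vec![("fn_c", 10), ("fn_d", 3)] },
    ];
    for cgu in merge_smallest(cgus, 2) {
        println!("{} (size {})", cgu.name, cgu.size_estimate());
    }
}
```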
-#[derive(Clone, PartialEq, Eq, Debug)] -enum MonoItemPlacement { - SingleCgu { cgu_name: Symbol }, - MultipleCgus, -} - -fn debug_dump<'a, 'tcx: 'a>(tcx: TyCtxt<'tcx>, label: &str, cgus: &[CodegenUnit<'tcx>]) { - let dump = move || { - use std::fmt::Write; - - let num_cgus = cgus.len(); - let max = cgus.iter().map(|cgu| cgu.size_estimate()).max().unwrap(); - let min = cgus.iter().map(|cgu| cgu.size_estimate()).min().unwrap(); - let ratio = max as f64 / min as f64; - - let s = &mut String::new(); - let _ = writeln!( - s, - "{label} ({num_cgus} CodegenUnits, max={max}, min={min}, max/min={ratio:.1}):" - ); - for cgu in cgus { - let _ = - writeln!(s, "CodegenUnit {} estimated size {}:", cgu.name(), cgu.size_estimate()); - - for (mono_item, linkage) in cgu.items() { - let symbol_name = mono_item.symbol_name(tcx).name; - let symbol_hash_start = symbol_name.rfind('h'); - let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]); - - let _ = with_no_trimmed_paths!(writeln!( - s, - " - {} [{:?}] [{}] estimated size {}", - mono_item, - linkage, - symbol_hash, - mono_item.size_estimate(tcx) - )); - } - - let _ = writeln!(s); - } - - std::mem::take(s) - }; - - debug!("{}", dump()); -} - -#[inline(never)] // give this a place in the profiler -fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I) -where - I: Iterator<Item = &'a MonoItem<'tcx>>, - 'tcx: 'a, -{ - let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct"); - - let mut symbols: Vec<_> = - mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect(); - - symbols.sort_by_key(|sym| sym.1); - - for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() { - if sym1 == sym2 { - let span1 = mono_item1.local_span(tcx); - let span2 = mono_item2.local_span(tcx); - - // Deterministically select one of the spans for error reporting - let span = match (span1, span2) { - (Some(span1), Some(span2)) => { - Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 }) - } - (span1, span2) => span1.or(span2), - }; - - tcx.sess.emit_fatal(SymbolAlreadyDefined { span, symbol: sym1.to_string() }); - } - } -} - -fn collect_and_partition_mono_items(tcx: TyCtxt<'_>, (): ()) -> (&DefIdSet, &[CodegenUnit<'_>]) { - let collection_mode = match tcx.sess.opts.unstable_opts.print_mono_items { - Some(ref s) => { - let mode = s.to_lowercase(); - let mode = mode.trim(); - if mode == "eager" { - MonoItemCollectionMode::Eager - } else { - if mode != "lazy" { - tcx.sess.emit_warning(UnknownCguCollectionMode { mode }); - } - - MonoItemCollectionMode::Lazy - } - } - None => { - if tcx.sess.link_dead_code() { - MonoItemCollectionMode::Eager - } else { - MonoItemCollectionMode::Lazy - } - } - }; - - let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode); - - tcx.sess.abort_if_errors(); - - let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || { - sync::join( - || { - let mut codegen_units = partition( - tcx, - &mut items.iter().copied(), - tcx.sess.codegen_units(), - &inlining_map, - ); - codegen_units[0].make_primary(); - &*tcx.arena.alloc_from_iter(codegen_units) - }, - || assert_symbols_are_distinct(tcx, items.iter()), - ) - }); - - if tcx.prof.enabled() { - // Record CGU size estimates for self-profiling. 
- for cgu in codegen_units { - tcx.prof.artifact_size( - "codegen_unit_size_estimate", - cgu.name().as_str(), - cgu.size_estimate() as u64, - ); - } - } - - let mono_items: DefIdSet = items - .iter() - .filter_map(|mono_item| match *mono_item { - MonoItem::Fn(ref instance) => Some(instance.def_id()), - MonoItem::Static(def_id) => Some(def_id), - _ => None, - }) - .collect(); - - // Output monomorphization stats per def_id - if let SwitchWithOptPath::Enabled(ref path) = tcx.sess.opts.unstable_opts.dump_mono_stats { - if let Err(err) = - dump_mono_items_stats(tcx, &codegen_units, path, tcx.crate_name(LOCAL_CRATE)) - { - tcx.sess.emit_fatal(CouldntDumpMonoStats { error: err.to_string() }); - } - } - - if tcx.sess.opts.unstable_opts.print_mono_items.is_some() { - let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); - - for cgu in codegen_units { - for (&mono_item, &linkage) in cgu.items() { - item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage)); - } - } - - let mut item_keys: Vec<_> = items - .iter() - .map(|i| { - let mut output = with_no_trimmed_paths!(i.to_string()); - output.push_str(" @@"); - let mut empty = Vec::new(); - let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); - cgus.sort_by_key(|(name, _)| *name); - cgus.dedup(); - for &(ref cgu_name, (linkage, _)) in cgus.iter() { - output.push(' '); - output.push_str(cgu_name.as_str()); - - let linkage_abbrev = match linkage { - Linkage::External => "External", - Linkage::AvailableExternally => "Available", - Linkage::LinkOnceAny => "OnceAny", - Linkage::LinkOnceODR => "OnceODR", - Linkage::WeakAny => "WeakAny", - Linkage::WeakODR => "WeakODR", - Linkage::Appending => "Appending", - Linkage::Internal => "Internal", - Linkage::Private => "Private", - Linkage::ExternalWeak => "ExternalWeak", - Linkage::Common => "Common", - }; - - output.push('['); - output.push_str(linkage_abbrev); - output.push(']'); - } - output - }) - .collect(); - - item_keys.sort(); - - for item in item_keys { - println!("MONO_ITEM {item}"); - } - } - - (tcx.arena.alloc(mono_items), codegen_units) -} - -/// Outputs stats about instantiation counts and estimated size, per `MonoItem`'s -/// def, to a file in the given output directory. 
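The stats dump referenced above (`-Z dump-mono-stats`) groups instantiations by their defining item, multiplies the per-instance size estimate by the instantiation count, and sorts heaviest-first. A minimal sketch of that bookkeeping with plain `std` types; `ToyInstance`, the sizes, and the Markdown layout are illustrative stand-ins, not the real implementation:

```rust
use std::collections::HashMap;

// Invented stand-in for a monomorphized instance: (defining item, estimated size).
type ToyInstance = (&'static str, usize);

fn dump_stats_markdown(instances: &[ToyInstance]) -> String {
    // Group instantiation counts and the per-instance size by defining item;
    // like the real code, the first observed size stands in for all copies.
    let mut per_def: HashMap<&str, (usize, usize)> = HashMap::new();
    for &(def, size) in instances {
        let entry = per_def.entry(def).or_insert((0, size));
        entry.0 += 1;
    }

    // Sort by total estimated cost, heaviest first.
    let mut rows: Vec<_> = per_def
        .into_iter()
        .map(|(def, (count, size))| (def, count, size, count * size))
        .collect();
    rows.sort_unstable_by_key(|&(_, _, _, total)| std::cmp::Reverse(total));

    let mut out =
        String::from("| Item | Count | Size | Total |\n| --- | ---: | ---: | ---: |\n");
    for (def, count, size, total) in rows {
        out.push_str(&format!("| `{def}` | {count} | {size} | {total} |\n"));
    }
    out
}

fn main() {
    // Two instantiations of `foo`, one of `bar`; sizes are made up.
    let instances = [("foo", 10), ("foo", 10), ("bar", 50)];
    print!("{}", dump_stats_markdown(&instances));
}
```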
-fn dump_mono_items_stats<'tcx>( - tcx: TyCtxt<'tcx>, - codegen_units: &[CodegenUnit<'tcx>], - output_directory: &Option<PathBuf>, - crate_name: Symbol, -) -> Result<(), Box<dyn std::error::Error>> { - let output_directory = if let Some(ref directory) = output_directory { - fs::create_dir_all(directory)?; - directory - } else { - Path::new(".") - }; - - let format = tcx.sess.opts.unstable_opts.dump_mono_stats_format; - let ext = format.extension(); - let filename = format!("{crate_name}.mono_items.{ext}"); - let output_path = output_directory.join(&filename); - let file = File::create(&output_path)?; - let mut file = BufWriter::new(file); - - // Gather instantiated mono items grouped by def_id - let mut items_per_def_id: FxHashMap<_, Vec<_>> = Default::default(); - for cgu in codegen_units { - for (&mono_item, _) in cgu.items() { - // Avoid variable-sized compiler-generated shims - if mono_item.is_user_defined() { - items_per_def_id.entry(mono_item.def_id()).or_default().push(mono_item); - } - } - } - - #[derive(serde::Serialize)] - struct MonoItem { - name: String, - instantiation_count: usize, - size_estimate: usize, - total_estimate: usize, - } - - // Output stats sorted by total instantiated size, from heaviest to lightest - let mut stats: Vec<_> = items_per_def_id - .into_iter() - .map(|(def_id, items)| { - let name = with_no_trimmed_paths!(tcx.def_path_str(def_id)); - let instantiation_count = items.len(); - let size_estimate = items[0].size_estimate(tcx); - let total_estimate = instantiation_count * size_estimate; - MonoItem { name, instantiation_count, size_estimate, total_estimate } - }) - .collect(); - stats.sort_unstable_by_key(|item| cmp::Reverse(item.total_estimate)); - - if !stats.is_empty() { - match format { - DumpMonoStatsFormat::Json => serde_json::to_writer(file, &stats)?, - DumpMonoStatsFormat::Markdown => { - writeln!( - file, - "| Item | Instantiation count | Estimated Cost Per Instantiation | Total Estimated Cost |" - )?; - writeln!(file, "| --- | ---: | ---: | ---: |")?; - - for MonoItem { name, instantiation_count, size_estimate, total_estimate } in stats { - writeln!( - file, - "| `{name}` | {instantiation_count} | {size_estimate} | {total_estimate} |" - )?; - } - } - } - } - - Ok(()) -} - -fn codegened_and_inlined_items(tcx: TyCtxt<'_>, (): ()) -> &DefIdSet { - let (items, cgus) = tcx.collect_and_partition_mono_items(()); - let mut visited = DefIdSet::default(); - let mut result = items.clone(); - - for cgu in cgus { - for (item, _) in cgu.items() { - if let MonoItem::Fn(ref instance) = item { - let did = instance.def_id(); - if !visited.insert(did) { - continue; - } - let body = tcx.instance_mir(instance.def); - for block in body.basic_blocks.iter() { - for statement in &block.statements { - let mir::StatementKind::Coverage(_) = statement.kind else { continue }; - let scope = statement.source_info.scope; - if let Some(inlined) = scope.inlined_instance(&body.source_scopes) { - result.insert(inlined.def_id()); - } - } - } - } - } - } - - tcx.arena.alloc(result) -} - -pub fn provide(providers: &mut Providers) { - providers.collect_and_partition_mono_items = collect_and_partition_mono_items; - providers.codegened_and_inlined_items = codegened_and_inlined_items; - - providers.is_codegened_item = |tcx, def_id| { - let (all_mono_items, _) = tcx.collect_and_partition_mono_items(()); - all_mono_items.contains(&def_id) - }; - - providers.codegen_unit = |tcx, name| { - let (_, all) = tcx.collect_and_partition_mono_items(()); - all.iter() - .find(|cgu| cgu.name() == name) - 
.unwrap_or_else(|| panic!("failed to find cgu with name {name:?}")) - }; -} diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs index d12bfc6f6bb..f6a80b0431f 100644 --- a/compiler/rustc_monomorphize/src/util.rs +++ b/compiler/rustc_monomorphize/src/util.rs @@ -29,12 +29,12 @@ pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: In let before_feature_tys = tcx.subst_and_normalize_erasing_regions( closure_instance.substs, param_env, - ty::EarlyBinder(before_feature_tys), + ty::EarlyBinder::bind(before_feature_tys), ); let after_feature_tys = tcx.subst_and_normalize_erasing_regions( closure_instance.substs, param_env, - ty::EarlyBinder(after_feature_tys), + ty::EarlyBinder::bind(after_feature_tys), ); let new_size = tcx diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs index cd779b0b43e..8ab38c4fb8b 100644 --- a/compiler/rustc_parse/src/parser/generics.rs +++ b/compiler/rustc_parse/src/parser/generics.rs @@ -43,6 +43,15 @@ impl<'a> Parser<'a> { fn parse_ty_param(&mut self, preceding_attrs: AttrVec) -> PResult<'a, GenericParam> { let ident = self.parse_ident()?; + // We might have a typo'd `Const` that was parsed as a type parameter. + if self.may_recover() + && ident.name.as_str().to_ascii_lowercase() == kw::Const.as_str() + && self.check_ident() + // `Const` followed by IDENT + { + return Ok(self.recover_const_param_with_mistyped_const(preceding_attrs, ident)?); + } + // Parse optional colon and param bounds. let mut colon_span = None; let bounds = if self.eat(&token::Colon) { @@ -120,6 +129,41 @@ impl<'a> Parser<'a> { }) } + pub(crate) fn recover_const_param_with_mistyped_const( + &mut self, + preceding_attrs: AttrVec, + mistyped_const_ident: Ident, + ) -> PResult<'a, GenericParam> { + let ident = self.parse_ident()?; + self.expect(&token::Colon)?; + let ty = self.parse_ty()?; + + // Parse optional const generics default value. + let default = if self.eat(&token::Eq) { Some(self.parse_const_arg()?) } else { None }; + + let mut err = self.struct_span_err( + mistyped_const_ident.span, + format!("`const` keyword was mistyped as `{}`", mistyped_const_ident.as_str()), + ); + err.span_suggestion_verbose( + mistyped_const_ident.span, + "use the `const` keyword", + kw::Const.as_str(), + Applicability::MachineApplicable, + ); + err.emit(); + + Ok(GenericParam { + ident, + id: ast::DUMMY_NODE_ID, + attrs: preceding_attrs, + bounds: Vec::new(), + kind: GenericParamKind::Const { ty, kw_span: mistyped_const_ident.span, default }, + is_placeholder: false, + colon_span: None, + }) + } + /// Parses a (possibly empty) list of lifetime and type parameters, possibly including /// a trailing comma and erroneous trailing attributes. 
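For the `const`-typo recovery added above, a hypothetical before/after (the snippet is not taken from the diff): the misspelled parameter is kept in a comment because it does not compile, and the version with the machine-applicable suggestion applied is ordinary const-generic code.

```rust
// Hypothetical input the recovery is aimed at (left as a comment because it
// does not compile): the author typed `Const` instead of `const`.
//
//     fn first_n<Const N: usize>(bytes: [u8; N]) -> [u8; N] { bytes }
//
// With the suggestion "use the `const` keyword" applied, the item parses and
// compiles as an ordinary const-generic function:
fn first_n<const N: usize>(bytes: [u8; N]) -> [u8; N] {
    bytes
}

fn main() {
    assert_eq!(first_n::<3>([1, 2, 3]), [1, 2, 3]);
}
```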
pub(super) fn parse_generic_params(&mut self) -> PResult<'a, ThinVec<ast::GenericParam>> { diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 0c265d7af0e..c23420661fa 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -536,7 +536,9 @@ impl<'a> Parser<'a> { } else if inedible.contains(&self.token.kind) { // leave it in the input Ok(false) - } else if self.last_unexpected_token_span == Some(self.token.span) { + } else if self.token.kind != token::Eof + && self.last_unexpected_token_span == Some(self.token.span) + { FatalError.raise(); } else { self.expected_one_of_not_found(edible, inedible) diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs index 54f9fc5d2b9..9fcf51a04ec 100644 --- a/compiler/rustc_parse/src/parser/stmt.rs +++ b/compiler/rustc_parse/src/parser/stmt.rs @@ -23,6 +23,7 @@ use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult}; use rustc_span::source_map::{BytePos, Span}; use rustc_span::symbol::{kw, sym, Ident}; +use std::borrow::Cow; use std::mem; use thin_vec::{thin_vec, ThinVec}; @@ -364,7 +365,7 @@ impl<'a> Parser<'a> { // `let...else if`. Emit the same error that `parse_block()` would, // but explicitly point out that this pattern is not allowed. let msg = "conditional `else if` is not supported for `let...else`"; - return Err(self.error_block_no_opening_brace_msg(msg)); + return Err(self.error_block_no_opening_brace_msg(Cow::from(msg))); } let els = self.parse_block()?; self.check_let_else_init_bool_expr(&init); @@ -438,7 +439,7 @@ impl<'a> Parser<'a> { fn error_block_no_opening_brace_msg( &mut self, - msg: &str, + msg: Cow<'static, str>, ) -> DiagnosticBuilder<'a, ErrorGuaranteed> { let sp = self.token.span; let mut e = self.struct_span_err(sp, msg); @@ -502,7 +503,7 @@ impl<'a> Parser<'a> { fn error_block_no_opening_brace<T>(&mut self) -> PResult<'a, T> { let tok = super::token_descr(&self.token); let msg = format!("expected `{{`, found {}", tok); - Err(self.error_block_no_opening_brace_msg(&msg)) + Err(self.error_block_no_opening_brace_msg(Cow::from(msg))) } /// Parses a block. Inner attributes are allowed. 
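The `Cow<'static, str>` signature in the `stmt.rs` hunk above lets a single diagnostic helper accept both a borrowed string literal (no allocation) and an already-`format!`ed `String`. A self-contained sketch of that pattern; the `report` function and its messages are invented, not rustc's diagnostics API:

```rust
use std::borrow::Cow;

// Invented stand-in for a diagnostic entry point: one signature serves both
// static literals and formatted strings without forcing an allocation.
fn report(msg: impl Into<Cow<'static, str>>) {
    let msg: Cow<'static, str> = msg.into();
    match &msg {
        Cow::Borrowed(_) => println!("[static] {msg}"),
        Cow::Owned(_) => println!("[owned]  {msg}"),
    }
}

fn main() {
    // A `&'static str` stays borrowed, mirroring `Cow::from("…literal…")`.
    report("conditional `else if` is not supported for `let...else`");
    // A `format!` result is moved in, mirroring `Cow::from(format!(…))`.
    let tok = "`)`";
    report(format!("expected `{{`, found {tok}"));
}
```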
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index e596993465c..c9353b6fc9f 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] -memoffset = { version = "0.6.0", features = ["unstable_const"] } +memoffset = { version = "0.8.0", features = ["unstable_const"] } field-offset = "0.3.5" measureme = "10.0.0" rustc_ast = { path = "../rustc_ast" } diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 8c6ac822a77..377652ce71b 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -2540,7 +2540,7 @@ fn show_candidates( err.note(msg); } if let Some(note) = (*note).as_deref() { - err.note(note); + err.note(note.to_string()); } } else { let (_, descr_first, _, _) = &inaccessible_path_strings[0]; diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs index 7c4c05d4b94..c1bb262c0d4 100644 --- a/compiler/rustc_resolve/src/imports.rs +++ b/compiler/rustc_resolve/src/imports.rs @@ -21,7 +21,8 @@ use rustc_middle::metadata::Reexport; use rustc_middle::span_bug; use rustc_middle::ty; use rustc_session::lint::builtin::{ - AMBIGUOUS_GLOB_REEXPORTS, PUB_USE_OF_PRIVATE_EXTERN_CRATE, UNUSED_IMPORTS, + AMBIGUOUS_GLOB_REEXPORTS, HIDDEN_GLOB_REEXPORTS, PUB_USE_OF_PRIVATE_EXTERN_CRATE, + UNUSED_IMPORTS, }; use rustc_session::lint::BuiltinLintDiagnostics; use rustc_span::edit_distance::find_best_match_for_name; @@ -526,31 +527,71 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { } } - pub(crate) fn check_reexport_ambiguities( + pub(crate) fn check_hidden_glob_reexports( &mut self, exported_ambiguities: FxHashSet<Interned<'a, NameBinding<'a>>>, ) { for module in self.arenas.local_modules().iter() { - module.for_each_child(self, |this, ident, ns, binding| { - if let NameBindingKind::Import { import, .. } = binding.kind - && let Some((amb_binding, _)) = binding.ambiguity - && binding.res() != Res::Err - && exported_ambiguities.contains(&Interned::new_unchecked(binding)) - { - this.lint_buffer.buffer_lint_with_diagnostic( - AMBIGUOUS_GLOB_REEXPORTS, - import.root_id, - import.root_span, - "ambiguous glob re-exports", - BuiltinLintDiagnostics::AmbiguousGlobReexports { - name: ident.to_string(), - namespace: ns.descr().to_string(), - first_reexport_span: import.root_span, - duplicate_reexport_span: amb_binding.span, - }, - ); + for (key, resolution) in self.resolutions(module).borrow().iter() { + let resolution = resolution.borrow(); + + if let Some(binding) = resolution.binding { + if let NameBindingKind::Import { import, .. 
} = binding.kind + && let Some((amb_binding, _)) = binding.ambiguity + && binding.res() != Res::Err + && exported_ambiguities.contains(&Interned::new_unchecked(binding)) + { + self.lint_buffer.buffer_lint_with_diagnostic( + AMBIGUOUS_GLOB_REEXPORTS, + import.root_id, + import.root_span, + "ambiguous glob re-exports", + BuiltinLintDiagnostics::AmbiguousGlobReexports { + name: key.ident.to_string(), + namespace: key.ns.descr().to_string(), + first_reexport_span: import.root_span, + duplicate_reexport_span: amb_binding.span, + }, + ); + } + + if let Some(glob_binding) = resolution.shadowed_glob { + let binding_id = match binding.kind { + NameBindingKind::Res(res) => { + Some(self.def_id_to_node_id[res.def_id().expect_local()]) + } + NameBindingKind::Module(module) => { + Some(self.def_id_to_node_id[module.def_id().expect_local()]) + } + NameBindingKind::Import { import, .. } => import.id(), + }; + + if binding.res() != Res::Err + && glob_binding.res() != Res::Err + && let NameBindingKind::Import { import: glob_import, .. } = glob_binding.kind + && let Some(binding_id) = binding_id + && let Some(glob_import_id) = glob_import.id() + && let glob_import_def_id = self.local_def_id(glob_import_id) + && self.effective_visibilities.is_exported(glob_import_def_id) + && glob_binding.vis.is_public() + && !binding.vis.is_public() + { + self.lint_buffer.buffer_lint_with_diagnostic( + HIDDEN_GLOB_REEXPORTS, + binding_id, + binding.span, + "private item shadows public glob re-export", + BuiltinLintDiagnostics::HiddenGlobReexports { + name: key.ident.name.to_string(), + namespace: key.ns.descr().to_owned(), + glob_reexport_span: glob_binding.span, + private_item_span: binding.span, + }, + ); + } + } } - }); + } } } diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index df65825802e..f79f8d0c6ca 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -29,6 +29,7 @@ use rustc_span::hygiene::MacroKind; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::Span; +use std::borrow::Cow; use std::iter; use std::ops::Deref; @@ -1248,7 +1249,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { }), ) if followed_by_brace => { if let Some(sp) = closing_brace { - err.span_label(span, fallback_label); + err.span_label(span, fallback_label.to_string()); err.multipart_suggestion( "surround the struct literal with parentheses", vec![ @@ -1320,7 +1321,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { ); } _ => { - err.span_label(span, fallback_label); + err.span_label(span, fallback_label.to_string()); } } }; @@ -1333,7 +1334,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { })) | PathSource::Struct, ) => { - err.span_label(span, fallback_label); + err.span_label(span, fallback_label.to_string()); err.span_suggestion_verbose( span.shrink_to_hi(), "use `!` to invoke the macro", @@ -1345,7 +1346,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { } } (Res::Def(DefKind::Macro(MacroKind::Bang), _), _) => { - err.span_label(span, fallback_label); + err.span_label(span, fallback_label.to_string()); } (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => { err.span_label(span, "type aliases cannot be used as traits"); @@ -1513,7 +1514,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { ); } (Res::SelfTyParam { .. } | Res::SelfTyAlias { .. 
}, _) if ns == ValueNS => { - err.span_label(span, fallback_label); + err.span_label(span, fallback_label.to_string()); err.note("can't use `Self` as a constructor, you must use the implemented struct"); } (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => { @@ -2243,7 +2244,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { &self, err: &mut Diagnostic, name: Option<&str>, - suggest: impl Fn(&mut Diagnostic, bool, Span, &str, String) -> bool, + suggest: impl Fn(&mut Diagnostic, bool, Span, Cow<'static, str>, String) -> bool, ) { let mut suggest_note = true; for rib in self.lifetime_ribs.iter().rev() { @@ -2288,22 +2289,23 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { (span, sugg) }; if higher_ranked { - let message = format!( + let message = Cow::from(format!( "consider making the {} lifetime-generic with a new `{}` lifetime", kind.descr(), name.unwrap_or("'a"), - ); - should_continue = suggest(err, true, span, &message, sugg); + )); + should_continue = suggest(err, true, span, message, sugg); err.note_once( "for more information on higher-ranked polymorphism, visit \ https://doc.rust-lang.org/nomicon/hrtb.html", ); } else if let Some(name) = name { - let message = format!("consider introducing lifetime `{}` here", name); - should_continue = suggest(err, false, span, &message, sugg); + let message = + Cow::from(format!("consider introducing lifetime `{}` here", name)); + should_continue = suggest(err, false, span, message, sugg); } else { - let message = "consider introducing a named lifetime parameter"; - should_continue = suggest(err, false, span, &message, sugg); + let message = Cow::from("consider introducing a named lifetime parameter"); + should_continue = suggest(err, false, span, message, sugg); } } LifetimeRibKind::Item => break, diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index 3d2bd842906..fd977e8e254 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -1496,8 +1496,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let exported_ambiguities = self.tcx.sess.time("compute_effective_visibilities", || { EffectiveVisibilitiesVisitor::compute_effective_visibilities(self, krate) }); - self.tcx.sess.time("check_reexport_ambiguities", || { - self.check_reexport_ambiguities(exported_ambiguities) + self.tcx.sess.time("check_hidden_glob_reexports", || { + self.check_hidden_glob_reexports(exported_ambiguities) }); self.tcx.sess.time("finalize_macro_resolutions", || self.finalize_macro_resolutions()); self.tcx.sess.time("late_resolve_crate", || self.late_resolve_crate(krate)); diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index df5c16a9375..d8a7bcbfff9 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -827,7 +827,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { if !is_allowed(feature) && !allowed_by_implication { let lint_buffer = &mut self.lint_buffer; let soft_handler = - |lint, span, msg: &_| lint_buffer.buffer_lint(lint, node_id, span, msg); + |lint, span, msg: String| lint_buffer.buffer_lint(lint, node_id, span, msg); stability::report_unstable( self.tcx.sess, feature, @@ -846,7 +846,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let (message, lint) = stability::deprecation_message_and_lint(depr, "macro", &path); stability::early_report_deprecation( &mut self.lint_buffer, - &message, + message, depr.suggestion, lint, span, diff --git a/compiler/rustc_session/messages.ftl 
b/compiler/rustc_session/messages.ftl index 5a0b8f9f73c..4897bd8d5da 100644 --- a/compiler/rustc_session/messages.ftl +++ b/compiler/rustc_session/messages.ftl @@ -27,6 +27,10 @@ session_feature_gate_error = {$explain} session_file_is_not_writeable = output file {$file} is not writeable -- check its permissions session_hexadecimal_float_literal_not_supported = hexadecimal float literal is not supported + +session_incompatible_linker_flavor = linker flavor `{$flavor}` is incompatible with the current target + .note = compatible flavors are: {$compatible_list} + session_incorrect_cgu_reuse_type = CGU-reuse for `{$cgu_user_name}` is `{$actual_reuse}` but should be {$at_least -> [one] {"at least "} diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 6c8c8e484f9..0ce83e79097 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -12,7 +12,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::stable_hasher::{StableOrd, ToStableHashKey}; use rustc_target::abi::Align; -use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo}; +use rustc_target::spec::{LinkerFlavorCli, PanicStrategy, SanitizerSet, SplitDebuginfo}; use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS}; use crate::parse::{CrateCheckConfig, CrateConfig}; @@ -2525,6 +2525,19 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { } } + if let Some(flavor) = cg.linker_flavor { + if matches!(flavor, LinkerFlavorCli::BpfLinker | LinkerFlavorCli::PtxLinker) + && !nightly_options::is_unstable_enabled(matches) + { + let msg = format!( + "linker flavor `{}` is unstable, `-Z unstable-options` \ + flag must also be passed to explicitly use it", + flavor.desc() + ); + early_error(error_format, msg); + } + } + let prints = collect_print_requests(&mut cg, &mut unstable_opts, matches, error_format); let cg = cg; diff --git a/compiler/rustc_session/src/errors.rs b/compiler/rustc_session/src/errors.rs index 546c0fa8e03..4a3e668da11 100644 --- a/compiler/rustc_session/src/errors.rs +++ b/compiler/rustc_session/src/errors.rs @@ -422,3 +422,11 @@ pub fn report_lit_error(sess: &ParseSess, err: LitError, lit: token::Lit, span: pub struct OptimisationFuelExhausted { pub msg: String, } + +#[derive(Diagnostic)] +#[diag(session_incompatible_linker_flavor)] +#[note] +pub struct IncompatibleLinkerFlavor { + pub flavor: &'static str, + pub compatible_list: String, +} diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 2c4c4a7a6ce..7cc2b2c880c 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -372,7 +372,7 @@ mod desc { pub const parse_opt_panic_strategy: &str = parse_panic_strategy; pub const parse_oom_strategy: &str = "either `panic` or `abort`"; pub const parse_relro_level: &str = "one of: `full`, `partial`, or `off`"; - pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `cfi`, `hwaddress`, `kcfi`, `kernel-address`, `leak`, `memory`, `memtag`, `shadow-call-stack`, or `thread`"; + pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `cfi`, `hwaddress`, `kcfi`, `kernel-address`, `leak`, `memory`, `memtag`, `safestack`, `shadow-call-stack`, or `thread`"; pub const parse_sanitizer_memory_track_origins: &str = "0, 1, or 2"; pub const parse_cfguard: &str = "either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`"; @@ -694,6 +694,7 @@ 
mod parse { "shadow-call-stack" => SanitizerSet::SHADOWCALLSTACK, "thread" => SanitizerSet::THREAD, "hwaddress" => SanitizerSet::HWADDRESS, + "safestack" => SanitizerSet::SAFESTACK, _ => return false, } } @@ -1371,8 +1372,6 @@ options! { "set options for branch target identification and pointer authentication on AArch64"), cf_protection: CFProtection = (CFProtection::None, parse_cfprotection, [TRACKED], "instrument control-flow architecture protection"), - cgu_partitioning_strategy: Option<String> = (None, parse_opt_string, [TRACKED], - "the codegen unit partitioning strategy to use"), codegen_backend: Option<String> = (None, parse_opt_string, [TRACKED], "the backend to use"), combine_cgu: bool = (false, parse_bool, [TRACKED], diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs index 7b396dde91b..a433e2371c9 100644 --- a/compiler/rustc_session/src/parse.rs +++ b/compiler/rustc_session/src/parse.rs @@ -84,6 +84,7 @@ impl SymbolGallery { /// Construct a diagnostic for a language feature error due to the given `span`. /// The `feature`'s `Symbol` is the one you used in `active.rs` and `rustc_span::symbols`. +#[track_caller] pub fn feature_err( sess: &ParseSess, feature: Symbol, @@ -123,7 +124,7 @@ pub fn feature_err_issue( /// Construct a future incompatibility diagnostic for a feature gate. /// /// This diagnostic is only a warning and *does not cause compilation to fail*. -pub fn feature_warn(sess: &ParseSess, feature: Symbol, span: Span, explain: &str) { +pub fn feature_warn(sess: &ParseSess, feature: Symbol, span: Span, explain: &'static str) { feature_warn_issue(sess, feature, span, GateIssue::Language, explain); } @@ -140,7 +141,7 @@ pub fn feature_warn_issue( feature: Symbol, span: Span, issue: GateIssue, - explain: &str, + explain: &'static str, ) { let mut err = sess.span_diagnostic.struct_span_warn(span, explain); add_feature_diagnostics_for_issue(&mut err, sess, feature, issue); diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index bbe52dbced0..1eb54cee5a1 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -1675,6 +1675,13 @@ fn validate_commandline_args_with_session_available(sess: &Session) { if sess.opts.unstable_opts.instrument_xray.is_some() && !sess.target.options.supports_xray { sess.emit_err(errors::InstrumentationNotSupported { us: "XRay".to_string() }); } + + if let Some(flavor) = sess.opts.cg.linker_flavor { + if let Some(compatible_list) = sess.target.linker_flavor.check_compatibility(flavor) { + let flavor = flavor.desc(); + sess.emit_err(errors::IncompatibleLinkerFlavor { flavor, compatible_list }); + } + } } /// Holds data on the current incremental compilation session, if there is one. 
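The new session check above asks the target's linker flavor for a list of compatible CLI flavors and emits `IncompatibleLinkerFlavor` otherwise. A toy model of that shape, with invented flavor enums and a hard-coded compatibility table standing in for the roundtrip-based `check_compatibility` that the `rustc_target` changes add later in this diff (the real code builds the list with nightly `intersperse`; the sketch just uses `join`):

```rust
// Invented, simplified flavors; the real sets and the roundtrip check live in
// rustc_target::spec.
#[derive(Clone, Copy, PartialEq)]
enum CliFlavor { Gcc, Ld, Lld, Msvc }

impl CliFlavor {
    const ALL: &'static [CliFlavor] =
        &[CliFlavor::Gcc, CliFlavor::Ld, CliFlavor::Lld, CliFlavor::Msvc];

    fn desc(self) -> &'static str {
        match self {
            CliFlavor::Gcc => "gcc",
            CliFlavor::Ld => "ld",
            CliFlavor::Lld => "ld.lld",
            CliFlavor::Msvc => "msvc",
        }
    }
}

struct ToyTarget {
    compatible: &'static [CliFlavor],
}

impl ToyTarget {
    /// `None` means the requested flavor is fine; `Some(list)` carries the
    /// comma-separated alternatives for the error note, like the real check.
    fn check_compatibility(&self, requested: CliFlavor) -> Option<String> {
        if self.compatible.contains(&requested) {
            return None;
        }
        Some(
            self.compatible
                .iter()
                .map(|flavor| flavor.desc())
                .collect::<Vec<_>>()
                .join(", "),
        )
    }
}

fn main() {
    // Pretend this is a GNU-style target that only accepts gcc-like or ld-like flavors.
    let target = ToyTarget { compatible: &[CliFlavor::Gcc, CliFlavor::Ld, CliFlavor::Lld] };

    for &requested in CliFlavor::ALL {
        match target.check_compatibility(requested) {
            None => println!("-C linker-flavor={} is accepted", requested.desc()),
            Some(compatible_list) => println!(
                "error: linker flavor `{}` is incompatible with the current target\n  note: compatible flavors are: {}",
                requested.desc(),
                compatible_list
            ),
        }
    }
}
```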
diff --git a/compiler/rustc_smir/src/rustc_smir/mod.rs b/compiler/rustc_smir/src/rustc_smir/mod.rs index 6af43f5d3f3..5572108f495 100644 --- a/compiler/rustc_smir/src/rustc_smir/mod.rs +++ b/compiler/rustc_smir/src/rustc_smir/mod.rs @@ -309,7 +309,7 @@ fn rustc_terminator_to_terminator( Terminate => Terminator::Abort, Return => Terminator::Return, Unreachable => Terminator::Unreachable, - Drop { place, target, unwind } => Terminator::Drop { + Drop { place, target, unwind, replace: _ } => Terminator::Drop { place: rustc_place_to_place(place), target: target.as_usize(), unwind: rustc_unwind_to_unwind(unwind), diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs index 6755657c727..0c7e36b3bef 100644 --- a/compiler/rustc_span/src/hygiene.rs +++ b/compiler/rustc_span/src/hygiene.rs @@ -1147,7 +1147,6 @@ pub enum DesugaringKind { Await, ForLoop, WhileLoop, - Replace, } impl DesugaringKind { @@ -1163,7 +1162,6 @@ impl DesugaringKind { DesugaringKind::OpaqueTy => "`impl Trait`", DesugaringKind::ForLoop => "`for` loop", DesugaringKind::WhileLoop => "`while` loop", - DesugaringKind::Replace => "drop and replace", } } } @@ -1290,7 +1288,7 @@ pub fn decode_expn_id( decode_data: impl FnOnce(ExpnId) -> (ExpnData, ExpnHash), ) -> ExpnId { if index == 0 { - debug!("decode_expn_id: deserialized root"); + trace!("decode_expn_id: deserialized root"); return ExpnId::root(); } @@ -1323,7 +1321,7 @@ pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContext ) -> SyntaxContext { let raw_id: u32 = Decodable::decode(d); if raw_id == 0 { - debug!("decode_syntax_context: deserialized root"); + trace!("decode_syntax_context: deserialized root"); // The root is special return SyntaxContext::root(); } diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 874d578fe1d..2f002e42427 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -1146,6 +1146,8 @@ symbols! { profiler_builtins, profiler_runtime, ptr, + ptr_cast_mut, + ptr_from_ref, ptr_guaranteed_cmp, ptr_mask, ptr_null, @@ -1454,6 +1456,10 @@ symbols! 
{ stop_after_dataflow, store, str, + str_from_utf8, + str_from_utf8_mut, + str_from_utf8_unchecked, + str_from_utf8_unchecked_mut, str_split_whitespace, str_trim, str_trim_end, diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs index 9fa49123a86..b245742e533 100644 --- a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs +++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs @@ -10,7 +10,6 @@ use core::fmt::Display; use rustc_data_structures::base_n; use rustc_data_structures::fx::FxHashMap; -use rustc_errors::DiagnosticMessage; use rustc_hir as hir; use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef}; use rustc_middle::ty::{ @@ -534,10 +533,7 @@ fn encode_ty<'tcx>( tcx.sess .struct_span_err( cfi_encoding.span, - DiagnosticMessage::Str(format!( - "invalid `cfi_encoding` for `{:?}`", - ty.kind() - )), + format!("invalid `cfi_encoding` for `{:?}`", ty.kind()), ) .emit(); } @@ -589,10 +585,7 @@ fn encode_ty<'tcx>( tcx.sess .struct_span_err( cfi_encoding.span, - DiagnosticMessage::Str(format!( - "invalid `cfi_encoding` for `{:?}`", - ty.kind() - )), + format!("invalid `cfi_encoding` for `{:?}`", ty.kind()), ) .emit(); } diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 4cccc639892..0a805e2422d 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -274,7 +274,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> { let mut param_env = self.tcx.param_env_reveal_all_normalized(impl_def_id); if !substs.is_empty() { - param_env = EarlyBinder(param_env).subst(self.tcx, substs); + param_env = EarlyBinder::bind(param_env).subst(self.tcx, substs); } match &mut impl_trait_ref { diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs index dc2cc23ffb1..a7b54766bc6 100644 --- a/compiler/rustc_target/src/lib.rs +++ b/compiler/rustc_target/src/lib.rs @@ -11,6 +11,7 @@ #![feature(assert_matches)] #![feature(associated_type_bounds)] #![feature(exhaustive_patterns)] +#![feature(iter_intersperse)] #![feature(min_specialization)] #![feature(never_type)] #![feature(rustc_attrs)] diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs index fc5dbd114e4..b9df0046b12 100644 --- a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs +++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs @@ -12,7 +12,7 @@ pub fn target() -> Target { endian: Endian::Big, // NOTE(mips64r2) matches C toolchain cpu: "mips64r2".into(), - features: "+mips64r2".into(), + features: "+mips64r2,+xgot".into(), max_atomic_width: Some(64), mcount: "_mcount".into(), diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs index e0d5f6f57f1..57ad8c47399 100644 --- a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs +++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs @@ -10,7 +10,7 @@ pub fn target() -> Target { abi: "abi64".into(), // NOTE(mips64r2) matches C toolchain cpu: "mips64r2".into(), - features: "+mips64r2".into(), + features: "+mips64r2,+xgot".into(), max_atomic_width: Some(64), mcount: "_mcount".into(), diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index ba4b89c9ea1..05cb7e87a93 
100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -205,15 +205,11 @@ impl ToJson for LldFlavor { } impl LinkerFlavor { - pub fn from_cli(cli: LinkerFlavorCli, target: &TargetOptions) -> LinkerFlavor { - Self::from_cli_impl(cli, target.linker_flavor.lld_flavor(), target.linker_flavor.is_gnu()) - } - - /// The passed CLI flavor is preferred over other args coming from the default target spec, - /// so this function can produce a flavor that is incompatible with the current target. - /// FIXME: Produce errors when `-Clinker-flavor` is set to something incompatible - /// with the current target. - fn from_cli_impl(cli: LinkerFlavorCli, lld_flavor: LldFlavor, is_gnu: bool) -> LinkerFlavor { + /// At this point the target's reference linker flavor doesn't yet exist and we need to infer + /// it. The inference always succeds and gives some result, and we don't report any flavor + /// incompatibility errors for json target specs. The CLI flavor is used as the main source + /// of truth, other flags are used in case of ambiguities. + fn from_cli_json(cli: LinkerFlavorCli, lld_flavor: LldFlavor, is_gnu: bool) -> LinkerFlavor { match cli { LinkerFlavorCli::Gcc => match lld_flavor { LldFlavor::Ld if is_gnu => LinkerFlavor::Gnu(Cc::Yes, Lld::No), @@ -257,6 +253,85 @@ impl LinkerFlavor { } } + fn infer_cli_hints(cli: LinkerFlavorCli) -> (Option<Cc>, Option<Lld>) { + match cli { + LinkerFlavorCli::Gcc | LinkerFlavorCli::Em => (Some(Cc::Yes), None), + LinkerFlavorCli::Lld(_) => (Some(Cc::No), Some(Lld::Yes)), + LinkerFlavorCli::Ld | LinkerFlavorCli::Msvc => (Some(Cc::No), Some(Lld::No)), + LinkerFlavorCli::BpfLinker | LinkerFlavorCli::PtxLinker => (None, None), + } + } + + fn infer_linker_hints(linker_stem: &str) -> (Option<Cc>, Option<Lld>) { + // Remove any version postfix. + let stem = linker_stem + .rsplit_once('-') + .and_then(|(lhs, rhs)| rhs.chars().all(char::is_numeric).then_some(lhs)) + .unwrap_or(linker_stem); + + // GCC/Clang can have an optional target prefix. 
+ if stem == "emcc" + || stem == "gcc" + || stem.ends_with("-gcc") + || stem == "g++" + || stem.ends_with("-g++") + || stem == "clang" + || stem.ends_with("-clang") + || stem == "clang++" + || stem.ends_with("-clang++") + { + (Some(Cc::Yes), None) + } else if stem == "wasm-ld" + || stem.ends_with("-wasm-ld") + || stem == "ld.lld" + || stem == "lld" + || stem == "rust-lld" + || stem == "lld-link" + { + (Some(Cc::No), Some(Lld::Yes)) + } else if stem == "ld" || stem.ends_with("-ld") || stem == "link" { + (Some(Cc::No), Some(Lld::No)) + } else { + (None, None) + } + } + + fn with_hints(self, (cc_hint, lld_hint): (Option<Cc>, Option<Lld>)) -> LinkerFlavor { + match self { + LinkerFlavor::Gnu(cc, lld) => { + LinkerFlavor::Gnu(cc_hint.unwrap_or(cc), lld_hint.unwrap_or(lld)) + } + LinkerFlavor::Darwin(cc, lld) => { + LinkerFlavor::Darwin(cc_hint.unwrap_or(cc), lld_hint.unwrap_or(lld)) + } + LinkerFlavor::WasmLld(cc) => LinkerFlavor::WasmLld(cc_hint.unwrap_or(cc)), + LinkerFlavor::Unix(cc) => LinkerFlavor::Unix(cc_hint.unwrap_or(cc)), + LinkerFlavor::Msvc(lld) => LinkerFlavor::Msvc(lld_hint.unwrap_or(lld)), + LinkerFlavor::EmCc | LinkerFlavor::Bpf | LinkerFlavor::Ptx => self, + } + } + + pub fn with_cli_hints(self, cli: LinkerFlavorCli) -> LinkerFlavor { + self.with_hints(LinkerFlavor::infer_cli_hints(cli)) + } + + pub fn with_linker_hints(self, linker_stem: &str) -> LinkerFlavor { + self.with_hints(LinkerFlavor::infer_linker_hints(linker_stem)) + } + + pub fn check_compatibility(self, cli: LinkerFlavorCli) -> Option<String> { + // The CLI flavor should be compatible with the target if it survives this roundtrip. + let compatible = |cli| cli == self.with_cli_hints(cli).to_cli(); + (!compatible(cli)).then(|| { + LinkerFlavorCli::all() + .iter() + .filter(|cli| compatible(**cli)) + .map(|cli| cli.desc()) + .intersperse(", ") + .collect() + }) + } + pub fn lld_flavor(self) -> LldFlavor { match self { LinkerFlavor::Gnu(..) @@ -278,6 +353,10 @@ impl LinkerFlavor { macro_rules! linker_flavor_cli_impls { ($(($($flavor:tt)*) $string:literal)*) => ( impl LinkerFlavorCli { + const fn all() -> &'static [LinkerFlavorCli] { + &[$($($flavor)*,)*] + } + pub const fn one_of() -> &'static str { concat!("one of: ", $($string, " ",)*) } @@ -289,8 +368,8 @@ macro_rules! linker_flavor_cli_impls { }) } - pub fn desc(&self) -> &str { - match *self { + pub fn desc(self) -> &'static str { + match self { $($($flavor)* => $string,)* } } @@ -815,6 +894,7 @@ bitflags::bitflags! { const SHADOWCALLSTACK = 1 << 7; const KCFI = 1 << 8; const KERNELADDRESS = 1 << 9; + const SAFESTACK = 1 << 10; } } @@ -831,6 +911,7 @@ impl SanitizerSet { SanitizerSet::LEAK => "leak", SanitizerSet::MEMORY => "memory", SanitizerSet::MEMTAG => "memtag", + SanitizerSet::SAFESTACK => "safestack", SanitizerSet::SHADOWCALLSTACK => "shadow-call-stack", SanitizerSet::THREAD => "thread", SanitizerSet::HWADDRESS => "hwaddress", @@ -871,6 +952,7 @@ impl IntoIterator for SanitizerSet { SanitizerSet::THREAD, SanitizerSet::HWADDRESS, SanitizerSet::KERNELADDRESS, + SanitizerSet::SAFESTACK, ] .iter() .copied() @@ -1798,7 +1880,7 @@ impl TargetOptions { } fn update_from_cli(&mut self) { - self.linker_flavor = LinkerFlavor::from_cli_impl( + self.linker_flavor = LinkerFlavor::from_cli_json( self.linker_flavor_json, self.lld_flavor_json, self.linker_is_gnu_json, @@ -1812,12 +1894,7 @@ impl TargetOptions { ] { args.clear(); for (flavor, args_json) in args_json { - // Cannot use `from_cli` due to borrow checker. 
- let linker_flavor = LinkerFlavor::from_cli_impl( - *flavor, - self.lld_flavor_json, - self.linker_is_gnu_json, - ); + let linker_flavor = self.linker_flavor.with_cli_hints(*flavor); // Normalize to no lld to avoid asserts. let linker_flavor = match linker_flavor { LinkerFlavor::Gnu(cc, _) => LinkerFlavor::Gnu(cc, Lld::No), @@ -2364,6 +2441,7 @@ impl Target { Some("leak") => SanitizerSet::LEAK, Some("memory") => SanitizerSet::MEMORY, Some("memtag") => SanitizerSet::MEMTAG, + Some("safestack") => SanitizerSet::SAFESTACK, Some("shadow-call-stack") => SanitizerSet::SHADOWCALLSTACK, Some("thread") => SanitizerSet::THREAD, Some("hwaddress") => SanitizerSet::HWADDRESS, diff --git a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs index cb62a817322..d2906d6c4ae 100644 --- a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs +++ b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs @@ -7,7 +7,7 @@ pub fn target() -> Target { base.vendor = "pc".into(); base.max_atomic_width = Some(64); base.stack_probes = StackProbeType::X86; - base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI; + base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD; Target { llvm_target: "x86_64-pc-solaris".into(), diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs index 04a12a7bfa6..ca5b62e279c 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs @@ -5,7 +5,7 @@ pub fn target() -> Target { base.add_pre_link_args(LinkerFlavor::Unix(Cc::Yes), &["-m64", "-std=c99"]); base.cpu = "x86-64".into(); base.max_atomic_width = Some(64); - base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI; + base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD; Target { // LLVM does not currently have a separate illumos target, diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs index 9af1049b870..deb15c02c68 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs @@ -11,6 +11,7 @@ pub fn target() -> Target { | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::MEMORY + | SanitizerSet::SAFESTACK | SanitizerSet::THREAD; base.supports_xray = true; diff --git a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs index 0ede32c753c..0f3f8f1ac2c 100644 --- a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs +++ b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs @@ -96,7 +96,7 @@ pub(in crate::solve) fn replace_erased_lifetimes_with_bound_vars<'tcx>( let br = ty::BoundRegion { var: ty::BoundVar::from_u32(counter), kind: ty::BrAnon(None) }; counter += 1; - tcx.mk_re_late_bound(current_depth, br) + ty::Region::new_late_bound(tcx, current_depth, br) } // All free regions should be erased here. 
r => bug!("unexpected region: {r:?}"), @@ -148,11 +148,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<'tcx>( ty::Adt(def, substs) => { let sized_crit = def.sized_constraint(ecx.tcx()); - Ok(sized_crit - .0 - .iter() - .map(|ty| sized_crit.rebind(*ty).subst(ecx.tcx(), substs)) - .collect()) + Ok(sized_crit.subst_iter_copied(ecx.tcx(), substs).collect()) } } } diff --git a/compiler/rustc_trait_selection/src/solve/canonicalize.rs b/compiler/rustc_trait_selection/src/solve/canonicalize.rs index ff4bff10cc8..29bdb5ff67d 100644 --- a/compiler/rustc_trait_selection/src/solve/canonicalize.rs +++ b/compiler/rustc_trait_selection/src/solve/canonicalize.rs @@ -255,7 +255,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { }), ); let br = ty::BoundRegion { var, kind: BrAnon(None) }; - self.interner().mk_re_late_bound(self.binder_index, br) + ty::Region::new_late_bound(self.interner(), self.binder_index, br) } fn fold_ty(&mut self, mut t: Ty<'tcx>) -> Ty<'tcx> { diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs index fdb209fbff8..bca2343e424 100644 --- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs +++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs @@ -137,6 +137,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { #[instrument(level = "debug", skip(self), ret)] fn compute_external_query_constraints(&self) -> Result<ExternalConstraints<'tcx>, NoSolution> { + // We only check for leaks from universes which were entered inside + // of the query. + self.infcx.leak_check(self.max_input_universe, None).map_err(|e| { + debug!(?e, "failed the leak check"); + NoSolution + })?; + // Cannot use `take_registered_region_obligations` as we may compute the response // inside of a `probe` whenever we have multiple choices inside of the solver. let region_obligations = self.infcx.inner.borrow().region_obligations().to_owned(); diff --git a/compiler/rustc_trait_selection/src/solve/mod.rs b/compiler/rustc_trait_selection/src/solve/mod.rs index 26ace28f5fd..56a254d9c07 100644 --- a/compiler/rustc_trait_selection/src/solve/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/mod.rs @@ -231,13 +231,21 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { let mut candidates = Vec::new(); // LHS normalizes-to RHS - candidates.extend( - evaluate_normalizes_to(self, alias_lhs, rhs, direction, Invert::No).ok(), - ); + candidates.extend(evaluate_normalizes_to( + self, + alias_lhs, + rhs, + direction, + Invert::No, + )); // RHS normalizes-to RHS - candidates.extend( - evaluate_normalizes_to(self, alias_rhs, lhs, direction, Invert::Yes).ok(), - ); + candidates.extend(evaluate_normalizes_to( + self, + alias_rhs, + lhs, + direction, + Invert::Yes, + )); // Relate via substs let subst_relate_response = self.probe(|ecx| { let span = tracing::span!( @@ -265,10 +273,18 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { if let Some(merged) = self.try_merge_responses(&candidates) { Ok(merged) - } else if let Ok(subst_relate_response) = subst_relate_response { - Ok(subst_relate_response) } else { - self.flounder(&candidates) + // When relating two aliases and we have ambiguity, we prefer + // relating the generic arguments of the aliases over normalizing + // them. This is necessary for inference during typeck. + // + // As this is incomplete, we must not do so during coherence. 
+ match (self.solver_mode(), subst_relate_response) { + (SolverMode::Normal, Ok(response)) => Ok(response), + (SolverMode::Normal, Err(NoSolution)) | (SolverMode::Coherence, _) => { + self.flounder(&candidates) + } + } } } } diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index e8c5a8fab2a..d6fd457de06 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -5,7 +5,7 @@ //! [trait-specialization]: https://rustc-dev-guide.rust-lang.org/traits/specialization.html use crate::infer::outlives::env::OutlivesEnvironment; -use crate::infer::{CombinedSnapshot, InferOk}; +use crate::infer::InferOk; use crate::traits::outlives_bounds::InferCtxtExt as _; use crate::traits::select::IntercrateAmbiguityCause; use crate::traits::util::impl_subject_and_oblig; @@ -62,6 +62,21 @@ pub fn add_placeholder_note(err: &mut Diagnostic) { ); } +#[derive(Debug, Clone, Copy)] +enum TrackAmbiguityCauses { + Yes, + No, +} + +impl TrackAmbiguityCauses { + fn is_yes(self) -> bool { + match self { + TrackAmbiguityCauses::Yes => true, + TrackAmbiguityCauses::No => false, + } + } +} + /// If there are types that satisfy both impls, returns `Some` /// with a suitably-freshened `ImplHeader` with those types /// substituted. Otherwise, returns `None`. @@ -97,29 +112,28 @@ pub fn overlapping_impls( return None; } - let infcx = tcx - .infer_ctxt() - .with_opaque_type_inference(DefiningAnchor::Bubble) - .intercrate(true) - .build(); - let selcx = &mut SelectionContext::new(&infcx); - let overlaps = - overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).is_some(); - if !overlaps { - return None; - } + let _overlap_with_bad_diagnostics = overlap( + tcx, + TrackAmbiguityCauses::No, + skip_leak_check, + impl1_def_id, + impl2_def_id, + overlap_mode, + )?; // In the case where we detect an error, run the check again, but // this time tracking intercrate ambiguity causes for better // diagnostics. (These take time and can lead to false errors.) - let infcx = tcx - .infer_ctxt() - .with_opaque_type_inference(DefiningAnchor::Bubble) - .intercrate(true) - .build(); - let selcx = &mut SelectionContext::new(&infcx); - selcx.enable_tracking_intercrate_ambiguity_causes(); - Some(overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).unwrap()) + let overlap = overlap( + tcx, + TrackAmbiguityCauses::Yes, + skip_leak_check, + impl1_def_id, + impl2_def_id, + overlap_mode, + ) + .unwrap(); + Some(overlap) } fn with_fresh_ty_vars<'cx, 'tcx>( @@ -146,40 +160,34 @@ fn with_fresh_ty_vars<'cx, 'tcx>( /// Can both impl `a` and impl `b` be satisfied by a common type (including /// where-clauses)? If so, returns an `ImplHeader` that unifies the two impls. 
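// ---- editor's sketch (illustrative; not part of this commit) ----
// The restructured `overlapping_impls` above keeps its two-pass shape: a cheap
// pass with ambiguity-cause tracking disabled, then, only once overlap is found,
// a second pass with tracking enabled for better diagnostics. A minimal
// standalone model of that control flow; `Track`, `check`, and `overlapping`
// are invented names, and "overlap" is faked as "the input is even".
#[derive(Clone, Copy)]
enum Track { Yes, No }

fn check(track: Track, input: u32) -> Option<String> {
    (input % 2 == 0).then(|| match track {
        Track::No => String::from("overlap"),
        Track::Yes => format!("overlap of {input} (with ambiguity causes)"),
    })
}

fn overlapping(input: u32) -> Option<String> {
    // Cheap pass first; bail out early when there is nothing to report.
    check(Track::No, input)?;
    // Expensive, diagnostics-friendly pass only on the path that gets reported.
    check(Track::Yes, input)
}

fn main() {
    assert_eq!(overlapping(3), None);
    assert_eq!(overlapping(4).as_deref(), Some("overlap of 4 (with ambiguity causes)"));
}
// ---- end sketch ----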
-fn overlap<'cx, 'tcx>( - selcx: &mut SelectionContext<'cx, 'tcx>, +#[instrument(level = "debug", skip(tcx))] +fn overlap<'tcx>( + tcx: TyCtxt<'tcx>, + track_ambiguity_causes: TrackAmbiguityCauses, skip_leak_check: SkipLeakCheck, impl1_def_id: DefId, impl2_def_id: DefId, overlap_mode: OverlapMode, ) -> Option<OverlapResult<'tcx>> { - debug!( - "overlap(impl1_def_id={:?}, impl2_def_id={:?}, overlap_mode={:?})", - impl1_def_id, impl2_def_id, overlap_mode - ); - - selcx.infcx.probe_maybe_skip_leak_check(skip_leak_check.is_yes(), |snapshot| { - overlap_within_probe(selcx, impl1_def_id, impl2_def_id, overlap_mode, snapshot) - }) -} - -fn overlap_within_probe<'cx, 'tcx>( - selcx: &mut SelectionContext<'cx, 'tcx>, - impl1_def_id: DefId, - impl2_def_id: DefId, - overlap_mode: OverlapMode, - snapshot: &CombinedSnapshot<'tcx>, -) -> Option<OverlapResult<'tcx>> { - let infcx = selcx.infcx; - if overlap_mode.use_negative_impl() { - if negative_impl(infcx.tcx, impl1_def_id, impl2_def_id) - || negative_impl(infcx.tcx, impl2_def_id, impl1_def_id) + if negative_impl(tcx, impl1_def_id, impl2_def_id) + || negative_impl(tcx, impl2_def_id, impl1_def_id) { return None; } } + let infcx = tcx + .infer_ctxt() + .with_opaque_type_inference(DefiningAnchor::Bubble) + .skip_leak_check(skip_leak_check.is_yes()) + .intercrate(true) + .build(); + let selcx = &mut SelectionContext::new(&infcx); + if track_ambiguity_causes.is_yes() { + selcx.enable_tracking_intercrate_ambiguity_causes(); + } + // For the purposes of this check, we don't bring any placeholder // types into scope; instead, we replace the generic types with // fresh type variables, and hence we do our evaluations in an @@ -198,18 +206,23 @@ fn overlap_within_probe<'cx, 'tcx>( } } - // We disable the leak when creating the `snapshot` by using - // `infcx.probe_maybe_disable_leak_check`. - if infcx.leak_check(true, snapshot).is_err() { + // We toggle the `leak_check` by using `skip_leak_check` when constructing the + // inference context, so this may be a noop. 
+ if infcx.leak_check(ty::UniverseIndex::ROOT, None).is_err() { debug!("overlap: leak check failed"); return None; } let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); - - let involves_placeholder = - matches!(selcx.infcx.region_constraints_added_in_snapshot(snapshot), Some(true)); + let involves_placeholder = infcx + .inner + .borrow_mut() + .unwrap_region_constraints() + .data() + .constraints + .iter() + .any(|c| c.0.involves_placeholders()); let impl_header = selcx.infcx.resolve_vars_if_possible(impl1_header); Some(OverlapResult { impl_header, intercrate_ambiguity_causes, involves_placeholder }) diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs index a10ececbb1e..1470dc452a1 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs @@ -42,6 +42,7 @@ use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; use rustc_span::symbol::sym; use rustc_span::{ExpnKind, Span, DUMMY_SP}; +use std::borrow::Cow; use std::fmt; use std::iter; use std::ops::ControlFlow; @@ -1602,7 +1603,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> { }), ) => Some(( ty.span, - with_forced_trimmed_paths!(format!( + with_forced_trimmed_paths!(Cow::from(format!( "type mismatch resolving `{}`", self.resolve_vars_if_possible(predicate) .print(FmtPrinter::new_with_limit( @@ -1612,7 +1613,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> { )) .unwrap() .into_buffer() - )), + ))), )), _ => None, } @@ -1775,6 +1776,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> { || !trait_pred .skip_binder() .is_constness_satisfied_by(self.tcx.constness(def_id)) + || !self.tcx.is_user_visible_dep(def_id.krate) { return None; } diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 82bad96ea42..b5b8c7fe3ac 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -38,6 +38,7 @@ use rustc_span::def_id::LocalDefId; use rustc_span::symbol::{sym, Ident, Symbol}; use rustc_span::{BytePos, DesugaringKind, ExpnKind, MacroKind, Span, DUMMY_SP}; use rustc_target::spec::abi; +use std::borrow::Cow; use std::iter; use std::ops::Deref; @@ -186,7 +187,12 @@ pub trait TypeErrCtxtExt<'tcx> { trait_pred: ty::PolyTraitPredicate<'tcx>, ) -> bool; - fn get_closure_name(&self, def_id: DefId, err: &mut Diagnostic, msg: &str) -> Option<Symbol>; + fn get_closure_name( + &self, + def_id: DefId, + err: &mut Diagnostic, + msg: Cow<'static, str>, + ) -> Option<Symbol>; fn suggest_fn_call( &self, @@ -857,7 +863,12 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { /// Given a closure's `DefId`, return the given name of the closure. /// /// This doesn't account for reassignments, but it's only used for suggestions. - fn get_closure_name(&self, def_id: DefId, err: &mut Diagnostic, msg: &str) -> Option<Symbol> { + fn get_closure_name( + &self, + def_id: DefId, + err: &mut Diagnostic, + msg: Cow<'static, str>, + ) -> Option<Symbol> { let get_name = |err: &mut Diagnostic, kind: &hir::PatKind<'_>| -> Option<Symbol> { // Get the local name of this closure. 
This can be inaccurate because // of the possibility of reassignment, but this should be good enough. @@ -934,17 +945,17 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { let msg = match def_id_or_name { DefIdOrName::DefId(def_id) => match self.tcx.def_kind(def_id) { DefKind::Ctor(CtorOf::Struct, _) => { - "use parentheses to construct this tuple struct".to_string() + Cow::from("use parentheses to construct this tuple struct") } DefKind::Ctor(CtorOf::Variant, _) => { - "use parentheses to construct this tuple variant".to_string() + Cow::from("use parentheses to construct this tuple variant") } - kind => format!( + kind => Cow::from(format!( "use parentheses to call this {}", self.tcx.def_kind_descr(kind, def_id) - ), + )), }, - DefIdOrName::Name(name) => format!("use parentheses to call this {name}"), + DefIdOrName::Name(name) => Cow::from(format!("use parentheses to call this {name}")), }; let args = inputs @@ -979,7 +990,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { .. })) => { err.span_label(*fn_decl_span, "consider calling this closure"); - let Some(name) = self.get_closure_name(def_id, err, &msg) else { + let Some(name) = self.get_closure_name(def_id, err, msg.clone()) else { return false; }; name.to_string() @@ -1341,7 +1352,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { err.note(msg); } else { err.message = - vec![(rustc_errors::DiagnosticMessage::Str(msg), Style::NoStyle)]; + vec![(rustc_errors::DiagnosticMessage::from(msg), Style::NoStyle)]; } err.span_label( span, @@ -2958,7 +2969,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { for ty in bound_tys.skip_binder() { with_forced_trimmed_paths!(write!(msg, "`{}`, ", ty).unwrap()); } - err.note(msg.trim_end_matches(", ")) + err.note(msg.trim_end_matches(", ").to_string()) } ty::GeneratorWitnessMIR(def_id, substs) => { use std::fmt::Write; @@ -2972,7 +2983,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { let ty = bty.subst(tcx, substs); write!(msg, "`{}`, ", ty).unwrap(); } - err.note(msg.trim_end_matches(", ")) + err.note(msg.trim_end_matches(", ").to_string()) } ty::Generator(def_id, _, _) => { let sp = self.tcx.def_span(def_id); diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs index 2f85c32b575..88d2091de0f 100644 --- a/compiler/rustc_trait_selection/src/traits/fulfill.rs +++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs @@ -116,6 +116,7 @@ impl<'a, 'tcx> FulfillmentContext<'tcx> { } impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { + #[inline] fn register_predicate_obligation( &mut self, infcx: &InferCtxt<'tcx>, diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs index f265230ff77..f7389bda159 100644 --- a/compiler/rustc_trait_selection/src/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/mod.rs @@ -14,10 +14,12 @@ mod object_safety; pub mod outlives_bounds; mod project; pub mod query; +#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))] mod select; mod specialize; mod structural_match; mod structural_normalize; +#[cfg_attr(not(bootstrap), allow(hidden_glob_reexports))] mod util; mod vtable; pub mod wf; @@ -30,7 +32,7 @@ use rustc_errors::ErrorGuaranteed; use rustc_middle::query::Providers; use rustc_middle::ty::fold::TypeFoldable; use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt}; -use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeSuperVisitable}; +use 
rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeFolder, TypeSuperVisitable}; use rustc_middle::ty::{InternalSubsts, SubstsRef}; use rustc_span::def_id::DefId; use rustc_span::Span; @@ -270,8 +272,62 @@ pub fn normalize_param_env_or_error<'tcx>( // parameter environments once for every fn as it goes, // and errors will get reported then; so outside of type inference we // can be sure that no errors should occur. - let mut predicates: Vec<_> = - util::elaborate(tcx, unnormalized_env.caller_bounds().into_iter()).collect(); + let mut predicates: Vec<_> = util::elaborate( + tcx, + unnormalized_env.caller_bounds().into_iter().map(|predicate| { + if tcx.features().generic_const_exprs { + return predicate; + } + + struct ConstNormalizer<'tcx>(TyCtxt<'tcx>); + + impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ConstNormalizer<'tcx> { + fn interner(&self) -> TyCtxt<'tcx> { + self.0 + } + + fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> { + // While it is pretty sus to be evaluating things with an empty param env, it + // should actually be okay since without `feature(generic_const_exprs)` the only + // const arguments that have a non-empty param env are array repeat counts. These + // do not appear in the type system though. + c.eval(self.0, ty::ParamEnv::empty()) + } + } + + // This whole normalization step is a hack to work around the fact that + // `normalize_param_env_or_error` is fundamentally broken from using an + // unnormalized param env with a trait solver that expects the param env + // to be normalized. + // + // When normalizing the param env we can end up evaluating obligations + // that have been normalized but can only be proven via a where clause + // which is still in its unnormalized form. example: + // + // Attempting to prove `T: Trait<<u8 as Identity>::Assoc>` in a param env + // with a `T: Trait<<u8 as Identity>::Assoc>` where clause will fail because + // we first normalize obligations before proving them so we end up proving + // `T: Trait<u8>`. Since lazy normalization is not implemented equating `u8` + // with `<u8 as Identity>::Assoc` fails outright so we incorrectly believe that + // we cannot prove `T: Trait<u8>`. + // + // The same thing is true for const generics- attempting to prove + // `T: Trait<ConstKind::Unevaluated(...)>` with the same thing as a where clauses + // will fail. After normalization we may be attempting to prove `T: Trait<4>` with + // the unnormalized where clause `T: Trait<ConstKind::Unevaluated(...)>`. In order + // for the obligation to hold `4` must be equal to `ConstKind::Unevaluated(...)` + // but as we do not have lazy norm implemented, equating the two consts fails outright. + // + // Ideally we would not normalize consts here at all but it is required for backwards + // compatibility. Eventually when lazy norm is implemented this can just be removed. + // We do not normalize types here as there is no backwards compatibility requirement + // for us to do so. 
+ // + // FIXME(-Ztrait-solver=next): remove this hack since we have deferred projection equality + predicate.fold_with(&mut ConstNormalizer(tcx)) + }), + ) + .collect(); debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates); @@ -485,7 +541,7 @@ fn is_impossible_method(tcx: TyCtxt<'_>, (impl_def_id, trait_item_def_id): (DefI tcx, ObligationCause::dummy_with_span(*span), param_env, - ty::EarlyBinder(*pred).subst(tcx, impl_trait_ref.substs), + ty::EarlyBinder::bind(*pred).subst(tcx, impl_trait_ref.substs), ) }) }); diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs index c81bf6ebc2e..9582479941b 100644 --- a/compiler/rustc_trait_selection/src/traits/object_safety.rs +++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs @@ -642,7 +642,7 @@ fn receiver_for_self_ty<'tcx>( if param.index == 0 { self_ty.into() } else { tcx.mk_param_from_def(param) } }); - let result = EarlyBinder(receiver_ty).subst(tcx, substs); + let result = EarlyBinder::bind(receiver_ty).subst(tcx, substs); debug!( "receiver_for_self_ty({:?}, {:?}, {:?}) = {:?}", receiver_ty, self_ty, method_def_id, result diff --git a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs index 0e797a1cb60..f8d056e321e 100644 --- a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs @@ -1,9 +1,9 @@ use crate::infer::InferCtxt; -use crate::traits::query::type_op::{self, TypeOp, TypeOpOutput}; use crate::traits::{ObligationCause, ObligationCtxt}; use rustc_data_structures::fx::FxIndexSet; -use rustc_errors::ErrorGuaranteed; use rustc_infer::infer::resolve::OpportunisticRegionResolver; +use rustc_infer::infer::InferOk; +use rustc_middle::infer::canonical::{OriginalQueryValues, QueryRegionConstraints}; use rustc_middle::ty::{self, ParamEnv, Ty, TypeFolder, TypeVisitableExt}; use rustc_span::def_id::LocalDefId; @@ -68,20 +68,29 @@ impl<'a, 'tcx: 'a> InferCtxtExt<'a, 'tcx> for InferCtxt<'tcx> { return vec![]; } - let span = self.tcx.def_span(body_id); - let result: Result<_, ErrorGuaranteed> = param_env - .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty }) - .fully_perform(self, span); - let result = match result { - Ok(r) => r, - Err(_) => { - return vec![]; - } + let mut canonical_var_values = OriginalQueryValues::default(); + let canonical_ty = + self.canonicalize_query_keep_static(param_env.and(ty), &mut canonical_var_values); + let Ok(canonical_result) = self.tcx.implied_outlives_bounds(canonical_ty) else { + return vec![]; + }; + + let mut constraints = QueryRegionConstraints::default(); + let Ok(InferOk { value, obligations }) = self + .instantiate_nll_query_response_and_region_obligations( + &ObligationCause::dummy(), + param_env, + &canonical_var_values, + canonical_result, + &mut constraints, + ) else { + return vec![]; }; + assert_eq!(&obligations, &[]); - let TypeOpOutput { output, constraints, .. 
} = result; + if !constraints.is_empty() { + let span = self.tcx.def_span(body_id); - if let Some(constraints) = constraints { debug!(?constraints); if !constraints.member_constraints.is_empty() { span_bug!(span, "{:#?}", constraints.member_constraints); @@ -108,7 +117,7 @@ impl<'a, 'tcx: 'a> InferCtxtExt<'a, 'tcx> for InferCtxt<'tcx> { } }; - output + value } fn implied_bounds_tys( diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index 51069897120..65af0bb1c4e 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -824,7 +824,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for BoundVarReplacer<'_, 'tcx> { let universe = self.universe_for(debruijn); let p = ty::PlaceholderRegion { universe, bound: br }; self.mapped_regions.insert(p, br); - self.infcx.tcx.mk_re_placeholder(p) + ty::Region::new_placeholder(self.infcx.tcx, p) } _ => r, } @@ -945,7 +945,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for PlaceholderReplacer<'_, 'tcx> { let db = ty::DebruijnIndex::from_usize( self.universe_indices.len() - index + self.current_index.as_usize() - 1, ); - self.interner().mk_re_late_bound(db, *replace_var) + ty::Region::new_late_bound(self.interner(), db, *replace_var) } None => r1, } diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index 455b53bfb7d..709c3f432e6 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -1,6 +1,11 @@ -use rustc_middle::ty::{self, Ty, TyCtxt}; +use crate::traits::query::normalize::QueryNormalizeExt; +use crate::traits::query::NoSolution; +use crate::traits::{Normalized, ObligationCause, ObligationCtxt}; -pub use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult}; +use rustc_data_structures::fx::FxHashSet; +use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult}; +use rustc_middle::ty::{self, EarlyBinder, ParamEnvAnd, Ty, TyCtxt}; +use rustc_span::source_map::{Span, DUMMY_SP}; /// This returns true if the type `ty` is "trivial" for /// dropck-outlives -- that is, if it doesn't require any types to @@ -71,3 +76,263 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool { | ty::Generator(..) => false, } } + +pub fn compute_dropck_outlives_inner<'tcx>( + ocx: &ObligationCtxt<'_, 'tcx>, + goal: ParamEnvAnd<'tcx, Ty<'tcx>>, +) -> Result<DropckOutlivesResult<'tcx>, NoSolution> { + let tcx = ocx.infcx.tcx; + let ParamEnvAnd { param_env, value: for_ty } = goal; + + let mut result = DropckOutlivesResult { kinds: vec![], overflows: vec![] }; + + // A stack of types left to process. Each round, we pop + // something from the stack and invoke + // `dtorck_constraint_for_ty_inner`. This may produce new types that + // have to be pushed on the stack. This continues until we have explored + // all the reachable types from the type `for_ty`. + // + // Example: Imagine that we have the following code: + // + // ```rust + // struct A { + // value: B, + // children: Vec<A>, + // } + // + // struct B { + // value: u32 + // } + // + // fn f() { + // let a: A = ...; + // .. + // } // here, `a` is dropped + // ``` + // + // at the point where `a` is dropped, we need to figure out + // which types inside of `a` contain region data that may be + // accessed by any destructors in `a`. 
We begin by pushing `A` + // onto the stack, as that is the type of `a`. We will then + // invoke `dtorck_constraint_for_ty_inner` which will expand `A` + // into the types of its fields `(B, Vec<A>)`. These will get + // pushed onto the stack. Eventually, expanding `Vec<A>` will + // lead to us trying to push `A` a second time -- to prevent + // infinite recursion, we notice that `A` was already pushed + // once and stop. + let mut ty_stack = vec![(for_ty, 0)]; + + // Set used to detect infinite recursion. + let mut ty_set = FxHashSet::default(); + + let cause = ObligationCause::dummy(); + let mut constraints = DropckConstraint::empty(); + while let Some((ty, depth)) = ty_stack.pop() { + debug!( + "{} kinds, {} overflows, {} ty_stack", + result.kinds.len(), + result.overflows.len(), + ty_stack.len() + ); + dtorck_constraint_for_ty_inner(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?; + + // "outlives" represent types/regions that may be touched + // by a destructor. + result.kinds.append(&mut constraints.outlives); + result.overflows.append(&mut constraints.overflows); + + // If we have even one overflow, we should stop trying to evaluate further -- + // chances are, the subsequent overflows for this evaluation won't provide useful + // information and will just decrease the speed at which we can emit these errors + // (since we'll be printing for just that much longer for the often enormous types + // that result here). + if !result.overflows.is_empty() { + break; + } + + // dtorck types are "types that will get dropped but which + // do not themselves define a destructor", more or less. We have + // to push them onto the stack to be expanded. + for ty in constraints.dtorck_types.drain(..) { + let Normalized { value: ty, obligations } = + ocx.infcx.at(&cause, param_env).query_normalize(ty)?; + ocx.register_obligations(obligations); + + debug!("dropck_outlives: ty from dtorck_types = {:?}", ty); + + match ty.kind() { + // All parameters live for the duration of the + // function. + ty::Param(..) => {} + + // A projection that we couldn't resolve - it + // might have a destructor. + ty::Alias(..) => { + result.kinds.push(ty.into()); + } + + _ => { + if ty_set.insert(ty) { + ty_stack.push((ty, depth + 1)); + } + } + } + } + } + + debug!("dropck_outlives: result = {:#?}", result); + Ok(result) +} + +/// Returns a set of constraints that needs to be satisfied in +/// order for `ty` to be valid for destruction. +pub fn dtorck_constraint_for_ty_inner<'tcx>( + tcx: TyCtxt<'tcx>, + span: Span, + for_ty: Ty<'tcx>, + depth: usize, + ty: Ty<'tcx>, + constraints: &mut DropckConstraint<'tcx>, +) -> Result<(), NoSolution> { + debug!("dtorck_constraint_for_ty_inner({:?}, {:?}, {:?}, {:?})", span, for_ty, depth, ty); + + if !tcx.recursion_limit().value_within_limit(depth) { + constraints.overflows.push(ty); + return Ok(()); + } + + if trivial_dropck_outlives(tcx, ty) { + return Ok(()); + } + + match ty.kind() { + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Str + | ty::Never + | ty::Foreign(..) + | ty::RawPtr(..) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::GeneratorWitness(..) + | ty::GeneratorWitnessMIR(..) 
=> { + // these types never have a destructor + } + + ty::Array(ety, _) | ty::Slice(ety) => { + // single-element containers, behave like their element + rustc_data_structures::stack::ensure_sufficient_stack(|| { + dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, *ety, constraints) + })?; + } + + ty::Tuple(tys) => rustc_data_structures::stack::ensure_sufficient_stack(|| { + for ty in tys.iter() { + dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, ty, constraints)?; + } + Ok::<_, NoSolution>(()) + })?, + + ty::Closure(_, substs) => { + if !substs.as_closure().is_valid() { + // By the time this code runs, all type variables ought to + // be fully resolved. + + tcx.sess.delay_span_bug( + span, + format!("upvar_tys for closure not found. Expected capture information for closure {ty}",), + ); + return Err(NoSolution); + } + + rustc_data_structures::stack::ensure_sufficient_stack(|| { + for ty in substs.as_closure().upvar_tys() { + dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, ty, constraints)?; + } + Ok::<_, NoSolution>(()) + })? + } + + ty::Generator(_, substs, _movability) => { + // rust-lang/rust#49918: types can be constructed, stored + // in the interior, and sit idle when generator yields + // (and is subsequently dropped). + // + // It would be nice to descend into interior of a + // generator to determine what effects dropping it might + // have (by looking at any drop effects associated with + // its interior). + // + // However, the interior's representation uses things like + // GeneratorWitness that explicitly assume they are not + // traversed in such a manner. So instead, we will + // simplify things for now by treating all generators as + // if they were like trait objects, where its upvars must + // all be alive for the generator's (potential) + // destructor. + // + // In particular, skipping over `_interior` is safe + // because any side-effects from dropping `_interior` can + // only take place through references with lifetimes + // derived from lifetimes attached to the upvars and resume + // argument, and we *do* incorporate those here. + + if !substs.as_generator().is_valid() { + // By the time this code runs, all type variables ought to + // be fully resolved. + tcx.sess.delay_span_bug( + span, + format!("upvar_tys for generator not found. Expected capture information for generator {ty}",), + ); + return Err(NoSolution); + } + + constraints.outlives.extend( + substs + .as_generator() + .upvar_tys() + .map(|t| -> ty::subst::GenericArg<'tcx> { t.into() }), + ); + constraints.outlives.push(substs.as_generator().resume_ty().into()); + } + + ty::Adt(def, substs) => { + let DropckConstraint { dtorck_types, outlives, overflows } = + tcx.at(span).adt_dtorck_constraint(def.did())?; + // FIXME: we can try to recursively `dtorck_constraint_on_ty` + // there, but that needs some way to handle cycles. + constraints + .dtorck_types + .extend(dtorck_types.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs))); + constraints + .outlives + .extend(outlives.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs))); + constraints + .overflows + .extend(overflows.iter().map(|t| EarlyBinder::bind(*t).subst(tcx, substs))); + } + + // Objects must be alive in order for their destructor + // to be called. + ty::Dynamic(..) => { + constraints.outlives.push(ty.into()); + } + + // Types that can't be resolved. Pass them forward. + ty::Alias(..) | ty::Param(..) => { + constraints.dtorck_types.push(ty); + } + + ty::Placeholder(..) | ty::Bound(..) 
| ty::Infer(..) | ty::Error(_) => { + // By the time this code runs, all type variables ought to + // be fully resolved. + return Err(NoSolution); + } + } + + Ok(()) +} diff --git a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs index edbe2de8105..a8a74d7501a 100644 --- a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs +++ b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs @@ -90,7 +90,7 @@ impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> { Ok(EvaluationResult::EvaluatedToAmbig) } else if self.opaque_types_added_in_snapshot(snapshot) { Ok(EvaluationResult::EvaluatedToOkModuloOpaqueTypes) - } else if self.region_constraints_added_in_snapshot(snapshot).is_some() { + } else if self.region_constraints_added_in_snapshot(snapshot) { Ok(EvaluationResult::EvaluatedToOkModuloRegions) } else { Ok(EvaluationResult::EvaluatedToOk) diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs index c61f5454ec5..01d7a1e7913 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs @@ -1,8 +1,13 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::ObligationCtxt; +use rustc_hir::def_id::{DefId, CRATE_DEF_ID}; +use rustc_infer::traits::Obligation; use rustc_middle::traits::query::NoSolution; -use rustc_middle::ty::{ParamEnvAnd, TyCtxt}; +use rustc_middle::traits::{ObligationCause, ObligationCauseCode}; +use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, UserSelfTy, UserSubsts, UserType}; pub use rustc_middle::traits::query::type_op::AscribeUserType; +use rustc_span::{Span, DUMMY_SP}; impl<'tcx> super::QueryTypeOp<'tcx> for AscribeUserType<'tcx> { type QueryResponse = (); @@ -20,4 +25,116 @@ impl<'tcx> super::QueryTypeOp<'tcx> for AscribeUserType<'tcx> { ) -> Result<CanonicalQueryResponse<'tcx, ()>, NoSolution> { tcx.type_op_ascribe_user_type(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + type_op_ascribe_user_type_with_span(ocx, key, None) + } +} + +/// The core of the `type_op_ascribe_user_type` query: for diagnostics purposes in NLL HRTB errors, +/// this query can be re-run to better track the span of the obligation cause, and improve the error +/// message. Do not call directly unless you're in that very specific context. +pub fn type_op_ascribe_user_type_with_span<'tcx>( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, AscribeUserType<'tcx>>, + span: Option<Span>, +) -> Result<(), NoSolution> { + let (param_env, AscribeUserType { mir_ty, user_ty }) = key.into_parts(); + debug!("type_op_ascribe_user_type: mir_ty={:?} user_ty={:?}", mir_ty, user_ty); + let span = span.unwrap_or(DUMMY_SP); + match user_ty { + UserType::Ty(user_ty) => relate_mir_and_user_ty(ocx, param_env, span, mir_ty, user_ty)?, + UserType::TypeOf(def_id, user_substs) => { + relate_mir_and_user_substs(ocx, param_env, span, mir_ty, def_id, user_substs)? 
+ } + }; + Ok(()) +} + +#[instrument(level = "debug", skip(ocx, param_env, span))] +fn relate_mir_and_user_ty<'tcx>( + ocx: &ObligationCtxt<'_, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + span: Span, + mir_ty: Ty<'tcx>, + user_ty: Ty<'tcx>, +) -> Result<(), NoSolution> { + let cause = ObligationCause::dummy_with_span(span); + let user_ty = ocx.normalize(&cause, param_env, user_ty); + ocx.eq(&cause, param_env, mir_ty, user_ty)?; + + // FIXME(#104764): We should check well-formedness before normalization. + let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(user_ty.into())); + ocx.register_obligation(Obligation::new(ocx.infcx.tcx, cause, param_env, predicate)); + Ok(()) +} + +#[instrument(level = "debug", skip(ocx, param_env, span))] +fn relate_mir_and_user_substs<'tcx>( + ocx: &ObligationCtxt<'_, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + span: Span, + mir_ty: Ty<'tcx>, + def_id: DefId, + user_substs: UserSubsts<'tcx>, +) -> Result<(), NoSolution> { + let param_env = param_env.without_const(); + let UserSubsts { user_self_ty, substs } = user_substs; + let tcx = ocx.infcx.tcx; + let cause = ObligationCause::dummy_with_span(span); + + let ty = tcx.type_of(def_id).subst(tcx, substs); + let ty = ocx.normalize(&cause, param_env, ty); + debug!("relate_type_and_user_type: ty of def-id is {:?}", ty); + + ocx.eq(&cause, param_env, mir_ty, ty)?; + + // Prove the predicates coming along with `def_id`. + // + // Also, normalize the `instantiated_predicates` + // because otherwise we wind up with duplicate "type + // outlives" error messages. + let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs); + + debug!(?instantiated_predicates); + for (instantiated_predicate, predicate_span) in instantiated_predicates { + let span = if span == DUMMY_SP { predicate_span } else { span }; + let cause = ObligationCause::new( + span, + CRATE_DEF_ID, + ObligationCauseCode::AscribeUserTypeProvePredicate(predicate_span), + ); + let instantiated_predicate = + ocx.normalize(&cause.clone(), param_env, instantiated_predicate); + + ocx.register_obligation(Obligation::new(tcx, cause, param_env, instantiated_predicate)); + } + + if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { + let self_ty = ocx.normalize(&cause, param_env, self_ty); + let impl_self_ty = tcx.type_of(impl_def_id).subst(tcx, substs); + let impl_self_ty = ocx.normalize(&cause, param_env, impl_self_ty); + + ocx.eq(&cause, param_env, self_ty, impl_self_ty)?; + let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(impl_self_ty.into())); + ocx.register_obligation(Obligation::new(tcx, cause.clone(), param_env, predicate)); + } + + // In addition to proving the predicates, we have to + // prove that `ty` is well-formed -- this is because + // the WF of `ty` is predicated on the substs being + // well-formed, and we haven't proven *that*. We don't + // want to prove the WF of types from `substs` directly because they + // haven't been normalized. + // + // FIXME(nmatsakis): Well, perhaps we should normalize + // them? This would only be relevant if some input + // type were ill-formed but did not appear in `ty`, + // which...could happen with normalization... 
+ let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into())); + ocx.register_obligation(Obligation::new(tcx, cause, param_env, predicate)); + Ok(()) } diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs index 40f8ecfd4ce..f6589308806 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs @@ -1,5 +1,7 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::ObligationCtxt; use rustc_middle::traits::query::NoSolution; +use rustc_middle::traits::ObligationCause; use rustc_middle::ty::{ParamEnvAnd, TyCtxt}; pub use rustc_middle::traits::query::type_op::Eq; @@ -20,4 +22,12 @@ impl<'tcx> super::QueryTypeOp<'tcx> for Eq<'tcx> { ) -> Result<CanonicalQueryResponse<'tcx, ()>, NoSolution> { tcx.type_op_eq(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + ocx.eq(&ObligationCause::dummy(), key.param_env, key.value.a, key.value.b)?; + Ok(()) + } } diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs index 26f0d554d35..9989fc9c479 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs @@ -1,7 +1,15 @@ -use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::query::NoSolution; +use crate::traits::wf; +use crate::traits::ObligationCtxt; + +use rustc_infer::infer::canonical::Canonical; +use rustc_infer::infer::outlives::components::{push_outlives_components, Component}; use rustc_infer::traits::query::OutlivesBound; -use rustc_middle::traits::query::NoSolution; -use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt}; +use rustc_middle::infer::canonical::CanonicalQueryResponse; +use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt}; +use rustc_span::def_id::CRATE_DEF_ID; +use rustc_span::source_map::DUMMY_SP; +use smallvec::{smallvec, SmallVec}; #[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct ImpliedOutlivesBounds<'tcx> { @@ -39,4 +47,169 @@ impl<'tcx> super::QueryTypeOp<'tcx> for ImpliedOutlivesBounds<'tcx> { tcx.implied_outlives_bounds(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + compute_implied_outlives_bounds_inner(ocx, key.param_env, key.value.ty) + } +} + +pub fn compute_implied_outlives_bounds_inner<'tcx>( + ocx: &ObligationCtxt<'_, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>, +) -> Result<Vec<OutlivesBound<'tcx>>, NoSolution> { + let tcx = ocx.infcx.tcx; + + // Sometimes when we ask what it takes for T: WF, we get back that + // U: WF is required; in that case, we push U onto this stack and + // process it next. Because the resulting predicates aren't always + // guaranteed to be a subset of the original type, so we need to store the + // WF args we've computed in a set. 
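// ---- editor's sketch (illustrative; not part of this commit) ----
// The loop below is a classic worklist with a visited set: processing one item
// may enqueue further items, and the set guarantees termination even on cycles.
// A minimal standalone version of that shape; the names and the u32 "items"
// are invented for illustration.
use std::collections::HashSet;

fn process_all(root: u32, expand: impl Fn(u32) -> Vec<u32>) -> Vec<u32> {
    let mut seen = HashSet::new();
    let mut stack = vec![root];
    let mut processed = Vec::new();
    while let Some(item) = stack.pop() {
        if !seen.insert(item) {
            continue; // already handled; this is what prevents infinite loops
        }
        processed.push(item);
        stack.extend(expand(item));
    }
    processed
}

fn main() {
    // 0 -> [1, 2], 1 -> [2], 2 -> [0]: a cycle, yet every item is visited exactly once.
    let out = process_all(0, |n| match n { 0 => vec![1, 2], 1 => vec![2], _ => vec![0] });
    assert_eq!(out.len(), 3);
}
// ---- end sketch ----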
+ let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default(); + let mut wf_args = vec![ty.into()]; + + let mut outlives_bounds: Vec<ty::OutlivesPredicate<ty::GenericArg<'tcx>, ty::Region<'tcx>>> = + vec![]; + + while let Some(arg) = wf_args.pop() { + if !checked_wf_args.insert(arg) { + continue; + } + + // Compute the obligations for `arg` to be well-formed. If `arg` is + // an unresolved inference variable, just substituted an empty set + // -- because the return type here is going to be things we *add* + // to the environment, it's always ok for this set to be smaller + // than the ultimate set. (Note: normally there won't be + // unresolved inference variables here anyway, but there might be + // during typeck under some circumstances.) + // + // FIXME(@lcnr): It's not really "always fine", having fewer implied + // bounds can be backward incompatible, e.g. #101951 was caused by + // us not dealing with inference vars in `TypeOutlives` predicates. + let obligations = wf::obligations(ocx.infcx, param_env, CRATE_DEF_ID, 0, arg, DUMMY_SP) + .unwrap_or_default(); + + for obligation in obligations { + debug!(?obligation); + assert!(!obligation.has_escaping_bound_vars()); + + // While these predicates should all be implied by other parts of + // the program, they are still relevant as they may constrain + // inference variables, which is necessary to add the correct + // implied bounds in some cases, mostly when dealing with projections. + // + // Another important point here: we only register `Projection` + // predicates, since otherwise we might register outlives + // predicates containing inference variables, and we don't + // learn anything new from those. + if obligation.predicate.has_non_region_infer() { + match obligation.predicate.kind().skip_binder() { + ty::PredicateKind::Clause(ty::Clause::Projection(..)) + | ty::PredicateKind::AliasRelate(..) => { + ocx.register_obligation(obligation.clone()); + } + _ => {} + } + } + + let pred = match obligation.predicate.kind().no_bound_vars() { + None => continue, + Some(pred) => pred, + }; + match pred { + ty::PredicateKind::Clause(ty::Clause::Trait(..)) + // FIXME(const_generics): Make sure that `<'a, 'b, const N: &'a &'b u32>` is sound + // if we ever support that + | ty::PredicateKind::Clause(ty::Clause::ConstArgHasType(..)) + | ty::PredicateKind::Subtype(..) + | ty::PredicateKind::Coerce(..) + | ty::PredicateKind::Clause(ty::Clause::Projection(..)) + | ty::PredicateKind::ClosureKind(..) + | ty::PredicateKind::ObjectSafe(..) + | ty::PredicateKind::ConstEvaluatable(..) + | ty::PredicateKind::ConstEquate(..) + | ty::PredicateKind::Ambiguous + | ty::PredicateKind::AliasRelate(..) + | ty::PredicateKind::TypeWellFormedFromEnv(..) => {} + + // We need to search through *all* WellFormed predicates + ty::PredicateKind::WellFormed(arg) => { + wf_args.push(arg); + } + + // We need to register region relationships + ty::PredicateKind::Clause(ty::Clause::RegionOutlives(ty::OutlivesPredicate( + r_a, + r_b, + ))) => outlives_bounds.push(ty::OutlivesPredicate(r_a.into(), r_b)), + + ty::PredicateKind::Clause(ty::Clause::TypeOutlives(ty::OutlivesPredicate( + ty_a, + r_b, + ))) => outlives_bounds.push(ty::OutlivesPredicate(ty_a.into(), r_b)), + } + } + } + + // This call to `select_all_or_error` is necessary to constrain inference variables, which we + // use further down when computing the implied bounds. 
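// ---- editor's sketch (illustrative; not part of this commit) ----
// What implied outlives bounds buy us at the surface level: the well-formedness
// of the parameter type `&'a &'b u32` lets the compiler assume `'b: 'a` without
// an explicit where clause, which is the kind of fact the computation above
// extracts from WF obligations. The function below is ordinary user code.
fn shorten<'a, 'b>(x: &'a &'b u32) -> &'a u32 {
    // Needs `'b: 'a`; accepted because that bound is implied by the parameter type.
    *x
}

fn main() {
    let value = 1u32;
    let inner = &value;
    assert_eq!(*shorten(&inner), 1);
}
// ---- end sketch ----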
+ match ocx.select_all_or_error().as_slice() { + [] => (), + _ => return Err(NoSolution), + } + + // We lazily compute the outlives components as + // `select_all_or_error` constrains inference variables. + let implied_bounds = outlives_bounds + .into_iter() + .flat_map(|ty::OutlivesPredicate(a, r_b)| match a.unpack() { + ty::GenericArgKind::Lifetime(r_a) => vec![OutlivesBound::RegionSubRegion(r_b, r_a)], + ty::GenericArgKind::Type(ty_a) => { + let ty_a = ocx.infcx.resolve_vars_if_possible(ty_a); + let mut components = smallvec![]; + push_outlives_components(tcx, ty_a, &mut components); + implied_bounds_from_components(r_b, components) + } + ty::GenericArgKind::Const(_) => unreachable!(), + }) + .collect(); + + Ok(implied_bounds) +} + +/// When we have an implied bound that `T: 'a`, we can further break +/// this down to determine what relationships would have to hold for +/// `T: 'a` to hold. We get to assume that the caller has validated +/// those relationships. +fn implied_bounds_from_components<'tcx>( + sub_region: ty::Region<'tcx>, + sup_components: SmallVec<[Component<'tcx>; 4]>, +) -> Vec<OutlivesBound<'tcx>> { + sup_components + .into_iter() + .filter_map(|component| { + match component { + Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)), + Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)), + Component::Alias(p) => Some(OutlivesBound::RegionSubAlias(sub_region, p)), + Component::EscapingAlias(_) => + // If the projection has escaping regions, don't + // try to infer any implied bounds even for its + // free components. This is conservative, because + // the caller will still have to prove that those + // free components outlive `sub_region`. But the + // idea is that the WAY that the caller proves + // that may change in the future and we want to + // give ourselves room to get smarter here. + { + None + } + Component::UnresolvedInferenceVariable(..) => None, + } + }) + .collect() } diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs index 64232659848..642fdec2d9a 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs @@ -2,7 +2,7 @@ use crate::infer::canonical::{ Canonical, CanonicalQueryResponse, OriginalQueryValues, QueryRegionConstraints, }; use crate::infer::{InferCtxt, InferOk}; -use crate::traits::ObligationCause; +use crate::traits::{ObligationCause, ObligationCtxt}; use rustc_errors::ErrorGuaranteed; use rustc_infer::infer::canonical::Certainty; use rustc_infer::traits::PredicateObligations; @@ -23,6 +23,8 @@ pub mod subtype; pub use rustc_middle::traits::query::type_op::*; +use self::custom::scrape_region_constraints; + /// "Type ops" are used in NLL to perform some particular action and /// extract out the resulting region constraints (or an error if it /// cannot be completed). @@ -81,6 +83,17 @@ pub trait QueryTypeOp<'tcx>: fmt::Debug + Copy + TypeFoldable<TyCtxt<'tcx>> + 't canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Self>>, ) -> Result<CanonicalQueryResponse<'tcx, Self::QueryResponse>, NoSolution>; + /// In the new trait solver, we already do caching in the solver itself, + /// so there's no need to canonicalize and cache via the query system. 
+ /// Additionally, even if we were to canonicalize, we'd still need to + /// make sure to feed it predefined opaque types and the defining anchor + /// and that would require duplicating all of the tcx queries. Instead, + /// just perform these ops locally. + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution>; + fn fully_perform_into( query_key: ParamEnvAnd<'tcx, Self>, infcx: &InferCtxt<'tcx>, @@ -133,6 +146,16 @@ where infcx: &InferCtxt<'tcx>, span: Span, ) -> Result<TypeOpOutput<'tcx, Self>, ErrorGuaranteed> { + if infcx.tcx.trait_solver_next() { + return Ok(scrape_region_constraints( + infcx, + |ocx| QueryTypeOp::perform_locally_in_new_solver(ocx, self), + "query type op", + span, + )? + .0); + } + let mut region_constraints = QueryRegionConstraints::default(); let (output, error_info, mut obligations, _) = Q::fully_perform_into(self, infcx, &mut region_constraints).map_err(|_| { diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs index 776c74fdfae..57ca14aa492 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs @@ -1,5 +1,7 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::ObligationCtxt; use rustc_middle::traits::query::NoSolution; +use rustc_middle::traits::ObligationCause; use rustc_middle::ty::fold::TypeFoldable; use rustc_middle::ty::{self, Lift, ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt}; use std::fmt; @@ -22,6 +24,14 @@ where ) -> Result<CanonicalQueryResponse<'tcx, Self::QueryResponse>, NoSolution> { T::type_op_method(tcx, canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + // FIXME(-Ztrait-solver=next): shouldn't be using old normalizer + Ok(ocx.normalize(&ObligationCause::dummy(), key.param_env, key.value.value)) + } } pub trait Normalizable<'tcx>: fmt::Debug + TypeFoldable<TyCtxt<'tcx>> + Lift<'tcx> + Copy { diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs index 7ce09bbdb7a..98894263374 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs @@ -1,6 +1,9 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; -use crate::traits::query::dropck_outlives::{trivial_dropck_outlives, DropckOutlivesResult}; -use rustc_middle::traits::query::NoSolution; +use crate::traits::query::dropck_outlives::{ + compute_dropck_outlives_inner, trivial_dropck_outlives, +}; +use crate::traits::ObligationCtxt; +use rustc_middle::traits::query::{DropckOutlivesResult, NoSolution}; use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)] @@ -48,4 +51,11 @@ impl<'tcx> super::QueryTypeOp<'tcx> for DropckOutlives<'tcx> { tcx.dropck_outlives(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + compute_dropck_outlives_inner(ocx, key.param_env.and(key.value.dropped_ty)) + } } diff --git 
a/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs index 7c02f363960..47850bc330d 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs @@ -1,5 +1,8 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::ObligationCtxt; +use rustc_infer::traits::Obligation; use rustc_middle::traits::query::NoSolution; +use rustc_middle::traits::ObligationCause; use rustc_middle::ty::{self, ParamEnvAnd, TyCtxt}; pub use rustc_middle::traits::query::type_op::ProvePredicate; @@ -36,4 +39,17 @@ impl<'tcx> super::QueryTypeOp<'tcx> for ProvePredicate<'tcx> { ) -> Result<CanonicalQueryResponse<'tcx, ()>, NoSolution> { tcx.type_op_prove_predicate(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + ocx.register_obligation(Obligation::new( + ocx.infcx.tcx, + ObligationCause::dummy(), + key.param_env, + key.value.predicate, + )); + Ok(()) + } } diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs index 2f2b931afcf..10976d5cd71 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs @@ -1,5 +1,7 @@ use crate::infer::canonical::{Canonical, CanonicalQueryResponse}; +use crate::traits::ObligationCtxt; use rustc_middle::traits::query::NoSolution; +use rustc_middle::traits::ObligationCause; use rustc_middle::ty::{ParamEnvAnd, TyCtxt}; pub use rustc_middle::traits::query::type_op::Subtype; @@ -17,4 +19,12 @@ impl<'tcx> super::QueryTypeOp<'tcx> for Subtype<'tcx> { ) -> Result<CanonicalQueryResponse<'tcx, ()>, NoSolution> { tcx.type_op_subtype(canonicalized) } + + fn perform_locally_in_new_solver( + ocx: &ObligationCtxt<'_, 'tcx>, + key: ParamEnvAnd<'tcx, Self>, + ) -> Result<Self::QueryResponse, NoSolution> { + ocx.sub(&ObligationCause::dummy(), key.param_env, key.value.sub, key.value.sup)?; + Ok(()) + } } diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 8bc82b9f549..3c223db5a0b 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -360,7 +360,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // consider a "quick reject". This avoids creating more types // and so forth that we need to. 
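// ---- editor's sketch (illustrative; not part of this commit) ----
// The "quick reject" mentioned above is a cheap, conservative filter: it may
// answer "possibly unifies" for pairs that later fail full matching, but it
// must never reject a genuine match. A minimal standalone model of that
// contract; `SimplifiedTy` and `may_unify` are invented names.
#[derive(Clone, Copy, PartialEq)]
enum SimplifiedTy { Int, Bool, Unknown }

fn may_unify(a: SimplifiedTy, b: SimplifiedTy) -> bool {
    // `Unknown` stands in for inference variables and parameters: never reject those.
    matches!(a, SimplifiedTy::Unknown) || matches!(b, SimplifiedTy::Unknown) || a == b
}

fn main() {
    assert!(may_unify(SimplifiedTy::Int, SimplifiedTy::Int));
    assert!(may_unify(SimplifiedTy::Unknown, SimplifiedTy::Bool));
    assert!(!may_unify(SimplifiedTy::Int, SimplifiedTy::Bool)); // safe to skip this impl
}
// ---- end sketch ----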
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); - if !drcx.substs_refs_may_unify(obligation_substs, impl_trait_ref.0.substs) { + if !drcx + .substs_refs_may_unify(obligation_substs, impl_trait_ref.skip_binder().substs) + { return; } if self.reject_fn_ptr_impls( diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index 0d9f55d4c2e..4e961c3ee73 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -527,9 +527,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { substs.extend(trait_predicate.trait_ref.substs.iter()); let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> = smallvec::SmallVec::with_capacity( - bound.0.kind().bound_vars().len() + defs.count(), + bound.skip_binder().kind().bound_vars().len() + defs.count(), ); - bound_vars.extend(bound.0.kind().bound_vars().into_iter()); + bound_vars.extend(bound.skip_binder().kind().bound_vars().into_iter()); InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param .kind { @@ -550,7 +550,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name); let bound_var = ty::BoundVariableKind::Region(kind); bound_vars.push(bound_var); - tcx.mk_re_late_bound( + ty::Region::new_late_bound( + tcx, ty::INNERMOST, ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 3baf1c97c9f..42c1b629ac2 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -561,9 +561,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>, ) -> Result<EvaluationResult, OverflowError> { self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> { + let outer_universe = self.infcx.universe(); let result = op(self)?; - match self.infcx.leak_check(true, snapshot) { + match self.infcx.leak_check(outer_universe, Some(snapshot)) { Ok(()) => {} Err(_) => return Ok(EvaluatedToErr), } @@ -572,9 +573,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { return Ok(result.max(EvaluatedToOkModuloOpaqueTypes)); } - match self.infcx.region_constraints_added_in_snapshot(snapshot) { - None => Ok(result), - Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)), + if self.infcx.region_constraints_added_in_snapshot(snapshot) { + Ok(result.max(EvaluatedToOkModuloRegions)) + } else { + Ok(result) } }) } @@ -2149,13 +2151,11 @@ impl<'tcx> SelectionContext<'_, 'tcx> { ty::Adt(def, substs) => { let sized_crit = def.sized_constraint(self.tcx()); // (*) binder moved here - Where(obligation.predicate.rebind({ - sized_crit - .0 - .iter() - .map(|ty| sized_crit.rebind(*ty).subst(self.tcx(), substs)) - .collect() - })) + Where( + obligation + .predicate + .rebind(sized_crit.subst_iter_copied(self.tcx(), substs).collect()), + ) } ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) 
=> None, @@ -3029,7 +3029,7 @@ fn bind_generator_hidden_types_above<'tcx>( kind: ty::BrAnon(None), }; counter += 1; - tcx.mk_re_late_bound(current_depth, br) + ty::Region::new_late_bound(tcx, current_depth, br) } r => bug!("unexpected region: {r:?}"), }) diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs index 82f3df40198..e2c9c62512e 100644 --- a/compiler/rustc_trait_selection/src/traits/util.rs +++ b/compiler/rustc_trait_selection/src/traits/util.rs @@ -41,7 +41,12 @@ impl<'tcx> TraitAliasExpansionInfo<'tcx> { /// Adds diagnostic labels to `diag` for the expansion path of a trait through all intermediate /// trait aliases. - pub fn label_with_exp_info(&self, diag: &mut Diagnostic, top_label: &str, use_desc: &str) { + pub fn label_with_exp_info( + &self, + diag: &mut Diagnostic, + top_label: &'static str, + use_desc: &str, + ) { diag.span_label(self.top().1, top_label); if self.path.len() > 1 { for (_, sp) in self.path.iter().rev().skip(1).take(self.path.len() - 2) { diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs index c319b2e31c7..38f94c38861 100644 --- a/compiler/rustc_traits/src/chalk/db.rs +++ b/compiler/rustc_traits/src/chalk/db.rs @@ -294,7 +294,7 @@ impl<'tcx> chalk_solve::RustIrDatabase<RustInterner<'tcx>> for RustIrDatabase<'t }; Arc::new(chalk_solve::rust_ir::FnDefDatum { id: fn_def_id, - sig: sig.0.lower_into(self.interner), + sig: sig.skip_binder().lower_into(self.interner), binders: chalk_ir::Binders::new(binders, bound), }) } @@ -727,7 +727,7 @@ fn bound_vars_for_item(tcx: TyCtxt<'_>, def_id: DefId) -> SubstsRef<'_> { var: ty::BoundVar::from_usize(substs.len()), kind: ty::BrAnon(None), }; - tcx.mk_re_late_bound(ty::INNERMOST, br).into() + ty::Region::new_late_bound(tcx, ty::INNERMOST, br).into() } ty::GenericParamDefKind::Const { .. 
} => tcx diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs index e447ab94f64..e6c6e0f13df 100644 --- a/compiler/rustc_traits/src/chalk/lowering.rs +++ b/compiler/rustc_traits/src/chalk/lowering.rs @@ -542,7 +542,8 @@ impl<'tcx> LowerInto<'tcx, Region<'tcx>> for &chalk_ir::Lifetime<RustInterner<'t fn lower_into(self, interner: RustInterner<'tcx>) -> Region<'tcx> { let tcx = interner.tcx; match self.data(interner) { - chalk_ir::LifetimeData::BoundVar(var) => tcx.mk_re_late_bound( + chalk_ir::LifetimeData::BoundVar(var) => ty::Region::new_late_bound( + tcx, ty::DebruijnIndex::from_u32(var.debruijn.depth()), ty::BoundRegion { var: ty::BoundVar::from_usize(var.index), @@ -550,13 +551,16 @@ impl<'tcx> LowerInto<'tcx, Region<'tcx>> for &chalk_ir::Lifetime<RustInterner<'t }, ), chalk_ir::LifetimeData::InferenceVar(_var) => unimplemented!(), - chalk_ir::LifetimeData::Placeholder(p) => tcx.mk_re_placeholder(ty::Placeholder { - universe: ty::UniverseIndex::from_usize(p.ui.counter), - bound: ty::BoundRegion { - var: ty::BoundVar::from_usize(p.idx), - kind: ty::BoundRegionKind::BrAnon(None), + chalk_ir::LifetimeData::Placeholder(p) => ty::Region::new_placeholder( + tcx, + ty::Placeholder { + universe: ty::UniverseIndex::from_usize(p.ui.counter), + bound: ty::BoundRegion { + var: ty::BoundVar::from_usize(p.idx), + kind: ty::BoundRegionKind::BrAnon(None), + }, }, - }), + ), chalk_ir::LifetimeData::Static => tcx.lifetimes.re_static, chalk_ir::LifetimeData::Erased => tcx.lifetimes.re_erased, chalk_ir::LifetimeData::Phantom(void, _) => match *void {}, @@ -1051,7 +1055,7 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for NamedBoundVarSubstitutor<'a, 'tcx> { ty::BrNamed(def_id, _name) => match self.named_parameters.get(&def_id) { Some(_) => { let new_br = ty::BoundRegion { var: br.var, kind: ty::BrAnon(None) }; - return self.tcx.mk_re_late_bound(index, new_br); + return ty::Region::new_late_bound(self.tcx, index, new_br); } None => panic!("Missing `BrNamed`."), }, @@ -1142,7 +1146,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ParamsSubstitutor<'tcx> { var: ty::BoundVar::from_u32(*idx), kind: ty::BrAnon(None), }; - self.tcx.mk_re_late_bound(self.binder_index, br) + ty::Region::new_late_bound(self.tcx, self.binder_index, br) } None => { let idx = self.named_regions.len() as u32; @@ -1151,7 +1155,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ParamsSubstitutor<'tcx> { kind: ty::BrAnon(None), }; self.named_regions.insert(_re.def_id, idx); - self.tcx.mk_re_late_bound(self.binder_index, br) + ty::Region::new_late_bound(self.tcx, self.binder_index, br) } }, diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs index 83f6c7d07fe..f35c14eeac8 100644 --- a/compiler/rustc_traits/src/dropck_outlives.rs +++ b/compiler/rustc_traits/src/dropck_outlives.rs @@ -3,17 +3,14 @@ use rustc_hir::def_id::DefId; use rustc_infer::infer::canonical::{Canonical, QueryResponse}; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::query::Providers; +use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult}; use rustc_middle::ty::InternalSubsts; -use rustc_middle::ty::{self, EarlyBinder, ParamEnvAnd, Ty, TyCtxt}; -use rustc_span::source_map::{Span, DUMMY_SP}; +use rustc_middle::ty::TyCtxt; use rustc_trait_selection::infer::InferCtxtBuilderExt; -use rustc_trait_selection::traits::query::dropck_outlives::trivial_dropck_outlives; use rustc_trait_selection::traits::query::dropck_outlives::{ - DropckConstraint, 
DropckOutlivesResult, + compute_dropck_outlives_inner, dtorck_constraint_for_ty_inner, }; -use rustc_trait_selection::traits::query::normalize::QueryNormalizeExt; use rustc_trait_selection::traits::query::{CanonicalTyGoal, NoSolution}; -use rustc_trait_selection::traits::{Normalized, ObligationCause}; pub(crate) fn provide(p: &mut Providers) { *p = Providers { dropck_outlives, adt_dtorck_constraint, ..*p }; @@ -26,263 +23,10 @@ fn dropck_outlives<'tcx>( debug!("dropck_outlives(goal={:#?})", canonical_goal); tcx.infer_ctxt().enter_canonical_trait_query(&canonical_goal, |ocx, goal| { - let tcx = ocx.infcx.tcx; - let ParamEnvAnd { param_env, value: for_ty } = goal; - - let mut result = DropckOutlivesResult { kinds: vec![], overflows: vec![] }; - - // A stack of types left to process. Each round, we pop - // something from the stack and invoke - // `dtorck_constraint_for_ty`. This may produce new types that - // have to be pushed on the stack. This continues until we have explored - // all the reachable types from the type `for_ty`. - // - // Example: Imagine that we have the following code: - // - // ```rust - // struct A { - // value: B, - // children: Vec<A>, - // } - // - // struct B { - // value: u32 - // } - // - // fn f() { - // let a: A = ...; - // .. - // } // here, `a` is dropped - // ``` - // - // at the point where `a` is dropped, we need to figure out - // which types inside of `a` contain region data that may be - // accessed by any destructors in `a`. We begin by pushing `A` - // onto the stack, as that is the type of `a`. We will then - // invoke `dtorck_constraint_for_ty` which will expand `A` - // into the types of its fields `(B, Vec<A>)`. These will get - // pushed onto the stack. Eventually, expanding `Vec<A>` will - // lead to us trying to push `A` a second time -- to prevent - // infinite recursion, we notice that `A` was already pushed - // once and stop. - let mut ty_stack = vec![(for_ty, 0)]; - - // Set used to detect infinite recursion. - let mut ty_set = FxHashSet::default(); - - let cause = ObligationCause::dummy(); - let mut constraints = DropckConstraint::empty(); - while let Some((ty, depth)) = ty_stack.pop() { - debug!( - "{} kinds, {} overflows, {} ty_stack", - result.kinds.len(), - result.overflows.len(), - ty_stack.len() - ); - dtorck_constraint_for_ty(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?; - - // "outlives" represent types/regions that may be touched - // by a destructor. - result.kinds.append(&mut constraints.outlives); - result.overflows.append(&mut constraints.overflows); - - // If we have even one overflow, we should stop trying to evaluate further -- - // chances are, the subsequent overflows for this evaluation won't provide useful - // information and will just decrease the speed at which we can emit these errors - // (since we'll be printing for just that much longer for the often enormous types - // that result here). - if !result.overflows.is_empty() { - break; - } - - // dtorck types are "types that will get dropped but which - // do not themselves define a destructor", more or less. We have - // to push them onto the stack to be expanded. - for ty in constraints.dtorck_types.drain(..) { - let Normalized { value: ty, obligations } = - ocx.infcx.at(&cause, param_env).query_normalize(ty)?; - ocx.register_obligations(obligations); - - debug!("dropck_outlives: ty from dtorck_types = {:?}", ty); - - match ty.kind() { - // All parameters live for the duration of the - // function. - ty::Param(..) 
=> {} - - // A projection that we couldn't resolve - it - // might have a destructor. - ty::Alias(..) => { - result.kinds.push(ty.into()); - } - - _ => { - if ty_set.insert(ty) { - ty_stack.push((ty, depth + 1)); - } - } - } - } - } - - debug!("dropck_outlives: result = {:#?}", result); - Ok(result) + compute_dropck_outlives_inner(ocx, goal) }) } -/// Returns a set of constraints that needs to be satisfied in -/// order for `ty` to be valid for destruction. -fn dtorck_constraint_for_ty<'tcx>( - tcx: TyCtxt<'tcx>, - span: Span, - for_ty: Ty<'tcx>, - depth: usize, - ty: Ty<'tcx>, - constraints: &mut DropckConstraint<'tcx>, -) -> Result<(), NoSolution> { - debug!("dtorck_constraint_for_ty({:?}, {:?}, {:?}, {:?})", span, for_ty, depth, ty); - - if !tcx.recursion_limit().value_within_limit(depth) { - constraints.overflows.push(ty); - return Ok(()); - } - - if trivial_dropck_outlives(tcx, ty) { - return Ok(()); - } - - match ty.kind() { - ty::Bool - | ty::Char - | ty::Int(_) - | ty::Uint(_) - | ty::Float(_) - | ty::Str - | ty::Never - | ty::Foreign(..) - | ty::RawPtr(..) - | ty::Ref(..) - | ty::FnDef(..) - | ty::FnPtr(_) - | ty::GeneratorWitness(..) - | ty::GeneratorWitnessMIR(..) => { - // these types never have a destructor - } - - ty::Array(ety, _) | ty::Slice(ety) => { - // single-element containers, behave like their element - rustc_data_structures::stack::ensure_sufficient_stack(|| { - dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, *ety, constraints) - })?; - } - - ty::Tuple(tys) => rustc_data_structures::stack::ensure_sufficient_stack(|| { - for ty in tys.iter() { - dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?; - } - Ok::<_, NoSolution>(()) - })?, - - ty::Closure(_, substs) => { - if !substs.as_closure().is_valid() { - // By the time this code runs, all type variables ought to - // be fully resolved. - - tcx.sess.delay_span_bug( - span, - format!("upvar_tys for closure not found. Expected capture information for closure {ty}",), - ); - return Err(NoSolution); - } - - rustc_data_structures::stack::ensure_sufficient_stack(|| { - for ty in substs.as_closure().upvar_tys() { - dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?; - } - Ok::<_, NoSolution>(()) - })? - } - - ty::Generator(_, substs, _movability) => { - // rust-lang/rust#49918: types can be constructed, stored - // in the interior, and sit idle when generator yields - // (and is subsequently dropped). - // - // It would be nice to descend into interior of a - // generator to determine what effects dropping it might - // have (by looking at any drop effects associated with - // its interior). - // - // However, the interior's representation uses things like - // GeneratorWitness that explicitly assume they are not - // traversed in such a manner. So instead, we will - // simplify things for now by treating all generators as - // if they were like trait objects, where its upvars must - // all be alive for the generator's (potential) - // destructor. - // - // In particular, skipping over `_interior` is safe - // because any side-effects from dropping `_interior` can - // only take place through references with lifetimes - // derived from lifetimes attached to the upvars and resume - // argument, and we *do* incorporate those here. - - if !substs.as_generator().is_valid() { - // By the time this code runs, all type variables ought to - // be fully resolved. - tcx.sess.delay_span_bug( - span, - format!("upvar_tys for generator not found. 
Expected capture information for generator {ty}",), - ); - return Err(NoSolution); - } - - constraints.outlives.extend( - substs - .as_generator() - .upvar_tys() - .map(|t| -> ty::subst::GenericArg<'tcx> { t.into() }), - ); - constraints.outlives.push(substs.as_generator().resume_ty().into()); - } - - ty::Adt(def, substs) => { - let DropckConstraint { dtorck_types, outlives, overflows } = - tcx.at(span).adt_dtorck_constraint(def.did())?; - // FIXME: we can try to recursively `dtorck_constraint_on_ty` - // there, but that needs some way to handle cycles. - constraints - .dtorck_types - .extend(dtorck_types.iter().map(|t| EarlyBinder(*t).subst(tcx, substs))); - constraints - .outlives - .extend(outlives.iter().map(|t| EarlyBinder(*t).subst(tcx, substs))); - constraints - .overflows - .extend(overflows.iter().map(|t| EarlyBinder(*t).subst(tcx, substs))); - } - - // Objects must be alive in order for their destructor - // to be called. - ty::Dynamic(..) => { - constraints.outlives.push(ty.into()); - } - - // Types that can't be resolved. Pass them forward. - ty::Alias(..) | ty::Param(..) => { - constraints.dtorck_types.push(ty); - } - - ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => { - // By the time this code runs, all type variables ought to - // be fully resolved. - return Err(NoSolution); - } - } - - Ok(()) -} - /// Calculates the dtorck constraint for a type. pub(crate) fn adt_dtorck_constraint( tcx: TyCtxt<'_>, @@ -311,7 +55,7 @@ pub(crate) fn adt_dtorck_constraint( let mut result = DropckConstraint::empty(); for field in def.all_fields() { let fty = tcx.type_of(field.did).subst_identity(); - dtorck_constraint_for_ty(tcx, span, fty, 0, fty, &mut result)?; + dtorck_constraint_for_ty_inner(tcx, span, fty, 0, fty, &mut result)?; } result.outlives.extend(tcx.destructor_constraints(def)); dedup_dtorck_constraint(&mut result); diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs index 49cbf9efa74..959838ab348 100644 --- a/compiler/rustc_traits/src/implied_outlives_bounds.rs +++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs @@ -3,18 +3,13 @@ //! [`rustc_trait_selection::traits::query::type_op::implied_outlives_bounds`]. 
use rustc_infer::infer::canonical::{self, Canonical}; -use rustc_infer::infer::outlives::components::{push_outlives_components, Component}; use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::traits::query::OutlivesBound; use rustc_middle::query::Providers; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt}; -use rustc_span::def_id::CRATE_DEF_ID; -use rustc_span::source_map::DUMMY_SP; +use rustc_middle::ty::TyCtxt; use rustc_trait_selection::infer::InferCtxtBuilderExt; +use rustc_trait_selection::traits::query::type_op::implied_outlives_bounds::compute_implied_outlives_bounds_inner; use rustc_trait_selection::traits::query::{CanonicalTyGoal, NoSolution}; -use rustc_trait_selection::traits::wf; -use rustc_trait_selection::traits::ObligationCtxt; -use smallvec::{smallvec, SmallVec}; pub(crate) fn provide(p: &mut Providers) { *p = Providers { implied_outlives_bounds, ..*p }; @@ -29,164 +24,6 @@ fn implied_outlives_bounds<'tcx>( > { tcx.infer_ctxt().enter_canonical_trait_query(&goal, |ocx, key| { let (param_env, ty) = key.into_parts(); - compute_implied_outlives_bounds(ocx, param_env, ty) + compute_implied_outlives_bounds_inner(ocx, param_env, ty) }) } - -fn compute_implied_outlives_bounds<'tcx>( - ocx: &ObligationCtxt<'_, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>, -) -> Result<Vec<OutlivesBound<'tcx>>, NoSolution> { - let tcx = ocx.infcx.tcx; - - // Sometimes when we ask what it takes for T: WF, we get back that - // U: WF is required; in that case, we push U onto this stack and - // process it next. Because the resulting predicates aren't always - // guaranteed to be a subset of the original type, so we need to store the - // WF args we've computed in a set. - let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default(); - let mut wf_args = vec![ty.into()]; - - let mut outlives_bounds: Vec<ty::OutlivesPredicate<ty::GenericArg<'tcx>, ty::Region<'tcx>>> = - vec![]; - - while let Some(arg) = wf_args.pop() { - if !checked_wf_args.insert(arg) { - continue; - } - - // Compute the obligations for `arg` to be well-formed. If `arg` is - // an unresolved inference variable, just substituted an empty set - // -- because the return type here is going to be things we *add* - // to the environment, it's always ok for this set to be smaller - // than the ultimate set. (Note: normally there won't be - // unresolved inference variables here anyway, but there might be - // during typeck under some circumstances.) - // - // FIXME(@lcnr): It's not really "always fine", having fewer implied - // bounds can be backward incompatible, e.g. #101951 was caused by - // us not dealing with inference vars in `TypeOutlives` predicates. - let obligations = wf::obligations(ocx.infcx, param_env, CRATE_DEF_ID, 0, arg, DUMMY_SP) - .unwrap_or_default(); - - for obligation in obligations { - debug!(?obligation); - assert!(!obligation.has_escaping_bound_vars()); - - // While these predicates should all be implied by other parts of - // the program, they are still relevant as they may constrain - // inference variables, which is necessary to add the correct - // implied bounds in some cases, mostly when dealing with projections. - // - // Another important point here: we only register `Projection` - // predicates, since otherwise we might register outlives - // predicates containing inference variables, and we don't - // learn anything new from those. 
- if obligation.predicate.has_non_region_infer() { - match obligation.predicate.kind().skip_binder() { - ty::PredicateKind::Clause(ty::Clause::Projection(..)) - | ty::PredicateKind::AliasRelate(..) => { - ocx.register_obligation(obligation.clone()); - } - _ => {} - } - } - - let pred = match obligation.predicate.kind().no_bound_vars() { - None => continue, - Some(pred) => pred, - }; - match pred { - ty::PredicateKind::Clause(ty::Clause::Trait(..)) - // FIXME(const_generics): Make sure that `<'a, 'b, const N: &'a &'b u32>` is sound - // if we ever support that - | ty::PredicateKind::Clause(ty::Clause::ConstArgHasType(..)) - | ty::PredicateKind::Subtype(..) - | ty::PredicateKind::Coerce(..) - | ty::PredicateKind::Clause(ty::Clause::Projection(..)) - | ty::PredicateKind::ClosureKind(..) - | ty::PredicateKind::ObjectSafe(..) - | ty::PredicateKind::ConstEvaluatable(..) - | ty::PredicateKind::ConstEquate(..) - | ty::PredicateKind::Ambiguous - | ty::PredicateKind::AliasRelate(..) - | ty::PredicateKind::TypeWellFormedFromEnv(..) => {} - - // We need to search through *all* WellFormed predicates - ty::PredicateKind::WellFormed(arg) => { - wf_args.push(arg); - } - - // We need to register region relationships - ty::PredicateKind::Clause(ty::Clause::RegionOutlives(ty::OutlivesPredicate( - r_a, - r_b, - ))) => outlives_bounds.push(ty::OutlivesPredicate(r_a.into(), r_b)), - - ty::PredicateKind::Clause(ty::Clause::TypeOutlives(ty::OutlivesPredicate( - ty_a, - r_b, - ))) => outlives_bounds.push(ty::OutlivesPredicate(ty_a.into(), r_b)), - } - } - } - - // This call to `select_all_or_error` is necessary to constrain inference variables, which we - // use further down when computing the implied bounds. - match ocx.select_all_or_error().as_slice() { - [] => (), - _ => return Err(NoSolution), - } - - // We lazily compute the outlives components as - // `select_all_or_error` constrains inference variables. - let implied_bounds = outlives_bounds - .into_iter() - .flat_map(|ty::OutlivesPredicate(a, r_b)| match a.unpack() { - ty::GenericArgKind::Lifetime(r_a) => vec![OutlivesBound::RegionSubRegion(r_b, r_a)], - ty::GenericArgKind::Type(ty_a) => { - let ty_a = ocx.infcx.resolve_vars_if_possible(ty_a); - let mut components = smallvec![]; - push_outlives_components(tcx, ty_a, &mut components); - implied_bounds_from_components(r_b, components) - } - ty::GenericArgKind::Const(_) => unreachable!(), - }) - .collect(); - - Ok(implied_bounds) -} - -/// When we have an implied bound that `T: 'a`, we can further break -/// this down to determine what relationships would have to hold for -/// `T: 'a` to hold. We get to assume that the caller has validated -/// those relationships. -fn implied_bounds_from_components<'tcx>( - sub_region: ty::Region<'tcx>, - sup_components: SmallVec<[Component<'tcx>; 4]>, -) -> Vec<OutlivesBound<'tcx>> { - sup_components - .into_iter() - .filter_map(|component| { - match component { - Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)), - Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)), - Component::Alias(p) => Some(OutlivesBound::RegionSubAlias(sub_region, p)), - Component::EscapingAlias(_) => - // If the projection has escaping regions, don't - // try to infer any implied bounds even for its - // free components. This is conservative, because - // the caller will still have to prove that those - // free components outlive `sub_region`. 
But the - // idea is that the WAY that the caller proves - // that may change in the future and we want to - // give ourselves room to get smarter here. - { - None - } - Component::UnresolvedInferenceVariable(..) => None, - } - }) - .collect() -} diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs index b0f9c57154f..907e2d39c51 100644 --- a/compiler/rustc_traits/src/lib.rs +++ b/compiler/rustc_traits/src/lib.rs @@ -21,7 +21,8 @@ mod normalize_erasing_regions; mod normalize_projection_ty; mod type_op; -pub use type_op::{type_op_ascribe_user_type_with_span, type_op_prove_predicate_with_cause}; +pub use rustc_trait_selection::traits::query::type_op::ascribe_user_type::type_op_ascribe_user_type_with_span; +pub use type_op::type_op_prove_predicate_with_cause; use rustc_middle::query::Providers; diff --git a/compiler/rustc_traits/src/type_op.rs b/compiler/rustc_traits/src/type_op.rs index faf985169de..9904acb1c0d 100644 --- a/compiler/rustc_traits/src/type_op.rs +++ b/compiler/rustc_traits/src/type_op.rs @@ -1,17 +1,15 @@ -use rustc_hir as hir; use rustc_infer::infer::canonical::{Canonical, QueryResponse}; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::query::Providers; use rustc_middle::traits::query::NoSolution; -use rustc_middle::traits::{DefiningAnchor, ObligationCauseCode}; -use rustc_middle::ty::{self, FnSig, Lift, PolyFnSig, Ty, TyCtxt, TypeFoldable}; +use rustc_middle::traits::DefiningAnchor; +use rustc_middle::ty::{FnSig, Lift, PolyFnSig, Ty, TyCtxt, TypeFoldable}; use rustc_middle::ty::{ParamEnvAnd, Predicate}; -use rustc_middle::ty::{UserSelfTy, UserSubsts, UserType}; -use rustc_span::def_id::CRATE_DEF_ID; -use rustc_span::{Span, DUMMY_SP}; use rustc_trait_selection::infer::InferCtxtBuilderExt; use rustc_trait_selection::traits::query::normalize::QueryNormalizeExt; -use rustc_trait_selection::traits::query::type_op::ascribe_user_type::AscribeUserType; +use rustc_trait_selection::traits::query::type_op::ascribe_user_type::{ + type_op_ascribe_user_type_with_span, AscribeUserType, +}; use rustc_trait_selection::traits::query::type_op::eq::Eq; use rustc_trait_selection::traits::query::type_op::normalize::Normalize; use rustc_trait_selection::traits::query::type_op::prove_predicate::ProvePredicate; @@ -42,111 +40,6 @@ fn type_op_ascribe_user_type<'tcx>( }) } -/// The core of the `type_op_ascribe_user_type` query: for diagnostics purposes in NLL HRTB errors, -/// this query can be re-run to better track the span of the obligation cause, and improve the error -/// message. Do not call directly unless you're in that very specific context. -pub fn type_op_ascribe_user_type_with_span<'tcx>( - ocx: &ObligationCtxt<'_, 'tcx>, - key: ParamEnvAnd<'tcx, AscribeUserType<'tcx>>, - span: Option<Span>, -) -> Result<(), NoSolution> { - let (param_env, AscribeUserType { mir_ty, user_ty }) = key.into_parts(); - debug!("type_op_ascribe_user_type: mir_ty={:?} user_ty={:?}", mir_ty, user_ty); - let span = span.unwrap_or(DUMMY_SP); - match user_ty { - UserType::Ty(user_ty) => relate_mir_and_user_ty(ocx, param_env, span, mir_ty, user_ty)?, - UserType::TypeOf(def_id, user_substs) => { - relate_mir_and_user_substs(ocx, param_env, span, mir_ty, def_id, user_substs)? 
- } - }; - Ok(()) -} - -#[instrument(level = "debug", skip(ocx, param_env, span))] -fn relate_mir_and_user_ty<'tcx>( - ocx: &ObligationCtxt<'_, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - span: Span, - mir_ty: Ty<'tcx>, - user_ty: Ty<'tcx>, -) -> Result<(), NoSolution> { - let cause = ObligationCause::dummy_with_span(span); - let user_ty = ocx.normalize(&cause, param_env, user_ty); - ocx.eq(&cause, param_env, mir_ty, user_ty)?; - - // FIXME(#104764): We should check well-formedness before normalization. - let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(user_ty.into())); - ocx.register_obligation(Obligation::new(ocx.infcx.tcx, cause, param_env, predicate)); - Ok(()) -} - -#[instrument(level = "debug", skip(ocx, param_env, span))] -fn relate_mir_and_user_substs<'tcx>( - ocx: &ObligationCtxt<'_, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - span: Span, - mir_ty: Ty<'tcx>, - def_id: hir::def_id::DefId, - user_substs: UserSubsts<'tcx>, -) -> Result<(), NoSolution> { - let param_env = param_env.without_const(); - let UserSubsts { user_self_ty, substs } = user_substs; - let tcx = ocx.infcx.tcx; - let cause = ObligationCause::dummy_with_span(span); - - let ty = tcx.type_of(def_id).subst(tcx, substs); - let ty = ocx.normalize(&cause, param_env, ty); - debug!("relate_type_and_user_type: ty of def-id is {:?}", ty); - - ocx.eq(&cause, param_env, mir_ty, ty)?; - - // Prove the predicates coming along with `def_id`. - // - // Also, normalize the `instantiated_predicates` - // because otherwise we wind up with duplicate "type - // outlives" error messages. - let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs); - - debug!(?instantiated_predicates); - for (instantiated_predicate, predicate_span) in instantiated_predicates { - let span = if span == DUMMY_SP { predicate_span } else { span }; - let cause = ObligationCause::new( - span, - CRATE_DEF_ID, - ObligationCauseCode::AscribeUserTypeProvePredicate(predicate_span), - ); - let instantiated_predicate = - ocx.normalize(&cause.clone(), param_env, instantiated_predicate); - - ocx.register_obligation(Obligation::new(tcx, cause, param_env, instantiated_predicate)); - } - - if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { - let self_ty = ocx.normalize(&cause, param_env, self_ty); - let impl_self_ty = tcx.type_of(impl_def_id).subst(tcx, substs); - let impl_self_ty = ocx.normalize(&cause, param_env, impl_self_ty); - - ocx.eq(&cause, param_env, self_ty, impl_self_ty)?; - let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(impl_self_ty.into())); - ocx.register_obligation(Obligation::new(tcx, cause.clone(), param_env, predicate)); - } - - // In addition to proving the predicates, we have to - // prove that `ty` is well-formed -- this is because - // the WF of `ty` is predicated on the substs being - // well-formed, and we haven't proven *that*. We don't - // want to prove the WF of types from `substs` directly because they - // haven't been normalized. - // - // FIXME(nmatsakis): Well, perhaps we should normalize - // them? This would only be relevant if some input - // type were ill-formed but did not appear in `ty`, - // which...could happen with normalization... 
- let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into())); - ocx.register_obligation(Obligation::new(tcx, cause, param_env, predicate)); - Ok(()) -} - fn type_op_eq<'tcx>( tcx: TyCtxt<'tcx>, canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Eq<'tcx>>>, diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index 15c19104616..5b3ffc9fc36 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -81,7 +81,7 @@ fn fn_sig_for_fn_abi<'tcx>( var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BoundRegionKind::BrEnv, }; - let env_region = tcx.mk_re_late_bound(ty::INNERMOST, br); + let env_region = ty::Region::new_late_bound(tcx, ty::INNERMOST, br); let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap(); let sig = sig.skip_binder(); @@ -106,7 +106,7 @@ fn fn_sig_for_fn_abi<'tcx>( var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BoundRegionKind::BrEnv, }; - let env_ty = tcx.mk_mut_ref(tcx.mk_re_late_bound(ty::INNERMOST, br), ty); + let env_ty = tcx.mk_mut_ref(ty::Region::new_late_bound(tcx, ty::INNERMOST, br), ty); let pin_did = tcx.require_lang_item(LangItem::Pin, None); let pin_adt_ref = tcx.adt_def(pin_did); diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs index ed574f22e61..57b4183d336 100644 --- a/compiler/rustc_ty_utils/src/assoc.rs +++ b/compiler/rustc_ty_utils/src/assoc.rs @@ -301,7 +301,7 @@ fn associated_type_for_impl_trait_in_trait( trait_assoc_ty.impl_defaultness(tcx.impl_defaultness(fn_def_id)); // Copy type_of of the opaque. - trait_assoc_ty.type_of(ty::EarlyBinder(tcx.mk_opaque( + trait_assoc_ty.type_of(ty::EarlyBinder::bind(tcx.mk_opaque( opaque_ty_def_id.to_def_id(), InternalSubsts::identity_for_item(tcx, opaque_ty_def_id), ))); diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs index 1219bb40098..ce77df0df5d 100644 --- a/compiler/rustc_ty_utils/src/consts.rs +++ b/compiler/rustc_ty_utils/src/consts.rs @@ -419,7 +419,7 @@ pub fn thir_abstract_const( let root_span = body.exprs[body_id].span; - Ok(Some(ty::EarlyBinder(recurse_build(tcx, body, body_id, root_span)?))) + Ok(Some(ty::EarlyBinder::bind(recurse_build(tcx, body, body_id, root_span)?))) } pub fn provide(providers: &mut Providers) { diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 16cd8bc8e69..7015778e24b 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -610,7 +610,7 @@ fn generator_layout<'tcx>( ) -> Result<Layout<'tcx>, LayoutError<'tcx>> { use SavedLocalEligibility::*; let tcx = cx.tcx; - let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs); + let subst_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).subst(tcx, substs); let Some(info) = tcx.generator_layout(def_id) else { return Err(LayoutError::Unknown(ty)); diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs index 1f9701b9322..9d593dc5e04 100644 --- a/compiler/rustc_ty_utils/src/needs_drop.rs +++ b/compiler/rustc_ty_utils/src/needs_drop.rs @@ -210,7 +210,7 @@ fn drop_tys_helper<'tcx>( match subty.kind() { ty::Adt(adt_id, subst) => { for subty in tcx.adt_drop_tys(adt_id.did())? 
{ - vec.push(EarlyBinder(subty).subst(tcx, subst)); + vec.push(EarlyBinder::bind(subty).subst(tcx, subst)); } } _ => vec.push(subty), diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index 65dc3c39c6a..12ad8b0842f 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -44,9 +44,7 @@ fn sized_constraint_for_ty<'tcx>( let adt_tys = adt.sized_constraint(tcx); debug!("sized_constraint_for_ty({:?}) intermediate = {:?}", ty, adt_tys); adt_tys - .0 - .iter() - .map(|ty| adt_tys.rebind(*ty).subst(tcx, substs)) + .subst_iter_copied(tcx, substs) .flat_map(|ty| sized_constraint_for_ty(tcx, adtdef, ty)) .collect() } @@ -289,12 +287,13 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> { let shifted_alias_ty = self.tcx.fold_regions(unshifted_alias_ty, |re, depth| { if let ty::ReLateBound(index, bv) = re.kind() { if depth != ty::INNERMOST { - return self.tcx.mk_re_error_with_message( + return ty::Region::new_error_with_message( + self.tcx, DUMMY_SP, "we shouldn't walk non-predicate binders with `impl Trait`...", ); } - self.tcx.mk_re_late_bound(index.shifted_out_to_binder(self.depth), bv) + ty::Region::new_late_bound(self.tcx, index.shifted_out_to_binder(self.depth), bv) } else { re } @@ -508,7 +507,7 @@ fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<EarlyBinder<Ty<' if self_ty_matches { debug!("issue33140_self_ty - MATCHES!"); - Some(EarlyBinder(self_ty)) + Some(EarlyBinder::bind(self_ty)) } else { debug!("issue33140_self_ty - non-matching self type"); None diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs index f6b44bdf27e..ae16fbb162e 100644 --- a/compiler/rustc_type_ir/src/lib.rs +++ b/compiler/rustc_type_ir/src/lib.rs @@ -52,7 +52,7 @@ pub trait Interner: Sized { type PolyFnSig: Clone + Debug + Hash + Ord; type ListBinderExistentialPredicate: Clone + Debug + Hash + Ord; type BinderListTy: Clone + Debug + Hash + Ord; - type ListTy: Clone + Debug + Hash + Ord; + type ListTy: Clone + Debug + Hash + Ord + IntoIterator<Item = Self::Ty>; type AliasTy: Clone + Debug + Hash + Ord; type ParamTy: Clone + Debug + Hash + Ord; type BoundTy: Clone + Debug + Hash + Ord; @@ -67,6 +67,9 @@ pub trait Interner: Sized { type FreeRegion: Clone + Debug + Hash + Ord; type RegionVid: Clone + Debug + Hash + Ord; type PlaceholderRegion: Clone + Debug + Hash + Ord; + + fn ty_and_mut_to_parts(ty_and_mut: Self::TypeAndMut) -> (Self::Ty, Self::Mutability); + fn mutability_is_mut(mutbl: Self::Mutability) -> bool; } /// Imagine you have a function `F: FnOnce(&[T]) -> R`, plus an iterator `iter` @@ -390,7 +393,19 @@ impl DebruijnIndex { } } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub fn debug_bound_var<T: std::fmt::Write>( + fmt: &mut T, + debruijn: DebruijnIndex, + var: impl std::fmt::Debug, +) -> Result<(), std::fmt::Error> { + if debruijn == INNERMOST { + write!(fmt, "^{:?}", var) + } else { + write!(fmt, "^{}_{:?}", debruijn.index(), var) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Encodable, Decodable, HashStable_Generic)] pub enum IntTy { Isize, @@ -448,7 +463,7 @@ impl IntTy { } } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Debug)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy)] #[derive(Encodable, Decodable, HashStable_Generic)] pub enum UintTy { Usize, @@ -506,7 +521,7 @@ impl UintTy { } } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +#[derive(Clone, Copy, 
PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Encodable, Decodable, HashStable_Generic)] pub enum FloatTy { F32, diff --git a/compiler/rustc_type_ir/src/structural_impls.rs b/compiler/rustc_type_ir/src/structural_impls.rs index 45a2e9023c9..553d7f31b2d 100644 --- a/compiler/rustc_type_ir/src/structural_impls.rs +++ b/compiler/rustc_type_ir/src/structural_impls.rs @@ -4,11 +4,12 @@ use crate::fold::{FallibleTypeFolder, TypeFoldable}; use crate::visit::{TypeVisitable, TypeVisitor}; -use crate::Interner; +use crate::{FloatTy, IntTy, Interner, UintTy}; use rustc_data_structures::functor::IdFunctor; use rustc_data_structures::sync::Lrc; use rustc_index::{Idx, IndexVec}; +use core::fmt; use std::ops::ControlFlow; /////////////////////////////////////////////////////////////////////////// @@ -163,3 +164,21 @@ impl<I: Interner, T: TypeVisitable<I>, Ix: Idx> TypeVisitable<I> for IndexVec<Ix self.iter().try_for_each(|t| t.visit_with(visitor)) } } + +impl fmt::Debug for IntTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.name_str()) + } +} + +impl fmt::Debug for UintTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.name_str()) + } +} + +impl fmt::Debug for FloatTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.name_str()) + } +} diff --git a/compiler/rustc_type_ir/src/sty.rs b/compiler/rustc_type_ir/src/sty.rs index f7344bacc02..fa18f921ee4 100644 --- a/compiler/rustc_type_ir/src/sty.rs +++ b/compiler/rustc_type_ir/src/sty.rs @@ -294,7 +294,7 @@ impl<I: Interner> Clone for TyKind<I> { Str => Str, Array(t, c) => Array(t.clone(), c.clone()), Slice(t) => Slice(t.clone()), - RawPtr(t) => RawPtr(t.clone()), + RawPtr(p) => RawPtr(p.clone()), Ref(r, t, m) => Ref(r.clone(), t.clone(), m.clone()), FnDef(d, s) => FnDef(d.clone(), s.clone()), FnPtr(s) => FnPtr(s.clone()), @@ -499,33 +499,65 @@ impl<I: Interner> hash::Hash for TyKind<I> { impl<I: Interner> fmt::Debug for TyKind<I> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Bool => f.write_str("Bool"), - Char => f.write_str("Char"), - Int(i) => f.debug_tuple_field1_finish("Int", i), - Uint(u) => f.debug_tuple_field1_finish("Uint", u), - Float(float) => f.debug_tuple_field1_finish("Float", float), + Bool => write!(f, "bool"), + Char => write!(f, "char"), + Int(i) => write!(f, "{i:?}"), + Uint(u) => write!(f, "{u:?}"), + Float(float) => write!(f, "{float:?}"), Adt(d, s) => f.debug_tuple_field2_finish("Adt", d, s), Foreign(d) => f.debug_tuple_field1_finish("Foreign", d), - Str => f.write_str("Str"), - Array(t, c) => f.debug_tuple_field2_finish("Array", t, c), - Slice(t) => f.debug_tuple_field1_finish("Slice", t), - RawPtr(t) => f.debug_tuple_field1_finish("RawPtr", t), - Ref(r, t, m) => f.debug_tuple_field3_finish("Ref", r, t, m), + Str => write!(f, "str"), + Array(t, c) => write!(f, "[{t:?}; {c:?}]"), + Slice(t) => write!(f, "[{t:?}]"), + RawPtr(p) => { + let (ty, mutbl) = I::ty_and_mut_to_parts(p.clone()); + match I::mutability_is_mut(mutbl) { + true => write!(f, "*mut "), + false => write!(f, "*const "), + }?; + write!(f, "{ty:?}") + } + Ref(r, t, m) => match I::mutability_is_mut(m.clone()) { + true => write!(f, "&{r:?} mut {t:?}"), + false => write!(f, "&{r:?} {t:?}"), + }, FnDef(d, s) => f.debug_tuple_field2_finish("FnDef", d, s), - FnPtr(s) => f.debug_tuple_field1_finish("FnPtr", s), - Dynamic(p, r, repr) => f.debug_tuple_field3_finish("Dynamic", p, r, repr), + FnPtr(s) => write!(f, "{s:?}"), + Dynamic(p, r, 
repr) => match repr { + DynKind::Dyn => write!(f, "dyn {p:?} + {r:?}"), + DynKind::DynStar => write!(f, "dyn* {p:?} + {r:?}"), + }, Closure(d, s) => f.debug_tuple_field2_finish("Closure", d, s), Generator(d, s, m) => f.debug_tuple_field3_finish("Generator", d, s, m), GeneratorWitness(g) => f.debug_tuple_field1_finish("GeneratorWitness", g), GeneratorWitnessMIR(d, s) => f.debug_tuple_field2_finish("GeneratorWitnessMIR", d, s), - Never => f.write_str("Never"), - Tuple(t) => f.debug_tuple_field1_finish("Tuple", t), + Never => write!(f, "!"), + Tuple(t) => { + let mut iter = t.clone().into_iter(); + + write!(f, "(")?; + + match iter.next() { + None => return write!(f, ")"), + Some(ty) => write!(f, "{ty:?}")?, + }; + + match iter.next() { + None => return write!(f, ",)"), + Some(ty) => write!(f, "{ty:?})")?, + } + + for ty in iter { + write!(f, ", {ty:?}")?; + } + write!(f, ")") + } Alias(i, a) => f.debug_tuple_field2_finish("Alias", i, a), - Param(p) => f.debug_tuple_field1_finish("Param", p), - Bound(d, b) => f.debug_tuple_field2_finish("Bound", d, b), - Placeholder(p) => f.debug_tuple_field1_finish("Placeholder", p), - Infer(t) => f.debug_tuple_field1_finish("Infer", t), - TyKind::Error(e) => f.debug_tuple_field1_finish("Error", e), + Param(p) => write!(f, "{p:?}"), + Bound(d, b) => crate::debug_bound_var(f, *d, b), + Placeholder(p) => write!(f, "{p:?}"), + Infer(t) => write!(f, "{t:?}"), + TyKind::Error(_) => write!(f, "{{type error}}"), } } }
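Note on the `rustc_type_ir` hunks above: the derived `Debug` output for `TyKind`, `IntTy`, `UintTy`, and `FloatTy` is replaced by hand-written impls that print surface syntax (`bool`, `*mut T`, `[T]`, `!`, `(T,)`) rather than constructor names. The sketch below is a minimal standalone illustration of that formatting idea, using a made-up `ToyTy` enum instead of the compiler's `TyKind`; it is not the patch's actual code, only the same technique.

use std::fmt;

// Toy stand-in for a type representation; this is not the compiler's TyKind.
enum ToyTy {
    Bool,
    RawPtr { mutable: bool, pointee: Box<ToyTy> },
    Tuple(Vec<ToyTy>),
}

impl fmt::Debug for ToyTy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ToyTy::Bool => write!(f, "bool"),
            // Print surface syntax (`*mut T` / `*const T`) instead of the derive(Debug) form.
            ToyTy::RawPtr { mutable, pointee } => {
                write!(f, "*{} {:?}", if *mutable { "mut" } else { "const" }, pointee)
            }
            ToyTy::Tuple(tys) => {
                write!(f, "(")?;
                for (i, ty) in tys.iter().enumerate() {
                    if i > 0 {
                        write!(f, ", ")?;
                    }
                    write!(f, "{ty:?}")?;
                }
                // A one-element tuple needs a trailing comma so it reads as `(T,)`.
                if tys.len() == 1 {
                    write!(f, ",")?;
                }
                write!(f, ")")
            }
        }
    }
}

fn main() {
    let t = ToyTy::Tuple(vec![ToyTy::RawPtr { mutable: true, pointee: Box::new(ToyTy::Bool) }]);
    assert_eq!(format!("{t:?}"), "(*mut bool,)");
}

Running this asserts the output `(*mut bool,)`, the same unary-tuple special case the new `Tuple` arm handles.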

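The same file also gains a small `debug_bound_var` helper that renders a bound variable as `^var` when it refers to the innermost binder and as `^depth_var` otherwise. Below is a hypothetical self-contained version of that behaviour for illustration only, with a plain `u32` depth standing in for `ty::DebruijnIndex` (so `0` plays the role of `INNERMOST`).

use std::fmt::{self, Write};

// Hypothetical standalone version of the debug_bound_var helper; a plain u32
// depth stands in for ty::DebruijnIndex, with 0 acting as INNERMOST.
fn debug_bound_var<W: Write>(out: &mut W, debruijn: u32, var: impl fmt::Debug) -> fmt::Result {
    if debruijn == 0 {
        // Innermost binder: print just the variable, e.g. `^3`.
        write!(out, "^{var:?}")
    } else {
        // Otherwise include the binder depth, e.g. `^2_3`.
        write!(out, "^{}_{var:?}", debruijn)
    }
}

fn main() -> fmt::Result {
    let mut s = String::new();
    debug_bound_var(&mut s, 0, 3u32)?;
    s.push(' ');
    debug_bound_var(&mut s, 2, 3u32)?;
    assert_eq!(s, "^3 ^2_3");
    Ok(())
}

This mirrors the output format used by the new `Bound(d, b)` arm of the `TyKind` Debug impl, which delegates to `crate::debug_bound_var`.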