Diffstat (limited to 'compiler')
98 files changed, 665 insertions, 1263 deletions
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index ccc6644923a..e568da9bbc0 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -183,14 +183,6 @@ impl<'hir> LoweringContext<'_, 'hir> { self.arena.alloc_from_iter(arms.iter().map(|x| self.lower_arm(x))), hir::MatchSource::Normal, ), - ExprKind::Gen(capture_clause, block, GenBlockKind::Async) => self.make_async_expr( - *capture_clause, - e.id, - None, - e.span, - hir::CoroutineSource::Block, - |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), - ), ExprKind::Await(expr, await_kw_span) => self.lower_expr_await(*await_kw_span, expr), ExprKind::Closure(box Closure { binder, @@ -226,6 +218,22 @@ impl<'hir> LoweringContext<'_, 'hir> { *fn_arg_span, ), }, + ExprKind::Gen(capture_clause, block, genblock_kind) => { + let desugaring_kind = match genblock_kind { + GenBlockKind::Async => hir::CoroutineDesugaring::Async, + GenBlockKind::Gen => hir::CoroutineDesugaring::Gen, + GenBlockKind::AsyncGen => hir::CoroutineDesugaring::AsyncGen, + }; + self.make_desugared_coroutine_expr( + *capture_clause, + e.id, + None, + e.span, + desugaring_kind, + hir::CoroutineSource::Block, + |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), + ) + } ExprKind::Block(blk, opt_label) => { let opt_label = self.lower_label(*opt_label); hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label) @@ -313,23 +321,6 @@ impl<'hir> LoweringContext<'_, 'hir> { rest, ) } - ExprKind::Gen(capture_clause, block, GenBlockKind::Gen) => self.make_gen_expr( - *capture_clause, - e.id, - None, - e.span, - hir::CoroutineSource::Block, - |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), - ), - ExprKind::Gen(capture_clause, block, GenBlockKind::AsyncGen) => self - .make_async_gen_expr( - *capture_clause, - e.id, - None, - e.span, - hir::CoroutineSource::Block, - |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), - ), ExprKind::Yield(opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()), ExprKind::Err => { hir::ExprKind::Err(self.dcx().span_delayed_bug(e.span, "lowered ExprKind::Err")) @@ -612,213 +603,91 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::Arm { hir_id, pat, guard, body, span } } - /// Lower an `async` construct to a coroutine that implements `Future`. + /// Lower/desugar a coroutine construct. /// - /// This results in: - /// - /// ```text - /// static move? |_task_context| -> <ret_ty> { - /// <body> - /// } - /// ``` - pub(super) fn make_async_expr( - &mut self, - capture_clause: CaptureBy, - closure_node_id: NodeId, - ret_ty: Option<hir::FnRetTy<'hir>>, - span: Span, - async_coroutine_source: hir::CoroutineSource, - body: impl FnOnce(&mut Self) -> hir::Expr<'hir>, - ) -> hir::ExprKind<'hir> { - let output = ret_ty.unwrap_or_else(|| hir::FnRetTy::DefaultReturn(self.lower_span(span))); - - // Resume argument type: `ResumeTy` - let unstable_span = self.mark_span_with_reason( - DesugaringKind::Async, - self.lower_span(span), - Some(self.allow_gen_future.clone()), - ); - let resume_ty = self.make_lang_item_qpath(hir::LangItem::ResumeTy, unstable_span); - let input_ty = hir::Ty { - hir_id: self.next_id(), - kind: hir::TyKind::Path(resume_ty), - span: unstable_span, - }; - - // The closure/coroutine `FnDecl` takes a single (resume) argument of type `input_ty`. 
- let fn_decl = self.arena.alloc(hir::FnDecl { - inputs: arena_vec![self; input_ty], - output, - c_variadic: false, - implicit_self: hir::ImplicitSelfKind::None, - lifetime_elision_allowed: false, - }); - - // Lower the argument pattern/ident. The ident is used again in the `.await` lowering. - let (pat, task_context_hid) = self.pat_ident_binding_mode( - span, - Ident::with_dummy_span(sym::_task_context), - hir::BindingAnnotation::MUT, - ); - let param = hir::Param { - hir_id: self.next_id(), - pat, - ty_span: self.lower_span(span), - span: self.lower_span(span), - }; - let params = arena_vec![self; param]; - - let coroutine_kind = - hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Async, async_coroutine_source); - let body = self.lower_body(move |this| { - this.coroutine_kind = Some(coroutine_kind); - - let old_ctx = this.task_context; - this.task_context = Some(task_context_hid); - let res = body(this); - this.task_context = old_ctx; - (params, res) - }); - - // `static |_task_context| -> <ret_ty> { body }`: - hir::ExprKind::Closure(self.arena.alloc(hir::Closure { - def_id: self.local_def_id(closure_node_id), - binder: hir::ClosureBinder::Default, - capture_clause, - bound_generic_params: &[], - fn_decl, - body, - fn_decl_span: self.lower_span(span), - fn_arg_span: None, - kind: hir::ClosureKind::Coroutine(coroutine_kind), - constness: hir::Constness::NotConst, - })) - } - - /// Lower a `gen` construct to a generator that implements `Iterator`. + /// In particular, this creates the correct async resume argument and `_task_context`. /// /// This results in: /// /// ```text - /// static move? |()| -> () { + /// static move? |<_task_context?>| -> <return_ty> { /// <body> /// } /// ``` - pub(super) fn make_gen_expr( + pub(super) fn make_desugared_coroutine_expr( &mut self, capture_clause: CaptureBy, closure_node_id: NodeId, - _yield_ty: Option<hir::FnRetTy<'hir>>, + return_ty: Option<hir::FnRetTy<'hir>>, span: Span, + desugaring_kind: hir::CoroutineDesugaring, coroutine_source: hir::CoroutineSource, body: impl FnOnce(&mut Self) -> hir::Expr<'hir>, ) -> hir::ExprKind<'hir> { - let output = hir::FnRetTy::DefaultReturn(self.lower_span(span)); - - // The closure/generator `FnDecl` takes a single (resume) argument of type `input_ty`. - let fn_decl = self.arena.alloc(hir::FnDecl { - inputs: &[], - output, - c_variadic: false, - implicit_self: hir::ImplicitSelfKind::None, - lifetime_elision_allowed: false, - }); - - let coroutine_kind = - hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, coroutine_source); - let body = self.lower_body(move |this| { - this.coroutine_kind = Some(coroutine_kind); - - let res = body(this); - (&[], res) - }); - - // `static |()| -> () { body }`: - hir::ExprKind::Closure(self.arena.alloc(hir::Closure { - def_id: self.local_def_id(closure_node_id), - binder: hir::ClosureBinder::Default, - capture_clause, - bound_generic_params: &[], - fn_decl, - body, - fn_decl_span: self.lower_span(span), - fn_arg_span: None, - kind: hir::ClosureKind::Coroutine(coroutine_kind), - constness: hir::Constness::NotConst, - })) - } + let coroutine_kind = hir::CoroutineKind::Desugared(desugaring_kind, coroutine_source); + + // The `async` desugaring takes a resume argument and maintains a `task_context`, + // whereas a generator does not. 
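                // Illustrative sketch (not taken verbatim from this change): under the shapes
                // described in the doc comment above, an `async { <body> }` or
                // `async gen { <body> }` block lowers to roughly
                //     static move? |_task_context: ResumeTy| -> <return_ty> { <body> }
                // while a plain `gen { <body> }` block lowers to a zero-argument coroutine
                //     static move? || { <body> }
                // so only the async flavours thread a `_task_context` resume argument through.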
+ let (inputs, params, task_context): (&[_], &[_], _) = match desugaring_kind { + hir::CoroutineDesugaring::Async | hir::CoroutineDesugaring::AsyncGen => { + // Resume argument type: `ResumeTy` + let unstable_span = self.mark_span_with_reason( + DesugaringKind::Async, + self.lower_span(span), + Some(self.allow_gen_future.clone()), + ); + let resume_ty = self.make_lang_item_qpath(hir::LangItem::ResumeTy, unstable_span); + let input_ty = hir::Ty { + hir_id: self.next_id(), + kind: hir::TyKind::Path(resume_ty), + span: unstable_span, + }; + let inputs = arena_vec![self; input_ty]; - /// Lower a `async gen` construct to a generator that implements `AsyncIterator`. - /// - /// This results in: - /// - /// ```text - /// static move? |_task_context| -> () { - /// <body> - /// } - /// ``` - pub(super) fn make_async_gen_expr( - &mut self, - capture_clause: CaptureBy, - closure_node_id: NodeId, - _yield_ty: Option<hir::FnRetTy<'hir>>, - span: Span, - async_coroutine_source: hir::CoroutineSource, - body: impl FnOnce(&mut Self) -> hir::Expr<'hir>, - ) -> hir::ExprKind<'hir> { - let output = hir::FnRetTy::DefaultReturn(self.lower_span(span)); + // Lower the argument pattern/ident. The ident is used again in the `.await` lowering. + let (pat, task_context_hid) = self.pat_ident_binding_mode( + span, + Ident::with_dummy_span(sym::_task_context), + hir::BindingAnnotation::MUT, + ); + let param = hir::Param { + hir_id: self.next_id(), + pat, + ty_span: self.lower_span(span), + span: self.lower_span(span), + }; + let params = arena_vec![self; param]; - // Resume argument type: `ResumeTy` - let unstable_span = self.mark_span_with_reason( - DesugaringKind::Async, - self.lower_span(span), - Some(self.allow_gen_future.clone()), - ); - let resume_ty = self.make_lang_item_qpath(hir::LangItem::ResumeTy, unstable_span); - let input_ty = hir::Ty { - hir_id: self.next_id(), - kind: hir::TyKind::Path(resume_ty), - span: unstable_span, + (inputs, params, Some(task_context_hid)) + } + hir::CoroutineDesugaring::Gen => (&[], &[], None), }; - // The closure/coroutine `FnDecl` takes a single (resume) argument of type `input_ty`. + let output = + return_ty.unwrap_or_else(|| hir::FnRetTy::DefaultReturn(self.lower_span(span))); + let fn_decl = self.arena.alloc(hir::FnDecl { - inputs: arena_vec![self; input_ty], + inputs, output, c_variadic: false, implicit_self: hir::ImplicitSelfKind::None, lifetime_elision_allowed: false, }); - // Lower the argument pattern/ident. The ident is used again in the `.await` lowering. 
- let (pat, task_context_hid) = self.pat_ident_binding_mode( - span, - Ident::with_dummy_span(sym::_task_context), - hir::BindingAnnotation::MUT, - ); - let param = hir::Param { - hir_id: self.next_id(), - pat, - ty_span: self.lower_span(span), - span: self.lower_span(span), - }; - let params = arena_vec![self; param]; - - let coroutine_kind = hir::CoroutineKind::Desugared( - hir::CoroutineDesugaring::AsyncGen, - async_coroutine_source, - ); let body = self.lower_body(move |this| { this.coroutine_kind = Some(coroutine_kind); let old_ctx = this.task_context; - this.task_context = Some(task_context_hid); + if task_context.is_some() { + this.task_context = task_context; + } let res = body(this); this.task_context = old_ctx; + (params, res) }); - // `static |_task_context| -> <ret_ty> { body }`: + // `static |<_task_context?>| -> <return_ty> { <body> }`: hir::ExprKind::Closure(self.arena.alloc(hir::Closure { def_id: self.local_def_id(closure_node_id), binder: hir::ClosureBinder::Default, @@ -1203,11 +1072,12 @@ impl<'hir> LoweringContext<'_, 'hir> { None }; - let async_body = this.make_async_expr( + let async_body = this.make_desugared_coroutine_expr( capture_clause, inner_closure_id, async_ret_ty, body.span, + hir::CoroutineDesugaring::Async, hir::CoroutineSource::Closure, |this| this.with_new_scopes(fn_decl_span, |this| this.lower_expr_mut(body)), ); diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 3848f3b7782..45357aca533 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -1209,33 +1209,20 @@ impl<'hir> LoweringContext<'_, 'hir> { this.expr_block(body) }; - // FIXME(gen_blocks): Consider unifying the `make_*_expr` functions. - let coroutine_expr = match coroutine_kind { - CoroutineKind::Async { .. } => this.make_async_expr( - CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, - closure_id, - None, - body.span, - hir::CoroutineSource::Fn, - mkbody, - ), - CoroutineKind::Gen { .. } => this.make_gen_expr( - CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, - closure_id, - None, - body.span, - hir::CoroutineSource::Fn, - mkbody, - ), - CoroutineKind::AsyncGen { .. } => this.make_async_gen_expr( - CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, - closure_id, - None, - body.span, - hir::CoroutineSource::Fn, - mkbody, - ), + let desugaring_kind = match coroutine_kind { + CoroutineKind::Async { .. } => hir::CoroutineDesugaring::Async, + CoroutineKind::Gen { .. } => hir::CoroutineDesugaring::Gen, + CoroutineKind::AsyncGen { .. 
} => hir::CoroutineDesugaring::AsyncGen, }; + let coroutine_expr = this.make_desugared_coroutine_expr( + CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, + closure_id, + None, + body.span, + desugaring_kind, + hir::CoroutineSource::Fn, + mkbody, + ); let hir_id = this.lower_node_id(closure_id); this.maybe_forward_track_caller(body.span, fn_id, hir_id); diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs index c14a05bd145..b31325485db 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mod.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs @@ -370,7 +370,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { ty::Array(ty, _) | ty::Slice(ty) => { self.describe_field_from_ty(ty, field, variant_index, including_tuple_field) } - ty::Closure(def_id, _) | ty::Coroutine(def_id, _, _) => { + ty::Closure(def_id, _) | ty::Coroutine(def_id, _) => { // We won't be borrowck'ing here if the closure came from another crate, // so it's safe to call `expect_local`. // @@ -792,8 +792,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { debug!("move_spans: moved_place={:?} location={:?} stmt={:?}", moved_place, location, stmt); if let StatementKind::Assign(box (_, Rvalue::Aggregate(kind, places))) = &stmt.kind - && let AggregateKind::Closure(def_id, _) | AggregateKind::Coroutine(def_id, _, _) = - **kind + && let AggregateKind::Closure(def_id, _) | AggregateKind::Coroutine(def_id, _) = **kind { debug!("move_spans: def_id={:?} places={:?}", def_id, places); let def_id = def_id.expect_local(); @@ -928,7 +927,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { if let StatementKind::Assign(box (_, Rvalue::Aggregate(kind, places))) = &stmt.kind { let (&def_id, is_coroutine) = match kind { box AggregateKind::Closure(def_id, _) => (def_id, false), - box AggregateKind::Coroutine(def_id, _, _) => (def_id, true), + box AggregateKind::Coroutine(def_id, _) => (def_id, true), _ => continue, }; let def_id = def_id.expect_local(); diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs index af21847cffd..495b255583c 100644 --- a/compiler/rustc_borrowck/src/lib.rs +++ b/compiler/rustc_borrowck/src/lib.rs @@ -274,11 +274,12 @@ fn do_mir_borrowck<'tcx>( // The first argument is the coroutine type passed by value if let Some(local) = body.local_decls.raw.get(1) // Get the interior types and args which typeck computed - && let ty::Coroutine(_, _, hir::Movability::Static) = local.ty.kind() + && let ty::Coroutine(def_id, _) = *local.ty.kind() + && tcx.coroutine_movability(def_id) == hir::Movability::Movable { - false - } else { true + } else { + false }; for (idx, move_data) in promoted_move_data { @@ -1306,7 +1307,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { // moved into the closure and subsequently used by the closure, // in order to populate our used_mut set. match **aggregate_kind { - AggregateKind::Closure(def_id, _) | AggregateKind::Coroutine(def_id, _, _) => { + AggregateKind::Closure(def_id, _) | AggregateKind::Coroutine(def_id, _) => { let def_id = def_id.expect_local(); let BorrowCheckResult { used_mut_upvars, .. } = self.infcx.tcx.mir_borrowck(def_id); @@ -1612,7 +1613,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) 
| ty::Never | ty::Tuple(_) @@ -1636,7 +1637,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { return; } } - ty::Closure(_, _) | ty::Coroutine(_, _, _) | ty::Tuple(_) => (), + ty::Closure(_, _) | ty::Coroutine(_, _) | ty::Tuple(_) => (), ty::Bool | ty::Char | ty::Int(_) diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 8a862953fba..80575e30a8d 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -762,7 +762,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { let (variant, args) = match base_ty { PlaceTy { ty, variant_index: Some(variant_index) } => match *ty.kind() { ty::Adt(adt_def, args) => (adt_def.variant(variant_index), args), - ty::Coroutine(def_id, args, _) => { + ty::Coroutine(def_id, args) => { let mut variants = args.as_coroutine().state_tys(def_id, tcx); let Some(mut variant) = variants.nth(variant_index.into()) else { bug!( @@ -790,7 +790,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { }), }; } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { // Only prefix fields (upvars and current state) are // accessible without a variant index. return match args.as_coroutine().prefix_tys().get(field.index()) { @@ -1784,7 +1784,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { }), } } - AggregateKind::Coroutine(_, args, _) => { + AggregateKind::Coroutine(_, args) => { // It doesn't make sense to look at a field beyond the prefix; // these require a variant index, and are not initialized in // aggregate rvalues. @@ -2392,7 +2392,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { AggregateKind::Array(_) => None, AggregateKind::Tuple => None, AggregateKind::Closure(_, _) => None, - AggregateKind::Coroutine(_, _, _) => None, + AggregateKind::Coroutine(_, _) => None, }, } } @@ -2620,7 +2620,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // desugaring. A closure gets desugared to a struct, and // these extra requirements are basically like where // clauses on the struct. - AggregateKind::Closure(def_id, args) | AggregateKind::Coroutine(def_id, args, _) => { + AggregateKind::Closure(def_id, args) | AggregateKind::Coroutine(def_id, args) => { (def_id, self.prove_closure_bounds(tcx, def_id.expect_local(), args, location)) } diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs index 2b83c787139..a02304a2f8b 100644 --- a/compiler/rustc_borrowck/src/universal_regions.rs +++ b/compiler/rustc_borrowck/src/universal_regions.rs @@ -14,7 +14,6 @@ use rustc_data_structures::fx::FxHashMap; use rustc_errors::Diagnostic; -use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::lang_items::LangItem; use rustc_hir::BodyOwnerKind; @@ -94,7 +93,7 @@ pub enum DefiningTy<'tcx> { /// The MIR is a coroutine. The signature is that coroutines take /// no parameters and return the result of /// `ClosureArgs::coroutine_return_ty`. - Coroutine(DefId, GenericArgsRef<'tcx>, hir::Movability), + Coroutine(DefId, GenericArgsRef<'tcx>), /// The MIR is a fn item with the given `DefId` and args. The signature /// of the function can be bound then with the `fn_sig` query. @@ -118,7 +117,7 @@ impl<'tcx> DefiningTy<'tcx> { pub fn upvar_tys(self) -> &'tcx ty::List<Ty<'tcx>> { match self { DefiningTy::Closure(_, args) => args.as_closure().upvar_tys(), - DefiningTy::Coroutine(_, args, _) => args.as_coroutine().upvar_tys(), + DefiningTy::Coroutine(_, args) => args.as_coroutine().upvar_tys(), DefiningTy::FnDef(..) 
| DefiningTy::Const(..) | DefiningTy::InlineConst(..) => { ty::List::empty() } @@ -354,7 +353,7 @@ impl<'tcx> UniversalRegions<'tcx> { err.note(format!("late-bound region is {:?}", self.to_region_vid(r))); }); } - DefiningTy::Coroutine(def_id, args, _) => { + DefiningTy::Coroutine(def_id, args) => { let v = with_no_trimmed_paths!( args[tcx.generics_of(def_id).parent_count..] .iter() @@ -527,7 +526,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { debug!("build: local regions = {}..{}", first_local_index, num_universals); let yield_ty = match defining_ty { - DefiningTy::Coroutine(_, args, _) => Some(args.as_coroutine().yield_ty()), + DefiningTy::Coroutine(_, args) => Some(args.as_coroutine().yield_ty()), _ => None, }; @@ -562,9 +561,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { match *defining_ty.kind() { ty::Closure(def_id, args) => DefiningTy::Closure(def_id, args), - ty::Coroutine(def_id, args, movability) => { - DefiningTy::Coroutine(def_id, args, movability) - } + ty::Coroutine(def_id, args) => DefiningTy::Coroutine(def_id, args), ty::FnDef(def_id, args) => DefiningTy::FnDef(def_id, args), _ => span_bug!( tcx.def_span(self.mir_def), @@ -620,7 +617,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id); let fr_args = match defining_ty { DefiningTy::Closure(_, args) - | DefiningTy::Coroutine(_, args, _) + | DefiningTy::Coroutine(_, args) | DefiningTy::InlineConst(_, args) => { // In the case of closures, we rely on the fact that // the first N elements in the ClosureArgs are @@ -685,11 +682,11 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { ) } - DefiningTy::Coroutine(def_id, args, movability) => { + DefiningTy::Coroutine(def_id, args) => { assert_eq!(self.mir_def.to_def_id(), def_id); let resume_ty = args.as_coroutine().resume_ty(); let output = args.as_coroutine().return_ty(); - let coroutine_ty = Ty::new_coroutine(tcx, def_id, args, movability); + let coroutine_ty = Ty::new_coroutine(tcx, def_id, args); let inputs_and_output = self.infcx.tcx.mk_type_list(&[coroutine_ty, resume_ty, output]); ty::Binder::dummy(inputs_and_output) diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index 838c73fa213..f016e6950d4 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -974,8 +974,8 @@ pub(crate) fn assert_assignable<'tcx>( } } } - (&ty::Coroutine(def_id_a, args_a, mov_a), &ty::Coroutine(def_id_b, args_b, mov_b)) - if def_id_a == def_id_b && mov_a == mov_b => + (&ty::Coroutine(def_id_a, args_a), &ty::Coroutine(def_id_b, args_b)) + if def_id_a == def_id_b => { let mut types_a = args_a.types(); let mut types_b = args_b.types(); diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index b8a8c144dc9..42e61b3ccb5 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -1296,7 +1296,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } // Atomic Operations - fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { + fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> (RValue<'gcc>, RValue<'gcc>) { let 
expected = self.current_func().new_local(None, cmp.get_type(), "expected"); self.llbb().add_assignment(None, expected, cmp); // NOTE: gcc doesn't support a failure memory model that is stronger than the success @@ -1310,20 +1310,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { }; let success = self.compare_exchange(dst, expected, src, order, failure_order, weak); - let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); - let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result"); - let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align. + // NOTE: since success contains the call to the intrinsic, it must be added to the basic block before + // expected so that we store expected after the call. + let success_var = self.current_func().new_local(None, self.bool_type, "success"); + self.llbb().add_assignment(None, success_var, success); - let value_type = result.to_rvalue().get_type(); - if let Some(struct_type) = value_type.is_struct() { - self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align); - // NOTE: since success contains the call to the intrinsic, it must be stored before - // expected so that we store expected after the call. - self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align); - } - // TODO(antoyo): handle when value is not a struct. - - result.to_rvalue() + (expected.to_rvalue(), success_var.to_rvalue()) } fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> { diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 479a814788a..e5c0b2de4ca 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -98,7 +98,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout write!(&mut name, "::{}", def.variant(index).name).unwrap(); } } - if let (&ty::Coroutine(_, _, _), &Variants::Single { index }) = + if let (&ty::Coroutine(_, _), &Variants::Single { index }) = (layout.ty.kind(), &layout.variants) { write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap(); diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 8f60175a603..4e5fe290bb1 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1072,7 +1072,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { order: rustc_codegen_ssa::common::AtomicOrdering, failure_order: rustc_codegen_ssa::common::AtomicOrdering, weak: bool, - ) -> &'ll Value { + ) -> (&'ll Value, &'ll Value) { let weak = if weak { llvm::True } else { llvm::False }; unsafe { let value = llvm::LLVMBuildAtomicCmpXchg( @@ -1085,7 +1085,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llvm::False, // SingleThreaded ); llvm::LLVMSetWeak(value, weak); - value + let val = self.extract_value(value, 0); + let success = self.extract_value(value, 1); + (val, success) } } fn atomic_rmw( diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 59c075a3d3e..76c9ac6614a 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -1066,7 +1066,7 @@ fn 
build_upvar_field_di_nodes<'ll, 'tcx>( closure_or_coroutine_di_node: &'ll DIType, ) -> SmallVec<&'ll DIType> { let (&def_id, up_var_tys) = match closure_or_coroutine_ty.kind() { - ty::Coroutine(def_id, args, _) => (def_id, args.as_coroutine().prefix_tys()), + ty::Coroutine(def_id, args) => (def_id, args.as_coroutine().prefix_tys()), ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()), _ => { bug!( diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index 4a2861af44c..4792b0798df 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -679,7 +679,7 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( }; let (coroutine_def_id, coroutine_args) = match coroutine_type_and_layout.ty.kind() { - &ty::Coroutine(def_id, args, _) => (def_id, args.as_coroutine()), + &ty::Coroutine(def_id, args) => (def_id, args.as_coroutine()), _ => unreachable!(), }; diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index eef8dbb33b4..7f671d1d061 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -336,7 +336,7 @@ pub fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( let variant_layout = coroutine_type_and_layout.for_variant(cx, variant_index); let coroutine_args = match coroutine_type_and_layout.ty.kind() { - ty::Coroutine(_, args, _) => args.as_coroutine(), + ty::Coroutine(_, args) => args.as_coroutine(), _ => unreachable!(), }; diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index cba4e3811d5..3dbe820b8ff 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -135,7 +135,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( unique_type_id: UniqueTypeId<'tcx>, ) -> DINodeCreationResult<'ll> { let coroutine_type = unique_type_id.expect_ty(); - let &ty::Coroutine(coroutine_def_id, _, _) = coroutine_type.kind() else { + let &ty::Coroutine(coroutine_def_id, _) = coroutine_type.kind() else { bug!("build_coroutine_di_node() called with non-coroutine type: `{:?}`", coroutine_type) }; diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 624ce6d8813..57b46382c96 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -54,7 +54,7 @@ fn uncached_llvm_type<'a, 'tcx>( write!(&mut name, "::{}", def.variant(index).name).unwrap(); } } - if let (&ty::Coroutine(_, _, _), &Variants::Single { index }) = + if let (&ty::Coroutine(_, _), &Variants::Single { index }) = (layout.ty.kind(), &layout.variants) { write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap(); diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 533803ea7ff..8530bf9e2b3 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -335,7 +335,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cmp = bx.ptrtoint(cmp, bx.type_isize()); src = bx.ptrtoint(src, bx.type_isize()); } - let 
pair = bx.atomic_cmpxchg( + let (val, success) = bx.atomic_cmpxchg( dst, cmp, src, @@ -343,8 +343,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { parse_ordering(bx, failure), weak, ); - let val = bx.extract_value(pair, 0); - let success = bx.extract_value(pair, 1); let val = bx.from_immediate(val); let success = bx.from_immediate(success); diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index aa411f002a0..1c5c78e6ca2 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -296,7 +296,7 @@ pub trait BuilderMethods<'a, 'tcx>: order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool, - ) -> Self::Value; + ) -> (Self::Value, Self::Value); fn atomic_rmw( &mut self, op: AtomicRmwBinOp, diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index d9f583c1d1f..bb8c17cf779 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -171,7 +171,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ty::Adt(adt, _) => { adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits) } - ty::Coroutine(def_id, args, _) => { + ty::Coroutine(def_id, args) => { let args = args.as_coroutine(); args.discriminants(def_id, *self.tcx).find(|(_, var)| var.val == discr_bits) } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index c29f23b913f..1e9e7d94596 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -85,7 +85,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>( | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) | ty::Never | ty::Tuple(_) diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 07500f74477..8b44b87647d 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -217,7 +217,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, ' // Now we know we are projecting to a field, so figure out which one. match layout.ty.kind() { // coroutines and closures. - ty::Closure(def_id, _) | ty::Coroutine(def_id, _, _) => { + ty::Closure(def_id, _) | ty::Coroutine(def_id, _) => { let mut name = None; // FIXME this should be more descriptive i.e. 
CapturePlace instead of CapturedVar // https://github.com/rust-lang/project-rfc-2229/issues/46 diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs index 68ded1d324f..b249ffb84b3 100644 --- a/compiler/rustc_const_eval/src/transform/validate.rs +++ b/compiler/rustc_const_eval/src/transform/validate.rs @@ -694,7 +694,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { }; check_equal(self, location, f_ty); } - &ty::Coroutine(def_id, args, _) => { + &ty::Coroutine(def_id, args) => { let f_ty = if let Some(var) = parent_ty.variant_index { let gen_body = if def_id == self.body.source.def_id() { self.body diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs index a82b65b19a8..976e42ad768 100644 --- a/compiler/rustc_const_eval/src/util/type_name.rs +++ b/compiler/rustc_const_eval/src/util/type_name.rs @@ -51,7 +51,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> { | ty::FnDef(def_id, args) | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. }) | ty::Closure(def_id, args) - | ty::Coroutine(def_id, args, _) => self.print_def_path(def_id, args), + | ty::Coroutine(def_id, args) => self.print_def_path(def_id, args), ty::Foreign(def_id) => self.print_def_path(def_id, &[]), ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"), diff --git a/compiler/rustc_data_structures/src/hashes.rs b/compiler/rustc_data_structures/src/hashes.rs index ad068cdbc98..291ee5bbe26 100644 --- a/compiler/rustc_data_structures/src/hashes.rs +++ b/compiler/rustc_data_structures/src/hashes.rs @@ -25,7 +25,7 @@ impl Hash64 { pub const ZERO: Hash64 = Hash64 { inner: 0 }; #[inline] - pub(crate) fn new(n: u64) -> Self { + pub fn new(n: u64) -> Self { Self { inner: n } } diff --git a/compiler/rustc_error_codes/src/error_codes/E0453.md b/compiler/rustc_error_codes/src/error_codes/E0453.md index 11789db8f36..86ca0d9eca9 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0453.md +++ b/compiler/rustc_error_codes/src/error_codes/E0453.md @@ -8,8 +8,8 @@ Example of erroneous code: #[allow(non_snake_case)] fn main() { - let MyNumber = 2; // error: allow(non_snake_case) overruled by outer - // forbid(non_snake_case) + // error: allow(non_snake_case) incompatible with previous forbid + let MyNumber = 2; } ``` diff --git a/compiler/rustc_hir/src/def_path_hash_map.rs b/compiler/rustc_hir/src/def_path_hash_map.rs index 8bfb47af26f..9a6dee1e511 100644 --- a/compiler/rustc_hir/src/def_path_hash_map.rs +++ b/compiler/rustc_hir/src/def_path_hash_map.rs @@ -1,21 +1,22 @@ -use rustc_data_structures::fingerprint::Fingerprint; -use rustc_span::def_id::{DefIndex, DefPathHash}; +use rustc_data_structures::stable_hasher::Hash64; +use rustc_span::def_id::DefIndex; #[derive(Clone, Default)] pub struct Config; impl odht::Config for Config { - type Key = DefPathHash; + // This hash-map is single-crate, so we only need to key by the local hash. 
+ type Key = Hash64; type Value = DefIndex; - type EncodedKey = [u8; 16]; + type EncodedKey = [u8; 8]; type EncodedValue = [u8; 4]; type H = odht::UnHashFn; #[inline] - fn encode_key(k: &DefPathHash) -> [u8; 16] { - k.0.to_le_bytes() + fn encode_key(k: &Hash64) -> [u8; 8] { + k.as_u64().to_le_bytes() } #[inline] @@ -24,8 +25,8 @@ impl odht::Config for Config { } #[inline] - fn decode_key(k: &[u8; 16]) -> DefPathHash { - DefPathHash(Fingerprint::from_le_bytes(*k)) + fn decode_key(k: &[u8; 8]) -> Hash64 { + Hash64::new(u64::from_le_bytes(*k)) } #[inline] diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs index 2ab9a6ef32c..9fb1fc19bf4 100644 --- a/compiler/rustc_hir/src/definitions.rs +++ b/compiler/rustc_hir/src/definitions.rs @@ -20,27 +20,42 @@ use std::hash::Hash; /// Internally the `DefPathTable` holds a tree of `DefKey`s, where each `DefKey` /// stores the `DefIndex` of its parent. /// There is one `DefPathTable` for each crate. -#[derive(Clone, Default, Debug)] +#[derive(Debug)] pub struct DefPathTable { + stable_crate_id: StableCrateId, index_to_key: IndexVec<DefIndex, DefKey>, - def_path_hashes: IndexVec<DefIndex, DefPathHash>, + // We do only store the local hash, as all the definitions are from the current crate. + def_path_hashes: IndexVec<DefIndex, Hash64>, def_path_hash_to_index: DefPathHashMap, } impl DefPathTable { + fn new(stable_crate_id: StableCrateId) -> DefPathTable { + DefPathTable { + stable_crate_id, + index_to_key: Default::default(), + def_path_hashes: Default::default(), + def_path_hash_to_index: Default::default(), + } + } + fn allocate(&mut self, key: DefKey, def_path_hash: DefPathHash) -> DefIndex { + // Assert that all DefPathHashes correctly contain the local crate's StableCrateId. + debug_assert_eq!(self.stable_crate_id, def_path_hash.stable_crate_id()); + let local_hash = def_path_hash.local_hash(); + let index = { let index = DefIndex::from(self.index_to_key.len()); debug!("DefPathTable::insert() - {:?} <-> {:?}", key, index); self.index_to_key.push(key); index }; - self.def_path_hashes.push(def_path_hash); + self.def_path_hashes.push(local_hash); debug_assert!(self.def_path_hashes.len() == self.index_to_key.len()); // Check for hash collisions of DefPathHashes. These should be // exceedingly rare. 
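        // Illustrative note: a full `DefPathHash` is the pair (StableCrateId, Hash64). Every
        // entry in this table belongs to the local crate, so the StableCrateId half is constant
        // and is kept once on the table, while the map below is keyed by the 64-bit local half
        // alone. Reconstruction is lossless:
        //     DefPathHash::new(table.stable_crate_id, full.local_hash()) == full
        // holds whenever `full.stable_crate_id() == table.stable_crate_id`, which is exactly
        // the invariant the `debug_assert_eq!` above checks at insertion time.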
- if let Some(existing) = self.def_path_hash_to_index.insert(&def_path_hash, &index) { + if let Some(existing) = self.def_path_hash_to_index.insert(&local_hash, &index) { let def_path1 = DefPath::make(LOCAL_CRATE, existing, |idx| self.def_key(idx)); let def_path2 = DefPath::make(LOCAL_CRATE, index, |idx| self.def_key(idx)); @@ -58,13 +73,6 @@ impl DefPathTable { ); } - // Assert that all DefPathHashes correctly contain the local crate's - // StableCrateId - #[cfg(debug_assertions)] - if let Some(root) = self.def_path_hashes.get(CRATE_DEF_INDEX) { - assert!(def_path_hash.stable_crate_id() == root.stable_crate_id()); - } - index } @@ -73,19 +81,19 @@ impl DefPathTable { self.index_to_key[index] } + #[instrument(level = "trace", skip(self), ret)] #[inline(always)] pub fn def_path_hash(&self, index: DefIndex) -> DefPathHash { let hash = self.def_path_hashes[index]; - debug!("def_path_hash({:?}) = {:?}", index, hash); - hash + DefPathHash::new(self.stable_crate_id, hash) } pub fn enumerated_keys_and_path_hashes( &self, - ) -> impl Iterator<Item = (DefIndex, &DefKey, &DefPathHash)> + ExactSizeIterator + '_ { + ) -> impl Iterator<Item = (DefIndex, &DefKey, DefPathHash)> + ExactSizeIterator + '_ { self.index_to_key .iter_enumerated() - .map(move |(index, key)| (index, key, &self.def_path_hashes[index])) + .map(move |(index, key)| (index, key, self.def_path_hash(index))) } } @@ -96,9 +104,6 @@ impl DefPathTable { pub struct Definitions { table: DefPathTable, next_disambiguator: UnordMap<(LocalDefId, DefPathData), u32>, - - /// The [StableCrateId] of the local crate. - stable_crate_id: StableCrateId, } /// A unique identifier that we can use to lookup a definition @@ -329,11 +334,11 @@ impl Definitions { let def_path_hash = key.compute_stable_hash(parent_hash); // Create the root definition. - let mut table = DefPathTable::default(); + let mut table = DefPathTable::new(stable_crate_id); let root = LocalDefId { local_def_index: table.allocate(key, def_path_hash) }; assert_eq!(root.local_def_index, CRATE_DEF_INDEX); - Definitions { table, next_disambiguator: Default::default(), stable_crate_id } + Definitions { table, next_disambiguator: Default::default() } } /// Adds a definition with a parent definition. 
@@ -375,10 +380,10 @@ impl Definitions { hash: DefPathHash, err: &mut dyn FnMut() -> !, ) -> LocalDefId { - debug_assert!(hash.stable_crate_id() == self.stable_crate_id); + debug_assert!(hash.stable_crate_id() == self.table.stable_crate_id); self.table .def_path_hash_to_index - .get(&hash) + .get(&hash.local_hash()) .map(|local_def_index| LocalDefId { local_def_index }) .unwrap_or_else(|| err()) } diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs index 984c2829c81..0ca0f7d2daf 100644 --- a/compiler/rustc_hir_typeck/src/check.rs +++ b/compiler/rustc_hir_typeck/src/check.rs @@ -160,12 +160,7 @@ pub(super) fn check_fn<'a, 'tcx>( )); let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap(); - Some(CoroutineTypes { - resume_ty, - yield_ty, - interior, - movability: coroutine_kind.movability(), - }) + Some(CoroutineTypes { resume_ty, yield_ty, interior }) } else { None }; diff --git a/compiler/rustc_hir_typeck/src/closure.rs b/compiler/rustc_hir_typeck/src/closure.rs index c29ef375ce4..bf6fda20df8 100644 --- a/compiler/rustc_hir_typeck/src/closure.rs +++ b/compiler/rustc_hir_typeck/src/closure.rs @@ -105,8 +105,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span: self.tcx.def_span(expr_def_id), }); - if let Some(CoroutineTypes { resume_ty, yield_ty, interior, movability }) = coroutine_types - { + if let Some(CoroutineTypes { resume_ty, yield_ty, interior }) = coroutine_types { let coroutine_args = ty::CoroutineArgs::new( self.tcx, ty::CoroutineArgsParts { @@ -119,12 +118,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }, ); - return Ty::new_coroutine( - self.tcx, - expr_def_id.to_def_id(), - coroutine_args.args, - movability, - ); + return Ty::new_coroutine(self.tcx, expr_def_id.to_def_id(), coroutine_args.args); } // Tuple up the arguments and insert the resulting function type into diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs index d9106439420..da9a2bde783 100644 --- a/compiler/rustc_hir_typeck/src/lib.rs +++ b/compiler/rustc_hir_typeck/src/lib.rs @@ -304,9 +304,6 @@ struct CoroutineTypes<'tcx> { /// Types that are captured (see `CoroutineInterior` for more). interior: Ty<'tcx>, - - /// Indicates if the coroutine is movable or static (immovable). 
- movability: hir::Movability, } #[derive(Copy, Clone, Debug, PartialEq, Eq)] diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index 6530d828b3b..47fdd64796e 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -3306,6 +3306,7 @@ fn print_disambiguation_help<'tcx>( span: Span, item: ty::AssocItem, ) -> Option<String> { + let trait_impl_type = trait_ref.self_ty().peel_refs(); let trait_ref = if item.fn_has_self_parameter { trait_ref.print_only_trait_name().to_string() } else { @@ -3318,27 +3319,34 @@ fn print_disambiguation_help<'tcx>( { let def_kind_descr = tcx.def_kind_descr(item.kind.as_def_kind(), item.def_id); let item_name = item.ident(tcx); - let rcvr_ref = tcx - .fn_sig(item.def_id) - .skip_binder() - .skip_binder() - .inputs() - .get(0) - .and_then(|ty| ty.ref_mutability()) - .map_or("", |mutbl| mutbl.ref_prefix_str()); - let args = format!( - "({}{})", - rcvr_ref, - std::iter::once(receiver) - .chain(args.iter()) - .map(|arg| tcx - .sess - .source_map() - .span_to_snippet(arg.span) - .unwrap_or_else(|_| { "_".to_owned() })) - .collect::<Vec<_>>() - .join(", "), + let first_input = + tcx.fn_sig(item.def_id).instantiate_identity().skip_binder().inputs().get(0); + let (first_arg_type, rcvr_ref) = ( + first_input.map(|first| first.peel_refs()), + first_input + .and_then(|ty| ty.ref_mutability()) + .map_or("", |mutbl| mutbl.ref_prefix_str()), ); + + // If the type of first arg of this assoc function is `Self` or current trait impl type or `arbitrary_self_types`, we need to take the receiver as args. Otherwise, we don't. + let args = if let Some(first_arg_type) = first_arg_type + && (first_arg_type == tcx.types.self_param + || first_arg_type == trait_impl_type + || item.fn_has_self_parameter) + { + Some(receiver) + } else { + None + } + .into_iter() + .chain(args) + .map(|arg| { + tcx.sess.source_map().span_to_snippet(arg.span).unwrap_or_else(|_| "_".to_owned()) + }) + .collect::<Vec<_>>() + .join(", "); + + let args = format!("({}{})", rcvr_ref, args); err.span_suggestion_verbose( span, format!( diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs index fc525a0fd4e..47b9d5f6503 100644 --- a/compiler/rustc_hir_typeck/src/upvar.rs +++ b/compiler/rustc_hir_typeck/src/upvar.rs @@ -172,7 +172,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let ty = self.node_ty(closure_hir_id); let (closure_def_id, args) = match *ty.kind() { ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args)), - ty::Coroutine(def_id, args, _) => (def_id, UpvarArgs::Coroutine(args)), + ty::Coroutine(def_id, args) => (def_id, UpvarArgs::Coroutine(args)), ty::Error(_) => { // #51714: skip analysis when we have already encountered type errors return; diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs index 2656fd529cd..11b5b437eff 100644 --- a/compiler/rustc_infer/src/infer/opaque_types.rs +++ b/compiler/rustc_infer/src/infer/opaque_types.rs @@ -456,7 +456,7 @@ where args.as_closure().sig_as_fn_ptr_ty().visit_with(self); } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { // Skip lifetime parameters of the enclosing item(s) // Also skip the witness type, because that has no free regions. 
diff --git a/compiler/rustc_infer/src/infer/outlives/components.rs b/compiler/rustc_infer/src/infer/outlives/components.rs index 47038cfd468..fc3d8375873 100644 --- a/compiler/rustc_infer/src/infer/outlives/components.rs +++ b/compiler/rustc_infer/src/infer/outlives/components.rs @@ -103,7 +103,7 @@ fn compute_components<'tcx>( compute_components(tcx, tupled_ty, out, visited); } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { // Same as the closure case let tupled_ty = args.as_coroutine().tupled_upvars_ty(); compute_components(tcx, tupled_ty, out, visited); diff --git a/compiler/rustc_infer/src/infer/relate/generalize.rs b/compiler/rustc_infer/src/infer/relate/generalize.rs index 4d8c691ea3d..27d37fd9369 100644 --- a/compiler/rustc_infer/src/infer/relate/generalize.rs +++ b/compiler/rustc_infer/src/infer/relate/generalize.rs @@ -1,6 +1,7 @@ use std::mem; use rustc_data_structures::sso::SsoHashMap; +use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_hir::def_id::DefId; use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue}; use rustc_middle::ty::error::TypeError; @@ -215,7 +216,9 @@ where let old_ambient_variance = self.ambient_variance; self.ambient_variance = self.ambient_variance.xform(variance); debug!(?self.ambient_variance, "new ambient variance"); - let r = self.relate(a, b)?; + // Recursive calls to `relate` can overflow the stack. For example a deeper version of + // `ui/associated-consts/issue-93775.rs`. + let r = ensure_sufficient_stack(|| self.relate(a, b))?; self.ambient_variance = old_ambient_variance; Ok(r) } diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs index 40b70ba4e04..39c965e75d6 100644 --- a/compiler/rustc_lint/src/context.rs +++ b/compiler/rustc_lint/src/context.rs @@ -1069,7 +1069,7 @@ impl<'a> EarlyContext<'a> { pub(crate) fn new( sess: &'a Session, features: &'a Features, - warn_about_weird_lints: bool, + lint_added_lints: bool, lint_store: &'a LintStore, registered_tools: &'a RegisteredTools, buffered: LintBuffer, @@ -1078,7 +1078,7 @@ impl<'a> EarlyContext<'a> { builder: LintLevelsBuilder::new( sess, features, - warn_about_weird_lints, + lint_added_lints, lint_store, registered_tools, ), diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs index 17c56f1ca58..c229728cb79 100644 --- a/compiler/rustc_lint/src/levels.rs +++ b/compiler/rustc_lint/src/levels.rs @@ -135,7 +135,7 @@ fn lint_expectations(tcx: TyCtxt<'_>, (): ()) -> Vec<(LintExpectationId, LintExp unstable_to_stable_ids: FxHashMap::default(), empty: FxHashMap::default(), }, - warn_about_weird_lints: false, + lint_added_lints: false, store, registered_tools: tcx.registered_tools(()), }; @@ -164,7 +164,7 @@ fn shallow_lint_levels_on(tcx: TyCtxt<'_>, owner: hir::OwnerId) -> ShallowLintLe empty: FxHashMap::default(), attrs, }, - warn_about_weird_lints: false, + lint_added_lints: false, store, registered_tools: tcx.registered_tools(()), }; @@ -451,7 +451,7 @@ pub struct LintLevelsBuilder<'s, P> { sess: &'s Session, features: &'s Features, provider: P, - warn_about_weird_lints: bool, + lint_added_lints: bool, store: &'s LintStore, registered_tools: &'s RegisteredTools, } @@ -464,7 +464,7 @@ impl<'s> LintLevelsBuilder<'s, TopDown> { pub(crate) fn new( sess: &'s Session, features: &'s Features, - warn_about_weird_lints: bool, + lint_added_lints: bool, store: &'s LintStore, registered_tools: &'s RegisteredTools, ) -> Self { @@ -472,7 +472,7 @@ impl<'s> LintLevelsBuilder<'s, TopDown> { sess, 
features, provider: TopDown { sets: LintLevelSets::new(), cur: COMMAND_LINE }, - warn_about_weird_lints, + lint_added_lints, store, registered_tools, }; @@ -642,7 +642,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> { // // This means that this only errors if we're truly lowering the lint // level from forbid. - if level != Level::Forbid { + if self.lint_added_lints && level != Level::Forbid { if let Level::Forbid = old_level { // Backwards compatibility check: // @@ -968,7 +968,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> { continue; } - _ if !self.warn_about_weird_lints => {} + _ if !self.lint_added_lints => {} CheckLintNameResult::Renamed(ref replace) => { let suggestion = @@ -1029,7 +1029,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> { } } - if !is_crate_node { + if self.lint_added_lints && !is_crate_node { for (id, &(level, ref src)) in self.current_specs().iter() { if !id.lint.crate_level_only { continue; @@ -1054,33 +1054,33 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> { /// Checks if the lint is gated on a feature that is not enabled. /// /// Returns `true` if the lint's feature is enabled. - // FIXME only emit this once for each attribute, instead of repeating it 4 times for - // pre-expansion lints, post-expansion lints, `shallow_lint_levels_on` and `lint_expectations`. #[track_caller] fn check_gated_lint(&self, lint_id: LintId, span: Span, lint_from_cli: bool) -> bool { if let Some(feature) = lint_id.lint.feature_gate { if !self.features.active(feature) { - let lint = builtin::UNKNOWN_LINTS; - let (level, src) = self.lint_level(builtin::UNKNOWN_LINTS); - struct_lint_level( - self.sess, - lint, - level, - src, - Some(span.into()), - fluent::lint_unknown_gated_lint, - |lint| { - lint.set_arg("name", lint_id.lint.name_lower()); - lint.note(fluent::lint_note); - rustc_session::parse::add_feature_diagnostics_for_issue( - lint, - &self.sess.parse_sess, - feature, - GateIssue::Language, - lint_from_cli, - ); - }, - ); + if self.lint_added_lints { + let lint = builtin::UNKNOWN_LINTS; + let (level, src) = self.lint_level(builtin::UNKNOWN_LINTS); + struct_lint_level( + self.sess, + lint, + level, + src, + Some(span.into()), + fluent::lint_unknown_gated_lint, + |lint| { + lint.set_arg("name", lint_id.lint.name_lower()); + lint.note(fluent::lint_note); + rustc_session::parse::add_feature_diagnostics_for_issue( + lint, + &self.sess.parse_sess, + feature, + GateIssue::Language, + lint_from_cli, + ); + }, + ); + } return false; } } diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index ae4a0e15fab..2de29db9e5c 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -508,21 +508,19 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Span { let start = decoder.position(); - let mode = SpanEncodingMode::decode(decoder); - let data = match mode { - SpanEncodingMode::Direct => SpanData::decode(decoder), - SpanEncodingMode::RelativeOffset(offset) => { - decoder.with_position(start - offset, |decoder| { - let mode = SpanEncodingMode::decode(decoder); - debug_assert!(matches!(mode, SpanEncodingMode::Direct)); - SpanData::decode(decoder) - }) - } - SpanEncodingMode::AbsoluteOffset(addr) => decoder.with_position(addr, |decoder| { - let mode = SpanEncodingMode::decode(decoder); - debug_assert!(matches!(mode, 
SpanEncodingMode::Direct)); - SpanData::decode(decoder) - }), + let tag = SpanTag(decoder.peek_byte()); + let data = if tag.kind() == SpanKind::Indirect { + // Skip past the tag we just peek'd. + decoder.read_u8(); + let offset_or_position = decoder.read_usize(); + let position = if tag.is_relative_offset() { + start - offset_or_position + } else { + offset_or_position + }; + decoder.with_position(position, SpanData::decode) + } else { + SpanData::decode(decoder) }; Span::new(data.lo, data.hi, data.ctxt, data.parent) } @@ -530,17 +528,17 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SpanData { fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> SpanData { - let ctxt = SyntaxContext::decode(decoder); - let tag = u8::decode(decoder); + let tag = SpanTag::decode(decoder); + let ctxt = tag.context().unwrap_or_else(|| SyntaxContext::decode(decoder)); - if tag == TAG_PARTIAL_SPAN { + if tag.kind() == SpanKind::Partial { return DUMMY_SP.with_ctxt(ctxt).data(); } - debug_assert!(tag == TAG_VALID_SPAN_LOCAL || tag == TAG_VALID_SPAN_FOREIGN); + debug_assert!(tag.kind() == SpanKind::Local || tag.kind() == SpanKind::Foreign); let lo = BytePos::decode(decoder); - let len = BytePos::decode(decoder); + let len = tag.length().unwrap_or_else(|| BytePos::decode(decoder)); let hi = lo + len; let Some(sess) = decoder.sess else { @@ -581,7 +579,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SpanData { // treat the 'local' and 'foreign' cases almost identically during deserialization: // we can call `imported_source_file` for the proper crate, and binary search // through the returned slice using our span. - let source_file = if tag == TAG_VALID_SPAN_LOCAL { + let source_file = if tag.kind() == SpanKind::Local { decoder.cdata().imported_source_file(metadata_index, sess) } else { // When we encode a proc-macro crate, all `Span`s should be encoded diff --git a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs index 4f280bb9d80..9950bc1c31f 100644 --- a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs +++ b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs @@ -19,7 +19,9 @@ impl DefPathHashMapRef<'_> { #[inline] pub fn def_path_hash_to_def_index(&self, def_path_hash: &DefPathHash) -> DefIndex { match *self { - DefPathHashMapRef::OwnedFromMetadata(ref map) => map.get(def_path_hash).unwrap(), + DefPathHashMapRef::OwnedFromMetadata(ref map) => { + map.get(&def_path_hash.local_hash()).unwrap() + } DefPathHashMapRef::BorrowedFromTcx(_) => { panic!("DefPathHashMap::BorrowedFromTcx variant only exists for serialization") } diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index f322588e365..aca7a66596e 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -177,15 +177,17 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span { // previously saved offset must be smaller than the current position. 
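            // Worked example (illustrative): if the span's data was first written at position
            // 1_000 and the encoder is now at position 1_500, the relative offset 500 is
            // smaller than 1_000, so 500 is encoded and the decoder rebases it as
            // `start - 500`; if the first copy sits at position 100 and the encoder is now at
            // 100_000, the offset 99_900 is not smaller, so the absolute position 100 is
            // encoded instead. Either way the smaller number, hence the shorter varint, wins.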
let offset = s.opaque.position() - last_location; if offset < last_location { - SpanEncodingMode::RelativeOffset(offset).encode(s) + SpanTag::indirect(true).encode(s); + offset.encode(s); } else { - SpanEncodingMode::AbsoluteOffset(last_location).encode(s) + SpanTag::indirect(false).encode(s); + last_location.encode(s); } } Entry::Vacant(v) => { let position = s.opaque.position(); v.insert(position); - SpanEncodingMode::Direct.encode(s); + // Data is encoded with a SpanTag prefix (see below). self.data().encode(s); } } @@ -225,14 +227,15 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { // IMPORTANT: If this is ever changed, be sure to update // `rustc_span::hygiene::raw_encode_expn_id` to handle // encoding `ExpnData` for proc-macro crates. - if s.is_proc_macro { - SyntaxContext::root().encode(s); - } else { - self.ctxt.encode(s); - } + let ctxt = if s.is_proc_macro { SyntaxContext::root() } else { self.ctxt }; if self.is_dummy() { - return TAG_PARTIAL_SPAN.encode(s); + let tag = SpanTag::new(SpanKind::Partial, ctxt, 0); + tag.encode(s); + if tag.context().is_none() { + ctxt.encode(s); + } + return; } // The Span infrastructure should make sure that this invariant holds: @@ -250,7 +253,12 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { if !source_file.contains(self.hi) { // Unfortunately, macro expansion still sometimes generates Spans // that malformed in this way. - return TAG_PARTIAL_SPAN.encode(s); + let tag = SpanTag::new(SpanKind::Partial, ctxt, 0); + tag.encode(s); + if tag.context().is_none() { + ctxt.encode(s); + } + return; } // There are two possible cases here: @@ -269,7 +277,7 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { // if we're a proc-macro crate. // This allows us to avoid loading the dependencies of proc-macro crates: all of // the information we need to decode `Span`s is stored in the proc-macro crate. - let (tag, metadata_index) = if source_file.is_imported() && !s.is_proc_macro { + let (kind, metadata_index) = if source_file.is_imported() && !s.is_proc_macro { // To simplify deserialization, we 'rebase' this span onto the crate it originally came // from (the crate that 'owns' the file it references. These rebased 'lo' and 'hi' // values are relative to the source map information for the 'foreign' crate whose @@ -287,7 +295,7 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { } }; - (TAG_VALID_SPAN_FOREIGN, metadata_index) + (SpanKind::Foreign, metadata_index) } else { // Record the fact that we need to encode the data for this `SourceFile` let source_files = @@ -296,7 +304,7 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { let metadata_index: u32 = metadata_index.try_into().expect("cannot export more than U32_MAX files"); - (TAG_VALID_SPAN_LOCAL, metadata_index) + (SpanKind::Local, metadata_index) }; // Encode the start position relative to the file start, so we profit more from the @@ -307,14 +315,20 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { // from the variable-length integer encoding that we use. let len = self.hi - self.lo; + let tag = SpanTag::new(kind, ctxt, len.0 as usize); tag.encode(s); + if tag.context().is_none() { + ctxt.encode(s); + } lo.encode(s); - len.encode(s); + if tag.length().is_none() { + len.encode(s); + } // Encode the index of the `SourceFile` for the span, in order to make decoding faster. 
metadata_index.encode(s); - if tag == TAG_VALID_SPAN_FOREIGN { + if kind == SpanKind::Foreign { // This needs to be two lines to avoid holding the `s.source_file_cache` // while calling `cnum.encode(s)` let cnum = s.source_file_cache.0.cnum; @@ -1444,7 +1458,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { if def_kind == DefKind::Closure && let Some(coroutine_kind) = self.tcx.coroutine_kind(def_id) { - self.tables.coroutine_kind.set(def_id.index, Some(coroutine_kind)); + self.tables.coroutine_kind.set(def_id.index, Some(coroutine_kind)) } if let DefKind::Enum | DefKind::Struct | DefKind::Union = def_kind { self.encode_info_for_adt(local_id); diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs index bafd3f0b84d..54ee50c2358 100644 --- a/compiler/rustc_metadata/src/rmeta/mod.rs +++ b/compiler/rustc_metadata/src/rmeta/mod.rs @@ -66,13 +66,6 @@ const METADATA_VERSION: u8 = 9; /// unsigned integer, and further followed by the rustc version string. pub const METADATA_HEADER: &[u8] = &[b'r', b'u', b's', b't', 0, 0, 0, METADATA_VERSION]; -#[derive(Encodable, Decodable)] -enum SpanEncodingMode { - RelativeOffset(usize), - AbsoluteOffset(usize), - Direct, -} - /// A value of type T referred to by its absolute position /// in the metadata, and which can be decoded lazily. /// @@ -488,10 +481,88 @@ bitflags::bitflags! { } } -// Tags used for encoding Spans: -const TAG_VALID_SPAN_LOCAL: u8 = 0; -const TAG_VALID_SPAN_FOREIGN: u8 = 1; -const TAG_PARTIAL_SPAN: u8 = 2; +/// A span tag byte encodes a bunch of data, so that we can cut out a few extra bytes from span +/// encodings (which are very common, for example, libcore has ~650,000 unique spans and over 1.1 +/// million references to prior-written spans). +/// +/// The byte format is split into several parts: +/// +/// [ a a a a a c d d ] +/// +/// `a` bits represent the span length. We have 5 bits, so we can store lengths up to 30 inline, with +/// an all-1s pattern representing that the length is stored separately. +/// +/// `c` represents whether the span context is zero (and then it is not stored as a separate varint) +/// for direct span encodings, and whether the offset is absolute or relative otherwise (zero for +/// absolute). +/// +/// d bits represent the kind of span we are storing (local, foreign, partial, indirect). +#[derive(Encodable, Decodable, Copy, Clone)] +struct SpanTag(u8); + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum SpanKind { + Local = 0b00, + Foreign = 0b01, + Partial = 0b10, + // Indicates the actual span contents are elsewhere. + // If this is the kind, then the span context bit represents whether it is a relative or + // absolute offset. + Indirect = 0b11, +} + +impl SpanTag { + fn new(kind: SpanKind, context: rustc_span::SyntaxContext, length: usize) -> SpanTag { + let mut data = 0u8; + data |= kind as u8; + if context.is_root() { + data |= 0b100; + } + let all_1s_len = (0xffu8 << 3) >> 3; + // strictly less than - all 1s pattern is a sentinel for storage being out of band. 
+ if length < all_1s_len as usize { + data |= (length as u8) << 3; + } else { + data |= all_1s_len << 3; + } + + SpanTag(data) + } + + fn indirect(relative: bool) -> SpanTag { + let mut tag = SpanTag(SpanKind::Indirect as u8); + if relative { + tag.0 |= 0b100; + } + tag + } + + fn kind(self) -> SpanKind { + let masked = self.0 & 0b11; + match masked { + 0b00 => SpanKind::Local, + 0b01 => SpanKind::Foreign, + 0b10 => SpanKind::Partial, + 0b11 => SpanKind::Indirect, + _ => unreachable!(), + } + } + + fn is_relative_offset(self) -> bool { + debug_assert_eq!(self.kind(), SpanKind::Indirect); + self.0 & 0b100 != 0 + } + + fn context(self) -> Option<rustc_span::SyntaxContext> { + if self.0 & 0b100 != 0 { Some(rustc_span::SyntaxContext::root()) } else { None } + } + + fn length(self) -> Option<rustc_span::BytePos> { + let all_1s_len = (0xffu8 << 3) >> 3; + let len = self.0 >> 3; + if len != all_1s_len { Some(rustc_span::BytePos(u32::from(len))) } else { None } + } +} // Tags for encoding Symbol's const SYMBOL_STR: u8 = 0; diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index b995f622388..8e7aaee065f 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -1049,7 +1049,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { struct_fmt.finish() }), - AggregateKind::Coroutine(def_id, _, _) => ty::tls::with(|tcx| { + AggregateKind::Coroutine(def_id, _) => ty::tls::with(|tcx| { let name = format!("{{coroutine@{:?}}}", tcx.def_span(def_id)); let mut struct_fmt = fmt.debug_struct(&name); @@ -1304,11 +1304,11 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> { self.push(&format!("+ args: {args:#?}")); } - AggregateKind::Coroutine(def_id, args, movability) => { + AggregateKind::Coroutine(def_id, args) => { self.push("coroutine"); self.push(&format!("+ def_id: {def_id:?}")); self.push(&format!("+ args: {args:#?}")); - self.push(&format!("+ movability: {movability:?}")); + self.push(&format!("+ kind: {:?}", self.tcx.coroutine_kind(def_id))); } AggregateKind::Adt(_, _, _, Some(user_ty), _) => { diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index 8cf9e55f0b6..462076d750f 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -14,7 +14,6 @@ use crate::ty::{Region, UserTypeAnnotationIndex}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_hir::def_id::DefId; -use rustc_hir::{self as hir}; use rustc_hir::{self, CoroutineKind}; use rustc_index::IndexVec; use rustc_target::abi::{FieldIdx, VariantIdx}; @@ -1344,7 +1343,7 @@ pub enum AggregateKind<'tcx> { Adt(DefId, VariantIdx, GenericArgsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<FieldIdx>), Closure(DefId, GenericArgsRef<'tcx>), - Coroutine(DefId, GenericArgsRef<'tcx>, hir::Movability), + Coroutine(DefId, GenericArgsRef<'tcx>), } #[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)] diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs index f9b2a6ee8aa..234ccee5546 100644 --- a/compiler/rustc_middle/src/mir/tcx.rs +++ b/compiler/rustc_middle/src/mir/tcx.rs @@ -201,9 +201,7 @@ impl<'tcx> Rvalue<'tcx> { } AggregateKind::Adt(did, _, args, _, _) => tcx.type_of(did).instantiate(tcx, args), AggregateKind::Closure(did, args) => Ty::new_closure(tcx, did, args), - AggregateKind::Coroutine(did, args, movability) => { - Ty::new_coroutine(tcx, did, args, movability) - } + AggregateKind::Coroutine(did, args) 
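To make the tag byte concrete: the comment above describes a layout with a 2-bit kind, a 1-bit flag (root context, or relative-vs-absolute for indirect spans), and a 5-bit inline length where an all-ones pattern means the length is stored separately. A standalone sketch of that packing (hypothetical helper names, not the rustc `SpanTag` API):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind {
    Local = 0b00,
    Foreign = 0b01,
    Partial = 0b10,
    Indirect = 0b11,
}

// All-ones in the five length bits: "length stored out of band".
const LEN_SENTINEL: u8 = 0b1_1111;

fn pack(kind: Kind, flag: bool, len: usize) -> u8 {
    // Lengths 0..=30 are stored inline in the top five bits.
    let inline_len = if len < LEN_SENTINEL as usize { len as u8 } else { LEN_SENTINEL };
    (kind as u8) | ((flag as u8) << 2) | (inline_len << 3)
}

fn unpack(tag: u8) -> (Kind, bool, Option<u32>) {
    let kind = match tag & 0b11 {
        0b00 => Kind::Local,
        0b01 => Kind::Foreign,
        0b10 => Kind::Partial,
        _ => Kind::Indirect,
    };
    let flag = tag & 0b100 != 0;
    let len = tag >> 3;
    let inline_len = if len != LEN_SENTINEL { Some(u32::from(len)) } else { None };
    (kind, flag, inline_len)
}

fn main() {
    // Short spans with a root context fit entirely in the one-byte tag.
    assert_eq!(unpack(pack(Kind::Local, true, 17)), (Kind::Local, true, Some(17)));
    // Longer spans fall back to encoding the length as a separate varint.
    assert_eq!(unpack(pack(Kind::Local, false, 4096)), (Kind::Local, false, None));
}
```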
=> Ty::new_coroutine(tcx, did, args), }, Rvalue::ShallowInitBox(_, ty) => Ty::new_box(tcx, ty), Rvalue::CopyForDeref(ref place) => place.ty(local_decls, tcx).ty, diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 9059936f495..132ecf91af1 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -736,7 +736,6 @@ macro_rules! make_mir_visitor { AggregateKind::Coroutine( _, coroutine_args, - _movability, ) => { self.visit_args(coroutine_args, location); } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 17691de630f..6807eacb7f1 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -847,6 +847,12 @@ impl<'tcx> TyCtxt<'tcx> { self.coroutine_kind(def_id).is_some() } + /// Returns the movability of the coroutine of `def_id`, or panics + /// if given a `def_id` that is not a coroutine. + pub fn coroutine_movability(self, def_id: DefId) -> hir::Movability { + self.coroutine_kind(def_id).expect("expected a coroutine").movability() + } + /// Returns `true` if the node pointed to by `def_id` is a coroutine for an async construct. pub fn coroutine_is_async(self, def_id: DefId) -> bool { matches!( diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs index 38a9cabca97..6ed68f90eb3 100644 --- a/compiler/rustc_middle/src/ty/fast_reject.rs +++ b/compiler/rustc_middle/src/ty/fast_reject.rs @@ -128,7 +128,7 @@ pub fn simplify_type<'tcx>( }, ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)), ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(SimplifiedType::Closure(def_id)), - ty::Coroutine(def_id, _, _) => Some(SimplifiedType::Coroutine(def_id)), + ty::Coroutine(def_id, _) => Some(SimplifiedType::Coroutine(def_id)), ty::CoroutineWitness(def_id, _) => Some(SimplifiedType::CoroutineWitness(def_id)), ty::Never => Some(SimplifiedType::Never), ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())), diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index f9a2385b100..0c1d1091414 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -96,7 +96,7 @@ impl FlagComputation { self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { let args = args.as_coroutine(); let should_remove_further_specializable = !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE); diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index 1c7a7545e2b..1b6d59ab257 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -689,13 +689,13 @@ fn polymorphize<'tcx>( Ty::new_closure(self.tcx, def_id, polymorphized_args) } } - ty::Coroutine(def_id, args, movability) => { + ty::Coroutine(def_id, args) => { let polymorphized_args = polymorphize(self.tcx, ty::InstanceDef::Item(def_id), args); if args == polymorphized_args { ty } else { - Ty::new_coroutine(self.tcx, def_id, polymorphized_args, movability) + Ty::new_coroutine(self.tcx, def_id, polymorphized_args) } } _ => ty.super_fold_with(self), diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 8a02914b435..5cc0ce87c9b 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -906,7 +906,7 @@ where i, ), - 
ty::Coroutine(def_id, args, _) => match this.variants { + ty::Coroutine(def_id, args) => match this.variants { Variants::Single { index } => TyMaybeWithLayout::Ty( args.as_coroutine() .state_tys(def_id, tcx) diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs index 71fe7d15a6c..fc4d4c9a3d2 100644 --- a/compiler/rustc_middle/src/ty/opaque_types.rs +++ b/compiler/rustc_middle/src/ty/opaque_types.rs @@ -153,9 +153,9 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> { Ty::new_closure(self.tcx, def_id, args) } - ty::Coroutine(def_id, args, movability) => { + ty::Coroutine(def_id, args) => { let args = self.fold_closure_args(def_id, args); - Ty::new_coroutine(self.tcx, def_id, args, movability) + Ty::new_coroutine(self.tcx, def_id, args) } ty::CoroutineWitness(def_id, args) => { diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index 5e09154789a..f32b7b0852a 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -259,7 +259,7 @@ fn characteristic_def_id_of_type_cached<'a>( ty::FnDef(def_id, _) | ty::Closure(def_id, _) - | ty::Coroutine(def_id, _, _) + | ty::Coroutine(def_id, _) | ty::CoroutineWitness(def_id, _) | ty::Foreign(def_id) => Some(def_id), diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index f7900d883ad..99384e34222 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -783,14 +783,14 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } } ty::Str => p!("str"), - ty::Coroutine(did, args, movability) => { + ty::Coroutine(did, args) => { p!(write("{{")); let coroutine_kind = self.tcx().coroutine_kind(did).unwrap(); let should_print_movability = self.should_print_verbose() || matches!(coroutine_kind, hir::CoroutineKind::Coroutine(_)); if should_print_movability { - match movability { + match coroutine_kind.movability() { hir::Movability::Movable => {} hir::Movability::Static => p!("static "), } @@ -1055,7 +1055,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { && assoc.trait_container(tcx) == tcx.lang_items().coroutine_trait() && assoc.name == rustc_span::sym::Return { - if let ty::Coroutine(_, args, _) = args.type_at(0).kind() { + if let ty::Coroutine(_, args) = args.type_at(0).kind() { let return_ty = args.as_coroutine().return_ty(); if !return_ty.is_ty_var() { return_ty.into() diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 9d92f81db0b..8543bd0bbdd 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -455,14 +455,12 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>( Ok(Ty::new_dynamic(tcx, relation.relate(a_obj, b_obj)?, region_bound, a_repr)) } - (&ty::Coroutine(a_id, a_args, movability), &ty::Coroutine(b_id, b_args, _)) - if a_id == b_id => - { + (&ty::Coroutine(a_id, a_args), &ty::Coroutine(b_id, b_args)) if a_id == b_id => { // All Coroutine types with the same id represent // the (anonymous) type of the same coroutine expression. So // all of their regions should be equated. 
let args = relate_args_invariantly(relation, a_args, b_args)?; - Ok(Ty::new_coroutine(tcx, a_id, args, movability)) + Ok(Ty::new_coroutine(tcx, a_id, args)) } (&ty::CoroutineWitness(a_id, a_args), &ty::CoroutineWitness(b_id, b_args)) diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 1c75d73e552..7c869adbd83 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -578,9 +578,7 @@ impl<'tcx> TypeSuperFoldable<TyCtxt<'tcx>> for Ty<'tcx> { ty::Ref(r, ty, mutbl) => { ty::Ref(r.try_fold_with(folder)?, ty.try_fold_with(folder)?, mutbl) } - ty::Coroutine(did, args, movability) => { - ty::Coroutine(did, args.try_fold_with(folder)?, movability) - } + ty::Coroutine(did, args) => ty::Coroutine(did, args.try_fold_with(folder)?), ty::CoroutineWitness(did, args) => { ty::CoroutineWitness(did, args.try_fold_with(folder)?) } @@ -630,7 +628,7 @@ impl<'tcx> TypeSuperVisitable<TyCtxt<'tcx>> for Ty<'tcx> { r.visit_with(visitor)?; ty.visit_with(visitor) } - ty::Coroutine(_did, ref args, _) => args.visit_with(visitor), + ty::Coroutine(_did, ref args) => args.visit_with(visitor), ty::CoroutineWitness(_did, ref args) => args.visit_with(visitor), ty::Closure(_did, ref args) => args.visit_with(visitor), ty::Alias(_, ref data) => data.visit_with(visitor), diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 5b9dff8e3f2..38bf39bff90 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -241,38 +241,36 @@ pub struct ClosureArgs<'tcx> { } /// Struct returned by `split()`. -pub struct ClosureArgsParts<'tcx, T> { +pub struct ClosureArgsParts<'tcx> { pub parent_args: &'tcx [GenericArg<'tcx>], - pub closure_kind_ty: T, - pub closure_sig_as_fn_ptr_ty: T, - pub tupled_upvars_ty: T, + pub closure_kind_ty: Ty<'tcx>, + pub closure_sig_as_fn_ptr_ty: Ty<'tcx>, + pub tupled_upvars_ty: Ty<'tcx>, } impl<'tcx> ClosureArgs<'tcx> { /// Construct `ClosureArgs` from `ClosureArgsParts`, containing `Args` /// for the closure parent, alongside additional closure-specific components. - pub fn new(tcx: TyCtxt<'tcx>, parts: ClosureArgsParts<'tcx, Ty<'tcx>>) -> ClosureArgs<'tcx> { + pub fn new(tcx: TyCtxt<'tcx>, parts: ClosureArgsParts<'tcx>) -> ClosureArgs<'tcx> { ClosureArgs { - args: tcx.mk_args_from_iter( - parts.parent_args.iter().copied().chain( - [parts.closure_kind_ty, parts.closure_sig_as_fn_ptr_ty, parts.tupled_upvars_ty] - .iter() - .map(|&ty| ty.into()), - ), - ), + args: tcx.mk_args_from_iter(parts.parent_args.iter().copied().chain([ + parts.closure_kind_ty.into(), + parts.closure_sig_as_fn_ptr_ty.into(), + parts.tupled_upvars_ty.into(), + ])), } } /// Divides the closure args into their respective components. /// The ordering assumed here must match that used by `ClosureArgs::new` above. - fn split(self) -> ClosureArgsParts<'tcx, GenericArg<'tcx>> { + fn split(self) -> ClosureArgsParts<'tcx> { match self.args[..] 
{ [ref parent_args @ .., closure_kind_ty, closure_sig_as_fn_ptr_ty, tupled_upvars_ty] => { ClosureArgsParts { parent_args, - closure_kind_ty, - closure_sig_as_fn_ptr_ty, - tupled_upvars_ty, + closure_kind_ty: closure_kind_ty.expect_ty(), + closure_sig_as_fn_ptr_ty: closure_sig_as_fn_ptr_ty.expect_ty(), + tupled_upvars_ty: tupled_upvars_ty.expect_ty(), } } _ => bug!("closure args missing synthetics"), @@ -285,7 +283,7 @@ impl<'tcx> ClosureArgs<'tcx> { /// Used primarily by `ty::print::pretty` to be able to handle closure /// types that haven't had their synthetic types substituted in. pub fn is_valid(self) -> bool { - self.args.len() >= 3 && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_)) + self.args.len() >= 3 && matches!(self.split().tupled_upvars_ty.kind(), Tuple(_)) } /// Returns the substitutions of the closure's parent. @@ -309,14 +307,14 @@ impl<'tcx> ClosureArgs<'tcx> { /// Returns the tuple type representing the upvars for this closure. #[inline] pub fn tupled_upvars_ty(self) -> Ty<'tcx> { - self.split().tupled_upvars_ty.expect_ty() + self.split().tupled_upvars_ty } /// Returns the closure kind for this closure; may return a type /// variable during inference. To get the closure kind during /// inference, use `infcx.closure_kind(args)`. pub fn kind_ty(self) -> Ty<'tcx> { - self.split().closure_kind_ty.expect_ty() + self.split().closure_kind_ty } /// Returns the `fn` pointer type representing the closure signature for this @@ -325,7 +323,7 @@ impl<'tcx> ClosureArgs<'tcx> { // type is known at the time of the creation of `ClosureArgs`, // see `rustc_hir_analysis::check::closure`. pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> { - self.split().closure_sig_as_fn_ptr_ty.expect_ty() + self.split().closure_sig_as_fn_ptr_ty } /// Returns the closure kind for this closure; only usable outside @@ -357,51 +355,42 @@ pub struct CoroutineArgs<'tcx> { pub args: GenericArgsRef<'tcx>, } -pub struct CoroutineArgsParts<'tcx, T> { +pub struct CoroutineArgsParts<'tcx> { pub parent_args: &'tcx [GenericArg<'tcx>], - pub resume_ty: T, - pub yield_ty: T, - pub return_ty: T, - pub witness: T, - pub tupled_upvars_ty: T, + pub resume_ty: Ty<'tcx>, + pub yield_ty: Ty<'tcx>, + pub return_ty: Ty<'tcx>, + pub witness: Ty<'tcx>, + pub tupled_upvars_ty: Ty<'tcx>, } impl<'tcx> CoroutineArgs<'tcx> { /// Construct `CoroutineArgs` from `CoroutineArgsParts`, containing `Args` /// for the coroutine parent, alongside additional coroutine-specific components. - pub fn new( - tcx: TyCtxt<'tcx>, - parts: CoroutineArgsParts<'tcx, Ty<'tcx>>, - ) -> CoroutineArgs<'tcx> { + pub fn new(tcx: TyCtxt<'tcx>, parts: CoroutineArgsParts<'tcx>) -> CoroutineArgs<'tcx> { CoroutineArgs { - args: tcx.mk_args_from_iter( - parts.parent_args.iter().copied().chain( - [ - parts.resume_ty, - parts.yield_ty, - parts.return_ty, - parts.witness, - parts.tupled_upvars_ty, - ] - .iter() - .map(|&ty| ty.into()), - ), - ), + args: tcx.mk_args_from_iter(parts.parent_args.iter().copied().chain([ + parts.resume_ty.into(), + parts.yield_ty.into(), + parts.return_ty.into(), + parts.witness.into(), + parts.tupled_upvars_ty.into(), + ])), } } /// Divides the coroutine args into their respective components. /// The ordering assumed here must match that used by `CoroutineArgs::new` above. - fn split(self) -> CoroutineArgsParts<'tcx, GenericArg<'tcx>> { + fn split(self) -> CoroutineArgsParts<'tcx> { match self.args[..] 
{ [ref parent_args @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => { CoroutineArgsParts { parent_args, - resume_ty, - yield_ty, - return_ty, - witness, - tupled_upvars_ty, + resume_ty: resume_ty.expect_ty(), + yield_ty: yield_ty.expect_ty(), + return_ty: return_ty.expect_ty(), + witness: witness.expect_ty(), + tupled_upvars_ty: tupled_upvars_ty.expect_ty(), } } _ => bug!("coroutine args missing synthetics"), @@ -414,7 +403,7 @@ impl<'tcx> CoroutineArgs<'tcx> { /// Used primarily by `ty::print::pretty` to be able to handle coroutine /// types that haven't had their synthetic types substituted in. pub fn is_valid(self) -> bool { - self.args.len() >= 5 && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_)) + self.args.len() >= 5 && matches!(self.split().tupled_upvars_ty.kind(), Tuple(_)) } /// Returns the substitutions of the coroutine's parent. @@ -428,7 +417,7 @@ impl<'tcx> CoroutineArgs<'tcx> { /// The state transformation MIR pass may only produce layouts which mention types /// in this tuple. Upvars are not counted here. pub fn witness(self) -> Ty<'tcx> { - self.split().witness.expect_ty() + self.split().witness } /// Returns an iterator over the list of types of captured paths by the coroutine. @@ -447,31 +436,32 @@ impl<'tcx> CoroutineArgs<'tcx> { /// Returns the tuple type representing the upvars for this coroutine. #[inline] pub fn tupled_upvars_ty(self) -> Ty<'tcx> { - self.split().tupled_upvars_ty.expect_ty() + self.split().tupled_upvars_ty } /// Returns the type representing the resume type of the coroutine. pub fn resume_ty(self) -> Ty<'tcx> { - self.split().resume_ty.expect_ty() + self.split().resume_ty } /// Returns the type representing the yield type of the coroutine. pub fn yield_ty(self) -> Ty<'tcx> { - self.split().yield_ty.expect_ty() + self.split().yield_ty } /// Returns the type representing the return type of the coroutine. pub fn return_ty(self) -> Ty<'tcx> { - self.split().return_ty.expect_ty() + self.split().return_ty } /// Returns the "coroutine signature", which consists of its resume, yield /// and return types. 
pub fn sig(self) -> GenSig<'tcx> { + let parts = self.split(); ty::GenSig { - resume_ty: self.resume_ty(), - yield_ty: self.yield_ty(), - return_ty: self.return_ty(), + resume_ty: parts.resume_ty, + yield_ty: parts.yield_ty, + return_ty: parts.return_ty, } } } @@ -2168,14 +2158,13 @@ impl<'tcx> Ty<'tcx> { tcx: TyCtxt<'tcx>, def_id: DefId, coroutine_args: GenericArgsRef<'tcx>, - movability: hir::Movability, ) -> Ty<'tcx> { debug_assert_eq!( coroutine_args.len(), tcx.generics_of(tcx.typeck_root_def_id(def_id)).count() + 5, "coroutine constructed with incorrect number of substitutions" ); - Ty::new(tcx, Coroutine(def_id, coroutine_args, movability)) + Ty::new(tcx, Coroutine(def_id, coroutine_args)) } #[inline] @@ -2656,7 +2645,7 @@ impl<'tcx> Ty<'tcx> { pub fn variant_range(self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> { match self.kind() { TyKind::Adt(adt, _) => Some(adt.variant_range()), - TyKind::Coroutine(def_id, args, _) => { + TyKind::Coroutine(def_id, args) => { Some(args.as_coroutine().variant_range(*def_id, tcx)) } _ => None, @@ -2677,7 +2666,7 @@ impl<'tcx> Ty<'tcx> { TyKind::Adt(adt, _) if adt.is_enum() => { Some(adt.discriminant_for_variant(tcx, variant_index)) } - TyKind::Coroutine(def_id, args, _) => { + TyKind::Coroutine(def_id, args) => { Some(args.as_coroutine().discriminant_for_variant(*def_id, tcx, variant_index)) } _ => None, @@ -2688,7 +2677,7 @@ impl<'tcx> Ty<'tcx> { pub fn discriminant_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { match self.kind() { ty::Adt(adt, _) if adt.is_enum() => adt.repr().discr_type().to_ty(tcx), - ty::Coroutine(_, args, _) => args.as_coroutine().discr_ty(tcx), + ty::Coroutine(_, args) => args.as_coroutine().discr_ty(tcx), ty::Param(_) | ty::Alias(..) | ty::Infer(ty::TyVar(_)) => { let assoc_items = tcx.associated_item_def_ids( @@ -2983,7 +2972,7 @@ impl<'tcx> Ty<'tcx> { | FnPtr(_) | Dynamic(_, _, _) | Closure(_, _) - | Coroutine(_, _, _) + | Coroutine(_, _) | CoroutineWitness(..) | Never | Tuple(_) => true, diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs index 20bdbcb5b7b..9050716db9d 100644 --- a/compiler/rustc_middle/src/ty/walk.rs +++ b/compiler/rustc_middle/src/ty/walk.rs @@ -189,7 +189,7 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) } ty::Adt(_, args) | ty::Closure(_, args) - | ty::Coroutine(_, args, _) + | ty::Coroutine(_, args) | ty::CoroutineWitness(_, args) | ty::FnDef(_, args) => { stack.extend(args.iter().rev()); diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs index 04dcc6854c7..f799be165ec 100644 --- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs @@ -387,8 +387,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { closure_id, args, ref upvars, - movability, ref fake_reads, + movability: _, }) => { // Convert the closure fake reads, if any, from `ExprRef` to mir `Place` // and push the fake reads. @@ -474,10 +474,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let result = match args { UpvarArgs::Coroutine(args) => { - // We implicitly set the discriminant to 0. See - // librustc_mir/transform/deaggregator.rs for details. 
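The `ClosureArgsParts`/`CoroutineArgsParts` rework above drops the generic `T` parameter: the parts are built by chaining a fixed set of synthetic entries after the parent's generic args, and recovered with a trailing slice pattern that downcasts each entry back to a `Ty` via `expect_ty()`. A self-contained sketch of that layout, with strings standing in for generic args and types (illustrative names only):

```rust
struct Parts<'a> {
    parent_args: &'a [String],
    resume_ty: &'a str,
    yield_ty: &'a str,
    return_ty: &'a str,
}

// Mirrors `mk_args_from_iter(parent_args.iter().copied().chain([...]))`:
// the synthetic components always come last, in a fixed order.
fn build(parent: &[&str], resume: &str, yld: &str, ret: &str) -> Vec<String> {
    parent
        .iter()
        .map(|s| s.to_string())
        .chain([resume.to_string(), yld.to_string(), ret.to_string()])
        .collect()
}

// The ordering assumed here must match `build` above; `as_str()` plays the
// role of `expect_ty()`, turning the untyped trailing entry back into a type.
fn split(args: &[String]) -> Parts<'_> {
    match args {
        [parent_args @ .., resume_ty, yield_ty, return_ty] => Parts {
            parent_args,
            resume_ty: resume_ty.as_str(),
            yield_ty: yield_ty.as_str(),
            return_ty: return_ty.as_str(),
        },
        _ => panic!("args missing synthetics"),
    }
}

fn main() {
    let args = build(&["T", "U"], "Resume", "Yield", "Return");
    let parts = split(&args);
    assert_eq!(parts.parent_args.len(), 2);
    assert_eq!(parts.resume_ty, "Resume");
    assert_eq!(parts.yield_ty, "Yield");
    assert_eq!(parts.return_ty, "Return");
}
```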
- let movability = movability.unwrap(); - Box::new(AggregateKind::Coroutine(closure_id.to_def_id(), args, movability)) + Box::new(AggregateKind::Coroutine(closure_id.to_def_id(), args)) } UpvarArgs::Closure(args) => { Box::new(AggregateKind::Closure(closure_id.to_def_id(), args)) diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs index a6336ec63b2..e0199fb8767 100644 --- a/compiler/rustc_mir_build/src/build/mod.rs +++ b/compiler/rustc_mir_build/src/build/mod.rs @@ -646,7 +646,7 @@ fn construct_error(tcx: TyCtxt<'_>, def_id: LocalDefId, guar: ErrorGuaranteed) - } DefKind::Closure if coroutine_kind.is_some() => { let coroutine_ty = tcx.type_of(def_id).instantiate_identity(); - let ty::Coroutine(_, args, _) = coroutine_ty.kind() else { + let ty::Coroutine(_, args) = coroutine_ty.kind() else { bug!("expected type of coroutine-like closure to be a coroutine") }; let args = args.as_coroutine(); @@ -813,7 +813,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let upvar_args = match closure_ty.kind() { ty::Closure(_, args) => ty::UpvarArgs::Closure(args), - ty::Coroutine(_, args, _) => ty::UpvarArgs::Coroutine(args), + ty::Coroutine(_, args) => ty::UpvarArgs::Coroutine(args), _ => return, }; diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 01f4678fa09..8ec70c58c46 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -552,8 +552,8 @@ impl<'tcx> Cx<'tcx> { let closure_ty = self.typeck_results().expr_ty(expr); let (def_id, args, movability) = match *closure_ty.kind() { ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args), None), - ty::Coroutine(def_id, args, movability) => { - (def_id, UpvarArgs::Coroutine(args), Some(movability)) + ty::Coroutine(def_id, args) => { + (def_id, UpvarArgs::Coroutine(args), Some(tcx.coroutine_movability(def_id))) } _ => { span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty); diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs index 958fa0d17cd..c9930565186 100644 --- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs +++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs @@ -862,7 +862,7 @@ where // This should only happen for the self argument on the resume function. // It effectively only contains upvars until the coroutine transformation runs. // See librustc_body/transform/coroutine.rs for more details. - ty::Coroutine(_, args, _) => self.open_drop_for_tuple(args.as_coroutine().upvar_tys()), + ty::Coroutine(_, args) => self.open_drop_for_tuple(args.as_coroutine().upvar_tys()), ty::Tuple(fields) => self.open_drop_for_tuple(fields), ty::Adt(def, args) => self.open_drop_for_adt(*def, args), ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind), diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs index ccf3dc7941f..cae35765308 100644 --- a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs +++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs @@ -155,7 +155,7 @@ impl<'b, 'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> Gatherer<'b, 'a, 'tcx, F> { | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) 
| ty::Never | ty::Tuple(_) @@ -177,7 +177,7 @@ impl<'b, 'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> Gatherer<'b, 'a, 'tcx, F> { union_path.get_or_insert(base); } } - ty::Closure(_, _) | ty::Coroutine(_, _, _) | ty::Tuple(_) => (), + ty::Closure(_, _) | ty::Coroutine(_, _) | ty::Tuple(_) => (), ty::Bool | ty::Char | ty::Int(_) diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs index 23726e49f4d..d94d96c1115 100644 --- a/compiler/rustc_mir_transform/src/check_unsafety.rs +++ b/compiler/rustc_mir_transform/src/check_unsafety.rs @@ -128,7 +128,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> { ), } } - &AggregateKind::Closure(def_id, _) | &AggregateKind::Coroutine(def_id, _, _) => { + &AggregateKind::Closure(def_id, _) | &AggregateKind::Coroutine(def_id, _) => { let def_id = def_id.expect_local(); let UnsafetyCheckResult { violations, used_unsafe_blocks, .. } = self.tcx.unsafety_check_result(def_id); diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs index e66d5e0a9f9..c5824c30770 100644 --- a/compiler/rustc_mir_transform/src/const_prop.rs +++ b/compiler/rustc_mir_transform/src/const_prop.rs @@ -1,29 +1,22 @@ //! Propagates constants for early reporting of statically known //! assertion failures -use either::Right; -use rustc_const_eval::ReportErrorExt; +use rustc_const_eval::interpret::{ + self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, InterpCx, + InterpResult, OpTy, PlaceTy, Pointer, +}; use rustc_data_structures::fx::FxHashSet; -use rustc_hir::def::DefKind; use rustc_index::bit_set::BitSet; -use rustc_index::{IndexSlice, IndexVec}; -use rustc_middle::mir::visit::{ - MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor, -}; +use rustc_index::IndexVec; +use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; use rustc_middle::query::TyCtxtAt; -use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout}; -use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt}; -use rustc_span::{def_id::DefId, Span}; -use rustc_target::abi::{self, HasDataLayout, Size, TargetDataLayout}; +use rustc_middle::ty::layout::TyAndLayout; +use rustc_middle::ty::{self, ParamEnv, TyCtxt}; +use rustc_span::def_id::DefId; +use rustc_target::abi::Size; use rustc_target::spec::abi::Abi as CallAbi; -use crate::dataflow_const_prop::Patch; -use rustc_const_eval::interpret::{ - self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, Immediate, InterpCx, - InterpResult, MemoryKind, OpTy, PlaceTy, Pointer, Scalar, StackPopCleanup, -}; - /// The maximum number of bytes that we'll allocate space for a local or the return value. /// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just /// Severely regress performance. 
@@ -56,62 +49,7 @@ pub(crate) macro throw_machine_stop_str($($tt:tt)*) {{ throw_machine_stop!(Zst) }} -pub struct ConstProp; - -impl<'tcx> MirPass<'tcx> for ConstProp { - fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - sess.mir_opt_level() >= 2 - } - - #[instrument(skip(self, tcx), level = "debug")] - fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - // will be evaluated by miri and produce its errors there - if body.source.promoted.is_some() { - return; - } - - let def_id = body.source.def_id().expect_local(); - let def_kind = tcx.def_kind(def_id); - let is_fn_like = def_kind.is_fn_like(); - let is_assoc_const = def_kind == DefKind::AssocConst; - - // Only run const prop on functions, methods, closures and associated constants - if !is_fn_like && !is_assoc_const { - // skip anon_const/statics/consts because they'll be evaluated by miri anyway - trace!("ConstProp skipped for {:?}", def_id); - return; - } - - // FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles - // computing their layout. - if tcx.is_coroutine(def_id.to_def_id()) { - trace!("ConstProp skipped for coroutine {:?}", def_id); - return; - } - - trace!("ConstProp starting for {:?}", def_id); - - // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold - // constants, instead of just checking for const-folding succeeding. - // That would require a uniform one-def no-mutation analysis - // and RPO (or recursing when needing the value of a local). - let mut optimization_finder = ConstPropagator::new(body, tcx); - - // Traverse the body in reverse post-order, to ensure that `FullConstProp` locals are - // assigned before being read. - for &bb in body.basic_blocks.reverse_postorder() { - let data = &body.basic_blocks[bb]; - optimization_finder.visit_basic_block_data(bb, data); - } - - let mut patch = optimization_finder.patch; - patch.visit_body_preserves_cfg(body); - - trace!("ConstProp done for {:?}", def_id); - } -} - -pub struct ConstPropMachine<'mir, 'tcx> { +pub(crate) struct ConstPropMachine<'mir, 'tcx> { /// The virtual call stack. stack: Vec<Frame<'mir, 'tcx>>, pub written_only_inside_own_block_locals: FxHashSet<Local>, @@ -267,297 +205,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> } } -/// Finds optimization opportunities on the MIR. 
-struct ConstPropagator<'mir, 'tcx> { - ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, - tcx: TyCtxt<'tcx>, - param_env: ParamEnv<'tcx>, - local_decls: &'mir IndexSlice<Local, LocalDecl<'tcx>>, - patch: Patch<'tcx>, -} - -impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> { - type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>; - - #[inline] - fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> { - err - } -} - -impl HasDataLayout for ConstPropagator<'_, '_> { - #[inline] - fn data_layout(&self) -> &TargetDataLayout { - &self.tcx.data_layout - } -} - -impl<'tcx> ty::layout::HasTyCtxt<'tcx> for ConstPropagator<'_, 'tcx> { - #[inline] - fn tcx(&self) -> TyCtxt<'tcx> { - self.tcx - } -} - -impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> { - #[inline] - fn param_env(&self) -> ty::ParamEnv<'tcx> { - self.param_env - } -} - -impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { - fn new(body: &'mir Body<'tcx>, tcx: TyCtxt<'tcx>) -> ConstPropagator<'mir, 'tcx> { - let def_id = body.source.def_id(); - let args = &GenericArgs::identity_for_item(tcx, def_id); - let param_env = tcx.param_env_reveal_all_normalized(def_id); - - let can_const_prop = CanConstProp::check(tcx, param_env, body); - let mut ecx = InterpCx::new( - tcx, - tcx.def_span(def_id), - param_env, - ConstPropMachine::new(can_const_prop), - ); - - let ret_layout = ecx - .layout_of(body.bound_return_ty().instantiate(tcx, args)) - .ok() - // Don't bother allocating memory for large values. - // I don't know how return types can seem to be unsized but this happens in the - // `type/type-unsatisfiable.rs` test. - .filter(|ret_layout| { - ret_layout.is_sized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) - }) - .unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap()); - - let ret = ecx - .allocate(ret_layout, MemoryKind::Stack) - .expect("couldn't perform small allocation") - .into(); - - ecx.push_stack_frame( - Instance::new(def_id, args), - body, - &ret, - StackPopCleanup::Root { cleanup: false }, - ) - .expect("failed to push initial stack frame"); - - for local in body.local_decls.indices() { - // Mark everything initially live. - // This is somewhat dicey since some of them might be unsized and it is incoherent to - // mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter - // stopping us before those unsized immediates can cause issues deeper in the - // interpreter. - ecx.frame_mut().locals[local].make_live_uninit(); - } - - let patch = Patch::new(tcx); - ConstPropagator { ecx, tcx, param_env, local_decls: &body.local_decls, patch } - } - - fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> { - let op = match self.ecx.eval_place_to_op(place, None) { - Ok(op) => { - if op - .as_mplace_or_imm() - .right() - .is_some_and(|imm| matches!(*imm, Immediate::Uninit)) - { - // Make sure nobody accidentally uses this value. - return None; - } - op - } - Err(e) => { - trace!("get_const failed: {:?}", e.into_kind().debug()); - return None; - } - }; - - // Try to read the local as an immediate so that if it is representable as a scalar, we can - // handle it as such, but otherwise, just return the value as is. - Some(match self.ecx.read_immediate_raw(&op) { - Ok(Right(imm)) => imm.into(), - _ => op, - }) - } - - /// Remove `local` from the pool of `Locals`. Allows writing to them, - /// but not reading from them anymore. 
- fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) { - ecx.frame_mut().locals[local].make_live_uninit(); - ecx.machine.written_only_inside_own_block_locals.remove(&local); - } - - fn check_rvalue(&mut self, rvalue: &Rvalue<'tcx>) -> Option<()> { - // Perform any special handling for specific Rvalue types. - // Generally, checks here fall into one of two categories: - // 1. Additional checking to provide useful lints to the user - // - In this case, we will do some validation and then fall through to the - // end of the function which evals the assignment. - // 2. Working around bugs in other parts of the compiler - // - In this case, we'll return `None` from this function to stop evaluation. - match rvalue { - // Do not try creating references (#67862) - Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => { - trace!("skipping AddressOf | Ref for {:?}", place); - - // This may be creating mutable references or immutable references to cells. - // If that happens, the pointed to value could be mutated via that reference. - // Since we aren't tracking references, the const propagator loses track of what - // value the local has right now. - // Thus, all locals that have their reference taken - // must not take part in propagation. - Self::remove_const(&mut self.ecx, place.local); - - return None; - } - Rvalue::ThreadLocalRef(def_id) => { - trace!("skipping ThreadLocalRef({:?})", def_id); - - return None; - } - // There's no other checking to do at this time. - Rvalue::Aggregate(..) - | Rvalue::Use(..) - | Rvalue::CopyForDeref(..) - | Rvalue::Repeat(..) - | Rvalue::Len(..) - | Rvalue::Cast(..) - | Rvalue::ShallowInitBox(..) - | Rvalue::Discriminant(..) - | Rvalue::NullaryOp(..) - | Rvalue::UnaryOp(..) - | Rvalue::BinaryOp(..) - | Rvalue::CheckedBinaryOp(..) => {} - } - - // FIXME we need to revisit this for #67176 - if rvalue.has_param() { - trace!("skipping, has param"); - return None; - } - if !rvalue - .ty(&self.ecx.frame().body.local_decls, *self.ecx.tcx) - .is_sized(*self.ecx.tcx, self.param_env) - { - // the interpreter doesn't support unsized locals (only unsized arguments), - // but rustc does (in a kinda broken way), so we have to skip them here - return None; - } - - Some(()) - } - - // Attempt to use algebraic identities to eliminate constant expressions - fn eval_rvalue_with_identities( - &mut self, - rvalue: &Rvalue<'tcx>, - place: Place<'tcx>, - ) -> Option<()> { - match rvalue { - Rvalue::BinaryOp(op, box (left, right)) - | Rvalue::CheckedBinaryOp(op, box (left, right)) => { - let l = self.ecx.eval_operand(left, None).and_then(|x| self.ecx.read_immediate(&x)); - let r = - self.ecx.eval_operand(right, None).and_then(|x| self.ecx.read_immediate(&x)); - - let const_arg = match (l, r) { - (Ok(x), Err(_)) | (Err(_), Ok(x)) => x, // exactly one side is known - (Err(_), Err(_)) => return None, // neither side is known - (Ok(_), Ok(_)) => return self.ecx.eval_rvalue_into_place(rvalue, place).ok(), // both sides are known - }; - - if !matches!(const_arg.layout.abi, abi::Abi::Scalar(..)) { - // We cannot handle Scalar Pair stuff. 
- // No point in calling `eval_rvalue_into_place`, since only one side is known - return None; - } - - let arg_value = const_arg.to_scalar().to_bits(const_arg.layout.size).ok()?; - let dest = self.ecx.eval_place(place).ok()?; - - match op { - BinOp::BitAnd if arg_value == 0 => { - self.ecx.write_immediate(*const_arg, &dest).ok() - } - BinOp::BitOr - if arg_value == const_arg.layout.size.truncate(u128::MAX) - || (const_arg.layout.ty.is_bool() && arg_value == 1) => - { - self.ecx.write_immediate(*const_arg, &dest).ok() - } - BinOp::Mul if const_arg.layout.ty.is_integral() && arg_value == 0 => { - if let Rvalue::CheckedBinaryOp(_, _) = rvalue { - let val = Immediate::ScalarPair( - const_arg.to_scalar(), - Scalar::from_bool(false), - ); - self.ecx.write_immediate(val, &dest).ok() - } else { - self.ecx.write_immediate(*const_arg, &dest).ok() - } - } - _ => None, - } - } - _ => self.ecx.eval_rvalue_into_place(rvalue, place).ok(), - } - } - - fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Const<'tcx>> { - // This will return None if the above `const_prop` invocation only "wrote" a - // type whose creation requires no write. E.g. a coroutine whose initial state - // consists solely of uninitialized memory (so it doesn't capture any locals). - let value = self.get_const(place)?; - if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) { - return None; - } - trace!("replacing {:?} with {:?}", place, value); - - // FIXME: figure out what to do when read_immediate_raw fails - let imm = self.ecx.read_immediate_raw(&value).ok()?; - - let Right(imm) = imm else { return None }; - match *imm { - Immediate::Scalar(scalar) if scalar.try_to_int().is_ok() => { - Some(Const::from_scalar(self.tcx, scalar, value.layout.ty)) - } - Immediate::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => { - let alloc_id = self - .ecx - .intern_with_temp_alloc(value.layout, |ecx, dest| { - ecx.write_immediate(*imm, dest) - }) - .ok()?; - - Some(Const::Val( - ConstValue::Indirect { alloc_id, offset: Size::ZERO }, - value.layout.ty, - )) - } - // Scalars or scalar pairs that contain undef values are assumed to not have - // successfully evaluated and are thus not propagated. - _ => None, - } - } - - fn ensure_not_propagated(&self, local: Local) { - if cfg!(debug_assertions) { - assert!( - self.get_const(local.into()).is_none() - || self - .layout_of(self.local_decls[local].ty) - .map_or(true, |layout| layout.is_zst()), - "failed to remove values for `{local:?}`, value={:?}", - self.get_const(local.into()), - ) - } - } -} - /// The mode that `ConstProp` is allowed to run in for a given `Local`. 
#[derive(Clone, Copy, Debug, PartialEq)] pub enum ConstPropMode { @@ -677,154 +324,3 @@ impl<'tcx> Visitor<'tcx> for CanConstProp { } } } - -impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> { - fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { - self.super_operand(operand, location); - if let Some(place) = operand.place() - && let Some(value) = self.replace_with_const(place) - { - self.patch.before_effect.insert((location, place), value); - } - } - - fn visit_projection_elem( - &mut self, - _: PlaceRef<'tcx>, - elem: PlaceElem<'tcx>, - _: PlaceContext, - location: Location, - ) { - if let PlaceElem::Index(local) = elem - && let Some(value) = self.replace_with_const(local.into()) - { - self.patch.before_effect.insert((location, local.into()), value); - } - } - - fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) { - self.super_assign(place, rvalue, location); - - let Some(()) = self.check_rvalue(rvalue) else { - trace!("rvalue check failed, removing const"); - Self::remove_const(&mut self.ecx, place.local); - return; - }; - - match self.ecx.machine.can_const_prop[place.local] { - // Do nothing if the place is indirect. - _ if place.is_indirect() => {} - ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local), - ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => { - if let Some(()) = self.eval_rvalue_with_identities(rvalue, *place) { - // If this was already an evaluated constant, keep it. - if let Rvalue::Use(Operand::Constant(c)) = rvalue - && let Const::Val(..) = c.const_ - { - trace!( - "skipping replace of Rvalue::Use({:?} because it is already a const", - c - ); - } else if let Some(operand) = self.replace_with_const(*place) { - self.patch.assignments.insert(location, operand); - } - } else { - // Const prop failed, so erase the destination, ensuring that whatever happens - // from here on, does not know about the previous value. - // This is important in case we have - // ```rust - // let mut x = 42; - // x = SOME_MUTABLE_STATIC; - // // x must now be uninit - // ``` - // FIXME: we overzealously erase the entire local, because that's easier to - // implement. - trace!( - "propagation into {:?} failed. - Nuking the entire site from orbit, it's the only way to be sure", - place, - ); - Self::remove_const(&mut self.ecx, place.local); - } - } - } - } - - fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { - trace!("visit_statement: {:?}", statement); - - // We want to evaluate operands before any change to the assigned-to value, - // so we recurse first. - self.super_statement(statement, location); - - match statement.kind { - StatementKind::SetDiscriminant { ref place, .. } => { - match self.ecx.machine.can_const_prop[place.local] { - // Do nothing if the place is indirect. - _ if place.is_indirect() => {} - ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local), - ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => { - if self.ecx.statement(statement).is_ok() { - trace!("propped discriminant into {:?}", place); - } else { - Self::remove_const(&mut self.ecx, place.local); - } - } - } - } - StatementKind::StorageLive(local) => { - Self::remove_const(&mut self.ecx, local); - } - // We do not need to mark dead locals as such. 
For `FullConstProp` locals, - // this allows to propagate the single assigned value in this case: - // ``` - // let x = SOME_CONST; - // if a { - // f(copy x); - // StorageDead(x); - // } else { - // g(copy x); - // StorageDead(x); - // } - // ``` - // - // This may propagate a constant where the local would be uninit or dead. - // In both cases, this does not matter, as those reads would be UB anyway. - _ => {} - } - } - - fn visit_basic_block_data(&mut self, block: BasicBlock, data: &BasicBlockData<'tcx>) { - self.super_basic_block_data(block, data); - - // We remove all Locals which are restricted in propagation to their containing blocks and - // which were modified in the current block. - // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`. - let mut written_only_inside_own_block_locals = - std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals); - - // This loop can get very hot for some bodies: it check each local in each bb. - // To avoid this quadratic behaviour, we only clear the locals that were modified inside - // the current block. - for local in written_only_inside_own_block_locals.drain() { - debug_assert_eq!( - self.ecx.machine.can_const_prop[local], - ConstPropMode::OnlyInsideOwnBlock - ); - Self::remove_const(&mut self.ecx, local); - } - self.ecx.machine.written_only_inside_own_block_locals = - written_only_inside_own_block_locals; - - if cfg!(debug_assertions) { - for (local, &mode) in self.ecx.machine.can_const_prop.iter_enumerated() { - match mode { - ConstPropMode::FullConstProp => {} - ConstPropMode::NoPropagation | ConstPropMode::OnlyInsideOwnBlock => { - self.ensure_not_propagated(local); - } - } - } - } - } -} diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index 05d8d842b58..ce1a36cf670 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -1417,20 +1417,18 @@ fn create_coroutine_resume_function<'tcx>( cases.insert(0, (UNRESUMED, START_BLOCK)); // Panic when resumed on the returned or poisoned state - let coroutine_kind = body.coroutine_kind().unwrap(); - if can_unwind { cases.insert( 1, - (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(coroutine_kind))), + (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(transform.coroutine_kind))), ); } if can_return { - let block = match coroutine_kind { + let block = match transform.coroutine_kind { CoroutineKind::Desugared(CoroutineDesugaring::Async, _) | CoroutineKind::Coroutine(_) => { - insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind)) + insert_panic_block(tcx, body, ResumedAfterReturn(transform.coroutine_kind)) } CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _) | CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => { @@ -1444,7 +1442,7 @@ fn create_coroutine_resume_function<'tcx>( make_coroutine_state_argument_indirect(tcx, body); - match coroutine_kind { + match transform.coroutine_kind { // Iterator::next doesn't accept a pinned argument, // unlike for all other coroutine kinds. 
CoroutineKind::Desugared(CoroutineDesugaring::Gen, _) => {} @@ -1565,7 +1563,7 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>( let coroutine_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty; let movable = match *coroutine_ty.kind() { - ty::Coroutine(_, _, movability) => movability == hir::Movability::Movable, + ty::Coroutine(def_id, _) => tcx.coroutine_movability(def_id) == hir::Movability::Movable, ty::Error(_) => return None, _ => span_bug!(body.span, "unexpected coroutine type {}", coroutine_ty), }; @@ -1597,12 +1595,13 @@ impl<'tcx> MirPass<'tcx> for StateTransform { // The first argument is the coroutine type passed by value let coroutine_ty = body.local_decls.raw[1].ty; + let coroutine_kind = body.coroutine_kind().unwrap(); // Get the discriminant type and args which typeck computed let (discr_ty, movable) = match *coroutine_ty.kind() { - ty::Coroutine(_, args, movability) => { + ty::Coroutine(_, args) => { let args = args.as_coroutine(); - (args.discr_ty(tcx), movability == hir::Movability::Movable) + (args.discr_ty(tcx), coroutine_kind.movability() == hir::Movability::Movable) } _ => { tcx.dcx().span_delayed_bug( @@ -1613,19 +1612,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform { } }; - let is_async_kind = matches!( - body.coroutine_kind(), - Some(CoroutineKind::Desugared(CoroutineDesugaring::Async, _)) - ); - let is_async_gen_kind = matches!( - body.coroutine_kind(), - Some(CoroutineKind::Desugared(CoroutineDesugaring::AsyncGen, _)) - ); - let is_gen_kind = matches!( - body.coroutine_kind(), - Some(CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) - ); - let new_ret_ty = match body.coroutine_kind().unwrap() { + let new_ret_ty = match coroutine_kind { CoroutineKind::Desugared(CoroutineDesugaring::Async, _) => { // Compute Poll<return_ty> let poll_did = tcx.require_lang_item(LangItem::Poll, None); @@ -1658,7 +1645,10 @@ impl<'tcx> MirPass<'tcx> for StateTransform { let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx); // Replace all occurrences of `ResumeTy` with `&mut Context<'_>` within async bodies. - if is_async_kind || is_async_gen_kind { + if matches!( + coroutine_kind, + CoroutineKind::Desugared(CoroutineDesugaring::Async | CoroutineDesugaring::AsyncGen, _) + ) { transform_async_context(tcx, body); } @@ -1667,11 +1657,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform { // case there is no `Assign` to it that the transform can turn into a store to the coroutine // state. After the yield the slot in the coroutine state would then be uninitialized. let resume_local = Local::new(2); - let resume_ty = if is_async_kind { - Ty::new_task_context(tcx) - } else { - body.local_decls[resume_local].ty - }; + let resume_ty = body.local_decls[resume_local].ty; let old_resume_local = replace_local(resume_local, resume_ty, body, tcx); // When first entering the coroutine, move the resume argument into its old local @@ -1714,11 +1700,11 @@ impl<'tcx> MirPass<'tcx> for StateTransform { // Run the transformation which converts Places from Local to coroutine struct // accesses for locals in `remap`. // It also rewrites `return x` and `yield y` as writing a new coroutine state and returning - // either CoroutineState::Complete(x) and CoroutineState::Yielded(y), - // or Poll::Ready(x) and Poll::Pending respectively depending on `is_async_kind`. + // either `CoroutineState::Complete(x)` and `CoroutineState::Yielded(y)`, + // or `Poll::Ready(x)` and `Poll::Pending` respectively depending on the coroutine kind. 
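The comment above summarizes what the rewritten `return x` / `yield y` produce for async versus plain coroutines. Laid out for all four flavours; the `gen` and async-`gen` rows are a summary based on `Iterator::next` and `AsyncIterator::poll_next` semantics rather than text from this diff, so treat them as an informal aside:

```rust
#[derive(Clone, Copy, Debug)]
enum Flavour {
    Coroutine, // plain coroutine closure
    Async,     // `async` block/fn
    Gen,       // `gen` block
    AsyncGen,  // `async gen` block
}

/// What the transformed body produces at a suspension point and on completion.
fn yield_and_return(flavour: Flavour) -> (&'static str, &'static str) {
    match flavour {
        Flavour::Coroutine => ("CoroutineState::Yielded(y)", "CoroutineState::Complete(x)"),
        Flavour::Async => ("Poll::Pending", "Poll::Ready(x)"),
        Flavour::Gen => ("Some(y)", "None"),
        Flavour::AsyncGen => ("Poll::Ready(Some(y))", "Poll::Ready(None)"),
    }
}

fn main() {
    for f in [Flavour::Coroutine, Flavour::Async, Flavour::Gen, Flavour::AsyncGen] {
        let (on_yield, on_return) = yield_and_return(f);
        println!("{f:?}: yield -> {on_yield}, return -> {on_return}");
    }
}
```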
let mut transform = TransformVisitor { tcx, - coroutine_kind: body.coroutine_kind().unwrap(), + coroutine_kind, remap, storage_liveness, always_live_locals, @@ -1735,7 +1721,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform { body.spread_arg = None; // Remove the context argument within generator bodies. - if is_gen_kind { + if matches!(coroutine_kind, CoroutineKind::Desugared(CoroutineDesugaring::Gen, _)) { transform_gen_context(tcx, body); } diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index 604589e5b96..d995d562521 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -1,4 +1,4 @@ -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::graph::WithNumNodes; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; @@ -47,7 +47,10 @@ pub(super) struct CoverageCounters { bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>, /// Coverage counters/expressions that are associated with the control-flow /// edge between two BCBs. - bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, + /// + /// The iteration order of this map can affect the precise contents of MIR, + /// so we use `FxIndexMap` to avoid query stability hazards. + bcb_edge_counters: FxIndexMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, /// Tracks which BCBs have a counter associated with some incoming edge. /// Only used by assertions, to verify that BCBs with incoming edge /// counters do not have their own physical counters (expressions are allowed). @@ -64,7 +67,7 @@ impl CoverageCounters { Self { next_counter_id: CounterId::START, bcb_counters: IndexVec::from_elem_n(None, num_bcbs), - bcb_edge_counters: FxHashMap::default(), + bcb_edge_counters: FxIndexMap::default(), bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs), expressions: IndexVec::new(), } diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs index 474cb205d13..ff6545e9d25 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs @@ -91,7 +91,7 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>( fn is_closure(statement: &Statement<'_>) -> bool { match statement.kind { StatementKind::Assign(box (_, Rvalue::Aggregate(box ref agg_kind, _))) => match agg_kind { - AggregateKind::Closure(_, _) | AggregateKind::Coroutine(_, _, _) => true, + AggregateKind::Closure(_, _) | AggregateKind::Coroutine(_, _) => true, _ => false, }, _ => false, diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 3b8adf7e86b..2551c8aca88 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -109,7 +109,7 @@ pub struct GVN; impl<'tcx> MirPass<'tcx> for GVN { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - sess.mir_opt_level() >= 4 + sess.mir_opt_level() >= 2 } #[instrument(level = "trace", skip(self, tcx, body))] @@ -850,7 +850,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { assert!(!fields.is_empty()); (AggregateTy::Tuple, FIRST_VARIANT) } - AggregateKind::Closure(did, substs) | AggregateKind::Coroutine(did, substs, _) => { + AggregateKind::Closure(did, substs) | AggregateKind::Coroutine(did, substs) => { (AggregateTy::Def(did, substs), FIRST_VARIANT) } 
AggregateKind::Adt(did, variant_index, substs, _, None) => { diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 9d03bab4844..5562ae7f3bd 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -588,7 +588,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // destroy the SSA property. It should still happen before const-propagation, so the // latter pass will leverage the created opportunities. &separate_const_switch::SeparateConstSwitch, - &const_prop::ConstProp, &gvn::GVN, &simplify::SimplifyLocals::AfterGVN, &dataflow_const_prop::DataflowConstProp, diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index fba73d5195b..f6b820bfcd0 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -69,7 +69,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<' ty::InstanceDef::DropGlue(def_id, ty) => { // FIXME(#91576): Drop shims for coroutines aren't subject to the MIR passes at the end // of this function. Is this intentional? - if let Some(ty::Coroutine(coroutine_def_id, args, _)) = ty.map(Ty::kind) { + if let Some(ty::Coroutine(coroutine_def_id, args)) = ty.map(Ty::kind) { let body = tcx.optimized_mir(*coroutine_def_id).coroutine_drop().unwrap(); let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args); debug!("make_shim({:?}) = {:?}", instance, body); @@ -394,7 +394,8 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) - _ if is_copy => builder.copy_shim(), ty::Closure(_, args) => builder.tuple_like_shim(dest, src, args.as_closure().upvar_tys()), ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()), - ty::Coroutine(coroutine_def_id, args, hir::Movability::Movable) => { + ty::Coroutine(coroutine_def_id, args) => { + assert_eq!(tcx.coroutine_movability(*coroutine_def_id), hir::Movability::Movable); builder.coroutine_shim(dest, src, *coroutine_def_id, args.as_coroutine()) } _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty), diff --git a/compiler/rustc_next_trait_solver/src/canonicalizer.rs b/compiler/rustc_next_trait_solver/src/canonicalizer.rs index ac2e8960b06..db1aee11903 100644 --- a/compiler/rustc_next_trait_solver/src/canonicalizer.rs +++ b/compiler/rustc_next_trait_solver/src/canonicalizer.rs @@ -333,7 +333,7 @@ impl<Infcx: InferCtxtLike<Interner = I>, I: Interner> TypeFolder<I> | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) | ty::Never | ty::Tuple(_) diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index b4493df57e2..77bca2f138a 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -450,37 +450,39 @@ impl<'a> Parser<'a> { let mut expected = edible .iter() - .map(|x| TokenType::Token(x.clone())) - .chain(inedible.iter().map(|x| TokenType::Token(x.clone()))) + .chain(inedible) + .cloned() + .map(TokenType::Token) .chain(self.expected_tokens.iter().cloned()) - .filter_map(|token| { - // filter out suggestions which suggest the same token which was found and deemed incorrect + .filter(|token| { + // Filter out suggestions that suggest the same token which was found and deemed incorrect. 
fn is_ident_eq_keyword(found: &TokenKind, expected: &TokenType) -> bool { - if let TokenKind::Ident(current_sym, _) = found { - if let TokenType::Keyword(suggested_sym) = expected { - return current_sym == suggested_sym; - } + if let TokenKind::Ident(current_sym, _) = found + && let TokenType::Keyword(suggested_sym) = expected + { + return current_sym == suggested_sym; } false } - if token != parser::TokenType::Token(self.token.kind.clone()) { + + if *token != parser::TokenType::Token(self.token.kind.clone()) { let eq = is_ident_eq_keyword(&self.token.kind, &token); - // if the suggestion is a keyword and the found token is an ident, + // If the suggestion is a keyword and the found token is an ident, // the content of which are equal to the suggestion's content, - // we can remove that suggestion (see the return None statement below) + // we can remove that suggestion (see the `return false` below). - // if this isn't the case however, and the suggestion is a token the - // content of which is the same as the found token's, we remove it as well + // If this isn't the case however, and the suggestion is a token the + // content of which is the same as the found token's, we remove it as well. if !eq { if let TokenType::Token(kind) = &token { if kind == &self.token.kind { - return None; + return false; } } - return Some(token); + return true; } } - return None; + false }) .collect::<Vec<_>>(); expected.sort_by_cached_key(|x| x.to_string()); @@ -488,10 +490,10 @@ impl<'a> Parser<'a> { let sm = self.sess.source_map(); - // Special-case "expected `;`" errors + // Special-case "expected `;`" errors. if expected.contains(&TokenType::Token(token::Semi)) { // If the user is trying to write a ternary expression, recover it and - // return an Err to prevent a cascade of irrelevant diagnostics + // return an Err to prevent a cascade of irrelevant diagnostics. if self.prev_token == token::Question && let Err(e) = self.maybe_recover_from_ternary_operator() { diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 73bd19f34c1..bf6151b64d3 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -2937,7 +2937,13 @@ impl<'a> Parser<'a> { let is_almost_fat_arrow = TokenKind::FatArrow .similar_tokens() .is_some_and(|similar_tokens| similar_tokens.contains(&this.token.kind)); - let mut result = if !is_fat_arrow && !is_almost_fat_arrow { + + // this avoids the compiler saying that a `,` or `}` was expected even though + // the pattern isn't a never pattern (and thus an arm body is required) + let armless = (!is_fat_arrow && !is_almost_fat_arrow && pat.could_be_never_pattern()) + || matches!(this.token.kind, token::Comma | token::CloseDelim(Delimiter::Brace)); + + let mut result = if armless { // A pattern without a body, allowed for never patterns. arm_body = None; this.expect_one_of(&[token::Comma], &[token::CloseDelim(Delimiter::Brace)]).map( diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 1598fd19f6d..19226f37abe 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -320,9 +320,15 @@ impl TokenType { } } +/// Used by [`Parser::expect_any_with_type`]. #[derive(Copy, Clone, Debug)] enum TokenExpectType { + /// Unencountered tokens are inserted into [`Parser::expected_tokens`]. + /// See [`Parser::check`]. Expect, + + /// Unencountered tokens are not inserted into [`Parser::expected_tokens`]. 
+ /// See [`Parser::check_noexpect`]. NoExpect, } @@ -504,18 +510,10 @@ impl<'a> Parser<'a> { } fn ident_or_err(&mut self, recover: bool) -> PResult<'a, (Ident, /* is_raw */ bool)> { - let result = self.token.ident().ok_or_else(|| self.expected_ident_found(recover)); - - let (ident, is_raw) = match result { - Ok(ident) => ident, - Err(err) => match err { - // we recovered! - Ok(ident) => ident, - Err(err) => return Err(err), - }, - }; - - Ok((ident, is_raw)) + match self.token.ident() { + Some(ident) => Ok(ident), + None => self.expected_ident_found(recover), + } } /// Checks if the next token is `tok`, and returns `true` if so. @@ -766,13 +764,17 @@ impl<'a> Parser<'a> { } } + /// Checks if the next token is contained within `kets`, and returns `true` if so. fn expect_any_with_type(&mut self, kets: &[&TokenKind], expect: TokenExpectType) -> bool { kets.iter().any(|k| match expect { TokenExpectType::Expect => self.check(k), - TokenExpectType::NoExpect => self.token == **k, + TokenExpectType::NoExpect => self.check_noexpect(k), }) } + /// Parses a sequence until the specified delimiters. The function + /// `f` must consume tokens until reaching the next separator or + /// closing bracket. fn parse_seq_to_before_tokens<T>( &mut self, kets: &[&TokenKind], @@ -791,13 +793,15 @@ impl<'a> Parser<'a> { } if let Some(t) = &sep.sep { if first { + // no separator for the first element first = false; } else { + // check for separator match self.expect(t) { - Ok(false) => { + Ok(false) /* not recovered */ => { self.current_closure.take(); } - Ok(true) => { + Ok(true) /* recovered */ => { self.current_closure.take(); recovered = true; break; @@ -965,7 +969,7 @@ impl<'a> Parser<'a> { Ok(()) } - /// Parses a sequence, not including the closing delimiter. The function + /// Parses a sequence, not including the delimiters. The function /// `f` must consume tokens until reaching the next separator or /// closing bracket. fn parse_seq_to_before_end<T>( @@ -973,11 +977,11 @@ impl<'a> Parser<'a> { ket: &TokenKind, sep: SeqSep, f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>, - ) -> PResult<'a, (ThinVec<T>, bool, bool)> { + ) -> PResult<'a, (ThinVec<T>, bool /* trailing */, bool /* recovered */)> { self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f) } - /// Parses a sequence, including the closing delimiter. The function + /// Parses a sequence, including only the closing delimiter. The function /// `f` must consume tokens until reaching the next separator or /// closing bracket. fn parse_seq_to_end<T>( @@ -993,7 +997,7 @@ impl<'a> Parser<'a> { Ok((val, trailing)) } - /// Parses a sequence, including the closing delimiter. The function + /// Parses a sequence, including both delimiters. The function /// `f` must consume tokens until reaching the next separator or /// closing bracket. fn parse_unspanned_seq<T>( @@ -1002,16 +1006,19 @@ impl<'a> Parser<'a> { ket: &TokenKind, sep: SeqSep, f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>, - ) -> PResult<'a, (ThinVec<T>, bool)> { + ) -> PResult<'a, (ThinVec<T>, bool /* trailing */)> { self.expect(bra)?; self.parse_seq_to_end(ket, sep, f) } + /// Parses a comma-separated sequence, including both delimiters. + /// The function `f` must consume tokens until reaching the next separator or + /// closing bracket. 
fn parse_delim_comma_seq<T>( &mut self, delim: Delimiter, f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>, - ) -> PResult<'a, (ThinVec<T>, bool)> { + ) -> PResult<'a, (ThinVec<T>, bool /* trailing */)> { self.parse_unspanned_seq( &token::OpenDelim(delim), &token::CloseDelim(delim), @@ -1020,10 +1027,13 @@ impl<'a> Parser<'a> { ) } + /// Parses a comma-separated sequence delimited by parentheses (e.g. `(x, y)`). + /// The function `f` must consume tokens until reaching the next separator or + /// closing bracket. fn parse_paren_comma_seq<T>( &mut self, f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>, - ) -> PResult<'a, (ThinVec<T>, bool)> { + ) -> PResult<'a, (ThinVec<T>, bool /* trailing */)> { self.parse_delim_comma_seq(Delimiter::Parenthesis, f) } diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs index 6be01e0d88f..cfe829f170f 100644 --- a/compiler/rustc_passes/src/liveness.rs +++ b/compiler/rustc_passes/src/liveness.rs @@ -101,7 +101,6 @@ use rustc_span::symbol::{kw, sym, Symbol}; use rustc_span::DUMMY_SP; use rustc_span::{BytePos, Span}; -use std::collections::VecDeque; use std::io; use std::io::prelude::*; use std::rc::Rc; @@ -317,35 +316,13 @@ impl<'tcx> IrMaps<'tcx> { // For struct patterns, take note of which fields used shorthand // (`x` rather than `x: x`). let mut shorthand_field_ids = HirIdSet::default(); - let mut pats = VecDeque::new(); - pats.push_back(pat); - - while let Some(pat) = pats.pop_front() { - use rustc_hir::PatKind::*; - match &pat.kind { - Binding(.., inner_pat) => { - pats.extend(inner_pat.iter()); - } - Struct(_, fields, _) => { - let (short, not_short): (Vec<hir::PatField<'_>>, _) = - fields.iter().partition(|f| f.is_shorthand); - shorthand_field_ids.extend(short.iter().map(|f| f.pat.hir_id)); - pats.extend(not_short.iter().map(|f| f.pat)); - } - Ref(inner_pat, _) | Box(inner_pat) => { - pats.push_back(inner_pat); - } - TupleStruct(_, inner_pats, _) | Tuple(inner_pats, _) | Or(inner_pats) => { - pats.extend(inner_pats.iter()); - } - Slice(pre_pats, inner_pat, post_pats) => { - pats.extend(pre_pats.iter()); - pats.extend(inner_pat.iter()); - pats.extend(post_pats.iter()); - } - _ => {} + + pat.walk_always(|pat| { + if let hir::PatKind::Struct(_, fields, _) = pat.kind { + let short = fields.iter().filter(|f| f.is_shorthand); + shorthand_field_ids.extend(short.map(|f| f.pat.hir_id)); } - } + }); shorthand_field_ids } @@ -1351,6 +1328,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> { fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) { self.check_unused_vars_in_pat(arm.pat, None, None, |_, _, _, _| {}); + if let Some(hir::Guard::IfLet(let_expr)) = arm.guard { + self.check_unused_vars_in_pat(let_expr.pat, None, None, |_, _, _, _| {}); + } intravisit::walk_arm(self, arm); } } diff --git a/compiler/rustc_pattern_analysis/src/rustc.rs b/compiler/rustc_pattern_analysis/src/rustc.rs index 3c1bdfd910e..b09d565f076 100644 --- a/compiler/rustc_pattern_analysis/src/rustc.rs +++ b/compiler/rustc_pattern_analysis/src/rustc.rs @@ -366,7 +366,7 @@ impl<'p, 'tcx> RustcMatchCheckCtxt<'p, 'tcx> { | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::Alias(_, _) | ty::Param(_) | ty::Error(_) => ConstructorSet::Unlistable, diff --git a/compiler/rustc_smir/src/rustc_internal/internal.rs b/compiler/rustc_smir/src/rustc_internal/internal.rs index bbc98af45c0..17162d0de25 100644 --- a/compiler/rustc_smir/src/rustc_internal/internal.rs +++ 
b/compiler/rustc_smir/src/rustc_internal/internal.rs @@ -104,11 +104,9 @@ impl<'tcx> RustcInternal<'tcx> for RigidTy { RigidTy::Closure(def, args) => { rustc_ty::TyKind::Closure(def.0.internal(tables), args.internal(tables)) } - RigidTy::Coroutine(def, args, mov) => rustc_ty::TyKind::Coroutine( - def.0.internal(tables), - args.internal(tables), - mov.internal(tables), - ), + RigidTy::Coroutine(def, args, _mov) => { + rustc_ty::TyKind::Coroutine(def.0.internal(tables), args.internal(tables)) + } RigidTy::CoroutineWitness(def, args) => { rustc_ty::TyKind::CoroutineWitness(def.0.internal(tables), args.internal(tables)) } diff --git a/compiler/rustc_smir/src/rustc_smir/convert/mir.rs b/compiler/rustc_smir/src/rustc_smir/convert/mir.rs index 49bf2192f82..c5fb6f7a26f 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/mir.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/mir.rs @@ -531,11 +531,11 @@ impl<'tcx> Stable<'tcx> for mir::AggregateKind<'tcx> { generic_arg.stable(tables), ) } - mir::AggregateKind::Coroutine(def_id, generic_arg, movability) => { + mir::AggregateKind::Coroutine(def_id, generic_arg) => { stable_mir::mir::AggregateKind::Coroutine( tables.coroutine_def(*def_id), generic_arg.stable(tables), - movability.stable(tables), + tables.tcx.coroutine_movability(*def_id).stable(tables), ) } } diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs index cbdddc30072..f0f1d798d44 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs @@ -386,10 +386,10 @@ impl<'tcx> Stable<'tcx> for ty::TyKind<'tcx> { tables.closure_def(*def_id), generic_args.stable(tables), )), - ty::Coroutine(def_id, generic_args, movability) => TyKind::RigidTy(RigidTy::Coroutine( + ty::Coroutine(def_id, generic_args) => TyKind::RigidTy(RigidTy::Coroutine( tables.coroutine_def(*def_id), generic_args.stable(tables), - movability.stable(tables), + tables.tcx.coroutine_movability(*def_id).stable(tables), )), ty::Never => TyKind::RigidTy(RigidTy::Never), ty::Tuple(fields) => { diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index a273a412146..4a5f58443bc 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -211,7 +211,7 @@ impl<'tcx> Printer<'tcx> for SymbolPrinter<'tcx> { ty::FnDef(def_id, args) | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. }) | ty::Closure(def_id, args) - | ty::Coroutine(def_id, args, _) => self.print_def_path(def_id, args), + | ty::Coroutine(def_id, args) => self.print_def_path(def_id, args), // The `pretty_print_type` formatting of array size depends on // -Zverbose-internals flag, so we cannot reuse it here. 
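
In the StableMIR conversions above, the public `RigidTy::Coroutine` and `AggregateKind::Coroutine` keep their `Movability` field for now (flagged as redundant further down), so the conversion layer re-derives it with `tcx.coroutine_movability` instead of reading it off the internal type, which no longer carries it. A standalone sketch of that boundary, with `Cx`, `InternalTy`, and `StableTy` as simplified stand-ins for `TyCtxt`, `ty::TyKind`, and `stable_mir::ty::RigidTy`:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct DefId(u32);

#[derive(Clone, Copy, Debug)]
enum Movability {
    Static,
    Movable,
}

// Internal representation after this change: no movability stored in the type.
enum InternalTy {
    Coroutine(DefId, Vec<String>),
}

// Public (stable_mir-like) representation: the field is kept for now, even
// though it is derivable, so downstream consumers don't break.
#[derive(Debug)]
enum StableTy {
    Coroutine(DefId, Vec<String>, Movability),
}

// Stand-in for `TyCtxt` plus the `coroutine_movability` query.
struct Cx {
    movability: HashMap<DefId, Movability>,
}

impl Cx {
    fn coroutine_movability(&self, def_id: DefId) -> Movability {
        self.movability[&def_id]
    }

    // The conversion re-derives the dropped field instead of reading it off the type.
    fn stable(&self, ty: &InternalTy) -> StableTy {
        match ty {
            InternalTy::Coroutine(def_id, args) => {
                StableTy::Coroutine(*def_id, args.clone(), self.coroutine_movability(*def_id))
            }
        }
    }
}

fn main() {
    let cx = Cx {
        movability: HashMap::from([
            (DefId(7), Movability::Static),
            (DefId(8), Movability::Movable),
        ]),
    };
    let immovable = InternalTy::Coroutine(DefId(7), vec!["'a".to_string()]);
    let movable = InternalTy::Coroutine(DefId(8), vec![]);
    println!("{:?}", cx.stable(&immovable)); // Coroutine(DefId(7), ["'a"], Static)
    println!("{:?}", cx.stable(&movable)); // Coroutine(DefId(8), [], Movable)
}
```
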
diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs index d2962b2968b..0cc82ac7506 100644 --- a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs +++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs @@ -895,8 +895,8 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio ty = Ty::new_closure(tcx, *def_id, transform_args(tcx, args, options)); } - ty::Coroutine(def_id, args, movability) => { - ty = Ty::new_coroutine(tcx, *def_id, transform_args(tcx, args, options), *movability); + ty::Coroutine(def_id, args) => { + ty = Ty::new_coroutine(tcx, *def_id, transform_args(tcx, args, options)); } ty::Ref(region, ty0, ..) => { diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index e002e345ae6..e89a640767f 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -427,7 +427,7 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> { | ty::FnDef(def_id, args) | ty::Alias(ty::Projection | ty::Opaque, ty::AliasTy { def_id, args, .. }) | ty::Closure(def_id, args) - | ty::Coroutine(def_id, args, _) => { + | ty::Coroutine(def_id, args) => { self.print_def_path(def_id, args)?; } ty::Foreign(def_id) => { diff --git a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs index 81a766f24b0..caf9470b4c6 100644 --- a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs @@ -432,7 +432,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::Never | ty::Tuple(_) => { let simp = diff --git a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs index f442e2a08a8..274a75a125c 100644 --- a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs +++ b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs @@ -57,7 +57,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<'tcx>( ty::Closure(_, args) => Ok(vec![args.as_closure().tupled_upvars_ty()]), - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { let coroutine_args = args.as_coroutine(); Ok(vec![coroutine_args.tupled_upvars_ty(), coroutine_args.witness()]) } @@ -177,7 +177,6 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>( ty::Dynamic(..) | ty::Str | ty::Slice(_) - | ty::Coroutine(_, _, Movability::Static) | ty::Foreign(..) 
| ty::Ref(_, _, Mutability::Mut) | ty::Adt(_, _) @@ -194,14 +193,17 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>( ty::Closure(_, args) => Ok(vec![args.as_closure().tupled_upvars_ty()]), - ty::Coroutine(_, args, Movability::Movable) => { - if ecx.tcx().features().coroutine_clone { - let coroutine = args.as_coroutine(); - Ok(vec![coroutine.tupled_upvars_ty(), coroutine.witness()]) - } else { - Err(NoSolution) + ty::Coroutine(def_id, args) => match ecx.tcx().coroutine_movability(def_id) { + Movability::Static => Err(NoSolution), + Movability::Movable => { + if ecx.tcx().features().coroutine_clone { + let coroutine = args.as_coroutine(); + Ok(vec![coroutine.tupled_upvars_ty(), coroutine.witness()]) + } else { + Err(NoSolution) + } } - } + }, ty::CoroutineWitness(def_id, args) => Ok(ecx .tcx() @@ -278,7 +280,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>( | ty::RawPtr(_) | ty::Ref(_, _, _) | ty::Dynamic(_, _, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) | ty::Never | ty::Tuple(_) diff --git a/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs b/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs index bac08f6588f..ccee6f8eb29 100644 --- a/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs @@ -468,7 +468,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> { goal: Goal<'tcx, Self>, ) -> QueryResult<'tcx> { let self_ty = goal.predicate.self_ty(); - let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(def_id, args) = *self_ty.kind() else { return Err(NoSolution); }; @@ -499,7 +499,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> { goal: Goal<'tcx, Self>, ) -> QueryResult<'tcx> { let self_ty = goal.predicate.self_ty(); - let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(def_id, args) = *self_ty.kind() else { return Err(NoSolution); }; @@ -530,7 +530,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> { goal: Goal<'tcx, Self>, ) -> QueryResult<'tcx> { let self_ty = goal.predicate.self_ty(); - let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(def_id, args) = *self_ty.kind() else { return Err(NoSolution); }; @@ -564,7 +564,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> { goal: Goal<'tcx, Self>, ) -> QueryResult<'tcx> { let self_ty = goal.predicate.self_ty(); - let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(def_id, args) = *self_ty.kind() else { return Err(NoSolution); }; diff --git a/compiler/rustc_trait_selection/src/solve/trait_goals.rs b/compiler/rustc_trait_selection/src/solve/trait_goals.rs index ac3ffd2d6c2..be079275684 100644 --- a/compiler/rustc_trait_selection/src/solve/trait_goals.rs +++ b/compiler/rustc_trait_selection/src/solve/trait_goals.rs @@ -337,7 +337,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { return Err(NoSolution); } - let ty::Coroutine(def_id, _, _) = *goal.predicate.self_ty().kind() else { + let ty::Coroutine(def_id, _) = *goal.predicate.self_ty().kind() else { return Err(NoSolution); }; @@ -361,7 +361,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { return Err(NoSolution); } - let ty::Coroutine(def_id, _, _) = *goal.predicate.self_ty().kind() else { + let ty::Coroutine(def_id, _) = *goal.predicate.self_ty().kind() else { return Err(NoSolution); }; @@ -385,7 
+385,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { return Err(NoSolution); } - let ty::Coroutine(def_id, _, _) = *goal.predicate.self_ty().kind() else { + let ty::Coroutine(def_id, _) = *goal.predicate.self_ty().kind() else { return Err(NoSolution); }; @@ -410,7 +410,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { } let self_ty = goal.predicate.self_ty(); - let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(def_id, args) = *self_ty.kind() else { return Err(NoSolution); }; @@ -927,10 +927,10 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { // Coroutines have one special built-in candidate, `Unpin`, which // takes precedence over the structural auto trait candidate being // assembled. - ty::Coroutine(_, _, movability) + ty::Coroutine(def_id, _) if Some(goal.predicate.def_id()) == self.tcx().lang_items().unpin_trait() => { - match movability { + match self.tcx().coroutine_movability(def_id) { Movability::Static => Some(Err(NoSolution)), Movability::Movable => { Some(self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)) @@ -959,7 +959,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { | ty::FnDef(_, _) | ty::FnPtr(_) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) | ty::Never | ty::Tuple(_) diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 1143b9d3360..f63314081d6 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -3406,7 +3406,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { } err.note(msg.trim_end_matches(", ").to_string()) } - ty::Coroutine(def_id, _, _) => { + ty::Coroutine(def_id, _) => { let sp = self.tcx.def_span(def_id); // Special-case this to say "async block" instead of `[static coroutine]`. 
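
The solver hunks above now decide `Unpin` and (feature-gated) `Clone`/`Copy` for coroutines from a movability lookup rather than from a field of the type: `Movability::Static` means never `Unpin` and no clone candidate, `Movability::Movable` means `Unpin`, with cloning further gated on the `coroutine_clone` feature. The sketch below condenses that decision logic under simplified assumptions; `Cx` and its fields stand in for `TyCtxt`, the `coroutine_movability` query, and the feature gate, and the real solver returns `QueryResult`s rather than booleans.

```rust
#[derive(Clone, Copy, Debug)]
enum Movability {
    Static,
    Movable,
}

// Stand-in for the pieces of `TyCtxt` consulted by the solver.
struct Cx {
    movability: Movability,
    coroutine_clone_enabled: bool,
}

impl Cx {
    fn coroutine_movability(&self) -> Movability {
        self.movability
    }
}

/// Immovable (`static`) coroutines are never `Unpin`; movable ones are.
fn coroutine_is_unpin(cx: &Cx) -> bool {
    matches!(cx.coroutine_movability(), Movability::Movable)
}

/// A `Clone`/`Copy` candidate is only ever considered for a movable
/// coroutine, and even then only with the `coroutine_clone` feature enabled.
fn coroutine_may_be_clone(cx: &Cx) -> bool {
    match cx.coroutine_movability() {
        Movability::Static => false,
        Movability::Movable => cx.coroutine_clone_enabled,
    }
}

fn main() {
    let immovable = Cx { movability: Movability::Static, coroutine_clone_enabled: true };
    let movable = Cx { movability: Movability::Movable, coroutine_clone_enabled: false };

    assert!(!coroutine_is_unpin(&immovable));
    assert!(coroutine_is_unpin(&movable));
    assert!(!coroutine_may_be_clone(&immovable));
    assert!(!coroutine_may_be_clone(&movable)); // feature gate is off here
}
```
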
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index d32b4adbefc..dd4e69efe37 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -2083,7 +2083,7 @@ fn confirm_coroutine_candidate<'cx, 'tcx>( nested: Vec<PredicateObligation<'tcx>>, ) -> Progress<'tcx> { let self_ty = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()); - let ty::Coroutine(_, args, _) = self_ty.kind() else { + let ty::Coroutine(_, args) = self_ty.kind() else { unreachable!( "expected coroutine self type for built-in coroutine candidate, found {self_ty}" ) @@ -2138,7 +2138,7 @@ fn confirm_future_candidate<'cx, 'tcx>( nested: Vec<PredicateObligation<'tcx>>, ) -> Progress<'tcx> { let self_ty = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()); - let ty::Coroutine(_, args, _) = self_ty.kind() else { + let ty::Coroutine(_, args) = self_ty.kind() else { unreachable!( "expected coroutine self type for built-in async future candidate, found {self_ty}" ) @@ -2182,7 +2182,7 @@ fn confirm_iterator_candidate<'cx, 'tcx>( nested: Vec<PredicateObligation<'tcx>>, ) -> Progress<'tcx> { let self_ty = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()); - let ty::Coroutine(_, args, _) = self_ty.kind() else { + let ty::Coroutine(_, args) = self_ty.kind() else { unreachable!("expected coroutine self type for built-in gen candidate, found {self_ty}") }; let gen_sig = args.as_coroutine().sig(); @@ -2223,8 +2223,7 @@ fn confirm_async_iterator_candidate<'cx, 'tcx>( obligation: &ProjectionTyObligation<'tcx>, nested: Vec<PredicateObligation<'tcx>>, ) -> Progress<'tcx> { - let ty::Coroutine(_, args, _) = - selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind() + let ty::Coroutine(_, args) = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind() else { unreachable!() }; diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs index f9c8f3d14c3..138bc6129f7 100644 --- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs +++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs @@ -259,7 +259,7 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>( })? } - ty::Coroutine(_, args, _movability) => { + ty::Coroutine(_, args) => { // rust-lang/rust#49918: types can be constructed, stored // in the interior, and sit idle when coroutine yields // (and is subsequently dropped). diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 3a37bc518ef..54b91ab1d4d 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -263,7 +263,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { candidates: &mut SelectionCandidateSet<'tcx>, ) { let self_ty = obligation.self_ty().skip_binder(); - if let ty::Coroutine(did, args, _) = *self_ty.kind() { + if let ty::Coroutine(did, args) = *self_ty.kind() { // gen constructs get lowered to a special kind of coroutine that // should directly `impl AsyncIterator`. if self.tcx().coroutine_is_async_gen(did) { @@ -486,7 +486,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { | ty::RawPtr(_) | ty::Ref(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) 
| ty::Never | ty::Tuple(_) @@ -529,7 +529,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let def_id = obligation.predicate.def_id(); if self.tcx().trait_is_auto(def_id) { - match self_ty.kind() { + match *self_ty.kind() { ty::Dynamic(..) => { // For object types, we don't know what the closed // over types are. This means we conservatively @@ -564,10 +564,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // The auto impl might apply; we don't know. candidates.ambiguous = true; } - ty::Coroutine(_, _, movability) + ty::Coroutine(coroutine_def_id, _) if self.tcx().lang_items().unpin_trait() == Some(def_id) => { - match movability { + match self.tcx().coroutine_movability(coroutine_def_id) { hir::Movability::Static => { // Immovable coroutines are never `Unpin`, so // suppress the normal auto-impl candidate for it. @@ -1023,7 +1023,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { | ty::FnPtr(_) | ty::Dynamic(_, _, _) | ty::Closure(_, _) - | ty::Coroutine(_, _, _) + | ty::Coroutine(_, _) | ty::CoroutineWitness(..) | ty::Never | ty::Alias(..) diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index f1da1c046d4..e20bb06d777 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -730,7 +730,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // touch bound regions, they just capture the in-scope // type/region parameters. let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); - let ty::Coroutine(coroutine_def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(coroutine_def_id, args) = *self_ty.kind() else { bug!("closure candidate for non-closure {:?}", obligation); }; @@ -768,7 +768,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // touch bound regions, they just capture the in-scope // type/region parameters. let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); - let ty::Coroutine(coroutine_def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(coroutine_def_id, args) = *self_ty.kind() else { bug!("closure candidate for non-closure {:?}", obligation); }; @@ -797,7 +797,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // touch bound regions, they just capture the in-scope // type/region parameters. let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); - let ty::Coroutine(coroutine_def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(coroutine_def_id, args) = *self_ty.kind() else { bug!("closure candidate for non-closure {:?}", obligation); }; @@ -826,7 +826,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // touch bound regions, they just capture the in-scope // type/region parameters. 
let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); - let ty::Coroutine(coroutine_def_id, args, _) = *self_ty.kind() else { + let ty::Coroutine(coroutine_def_id, args) = *self_ty.kind() else { bug!("closure candidate for non-closure {:?}", obligation); }; @@ -1298,7 +1298,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ty::Closure(_, args) => { stack.push(args.as_closure().tupled_upvars_ty()); } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { let coroutine = args.as_coroutine(); stack.extend([coroutine.tupled_upvars_ty(), coroutine.witness()]); } diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 336c0c5299f..c45925295ee 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -2176,7 +2176,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> { ty::Dynamic(..) | ty::Str | ty::Slice(..) - | ty::Coroutine(_, _, hir::Movability::Static) | ty::Foreign(..) | ty::Ref(_, _, hir::Mutability::Mut) => None, @@ -2185,26 +2184,31 @@ impl<'tcx> SelectionContext<'_, 'tcx> { Where(obligation.predicate.rebind(tys.iter().collect())) } - ty::Coroutine(_, args, hir::Movability::Movable) => { - if self.tcx().features().coroutine_clone { - let resolved_upvars = - self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty()); - let resolved_witness = - self.infcx.shallow_resolve(args.as_coroutine().witness()); - if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() { - // Not yet resolved. - Ambiguous - } else { - let all = args - .as_coroutine() - .upvar_tys() - .iter() - .chain([args.as_coroutine().witness()]) - .collect::<Vec<_>>(); - Where(obligation.predicate.rebind(all)) + ty::Coroutine(coroutine_def_id, args) => { + match self.tcx().coroutine_movability(coroutine_def_id) { + hir::Movability::Static => None, + hir::Movability::Movable => { + if self.tcx().features().coroutine_clone { + let resolved_upvars = + self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty()); + let resolved_witness = + self.infcx.shallow_resolve(args.as_coroutine().witness()); + if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() { + // Not yet resolved. 
+ Ambiguous + } else { + let all = args + .as_coroutine() + .upvar_tys() + .iter() + .chain([args.as_coroutine().witness()]) + .collect::<Vec<_>>(); + Where(obligation.predicate.rebind(all)) + } + } else { + None + } } - } else { - None } } @@ -2307,7 +2311,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { t.rebind(vec![ty]) } - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty()); let witness = args.as_coroutine().witness(); t.rebind([ty].into_iter().chain(iter::once(witness)).collect()) diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index 4756a45a447..2772831e731 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -97,7 +97,7 @@ fn fn_sig_for_fn_abi<'tcx>( bound_vars, ) } - ty::Coroutine(did, args, _) => { + ty::Coroutine(did, args) => { let coroutine_kind = tcx.coroutine_kind(did).unwrap(); let sig = args.as_coroutine().sig(); diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index 42db43caf9f..81d5304b812 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -246,7 +246,7 @@ fn resolve_associated_item<'tcx>( }) } } else if Some(trait_ref.def_id) == lang_items.future_trait() { - let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { + let ty::Coroutine(coroutine_def_id, args) = *rcvr_args.type_at(0).kind() else { bug!() }; if Some(trait_item_id) == tcx.lang_items().future_poll_fn() { @@ -259,7 +259,7 @@ fn resolve_associated_item<'tcx>( Some(Instance::new(trait_item_id, rcvr_args)) } } else if Some(trait_ref.def_id) == lang_items.iterator_trait() { - let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { + let ty::Coroutine(coroutine_def_id, args) = *rcvr_args.type_at(0).kind() else { bug!() }; if Some(trait_item_id) == tcx.lang_items().next_fn() { @@ -272,7 +272,7 @@ fn resolve_associated_item<'tcx>( Some(Instance::new(trait_item_id, rcvr_args)) } } else if Some(trait_ref.def_id) == lang_items.async_iterator_trait() { - let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { + let ty::Coroutine(coroutine_def_id, args) = *rcvr_args.type_at(0).kind() else { bug!() }; @@ -287,7 +287,7 @@ fn resolve_associated_item<'tcx>( // `AsyncIterator::poll_next` is generated by the compiler. 
Some(Instance { def: ty::InstanceDef::Item(coroutine_def_id), args }) } else if Some(trait_ref.def_id) == lang_items.coroutine_trait() { - let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { + let ty::Coroutine(coroutine_def_id, args) = *rcvr_args.type_at(0).kind() else { bug!() }; if cfg!(debug_assertions) && tcx.item_name(trait_item_id) != sym::resume { diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index d39377a1acb..db89fba2a89 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -316,7 +316,7 @@ fn layout_of_uncached<'tcx>( tcx.mk_layout(unit) } - ty::Coroutine(def_id, args, _) => coroutine_layout(cx, ty, def_id, args)?, + ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?, ty::Closure(_, args) => { let tys = args.as_closure().upvar_tys(); @@ -961,7 +961,7 @@ fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: T record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos); } - ty::Coroutine(def_id, args, _) => { + ty::Coroutine(def_id, args) => { debug!("print-type-size t: `{:?}` record coroutine", layout.ty); // Coroutines always have a begin/poisoned/end state with additional suspend points let (variant_infos, opt_discr_size) = diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs index 8d118e6dfef..08e5476ae43 100644 --- a/compiler/rustc_ty_utils/src/needs_drop.rs +++ b/compiler/rustc_ty_utils/src/needs_drop.rs @@ -145,7 +145,7 @@ where // for the coroutine witness and check whether any of the contained types // need to be dropped, and only require the captured types to be live // if they do. - ty::Coroutine(_, args, _) => { + ty::Coroutine(_, args) => { if self.reveal_coroutine_witnesses { queue_type(self, args.as_coroutine().witness()); } else { diff --git a/compiler/rustc_type_ir/src/ty_kind.rs b/compiler/rustc_type_ir/src/ty_kind.rs index 70adfbee2ed..859000fb6cb 100644 --- a/compiler/rustc_type_ir/src/ty_kind.rs +++ b/compiler/rustc_type_ir/src/ty_kind.rs @@ -207,7 +207,7 @@ pub enum TyKind<I: Interner> { /// /// For more info about coroutine args, visit the documentation for /// `CoroutineArgs`. - Coroutine(I::DefId, I::GenericArgs, Movability), + Coroutine(I::DefId, I::GenericArgs), /// A type representing the types stored inside a coroutine. /// This should only appear as part of the `CoroutineArgs`. @@ -317,7 +317,7 @@ const fn tykind_discriminant<I: Interner>(value: &TyKind<I>) -> usize { FnPtr(_) => 13, Dynamic(..) 
=> 14, Closure(_, _) => 15, - Coroutine(_, _, _) => 16, + Coroutine(_, _) => 16, CoroutineWitness(_, _) => 17, Never => 18, Tuple(_) => 19, @@ -356,9 +356,7 @@ impl<I: Interner> PartialEq for TyKind<I> { a_p == b_p && a_r == b_r && a_repr == b_repr } (Closure(a_d, a_s), Closure(b_d, b_s)) => a_d == b_d && a_s == b_s, - (Coroutine(a_d, a_s, a_m), Coroutine(b_d, b_s, b_m)) => { - a_d == b_d && a_s == b_s && a_m == b_m - } + (Coroutine(a_d, a_s), Coroutine(b_d, b_s)) => a_d == b_d && a_s == b_s, (CoroutineWitness(a_d, a_s), CoroutineWitness(b_d, b_s)) => a_d == b_d && a_s == b_s, (Tuple(a_t), Tuple(b_t)) => a_t == b_t, (Alias(a_i, a_p), Alias(b_i, b_p)) => a_i == b_i && a_p == b_p, @@ -432,9 +430,7 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> { } }, Closure(d, s) => f.debug_tuple("Closure").field(d).field(&this.wrap(s)).finish(), - Coroutine(d, s, m) => { - f.debug_tuple("Coroutine").field(d).field(&this.wrap(s)).field(m).finish() - } + Coroutine(d, s) => f.debug_tuple("Coroutine").field(d).field(&this.wrap(s)).finish(), CoroutineWitness(d, s) => { f.debug_tuple("CoroutineWitness").field(d).field(&this.wrap(s)).finish() } diff --git a/compiler/stable_mir/src/mir/body.rs b/compiler/stable_mir/src/mir/body.rs index 5871600bece..72227a04bf1 100644 --- a/compiler/stable_mir/src/mir/body.rs +++ b/compiler/stable_mir/src/mir/body.rs @@ -662,6 +662,7 @@ pub enum AggregateKind { Tuple, Adt(AdtDef, VariantIdx, GenericArgs, Option<UserTypeAnnotationIndex>, Option<FieldIdx>), Closure(ClosureDef, GenericArgs), + // FIXME(stable_mir): Movability here is redundant Coroutine(CoroutineDef, GenericArgs, Movability), } diff --git a/compiler/stable_mir/src/ty.rs b/compiler/stable_mir/src/ty.rs index 1d4d7b6d352..9e6ecbe8315 100644 --- a/compiler/stable_mir/src/ty.rs +++ b/compiler/stable_mir/src/ty.rs @@ -460,6 +460,7 @@ pub enum RigidTy { FnDef(FnDef, GenericArgs), FnPtr(PolyFnSig), Closure(ClosureDef, GenericArgs), + // FIXME(stable_mir): Movability here is redundant Coroutine(CoroutineDef, GenericArgs, Movability), Dynamic(Vec<Binder<ExistentialPredicate>>, Region, DynKind), Never, |
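
With `Movability` gone from `TyKind::Coroutine`, the hand-written discriminant function and `PartialEq` in `ty_kind.rs` shrink along with the variant: the `Coroutine` arm now compares only the def-id and the args. Below is a self-contained rendition of that pattern (compare a cheap discriminant first, then per-variant fields); the types are simplified stand-ins, not the interner-generic original.

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
struct DefId(u32);

enum TyKind {
    Never,
    Closure(DefId, Vec<String>),
    Coroutine(DefId, Vec<String>),
}

// Cheap per-variant tag, mirroring `tykind_discriminant`.
const fn tykind_discriminant(value: &TyKind) -> usize {
    match value {
        TyKind::Never => 0,
        TyKind::Closure(..) => 1,
        TyKind::Coroutine(..) => 2,
    }
}

impl PartialEq for TyKind {
    fn eq(&self, other: &Self) -> bool {
        // Different variants can never be equal; reject them before
        // touching any fields.
        tykind_discriminant(self) == tykind_discriminant(other)
            && match (self, other) {
                (TyKind::Never, TyKind::Never) => true,
                (TyKind::Closure(a_d, a_s), TyKind::Closure(b_d, b_s)) => a_d == b_d && a_s == b_s,
                // Previously this arm also compared the movability field.
                (TyKind::Coroutine(a_d, a_s), TyKind::Coroutine(b_d, b_s)) => {
                    a_d == b_d && a_s == b_s
                }
                _ => false,
            }
    }
}

fn main() {
    let a = TyKind::Coroutine(DefId(1), vec!["T".into()]);
    let b = TyKind::Coroutine(DefId(1), vec!["T".into()]);
    let c = TyKind::Closure(DefId(1), vec![]);
    assert!(a == b);
    assert!(a != c);
    assert!(a != TyKind::Never);
}
```
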
