| author | bors <bors@rust-lang.org> | 2023-12-10 08:08:03 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2023-12-10 08:08:03 +0000 |
| commit | 92ab9d65bb8f3a1b167c2ccb9d22de301a7aef45 (patch) | |
| tree | 0a9542f67746bf7a90b8bb4881b56de39b138cb9 /compiler | |
| parent | aa7afff8c619fc9c04515b7cef57d72dc3c7e51e (diff) | |
| parent | 035d86594ddcaec12b59c35f8de1a99c3b8b81fb (diff) | |
| download | rust-92ab9d65bb8f3a1b167c2ccb9d22de301a7aef45.tar.gz rust-92ab9d65bb8f3a1b167c2ccb9d22de301a7aef45.zip | |
Auto merge of #3217 - RalfJung:rustup, r=RalfJung
Rustup

Pulls in https://github.com/rust-lang/rust/pull/117953 (in preparation for implementing those intrinsics)
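The intrinsics referred to here are the new SIMD masked load/store operations (`simd_masked_load`, `simd_masked_store`) whose Cranelift and LLVM lowerings appear further down in this diff. As a rough, hedged illustration of the semantics spelled out in those codegen comments, the plain-Rust sketch below models a masked load: enabled lanes read contiguous elements behind the pointer, disabled lanes keep the passthrough value and their addresses are never touched. The function name `masked_load_ref` and its scalar `i32` signature are invented for illustration; they are not the real `platform-intrinsic` interface.

```rust
// Scalar reference model (illustration only) of the `simd_masked_load`
// semantics documented in the codegen changes below: lane i is loaded from
// `ptr.add(i)` only when its mask lane is enabled; disabled lanes keep the
// passthrough value and their addresses are never dereferenced.
fn masked_load_ref<const N: usize>(
    mask: [i32; N],
    ptr: *const i32,
    passthrough: [i32; N],
) -> [i32; N] {
    let mut out = passthrough;
    for lane in 0..N {
        // The documented contract truncates each mask lane to i1, so this
        // model tests bit 0; in practice masks are all-ones or all-zeros.
        if mask[lane] & 1 != 0 {
            // SAFETY: the caller must guarantee `ptr.add(lane)` is readable
            // for every enabled lane, mirroring the intrinsic's contract.
            out[lane] = unsafe { *ptr.add(lane) };
        }
    }
    out
}

fn main() {
    let data = [10, 20, 30, 40];
    let loaded = masked_load_ref([-1, 0, -1, 0], data.as_ptr(), [0; 4]);
    assert_eq!(loaded, [10, 0, 30, 0]);
}
```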
Diffstat (limited to 'compiler')
196 files changed, 6638 insertions(+), 4484 deletions(-)
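Two themes dominate the diff below: a new `async gen` coroutine kind (the `coro_kind` to `coroutine_kind` rename plus `CoroutineKind::AsyncGen`), and never patterns, which make `match` arm bodies optional in the AST. For a never-pattern arm the lowering inserts a fake `loop {}` body so that the arm typechecks to `!`. The stable-Rust sketch below only illustrates why that trick works; the `Err(!)` spelling mentioned in the comment is an assumption about the still-unstable `never_patterns` surface syntax, and `unwrap_infallible` is an invented example name.

```rust
// Stable-Rust illustration of the trick used by the arm lowering below:
// a diverging `loop {}` has type `!`, so it can stand in for the body of an
// arm that can never be reached while the `match` still typechecks.
use std::convert::Infallible;

fn unwrap_infallible(r: Result<u32, Infallible>) -> u32 {
    match r {
        Ok(x) => x,
        // With the unstable `never_patterns` feature this arm could be spelled
        // `Err(!),` with no body at all; the lowering then substitutes a body
        // equivalent to the diverging expression used here.
        Err(_) => loop {},
    }
}

fn main() {
    assert_eq!(unwrap_infallible(Ok(7)), 7);
}
```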
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index d6c2bfacf66..190fae95652 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -658,6 +658,24 @@ impl Pat { pub fn is_rest(&self) -> bool { matches!(self.kind, PatKind::Rest) } + + /// Whether this could be a never pattern, taking into account that a macro invocation can + /// return a never pattern. Used to inform errors during parsing. + pub fn could_be_never_pattern(&self) -> bool { + let mut could_be_never_pattern = false; + self.walk(&mut |pat| match &pat.kind { + PatKind::Never | PatKind::MacCall(_) => { + could_be_never_pattern = true; + false + } + PatKind::Or(s) => { + could_be_never_pattern = s.iter().all(|p| p.could_be_never_pattern()); + false + } + _ => true, + }); + could_be_never_pattern + } } /// A single field in a struct pattern. @@ -1080,8 +1098,8 @@ pub struct Arm { pub pat: P<Pat>, /// Match arm guard, e.g. `n > 10` in `match foo { n if n > 10 => {}, _ => {} }` pub guard: Option<P<Expr>>, - /// Match arm body. - pub body: P<Expr>, + /// Match arm body. Omitted if the pattern is a never pattern. + pub body: Option<P<Expr>>, pub span: Span, pub id: NodeId, pub is_placeholder: bool, @@ -1311,7 +1329,7 @@ pub struct Closure { pub binder: ClosureBinder, pub capture_clause: CaptureBy, pub constness: Const, - pub coro_kind: Option<CoroutineKind>, + pub coroutine_kind: Option<CoroutineKind>, pub movability: Movability, pub fn_decl: P<FnDecl>, pub body: P<Expr>, @@ -1516,6 +1534,7 @@ pub enum ExprKind { pub enum GenBlockKind { Async, Gen, + AsyncGen, } impl fmt::Display for GenBlockKind { @@ -1529,6 +1548,7 @@ impl GenBlockKind { match self { GenBlockKind::Async => "async", GenBlockKind::Gen => "gen", + GenBlockKind::AsyncGen => "async gen", } } } @@ -2413,10 +2433,12 @@ pub enum Unsafe { /// Iterator`. #[derive(Copy, Clone, Encodable, Decodable, Debug)] pub enum CoroutineKind { - /// `async`, which evaluates to `impl Future` + /// `async`, which returns an `impl Future` Async { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId }, - /// `gen`, which evaluates to `impl Iterator` + /// `gen`, which returns an `impl Iterator` Gen { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId }, + /// `async gen`, which returns an `impl AsyncIterator` + AsyncGen { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId }, } impl CoroutineKind { @@ -2428,12 +2450,23 @@ impl CoroutineKind { matches!(self, CoroutineKind::Gen { .. }) } + pub fn closure_id(self) -> NodeId { + match self { + CoroutineKind::Async { closure_id, .. } + | CoroutineKind::Gen { closure_id, .. } + | CoroutineKind::AsyncGen { closure_id, .. } => closure_id, + } + } + /// In this case this is an `async` or `gen` return, the `NodeId` for the generated `impl Trait` /// item. pub fn return_id(self) -> (NodeId, Span) { match self { CoroutineKind::Async { return_impl_trait_id, span, .. } - | CoroutineKind::Gen { return_impl_trait_id, span, .. } => (return_impl_trait_id, span), + | CoroutineKind::Gen { return_impl_trait_id, span, .. } + | CoroutineKind::AsyncGen { return_impl_trait_id, span, .. } => { + (return_impl_trait_id, span) + } } } } @@ -2838,7 +2871,7 @@ pub struct FnHeader { /// The `unsafe` keyword, if any pub unsafety: Unsafe, /// Whether this is `async`, `gen`, or nothing. 
- pub coro_kind: Option<CoroutineKind>, + pub coroutine_kind: Option<CoroutineKind>, /// The `const` keyword, if any pub constness: Const, /// The `extern` keyword and corresponding ABI string, if any @@ -2848,9 +2881,9 @@ pub struct FnHeader { impl FnHeader { /// Does this function header have any qualifiers or is it empty? pub fn has_qualifiers(&self) -> bool { - let Self { unsafety, coro_kind, constness, ext } = self; + let Self { unsafety, coroutine_kind, constness, ext } = self; matches!(unsafety, Unsafe::Yes(_)) - || coro_kind.is_some() + || coroutine_kind.is_some() || matches!(constness, Const::Yes(_)) || !matches!(ext, Extern::None) } @@ -2858,7 +2891,12 @@ impl FnHeader { impl Default for FnHeader { fn default() -> FnHeader { - FnHeader { unsafety: Unsafe::No, coro_kind: None, constness: Const::No, ext: Extern::None } + FnHeader { + unsafety: Unsafe::No, + coroutine_kind: None, + constness: Const::No, + ext: Extern::None, + } } } diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs index c6a31fbdbc3..41c4e024447 100644 --- a/compiler/rustc_ast/src/mut_visit.rs +++ b/compiler/rustc_ast/src/mut_visit.rs @@ -121,8 +121,8 @@ pub trait MutVisitor: Sized { noop_visit_fn_decl(d, self); } - fn visit_coro_kind(&mut self, a: &mut CoroutineKind) { - noop_visit_coro_kind(a, self); + fn visit_coroutine_kind(&mut self, a: &mut CoroutineKind) { + noop_visit_coroutine_kind(a, self); } fn visit_closure_binder(&mut self, b: &mut ClosureBinder) { @@ -453,7 +453,7 @@ pub fn noop_flat_map_arm<T: MutVisitor>(mut arm: Arm, vis: &mut T) -> SmallVec<[ vis.visit_id(id); vis.visit_pat(pat); visit_opt(guard, |guard| vis.visit_expr(guard)); - vis.visit_expr(body); + visit_opt(body, |body| vis.visit_expr(body)); vis.visit_span(span); smallvec![arm] } @@ -871,10 +871,11 @@ pub fn noop_visit_closure_binder<T: MutVisitor>(binder: &mut ClosureBinder, vis: } } -pub fn noop_visit_coro_kind<T: MutVisitor>(coro_kind: &mut CoroutineKind, vis: &mut T) { - match coro_kind { +pub fn noop_visit_coroutine_kind<T: MutVisitor>(coroutine_kind: &mut CoroutineKind, vis: &mut T) { + match coroutine_kind { CoroutineKind::Async { span, closure_id, return_impl_trait_id } - | CoroutineKind::Gen { span, closure_id, return_impl_trait_id } => { + | CoroutineKind::Gen { span, closure_id, return_impl_trait_id } + | CoroutineKind::AsyncGen { span, closure_id, return_impl_trait_id } => { vis.visit_span(span); vis.visit_id(closure_id); vis.visit_id(return_impl_trait_id); @@ -1171,9 +1172,9 @@ fn visit_const_item<T: MutVisitor>( } pub fn noop_visit_fn_header<T: MutVisitor>(header: &mut FnHeader, vis: &mut T) { - let FnHeader { unsafety, coro_kind, constness, ext: _ } = header; + let FnHeader { unsafety, coroutine_kind, constness, ext: _ } = header; visit_constness(constness, vis); - coro_kind.as_mut().map(|coro_kind| vis.visit_coro_kind(coro_kind)); + coroutine_kind.as_mut().map(|coroutine_kind| vis.visit_coroutine_kind(coroutine_kind)); visit_unsafety(unsafety, vis); } @@ -1407,7 +1408,7 @@ pub fn noop_visit_expr<T: MutVisitor>( binder, capture_clause, constness, - coro_kind, + coroutine_kind, movability: _, fn_decl, body, @@ -1416,7 +1417,7 @@ pub fn noop_visit_expr<T: MutVisitor>( }) => { vis.visit_closure_binder(binder); visit_constness(constness, vis); - coro_kind.as_mut().map(|coro_kind| vis.visit_coro_kind(coro_kind)); + coroutine_kind.as_mut().map(|coroutine_kind| vis.visit_coroutine_kind(coroutine_kind)); vis.visit_capture_by(capture_clause); vis.visit_fn_decl(fn_decl); vis.visit_expr(body); diff 
--git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs index a303d6584f4..ce5214efaca 100644 --- a/compiler/rustc_ast/src/visit.rs +++ b/compiler/rustc_ast/src/visit.rs @@ -861,7 +861,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) { ExprKind::Closure(box Closure { binder, capture_clause, - coro_kind: _, + coroutine_kind: _, constness: _, movability: _, fn_decl, @@ -951,7 +951,7 @@ pub fn walk_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a Param) { pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) { visitor.visit_pat(&arm.pat); walk_list!(visitor, visit_expr, &arm.guard); - visitor.visit_expr(&arm.body); + walk_list!(visitor, visit_expr, &arm.body); walk_list!(visitor, visit_attribute, &arm.attrs); } diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl index 91591a71611..6bde4f2d8fa 100644 --- a/compiler/rustc_ast_lowering/messages.ftl +++ b/compiler/rustc_ast_lowering/messages.ftl @@ -91,6 +91,10 @@ ast_lowering_invalid_register = ast_lowering_invalid_register_class = invalid register class `{$reg_class}`: {$error} +ast_lowering_match_arm_with_no_body = + `match` arm with no body + .suggestion = add a body after the pattern + ast_lowering_misplaced_assoc_ty_binding = associated type bounds are only allowed in where clauses and function signatures, not in {$position} @@ -104,6 +108,15 @@ ast_lowering_misplaced_impl_trait = ast_lowering_misplaced_relax_trait_bound = `?Trait` bounds are only permitted at the point where a type parameter is declared +ast_lowering_never_pattern_with_body = + a never pattern is always unreachable + .label = this will never be executed + .suggestion = remove this expression + +ast_lowering_never_pattern_with_guard = + a guard on a never pattern will never be run + .suggestion = remove this guard + ast_lowering_not_supported_for_lifetime_binder_async_closure = `for<...>` binders on `async` closures are not currently supported diff --git a/compiler/rustc_ast_lowering/src/errors.rs b/compiler/rustc_ast_lowering/src/errors.rs index 6e1a9eff500..11bb559719b 100644 --- a/compiler/rustc_ast_lowering/src/errors.rs +++ b/compiler/rustc_ast_lowering/src/errors.rs @@ -340,6 +340,32 @@ pub struct NotSupportedForLifetimeBinderAsyncClosure { pub span: Span, } +#[derive(Diagnostic)] +#[diag(ast_lowering_match_arm_with_no_body)] +pub struct MatchArmWithNoBody { + #[primary_span] + pub span: Span, + #[suggestion(code = " => todo!(),", applicability = "has-placeholders")] + pub suggestion: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_lowering_never_pattern_with_body)] +pub struct NeverPatternWithBody { + #[primary_span] + #[label] + #[suggestion(code = "", applicability = "maybe-incorrect")] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(ast_lowering_never_pattern_with_guard)] +pub struct NeverPatternWithGuard { + #[primary_span] + #[suggestion(code = "", applicability = "maybe-incorrect")] + pub span: Span, +} + #[derive(Diagnostic, Clone, Copy)] #[diag(ast_lowering_arbitrary_expression_in_pattern)] pub struct ArbitraryExpressionInPattern { diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index 3556ee02bd7..99691f43f91 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -1,8 +1,9 @@ use super::errors::{ AsyncCoroutinesNotSupported, AsyncNonMoveClosureNotSupported, AwaitOnlyInAsyncFnAndBlocks, BaseExpressionDoubleDot, ClosureCannotBeStatic, 
CoroutineTooManyParameters, - FunctionalRecordUpdateDestructuringAssignment, InclusiveRangeWithNoEnd, - NotSupportedForLifetimeBinderAsyncClosure, UnderscoreExprLhsAssign, + FunctionalRecordUpdateDestructuringAssignment, InclusiveRangeWithNoEnd, MatchArmWithNoBody, + NeverPatternWithBody, NeverPatternWithGuard, NotSupportedForLifetimeBinderAsyncClosure, + UnderscoreExprLhsAssign, }; use super::ResolverAstLoweringExt; use super::{ImplTraitContext, LoweringContext, ParamMode, ParenthesizedGenericArgs}; @@ -13,6 +14,7 @@ use rustc_ast::*; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; +use rustc_middle::span_bug; use rustc_session::errors::report_lit_error; use rustc_span::source_map::{respan, Spanned}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; @@ -195,22 +197,19 @@ impl<'hir> LoweringContext<'_, 'hir> { binder, capture_clause, constness, - coro_kind, + coroutine_kind, movability, fn_decl, body, fn_decl_span, fn_arg_span, - }) => match coro_kind { - Some( - CoroutineKind::Async { closure_id, .. } - | CoroutineKind::Gen { closure_id, .. }, - ) => self.lower_expr_async_closure( + }) => match coroutine_kind { + Some(coroutine_kind) => self.lower_expr_coroutine_closure( binder, *capture_clause, e.id, hir_id, - *closure_id, + *coroutine_kind, fn_decl, body, *fn_decl_span, @@ -324,6 +323,15 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::CoroutineSource::Block, |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), ), + ExprKind::Gen(capture_clause, block, GenBlockKind::AsyncGen) => self + .make_async_gen_expr( + *capture_clause, + e.id, + None, + e.span, + hir::CoroutineSource::Block, + |this| this.with_new_scopes(e.span, |this| this.lower_block_expr(block)), + ), ExprKind::Yield(opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()), ExprKind::Err => hir::ExprKind::Err( self.tcx.sess.span_delayed_bug(e.span, "lowered ExprKind::Err"), @@ -549,7 +557,7 @@ impl<'hir> LoweringContext<'_, 'hir> { fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> { let pat = self.lower_pat(&arm.pat); - let guard = arm.guard.as_ref().map(|cond| { + let mut guard = arm.guard.as_ref().map(|cond| { if let ExprKind::Let(pat, scrutinee, span, is_recovered) = &cond.kind { hir::Guard::IfLet(self.arena.alloc(hir::Let { hir_id: self.next_id(), @@ -564,14 +572,43 @@ impl<'hir> LoweringContext<'_, 'hir> { } }); let hir_id = self.next_id(); + let span = self.lower_span(arm.span); self.lower_attrs(hir_id, &arm.attrs); - hir::Arm { - hir_id, - pat, - guard, - body: self.lower_expr(&arm.body), - span: self.lower_span(arm.span), - } + let is_never_pattern = pat.is_never_pattern(); + let body = if let Some(body) = &arm.body + && !is_never_pattern + { + self.lower_expr(body) + } else { + // Either `body.is_none()` or `is_never_pattern` here. + if !is_never_pattern { + let suggestion = span.shrink_to_hi(); + self.tcx.sess.emit_err(MatchArmWithNoBody { span, suggestion }); + } else if let Some(body) = &arm.body { + self.tcx.sess.emit_err(NeverPatternWithBody { span: body.span }); + guard = None; + } else if let Some(g) = &arm.guard { + self.tcx.sess.emit_err(NeverPatternWithGuard { span: g.span }); + guard = None; + } + + // We add a fake `loop {}` arm body so that it typecks to `!`. + // FIXME(never_patterns): Desugar into a call to `unreachable_unchecked`. 
+ let block = self.arena.alloc(hir::Block { + stmts: &[], + expr: None, + hir_id: self.next_id(), + rules: hir::BlockCheckMode::DefaultBlock, + span, + targeted_by_break: false, + }); + self.arena.alloc(hir::Expr { + hir_id: self.next_id(), + kind: hir::ExprKind::Loop(block, None, hir::LoopSource::Loop, span), + span, + }) + }; + hir::Arm { hir_id, pat, guard, body, span } } /// Lower an `async` construct to a coroutine that implements `Future`. @@ -597,7 +634,7 @@ impl<'hir> LoweringContext<'_, 'hir> { // Resume argument type: `ResumeTy` let unstable_span = self.mark_span_with_reason( DesugaringKind::Async, - span, + self.lower_span(span), Some(self.allow_gen_future.clone()), ); let resume_ty = hir::QPath::LangItem(hir::LangItem::ResumeTy, unstable_span); @@ -706,6 +743,87 @@ impl<'hir> LoweringContext<'_, 'hir> { })) } + /// Lower a `async gen` construct to a generator that implements `AsyncIterator`. + /// + /// This results in: + /// + /// ```text + /// static move? |_task_context| -> () { + /// <body> + /// } + /// ``` + pub(super) fn make_async_gen_expr( + &mut self, + capture_clause: CaptureBy, + closure_node_id: NodeId, + _yield_ty: Option<hir::FnRetTy<'hir>>, + span: Span, + async_coroutine_source: hir::CoroutineSource, + body: impl FnOnce(&mut Self) -> hir::Expr<'hir>, + ) -> hir::ExprKind<'hir> { + let output = hir::FnRetTy::DefaultReturn(self.lower_span(span)); + + // Resume argument type: `ResumeTy` + let unstable_span = self.mark_span_with_reason( + DesugaringKind::Async, + self.lower_span(span), + Some(self.allow_gen_future.clone()), + ); + let resume_ty = hir::QPath::LangItem(hir::LangItem::ResumeTy, unstable_span); + let input_ty = hir::Ty { + hir_id: self.next_id(), + kind: hir::TyKind::Path(resume_ty), + span: unstable_span, + }; + + // The closure/coroutine `FnDecl` takes a single (resume) argument of type `input_ty`. + let fn_decl = self.arena.alloc(hir::FnDecl { + inputs: arena_vec![self; input_ty], + output, + c_variadic: false, + implicit_self: hir::ImplicitSelfKind::None, + lifetime_elision_allowed: false, + }); + + // Lower the argument pattern/ident. The ident is used again in the `.await` lowering. + let (pat, task_context_hid) = self.pat_ident_binding_mode( + span, + Ident::with_dummy_span(sym::_task_context), + hir::BindingAnnotation::MUT, + ); + let param = hir::Param { + hir_id: self.next_id(), + pat, + ty_span: self.lower_span(span), + span: self.lower_span(span), + }; + let params = arena_vec![self; param]; + + let body = self.lower_body(move |this| { + this.coroutine_kind = Some(hir::CoroutineKind::AsyncGen(async_coroutine_source)); + + let old_ctx = this.task_context; + this.task_context = Some(task_context_hid); + let res = body(this); + this.task_context = old_ctx; + (params, res) + }); + + // `static |_task_context| -> <ret_ty> { body }`: + hir::ExprKind::Closure(self.arena.alloc(hir::Closure { + def_id: self.local_def_id(closure_node_id), + binder: hir::ClosureBinder::Default, + capture_clause, + bound_generic_params: &[], + fn_decl, + body, + fn_decl_span: self.lower_span(span), + fn_arg_span: None, + movability: Some(hir::Movability::Static), + constness: hir::Constness::NotConst, + })) + } + /// Forwards a possible `#[track_caller]` annotation from `outer_hir_id` to /// `inner_hir_id` in case the `async_fn_track_caller` feature is enabled. 
pub(super) fn maybe_forward_track_caller( @@ -755,15 +873,18 @@ impl<'hir> LoweringContext<'_, 'hir> { /// ``` fn lower_expr_await(&mut self, await_kw_span: Span, expr: &Expr) -> hir::ExprKind<'hir> { let full_span = expr.span.to(await_kw_span); - match self.coroutine_kind { - Some(hir::CoroutineKind::Async(_)) => {} + + let is_async_gen = match self.coroutine_kind { + Some(hir::CoroutineKind::Async(_)) => false, + Some(hir::CoroutineKind::AsyncGen(_)) => true, Some(hir::CoroutineKind::Coroutine) | Some(hir::CoroutineKind::Gen(_)) | None => { return hir::ExprKind::Err(self.tcx.sess.emit_err(AwaitOnlyInAsyncFnAndBlocks { await_kw_span, item_span: self.current_item, })); } - } + }; + let span = self.mark_span_with_reason(DesugaringKind::Await, await_kw_span, None); let gen_future_span = self.mark_span_with_reason( DesugaringKind::Await, @@ -852,12 +973,19 @@ impl<'hir> LoweringContext<'_, 'hir> { self.stmt_expr(span, match_expr) }; - // task_context = yield (); + // Depending on `async` of `async gen`: + // async - task_context = yield (); + // async gen - task_context = yield ASYNC_GEN_PENDING; let yield_stmt = { - let unit = self.expr_unit(span); + let yielded = if is_async_gen { + self.arena.alloc(self.expr_lang_item_path(span, hir::LangItem::AsyncGenPending)) + } else { + self.expr_unit(span) + }; + let yield_expr = self.expr( span, - hir::ExprKind::Yield(unit, hir::YieldSource::Await { expr: Some(expr_hir_id) }), + hir::ExprKind::Yield(yielded, hir::YieldSource::Await { expr: Some(expr_hir_id) }), ); let yield_expr = self.arena.alloc(yield_expr); @@ -967,7 +1095,11 @@ impl<'hir> LoweringContext<'_, 'hir> { } Some(movability) } - Some(hir::CoroutineKind::Gen(_)) | Some(hir::CoroutineKind::Async(_)) => { + Some( + hir::CoroutineKind::Gen(_) + | hir::CoroutineKind::Async(_) + | hir::CoroutineKind::AsyncGen(_), + ) => { panic!("non-`async`/`gen` closure body turned `async`/`gen` during lowering"); } None => { @@ -994,18 +1126,22 @@ impl<'hir> LoweringContext<'_, 'hir> { (binder, params) } - fn lower_expr_async_closure( + fn lower_expr_coroutine_closure( &mut self, binder: &ClosureBinder, capture_clause: CaptureBy, closure_id: NodeId, closure_hir_id: hir::HirId, - inner_closure_id: NodeId, + coroutine_kind: CoroutineKind, decl: &FnDecl, body: &Expr, fn_decl_span: Span, fn_arg_span: Span, ) -> hir::ExprKind<'hir> { + let CoroutineKind::Async { closure_id: inner_closure_id, .. } = coroutine_kind else { + span_bug!(fn_decl_span, "`async gen` and `gen` closures are not supported, yet"); + }; + if let &ClosureBinder::For { span, .. 
} = binder { self.tcx.sess.emit_err(NotSupportedForLifetimeBinderAsyncClosure { span }); } @@ -1474,8 +1610,9 @@ impl<'hir> LoweringContext<'_, 'hir> { } fn lower_expr_yield(&mut self, span: Span, opt_expr: Option<&Expr>) -> hir::ExprKind<'hir> { - match self.coroutine_kind { - Some(hir::CoroutineKind::Gen(_)) => {} + let is_async_gen = match self.coroutine_kind { + Some(hir::CoroutineKind::Gen(_)) => false, + Some(hir::CoroutineKind::AsyncGen(_)) => true, Some(hir::CoroutineKind::Async(_)) => { return hir::ExprKind::Err( self.tcx.sess.emit_err(AsyncCoroutinesNotSupported { span }), @@ -1491,14 +1628,24 @@ impl<'hir> LoweringContext<'_, 'hir> { ) .emit(); } - self.coroutine_kind = Some(hir::CoroutineKind::Coroutine) + self.coroutine_kind = Some(hir::CoroutineKind::Coroutine); + false } - } + }; - let expr = + let mut yielded = opt_expr.as_ref().map(|x| self.lower_expr(x)).unwrap_or_else(|| self.expr_unit(span)); - hir::ExprKind::Yield(expr, hir::YieldSource::Yield) + if is_async_gen { + // yield async_gen_ready($expr); + yielded = self.expr_call_lang_item_fn( + span, + hir::LangItem::AsyncGenReady, + std::slice::from_ref(yielded), + ); + } + + hir::ExprKind::Yield(yielded, hir::YieldSource::Yield) } /// Desugar `ExprForLoop` from: `[opt_ident]: for <pat> in <head> <body>` into: diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 80854c8a6c0..7c05724f64c 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -206,19 +206,25 @@ impl<'hir> LoweringContext<'_, 'hir> { // `impl Future<Output = T>` here because lower_body // only cares about the input argument patterns in the function // declaration (decl), not the return types. - let coro_kind = header.coro_kind; + let coroutine_kind = header.coroutine_kind; let body_id = this.lower_maybe_coroutine_body( span, hir_id, decl, - coro_kind, + coroutine_kind, body.as_deref(), ); let itctx = ImplTraitContext::Universal; let (generics, decl) = this.lower_generics(generics, header.constness, id, &itctx, |this| { - this.lower_fn_decl(decl, id, *fn_sig_span, FnDeclKind::Fn, coro_kind) + this.lower_fn_decl( + decl, + id, + *fn_sig_span, + FnDeclKind::Fn, + coroutine_kind, + ) }); let sig = hir::FnSig { decl, @@ -734,7 +740,7 @@ impl<'hir> LoweringContext<'_, 'hir> { sig, i.id, FnDeclKind::Trait, - sig.header.coro_kind, + sig.header.coroutine_kind, ); (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false) } @@ -743,7 +749,7 @@ impl<'hir> LoweringContext<'_, 'hir> { i.span, hir_id, &sig.decl, - sig.header.coro_kind, + sig.header.coroutine_kind, Some(body), ); let (generics, sig) = self.lower_method_sig( @@ -751,7 +757,7 @@ impl<'hir> LoweringContext<'_, 'hir> { sig, i.id, FnDeclKind::Trait, - sig.header.coro_kind, + sig.header.coroutine_kind, ); (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true) } @@ -844,7 +850,7 @@ impl<'hir> LoweringContext<'_, 'hir> { i.span, hir_id, &sig.decl, - sig.header.coro_kind, + sig.header.coroutine_kind, body.as_deref(), ); let (generics, sig) = self.lower_method_sig( @@ -852,7 +858,7 @@ impl<'hir> LoweringContext<'_, 'hir> { sig, i.id, if self.is_in_trait_impl { FnDeclKind::Impl } else { FnDeclKind::Inherent }, - sig.header.coro_kind, + sig.header.coroutine_kind, ); (generics, hir::ImplItemKind::Fn(sig, body_id)) @@ -1023,17 +1029,13 @@ impl<'hir> LoweringContext<'_, 'hir> { span: Span, fn_id: hir::HirId, decl: &FnDecl, - coro_kind: Option<CoroutineKind>, + coroutine_kind: 
Option<CoroutineKind>, body: Option<&Block>, ) -> hir::BodyId { - let (Some(coro_kind), Some(body)) = (coro_kind, body) else { + let (Some(coroutine_kind), Some(body)) = (coroutine_kind, body) else { return self.lower_fn_body_block(span, decl, body); }; - let closure_id = match coro_kind { - CoroutineKind::Async { closure_id, .. } | CoroutineKind::Gen { closure_id, .. } => { - closure_id - } - }; + let closure_id = coroutine_kind.closure_id(); self.lower_body(|this| { let mut parameters: Vec<hir::Param<'_>> = Vec::new(); @@ -1200,7 +1202,8 @@ impl<'hir> LoweringContext<'_, 'hir> { this.expr_block(body) }; - let coroutine_expr = match coro_kind { + // FIXME(gen_blocks): Consider unifying the `make_*_expr` functions. + let coroutine_expr = match coroutine_kind { CoroutineKind::Async { .. } => this.make_async_expr( CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, closure_id, @@ -1217,6 +1220,14 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::CoroutineSource::Fn, mkbody, ), + CoroutineKind::AsyncGen { .. } => this.make_async_gen_expr( + CaptureBy::Value { move_kw: rustc_span::DUMMY_SP }, + closure_id, + None, + body.span, + hir::CoroutineSource::Fn, + mkbody, + ), }; let hir_id = this.lower_node_id(closure_id); @@ -1233,19 +1244,19 @@ impl<'hir> LoweringContext<'_, 'hir> { sig: &FnSig, id: NodeId, kind: FnDeclKind, - coro_kind: Option<CoroutineKind>, + coroutine_kind: Option<CoroutineKind>, ) -> (&'hir hir::Generics<'hir>, hir::FnSig<'hir>) { let header = self.lower_fn_header(sig.header); let itctx = ImplTraitContext::Universal; let (generics, decl) = self.lower_generics(generics, sig.header.constness, id, &itctx, |this| { - this.lower_fn_decl(&sig.decl, id, sig.span, kind, coro_kind) + this.lower_fn_decl(&sig.decl, id, sig.span, kind, coroutine_kind) }); (generics, hir::FnSig { header, decl, span: self.lower_span(sig.span) }) } fn lower_fn_header(&mut self, h: FnHeader) -> hir::FnHeader { - let asyncness = if let Some(CoroutineKind::Async { span, .. }) = h.coro_kind { + let asyncness = if let Some(CoroutineKind::Async { span, .. }) = h.coroutine_kind { hir::IsAsync::Async(span) } else { hir::IsAsync::NotAsync @@ -1371,6 +1382,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let host_param_parts = if let Const::Yes(span) = constness && self.tcx.features().effects { + let span = self.lower_span(span); let param_node_id = self.next_node_id(); let hir_id = self.next_id(); let def_id = self.create_def( diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 5dda8f5a6a3..b4c211eec69 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -132,6 +132,7 @@ struct LoweringContext<'a, 'hir> { allow_try_trait: Lrc<[Symbol]>, allow_gen_future: Lrc<[Symbol]>, + allow_async_iterator: Lrc<[Symbol]>, /// Mapping from generics `def_id`s to TAIT generics `def_id`s. 
/// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic @@ -176,6 +177,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } else { [sym::gen_future].into() }, + // FIXME(gen_blocks): how does `closure_track_caller`/`async_fn_track_caller` + // interact with `gen`/`async gen` blocks + allow_async_iterator: [sym::gen_future, sym::async_iterator].into(), generics_def_id_map: Default::default(), host_param_id: None, } @@ -1675,7 +1679,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { duplicated_lifetime_node_id, lifetime.ident.name, DefKind::LifetimeParam, - lifetime.ident.span, + self.lower_span(lifetime.ident.span), ); captured_to_synthesized_mapping.insert(old_def_id, duplicated_lifetime_def_id); // FIXME: Instead of doing this, we could move this whole loop @@ -1684,7 +1688,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { synthesized_lifetime_definitions.push(( duplicated_lifetime_node_id, duplicated_lifetime_def_id, - lifetime.ident, + self.lower_ident(lifetime.ident), )); // Now make an arg that we can use for the generic params of the opaque tykind. @@ -1900,13 +1904,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { fn_span: Span, ) -> hir::FnRetTy<'hir> { let span = self.lower_span(fn_span); - let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None); - let opaque_ty_node_id = match coro { - CoroutineKind::Async { return_impl_trait_id, .. } - | CoroutineKind::Gen { return_impl_trait_id, .. } => return_impl_trait_id, + let (opaque_ty_node_id, allowed_features) = match coro { + CoroutineKind::Async { return_impl_trait_id, .. } => (return_impl_trait_id, None), + CoroutineKind::Gen { return_impl_trait_id, .. } => (return_impl_trait_id, None), + CoroutineKind::AsyncGen { return_impl_trait_id, .. } => { + (return_impl_trait_id, Some(self.allow_async_iterator.clone())) + } }; + let opaque_ty_span = + self.mark_span_with_reason(DesugaringKind::Async, span, allowed_features); + let captured_lifetimes: Vec<_> = self .resolver .take_extra_lifetime_params(opaque_ty_node_id) @@ -1925,7 +1934,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let bound = this.lower_coroutine_fn_output_type_to_bound( output, coro, - span, + opaque_ty_span, ImplTraitContext::ReturnPositionOpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id), fn_kind, @@ -1944,7 +1953,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { &mut self, output: &FnRetTy, coro: CoroutineKind, - span: Span, + opaque_ty_span: Span, nested_impl_trait_context: ImplTraitContext, ) -> hir::GenericBound<'hir> { // Compute the `T` in `Future<Output = T>` from the return type. @@ -1960,20 +1969,21 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // "<$assoc_ty_name = T>" let (assoc_ty_name, trait_lang_item) = match coro { - CoroutineKind::Async { .. } => (hir::FN_OUTPUT_NAME, hir::LangItem::Future), - CoroutineKind::Gen { .. } => (hir::ITERATOR_ITEM_NAME, hir::LangItem::Iterator), + CoroutineKind::Async { .. } => (sym::Output, hir::LangItem::Future), + CoroutineKind::Gen { .. } => (sym::Item, hir::LangItem::Iterator), + CoroutineKind::AsyncGen { .. 
} => (sym::Item, hir::LangItem::AsyncIterator), }; let future_args = self.arena.alloc(hir::GenericArgs { args: &[], - bindings: arena_vec![self; self.assoc_ty_binding(assoc_ty_name, span, output_ty)], + bindings: arena_vec![self; self.assoc_ty_binding(assoc_ty_name, opaque_ty_span, output_ty)], parenthesized: hir::GenericArgsParentheses::No, span_ext: DUMMY_SP, }); hir::GenericBound::LangItemTrait( trait_lang_item, - self.lower_span(span), + opaque_ty_span, self.next_id(), future_args, ) @@ -2243,7 +2253,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { match c.value.kind { ExprKind::Underscore => { if self.tcx.features().generic_arg_infer { - hir::ArrayLen::Infer(self.lower_node_id(c.id), c.value.span) + hir::ArrayLen::Infer(self.lower_node_id(c.id), self.lower_span(c.value.span)) } else { feature_err( &self.tcx.sess.parse_sess, diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs index 7ab0805d086..efd80af5ef4 100644 --- a/compiler/rustc_ast_lowering/src/path.rs +++ b/compiler/rustc_ast_lowering/src/path.rs @@ -389,7 +389,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { FnRetTy::Default(_) => self.arena.alloc(self.ty_tup(*span, &[])), }; let args = smallvec![GenericArg::Type(self.arena.alloc(self.ty_tup(*inputs_span, inputs)))]; - let binding = self.assoc_ty_binding(hir::FN_OUTPUT_NAME, output_ty.span, output_ty); + let binding = self.assoc_ty_binding(sym::Output, output_ty.span, output_ty); ( GenericArgsCtor { args, diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs index 554ed36b814..1f9bc09f5f7 100644 --- a/compiler/rustc_ast_passes/src/ast_validation.rs +++ b/compiler/rustc_ast_passes/src/ast_validation.rs @@ -1271,14 +1271,15 @@ impl<'a> Visitor<'a> for AstValidator<'a> { // Functions cannot both be `const async` or `const gen` if let Some(&FnHeader { constness: Const::Yes(cspan), - coro_kind: - Some( - CoroutineKind::Async { span: aspan, .. } - | CoroutineKind::Gen { span: aspan, .. }, - ), + coroutine_kind: Some(coroutine_kind), .. }) = fk.header() { + let aspan = match coroutine_kind { + CoroutineKind::Async { span: aspan, .. } + | CoroutineKind::Gen { span: aspan, .. } + | CoroutineKind::AsyncGen { span: aspan, .. } => aspan, + }; // FIXME(gen_blocks): Report a different error for `const gen` self.err_handler().emit_err(errors::ConstAndAsync { spans: vec![cspan, aspan], diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 1ad28ffbf2b..ff36e6c2845 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -1490,14 +1490,18 @@ impl<'a> State<'a> { } } - fn print_coro_kind(&mut self, coro_kind: ast::CoroutineKind) { - match coro_kind { + fn print_coroutine_kind(&mut self, coroutine_kind: ast::CoroutineKind) { + match coroutine_kind { ast::CoroutineKind::Gen { .. } => { self.word_nbsp("gen"); } ast::CoroutineKind::Async { .. } => { self.word_nbsp("async"); } + ast::CoroutineKind::AsyncGen { .. 
} => { + self.word_nbsp("async"); + self.word_nbsp("gen"); + } } } @@ -1690,7 +1694,7 @@ impl<'a> State<'a> { fn print_fn_header_info(&mut self, header: ast::FnHeader) { self.print_constness(header.constness); - header.coro_kind.map(|coro_kind| self.print_coro_kind(coro_kind)); + header.coroutine_kind.map(|coroutine_kind| self.print_coroutine_kind(coroutine_kind)); self.print_unsafety(header.unsafety); match header.ext { diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs index 0082d6e350e..5397278bbb1 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs @@ -413,7 +413,7 @@ impl<'a> State<'a> { binder, capture_clause, constness, - coro_kind, + coroutine_kind, movability, fn_decl, body, @@ -423,7 +423,7 @@ impl<'a> State<'a> { self.print_closure_binder(binder); self.print_constness(*constness); self.print_movability(*movability); - coro_kind.map(|coro_kind| self.print_coro_kind(coro_kind)); + coroutine_kind.map(|coroutine_kind| self.print_coroutine_kind(coroutine_kind)); self.print_capture_clause(*capture_clause); self.print_fn_params_and_ret(fn_decl, true); @@ -631,28 +631,33 @@ impl<'a> State<'a> { self.print_expr(e); self.space(); } - self.word_space("=>"); - match &arm.body.kind { - ast::ExprKind::Block(blk, opt_label) => { - if let Some(label) = opt_label { - self.print_ident(label.ident); - self.word_space(":"); - } + if let Some(body) = &arm.body { + self.word_space("=>"); + + match &body.kind { + ast::ExprKind::Block(blk, opt_label) => { + if let Some(label) = opt_label { + self.print_ident(label.ident); + self.word_space(":"); + } - // The block will close the pattern's ibox. - self.print_block_unclosed_indent(blk); + // The block will close the pattern's ibox. + self.print_block_unclosed_indent(blk); - // If it is a user-provided unsafe block, print a comma after it. - if let BlockCheckMode::Unsafe(ast::UserProvided) = blk.rules { + // If it is a user-provided unsafe block, print a comma after it. + if let BlockCheckMode::Unsafe(ast::UserProvided) = blk.rules { + self.word(","); + } + } + _ => { + self.end(); // Close the ibox for the pattern. + self.print_expr(body); self.word(","); } } - _ => { - self.end(); // Close the ibox for the pattern. - self.print_expr(&arm.body); - self.word(","); - } + } else { + self.word(","); } self.end(); // Close enclosing cbox. } diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs index 54f161ea9b4..1b544b53012 100644 --- a/compiler/rustc_borrowck/src/dataflow.rs +++ b/compiler/rustc_borrowck/src/dataflow.rs @@ -11,101 +11,88 @@ use rustc_middle::ty::TyCtxt; use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces}; use rustc_mir_dataflow::ResultsVisitable; use rustc_mir_dataflow::{self, fmt::DebugWithContext, GenKill}; -use rustc_mir_dataflow::{Analysis, Direction, Results}; +use rustc_mir_dataflow::{Analysis, AnalysisDomain, Results}; use std::fmt; use crate::{places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext}; -/// A tuple with named fields that can hold either the results or the transient state of the -/// dataflow analyses used by the borrow checker. -#[derive(Debug)] -pub struct BorrowckAnalyses<B, U, E> { - pub borrows: B, - pub uninits: U, - pub ever_inits: E, -} - /// The results of the dataflow analyses used by the borrow checker. 
-pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses< - Results<'tcx, Borrows<'mir, 'tcx>>, - Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>, - Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>, ->; +pub struct BorrowckResults<'mir, 'tcx> { + pub(crate) borrows: Results<'tcx, Borrows<'mir, 'tcx>>, + pub(crate) uninits: Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>, + pub(crate) ever_inits: Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>, +} /// The transient state of the dataflow analyses used by the borrow checker. -pub type BorrowckFlowState<'mir, 'tcx> = - <BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState; - -macro_rules! impl_visitable { - ( $( - $T:ident { $( $field:ident : $A:ident ),* $(,)? } - )* ) => { $( - impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*> - where - $( $A: Analysis<'tcx, Direction = D>, )* - { - type Direction = D; - type FlowState = $T<$( $A::Domain ),*>; +#[derive(Debug)] +pub struct BorrowckFlowState<'mir, 'tcx> { + pub(crate) borrows: <Borrows<'mir, 'tcx> as AnalysisDomain<'tcx>>::Domain, + pub(crate) uninits: <MaybeUninitializedPlaces<'mir, 'tcx> as AnalysisDomain<'tcx>>::Domain, + pub(crate) ever_inits: <EverInitializedPlaces<'mir, 'tcx> as AnalysisDomain<'tcx>>::Domain, +} - fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState { - $T { - $( $field: self.$field.analysis.bottom_value(body) ),* - } - } +impl<'mir, 'tcx> ResultsVisitable<'tcx> for BorrowckResults<'mir, 'tcx> { + // All three analyses are forward, but we have to use just one here. + type Direction = <Borrows<'mir, 'tcx> as AnalysisDomain<'tcx>>::Direction; + type FlowState = BorrowckFlowState<'mir, 'tcx>; - fn reset_to_block_entry( - &self, - state: &mut Self::FlowState, - block: BasicBlock, - ) { - $( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )* - } + fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState { + BorrowckFlowState { + borrows: self.borrows.analysis.bottom_value(body), + uninits: self.uninits.analysis.bottom_value(body), + ever_inits: self.ever_inits.analysis.bottom_value(body), + } + } - fn reconstruct_before_statement_effect( - &mut self, - state: &mut Self::FlowState, - stmt: &mir::Statement<'tcx>, - loc: Location, - ) { - $( self.$field.analysis - .apply_before_statement_effect(&mut state.$field, stmt, loc); )* - } + fn reset_to_block_entry(&self, state: &mut Self::FlowState, block: BasicBlock) { + state.borrows.clone_from(&self.borrows.entry_set_for_block(block)); + state.uninits.clone_from(&self.uninits.entry_set_for_block(block)); + state.ever_inits.clone_from(&self.ever_inits.entry_set_for_block(block)); + } - fn reconstruct_statement_effect( - &mut self, - state: &mut Self::FlowState, - stmt: &mir::Statement<'tcx>, - loc: Location, - ) { - $( self.$field.analysis - .apply_statement_effect(&mut state.$field, stmt, loc); )* - } + fn reconstruct_before_statement_effect( + &mut self, + state: &mut Self::FlowState, + stmt: &mir::Statement<'tcx>, + loc: Location, + ) { + self.borrows.analysis.apply_before_statement_effect(&mut state.borrows, stmt, loc); + self.uninits.analysis.apply_before_statement_effect(&mut state.uninits, stmt, loc); + self.ever_inits.analysis.apply_before_statement_effect(&mut state.ever_inits, stmt, loc); + } - fn reconstruct_before_terminator_effect( - &mut self, - state: &mut Self::FlowState, - term: &mir::Terminator<'tcx>, - loc: Location, - ) { - $( self.$field.analysis - .apply_before_terminator_effect(&mut state.$field, term, 
loc); )* - } + fn reconstruct_statement_effect( + &mut self, + state: &mut Self::FlowState, + stmt: &mir::Statement<'tcx>, + loc: Location, + ) { + self.borrows.analysis.apply_statement_effect(&mut state.borrows, stmt, loc); + self.uninits.analysis.apply_statement_effect(&mut state.uninits, stmt, loc); + self.ever_inits.analysis.apply_statement_effect(&mut state.ever_inits, stmt, loc); + } - fn reconstruct_terminator_effect( - &mut self, - state: &mut Self::FlowState, - term: &mir::Terminator<'tcx>, - loc: Location, - ) { - $( self.$field.analysis - .apply_terminator_effect(&mut state.$field, term, loc); )* - } - } - )* } -} + fn reconstruct_before_terminator_effect( + &mut self, + state: &mut Self::FlowState, + term: &mir::Terminator<'tcx>, + loc: Location, + ) { + self.borrows.analysis.apply_before_terminator_effect(&mut state.borrows, term, loc); + self.uninits.analysis.apply_before_terminator_effect(&mut state.uninits, term, loc); + self.ever_inits.analysis.apply_before_terminator_effect(&mut state.ever_inits, term, loc); + } -impl_visitable! { - BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E } + fn reconstruct_terminator_effect( + &mut self, + state: &mut Self::FlowState, + term: &mir::Terminator<'tcx>, + loc: Location, + ) { + self.borrows.analysis.apply_terminator_effect(&mut state.borrows, term, loc); + self.uninits.analysis.apply_terminator_effect(&mut state.uninits, term, loc); + self.ever_inits.analysis.apply_terminator_effect(&mut state.ever_inits, term, loc); + } } rustc_index::newtype_index! { @@ -598,7 +585,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> { fn before_terminator_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _terminator: &mir::Terminator<'tcx>, location: Location, ) { @@ -625,7 +612,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> { fn call_return_effect( &mut self, - _trans: &mut impl GenKill<Self::Idx>, + _trans: &mut Self::Domain, _block: mir::BasicBlock, _return_places: CallReturnPlaces<'_, 'tcx>, ) { diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs index 7bcad92ff33..7e62bb9793d 100644 --- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs @@ -2517,12 +2517,23 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> { CoroutineKind::Gen(kind) => match kind { CoroutineSource::Block => "gen block", CoroutineSource::Closure => "gen closure", - _ => bug!("gen block/closure expected, but gen function found."), + CoroutineSource::Fn => { + bug!("gen block/closure expected, but gen function found.") + } + }, + CoroutineKind::AsyncGen(kind) => match kind { + CoroutineSource::Block => "async gen block", + CoroutineSource::Closure => "async gen closure", + CoroutineSource::Fn => { + bug!("gen block/closure expected, but gen function found.") + } }, CoroutineKind::Async(async_kind) => match async_kind { CoroutineSource::Block => "async block", CoroutineSource::Closure => "async closure", - _ => bug!("async block/closure expected, but async function found."), + CoroutineSource::Fn => { + bug!("async block/closure expected, but async function found.") + } }, CoroutineKind::Coroutine => "coroutine", }, diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs index 977a5d5d50d..a17c3bc3a78 100644 --- 
a/compiler/rustc_borrowck/src/diagnostics/region_name.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs @@ -684,7 +684,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { hir::FnRetTy::Return(hir_ty) => (fn_decl.output.span(), Some(hir_ty)), }; let mir_description = match hir.body(body).coroutine_kind { - Some(hir::CoroutineKind::Async(gen)) => match gen { + Some(hir::CoroutineKind::Async(src)) => match src { hir::CoroutineSource::Block => " of async block", hir::CoroutineSource::Closure => " of async closure", hir::CoroutineSource::Fn => { @@ -701,7 +701,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { " of async function" } }, - Some(hir::CoroutineKind::Gen(gen)) => match gen { + Some(hir::CoroutineKind::Gen(src)) => match src { hir::CoroutineSource::Block => " of gen block", hir::CoroutineSource::Closure => " of gen closure", hir::CoroutineSource::Fn => { @@ -715,6 +715,21 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> { " of gen function" } }, + + Some(hir::CoroutineKind::AsyncGen(src)) => match src { + hir::CoroutineSource::Block => " of async gen block", + hir::CoroutineSource::Closure => " of async gen closure", + hir::CoroutineSource::Fn => { + let parent_item = + hir.get_by_def_id(hir.get_parent_item(mir_hir_id).def_id); + let output = &parent_item + .fn_decl() + .expect("coroutine lowered from async gen fn should be in fn") + .output; + span = output.span(); + " of async gen function" + } + }, Some(hir::CoroutineKind::Coroutine) => " of coroutine", None => " of closure", }; diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs index f3164bd2c2a..7f5589210d4 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs @@ -136,7 +136,7 @@ fn cs_partial_cmp( && let Some(last) = arms.last_mut() && let PatKind::Wild = last.pat.kind { - last.body = expr2; + last.body = Some(expr2); expr1 } else { let eq_arm = cx.arm( diff --git a/compiler/rustc_builtin_macros/src/test.rs b/compiler/rustc_builtin_macros/src/test.rs index 81433155ecf..e5b274304e7 100644 --- a/compiler/rustc_builtin_macros/src/test.rs +++ b/compiler/rustc_builtin_macros/src/test.rs @@ -541,12 +541,30 @@ fn check_test_signature( return Err(sd.emit_err(errors::TestBadFn { span: i.span, cause: span, kind: "unsafe" })); } - if let Some(ast::CoroutineKind::Async { span, .. }) = f.sig.header.coro_kind { - return Err(sd.emit_err(errors::TestBadFn { span: i.span, cause: span, kind: "async" })); - } - - if let Some(ast::CoroutineKind::Gen { span, .. }) = f.sig.header.coro_kind { - return Err(sd.emit_err(errors::TestBadFn { span: i.span, cause: span, kind: "gen" })); + if let Some(coroutine_kind) = f.sig.header.coroutine_kind { + match coroutine_kind { + ast::CoroutineKind::Async { span, .. } => { + return Err(sd.emit_err(errors::TestBadFn { + span: i.span, + cause: span, + kind: "async", + })); + } + ast::CoroutineKind::Gen { span, .. } => { + return Err(sd.emit_err(errors::TestBadFn { + span: i.span, + cause: span, + kind: "gen", + })); + } + ast::CoroutineKind::AsyncGen { span, .. 
} => { + return Err(sd.emit_err(errors::TestBadFn { + span: i.span, + cause: span, + kind: "async gen", + })); + } + } } // If the termination trait is active, the compiler will check that the output diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs index 0bd211fd614..5997e6026b4 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs @@ -1,5 +1,6 @@ //! Codegen `extern "platform-intrinsic"` intrinsics. +use cranelift_codegen::ir::immediates::Offset32; use rustc_middle::ty::GenericArgsRef; use rustc_span::Symbol; use rustc_target::abi::Endian; @@ -1008,8 +1009,57 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( } } + sym::simd_masked_load => { + intrinsic_args!(fx, args => (mask, ptr, val); intrinsic); + + let (val_lane_count, val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx); + let (mask_lane_count, _mask_lane_ty) = mask.layout().ty.simd_size_and_type(fx.tcx); + let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx); + assert_eq!(val_lane_count, mask_lane_count); + assert_eq!(val_lane_count, ret_lane_count); + + let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap(); + let ret_lane_layout = fx.layout_of(ret_lane_ty); + let ptr_val = ptr.load_scalar(fx); + + for lane_idx in 0..ret_lane_count { + let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx); + let mask_lane = mask.value_lane(fx, lane_idx).load_scalar(fx); + + let if_enabled = fx.bcx.create_block(); + let if_disabled = fx.bcx.create_block(); + let next = fx.bcx.create_block(); + let res_lane = fx.bcx.append_block_param(next, lane_clif_ty); + + fx.bcx.ins().brif(mask_lane, if_enabled, &[], if_disabled, &[]); + fx.bcx.seal_block(if_enabled); + fx.bcx.seal_block(if_disabled); + + fx.bcx.switch_to_block(if_enabled); + let offset = lane_idx as i32 * lane_clif_ty.bytes() as i32; + let res = fx.bcx.ins().load( + lane_clif_ty, + MemFlags::trusted(), + ptr_val, + Offset32::new(offset), + ); + fx.bcx.ins().jump(next, &[res]); + + fx.bcx.switch_to_block(if_disabled); + fx.bcx.ins().jump(next, &[val_lane]); + + fx.bcx.seal_block(next); + fx.bcx.switch_to_block(next); + + fx.bcx.ins().nop(); + + ret.place_lane(fx, lane_idx) + .write_cvalue(fx, CValue::by_val(res_lane, ret_lane_layout)); + } + } + sym::simd_scatter => { - intrinsic_args!(fx, args => (val, ptr, mask); intrinsic); + intrinsic_args!(fx, args => (mask, ptr, val); intrinsic); let (val_lane_count, _val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx); let (ptr_lane_count, _ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx); diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index a043660ea63..893cad05161 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -569,5 +569,6 @@ fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel { TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic, TlsModel::InitialExec => gccjit::TlsModel::InitialExec, TlsModel::LocalExec => gccjit::TlsModel::LocalExec, + TlsModel::Emulated => gccjit::TlsModel::GlobalDynamic, } } diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index 36484c3c3fc..28a88dd2efe 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -39,7 +39,7 @@ impl 
OwnedTargetMachine { split_dwarf_file: &CStr, output_obj_file: &CStr, debug_info_compression: &CStr, - force_emulated_tls: bool, + use_emulated_tls: bool, args_cstr_buff: &[u8], ) -> Result<Self, LlvmError<'static>> { assert!(args_cstr_buff.len() > 0); @@ -71,7 +71,7 @@ impl OwnedTargetMachine { split_dwarf_file.as_ptr(), output_obj_file.as_ptr(), debug_info_compression.as_ptr(), - force_emulated_tls, + use_emulated_tls, args_cstr_buff.as_ptr() as *const c_char, args_cstr_buff.len(), ) diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 1a567c0fce8..bdabb9129a7 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -33,7 +33,7 @@ use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, Switc use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::InnerSpan; -use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo}; +use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel}; use crate::llvm::diagnostic::OptimizationDiagnosticKind; use libc::{c_char, c_int, c_uint, c_void, size_t}; @@ -223,7 +223,7 @@ pub fn target_machine_factory( let path_mapping = sess.source_map().path_mapping().clone(); - let force_emulated_tls = sess.target.force_emulated_tls; + let use_emulated_tls = matches!(sess.tls_model(), TlsModel::Emulated); // copy the exe path, followed by path all into one buffer // null terminating them so we can use them as null terminated strings @@ -297,7 +297,7 @@ pub fn target_machine_factory( &split_dwarf_file, &output_obj_file, &debuginfo_compression, - force_emulated_tls, + use_emulated_tls, &args_cstr_buff, ) }) diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 92a8c00510b..ed0d5e68cbf 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -120,6 +120,7 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic, TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec, TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec, + TlsModel::Emulated => llvm::ThreadLocalMode::GeneralDynamic, } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 7d69756181a..8386f067baf 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -100,6 +100,9 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { let Coverage { kind } = coverage; match *kind { + // Span markers are only meaningful during MIR instrumentation, + // and have no effect during codegen. 
+ CoverageKind::SpanMarker => {} CoverageKind::CounterIncrement { id } => { func_coverage.mark_counter_id_seen(id); // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`, diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index cc7e78b9c62..f16014e1361 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1492,6 +1492,198 @@ fn generic_simd_intrinsic<'ll, 'tcx>( return Ok(v); } + if name == sym::simd_masked_load { + // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + // Loads contiguous elements from memory behind `pointer`, but only for + // those lanes whose `mask` bit is enabled. + // The memory addresses corresponding to the “off” lanes are not accessed. + + // The element type of the "mask" argument must be a signed integer type of any width + let mask_ty = in_ty; + let (mask_len, mask_elem) = (in_len, in_elem); + + // The second argument must be a pointer matching the element type + let pointer_ty = arg_tys[1]; + + // The last argument is a passthrough vector providing values for disabled lanes + let values_ty = arg_tys[2]; + let (values_len, values_elem) = require_simd!(values_ty, SimdThird); + + require_simd!(ret_ty, SimdReturn); + + // Of the same length: + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); + + // The return type must match the last argument type + require!( + ret_ty == values_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty } + ); + + require!( + matches!( + pointer_ty.kind(), + ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: values_elem, + second_arg: pointer_ty, + in_elem: values_elem, + in_ty: values_ty, + mutability: ExpectedPointerMutability::Not, + } + ); + + require!( + matches!(mask_elem.kind(), ty::Int(_)), + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: values_elem, + third_arg: mask_ty, + } + ); + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(values_ty).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, mask_len); + (bx.trunc(args[0].immediate(), i1xn), i1xn) + }; + + let llvm_pointer = bx.type_ptr(); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len); + let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len); + + let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0"); + let fn_ty = bx + .type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = bx.call( + fn_ty, + None, + None, + f, + &[args[1].immediate(), alignment, mask, args[2].immediate()], + None, + ); + return Ok(v); + } + + if name == sym::simd_masked_store { + // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> () + // * N: number of 
elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + // Stores contiguous elements to memory behind `pointer`, but only for + // those lanes whose `mask` bit is enabled. + // The memory addresses corresponding to the “off” lanes are not accessed. + + // The element type of the "mask" argument must be a signed integer type of any width + let mask_ty = in_ty; + let (mask_len, mask_elem) = (in_len, in_elem); + + // The second argument must be a pointer matching the element type + let pointer_ty = arg_tys[1]; + + // The last argument specifies the values to store to memory + let values_ty = arg_tys[2]; + let (values_len, values_elem) = require_simd!(values_ty, SimdThird); + + // Of the same length: + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); + + // The second argument must be a mutable pointer type matching the element type + require!( + matches!( + pointer_ty.kind(), + ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() && p.mutbl.is_mut() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: values_elem, + second_arg: pointer_ty, + in_elem: values_elem, + in_ty: values_ty, + mutability: ExpectedPointerMutability::Mut, + } + ); + + require!( + matches!(mask_elem.kind(), ty::Int(_)), + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: values_elem, + third_arg: mask_ty, + } + ); + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len); + (bx.trunc(args[0].immediate(), i1xn), i1xn) + }; + + let ret_t = bx.type_void(); + + let llvm_pointer = bx.type_ptr(); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len); + let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len); + + let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0"); + let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = bx.call( + fn_ty, + None, + None, + f, + &[args[2].immediate(), args[1].immediate(), alignment, mask], + None, + ); + return Ok(v); + } + if name == sym::simd_scatter { // simd_scatter(values: <N x T>, pointers: <N x *mut T>, // mask: <N x i{M}>) -> () diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 915cf31de08..33b19ab362a 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -306,7 +306,9 @@ impl CodegenBackend for LlvmCodegenBackend { } PrintKind::TlsModels => { writeln!(out, "Available TLS models:"); - for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] { + for name in + &["global-dynamic", "local-dynamic", "initial-exec", "local-exec", "emulated"] + { writeln!(out, " {name}"); } writeln!(out); diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 6c3ccc9cf0d..432cfe203c8 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ 
b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -2159,7 +2159,7 @@ extern "C" { SplitDwarfFile: *const c_char, OutputObjFile: *const c_char, DebugInfoCompression: *const c_char, - ForceEmulatedTls: bool, + UseEmulatedTls: bool, ArgsCstrBuff: *const c_char, ArgsCstrBuffLen: usize, ) -> *mut TargetMachine; diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index eb69efb0d59..93cb7327a01 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -263,6 +263,10 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> { "sve2-bitperm", TargetFeatureFoldStrength::EnableOnly("neon"), ), + // The unaligned-scalar-mem feature was renamed to fast-unaligned-access. + ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => { + LLVMFeature::new("unaligned-scalar-mem") + } (_, s) => LLVMFeature::new(s), } } diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index bb5f6e27e4d..eeb57d4d0f4 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -1748,7 +1748,9 @@ fn exported_symbols_for_non_proc_macro(tcx: TyCtxt<'_>, crate_type: CrateType) - let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); for_each_exported_symbols_include_dep(tcx, crate_type, |symbol, info, cnum| { if info.level.is_below_threshold(export_threshold) { - symbols.push(symbol_export::symbol_name_for_instance_in_crate(tcx, symbol, cnum)); + symbols.push(symbol_export::exporting_symbol_name_for_instance_in_crate( + tcx, symbol, cnum, + )); } }); diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs index f9ad8ca9563..54b523cb6bd 100644 --- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs +++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs @@ -16,7 +16,7 @@ use rustc_middle::ty::{self, SymbolName, TyCtxt}; use rustc_middle::ty::{GenericArgKind, GenericArgsRef}; use rustc_middle::util::Providers; use rustc_session::config::{CrateType, OomStrategy}; -use rustc_target::spec::SanitizerSet; +use rustc_target::spec::{SanitizerSet, TlsModel}; pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel { crates_export_threshold(tcx.crate_types()) @@ -564,6 +564,12 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>( let mut undecorated = symbol_name_for_instance_in_crate(tcx, symbol, instantiating_crate); + // thread local will not be a function call, + // so it is safe to return before windows symbol decoration check. + if let Some(name) = maybe_emutls_symbol_name(tcx, symbol, &undecorated) { + return name; + } + let target = &tcx.sess.target; if !target.is_like_windows { // Mach-O has a global "_" suffix and `object` crate will handle it. 
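The `simd_masked_load` / `simd_masked_store` comments above describe the per-lane contract: lanes whose mask bit is off never touch memory, and on a load they keep the passthrough value. A purely illustrative scalar model of that contract follows (not the real intrinsics, which lower to `llvm.masked.load` / `llvm.masked.store` as shown in the hunk above):

// Scalar sketch of the masked-load semantics: disabled lanes keep the
// passthrough value and their addresses are never dereferenced.
unsafe fn masked_load_model<const N: usize>(
    mask: [bool; N],
    ptr: *const i32,
    passthrough: [i32; N],
) -> [i32; N] {
    let mut out = passthrough;
    for lane in 0..N {
        if mask[lane] {
            // Only enabled lanes read memory.
            out[lane] = *ptr.add(lane);
        }
    }
    out
}

// The store side mirrors it: only enabled lanes write to memory.
unsafe fn masked_store_model<const N: usize>(mask: [bool; N], ptr: *mut i32, values: [i32; N]) {
    for lane in 0..N {
        if mask[lane] {
            *ptr.add(lane) = values[lane];
        }
    }
}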
@@ -624,6 +630,32 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>( format!("{prefix}{undecorated}{suffix}{args_in_bytes}") } +pub fn exporting_symbol_name_for_instance_in_crate<'tcx>( + tcx: TyCtxt<'tcx>, + symbol: ExportedSymbol<'tcx>, + cnum: CrateNum, +) -> String { + let undecorated = symbol_name_for_instance_in_crate(tcx, symbol, cnum); + maybe_emutls_symbol_name(tcx, symbol, &undecorated).unwrap_or(undecorated) +} + +fn maybe_emutls_symbol_name<'tcx>( + tcx: TyCtxt<'tcx>, + symbol: ExportedSymbol<'tcx>, + undecorated: &str, +) -> Option<String> { + if matches!(tcx.sess.tls_model(), TlsModel::Emulated) + && let ExportedSymbol::NonGeneric(def_id) = symbol + && tcx.is_thread_local_static(def_id) + { + // When using emutls, LLVM will add the `__emutls_v.` prefix to thread local symbols, + // and exported symbol name need to match this. + Some(format!("__emutls_v.{undecorated}")) + } else { + None + } +} + fn wasm_import_module_map(tcx: TyCtxt<'_>, cnum: CrateNum) -> FxHashMap<DefId, String> { // Build up a map from DefId to a `NativeLib` structure, where // `NativeLib` internally contains information about diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index 8630e5623e1..dda30046bfb 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -566,6 +566,9 @@ fn coroutine_kind_label(coroutine_kind: Option<CoroutineKind>) -> &'static str { Some(CoroutineKind::Async(CoroutineSource::Block)) => "async_block", Some(CoroutineKind::Async(CoroutineSource::Closure)) => "async_closure", Some(CoroutineKind::Async(CoroutineSource::Fn)) => "async_fn", + Some(CoroutineKind::AsyncGen(CoroutineSource::Block)) => "async_gen_block", + Some(CoroutineKind::AsyncGen(CoroutineSource::Closure)) => "async_gen_closure", + Some(CoroutineKind::AsyncGen(CoroutineSource::Fn)) => "async_gen_fn", Some(CoroutineKind::Coroutine) => "coroutine", None => "closure", } diff --git a/compiler/rustc_codegen_ssa/src/mir/locals.rs b/compiler/rustc_codegen_ssa/src/mir/locals.rs index 378c5401322..7db260c9f5b 100644 --- a/compiler/rustc_codegen_ssa/src/mir/locals.rs +++ b/compiler/rustc_codegen_ssa/src/mir/locals.rs @@ -43,7 +43,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let local = mir::Local::from_usize(local); let expected_ty = self.monomorphize(self.mir.local_decls[local].ty); if expected_ty != op.layout.ty { - warn!("Unexpected initial operand type. 
See the issues/114858"); + warn!( + "Unexpected initial operand type: expected {expected_ty:?}, found {:?}.\ + See <https://github.com/rust-lang/rust/issues/114858>.", + op.layout.ty + ); } } } diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs index d802816bb75..c3b8859c779 100644 --- a/compiler/rustc_codegen_ssa/src/target_features.rs +++ b/compiler/rustc_codegen_ssa/src/target_features.rs @@ -291,9 +291,9 @@ const RISCV_ALLOWED_FEATURES: &[(&str, Stability)] = &[ ("d", Unstable(sym::riscv_target_feature)), ("e", Unstable(sym::riscv_target_feature)), ("f", Unstable(sym::riscv_target_feature)), + ("fast-unaligned-access", Unstable(sym::riscv_target_feature)), ("m", Stable), ("relax", Unstable(sym::riscv_target_feature)), - ("unaligned-scalar-mem", Unstable(sym::riscv_target_feature)), ("v", Unstable(sym::riscv_target_feature)), ("zba", Stable), ("zbb", Stable), diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs index bcc42a376ea..e604f4c1aec 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs @@ -22,7 +22,7 @@ use std::mem; use std::ops::{ControlFlow, Deref}; use super::ops::{self, NonConstOp, Status}; -use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop}; +use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop}; use super::resolver::FlowSensitiveAnalysis; use super::{ConstCx, Qualif}; use crate::const_eval::is_unstable_const_fn; @@ -35,7 +35,7 @@ type QualifResults<'mir, 'tcx, Q> = pub struct Qualifs<'mir, 'tcx> { has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>, needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>, - // needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>, + needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>, } impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { @@ -78,27 +78,25 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> { local: Local, location: Location, ) -> bool { - // FIXME(effects) replace with `NeedsNonconstDrop` after const traits work again - /* let ty = ccx.body.local_decls[local].ty; - if !NeedsDrop::in_any_value_of_ty(ccx, ty) { + // Peeking into opaque types causes cycles if the current function declares said opaque + // type. Thus we avoid short circuiting on the type and instead run the more expensive + // analysis that looks at the actual usage within this function + if !ty.has_opaque_types() && !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) { return false; } let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| { let ConstCx { tcx, body, .. } = *ccx; - FlowSensitiveAnalysis::new(NeedsDrop, ccx) - .into_engine(tcx, &body) + FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx) + .into_engine(tcx, body) .iterate_to_fixpoint() - .into_results_cursor(&body) + .into_results_cursor(body) }); needs_non_const_drop.seek_before_primary_effect(location); needs_non_const_drop.get().contains(local) - */ - - self.needs_drop(ccx, local, location) } /// Returns `true` if `local` is `HasMutInterior` at the given `Location`. 
@@ -1013,9 +1011,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { let mut err_span = self.span; let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty; - // FIXME(effects) replace with `NeedsNonConstDrop` once we fix const traits let ty_needs_non_const_drop = - qualifs::NeedsDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place); + qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place); debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop); diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs index 06371438ec1..5cd13783c23 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs @@ -5,7 +5,7 @@ use rustc_span::{symbol::sym, Span}; use super::check::Qualifs; use super::ops::{self, NonConstOp}; -use super::qualifs::{NeedsDrop, Qualif}; +use super::qualifs::{NeedsNonConstDrop, Qualif}; use super::ConstCx; /// Returns `true` if we should use the more precise live drop checker that runs after drop @@ -83,8 +83,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> { mir::TerminatorKind::Drop { place: dropped_place, .. } => { let dropped_ty = dropped_place.ty(self.body, self.tcx).ty; - // FIXME(effects) use `NeedsNonConstDrop` - if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) { + if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) { // Instead of throwing a bug, we just return here. This is because we have to // run custom `const Drop` impls. return; diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs index de3186a53c1..d5f418e1710 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs @@ -23,8 +23,7 @@ pub fn in_any_value_of_ty<'tcx>( ConstQualifs { has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty), needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty), - // FIXME(effects) - needs_non_const_drop: NeedsDrop::in_any_value_of_ty(cx, ty), + needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty), custom_eq: CustomEq::in_any_value_of_ty(cx, ty), tainted_by_errors, } @@ -155,12 +154,27 @@ impl Qualif for NeedsNonConstDrop { return false; } - // FIXME(effects) constness + // FIXME(effects): If `destruct` is not a `const_trait`, + // or effects are disabled in this crate, then give up. 
+ let destruct_def_id = cx.tcx.require_lang_item(LangItem::Destruct, Some(cx.body.span)); + if cx.tcx.generics_of(destruct_def_id).host_effect_index.is_none() + || !cx.tcx.features().effects + { + return NeedsDrop::in_any_value_of_ty(cx, ty); + } + let obligation = Obligation::new( cx.tcx, ObligationCause::dummy_with_span(cx.body.span), cx.param_env, - ty::TraitRef::from_lang_item(cx.tcx, LangItem::Destruct, cx.body.span, [ty]), + ty::TraitRef::new( + cx.tcx, + destruct_def_id, + [ + ty::GenericArg::from(ty), + ty::GenericArg::from(cx.tcx.expected_const_effect_param_for_body(cx.def_id())), + ], + ), ); let infcx = cx.tcx.infer_ctxt().build(); diff --git a/compiler/rustc_data_structures/src/jobserver.rs b/compiler/rustc_data_structures/src/jobserver.rs index b777bfd4d3c..412e33aaa65 100644 --- a/compiler/rustc_data_structures/src/jobserver.rs +++ b/compiler/rustc_data_structures/src/jobserver.rs @@ -52,7 +52,7 @@ fn default_client() -> Client { static GLOBAL_CLIENT_CHECKED: OnceLock<Client> = OnceLock::new(); -pub fn check(report_warning: impl FnOnce(&'static str)) { +pub fn initialize_checked(report_warning: impl FnOnce(&'static str)) { let client_checked = match &*GLOBAL_CLIENT { Ok(client) => client.clone(), Err(e) => { diff --git a/compiler/rustc_driver_impl/Cargo.toml b/compiler/rustc_driver_impl/Cargo.toml index f2a8c54b6d5..49042984553 100644 --- a/compiler/rustc_driver_impl/Cargo.toml +++ b/compiler/rustc_driver_impl/Cargo.toml @@ -38,6 +38,7 @@ rustc_mir_transform = { path = "../rustc_mir_transform" } rustc_monomorphize = { path = "../rustc_monomorphize" } rustc_parse = { path = "../rustc_parse" } rustc_passes = { path = "../rustc_passes" } +rustc_pattern_analysis = { path = "../rustc_pattern_analysis" } rustc_privacy = { path = "../rustc_privacy" } rustc_query_system = { path = "../rustc_query_system" } rustc_resolve = { path = "../rustc_resolve" } diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 1f60400b513..8b7a4dbff9d 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -128,6 +128,7 @@ pub static DEFAULT_LOCALE_RESOURCES: &[&str] = &[ rustc_monomorphize::DEFAULT_LOCALE_RESOURCE, rustc_parse::DEFAULT_LOCALE_RESOURCE, rustc_passes::DEFAULT_LOCALE_RESOURCE, + rustc_pattern_analysis::DEFAULT_LOCALE_RESOURCE, rustc_privacy::DEFAULT_LOCALE_RESOURCE, rustc_query_system::DEFAULT_LOCALE_RESOURCE, rustc_resolve::DEFAULT_LOCALE_RESOURCE, diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 6aaf4a0f5cf..4fb63d67e78 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -591,17 +591,18 @@ impl Diagnostic { pub fn multipart_suggestion_with_style( &mut self, msg: impl Into<SubdiagnosticMessage>, - suggestion: Vec<(Span, String)>, + mut suggestion: Vec<(Span, String)>, applicability: Applicability, style: SuggestionStyle, ) -> &mut Self { - let mut parts = suggestion + suggestion.sort_unstable(); + suggestion.dedup(); + + let parts = suggestion .into_iter() .map(|(span, snippet)| SubstitutionPart { snippet, span }) .collect::<Vec<_>>(); - parts.sort_unstable_by_key(|part| part.span); - assert!(!parts.is_empty()); debug_assert_eq!( parts.iter().find(|part| part.span.is_empty() && part.snippet.is_empty()), diff --git a/compiler/rustc_expand/messages.ftl b/compiler/rustc_expand/messages.ftl index 8b93829623d..fc3f7b1d749 100644 --- a/compiler/rustc_expand/messages.ftl +++ b/compiler/rustc_expand/messages.ftl @@ 
-71,6 +71,8 @@ expand_macro_const_stability = .label = invalid const stability attribute .label2 = const stability attribute affects this macro +expand_macro_expands_to_match_arm = macros cannot expand to match arms + expand_malformed_feature_attribute = malformed `feature` attribute input .expected = expected just one word diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs index 794e11d87d1..86f555fa08b 100644 --- a/compiler/rustc_expand/src/build.rs +++ b/compiler/rustc_expand/src/build.rs @@ -505,7 +505,7 @@ impl<'a> ExtCtxt<'a> { attrs: AttrVec::new(), pat, guard: None, - body: expr, + body: Some(expr), span, id: ast::DUMMY_NODE_ID, is_placeholder: false, @@ -547,7 +547,7 @@ impl<'a> ExtCtxt<'a> { binder: ast::ClosureBinder::NotPresent, capture_clause: ast::CaptureBy::Ref, constness: ast::Const::No, - coro_kind: None, + coroutine_kind: None, movability: ast::Movability::Movable, fn_decl, body, diff --git a/compiler/rustc_expand/src/errors.rs b/compiler/rustc_expand/src/errors.rs index d86632c47fc..6e919a8fa9f 100644 --- a/compiler/rustc_expand/src/errors.rs +++ b/compiler/rustc_expand/src/errors.rs @@ -304,6 +304,8 @@ pub(crate) struct IncompleteParse<'a> { pub label_span: Span, pub macro_path: &'a ast::Path, pub kind_name: &'a str, + #[note(expand_macro_expands_to_match_arm)] + pub expands_to_match_arm: Option<()>, #[suggestion( expand_suggestion_add_semi, diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index ad21a73f063..47d4b802c0f 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -955,12 +955,15 @@ pub fn ensure_complete_parse<'a>( _ => None, }; + let expands_to_match_arm = kind_name == "pattern" && parser.token == token::FatArrow; + parser.sess.emit_err(IncompleteParse { span: def_site_span, token, label_span: span, macro_path, kind_name, + expands_to_match_arm: expands_to_match_arm.then_some(()), add_semicolon, }); } diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs index 1292a855230..ded0baa9563 100644 --- a/compiler/rustc_expand/src/placeholders.rs +++ b/compiler/rustc_expand/src/placeholders.rs @@ -119,7 +119,7 @@ pub fn placeholder( }]), AstFragmentKind::Arms => AstFragment::Arms(smallvec![ast::Arm { attrs: Default::default(), - body: expr_placeholder(), + body: Some(expr_placeholder()), guard: None, id, pat: pat(), diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index ee66ebc2554..01508375b1a 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -1056,6 +1056,23 @@ impl<'hir> Pat<'hir> { true }) } + + /// Whether this a never pattern. + pub fn is_never_pattern(&self) -> bool { + let mut is_never_pattern = false; + self.walk(|pat| match &pat.kind { + PatKind::Never => { + is_never_pattern = true; + false + } + PatKind::Or(s) => { + is_never_pattern = s.iter().all(|p| p.is_never_pattern()); + false + } + _ => true, + }); + is_never_pattern + } } /// A single field in a struct pattern. @@ -1339,12 +1356,16 @@ impl<'hir> Body<'hir> { /// The type of source expression that caused this coroutine to be created. #[derive(Clone, PartialEq, Eq, Debug, Copy, Hash, HashStable_Generic, Encodable, Decodable)] pub enum CoroutineKind { - /// An explicit `async` block or the body of an async function. + /// An explicit `async` block or the body of an `async` function. Async(CoroutineSource), /// An explicit `gen` block or the body of a `gen` function. 
Gen(CoroutineSource), + /// An explicit `async gen` block or the body of an `async gen` function, + /// which is able to both `yield` and `.await`. + AsyncGen(CoroutineSource), + /// A coroutine literal created via a `yield` inside a closure. Coroutine, } @@ -1369,6 +1390,14 @@ impl fmt::Display for CoroutineKind { } k.fmt(f) } + CoroutineKind::AsyncGen(k) => { + if f.alternate() { + f.write_str("`async gen` ")?; + } else { + f.write_str("async gen ")? + } + k.fmt(f) + } } } } @@ -2064,17 +2093,6 @@ impl fmt::Display for YieldSource { } } -impl From<CoroutineKind> for YieldSource { - fn from(kind: CoroutineKind) -> Self { - match kind { - // Guess based on the kind of the current coroutine. - CoroutineKind::Coroutine => Self::Yield, - CoroutineKind::Async(_) => Self::Await { expr: None }, - CoroutineKind::Gen(_) => Self::Yield, - } - } -} - // N.B., if you change this, you'll probably want to change the corresponding // type structure in middle/ty.rs as well. #[derive(Debug, Clone, Copy, HashStable_Generic)] @@ -2254,11 +2272,6 @@ pub enum ImplItemKind<'hir> { Type(&'hir Ty<'hir>), } -/// The name of the associated type for `Fn` return types. -pub const FN_OUTPUT_NAME: Symbol = sym::Output; -/// The name of the associated type for `Iterator` item types. -pub const ITERATOR_ITEM_NAME: Symbol = sym::Item; - /// Bind a type to an associated type (i.e., `A = Foo`). /// /// Bindings like `A: Debug` are represented as a special type `A = diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index 60f1449c177..b0b53bb7478 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -212,6 +212,7 @@ language_item_table! { Iterator, sym::iterator, iterator_trait, Target::Trait, GenericRequirement::Exact(0); Future, sym::future_trait, future_trait, Target::Trait, GenericRequirement::Exact(0); + AsyncIterator, sym::async_iterator, async_iterator_trait, Target::Trait, GenericRequirement::Exact(0); CoroutineState, sym::coroutine_state, coroutine_state, Target::Enum, GenericRequirement::None; Coroutine, sym::coroutine, coroutine_trait, Target::Trait, GenericRequirement::Minimum(1); Unpin, sym::unpin, unpin_trait, Target::Trait, GenericRequirement::None; @@ -294,6 +295,10 @@ language_item_table! { PollReady, sym::Ready, poll_ready_variant, Target::Variant, GenericRequirement::None; PollPending, sym::Pending, poll_pending_variant, Target::Variant, GenericRequirement::None; + AsyncGenReady, sym::AsyncGenReady, async_gen_ready, Target::Method(MethodKind::Inherent), GenericRequirement::Exact(1); + AsyncGenPending, sym::AsyncGenPending, async_gen_pending, Target::AssocConst, GenericRequirement::Exact(1); + AsyncGenFinished, sym::AsyncGenFinished, async_gen_finished, Target::AssocConst, GenericRequirement::Exact(1); + // FIXME(swatinem): the following lang items are used for async lowering and // should become obsolete eventually. 
ResumeTy, sym::ResumeTy, resume_ty, Target::Struct, GenericRequirement::None; diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl index 8ab91bebcf6..139e1c0ac5f 100644 --- a/compiler/rustc_hir_analysis/messages.ftl +++ b/compiler/rustc_hir_analysis/messages.ftl @@ -1,8 +1,28 @@ +hir_analysis_ambiguous_assoc_item = ambiguous associated {$assoc_kind} `{$assoc_name}` in bounds of `{$ty_param_name}` + .label = ambiguous associated {$assoc_kind} `{$assoc_name}` + hir_analysis_ambiguous_lifetime_bound = ambiguous lifetime bound, explicit lifetime bound required -hir_analysis_assoc_bound_on_const = expected associated type, found {$descr} - .note = trait bounds not allowed on {$descr} +hir_analysis_assoc_item_not_found = associated {$assoc_kind} `{$assoc_name}` not found for `{$ty_param_name}` + +hir_analysis_assoc_item_not_found_found_in_other_trait_label = there is {$identically_named -> + [true] an + *[false] a similarly named + } associated {$assoc_kind} `{$suggested_name}` in the trait `{$trait_name}` +hir_analysis_assoc_item_not_found_label = associated {$assoc_kind} `{$assoc_name}` not found +hir_analysis_assoc_item_not_found_other_sugg = `{$ty_param_name}` has the following associated {$assoc_kind} +hir_analysis_assoc_item_not_found_similar_in_other_trait_sugg = change the associated {$assoc_kind} name to use `{$suggested_name}` from `{$trait_name}` +hir_analysis_assoc_item_not_found_similar_in_other_trait_with_bound_sugg = and also change the associated {$assoc_kind} name +hir_analysis_assoc_item_not_found_similar_sugg = there is an associated {$assoc_kind} with a similar name + +hir_analysis_assoc_kind_mismatch = expected {$expected}, found {$got} + .label = unexpected {$got} + .expected_because_label = expected a {$expected} because of this associated {$expected} + .note = the associated {$assoc_kind} is defined here + .bound_on_assoc_const_label = bounds are not allowed on associated constants + +hir_analysis_assoc_kind_mismatch_wrap_in_braces_sugg = consider adding braces here hir_analysis_assoc_type_binding_not_allowed = associated type bindings are not allowed here @@ -280,10 +300,6 @@ hir_analysis_placeholder_not_allowed_item_signatures = the placeholder `_` is no hir_analysis_requires_note = the `{$trait_name}` impl for `{$ty}` requires that `{$error_predicate}` -hir_analysis_return_type_notation_conflicting_bound = - ambiguous associated function `{$assoc_name}` for `{$ty_name}` - .note = `{$assoc_name}` is declared in two supertraits: `{$first_bound}` and `{$second_bound}` - hir_analysis_return_type_notation_equality_bound = return type notation is not allowed to use type equality @@ -294,9 +310,6 @@ hir_analysis_return_type_notation_illegal_param_type = return type notation is not allowed for functions that have type parameters .label = type parameter declared here -hir_analysis_return_type_notation_missing_method = - cannot find associated function `{$assoc_name}` for `{$ty_name}` - hir_analysis_return_type_notation_on_non_rpitit = return type notation used on function that is not `async` and does not return `impl Trait` .note = function returns `{$ty}`, which is not compatible with associated type return bounds diff --git a/compiler/rustc_hir_analysis/src/astconv/bounds.rs b/compiler/rustc_hir_analysis/src/astconv/bounds.rs index f6907019d6e..dfec3c5e829 100644 --- a/compiler/rustc_hir_analysis/src/astconv/bounds.rs +++ b/compiler/rustc_hir_analysis/src/astconv/bounds.rs @@ -3,8 +3,7 @@ use rustc_errors::struct_span_err; use 
rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{DefId, LocalDefId}; -use rustc_lint_defs::Applicability; -use rustc_middle::ty::{self as ty, Ty, TypeVisitableExt}; +use rustc_middle::ty::{self as ty, Ty}; use rustc_span::symbol::Ident; use rustc_span::{ErrorGuaranteed, Span}; use rustc_trait_selection::traits; @@ -256,64 +255,49 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { let tcx = self.tcx(); - let return_type_notation = - binding.gen_args.parenthesized == hir::GenericArgsParentheses::ReturnTypeNotation; - - let candidate = if return_type_notation { - if self.trait_defines_associated_item_named( - trait_ref.def_id(), - ty::AssocKind::Fn, - binding.item_name, - ) { - trait_ref + let assoc_kind = + if binding.gen_args.parenthesized == hir::GenericArgsParentheses::ReturnTypeNotation { + ty::AssocKind::Fn + } else if let ConvertedBindingKind::Equality(term) = binding.kind + && let ty::TermKind::Const(_) = term.node.unpack() + { + ty::AssocKind::Const } else { - self.one_bound_for_assoc_method( - traits::supertraits(tcx, trait_ref), - trait_ref.print_only_trait_path(), - binding.item_name, - path_span, - )? - } - } else if self.trait_defines_associated_item_named( + ty::AssocKind::Type + }; + + let candidate = if self.trait_defines_associated_item_named( trait_ref.def_id(), - ty::AssocKind::Type, + assoc_kind, binding.item_name, ) { - // Simple case: X is defined in the current trait. + // Simple case: The assoc item is defined in the current trait. trait_ref } else { // Otherwise, we have to walk through the supertraits to find - // those that do. - self.one_bound_for_assoc_type( + // one that does define it. + self.one_bound_for_assoc_item( || traits::supertraits(tcx, trait_ref), trait_ref.skip_binder().print_only_trait_name(), None, + assoc_kind, binding.item_name, path_span, - match binding.kind { - ConvertedBindingKind::Equality(term) => Some(term), - _ => None, - }, + Some(&binding), )? }; let (assoc_ident, def_scope) = tcx.adjust_ident_and_get_scope(binding.item_name, candidate.def_id(), hir_ref_id); - // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead - // of calling `filter_by_name_and_kind`. - let find_item_of_kind = |kind| { - tcx.associated_items(candidate.def_id()) - .filter_by_name_unhygienic(assoc_ident.name) - .find(|i| i.kind == kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident) - }; - let assoc_item = if return_type_notation { - find_item_of_kind(ty::AssocKind::Fn) - } else { - find_item_of_kind(ty::AssocKind::Type) - .or_else(|| find_item_of_kind(ty::AssocKind::Const)) - } - .expect("missing associated type"); + // We have already adjusted the item name above, so compare with `.normalize_to_macros_2_0()` + // instead of calling `filter_by_name_and_kind` which would needlessly normalize the + // `assoc_ident` again and again. 
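For orientation, the `assoc_kind` dispatch above distinguishes three kinds of associated-item bindings in user-written bounds: associated type equality, associated const equality, and return type notation (the `ty::AssocKind::Fn` case, omitted below because its surface syntax is unstable). A nightly-only sketch of the first two, assuming the `associated_const_equality` feature gate:

#![feature(associated_const_equality)]

trait Shape {
    const SIDES: usize;
    type Unit;
}

// `Unit = f64` is an associated type equality binding (ty::AssocKind::Type);
// `SIDES = 4` is an associated const equality binding (ty::AssocKind::Const).
fn perimeter<S: Shape<Unit = f64, SIDES = 4>>(side: f64) -> f64 {
    side * 4.0
}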
+ let assoc_item = tcx + .associated_items(candidate.def_id()) + .filter_by_name_unhygienic(assoc_ident.name) + .find(|i| i.kind == assoc_kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident) + .expect("missing associated item"); if !assoc_item.visibility(tcx).is_accessible_from(def_scope, tcx) { tcx.sess @@ -340,7 +324,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { .or_insert(binding.span); } - let projection_ty = if return_type_notation { + let projection_ty = if let ty::AssocKind::Fn = assoc_kind { let mut emitted_bad_param_err = false; // If we have an method return type bound, then we need to substitute // the method's early bound params with suitable late-bound params. @@ -467,7 +451,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { let late_bound_in_trait_ref = tcx.collect_constrained_late_bound_regions(&projection_ty); let late_bound_in_ty = - tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty)); + tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty.node)); debug!(?late_bound_in_trait_ref); debug!(?late_bound_in_ty); @@ -492,77 +476,27 @@ impl<'tcx> dyn AstConv<'tcx> + '_ { } } - let assoc_item_def_id = projection_ty.skip_binder().def_id; - let def_kind = tcx.def_kind(assoc_item_def_id); match binding.kind { - ConvertedBindingKind::Equality(..) if return_type_notation => { + ConvertedBindingKind::Equality(..) if let ty::AssocKind::Fn = assoc_kind => { return Err(self.tcx().sess.emit_err( crate::errors::ReturnTypeNotationEqualityBound { span: binding.span }, )); } - ConvertedBindingKind::Equality(mut term) => { + ConvertedBindingKind::Equality(term) => { // "Desugar" a constraint like `T: Iterator<Item = u32>` this to // the "projection predicate" for: // // `<T as Iterator>::Item = u32` - match (def_kind, term.unpack()) { - (DefKind::AssocTy, ty::TermKind::Ty(_)) - | (DefKind::AssocConst, ty::TermKind::Const(_)) => (), - (_, _) => { - let got = if let Some(_) = term.ty() { "type" } else { "constant" }; - let expected = tcx.def_descr(assoc_item_def_id); - let mut err = tcx.sess.struct_span_err( - binding.span, - format!("expected {expected} bound, found {got}"), - ); - err.span_note( - tcx.def_span(assoc_item_def_id), - format!("{expected} defined here"), - ); - - if let DefKind::AssocConst = def_kind - && let Some(t) = term.ty() - && (t.is_enum() || t.references_error()) - && tcx.features().associated_const_equality - { - err.span_suggestion( - binding.span, - "if equating a const, try wrapping with braces", - format!("{} = {{ const }}", binding.item_name), - Applicability::HasPlaceholders, - ); - } - let reported = err.emit(); - term = match def_kind { - DefKind::AssocTy => Ty::new_error(tcx, reported).into(), - DefKind::AssocConst => ty::Const::new_error( - tcx, - reported, - tcx.type_of(assoc_item_def_id) - .instantiate(tcx, projection_ty.skip_binder().args), - ) - .into(), - _ => unreachable!(), - }; - } - } bounds.push_projection_bound( tcx, - projection_ty - .map_bound(|projection_ty| ty::ProjectionPredicate { projection_ty, term }), + projection_ty.map_bound(|projection_ty| ty::ProjectionPredicate { + projection_ty, + term: term.node, + }), binding.span, ); } ConvertedBindingKind::Constraint(ast_bounds) => { - match def_kind { - DefKind::AssocTy => {} - _ => { - return Err(tcx.sess.emit_err(errors::AssocBoundOnConst { - span: assoc_ident.span, - descr: tcx.def_descr(assoc_item_def_id), - })); - } - } // "Desugar" a constraint like `T: Iterator<Item: Debug>` to // // `<T as Iterator>::Item: Debug` diff --git a/compiler/rustc_hir_analysis/src/astconv/errors.rs 
b/compiler/rustc_hir_analysis/src/astconv/errors.rs index eb12ec7a098..13ad9a453b2 100644 --- a/compiler/rustc_hir_analysis/src/astconv/errors.rs +++ b/compiler/rustc_hir_analysis/src/astconv/errors.rs @@ -1,15 +1,16 @@ -use crate::astconv::AstConv; +use crate::astconv::{AstConv, ConvertedBindingKind}; use crate::errors::{ - AssocTypeBindingNotAllowed, ManualImplementation, MissingTypeParams, + self, AssocTypeBindingNotAllowed, ManualImplementation, MissingTypeParams, ParenthesizedFnTraitExpansion, }; +use crate::fluent_generated as fluent; use crate::traits::error_reporting::report_object_safety_error; use rustc_data_structures::fx::{FxHashMap, FxIndexMap, FxIndexSet}; use rustc_errors::{pluralize, struct_span_err, Applicability, Diagnostic, ErrorGuaranteed}; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_infer::traits::FulfillmentError; -use rustc_middle::ty::{self, suggest_constraining_type_param, AssocItem, AssocKind, Ty, TyCtxt}; +use rustc_middle::ty::{self, suggest_constraining_type_param, Ty, TyCtxt, TypeVisitableExt}; use rustc_session::parse::feature_err; use rustc_span::edit_distance::find_best_match_for_name; use rustc_span::symbol::{sym, Ident}; @@ -97,83 +98,88 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { } } - pub(crate) fn complain_about_assoc_type_not_found<I>( + pub(super) fn complain_about_assoc_item_not_found<I>( &self, all_candidates: impl Fn() -> I, ty_param_name: &str, ty_param_def_id: Option<LocalDefId>, + assoc_kind: ty::AssocKind, assoc_name: Ident, span: Span, + binding: Option<&super::ConvertedBinding<'_, 'tcx>>, ) -> ErrorGuaranteed where I: Iterator<Item = ty::PolyTraitRef<'tcx>>, { + let tcx = self.tcx(); + + // First and foremost, provide a more user-friendly & “intuitive” error on kind mismatches. + if let Some(assoc_item) = all_candidates().find_map(|r| { + tcx.associated_items(r.def_id()) + .filter_by_name_unhygienic(assoc_name.name) + .find(|item| tcx.hygienic_eq(assoc_name, item.ident(tcx), r.def_id())) + }) { + return self.complain_about_assoc_kind_mismatch( + assoc_item, assoc_kind, assoc_name, span, binding, + ); + } + + let assoc_kind_str = super::assoc_kind_str(assoc_kind); + // The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a // valid span, so we point at the whole path segment instead. 
let is_dummy = assoc_name.span == DUMMY_SP; - let mut err = struct_span_err!( - self.tcx().sess, - if is_dummy { span } else { assoc_name.span }, - E0220, - "associated type `{}` not found for `{}`", + let mut err = errors::AssocItemNotFound { + span: if is_dummy { span } else { assoc_name.span }, assoc_name, - ty_param_name - ); + assoc_kind: assoc_kind_str, + ty_param_name, + label: None, + sugg: None, + }; if is_dummy { - err.span_label(span, format!("associated type `{assoc_name}` not found")); - return err.emit(); + err.label = Some(errors::AssocItemNotFoundLabel::NotFound { span }); + return tcx.sess.emit_err(err); } let all_candidate_names: Vec<_> = all_candidates() - .flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order()) + .flat_map(|r| tcx.associated_items(r.def_id()).in_definition_order()) .filter_map(|item| { - if !item.is_impl_trait_in_trait() && item.kind == ty::AssocKind::Type { - Some(item.name) - } else { - None - } + (!item.is_impl_trait_in_trait() && item.kind == assoc_kind).then_some(item.name) }) .collect(); if let Some(suggested_name) = find_best_match_for_name(&all_candidate_names, assoc_name.name, None) { - err.span_suggestion( - assoc_name.span, - "there is an associated type with a similar name", + err.sugg = Some(errors::AssocItemNotFoundSugg::Similar { + span: assoc_name.span, + assoc_kind: assoc_kind_str, suggested_name, - Applicability::MaybeIncorrect, - ); - return err.emit(); + }); + return tcx.sess.emit_err(err); } // If we didn't find a good item in the supertraits (or couldn't get // the supertraits), like in ItemCtxt, then look more generally from // all visible traits. If there's one clear winner, just suggest that. - let visible_traits: Vec<_> = self - .tcx() + let visible_traits: Vec<_> = tcx .all_traits() .filter(|trait_def_id| { - let viz = self.tcx().visibility(*trait_def_id); + let viz = tcx.visibility(*trait_def_id); let def_id = self.item_def_id(); - viz.is_accessible_from(def_id, self.tcx()) + viz.is_accessible_from(def_id, tcx) }) .collect(); let wider_candidate_names: Vec<_> = visible_traits .iter() - .flat_map(|trait_def_id| { - self.tcx().associated_items(*trait_def_id).in_definition_order() - }) + .flat_map(|trait_def_id| tcx.associated_items(*trait_def_id).in_definition_order()) .filter_map(|item| { - if !item.is_impl_trait_in_trait() && item.kind == ty::AssocKind::Type { - Some(item.name) - } else { - None - } + (!item.is_impl_trait_in_trait() && item.kind == assoc_kind).then_some(item.name) }) .collect(); @@ -182,52 +188,51 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { { if let [best_trait] = visible_traits .iter() + .copied() .filter(|trait_def_id| { - self.tcx() - .associated_items(*trait_def_id) + tcx.associated_items(trait_def_id) .filter_by_name_unhygienic(suggested_name) - .any(|item| item.kind == ty::AssocKind::Type) + .any(|item| item.kind == assoc_kind) }) .collect::<Vec<_>>()[..] 
{ - let trait_name = self.tcx().def_path_str(*best_trait); - let an = if suggested_name != assoc_name.name { "a similarly named" } else { "an" }; - err.span_label( - assoc_name.span, - format!( - "there is {an} associated type `{suggested_name}` in the \ - trait `{trait_name}`", - ), - ); - let hir = self.tcx().hir(); + let trait_name = tcx.def_path_str(best_trait); + err.label = Some(errors::AssocItemNotFoundLabel::FoundInOtherTrait { + span: assoc_name.span, + assoc_kind: assoc_kind_str, + trait_name: &trait_name, + suggested_name, + identically_named: suggested_name == assoc_name.name, + }); + let hir = tcx.hir(); if let Some(def_id) = ty_param_def_id - && let parent = hir.get_parent_item(self.tcx().local_def_id_to_hir_id(def_id)) + && let parent = hir.get_parent_item(tcx.local_def_id_to_hir_id(def_id)) && let Some(generics) = hir.get_generics(parent.def_id) { if generics.bounds_for_param(def_id).flat_map(|pred| pred.bounds.iter()).any( |b| match b { hir::GenericBound::Trait(t, ..) => { - t.trait_ref.trait_def_id().as_ref() == Some(best_trait) + t.trait_ref.trait_def_id() == Some(best_trait) } _ => false, }, ) { // The type param already has a bound for `trait_name`, we just need to - // change the associated type. - err.span_suggestion_verbose( - assoc_name.span, - format!( - "change the associated type name to use `{suggested_name}` from \ - `{trait_name}`", - ), - suggested_name.to_string(), - Applicability::MaybeIncorrect, - ); - } else if suggest_constraining_type_param( - self.tcx(), + // change the associated item. + err.sugg = Some(errors::AssocItemNotFoundSugg::SimilarInOtherTrait { + span: assoc_name.span, + assoc_kind: assoc_kind_str, + suggested_name, + }); + return tcx.sess.emit_err(err); + } + + let mut err = tcx.sess.create_err(err); + if suggest_constraining_type_param( + tcx, generics, &mut err, - ty_param_name, + &ty_param_name, &trait_name, None, None, @@ -237,39 +242,101 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // was also not an exact match, so we also suggest changing it. err.span_suggestion_verbose( assoc_name.span, - "and also change the associated type name", + fluent::hir_analysis_assoc_item_not_found_similar_in_other_trait_with_bound_sugg, suggested_name.to_string(), Applicability::MaybeIncorrect, ); } + return err.emit(); } - return err.emit(); + return tcx.sess.emit_err(err); } } // If we still couldn't find any associated type, and only one associated type exists, // suggests using it. 
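The "similar name" suggestions threaded through this diagnostic come from `find_best_match_for_name`, i.e. picking the candidate with the smallest edit distance under a length-relative cutoff. A rough standalone model of that idea (the cutoff and tie-breaking here are assumptions, not the compiler's exact heuristic):

fn levenshtein(a: &str, b: &str) -> usize {
    let a: Vec<char> = a.chars().collect();
    let b: Vec<char> = b.chars().collect();
    let mut prev: Vec<usize> = (0..=b.len()).collect();
    let mut curr = vec![0usize; b.len() + 1];
    for i in 1..=a.len() {
        curr[0] = i;
        for j in 1..=b.len() {
            let cost = usize::from(a[i - 1] != b[j - 1]);
            curr[j] = (prev[j] + 1).min(curr[j - 1] + 1).min(prev[j - 1] + cost);
        }
        std::mem::swap(&mut prev, &mut curr);
    }
    prev[b.len()]
}

fn suggest_similar<'a>(candidates: &[&'a str], lookup: &str) -> Option<&'a str> {
    // Assumed cutoff: allow roughly a third of the looked-up name to differ.
    let max_dist = lookup.len() / 3 + 1;
    candidates
        .iter()
        .copied()
        .map(|cand| (levenshtein(cand, lookup), cand))
        .filter(|&(dist, _)| dist <= max_dist)
        .min_by_key(|&(dist, _)| dist)
        .map(|(_, cand)| cand)
}

fn main() {
    // A typo'd associated item name maps to the closest real candidate.
    assert_eq!(suggest_similar(&["Item", "IntoIter", "Output"], "Itme"), Some("Item"));
}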
- - if all_candidate_names.len() == 1 { + if let [candidate_name] = all_candidate_names.as_slice() { // this should still compile, except on `#![feature(associated_type_defaults)]` // where it could suggests `type A = Self::A`, thus recursing infinitely - let applicability = if self.tcx().features().associated_type_defaults { + let applicability = if tcx.features().associated_type_defaults { Applicability::Unspecified } else { Applicability::MaybeIncorrect }; - err.span_suggestion( - assoc_name.span, - format!("`{ty_param_name}` has the following associated type"), - all_candidate_names.first().unwrap().to_string(), + err.sugg = Some(errors::AssocItemNotFoundSugg::Other { + span: assoc_name.span, applicability, - ); + ty_param_name, + assoc_kind: assoc_kind_str, + suggested_name: *candidate_name, + }); } else { - err.span_label(assoc_name.span, format!("associated type `{assoc_name}` not found")); + err.label = Some(errors::AssocItemNotFoundLabel::NotFound { span: assoc_name.span }); } - err.emit() + tcx.sess.emit_err(err) + } + + fn complain_about_assoc_kind_mismatch( + &self, + assoc_item: &ty::AssocItem, + assoc_kind: ty::AssocKind, + ident: Ident, + span: Span, + binding: Option<&super::ConvertedBinding<'_, 'tcx>>, + ) -> ErrorGuaranteed { + let tcx = self.tcx(); + + let bound_on_assoc_const_label = if let ty::AssocKind::Const = assoc_item.kind + && let Some(binding) = binding + && let ConvertedBindingKind::Constraint(_) = binding.kind + { + let lo = if binding.gen_args.span_ext.is_dummy() { + ident.span + } else { + binding.gen_args.span_ext + }; + Some(lo.between(span.shrink_to_hi())) + } else { + None + }; + + // FIXME(associated_const_equality): This has quite a few false positives and negatives. + let wrap_in_braces_sugg = if let Some(binding) = binding + && let ConvertedBindingKind::Equality(term) = binding.kind + && let ty::TermKind::Ty(ty) = term.node.unpack() + && (ty.is_enum() || ty.references_error()) + && tcx.features().associated_const_equality + { + Some(errors::AssocKindMismatchWrapInBracesSugg { + lo: term.span.shrink_to_lo(), + hi: term.span.shrink_to_hi(), + }) + } else { + None + }; + + // For equality bounds, we want to blame the term (RHS) instead of the item (LHS) since + // one can argue that that's more “untuitive” to the user. 
+ let (span, expected_because_label, expected, got) = if let Some(binding) = binding + && let ConvertedBindingKind::Equality(term) = binding.kind + { + (term.span, Some(ident.span), assoc_item.kind, assoc_kind) + } else { + (ident.span, None, assoc_kind, assoc_item.kind) + }; + + tcx.sess.emit_err(errors::AssocKindMismatch { + span, + expected: super::assoc_kind_str(expected), + got: super::assoc_kind_str(got), + expected_because_label, + assoc_kind: super::assoc_kind_str(assoc_item.kind), + def_span: tcx.def_span(assoc_item.def_id), + bound_on_assoc_const_label, + wrap_in_braces_sugg, + }) } pub(crate) fn complain_about_ambiguous_inherent_assoc_type( @@ -598,7 +665,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let assoc_item = tcx.associated_items(trait_def).find_by_name_and_kind( tcx, ident, - AssocKind::Type, + ty::AssocKind::Type, trait_def, ); @@ -606,7 +673,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { })) }) .flatten() - .collect::<FxHashMap<Symbol, &AssocItem>>(); + .collect::<FxHashMap<Symbol, &ty::AssocItem>>(); let mut names = names .into_iter() diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs index 0f06407f445..20d36a1b069 100644 --- a/compiler/rustc_hir_analysis/src/astconv/mod.rs +++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs @@ -18,8 +18,8 @@ use crate::require_c_abi_if_c_variadic; use rustc_ast::TraitObjectSyntax; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_errors::{ - struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, FatalError, - MultiSpan, + error_code, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, + FatalError, MultiSpan, }; use rustc_hir as hir; use rustc_hir::def::{CtorOf, DefKind, Namespace, Res}; @@ -36,6 +36,7 @@ use rustc_middle::ty::{ }; use rustc_session::lint::builtin::AMBIGUOUS_ASSOCIATED_ITEMS; use rustc_span::edit_distance::find_best_match_for_name; +use rustc_span::source_map::{respan, Spanned}; use rustc_span::symbol::{kw, Ident, Symbol}; use rustc_span::{sym, BytePos, Span, DUMMY_SP}; use rustc_target::spec::abi; @@ -162,7 +163,7 @@ struct ConvertedBinding<'a, 'tcx> { #[derive(Debug)] enum ConvertedBindingKind<'a, 'tcx> { - Equality(ty::Term<'tcx>), + Equality(Spanned<ty::Term<'tcx>>), Constraint(&'a [hir::GenericBound<'a>]), } @@ -595,12 +596,14 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { .map(|binding| { let kind = match &binding.kind { hir::TypeBindingKind::Equality { term } => match term { - hir::Term::Ty(ty) => { - ConvertedBindingKind::Equality(self.ast_ty_to_ty(ty).into()) - } + hir::Term::Ty(ty) => ConvertedBindingKind::Equality(respan( + ty.span, + self.ast_ty_to_ty(ty).into(), + )), hir::Term::Const(c) => { + let span = self.tcx().def_span(c.def_id); let c = Const::from_anon_const(self.tcx(), c.def_id); - ConvertedBindingKind::Equality(c.into()) + ConvertedBindingKind::Equality(respan(span, c.into())) } }, hir::TypeBindingKind::Constraint { bounds } => { @@ -1056,7 +1059,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { debug!("find_bound_for_assoc_item: predicates={:#?}", predicates); let param_name = tcx.hir().ty_param_name(ty_param_def_id); - self.one_bound_for_assoc_type( + self.one_bound_for_assoc_item( || { traits::transitive_bounds_that_define_assoc_item( tcx, @@ -1068,6 +1071,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { }, param_name, Some(ty_param_def_id), + ty::AssocKind::Type, assoc_name, span, None, @@ -1076,48 +1080,45 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // Checks that `bounds` 
contains exactly one element and reports appropriate // errors otherwise. - #[instrument(level = "debug", skip(self, all_candidates, ty_param_name, is_equality), ret)] - fn one_bound_for_assoc_type<I>( + #[instrument(level = "debug", skip(self, all_candidates, ty_param_name, binding), ret)] + fn one_bound_for_assoc_item<I>( &self, all_candidates: impl Fn() -> I, ty_param_name: impl Display, ty_param_def_id: Option<LocalDefId>, + assoc_kind: ty::AssocKind, assoc_name: Ident, span: Span, - is_equality: Option<ty::Term<'tcx>>, + binding: Option<&ConvertedBinding<'_, 'tcx>>, ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed> where I: Iterator<Item = ty::PolyTraitRef<'tcx>>, { + let tcx = self.tcx(); + let mut matching_candidates = all_candidates().filter(|r| { - self.trait_defines_associated_item_named(r.def_id(), ty::AssocKind::Type, assoc_name) - }); - let mut const_candidates = all_candidates().filter(|r| { - self.trait_defines_associated_item_named(r.def_id(), ty::AssocKind::Const, assoc_name) + self.trait_defines_associated_item_named(r.def_id(), assoc_kind, assoc_name) }); - let (mut bound, mut next_cand) = match (matching_candidates.next(), const_candidates.next()) - { - (Some(bound), _) => (bound, matching_candidates.next()), - (None, Some(bound)) => (bound, const_candidates.next()), - (None, None) => { - let reported = self.complain_about_assoc_type_not_found( - all_candidates, - &ty_param_name.to_string(), - ty_param_def_id, - assoc_name, - span, - ); - return Err(reported); - } + let Some(mut bound) = matching_candidates.next() else { + let reported = self.complain_about_assoc_item_not_found( + all_candidates, + &ty_param_name.to_string(), + ty_param_def_id, + assoc_kind, + assoc_name, + span, + binding, + ); + return Err(reported); }; debug!(?bound); // look for a candidate that is not the same as our first bound, disregarding // whether the bound is const. + let mut next_cand = matching_candidates.next(); while let Some(mut bound2) = next_cand { debug!(?bound2); - let tcx = self.tcx(); if bound2.bound_vars() != bound.bound_vars() { break; } @@ -1138,7 +1139,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { .map(|(n, arg)| if host_index == n { tcx.consts.true_.into() } else { arg }); if unconsted_args.eq(bound2.skip_binder().args.iter()) { - next_cand = matching_candidates.next().or_else(|| const_candidates.next()); + next_cand = matching_candidates.next(); } else { break; } @@ -1147,48 +1148,53 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { if let Some(bound2) = next_cand { debug!(?bound2); - let bounds = IntoIterator::into_iter([bound, bound2]).chain(matching_candidates); - let mut err = if is_equality.is_some() { - // More specific Error Index entry. - struct_span_err!( - self.tcx().sess, - span, - E0222, - "ambiguous associated type `{}` in bounds of `{}`", - assoc_name, - ty_param_name - ) - } else { - struct_span_err!( - self.tcx().sess, - span, - E0221, - "ambiguous associated type `{}` in bounds of `{}`", - assoc_name, - ty_param_name - ) - }; - err.span_label(span, format!("ambiguous associated type `{assoc_name}`")); + let assoc_kind_str = assoc_kind_str(assoc_kind); + let ty_param_name = &ty_param_name.to_string(); + let mut err = tcx.sess.create_err(crate::errors::AmbiguousAssocItem { + span, + assoc_kind: assoc_kind_str, + assoc_name, + ty_param_name, + }); + // Provide a more specific error code index entry for equality bindings. 
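For context on the E0221 path above (several bounds each supplying an item with the requested name), the ambiguity being reported is the one users resolve with fully qualified syntax, for example:

trait Parser { type Output; }
trait Lexer { type Output; }

// With a bare `T::Output` the compiler cannot tell which of the two bounds
// supplies `Output` (E0221); fully qualified syntax names the trait explicitly.
fn run<T: Parser + Lexer>(out: <T as Parser>::Output) -> <T as Parser>::Output {
    out
}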
+ err.code( + if let Some(binding) = binding + && let ConvertedBindingKind::Equality(_) = binding.kind + { + error_code!(E0222) + } else { + error_code!(E0221) + }, + ); + // FIXME(#97583): Resugar equality bounds to type/const bindings. + // FIXME: Turn this into a structured, translateable & more actionable suggestion. let mut where_bounds = vec![]; - for bound in bounds { + for bound in [bound, bound2].into_iter().chain(matching_candidates) { let bound_id = bound.def_id(); - let bound_span = self - .tcx() + let bound_span = tcx .associated_items(bound_id) - .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, bound_id) - .and_then(|item| self.tcx().hir().span_if_local(item.def_id)); + .find_by_name_and_kind(tcx, assoc_name, assoc_kind, bound_id) + .and_then(|item| tcx.hir().span_if_local(item.def_id)); if let Some(bound_span) = bound_span { err.span_label( bound_span, format!("ambiguous `{assoc_name}` from `{}`", bound.print_trait_sugared(),), ); - if let Some(constraint) = &is_equality { - where_bounds.push(format!( - " T: {trait}::{assoc_name} = {constraint}", - trait=bound.print_only_trait_path(), - )); + if let Some(binding) = binding { + match binding.kind { + ConvertedBindingKind::Equality(term) => { + // FIXME(#97583): This isn't syntactically well-formed! + where_bounds.push(format!( + " T: {trait}::{assoc_name} = {term}", + trait = bound.print_only_trait_path(), + term = term.node, + )); + } + // FIXME: Provide a suggestion. + ConvertedBindingKind::Constraint(_bounds) => {} + } } else { err.span_suggestion_verbose( span.with_hi(assoc_name.span.lo()), @@ -1199,7 +1205,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { } } else { err.note(format!( - "associated type `{ty_param_name}` could derive from `{}`", + "associated {assoc_kind_str} `{assoc_name}` could derive from `{}`", bound.print_only_trait_path(), )); } @@ -1220,46 +1226,6 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { Ok(bound) } - #[instrument(level = "debug", skip(self, all_candidates, ty_name), ret)] - fn one_bound_for_assoc_method( - &self, - all_candidates: impl Iterator<Item = ty::PolyTraitRef<'tcx>>, - ty_name: impl Display, - assoc_name: Ident, - span: Span, - ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed> { - let mut matching_candidates = all_candidates.filter(|r| { - self.trait_defines_associated_item_named(r.def_id(), ty::AssocKind::Fn, assoc_name) - }); - - let candidate = match matching_candidates.next() { - Some(candidate) => candidate, - None => { - return Err(self.tcx().sess.emit_err( - crate::errors::ReturnTypeNotationMissingMethod { - span, - ty_name: ty_name.to_string(), - assoc_name: assoc_name.name, - }, - )); - } - }; - - if let Some(conflicting_candidate) = matching_candidates.next() { - return Err(self.tcx().sess.emit_err( - crate::errors::ReturnTypeNotationConflictingBound { - span, - ty_name: ty_name.to_string(), - assoc_name: assoc_name.name, - first_bound: candidate.print_only_trait_path(), - second_bound: conflicting_candidate.print_only_trait_path(), - }, - )); - } - - Ok(candidate) - } - // Create a type from a path to an associated type or to an enum variant. // For a path `A::B::C::D`, `qself_ty` and `qself_def` are the type and def for `A::B::C` // and item_segment is the path segment for `D`. 
We return a type and a def for @@ -1421,7 +1387,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { return Err(guar); }; - self.one_bound_for_assoc_type( + self.one_bound_for_assoc_item( || { traits::supertraits( tcx, @@ -1430,6 +1396,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { }, kw::SelfUpper, None, + ty::AssocKind::Type, assoc_ident, span, None, @@ -1510,15 +1477,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { }; let trait_did = bound.def_id(); - let Some(assoc_ty_did) = self.lookup_assoc_ty(assoc_ident, hir_ref_id, span, trait_did) - else { - // Assume that if it's not matched, there must be a const defined with the same name - // but it was used in a type position. - let msg = format!("found associated const `{assoc_ident}` when type was expected"); - let guar = tcx.sess.struct_span_err(span, msg).emit(); - return Err(guar); - }; - + let assoc_ty_did = self.lookup_assoc_ty(assoc_ident, hir_ref_id, span, trait_did).unwrap(); let ty = self.projected_ty_from_poly_trait_ref(span, assoc_ty_did, assoc_segment, bound); if let Some(variant_def_id) = variant_resolution { @@ -1731,8 +1690,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let tcx = self.tcx(); let (ident, def_scope) = tcx.adjust_ident_and_get_scope(name, scope, block); - // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead - // of calling `find_by_name_and_kind`. + // We have already adjusted the item name above, so compare with `.normalize_to_macros_2_0()` + // instead of calling `filter_by_name_and_kind` which would needlessly normalize the + // `ident` again and again. let item = tcx.associated_items(scope).in_definition_order().find(|i| { i.kind.namespace() == Namespace::TypeNS && i.ident(tcx).normalize_to_macros_2_0() == ident @@ -2858,3 +2818,11 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { Some(r) } } + +fn assoc_kind_str(kind: ty::AssocKind) -> &'static str { + match kind { + ty::AssocKind::Fn => "function", + ty::AssocKind::Const => "constant", + ty::AssocKind::Type => "type", + } +} diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 7ea21b24fc8..33337190562 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -521,6 +521,8 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) sym::simd_fpowi => (1, 0, vec![param(0), tcx.types.i32], param(0)), sym::simd_fma => (1, 0, vec![param(0), param(0), param(0)], param(0)), sym::simd_gather => (3, 0, vec![param(0), param(1), param(2)], param(0)), + sym::simd_masked_load => (3, 0, vec![param(0), param(1), param(2)], param(2)), + sym::simd_masked_store => (3, 0, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)), sym::simd_scatter => (3, 0, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)), sym::simd_insert => (2, 0, vec![param(0), tcx.types.u32, param(1)], param(0)), sym::simd_extract => (2, 0, vec![param(0), tcx.types.u32], param(1)), diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index dd83b5b6f2c..a4772293697 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -6,10 +6,122 @@ use rustc_errors::{ MultiSpan, }; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; -use rustc_middle::ty::{self, print::TraitRefPrintOnlyTraitPath, Ty}; +use rustc_middle::ty::Ty; use rustc_span::{symbol::Ident, Span, Symbol}; #[derive(Diagnostic)] 
+#[diag(hir_analysis_ambiguous_assoc_item)] +pub struct AmbiguousAssocItem<'a> { + #[primary_span] + #[label] + pub span: Span, + pub assoc_kind: &'static str, + pub assoc_name: Ident, + pub ty_param_name: &'a str, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_assoc_kind_mismatch)] +pub struct AssocKindMismatch { + #[primary_span] + #[label] + pub span: Span, + pub expected: &'static str, + pub got: &'static str, + #[label(hir_analysis_expected_because_label)] + pub expected_because_label: Option<Span>, + pub assoc_kind: &'static str, + #[note] + pub def_span: Span, + #[label(hir_analysis_bound_on_assoc_const_label)] + pub bound_on_assoc_const_label: Option<Span>, + #[subdiagnostic] + pub wrap_in_braces_sugg: Option<AssocKindMismatchWrapInBracesSugg>, +} + +#[derive(Subdiagnostic)] +#[multipart_suggestion( + hir_analysis_assoc_kind_mismatch_wrap_in_braces_sugg, + applicability = "maybe-incorrect" +)] +pub struct AssocKindMismatchWrapInBracesSugg { + #[suggestion_part(code = "{{ ")] + pub lo: Span, + #[suggestion_part(code = " }}")] + pub hi: Span, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_assoc_item_not_found, code = "E0220")] +pub struct AssocItemNotFound<'a> { + #[primary_span] + pub span: Span, + pub assoc_name: Ident, + pub assoc_kind: &'static str, + pub ty_param_name: &'a str, + #[subdiagnostic] + pub label: Option<AssocItemNotFoundLabel<'a>>, + #[subdiagnostic] + pub sugg: Option<AssocItemNotFoundSugg<'a>>, +} + +#[derive(Subdiagnostic)] +pub enum AssocItemNotFoundLabel<'a> { + #[label(hir_analysis_assoc_item_not_found_label)] + NotFound { + #[primary_span] + span: Span, + }, + #[label(hir_analysis_assoc_item_not_found_found_in_other_trait_label)] + FoundInOtherTrait { + #[primary_span] + span: Span, + assoc_kind: &'static str, + trait_name: &'a str, + suggested_name: Symbol, + identically_named: bool, + }, +} + +#[derive(Subdiagnostic)] + +pub enum AssocItemNotFoundSugg<'a> { + #[suggestion( + hir_analysis_assoc_item_not_found_similar_sugg, + code = "{suggested_name}", + applicability = "maybe-incorrect" + )] + Similar { + #[primary_span] + span: Span, + assoc_kind: &'static str, + suggested_name: Symbol, + }, + #[suggestion( + hir_analysis_assoc_item_not_found_similar_in_other_trait_sugg, + code = "{suggested_name}", + style = "verbose", + applicability = "maybe-incorrect" + )] + SimilarInOtherTrait { + #[primary_span] + span: Span, + assoc_kind: &'static str, + suggested_name: Symbol, + }, + #[suggestion(hir_analysis_assoc_item_not_found_other_sugg, code = "{suggested_name}")] + Other { + #[primary_span] + span: Span, + #[applicability] + applicability: Applicability, + ty_param_name: &'a str, + assoc_kind: &'static str, + suggested_name: Symbol, + }, +} + +#[derive(Diagnostic)] #[diag(hir_analysis_unrecognized_atomic_operation, code = "E0092")] pub struct UnrecognizedAtomicOperation<'a> { #[primary_span] @@ -538,27 +650,6 @@ pub(crate) struct ReturnTypeNotationEqualityBound { } #[derive(Diagnostic)] -#[diag(hir_analysis_return_type_notation_missing_method)] -pub(crate) struct ReturnTypeNotationMissingMethod { - #[primary_span] - pub span: Span, - pub ty_name: String, - pub assoc_name: Symbol, -} - -#[derive(Diagnostic)] -#[diag(hir_analysis_return_type_notation_conflicting_bound)] -#[note] -pub(crate) struct ReturnTypeNotationConflictingBound<'tcx> { - #[primary_span] - pub span: Span, - pub ty_name: String, - pub assoc_name: Symbol, - pub first_bound: ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>, - pub second_bound: ty::Binder<'tcx, 
TraitRefPrintOnlyTraitPath<'tcx>>, -} - -#[derive(Diagnostic)] #[diag(hir_analysis_placeholder_not_allowed_item_signatures, code = "E0121")] pub(crate) struct PlaceholderNotAllowedItemSignatures { #[primary_span] @@ -955,15 +1046,6 @@ pub(crate) struct ReturnPositionImplTraitInTraitRefined<'tcx> { } #[derive(Diagnostic)] -#[diag(hir_analysis_assoc_bound_on_const)] -#[note] -pub struct AssocBoundOnConst { - #[primary_span] - pub span: Span, - pub descr: &'static str, -} - -#[derive(Diagnostic)] #[diag(hir_analysis_inherent_ty_outside, code = "E0390")] #[help] pub struct InherentTyOutside { diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs index 0cd1ae2dbfe..19b566ff9fa 100644 --- a/compiler/rustc_hir_typeck/src/check.rs +++ b/compiler/rustc_hir_typeck/src/check.rs @@ -67,6 +67,28 @@ pub(super) fn check_fn<'a, 'tcx>( fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType); yield_ty } + // HACK(-Ztrait-solver=next): In the *old* trait solver, we must eagerly + // guide inference on the yield type so that we can handle `AsyncIterator` + // in this block in projection correctly. In the new trait solver, it is + // not a problem. + hir::CoroutineKind::AsyncGen(..) => { + let yield_ty = fcx.next_ty_var(TypeVariableOrigin { + kind: TypeVariableOriginKind::TypeInference, + span, + }); + fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType); + + Ty::new_adt( + tcx, + tcx.adt_def(tcx.require_lang_item(hir::LangItem::Poll, Some(span))), + tcx.mk_args(&[Ty::new_adt( + tcx, + tcx.adt_def(tcx.require_lang_item(hir::LangItem::Option, Some(span))), + tcx.mk_args(&[yield_ty.into()]), + ) + .into()]), + ) + } hir::CoroutineKind::Async(..) => Ty::new_unit(tcx), }; diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs index e1a2a260df7..df840aaa578 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs @@ -657,19 +657,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { | ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(..)) | ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(..)) | ty::PredicateKind::ObjectSafe(..) + | ty::PredicateKind::NormalizesTo(..) | ty::PredicateKind::AliasRelate(..) | ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(..)) | ty::PredicateKind::ConstEquate(..) - // N.B., this predicate is created by breaking down a - // `ClosureType: FnFoo()` predicate, where - // `ClosureType` represents some `Closure`. It can't - // possibly be referring to the current closure, - // because we haven't produced the `Closure` for - // this closure yet; this is exactly why the other - // code is looking for a self type of an unresolved - // inference variable. 
- | ty::PredicateKind::Ambiguous - => None, + | ty::PredicateKind::Ambiguous => None, }, ) } @@ -771,6 +763,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let args = self.fresh_args_for_item(span, def_id); let ty = item_ty.instantiate(self.tcx, args); + self.write_args(hir_id, args); self.write_resolution(hir_id, Ok((def_kind, def_id))); let code = match lang_item { diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index 143454c71e1..b38a6ebd501 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -1591,10 +1591,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { let sig = self.tcx.fn_sig(assoc.def_id).instantiate_identity(); sig.inputs().skip_binder().get(0).and_then(|first| { - if first.peel_refs() == rcvr_ty.peel_refs() { - None - } else { + let impl_ty = self.tcx.type_of(*impl_did).instantiate_identity(); + // if the type of first arg is the same as the current impl type, we should take the first arg into assoc function + if first.peel_refs() == impl_ty { Some(first.ref_mutability().map_or("", |mutbl| mutbl.ref_prefix_str())) + } else { + None } }) } else { diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs index 759ebaa1d1e..bab21bc237a 100644 --- a/compiler/rustc_infer/src/infer/combine.rs +++ b/compiler/rustc_infer/src/infer/combine.rs @@ -35,6 +35,7 @@ use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKin use rustc_middle::ty::error::{ExpectedFound, TypeError}; use rustc_middle::ty::relate::{RelateResult, TypeRelation}; use rustc_middle::ty::{self, InferConst, ToPredicate, Ty, TyCtxt, TypeVisitableExt}; +use rustc_middle::ty::{AliasRelationDirection, TyVar}; use rustc_middle::ty::{IntType, UintType}; use rustc_span::DUMMY_SP; @@ -459,7 +460,12 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> { ambient_variance, )?; - self.infcx.inner.borrow_mut().type_variables().instantiate(b_vid, b_ty); + // Constrain `b_vid` to the generalized type `b_ty`. + if let &ty::Infer(TyVar(b_ty_vid)) = b_ty.kind() { + self.infcx.inner.borrow_mut().type_variables().equate(b_vid, b_ty_vid); + } else { + self.infcx.inner.borrow_mut().type_variables().instantiate(b_vid, b_ty); + } if needs_wf { self.obligations.push(Obligation::new( @@ -484,31 +490,46 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> { // cyclic type. We instead delay the unification in case // the alias can be normalized to something which does not // mention `?0`. - - // FIXME(-Ztrait-solver=next): replace this with `AliasRelate` - let &ty::Alias(kind, data) = a_ty.kind() else { - bug!("generalization should only result in infer vars for aliases"); - }; - if !self.infcx.next_trait_solver() { - // The old solver only accepts projection predicates for associated types. 
- match kind { - ty::AliasKind::Projection => {} - ty::AliasKind::Inherent | ty::AliasKind::Weak | ty::AliasKind::Opaque => { - return Err(TypeError::CyclicTy(a_ty)); + if self.infcx.next_trait_solver() { + let (lhs, rhs, direction) = match ambient_variance { + ty::Variance::Invariant => { + (a_ty.into(), b_ty.into(), AliasRelationDirection::Equate) + } + ty::Variance::Covariant => { + (a_ty.into(), b_ty.into(), AliasRelationDirection::Subtype) + } + ty::Variance::Contravariant => { + (b_ty.into(), a_ty.into(), AliasRelationDirection::Subtype) + } + ty::Variance::Bivariant => unreachable!("bivariant generalization"), + }; + self.obligations.push(Obligation::new( + self.tcx(), + self.trace.cause.clone(), + self.param_env, + ty::PredicateKind::AliasRelate(lhs, rhs, direction), + )); + } else { + match a_ty.kind() { + &ty::Alias(ty::AliasKind::Projection, data) => { + // FIXME: This does not handle subtyping correctly, we could + // instead create a new inference variable for `a_ty`, emitting + // `Projection(a_ty, a_infer)` and `a_infer <: b_ty`. + self.obligations.push(Obligation::new( + self.tcx(), + self.trace.cause.clone(), + self.param_env, + ty::ProjectionPredicate { projection_ty: data, term: b_ty.into() }, + )) } + // The old solver only accepts projection predicates for associated types. + ty::Alias( + ty::AliasKind::Inherent | ty::AliasKind::Weak | ty::AliasKind::Opaque, + _, + ) => return Err(TypeError::CyclicTy(a_ty)), + _ => bug!("generalizated `{a_ty:?} to infer, not an alias"), } } - - // FIXME: This does not handle subtyping correctly, we should switch to - // alias-relate in the new solver and could instead create a new inference - // variable for `a_ty`, emitting `Projection(a_ty, a_infer)` and - // `a_infer <: b_ty`. - self.obligations.push(Obligation::new( - self.tcx(), - self.trace.cause.clone(), - self.param_env, - ty::ProjectionPredicate { projection_ty: data, term: b_ty.into() }, - )) } else { match ambient_variance { ty::Variance::Invariant => self.equate(a_is_expected).relate(a_ty, b_ty), @@ -519,9 +540,7 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> { a_ty, b_ty, ), - ty::Variance::Bivariant => { - unreachable!("no code should be generalizing bivariantly (currently)") - } + ty::Variance::Bivariant => unreachable!("bivariant generalization"), }?; } diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs index ecaf9f4e169..745c3d195ad 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs @@ -1181,37 +1181,54 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { debug!("cmp(t1={}, t1.kind={:?}, t2={}, t2.kind={:?})", t1, t1.kind(), t2, t2.kind()); // helper functions - fn equals<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match (a.kind(), b.kind()) { - (a, b) if *a == *b => true, - (&ty::Int(_), &ty::Infer(ty::InferTy::IntVar(_))) - | ( - &ty::Infer(ty::InferTy::IntVar(_)), - &ty::Int(_) | &ty::Infer(ty::InferTy::IntVar(_)), - ) - | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_))) - | ( - &ty::Infer(ty::InferTy::FloatVar(_)), - &ty::Float(_) | &ty::Infer(ty::InferTy::FloatVar(_)), - ) => true, - _ => false, + let recurse = |t1, t2, values: &mut (DiagnosticStyledString, DiagnosticStyledString)| { + let (x1, x2) = self.cmp(t1, t2); + (values.0).0.extend(x1.0); + (values.1).0.extend(x2.0); + }; + + fn fmt_region<'tcx>(region: ty::Region<'tcx>) -> String { + let mut r = region.to_string(); + if r == "'_" { + r.clear(); + } else { + r.push(' 
'); } + format!("&{r}") } - fn push_ty_ref<'tcx>( + fn push_ref<'tcx>( region: ty::Region<'tcx>, - ty: Ty<'tcx>, mutbl: hir::Mutability, s: &mut DiagnosticStyledString, ) { - let mut r = region.to_string(); - if r == "'_" { - r.clear(); + s.push_highlighted(fmt_region(region)); + s.push_highlighted(mutbl.prefix_str()); + } + + fn cmp_ty_refs<'tcx>( + r1: ty::Region<'tcx>, + mut1: hir::Mutability, + r2: ty::Region<'tcx>, + mut2: hir::Mutability, + ss: &mut (DiagnosticStyledString, DiagnosticStyledString), + ) { + let (r1, r2) = (fmt_region(r1), fmt_region(r2)); + if r1 != r2 { + ss.0.push_highlighted(r1); + ss.1.push_highlighted(r2); } else { - r.push(' '); + ss.0.push_normal(r1); + ss.1.push_normal(r2); + } + + if mut1 != mut2 { + ss.0.push_highlighted(mut1.prefix_str()); + ss.1.push_highlighted(mut2.prefix_str()); + } else { + ss.0.push_normal(mut1.prefix_str()); + ss.1.push_normal(mut2.prefix_str()); } - s.push_highlighted(format!("&{}{}", r, mutbl.prefix_str())); - s.push_normal(ty.to_string()); } // process starts here @@ -1310,9 +1327,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { values.0.push_normal("_"); values.1.push_normal("_"); } else { - let (x1, x2) = self.cmp(ta1, ta2); - (values.0).0.extend(x1.0); - (values.1).0.extend(x2.0); + recurse(ta1, ta2, &mut values); } self.push_comma(&mut values.0, &mut values.1, len, i); } @@ -1418,27 +1433,24 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { } } - // When finding T != &T, highlight only the borrow - (&ty::Ref(r1, ref_ty1, mutbl1), _) if equals(ref_ty1, t2) => { + // When finding `&T != &T`, compare the references, then recurse into pointee type + (&ty::Ref(r1, ref_ty1, mutbl1), &ty::Ref(r2, ref_ty2, mutbl2)) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - push_ty_ref(r1, ref_ty1, mutbl1, &mut values.0); - values.1.push_normal(t2.to_string()); + cmp_ty_refs(r1, mutbl1, r2, mutbl2, &mut values); + recurse(ref_ty1, ref_ty2, &mut values); values } - (_, &ty::Ref(r2, ref_ty2, mutbl2)) if equals(t1, ref_ty2) => { + // When finding T != &T, highlight the borrow + (&ty::Ref(r1, ref_ty1, mutbl1), _) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - values.0.push_normal(t1.to_string()); - push_ty_ref(r2, ref_ty2, mutbl2, &mut values.1); + push_ref(r1, mutbl1, &mut values.0); + recurse(ref_ty1, t2, &mut values); values } - - // When encountering &T != &mut T, highlight only the borrow - (&ty::Ref(r1, ref_ty1, mutbl1), &ty::Ref(r2, ref_ty2, mutbl2)) - if equals(ref_ty1, ref_ty2) => - { + (_, &ty::Ref(r2, ref_ty2, mutbl2)) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - push_ty_ref(r1, ref_ty1, mutbl1, &mut values.0); - push_ty_ref(r2, ref_ty2, mutbl2, &mut values.1); + push_ref(r2, mutbl2, &mut values.1); + recurse(t1, ref_ty2, &mut values); values } @@ -1448,9 +1460,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { (DiagnosticStyledString::normal("("), DiagnosticStyledString::normal("(")); let len = args1.len(); for (i, (left, right)) in args1.iter().zip(args2).enumerate() { - let (x1, x2) = self.cmp(left, right); - (values.0).0.extend(x1.0); - (values.1).0.extend(x2.0); + recurse(left, right, &mut values); self.push_comma(&mut values.0, &mut values.1, len, i); } if len == 1 { diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index 32c09e491c7..3a71251e73d 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -345,37 +345,61 @@ pub struct InferCtxt<'tcx> { impl<'tcx> 
ty::InferCtxtLike for InferCtxt<'tcx> { type Interner = TyCtxt<'tcx>; - fn universe_of_ty(&self, ty: ty::InferTy) -> Option<ty::UniverseIndex> { - use InferTy::*; - match ty { - // FIXME(BoxyUwU): this is kind of jank and means that printing unresolved - // ty infers will give you the universe of the var it resolved to not the universe - // it actually had. It also means that if you have a `?0.1` and infer it to `u8` then - // try to print out `?0.1` it will just print `?0`. - TyVar(ty_vid) => match self.probe_ty_var(ty_vid) { - Err(universe) => Some(universe), - Ok(_) => None, - }, - IntVar(_) | FloatVar(_) | FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_) => None, + fn interner(&self) -> TyCtxt<'tcx> { + self.tcx + } + + fn universe_of_ty(&self, vid: TyVid) -> Option<ty::UniverseIndex> { + // FIXME(BoxyUwU): this is kind of jank and means that printing unresolved + // ty infers will give you the universe of the var it resolved to not the universe + // it actually had. It also means that if you have a `?0.1` and infer it to `u8` then + // try to print out `?0.1` it will just print `?0`. + match self.probe_ty_var(vid) { + Err(universe) => Some(universe), + Ok(_) => None, } } - fn universe_of_ct(&self, ct: ty::InferConst) -> Option<ty::UniverseIndex> { - use ty::InferConst::*; - match ct { - // Same issue as with `universe_of_ty` - Var(ct_vid) => match self.probe_const_var(ct_vid) { - Err(universe) => Some(universe), - Ok(_) => None, - }, - EffectVar(_) => None, - Fresh(_) => None, + fn universe_of_ct(&self, ct: ConstVid) -> Option<ty::UniverseIndex> { + // Same issue as with `universe_of_ty` + match self.probe_const_var(ct) { + Err(universe) => Some(universe), + Ok(_) => None, } } fn universe_of_lt(&self, lt: ty::RegionVid) -> Option<ty::UniverseIndex> { Some(self.universe_of_region_vid(lt)) } + + fn root_ty_var(&self, vid: TyVid) -> TyVid { + self.root_var(vid) + } + + fn probe_ty_var(&self, vid: TyVid) -> Option<Ty<'tcx>> { + self.probe_ty_var(vid).ok() + } + + fn root_lt_var(&self, vid: ty::RegionVid) -> ty::RegionVid { + self.root_region_var(vid) + } + + fn probe_lt_var(&self, vid: ty::RegionVid) -> Option<ty::Region<'tcx>> { + let re = self + .inner + .borrow_mut() + .unwrap_region_constraints() + .opportunistic_resolve_var(self.tcx, vid); + if re.is_var() { None } else { Some(re) } + } + + fn root_ct_var(&self, vid: ConstVid) -> ConstVid { + self.root_const_var(vid) + } + + fn probe_ct_var(&self, vid: ConstVid) -> Option<ty::Const<'tcx>> { + self.probe_const_var(vid).ok() + } } /// See the `error_reporting` module for more details. 
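The `InferCtxtLike` methods above all lean on one probing convention: the inherent `probe_ty_var`/`probe_const_var` return `Ok(value)` once a variable has been instantiated and `Err(universe)` while it is still open, and the trait methods just reshape that into an `Option`. The following is a standalone sketch of that convention only; `ToyTable`, `ToyTy` and the hard-coded universes are invented for illustration and are not rustc types.

    // Toy model of the Ok/Err probe convention used by the InferCtxtLike impl.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct UniverseIndex(u32);

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ToyTy {
        Int,
    }

    /// One inference variable: either already known, or still open in some universe.
    #[derive(Clone, Copy, Debug)]
    enum VarState {
        Known(ToyTy),
        Unknown(UniverseIndex),
    }

    struct ToyTable {
        vars: Vec<VarState>,
    }

    impl ToyTable {
        /// Mirrors the inherent probe: `Ok(value)` once instantiated, `Err(universe)` otherwise.
        fn probe(&self, vid: usize) -> Result<ToyTy, UniverseIndex> {
            match self.vars[vid] {
                VarState::Known(ty) => Ok(ty),
                VarState::Unknown(ui) => Err(ui),
            }
        }

        /// Mirrors `universe_of_ty`: only unresolved variables report a universe.
        fn universe_of(&self, vid: usize) -> Option<UniverseIndex> {
            match self.probe(vid) {
                Err(universe) => Some(universe),
                Ok(_) => None,
            }
        }
    }

    fn main() {
        let table = ToyTable {
            vars: vec![VarState::Unknown(UniverseIndex(0)), VarState::Known(ToyTy::Int)],
        };
        assert_eq!(table.universe_of(0), Some(UniverseIndex(0)));
        assert_eq!(table.universe_of(1), None);
        println!("probe convention behaves as described");
    }

Running it exercises the two cases: an open variable reports the universe it lives in, an instantiated one does not.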
@@ -1347,6 +1371,10 @@ impl<'tcx> InferCtxt<'tcx> { self.inner.borrow_mut().type_variables().root_var(var) } + pub fn root_region_var(&self, var: ty::RegionVid) -> ty::RegionVid { + self.inner.borrow_mut().unwrap_region_constraints().root_var(var) + } + pub fn root_const_var(&self, var: ty::ConstVid) -> ty::ConstVid { self.inner.borrow_mut().const_unification_table().find(var).vid } diff --git a/compiler/rustc_infer/src/infer/region_constraints/mod.rs b/compiler/rustc_infer/src/infer/region_constraints/mod.rs index cbd8040c9f1..5c043b1d3dd 100644 --- a/compiler/rustc_infer/src/infer/region_constraints/mod.rs +++ b/compiler/rustc_infer/src/infer/region_constraints/mod.rs @@ -623,6 +623,11 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> { } } + pub fn root_var(&mut self, vid: ty::RegionVid) -> ty::RegionVid { + let mut ut = self.unification_table_mut(); // FIXME(rust-lang/ena#42): unnecessary mut + ut.find(vid).vid + } + fn combine_map(&mut self, t: CombineMapType) -> &mut CombineMap<'tcx> { match t { Glb => &mut self.glbs, diff --git a/compiler/rustc_infer/src/infer/type_variable.rs b/compiler/rustc_infer/src/infer/type_variable.rs index bc83f8d3f96..bd6f905c824 100644 --- a/compiler/rustc_infer/src/infer/type_variable.rs +++ b/compiler/rustc_infer/src/infer/type_variable.rs @@ -229,12 +229,11 @@ impl<'tcx> TypeVariableTable<'_, 'tcx> { /// Precondition: `vid` must not have been previously instantiated. pub fn instantiate(&mut self, vid: ty::TyVid, ty: Ty<'tcx>) { let vid = self.root_var(vid); + debug_assert!(!ty.is_ty_var(), "instantiating ty var with var: {vid:?} {ty:?}"); debug_assert!(self.probe(vid).is_unknown()); debug_assert!( self.eq_relations().probe_value(vid).is_unknown(), - "instantiating type variable `{:?}` twice: new-value = {:?}, old-value={:?}", - vid, - ty, + "instantiating type variable `{vid:?}` twice: new-value = {ty:?}, old-value={:?}", self.eq_relations().probe_value(vid) ); self.eq_relations().union_value(vid, TypeVariableValue::Known { value: ty }); diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs index 1bcae736fd9..50190058a76 100644 --- a/compiler/rustc_infer/src/traits/util.rs +++ b/compiler/rustc_infer/src/traits/util.rs @@ -261,9 +261,14 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> { fn elaborate(&mut self, elaboratable: &O) { let tcx = self.visited.tcx; - let bound_predicate = elaboratable.predicate().kind(); - match bound_predicate.skip_binder() { - ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => { + // We only elaborate clauses. + let Some(clause) = elaboratable.predicate().as_clause() else { + return; + }; + + let bound_clause = clause.kind(); + match bound_clause.skip_binder() { + ty::ClauseKind::Trait(data) => { // Negative trait bounds do not imply any supertrait bounds if data.polarity == ty::ImplPolarity::Negative { return; @@ -280,49 +285,16 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> { let obligations = predicates.predicates.iter().enumerate().map(|(index, &(clause, span))| { elaboratable.child_with_derived_cause( - clause.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)), + clause.subst_supertrait(tcx, &bound_clause.rebind(data.trait_ref)), span, - bound_predicate.rebind(data), + bound_clause.rebind(data), index, ) }); debug!(?data, ?obligations, "super_predicates"); self.extend_deduped(obligations); } - ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(..)) => { - // Currently, we do not elaborate WF predicates, - // although we easily could. 
- } - ty::PredicateKind::ObjectSafe(..) => { - // Currently, we do not elaborate object-safe - // predicates. - } - ty::PredicateKind::Subtype(..) => { - // Currently, we do not "elaborate" predicates like `X <: Y`, - // though conceivably we might. - } - ty::PredicateKind::Coerce(..) => { - // Currently, we do not "elaborate" predicates like `X -> Y`, - // though conceivably we might. - } - ty::PredicateKind::Clause(ty::ClauseKind::Projection(..)) => { - // Nothing to elaborate in a projection predicate. - } - ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(..)) => { - // Currently, we do not elaborate const-evaluatable - // predicates. - } - ty::PredicateKind::ConstEquate(..) => { - // Currently, we do not elaborate const-equate - // predicates. - } - ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(..)) => { - // Nothing to elaborate from `'a: 'b`. - } - ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate( - ty_max, - r_min, - ))) => { + ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(ty_max, r_min)) => { // We know that `T: 'a` for some type `T`. We can // often elaborate this. For example, if we know that // `[U]: 'a`, that implies that `U: 'a`. Similarly, if @@ -385,15 +357,25 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> { } }) .map(|clause| { - elaboratable.child(bound_predicate.rebind(clause).to_predicate(tcx)) + elaboratable.child(bound_clause.rebind(clause).to_predicate(tcx)) }), ); } - ty::PredicateKind::Ambiguous => {} - ty::PredicateKind::AliasRelate(..) => { - // No + ty::ClauseKind::RegionOutlives(..) => { + // Nothing to elaborate from `'a: 'b`. + } + ty::ClauseKind::WellFormed(..) => { + // Currently, we do not elaborate WF predicates, + // although we easily could. + } + ty::ClauseKind::Projection(..) => { + // Nothing to elaborate in a projection predicate. + } + ty::ClauseKind::ConstEvaluatable(..) => { + // Currently, we do not elaborate const-evaluatable + // predicates. } - ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) => { + ty::ClauseKind::ConstArgHasType(..) => { // Nothing to elaborate } } diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs index 7831b251db4..6527e87d396 100644 --- a/compiler/rustc_interface/src/interface.rs +++ b/compiler/rustc_interface/src/interface.rs @@ -316,6 +316,10 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se // Set parallel mode before thread pool creation, which will create `Lock`s. 
rustc_data_structures::sync::set_dyn_thread_safe_mode(config.opts.unstable_opts.threads > 1); + // Check jobserver before run_in_thread_pool_with_globals, which call jobserver::acquire_thread + let early_handler = EarlyErrorHandler::new(config.opts.error_format); + early_handler.initialize_checked_jobserver(); + util::run_in_thread_pool_with_globals( config.opts.edition, config.opts.unstable_opts.threads, diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index f7b6ab331a5..ce58b2ab061 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -27,6 +27,8 @@ use std::sync::Arc; fn mk_session(matches: getopts::Matches) -> (Session, Cfg) { let mut early_handler = EarlyErrorHandler::new(ErrorOutputType::default()); + early_handler.initialize_checked_jobserver(); + let registry = registry::Registry::new(&[]); let sessopts = build_session_options(&mut early_handler, &matches); let temps_dir = sessopts.unstable_opts.temps_dir.as_deref().map(PathBuf::from); diff --git a/compiler/rustc_lexer/src/unescape.rs b/compiler/rustc_lexer/src/unescape.rs index 717b042fbda..249126a269e 100644 --- a/compiler/rustc_lexer/src/unescape.rs +++ b/compiler/rustc_lexer/src/unescape.rs @@ -4,10 +4,14 @@ use std::ops::Range; use std::str::Chars; +use Mode::*; + #[cfg(test)] mod tests; -/// Errors and warnings that can occur during string unescaping. +/// Errors and warnings that can occur during string unescaping. They mostly +/// relate to malformed escape sequences, but there are a few that are about +/// other problems. #[derive(Debug, PartialEq, Eq)] pub enum EscapeError { /// Expected 1 char, but 0 were found. @@ -73,25 +77,24 @@ impl EscapeError { } } -/// Takes a contents of a literal (without quotes) and produces a -/// sequence of escaped characters or errors. -/// Values are returned through invoking of the provided callback. +/// Takes a contents of a literal (without quotes) and produces a sequence of +/// escaped characters or errors. +/// +/// Values are returned by invoking `callback`. For `Char` and `Byte` modes, +/// the callback will be called exactly once. 
pub fn unescape_literal<F>(src: &str, mode: Mode, callback: &mut F) where F: FnMut(Range<usize>, Result<char, EscapeError>), { match mode { - Mode::Char | Mode::Byte => { + Char | Byte => { let mut chars = src.chars(); - let res = unescape_char_or_byte(&mut chars, mode == Mode::Byte); + let res = unescape_char_or_byte(&mut chars, mode); callback(0..(src.len() - chars.as_str().len()), res); } - Mode::Str | Mode::ByteStr => unescape_str_common(src, mode, callback), - - Mode::RawStr | Mode::RawByteStr => { - unescape_raw_str_or_raw_byte_str(src, mode == Mode::RawByteStr, callback) - } - Mode::CStr | Mode::RawCStr => unreachable!(), + Str | ByteStr => unescape_str_common(src, mode, callback), + RawStr | RawByteStr => unescape_raw_str_or_raw_byte_str(src, mode, callback), + CStr | RawCStr => unreachable!(), } } @@ -117,38 +120,44 @@ pub fn unescape_c_string<F>(src: &str, mode: Mode, callback: &mut F) where F: FnMut(Range<usize>, Result<CStrUnit, EscapeError>), { - if mode == Mode::RawCStr { - unescape_raw_str_or_raw_byte_str( - src, - mode.characters_should_be_ascii(), - &mut |r, result| callback(r, result.map(CStrUnit::Char)), - ); - } else { - unescape_str_common(src, mode, callback); + match mode { + CStr => { + unescape_str_common(src, mode, callback); + } + RawCStr => { + unescape_raw_str_or_raw_byte_str(src, mode, &mut |r, result| { + callback(r, result.map(CStrUnit::Char)) + }); + } + Char | Byte | Str | RawStr | ByteStr | RawByteStr => unreachable!(), } } /// Takes a contents of a char literal (without quotes), and returns an /// unescaped char or an error. pub fn unescape_char(src: &str) -> Result<char, EscapeError> { - unescape_char_or_byte(&mut src.chars(), false) + unescape_char_or_byte(&mut src.chars(), Char) } /// Takes a contents of a byte literal (without quotes), and returns an /// unescaped byte or an error. pub fn unescape_byte(src: &str) -> Result<u8, EscapeError> { - unescape_char_or_byte(&mut src.chars(), true).map(byte_from_char) + unescape_char_or_byte(&mut src.chars(), Byte).map(byte_from_char) } /// What kind of literal do we parse. #[derive(Debug, Clone, Copy, PartialEq)] pub enum Mode { Char, - Str, + Byte, - ByteStr, + + Str, RawStr, + + ByteStr, RawByteStr, + CStr, RawCStr, } @@ -156,45 +165,42 @@ pub enum Mode { impl Mode { pub fn in_double_quotes(self) -> bool { match self { - Mode::Str - | Mode::ByteStr - | Mode::RawStr - | Mode::RawByteStr - | Mode::CStr - | Mode::RawCStr => true, - Mode::Char | Mode::Byte => false, + Str | RawStr | ByteStr | RawByteStr | CStr | RawCStr => true, + Char | Byte => false, } } /// Non-byte literals should have `\xXX` escapes that are within the ASCII range. - pub fn ascii_escapes_should_be_ascii(self) -> bool { + fn ascii_escapes_should_be_ascii(self) -> bool { match self { - Mode::Char | Mode::Str | Mode::RawStr => true, - Mode::Byte | Mode::ByteStr | Mode::RawByteStr | Mode::CStr | Mode::RawCStr => false, + Char | Str => true, + Byte | ByteStr | CStr => false, + RawStr | RawByteStr | RawCStr => unreachable!(), } } - /// Whether characters within the literal must be within the ASCII range - pub fn characters_should_be_ascii(self) -> bool { + /// Whether characters within the literal must be within the ASCII range. 
+ #[inline] + fn chars_should_be_ascii(self) -> bool { match self { - Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true, - Mode::Char | Mode::Str | Mode::RawStr | Mode::CStr | Mode::RawCStr => false, + Byte | ByteStr | RawByteStr => true, + Char | Str | RawStr | CStr | RawCStr => false, } } /// Byte literals do not allow unicode escape. - pub fn is_unicode_escape_disallowed(self) -> bool { + fn is_unicode_escape_disallowed(self) -> bool { match self { - Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true, - Mode::Char | Mode::Str | Mode::RawStr | Mode::CStr | Mode::RawCStr => false, + Byte | ByteStr | RawByteStr => true, + Char | Str | RawStr | CStr | RawCStr => false, } } pub fn prefix_noraw(self) -> &'static str { match self { - Mode::Byte | Mode::ByteStr | Mode::RawByteStr => "b", - Mode::CStr | Mode::RawCStr => "c", - Mode::Char | Mode::Str | Mode::RawStr => "", + Char | Str | RawStr => "", + Byte | ByteStr | RawByteStr => "b", + CStr | RawCStr => "c", } } } @@ -294,22 +300,21 @@ fn scan_unicode( } #[inline] -fn ascii_check(c: char, characters_should_be_ascii: bool) -> Result<char, EscapeError> { - if characters_should_be_ascii && !c.is_ascii() { - // Byte literal can't be a non-ascii character. +fn ascii_check(c: char, chars_should_be_ascii: bool) -> Result<char, EscapeError> { + if chars_should_be_ascii && !c.is_ascii() { Err(EscapeError::NonAsciiCharInByte) } else { Ok(c) } } -fn unescape_char_or_byte(chars: &mut Chars<'_>, is_byte: bool) -> Result<char, EscapeError> { +fn unescape_char_or_byte(chars: &mut Chars<'_>, mode: Mode) -> Result<char, EscapeError> { let c = chars.next().ok_or(EscapeError::ZeroChars)?; let res = match c { - '\\' => scan_escape(chars, if is_byte { Mode::Byte } else { Mode::Char }), + '\\' => scan_escape(chars, mode), '\n' | '\t' | '\'' => Err(EscapeError::EscapeOnlyChar), '\r' => Err(EscapeError::BareCarriageReturn), - _ => ascii_check(c, is_byte), + _ => ascii_check(c, mode.chars_should_be_ascii()), }?; if chars.next().is_some() { return Err(EscapeError::MoreThanOneChar); @@ -324,6 +329,7 @@ where F: FnMut(Range<usize>, Result<T, EscapeError>), { let mut chars = src.chars(); + let chars_should_be_ascii = mode.chars_should_be_ascii(); // get this outside the loop // The `start` and `end` computation here is complicated because // `skip_ascii_whitespace` makes us to skip over chars without counting @@ -346,14 +352,12 @@ where _ => scan_escape::<T>(&mut chars, mode), } } - '\n' => Ok(b'\n'.into()), - '\t' => Ok(b'\t'.into()), '"' => Err(EscapeError::EscapeOnlyChar), '\r' => Err(EscapeError::BareCarriageReturn), - _ => ascii_check(c, mode.characters_should_be_ascii()).map(Into::into), + _ => ascii_check(c, chars_should_be_ascii).map(Into::into), }; let end = src.len() - chars.as_str().len(); - callback(start..end, res.map(Into::into)); + callback(start..end, res); } } @@ -387,20 +391,21 @@ where /// sequence of characters or errors. /// NOTE: Raw strings do not perform any explicit character escaping, here we /// only produce errors on bare CR. 
-fn unescape_raw_str_or_raw_byte_str<F>(src: &str, is_byte: bool, callback: &mut F) +fn unescape_raw_str_or_raw_byte_str<F>(src: &str, mode: Mode, callback: &mut F) where F: FnMut(Range<usize>, Result<char, EscapeError>), { let mut chars = src.chars(); + let chars_should_be_ascii = mode.chars_should_be_ascii(); // get this outside the loop // The `start` and `end` computation here matches the one in - // `unescape_str_or_byte_str` for consistency, even though this function + // `unescape_str_common` for consistency, even though this function // doesn't have to worry about skipping any chars. while let Some(c) = chars.next() { let start = src.len() - chars.as_str().len() - c.len_utf8(); let res = match c { '\r' => Err(EscapeError::BareCarriageReturnInRawString), - _ => ascii_check(c, is_byte), + _ => ascii_check(c, chars_should_be_ascii), }; let end = src.len() - chars.as_str().len(); callback(start..end, res); @@ -410,7 +415,7 @@ where #[inline] pub fn byte_from_char(c: char) -> u8 { let res = c as u32; - debug_assert!(res <= u8::MAX as u32, "guaranteed because of Mode::ByteStr"); + debug_assert!(res <= u8::MAX as u32, "guaranteed because of ByteStr"); res as u8 } diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index 8932200c5b7..64de5e92abf 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -1000,8 +1000,10 @@ impl EarlyLintPass for UnusedDocComment { } fn check_arm(&mut self, cx: &EarlyContext<'_>, arm: &ast::Arm) { - let arm_span = arm.pat.span.with_hi(arm.body.span.hi()); - warn_if_doc(cx, arm_span, "match arms", &arm.attrs); + if let Some(body) = &arm.body { + let arm_span = arm.pat.span.with_hi(body.span.hi()); + warn_if_doc(cx, arm_span, "match arms", &arm.attrs); + } } fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &ast::Pat) { diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs index 7c4f81a4c39..4c7f9eeff8c 100644 --- a/compiler/rustc_lint/src/early.rs +++ b/compiler/rustc_lint/src/early.rs @@ -162,12 +162,8 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T> // Explicitly check for lints associated with 'closure_id', since // it does not have a corresponding AST node if let ast_visit::FnKind::Fn(_, _, sig, _, _, _) = fk { - if let Some( - ast::CoroutineKind::Async { closure_id, .. } - | ast::CoroutineKind::Gen { closure_id, .. }, - ) = sig.header.coro_kind - { - self.check_id(closure_id); + if let Some(coroutine_kind) = sig.header.coroutine_kind { + self.check_id(coroutine_kind.closure_id()); } } } @@ -227,13 +223,11 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T> // it does not have a corresponding AST node match e.kind { ast::ExprKind::Closure(box ast::Closure { - coro_kind: - Some( - ast::CoroutineKind::Async { closure_id, .. } - | ast::CoroutineKind::Gen { closure_id, .. }, - ), + coroutine_kind: Some(coroutine_kind), .. 
- }) => self.check_id(closure_id), + }) => { + self.check_id(coroutine_kind.closure_id()); + } _ => {} } lint_callback!(self, check_expr_post, e); diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs index c492e7c6fbf..34cdee4ec5c 100644 --- a/compiler/rustc_lint/src/unused.rs +++ b/compiler/rustc_lint/src/unused.rs @@ -1113,15 +1113,17 @@ impl EarlyLintPass for UnusedParens { } ExprKind::Match(ref _expr, ref arm) => { for a in arm { - self.check_unused_delims_expr( - cx, - &a.body, - UnusedDelimsCtx::MatchArmExpr, - false, - None, - None, - true, - ); + if let Some(body) = &a.body { + self.check_unused_delims_expr( + cx, + body, + UnusedDelimsCtx::MatchArmExpr, + false, + None, + None, + true, + ); + } } } _ => {} diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 0edcac93b62..55e1c84c8a2 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -410,7 +410,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine( const char *SplitDwarfFile, const char *OutputObjFile, const char *DebugInfoCompression, - bool ForceEmulatedTls, + bool UseEmulatedTls, const char *ArgsCstrBuff, size_t ArgsCstrBuffLen) { auto OptLevel = fromRust(RustOptLevel); @@ -456,13 +456,9 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine( Options.UseInitArray = UseInitArray; #if LLVM_VERSION_LT(17, 0) - if (ForceEmulatedTls) { - Options.ExplicitEmulatedTLS = true; - Options.EmulatedTLS = true; - } -#else - Options.EmulatedTLS = ForceEmulatedTls || Trip.hasDefaultEmulatedTLS(); + Options.ExplicitEmulatedTLS = true; #endif + Options.EmulatedTLS = UseEmulatedTls; if (TrapUnreachable) { // Tell LLVM to codegen `unreachable` into an explicit trap instruction. diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs index b8f04ed03dc..30f13afff9a 100644 --- a/compiler/rustc_middle/src/middle/region.rs +++ b/compiler/rustc_middle/src/middle/region.rs @@ -349,10 +349,6 @@ impl ScopeTree { } } - pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option<Scope> { - self.destruction_scopes.get(&n).cloned() - } - pub fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) { debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime); assert!(var != lifetime.item_local_id()); diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs index f15ee0082ce..ec5edceb269 100644 --- a/compiler/rustc_middle/src/mir/coverage.rs +++ b/compiler/rustc_middle/src/mir/coverage.rs @@ -76,6 +76,13 @@ impl Debug for CovTerm { #[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)] pub enum CoverageKind { + /// Marks a span that might otherwise not be represented in MIR, so that + /// coverage instrumentation can associate it with its enclosing block/BCB. + /// + /// Only used by the `InstrumentCoverage` pass, and has no effect during + /// codegen. + SpanMarker, + /// Marks the point in MIR control flow represented by a coverage counter. /// /// This is eventually lowered to `llvm.instrprof.increment` in LLVM IR. 
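Above, the early lint pass drops the per-variant or-pattern and asks `coroutine_kind.closure_id()` for the extra `NodeId` to check, so adding `AsyncGen` does not touch these call sites. The following is a standalone sketch of that refactor; `ToyCoroutineKind`, `NodeId` and `check_id` are simplified stand-ins, not the real AST or lint types.

    // Toy version of the accessor-consolidation refactor used in the lint pass.
    type NodeId = u32;

    enum ToyCoroutineKind {
        Async { closure_id: NodeId },
        Gen { closure_id: NodeId },
        AsyncGen { closure_id: NodeId },
    }

    impl ToyCoroutineKind {
        /// One accessor instead of an ever-growing or-pattern at every call site.
        fn closure_id(&self) -> NodeId {
            match self {
                ToyCoroutineKind::Async { closure_id }
                | ToyCoroutineKind::Gen { closure_id }
                | ToyCoroutineKind::AsyncGen { closure_id } => *closure_id,
            }
        }
    }

    fn check_id(id: NodeId) {
        println!("checking lint levels for node {id}");
    }

    fn main() {
        // Call sites stay unchanged when new coroutine kinds are introduced.
        for kind in [
            ToyCoroutineKind::Async { closure_id: 1 },
            ToyCoroutineKind::Gen { closure_id: 2 },
            ToyCoroutineKind::AsyncGen { closure_id: 3 },
        ] {
            check_id(kind.closure_id());
        }
    }

The accessor keeps the exhaustive match in one place, so a new coroutine kind only forces an update there rather than at every lint pass that needs the id.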
@@ -99,6 +106,7 @@ impl Debug for CoverageKind { fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { use CoverageKind::*; match self { + SpanMarker => write!(fmt, "SpanMarker"), CounterIncrement { id } => write!(fmt, "CounterIncrement({:?})", id.index()), ExpressionUsed { id } => write!(fmt, "ExpressionUsed({:?})", id.index()), } diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs index 9a6ac6ff57a..aa4cb36c5ce 100644 --- a/compiler/rustc_middle/src/mir/terminator.rs +++ b/compiler/rustc_middle/src/mir/terminator.rs @@ -150,11 +150,17 @@ impl<O> AssertKind<O> { RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero", ResumedAfterReturn(CoroutineKind::Coroutine) => "coroutine resumed after completion", ResumedAfterReturn(CoroutineKind::Async(_)) => "`async fn` resumed after completion", + ResumedAfterReturn(CoroutineKind::AsyncGen(_)) => { + "`async gen fn` resumed after completion" + } ResumedAfterReturn(CoroutineKind::Gen(_)) => { "`gen fn` should just keep returning `None` after completion" } ResumedAfterPanic(CoroutineKind::Coroutine) => "coroutine resumed after panicking", ResumedAfterPanic(CoroutineKind::Async(_)) => "`async fn` resumed after panicking", + ResumedAfterPanic(CoroutineKind::AsyncGen(_)) => { + "`async gen fn` resumed after panicking" + } ResumedAfterPanic(CoroutineKind::Gen(_)) => { "`gen fn` should just keep returning `None` after panicking" } @@ -245,6 +251,7 @@ impl<O> AssertKind<O> { DivisionByZero(_) => middle_assert_divide_by_zero, RemainderByZero(_) => middle_assert_remainder_by_zero, ResumedAfterReturn(CoroutineKind::Async(_)) => middle_assert_async_resume_after_return, + ResumedAfterReturn(CoroutineKind::AsyncGen(_)) => todo!(), ResumedAfterReturn(CoroutineKind::Gen(_)) => { bug!("gen blocks can be resumed after they return and will keep returning `None`") } @@ -252,6 +259,7 @@ impl<O> AssertKind<O> { middle_assert_coroutine_resume_after_return } ResumedAfterPanic(CoroutineKind::Async(_)) => middle_assert_async_resume_after_panic, + ResumedAfterPanic(CoroutineKind::AsyncGen(_)) => todo!(), ResumedAfterPanic(CoroutineKind::Gen(_)) => middle_assert_gen_resume_after_panic, ResumedAfterPanic(CoroutineKind::Coroutine) => { middle_assert_coroutine_resume_after_panic diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index c48428c713c..b6759d35210 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -134,7 +134,6 @@ pub struct Block { /// This does *not* include labels on loops, e.g. `'label: loop {}`. pub targeted_by_break: bool, pub region_scope: region::Scope, - pub opt_destruction_scope: Option<region::Scope>, /// The span of the block, including the opening braces, /// the label, and the `unsafe` keyword, if present. 
pub span: Span, @@ -193,7 +192,6 @@ pub enum BlockSafety { #[derive(Clone, Debug, HashStable)] pub struct Stmt<'tcx> { pub kind: StmtKind<'tcx>, - pub opt_destruction_scope: Option<region::Scope>, } #[derive(Clone, Debug, HashStable)] @@ -1224,12 +1222,12 @@ impl<'tcx> fmt::Display for Pat<'tcx> { mod size_asserts { use super::*; // tidy-alphabetical-start - static_assert_size!(Block, 56); + static_assert_size!(Block, 48); static_assert_size!(Expr<'_>, 64); static_assert_size!(ExprKind<'_>, 40); static_assert_size!(Pat<'_>, 64); static_assert_size!(PatKind<'_>, 48); - static_assert_size!(Stmt<'_>, 56); + static_assert_size!(Stmt<'_>, 48); static_assert_size!(StmtKind<'_>, 48); // tidy-alphabetical-end } diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs index 96ed1a4d0be..e8e2907eb33 100644 --- a/compiler/rustc_middle/src/traits/select.rs +++ b/compiler/rustc_middle/src/traits/select.rs @@ -144,10 +144,14 @@ pub enum SelectionCandidate<'tcx> { /// generated for an async construct. FutureCandidate, - /// Implementation of an `Iterator` trait by one of the generator types - /// generated for a gen construct. + /// Implementation of an `Iterator` trait by one of the coroutine types + /// generated for a `gen` construct. IteratorCandidate, + /// Implementation of an `AsyncIterator` trait by one of the coroutine types + /// generated for a `async gen` construct. + AsyncIteratorCandidate, + /// Implementation of a `Fn`-family trait by one of the anonymous /// types generated for a fn pointer type (e.g., `fn(int) -> int`) FnPointerCandidate { diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs index e48840fac20..293df4f691d 100644 --- a/compiler/rustc_middle/src/ty/consts.rs +++ b/compiler/rustc_middle/src/ty/consts.rs @@ -7,7 +7,7 @@ use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::LocalDefId; use rustc_macros::HashStable; -use rustc_type_ir::{TypeFlags, WithCachedTypeInfo}; +use rustc_type_ir::{ConstTy, IntoKind, TypeFlags, WithCachedTypeInfo}; mod int; mod kind; @@ -26,6 +26,20 @@ use super::sty::ConstKind; #[rustc_pass_by_value] pub struct Const<'tcx>(pub(super) Interned<'tcx, WithCachedTypeInfo<ConstData<'tcx>>>); +impl<'tcx> IntoKind for Const<'tcx> { + type Kind = ConstKind<'tcx>; + + fn kind(self) -> ConstKind<'tcx> { + self.kind().clone() + } +} + +impl<'tcx> ConstTy<TyCtxt<'tcx>> for Const<'tcx> { + fn ty(self) -> Ty<'tcx> { + self.ty() + } +} + /// Typed constant value. 
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, HashStable, TyEncodable, TyDecodable)] pub struct ConstData<'tcx> { diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 4a01a24fd61..eb6fde83fcc 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -39,7 +39,9 @@ use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; -use rustc_data_structures::sync::{FreezeReadGuard, Lock, WorkerLocal}; +use rustc_data_structures::sync::{self, FreezeReadGuard, Lock, WorkerLocal}; +#[cfg(parallel_compiler)] +use rustc_data_structures::sync::{DynSend, DynSync}; use rustc_data_structures::unord::UnordSet; use rustc_errors::{ DecorateLint, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, MultiSpan, @@ -121,6 +123,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> { type RegionOutlivesPredicate = ty::RegionOutlivesPredicate<'tcx>; type TypeOutlivesPredicate = ty::TypeOutlivesPredicate<'tcx>; type ProjectionPredicate = ty::ProjectionPredicate<'tcx>; + type NormalizesTo = ty::NormalizesTo<'tcx>; type SubtypePredicate = ty::SubtypePredicate<'tcx>; type CoercePredicate = ty::CoercePredicate<'tcx>; type ClosureKind = ty::ClosureKind; @@ -130,6 +133,31 @@ impl<'tcx> Interner for TyCtxt<'tcx> { ) -> (Self::Ty, ty::Mutability) { (ty, mutbl) } + + fn mk_canonical_var_infos(self, infos: &[ty::CanonicalVarInfo<Self>]) -> Self::CanonicalVars { + self.mk_canonical_var_infos(infos) + } + + fn mk_bound_ty(self, debruijn: ty::DebruijnIndex, var: ty::BoundVar) -> Self::Ty { + Ty::new_bound(self, debruijn, ty::BoundTy { var, kind: ty::BoundTyKind::Anon }) + } + + fn mk_bound_region(self, debruijn: ty::DebruijnIndex, var: ty::BoundVar) -> Self::Region { + Region::new_bound( + self, + debruijn, + ty::BoundRegion { var, kind: ty::BoundRegionKind::BrAnon }, + ) + } + + fn mk_bound_const( + self, + debruijn: ty::DebruijnIndex, + var: ty::BoundVar, + ty: Self::Ty, + ) -> Self::Const { + Const::new_bound(self, debruijn, var, ty) + } } type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>; @@ -552,6 +580,16 @@ pub struct TyCtxt<'tcx> { gcx: &'tcx GlobalCtxt<'tcx>, } +// Explicitly implement `DynSync` and `DynSend` for `TyCtxt` to short circuit trait resolution. +#[cfg(parallel_compiler)] +unsafe impl DynSend for TyCtxt<'_> {} +#[cfg(parallel_compiler)] +unsafe impl DynSync for TyCtxt<'_> {} +fn _assert_tcx_fields() { + sync::assert_dyn_sync::<&'_ GlobalCtxt<'_>>(); + sync::assert_dyn_send::<&'_ GlobalCtxt<'_>>(); +} + impl<'tcx> Deref for TyCtxt<'tcx> { type Target = &'tcx GlobalCtxt<'tcx>; #[inline(always)] @@ -824,11 +862,16 @@ impl<'tcx> TyCtxt<'tcx> { matches!(self.coroutine_kind(def_id), Some(hir::CoroutineKind::Coroutine)) } - /// Returns `true` if the node pointed to by `def_id` is a coroutine for a gen construct. + /// Returns `true` if the node pointed to by `def_id` is a coroutine for a `gen` construct. pub fn coroutine_is_gen(self, def_id: DefId) -> bool { matches!(self.coroutine_kind(def_id), Some(hir::CoroutineKind::Gen(_))) } + /// Returns `true` if the node pointed to by `def_id` is a coroutine for a `async gen` construct. 
+ pub fn coroutine_is_async_gen(self, def_id: DefId) -> bool { + matches!(self.coroutine_kind(def_id), Some(hir::CoroutineKind::AsyncGen(_))) + } + pub fn stability(self) -> &'tcx stability::Index { self.stability_index(()) } diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index 5084fc98913..f9a2385b100 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -272,6 +272,10 @@ impl FlagComputation { self.add_const(found); } ty::PredicateKind::Ambiguous => {} + ty::PredicateKind::NormalizesTo(ty::NormalizesTo { alias, term }) => { + self.add_alias_ty(alias); + self.add_term(term); + } ty::PredicateKind::AliasRelate(t1, t2, _) => { self.add_term(t1); self.add_term(t2); diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs index f278cace99d..ae17942785f 100644 --- a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs +++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs @@ -1,3 +1,5 @@ +use smallvec::SmallVec; + use crate::ty::context::TyCtxt; use crate::ty::{self, DefId, ParamEnv, Ty}; @@ -31,27 +33,31 @@ impl<'tcx> InhabitedPredicate<'tcx> { /// Returns true if the corresponding type is inhabited in the given /// `ParamEnv` and module pub fn apply(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, module_def_id: DefId) -> bool { - let Ok(result) = self - .apply_inner::<!>(tcx, param_env, &|id| Ok(tcx.is_descendant_of(module_def_id, id))); + let Ok(result) = self.apply_inner::<!>(tcx, param_env, &mut Default::default(), &|id| { + Ok(tcx.is_descendant_of(module_def_id, id)) + }); result } /// Same as `apply`, but returns `None` if self contains a module predicate pub fn apply_any_module(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> { - self.apply_inner(tcx, param_env, &|_| Err(())).ok() + self.apply_inner(tcx, param_env, &mut Default::default(), &|_| Err(())).ok() } /// Same as `apply`, but `NotInModule(_)` predicates yield `false`. That is, /// privately uninhabited types are considered always uninhabited. pub fn apply_ignore_module(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> bool { - let Ok(result) = self.apply_inner::<!>(tcx, param_env, &|_| Ok(true)); + let Ok(result) = + self.apply_inner::<!>(tcx, param_env, &mut Default::default(), &|_| Ok(true)); result } - fn apply_inner<E>( + #[instrument(level = "debug", skip(tcx, param_env, in_module), ret)] + fn apply_inner<E: std::fmt::Debug>( self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, + eval_stack: &mut SmallVec<[Ty<'tcx>; 1]>, // for cycle detection in_module: &impl Fn(DefId) -> Result<bool, E>, ) -> Result<bool, E> { match self { @@ -71,11 +77,25 @@ impl<'tcx> InhabitedPredicate<'tcx> { match normalized_pred { // We don't have more information than we started with, so consider inhabited. Self::GenericType(_) => Ok(true), - pred => pred.apply_inner(tcx, param_env, in_module), + pred => { + // A type which is cyclic when monomorphized can happen here since the + // layout error would only trigger later. See e.g. `tests/ui/sized/recursive-type-2.rs`. + if eval_stack.contains(&t) { + return Ok(true); // Recover; this will error later. 
+ } + eval_stack.push(t); + let ret = pred.apply_inner(tcx, param_env, eval_stack, in_module); + eval_stack.pop(); + ret + } } } - Self::And([a, b]) => try_and(a, b, |x| x.apply_inner(tcx, param_env, in_module)), - Self::Or([a, b]) => try_or(a, b, |x| x.apply_inner(tcx, param_env, in_module)), + Self::And([a, b]) => { + try_and(a, b, |x| x.apply_inner(tcx, param_env, eval_stack, in_module)) + } + Self::Or([a, b]) => { + try_or(a, b, |x| x.apply_inner(tcx, param_env, eval_stack, in_module)) + } } } @@ -197,7 +217,7 @@ impl<'tcx> InhabitedPredicate<'tcx> { // this is basically like `f(a)? && f(b)?` but different in the case of // `Ok(false) && Err(_) -> Ok(false)` -fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> { +fn try_and<T, E>(a: T, b: T, mut f: impl FnMut(T) -> Result<bool, E>) -> Result<bool, E> { let a = f(a); if matches!(a, Ok(false)) { return Ok(false); @@ -209,7 +229,7 @@ fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E } } -fn try_or<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> { +fn try_or<T, E>(a: T, b: T, mut f: impl FnMut(T) -> Result<bool, E>) -> Result<bool, E> { let a = f(a); if matches!(a, Ok(true)) { return Ok(true); diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs index 68ac54e899a..d67fb9fd0b7 100644 --- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs +++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs @@ -103,6 +103,7 @@ impl<'tcx> VariantDef { } impl<'tcx> Ty<'tcx> { + #[instrument(level = "debug", skip(tcx), ret)] pub fn inhabited_predicate(self, tcx: TyCtxt<'tcx>) -> InhabitedPredicate<'tcx> { match self.kind() { // For now, unions are always considered inhabited diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 77196486ac1..71ff7021ca5 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -65,15 +65,10 @@ use std::ops::ControlFlow; use std::{fmt, str}; pub use crate::ty::diagnostics::*; -pub use rustc_type_ir::AliasKind::*; pub use rustc_type_ir::ConstKind::{ Bound as BoundCt, Error as ErrorCt, Expr as ExprCt, Infer as InferCt, Param as ParamCt, Placeholder as PlaceholderCt, Unevaluated, Value, }; -pub use rustc_type_ir::DynKind::*; -pub use rustc_type_ir::InferTy::*; -pub use rustc_type_ir::RegionKind::*; -pub use rustc_type_ir::TyKind::*; pub use rustc_type_ir::*; pub use self::binding::BindingMode; @@ -474,6 +469,14 @@ pub struct CReaderCacheKey { #[rustc_pass_by_value] pub struct Ty<'tcx>(Interned<'tcx, WithCachedTypeInfo<TyKind<'tcx>>>); +impl<'tcx> IntoKind for Ty<'tcx> { + type Kind = TyKind<'tcx>; + + fn kind(self) -> TyKind<'tcx> { + self.kind().clone() + } +} + impl EarlyParamRegion { /// Does this early bound region have a name? Early bound regions normally /// always have names except when using anonymous lifetimes (`'_`). @@ -553,6 +556,10 @@ impl<'tcx> Predicate<'tcx> { pub fn allow_normalization(self) -> bool { match self.kind().skip_binder() { PredicateKind::Clause(ClauseKind::WellFormed(_)) => false, + // `NormalizesTo` is only used in the new solver, so this shouldn't + // matter. Normalizing `term` would be 'wrong' however, as it changes whether + // `normalizes-to(<T as Trait>::Assoc, <T as Trait>::Assoc)` holds. + PredicateKind::NormalizesTo(..) 
=> false, PredicateKind::Clause(ClauseKind::Trait(_)) | PredicateKind::Clause(ClauseKind::RegionOutlives(_)) | PredicateKind::Clause(ClauseKind::TypeOutlives(_)) @@ -1093,6 +1100,33 @@ impl<'tcx> PolyProjectionPredicate<'tcx> { } } +/// Used by the new solver. Unlike a `ProjectionPredicate` this can only be +/// proven by actually normalizing `alias`. +#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] +pub struct NormalizesTo<'tcx> { + pub alias: AliasTy<'tcx>, + pub term: Term<'tcx>, +} + +impl<'tcx> NormalizesTo<'tcx> { + pub fn self_ty(self) -> Ty<'tcx> { + self.alias.self_ty() + } + + pub fn with_self_ty(self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> NormalizesTo<'tcx> { + Self { alias: self.alias.with_self_ty(tcx, self_ty), ..self } + } + + pub fn trait_def_id(self, tcx: TyCtxt<'tcx>) -> DefId { + self.alias.trait_def_id(tcx) + } + + pub fn def_id(self) -> DefId { + self.alias.def_id + } +} + pub trait ToPolyTraitRef<'tcx> { fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>; } @@ -1274,6 +1308,12 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyProjectionPredicate<'tcx> { } } +impl<'tcx> ToPredicate<'tcx> for NormalizesTo<'tcx> { + fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> { + PredicateKind::NormalizesTo(self).to_predicate(tcx) + } +} + impl<'tcx> Predicate<'tcx> { pub fn to_opt_poly_trait_pred(self) -> Option<PolyTraitPredicate<'tcx>> { let predicate = self.kind(); @@ -1281,6 +1321,7 @@ impl<'tcx> Predicate<'tcx> { PredicateKind::Clause(ClauseKind::Trait(t)) => Some(predicate.rebind(t)), PredicateKind::Clause(ClauseKind::Projection(..)) | PredicateKind::Clause(ClauseKind::ConstArgHasType(..)) + | PredicateKind::NormalizesTo(..) | PredicateKind::AliasRelate(..) | PredicateKind::Subtype(..) | PredicateKind::Coerce(..) @@ -1300,6 +1341,7 @@ impl<'tcx> Predicate<'tcx> { PredicateKind::Clause(ClauseKind::Projection(t)) => Some(predicate.rebind(t)), PredicateKind::Clause(ClauseKind::Trait(..)) | PredicateKind::Clause(ClauseKind::ConstArgHasType(..)) + | PredicateKind::NormalizesTo(..) | PredicateKind::AliasRelate(..) | PredicateKind::Subtype(..) | PredicateKind::Coerce(..) 
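The hunks that follow replace the `rustc_type_ir::Placeholder` impls with `PlaceholderLike`, keeping the by-value `universe`/`var` accessors and adding a `new` constructor so generic code can rebuild placeholders as well as inspect them. The following is a standalone sketch of that trait shape under simplified types; `ToyPlaceholder` and the two index newtypes are invented here and are not rustc's.

    // Toy model of the PlaceholderLike shape: accessors plus a constructor.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct UniverseIndex(u32);

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct BoundVar(u32);

    trait PlaceholderLike {
        fn universe(self) -> UniverseIndex;
        fn var(self) -> BoundVar;
        fn with_updated_universe(self, ui: UniverseIndex) -> Self;
        fn new(ui: UniverseIndex, var: BoundVar) -> Self;
    }

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct ToyPlaceholder {
        universe: UniverseIndex,
        bound: BoundVar,
    }

    impl PlaceholderLike for ToyPlaceholder {
        fn universe(self) -> UniverseIndex {
            self.universe
        }

        fn var(self) -> BoundVar {
            self.bound
        }

        fn with_updated_universe(self, ui: UniverseIndex) -> Self {
            Self { universe: ui, ..self }
        }

        fn new(ui: UniverseIndex, var: BoundVar) -> Self {
            Self { universe: ui, bound: var }
        }
    }

    /// Generic code can now rebuild a placeholder in another universe without
    /// knowing which concrete placeholder type it is working with.
    fn lift_to_root<P: PlaceholderLike>(p: P) -> P {
        P::new(UniverseIndex(0), p.var())
    }

    fn main() {
        let p = ToyPlaceholder::new(UniverseIndex(3), BoundVar(7));
        assert_eq!(p.with_updated_universe(UniverseIndex(1)).universe(), UniverseIndex(1));
        assert_eq!(
            lift_to_root(p),
            ToyPlaceholder { universe: UniverseIndex(0), bound: BoundVar(7) }
        );
        println!("placeholder helpers behave as described");
    }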
@@ -1506,34 +1548,42 @@ pub struct Placeholder<T> { pub type PlaceholderRegion = Placeholder<BoundRegion>; -impl rustc_type_ir::Placeholder for PlaceholderRegion { - fn universe(&self) -> UniverseIndex { +impl PlaceholderLike for PlaceholderRegion { + fn universe(self) -> UniverseIndex { self.universe } - fn var(&self) -> BoundVar { + fn var(self) -> BoundVar { self.bound.var } fn with_updated_universe(self, ui: UniverseIndex) -> Self { Placeholder { universe: ui, ..self } } + + fn new(ui: UniverseIndex, var: BoundVar) -> Self { + Placeholder { universe: ui, bound: BoundRegion { var, kind: BoundRegionKind::BrAnon } } + } } pub type PlaceholderType = Placeholder<BoundTy>; -impl rustc_type_ir::Placeholder for PlaceholderType { - fn universe(&self) -> UniverseIndex { +impl PlaceholderLike for PlaceholderType { + fn universe(self) -> UniverseIndex { self.universe } - fn var(&self) -> BoundVar { + fn var(self) -> BoundVar { self.bound.var } fn with_updated_universe(self, ui: UniverseIndex) -> Self { Placeholder { universe: ui, ..self } } + + fn new(ui: UniverseIndex, var: BoundVar) -> Self { + Placeholder { universe: ui, bound: BoundTy { var, kind: BoundTyKind::Anon } } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)] @@ -1545,18 +1595,22 @@ pub struct BoundConst<'tcx> { pub type PlaceholderConst = Placeholder<BoundVar>; -impl rustc_type_ir::Placeholder for PlaceholderConst { - fn universe(&self) -> UniverseIndex { +impl PlaceholderLike for PlaceholderConst { + fn universe(self) -> UniverseIndex { self.universe } - fn var(&self) -> BoundVar { + fn var(self) -> BoundVar { self.bound } fn with_updated_universe(self, ui: UniverseIndex) -> Self { Placeholder { universe: ui, ..self } } + + fn new(ui: UniverseIndex, var: BoundVar) -> Self { + Placeholder { universe: ui, bound: var } + } } /// When type checking, we use the `ParamEnv` to track diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 1592d852bc7..bd9f0fd3ea9 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -2814,6 +2814,7 @@ define_print! { p!("the constant `", print(c1), "` equals `", print(c2), "`") } ty::PredicateKind::Ambiguous => p!("ambiguous"), + ty::PredicateKind::NormalizesTo(data) => p!(print(data)), ty::PredicateKind::AliasRelate(t1, t2, dir) => p!(print(t1), write(" {} ", dir), print(t2)), } } @@ -2945,6 +2946,12 @@ define_print_and_forward_display! 
{ p!(print(self.term)) } + ty::NormalizesTo<'tcx> { + p!(print(self.alias), " normalizes-to "); + cx.reset_type_limit(); + p!(print(self.term)) + } + ty::Term<'tcx> { match self.unpack() { ty::TermKind::Ty(ty) => p!(print(ty)), diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 0f19a2fd822..0cff6b77eb6 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -173,6 +173,12 @@ impl<'tcx> fmt::Debug for ty::ProjectionPredicate<'tcx> { } } +impl<'tcx> fmt::Debug for ty::NormalizesTo<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "NormalizesTo({:?}, {:?})", self.alias, self.term) + } +} + impl<'tcx> fmt::Debug for ty::Predicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.kind()) diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index e221b4e8bec..50a1b85b169 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -6,7 +6,7 @@ use crate::infer::canonical::Canonical; use crate::ty::visit::ValidateBoundVars; use crate::ty::InferTy::*; use crate::ty::{ - self, AdtDef, Discr, Term, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable, + self, AdtDef, Discr, IntoKind, Term, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor, }; use crate::ty::{GenericArg, GenericArgs, GenericArgsRef}; @@ -1477,6 +1477,14 @@ impl ParamConst { #[rustc_pass_by_value] pub struct Region<'tcx>(pub Interned<'tcx, RegionKind<'tcx>>); +impl<'tcx> IntoKind for Region<'tcx> { + type Kind = RegionKind<'tcx>; + + fn kind(self) -> RegionKind<'tcx> { + *self + } +} + impl<'tcx> Region<'tcx> { #[inline] pub fn new_early_param( diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 70252a4dc67..b7c3edee9e5 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -732,6 +732,7 @@ impl<'tcx> TyCtxt<'tcx> { DefKind::Closure if let Some(coroutine_kind) = self.coroutine_kind(def_id) => { match coroutine_kind { rustc_hir::CoroutineKind::Async(..) => "async closure", + rustc_hir::CoroutineKind::AsyncGen(..) => "async gen closure", rustc_hir::CoroutineKind::Coroutine => "coroutine", rustc_hir::CoroutineKind::Gen(..) => "gen closure", } @@ -752,6 +753,7 @@ impl<'tcx> TyCtxt<'tcx> { DefKind::Closure if let Some(coroutine_kind) = self.coroutine_kind(def_id) => { match coroutine_kind { rustc_hir::CoroutineKind::Async(..) => "an", + rustc_hir::CoroutineKind::AsyncGen(..) => "an", rustc_hir::CoroutineKind::Coroutine => "a", rustc_hir::CoroutineKind::Gen(..) => "a", } @@ -781,9 +783,9 @@ impl<'tcx> TyCtxt<'tcx> { } pub fn expected_const_effect_param_for_body(self, def_id: LocalDefId) -> ty::Const<'tcx> { - // if the callee does have the param, we need to equate the param to some const - // value no matter whether the effects feature is enabled in the local crate, - // because inference will fail if we don't. + // FIXME(effects): This is suspicious and should probably not be done, + // especially now that we enforce host effects and then properly handle + // effect vars during fallback. 
let mut host_always_on = !self.features().effects || self.sess.opts.unstable_opts.unleash_the_miri_inside_of_you; diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml index db542234052..6d681dc295e 100644 --- a/compiler/rustc_mir_build/Cargo.toml +++ b/compiler/rustc_mir_build/Cargo.toml @@ -17,6 +17,7 @@ rustc_index = { path = "../rustc_index" } rustc_infer = { path = "../rustc_infer" } rustc_macros = { path = "../rustc_macros" } rustc_middle = { path = "../rustc_middle" } +rustc_pattern_analysis = { path = "../rustc_pattern_analysis" } rustc_session = { path = "../rustc_session" } rustc_span = { path = "../rustc_span" } rustc_target = { path = "../rustc_target" } diff --git a/compiler/rustc_mir_build/messages.ftl b/compiler/rustc_mir_build/messages.ftl index c8d6c2114e9..615b553434f 100644 --- a/compiler/rustc_mir_build/messages.ftl +++ b/compiler/rustc_mir_build/messages.ftl @@ -237,15 +237,6 @@ mir_build_non_const_path = runtime values cannot be referenced in patterns mir_build_non_exhaustive_match_all_arms_guarded = match arms with guards don't count towards exhaustivity -mir_build_non_exhaustive_omitted_pattern = some variants are not matched explicitly - .help = ensure that all variants are matched explicitly by adding the suggested match arms - .note = the matched value is of type `{$scrut_ty}` and the `non_exhaustive_omitted_patterns` attribute was found - -mir_build_non_exhaustive_omitted_pattern_lint_on_arm = the lint level must be set on the whole match - .help = it no longer has any effect to set the lint level on an individual match arm - .label = remove this attribute - .suggestion = set the lint level on the whole match - mir_build_non_exhaustive_patterns_type_not_empty = non-exhaustive patterns: type `{$ty}` is non-empty .def_note = `{$peeled_ty}` defined here .type_note = the matched value is of type `{$ty}` @@ -260,10 +251,6 @@ mir_build_non_partial_eq_match = mir_build_nontrivial_structural_match = to use a constant of type `{$non_sm_ty}` in a pattern, the constant's initializer must be trivial or `{$non_sm_ty}` must be annotated with `#[derive(PartialEq, Eq)]` -mir_build_overlapping_range_endpoints = multiple patterns overlap on their endpoints - .range = ... 
with this range - .note = you likely meant to write mutually exclusive ranges - mir_build_pattern_not_covered = refutable pattern in {$origin} .pattern_ty = the matched value is of type `{$pattern_ty}` @@ -317,13 +304,6 @@ mir_build_unconditional_recursion = function cannot return without recursing mir_build_unconditional_recursion_call_site_label = recursive call site -mir_build_uncovered = {$count -> - [1] pattern `{$witness_1}` - [2] patterns `{$witness_1}` and `{$witness_2}` - [3] patterns `{$witness_1}`, `{$witness_2}` and `{$witness_3}` - *[other] patterns `{$witness_1}`, `{$witness_2}`, `{$witness_3}` and {$remainder} more - } not covered - mir_build_union_field_requires_unsafe = access to union field is unsafe and requires unsafe block .note = the field may not be properly initialized: using uninitialized data will cause undefined behavior diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs index 58295b39755..8cad6976c0d 100644 --- a/compiler/rustc_mir_build/src/build/block.rs +++ b/compiler/rustc_mir_build/src/build/block.rs @@ -13,32 +13,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ast_block: BlockId, source_info: SourceInfo, ) -> BlockAnd<()> { - let Block { - region_scope, - opt_destruction_scope, - span, - ref stmts, - expr, - targeted_by_break, - safety_mode, - } = self.thir[ast_block]; + let Block { region_scope, span, ref stmts, expr, targeted_by_break, safety_mode } = + self.thir[ast_block]; let expr = expr.map(|expr| &self.thir[expr]); - self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| { - this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| { - if targeted_by_break { - this.in_breakable_scope(None, destination, span, |this| { - Some(this.ast_block_stmts( - destination, - block, - span, - stmts, - expr, - safety_mode, - region_scope, - )) - }) - } else { - this.ast_block_stmts( + self.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| { + if targeted_by_break { + this.in_breakable_scope(None, destination, span, |this| { + Some(this.ast_block_stmts( destination, block, span, @@ -46,9 +27,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr, safety_mode, region_scope, - ) - } - }) + )) + }) + } else { + this.ast_block_stmts( + destination, + block, + span, + stmts, + expr, + safety_mode, + region_scope, + ) + } }) } @@ -92,20 +83,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let source_info = this.source_info(span); for stmt in stmts { - let Stmt { ref kind, opt_destruction_scope } = this.thir[*stmt]; + let Stmt { ref kind } = this.thir[*stmt]; match kind { StmtKind::Expr { scope, expr } => { this.block_context.push(BlockFrame::Statement { ignores_expr_result: true }); + let si = (*scope, source_info); unpack!( - block = this.in_opt_scope( - opt_destruction_scope.map(|de| (de, source_info)), - |this| { - let si = (*scope, source_info); - this.in_scope(si, LintLevel::Inherited, |this| { - this.stmt_expr(block, &this.thir[*expr], Some(*scope)) - }) - } - ) + block = this.in_scope(si, LintLevel::Inherited, |this| { + this.stmt_expr(block, &this.thir[*expr], Some(*scope)) + }) ); } StmtKind::Let { @@ -221,43 +207,38 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let init = &this.thir[*initializer]; let initializer_span = init.span; + let scope = (*init_scope, source_info); let failure = unpack!( - block = this.in_opt_scope( - opt_destruction_scope.map(|de| (de, source_info)), - |this| { - let scope = (*init_scope, source_info); - this.in_scope(scope, *lint_level, |this| { - 
this.declare_bindings( - visibility_scope, - remainder_span, - pattern, - None, - Some((Some(&destination), initializer_span)), - ); - this.visit_primary_bindings( - pattern, - UserTypeProjections::none(), - &mut |this, _, _, _, node, span, _, _| { - this.storage_live_binding( - block, - node, - span, - OutsideGuard, - true, - ); - }, - ); - this.ast_let_else( + block = this.in_scope(scope, *lint_level, |this| { + this.declare_bindings( + visibility_scope, + remainder_span, + pattern, + None, + Some((Some(&destination), initializer_span)), + ); + this.visit_primary_bindings( + pattern, + UserTypeProjections::none(), + &mut |this, _, _, _, node, span, _, _| { + this.storage_live_binding( block, - init, - initializer_span, - *else_block, - &last_remainder_scope, - pattern, - ) - }) - } - ) + node, + span, + OutsideGuard, + true, + ); + }, + ); + this.ast_let_else( + block, + init, + initializer_span, + *else_block, + &last_remainder_scope, + pattern, + ) + }) ); this.cfg.goto(failure, source_info, failure_entry); @@ -298,25 +279,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { if let Some(init) = initializer { let init = &this.thir[*init]; let initializer_span = init.span; + let scope = (*init_scope, source_info); unpack!( - block = this.in_opt_scope( - opt_destruction_scope.map(|de| (de, source_info)), - |this| { - let scope = (*init_scope, source_info); - this.in_scope(scope, *lint_level, |this| { - this.declare_bindings( - visibility_scope, - remainder_span, - pattern, - None, - Some((None, initializer_span)), - ); - this.expr_into_pattern(block, pattern, init) - // irrefutable pattern - }) - }, - ) + block = this.in_scope(scope, *lint_level, |this| { + this.declare_bindings( + visibility_scope, + remainder_span, + pattern, + None, + Some((None, initializer_span)), + ); + this.expr_into_pattern(block, &pattern, init) + // irrefutable pattern + }) ) } else { let scope = (*init_scope, source_info); diff --git a/compiler/rustc_mir_build/src/build/cfg.rs b/compiler/rustc_mir_build/src/build/cfg.rs index fddcf9de7c7..2bd0e289731 100644 --- a/compiler/rustc_mir_build/src/build/cfg.rs +++ b/compiler/rustc_mir_build/src/build/cfg.rs @@ -101,6 +101,19 @@ impl<'tcx> CFG<'tcx> { self.push(block, stmt); } + /// Adds a dummy statement whose only role is to associate a span with its + /// enclosing block for the purposes of coverage instrumentation. + /// + /// This results in more accurate coverage reports for certain kinds of + /// syntax (e.g. `continue` or `if !`) that would otherwise not appear in MIR. + pub(crate) fn push_coverage_span_marker(&mut self, block: BasicBlock, source_info: SourceInfo) { + let kind = StatementKind::Coverage(Box::new(Coverage { + kind: coverage::CoverageKind::SpanMarker, + })); + let stmt = Statement { source_info, kind }; + self.push(block, stmt); + } + pub(crate) fn terminate( &mut self, block: BasicBlock, diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs index 90f950d59d5..541b87af797 100644 --- a/compiler/rustc_mir_build/src/build/matches/mod.rs +++ b/compiler/rustc_mir_build/src/build/matches/mod.rs @@ -90,6 +90,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let local_scope = this.local_scope(); let (success_block, failure_block) = this.in_if_then_scope(local_scope, expr_span, |this| { + // Help out coverage instrumentation by injecting a dummy statement with + // the original condition's span (including `!`). This fixes #115468. 
+ if this.tcx.sess.instrument_coverage() { + this.cfg.push_coverage_span_marker(block, this.source_info(expr_span)); + } this.then_else_break( block, &this.thir[arg], diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs index 993fee95895..25b79e6a523 100644 --- a/compiler/rustc_mir_build/src/build/scope.rs +++ b/compiler/rustc_mir_build/src/build/scope.rs @@ -90,7 +90,6 @@ use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::middle::region; use rustc_middle::mir::*; use rustc_middle::thir::{Expr, LintLevel}; -use rustc_middle::ty::Ty; use rustc_session::lint::Level; use rustc_span::{Span, DUMMY_SP}; @@ -537,27 +536,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { (then_block, else_block) } - pub(crate) fn in_opt_scope<F, R>( - &mut self, - opt_scope: Option<(region::Scope, SourceInfo)>, - f: F, - ) -> BlockAnd<R> - where - F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>, - { - debug!("in_opt_scope(opt_scope={:?})", opt_scope); - if let Some(region_scope) = opt_scope { - self.push_scope(region_scope); - } - let mut block; - let rv = unpack!(block = f(self)); - if let Some(region_scope) = opt_scope { - unpack!(block = self.pop_scope(region_scope, block)); - } - debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block); - block.and(rv) - } - /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. #[instrument(skip(self, f), level = "debug")] @@ -660,14 +638,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { (None, Some(_)) => { panic!("`return`, `become` and `break` with value and must have a destination") } - (None, None) if self.tcx.sess.instrument_coverage() => { - // Unlike `break` and `return`, which push an `Assign` statement to MIR, from which - // a Coverage code region can be generated, `continue` needs no `Assign`; but - // without one, the `InstrumentCoverage` MIR pass cannot generate a code region for - // `continue`. Coverage will be missing unless we add a dummy `Assign` to MIR. - self.add_dummy_assignment(span, block, source_info); + (None, None) => { + if self.tcx.sess.instrument_coverage() { + // Normally we wouldn't build any MIR in this case, but that makes it + // harder for coverage instrumentation to extract a relevant span for + // `continue` expressions. So here we inject a dummy statement with the + // desired span. + self.cfg.push_coverage_span_marker(block, source_info); + } } - (None, None) => {} } let region_scope = self.scopes.breakable_scopes[break_index].region_scope; @@ -723,14 +702,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume); } - // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue` - // statement. - fn add_dummy_assignment(&mut self, span: Span, block: BasicBlock, source_info: SourceInfo) { - let local_decl = LocalDecl::new(Ty::new_unit(self.tcx), span); - let temp_place = Place::from(self.local_decls.push(local_decl)); - self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx); - } - fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. 
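Note on the coverage hunks above: the new `CFG::push_coverage_span_marker` helper and its two call sites (the `if !cond` lowering in `matches/mod.rs` and the value-less `break`/`continue` path in `scope.rs`) all follow one pattern. When `-C instrument-coverage` is enabled, the MIR builder emits a statement with no runtime effect whose only job is to carry a source span for syntax that would otherwise leave nothing in the built MIR for the `InstrumentCoverage` pass to attach a counter to. The sketch below is an editor-added, self-contained illustration of that idea only; `Span`, `Statement`, and `BlockBuilder` are invented stand-ins, not the compiler's MIR types or APIs.

// Illustration only: toy stand-ins for the span-marker pattern, not rustc code.
#[derive(Debug, Clone, Copy)]
struct Span { lo: usize, hi: usize }

#[derive(Debug)]
enum Statement {
    /// Real work that already carries a useful span.
    Assign { target: &'static str, value: i64, span: Span },
    /// No-op marker: exists only so a later instrumentation pass can map a
    /// coverage counter back to `span`.
    CoverageSpanMarker { span: Span },
}

struct BlockBuilder {
    instrument_coverage: bool,
    statements: Vec<Statement>,
}

impl BlockBuilder {
    fn new(instrument_coverage: bool) -> Self {
        Self { instrument_coverage, statements: Vec::new() }
    }

    /// Analogue of `push_coverage_span_marker`: pushes the dummy statement
    /// unconditionally; callers decide whether instrumentation is enabled.
    fn push_coverage_span_marker(&mut self, span: Span) {
        self.statements.push(Statement::CoverageSpanMarker { span });
    }

    fn push_assign(&mut self, target: &'static str, value: i64, span: Span) {
        self.statements.push(Statement::Assign { target, value, span });
    }
}

fn main() {
    // Lowering something like `continue`, which normally adds no statements:
    // without a marker, coverage would have no span for it in this block.
    let continue_span = Span { lo: 120, hi: 128 };

    let mut block = BlockBuilder::new(true);
    block.push_assign("x", 1, Span { lo: 100, hi: 110 });
    if block.instrument_coverage {
        block.push_coverage_span_marker(continue_span);
    }

    for stmt in &block.statements {
        println!("{stmt:?}");
    }
}

The marker carries no semantics of its own; per the comments in the hunks above, it exists purely so the instrumentation pass has an accurate span to work with, which is why it replaces the earlier dummy-`Assign` workaround.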
diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs index 8d2a559e73c..9baae706dff 100644 --- a/compiler/rustc_mir_build/src/errors.rs +++ b/compiler/rustc_mir_build/src/errors.rs @@ -1,15 +1,12 @@ -use crate::{ - fluent_generated as fluent, - thir::pattern::{deconstruct_pat::WitnessPat, MatchCheckCtxt}, -}; +use crate::fluent_generated as fluent; use rustc_errors::DiagnosticArgValue; use rustc_errors::{ error_code, AddToDiagnostic, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, MultiSpan, SubdiagnosticMessage, }; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; -use rustc_middle::thir::Pat; use rustc_middle::ty::{self, Ty}; +use rustc_pattern_analysis::{cx::MatchCheckCtxt, errors::Uncovered}; use rustc_span::symbol::Symbol; use rustc_span::Span; @@ -812,94 +809,6 @@ pub struct NonPartialEqMatch<'tcx> { pub non_peq_ty: Ty<'tcx>, } -#[derive(LintDiagnostic)] -#[diag(mir_build_overlapping_range_endpoints)] -#[note] -pub struct OverlappingRangeEndpoints<'tcx> { - #[label(mir_build_range)] - pub range: Span, - #[subdiagnostic] - pub overlap: Vec<Overlap<'tcx>>, -} - -pub struct Overlap<'tcx> { - pub span: Span, - pub range: Pat<'tcx>, -} - -impl<'tcx> AddToDiagnostic for Overlap<'tcx> { - fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F) - where - F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage, - { - let Overlap { span, range } = self; - - // FIXME(mejrs) unfortunately `#[derive(LintDiagnostic)]` - // does not support `#[subdiagnostic(eager)]`... - let message = format!("this range overlaps on `{range}`..."); - diag.span_label(span, message); - } -} - -#[derive(LintDiagnostic)] -#[diag(mir_build_non_exhaustive_omitted_pattern)] -#[help] -#[note] -pub(crate) struct NonExhaustiveOmittedPattern<'tcx> { - pub scrut_ty: Ty<'tcx>, - #[subdiagnostic] - pub uncovered: Uncovered<'tcx>, -} - -#[derive(LintDiagnostic)] -#[diag(mir_build_non_exhaustive_omitted_pattern_lint_on_arm)] -#[help] -pub(crate) struct NonExhaustiveOmittedPatternLintOnArm { - #[label] - pub lint_span: Span, - #[suggestion(code = "#[{lint_level}({lint_name})]\n", applicability = "maybe-incorrect")] - pub suggest_lint_on_match: Option<Span>, - pub lint_level: &'static str, - pub lint_name: &'static str, -} - -#[derive(Subdiagnostic)] -#[label(mir_build_uncovered)] -pub(crate) struct Uncovered<'tcx> { - #[primary_span] - span: Span, - count: usize, - witness_1: Pat<'tcx>, - witness_2: Pat<'tcx>, - witness_3: Pat<'tcx>, - remainder: usize, -} - -impl<'tcx> Uncovered<'tcx> { - pub fn new<'p>( - span: Span, - cx: &MatchCheckCtxt<'p, 'tcx>, - witnesses: Vec<WitnessPat<'tcx>>, - ) -> Self { - let witness_1 = witnesses.get(0).unwrap().to_diagnostic_pat(cx); - Self { - span, - count: witnesses.len(), - // Substitute dummy values if witnesses is smaller than 3. These will never be read. 
- witness_2: witnesses - .get(1) - .map(|w| w.to_diagnostic_pat(cx)) - .unwrap_or_else(|| witness_1.clone()), - witness_3: witnesses - .get(2) - .map(|w| w.to_diagnostic_pat(cx)) - .unwrap_or_else(|| witness_1.clone()), - witness_1, - remainder: witnesses.len().saturating_sub(3), - } - } -} - #[derive(Diagnostic)] #[diag(mir_build_pattern_not_covered, code = "E0005")] pub(crate) struct PatternNotCovered<'s, 'tcx> { diff --git a/compiler/rustc_mir_build/src/thir/cx/block.rs b/compiler/rustc_mir_build/src/thir/cx/block.rs index 527eadb277e..1e93e126b70 100644 --- a/compiler/rustc_mir_build/src/thir/cx/block.rs +++ b/compiler/rustc_mir_build/src/thir/cx/block.rs @@ -13,15 +13,12 @@ impl<'tcx> Cx<'tcx> { // We have to eagerly lower the "spine" of the statements // in order to get the lexical scoping correctly. let stmts = self.mirror_stmts(block.hir_id.local_id, block.stmts); - let opt_destruction_scope = - self.region_scope_tree.opt_destruction_scope(block.hir_id.local_id); let block = Block { targeted_by_break: block.targeted_by_break, region_scope: region::Scope { id: block.hir_id.local_id, data: region::ScopeData::Node, }, - opt_destruction_scope, span: block.span, stmts, expr: block.expr.map(|expr| self.mirror_expr(expr)), @@ -49,7 +46,6 @@ impl<'tcx> Cx<'tcx> { .enumerate() .filter_map(|(index, stmt)| { let hir_id = stmt.hir_id; - let opt_dxn_ext = self.region_scope_tree.opt_destruction_scope(hir_id.local_id); match stmt.kind { hir::StmtKind::Expr(expr) | hir::StmtKind::Semi(expr) => { let stmt = Stmt { @@ -60,7 +56,6 @@ impl<'tcx> Cx<'tcx> { }, expr: self.mirror_expr(expr), }, - opt_destruction_scope: opt_dxn_ext, }; Some(self.thir.stmts.push(stmt)) } @@ -122,7 +117,6 @@ impl<'tcx> Cx<'tcx> { lint_level: LintLevel::Explicit(local.hir_id), span, }, - opt_destruction_scope: opt_dxn_ext, }; Some(self.thir.stmts.push(stmt)) } diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 9759c72bf58..31874b29bb3 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -54,7 +54,7 @@ impl<'tcx> Cx<'tcx> { trace!(?expr.ty, "after adjustments"); - // Next, wrap this up in the expr's scope. + // Finally, wrap this up in the expr's scope. expr = Expr { temp_lifetime: expr.temp_lifetime, ty: expr.ty, @@ -66,22 +66,6 @@ impl<'tcx> Cx<'tcx> { }, }; - // Finally, create a destruction scope, if any. - if let Some(region_scope) = - self.region_scope_tree.opt_destruction_scope(hir_expr.hir_id.local_id) - { - expr = Expr { - temp_lifetime: expr.temp_lifetime, - ty: expr.ty, - span: hir_expr.span, - kind: ExprKind::Scope { - region_scope, - value: self.thir.exprs.push(expr), - lint_level: LintLevel::Inherited, - }, - }; - } - // OK, all done! 
self.thir.exprs.push(expr) } diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs index b72b9da21b7..e8a972005b0 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs @@ -1,7 +1,9 @@ -use super::deconstruct_pat::{Constructor, DeconstructedPat, WitnessPat}; -use super::usefulness::{ - compute_match_usefulness, MatchArm, MatchCheckCtxt, Reachability, UsefulnessReport, -}; +use rustc_pattern_analysis::constructor::Constructor; +use rustc_pattern_analysis::cx::MatchCheckCtxt; +use rustc_pattern_analysis::errors::Uncovered; +use rustc_pattern_analysis::pat::{DeconstructedPat, WitnessPat}; +use rustc_pattern_analysis::usefulness::{Usefulness, UsefulnessReport}; +use rustc_pattern_analysis::{analyze_match, MatchArm}; use crate::errors::*; @@ -42,7 +44,7 @@ pub(crate) fn check_match(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), Err for param in thir.params.iter() { if let Some(box ref pattern) = param.pat { - visitor.check_binding_is_irrefutable(pattern, "function argument", None); + visitor.check_binding_is_irrefutable(pattern, "function argument", None, None); } } visitor.error @@ -254,10 +256,11 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { self.with_lint_level(lint_level, |this| this.visit_land_rhs(&this.thir[value])) } ExprKind::Let { box ref pat, expr } => { + let expr = &self.thir()[expr]; self.with_let_source(LetSource::None, |this| { - this.visit_expr(&this.thir()[expr]); + this.visit_expr(expr); }); - Ok(Some((ex.span, self.is_let_irrefutable(pat)?))) + Ok(Some((ex.span, self.is_let_irrefutable(pat, Some(expr))?))) } _ => { self.with_let_source(LetSource::None, |this| { @@ -283,39 +286,118 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { check_borrow_conflicts_in_at_patterns(self, pat); check_for_bindings_named_same_as_variants(self, pat, refutable); }); - Ok(cx.pattern_arena.alloc(DeconstructedPat::from_pat(cx, pat))) + Ok(cx.pattern_arena.alloc(cx.lower_pat(pat))) + } + } + + /// Inspects the match scrutinee expression to determine whether the place it evaluates to may + /// hold invalid data. + fn is_known_valid_scrutinee(&self, scrutinee: &Expr<'tcx>) -> bool { + use ExprKind::*; + match &scrutinee.kind { + // Both pointers and references can validly point to a place with invalid data. + Deref { .. } => false, + // Inherit validity of the parent place, unless the parent is an union. + Field { lhs, .. } => { + let lhs = &self.thir()[*lhs]; + match lhs.ty.kind() { + ty::Adt(def, _) if def.is_union() => false, + _ => self.is_known_valid_scrutinee(lhs), + } + } + // Essentially a field access. + Index { lhs, .. } => { + let lhs = &self.thir()[*lhs]; + self.is_known_valid_scrutinee(lhs) + } + + // No-op. + Scope { value, .. } => self.is_known_valid_scrutinee(&self.thir()[*value]), + + // Casts don't cause a load. + NeverToAny { source } + | Cast { source } + | Use { source } + | PointerCoercion { source, .. } + | PlaceTypeAscription { source, .. } + | ValueTypeAscription { source, .. } => { + self.is_known_valid_scrutinee(&self.thir()[*source]) + } + + // These diverge. + Become { .. } | Break { .. } | Continue { .. } | Return { .. } => true, + + // These are statements that evaluate to `()`. + Assign { .. } | AssignOp { .. } | InlineAsm { .. } | Let { .. } => true, + + // These evaluate to a value. + AddressOf { .. } + | Adt { .. } + | Array { .. } + | Binary { .. } + | Block { .. } + | Borrow { .. 
} + | Box { .. } + | Call { .. } + | Closure { .. } + | ConstBlock { .. } + | ConstParam { .. } + | If { .. } + | Literal { .. } + | LogicalOp { .. } + | Loop { .. } + | Match { .. } + | NamedConst { .. } + | NonHirLiteral { .. } + | OffsetOf { .. } + | Repeat { .. } + | StaticRef { .. } + | ThreadLocalRef { .. } + | Tuple { .. } + | Unary { .. } + | UpvarRef { .. } + | VarRef { .. } + | ZstLiteral { .. } + | Yield { .. } => true, } } fn new_cx( &self, refutability: RefutableFlag, - match_span: Option<Span>, + whole_match_span: Option<Span>, + scrutinee: Option<&Expr<'tcx>>, scrut_span: Span, ) -> MatchCheckCtxt<'p, 'tcx> { let refutable = match refutability { Irrefutable => false, Refutable => true, }; + // If we don't have a scrutinee we're either a function parameter or a `let x;`. Both cases + // require validity. + let known_valid_scrutinee = + scrutinee.map(|scrut| self.is_known_valid_scrutinee(scrut)).unwrap_or(true); MatchCheckCtxt { tcx: self.tcx, param_env: self.param_env, module: self.tcx.parent_module(self.lint_level).to_def_id(), pattern_arena: self.pattern_arena, match_lint_level: self.lint_level, - match_span, + whole_match_span, scrut_span, refutable, + known_valid_scrutinee, } } #[instrument(level = "trace", skip(self))] fn check_let(&mut self, pat: &Pat<'tcx>, scrutinee: Option<ExprId>, span: Span) { assert!(self.let_source != LetSource::None); + let scrut = scrutinee.map(|id| &self.thir[id]); if let LetSource::PlainLet = self.let_source { - self.check_binding_is_irrefutable(pat, "local binding", Some(span)) + self.check_binding_is_irrefutable(pat, "local binding", scrut, Some(span)) } else { - let Ok(refutability) = self.is_let_irrefutable(pat) else { return }; + let Ok(refutability) = self.is_let_irrefutable(pat, scrut) else { return }; if matches!(refutability, Irrefutable) { report_irrefutable_let_patterns( self.tcx, @@ -336,7 +418,7 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { expr_span: Span, ) { let scrut = &self.thir[scrut]; - let cx = self.new_cx(Refutable, Some(expr_span), scrut.span); + let cx = self.new_cx(Refutable, Some(expr_span), Some(scrut), scrut.span); let mut tarms = Vec::with_capacity(arms.len()); for &arm in arms { @@ -353,7 +435,7 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { } let scrut_ty = scrut.ty; - let report = compute_match_usefulness(&cx, &tarms, scrut_ty); + let report = analyze_match(&cx, &tarms, scrut_ty); match source { // Don't report arm reachability of desugared `match $iter.into_iter() { iter => .. }` @@ -377,7 +459,12 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { debug_assert_eq!(pat.span.desugaring_kind(), Some(DesugaringKind::ForLoop)); let PatKind::Variant { ref subpatterns, .. } = pat.kind else { bug!() }; let [pat_field] = &subpatterns[..] 
else { bug!() }; - self.check_binding_is_irrefutable(&pat_field.pattern, "`for` loop binding", None); + self.check_binding_is_irrefutable( + &pat_field.pattern, + "`for` loop binding", + None, + None, + ); } else { self.error = Err(report_non_exhaustive_match( &cx, self.thir, scrut_ty, scrut.span, witnesses, arms, expr_span, @@ -457,16 +544,21 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { &mut self, pat: &Pat<'tcx>, refutability: RefutableFlag, + scrut: Option<&Expr<'tcx>>, ) -> Result<(MatchCheckCtxt<'p, 'tcx>, UsefulnessReport<'p, 'tcx>), ErrorGuaranteed> { - let cx = self.new_cx(refutability, None, pat.span); + let cx = self.new_cx(refutability, None, scrut, pat.span); let pat = self.lower_pattern(&cx, pat)?; let arms = [MatchArm { pat, hir_id: self.lint_level, has_guard: false }]; - let report = compute_match_usefulness(&cx, &arms, pat.ty()); + let report = analyze_match(&cx, &arms, pat.ty()); Ok((cx, report)) } - fn is_let_irrefutable(&mut self, pat: &Pat<'tcx>) -> Result<RefutableFlag, ErrorGuaranteed> { - let (cx, report) = self.analyze_binding(pat, Refutable)?; + fn is_let_irrefutable( + &mut self, + pat: &Pat<'tcx>, + scrut: Option<&Expr<'tcx>>, + ) -> Result<RefutableFlag, ErrorGuaranteed> { + let (cx, report) = self.analyze_binding(pat, Refutable, scrut)?; // Report if the pattern is unreachable, which can only occur when the type is uninhabited. // This also reports unreachable sub-patterns. report_arm_reachability(&cx, &report); @@ -476,10 +568,16 @@ impl<'thir, 'p, 'tcx> MatchVisitor<'thir, 'p, 'tcx> { } #[instrument(level = "trace", skip(self))] - fn check_binding_is_irrefutable(&mut self, pat: &Pat<'tcx>, origin: &str, sp: Option<Span>) { + fn check_binding_is_irrefutable( + &mut self, + pat: &Pat<'tcx>, + origin: &str, + scrut: Option<&Expr<'tcx>>, + sp: Option<Span>, + ) { let pattern_ty = pat.ty; - let Ok((cx, report)) = self.analyze_binding(pat, Irrefutable) else { return }; + let Ok((cx, report)) = self.analyze_binding(pat, Irrefutable, scrut) else { return }; let witnesses = report.non_exhaustiveness_witnesses; if witnesses.is_empty() { // The pattern is irrefutable. @@ -749,18 +847,18 @@ fn report_arm_reachability<'p, 'tcx>( ); }; - use Reachability::*; + use Usefulness::*; let mut catchall = None; for (arm, is_useful) in report.arm_usefulness.iter() { match is_useful { - Unreachable => report_unreachable_pattern(arm.pat.span(), arm.hir_id, catchall), - Reachable(unreachables) if unreachables.is_empty() => {} - // The arm is reachable, but contains unreachable subpatterns (from or-patterns). - Reachable(unreachables) => { - let mut unreachables = unreachables.clone(); + Redundant => report_unreachable_pattern(arm.pat.span(), arm.hir_id, catchall), + Useful(redundant_spans) if redundant_spans.is_empty() => {} + // The arm is reachable, but contains redundant subpatterns (from or-patterns). + Useful(redundant_spans) => { + let mut redundant_spans = redundant_spans.clone(); // Emit lints in the order in which they occur in the file. 
- unreachables.sort_unstable(); - for span in unreachables { + redundant_spans.sort_unstable(); + for span in redundant_spans { report_unreachable_pattern(span, arm.hir_id, None); } } @@ -828,7 +926,7 @@ fn report_non_exhaustive_match<'p, 'tcx>( pattern = if witnesses.len() < 4 { witnesses .iter() - .map(|witness| witness.to_diagnostic_pat(cx).to_string()) + .map(|witness| cx.hoist_witness_pat(witness).to_string()) .collect::<Vec<String>>() .join(" | ") } else { @@ -852,7 +950,7 @@ fn report_non_exhaustive_match<'p, 'tcx>( if !is_empty_match { let mut non_exhaustive_tys = FxHashSet::default(); // Look at the first witness. - collect_non_exhaustive_tys(cx.tcx, &witnesses[0], &mut non_exhaustive_tys); + collect_non_exhaustive_tys(cx, &witnesses[0], &mut non_exhaustive_tys); for ty in non_exhaustive_tys { if ty.is_ptr_sized_integral() { @@ -987,13 +1085,13 @@ fn joined_uncovered_patterns<'p, 'tcx>( witnesses: &[WitnessPat<'tcx>], ) -> String { const LIMIT: usize = 3; - let pat_to_str = |pat: &WitnessPat<'tcx>| pat.to_diagnostic_pat(cx).to_string(); + let pat_to_str = |pat: &WitnessPat<'tcx>| cx.hoist_witness_pat(pat).to_string(); match witnesses { [] => bug!(), - [witness] => format!("`{}`", witness.to_diagnostic_pat(cx)), + [witness] => format!("`{}`", cx.hoist_witness_pat(witness)), [head @ .., tail] if head.len() < LIMIT => { let head: Vec<_> = head.iter().map(pat_to_str).collect(); - format!("`{}` and `{}`", head.join("`, `"), tail.to_diagnostic_pat(cx)) + format!("`{}` and `{}`", head.join("`, `"), cx.hoist_witness_pat(tail)) } _ => { let (head, tail) = witnesses.split_at(LIMIT); @@ -1004,7 +1102,7 @@ fn joined_uncovered_patterns<'p, 'tcx>( } fn collect_non_exhaustive_tys<'tcx>( - tcx: TyCtxt<'tcx>, + cx: &MatchCheckCtxt<'_, 'tcx>, pat: &WitnessPat<'tcx>, non_exhaustive_tys: &mut FxHashSet<Ty<'tcx>>, ) { @@ -1012,13 +1110,13 @@ fn collect_non_exhaustive_tys<'tcx>( non_exhaustive_tys.insert(pat.ty()); } if let Constructor::IntRange(range) = pat.ctor() { - if range.is_beyond_boundaries(pat.ty(), tcx) { + if cx.is_range_beyond_boundaries(range, pat.ty()) { // The range denotes the values before `isize::MIN` or the values after `usize::MAX`/`isize::MAX`. non_exhaustive_tys.insert(pat.ty()); } } pat.iter_fields() - .for_each(|field_pat| collect_non_exhaustive_tys(tcx, field_pat, non_exhaustive_tys)) + .for_each(|field_pat| collect_non_exhaustive_tys(cx, field_pat, non_exhaustive_tys)) } fn report_adt_defined_here<'tcx>( diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs deleted file mode 100644 index ddcceeb7ef6..00000000000 --- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs +++ /dev/null @@ -1,1898 +0,0 @@ -//! As explained in [`super::usefulness`], values and patterns are made from constructors applied to -//! fields. This file defines a `Constructor` enum, a `Fields` struct, and various operations to -//! manipulate them and convert them from/to patterns. -//! -//! There are two important bits of core logic in this file: constructor inclusion and constructor -//! splitting. Constructor inclusion, i.e. whether a constructor is included in/covered by another, -//! is straightforward and defined in [`Constructor::is_covered_by`]. -//! -//! Constructor splitting is mentioned in [`super::usefulness`] but not detailed. We describe it -//! precisely here. -//! -//! -//! # Constructor grouping and splitting -//! -//! 
As explained in the corresponding section in [`super::usefulness`], to make usefulness tractable -//! we need to group together constructors that have the same effect when they are used to -//! specialize the matrix. -//! -//! Example: -//! ```compile_fail,E0004 -//! match (0, false) { -//! (0 ..=100, true) => {} -//! (50..=150, false) => {} -//! (0 ..=200, _) => {} -//! } -//! ``` -//! -//! In this example we can restrict specialization to 5 cases: `0..50`, `50..=100`, `101..=150`, -//! `151..=200` and `200..`. -//! -//! In [`super::usefulness`], we had said that `specialize` only takes value-only constructors. We -//! now relax this restriction: we allow `specialize` to take constructors like `0..50` as long as -//! we're careful to only do that with constructors that make sense. For example, `specialize(0..50, -//! (0..=100, true))` is sensible, but `specialize(50..=200, (0..=100, true))` is not. -//! -//! Constructor splitting looks at the constructors in the first column of the matrix and constructs -//! such a sensible set of constructors. Formally, we want to find a smallest disjoint set of -//! constructors: -//! - Whose union covers the whole type, and -//! - That have no non-trivial intersection with any of the constructors in the column (i.e. they're -//! each either disjoint with or covered by any given column constructor). -//! -//! We compute this in two steps: first [`ConstructorSet::for_ty`] determines the set of all -//! possible constructors for the type. Then [`ConstructorSet::split`] looks at the column of -//! constructors and splits the set into groups accordingly. The precise invariants of -//! [`ConstructorSet::split`] is described in [`SplitConstructorSet`]. -//! -//! Constructor splitting has two interesting special cases: integer range splitting (see -//! [`IntRange::split`]) and slice splitting (see [`Slice::split`]). -//! -//! -//! # The `Missing` constructor -//! -//! We detail a special case of constructor splitting that is a bit subtle. Take the following: -//! -//! ``` -//! enum Direction { North, South, East, West } -//! # let wind = (Direction::North, 0u8); -//! match wind { -//! (Direction::North, 50..) => {} -//! (_, _) => {} -//! } -//! ``` -//! -//! Here we expect constructor splitting to output two cases: `North`, and "everything else". This -//! "everything else" is represented by [`Constructor::Missing`]. Unlike other constructors, it's a -//! bit contextual: to know the exact list of constructors it represents we have to look at the -//! column. In practice however we don't need to, because by construction it only matches rows that -//! have wildcards. This is how this constructor is special: the only constructor that covers it is -//! `Wildcard`. -//! -//! The only place where we care about which constructors `Missing` represents is in diagnostics -//! (see `super::usefulness::WitnessMatrix::apply_constructor`). -//! -//! We choose whether to specialize with `Missing` in -//! `super::usefulness::compute_exhaustiveness_and_reachability`. -//! -//! -//! -//! ## Opaque patterns -//! -//! Some patterns, such as constants that are not allowed to be matched structurally, cannot be -//! inspected, which we handle with `Constructor::Opaque`. Since we know nothing of these patterns, -//! we assume they never cover each other. In order to respect the invariants of -//! [`SplitConstructorSet`], we give each `Opaque` constructor a unique id so we can recognize it. 
- -use std::cell::Cell; -use std::cmp::{self, max, min, Ordering}; -use std::fmt; -use std::iter::once; - -use smallvec::{smallvec, SmallVec}; - -use rustc_apfloat::ieee::{DoubleS, IeeeFloat, SingleS}; -use rustc_data_structures::captures::Captures; -use rustc_data_structures::fx::FxHashSet; -use rustc_hir::RangeEnd; -use rustc_index::Idx; -use rustc_middle::middle::stability::EvalResult; -use rustc_middle::mir; -use rustc_middle::mir::interpret::Scalar; -use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange, PatRangeBoundary}; -use rustc_middle::ty::layout::IntegerExt; -use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef}; -use rustc_span::{Span, DUMMY_SP}; -use rustc_target::abi::{FieldIdx, Integer, VariantIdx, FIRST_VARIANT}; - -use self::Constructor::*; -use self::MaybeInfiniteInt::*; -use self::SliceKind::*; - -use super::usefulness::{MatchCheckCtxt, PatCtxt}; - -/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns. -fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> { - fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) { - if let PatKind::Or { pats } = &pat.kind { - for pat in pats.iter() { - expand(pat, vec); - } - } else { - vec.push(pat) - } - } - - let mut pats = Vec::new(); - expand(pat, &mut pats); - pats -} - -/// Whether we have seen a constructor in the column or not. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -enum Presence { - Unseen, - Seen, -} - -/// A possibly infinite integer. Values are encoded such that the ordering on `u128` matches the -/// natural order on the original type. For example, `-128i8` is encoded as `0` and `127i8` as -/// `255`. See `signed_bias` for details. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub(crate) enum MaybeInfiniteInt { - NegInfinity, - /// Encoded value. DO NOT CONSTRUCT BY HAND; use `new_finite`. - Finite(u128), - /// The integer after `u128::MAX`. We need it to represent `x..=u128::MAX` as an exclusive range. - JustAfterMax, - PosInfinity, -} - -impl MaybeInfiniteInt { - // The return value of `signed_bias` should be XORed with a value to encode/decode it. - fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 { - match *ty.kind() { - ty::Int(ity) => { - let bits = Integer::from_int_ty(&tcx, ity).size().bits() as u128; - 1u128 << (bits - 1) - } - _ => 0, - } - } - - fn new_finite(tcx: TyCtxt<'_>, ty: Ty<'_>, bits: u128) -> Self { - let bias = Self::signed_bias(tcx, ty); - // Perform a shift if the underlying types are signed, which makes the interval arithmetic - // type-independent. - let x = bits ^ bias; - Finite(x) - } - fn from_pat_range_bdy<'tcx>( - bdy: PatRangeBoundary<'tcx>, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - ) -> Self { - match bdy { - PatRangeBoundary::NegInfinity => NegInfinity, - PatRangeBoundary::Finite(value) => { - let bits = value.eval_bits(tcx, param_env); - Self::new_finite(tcx, ty, bits) - } - PatRangeBoundary::PosInfinity => PosInfinity, - } - } - - /// Used only for diagnostics. - /// Note: it is possible to get `isize/usize::MAX+1` here, as explained in the doc for - /// [`IntRange::split`]. This cannot be represented as a `Const`, so we represent it with - /// `PosInfinity`. 
- fn to_diagnostic_pat_range_bdy<'tcx>( - self, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - ) -> PatRangeBoundary<'tcx> { - match self { - NegInfinity => PatRangeBoundary::NegInfinity, - Finite(x) => { - let bias = Self::signed_bias(tcx, ty); - let bits = x ^ bias; - let size = ty.primitive_size(tcx); - match Scalar::try_from_uint(bits, size) { - Some(scalar) => { - let value = mir::Const::from_scalar(tcx, scalar, ty); - PatRangeBoundary::Finite(value) - } - // The value doesn't fit. Since `x >= 0` and 0 always encodes the minimum value - // for a type, the problem isn't that the value is too small. So it must be too - // large. - None => PatRangeBoundary::PosInfinity, - } - } - JustAfterMax | PosInfinity => PatRangeBoundary::PosInfinity, - } - } - - /// Note: this will not turn a finite value into an infinite one or vice-versa. - pub(crate) fn minus_one(self) -> Self { - match self { - Finite(n) => match n.checked_sub(1) { - Some(m) => Finite(m), - None => bug!(), - }, - JustAfterMax => Finite(u128::MAX), - x => x, - } - } - /// Note: this will not turn a finite value into an infinite one or vice-versa. - pub(crate) fn plus_one(self) -> Self { - match self { - Finite(n) => match n.checked_add(1) { - Some(m) => Finite(m), - None => JustAfterMax, - }, - JustAfterMax => bug!(), - x => x, - } - } -} - -/// An exclusive interval, used for precise integer exhaustiveness checking. `IntRange`s always -/// store a contiguous range. -/// -/// `IntRange` is never used to encode an empty range or a "range" that wraps around the (offset) -/// space: i.e., `range.lo < range.hi`. -#[derive(Clone, Copy, PartialEq, Eq)] -pub(crate) struct IntRange { - pub(crate) lo: MaybeInfiniteInt, // Must not be `PosInfinity`. - pub(crate) hi: MaybeInfiniteInt, // Must not be `NegInfinity`. -} - -impl IntRange { - #[inline] - pub(super) fn is_integral(ty: Ty<'_>) -> bool { - matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_)) - } - - /// Best effort; will not know that e.g. `255u8..` is a singleton. - pub(super) fn is_singleton(&self) -> bool { - // Since `lo` and `hi` can't be the same `Infinity` and `plus_one` never changes from finite - // to infinite, this correctly only detects ranges that contain exacly one `Finite(x)`. - self.lo.plus_one() == self.hi - } - - #[inline] - fn from_bits<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, bits: u128) -> IntRange { - let x = MaybeInfiniteInt::new_finite(tcx, ty, bits); - IntRange { lo: x, hi: x.plus_one() } - } - - #[inline] - fn from_range(lo: MaybeInfiniteInt, mut hi: MaybeInfiniteInt, end: RangeEnd) -> IntRange { - if end == RangeEnd::Included { - hi = hi.plus_one(); - } - if lo >= hi { - // This should have been caught earlier by E0030. - bug!("malformed range pattern: {lo:?}..{hi:?}"); - } - IntRange { lo, hi } - } - - fn is_subrange(&self, other: &Self) -> bool { - other.lo <= self.lo && self.hi <= other.hi - } - - fn intersection(&self, other: &Self) -> Option<Self> { - if self.lo < other.hi && other.lo < self.hi { - Some(IntRange { lo: max(self.lo, other.lo), hi: min(self.hi, other.hi) }) - } else { - None - } - } - - /// Partition a range of integers into disjoint subranges. This does constructor splitting for - /// integer ranges as explained at the top of the file. - /// - /// This returns an output that covers `self`. The output is split so that the only - /// intersections between an output range and a column range are inclusions. No output range - /// straddles the boundary of one of the inputs. 
- /// - /// Additionally, we track for each output range whether it is covered by one of the column ranges or not. - /// - /// The following input: - /// ```text - /// (--------------------------) // `self` - /// (------) (----------) (-) - /// (------) (--------) - /// ``` - /// is first intersected with `self`: - /// ```text - /// (--------------------------) // `self` - /// (----) (----------) (-) - /// (------) (--------) - /// ``` - /// and then iterated over as follows: - /// ```text - /// (-(--)-(-)-(------)-)--(-)- - /// ``` - /// where each sequence of dashes is an output range, and dashes outside parentheses are marked - /// as `Presence::Missing`. - /// - /// ## `isize`/`usize` - /// - /// Whereas a wildcard of type `i32` stands for the range `i32::MIN..=i32::MAX`, a `usize` - /// wildcard stands for `0..PosInfinity` and a `isize` wildcard stands for - /// `NegInfinity..PosInfinity`. In other words, as far as `IntRange` is concerned, there are - /// values before `isize::MIN` and after `usize::MAX`/`isize::MAX`. - /// This is to avoid e.g. `0..(u32::MAX as usize)` from being exhaustive on one architecture and - /// not others. This was decided in <https://github.com/rust-lang/rfcs/pull/2591>. - /// - /// These infinities affect splitting subtly: it is possible to get `NegInfinity..0` and - /// `usize::MAX+1..PosInfinity` in the output. Diagnostics must be careful to handle these - /// fictitious ranges sensibly. - fn split( - &self, - column_ranges: impl Iterator<Item = IntRange>, - ) -> impl Iterator<Item = (Presence, IntRange)> { - // The boundaries of ranges in `column_ranges` intersected with `self`. - // We do parenthesis matching for input ranges. A boundary counts as +1 if it starts - // a range and -1 if it ends it. When the count is > 0 between two boundaries, we - // are within an input range. - let mut boundaries: Vec<(MaybeInfiniteInt, isize)> = column_ranges - .filter_map(|r| self.intersection(&r)) - .flat_map(|r| [(r.lo, 1), (r.hi, -1)]) - .collect(); - // We sort by boundary, and for each boundary we sort the "closing parentheses" first. The - // order of +1/-1 for a same boundary value is actually irrelevant, because we only look at - // the accumulated count between distinct boundary values. - boundaries.sort_unstable(); - - // Accumulate parenthesis counts. - let mut paren_counter = 0isize; - // Gather pairs of adjacent boundaries. - let mut prev_bdy = self.lo; - boundaries - .into_iter() - // End with the end of the range. The count is ignored. - .chain(once((self.hi, 0))) - // List pairs of adjacent boundaries and the count between them. - .map(move |(bdy, delta)| { - // `delta` affects the count as we cross `bdy`, so the relevant count between - // `prev_bdy` and `bdy` is untouched by `delta`. - let ret = (prev_bdy, paren_counter, bdy); - prev_bdy = bdy; - paren_counter += delta; - ret - }) - // Skip empty ranges. - .filter(|&(prev_bdy, _, bdy)| prev_bdy != bdy) - // Convert back to ranges. - .map(move |(prev_bdy, paren_count, bdy)| { - use Presence::*; - let presence = if paren_count > 0 { Seen } else { Unseen }; - let range = IntRange { lo: prev_bdy, hi: bdy }; - (presence, range) - }) - } - - /// Whether the range denotes the fictitious values before `isize::MIN` or after - /// `usize::MAX`/`isize::MAX` (see doc of [`IntRange::split`] for why these exist). 
- pub(crate) fn is_beyond_boundaries<'tcx>(&self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> bool { - ty.is_ptr_sized_integral() && { - // The two invalid ranges are `NegInfinity..isize::MIN` (represented as - // `NegInfinity..0`), and `{u,i}size::MAX+1..PosInfinity`. `to_diagnostic_pat_range_bdy` - // converts `MAX+1` to `PosInfinity`, and we couldn't have `PosInfinity` in `self.lo` - // otherwise. - let lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx); - matches!(lo, PatRangeBoundary::PosInfinity) - || matches!(self.hi, MaybeInfiniteInt::Finite(0)) - } - } - /// Only used for displaying the range. - pub(super) fn to_diagnostic_pat<'tcx>(&self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Pat<'tcx> { - let kind = if matches!((self.lo, self.hi), (NegInfinity, PosInfinity)) { - PatKind::Wild - } else if self.is_singleton() { - let lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx); - let value = lo.as_finite().unwrap(); - PatKind::Constant { value } - } else { - // We convert to an inclusive range for diagnostics. - let mut end = RangeEnd::Included; - let mut lo = self.lo.to_diagnostic_pat_range_bdy(ty, tcx); - if matches!(lo, PatRangeBoundary::PosInfinity) { - // The only reason to get `PosInfinity` here is the special case where - // `to_diagnostic_pat_range_bdy` found `{u,i}size::MAX+1`. So the range denotes the - // fictitious values after `{u,i}size::MAX` (see [`IntRange::split`] for why we do - // this). We show this to the user as `usize::MAX..` which is slightly incorrect but - // probably clear enough. - let c = ty.numeric_max_val(tcx).unwrap(); - let value = mir::Const::from_ty_const(c, tcx); - lo = PatRangeBoundary::Finite(value); - } - let hi = if matches!(self.hi, MaybeInfiniteInt::Finite(0)) { - // The range encodes `..ty::MIN`, so we can't convert it to an inclusive range. - end = RangeEnd::Excluded; - self.hi - } else { - self.hi.minus_one() - }; - let hi = hi.to_diagnostic_pat_range_bdy(ty, tcx); - PatKind::Range(Box::new(PatRange { lo, hi, end, ty })) - }; - - Pat { ty, span: DUMMY_SP, kind } - } -} - -/// Note: this will render signed ranges incorrectly. To render properly, convert to a pattern -/// first. -impl fmt::Debug for IntRange { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Finite(lo) = self.lo { - write!(f, "{lo}")?; - } - write!(f, "{}", RangeEnd::Excluded)?; - if let Finite(hi) = self.hi { - write!(f, "{hi}")?; - } - Ok(()) - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum SliceKind { - /// Patterns of length `n` (`[x, y]`). - FixedLen(usize), - /// Patterns using the `..` notation (`[x, .., y]`). - /// Captures any array constructor of `length >= i + j`. - /// In the case where `array_len` is `Some(_)`, - /// this indicates that we only care about the first `i` and the last `j` values of the array, - /// and everything in between is a wildcard `_`. - VarLen(usize, usize), -} - -impl SliceKind { - fn arity(self) -> usize { - match self { - FixedLen(length) => length, - VarLen(prefix, suffix) => prefix + suffix, - } - } - - /// Whether this pattern includes patterns of length `other_len`. - fn covers_length(self, other_len: usize) -> bool { - match self { - FixedLen(len) => len == other_len, - VarLen(prefix, suffix) => prefix + suffix <= other_len, - } - } -} - -/// A constructor for array and slice patterns. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub(super) struct Slice { - /// `None` if the matched value is a slice, `Some(n)` if it is an array of size `n`. 
- array_len: Option<usize>, - /// The kind of pattern it is: fixed-length `[x, y]` or variable length `[x, .., y]`. - kind: SliceKind, -} - -impl Slice { - fn new(array_len: Option<usize>, kind: SliceKind) -> Self { - let kind = match (array_len, kind) { - // If the middle `..` is empty, we effectively have a fixed-length pattern. - (Some(len), VarLen(prefix, suffix)) if prefix + suffix >= len => FixedLen(len), - _ => kind, - }; - Slice { array_len, kind } - } - - fn arity(self) -> usize { - self.kind.arity() - } - - /// See `Constructor::is_covered_by` - fn is_covered_by(self, other: Self) -> bool { - other.kind.covers_length(self.arity()) - } - - /// This computes constructor splitting for variable-length slices, as explained at the top of - /// the file. - /// - /// A slice pattern `[x, .., y]` behaves like the infinite or-pattern `[x, y] | [x, _, y] | [x, - /// _, _, y] | etc`. The corresponding value constructors are fixed-length array constructors of - /// corresponding lengths. We obviously can't list this infinitude of constructors. - /// Thankfully, it turns out that for each finite set of slice patterns, all sufficiently large - /// array lengths are equivalent. - /// - /// Let's look at an example, where we are trying to split the last pattern: - /// ``` - /// # fn foo(x: &[bool]) { - /// match x { - /// [true, true, ..] => {} - /// [.., false, false] => {} - /// [..] => {} - /// } - /// # } - /// ``` - /// Here are the results of specialization for the first few lengths: - /// ``` - /// # fn foo(x: &[bool]) { match x { - /// // length 0 - /// [] => {} - /// // length 1 - /// [_] => {} - /// // length 2 - /// [true, true] => {} - /// [false, false] => {} - /// [_, _] => {} - /// // length 3 - /// [true, true, _ ] => {} - /// [_, false, false] => {} - /// [_, _, _ ] => {} - /// // length 4 - /// [true, true, _, _ ] => {} - /// [_, _, false, false] => {} - /// [_, _, _, _ ] => {} - /// // length 5 - /// [true, true, _, _, _ ] => {} - /// [_, _, _, false, false] => {} - /// [_, _, _, _, _ ] => {} - /// # _ => {} - /// # }} - /// ``` - /// - /// We see that above length 4, we are simply inserting columns full of wildcards in the middle. - /// This means that specialization and witness computation with slices of length `l >= 4` will - /// give equivalent results regardless of `l`. This applies to any set of slice patterns: there - /// will be a length `L` above which all lengths behave the same. This is exactly what we need - /// for constructor splitting. - /// - /// A variable-length slice pattern covers all lengths from its arity up to infinity. As we just - /// saw, we can split this in two: lengths below `L` are treated individually with a - /// fixed-length slice each; lengths above `L` are grouped into a single variable-length slice - /// constructor. - /// - /// For each variable-length slice pattern `p` with a prefix of length `plₚ` and suffix of - /// length `slₚ`, only the first `plₚ` and the last `slₚ` elements are examined. Therefore, as - /// long as `L` is positive (to avoid concerns about empty types), all elements after the - /// maximum prefix length and before the maximum suffix length are not examined by any - /// variable-length pattern, and therefore can be ignored. This gives us a way to compute `L`. - /// - /// Additionally, if fixed-length patterns exist, we must pick an `L` large enough to miss them, - /// so we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`. - /// `max_slice` below will be made to have this arity `L`. 
- /// - /// If `self` is fixed-length, it is returned as-is. - /// - /// Additionally, we track for each output slice whether it is covered by one of the column slices or not. - fn split( - self, - column_slices: impl Iterator<Item = Slice>, - ) -> impl Iterator<Item = (Presence, Slice)> { - // Range of lengths below `L`. - let smaller_lengths; - let arity = self.arity(); - let mut max_slice = self.kind; - // Tracks the smallest variable-length slice we've seen. Any slice arity above it is - // therefore `Presence::Seen` in the column. - let mut min_var_len = usize::MAX; - // Tracks the fixed-length slices we've seen, to mark them as `Presence::Seen`. - let mut seen_fixed_lens = FxHashSet::default(); - match &mut max_slice { - VarLen(max_prefix_len, max_suffix_len) => { - // We grow `max_slice` to be larger than all slices encountered, as described above. - // For diagnostics, we keep the prefix and suffix lengths separate, but grow them so that - // `L = max_prefix_len + max_suffix_len`. - let mut max_fixed_len = 0; - for slice in column_slices { - match slice.kind { - FixedLen(len) => { - max_fixed_len = cmp::max(max_fixed_len, len); - if arity <= len { - seen_fixed_lens.insert(len); - } - } - VarLen(prefix, suffix) => { - *max_prefix_len = cmp::max(*max_prefix_len, prefix); - *max_suffix_len = cmp::max(*max_suffix_len, suffix); - min_var_len = cmp::min(min_var_len, prefix + suffix); - } - } - } - // We want `L = max(L, max_fixed_len + 1)`, modulo the fact that we keep prefix and - // suffix separate. - if max_fixed_len + 1 >= *max_prefix_len + *max_suffix_len { - // The subtraction can't overflow thanks to the above check. - // The new `max_prefix_len` is larger than its previous value. - *max_prefix_len = max_fixed_len + 1 - *max_suffix_len; - } - - // We cap the arity of `max_slice` at the array size. - match self.array_len { - Some(len) if max_slice.arity() >= len => max_slice = FixedLen(len), - _ => {} - } - - smaller_lengths = match self.array_len { - // The only admissible fixed-length slice is one of the array size. Whether `max_slice` - // is fixed-length or variable-length, it will be the only relevant slice to output - // here. - Some(_) => 0..0, // empty range - // We need to cover all arities in the range `(arity..infinity)`. We split that - // range into two: lengths smaller than `max_slice.arity()` are treated - // independently as fixed-lengths slices, and lengths above are captured by - // `max_slice`. - None => self.arity()..max_slice.arity(), - }; - } - FixedLen(_) => { - // No need to split here. We only track presence. - for slice in column_slices { - match slice.kind { - FixedLen(len) => { - if len == arity { - seen_fixed_lens.insert(len); - } - } - VarLen(prefix, suffix) => { - min_var_len = cmp::min(min_var_len, prefix + suffix); - } - } - } - smaller_lengths = 0..0; - } - }; - - smaller_lengths.map(FixedLen).chain(once(max_slice)).map(move |kind| { - let arity = kind.arity(); - let seen = if min_var_len <= arity || seen_fixed_lens.contains(&arity) { - Presence::Seen - } else { - Presence::Unseen - }; - (seen, Slice::new(self.array_len, kind)) - }) - } -} - -/// A globally unique id to distinguish `Opaque` patterns. -#[derive(Clone, Debug, PartialEq, Eq)] -pub(super) struct OpaqueId(u32); - -impl OpaqueId { - fn new() -> Self { - use std::sync::atomic::{AtomicU32, Ordering}; - static OPAQUE_ID: AtomicU32 = AtomicU32::new(0); - OpaqueId(OPAQUE_ID.fetch_add(1, Ordering::SeqCst)) - } -} - -/// A value can be decomposed into a constructor applied to some fields. 
This struct represents -/// the constructor. See also `Fields`. -/// -/// `pat_constructor` retrieves the constructor corresponding to a pattern. -/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a -/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and -/// `Fields`. -#[derive(Clone, Debug, PartialEq)] -pub(super) enum Constructor<'tcx> { - /// The constructor for patterns that have a single constructor, like tuples, struct patterns, - /// and references. Fixed-length arrays are treated separately with `Slice`. - Single, - /// Enum variants. - Variant(VariantIdx), - /// Booleans - Bool(bool), - /// Ranges of integer literal values (`2`, `2..=5` or `2..5`). - IntRange(IntRange), - /// Ranges of floating-point literal values (`2.0..=5.2`). - F32Range(IeeeFloat<SingleS>, IeeeFloat<SingleS>, RangeEnd), - F64Range(IeeeFloat<DoubleS>, IeeeFloat<DoubleS>, RangeEnd), - /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately. - Str(mir::Const<'tcx>), - /// Array and slice patterns. - Slice(Slice), - /// Constants that must not be matched structurally. They are treated as black boxes for the - /// purposes of exhaustiveness: we must not inspect them, and they don't count towards making a - /// match exhaustive. - /// Carries an id that must be unique within a match. We need this to ensure the invariants of - /// [`SplitConstructorSet`]. - Opaque(OpaqueId), - /// Or-pattern. - Or, - /// Wildcard pattern. - Wildcard, - /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used - /// for those types for which we cannot list constructors explicitly, like `f64` and `str`. - NonExhaustive, - /// Fake extra constructor for variants that should not be mentioned in diagnostics. - /// We use this for variants behind an unstable gate as well as - /// `#[doc(hidden)]` ones. - Hidden, - /// Fake extra constructor for constructors that are not seen in the matrix, as explained at the - /// top of the file. - Missing, -} - -impl<'tcx> Constructor<'tcx> { - pub(super) fn is_wildcard(&self) -> bool { - matches!(self, Wildcard) - } - pub(super) fn is_non_exhaustive(&self) -> bool { - matches!(self, NonExhaustive) - } - - pub(super) fn as_variant(&self) -> Option<VariantIdx> { - match self { - Variant(i) => Some(*i), - _ => None, - } - } - fn as_bool(&self) -> Option<bool> { - match self { - Bool(b) => Some(*b), - _ => None, - } - } - pub(super) fn as_int_range(&self) -> Option<&IntRange> { - match self { - IntRange(range) => Some(range), - _ => None, - } - } - fn as_slice(&self) -> Option<Slice> { - match self { - Slice(slice) => Some(*slice), - _ => None, - } - } - - fn variant_index_for_adt(&self, adt: ty::AdtDef<'tcx>) -> VariantIdx { - match *self { - Variant(idx) => idx, - Single => { - assert!(!adt.is_enum()); - FIRST_VARIANT - } - _ => bug!("bad constructor {:?} for adt {:?}", self, adt), - } - } - - /// The number of fields for this constructor. This must be kept in sync with - /// `Fields::wildcards`. - pub(super) fn arity(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> usize { - match self { - Single | Variant(_) => match pcx.ty.kind() { - ty::Tuple(fs) => fs.len(), - ty::Ref(..) => 1, - ty::Adt(adt, ..) => { - if adt.is_box() { - // The only legal patterns of type `Box` (outside `std`) are `_` and box - // patterns. If we're here we can assume this is a box pattern. 
- 1 - } else { - let variant = &adt.variant(self.variant_index_for_adt(*adt)); - Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant).count() - } - } - _ => bug!("Unexpected type for `Single` constructor: {:?}", pcx.ty), - }, - Slice(slice) => slice.arity(), - Bool(..) - | IntRange(..) - | F32Range(..) - | F64Range(..) - | Str(..) - | Opaque(..) - | NonExhaustive - | Hidden - | Missing { .. } - | Wildcard => 0, - Or => bug!("The `Or` constructor doesn't have a fixed arity"), - } - } - - /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`. - /// For the simple cases, this is simply checking for equality. For the "grouped" constructors, - /// this checks for inclusion. - // We inline because this has a single call site in `Matrix::specialize_constructor`. - #[inline] - pub(super) fn is_covered_by<'p>(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool { - match (self, other) { - (Wildcard, _) => { - span_bug!( - pcx.cx.scrut_span, - "Constructor splitting should not have returned `Wildcard`" - ) - } - // Wildcards cover anything - (_, Wildcard) => true, - // Only a wildcard pattern can match these special constructors. - (Missing { .. } | NonExhaustive | Hidden, _) => false, - - (Single, Single) => true, - (Variant(self_id), Variant(other_id)) => self_id == other_id, - (Bool(self_b), Bool(other_b)) => self_b == other_b, - - (IntRange(self_range), IntRange(other_range)) => self_range.is_subrange(other_range), - (F32Range(self_from, self_to, self_end), F32Range(other_from, other_to, other_end)) => { - self_from.ge(other_from) - && match self_to.partial_cmp(other_to) { - Some(Ordering::Less) => true, - Some(Ordering::Equal) => other_end == self_end, - _ => false, - } - } - (F64Range(self_from, self_to, self_end), F64Range(other_from, other_to, other_end)) => { - self_from.ge(other_from) - && match self_to.partial_cmp(other_to) { - Some(Ordering::Less) => true, - Some(Ordering::Equal) => other_end == self_end, - _ => false, - } - } - (Str(self_val), Str(other_val)) => { - // FIXME Once valtrees are available we can directly use the bytes - // in the `Str` variant of the valtree for the comparison here. - self_val == other_val - } - (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice), - - // Opaque constructors don't interact with anything unless they come from the - // syntactically identical pattern. - (Opaque(self_id), Opaque(other_id)) => self_id == other_id, - (Opaque(..), _) | (_, Opaque(..)) => false, - - _ => span_bug!( - pcx.cx.scrut_span, - "trying to compare incompatible constructors {:?} and {:?}", - self, - other - ), - } - } -} - -/// Describes the set of all constructors for a type. -#[derive(Debug)] -pub(super) enum ConstructorSet { - /// The type has a single constructor, e.g. `&T` or a struct. - Single, - /// This type has the following list of constructors. - /// Some variants are hidden, which means they won't be mentioned in diagnostics unless the user - /// mentioned them first. We use this for variants behind an unstable gate as well as - /// `#[doc(hidden)]` ones. - Variants { - visible_variants: Vec<VariantIdx>, - hidden_variants: Vec<VariantIdx>, - non_exhaustive: bool, - }, - /// Booleans. - Bool, - /// The type is spanned by integer values. The range or ranges give the set of allowed values. - /// The second range is only useful for `char`. - Integers { range_1: IntRange, range_2: Option<IntRange> }, - /// The type is matched by slices. 
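A minimal standalone sketch of the inclusion test that `is_covered_by` applies to integer ranges above (this is not rustc's `IntRange`, just the idea with plain inclusive bounds):

```rust
#[derive(Clone, Copy)]
struct Range {
    lo: i64, // inclusive
    hi: i64, // inclusive
}

// `a` is covered by `b` iff both of `a`'s bounds lie within `b`.
fn is_subrange(a: Range, b: Range) -> bool {
    b.lo <= a.lo && a.hi <= b.hi
}

fn main() {
    assert!(is_subrange(Range { lo: 2, hi: 5 }, Range { lo: 0, hi: 10 }));
    assert!(!is_subrange(Range { lo: 5, hi: 15 }, Range { lo: 0, hi: 10 }));
}
```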
The usize is the compile-time length of the array, if known. - Slice(Option<usize>), - /// The type is matched by slices whose elements are uninhabited. - SliceOfEmpty, - /// The constructors cannot be listed, and the type cannot be matched exhaustively. E.g. `str`, - /// floats. - Unlistable, - /// The type has no inhabitants. - Uninhabited, -} - -/// Describes the result of analyzing the constructors in a column of a match. -/// -/// `present` is morally the set of constructors present in the column, and `missing` is the set of -/// constructors that exist in the type but are not present in the column. -/// -/// More formally, if we discard wildcards from the column, this respects the following constraints: -/// 1. the union of `present` and `missing` covers the whole type -/// 2. each constructor in `present` is covered by something in the column -/// 3. no constructor in `missing` is covered by anything in the column -/// 4. each constructor in the column is equal to the union of one or more constructors in `present` -/// 5. `missing` does not contain empty constructors (see discussion about emptiness at the top of -/// the file); -/// 6. constructors in `present` and `missing` are split for the column; in other words, they are -/// either fully included in or fully disjoint from each constructor in the column. In other -/// words, there are no non-trivial intersections like between `0..10` and `5..15`. -/// -/// We must be particularly careful with weird constructors like `Opaque`: they're not formally part -/// of the `ConstructorSet` for the type, yet if we forgot to include them in `present` we would be -/// ignoring any row with `Opaque`s in the algorithm. Hence the importance of point 4. -#[derive(Debug)] -pub(super) struct SplitConstructorSet<'tcx> { - pub(super) present: SmallVec<[Constructor<'tcx>; 1]>, - pub(super) missing: Vec<Constructor<'tcx>>, -} - -impl ConstructorSet { - /// Creates a set that represents all the constructors of `ty`. - #[instrument(level = "debug", skip(cx), ret)] - pub(super) fn for_ty<'p, 'tcx>(cx: &MatchCheckCtxt<'p, 'tcx>, ty: Ty<'tcx>) -> Self { - let make_range = |start, end| { - IntRange::from_range( - MaybeInfiniteInt::new_finite(cx.tcx, ty, start), - MaybeInfiniteInt::new_finite(cx.tcx, ty, end), - RangeEnd::Included, - ) - }; - // This determines the set of all possible constructors for the type `ty`. For numbers, - // arrays and slices we use ranges and variable-length slices when appropriate. - // - // If the `exhaustive_patterns` feature is enabled, we make sure to omit constructors that - // are statically impossible. E.g., for `Option<!>`, we do not include `Some(_)` in the - // returned list of constructors. - // Invariant: this is `Uninhabited` if and only if the type is uninhabited (as determined by - // `cx.is_uninhabited()`). - match ty.kind() { - ty::Bool => Self::Bool, - ty::Char => { - // The valid Unicode Scalar Value ranges. - Self::Integers { - range_1: make_range('\u{0000}' as u128, '\u{D7FF}' as u128), - range_2: Some(make_range('\u{E000}' as u128, '\u{10FFFF}' as u128)), - } - } - &ty::Int(ity) => { - let range = if ty.is_ptr_sized_integral() { - // The min/max values of `isize` are not allowed to be observed. 
- IntRange { lo: NegInfinity, hi: PosInfinity } - } else { - let bits = Integer::from_int_ty(&cx.tcx, ity).size().bits() as u128; - let min = 1u128 << (bits - 1); - let max = min - 1; - make_range(min, max) - }; - Self::Integers { range_1: range, range_2: None } - } - &ty::Uint(uty) => { - let range = if ty.is_ptr_sized_integral() { - // The max value of `usize` is not allowed to be observed. - let lo = MaybeInfiniteInt::new_finite(cx.tcx, ty, 0); - IntRange { lo, hi: PosInfinity } - } else { - let size = Integer::from_uint_ty(&cx.tcx, uty).size(); - let max = size.truncate(u128::MAX); - make_range(0, max) - }; - Self::Integers { range_1: range, range_2: None } - } - ty::Array(sub_ty, len) if len.try_eval_target_usize(cx.tcx, cx.param_env).is_some() => { - let len = len.eval_target_usize(cx.tcx, cx.param_env) as usize; - if len != 0 && cx.is_uninhabited(*sub_ty) { - Self::Uninhabited - } else { - Self::Slice(Some(len)) - } - } - // Treat arrays of a constant but unknown length like slices. - ty::Array(sub_ty, _) | ty::Slice(sub_ty) => { - if cx.is_uninhabited(*sub_ty) { - Self::SliceOfEmpty - } else { - Self::Slice(None) - } - } - ty::Adt(def, args) if def.is_enum() => { - // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an - // additional "unknown" constructor. - // There is no point in enumerating all possible variants, because the user can't - // actually match against them all themselves. So we always return only the fictitious - // constructor. - // E.g., in an example like: - // - // ``` - // let err: io::ErrorKind = ...; - // match err { - // io::ErrorKind::NotFound => {}, - // } - // ``` - // - // we don't want to show every possible IO error, but instead have only `_` as the - // witness. - let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(ty); - - if def.variants().is_empty() && !is_declared_nonexhaustive { - Self::Uninhabited - } else { - let is_exhaustive_pat_feature = cx.tcx.features().exhaustive_patterns; - let (hidden_variants, visible_variants) = def - .variants() - .iter_enumerated() - .filter(|(_, v)| { - // If `exhaustive_patterns` is enabled, we exclude variants known to be - // uninhabited. - !is_exhaustive_pat_feature - || v.inhabited_predicate(cx.tcx, *def) - .instantiate(cx.tcx, args) - .apply(cx.tcx, cx.param_env, cx.module) - }) - .map(|(idx, _)| idx) - .partition(|idx| { - let variant_def_id = def.variant(*idx).def_id; - // Filter variants that depend on a disabled unstable feature. - let is_unstable = matches!( - cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None), - EvalResult::Deny { .. } - ); - // Filter foreign `#[doc(hidden)]` variants. - let is_doc_hidden = - cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local(); - is_unstable || is_doc_hidden - }); - - Self::Variants { - visible_variants, - hidden_variants, - non_exhaustive: is_declared_nonexhaustive, - } - } - } - ty::Never => Self::Uninhabited, - _ if cx.is_uninhabited(ty) => Self::Uninhabited, - ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => Self::Single, - // This type is one for which we cannot list constructors, like `str` or `f64`. - _ => Self::Unlistable, - } - } - - /// This analyzes a column of constructors to 1/ determine which constructors of the type (if - /// any) are missing; 2/ split constructors to handle non-trivial intersections e.g. on ranges - /// or slices. This can get subtle; see [`SplitConstructorSet`] for details of this operation - /// and its invariants. 
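A hedged illustration of the `#[non_exhaustive]` handling described above, using the same `io::ErrorKind` example as the comment: only a wildcard arm can cover the fictitious "unknown" constructor, so that is all the analysis ever reports as missing.

```rust
use std::io;

fn describe(kind: io::ErrorKind) -> &'static str {
    match kind {
        io::ErrorKind::NotFound => "not found",
        io::ErrorKind::PermissionDenied => "permission denied",
        // Required: `io::ErrorKind` is `#[non_exhaustive]`, so listing every variant
        // can never make the match exhaustive from outside `std`.
        _ => "some other error",
    }
}

fn main() {
    println!("{}", describe(io::ErrorKind::NotFound));
    println!("{}", describe(io::ErrorKind::Interrupted));
}
```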
- #[instrument(level = "debug", skip(self, pcx, ctors), ret)] - pub(super) fn split<'a, 'tcx>( - &self, - pcx: &PatCtxt<'_, '_, 'tcx>, - ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone, - ) -> SplitConstructorSet<'tcx> - where - 'tcx: 'a, - { - let mut present: SmallVec<[_; 1]> = SmallVec::new(); - let mut missing = Vec::new(); - // Constructors in `ctors`, except wildcards and opaques. - let mut seen = Vec::new(); - for ctor in ctors.cloned() { - if let Constructor::Opaque(..) = ctor { - present.push(ctor); - } else if !ctor.is_wildcard() { - seen.push(ctor); - } - } - - match self { - ConstructorSet::Single => { - if seen.is_empty() { - missing.push(Single); - } else { - present.push(Single); - } - } - ConstructorSet::Variants { visible_variants, hidden_variants, non_exhaustive } => { - let seen_set: FxHashSet<_> = seen.iter().map(|c| c.as_variant().unwrap()).collect(); - let mut skipped_a_hidden_variant = false; - - for variant in visible_variants { - let ctor = Variant(*variant); - if seen_set.contains(variant) { - present.push(ctor); - } else { - missing.push(ctor); - } - } - - for variant in hidden_variants { - let ctor = Variant(*variant); - if seen_set.contains(variant) { - present.push(ctor); - } else { - skipped_a_hidden_variant = true; - } - } - if skipped_a_hidden_variant { - missing.push(Hidden); - } - - if *non_exhaustive { - missing.push(NonExhaustive); - } - } - ConstructorSet::Bool => { - let mut seen_false = false; - let mut seen_true = false; - for b in seen.iter().map(|ctor| ctor.as_bool().unwrap()) { - if b { - seen_true = true; - } else { - seen_false = true; - } - } - if seen_false { - present.push(Bool(false)); - } else { - missing.push(Bool(false)); - } - if seen_true { - present.push(Bool(true)); - } else { - missing.push(Bool(true)); - } - } - ConstructorSet::Integers { range_1, range_2 } => { - let seen_ranges: Vec<_> = - seen.iter().map(|ctor| ctor.as_int_range().unwrap().clone()).collect(); - for (seen, splitted_range) in range_1.split(seen_ranges.iter().cloned()) { - match seen { - Presence::Unseen => missing.push(IntRange(splitted_range)), - Presence::Seen => present.push(IntRange(splitted_range)), - } - } - if let Some(range_2) = range_2 { - for (seen, splitted_range) in range_2.split(seen_ranges.into_iter()) { - match seen { - Presence::Unseen => missing.push(IntRange(splitted_range)), - Presence::Seen => present.push(IntRange(splitted_range)), - } - } - } - } - &ConstructorSet::Slice(array_len) => { - let seen_slices = seen.iter().map(|c| c.as_slice().unwrap()); - let base_slice = Slice::new(array_len, VarLen(0, 0)); - for (seen, splitted_slice) in base_slice.split(seen_slices) { - let ctor = Slice(splitted_slice); - match seen { - Presence::Unseen => missing.push(ctor), - Presence::Seen => present.push(ctor), - } - } - } - ConstructorSet::SliceOfEmpty => { - // This one is tricky because even though there's only one possible value of this - // type (namely `[]`), slice patterns of all lengths are allowed, they're just - // unreachable if length != 0. - // We still gather the seen constructors in `present`, but the only slice that can - // go in `missing` is `[]`. 
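A hedged illustration of the `Integers` splitting performed above (the concrete sets come from `IntRange::split`; the comments here are informal):

```rust
fn bucket(x: u8) -> &'static str {
    // Splitting 0..=255 against this column yields, roughly, `present` = {0..=9, 10..=10}
    // and `missing` = {11..=255}; the wildcard arm is what covers the missing part.
    match x {
        0..=9 => "small",
        10 => "ten",
        _ => "rest",
    }
}

fn main() {
    for x in [0u8, 9, 10, 11, 255] {
        println!("{x}: {}", bucket(x));
    }
}
```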
- let seen_slices = seen.iter().map(|c| c.as_slice().unwrap()); - let base_slice = Slice::new(None, VarLen(0, 0)); - for (seen, splitted_slice) in base_slice.split(seen_slices) { - let ctor = Slice(splitted_slice); - match seen { - Presence::Seen => present.push(ctor), - Presence::Unseen if splitted_slice.arity() == 0 => { - missing.push(Slice(Slice::new(None, FixedLen(0)))) - } - Presence::Unseen => {} - } - } - } - ConstructorSet::Unlistable => { - // Since we can't list constructors, we take the ones in the column. This might list - // some constructors several times but there's not much we can do. - present.extend(seen); - missing.push(NonExhaustive); - } - // If `exhaustive_patterns` is disabled and our scrutinee is an empty type, we cannot - // expose its emptiness. The exception is if the pattern is at the top level, because we - // want empty matches to be considered exhaustive. - ConstructorSet::Uninhabited - if !pcx.cx.tcx.features().exhaustive_patterns && !pcx.is_top_level => - { - missing.push(NonExhaustive); - } - ConstructorSet::Uninhabited => {} - } - - SplitConstructorSet { present, missing } - } -} - -/// A value can be decomposed into a constructor applied to some fields. This struct represents -/// those fields, generalized to allow patterns in each field. See also `Constructor`. -/// -/// This is constructed for a constructor using [`Fields::wildcards()`]. The idea is that -/// [`Fields::wildcards()`] constructs a list of fields where all entries are wildcards, and then -/// given a pattern we fill some of the fields with its subpatterns. -/// In the following example `Fields::wildcards` returns `[_, _, _, _]`. Then in -/// `extract_pattern_arguments` we fill some of the entries, and the result is -/// `[Some(0), _, _, _]`. -/// ```compile_fail,E0004 -/// # fn foo() -> [Option<u8>; 4] { [None; 4] } -/// let x: [Option<u8>; 4] = foo(); -/// match x { -/// [Some(0), ..] => {} -/// } -/// ``` -/// -/// Note that the number of fields of a constructor may not match the fields declared in the -/// original struct/variant. This happens if a private or `non_exhaustive` field is uninhabited, -/// because the code mustn't observe that it is uninhabited. In that case that field is not -/// included in `fields`. For that reason, when you have a `FieldIdx` you must use -/// `index_with_declared_idx`. -#[derive(Debug, Clone, Copy)] -pub(super) struct Fields<'p, 'tcx> { - fields: &'p [DeconstructedPat<'p, 'tcx>], -} - -impl<'p, 'tcx> Fields<'p, 'tcx> { - fn empty() -> Self { - Fields { fields: &[] } - } - - fn singleton(cx: &MatchCheckCtxt<'p, 'tcx>, field: DeconstructedPat<'p, 'tcx>) -> Self { - let field: &_ = cx.pattern_arena.alloc(field); - Fields { fields: std::slice::from_ref(field) } - } - - pub(super) fn from_iter( - cx: &MatchCheckCtxt<'p, 'tcx>, - fields: impl IntoIterator<Item = DeconstructedPat<'p, 'tcx>>, - ) -> Self { - let fields: &[_] = cx.pattern_arena.alloc_from_iter(fields); - Fields { fields } - } - - fn wildcards_from_tys( - cx: &MatchCheckCtxt<'p, 'tcx>, - tys: impl IntoIterator<Item = Ty<'tcx>>, - ) -> Self { - Fields::from_iter(cx, tys.into_iter().map(|ty| DeconstructedPat::wildcard(ty, DUMMY_SP))) - } - - // In the cases of either a `#[non_exhaustive]` field list or a non-public field, we hide - // uninhabited fields in order not to reveal the uninhabitedness of the whole variant. - // This lists the fields we keep along with their types. 
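A minimal sketch (not rustc's types) of the wildcard-filling step described in the `Fields` documentation above: start from an all-wildcard field list for the constructor, then overwrite the positions the pattern actually mentions.

```rust
fn fill_fields(arity: usize, given: &[(usize, &str)]) -> Vec<String> {
    let mut fields = vec!["_".to_string(); arity];
    for &(idx, pat) in given {
        fields[idx] = pat.to_string();
    }
    fields
}

fn main() {
    // `[Some(0), ..]` against an array of length 4: only field 0 is specified,
    // so the filled fields are `[Some(0), _, _, _]`.
    assert_eq!(fill_fields(4, &[(0, "Some(0)")]), vec!["Some(0)", "_", "_", "_"]);
}
```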
- fn list_variant_nonhidden_fields<'a>( - cx: &'a MatchCheckCtxt<'p, 'tcx>, - ty: Ty<'tcx>, - variant: &'a VariantDef, - ) -> impl Iterator<Item = (FieldIdx, Ty<'tcx>)> + Captures<'a> + Captures<'p> { - let ty::Adt(adt, args) = ty.kind() else { bug!() }; - // Whether we must not match the fields of this variant exhaustively. - let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local(); - - variant.fields.iter().enumerate().filter_map(move |(i, field)| { - let ty = field.ty(cx.tcx, args); - // `field.ty()` doesn't normalize after substituting. - let ty = cx.tcx.normalize_erasing_regions(cx.param_env, ty); - let is_visible = adt.is_enum() || field.vis.is_accessible_from(cx.module, cx.tcx); - let is_uninhabited = cx.is_uninhabited(ty); - - if is_uninhabited && (!is_visible || is_non_exhaustive) { - None - } else { - Some((FieldIdx::new(i), ty)) - } - }) - } - - /// Creates a new list of wildcard fields for a given constructor. The result must have a - /// length of `constructor.arity()`. - #[instrument(level = "trace")] - pub(super) fn wildcards(pcx: &PatCtxt<'_, 'p, 'tcx>, constructor: &Constructor<'tcx>) -> Self { - let ret = match constructor { - Single | Variant(_) => match pcx.ty.kind() { - ty::Tuple(fs) => Fields::wildcards_from_tys(pcx.cx, fs.iter()), - ty::Ref(_, rty, _) => Fields::wildcards_from_tys(pcx.cx, once(*rty)), - ty::Adt(adt, args) => { - if adt.is_box() { - // The only legal patterns of type `Box` (outside `std`) are `_` and box - // patterns. If we're here we can assume this is a box pattern. - Fields::wildcards_from_tys(pcx.cx, once(args.type_at(0))) - } else { - let variant = &adt.variant(constructor.variant_index_for_adt(*adt)); - let tys = Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant) - .map(|(_, ty)| ty); - Fields::wildcards_from_tys(pcx.cx, tys) - } - } - _ => bug!("Unexpected type for `Single` constructor: {:?}", pcx), - }, - Slice(slice) => match *pcx.ty.kind() { - ty::Slice(ty) | ty::Array(ty, _) => { - let arity = slice.arity(); - Fields::wildcards_from_tys(pcx.cx, (0..arity).map(|_| ty)) - } - _ => bug!("bad slice pattern {:?} {:?}", constructor, pcx), - }, - Bool(..) - | IntRange(..) - | F32Range(..) - | F64Range(..) - | Str(..) - | Opaque(..) - | NonExhaustive - | Hidden - | Missing { .. } - | Wildcard => Fields::empty(), - Or => { - bug!("called `Fields::wildcards` on an `Or` ctor") - } - }; - debug!(?ret); - ret - } - - /// Returns the list of patterns. - pub(super) fn iter_patterns<'a>( - &'a self, - ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> { - self.fields.iter() - } -} - -/// Values and patterns can be represented as a constructor applied to some fields. This represents -/// a pattern in this form. -/// This also uses interior mutability to keep track of whether the pattern has been found reachable -/// during analysis. For this reason they cannot be cloned. -/// A `DeconstructedPat` will almost always come from user input; the only exception are some -/// `Wildcard`s introduced during specialization. 
-pub(crate) struct DeconstructedPat<'p, 'tcx> { - ctor: Constructor<'tcx>, - fields: Fields<'p, 'tcx>, - ty: Ty<'tcx>, - span: Span, - reachable: Cell<bool>, -} - -impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> { - pub(super) fn wildcard(ty: Ty<'tcx>, span: Span) -> Self { - Self::new(Wildcard, Fields::empty(), ty, span) - } - - pub(super) fn new( - ctor: Constructor<'tcx>, - fields: Fields<'p, 'tcx>, - ty: Ty<'tcx>, - span: Span, - ) -> Self { - DeconstructedPat { ctor, fields, ty, span, reachable: Cell::new(false) } - } - - /// Note: the input patterns must have been lowered through - /// `super::check_match::MatchVisitor::lower_pattern`. - pub(crate) fn from_pat(cx: &MatchCheckCtxt<'p, 'tcx>, pat: &Pat<'tcx>) -> Self { - let mkpat = |pat| DeconstructedPat::from_pat(cx, pat); - let ctor; - let fields; - match &pat.kind { - PatKind::AscribeUserType { subpattern, .. } - | PatKind::InlineConstant { subpattern, .. } => return mkpat(subpattern), - PatKind::Binding { subpattern: Some(subpat), .. } => return mkpat(subpat), - PatKind::Binding { subpattern: None, .. } | PatKind::Wild => { - ctor = Wildcard; - fields = Fields::empty(); - } - PatKind::Deref { subpattern } => { - ctor = Single; - fields = Fields::singleton(cx, mkpat(subpattern)); - } - PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => { - match pat.ty.kind() { - ty::Tuple(fs) => { - ctor = Single; - let mut wilds: SmallVec<[_; 2]> = - fs.iter().map(|ty| DeconstructedPat::wildcard(ty, pat.span)).collect(); - for pat in subpatterns { - wilds[pat.field.index()] = mkpat(&pat.pattern); - } - fields = Fields::from_iter(cx, wilds); - } - ty::Adt(adt, args) if adt.is_box() => { - // The only legal patterns of type `Box` (outside `std`) are `_` and box - // patterns. If we're here we can assume this is a box pattern. - // FIXME(Nadrieril): A `Box` can in theory be matched either with `Box(_, - // _)` or a box pattern. As a hack to avoid an ICE with the former, we - // ignore other fields than the first one. This will trigger an error later - // anyway. - // See https://github.com/rust-lang/rust/issues/82772 , - // explanation: https://github.com/rust-lang/rust/pull/82789#issuecomment-796921977 - // The problem is that we can't know from the type whether we'll match - // normally or through box-patterns. We'll have to figure out a proper - // solution when we introduce generalized deref patterns. Also need to - // prevent mixing of those two options. - let pattern = subpatterns.into_iter().find(|pat| pat.field.index() == 0); - let pat = if let Some(pat) = pattern { - mkpat(&pat.pattern) - } else { - DeconstructedPat::wildcard(args.type_at(0), pat.span) - }; - ctor = Single; - fields = Fields::singleton(cx, pat); - } - ty::Adt(adt, _) => { - ctor = match pat.kind { - PatKind::Leaf { .. } => Single, - PatKind::Variant { variant_index, .. } => Variant(variant_index), - _ => bug!(), - }; - let variant = &adt.variant(ctor.variant_index_for_adt(*adt)); - // For each field in the variant, we store the relevant index into `self.fields` if any. 
- let mut field_id_to_id: Vec<Option<usize>> = - (0..variant.fields.len()).map(|_| None).collect(); - let tys = Fields::list_variant_nonhidden_fields(cx, pat.ty, variant) - .enumerate() - .map(|(i, (field, ty))| { - field_id_to_id[field.index()] = Some(i); - ty - }); - let mut wilds: SmallVec<[_; 2]> = - tys.map(|ty| DeconstructedPat::wildcard(ty, pat.span)).collect(); - for pat in subpatterns { - if let Some(i) = field_id_to_id[pat.field.index()] { - wilds[i] = mkpat(&pat.pattern); - } - } - fields = Fields::from_iter(cx, wilds); - } - _ => bug!("pattern has unexpected type: pat: {:?}, ty: {:?}", pat, pat.ty), - } - } - PatKind::Constant { value } => { - match pat.ty.kind() { - ty::Bool => { - ctor = match value.try_eval_bool(cx.tcx, cx.param_env) { - Some(b) => Bool(b), - None => Opaque(OpaqueId::new()), - }; - fields = Fields::empty(); - } - ty::Char | ty::Int(_) | ty::Uint(_) => { - ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { - Some(bits) => IntRange(IntRange::from_bits(cx.tcx, pat.ty, bits)), - None => Opaque(OpaqueId::new()), - }; - fields = Fields::empty(); - } - ty::Float(ty::FloatTy::F32) => { - ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { - Some(bits) => { - use rustc_apfloat::Float; - let value = rustc_apfloat::ieee::Single::from_bits(bits); - F32Range(value, value, RangeEnd::Included) - } - None => Opaque(OpaqueId::new()), - }; - fields = Fields::empty(); - } - ty::Float(ty::FloatTy::F64) => { - ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { - Some(bits) => { - use rustc_apfloat::Float; - let value = rustc_apfloat::ieee::Double::from_bits(bits); - F64Range(value, value, RangeEnd::Included) - } - None => Opaque(OpaqueId::new()), - }; - fields = Fields::empty(); - } - ty::Ref(_, t, _) if t.is_str() => { - // We want a `&str` constant to behave like a `Deref` pattern, to be compatible - // with other `Deref` patterns. This could have been done in `const_to_pat`, - // but that causes issues with the rest of the matching code. - // So here, the constructor for a `"foo"` pattern is `&` (represented by - // `Single`), and has one field. That field has constructor `Str(value)` and no - // fields. - // Note: `t` is `str`, not `&str`. - let subpattern = - DeconstructedPat::new(Str(*value), Fields::empty(), *t, pat.span); - ctor = Single; - fields = Fields::singleton(cx, subpattern) - } - // All constants that can be structurally matched have already been expanded - // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are - // opaque. - _ => { - ctor = Opaque(OpaqueId::new()); - fields = Fields::empty(); - } - } - } - PatKind::Range(box PatRange { lo, hi, end, .. 
}) => { - let ty = pat.ty; - ctor = match ty.kind() { - ty::Char | ty::Int(_) | ty::Uint(_) => { - let lo = - MaybeInfiniteInt::from_pat_range_bdy(*lo, ty, cx.tcx, cx.param_env); - let hi = - MaybeInfiniteInt::from_pat_range_bdy(*hi, ty, cx.tcx, cx.param_env); - IntRange(IntRange::from_range(lo, hi, *end)) - } - ty::Float(fty) => { - use rustc_apfloat::Float; - let lo = lo.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env)); - let hi = hi.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env)); - match fty { - ty::FloatTy::F32 => { - use rustc_apfloat::ieee::Single; - let lo = lo.map(Single::from_bits).unwrap_or(-Single::INFINITY); - let hi = hi.map(Single::from_bits).unwrap_or(Single::INFINITY); - F32Range(lo, hi, *end) - } - ty::FloatTy::F64 => { - use rustc_apfloat::ieee::Double; - let lo = lo.map(Double::from_bits).unwrap_or(-Double::INFINITY); - let hi = hi.map(Double::from_bits).unwrap_or(Double::INFINITY); - F64Range(lo, hi, *end) - } - } - } - _ => bug!("invalid type for range pattern: {}", ty), - }; - fields = Fields::empty(); - } - PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => { - let array_len = match pat.ty.kind() { - ty::Array(_, length) => { - Some(length.eval_target_usize(cx.tcx, cx.param_env) as usize) - } - ty::Slice(_) => None, - _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty), - }; - let kind = if slice.is_some() { - VarLen(prefix.len(), suffix.len()) - } else { - FixedLen(prefix.len() + suffix.len()) - }; - ctor = Slice(Slice::new(array_len, kind)); - fields = - Fields::from_iter(cx, prefix.iter().chain(suffix.iter()).map(|p| mkpat(&*p))); - } - PatKind::Or { .. } => { - ctor = Or; - let pats = expand_or_pat(pat); - fields = Fields::from_iter(cx, pats.into_iter().map(mkpat)); - } - PatKind::Never => { - // FIXME(never_patterns): handle `!` in exhaustiveness. This is a sane default - // in the meantime. - ctor = Wildcard; - fields = Fields::empty(); - } - PatKind::Error(_) => { - ctor = Opaque(OpaqueId::new()); - fields = Fields::empty(); - } - } - DeconstructedPat::new(ctor, fields, pat.ty, pat.span) - } - - pub(super) fn is_or_pat(&self) -> bool { - matches!(self.ctor, Or) - } - /// Expand this (possibly-nested) or-pattern into its alternatives. - pub(super) fn flatten_or_pat(&'p self) -> SmallVec<[&'p Self; 1]> { - if self.is_or_pat() { - self.iter_fields().flat_map(|p| p.flatten_or_pat()).collect() - } else { - smallvec![self] - } - } - - pub(super) fn ctor(&self) -> &Constructor<'tcx> { - &self.ctor - } - pub(super) fn ty(&self) -> Ty<'tcx> { - self.ty - } - pub(super) fn span(&self) -> Span { - self.span - } - - pub(super) fn iter_fields<'a>( - &'a self, - ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> { - self.fields.iter_patterns() - } - - /// Specialize this pattern with a constructor. - /// `other_ctor` can be different from `self.ctor`, but must be covered by it. - pub(super) fn specialize<'a>( - &'a self, - pcx: &PatCtxt<'_, 'p, 'tcx>, - other_ctor: &Constructor<'tcx>, - ) -> SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]> { - match (&self.ctor, other_ctor) { - (Wildcard, _) => { - // We return a wildcard for each field of `other_ctor`. - Fields::wildcards(pcx, other_ctor).iter_patterns().collect() - } - (Slice(self_slice), Slice(other_slice)) - if self_slice.arity() != other_slice.arity() => - { - // The only tricky case: two slices of different arity. Since `self_slice` covers - // `other_slice`, `self_slice` must be `VarLen`, i.e. of the form - // `[prefix, .., suffix]`. 
Moreover `other_slice` is guaranteed to have a larger - // arity. So we fill the middle part with enough wildcards to reach the length of - // the new, larger slice. - match self_slice.kind { - FixedLen(_) => bug!("{:?} doesn't cover {:?}", self_slice, other_slice), - VarLen(prefix, suffix) => { - let (ty::Slice(inner_ty) | ty::Array(inner_ty, _)) = *self.ty.kind() else { - bug!("bad slice pattern {:?} {:?}", self.ctor, self.ty); - }; - let prefix = &self.fields.fields[..prefix]; - let suffix = &self.fields.fields[self_slice.arity() - suffix..]; - let wildcard: &_ = pcx - .cx - .pattern_arena - .alloc(DeconstructedPat::wildcard(inner_ty, DUMMY_SP)); - let extra_wildcards = other_slice.arity() - self_slice.arity(); - let extra_wildcards = (0..extra_wildcards).map(|_| wildcard); - prefix.iter().chain(extra_wildcards).chain(suffix).collect() - } - } - } - _ => self.fields.iter_patterns().collect(), - } - } - - /// We keep track for each pattern if it was ever reachable during the analysis. This is used - /// with `unreachable_spans` to report unreachable subpatterns arising from or patterns. - pub(super) fn set_reachable(&self) { - self.reachable.set(true) - } - pub(super) fn is_reachable(&self) -> bool { - if self.reachable.get() { - true - } else if self.is_or_pat() && self.iter_fields().any(|f| f.is_reachable()) { - // We always expand or patterns in the matrix, so we will never see the actual - // or-pattern (the one with constructor `Or`) in the column. As such, it will not be - // marked as reachable itself, only its children will. We recover this information here. - self.set_reachable(); - true - } else { - false - } - } - - /// Report the spans of subpatterns that were not reachable, if any. - pub(super) fn unreachable_spans(&self) -> Vec<Span> { - let mut spans = Vec::new(); - self.collect_unreachable_spans(&mut spans); - spans - } - fn collect_unreachable_spans(&self, spans: &mut Vec<Span>) { - // We don't look at subpatterns if we already reported the whole pattern as unreachable. - if !self.is_reachable() { - spans.push(self.span); - } else { - for p in self.iter_fields() { - p.collect_unreachable_spans(spans); - } - } - } -} - -/// This is mostly copied from the `Pat` impl. This is best effort and not good enough for a -/// `Display` impl. -impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Printing lists is a chore. - let mut first = true; - let mut start_or_continue = |s| { - if first { - first = false; - "" - } else { - s - } - }; - let mut start_or_comma = || start_or_continue(", "); - - match &self.ctor { - Single | Variant(_) => match self.ty.kind() { - ty::Adt(def, _) if def.is_box() => { - // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside - // of `std`). So this branch is only reachable when the feature is enabled and - // the pattern is a box pattern. - let subpattern = self.iter_fields().next().unwrap(); - write!(f, "box {subpattern:?}") - } - ty::Adt(..) | ty::Tuple(..) => { - let variant = match self.ty.kind() { - ty::Adt(adt, _) => Some(adt.variant(self.ctor.variant_index_for_adt(*adt))), - ty::Tuple(_) => None, - _ => unreachable!(), - }; - - if let Some(variant) = variant { - write!(f, "{}", variant.name)?; - } - - // Without `cx`, we can't know which field corresponds to which, so we can't - // get the names of the fields. Instead we just display everything as a tuple - // struct, which should be good enough. 
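A minimal sketch (plain strings instead of rustc's pattern types) of the slice specialization described above: a variable-length pattern specialized to a larger fixed arity is padded in the middle with wildcards.

```rust
use std::iter;

fn specialize_slice<'a>(prefix: &[&'a str], suffix: &[&'a str], target_arity: usize) -> Vec<&'a str> {
    // The variable-length pattern covers the larger arity, so fill the middle with wildcards.
    let extra = target_arity - (prefix.len() + suffix.len());
    prefix
        .iter()
        .copied()
        .chain(iter::repeat("_").take(extra))
        .chain(suffix.iter().copied())
        .collect()
}

fn main() {
    // `[a, .., z]` (i.e. `VarLen(1, 1)`) specialized to arity 4 becomes `[a, _, _, z]`.
    assert_eq!(specialize_slice(&["a"], &["z"], 4), vec!["a", "_", "_", "z"]);
}
```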
- write!(f, "(")?; - for p in self.iter_fields() { - write!(f, "{}", start_or_comma())?; - write!(f, "{p:?}")?; - } - write!(f, ")") - } - // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should - // be careful to detect strings here. However a string literal pattern will never - // be reported as a non-exhaustiveness witness, so we can ignore this issue. - ty::Ref(_, _, mutbl) => { - let subpattern = self.iter_fields().next().unwrap(); - write!(f, "&{}{:?}", mutbl.prefix_str(), subpattern) - } - _ => write!(f, "_"), - }, - Slice(slice) => { - let mut subpatterns = self.fields.iter_patterns(); - write!(f, "[")?; - match slice.kind { - FixedLen(_) => { - for p in subpatterns { - write!(f, "{}{:?}", start_or_comma(), p)?; - } - } - VarLen(prefix_len, _) => { - for p in subpatterns.by_ref().take(prefix_len) { - write!(f, "{}{:?}", start_or_comma(), p)?; - } - write!(f, "{}", start_or_comma())?; - write!(f, "..")?; - for p in subpatterns { - write!(f, "{}{:?}", start_or_comma(), p)?; - } - } - } - write!(f, "]") - } - Bool(b) => write!(f, "{b}"), - // Best-effort, will render signed ranges incorrectly - IntRange(range) => write!(f, "{range:?}"), - F32Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"), - F64Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"), - Str(value) => write!(f, "{value}"), - Opaque(..) => write!(f, "<constant pattern>"), - Or => { - for pat in self.iter_fields() { - write!(f, "{}{:?}", start_or_continue(" | "), pat)?; - } - Ok(()) - } - Wildcard | Missing { .. } | NonExhaustive | Hidden => write!(f, "_ : {:?}", self.ty), - } - } -} - -/// Same idea as `DeconstructedPat`, except this is a fictitious pattern built up for diagnostics -/// purposes. As such they don't use interning and can be cloned. -#[derive(Debug, Clone)] -pub(crate) struct WitnessPat<'tcx> { - ctor: Constructor<'tcx>, - pub(crate) fields: Vec<WitnessPat<'tcx>>, - ty: Ty<'tcx>, -} - -impl<'tcx> WitnessPat<'tcx> { - pub(super) fn new(ctor: Constructor<'tcx>, fields: Vec<Self>, ty: Ty<'tcx>) -> Self { - Self { ctor, fields, ty } - } - pub(super) fn wildcard(ty: Ty<'tcx>) -> Self { - Self::new(Wildcard, Vec::new(), ty) - } - - /// Construct a pattern that matches everything that starts with this constructor. - /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern - /// `Some(_)`. - pub(super) fn wild_from_ctor(pcx: &PatCtxt<'_, '_, 'tcx>, ctor: Constructor<'tcx>) -> Self { - // Reuse `Fields::wildcards` to get the types. - let fields = Fields::wildcards(pcx, &ctor) - .iter_patterns() - .map(|deco_pat| Self::wildcard(deco_pat.ty())) - .collect(); - Self::new(ctor, fields, pcx.ty) - } - - pub(super) fn ctor(&self) -> &Constructor<'tcx> { - &self.ctor - } - pub(super) fn ty(&self) -> Ty<'tcx> { - self.ty - } - - /// Convert back to a `thir::Pat` for diagnostic purposes. This panics for patterns that don't - /// appear in diagnostics, like float ranges. - pub(crate) fn to_diagnostic_pat(&self, cx: &MatchCheckCtxt<'_, 'tcx>) -> Pat<'tcx> { - let is_wildcard = |pat: &Pat<'_>| matches!(pat.kind, PatKind::Wild); - let mut subpatterns = self.iter_fields().map(|p| Box::new(p.to_diagnostic_pat(cx))); - let kind = match &self.ctor { - Bool(b) => PatKind::Constant { value: mir::Const::from_bool(cx.tcx, *b) }, - IntRange(range) => return range.to_diagnostic_pat(self.ty, cx.tcx), - Single | Variant(_) => match self.ty.kind() { - ty::Tuple(..) 
=> PatKind::Leaf { - subpatterns: subpatterns - .enumerate() - .map(|(i, pattern)| FieldPat { field: FieldIdx::new(i), pattern }) - .collect(), - }, - ty::Adt(adt_def, _) if adt_def.is_box() => { - // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside - // of `std`). So this branch is only reachable when the feature is enabled and - // the pattern is a box pattern. - PatKind::Deref { subpattern: subpatterns.next().unwrap() } - } - ty::Adt(adt_def, args) => { - let variant_index = self.ctor.variant_index_for_adt(*adt_def); - let variant = &adt_def.variant(variant_index); - let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant) - .zip(subpatterns) - .map(|((field, _ty), pattern)| FieldPat { field, pattern }) - .collect(); - - if adt_def.is_enum() { - PatKind::Variant { adt_def: *adt_def, args, variant_index, subpatterns } - } else { - PatKind::Leaf { subpatterns } - } - } - // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should - // be careful to reconstruct the correct constant pattern here. However a string - // literal pattern will never be reported as a non-exhaustiveness witness, so we - // ignore this issue. - ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() }, - _ => bug!("unexpected ctor for type {:?} {:?}", self.ctor, self.ty), - }, - Slice(slice) => { - match slice.kind { - FixedLen(_) => PatKind::Slice { - prefix: subpatterns.collect(), - slice: None, - suffix: Box::new([]), - }, - VarLen(prefix, _) => { - let mut subpatterns = subpatterns.peekable(); - let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix).collect(); - if slice.array_len.is_some() { - // Improves diagnostics a bit: if the type is a known-size array, instead - // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`. - // This is incorrect if the size is not known, since `[_, ..]` captures - // arrays of lengths `>= 1` whereas `[..]` captures any length. - while !prefix.is_empty() && is_wildcard(prefix.last().unwrap()) { - prefix.pop(); - } - while subpatterns.peek().is_some() - && is_wildcard(subpatterns.peek().unwrap()) - { - subpatterns.next(); - } - } - let suffix: Box<[_]> = subpatterns.collect(); - let wild = Pat::wildcard_from_ty(self.ty); - PatKind::Slice { - prefix: prefix.into_boxed_slice(), - slice: Some(Box::new(wild)), - suffix, - } - } - } - } - &Str(value) => PatKind::Constant { value }, - Wildcard | NonExhaustive | Hidden => PatKind::Wild, - Missing { .. } => bug!( - "trying to convert a `Missing` constructor into a `Pat`; this is probably a bug, - `Missing` should have been processed in `apply_constructors`" - ), - F32Range(..) | F64Range(..) | Opaque(..) 
| Or => { - bug!("can't convert to pattern: {:?}", self) - } - }; - - Pat { ty: self.ty, span: DUMMY_SP, kind } - } - - pub(super) fn iter_fields<'a>(&'a self) -> impl Iterator<Item = &'a WitnessPat<'tcx>> { - self.fields.iter() - } -} diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index eb548ad29eb..af0dab4ebc7 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -2,11 +2,8 @@ mod check_match; mod const_to_pat; -pub(crate) mod deconstruct_pat; -mod usefulness; pub(crate) use self::check_match::check_match; -pub(crate) use self::usefulness::MatchCheckCtxt; use crate::errors::*; use crate::thir::util::UserAnnotatedTyHelpers; diff --git a/compiler/rustc_mir_build/src/thir/print.rs b/compiler/rustc_mir_build/src/thir/print.rs index 547da7e7007..28be3139905 100644 --- a/compiler/rustc_mir_build/src/thir/print.rs +++ b/compiler/rustc_mir_build/src/thir/print.rs @@ -91,23 +91,11 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> { } fn print_block(&mut self, block_id: BlockId, depth_lvl: usize) { - let Block { - targeted_by_break, - opt_destruction_scope, - span, - region_scope, - stmts, - expr, - safety_mode, - } = &self.thir.blocks[block_id]; + let Block { targeted_by_break, span, region_scope, stmts, expr, safety_mode } = + &self.thir.blocks[block_id]; print_indented!(self, "Block {", depth_lvl); print_indented!(self, format!("targeted_by_break: {}", targeted_by_break), depth_lvl + 1); - print_indented!( - self, - format!("opt_destruction_scope: {:?}", opt_destruction_scope), - depth_lvl + 1 - ); print_indented!(self, format!("span: {:?}", span), depth_lvl + 1); print_indented!(self, format!("region_scope: {:?}", region_scope), depth_lvl + 1); print_indented!(self, format!("safety_mode: {:?}", safety_mode), depth_lvl + 1); @@ -133,14 +121,9 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> { } fn print_stmt(&mut self, stmt_id: StmtId, depth_lvl: usize) { - let Stmt { kind, opt_destruction_scope } = &self.thir.stmts[stmt_id]; + let Stmt { kind } = &self.thir.stmts[stmt_id]; print_indented!(self, "Stmt {", depth_lvl); - print_indented!( - self, - format!("opt_destruction_scope: {:?}", opt_destruction_scope), - depth_lvl + 1 - ); match kind { StmtKind::Expr { scope, expr } => { diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs index 08b7b1a2619..4c3fadf487b 100644 --- a/compiler/rustc_mir_dataflow/src/framework/direction.rs +++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs @@ -196,7 +196,7 @@ impl Direction for Backward { { results.reset_to_block_entry(state, block); - vis.visit_block_end(results, state, block_data, block); + vis.visit_block_end(state); // Terminator let loc = Location { block, statement_index: block_data.statements.len() }; @@ -214,7 +214,7 @@ impl Direction for Backward { vis.visit_statement_after_primary_effect(results, state, stmt, loc); } - vis.visit_block_start(results, state, block_data, block); + vis.visit_block_start(state); } fn join_state_into_successors_of<'tcx, A>( @@ -449,7 +449,7 @@ impl Direction for Forward { { results.reset_to_block_entry(state, block); - vis.visit_block_start(results, state, block_data, block); + vis.visit_block_start(state); for (statement_index, stmt) in block_data.statements.iter().enumerate() { let loc = Location { block, statement_index }; @@ -466,7 +466,7 @@ impl Direction for Forward { results.reconstruct_terminator_effect(state, term, loc); 
vis.visit_terminator_after_primary_effect(results, state, term, loc); - vis.visit_block_end(results, state, block_data, block); + vis.visit_block_end(state); } fn join_state_into_successors_of<'tcx, A>( diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs index fa16cac3168..0270e059a58 100644 --- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs +++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs @@ -545,25 +545,13 @@ where { type FlowState = A::Domain; - fn visit_block_start( - &mut self, - _results: &mut Results<'tcx, A>, - state: &Self::FlowState, - _block_data: &mir::BasicBlockData<'tcx>, - _block: BasicBlock, - ) { + fn visit_block_start(&mut self, state: &Self::FlowState) { if A::Direction::IS_FORWARD { self.prev_state.clone_from(state); } } - fn visit_block_end( - &mut self, - _results: &mut Results<'tcx, A>, - state: &Self::FlowState, - _block_data: &mir::BasicBlockData<'tcx>, - _block: BasicBlock, - ) { + fn visit_block_end(&mut self, state: &Self::FlowState) { if A::Direction::IS_BACKWARD { self.prev_state.clone_from(state); } diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs index b7dfbe0710d..09cdb055a3e 100644 --- a/compiler/rustc_mir_dataflow/src/framework/mod.rs +++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs @@ -248,18 +248,19 @@ pub trait Analysis<'tcx>: AnalysisDomain<'tcx> { /// A gen/kill dataflow problem. /// -/// Each method in this trait has a corresponding one in `Analysis`. However, these methods only -/// allow modification of the dataflow state via "gen" and "kill" operations. By defining transfer -/// functions for each statement in this way, the transfer function for an entire basic block can -/// be computed efficiently. +/// Each method in this trait has a corresponding one in `Analysis`. However, the first two methods +/// here only allow modification of the dataflow state via "gen" and "kill" operations. By defining +/// transfer functions for each statement in this way, the transfer function for an entire basic +/// block can be computed efficiently. The remaining methods match up with `Analysis` exactly. /// -/// `Analysis` is automatically implemented for all implementers of `GenKillAnalysis`. +/// `Analysis` is automatically implemented for all implementers of `GenKillAnalysis` via a blanket +/// impl below. pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> { type Idx: Idx; fn domain_size(&self, body: &mir::Body<'tcx>) -> usize; - /// See `Analysis::apply_statement_effect`. + /// See `Analysis::apply_statement_effect`. Note how the second arg differs. fn statement_effect( &mut self, trans: &mut impl GenKill<Self::Idx>, @@ -267,7 +268,8 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> { location: Location, ); - /// See `Analysis::apply_before_statement_effect`. + /// See `Analysis::apply_before_statement_effect`. Note how the second arg + /// differs. fn before_statement_effect( &mut self, _trans: &mut impl GenKill<Self::Idx>, @@ -287,7 +289,7 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> { /// See `Analysis::apply_before_terminator_effect`. fn before_terminator_effect( &mut self, - _trans: &mut impl GenKill<Self::Idx>, + _trans: &mut Self::Domain, _terminator: &mir::Terminator<'tcx>, _location: Location, ) { @@ -298,7 +300,7 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> { /// See `Analysis::apply_call_return_effect`. 
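A hedged, standalone sketch of the gen/kill idea the trait documentation above refers to (this is not rustc's API; a `u64` stands in for the bitset domain): per-statement transfer functions that only gen and kill bits can be pre-composed into a single pair for a whole block and then applied as `out = (in - kill) | gen`.

```rust
#[derive(Clone, Copy, Default)]
struct GenKill {
    gens: u64,  // bits generated
    kills: u64, // bits killed
}

impl GenKill {
    fn gen_bit(&mut self, i: u32) {
        self.gens |= 1 << i;
        self.kills &= !(1 << i);
    }
    fn kill_bit(&mut self, i: u32) {
        self.kills |= 1 << i;
        self.gens &= !(1 << i);
    }
    fn apply(&self, state: u64) -> u64 {
        (state & !self.kills) | self.gens
    }
}

fn main() {
    // Compose two statements: the first gens bit 0, the second kills bit 0 and gens bit 1.
    let mut block = GenKill::default();
    block.gen_bit(0);
    block.kill_bit(0);
    block.gen_bit(1);
    // Applying the composed transfer function once matches applying the statements in order.
    assert_eq!(block.apply(0b100), 0b110);
}
```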
fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, block: BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ); @@ -313,6 +315,7 @@ pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> { } } +// Blanket impl: any impl of `GenKillAnalysis` automatically impls `Analysis`. impl<'tcx, A> Analysis<'tcx> for A where A: GenKillAnalysis<'tcx>, diff --git a/compiler/rustc_mir_dataflow/src/framework/visitor.rs b/compiler/rustc_mir_dataflow/src/framework/visitor.rs index e3648bb4076..8b8a16bda99 100644 --- a/compiler/rustc_mir_dataflow/src/framework/visitor.rs +++ b/compiler/rustc_mir_dataflow/src/framework/visitor.rs @@ -31,14 +31,7 @@ pub fn visit_results<'mir, 'tcx, F, R>( pub trait ResultsVisitor<'mir, 'tcx, R> { type FlowState; - fn visit_block_start( - &mut self, - _results: &mut R, - _state: &Self::FlowState, - _block_data: &'mir mir::BasicBlockData<'tcx>, - _block: BasicBlock, - ) { - } + fn visit_block_start(&mut self, _state: &Self::FlowState) {} /// Called with the `before_statement_effect` of the given statement applied to `state` but not /// its `statement_effect`. @@ -86,20 +79,13 @@ pub trait ResultsVisitor<'mir, 'tcx, R> { ) { } - fn visit_block_end( - &mut self, - _results: &mut R, - _state: &Self::FlowState, - _block_data: &'mir mir::BasicBlockData<'tcx>, - _block: BasicBlock, - ) { - } + fn visit_block_end(&mut self, _state: &Self::FlowState) {} } /// Things that can be visited by a `ResultsVisitor`. /// -/// This trait exists so that we can visit the results of multiple dataflow analyses simultaneously. -/// DO NOT IMPLEMENT MANUALLY. Instead, use the `impl_visitable` macro below. +/// This trait exists so that we can visit the results of one or more dataflow analyses +/// simultaneously. pub trait ResultsVisitable<'tcx> { type Direction: Direction; type FlowState; diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs index 01acc380fa3..693994b5da7 100644 --- a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs +++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs @@ -62,7 +62,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeBorrowedLocals { fn call_return_effect( &mut self, - _trans: &mut impl GenKill<Self::Idx>, + _trans: &mut Self::Domain, _block: BasicBlock, _return_places: CallReturnPlaces<'_, 'tcx>, ) { diff --git a/compiler/rustc_mir_dataflow/src/impls/initialized.rs b/compiler/rustc_mir_dataflow/src/impls/initialized.rs index b050e963d8e..6653b99b3f5 100644 --- a/compiler/rustc_mir_dataflow/src/impls/initialized.rs +++ b/compiler/rustc_mir_dataflow/src/impls/initialized.rs @@ -376,7 +376,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _block: mir::BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { @@ -499,7 +499,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _block: mir::BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { @@ -617,7 +617,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _block: mir::BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { @@ -712,7 +712,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for 
EverInitializedPlaces<'_, 'tcx> { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, block: mir::BasicBlock, _return_places: CallReturnPlaces<'_, 'tcx>, ) { diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs index c3fdca1641a..04bae6ae2fe 100644 --- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs @@ -69,7 +69,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _block: mir::BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs index 26fc903973f..646c70eb88f 100644 --- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs @@ -72,7 +72,7 @@ impl<'tcx, 'a> crate::GenKillAnalysis<'tcx> for MaybeStorageLive<'a> { fn call_return_effect( &mut self, - _trans: &mut impl GenKill<Self::Idx>, + _trans: &mut Self::Domain, _block: BasicBlock, _return_places: CallReturnPlaces<'_, 'tcx>, ) { @@ -144,7 +144,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeStorageDead { fn call_return_effect( &mut self, - _trans: &mut impl GenKill<Self::Idx>, + _trans: &mut Self::Domain, _block: BasicBlock, _return_places: CallReturnPlaces<'_, 'tcx>, ) { @@ -238,7 +238,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> { fn before_terminator_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, terminator: &Terminator<'tcx>, loc: Location, ) { @@ -334,7 +334,7 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> { fn call_return_effect( &mut self, - trans: &mut impl GenKill<Self::Idx>, + trans: &mut Self::Domain, _block: BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml index c2ca0a6bcb8..9cc083edb44 100644 --- a/compiler/rustc_mir_transform/Cargo.toml +++ b/compiler/rustc_mir_transform/Cargo.toml @@ -27,8 +27,3 @@ rustc_trait_selection = { path = "../rustc_trait_selection" } smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } tracing = "0.1" # tidy-alphabetical-end - -[dev-dependencies] -# tidy-alphabetical-start -coverage_test_macros = { path = "src/coverage/test_macros" } -# tidy-alphabetical-end diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index 79a1509531d..737fb6bf612 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -66,9 +66,9 @@ use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::dump_mir; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; +use rustc_middle::ty::CoroutineArgs; use rustc_middle::ty::InstanceDef; -use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt}; -use rustc_middle::ty::{CoroutineArgs, GenericArgsRef}; +use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_mir_dataflow::impls::{ MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive, }; @@ -225,8 +225,6 @@ struct SuspensionPoint<'tcx> { struct TransformVisitor<'tcx> { tcx: TyCtxt<'tcx>, coroutine_kind: hir::CoroutineKind, - state_adt_ref: AdtDef<'tcx>, - state_args: 
GenericArgsRef<'tcx>, // The type of the discriminant in the coroutine struct discr_ty: Ty<'tcx>, @@ -245,22 +243,56 @@ struct TransformVisitor<'tcx> { always_live_locals: BitSet<Local>, // The original RETURN_PLACE local - new_ret_local: Local, + old_ret_local: Local, + + old_yield_ty: Ty<'tcx>, + + old_ret_ty: Ty<'tcx>, } impl<'tcx> TransformVisitor<'tcx> { fn insert_none_ret_block(&self, body: &mut Body<'tcx>) -> BasicBlock { let block = BasicBlock::new(body.basic_blocks.len()); - let source_info = SourceInfo::outermost(body.span); - let (kind, idx) = self.coroutine_state_adt_and_variant_idx(true); - assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0); + let none_value = match self.coroutine_kind { + CoroutineKind::Async(_) => span_bug!(body.span, "`Future`s are not fused inherently"), + CoroutineKind::Coroutine => span_bug!(body.span, "`Coroutine`s cannot be fused"), + // `gen` continues return `None` + CoroutineKind::Gen(_) => { + let option_def_id = self.tcx.require_lang_item(LangItem::Option, None); + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + option_def_id, + VariantIdx::from_usize(0), + self.tcx.mk_args(&[self.old_yield_ty.into()]), + None, + None, + )), + IndexVec::new(), + ) + } + // `async gen` continues to return `Poll::Ready(None)` + CoroutineKind::AsyncGen(_) => { + let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() }; + let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() }; + let yield_ty = args.type_at(0); + Rvalue::Use(Operand::Constant(Box::new(ConstOperand { + span: source_info.span, + const_: Const::Unevaluated( + UnevaluatedConst::new( + self.tcx.require_lang_item(LangItem::AsyncGenFinished, None), + self.tcx.mk_args(&[yield_ty.into()]), + ), + self.old_yield_ty, + ), + user_ty: None, + }))) + } + }; + let statements = vec![Statement { - kind: StatementKind::Assign(Box::new(( - Place::return_place(), - Rvalue::Aggregate(Box::new(kind), IndexVec::new()), - ))), + kind: StatementKind::Assign(Box::new((Place::return_place(), none_value))), source_info, }]; @@ -273,23 +305,6 @@ impl<'tcx> TransformVisitor<'tcx> { block } - fn coroutine_state_adt_and_variant_idx( - &self, - is_return: bool, - ) -> (AggregateKind<'tcx>, VariantIdx) { - let idx = VariantIdx::new(match (is_return, self.coroutine_kind) { - (true, hir::CoroutineKind::Coroutine) => 1, // CoroutineState::Complete - (false, hir::CoroutineKind::Coroutine) => 0, // CoroutineState::Yielded - (true, hir::CoroutineKind::Async(_)) => 0, // Poll::Ready - (false, hir::CoroutineKind::Async(_)) => 1, // Poll::Pending - (true, hir::CoroutineKind::Gen(_)) => 0, // Option::None - (false, hir::CoroutineKind::Gen(_)) => 1, // Option::Some - }); - - let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_args, None, None); - (kind, idx) - } - // Make a `CoroutineState` or `Poll` variant assignment. // // `core::ops::CoroutineState` only has single element tuple variants, @@ -302,51 +317,119 @@ impl<'tcx> TransformVisitor<'tcx> { is_return: bool, statements: &mut Vec<Statement<'tcx>>, ) { - let (kind, idx) = self.coroutine_state_adt_and_variant_idx(is_return); - - match self.coroutine_kind { - // `Poll::Pending` + let rvalue = match self.coroutine_kind { CoroutineKind::Async(_) => { - if !is_return { - assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0); - - // FIXME(swatinem): assert that `val` is indeed unit? 
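A hedged surface-level illustration of the values the new return block builds for `CoroutineKind::Gen` above: each suspension yields `Some(value)` and the final return produces `None`, mirrored here with a plain iterator as a stand-in for a `gen` body.

```rust
fn main() {
    // Stand-in for something like `gen { yield 1; yield 2; }`.
    let mut it = vec![1, 2].into_iter();
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    assert_eq!(it.next(), None);
    // "`gen` continues to return `None`": resuming after completion stays at `None`.
    assert_eq!(it.next(), None);
}
```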
- statements.push(Statement { - kind: StatementKind::Assign(Box::new(( - Place::return_place(), - Rvalue::Aggregate(Box::new(kind), IndexVec::new()), - ))), - source_info, - }); - return; + let poll_def_id = self.tcx.require_lang_item(LangItem::Poll, None); + let args = self.tcx.mk_args(&[self.old_ret_ty.into()]); + if is_return { + // Poll::Ready(val) + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + poll_def_id, + VariantIdx::from_usize(0), + args, + None, + None, + )), + IndexVec::from_raw(vec![val]), + ) + } else { + // Poll::Pending + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + poll_def_id, + VariantIdx::from_usize(1), + args, + None, + None, + )), + IndexVec::new(), + ) } } - // `Option::None` CoroutineKind::Gen(_) => { + let option_def_id = self.tcx.require_lang_item(LangItem::Option, None); + let args = self.tcx.mk_args(&[self.old_yield_ty.into()]); if is_return { - assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0); - - statements.push(Statement { - kind: StatementKind::Assign(Box::new(( - Place::return_place(), - Rvalue::Aggregate(Box::new(kind), IndexVec::new()), - ))), - source_info, - }); - return; + // None + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + option_def_id, + VariantIdx::from_usize(0), + args, + None, + None, + )), + IndexVec::new(), + ) + } else { + // Some(val) + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + option_def_id, + VariantIdx::from_usize(1), + args, + None, + None, + )), + IndexVec::from_raw(vec![val]), + ) } } - CoroutineKind::Coroutine => {} - } - - // else: `Poll::Ready(x)`, `CoroutineState::Yielded(x)`, `CoroutineState::Complete(x)`, or `Option::Some(x)` - assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1); + CoroutineKind::AsyncGen(_) => { + if is_return { + let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() }; + let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() }; + let yield_ty = args.type_at(0); + Rvalue::Use(Operand::Constant(Box::new(ConstOperand { + span: source_info.span, + const_: Const::Unevaluated( + UnevaluatedConst::new( + self.tcx.require_lang_item(LangItem::AsyncGenFinished, None), + self.tcx.mk_args(&[yield_ty.into()]), + ), + self.old_yield_ty, + ), + user_ty: None, + }))) + } else { + Rvalue::Use(val) + } + } + CoroutineKind::Coroutine => { + let coroutine_state_def_id = + self.tcx.require_lang_item(LangItem::CoroutineState, None); + let args = self.tcx.mk_args(&[self.old_yield_ty.into(), self.old_ret_ty.into()]); + if is_return { + // CoroutineState::Complete(val) + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + coroutine_state_def_id, + VariantIdx::from_usize(1), + args, + None, + None, + )), + IndexVec::from_raw(vec![val]), + ) + } else { + // CoroutineState::Yielded(val) + Rvalue::Aggregate( + Box::new(AggregateKind::Adt( + coroutine_state_def_id, + VariantIdx::from_usize(0), + args, + None, + None, + )), + IndexVec::from_raw(vec![val]), + ) + } + } + }; statements.push(Statement { - kind: StatementKind::Assign(Box::new(( - Place::return_place(), - Rvalue::Aggregate(Box::new(kind), [val].into()), - ))), + kind: StatementKind::Assign(Box::new((Place::return_place(), rvalue))), source_info, }); } @@ -420,7 +503,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> { let ret_val = match data.terminator().kind { TerminatorKind::Return => { - Some((true, None, Operand::Move(Place::from(self.new_ret_local)), None)) + Some((true, None, Operand::Move(Place::from(self.old_ret_local)), None)) } TerminatorKind::Yield { ref value, resume, 
resume_arg, drop } => { Some((false, Some((resume, resume_arg)), value.clone(), drop)) @@ -1334,7 +1417,9 @@ fn create_coroutine_resume_function<'tcx>( CoroutineKind::Async(_) | CoroutineKind::Coroutine => { insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind)) } - CoroutineKind::Gen(_) => transform.insert_none_ret_block(body), + CoroutineKind::AsyncGen(_) | CoroutineKind::Gen(_) => { + transform.insert_none_ret_block(body) + } }; cases.insert(1, (RETURNED, block)); } @@ -1493,10 +1578,11 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>( impl<'tcx> MirPass<'tcx> for StateTransform { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let Some(yield_ty) = body.yield_ty() else { + let Some(old_yield_ty) = body.yield_ty() else { // This only applies to coroutines return; }; + let old_ret_ty = body.return_ty(); assert!(body.coroutine_drop().is_none()); @@ -1519,38 +1605,42 @@ impl<'tcx> MirPass<'tcx> for StateTransform { }; let is_async_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Async(_))); + let is_async_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::AsyncGen(_))); let is_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Gen(_))); - let (state_adt_ref, state_args) = match body.coroutine_kind().unwrap() { + let new_ret_ty = match body.coroutine_kind().unwrap() { CoroutineKind::Async(_) => { // Compute Poll<return_ty> let poll_did = tcx.require_lang_item(LangItem::Poll, None); let poll_adt_ref = tcx.adt_def(poll_did); - let poll_args = tcx.mk_args(&[body.return_ty().into()]); - (poll_adt_ref, poll_args) + let poll_args = tcx.mk_args(&[old_ret_ty.into()]); + Ty::new_adt(tcx, poll_adt_ref, poll_args) } CoroutineKind::Gen(_) => { // Compute Option<yield_ty> let option_did = tcx.require_lang_item(LangItem::Option, None); let option_adt_ref = tcx.adt_def(option_did); - let option_args = tcx.mk_args(&[body.yield_ty().unwrap().into()]); - (option_adt_ref, option_args) + let option_args = tcx.mk_args(&[old_yield_ty.into()]); + Ty::new_adt(tcx, option_adt_ref, option_args) + } + CoroutineKind::AsyncGen(_) => { + // The yield ty is already `Poll<Option<yield_ty>>` + old_yield_ty } CoroutineKind::Coroutine => { // Compute CoroutineState<yield_ty, return_ty> let state_did = tcx.require_lang_item(LangItem::CoroutineState, None); let state_adt_ref = tcx.adt_def(state_did); - let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]); - (state_adt_ref, state_args) + let state_args = tcx.mk_args(&[old_yield_ty.into(), old_ret_ty.into()]); + Ty::new_adt(tcx, state_adt_ref, state_args) } }; - let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args); - // We rename RETURN_PLACE which has type mir.return_ty to new_ret_local + // We rename RETURN_PLACE which has type mir.return_ty to old_ret_local // RETURN_PLACE then is a fresh unused local with type ret_ty. - let new_ret_local = replace_local(RETURN_PLACE, ret_ty, body, tcx); + let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx); // Replace all occurrences of `ResumeTy` with `&mut Context<'_>` within async bodies. - if is_async_kind { + if is_async_kind || is_async_gen_kind { transform_async_context(tcx, body); } @@ -1564,9 +1654,10 @@ impl<'tcx> MirPass<'tcx> for StateTransform { } else { body.local_decls[resume_local].ty }; - let new_resume_local = replace_local(resume_local, resume_ty, body, tcx); + let old_resume_local = replace_local(resume_local, resume_ty, body, tcx); - // When first entering the coroutine, move the resume argument into its new local. 
+ // When first entering the coroutine, move the resume argument into its old local + // (which is now a generator interior). let source_info = SourceInfo::outermost(body.span); let stmts = &mut body.basic_blocks_mut()[START_BLOCK].statements; stmts.insert( @@ -1574,7 +1665,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform { Statement { source_info, kind: StatementKind::Assign(Box::new(( - new_resume_local.into(), + old_resume_local.into(), Rvalue::Use(Operand::Move(resume_local.into())), ))), }, @@ -1610,14 +1701,14 @@ impl<'tcx> MirPass<'tcx> for StateTransform { let mut transform = TransformVisitor { tcx, coroutine_kind: body.coroutine_kind().unwrap(), - state_adt_ref, - state_args, remap, storage_liveness, always_live_locals, suspension_points: Vec::new(), - new_ret_local, + old_ret_local, discr_ty, + old_ret_ty, + old_yield_ty, }; transform.visit_body(body); diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index 4db0a1db166..05ad14f1525 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -89,10 +89,10 @@ impl CoverageSpan { } } - pub fn merge_from(&mut self, mut other: CoverageSpan) { - debug_assert!(self.is_mergeable(&other)); + pub fn merge_from(&mut self, other: &Self) { + debug_assert!(self.is_mergeable(other)); self.span = self.span.to(other.span); - self.merged_spans.append(&mut other.merged_spans); + self.merged_spans.extend_from_slice(&other.merged_spans); } pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) { @@ -267,7 +267,7 @@ impl<'a> CoverageSpansGenerator<'a> { if curr.is_mergeable(prev) { debug!(" same bcb (and neither is a closure), merge with prev={prev:?}"); let prev = self.take_prev(); - self.curr_mut().merge_from(prev); + self.curr_mut().merge_from(&prev); self.maybe_push_macro_name_span(); // Note that curr.span may now differ from curr_original_span } else if prev.span.hi() <= curr.span.lo() { @@ -275,7 +275,7 @@ impl<'a> CoverageSpansGenerator<'a> { " different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}", ); let prev = self.take_prev(); - self.push_refined_span(prev); + self.refined_spans.push(prev); self.maybe_push_macro_name_span(); } else if prev.is_closure { // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the @@ -319,33 +319,30 @@ impl<'a> CoverageSpansGenerator<'a> { } } - let prev = self.take_prev(); - debug!(" AT END, adding last prev={prev:?}"); - - // Take `pending_dups` so that we can drain it while calling self methods. - // It is never used as a field after this point. - for dup in std::mem::take(&mut self.pending_dups) { + // Drain any remaining dups into the output. + for dup in self.pending_dups.drain(..) { debug!(" ...adding at least one pending dup={:?}", dup); - self.push_refined_span(dup); + self.refined_spans.push(dup); } - // Async functions wrap a closure that implements the body to be executed. The enclosing - // function is called and returns an `impl Future` without initially executing any of the - // body. To avoid showing the return from the enclosing function as a "covered" return from - // the closure, the enclosing function's `TerminatorKind::Return`s `CoverageSpan` is - // excluded. The closure's `Return` is the only one that will be counted. This provides - // adequate coverage, and more intuitive counts. (Avoids double-counting the closing brace - // of the function body.) 
- let body_ends_with_closure = if let Some(last_covspan) = self.refined_spans.last() { - last_covspan.is_closure && last_covspan.span.hi() == self.body_span.hi() - } else { - false - }; - - if !body_ends_with_closure { - self.push_refined_span(prev); + // There is usually a final span remaining in `prev` after the loop ends, + // so add it to the output as well. + if let Some(prev) = self.some_prev.take() { + debug!(" AT END, adding last prev={prev:?}"); + self.refined_spans.push(prev); } + // Do one last merge pass, to simplify the output. + self.refined_spans.dedup_by(|b, a| { + if a.is_mergeable(b) { + debug!(?a, ?b, "merging list-adjacent refined spans"); + a.merge_from(b); + true + } else { + false + } + }); + // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage // regions for the current function leave room for the closure's own coverage regions // (injected separately, from the closure's own MIR). @@ -353,18 +350,6 @@ impl<'a> CoverageSpansGenerator<'a> { self.refined_spans } - fn push_refined_span(&mut self, covspan: CoverageSpan) { - if let Some(last) = self.refined_spans.last_mut() - && last.is_mergeable(&covspan) - { - // Instead of pushing the new span, merge it with the last refined span. - debug!(?last, ?covspan, "merging new refined span with last refined span"); - last.merge_from(covspan); - } else { - self.refined_spans.push(covspan); - } - } - /// If `curr` is part of a new macro expansion, carve out and push a separate /// span that ends just after the macro name and its subsequent `!`. fn maybe_push_macro_name_span(&mut self) { @@ -397,41 +382,39 @@ impl<'a> CoverageSpansGenerator<'a> { " and curr starts a new macro expansion, so add a new span just for \ the macro `{visible_macro}!`, new span={macro_name_cov:?}", ); - self.push_refined_span(macro_name_cov); + self.refined_spans.push(macro_name_cov); } + #[track_caller] fn curr(&self) -> &CoverageSpan { - self.some_curr - .as_ref() - .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr")) + self.some_curr.as_ref().unwrap_or_else(|| bug!("some_curr is None (curr)")) } + #[track_caller] fn curr_mut(&mut self) -> &mut CoverageSpan { - self.some_curr - .as_mut() - .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr")) + self.some_curr.as_mut().unwrap_or_else(|| bug!("some_curr is None (curr_mut)")) } /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the /// `curr` coverage span. 
+ #[track_caller] fn take_curr(&mut self) -> CoverageSpan { - self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr")) + self.some_curr.take().unwrap_or_else(|| bug!("some_curr is None (take_curr)")) } + #[track_caller] fn prev(&self) -> &CoverageSpan { - self.some_prev - .as_ref() - .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev")) + self.some_prev.as_ref().unwrap_or_else(|| bug!("some_prev is None (prev)")) } + #[track_caller] fn prev_mut(&mut self) -> &mut CoverageSpan { - self.some_prev - .as_mut() - .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev")) + self.some_prev.as_mut().unwrap_or_else(|| bug!("some_prev is None (prev_mut)")) } + #[track_caller] fn take_prev(&mut self) -> CoverageSpan { - self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev")) + self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)")) } /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the @@ -454,19 +437,14 @@ impl<'a> CoverageSpansGenerator<'a> { previous iteration, or prev started a new disjoint span" ); if last_dup.span.hi() <= self.curr().span.lo() { - // Temporarily steal `pending_dups` into a local, so that we can - // drain it while calling other self methods. - let mut pending_dups = std::mem::take(&mut self.pending_dups); - for dup in pending_dups.drain(..) { + for dup in self.pending_dups.drain(..) { debug!(" ...adding at least one pending={:?}", dup); - self.push_refined_span(dup); + self.refined_spans.push(dup); } - // The list of dups is now empty, but we can recycle its capacity. - assert!(pending_dups.is_empty() && self.pending_dups.is_empty()); - self.pending_dups = pending_dups; } else { self.pending_dups.clear(); } + assert!(self.pending_dups.is_empty()); } /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order. @@ -513,22 +491,18 @@ impl<'a> CoverageSpansGenerator<'a> { let has_pre_closure_span = prev.span.lo() < right_cutoff; let has_post_closure_span = prev.span.hi() > right_cutoff; - // Temporarily steal `pending_dups` into a local, so that we can - // mutate and/or drain it while calling other self methods. - let mut pending_dups = std::mem::take(&mut self.pending_dups); - if has_pre_closure_span { let mut pre_closure = self.prev().clone(); pre_closure.span = pre_closure.span.with_hi(left_cutoff); debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure); - if !pending_dups.is_empty() { - for mut dup in pending_dups.iter().cloned() { - dup.span = dup.span.with_hi(left_cutoff); - debug!(" ...and at least one pre_closure dup={:?}", dup); - self.push_refined_span(dup); - } + + for mut dup in self.pending_dups.iter().cloned() { + dup.span = dup.span.with_hi(left_cutoff); + debug!(" ...and at least one pre_closure dup={:?}", dup); + self.refined_spans.push(dup); } - self.push_refined_span(pre_closure); + + self.refined_spans.push(pre_closure); } if has_post_closure_span { @@ -537,19 +511,17 @@ impl<'a> CoverageSpansGenerator<'a> { // about how the `CoverageSpan`s are ordered.) self.prev_mut().span = self.prev().span.with_lo(right_cutoff); debug!(" Mutated prev.span to start after the closure. 
prev={:?}", self.prev()); - for dup in pending_dups.iter_mut() { + + for dup in &mut self.pending_dups { debug!(" ...and at least one overlapping dup={:?}", dup); dup.span = dup.span.with_lo(right_cutoff); } + let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev. - self.push_refined_span(closure_covspan); // since self.prev() was already updated + self.refined_spans.push(closure_covspan); // since self.prev() was already updated } else { - pending_dups.clear(); + self.pending_dups.clear(); } - - // Restore the modified post-closure spans, or the empty vector's capacity. - assert!(self.pending_dups.is_empty()); - self.pending_dups = pending_dups; } /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all @@ -645,7 +617,7 @@ impl<'a> CoverageSpansGenerator<'a> { } else { debug!(" ... adding modified prev={:?}", self.prev()); let prev = self.take_prev(); - self.push_refined_span(prev); + self.refined_spans.push(prev); } } else { // with `pending_dups`, `prev` cannot have any statements that don't overlap diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs index e1531f2c239..eab9a9c98f8 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs @@ -44,6 +44,16 @@ pub(super) fn mir_to_initial_sorted_coverage_spans( .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse()) }); + // The desugaring of an async function includes a closure containing the + // original function body, and a terminator that returns the `impl Future`. + // That terminator will cause a confusing coverage count for the function's + // closing brace, so discard everything after the body closure span. + if let Some(body_closure_index) = + initial_spans.iter().rposition(|covspan| covspan.is_closure && covspan.span == body_span) + { + initial_spans.truncate(body_closure_index + 1); + } + initial_spans } @@ -92,13 +102,13 @@ fn is_closure(statement: &Statement<'_>) -> bool { /// If the MIR `Statement` has a span contributive to computing coverage spans, /// return it; otherwise return `None`. fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> { + use mir::coverage::CoverageKind; + match statement.kind { // These statements have spans that are often outside the scope of the executed source code // for their parent `BasicBlock`. StatementKind::StorageLive(_) | StatementKind::StorageDead(_) - // Coverage should not be encountered, but don't inject coverage coverage - | StatementKind::Coverage(_) // Ignore `ConstEvalCounter`s | StatementKind::ConstEvalCounter // Ignore `Nop`s @@ -122,9 +132,13 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> { // If and when the Issue is resolved, remove this special case match pattern: StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None, - // Retain spans from all other statements + // Retain spans from most other statements. StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding` | StatementKind::Intrinsic(..) + | StatementKind::Coverage(box mir::Coverage { + // The purpose of `SpanMarker` is to be matched and accepted here. + kind: CoverageKind::SpanMarker + }) | StatementKind::Assign(_) | StatementKind::SetDiscriminant { .. } | StatementKind::Deinit(..) 
@@ -133,6 +147,11 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> { | StatementKind::AscribeUserType(_, _) => { Some(statement.source_info.span) } + + StatementKind::Coverage(box mir::Coverage { + // These coverage statements should not exist prior to coverage instrumentation. + kind: CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. } + }) => bug!("Unexpected coverage statement found during coverage instrumentation: {statement:?}"), } } diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml deleted file mode 100644 index f753caa9124..00000000000 --- a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "coverage_test_macros" -version = "0.0.0" -edition = "2021" - -[lib] -proc-macro = true diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs deleted file mode 100644 index f41adf667ec..00000000000 --- a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -use proc_macro::TokenStream; - -#[proc_macro] -pub fn let_bcb(item: TokenStream) -> TokenStream { - format!("let bcb{item} = graph::BasicCoverageBlock::from_usize({item});").parse().unwrap() -} diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs index 702fe5f563e..302cbf05d78 100644 --- a/compiler/rustc_mir_transform/src/coverage/tests.rs +++ b/compiler/rustc_mir_transform/src/coverage/tests.rs @@ -27,8 +27,6 @@ use super::counters; use super::graph::{self, BasicCoverageBlock}; -use coverage_test_macros::let_bcb; - use itertools::Itertools; use rustc_data_structures::graph::WithNumNodes; use rustc_data_structures::graph::WithSuccessors; @@ -37,6 +35,10 @@ use rustc_middle::mir::*; use rustc_middle::ty; use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP}; +fn bcb(index: u32) -> BasicCoverageBlock { + BasicCoverageBlock::from_u32(index) +} + // All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`. const TEMP_BLOCK: BasicBlock = BasicBlock::MAX; @@ -300,12 +302,15 @@ fn goto_switchint<'a>() -> Body<'a> { mir_body } -macro_rules! assert_successors { - ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => { - let mut successors = $basic_coverage_blocks.successors[$i].clone(); - successors.sort_unstable(); - assert_eq!(successors, vec![$($successor),*]); - } +#[track_caller] +fn assert_successors( + basic_coverage_blocks: &graph::CoverageGraph, + bcb: BasicCoverageBlock, + expected_successors: &[BasicCoverageBlock], +) { + let mut successors = basic_coverage_blocks.successors[bcb].clone(); + successors.sort_unstable(); + assert_eq!(successors, expected_successors); } #[test] @@ -334,13 +339,9 @@ fn test_covgraph_goto_switchint() { basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>() ); - let_bcb!(0); - let_bcb!(1); - let_bcb!(2); - - assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]); - assert_successors!(basic_coverage_blocks, bcb1, []); - assert_successors!(basic_coverage_blocks, bcb2, []); + assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1), bcb(2)]); + assert_successors(&basic_coverage_blocks, bcb(1), &[]); + assert_successors(&basic_coverage_blocks, bcb(2), &[]); } /// Create a mock `Body` with a loop. 
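The two helpers introduced just above (`bcb()` and the `#[track_caller]` version of `assert_successors`) let the `coverage_test_macros` proc-macro crate be removed (its `Cargo.toml` and `lib.rs` are deleted above) while keeping failure locations useful: with `#[track_caller]`, a failing assertion is reported at the test's call site rather than inside the helper. A small self-contained sketch of that pattern, not rustc code:

    // `#[track_caller]` makes the panic location point at the call site in the test,
    // which is what the old inline-expanding macros provided.
    #[track_caller]
    fn assert_sorted(values: &[u32]) {
        assert!(values.windows(2).all(|w| w[0] <= w[1]), "not sorted: {values:?}");
    }

    fn main() {
        assert_sorted(&[1, 2, 3]);
        // assert_sorted(&[3, 1]); // would panic, pointing at this line, not the helper
    }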
@@ -418,15 +419,10 @@ fn test_covgraph_switchint_then_loop_else_return() { basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>() ); - let_bcb!(0); - let_bcb!(1); - let_bcb!(2); - let_bcb!(3); - - assert_successors!(basic_coverage_blocks, bcb0, [bcb1]); - assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]); - assert_successors!(basic_coverage_blocks, bcb2, []); - assert_successors!(basic_coverage_blocks, bcb3, [bcb1]); + assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]); + assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]); + assert_successors(&basic_coverage_blocks, bcb(2), &[]); + assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(1)]); } /// Create a mock `Body` with nested loops. @@ -546,21 +542,13 @@ fn test_covgraph_switchint_loop_then_inner_loop_else_break() { basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>() ); - let_bcb!(0); - let_bcb!(1); - let_bcb!(2); - let_bcb!(3); - let_bcb!(4); - let_bcb!(5); - let_bcb!(6); - - assert_successors!(basic_coverage_blocks, bcb0, [bcb1]); - assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]); - assert_successors!(basic_coverage_blocks, bcb2, []); - assert_successors!(basic_coverage_blocks, bcb3, [bcb4]); - assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]); - assert_successors!(basic_coverage_blocks, bcb5, [bcb1]); - assert_successors!(basic_coverage_blocks, bcb6, [bcb4]); + assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]); + assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]); + assert_successors(&basic_coverage_blocks, bcb(2), &[]); + assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(4)]); + assert_successors(&basic_coverage_blocks, bcb(4), &[bcb(5), bcb(6)]); + assert_successors(&basic_coverage_blocks, bcb(5), &[bcb(1)]); + assert_successors(&basic_coverage_blocks, bcb(6), &[bcb(4)]); } #[test] @@ -595,10 +583,7 @@ fn test_find_loop_backedges_one() { backedges ); - let_bcb!(1); - let_bcb!(3); - - assert_eq!(backedges[bcb1], vec![bcb3]); + assert_eq!(backedges[bcb(1)], &[bcb(3)]); } #[test] @@ -613,13 +598,8 @@ fn test_find_loop_backedges_two() { backedges ); - let_bcb!(1); - let_bcb!(4); - let_bcb!(5); - let_bcb!(6); - - assert_eq!(backedges[bcb1], vec![bcb5]); - assert_eq!(backedges[bcb4], vec![bcb6]); + assert_eq!(backedges[bcb(1)], &[bcb(5)]); + assert_eq!(backedges[bcb(4)], &[bcb(6)]); } #[test] @@ -632,13 +612,11 @@ fn test_traverse_coverage_with_loops() { traversed_in_order.push(bcb); } - let_bcb!(6); - // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except* // bcb6 are inside the first loop. assert_eq!( *traversed_in_order.last().expect("should have elements"), - bcb6, + bcb(6), "bcb6 should not be visited until all nodes inside the first loop have been visited" ); } @@ -656,20 +634,18 @@ fn test_make_bcb_counters() { coverage_counters.make_bcb_counters(&basic_coverage_blocks, bcb_has_coverage_spans); assert_eq!(coverage_counters.num_expressions(), 0); - let_bcb!(1); assert_eq!( 0, // bcb1 has a `Counter` with id = 0 - match coverage_counters.bcb_counter(bcb1).expect("should have a counter") { + match coverage_counters.bcb_counter(bcb(1)).expect("should have a counter") { counters::BcbCounter::Counter { id, .. 
} => id, _ => panic!("expected a Counter"), } .as_u32() ); - let_bcb!(2); assert_eq!( 1, // bcb2 has a `Counter` with id = 1 - match coverage_counters.bcb_counter(bcb2).expect("should have a counter") { + match coverage_counters.bcb_counter(bcb(2)).expect("should have a counter") { counters::BcbCounter::Counter { id, .. } => id, _ => panic!("expected a Counter"), } diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index 3a7cc405ca7..71b3754fac8 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -844,6 +844,9 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> { mir::TerminatorKind::Assert { ref msg, .. } => { let lang_item = match &**msg { mir::AssertKind::BoundsCheck { .. } => LangItem::PanicBoundsCheck, + mir::AssertKind::MisalignedPointerDereference { .. } => { + LangItem::PanicMisalignedPointerDereference + } _ => LangItem::Panic, }; push_mono_lang_item(self, lang_item); diff --git a/compiler/rustc_next_trait_solver/Cargo.toml b/compiler/rustc_next_trait_solver/Cargo.toml new file mode 100644 index 00000000000..9d496fd8e81 --- /dev/null +++ b/compiler/rustc_next_trait_solver/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "rustc_next_trait_solver" +version = "0.0.0" +edition = "2021" + +[dependencies] +rustc_type_ir = { path = "../rustc_type_ir", default-features = false } + +[features] +default = ["nightly"] +nightly = [ + "rustc_type_ir/nightly", +] \ No newline at end of file diff --git a/compiler/rustc_trait_selection/src/solve/canonicalize.rs b/compiler/rustc_next_trait_solver/src/canonicalizer.rs index 004dc45263c..cb1f328577d 100644 --- a/compiler/rustc_trait_selection/src/solve/canonicalize.rs +++ b/compiler/rustc_next_trait_solver/src/canonicalizer.rs @@ -1,17 +1,10 @@ use std::cmp::Ordering; -use crate::infer::InferCtxt; -use rustc_middle::infer::canonical::Canonical; -use rustc_middle::infer::canonical::CanonicalTyVarKind; -use rustc_middle::infer::canonical::CanonicalVarInfo; -use rustc_middle::infer::canonical::CanonicalVarInfos; -use rustc_middle::infer::canonical::CanonicalVarKind; -use rustc_middle::ty::BoundRegionKind::BrAnon; -use rustc_middle::ty::BoundTyKind; -use rustc_middle::ty::TyCtxt; -use rustc_middle::ty::TypeVisitableExt; -use rustc_middle::ty::{self, Ty}; -use rustc_middle::ty::{TypeFoldable, TypeFolder, TypeSuperFoldable}; +use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable}; +use rustc_type_ir::{ + self as ty, Canonical, CanonicalTyVarKind, CanonicalVarInfo, CanonicalVarKind, ConstTy, + InferCtxtLike, Interner, IntoKind, PlaceholderLike, +}; /// Whether we're canonicalizing a query input or the query response. 
/// @@ -42,23 +35,22 @@ pub enum CanonicalizeMode { }, } -pub struct Canonicalizer<'a, 'tcx> { - infcx: &'a InferCtxt<'tcx>, +pub struct Canonicalizer<'a, Infcx: InferCtxtLike<Interner = I>, I: Interner> { + infcx: &'a Infcx, canonicalize_mode: CanonicalizeMode, - variables: &'a mut Vec<ty::GenericArg<'tcx>>, - primitive_var_infos: Vec<CanonicalVarInfo<'tcx>>, + variables: &'a mut Vec<I::GenericArg>, + primitive_var_infos: Vec<CanonicalVarInfo<I>>, binder_index: ty::DebruijnIndex, } -impl<'a, 'tcx> Canonicalizer<'a, 'tcx> { - #[instrument(level = "debug", skip(infcx), ret)] - pub fn canonicalize<T: TypeFoldable<TyCtxt<'tcx>>>( - infcx: &'a InferCtxt<'tcx>, +impl<'a, Infcx: InferCtxtLike<Interner = I>, I: Interner> Canonicalizer<'a, Infcx, I> { + pub fn canonicalize<T: TypeFoldable<I>>( + infcx: &'a Infcx, canonicalize_mode: CanonicalizeMode, - variables: &'a mut Vec<ty::GenericArg<'tcx>>, + variables: &'a mut Vec<I::GenericArg>, value: T, - ) -> Canonical<'tcx, T> { + ) -> ty::Canonical<I, T> { let mut canonicalizer = Canonicalizer { infcx, canonicalize_mode, @@ -69,15 +61,16 @@ impl<'a, 'tcx> Canonicalizer<'a, 'tcx> { }; let value = value.fold_with(&mut canonicalizer); - assert!(!value.has_infer()); - assert!(!value.has_placeholders()); + // FIXME: Restore these assertions. Should we uplift type flags? + // assert!(!value.has_infer(), "unexpected infer in {value:?}"); + // assert!(!value.has_placeholders(), "unexpected placeholders in {value:?}"); let (max_universe, variables) = canonicalizer.finalize(); Canonical { max_universe, variables, value } } - fn finalize(self) -> (ty::UniverseIndex, CanonicalVarInfos<'tcx>) { + fn finalize(self) -> (ty::UniverseIndex, I::CanonicalVars) { let mut var_infos = self.primitive_var_infos; // See the rustc-dev-guide section about how we deal with universes // during canonicalization in the new solver. 
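Taken together, these `canonicalizer.rs` hunks move the canonicalizer from `rustc_trait_selection` into the new `rustc_next_trait_solver` crate and make it generic over the `InferCtxtLike`/`Interner` abstractions from `rustc_type_ir`, instead of hard-coding `InferCtxt<'tcx>` and `TyCtxt<'tcx>`. The overall shape of that generic-folder pattern, sketched here with invented toy traits (not the real `rustc_type_ir` definitions):

    trait Interner {
        type Ty: std::fmt::Debug;
    }

    trait InferCtxtLike {
        type Interner: Interner;
        fn interner(&self) -> &Self::Interner;
    }

    // The folder is parameterized over any infcx whose interner is `I`, so the same
    // code can run inside rustc or in a standalone solver.
    struct Canonicalizer<'a, Infcx: InferCtxtLike<Interner = I>, I: Interner> {
        infcx: &'a Infcx,
        variables: Vec<I::Ty>,
    }

    impl<'a, Infcx: InferCtxtLike<Interner = I>, I: Interner> Canonicalizer<'a, Infcx, I> {
        fn new(infcx: &'a Infcx) -> Self {
            Canonicalizer { infcx, variables: Vec::new() }
        }
    }

    // Toy implementations so the sketch compiles and runs on its own.
    struct ToyInterner;
    impl Interner for ToyInterner {
        type Ty = u32;
    }

    struct ToyInfcx(ToyInterner);
    impl InferCtxtLike for ToyInfcx {
        type Interner = ToyInterner;
        fn interner(&self) -> &ToyInterner {
            &self.0
        }
    }

    fn main() {
        let infcx = ToyInfcx(ToyInterner);
        let canonicalizer = Canonicalizer::new(&infcx);
        println!("{} tracked variables", canonicalizer.variables.len());
        let _ = canonicalizer.infcx.interner();
    }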
@@ -105,7 +98,7 @@ impl<'a, 'tcx> Canonicalizer<'a, 'tcx> { .max() .unwrap_or(ty::UniverseIndex::ROOT); - let var_infos = self.infcx.tcx.mk_canonical_var_infos(&var_infos); + let var_infos = self.infcx.interner().mk_canonical_var_infos(&var_infos); return (max_universe, var_infos); } } @@ -131,7 +124,7 @@ impl<'a, 'tcx> Canonicalizer<'a, 'tcx> { let mut existential_in_new_uv = false; let mut next_orig_uv = Some(ty::UniverseIndex::ROOT); while let Some(orig_uv) = next_orig_uv.take() { - let mut update_uv = |var: &mut CanonicalVarInfo<'tcx>, orig_uv, is_existential| { + let mut update_uv = |var: &mut CanonicalVarInfo<I>, orig_uv, is_existential| { let uv = var.universe(); match uv.cmp(&orig_uv) { Ordering::Less => (), // Already updated @@ -187,19 +180,22 @@ impl<'a, 'tcx> Canonicalizer<'a, 'tcx> { } } - let var_infos = self.infcx.tcx.mk_canonical_var_infos(&var_infos); + let var_infos = self.infcx.interner().mk_canonical_var_infos(&var_infos); (curr_compressed_uv, var_infos) } } -impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { - fn interner(&self) -> TyCtxt<'tcx> { - self.infcx.tcx +impl<Infcx: InferCtxtLike<Interner = I>, I: Interner> TypeFolder<I> + for Canonicalizer<'_, Infcx, I> +{ + fn interner(&self) -> I { + self.infcx.interner() } - fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> ty::Binder<'tcx, T> + fn fold_binder<T>(&mut self, t: I::Binder<T>) -> I::Binder<T> where - T: TypeFoldable<TyCtxt<'tcx>>, + T: TypeFoldable<I>, + I::Binder<T>: TypeSuperFoldable<I>, { self.binder_index.shift_in(1); let t = t.super_fold_with(self); @@ -207,21 +203,8 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { t } - fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { - if let ty::ReVar(vid) = *r { - let resolved_region = self - .infcx - .inner - .borrow_mut() - .unwrap_region_constraints() - .opportunistic_resolve_var(self.infcx.tcx, vid); - assert_eq!( - r, resolved_region, - "region var should have been resolved, {r} -> {resolved_region}" - ); - } - - let kind = match *r { + fn fold_region(&mut self, r: I::Region) -> I::Region { + let kind = match r.kind() { ty::ReBound(..) => return r, // We may encounter `ReStatic` in item signatures or the hidden type @@ -237,9 +220,11 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { CanonicalizeMode::Response { .. } => return r, }, - ty::ReLateParam(_) | ty::ReEarlyParam(_) => match self.canonicalize_mode { + ty::ReEarlyParam(_) | ty::ReLateParam(_) => match self.canonicalize_mode { CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT), - CanonicalizeMode::Response { .. } => bug!("unexpected region in response: {r:?}"), + CanonicalizeMode::Response { .. } => { + panic!("unexpected region in response: {r:?}") + } }, ty::RePlaceholder(placeholder) => match self.canonicalize_mode { @@ -248,20 +233,32 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { CanonicalizeMode::Response { max_input_universe } => { // If we have a placeholder region inside of a query, it must be from // a new universe. 
- if max_input_universe.can_name(placeholder.universe) { - bug!("new placeholder in universe {max_input_universe:?}: {r:?}"); + if max_input_universe.can_name(placeholder.universe()) { + panic!("new placeholder in universe {max_input_universe:?}: {r:?}"); } CanonicalVarKind::PlaceholderRegion(placeholder) } }, - ty::ReVar(_) => match self.canonicalize_mode { - CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT), - CanonicalizeMode::Response { .. } => { - CanonicalVarKind::Region(self.infcx.universe_of_region(r)) - } - }, + ty::ReVar(vid) => { + assert_eq!( + self.infcx.root_lt_var(vid), + vid, + "region vid should have been resolved fully before canonicalization" + ); + assert_eq!( + self.infcx.probe_lt_var(vid), + None, + "region vid should have been resolved fully before canonicalization" + ); + match self.canonicalize_mode { + CanonicalizeMode::Input => CanonicalVarKind::Region(ty::UniverseIndex::ROOT), + CanonicalizeMode::Response { .. } => { + CanonicalVarKind::Region(self.infcx.universe_of_lt(vid).unwrap()) + } + } + } ty::ReError(_) => return r, }; @@ -271,55 +268,60 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { self.variables.iter().position(|&v| v == r.into()).map(ty::BoundVar::from) } }; + let var = existing_bound_var.unwrap_or_else(|| { let var = ty::BoundVar::from(self.variables.len()); self.variables.push(r.into()); self.primitive_var_infos.push(CanonicalVarInfo { kind }); var }); - let br = ty::BoundRegion { var, kind: BrAnon }; - ty::Region::new_bound(self.interner(), self.binder_index, br) + + self.interner().mk_bound_region(self.binder_index, var) } - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - let kind = match *t.kind() { - ty::Infer(ty::TyVar(vid)) => { - assert_eq!(self.infcx.root_var(vid), vid, "ty vid should have been resolved"); - let Err(ui) = self.infcx.probe_ty_var(vid) else { - bug!("ty var should have been resolved: {t}"); - }; - CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) - } - ty::Infer(ty::IntVar(vid)) => { - assert_eq!(self.infcx.opportunistic_resolve_int_var(vid), t); - CanonicalVarKind::Ty(CanonicalTyVarKind::Int) - } - ty::Infer(ty::FloatVar(vid)) => { - assert_eq!(self.infcx.opportunistic_resolve_float_var(vid), t); - CanonicalVarKind::Ty(CanonicalTyVarKind::Float) - } - ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { - bug!("fresh var during canonicalization: {t:?}") - } + fn fold_ty(&mut self, t: I::Ty) -> I::Ty + where + I::Ty: TypeSuperFoldable<I>, + { + let kind = match t.kind() { + ty::Infer(i) => match i { + ty::TyVar(vid) => { + assert_eq!( + self.infcx.root_ty_var(vid), + vid, + "ty vid should have been resolved fully before canonicalization" + ); + assert_eq!( + self.infcx.probe_ty_var(vid), + None, + "ty vid should have been resolved fully before canonicalization" + ); + + CanonicalVarKind::Ty(CanonicalTyVarKind::General( + self.infcx + .universe_of_ty(vid) + .unwrap_or_else(|| panic!("ty var should have been resolved: {t:?}")), + )) + } + ty::IntVar(_) => CanonicalVarKind::Ty(CanonicalTyVarKind::Int), + ty::FloatVar(_) => CanonicalVarKind::Ty(CanonicalTyVarKind::Float), + ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => { + todo!() + } + }, ty::Placeholder(placeholder) => match self.canonicalize_mode { - CanonicalizeMode::Input => CanonicalVarKind::PlaceholderTy(ty::Placeholder { - universe: placeholder.universe, - bound: ty::BoundTy { - var: ty::BoundVar::from_usize(self.variables.len()), - kind: ty::BoundTyKind::Anon, - }, - }), + 
CanonicalizeMode::Input => CanonicalVarKind::PlaceholderTy(PlaceholderLike::new( + placeholder.universe(), + self.variables.len().into(), + )), CanonicalizeMode::Response { .. } => CanonicalVarKind::PlaceholderTy(placeholder), }, ty::Param(_) => match self.canonicalize_mode { - CanonicalizeMode::Input => CanonicalVarKind::PlaceholderTy(ty::Placeholder { - universe: ty::UniverseIndex::ROOT, - bound: ty::BoundTy { - var: ty::BoundVar::from_usize(self.variables.len()), - kind: ty::BoundTyKind::Anon, - }, - }), - CanonicalizeMode::Response { .. } => bug!("param ty in response: {t:?}"), + CanonicalizeMode::Input => CanonicalVarKind::PlaceholderTy(PlaceholderLike::new( + ty::UniverseIndex::ROOT, + self.variables.len().into(), + )), + CanonicalizeMode::Response { .. } => panic!("param ty in response: {t:?}"), }, ty::Bool | ty::Char @@ -354,44 +356,38 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { var }), ); - let bt = ty::BoundTy { var, kind: BoundTyKind::Anon }; - Ty::new_bound(self.infcx.tcx, self.binder_index, bt) + + self.interner().mk_bound_ty(self.binder_index, var) } - fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> { + fn fold_const(&mut self, c: I::Const) -> I::Const + where + I::Const: TypeSuperFoldable<I>, + { let kind = match c.kind() { - ty::ConstKind::Infer(ty::InferConst::Var(vid)) => { - assert_eq!( - self.infcx.root_const_var(vid), - vid, - "const var should have been resolved" - ); - let Err(ui) = self.infcx.probe_const_var(vid) else { - bug!("const var should have been resolved"); - }; - // FIXME: we should fold this ty eventually - CanonicalVarKind::Const(ui, c.ty()) - } - ty::ConstKind::Infer(ty::InferConst::EffectVar(vid)) => { - assert_eq!( - self.infcx.root_effect_var(vid), - vid, - "effect var should have been resolved" - ); - let None = self.infcx.probe_effect_var(vid) else { - bug!("effect var should have been resolved"); - }; - CanonicalVarKind::Effect - } - ty::ConstKind::Infer(ty::InferConst::Fresh(_)) => { - bug!("fresh var during canonicalization: {c:?}") + ty::ConstKind::Infer(i) => { + // FIXME: we should fold the ty too eventually + match i { + ty::InferConst::Var(vid) => { + assert_eq!( + self.infcx.root_ct_var(vid), + vid, + "region vid should have been resolved fully before canonicalization" + ); + assert_eq!( + self.infcx.probe_ct_var(vid), + None, + "region vid should have been resolved fully before canonicalization" + ); + CanonicalVarKind::Const(self.infcx.universe_of_ct(vid).unwrap(), c.ty()) + } + ty::InferConst::EffectVar(_) => CanonicalVarKind::Effect, + ty::InferConst::Fresh(_) => todo!(), + } } ty::ConstKind::Placeholder(placeholder) => match self.canonicalize_mode { CanonicalizeMode::Input => CanonicalVarKind::PlaceholderConst( - ty::Placeholder { - universe: placeholder.universe, - bound: ty::BoundVar::from(self.variables.len()), - }, + PlaceholderLike::new(placeholder.universe(), self.variables.len().into()), c.ty(), ), CanonicalizeMode::Response { .. } => { @@ -400,13 +396,10 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { }, ty::ConstKind::Param(_) => match self.canonicalize_mode { CanonicalizeMode::Input => CanonicalVarKind::PlaceholderConst( - ty::Placeholder { - universe: ty::UniverseIndex::ROOT, - bound: ty::BoundVar::from(self.variables.len()), - }, + PlaceholderLike::new(ty::UniverseIndex::ROOT, self.variables.len().into()), c.ty(), ), - CanonicalizeMode::Response { .. } => bug!("param ty in response: {c:?}"), + CanonicalizeMode::Response { .. 
} => panic!("param ty in response: {c:?}"), }, ty::ConstKind::Bound(_, _) | ty::ConstKind::Unevaluated(_) @@ -423,6 +416,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> { var }), ); - ty::Const::new_bound(self.infcx.tcx, self.binder_index, var, c.ty()) + + self.interner().mk_bound_const(self.binder_index, var, c.ty()) } } diff --git a/compiler/rustc_next_trait_solver/src/lib.rs b/compiler/rustc_next_trait_solver/src/lib.rs new file mode 100644 index 00000000000..e5fc8f755e0 --- /dev/null +++ b/compiler/rustc_next_trait_solver/src/lib.rs @@ -0,0 +1 @@ +pub mod canonicalizer; diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl index da51d9dbe9f..363b8f4bfb9 100644 --- a/compiler/rustc_parse/messages.ftl +++ b/compiler/rustc_parse/messages.ftl @@ -23,8 +23,6 @@ parse_async_block_in_2015 = `async` blocks are only allowed in Rust 2018 or late parse_async_fn_in_2015 = `async fn` is not permitted in Rust 2015 .label = to use `async fn`, switch to Rust 2018 or later -parse_async_gen_fn = `async gen` functions are not supported - parse_async_move_block_in_2015 = `async move` blocks are only allowed in Rust 2018 or later parse_async_move_order_incorrect = the order of `move` and `async` is incorrect @@ -458,8 +456,6 @@ parse_macro_expands_to_adt_field = macros cannot expand to {$adt_ty} fields parse_macro_expands_to_enum_variant = macros cannot expand to enum variants -parse_macro_expands_to_match_arm = macros cannot expand to match arms - parse_macro_invocation_visibility = can't qualify macro invocation with `pub` .suggestion = remove the visibility .help = try adjusting the macro to put `{$vis}` inside the invocation diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs index 45f950db5c3..bc53ab83439 100644 --- a/compiler/rustc_parse/src/errors.rs +++ b/compiler/rustc_parse/src/errors.rs @@ -563,13 +563,6 @@ pub(crate) struct GenFn { } #[derive(Diagnostic)] -#[diag(parse_async_gen_fn)] -pub(crate) struct AsyncGenFn { - #[primary_span] - pub span: Span, -} - -#[derive(Diagnostic)] #[diag(parse_comma_after_base_struct)] #[note] pub(crate) struct CommaAfterBaseStruct { diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index 7ab0d3f35ea..5295172b25e 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -2839,7 +2839,6 @@ impl<'a> Parser<'a> { pub(crate) fn maybe_recover_unexpected_comma( &mut self, lo: Span, - is_mac_invoc: bool, rt: CommaRecoveryMode, ) -> PResult<'a, ()> { if self.token != token::Comma { @@ -2860,28 +2859,24 @@ impl<'a> Parser<'a> { let seq_span = lo.to(self.prev_token.span); let mut err = self.struct_span_err(comma_span, "unexpected `,` in pattern"); if let Ok(seq_snippet) = self.span_to_snippet(seq_span) { - if is_mac_invoc { - err.note(fluent::parse_macro_expands_to_match_arm); - } else { - err.multipart_suggestion( - format!( - "try adding parentheses to match on a tuple{}", - if let CommaRecoveryMode::LikelyTuple = rt { "" } else { "..." }, - ), - vec![ - (seq_span.shrink_to_lo(), "(".to_string()), - (seq_span.shrink_to_hi(), ")".to_string()), - ], + err.multipart_suggestion( + format!( + "try adding parentheses to match on a tuple{}", + if let CommaRecoveryMode::LikelyTuple = rt { "" } else { "..." 
}, + ), + vec![ + (seq_span.shrink_to_lo(), "(".to_string()), + (seq_span.shrink_to_hi(), ")".to_string()), + ], + Applicability::MachineApplicable, + ); + if let CommaRecoveryMode::EitherTupleOrPipe = rt { + err.span_suggestion( + seq_span, + "...or a vertical bar to match on multiple alternatives", + seq_snippet.replace(',', " |"), Applicability::MachineApplicable, ); - if let CommaRecoveryMode::EitherTupleOrPipe = rt { - err.span_suggestion( - seq_span, - "...or a vertical bar to match on multiple alternatives", - seq_snippet.replace(',', " |"), - Applicability::MachineApplicable, - ); - } } } Err(err) diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 8482824ec4b..406a6def019 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1442,20 +1442,21 @@ impl<'a> Parser<'a> { } else if this.token.uninterpolated_span().at_least_rust_2018() { // `Span:.at_least_rust_2018()` is somewhat expensive; don't get it repeatedly. if this.check_keyword(kw::Async) { - if this.is_gen_block(kw::Async) { - // Check for `async {` and `async move {`. + // FIXME(gen_blocks): Parse `gen async` and suggest swap + if this.is_gen_block(kw::Async, 0) { + // Check for `async {` and `async move {`, + // or `async gen {` and `async gen move {`. this.parse_gen_block() } else { this.parse_expr_closure() } - } else if this.eat_keyword(kw::Await) { + } else if this.token.uninterpolated_span().at_least_rust_2024() + && (this.is_gen_block(kw::Gen, 0) + || (this.check_keyword(kw::Async) && this.is_gen_block(kw::Gen, 1))) + { + this.parse_gen_block() + } else if this.eat_keyword_noexpect(kw::Await) { this.recover_incorrect_await_syntax(lo, this.prev_token.span) - } else if this.token.uninterpolated_span().at_least_rust_2024() { - if this.is_gen_block(kw::Gen) { - this.parse_gen_block() - } else { - this.parse_expr_lit() - } } else { this.parse_expr_lit() } @@ -2234,8 +2235,8 @@ impl<'a> Parser<'a> { let movability = if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable }; - let asyncness = if self.token.uninterpolated_span().at_least_rust_2018() { - self.parse_asyncness(Case::Sensitive) + let coroutine_kind = if self.token.uninterpolated_span().at_least_rust_2018() { + self.parse_coroutine_kind(Case::Sensitive) } else { None }; @@ -2261,9 +2262,17 @@ impl<'a> Parser<'a> { } }; - if let Some(CoroutineKind::Async { span, .. }) = asyncness { - // Feature-gate `async ||` closures. - self.sess.gated_spans.gate(sym::async_closure, span); + match coroutine_kind { + Some(CoroutineKind::Async { span, .. }) => { + // Feature-gate `async ||` closures. + self.sess.gated_spans.gate(sym::async_closure, span); + } + Some(CoroutineKind::Gen { span, .. }) | Some(CoroutineKind::AsyncGen { span, .. }) => { + // Feature-gate `gen ||` and `async gen ||` closures. + // FIXME(gen_blocks): This perhaps should be a different gate. 
+ self.sess.gated_spans.gate(sym::gen_blocks, span); + } + None => {} } if self.token.kind == TokenKind::Semi @@ -2284,7 +2293,7 @@ impl<'a> Parser<'a> { binder, capture_clause, constness, - coro_kind: asyncness, + coroutine_kind, movability, fn_decl, body, @@ -2899,127 +2908,155 @@ impl<'a> Parser<'a> { self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let lo = this.token.span; let (pat, guard) = this.parse_match_arm_pat_and_guard()?; - let arrow_span = this.token.span; - if let Err(mut err) = this.expect(&token::FatArrow) { - // We might have a `=>` -> `=` or `->` typo (issue #89396). - if TokenKind::FatArrow - .similar_tokens() - .is_some_and(|similar_tokens| similar_tokens.contains(&this.token.kind)) - { - err.span_suggestion( - this.token.span, - "use a fat arrow to start a match arm", - "=>", - Applicability::MachineApplicable, - ); - if matches!( - (&this.prev_token.kind, &this.token.kind), - (token::DotDotEq, token::Gt) - ) { - // `error_inclusive_range_match_arrow` handles cases like `0..=> {}`, - // so we suppress the error here - err.delay_as_bug(); + + let span_before_body = this.prev_token.span; + let arm_body; + let is_fat_arrow = this.check(&token::FatArrow); + let is_almost_fat_arrow = TokenKind::FatArrow + .similar_tokens() + .is_some_and(|similar_tokens| similar_tokens.contains(&this.token.kind)); + let mut result = if !is_fat_arrow && !is_almost_fat_arrow { + // A pattern without a body, allowed for never patterns. + arm_body = None; + this.expect_one_of(&[token::Comma], &[token::CloseDelim(Delimiter::Brace)]) + } else { + if let Err(mut err) = this.expect(&token::FatArrow) { + // We might have a `=>` -> `=` or `->` typo (issue #89396). + if is_almost_fat_arrow { + err.span_suggestion( + this.token.span, + "use a fat arrow to start a match arm", + "=>", + Applicability::MachineApplicable, + ); + if matches!( + (&this.prev_token.kind, &this.token.kind), + (token::DotDotEq, token::Gt) + ) { + // `error_inclusive_range_match_arrow` handles cases like `0..=> {}`, + // so we suppress the error here + err.delay_as_bug(); + } else { + err.emit(); + } + this.bump(); } else { - err.emit(); + return Err(err); } - this.bump(); - } else { - return Err(err); } - } - let arm_start_span = this.token.span; + let arrow_span = this.prev_token.span; + let arm_start_span = this.token.span; - let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| { - err.span_label(arrow_span, "while parsing the `match` arm starting here"); - err - })?; + let expr = + this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| { + err.span_label(arrow_span, "while parsing the `match` arm starting here"); + err + })?; - let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) - && this.token != token::CloseDelim(Delimiter::Brace); - - let hi = this.prev_token.span; - - if require_comma { - let sm = this.sess.source_map(); - if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) { - let span = body.span; - return Ok(( - ast::Arm { - attrs, - pat, - guard, - body, - span, - id: DUMMY_NODE_ID, - is_placeholder: false, - }, - TrailingToken::None, - )); - } - this.expect_one_of(&[token::Comma], &[token::CloseDelim(Delimiter::Brace)]) - .or_else(|mut err| { - if this.token == token::FatArrow { - if let Ok(expr_lines) = sm.span_to_lines(expr.span) - && let Ok(arm_start_lines) = sm.span_to_lines(arm_start_span) - && arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col - && expr_lines.lines.len() == 2 - { - // We check 
whether there's any trailing code in the parse span, - // if there isn't, we very likely have the following: - // - // X | &Y => "y" - // | -- - missing comma - // | | - // | arrow_span - // X | &X => "x" - // | - ^^ self.token.span - // | | - // | parsed until here as `"y" & X` - err.span_suggestion_short( - arm_start_span.shrink_to_hi(), - "missing a comma here to end this `match` arm", - ",", - Applicability::MachineApplicable, + let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) + && this.token != token::CloseDelim(Delimiter::Brace); + + if !require_comma { + arm_body = Some(expr); + this.eat(&token::Comma); + Ok(false) + } else if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) { + arm_body = Some(body); + Ok(true) + } else { + let expr_span = expr.span; + arm_body = Some(expr); + this.expect_one_of(&[token::Comma], &[token::CloseDelim(Delimiter::Brace)]) + .map_err(|mut err| { + if this.token == token::FatArrow { + let sm = this.sess.source_map(); + if let Ok(expr_lines) = sm.span_to_lines(expr_span) + && let Ok(arm_start_lines) = sm.span_to_lines(arm_start_span) + && arm_start_lines.lines[0].end_col + == expr_lines.lines[0].end_col + && expr_lines.lines.len() == 2 + { + // We check whether there's any trailing code in the parse span, + // if there isn't, we very likely have the following: + // + // X | &Y => "y" + // | -- - missing comma + // | | + // | arrow_span + // X | &X => "x" + // | - ^^ self.token.span + // | | + // | parsed until here as `"y" & X` + err.span_suggestion_short( + arm_start_span.shrink_to_hi(), + "missing a comma here to end this `match` arm", + ",", + Applicability::MachineApplicable, + ); + } + } else { + err.span_label( + arrow_span, + "while parsing the `match` arm starting here", ); - return Err(err); } - } else { - // FIXME(compiler-errors): We could also recover `; PAT =>` here - - // Try to parse a following `PAT =>`, if successful - // then we should recover. - let mut snapshot = this.create_snapshot_for_diagnostic(); - let pattern_follows = snapshot - .parse_pat_allow_top_alt( - None, - RecoverComma::Yes, - RecoverColon::Yes, - CommaRecoveryMode::EitherTupleOrPipe, - ) - .map_err(|err| err.cancel()) - .is_ok(); - if pattern_follows && snapshot.check(&TokenKind::FatArrow) { - err.cancel(); - this.sess.emit_err(errors::MissingCommaAfterMatchArm { - span: hi.shrink_to_hi(), - }); - return Ok(true); - } - } - err.span_label(arrow_span, "while parsing the `match` arm starting here"); - Err(err) - })?; - } else { - this.eat(&token::Comma); + err + }) + } + }; + + let hi_span = arm_body.as_ref().map_or(span_before_body, |body| body.span); + let arm_span = lo.to(hi_span); + + // We want to recover: + // X | Some(_) => foo() + // | - missing comma + // X | None => "x" + // | ^^^^ self.token.span + // as well as: + // X | Some(!) + // | - missing comma + // X | None => "x" + // | ^^^^ self.token.span + // But we musn't recover + // X | pat[0] => {} + // | ^ self.token.span + let recover_missing_comma = arm_body.is_some() || pat.could_be_never_pattern(); + if recover_missing_comma { + result = result.or_else(|err| { + // FIXME(compiler-errors): We could also recover `; PAT =>` here + + // Try to parse a following `PAT =>`, if successful + // then we should recover. 
+ let mut snapshot = this.create_snapshot_for_diagnostic(); + let pattern_follows = snapshot + .parse_pat_allow_top_alt( + None, + RecoverComma::Yes, + RecoverColon::Yes, + CommaRecoveryMode::EitherTupleOrPipe, + ) + .map_err(|err| err.cancel()) + .is_ok(); + if pattern_follows && snapshot.check(&TokenKind::FatArrow) { + err.cancel(); + this.sess.emit_err(errors::MissingCommaAfterMatchArm { + span: arm_span.shrink_to_hi(), + }); + return Ok(true); + } + Err(err) + }); } + result?; Ok(( ast::Arm { attrs, pat, guard, - body: expr, - span: lo.to(hi), + body: arm_body, + span: arm_span, id: DUMMY_NODE_ID, is_placeholder: false, }, @@ -3179,7 +3216,7 @@ impl<'a> Parser<'a> { fn parse_gen_block(&mut self) -> PResult<'a, P<Expr>> { let lo = self.token.span; let kind = if self.eat_keyword(kw::Async) { - GenBlockKind::Async + if self.eat_keyword(kw::Gen) { GenBlockKind::AsyncGen } else { GenBlockKind::Async } } else { assert!(self.eat_keyword(kw::Gen)); self.sess.gated_spans.gate(sym::gen_blocks, lo.to(self.token.span)); @@ -3191,22 +3228,26 @@ impl<'a> Parser<'a> { Ok(self.mk_expr_with_attrs(lo.to(self.prev_token.span), kind, attrs)) } - fn is_gen_block(&self, kw: Symbol) -> bool { - self.token.is_keyword(kw) + fn is_gen_block(&self, kw: Symbol, lookahead: usize) -> bool { + self.is_keyword_ahead(lookahead, &[kw]) && (( // `async move {` - self.is_keyword_ahead(1, &[kw::Move]) - && self.look_ahead(2, |t| { + self.is_keyword_ahead(lookahead + 1, &[kw::Move]) + && self.look_ahead(lookahead + 2, |t| { *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block() }) ) || ( // `async {` - self.look_ahead(1, |t| { + self.look_ahead(lookahead + 1, |t| { *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block() }) )) } + pub(super) fn is_async_gen_block(&self) -> bool { + self.token.is_keyword(kw::Async) && self.is_gen_block(kw::Gen, 1) + } + fn is_certainly_not_a_block(&self) -> bool { self.look_ahead(1, |t| t.is_ident()) && ( diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs index 086e8d5cf9b..d22cc04d182 100644 --- a/compiler/rustc_parse/src/parser/item.rs +++ b/compiler/rustc_parse/src/parser/item.rs @@ -2359,8 +2359,10 @@ impl<'a> Parser<'a> { || case == Case::Insensitive && t.is_non_raw_ident_where(|i| quals.iter().any(|qual| qual.as_str() == i.name.as_str().to_lowercase())) ) - // Rule out unsafe extern block. - && !self.is_unsafe_foreign_mod()) + // Rule out `unsafe extern {`. + && !self.is_unsafe_foreign_mod() + // Rule out `async gen {` and `async gen move {` + && !self.is_async_gen_block()) }) // `extern ABI fn` || self.check_keyword_case(kw::Extern, case) @@ -2392,10 +2394,7 @@ impl<'a> Parser<'a> { let constness = self.parse_constness(case); let async_start_sp = self.token.span; - let asyncness = self.parse_asyncness(case); - - let _gen_start_sp = self.token.span; - let genness = self.parse_genness(case); + let coroutine_kind = self.parse_coroutine_kind(case); let unsafe_start_sp = self.token.span; let unsafety = self.parse_unsafety(case); @@ -2403,7 +2402,7 @@ impl<'a> Parser<'a> { let ext_start_sp = self.token.span; let ext = self.parse_extern(case); - if let Some(CoroutineKind::Async { span, .. }) = asyncness { + if let Some(CoroutineKind::Async { span, .. }) = coroutine_kind { if span.is_rust_2015() { self.sess.emit_err(errors::AsyncFnIn2015 { span, @@ -2412,16 +2411,11 @@ impl<'a> Parser<'a> { } } - if let Some(CoroutineKind::Gen { span, .. 
}) = genness { - self.sess.gated_spans.gate(sym::gen_blocks, span); - } - - if let ( - Some(CoroutineKind::Async { span: async_span, .. }), - Some(CoroutineKind::Gen { span: gen_span, .. }), - ) = (asyncness, genness) - { - self.sess.emit_err(errors::AsyncGenFn { span: async_span.to(gen_span) }); + match coroutine_kind { + Some(CoroutineKind::Gen { span, .. }) | Some(CoroutineKind::AsyncGen { span, .. }) => { + self.sess.gated_spans.gate(sym::gen_blocks, span); + } + Some(CoroutineKind::Async { .. }) | None => {} } if !self.eat_keyword_case(kw::Fn, case) { @@ -2440,7 +2434,7 @@ impl<'a> Parser<'a> { // We may be able to recover let mut recover_constness = constness; - let mut recover_asyncness = asyncness; + let mut recover_coroutine_kind = coroutine_kind; let mut recover_unsafety = unsafety; // This will allow the machine fix to directly place the keyword in the correct place or to indicate // that the keyword is already present and the second instance should be removed. @@ -2453,15 +2447,24 @@ impl<'a> Parser<'a> { } } } else if self.check_keyword(kw::Async) { - match asyncness { + match coroutine_kind { Some(CoroutineKind::Async { span, .. }) => { Some(WrongKw::Duplicated(span)) } + Some(CoroutineKind::AsyncGen { span, .. }) => { + Some(WrongKw::Duplicated(span)) + } Some(CoroutineKind::Gen { .. }) => { - panic!("not sure how to recover here") + recover_coroutine_kind = Some(CoroutineKind::AsyncGen { + span: self.token.span, + closure_id: DUMMY_NODE_ID, + return_impl_trait_id: DUMMY_NODE_ID, + }); + // FIXME(gen_blocks): This span is wrong, didn't want to think about it. + Some(WrongKw::Misplaced(unsafe_start_sp)) } None => { - recover_asyncness = Some(CoroutineKind::Async { + recover_coroutine_kind = Some(CoroutineKind::Async { span: self.token.span, closure_id: DUMMY_NODE_ID, return_impl_trait_id: DUMMY_NODE_ID, @@ -2559,7 +2562,7 @@ impl<'a> Parser<'a> { return Ok(FnHeader { constness: recover_constness, unsafety: recover_unsafety, - coro_kind: recover_asyncness, + coroutine_kind: recover_coroutine_kind, ext, }); } @@ -2569,13 +2572,7 @@ impl<'a> Parser<'a> { } } - let coro_kind = match asyncness { - Some(CoroutineKind::Async { .. }) => asyncness, - Some(CoroutineKind::Gen { .. }) => unreachable!("asycness cannot be Gen"), - None => genness, - }; - - Ok(FnHeader { constness, unsafety, coro_kind, ext }) + Ok(FnHeader { constness, unsafety, coroutine_kind, ext }) } /// Parses the parameter list and result type of a function declaration. diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 2816386cbad..7a306823ed4 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -1125,23 +1125,30 @@ impl<'a> Parser<'a> { } /// Parses asyncness: `async` or nothing. - fn parse_asyncness(&mut self, case: Case) -> Option<CoroutineKind> { + fn parse_coroutine_kind(&mut self, case: Case) -> Option<CoroutineKind> { + let span = self.token.uninterpolated_span(); if self.eat_keyword_case(kw::Async, case) { - let span = self.prev_token.uninterpolated_span(); - Some(CoroutineKind::Async { - span, - closure_id: DUMMY_NODE_ID, - return_impl_trait_id: DUMMY_NODE_ID, - }) - } else { - None - } - } - - /// Parses genness: `gen` or nothing. 
- fn parse_genness(&mut self, case: Case) -> Option<CoroutineKind> { - if self.token.span.at_least_rust_2024() && self.eat_keyword_case(kw::Gen, case) { - let span = self.prev_token.uninterpolated_span(); + // FIXME(gen_blocks): Do we want to unconditionally parse `gen` and then + // error if edition <= 2024, like we do with async and edition <= 2018? + if self.token.uninterpolated_span().at_least_rust_2024() + && self.eat_keyword_case(kw::Gen, case) + { + let gen_span = self.prev_token.uninterpolated_span(); + Some(CoroutineKind::AsyncGen { + span: span.to(gen_span), + closure_id: DUMMY_NODE_ID, + return_impl_trait_id: DUMMY_NODE_ID, + }) + } else { + Some(CoroutineKind::Async { + span, + closure_id: DUMMY_NODE_ID, + return_impl_trait_id: DUMMY_NODE_ID, + }) + } + } else if self.token.uninterpolated_span().at_least_rust_2024() + && self.eat_keyword_case(kw::Gen, case) + { Some(CoroutineKind::Gen { span, closure_id: DUMMY_NODE_ID, diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs index bd1bf2c7859..020b66a985a 100644 --- a/compiler/rustc_parse/src/parser/pat.rs +++ b/compiler/rustc_parse/src/parser/pat.rs @@ -154,12 +154,8 @@ impl<'a> Parser<'a> { } Err(err) => return Err(err), }; - if rc == RecoverComma::Yes { - self.maybe_recover_unexpected_comma( - first_pat.span, - matches!(first_pat.kind, PatKind::MacCall(_)), - rt, - )?; + if rc == RecoverComma::Yes && !first_pat.could_be_never_pattern() { + self.maybe_recover_unexpected_comma(first_pat.span, rt)?; } // If the next token is not a `|`, @@ -200,8 +196,8 @@ impl<'a> Parser<'a> { err.span_label(lo, WHILE_PARSING_OR_MSG); err })?; - if rc == RecoverComma::Yes { - self.maybe_recover_unexpected_comma(pat.span, false, rt)?; + if rc == RecoverComma::Yes && !pat.could_be_never_pattern() { + self.maybe_recover_unexpected_comma(pat.span, rt)?; } pats.push(pat); } diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs index f349140e8c3..da8cc05ff66 100644 --- a/compiler/rustc_parse/src/parser/ty.rs +++ b/compiler/rustc_parse/src/parser/ty.rs @@ -598,7 +598,7 @@ impl<'a> Parser<'a> { tokens: None, }; let span_start = self.token.span; - let ast::FnHeader { ext, unsafety, constness, coro_kind } = + let ast::FnHeader { ext, unsafety, constness, coroutine_kind } = self.parse_fn_front_matter(&inherited_vis, Case::Sensitive)?; if self.may_recover() && self.token.kind == TokenKind::Lt { self.recover_fn_ptr_with_generics(lo, &mut params, param_insertion_point)?; @@ -611,7 +611,7 @@ impl<'a> Parser<'a> { // cover it. self.sess.emit_err(FnPointerCannotBeConst { span: whole_span, qualifier: span }); } - if let Some(ast::CoroutineKind::Async { span, .. }) = coro_kind { + if let Some(ast::CoroutineKind::Async { span, .. 
}) = coroutine_kind { self.sess.emit_err(FnPointerCannotBeAsync { span: whole_span, qualifier: span }); } // FIXME(gen_blocks): emit a similar error for `gen fn()` diff --git a/compiler/rustc_pattern_analysis/Cargo.toml b/compiler/rustc_pattern_analysis/Cargo.toml new file mode 100644 index 00000000000..0639944a45c --- /dev/null +++ b/compiler/rustc_pattern_analysis/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "rustc_pattern_analysis" +version = "0.0.0" +edition = "2021" + +[dependencies] +# tidy-alphabetical-start +rustc_apfloat = "0.2.0" +rustc_arena = { path = "../rustc_arena" } +rustc_data_structures = { path = "../rustc_data_structures" } +rustc_errors = { path = "../rustc_errors" } +rustc_fluent_macro = { path = "../rustc_fluent_macro" } +rustc_hir = { path = "../rustc_hir" } +rustc_index = { path = "../rustc_index" } +rustc_macros = { path = "../rustc_macros" } +rustc_middle = { path = "../rustc_middle" } +rustc_session = { path = "../rustc_session" } +rustc_span = { path = "../rustc_span" } +rustc_target = { path = "../rustc_target" } +smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } +tracing = "0.1" +# tidy-alphabetical-end diff --git a/compiler/rustc_pattern_analysis/messages.ftl b/compiler/rustc_pattern_analysis/messages.ftl new file mode 100644 index 00000000000..827928f97d7 --- /dev/null +++ b/compiler/rustc_pattern_analysis/messages.ftl @@ -0,0 +1,19 @@ +pattern_analysis_non_exhaustive_omitted_pattern = some variants are not matched explicitly + .help = ensure that all variants are matched explicitly by adding the suggested match arms + .note = the matched value is of type `{$scrut_ty}` and the `non_exhaustive_omitted_patterns` attribute was found + +pattern_analysis_non_exhaustive_omitted_pattern_lint_on_arm = the lint level must be set on the whole match + .help = it no longer has any effect to set the lint level on an individual match arm + .label = remove this attribute + .suggestion = set the lint level on the whole match + +pattern_analysis_overlapping_range_endpoints = multiple patterns overlap on their endpoints + .label = ... with this range + .note = you likely meant to write mutually exclusive ranges + +pattern_analysis_uncovered = {$count -> + [1] pattern `{$witness_1}` + [2] patterns `{$witness_1}` and `{$witness_2}` + [3] patterns `{$witness_1}`, `{$witness_2}` and `{$witness_3}` + *[other] patterns `{$witness_1}`, `{$witness_2}`, `{$witness_3}` and {$remainder} more + } not covered diff --git a/compiler/rustc_pattern_analysis/src/constructor.rs b/compiler/rustc_pattern_analysis/src/constructor.rs new file mode 100644 index 00000000000..716ccdd4dcd --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/constructor.rs @@ -0,0 +1,987 @@ +//! As explained in [`crate::usefulness`], values and patterns are made from constructors applied to +//! fields. This file defines a `Constructor` enum and various operations to manipulate them. +//! +//! There are two important bits of core logic in this file: constructor inclusion and constructor +//! splitting. Constructor inclusion, i.e. whether a constructor is included in/covered by another, +//! is straightforward and defined in [`Constructor::is_covered_by`]. +//! +//! Constructor splitting is mentioned in [`crate::usefulness`] but not detailed. We describe it +//! precisely here. +//! +//! +//! +//! # Constructor grouping and splitting +//! +//! As explained in the corresponding section in [`crate::usefulness`], to make usefulness tractable +//! 
we need to group together constructors that have the same effect when they are used to +//! specialize the matrix. +//! +//! Example: +//! ```compile_fail,E0004 +//! match (0, false) { +//! (0 ..=100, true) => {} +//! (50..=150, false) => {} +//! (0 ..=200, _) => {} +//! } +//! ``` +//! +//! In this example we can restrict specialization to 5 cases: `0..50`, `50..=100`, `101..=150`, +//! `151..=200` and `200..`. +//! +//! In [`crate::usefulness`], we had said that `specialize` only takes value-only constructors. We +//! now relax this restriction: we allow `specialize` to take constructors like `0..50` as long as +//! we're careful to only do that with constructors that make sense. For example, `specialize(0..50, +//! (0..=100, true))` is sensible, but `specialize(50..=200, (0..=100, true))` is not. +//! +//! Constructor splitting looks at the constructors in the first column of the matrix and constructs +//! such a sensible set of constructors. Formally, we want to find a smallest disjoint set of +//! constructors: +//! - Whose union covers the whole type, and +//! - That have no non-trivial intersection with any of the constructors in the column (i.e. they're +//! each either disjoint with or covered by any given column constructor). +//! +//! We compute this in two steps: first [`crate::cx::MatchCheckCtxt::ctors_for_ty`] determines the +//! set of all possible constructors for the type. Then [`ConstructorSet::split`] looks at the +//! column of constructors and splits the set into groups accordingly. The precise invariants of +//! [`ConstructorSet::split`] is described in [`SplitConstructorSet`]. +//! +//! Constructor splitting has two interesting special cases: integer range splitting (see +//! [`IntRange::split`]) and slice splitting (see [`Slice::split`]). +//! +//! +//! +//! # The `Missing` constructor +//! +//! We detail a special case of constructor splitting that is a bit subtle. Take the following: +//! +//! ``` +//! enum Direction { North, South, East, West } +//! # let wind = (Direction::North, 0u8); +//! match wind { +//! (Direction::North, 50..) => {} +//! (_, _) => {} +//! } +//! ``` +//! +//! Here we expect constructor splitting to output two cases: `North`, and "everything else". This +//! "everything else" is represented by [`Constructor::Missing`]. Unlike other constructors, it's a +//! bit contextual: to know the exact list of constructors it represents we have to look at the +//! column. In practice however we don't need to, because by construction it only matches rows that +//! have wildcards. This is how this constructor is special: the only constructor that covers it is +//! `Wildcard`. +//! +//! The only place where we care about which constructors `Missing` represents is in diagnostics +//! (see `crate::usefulness::WitnessMatrix::apply_constructor`). +//! +//! We choose whether to specialize with `Missing` in +//! `crate::usefulness::compute_exhaustiveness_and_usefulness`. +//! +//! +//! +//! ## Empty types, empty constructors, and the `exhaustive_patterns` feature +//! +//! An empty type is a type that has no valid value, like `!`, `enum Void {}`, or `Result<!, !>`. +//! They require careful handling. +//! +//! First, for soundness reasons related to the possible existence of invalid values, by default we +//! don't treat empty types as empty. We force them to be matched with wildcards. Except if the +//! `exhaustive_patterns` feature is turned on, in which case we do treat them as empty. And also +//! 
except if the type has no constructors (like `enum Void {}` but not like `Result<!, !>`), we +//! specifically allow `match void {}` to be exhaustive. There are additionally considerations of +//! place validity that are handled in `crate::usefulness`. Yes this is a bit tricky. +//! +//! The second thing is that regardless of the above, it is always allowed to use all the +//! constructors of a type. For example, all the following is ok: +//! +//! ```rust,ignore(example) +//! # #![feature(never_type)] +//! # #![feature(exhaustive_patterns)] +//! fn foo(x: Option<!>) { +//! match x { +//! None => {} +//! Some(_) => {} +//! } +//! } +//! fn bar(x: &[!]) -> u32 { +//! match x { +//! [] => 1, +//! [_] => 2, +//! [_, _] => 3, +//! } +//! } +//! ``` +//! +//! Moreover, take the following: +//! +//! ```rust +//! # #![feature(never_type)] +//! # #![feature(exhaustive_patterns)] +//! # let x = None::<!>; +//! match x { +//! None => {} +//! } +//! ``` +//! +//! On a normal type, we would identify `Some` as missing and tell the user. If `x: Option<!>` +//! however (and `exhaustive_patterns` is on), it's ok to omit `Some`. When listing the constructors +//! of a type, we must therefore track which can be omitted. +//! +//! Let's call "empty" a constructor that matches no valid value for the type, like `Some` for the +//! type `Option<!>`. What this all means is that `ConstructorSet` must know which constructors are +//! empty. The difference between empty and nonempty constructors is that empty constructors need +//! not be present for the match to be exhaustive. +//! +//! A final remark: empty constructors of arity 0 break specialization, we must avoid them. The +//! reason is that if we specialize by them, nothing remains to witness the emptiness; the rest of +//! the algorithm can't distinguish them from a nonempty constructor. The only known case where this +//! could happen is the `[..]` pattern on `[!; N]` with `N > 0` so we must take care to not emit it. +//! +//! This is all handled by [`crate::cx::MatchCheckCtxt::ctors_for_ty`] and +//! [`ConstructorSet::split`]. The invariants of [`SplitConstructorSet`] are also of interest. +//! +//! +//! +//! ## Opaque patterns +//! +//! Some patterns, such as constants that are not allowed to be matched structurally, cannot be +//! inspected, which we handle with `Constructor::Opaque`. Since we know nothing of these patterns, +//! we assume they never cover each other. In order to respect the invariants of +//! [`SplitConstructorSet`], we give each `Opaque` constructor a unique id so we can recognize it. + +use std::cmp::{self, max, min, Ordering}; +use std::fmt; +use std::iter::once; + +use smallvec::SmallVec; + +use rustc_apfloat::ieee::{DoubleS, IeeeFloat, SingleS}; +use rustc_data_structures::fx::FxHashSet; +use rustc_hir::RangeEnd; +use rustc_index::IndexVec; +use rustc_middle::mir::Const; +use rustc_target::abi::VariantIdx; + +use self::Constructor::*; +use self::MaybeInfiniteInt::*; +use self::SliceKind::*; + +use crate::usefulness::PatCtxt; + +/// Whether we have seen a constructor in the column or not. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +enum Presence { + Unseen, + Seen, +} + +/// A possibly infinite integer. Values are encoded such that the ordering on `u128` matches the +/// natural order on the original type. For example, `-128i8` is encoded as `0` and `127i8` as +/// `255`. See `signed_bias` for details. 
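// Editor's note: illustrative sketch, not part of this commit. It demonstrates the bias
// encoding that `new_finite_int` below applies: XOR-ing the raw bits with `1 << (size - 1)`
// maps signed values onto `u128` so that unsigned comparison matches signed order.
// `encode_i8` is a hypothetical standalone helper specialized to `i8`.
fn encode_i8(x: i8) -> u128 {
    let bits = (x as u8) as u128;  // raw two's-complement bits, zero-extended
    let bias = 1u128 << (8 - 1);   // flip the sign bit of an 8-bit type
    bits ^ bias
}

fn main() {
    assert_eq!(encode_i8(-128), 0);
    assert_eq!(encode_i8(-1), 127);
    assert_eq!(encode_i8(0), 128);
    assert_eq!(encode_i8(127), 255);
    // Unsigned order of the encodings agrees with the signed order of the inputs.
    assert!(encode_i8(-5) < encode_i8(3));
}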
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum MaybeInfiniteInt { + NegInfinity, + /// Encoded value. DO NOT CONSTRUCT BY HAND; use `new_finite`. + #[non_exhaustive] + Finite(u128), + /// The integer after `u128::MAX`. We need it to represent `x..=u128::MAX` as an exclusive range. + JustAfterMax, + PosInfinity, +} + +impl MaybeInfiniteInt { + pub fn new_finite_uint(bits: u128) -> Self { + Finite(bits) + } + pub fn new_finite_int(bits: u128, size: u64) -> Self { + // Perform a shift if the underlying types are signed, which makes the interval arithmetic + // type-independent. + let bias = 1u128 << (size - 1); + Finite(bits ^ bias) + } + + pub fn as_finite_uint(self) -> Option<u128> { + match self { + Finite(bits) => Some(bits), + _ => None, + } + } + pub fn as_finite_int(self, size: u64) -> Option<u128> { + // We decode the shift. + match self { + Finite(bits) => { + let bias = 1u128 << (size - 1); + Some(bits ^ bias) + } + _ => None, + } + } + + /// Note: this will not turn a finite value into an infinite one or vice-versa. + pub fn minus_one(self) -> Self { + match self { + Finite(n) => match n.checked_sub(1) { + Some(m) => Finite(m), + None => bug!(), + }, + JustAfterMax => Finite(u128::MAX), + x => x, + } + } + /// Note: this will not turn a finite value into an infinite one or vice-versa. + pub fn plus_one(self) -> Self { + match self { + Finite(n) => match n.checked_add(1) { + Some(m) => Finite(m), + None => JustAfterMax, + }, + JustAfterMax => bug!(), + x => x, + } + } +} + +/// An exclusive interval, used for precise integer exhaustiveness checking. `IntRange`s always +/// store a contiguous range. +/// +/// `IntRange` is never used to encode an empty range or a "range" that wraps around the (offset) +/// space: i.e., `range.lo < range.hi`. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct IntRange { + pub lo: MaybeInfiniteInt, // Must not be `PosInfinity`. + pub hi: MaybeInfiniteInt, // Must not be `NegInfinity`. +} + +impl IntRange { + /// Best effort; will not know that e.g. `255u8..` is a singleton. + pub(crate) fn is_singleton(&self) -> bool { + // Since `lo` and `hi` can't be the same `Infinity` and `plus_one` never changes from finite + // to infinite, this correctly only detects ranges that contain exacly one `Finite(x)`. + self.lo.plus_one() == self.hi + } + + #[inline] + pub fn from_singleton(x: MaybeInfiniteInt) -> IntRange { + IntRange { lo: x, hi: x.plus_one() } + } + + #[inline] + pub fn from_range(lo: MaybeInfiniteInt, mut hi: MaybeInfiniteInt, end: RangeEnd) -> IntRange { + if end == RangeEnd::Included { + hi = hi.plus_one(); + } + if lo >= hi { + // This should have been caught earlier by E0030. + bug!("malformed range pattern: {lo:?}..{hi:?}"); + } + IntRange { lo, hi } + } + + fn is_subrange(&self, other: &Self) -> bool { + other.lo <= self.lo && self.hi <= other.hi + } + + fn intersection(&self, other: &Self) -> Option<Self> { + if self.lo < other.hi && other.lo < self.hi { + Some(IntRange { lo: max(self.lo, other.lo), hi: min(self.hi, other.hi) }) + } else { + None + } + } + + /// Partition a range of integers into disjoint subranges. This does constructor splitting for + /// integer ranges as explained at the top of the file. + /// + /// This returns an output that covers `self`. The output is split so that the only + /// intersections between an output range and a column range are inclusions. No output range + /// straddles the boundary of one of the inputs. 
+ /// + /// Additionally, we track for each output range whether it is covered by one of the column ranges or not. + /// + /// The following input: + /// ```text + /// (--------------------------) // `self` + /// (------) (----------) (-) + /// (------) (--------) + /// ``` + /// is first intersected with `self`: + /// ```text + /// (--------------------------) // `self` + /// (----) (----------) (-) + /// (------) (--------) + /// ``` + /// and then iterated over as follows: + /// ```text + /// (-(--)-(-)-(------)-)--(-)- + /// ``` + /// where each sequence of dashes is an output range, and dashes outside parentheses are marked + /// as `Presence::Missing`. + /// + /// ## `isize`/`usize` + /// + /// Whereas a wildcard of type `i32` stands for the range `i32::MIN..=i32::MAX`, a `usize` + /// wildcard stands for `0..PosInfinity` and a `isize` wildcard stands for + /// `NegInfinity..PosInfinity`. In other words, as far as `IntRange` is concerned, there are + /// values before `isize::MIN` and after `usize::MAX`/`isize::MAX`. + /// This is to avoid e.g. `0..(u32::MAX as usize)` from being exhaustive on one architecture and + /// not others. This was decided in <https://github.com/rust-lang/rfcs/pull/2591>. + /// + /// These infinities affect splitting subtly: it is possible to get `NegInfinity..0` and + /// `usize::MAX+1..PosInfinity` in the output. Diagnostics must be careful to handle these + /// fictitious ranges sensibly. + fn split( + &self, + column_ranges: impl Iterator<Item = IntRange>, + ) -> impl Iterator<Item = (Presence, IntRange)> { + // The boundaries of ranges in `column_ranges` intersected with `self`. + // We do parenthesis matching for input ranges. A boundary counts as +1 if it starts + // a range and -1 if it ends it. When the count is > 0 between two boundaries, we + // are within an input range. + let mut boundaries: Vec<(MaybeInfiniteInt, isize)> = column_ranges + .filter_map(|r| self.intersection(&r)) + .flat_map(|r| [(r.lo, 1), (r.hi, -1)]) + .collect(); + // We sort by boundary, and for each boundary we sort the "closing parentheses" first. The + // order of +1/-1 for a same boundary value is actually irrelevant, because we only look at + // the accumulated count between distinct boundary values. + boundaries.sort_unstable(); + + // Accumulate parenthesis counts. + let mut paren_counter = 0isize; + // Gather pairs of adjacent boundaries. + let mut prev_bdy = self.lo; + boundaries + .into_iter() + // End with the end of the range. The count is ignored. + .chain(once((self.hi, 0))) + // List pairs of adjacent boundaries and the count between them. + .map(move |(bdy, delta)| { + // `delta` affects the count as we cross `bdy`, so the relevant count between + // `prev_bdy` and `bdy` is untouched by `delta`. + let ret = (prev_bdy, paren_counter, bdy); + prev_bdy = bdy; + paren_counter += delta; + ret + }) + // Skip empty ranges. + .filter(|&(prev_bdy, _, bdy)| prev_bdy != bdy) + // Convert back to ranges. + .map(move |(prev_bdy, paren_count, bdy)| { + use Presence::*; + let presence = if paren_count > 0 { Seen } else { Unseen }; + let range = IntRange { lo: prev_bdy, hi: bdy }; + (presence, range) + }) + } +} + +/// Note: this will render signed ranges incorrectly. To render properly, convert to a pattern +/// first. 
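// Editor's note: illustrative sketch, not part of this commit. It reproduces the
// boundary-sweep idea of `IntRange::split` above on plain half-open `u128` ranges,
// without the `MaybeInfiniteInt` machinery; the names here are hypothetical.
fn split_ranges(whole: (u128, u128), column: &[(u128, u128)]) -> Vec<(bool, (u128, u128))> {
    // Boundaries of the column ranges clipped to `whole`: +1 opens a range, -1 closes it.
    let mut bounds: Vec<(u128, i32)> = column
        .iter()
        .filter_map(|&(lo, hi)| {
            let (lo, hi) = (lo.max(whole.0), hi.min(whole.1));
            (lo < hi).then_some([(lo, 1), (hi, -1)])
        })
        .flatten()
        .collect();
    bounds.sort_unstable();
    bounds.push((whole.1, 0)); // close with the end of `whole`; the count is ignored

    let (mut out, mut prev, mut depth) = (Vec::new(), whole.0, 0);
    for (bdy, delta) in bounds {
        if prev != bdy {
            // `depth > 0` means this piece is covered by some column range ("seen").
            out.push((depth > 0, (prev, bdy)));
        }
        prev = bdy;
        depth += delta;
    }
    out
}

fn main() {
    // The ranges from the module-level example (0..=100, 50..=150, 0..=200), written as
    // exclusive ranges and split against 0..=200.
    let pieces = split_ranges((0, 201), &[(0, 101), (50, 151), (0, 201)]);
    let ranges: Vec<_> = pieces.iter().map(|&(_, r)| r).collect();
    assert_eq!(ranges, vec![(0, 50), (50, 101), (101, 151), (151, 201)]);
}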
+impl fmt::Debug for IntRange { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Finite(lo) = self.lo { + write!(f, "{lo}")?; + } + write!(f, "{}", RangeEnd::Excluded)?; + if let Finite(hi) = self.hi { + write!(f, "{hi}")?; + } + Ok(()) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum SliceKind { + /// Patterns of length `n` (`[x, y]`). + FixedLen(usize), + /// Patterns using the `..` notation (`[x, .., y]`). + /// Captures any array constructor of `length >= i + j`. + /// In the case where `array_len` is `Some(_)`, + /// this indicates that we only care about the first `i` and the last `j` values of the array, + /// and everything in between is a wildcard `_`. + VarLen(usize, usize), +} + +impl SliceKind { + fn arity(self) -> usize { + match self { + FixedLen(length) => length, + VarLen(prefix, suffix) => prefix + suffix, + } + } + + /// Whether this pattern includes patterns of length `other_len`. + fn covers_length(self, other_len: usize) -> bool { + match self { + FixedLen(len) => len == other_len, + VarLen(prefix, suffix) => prefix + suffix <= other_len, + } + } +} + +/// A constructor for array and slice patterns. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Slice { + /// `None` if the matched value is a slice, `Some(n)` if it is an array of size `n`. + pub(crate) array_len: Option<usize>, + /// The kind of pattern it is: fixed-length `[x, y]` or variable length `[x, .., y]`. + pub(crate) kind: SliceKind, +} + +impl Slice { + pub fn new(array_len: Option<usize>, kind: SliceKind) -> Self { + let kind = match (array_len, kind) { + // If the middle `..` has length 0, we effectively have a fixed-length pattern. + (Some(len), VarLen(prefix, suffix)) if prefix + suffix == len => FixedLen(len), + (Some(len), VarLen(prefix, suffix)) if prefix + suffix > len => bug!( + "Slice pattern of length {} longer than its array length {len}", + prefix + suffix + ), + _ => kind, + }; + Slice { array_len, kind } + } + + pub(crate) fn arity(self) -> usize { + self.kind.arity() + } + + /// See `Constructor::is_covered_by` + fn is_covered_by(self, other: Self) -> bool { + other.kind.covers_length(self.arity()) + } + + /// This computes constructor splitting for variable-length slices, as explained at the top of + /// the file. + /// + /// A slice pattern `[x, .., y]` behaves like the infinite or-pattern `[x, y] | [x, _, y] | [x, + /// _, _, y] | etc`. The corresponding value constructors are fixed-length array constructors of + /// corresponding lengths. We obviously can't list this infinitude of constructors. + /// Thankfully, it turns out that for each finite set of slice patterns, all sufficiently large + /// array lengths are equivalent. + /// + /// Let's look at an example, where we are trying to split the last pattern: + /// ``` + /// # fn foo(x: &[bool]) { + /// match x { + /// [true, true, ..] => {} + /// [.., false, false] => {} + /// [..] 
=> {} + /// } + /// # } + /// ``` + /// Here are the results of specialization for the first few lengths: + /// ``` + /// # fn foo(x: &[bool]) { match x { + /// // length 0 + /// [] => {} + /// // length 1 + /// [_] => {} + /// // length 2 + /// [true, true] => {} + /// [false, false] => {} + /// [_, _] => {} + /// // length 3 + /// [true, true, _ ] => {} + /// [_, false, false] => {} + /// [_, _, _ ] => {} + /// // length 4 + /// [true, true, _, _ ] => {} + /// [_, _, false, false] => {} + /// [_, _, _, _ ] => {} + /// // length 5 + /// [true, true, _, _, _ ] => {} + /// [_, _, _, false, false] => {} + /// [_, _, _, _, _ ] => {} + /// # _ => {} + /// # }} + /// ``` + /// + /// We see that above length 4, we are simply inserting columns full of wildcards in the middle. + /// This means that specialization and witness computation with slices of length `l >= 4` will + /// give equivalent results regardless of `l`. This applies to any set of slice patterns: there + /// will be a length `L` above which all lengths behave the same. This is exactly what we need + /// for constructor splitting. + /// + /// A variable-length slice pattern covers all lengths from its arity up to infinity. As we just + /// saw, we can split this in two: lengths below `L` are treated individually with a + /// fixed-length slice each; lengths above `L` are grouped into a single variable-length slice + /// constructor. + /// + /// For each variable-length slice pattern `p` with a prefix of length `plₚ` and suffix of + /// length `slₚ`, only the first `plₚ` and the last `slₚ` elements are examined. Therefore, as + /// long as `L` is positive (to avoid concerns about empty types), all elements after the + /// maximum prefix length and before the maximum suffix length are not examined by any + /// variable-length pattern, and therefore can be ignored. This gives us a way to compute `L`. + /// + /// Additionally, if fixed-length patterns exist, we must pick an `L` large enough to miss them, + /// so we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`. + /// `max_slice` below will be made to have this arity `L`. + /// + /// If `self` is fixed-length, it is returned as-is. + /// + /// Additionally, we track for each output slice whether it is covered by one of the column slices or not. + fn split( + self, + column_slices: impl Iterator<Item = Slice>, + ) -> impl Iterator<Item = (Presence, Slice)> { + // Range of lengths below `L`. + let smaller_lengths; + let arity = self.arity(); + let mut max_slice = self.kind; + // Tracks the smallest variable-length slice we've seen. Any slice arity above it is + // therefore `Presence::Seen` in the column. + let mut min_var_len = usize::MAX; + // Tracks the fixed-length slices we've seen, to mark them as `Presence::Seen`. + let mut seen_fixed_lens = FxHashSet::default(); + match &mut max_slice { + VarLen(max_prefix_len, max_suffix_len) => { + // A length larger than any fixed-length slice encountered. + // We start at 1 in case the subtype is empty because in that case the zero-length + // slice must be treated separately from the rest. + let mut fixed_len_upper_bound = 1; + // We grow `max_slice` to be larger than all slices encountered, as described above. + // `L` is `max_slice.arity()`. For diagnostics, we keep the prefix and suffix + // lengths separate. 
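// Editor's note: illustrative aside, not part of this commit. The loop below computes the
// arity `L` described in the doc comment; extracted on its own it is just the following
// (hypothetical helper, taking the fixed lengths and the (prefix, suffix) pairs of a column):
fn slice_split_arity(fixed: &[usize], var: &[(usize, usize)]) -> usize {
    let max_fixed_plus_one = fixed.iter().map(|l| l + 1).max().unwrap_or(1);
    let max_prefix = var.iter().map(|&(p, _)| p).max().unwrap_or(0);
    let max_suffix = var.iter().map(|&(_, s)| s).max().unwrap_or(0);
    max_fixed_plus_one.max(max_prefix + max_suffix)
}
// For the column `[true, true, ..]`, `[.., false, false]`, `[..]` from the example above,
// this gives max(1, 2 + 2) = 4, matching the "length 4" row at which the arms stabilize.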
+ for slice in column_slices { + match slice.kind { + FixedLen(len) => { + fixed_len_upper_bound = cmp::max(fixed_len_upper_bound, len + 1); + seen_fixed_lens.insert(len); + } + VarLen(prefix, suffix) => { + *max_prefix_len = cmp::max(*max_prefix_len, prefix); + *max_suffix_len = cmp::max(*max_suffix_len, suffix); + min_var_len = cmp::min(min_var_len, prefix + suffix); + } + } + } + // If `fixed_len_upper_bound >= L`, we set `L` to `fixed_len_upper_bound`. + if let Some(delta) = + fixed_len_upper_bound.checked_sub(*max_prefix_len + *max_suffix_len) + { + *max_prefix_len += delta + } + + // We cap the arity of `max_slice` at the array size. + match self.array_len { + Some(len) if max_slice.arity() >= len => max_slice = FixedLen(len), + _ => {} + } + + smaller_lengths = match self.array_len { + // The only admissible fixed-length slice is one of the array size. Whether `max_slice` + // is fixed-length or variable-length, it will be the only relevant slice to output + // here. + Some(_) => 0..0, // empty range + // We need to cover all arities in the range `(arity..infinity)`. We split that + // range into two: lengths smaller than `max_slice.arity()` are treated + // independently as fixed-lengths slices, and lengths above are captured by + // `max_slice`. + None => self.arity()..max_slice.arity(), + }; + } + FixedLen(_) => { + // No need to split here. We only track presence. + for slice in column_slices { + match slice.kind { + FixedLen(len) => { + if len == arity { + seen_fixed_lens.insert(len); + } + } + VarLen(prefix, suffix) => { + min_var_len = cmp::min(min_var_len, prefix + suffix); + } + } + } + smaller_lengths = 0..0; + } + }; + + smaller_lengths.map(FixedLen).chain(once(max_slice)).map(move |kind| { + let arity = kind.arity(); + let seen = if min_var_len <= arity || seen_fixed_lens.contains(&arity) { + Presence::Seen + } else { + Presence::Unseen + }; + (seen, Slice::new(self.array_len, kind)) + }) + } +} + +/// A globally unique id to distinguish `Opaque` patterns. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct OpaqueId(u32); + +impl OpaqueId { + pub fn new() -> Self { + use std::sync::atomic::{AtomicU32, Ordering}; + static OPAQUE_ID: AtomicU32 = AtomicU32::new(0); + OpaqueId(OPAQUE_ID.fetch_add(1, Ordering::SeqCst)) + } +} + +/// A value can be decomposed into a constructor applied to some fields. This struct represents +/// the constructor. See also `Fields`. +/// +/// `pat_constructor` retrieves the constructor corresponding to a pattern. +/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a +/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and +/// `Fields`. +#[derive(Clone, Debug, PartialEq)] +pub enum Constructor<'tcx> { + /// The constructor for patterns that have a single constructor, like tuples, struct patterns, + /// and references. Fixed-length arrays are treated separately with `Slice`. + Single, + /// Enum variants. + Variant(VariantIdx), + /// Booleans + Bool(bool), + /// Ranges of integer literal values (`2`, `2..=5` or `2..5`). + IntRange(IntRange), + /// Ranges of floating-point literal values (`2.0..=5.2`). + F32Range(IeeeFloat<SingleS>, IeeeFloat<SingleS>, RangeEnd), + F64Range(IeeeFloat<DoubleS>, IeeeFloat<DoubleS>, RangeEnd), + /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately. + Str(Const<'tcx>), + /// Array and slice patterns. + Slice(Slice), + /// Constants that must not be matched structurally. 
They are treated as black boxes for the + /// purposes of exhaustiveness: we must not inspect them, and they don't count towards making a + /// match exhaustive. + /// Carries an id that must be unique within a match. We need this to ensure the invariants of + /// [`SplitConstructorSet`]. + Opaque(OpaqueId), + /// Or-pattern. + Or, + /// Wildcard pattern. + Wildcard, + /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used + /// for those types for which we cannot list constructors explicitly, like `f64` and `str`. + NonExhaustive, + /// Fake extra constructor for variants that should not be mentioned in diagnostics. + /// We use this for variants behind an unstable gate as well as + /// `#[doc(hidden)]` ones. + Hidden, + /// Fake extra constructor for constructors that are not seen in the matrix, as explained at the + /// top of the file. + Missing, +} + +impl<'tcx> Constructor<'tcx> { + pub(crate) fn is_non_exhaustive(&self) -> bool { + matches!(self, NonExhaustive) + } + + pub(crate) fn as_variant(&self) -> Option<VariantIdx> { + match self { + Variant(i) => Some(*i), + _ => None, + } + } + fn as_bool(&self) -> Option<bool> { + match self { + Bool(b) => Some(*b), + _ => None, + } + } + pub(crate) fn as_int_range(&self) -> Option<&IntRange> { + match self { + IntRange(range) => Some(range), + _ => None, + } + } + fn as_slice(&self) -> Option<Slice> { + match self { + Slice(slice) => Some(*slice), + _ => None, + } + } + + /// The number of fields for this constructor. This must be kept in sync with + /// `Fields::wildcards`. + pub(crate) fn arity(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> usize { + pcx.cx.ctor_arity(self, pcx.ty) + } + + /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`. + /// For the simple cases, this is simply checking for equality. For the "grouped" constructors, + /// this checks for inclusion. + // We inline because this has a single call site in `Matrix::specialize_constructor`. + #[inline] + pub(crate) fn is_covered_by<'p>(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool { + match (self, other) { + (Wildcard, _) => { + span_bug!( + pcx.cx.scrut_span, + "Constructor splitting should not have returned `Wildcard`" + ) + } + // Wildcards cover anything + (_, Wildcard) => true, + // Only a wildcard pattern can match these special constructors. + (Missing { .. } | NonExhaustive | Hidden, _) => false, + + (Single, Single) => true, + (Variant(self_id), Variant(other_id)) => self_id == other_id, + (Bool(self_b), Bool(other_b)) => self_b == other_b, + + (IntRange(self_range), IntRange(other_range)) => self_range.is_subrange(other_range), + (F32Range(self_from, self_to, self_end), F32Range(other_from, other_to, other_end)) => { + self_from.ge(other_from) + && match self_to.partial_cmp(other_to) { + Some(Ordering::Less) => true, + Some(Ordering::Equal) => other_end == self_end, + _ => false, + } + } + (F64Range(self_from, self_to, self_end), F64Range(other_from, other_to, other_end)) => { + self_from.ge(other_from) + && match self_to.partial_cmp(other_to) { + Some(Ordering::Less) => true, + Some(Ordering::Equal) => other_end == self_end, + _ => false, + } + } + (Str(self_val), Str(other_val)) => { + // FIXME Once valtrees are available we can directly use the bytes + // in the `Str` variant of the valtree for the comparison here. 
+ self_val == other_val + } + (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice), + + // Opaque constructors don't interact with anything unless they come from the + // syntactically identical pattern. + (Opaque(self_id), Opaque(other_id)) => self_id == other_id, + (Opaque(..), _) | (_, Opaque(..)) => false, + + _ => span_bug!( + pcx.cx.scrut_span, + "trying to compare incompatible constructors {:?} and {:?}", + self, + other + ), + } + } +} + +#[derive(Debug, Clone, Copy)] +pub enum VariantVisibility { + /// Variant that doesn't fit the other cases, i.e. most variants. + Visible, + /// Variant behind an unstable gate or with the `#[doc(hidden)]` attribute. It will not be + /// mentioned in diagnostics unless the user mentioned it first. + Hidden, + /// Variant that matches no value. E.g. `Some::<Option<!>>` if the `exhaustive_patterns` feature + /// is enabled. Like `Hidden`, it will not be mentioned in diagnostics unless the user mentioned + /// it first. + Empty, +} + +/// Describes the set of all constructors for a type. For details, in particular about the emptiness +/// of constructors, see the top of the file. +/// +/// In terms of division of responsibility, [`ConstructorSet::split`] handles all of the +/// `exhaustive_patterns` feature. +#[derive(Debug)] +pub enum ConstructorSet { + /// The type has a single constructor, e.g. `&T` or a struct. `empty` tracks whether the + /// constructor is empty. + Single { empty: bool }, + /// This type has the following list of constructors. If `variants` is empty and + /// `non_exhaustive` is false, don't use this; use `NoConstructors` instead. + Variants { variants: IndexVec<VariantIdx, VariantVisibility>, non_exhaustive: bool }, + /// Booleans. + Bool, + /// The type is spanned by integer values. The range or ranges give the set of allowed values. + /// The second range is only useful for `char`. + Integers { range_1: IntRange, range_2: Option<IntRange> }, + /// The type is matched by slices. `array_len` is the compile-time length of the array, if + /// known. If `subtype_is_empty`, all constructors are empty except possibly the zero-length + /// slice `[]`. + Slice { array_len: Option<usize>, subtype_is_empty: bool }, + /// The constructors cannot be listed, and the type cannot be matched exhaustively. E.g. `str`, + /// floats. + Unlistable, + /// The type has no constructors (not even empty ones). This is `!` and empty enums. + NoConstructors, +} + +/// Describes the result of analyzing the constructors in a column of a match. +/// +/// `present` is morally the set of constructors present in the column, and `missing` is the set of +/// constructors that exist in the type but are not present in the column. +/// +/// More formally, if we discard wildcards from the column, this respects the following constraints: +/// 1. the union of `present`, `missing` and `missing_empty` covers all the constructors of the type +/// 2. each constructor in `present` is covered by something in the column +/// 3. no constructor in `missing` or `missing_empty` is covered by anything in the column +/// 4. each constructor in the column is equal to the union of one or more constructors in `present` +/// 5. `missing` does not contain empty constructors (see discussion about emptiness at the top of +/// the file); +/// 6. `missing_empty` contains only empty constructors +/// 7. 
constructors in `present`, `missing` and `missing_empty` are split for the column; in other +/// words, they are either fully included in or fully disjoint from each constructor in the +/// column. In yet other words, there are no non-trivial intersections like between `0..10` and +/// `5..15`. +/// +/// We must be particularly careful with weird constructors like `Opaque`: they're not formally part +/// of the `ConstructorSet` for the type, yet if we forgot to include them in `present` we would be +/// ignoring any row with `Opaque`s in the algorithm. Hence the importance of point 4. +#[derive(Debug)] +pub(crate) struct SplitConstructorSet<'tcx> { + pub(crate) present: SmallVec<[Constructor<'tcx>; 1]>, + pub(crate) missing: Vec<Constructor<'tcx>>, + pub(crate) missing_empty: Vec<Constructor<'tcx>>, +} + +impl ConstructorSet { + /// This analyzes a column of constructors to 1/ determine which constructors of the type (if + /// any) are missing; 2/ split constructors to handle non-trivial intersections e.g. on ranges + /// or slices. This can get subtle; see [`SplitConstructorSet`] for details of this operation + /// and its invariants. + #[instrument(level = "debug", skip(self, pcx, ctors), ret)] + pub(crate) fn split<'a, 'tcx>( + &self, + pcx: &PatCtxt<'_, '_, 'tcx>, + ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone, + ) -> SplitConstructorSet<'tcx> + where + 'tcx: 'a, + { + let mut present: SmallVec<[_; 1]> = SmallVec::new(); + // Empty constructors found missing. + let mut missing_empty = Vec::new(); + // Nonempty constructors found missing. + let mut missing = Vec::new(); + // Constructors in `ctors`, except wildcards and opaques. + let mut seen = Vec::new(); + for ctor in ctors.cloned() { + match ctor { + Opaque(..) => present.push(ctor), + Wildcard => {} // discard wildcards + _ => seen.push(ctor), + } + } + + match self { + ConstructorSet::Single { empty } => { + if !seen.is_empty() { + present.push(Single); + } else if *empty { + missing_empty.push(Single); + } else { + missing.push(Single); + } + } + ConstructorSet::Variants { variants, non_exhaustive } => { + let seen_set: FxHashSet<_> = seen.iter().map(|c| c.as_variant().unwrap()).collect(); + let mut skipped_a_hidden_variant = false; + + for (idx, visibility) in variants.iter_enumerated() { + let ctor = Variant(idx); + if seen_set.contains(&idx) { + present.push(ctor); + } else { + // We only put visible variants directly into `missing`. 
+ match visibility { + VariantVisibility::Visible => missing.push(ctor), + VariantVisibility::Hidden => skipped_a_hidden_variant = true, + VariantVisibility::Empty => missing_empty.push(ctor), + } + } + } + + if skipped_a_hidden_variant { + missing.push(Hidden); + } + if *non_exhaustive { + missing.push(NonExhaustive); + } + } + ConstructorSet::Bool => { + let mut seen_false = false; + let mut seen_true = false; + for b in seen.iter().map(|ctor| ctor.as_bool().unwrap()) { + if b { + seen_true = true; + } else { + seen_false = true; + } + } + if seen_false { + present.push(Bool(false)); + } else { + missing.push(Bool(false)); + } + if seen_true { + present.push(Bool(true)); + } else { + missing.push(Bool(true)); + } + } + ConstructorSet::Integers { range_1, range_2 } => { + let seen_ranges: Vec<_> = + seen.iter().map(|ctor| ctor.as_int_range().unwrap().clone()).collect(); + for (seen, splitted_range) in range_1.split(seen_ranges.iter().cloned()) { + match seen { + Presence::Unseen => missing.push(IntRange(splitted_range)), + Presence::Seen => present.push(IntRange(splitted_range)), + } + } + if let Some(range_2) = range_2 { + for (seen, splitted_range) in range_2.split(seen_ranges.into_iter()) { + match seen { + Presence::Unseen => missing.push(IntRange(splitted_range)), + Presence::Seen => present.push(IntRange(splitted_range)), + } + } + } + } + ConstructorSet::Slice { array_len, subtype_is_empty } => { + let seen_slices = seen.iter().map(|c| c.as_slice().unwrap()); + let base_slice = Slice::new(*array_len, VarLen(0, 0)); + for (seen, splitted_slice) in base_slice.split(seen_slices) { + let ctor = Slice(splitted_slice); + match seen { + Presence::Seen => present.push(ctor), + Presence::Unseen => { + if *subtype_is_empty && splitted_slice.arity() != 0 { + // We have subpatterns of an empty type, so the constructor is + // empty. + missing_empty.push(ctor); + } else { + missing.push(ctor); + } + } + } + } + } + ConstructorSet::Unlistable => { + // Since we can't list constructors, we take the ones in the column. This might list + // some constructors several times but there's not much we can do. + present.extend(seen); + missing.push(NonExhaustive); + } + ConstructorSet::NoConstructors => { + // In a `MaybeInvalid` place even an empty pattern may be reachable. We therefore + // add a dummy empty constructor here, which will be ignored if the place is + // `ValidOnly`. + missing_empty.push(NonExhaustive); + } + } + + // We have now grouped all the constructors into 3 buckets: present, missing, missing_empty. + // In the absence of the `exhaustive_patterns` feature however, we don't count nested empty + // types as empty. Only non-nested `!` or `enum Foo {}` are considered empty. + if !pcx.cx.tcx.features().exhaustive_patterns + && !(pcx.is_top_level && matches!(self, Self::NoConstructors)) + { + // Treat all missing constructors as nonempty. 
+ missing.extend(missing_empty.drain(..)); + } + + SplitConstructorSet { present, missing, missing_empty } + } +} diff --git a/compiler/rustc_pattern_analysis/src/cx.rs b/compiler/rustc_pattern_analysis/src/cx.rs new file mode 100644 index 00000000000..8a4f39a1f4a --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/cx.rs @@ -0,0 +1,856 @@ +use std::fmt; +use std::iter::once; + +use rustc_arena::TypedArena; +use rustc_data_structures::captures::Captures; +use rustc_hir::def_id::DefId; +use rustc_hir::{HirId, RangeEnd}; +use rustc_index::Idx; +use rustc_index::IndexVec; +use rustc_middle::middle::stability::EvalResult; +use rustc_middle::mir; +use rustc_middle::mir::interpret::Scalar; +use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange, PatRangeBoundary}; +use rustc_middle::ty::layout::IntegerExt; +use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef}; +use rustc_span::{Span, DUMMY_SP}; +use rustc_target::abi::{FieldIdx, Integer, VariantIdx, FIRST_VARIANT}; +use smallvec::SmallVec; + +use crate::constructor::{ + Constructor, ConstructorSet, IntRange, MaybeInfiniteInt, OpaqueId, Slice, SliceKind, + VariantVisibility, +}; +use crate::pat::{DeconstructedPat, WitnessPat}; + +use Constructor::*; + +pub struct MatchCheckCtxt<'p, 'tcx> { + pub tcx: TyCtxt<'tcx>, + /// The module in which the match occurs. This is necessary for + /// checking inhabited-ness of types because whether a type is (visibly) + /// inhabited can depend on whether it was defined in the current module or + /// not. E.g., `struct Foo { _private: ! }` cannot be seen to be empty + /// outside its module and should not be matchable with an empty match statement. + pub module: DefId, + pub param_env: ty::ParamEnv<'tcx>, + pub pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>, + /// Lint level at the match. + pub match_lint_level: HirId, + /// The span of the whole match, if applicable. + pub whole_match_span: Option<Span>, + /// Span of the scrutinee. + pub scrut_span: Span, + /// Only produce `NON_EXHAUSTIVE_OMITTED_PATTERNS` lint on refutable patterns. + pub refutable: bool, + /// Whether the data at the scrutinee is known to be valid. This is false if the scrutinee comes + /// from a union field, a pointer deref, or a reference deref (pending opsem decisions). + pub known_valid_scrutinee: bool, +} + +impl<'p, 'tcx> MatchCheckCtxt<'p, 'tcx> { + pub(super) fn is_uninhabited(&self, ty: Ty<'tcx>) -> bool { + !ty.is_inhabited_from(self.tcx, self.module, self.param_env) + } + + /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`. + pub fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool { + match ty.kind() { + ty::Adt(def, ..) => { + def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did().is_local() + } + _ => false, + } + } + + pub(crate) fn alloc_wildcard_slice( + &self, + tys: impl IntoIterator<Item = Ty<'tcx>>, + ) -> &'p [DeconstructedPat<'p, 'tcx>] { + self.pattern_arena + .alloc_from_iter(tys.into_iter().map(|ty| DeconstructedPat::wildcard(ty, DUMMY_SP))) + } + + // In the cases of either a `#[non_exhaustive]` field list or a non-public field, we hide + // uninhabited fields in order not to reveal the uninhabitedness of the whole variant. + // This lists the fields we keep along with their types. 
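// Editor's note: illustrative sketch, not part of this commit. The hiding rule used by the
// function below, written out as a standalone predicate (hypothetical helper; in the real
// code `is_uninhabited` is additionally gated on the `exhaustive_patterns` feature):
fn field_is_hidden(is_uninhabited: bool, is_visible: bool, is_non_exhaustive: bool) -> bool {
    // An uninhabited field is hidden unless it is visible and its field list is exhaustive;
    // inhabited fields are always listed.
    is_uninhabited && (!is_visible || is_non_exhaustive)
}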
+ pub(crate) fn list_variant_nonhidden_fields<'a>( + &'a self, + ty: Ty<'tcx>, + variant: &'a VariantDef, + ) -> impl Iterator<Item = (FieldIdx, Ty<'tcx>)> + Captures<'p> + Captures<'a> { + let cx = self; + let ty::Adt(adt, args) = ty.kind() else { bug!() }; + // Whether we must not match the fields of this variant exhaustively. + let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local(); + + variant.fields.iter().enumerate().filter_map(move |(i, field)| { + let ty = field.ty(cx.tcx, args); + // `field.ty()` doesn't normalize after substituting. + let ty = cx.tcx.normalize_erasing_regions(cx.param_env, ty); + let is_visible = adt.is_enum() || field.vis.is_accessible_from(cx.module, cx.tcx); + let is_uninhabited = cx.tcx.features().exhaustive_patterns && cx.is_uninhabited(ty); + + if is_uninhabited && (!is_visible || is_non_exhaustive) { + None + } else { + Some((FieldIdx::new(i), ty)) + } + }) + } + + pub(crate) fn variant_index_for_adt( + ctor: &Constructor<'tcx>, + adt: ty::AdtDef<'tcx>, + ) -> VariantIdx { + match *ctor { + Variant(idx) => idx, + Single => { + assert!(!adt.is_enum()); + FIRST_VARIANT + } + _ => bug!("bad constructor {:?} for adt {:?}", ctor, adt), + } + } + + /// Creates a new list of wildcard fields for a given constructor. The result must have a length + /// of `ctor.arity()`. + #[instrument(level = "trace", skip(self))] + pub(crate) fn ctor_wildcard_fields( + &self, + ctor: &Constructor<'tcx>, + ty: Ty<'tcx>, + ) -> &'p [DeconstructedPat<'p, 'tcx>] { + let cx = self; + match ctor { + Single | Variant(_) => match ty.kind() { + ty::Tuple(fs) => cx.alloc_wildcard_slice(fs.iter()), + ty::Ref(_, rty, _) => cx.alloc_wildcard_slice(once(*rty)), + ty::Adt(adt, args) => { + if adt.is_box() { + // The only legal patterns of type `Box` (outside `std`) are `_` and box + // patterns. If we're here we can assume this is a box pattern. + cx.alloc_wildcard_slice(once(args.type_at(0))) + } else { + let variant = + &adt.variant(MatchCheckCtxt::variant_index_for_adt(&ctor, *adt)); + let tys = cx.list_variant_nonhidden_fields(ty, variant).map(|(_, ty)| ty); + cx.alloc_wildcard_slice(tys) + } + } + _ => bug!("Unexpected type for `Single` constructor: {:?}", ty), + }, + Slice(slice) => match *ty.kind() { + ty::Slice(ty) | ty::Array(ty, _) => { + let arity = slice.arity(); + cx.alloc_wildcard_slice((0..arity).map(|_| ty)) + } + _ => bug!("bad slice pattern {:?} {:?}", ctor, ty), + }, + Bool(..) + | IntRange(..) + | F32Range(..) + | F64Range(..) + | Str(..) + | Opaque(..) + | NonExhaustive + | Hidden + | Missing { .. } + | Wildcard => &[], + Or => { + bug!("called `Fields::wildcards` on an `Or` ctor") + } + } + } + + /// The number of fields for this constructor. This must be kept in sync with + /// `Fields::wildcards`. + pub(crate) fn ctor_arity(&self, ctor: &Constructor<'tcx>, ty: Ty<'tcx>) -> usize { + match ctor { + Single | Variant(_) => match ty.kind() { + ty::Tuple(fs) => fs.len(), + ty::Ref(..) => 1, + ty::Adt(adt, ..) => { + if adt.is_box() { + // The only legal patterns of type `Box` (outside `std`) are `_` and box + // patterns. If we're here we can assume this is a box pattern. + 1 + } else { + let variant = + &adt.variant(MatchCheckCtxt::variant_index_for_adt(&ctor, *adt)); + self.list_variant_nonhidden_fields(ty, variant).count() + } + } + _ => bug!("Unexpected type for `Single` constructor: {:?}", ty), + }, + Slice(slice) => slice.arity(), + Bool(..) + | IntRange(..) + | F32Range(..) + | F64Range(..) + | Str(..) + | Opaque(..) 
+ | NonExhaustive + | Hidden + | Missing { .. } + | Wildcard => 0, + Or => bug!("The `Or` constructor doesn't have a fixed arity"), + } + } + + /// Creates a set that represents all the constructors of `ty`. + /// + /// See [`crate::constructor`] for considerations of emptiness. + #[instrument(level = "debug", skip(self), ret)] + pub fn ctors_for_ty(&self, ty: Ty<'tcx>) -> ConstructorSet { + let cx = self; + let make_uint_range = |start, end| { + IntRange::from_range( + MaybeInfiniteInt::new_finite_uint(start), + MaybeInfiniteInt::new_finite_uint(end), + RangeEnd::Included, + ) + }; + // This determines the set of all possible constructors for the type `ty`. For numbers, + // arrays and slices we use ranges and variable-length slices when appropriate. + match ty.kind() { + ty::Bool => ConstructorSet::Bool, + ty::Char => { + // The valid Unicode Scalar Value ranges. + ConstructorSet::Integers { + range_1: make_uint_range('\u{0000}' as u128, '\u{D7FF}' as u128), + range_2: Some(make_uint_range('\u{E000}' as u128, '\u{10FFFF}' as u128)), + } + } + &ty::Int(ity) => { + let range = if ty.is_ptr_sized_integral() { + // The min/max values of `isize` are not allowed to be observed. + IntRange { + lo: MaybeInfiniteInt::NegInfinity, + hi: MaybeInfiniteInt::PosInfinity, + } + } else { + let size = Integer::from_int_ty(&cx.tcx, ity).size().bits(); + let min = 1u128 << (size - 1); + let max = min - 1; + let min = MaybeInfiniteInt::new_finite_int(min, size); + let max = MaybeInfiniteInt::new_finite_int(max, size); + IntRange::from_range(min, max, RangeEnd::Included) + }; + ConstructorSet::Integers { range_1: range, range_2: None } + } + &ty::Uint(uty) => { + let range = if ty.is_ptr_sized_integral() { + // The max value of `usize` is not allowed to be observed. + let lo = MaybeInfiniteInt::new_finite_uint(0); + IntRange { lo, hi: MaybeInfiniteInt::PosInfinity } + } else { + let size = Integer::from_uint_ty(&cx.tcx, uty).size(); + let max = size.truncate(u128::MAX); + make_uint_range(0, max) + }; + ConstructorSet::Integers { range_1: range, range_2: None } + } + ty::Slice(sub_ty) => ConstructorSet::Slice { + array_len: None, + subtype_is_empty: cx.is_uninhabited(*sub_ty), + }, + ty::Array(sub_ty, len) => { + // We treat arrays of a constant but unknown length like slices. + ConstructorSet::Slice { + array_len: len.try_eval_target_usize(cx.tcx, cx.param_env).map(|l| l as usize), + subtype_is_empty: cx.is_uninhabited(*sub_ty), + } + } + ty::Adt(def, args) if def.is_enum() => { + let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(ty); + if def.variants().is_empty() && !is_declared_nonexhaustive { + ConstructorSet::NoConstructors + } else { + let mut variants = + IndexVec::from_elem(VariantVisibility::Visible, def.variants()); + for (idx, v) in def.variants().iter_enumerated() { + let variant_def_id = def.variant(idx).def_id; + // Visibly uninhabited variants. + let is_inhabited = v + .inhabited_predicate(cx.tcx, *def) + .instantiate(cx.tcx, args) + .apply(cx.tcx, cx.param_env, cx.module); + // Variants that depend on a disabled unstable feature. + let is_unstable = matches!( + cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None), + EvalResult::Deny { .. } + ); + // Foreign `#[doc(hidden)]` variants. 
+ let is_doc_hidden = + cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local(); + let visibility = if !is_inhabited { + // FIXME: handle empty+hidden + VariantVisibility::Empty + } else if is_unstable || is_doc_hidden { + VariantVisibility::Hidden + } else { + VariantVisibility::Visible + }; + variants[idx] = visibility; + } + + ConstructorSet::Variants { variants, non_exhaustive: is_declared_nonexhaustive } + } + } + ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => { + ConstructorSet::Single { empty: cx.is_uninhabited(ty) } + } + ty::Never => ConstructorSet::NoConstructors, + // This type is one for which we cannot list constructors, like `str` or `f64`. + // FIXME(Nadrieril): which of these are actually allowed? + ty::Float(_) + | ty::Str + | ty::Foreign(_) + | ty::RawPtr(_) + | ty::FnDef(_, _) + | ty::FnPtr(_) + | ty::Dynamic(_, _, _) + | ty::Closure(_, _) + | ty::Coroutine(_, _, _) + | ty::Alias(_, _) + | ty::Param(_) + | ty::Error(_) => ConstructorSet::Unlistable, + ty::CoroutineWitness(_, _) | ty::Bound(_, _) | ty::Placeholder(_) | ty::Infer(_) => { + bug!("Encountered unexpected type in `ConstructorSet::for_ty`: {ty:?}") + } + } + } + + pub(crate) fn lower_pat_range_bdy( + &self, + bdy: PatRangeBoundary<'tcx>, + ty: Ty<'tcx>, + ) -> MaybeInfiniteInt { + match bdy { + PatRangeBoundary::NegInfinity => MaybeInfiniteInt::NegInfinity, + PatRangeBoundary::Finite(value) => { + let bits = value.eval_bits(self.tcx, self.param_env); + match *ty.kind() { + ty::Int(ity) => { + let size = Integer::from_int_ty(&self.tcx, ity).size().bits(); + MaybeInfiniteInt::new_finite_int(bits, size) + } + _ => MaybeInfiniteInt::new_finite_uint(bits), + } + } + PatRangeBoundary::PosInfinity => MaybeInfiniteInt::PosInfinity, + } + } + + /// Note: the input patterns must have been lowered through + /// `rustc_mir_build::thir::pattern::check_match::MatchVisitor::lower_pattern`. + pub fn lower_pat(&self, pat: &Pat<'tcx>) -> DeconstructedPat<'p, 'tcx> { + let singleton = |pat| std::slice::from_ref(self.pattern_arena.alloc(pat)); + let cx = self; + let ctor; + let fields: &[_]; + match &pat.kind { + PatKind::AscribeUserType { subpattern, .. } + | PatKind::InlineConstant { subpattern, .. } => return self.lower_pat(subpattern), + PatKind::Binding { subpattern: Some(subpat), .. } => return self.lower_pat(subpat), + PatKind::Binding { subpattern: None, .. } | PatKind::Wild => { + ctor = Wildcard; + fields = &[]; + } + PatKind::Deref { subpattern } => { + ctor = Single; + fields = singleton(self.lower_pat(subpattern)); + } + PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => { + match pat.ty.kind() { + ty::Tuple(fs) => { + ctor = Single; + let mut wilds: SmallVec<[_; 2]> = + fs.iter().map(|ty| DeconstructedPat::wildcard(ty, pat.span)).collect(); + for pat in subpatterns { + wilds[pat.field.index()] = self.lower_pat(&pat.pattern); + } + fields = cx.pattern_arena.alloc_from_iter(wilds); + } + ty::Adt(adt, args) if adt.is_box() => { + // The only legal patterns of type `Box` (outside `std`) are `_` and box + // patterns. If we're here we can assume this is a box pattern. + // FIXME(Nadrieril): A `Box` can in theory be matched either with `Box(_, + // _)` or a box pattern. As a hack to avoid an ICE with the former, we + // ignore other fields than the first one. This will trigger an error later + // anyway. 
+ // See https://github.com/rust-lang/rust/issues/82772 , + // explanation: https://github.com/rust-lang/rust/pull/82789#issuecomment-796921977 + // The problem is that we can't know from the type whether we'll match + // normally or through box-patterns. We'll have to figure out a proper + // solution when we introduce generalized deref patterns. Also need to + // prevent mixing of those two options. + let pattern = subpatterns.into_iter().find(|pat| pat.field.index() == 0); + let pat = if let Some(pat) = pattern { + self.lower_pat(&pat.pattern) + } else { + DeconstructedPat::wildcard(args.type_at(0), pat.span) + }; + ctor = Single; + fields = singleton(pat); + } + ty::Adt(adt, _) => { + ctor = match pat.kind { + PatKind::Leaf { .. } => Single, + PatKind::Variant { variant_index, .. } => Variant(variant_index), + _ => bug!(), + }; + let variant = + &adt.variant(MatchCheckCtxt::variant_index_for_adt(&ctor, *adt)); + // For each field in the variant, we store the relevant index into `self.fields` if any. + let mut field_id_to_id: Vec<Option<usize>> = + (0..variant.fields.len()).map(|_| None).collect(); + let tys = cx + .list_variant_nonhidden_fields(pat.ty, variant) + .enumerate() + .map(|(i, (field, ty))| { + field_id_to_id[field.index()] = Some(i); + ty + }); + let mut wilds: SmallVec<[_; 2]> = + tys.map(|ty| DeconstructedPat::wildcard(ty, pat.span)).collect(); + for pat in subpatterns { + if let Some(i) = field_id_to_id[pat.field.index()] { + wilds[i] = self.lower_pat(&pat.pattern); + } + } + fields = cx.pattern_arena.alloc_from_iter(wilds); + } + _ => bug!("pattern has unexpected type: pat: {:?}, ty: {:?}", pat, pat.ty), + } + } + PatKind::Constant { value } => { + match pat.ty.kind() { + ty::Bool => { + ctor = match value.try_eval_bool(cx.tcx, cx.param_env) { + Some(b) => Bool(b), + None => Opaque(OpaqueId::new()), + }; + fields = &[]; + } + ty::Char | ty::Int(_) | ty::Uint(_) => { + ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { + Some(bits) => { + let x = match *pat.ty.kind() { + ty::Int(ity) => { + let size = Integer::from_int_ty(&cx.tcx, ity).size().bits(); + MaybeInfiniteInt::new_finite_int(bits, size) + } + _ => MaybeInfiniteInt::new_finite_uint(bits), + }; + IntRange(IntRange::from_singleton(x)) + } + None => Opaque(OpaqueId::new()), + }; + fields = &[]; + } + ty::Float(ty::FloatTy::F32) => { + ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { + Some(bits) => { + use rustc_apfloat::Float; + let value = rustc_apfloat::ieee::Single::from_bits(bits); + F32Range(value, value, RangeEnd::Included) + } + None => Opaque(OpaqueId::new()), + }; + fields = &[]; + } + ty::Float(ty::FloatTy::F64) => { + ctor = match value.try_eval_bits(cx.tcx, cx.param_env) { + Some(bits) => { + use rustc_apfloat::Float; + let value = rustc_apfloat::ieee::Double::from_bits(bits); + F64Range(value, value, RangeEnd::Included) + } + None => Opaque(OpaqueId::new()), + }; + fields = &[]; + } + ty::Ref(_, t, _) if t.is_str() => { + // We want a `&str` constant to behave like a `Deref` pattern, to be compatible + // with other `Deref` patterns. This could have been done in `const_to_pat`, + // but that causes issues with the rest of the matching code. + // So here, the constructor for a `"foo"` pattern is `&` (represented by + // `Single`), and has one field. That field has constructor `Str(value)` and no + // fields. + // Note: `t` is `str`, not `&str`. 
+ let subpattern = DeconstructedPat::new(Str(*value), &[], *t, pat.span); + ctor = Single; + fields = singleton(subpattern) + } + // All constants that can be structurally matched have already been expanded + // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are + // opaque. + _ => { + ctor = Opaque(OpaqueId::new()); + fields = &[]; + } + } + } + PatKind::Range(patrange) => { + let PatRange { lo, hi, end, .. } = patrange.as_ref(); + let ty = pat.ty; + ctor = match ty.kind() { + ty::Char | ty::Int(_) | ty::Uint(_) => { + let lo = cx.lower_pat_range_bdy(*lo, ty); + let hi = cx.lower_pat_range_bdy(*hi, ty); + IntRange(IntRange::from_range(lo, hi, *end)) + } + ty::Float(fty) => { + use rustc_apfloat::Float; + let lo = lo.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env)); + let hi = hi.as_finite().map(|c| c.eval_bits(cx.tcx, cx.param_env)); + match fty { + ty::FloatTy::F32 => { + use rustc_apfloat::ieee::Single; + let lo = lo.map(Single::from_bits).unwrap_or(-Single::INFINITY); + let hi = hi.map(Single::from_bits).unwrap_or(Single::INFINITY); + F32Range(lo, hi, *end) + } + ty::FloatTy::F64 => { + use rustc_apfloat::ieee::Double; + let lo = lo.map(Double::from_bits).unwrap_or(-Double::INFINITY); + let hi = hi.map(Double::from_bits).unwrap_or(Double::INFINITY); + F64Range(lo, hi, *end) + } + } + } + _ => bug!("invalid type for range pattern: {}", ty), + }; + fields = &[]; + } + PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => { + let array_len = match pat.ty.kind() { + ty::Array(_, length) => { + Some(length.eval_target_usize(cx.tcx, cx.param_env) as usize) + } + ty::Slice(_) => None, + _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty), + }; + let kind = if slice.is_some() { + SliceKind::VarLen(prefix.len(), suffix.len()) + } else { + SliceKind::FixedLen(prefix.len() + suffix.len()) + }; + ctor = Slice(Slice::new(array_len, kind)); + fields = cx.pattern_arena.alloc_from_iter( + prefix.iter().chain(suffix.iter()).map(|p| self.lower_pat(&*p)), + ) + } + PatKind::Or { .. } => { + ctor = Or; + let pats = expand_or_pat(pat); + fields = + cx.pattern_arena.alloc_from_iter(pats.into_iter().map(|p| self.lower_pat(p))) + } + PatKind::Never => { + // FIXME(never_patterns): handle `!` in exhaustiveness. This is a sane default + // in the meantime. + ctor = Wildcard; + fields = &[]; + } + PatKind::Error(_) => { + ctor = Opaque(OpaqueId::new()); + fields = &[]; + } + } + DeconstructedPat::new(ctor, fields, pat.ty, pat.span) + } + + /// Convert back to a `thir::PatRangeBoundary` for diagnostic purposes. + /// Note: it is possible to get `isize/usize::MAX+1` here, as explained in the doc for + /// [`IntRange::split`]. This cannot be represented as a `Const`, so we represent it with + /// `PosInfinity`. + pub(crate) fn hoist_pat_range_bdy( + &self, + miint: MaybeInfiniteInt, + ty: Ty<'tcx>, + ) -> PatRangeBoundary<'tcx> { + use MaybeInfiniteInt::*; + let tcx = self.tcx; + match miint { + NegInfinity => PatRangeBoundary::NegInfinity, + Finite(_) => { + let size = ty.primitive_size(tcx); + let bits = match *ty.kind() { + ty::Int(_) => miint.as_finite_int(size.bits()).unwrap(), + _ => miint.as_finite_uint().unwrap(), + }; + match Scalar::try_from_uint(bits, size) { + Some(scalar) => { + let value = mir::Const::from_scalar(tcx, scalar, ty); + PatRangeBoundary::Finite(value) + } + // The value doesn't fit. Since `x >= 0` and 0 always encodes the minimum value + // for a type, the problem isn't that the value is too small. 
So it must be too + // large. + None => PatRangeBoundary::PosInfinity, + } + } + JustAfterMax | PosInfinity => PatRangeBoundary::PosInfinity, + } + } + + /// Whether the range denotes the fictitious values before `isize::MIN` or after + /// `usize::MAX`/`isize::MAX` (see doc of [`IntRange::split`] for why these exist). + pub fn is_range_beyond_boundaries(&self, range: &IntRange, ty: Ty<'tcx>) -> bool { + ty.is_ptr_sized_integral() && { + // The two invalid ranges are `NegInfinity..isize::MIN` (represented as + // `NegInfinity..0`), and `{u,i}size::MAX+1..PosInfinity`. `hoist_pat_range_bdy` + // converts `MAX+1` to `PosInfinity`, and we couldn't have `PosInfinity` in `range.lo` + // otherwise. + let lo = self.hoist_pat_range_bdy(range.lo, ty); + matches!(lo, PatRangeBoundary::PosInfinity) + || matches!(range.hi, MaybeInfiniteInt::Finite(0)) + } + } + + /// Convert back to a `thir::Pat` for diagnostic purposes. + pub(crate) fn hoist_pat_range(&self, range: &IntRange, ty: Ty<'tcx>) -> Pat<'tcx> { + use MaybeInfiniteInt::*; + let cx = self; + let kind = if matches!((range.lo, range.hi), (NegInfinity, PosInfinity)) { + PatKind::Wild + } else if range.is_singleton() { + let lo = cx.hoist_pat_range_bdy(range.lo, ty); + let value = lo.as_finite().unwrap(); + PatKind::Constant { value } + } else { + // We convert to an inclusive range for diagnostics. + let mut end = RangeEnd::Included; + let mut lo = cx.hoist_pat_range_bdy(range.lo, ty); + if matches!(lo, PatRangeBoundary::PosInfinity) { + // The only reason to get `PosInfinity` here is the special case where + // `hoist_pat_range_bdy` found `{u,i}size::MAX+1`. So the range denotes the + // fictitious values after `{u,i}size::MAX` (see [`IntRange::split`] for why we do + // this). We show this to the user as `usize::MAX..` which is slightly incorrect but + // probably clear enough. + let c = ty.numeric_max_val(cx.tcx).unwrap(); + let value = mir::Const::from_ty_const(c, cx.tcx); + lo = PatRangeBoundary::Finite(value); + } + let hi = if matches!(range.hi, Finite(0)) { + // The range encodes `..ty::MIN`, so we can't convert it to an inclusive range. + end = RangeEnd::Excluded; + range.hi + } else { + range.hi.minus_one() + }; + let hi = cx.hoist_pat_range_bdy(hi, ty); + PatKind::Range(Box::new(PatRange { lo, hi, end, ty })) + }; + + Pat { ty, span: DUMMY_SP, kind } + } + /// Convert back to a `thir::Pat` for diagnostic purposes. This panics for patterns that don't + /// appear in diagnostics, like float ranges. + pub fn hoist_witness_pat(&self, pat: &WitnessPat<'tcx>) -> Pat<'tcx> { + let cx = self; + let is_wildcard = |pat: &Pat<'_>| matches!(pat.kind, PatKind::Wild); + let mut subpatterns = pat.iter_fields().map(|p| Box::new(cx.hoist_witness_pat(p))); + let kind = match pat.ctor() { + Bool(b) => PatKind::Constant { value: mir::Const::from_bool(cx.tcx, *b) }, + IntRange(range) => return self.hoist_pat_range(range, pat.ty()), + Single | Variant(_) => match pat.ty().kind() { + ty::Tuple(..) => PatKind::Leaf { + subpatterns: subpatterns + .enumerate() + .map(|(i, pattern)| FieldPat { field: FieldIdx::new(i), pattern }) + .collect(), + }, + ty::Adt(adt_def, _) if adt_def.is_box() => { + // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside + // of `std`). So this branch is only reachable when the feature is enabled and + // the pattern is a box pattern. 
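For the `is_box()` branch above, a hedged, nightly-only sketch of the situation it reconstructs (assumes a nightly toolchain with the `box_patterns` feature gate; not part of this commit):

```rust
#![feature(box_patterns)]

fn main() {
    let b: Box<Option<u8>> = Box::new(Some(1));
    // With `box_patterns` enabled, a `Box<T>` is matched through the box; the
    // analysis models `Box` as a single constructor with one field of type `T`,
    // so these two arms cover the type without a wildcard.
    match b {
        box Some(n) => println!("boxed Some({n})"),
        box None => println!("boxed None"),
    }
}
```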
+ PatKind::Deref { subpattern: subpatterns.next().unwrap() } + } + ty::Adt(adt_def, args) => { + let variant_index = + MatchCheckCtxt::variant_index_for_adt(&pat.ctor(), *adt_def); + let variant = &adt_def.variant(variant_index); + let subpatterns = cx + .list_variant_nonhidden_fields(pat.ty(), variant) + .zip(subpatterns) + .map(|((field, _ty), pattern)| FieldPat { field, pattern }) + .collect(); + + if adt_def.is_enum() { + PatKind::Variant { adt_def: *adt_def, args, variant_index, subpatterns } + } else { + PatKind::Leaf { subpatterns } + } + } + // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should + // be careful to reconstruct the correct constant pattern here. However a string + // literal pattern will never be reported as a non-exhaustiveness witness, so we + // ignore this issue. + ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() }, + _ => bug!("unexpected ctor for type {:?} {:?}", pat.ctor(), pat.ty()), + }, + Slice(slice) => { + match slice.kind { + SliceKind::FixedLen(_) => PatKind::Slice { + prefix: subpatterns.collect(), + slice: None, + suffix: Box::new([]), + }, + SliceKind::VarLen(prefix, _) => { + let mut subpatterns = subpatterns.peekable(); + let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix).collect(); + if slice.array_len.is_some() { + // Improves diagnostics a bit: if the type is a known-size array, instead + // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`. + // This is incorrect if the size is not known, since `[_, ..]` captures + // arrays of lengths `>= 1` whereas `[..]` captures any length. + while !prefix.is_empty() && is_wildcard(prefix.last().unwrap()) { + prefix.pop(); + } + while subpatterns.peek().is_some() + && is_wildcard(subpatterns.peek().unwrap()) + { + subpatterns.next(); + } + } + let suffix: Box<[_]> = subpatterns.collect(); + let wild = Pat::wildcard_from_ty(pat.ty()); + PatKind::Slice { + prefix: prefix.into_boxed_slice(), + slice: Some(Box::new(wild)), + suffix, + } + } + } + } + &Str(value) => PatKind::Constant { value }, + Wildcard | NonExhaustive | Hidden => PatKind::Wild, + Missing { .. } => bug!( + "trying to convert a `Missing` constructor into a `Pat`; this is probably a bug, + `Missing` should have been processed in `apply_constructors`" + ), + F32Range(..) | F64Range(..) | Opaque(..) | Or => { + bug!("can't convert to pattern: {:?}", pat) + } + }; + + Pat { ty: pat.ty(), span: DUMMY_SP, kind } + } + + /// Best-effort `Debug` implementation. + pub(crate) fn debug_pat( + f: &mut fmt::Formatter<'_>, + pat: &DeconstructedPat<'p, 'tcx>, + ) -> fmt::Result { + let mut first = true; + let mut start_or_continue = |s| { + if first { + first = false; + "" + } else { + s + } + }; + let mut start_or_comma = || start_or_continue(", "); + + match pat.ctor() { + Single | Variant(_) => match pat.ty().kind() { + ty::Adt(def, _) if def.is_box() => { + // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside + // of `std`). So this branch is only reachable when the feature is enabled and + // the pattern is a box pattern. + let subpattern = pat.iter_fields().next().unwrap(); + write!(f, "box {subpattern:?}") + } + ty::Adt(..) | ty::Tuple(..) 
=> { + let variant = match pat.ty().kind() { + ty::Adt(adt, _) => Some( + adt.variant(MatchCheckCtxt::variant_index_for_adt(pat.ctor(), *adt)), + ), + ty::Tuple(_) => None, + _ => unreachable!(), + }; + + if let Some(variant) = variant { + write!(f, "{}", variant.name)?; + } + + // Without `cx`, we can't know which field corresponds to which, so we can't + // get the names of the fields. Instead we just display everything as a tuple + // struct, which should be good enough. + write!(f, "(")?; + for p in pat.iter_fields() { + write!(f, "{}", start_or_comma())?; + write!(f, "{p:?}")?; + } + write!(f, ")") + } + // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should + // be careful to detect strings here. However a string literal pattern will never + // be reported as a non-exhaustiveness witness, so we can ignore this issue. + ty::Ref(_, _, mutbl) => { + let subpattern = pat.iter_fields().next().unwrap(); + write!(f, "&{}{:?}", mutbl.prefix_str(), subpattern) + } + _ => write!(f, "_"), + }, + Slice(slice) => { + let mut subpatterns = pat.iter_fields(); + write!(f, "[")?; + match slice.kind { + SliceKind::FixedLen(_) => { + for p in subpatterns { + write!(f, "{}{:?}", start_or_comma(), p)?; + } + } + SliceKind::VarLen(prefix_len, _) => { + for p in subpatterns.by_ref().take(prefix_len) { + write!(f, "{}{:?}", start_or_comma(), p)?; + } + write!(f, "{}", start_or_comma())?; + write!(f, "..")?; + for p in subpatterns { + write!(f, "{}{:?}", start_or_comma(), p)?; + } + } + } + write!(f, "]") + } + Bool(b) => write!(f, "{b}"), + // Best-effort, will render signed ranges incorrectly + IntRange(range) => write!(f, "{range:?}"), + F32Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"), + F64Range(lo, hi, end) => write!(f, "{lo}{end}{hi}"), + Str(value) => write!(f, "{value}"), + Opaque(..) => write!(f, "<constant pattern>"), + Or => { + for pat in pat.iter_fields() { + write!(f, "{}{:?}", start_or_continue(" | "), pat)?; + } + Ok(()) + } + Wildcard | Missing { .. } | NonExhaustive | Hidden => write!(f, "_ : {:?}", pat.ty()), + } + } +} + +/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns. 
+fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> { + fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) { + if let PatKind::Or { pats } = &pat.kind { + for pat in pats.iter() { + expand(pat, vec); + } + } else { + vec.push(pat) + } + } + + let mut pats = Vec::new(); + expand(pat, &mut pats); + pats +} diff --git a/compiler/rustc_pattern_analysis/src/errors.rs b/compiler/rustc_pattern_analysis/src/errors.rs new file mode 100644 index 00000000000..0efa8a0ec08 --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/errors.rs @@ -0,0 +1,95 @@ +use crate::{cx::MatchCheckCtxt, pat::WitnessPat}; + +use rustc_errors::{AddToDiagnostic, Diagnostic, SubdiagnosticMessage}; +use rustc_macros::{LintDiagnostic, Subdiagnostic}; +use rustc_middle::thir::Pat; +use rustc_middle::ty::Ty; +use rustc_span::Span; + +#[derive(Subdiagnostic)] +#[label(pattern_analysis_uncovered)] +pub struct Uncovered<'tcx> { + #[primary_span] + span: Span, + count: usize, + witness_1: Pat<'tcx>, + witness_2: Pat<'tcx>, + witness_3: Pat<'tcx>, + remainder: usize, +} + +impl<'tcx> Uncovered<'tcx> { + pub fn new<'p>( + span: Span, + cx: &MatchCheckCtxt<'p, 'tcx>, + witnesses: Vec<WitnessPat<'tcx>>, + ) -> Self { + let witness_1 = cx.hoist_witness_pat(witnesses.get(0).unwrap()); + Self { + span, + count: witnesses.len(), + // Substitute dummy values if witnesses is smaller than 3. These will never be read. + witness_2: witnesses + .get(1) + .map(|w| cx.hoist_witness_pat(w)) + .unwrap_or_else(|| witness_1.clone()), + witness_3: witnesses + .get(2) + .map(|w| cx.hoist_witness_pat(w)) + .unwrap_or_else(|| witness_1.clone()), + witness_1, + remainder: witnesses.len().saturating_sub(3), + } + } +} + +#[derive(LintDiagnostic)] +#[diag(pattern_analysis_overlapping_range_endpoints)] +#[note] +pub struct OverlappingRangeEndpoints<'tcx> { + #[label] + pub range: Span, + #[subdiagnostic] + pub overlap: Vec<Overlap<'tcx>>, +} + +pub struct Overlap<'tcx> { + pub span: Span, + pub range: Pat<'tcx>, +} + +impl<'tcx> AddToDiagnostic for Overlap<'tcx> { + fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F) + where + F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage, + { + let Overlap { span, range } = self; + + // FIXME(mejrs) unfortunately `#[derive(LintDiagnostic)]` + // does not support `#[subdiagnostic(eager)]`... + let message = format!("this range overlaps on `{range}`..."); + diag.span_label(span, message); + } +} + +#[derive(LintDiagnostic)] +#[diag(pattern_analysis_non_exhaustive_omitted_pattern)] +#[help] +#[note] +pub(crate) struct NonExhaustiveOmittedPattern<'tcx> { + pub scrut_ty: Ty<'tcx>, + #[subdiagnostic] + pub uncovered: Uncovered<'tcx>, +} + +#[derive(LintDiagnostic)] +#[diag(pattern_analysis_non_exhaustive_omitted_pattern_lint_on_arm)] +#[help] +pub(crate) struct NonExhaustiveOmittedPatternLintOnArm { + #[label] + pub lint_span: Span, + #[suggestion(code = "#[{lint_level}({lint_name})]\n", applicability = "maybe-incorrect")] + pub suggest_lint_on_match: Option<Span>, + pub lint_level: &'static str, + pub lint_name: &'static str, +} diff --git a/compiler/rustc_pattern_analysis/src/lib.rs b/compiler/rustc_pattern_analysis/src/lib.rs new file mode 100644 index 00000000000..07730aa49d3 --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/lib.rs @@ -0,0 +1,56 @@ +//! Analysis of patterns, notably match exhaustiveness checking. 
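Illustrative sketch (not part of this commit) of what `expand_or_pat` above flattens: nested or-patterns are expanded into their alternatives, so each alternative is analyzed, and reported on, separately.

```rust
fn main() {
    let x = Some(3u8);
    match x {
        // Flattened by the analysis into the alternatives `Some(0)`, `Some(1)`
        // and `None`, each of which gets its own usefulness result.
        Some(0 | 1) | None => println!("small or missing"),
        Some(_) => println!("anything else"),
    }
}
```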
+ +pub mod constructor; +pub mod cx; +pub mod errors; +pub(crate) mod lints; +pub mod pat; +pub mod usefulness; + +#[macro_use] +extern crate tracing; +#[macro_use] +extern crate rustc_middle; + +rustc_fluent_macro::fluent_messages! { "../messages.ftl" } + +use lints::PatternColumn; +use rustc_hir::HirId; +use rustc_middle::ty::Ty; +use usefulness::{compute_match_usefulness, UsefulnessReport}; + +use crate::cx::MatchCheckCtxt; +use crate::lints::{lint_nonexhaustive_missing_variants, lint_overlapping_range_endpoints}; +use crate::pat::DeconstructedPat; + +/// The arm of a match expression. +#[derive(Clone, Copy, Debug)] +pub struct MatchArm<'p, 'tcx> { + /// The pattern must have been lowered through `check_match::MatchVisitor::lower_pattern`. + pub pat: &'p DeconstructedPat<'p, 'tcx>, + pub hir_id: HirId, + pub has_guard: bool, +} + +/// The entrypoint for this crate. Computes whether a match is exhaustive and which of its arms are +/// useful, and runs some lints. +pub fn analyze_match<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + arms: &[MatchArm<'p, 'tcx>], + scrut_ty: Ty<'tcx>, +) -> UsefulnessReport<'p, 'tcx> { + let pat_column = PatternColumn::new(arms); + + let report = compute_match_usefulness(cx, arms, scrut_ty); + + // Lint on ranges that overlap on their endpoints, which is likely a mistake. + lint_overlapping_range_endpoints(cx, &pat_column); + + // Run the non_exhaustive_omitted_patterns lint. Only run on refutable patterns to avoid hitting + // `if let`s. Only run if the match is exhaustive otherwise the error is redundant. + if cx.refutable && report.non_exhaustiveness_witnesses.is_empty() { + lint_nonexhaustive_missing_variants(cx, arms, &pat_column, scrut_ty) + } + + report +} diff --git a/compiler/rustc_pattern_analysis/src/lints.rs b/compiler/rustc_pattern_analysis/src/lints.rs new file mode 100644 index 00000000000..8ab559c9e7a --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/lints.rs @@ -0,0 +1,290 @@ +use smallvec::SmallVec; + +use rustc_data_structures::captures::Captures; +use rustc_middle::ty::{self, Ty}; +use rustc_session::lint; +use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS; +use rustc_span::Span; + +use crate::constructor::{Constructor, IntRange, MaybeInfiniteInt, SplitConstructorSet}; +use crate::cx::MatchCheckCtxt; +use crate::errors::{ + NonExhaustiveOmittedPattern, NonExhaustiveOmittedPatternLintOnArm, Overlap, + OverlappingRangeEndpoints, Uncovered, +}; +use crate::pat::{DeconstructedPat, WitnessPat}; +use crate::usefulness::PatCtxt; +use crate::MatchArm; + +/// A column of patterns in the matrix, where a column is the intuitive notion of "subpatterns that +/// inspect the same subvalue/place". +/// This is used to traverse patterns column-by-column for lints. Despite similarities with the +/// algorithm in [`crate::usefulness`], this does a different traversal. Notably this is linear in +/// the depth of patterns, whereas `compute_exhaustiveness_and_usefulness` is worst-case exponential +/// (exhaustiveness is NP-complete). The core difference is that we treat sub-columns separately. +/// +/// This must not contain an or-pattern. `specialize` takes care to expand them. +/// +/// This is not used in the main algorithm; only in lints. 
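Illustrative sketch (not part of this commit) of the "column" notion documented above: a column groups the subpatterns that inspect the same place of the scrutinee.

```rust
fn main() {
    let pair = (3u8, true);
    // For the lint traversal, the column for `pair.0` holds the subpatterns
    // `0`, `1..=5`, `_`, `_` (the top-level wildcard contributes a wildcard per
    // field), and the column for `pair.1` holds `true`, `_`, `false`, `_`.
    match pair {
        (0, true) => println!("a"),
        (1..=5, _) => println!("b"),
        (_, false) => println!("c"),
        _ => println!("d"),
    }
}
```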
+#[derive(Debug)] +pub(crate) struct PatternColumn<'p, 'tcx> { + patterns: Vec<&'p DeconstructedPat<'p, 'tcx>>, +} + +impl<'p, 'tcx> PatternColumn<'p, 'tcx> { + pub(crate) fn new(arms: &[MatchArm<'p, 'tcx>]) -> Self { + let mut patterns = Vec::with_capacity(arms.len()); + for arm in arms { + if arm.pat.is_or_pat() { + patterns.extend(arm.pat.flatten_or_pat()) + } else { + patterns.push(arm.pat) + } + } + Self { patterns } + } + + fn is_empty(&self) -> bool { + self.patterns.is_empty() + } + fn head_ty(&self) -> Option<Ty<'tcx>> { + if self.patterns.len() == 0 { + return None; + } + // If the type is opaque and it is revealed anywhere in the column, we take the revealed + // version. Otherwise we could encounter constructors for the revealed type and crash. + let is_opaque = |ty: Ty<'tcx>| matches!(ty.kind(), ty::Alias(ty::Opaque, ..)); + let first_ty = self.patterns[0].ty(); + if is_opaque(first_ty) { + for pat in &self.patterns { + let ty = pat.ty(); + if !is_opaque(ty) { + return Some(ty); + } + } + } + Some(first_ty) + } + + /// Do constructor splitting on the constructors of the column. + fn analyze_ctors(&self, pcx: &PatCtxt<'_, 'p, 'tcx>) -> SplitConstructorSet<'tcx> { + let column_ctors = self.patterns.iter().map(|p| p.ctor()); + pcx.cx.ctors_for_ty(pcx.ty).split(pcx, column_ctors) + } + + fn iter<'a>(&'a self) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> { + self.patterns.iter().copied() + } + + /// Does specialization: given a constructor, this takes the patterns from the column that match + /// the constructor, and outputs their fields. + /// This returns one column per field of the constructor. They usually all have the same length + /// (the number of patterns in `self` that matched `ctor`), except that we expand or-patterns + /// which may change the lengths. + fn specialize(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Vec<Self> { + let arity = ctor.arity(pcx); + if arity == 0 { + return Vec::new(); + } + + // We specialize the column by `ctor`. This gives us `arity`-many columns of patterns. These + // columns may have different lengths in the presence of or-patterns (this is why we can't + // reuse `Matrix`). + let mut specialized_columns: Vec<_> = + (0..arity).map(|_| Self { patterns: Vec::new() }).collect(); + let relevant_patterns = + self.patterns.iter().filter(|pat| ctor.is_covered_by(pcx, pat.ctor())); + for pat in relevant_patterns { + let specialized = pat.specialize(pcx, ctor); + for (subpat, column) in specialized.iter().zip(&mut specialized_columns) { + if subpat.is_or_pat() { + column.patterns.extend(subpat.flatten_or_pat()) + } else { + column.patterns.push(subpat) + } + } + } + + assert!( + !specialized_columns[0].is_empty(), + "ctor {ctor:?} was listed as present but isn't; + there is an inconsistency between `Constructor::is_covered_by` and `ConstructorSet::split`" + ); + specialized_columns + } +} + +/// Traverse the patterns to collect any variants of a non_exhaustive enum that fail to be mentioned +/// in a given column. 
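Illustrative sketch (not part of this commit) of the situation this traversal targets, using `std::io::ErrorKind` as a foreign `#[non_exhaustive]` enum. With the (unstable) `non_exhaustive_omitted_patterns` lint enabled, the variants that the explicit arms fail to mention would be reported even though the wildcard keeps the match exhaustive.

```rust
use std::io::ErrorKind;

fn describe(kind: ErrorKind) -> &'static str {
    match kind {
        ErrorKind::NotFound => "not found",
        ErrorKind::PermissionDenied => "permission denied",
        // Mandatory for a foreign `#[non_exhaustive]` enum; the lint's job is to
        // list the known variants that silently fall through to this arm.
        _ => "something else",
    }
}

fn main() {
    println!("{}", describe(ErrorKind::NotFound));
}
```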
+#[instrument(level = "debug", skip(cx), ret)] +fn collect_nonexhaustive_missing_variants<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + column: &PatternColumn<'p, 'tcx>, +) -> Vec<WitnessPat<'tcx>> { + let Some(ty) = column.head_ty() else { + return Vec::new(); + }; + let pcx = &PatCtxt::new_dummy(cx, ty); + + let set = column.analyze_ctors(pcx); + if set.present.is_empty() { + // We can't consistently handle the case where no constructors are present (since this would + // require digging deep through any type in case there's a non_exhaustive enum somewhere), + // so for consistency we refuse to handle the top-level case, where we could handle it. + return vec![]; + } + + let mut witnesses = Vec::new(); + if cx.is_foreign_non_exhaustive_enum(ty) { + witnesses.extend( + set.missing + .into_iter() + // This will list missing visible variants. + .filter(|c| !matches!(c, Constructor::Hidden | Constructor::NonExhaustive)) + .map(|missing_ctor| WitnessPat::wild_from_ctor(pcx, missing_ctor)), + ) + } + + // Recurse into the fields. + for ctor in set.present { + let specialized_columns = column.specialize(pcx, &ctor); + let wild_pat = WitnessPat::wild_from_ctor(pcx, ctor); + for (i, col_i) in specialized_columns.iter().enumerate() { + // Compute witnesses for each column. + let wits_for_col_i = collect_nonexhaustive_missing_variants(cx, col_i); + // For each witness, we build a new pattern in the shape of `ctor(_, _, wit, _, _)`, + // adding enough wildcards to match `arity`. + for wit in wits_for_col_i { + let mut pat = wild_pat.clone(); + pat.fields[i] = wit; + witnesses.push(pat); + } + } + } + witnesses +} + +pub(crate) fn lint_nonexhaustive_missing_variants<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + arms: &[MatchArm<'p, 'tcx>], + pat_column: &PatternColumn<'p, 'tcx>, + scrut_ty: Ty<'tcx>, +) { + if !matches!( + cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, cx.match_lint_level).0, + rustc_session::lint::Level::Allow + ) { + let witnesses = collect_nonexhaustive_missing_variants(cx, pat_column); + if !witnesses.is_empty() { + // Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns` + // is not exhaustive enough. + // + // NB: The partner lint for structs lives in `compiler/rustc_hir_analysis/src/check/pat.rs`. + cx.tcx.emit_spanned_lint( + NON_EXHAUSTIVE_OMITTED_PATTERNS, + cx.match_lint_level, + cx.scrut_span, + NonExhaustiveOmittedPattern { + scrut_ty, + uncovered: Uncovered::new(cx.scrut_span, cx, witnesses), + }, + ); + } + } else { + // We used to allow putting the `#[allow(non_exhaustive_omitted_patterns)]` on a match + // arm. This no longer makes sense so we warn users, to avoid silently breaking their + // usage of the lint. + for arm in arms { + let (lint_level, lint_level_source) = + cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, arm.hir_id); + if !matches!(lint_level, rustc_session::lint::Level::Allow) { + let decorator = NonExhaustiveOmittedPatternLintOnArm { + lint_span: lint_level_source.span(), + suggest_lint_on_match: cx.whole_match_span.map(|span| span.shrink_to_lo()), + lint_level: lint_level.as_str(), + lint_name: "non_exhaustive_omitted_patterns", + }; + + use rustc_errors::DecorateLint; + let mut err = cx.tcx.sess.struct_span_warn(arm.pat.span(), ""); + err.set_primary_message(decorator.msg()); + decorator.decorate_lint(&mut err); + err.emit(); + } + } + } +} + +/// Traverse the patterns to warn the user about ranges that overlap on their endpoints. 
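Illustrative sketch (not part of this commit) of the pattern shape the endpoint-overlap traversal looks for: two ranges that share exactly one value at their boundary, which is usually an off-by-one mistake.

```rust
fn main() {
    let x = 5u8;
    match x {
        // Both arms include `5`; the `overlapping_range_endpoints` lint flags
        // this, since the author probably meant `0..5` or `6..=10`.
        0..=5 => println!("low"),
        5..=10 => println!("high"),
        _ => println!("other"),
    }
}
```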
+#[instrument(level = "debug", skip(cx))] +pub(crate) fn lint_overlapping_range_endpoints<'p, 'tcx>( + cx: &MatchCheckCtxt<'p, 'tcx>, + column: &PatternColumn<'p, 'tcx>, +) { + let Some(ty) = column.head_ty() else { + return; + }; + let pcx = &PatCtxt::new_dummy(cx, ty); + + let set = column.analyze_ctors(pcx); + + if matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_)) { + let emit_lint = |overlap: &IntRange, this_span: Span, overlapped_spans: &[Span]| { + let overlap_as_pat = cx.hoist_pat_range(overlap, ty); + let overlaps: Vec<_> = overlapped_spans + .iter() + .copied() + .map(|span| Overlap { range: overlap_as_pat.clone(), span }) + .collect(); + cx.tcx.emit_spanned_lint( + lint::builtin::OVERLAPPING_RANGE_ENDPOINTS, + cx.match_lint_level, + this_span, + OverlappingRangeEndpoints { overlap: overlaps, range: this_span }, + ); + }; + + // If two ranges overlapped, the split set will contain their intersection as a singleton. + let split_int_ranges = set.present.iter().filter_map(|c| c.as_int_range()); + for overlap_range in split_int_ranges.clone() { + if overlap_range.is_singleton() { + let overlap: MaybeInfiniteInt = overlap_range.lo; + // Ranges that look like `lo..=overlap`. + let mut prefixes: SmallVec<[_; 1]> = Default::default(); + // Ranges that look like `overlap..=hi`. + let mut suffixes: SmallVec<[_; 1]> = Default::default(); + // Iterate on patterns that contained `overlap`. + for pat in column.iter() { + let this_span = pat.span(); + let Constructor::IntRange(this_range) = pat.ctor() else { continue }; + if this_range.is_singleton() { + // Don't lint when one of the ranges is a singleton. + continue; + } + if this_range.lo == overlap { + // `this_range` looks like `overlap..=this_range.hi`; it overlaps with any + // ranges that look like `lo..=overlap`. + if !prefixes.is_empty() { + emit_lint(overlap_range, this_span, &prefixes); + } + suffixes.push(this_span) + } else if this_range.hi == overlap.plus_one() { + // `this_range` looks like `this_range.lo..=overlap`; it overlaps with any + // ranges that look like `overlap..=hi`. + if !suffixes.is_empty() { + emit_lint(overlap_range, this_span, &suffixes); + } + prefixes.push(this_span) + } + } + } + } + } else { + // Recurse into the fields. + for ctor in set.present { + for col in column.specialize(pcx, &ctor) { + lint_overlapping_range_endpoints(cx, &col); + } + } + } +} diff --git a/compiler/rustc_pattern_analysis/src/pat.rs b/compiler/rustc_pattern_analysis/src/pat.rs new file mode 100644 index 00000000000..404651124ad --- /dev/null +++ b/compiler/rustc_pattern_analysis/src/pat.rs @@ -0,0 +1,205 @@ +//! As explained in [`crate::usefulness`], values and patterns are made from constructors applied to +//! fields. This file defines types that represent patterns in this way. +use std::cell::Cell; +use std::fmt; + +use smallvec::{smallvec, SmallVec}; + +use rustc_data_structures::captures::Captures; +use rustc_middle::ty::{self, Ty}; +use rustc_span::{Span, DUMMY_SP}; + +use self::Constructor::*; +use self::SliceKind::*; + +use crate::constructor::{Constructor, SliceKind}; +use crate::cx::MatchCheckCtxt; +use crate::usefulness::PatCtxt; + +/// Values and patterns can be represented as a constructor applied to some fields. This represents +/// a pattern in this form. +/// This also uses interior mutability to keep track of whether the pattern has been found reachable +/// during analysis. For this reason they cannot be cloned. 
+/// A `DeconstructedPat` will almost always come from user input; the only exception are some +/// `Wildcard`s introduced during specialization. +/// +/// Note that the number of fields may not match the fields declared in the original struct/variant. +/// This happens if a private or `non_exhaustive` field is uninhabited, because the code mustn't +/// observe that it is uninhabited. In that case that field is not included in `fields`. Care must +/// be taken when converting to/from `thir::Pat`. +pub struct DeconstructedPat<'p, 'tcx> { + ctor: Constructor<'tcx>, + fields: &'p [DeconstructedPat<'p, 'tcx>], + ty: Ty<'tcx>, + span: Span, + /// Whether removing this arm would change the behavior of the match expression. + useful: Cell<bool>, +} + +impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> { + pub fn wildcard(ty: Ty<'tcx>, span: Span) -> Self { + Self::new(Wildcard, &[], ty, span) + } + + pub fn new( + ctor: Constructor<'tcx>, + fields: &'p [DeconstructedPat<'p, 'tcx>], + ty: Ty<'tcx>, + span: Span, + ) -> Self { + DeconstructedPat { ctor, fields, ty, span, useful: Cell::new(false) } + } + + pub(crate) fn is_or_pat(&self) -> bool { + matches!(self.ctor, Or) + } + /// Expand this (possibly-nested) or-pattern into its alternatives. + pub(crate) fn flatten_or_pat(&'p self) -> SmallVec<[&'p Self; 1]> { + if self.is_or_pat() { + self.iter_fields().flat_map(|p| p.flatten_or_pat()).collect() + } else { + smallvec![self] + } + } + + pub fn ctor(&self) -> &Constructor<'tcx> { + &self.ctor + } + pub fn ty(&self) -> Ty<'tcx> { + self.ty + } + pub fn span(&self) -> Span { + self.span + } + + pub fn iter_fields<'a>( + &'a self, + ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> { + self.fields.iter() + } + + /// Specialize this pattern with a constructor. + /// `other_ctor` can be different from `self.ctor`, but must be covered by it. + pub(crate) fn specialize<'a>( + &'a self, + pcx: &PatCtxt<'_, 'p, 'tcx>, + other_ctor: &Constructor<'tcx>, + ) -> SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]> { + match (&self.ctor, other_ctor) { + (Wildcard, _) => { + // We return a wildcard for each field of `other_ctor`. + pcx.cx.ctor_wildcard_fields(other_ctor, pcx.ty).iter().collect() + } + (Slice(self_slice), Slice(other_slice)) + if self_slice.arity() != other_slice.arity() => + { + // The only tricky case: two slices of different arity. Since `self_slice` covers + // `other_slice`, `self_slice` must be `VarLen`, i.e. of the form + // `[prefix, .., suffix]`. Moreover `other_slice` is guaranteed to have a larger + // arity. So we fill the middle part with enough wildcards to reach the length of + // the new, larger slice. + match self_slice.kind { + FixedLen(_) => bug!("{:?} doesn't cover {:?}", self_slice, other_slice), + VarLen(prefix, suffix) => { + let (ty::Slice(inner_ty) | ty::Array(inner_ty, _)) = *self.ty.kind() else { + bug!("bad slice pattern {:?} {:?}", self.ctor, self.ty); + }; + let prefix = &self.fields[..prefix]; + let suffix = &self.fields[self_slice.arity() - suffix..]; + let wildcard: &_ = pcx + .cx + .pattern_arena + .alloc(DeconstructedPat::wildcard(inner_ty, DUMMY_SP)); + let extra_wildcards = other_slice.arity() - self_slice.arity(); + let extra_wildcards = (0..extra_wildcards).map(|_| wildcard); + prefix.iter().chain(extra_wildcards).chain(suffix).collect() + } + } + } + _ => self.fields.iter().collect(), + } + } + + /// We keep track for each pattern if it was ever useful during the analysis. 
This is used + /// with `redundant_spans` to report redundant subpatterns arising from or patterns. + pub(crate) fn set_useful(&self) { + self.useful.set(true) + } + pub(crate) fn is_useful(&self) -> bool { + if self.useful.get() { + true + } else if self.is_or_pat() && self.iter_fields().any(|f| f.is_useful()) { + // We always expand or patterns in the matrix, so we will never see the actual + // or-pattern (the one with constructor `Or`) in the column. As such, it will not be + // marked as useful itself, only its children will. We recover this information here. + self.set_useful(); + true + } else { + false + } + } + + /// Report the spans of subpatterns that were not useful, if any. + pub(crate) fn redundant_spans(&self) -> Vec<Span> { + let mut spans = Vec::new(); + self.collect_redundant_spans(&mut spans); + spans + } + fn collect_redundant_spans(&self, spans: &mut Vec<Span>) { + // We don't look at subpatterns if we already reported the whole pattern as redundant. + if !self.is_useful() { + spans.push(self.span); + } else { + for p in self.iter_fields() { + p.collect_redundant_spans(spans); + } + } + } +} + +/// This is mostly copied from the `Pat` impl. This is best effort and not good enough for a +/// `Display` impl. +impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + MatchCheckCtxt::debug_pat(f, self) + } +} + +/// Same idea as `DeconstructedPat`, except this is a fictitious pattern built up for diagnostics +/// purposes. As such they don't use interning and can be cloned. +#[derive(Debug, Clone)] +pub struct WitnessPat<'tcx> { + ctor: Constructor<'tcx>, + pub(crate) fields: Vec<WitnessPat<'tcx>>, + ty: Ty<'tcx>, +} + +impl<'tcx> WitnessPat<'tcx> { + pub(crate) fn new(ctor: Constructor<'tcx>, fields: Vec<Self>, ty: Ty<'tcx>) -> Self { + Self { ctor, fields, ty } + } + pub(crate) fn wildcard(ty: Ty<'tcx>) -> Self { + Self::new(Wildcard, Vec::new(), ty) + } + + /// Construct a pattern that matches everything that starts with this constructor. + /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern + /// `Some(_)`. + pub(crate) fn wild_from_ctor(pcx: &PatCtxt<'_, '_, 'tcx>, ctor: Constructor<'tcx>) -> Self { + let field_tys = + pcx.cx.ctor_wildcard_fields(&ctor, pcx.ty).iter().map(|deco_pat| deco_pat.ty()); + let fields = field_tys.map(|ty| Self::wildcard(ty)).collect(); + Self::new(ctor, fields, pcx.ty) + } + + pub fn ctor(&self) -> &Constructor<'tcx> { + &self.ctor + } + pub fn ty(&self) -> Ty<'tcx> { + self.ty + } + + pub fn iter_fields<'a>(&'a self) -> impl Iterator<Item = &'a WitnessPat<'tcx>> { + self.fields.iter() + } +} diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_pattern_analysis/src/usefulness.rs index a2f829f93e3..f268a551547 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs +++ b/compiler/rustc_pattern_analysis/src/usefulness.rs @@ -1,8 +1,8 @@ -//! # Match exhaustiveness and reachability algorithm +//! # Match exhaustiveness and redundancy algorithm //! -//! This file contains the logic for exhaustiveness and reachability checking for pattern-matching. +//! This file contains the logic for exhaustiveness and usefulness checking for pattern-matching. //! Specifically, given a list of patterns in a match, we can tell whether: -//! (a) a given pattern is reachable (reachability) +//! (a) a given pattern is redundant //! (b) the patterns cover every possible value for the type (exhaustiveness) //! //! 
The algorithm implemented here is inspired from the one described in [this @@ -19,15 +19,15 @@ //! The algorithm is given as input a list of patterns, one for each arm of a match, and computes //! the following: //! - a set of values that match none of the patterns (if any), -//! - for each subpattern (taking into account or-patterns), whether it would catch any value that -//! isn't caught by a pattern before it, i.e. whether it is reachable. +//! - for each subpattern (taking into account or-patterns), whether removing it would change +//! anything about how the match executes, i.e. whether it is useful/not redundant. //! //! To a first approximation, the algorithm works by exploring all possible values for the type //! being matched on, and determining which arm(s) catch which value. To make this tractable we //! cleverly group together values, as we'll see below. //! //! The entrypoint of this file is the [`compute_match_usefulness`] function, which computes -//! reachability for each subpattern and exhaustiveness for the whole match. +//! usefulness for each subpattern and exhaustiveness for the whole match. //! //! In this page we explain the necessary concepts to understand how the algorithm works. //! @@ -39,17 +39,17 @@ //! none of the `p_i`. We write `usefulness(p_1 .. p_n, q)` for a function that returns a list of //! such values. The aim of this file is to compute it efficiently. //! -//! This is enough to compute reachability: a pattern in a `match` expression is reachable iff it is -//! useful w.r.t. the patterns above it: +//! This is enough to compute usefulness: a pattern in a `match` expression is redundant iff it is +//! not useful w.r.t. the patterns above it: //! ```compile_fail,E0004 //! # #![feature(exclusive_range_pattern)] //! # fn foo() { //! match Some(0u32) { //! Some(0..100) => {}, -//! Some(90..190) => {}, // reachable: `Some(150)` is matched by this but not the branch above -//! Some(50..150) => {}, // unreachable: all the values this matches are already matched by +//! Some(90..190) => {}, // useful: `Some(150)` is matched by this but not the branch above +//! Some(50..150) => {}, // redundant: all the values this matches are already matched by //! // the branches above -//! None => {}, // reachable: `None` is matched by this but not the branches above +//! None => {}, // useful: `None` is matched by this but not the branches above //! } //! # } //! ``` @@ -97,8 +97,9 @@ //! - `matches!([v0], [p0, .., p1]) := false` (incompatible lengths) //! - `matches!([v0, v1, v2], [p0, .., p1]) := matches!(v0, p0) && matches!(v2, p1)` //! -//! Constructors, fields and relevant operations are defined in the [`super::deconstruct_pat`] -//! module. The question of whether a constructor is matched by another one is answered by +//! Constructors and relevant operations are defined in the [`crate::constructor`] module. A +//! representation of patterns that uses constructors is available in [`crate::pat`]. The question +//! of whether a constructor is matched by another one is answered by //! [`Constructor::is_covered_by`]. //! //! Note 1: variable bindings (like the `x` in `Some(x)`) match anything, so we treat them as wildcards. @@ -241,23 +242,22 @@ //! Therefore `usefulness(tp_1, tp_2, tq)` returns the single witness-tuple `[Variant2(Some(true), 0)]`. //! //! -//! Computing the set of constructors for a type is done in [`ConstructorSet::for_ty`]. See the -//! following sections for more accurate versions of the algorithm and corresponding links. +//! 
Computing the set of constructors for a type is done in [`MatchCheckCtxt::ctors_for_ty`]. See +//! the following sections for more accurate versions of the algorithm and corresponding links. //! //! //! -//! # Computing reachability and exhaustiveness in one go +//! # Computing usefulness and exhaustiveness in one go //! -//! The algorithm we have described so far computes usefulness of each pattern in turn to check if -//! it is reachable, and ends by checking if `_` is useful to determine exhaustiveness of the whole -//! match. In practice, instead of doing "for each pattern { for each constructor { ... } }", we do -//! "for each constructor { for each pattern { ... } }". This allows us to compute everything in one -//! go. +//! The algorithm we have described so far computes usefulness of each pattern in turn, and ends by +//! checking if `_` is useful to determine exhaustiveness of the whole match. In practice, instead +//! of doing "for each pattern { for each constructor { ... } }", we do "for each constructor { for +//! each pattern { ... } }". This allows us to compute everything in one go. //! -//! [`Matrix`] stores the set of pattern-tuples under consideration. We track reachability of each +//! [`Matrix`] stores the set of pattern-tuples under consideration. We track usefulness of each //! row mutably in the matrix as we go along. We ignore witnesses of usefulness of the match rows. //! We gather witnesses of the usefulness of `_` in [`WitnessMatrix`]. The algorithm that computes -//! all this is in [`compute_exhaustiveness_and_reachability`]. +//! all this is in [`compute_exhaustiveness_and_usefulness`]. //! //! See the full example at the bottom of this documentation. //! @@ -279,7 +279,7 @@ //! ``` //! //! In this example, trying any of `0`, `1`, .., `49` will give the same specialized matrix, and -//! thus the same reachability/exhaustiveness results. We can thus accelerate the algorithm by +//! thus the same usefulness/exhaustiveness results. We can thus accelerate the algorithm by //! trying them all at once. Here in fact, the only cases we need to consider are: `0..50`, //! `50..=100`, `101..=150`,`151..=200` and `201..`. //! @@ -296,7 +296,8 @@ //! the same reasoning, we only need to try two cases: `North`, and "everything else". //! //! We call _constructor splitting_ the operation that computes such a minimal set of cases to try. -//! This is done in [`ConstructorSet::split`] and explained in [`super::deconstruct_pat`]. +//! This is done in [`ConstructorSet::split`] and explained in [`crate::constructor`]. +//! //! //! //! # Or-patterns @@ -305,9 +306,9 @@ //! first pattern of a row in the matrix is an or-pattern, we expand it by duplicating the rest of //! the row as necessary. This is handled automatically in [`Matrix`]. //! -//! This makes reachability tracking subtle, because we also want to compute whether an alternative -//! of an or-pattern is unreachable, e.g. in `Some(_) | Some(0)`. We track reachability of each -//! subpattern by interior mutability in [`DeconstructedPat`] with `set_reachable`/`is_reachable`. +//! This makes usefulness tracking subtle, because we also want to compute whether an alternative +//! of an or-pattern is redundant, e.g. in `Some(_) | Some(0)`. We track usefulness of each +//! subpattern by interior mutability in [`DeconstructedPat`] with `set_useful`/`is_useful`. //! //! It's unfortunate that we have to use interior mutability, but believe me (Nadrieril), I have //! 
tried [other](https://github.com/rust-lang/rust/pull/80104) @@ -332,6 +333,69 @@ //! //! //! +//! # Usefulness vs reachability, validity, and empty patterns +//! +//! This is likely the subtlest aspect of the algorithm. To be fully precise, a match doesn't +//! operate on a value, it operates on a place. In certain unsafe circumstances, it is possible for +//! a place to not contain valid data for its type. This has subtle consequences for empty types. +//! Take the following: +//! +//! ```rust +//! enum Void {} +//! let x: u8 = 0; +//! let ptr: *const Void = &x as *const u8 as *const Void; +//! unsafe { +//! match *ptr { +//! _ => println!("Reachable!"), +//! } +//! } +//! ``` +//! +//! In this example, `ptr` is a valid pointer pointing to a place with invalid data. The `_` pattern +//! does not look at the contents of `*ptr`, so this is ok and the arm is taken. In other words, +//! despite the place we are inspecting being of type `Void`, there is a reachable arm. If the +//! arm had a binding however: +//! +//! ```rust +//! # #[derive(Copy, Clone)] +//! # enum Void {} +//! # let x: u8 = 0; +//! # let ptr: *const Void = &x as *const u8 as *const Void; +//! # unsafe { +//! match *ptr { +//! _a => println!("Unreachable!"), +//! } +//! # } +//! ``` +//! +//! Here the binding loads the value of type `Void` from the `*ptr` place. In this example, this +//! causes UB since the data is not valid. In the general case, this asserts validity of the data at +//! `*ptr`. Either way, this arm will never be taken. +//! +//! Finally, let's consider the empty match `match *ptr {}`. If we consider this exhaustive, then +//! having invalid data at `*ptr` is invalid. In other words, the empty match is semantically +//! equivalent to the `_a => ...` match. In the interest of explicitness, we prefer the case with an +//! arm, hence we won't tell the user to remove the `_a` arm. In other words, the `_a` arm is +//! unreachable yet not redundant. This is why we lint on redundant arms rather than unreachable +//! arms, despite the fact that the lint says "unreachable". +//! +//! These considerations only affects certain places, namely those that can contain non-valid data +//! without UB. These are: pointer dereferences, reference dereferences, and union field accesses. +//! We track in the algorithm whether a given place is known to contain valid data. This is done +//! first by inspecting the scrutinee syntactically (which gives us `cx.known_valid_scrutinee`), and +//! then by tracking validity of each column of the matrix (which correspond to places) as we +//! recurse into subpatterns. That second part is done through [`ValidityConstraint`], most notably +//! [`ValidityConstraint::specialize`]. +//! +//! Having said all that, in practice we don't fully follow what's been presented in this section. +//! Under `exhaustive_patterns`, we allow omitting empty arms even in `!known_valid` places, for +//! backwards-compatibility until we have a better alternative. Without `exhaustive_patterns`, we +//! mostly treat empty types as inhabited, except specifically a non-nested `!` or empty enum. In +//! this specific case we also allow the empty match regardless of place validity, for +//! backwards-compatibility. Hopefully we can eventually deprecate this. +//! +//! +//! //! # Full example //! //! We illustrate a full run of the algorithm on the following match. @@ -348,7 +412,7 @@ //! ``` //! //! We keep track of the original row for illustration purposes, this is not what the algorithm -//! 
actually does (it tracks reachability as a boolean on each row). +//! actually does (it tracks usefulness as a boolean on each row). //! //! ```text //! ┐ Patterns: @@ -377,7 +441,7 @@ //! │ │ │ ├─┐ Patterns: //! │ │ │ │ │ 1. `[]` //! │ │ │ │ │ -//! │ │ │ │ │ We note arm 1 is reachable (by `Pair(Some(0), true)`). +//! │ │ │ │ │ We note arm 1 is useful (by `Pair(Some(0), true)`). //! │ │ │ ├─┘ //! │ │ │ │ //! │ │ │ │ Specialize with `false`: @@ -385,7 +449,7 @@ //! │ │ │ │ │ 1. `[]` //! │ │ │ │ │ 3. `[]` //! │ │ │ │ │ -//! │ │ │ │ │ We note arm 1 is reachable (by `Pair(Some(0), false)`). +//! │ │ │ │ │ We note arm 1 is useful (by `Pair(Some(0), false)`). //! │ │ │ ├─┘ //! │ │ ├─┘ //! │ │ │ @@ -408,7 +472,7 @@ //! │ │ │ ├─┐ Patterns: //! │ │ │ │ │ 2. `[]` //! │ │ │ │ │ -//! │ │ │ │ │ We note arm 2 is reachable (by `Pair(Some(1..), false)`). +//! │ │ │ │ │ We note arm 2 is useful (by `Pair(Some(1..), false)`). //! │ │ │ ├─┘ //! │ │ │ │ //! │ │ │ │ Total witnesses for `1..`: @@ -442,7 +506,7 @@ //! │ │ ├─┐ Patterns: //! │ │ │ │ 2. `[]` //! │ │ │ │ -//! │ │ │ │ We note arm 2 is reachable (by `Pair(None, false)`). +//! │ │ │ │ We note arm 2 is useful (by `Pair(None, false)`). //! │ │ ├─┘ //! │ │ │ //! │ │ │ Total witnesses for `None`: @@ -466,7 +530,7 @@ //! ``` //! //! We conclude: -//! - Arm 3 is unreachable (it was never marked as reachable); +//! - Arm 3 is redundant (it was never marked as useful); //! - The match is not exhaustive; //! - Adding arms with `Pair(Some(1..), true)` and `Pair(None, true)` would make the match exhaustive. //! @@ -488,77 +552,35 @@ //! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific //! reason not to, for example if they crucially depend on a particular feature like `or_patterns`. -use super::deconstruct_pat::{ - Constructor, ConstructorSet, DeconstructedPat, IntRange, MaybeInfiniteInt, SplitConstructorSet, - WitnessPat, -}; -use crate::errors::{ - NonExhaustiveOmittedPattern, NonExhaustiveOmittedPatternLintOnArm, Overlap, - OverlappingRangeEndpoints, Uncovered, -}; - -use rustc_data_structures::captures::Captures; - -use rustc_arena::TypedArena; -use rustc_data_structures::stack::ensure_sufficient_stack; -use rustc_hir::def_id::DefId; -use rustc_hir::HirId; -use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_session::lint; -use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS; -use rustc_span::{Span, DUMMY_SP}; - use smallvec::{smallvec, SmallVec}; use std::fmt; -pub(crate) struct MatchCheckCtxt<'p, 'tcx> { - pub(crate) tcx: TyCtxt<'tcx>, - /// The module in which the match occurs. This is necessary for - /// checking inhabited-ness of types because whether a type is (visibly) - /// inhabited can depend on whether it was defined in the current module or - /// not. E.g., `struct Foo { _private: ! }` cannot be seen to be empty - /// outside its module and should not be matchable with an empty match statement. - pub(crate) module: DefId, - pub(crate) param_env: ty::ParamEnv<'tcx>, - pub(crate) pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>, - /// Lint level at the match. - pub(crate) match_lint_level: HirId, - /// The span of the whole match, if applicable. - pub(crate) match_span: Option<Span>, - /// Span of the scrutinee. - pub(crate) scrut_span: Span, - /// Only produce `NON_EXHAUSTIVE_OMITTED_PATTERNS` lint on refutable patterns. 
- pub(crate) refutable: bool, -} +use rustc_data_structures::{captures::Captures, stack::ensure_sufficient_stack}; +use rustc_middle::ty::{self, Ty}; +use rustc_span::{Span, DUMMY_SP}; -impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { - pub(super) fn is_uninhabited(&self, ty: Ty<'tcx>) -> bool { - if self.tcx.features().exhaustive_patterns { - !ty.is_inhabited_from(self.tcx, self.module, self.param_env) - } else { - false - } - } +use crate::constructor::{Constructor, ConstructorSet}; +use crate::cx::MatchCheckCtxt; +use crate::pat::{DeconstructedPat, WitnessPat}; +use crate::MatchArm; - /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`. - pub(super) fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool { - match ty.kind() { - ty::Adt(def, ..) => { - def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did().is_local() - } - _ => false, - } - } -} +use self::ValidityConstraint::*; #[derive(Copy, Clone)] -pub(super) struct PatCtxt<'a, 'p, 'tcx> { - pub(super) cx: &'a MatchCheckCtxt<'p, 'tcx>, +pub(crate) struct PatCtxt<'a, 'p, 'tcx> { + pub(crate) cx: &'a MatchCheckCtxt<'p, 'tcx>, /// Type of the current column under investigation. - pub(super) ty: Ty<'tcx>, + pub(crate) ty: Ty<'tcx>, /// Whether the current pattern is the whole pattern as found in a match arm, or if it's a /// subpattern. - pub(super) is_top_level: bool, + pub(crate) is_top_level: bool, +} + +impl<'a, 'p, 'tcx> PatCtxt<'a, 'p, 'tcx> { + /// A `PatCtxt` when code other than `is_useful` needs one. + pub(crate) fn new_dummy(cx: &'a MatchCheckCtxt<'p, 'tcx>, ty: Ty<'tcx>) -> Self { + PatCtxt { cx, ty, is_top_level: false } + } } impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> { @@ -567,6 +589,70 @@ impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> { } } +/// Serves two purposes: +/// - in a wildcard, tracks whether the wildcard matches only valid values (i.e. is a binding `_a`) +/// or also invalid values (i.e. is a true `_` pattern). +/// - in the matrix, track whether a given place (aka column) is known to contain a valid value or +/// not. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum ValidityConstraint { + ValidOnly, + MaybeInvalid, + /// Option for backwards compatibility: the place is not known to be valid but we allow omitting + /// `useful && !reachable` arms anyway. + MaybeInvalidButAllowOmittingArms, +} + +impl ValidityConstraint { + fn from_bool(is_valid_only: bool) -> Self { + if is_valid_only { ValidOnly } else { MaybeInvalid } + } + + fn allow_omitting_side_effecting_arms(self) -> Self { + match self { + MaybeInvalid | MaybeInvalidButAllowOmittingArms => MaybeInvalidButAllowOmittingArms, + // There are no side-effecting empty arms here, nothing to do. + ValidOnly => ValidOnly, + } + } + + fn is_known_valid(self) -> bool { + matches!(self, ValidOnly) + } + fn allows_omitting_empty_arms(self) -> bool { + matches!(self, ValidOnly | MaybeInvalidButAllowOmittingArms) + } + + /// If the place has validity given by `self` and we read that the value at the place has + /// constructor `ctor`, this computes what we can assume about the validity of the constructor + /// fields. + /// + /// Pending further opsem decisions, the current behavior is: validity is preserved, except + /// inside `&` and union fields where validity is reset to `MaybeInvalid`. + fn specialize<'tcx>(self, pcx: &PatCtxt<'_, '_, 'tcx>, ctor: &Constructor<'tcx>) -> Self { + // We preserve validity except when we go inside a reference or a union field. 
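Hedged sketch (not part of this commit) of the union case mentioned in the comment above, following the same reasoning as the module documentation: a wildcard never loads the value, so it is fine even if the bytes behind the union field are not a valid `bool`, which is why validity is reset to `MaybeInvalid` under a union field.

```rust
union U {
    a: u8,
    b: bool,
}

fn main() {
    let u = U { a: 2 };
    unsafe {
        // `_` does not read `u.b`, so the (possibly invalid) `bool` is never
        // produced; a binding `_b => ...` would load it and assert its validity.
        match u.b {
            _ => println!("ok: nothing was read"),
        }
    }
}
```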
+ if matches!(ctor, Constructor::Single) + && (matches!(pcx.ty.kind(), ty::Ref(..)) + || matches!(pcx.ty.kind(), ty::Adt(def, ..) if def.is_union())) + { + // Validity of `x: &T` does not imply validity of `*x: T`. + MaybeInvalid + } else { + self + } + } +} + +impl fmt::Display for ValidityConstraint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + ValidOnly => "✓", + MaybeInvalid | MaybeInvalidButAllowOmittingArms => "?", + }; + write!(f, "{s}") + } +} + /// Represents a pattern-tuple under investigation. #[derive(Clone)] struct PatStack<'p, 'tcx> { @@ -639,13 +725,13 @@ struct MatrixRow<'p, 'tcx> { /// Whether the original arm had a guard. This is inherited when specializing. is_under_guard: bool, /// When we specialize, we remember which row of the original matrix produced a given row of the - /// specialized matrix. When we unspecialize, we use this to propagate reachability back up the + /// specialized matrix. When we unspecialize, we use this to propagate usefulness back up the /// callstack. parent_row: usize, /// False when the matrix is just built. This is set to `true` by - /// [`compute_exhaustiveness_and_reachability`] if the arm is found to be reachable. + /// [`compute_exhaustiveness_and_usefulness`] if the arm is found to be useful. /// This is reset to `false` when specializing. - reachable: bool, + useful: bool, } impl<'p, 'tcx> MatrixRow<'p, 'tcx> { @@ -672,7 +758,7 @@ impl<'p, 'tcx> MatrixRow<'p, 'tcx> { pats: patstack, parent_row: self.parent_row, is_under_guard: self.is_under_guard, - reachable: false, + useful: false, }) } @@ -688,7 +774,7 @@ impl<'p, 'tcx> MatrixRow<'p, 'tcx> { pats: self.pats.pop_head_constructor(pcx, ctor), parent_row, is_under_guard: self.is_under_guard, - reachable: false, + useful: false, } } } @@ -711,10 +797,15 @@ impl<'p, 'tcx> fmt::Debug for MatrixRow<'p, 'tcx> { /// the matrix will correspond to `scrutinee.0.Some.0` and the second column to `scrutinee.1`. #[derive(Clone)] struct Matrix<'p, 'tcx> { + /// Vector of rows. The rows must form a rectangular 2D array. Moreover, all the patterns of + /// each column must have the same type. Each column corresponds to a place within the + /// scrutinee. rows: Vec<MatrixRow<'p, 'tcx>>, /// Stores an extra fictitious row full of wildcards. Mostly used to keep track of the type of /// each column. This must obey the same invariants as the real rows. wildcard_row: PatStack<'p, 'tcx>, + /// Track for each column/place whether it contains a known valid value. + place_validity: SmallVec<[ValidityConstraint; 2]>, } impl<'p, 'tcx> Matrix<'p, 'tcx> { @@ -732,16 +823,28 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> { } /// Build a new matrix from an iterator of `MatchArm`s. 
- fn new(cx: &MatchCheckCtxt<'p, 'tcx>, arms: &[MatchArm<'p, 'tcx>], scrut_ty: Ty<'tcx>) -> Self { + fn new<'a>( + cx: &MatchCheckCtxt<'p, 'tcx>, + arms: &[MatchArm<'p, 'tcx>], + scrut_ty: Ty<'tcx>, + scrut_validity: ValidityConstraint, + ) -> Self + where + 'p: 'a, + { let wild_pattern = cx.pattern_arena.alloc(DeconstructedPat::wildcard(scrut_ty, DUMMY_SP)); let wildcard_row = PatStack::from_pattern(wild_pattern); - let mut matrix = Matrix { rows: Vec::with_capacity(arms.len()), wildcard_row }; + let mut matrix = Matrix { + rows: Vec::with_capacity(arms.len()), + wildcard_row, + place_validity: smallvec![scrut_validity], + }; for (row_id, arm) in arms.iter().enumerate() { let v = MatrixRow { pats: PatStack::from_pattern(arm.pat), parent_row: row_id, // dummy, we won't read it is_under_guard: arm.has_guard, - reachable: false, + useful: false, }; matrix.expand_and_push(v); } @@ -799,7 +902,13 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> { ctor: &Constructor<'tcx>, ) -> Matrix<'p, 'tcx> { let wildcard_row = self.wildcard_row.pop_head_constructor(pcx, ctor); - let mut matrix = Matrix { rows: Vec::new(), wildcard_row }; + let new_validity = self.place_validity[0].specialize(pcx, ctor); + let new_place_validity = std::iter::repeat(new_validity) + .take(ctor.arity(pcx)) + .chain(self.place_validity[1..].iter().copied()) + .collect(); + let mut matrix = + Matrix { rows: Vec::new(), wildcard_row, place_validity: new_place_validity }; for (i, row) in self.rows().enumerate() { if ctor.is_covered_by(pcx, row.head().ctor()) { let new_row = row.pop_head_constructor(pcx, ctor, i); @@ -818,27 +927,38 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> { /// + true + [Second(true)] + /// + false + [_] + /// + _ + [_, _, tail @ ..] + +/// | ✓ | ? | // column validity /// ``` impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "\n")?; - let Matrix { rows, .. } = self; - let pretty_printed_matrix: Vec<Vec<String>> = - rows.iter().map(|row| row.iter().map(|pat| format!("{pat:?}")).collect()).collect(); + let mut pretty_printed_matrix: Vec<Vec<String>> = self + .rows + .iter() + .map(|row| row.iter().map(|pat| format!("{pat:?}")).collect()) + .collect(); + pretty_printed_matrix + .push(self.place_validity.iter().map(|validity| format!("{validity}")).collect()); - let column_count = rows.iter().map(|row| row.len()).next().unwrap_or(0); - assert!(rows.iter().all(|row| row.len() == column_count)); + let column_count = self.column_count(); + assert!(self.rows.iter().all(|row| row.len() == column_count)); + assert!(self.place_validity.len() == column_count); let column_widths: Vec<usize> = (0..column_count) .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)) .collect(); - for row in pretty_printed_matrix { - write!(f, "+")?; + for (row_i, row) in pretty_printed_matrix.into_iter().enumerate() { + let is_validity_row = row_i == self.rows.len(); + let sep = if is_validity_row { "|" } else { "+" }; + write!(f, "{sep}")?; for (column, pat_str) in row.into_iter().enumerate() { write!(f, " ")?; write!(f, "{:1$}", pat_str, column_widths[column])?; - write!(f, " +")?; + write!(f, " {sep}")?; + } + if is_validity_row { + write!(f, " // column validity")?; } write!(f, "\n")?; } @@ -900,7 +1020,7 @@ impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> { /// /// See the top of the file for more detailed explanations and examples. 
#[derive(Debug, Clone)] -pub(crate) struct WitnessStack<'tcx>(Vec<WitnessPat<'tcx>>); +struct WitnessStack<'tcx>(Vec<WitnessPat<'tcx>>); impl<'tcx> WitnessStack<'tcx> { /// Asserts that the witness contains a single pattern, and returns it. @@ -940,14 +1060,14 @@ impl<'tcx> WitnessStack<'tcx> { /// Represents a set of pattern-tuples that are witnesses of non-exhaustiveness for error /// reporting. This has similar invariants as `Matrix` does. /// -/// The `WitnessMatrix` returned by [`compute_exhaustiveness_and_reachability`] obeys the invariant +/// The `WitnessMatrix` returned by [`compute_exhaustiveness_and_usefulness`] obeys the invariant /// that the union of the input `Matrix` and the output `WitnessMatrix` together matches the type /// exhaustively. /// /// Just as the `Matrix` starts with a single column, by the end of the algorithm, this has a single /// column, which contains the patterns that are missing for the match to be exhaustive. #[derive(Debug, Clone)] -pub struct WitnessMatrix<'tcx>(Vec<WitnessStack<'tcx>>); +struct WitnessMatrix<'tcx>(Vec<WitnessStack<'tcx>>); impl<'tcx> WitnessMatrix<'tcx> { /// New matrix with no witnesses. @@ -1029,7 +1149,7 @@ impl<'tcx> WitnessMatrix<'tcx> { /// The core of the algorithm. /// /// This recursively computes witnesses of the non-exhaustiveness of `matrix` (if any). Also tracks -/// usefulness of each row in the matrix (in `row.reachable`). We track reachability of each +/// usefulness of each row in the matrix (in `row.useful`). We track usefulness of each /// subpattern using interior mutability in `DeconstructedPat`. /// /// The input `Matrix` and the output `WitnessMatrix` together match the type exhaustively. @@ -1038,10 +1158,10 @@ impl<'tcx> WitnessMatrix<'tcx> { /// - specialization, where we dig into the rows that have a specific constructor and call ourselves /// recursively; /// - unspecialization, where we lift the results from the previous step into results for this step -/// (using `apply_constructor` and by updating `row.reachable` for each parent row). +/// (using `apply_constructor` and by updating `row.useful` for each parent row). /// This is all explained at the top of the file. #[instrument(level = "debug", skip(cx, is_top_level), ret)] -fn compute_exhaustiveness_and_reachability<'p, 'tcx>( +fn compute_exhaustiveness_and_usefulness<'p, 'tcx>( cx: &MatchCheckCtxt<'p, 'tcx>, matrix: &mut Matrix<'p, 'tcx>, is_top_level: bool, @@ -1050,13 +1170,13 @@ fn compute_exhaustiveness_and_reachability<'p, 'tcx>( let Some(ty) = matrix.head_ty() else { // The base case: there are no columns in the matrix. We are morally pattern-matching on (). - // A row is reachable iff it has no (unguarded) rows above it. + // A row is useful iff it has no (unguarded) rows above it. for row in matrix.rows_mut() { - // All rows are reachable until we find one without a guard. - row.reachable = true; + // All rows are useful until they're not. + row.useful = true; + // When there's an unguarded row, the match is exhaustive and any subsequent row is not + // useful. if !row.is_under_guard { - // There's an unguarded row, so the match is exhaustive, and any subsequent row is - // unreachable. return WitnessMatrix::empty(); } } @@ -1067,26 +1187,39 @@ fn compute_exhaustiveness_and_reachability<'p, 'tcx>( debug!("ty: {ty:?}"); let pcx = &PatCtxt { cx, ty, is_top_level }; + // Whether the place/column we are inspecting is known to contain valid data. 
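The rewritten base case above is what ultimately surfaces as the `unreachable_patterns` warning: once an unguarded arm matches the empty tuple of remaining places, every later row is redundant, while a guarded arm keeps the rows below it useful. In ordinary user code:

fn classify(n: i32) -> &'static str {
    match n {
        x if x < 0 => "negative", // guarded: rows below remain useful
        _ => "non-negative",      // unguarded catch-all: the match is exhaustive here
        // 0 => "zero",           // any arm added here would be reported as unreachable
    }
}

fn main() {
    assert_eq!(classify(-1), "negative");
    assert_eq!(classify(0), "non-negative");
}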
+ let place_validity = matrix.place_validity[0]; + // For backwards compability we allow omitting some empty arms that we ideally shouldn't. + let place_validity = place_validity.allow_omitting_side_effecting_arms(); + // Analyze the constructors present in this column. let ctors = matrix.heads().map(|p| p.ctor()); - let split_set = ConstructorSet::for_ty(pcx.cx, pcx.ty).split(pcx, ctors); - + let ctors_for_ty = &cx.ctors_for_ty(ty); + let is_integers = matches!(ctors_for_ty, ConstructorSet::Integers { .. }); // For diagnostics. + let split_set = ctors_for_ty.split(pcx, ctors); let all_missing = split_set.present.is_empty(); - let always_report_all = is_top_level && !IntRange::is_integral(pcx.ty); - // Whether we should report "Enum::A and Enum::C are missing" or "_ is missing". - let report_individual_missing_ctors = always_report_all || !all_missing; + // Build the set of constructors we will specialize with. It must cover the whole type. let mut split_ctors = split_set.present; - let mut only_report_missing = false; if !split_set.missing.is_empty() { // We need to iterate over a full set of constructors, so we add `Missing` to represent the // missing ones. This is explained under "Constructor Splitting" at the top of this file. split_ctors.push(Constructor::Missing); - // For diagnostic purposes we choose to only report the constructors that are missing. Since - // `Missing` matches only the wildcard rows, it matches fewer rows than any normal - // constructor and is therefore guaranteed to result in more witnesses. So skipping the - // other constructors does not jeopardize correctness. - only_report_missing = true; + } else if !split_set.missing_empty.is_empty() && !place_validity.is_known_valid() { + // The missing empty constructors are reachable if the place can contain invalid data. + split_ctors.push(Constructor::Missing); + } + + // Decide what constructors to report. + let always_report_all = is_top_level && !is_integers; + // Whether we should report "Enum::A and Enum::C are missing" or "_ is missing". + let report_individual_missing_ctors = always_report_all || !all_missing; + // Which constructors are considered missing. We ensure that `!missing_ctors.is_empty() => + // split_ctors.contains(Missing)`. The converse usually holds except in the + // `MaybeInvalidButAllowOmittingArms` backwards-compatibility case. + let mut missing_ctors = split_set.missing; + if !place_validity.allows_omitting_empty_arms() { + missing_ctors.extend(split_set.missing_empty); } let mut ret = WitnessMatrix::empty(); @@ -1095,14 +1228,22 @@ fn compute_exhaustiveness_and_reachability<'p, 'tcx>( // Dig into rows that match `ctor`. let mut spec_matrix = matrix.specialize_constructor(pcx, &ctor); let mut witnesses = ensure_sufficient_stack(|| { - compute_exhaustiveness_and_reachability(cx, &mut spec_matrix, false) + compute_exhaustiveness_and_usefulness(cx, &mut spec_matrix, false) }); - if !only_report_missing || matches!(ctor, Constructor::Missing) { + let counts_for_exhaustiveness = match ctor { + Constructor::Missing => !missing_ctors.is_empty(), + // If there are missing constructors we'll report those instead. Since `Missing` matches + // only the wildcard rows, it matches fewer rows than this constructor, and is therefore + // guaranteed to result in the same or more witnesses. So skipping this does not + // jeopardize correctness. + _ => missing_ctors.is_empty(), + }; + if counts_for_exhaustiveness { // Transform witnesses for `spec_matrix` into witnesses for `matrix`. 
witnesses.apply_constructor( pcx, - &split_set.missing, + &missing_ctors, &ctor, report_individual_missing_ctors, ); @@ -1113,275 +1254,51 @@ fn compute_exhaustiveness_and_reachability<'p, 'tcx>( // A parent row is useful if any of its children is. for child_row in spec_matrix.rows() { let parent_row = &mut matrix.rows[child_row.parent_row]; - parent_row.reachable = parent_row.reachable || child_row.reachable; + parent_row.useful = parent_row.useful || child_row.useful; } } - // Record that the subpattern is reachable. + // Record usefulness in the patterns. for row in matrix.rows() { - if row.reachable { - row.head().set_reachable(); + if row.useful { + row.head().set_useful(); } } ret } -/// A column of patterns in the matrix, where a column is the intuitive notion of "subpatterns that -/// inspect the same subvalue/place". -/// This is used to traverse patterns column-by-column for lints. Despite similarities with -/// [`compute_exhaustiveness_and_reachability`], this does a different traversal. Notably this is -/// linear in the depth of patterns, whereas `compute_exhaustiveness_and_reachability` is worst-case -/// exponential (exhaustiveness is NP-complete). The core difference is that we treat sub-columns -/// separately. -/// -/// This must not contain an or-pattern. `specialize` takes care to expand them. -/// -/// This is not used in the main algorithm; only in lints. -#[derive(Debug)] -struct PatternColumn<'p, 'tcx> { - patterns: Vec<&'p DeconstructedPat<'p, 'tcx>>, -} - -impl<'p, 'tcx> PatternColumn<'p, 'tcx> { - fn new(patterns: Vec<&'p DeconstructedPat<'p, 'tcx>>) -> Self { - Self { patterns } - } - - fn is_empty(&self) -> bool { - self.patterns.is_empty() - } - fn head_ty(&self) -> Option<Ty<'tcx>> { - if self.patterns.len() == 0 { - return None; - } - // If the type is opaque and it is revealed anywhere in the column, we take the revealed - // version. Otherwise we could encounter constructors for the revealed type and crash. - let is_opaque = |ty: Ty<'tcx>| matches!(ty.kind(), ty::Alias(ty::Opaque, ..)); - let first_ty = self.patterns[0].ty(); - if is_opaque(first_ty) { - for pat in &self.patterns { - let ty = pat.ty(); - if !is_opaque(ty) { - return Some(ty); - } - } - } - Some(first_ty) - } - - /// Do constructor splitting on the constructors of the column. - fn analyze_ctors(&self, pcx: &PatCtxt<'_, 'p, 'tcx>) -> SplitConstructorSet<'tcx> { - let column_ctors = self.patterns.iter().map(|p| p.ctor()); - ConstructorSet::for_ty(pcx.cx, pcx.ty).split(pcx, column_ctors) - } - - fn iter<'a>(&'a self) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> { - self.patterns.iter().copied() - } - - /// Does specialization: given a constructor, this takes the patterns from the column that match - /// the constructor, and outputs their fields. - /// This returns one column per field of the constructor. They usually all have the same length - /// (the number of patterns in `self` that matched `ctor`), except that we expand or-patterns - /// which may change the lengths. - fn specialize(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Vec<Self> { - let arity = ctor.arity(pcx); - if arity == 0 { - return Vec::new(); - } - - // We specialize the column by `ctor`. This gives us `arity`-many columns of patterns. These - // columns may have different lengths in the presence of or-patterns (this is why we can't - // reuse `Matrix`). 
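The unspecialization step above ("a parent row is useful if any of its children is") is just an or-reduction over the `parent_row` back-pointers recorded during specialization. A standalone sketch with illustrative names:

fn propagate_usefulness(parent_useful: &mut [bool], child_rows: &[(usize, bool)]) {
    // Each child row remembers which parent row it came from; a parent becomes
    // useful as soon as any of its children is.
    for &(parent_row, child_is_useful) in child_rows {
        parent_useful[parent_row] |= child_is_useful;
    }
}

fn main() {
    let mut parents = vec![false, false, false];
    propagate_usefulness(&mut parents, &[(0, false), (0, true), (2, false)]);
    assert_eq!(parents, vec![true, false, false]);
}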
- let mut specialized_columns: Vec<_> = - (0..arity).map(|_| Self { patterns: Vec::new() }).collect(); - let relevant_patterns = - self.patterns.iter().filter(|pat| ctor.is_covered_by(pcx, pat.ctor())); - for pat in relevant_patterns { - let specialized = pat.specialize(pcx, ctor); - for (subpat, column) in specialized.iter().zip(&mut specialized_columns) { - if subpat.is_or_pat() { - column.patterns.extend(subpat.flatten_or_pat()) - } else { - column.patterns.push(subpat) - } - } - } - - assert!( - !specialized_columns[0].is_empty(), - "ctor {ctor:?} was listed as present but isn't; - there is an inconsistency between `Constructor::is_covered_by` and `ConstructorSet::split`" - ); - specialized_columns - } -} - -/// Traverse the patterns to collect any variants of a non_exhaustive enum that fail to be mentioned -/// in a given column. -#[instrument(level = "debug", skip(cx), ret)] -fn collect_nonexhaustive_missing_variants<'p, 'tcx>( - cx: &MatchCheckCtxt<'p, 'tcx>, - column: &PatternColumn<'p, 'tcx>, -) -> Vec<WitnessPat<'tcx>> { - let Some(ty) = column.head_ty() else { - return Vec::new(); - }; - let pcx = &PatCtxt { cx, ty, is_top_level: false }; - - let set = column.analyze_ctors(pcx); - if set.present.is_empty() { - // We can't consistently handle the case where no constructors are present (since this would - // require digging deep through any type in case there's a non_exhaustive enum somewhere), - // so for consistency we refuse to handle the top-level case, where we could handle it. - return vec![]; - } - - let mut witnesses = Vec::new(); - if cx.is_foreign_non_exhaustive_enum(ty) { - witnesses.extend( - set.missing - .into_iter() - // This will list missing visible variants. - .filter(|c| !matches!(c, Constructor::Hidden | Constructor::NonExhaustive)) - .map(|missing_ctor| WitnessPat::wild_from_ctor(pcx, missing_ctor)), - ) - } - - // Recurse into the fields. - for ctor in set.present { - let specialized_columns = column.specialize(pcx, &ctor); - let wild_pat = WitnessPat::wild_from_ctor(pcx, ctor); - for (i, col_i) in specialized_columns.iter().enumerate() { - // Compute witnesses for each column. - let wits_for_col_i = collect_nonexhaustive_missing_variants(cx, col_i); - // For each witness, we build a new pattern in the shape of `ctor(_, _, wit, _, _)`, - // adding enough wildcards to match `arity`. - for wit in wits_for_col_i { - let mut pat = wild_pat.clone(); - pat.fields[i] = wit; - witnesses.push(pat); - } - } - } - witnesses -} - -/// Traverse the patterns to warn the user about ranges that overlap on their endpoints. -#[instrument(level = "debug", skip(cx))] -fn lint_overlapping_range_endpoints<'p, 'tcx>( - cx: &MatchCheckCtxt<'p, 'tcx>, - column: &PatternColumn<'p, 'tcx>, -) { - let Some(ty) = column.head_ty() else { - return; - }; - let pcx = &PatCtxt { cx, ty, is_top_level: false }; - - let set = column.analyze_ctors(pcx); - - if IntRange::is_integral(ty) { - let emit_lint = |overlap: &IntRange, this_span: Span, overlapped_spans: &[Span]| { - let overlap_as_pat = overlap.to_diagnostic_pat(ty, cx.tcx); - let overlaps: Vec<_> = overlapped_spans - .iter() - .copied() - .map(|span| Overlap { range: overlap_as_pat.clone(), span }) - .collect(); - cx.tcx.emit_spanned_lint( - lint::builtin::OVERLAPPING_RANGE_ENDPOINTS, - cx.match_lint_level, - this_span, - OverlappingRangeEndpoints { overlap: overlaps, range: this_span }, - ); - }; - - // If two ranges overlapped, the split set will contain their intersection as a singleton. 
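The removed comment above explains the trick behind the range-overlap detection being dropped from this file: when two ranges overlap on exactly one value, constructor splitting produces that value as a singleton, which is then reported. At the user level this is the `overlapping_range_endpoints` warning:

fn bucket(x: u32) -> &'static str {
    match x {
        0..=10 => "low",
        // Shares the endpoint 10 with the arm above; rustc's
        // `overlapping_range_endpoints` lint flags this, since `11..=20`
        // was probably intended.
        10..=20 => "mid",
        _ => "high",
    }
}

fn main() {
    assert_eq!(bucket(10), "low"); // 10 is taken by the first arm
    assert_eq!(bucket(15), "mid");
}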
- let split_int_ranges = set.present.iter().filter_map(|c| c.as_int_range()); - for overlap_range in split_int_ranges.clone() { - if overlap_range.is_singleton() { - let overlap: MaybeInfiniteInt = overlap_range.lo; - // Ranges that look like `lo..=overlap`. - let mut prefixes: SmallVec<[_; 1]> = Default::default(); - // Ranges that look like `overlap..=hi`. - let mut suffixes: SmallVec<[_; 1]> = Default::default(); - // Iterate on patterns that contained `overlap`. - for pat in column.iter() { - let this_span = pat.span(); - let Constructor::IntRange(this_range) = pat.ctor() else { continue }; - if this_range.is_singleton() { - // Don't lint when one of the ranges is a singleton. - continue; - } - if this_range.lo == overlap { - // `this_range` looks like `overlap..=this_range.hi`; it overlaps with any - // ranges that look like `lo..=overlap`. - if !prefixes.is_empty() { - emit_lint(overlap_range, this_span, &prefixes); - } - suffixes.push(this_span) - } else if this_range.hi == overlap.plus_one() { - // `this_range` looks like `this_range.lo..=overlap`; it overlaps with any - // ranges that look like `overlap..=hi`. - if !suffixes.is_empty() { - emit_lint(overlap_range, this_span, &suffixes); - } - prefixes.push(this_span) - } - } - } - } - } else { - // Recurse into the fields. - for ctor in set.present { - for col in column.specialize(pcx, &ctor) { - lint_overlapping_range_endpoints(cx, &col); - } - } - } -} - -/// The arm of a match expression. -#[derive(Clone, Copy, Debug)] -pub(crate) struct MatchArm<'p, 'tcx> { - /// The pattern must have been lowered through `check_match::MatchVisitor::lower_pattern`. - pub(crate) pat: &'p DeconstructedPat<'p, 'tcx>, - pub(crate) hir_id: HirId, - pub(crate) has_guard: bool, -} - -/// Indicates whether or not a given arm is reachable. +/// Indicates whether or not a given arm is useful. #[derive(Clone, Debug)] -pub(crate) enum Reachability { - /// The arm is reachable. This additionally carries a set of or-pattern branches that have been - /// found to be unreachable despite the overall arm being reachable. Used only in the presence - /// of or-patterns, otherwise it stays empty. - Reachable(Vec<Span>), - /// The arm is unreachable. - Unreachable, +pub enum Usefulness { + /// The arm is useful. This additionally carries a set of or-pattern branches that have been + /// found to be redundant despite the overall arm being useful. Used only in the presence of + /// or-patterns, otherwise it stays empty. + Useful(Vec<Span>), + /// The arm is redundant and can be removed without changing the behavior of the match + /// expression. + Redundant, } -/// The output of checking a match for exhaustiveness and arm reachability. -pub(crate) struct UsefulnessReport<'p, 'tcx> { - /// For each arm of the input, whether that arm is reachable after the arms above it. - pub(crate) arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>, +/// The output of checking a match for exhaustiveness and arm usefulness. +pub struct UsefulnessReport<'p, 'tcx> { + /// For each arm of the input, whether that arm is useful after the arms above it. + pub arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Usefulness)>, /// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of /// exhaustiveness. - pub(crate) non_exhaustiveness_witnesses: Vec<WitnessPat<'tcx>>, + pub non_exhaustiveness_witnesses: Vec<WitnessPat<'tcx>>, } -/// The entrypoint for this file. Computes whether a match is exhaustive and which of its arms are -/// reachable. 
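The renamed `Usefulness` enum above distinguishes a fully redundant arm from a useful arm that still contains redundant or-pattern alternatives (their spans travel in `Useful(Vec<Span>)`). In user terms:

fn describe(n: Option<u32>) -> &'static str {
    match n {
        // Useful arm, but the duplicate `Some(0)` alternative is redundant; its
        // span is what ends up in `Usefulness::Useful(vec![..])`.
        Some(0) | Some(0) => "zero",
        Some(_) => "nonzero",
        None => "none",
        // Some(1) => "one", // would be `Usefulness::Redundant`: the whole arm is unreachable
    }
}

fn main() {
    assert_eq!(describe(Some(0)), "zero");
    assert_eq!(describe(Some(3)), "nonzero");
}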
+/// Computes whether a match is exhaustive and which of its arms are useful. #[instrument(skip(cx, arms), level = "debug")] pub(crate) fn compute_match_usefulness<'p, 'tcx>( cx: &MatchCheckCtxt<'p, 'tcx>, arms: &[MatchArm<'p, 'tcx>], scrut_ty: Ty<'tcx>, ) -> UsefulnessReport<'p, 'tcx> { - let mut matrix = Matrix::new(cx, arms, scrut_ty); - let non_exhaustiveness_witnesses = - compute_exhaustiveness_and_reachability(cx, &mut matrix, true); + let scrut_validity = ValidityConstraint::from_bool(cx.known_valid_scrutinee); + let mut matrix = Matrix::new(cx, arms, scrut_ty, scrut_validity); + let non_exhaustiveness_witnesses = compute_exhaustiveness_and_usefulness(cx, &mut matrix, true); let non_exhaustiveness_witnesses: Vec<_> = non_exhaustiveness_witnesses.single_column(); let arm_usefulness: Vec<_> = arms @@ -1389,67 +1306,14 @@ pub(crate) fn compute_match_usefulness<'p, 'tcx>( .copied() .map(|arm| { debug!(?arm); - let reachability = if arm.pat.is_reachable() { - Reachability::Reachable(arm.pat.unreachable_spans()) + // We warn when a pattern is not useful. + let usefulness = if arm.pat.is_useful() { + Usefulness::Useful(arm.pat.redundant_spans()) } else { - Reachability::Unreachable + Usefulness::Redundant }; - (arm, reachability) + (arm, usefulness) }) .collect(); - let report = UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }; - - let pat_column = PatternColumn::new(matrix.heads().collect()); - // Lint on ranges that overlap on their endpoints, which is likely a mistake. - lint_overlapping_range_endpoints(cx, &pat_column); - - // Run the non_exhaustive_omitted_patterns lint. Only run on refutable patterns to avoid hitting - // `if let`s. Only run if the match is exhaustive otherwise the error is redundant. - if cx.refutable && report.non_exhaustiveness_witnesses.is_empty() { - if !matches!( - cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, cx.match_lint_level).0, - rustc_session::lint::Level::Allow - ) { - let witnesses = collect_nonexhaustive_missing_variants(cx, &pat_column); - if !witnesses.is_empty() { - // Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns` - // is not exhaustive enough. - // - // NB: The partner lint for structs lives in `compiler/rustc_hir_analysis/src/check/pat.rs`. - cx.tcx.emit_spanned_lint( - NON_EXHAUSTIVE_OMITTED_PATTERNS, - cx.match_lint_level, - cx.scrut_span, - NonExhaustiveOmittedPattern { - scrut_ty, - uncovered: Uncovered::new(cx.scrut_span, cx, witnesses), - }, - ); - } - } else { - // We used to allow putting the `#[allow(non_exhaustive_omitted_patterns)]` on a match - // arm. This no longer makes sense so we warn users, to avoid silently breaking their - // usage of the lint. 
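The new `scrut_validity` argument above is derived from `known_valid_scrutinee`, which, as far as this diff shows, is meant to be false for scrutinee places the compiler cannot assume to hold valid data, such as union field reads or pointer dereferences. A small user-level illustration of such a place (the bit pattern is chosen to be valid so the example itself has no undefined behavior):

union U {
    b: bool,
    x: u8,
}

fn main() {
    let u = U { x: 1 }; // happens to be a valid `bool` bit pattern (`true`)
    // A union field read is not a place the type system can certify as valid,
    // so exhaustiveness over `u.b` must be computed under `MaybeInvalid`.
    let is_true = unsafe { matches!(u.b, true) };
    println!("{is_true}");
}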
- for arm in arms { - let (lint_level, lint_level_source) = - cx.tcx.lint_level_at_node(NON_EXHAUSTIVE_OMITTED_PATTERNS, arm.hir_id); - if !matches!(lint_level, rustc_session::lint::Level::Allow) { - let decorator = NonExhaustiveOmittedPatternLintOnArm { - lint_span: lint_level_source.span(), - suggest_lint_on_match: cx.match_span.map(|span| span.shrink_to_lo()), - lint_level: lint_level.as_str(), - lint_name: "non_exhaustive_omitted_patterns", - }; - - use rustc_errors::DecorateLint; - let mut err = cx.tcx.sess.struct_span_warn(arm.pat.span(), ""); - err.set_primary_message(decorator.msg()); - decorator.decorate_lint(&mut err); - err.emit(); - } - } - } - } - - report + UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses } } diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 9064cb6e875..e49282e638a 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -218,20 +218,21 @@ where return ControlFlow::Continue(()); } - let kind = match kind { - ty::Inherent | ty::Projection => "associated type", - ty::Weak => "type alias", - ty::Opaque => unreachable!(), - }; self.def_id_visitor.visit_def_id( data.def_id, - kind, + match kind { + ty::Inherent | ty::Projection => "associated type", + ty::Weak => "type alias", + ty::Opaque => unreachable!(), + }, &LazyDefPathStr { def_id: data.def_id, tcx }, )?; // This will also visit args if necessary, so we don't need to recurse. return if V::SHALLOW { ControlFlow::Continue(()) + } else if kind == ty::Projection { + self.visit_projection_ty(data) } else { data.args.iter().try_for_each(|subst| subst.visit_with(self)) }; diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs index ab5d3b368eb..02553d50071 100644 --- a/compiler/rustc_resolve/src/def_collector.rs +++ b/compiler/rustc_resolve/src/def_collector.rs @@ -156,29 +156,33 @@ impl<'a, 'b, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'b, 'tcx> { fn visit_fn(&mut self, fn_kind: FnKind<'a>, span: Span, _: NodeId) { if let FnKind::Fn(_, _, sig, _, generics, body) = fn_kind { - if let Some( - CoroutineKind::Async { closure_id, .. } | CoroutineKind::Gen { closure_id, .. }, - ) = sig.header.coro_kind - { - self.visit_generics(generics); - - // For async functions, we need to create their inner defs inside of a - // closure to match their desugared representation. Besides that, - // we must mirror everything that `visit::walk_fn` below does. - self.visit_fn_header(&sig.header); - for param in &sig.decl.inputs { - self.visit_param(param); - } - self.visit_fn_ret_ty(&sig.decl.output); - // If this async fn has no body (i.e. it's an async fn signature in a trait) - // then the closure_def will never be used, and we should avoid generating a - // def-id for it. - if let Some(body) = body { - let closure_def = - self.create_def(closure_id, kw::Empty, DefKind::Closure, span); - self.with_parent(closure_def, |this| this.visit_block(body)); + match sig.header.coroutine_kind { + Some(coroutine_kind) => { + self.visit_generics(generics); + + // For async functions, we need to create their inner defs inside of a + // closure to match their desugared representation. Besides that, + // we must mirror everything that `visit::walk_fn` below does. + self.visit_fn_header(&sig.header); + for param in &sig.decl.inputs { + self.visit_param(param); + } + self.visit_fn_ret_ty(&sig.decl.output); + // If this async fn has no body (i.e. 
it's an async fn signature in a trait) + // then the closure_def will never be used, and we should avoid generating a + // def-id for it. + if let Some(body) = body { + let closure_def = self.create_def( + coroutine_kind.closure_id(), + kw::Empty, + DefKind::Closure, + span, + ); + self.with_parent(closure_def, |this| this.visit_block(body)); + } + return; } - return; + None => {} } } @@ -284,11 +288,13 @@ impl<'a, 'b, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'b, 'tcx> { // Async closures desugar to closures inside of closures, so // we must create two defs. let closure_def = self.create_def(expr.id, kw::Empty, DefKind::Closure, expr.span); - match closure.coro_kind { - Some( - CoroutineKind::Async { closure_id, .. } - | CoroutineKind::Gen { closure_id, .. }, - ) => self.create_def(closure_id, kw::Empty, DefKind::Closure, expr.span), + match closure.coroutine_kind { + Some(coroutine_kind) => self.create_def( + coroutine_kind.closure_id(), + kw::Empty, + DefKind::Closure, + expr.span, + ), None => closure_def, } } diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index ad14f5e5225..9c96e9a9bd7 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -916,8 +916,10 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, &sig.decl.output, ); - if let Some((coro_node_id, _)) = - sig.header.coro_kind.map(|coro_kind| coro_kind.return_id()) + if let Some((coro_node_id, _)) = sig + .header + .coroutine_kind + .map(|coroutine_kind| coroutine_kind.return_id()) { this.record_lifetime_params_for_impl_trait(coro_node_id); } @@ -942,8 +944,10 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast, this.visit_generics(generics); let declaration = &sig.decl; - let coro_node_id = - sig.header.coro_kind.map(|coro_kind| coro_kind.return_id()); + let coro_node_id = sig + .header + .coroutine_kind + .map(|coroutine_kind| coroutine_kind.return_id()); this.with_lifetime_rib( LifetimeRibKind::AnonymousCreateParameter { @@ -3297,7 +3301,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { self.with_rib(ValueNS, RibKind::Normal, |this| { this.resolve_pattern_top(&arm.pat, PatternSource::Match); walk_list!(this, visit_expr, &arm.guard); - this.visit_expr(&arm.body); + walk_list!(this, visit_expr, &arm.body); }); } @@ -4294,7 +4298,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { // // Similarly, `gen |x| ...` gets desugared to `|x| gen {...}`, so we handle that too. ExprKind::Closure(box ast::Closure { - coro_kind: Some(_), + coroutine_kind: Some(_), ref fn_decl, ref body, .. diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 1ea3ab0d5ec..ef465f371d2 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -1283,7 +1283,7 @@ fn default_configuration(sess: &Session) -> Cfg { ret.insert((sym::relocation_model, Some(relocation_model))); } ret.insert((sym::target_vendor, Some(Symbol::intern(vendor)))); - if sess.target.has_thread_local { + if sess.opts.unstable_opts.has_thread_local.unwrap_or(sess.target.has_thread_local) { ret.insert((sym::target_thread_local, None)); } let mut has_atomic = false; @@ -1422,6 +1422,9 @@ impl CheckCfg { }; // NOTE: This should be kept in sync with `default_configuration` + // + // When adding a new config here you should also update + // `tests/ui/check-cfg/well-known-values.rs`. 
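The new unstable `-Z has-thread-local=<bool>` flag defined above overrides whether `cfg(target_thread_local)` is set, instead of always taking the target's `has_thread_local` default. Code that keys off that cfg looks like this (illustrative only):

fn tls_strategy() -> &'static str {
    // `target_thread_local` is the cfg toggled by the target spec and, with
    // this change, by `-Z has-thread-local=true|false` on nightly.
    if cfg!(target_thread_local) {
        "native #[thread_local] support"
    } else {
        "fallback (pthread keys / emulated TLS)"
    }
}

fn main() {
    println!("this target reports: {}", tls_strategy());
}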
let panic_values = &PanicStrategy::all(); diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index 7a6108bfbe2..b1cf43f471a 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -1624,6 +1624,8 @@ options! { graphviz_font: String = ("Courier, monospace".to_string(), parse_string, [UNTRACKED], "use the given `fontname` in graphviz output; can be overridden by setting \ environment variable `RUSTC_GRAPHVIZ_FONT` (default: `Courier, monospace`)"), + has_thread_local: Option<bool> = (None, parse_opt_bool, [TRACKED], + "explicitly enable the `cfg(target_thread_local)` directive"), hir_stats: bool = (false, parse_bool, [UNTRACKED], "print some statistics about AST and HIR (default: no)"), human_readable_cgu_names: bool = (false, parse_bool, [TRACKED], diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 123e9c788f5..24c7459392a 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -1474,17 +1474,6 @@ pub fn build_session( let asm_arch = if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None }; - // Check jobserver before getting `jobserver::client`. - jobserver::check(|err| { - #[allow(rustc::untranslatable_diagnostic)] - #[allow(rustc::diagnostic_outside_of_impl)] - parse_sess - .span_diagnostic - .struct_warn(err) - .note("the build environment is likely misconfigured") - .emit() - }); - let sess = Session { target: target_cfg, host, @@ -1792,6 +1781,18 @@ impl EarlyErrorHandler { pub fn early_warn(&self, msg: impl Into<DiagnosticMessage>) { self.handler.struct_warn(msg).emit() } + + pub fn initialize_checked_jobserver(&self) { + // initialize jobserver before getting `jobserver::client` and `build_session`. 
+ jobserver::initialize_checked(|err| { + #[allow(rustc::untranslatable_diagnostic)] + #[allow(rustc::diagnostic_outside_of_impl)] + self.handler + .struct_warn(err) + .note("the build environment is likely misconfigured") + .emit() + }); + } } fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> { diff --git a/compiler/rustc_smir/Cargo.toml b/compiler/rustc_smir/Cargo.toml index 836ea046ffe..c9e23efcb10 100644 --- a/compiler/rustc_smir/Cargo.toml +++ b/compiler/rustc_smir/Cargo.toml @@ -5,6 +5,7 @@ edition = "2021" [dependencies] # tidy-alphabetical-start +rustc_abi = { path = "../rustc_abi" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_hir = { path = "../rustc_hir" } rustc_middle = { path = "../rustc_middle" } diff --git a/compiler/rustc_smir/src/rustc_smir/alloc.rs b/compiler/rustc_smir/src/rustc_smir/alloc.rs index 850a52ce275..48cb164c308 100644 --- a/compiler/rustc_smir/src/rustc_smir/alloc.rs +++ b/compiler/rustc_smir/src/rustc_smir/alloc.rs @@ -2,6 +2,7 @@ use rustc_middle::mir::{ interpret::{alloc_range, AllocRange, Pointer}, ConstValue, }; +use stable_mir::Error; use crate::rustc_smir::{Stable, Tables}; use stable_mir::mir::Mutability; @@ -26,23 +27,35 @@ pub fn new_allocation<'tcx>( const_value: ConstValue<'tcx>, tables: &mut Tables<'tcx>, ) -> Allocation { - match const_value { + try_new_allocation(ty, const_value, tables).unwrap() +} + +#[allow(rustc::usage_of_qualified_ty)] +pub fn try_new_allocation<'tcx>( + ty: rustc_middle::ty::Ty<'tcx>, + const_value: ConstValue<'tcx>, + tables: &mut Tables<'tcx>, +) -> Result<Allocation, Error> { + Ok(match const_value { ConstValue::Scalar(scalar) => { let size = scalar.size(); let align = tables .tcx .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)) - .unwrap() + .map_err(|e| e.stable(tables))? .align; let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi); allocation .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar) - .unwrap(); + .map_err(|e| e.stable(tables))?; allocation.stable(tables) } ConstValue::ZeroSized => { - let align = - tables.tcx.layout_of(rustc_middle::ty::ParamEnv::empty().and(ty)).unwrap().align; + let align = tables + .tcx + .layout_of(rustc_middle::ty::ParamEnv::empty().and(ty)) + .map_err(|e| e.stable(tables))? 
+ .align; new_empty_allocation(align.abi) } ConstValue::Slice { data, meta } => { @@ -51,8 +64,10 @@ pub fn new_allocation<'tcx>( let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx); let scalar_meta = rustc_middle::mir::interpret::Scalar::from_target_usize(meta, &tables.tcx); - let layout = - tables.tcx.layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)).unwrap(); + let layout = tables + .tcx + .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)) + .map_err(|e| e.stable(tables))?; let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi); allocation @@ -61,14 +76,14 @@ pub fn new_allocation<'tcx>( alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size), scalar_ptr, ) - .unwrap(); + .map_err(|e| e.stable(tables))?; allocation .write_scalar( &tables.tcx, alloc_range(tables.tcx.data_layout.pointer_size, scalar_meta.size()), scalar_meta, ) - .unwrap(); + .map_err(|e| e.stable(tables))?; allocation.stable(tables) } ConstValue::Indirect { alloc_id, offset } => { @@ -76,11 +91,11 @@ pub fn new_allocation<'tcx>( let ty_size = tables .tcx .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)) - .unwrap() + .map_err(|e| e.stable(tables))? .size; allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables) } - } + }) } /// Creates an `Allocation` only from information within the `AllocRange`. diff --git a/compiler/rustc_smir/src/rustc_smir/context.rs b/compiler/rustc_smir/src/rustc_smir/context.rs index b10dfe85914..341c69e9327 100644 --- a/compiler/rustc_smir/src/rustc_smir/context.rs +++ b/compiler/rustc_smir/src/rustc_smir/context.rs @@ -11,18 +11,29 @@ use stable_mir::compiler_interface::Context; use stable_mir::mir::alloc::GlobalAlloc; use stable_mir::mir::mono::{InstanceDef, StaticDef}; use stable_mir::mir::Body; +use stable_mir::target::{MachineInfo, MachineSize}; use stable_mir::ty::{ AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, Const, FieldDef, FnDef, GenericArgs, - LineInfo, PolyFnSig, RigidTy, Span, TyKind, VariantDef, + LineInfo, PolyFnSig, RigidTy, Span, Ty, TyKind, VariantDef, }; use stable_mir::{self, Crate, CrateItem, DefId, Error, Filename, ItemKind, Symbol}; use std::cell::RefCell; use crate::rustc_internal::{internal, RustcInternal}; use crate::rustc_smir::builder::BodyBuilder; -use crate::rustc_smir::{new_item_kind, smir_crate, Stable, Tables}; +use crate::rustc_smir::{alloc, new_item_kind, smir_crate, Stable, Tables}; impl<'tcx> Context for TablesWrapper<'tcx> { + fn target_info(&self) -> MachineInfo { + let mut tables = self.0.borrow_mut(); + MachineInfo { + endian: tables.tcx.data_layout.endian.stable(&mut *tables), + pointer_width: MachineSize::from_bits( + tables.tcx.data_layout.pointer_size.bits().try_into().unwrap(), + ), + } + } + fn entry_fn(&self) -> Option<stable_mir::CrateItem> { let mut tables = self.0.borrow_mut(); let tcx = tables.tcx; @@ -382,6 +393,21 @@ impl<'tcx> Context for TablesWrapper<'tcx> { Instance::resolve_closure(tables.tcx, def_id, args_ref, closure_kind).stable(&mut *tables) } + fn eval_instance(&self, def: InstanceDef, const_ty: Ty) -> Result<Allocation, Error> { + let mut tables = self.0.borrow_mut(); + let instance = tables.instances[def]; + let result = tables.tcx.const_eval_instance( + ParamEnv::reveal_all(), + instance, + Some(tables.tcx.def_span(instance.def_id())), + ); + result + .map(|const_val| { + alloc::try_new_allocation(const_ty.internal(&mut *tables), const_val, &mut *tables) + }) + .map_err(|e| 
e.stable(&mut *tables))? + } + fn eval_static_initializer(&self, def: StaticDef) -> Result<Allocation, Error> { let mut tables = self.0.borrow_mut(); let def_id = def.0.internal(&mut *tables); diff --git a/compiler/rustc_smir/src/rustc_smir/convert/error.rs b/compiler/rustc_smir/src/rustc_smir/convert/error.rs new file mode 100644 index 00000000000..6c582b799f8 --- /dev/null +++ b/compiler/rustc_smir/src/rustc_smir/convert/error.rs @@ -0,0 +1,22 @@ +//! Handle the conversion of different internal errors into a stable version. +//! +//! Currently we encode everything as [stable_mir::Error], which is represented as a string. +use crate::rustc_smir::{Stable, Tables}; +use rustc_middle::mir::interpret::AllocError; +use rustc_middle::ty::layout::LayoutError; + +impl<'tcx> Stable<'tcx> for LayoutError<'tcx> { + type T = stable_mir::Error; + + fn stable(&self, _tables: &mut Tables<'tcx>) -> Self::T { + stable_mir::Error::new(format!("{self:?}")) + } +} + +impl<'tcx> Stable<'tcx> for AllocError { + type T = stable_mir::Error; + + fn stable(&self, _tables: &mut Tables<'tcx>) -> Self::T { + stable_mir::Error::new(format!("{self:?}")) + } +} diff --git a/compiler/rustc_smir/src/rustc_smir/convert/mod.rs b/compiler/rustc_smir/src/rustc_smir/convert/mod.rs index 9c0b2b29bca..7d8339ab503 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/mod.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/mod.rs @@ -5,6 +5,7 @@ use stable_mir::ty::{IndexedVal, VariantIdx}; use crate::rustc_smir::{Stable, Tables}; +mod error; mod mir; mod ty; @@ -56,6 +57,7 @@ impl<'tcx> Stable<'tcx> for rustc_hir::CoroutineKind { stable_mir::mir::CoroutineKind::Gen(source.stable(tables)) } CoroutineKind::Coroutine => stable_mir::mir::CoroutineKind::Coroutine, + CoroutineKind::AsyncGen(_) => todo!(), } } } @@ -75,3 +77,14 @@ impl<'tcx> Stable<'tcx> for rustc_span::Span { tables.create_span(*self) } } + +impl<'tcx> Stable<'tcx> for rustc_abi::Endian { + type T = stable_mir::target::Endian; + + fn stable(&self, _tables: &mut Tables<'tcx>) -> Self::T { + match self { + rustc_abi::Endian::Little => stable_mir::target::Endian::Little, + rustc_abi::Endian::Big => stable_mir::target::Endian::Big, + } + } +} diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs index 4fe847c291c..cbdddc30072 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs @@ -601,6 +601,7 @@ impl<'tcx> Stable<'tcx> for ty::PredicateKind<'tcx> { stable_mir::ty::PredicateKind::ConstEquate(a.stable(tables), b.stable(tables)) } PredicateKind::Ambiguous => stable_mir::ty::PredicateKind::Ambiguous, + PredicateKind::NormalizesTo(_pred) => unimplemented!(), PredicateKind::AliasRelate(a, b, alias_relation_direction) => { stable_mir::ty::PredicateKind::AliasRelate( a.unpack().stable(tables), diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 5c1e703837a..485265e6889 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -139,6 +139,9 @@ symbols! { AssertParamIsClone, AssertParamIsCopy, AssertParamIsEq, + AsyncGenFinished, + AsyncGenPending, + AsyncGenReady, AtomicBool, AtomicI128, AtomicI16, @@ -423,6 +426,7 @@ symbols! { async_closure, async_fn_in_trait, async_fn_track_caller, + async_iterator, atomic, atomic_mod, atomics, @@ -1200,6 +1204,7 @@ symbols! 
{ pointer, pointer_like, poll, + poll_next, post_dash_lto: "post-lto", powerpc_target_feature, powf32, @@ -1511,6 +1516,8 @@ symbols! { simd_insert, simd_le, simd_lt, + simd_masked_load, + simd_masked_store, simd_mul, simd_ne, simd_neg, diff --git a/compiler/rustc_target/src/spec/base/android.rs b/compiler/rustc_target/src/spec/base/android.rs index af15c16a5a9..5320f1b4bbb 100644 --- a/compiler/rustc_target/src/spec/base/android.rs +++ b/compiler/rustc_target/src/spec/base/android.rs @@ -1,10 +1,11 @@ -use crate::spec::{base, SanitizerSet, TargetOptions}; +use crate::spec::{base, SanitizerSet, TargetOptions, TlsModel}; pub fn opts() -> TargetOptions { let mut base = base::linux::opts(); base.os = "android".into(); base.is_like_android = true; base.default_dwarf_version = 2; + base.tls_model = TlsModel::Emulated; base.has_thread_local = false; base.supported_sanitizers = SanitizerSet::ADDRESS; // This is for backward compatibility, see https://github.com/rust-lang/rust/issues/49867 diff --git a/compiler/rustc_target/src/spec/base/linux_ohos.rs b/compiler/rustc_target/src/spec/base/linux_ohos.rs index 273e6a98dd4..8272cda05e7 100644 --- a/compiler/rustc_target/src/spec/base/linux_ohos.rs +++ b/compiler/rustc_target/src/spec/base/linux_ohos.rs @@ -1,11 +1,11 @@ -use crate::spec::{base, TargetOptions}; +use crate::spec::{base, TargetOptions, TlsModel}; pub fn opts() -> TargetOptions { let mut base = base::linux::opts(); base.env = "ohos".into(); base.crt_static_default = false; - base.force_emulated_tls = true; + base.tls_model = TlsModel::Emulated; base.has_thread_local = false; base diff --git a/compiler/rustc_target/src/spec/base/openbsd.rs b/compiler/rustc_target/src/spec/base/openbsd.rs index e7db14e05a4..bc3aecb5c1e 100644 --- a/compiler/rustc_target/src/spec/base/openbsd.rs +++ b/compiler/rustc_target/src/spec/base/openbsd.rs @@ -1,4 +1,4 @@ -use crate::spec::{cvs, FramePointer, RelroLevel, TargetOptions}; +use crate::spec::{cvs, FramePointer, RelroLevel, TargetOptions, TlsModel}; pub fn opts() -> TargetOptions { TargetOptions { @@ -11,6 +11,7 @@ pub fn opts() -> TargetOptions { frame_pointer: FramePointer::Always, // FIXME 43575: should be MayOmit... relro_level: RelroLevel::Full, default_dwarf_version: 2, + tls_model: TlsModel::Emulated, ..Default::default() } } diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 63002aa4aa4..24893bda9e7 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -929,6 +929,7 @@ pub enum TlsModel { LocalDynamic, InitialExec, LocalExec, + Emulated, } impl FromStr for TlsModel { @@ -942,6 +943,7 @@ impl FromStr for TlsModel { "local-dynamic" => TlsModel::LocalDynamic, "initial-exec" => TlsModel::InitialExec, "local-exec" => TlsModel::LocalExec, + "emulated" => TlsModel::Emulated, _ => return Err(()), }) } @@ -954,6 +956,7 @@ impl ToJson for TlsModel { TlsModel::LocalDynamic => "local-dynamic", TlsModel::InitialExec => "initial-exec", TlsModel::LocalExec => "local-exec", + TlsModel::Emulated => "emulated", } .to_json() } @@ -1600,8 +1603,10 @@ supported_targets! 
{ ("aarch64-uwp-windows-msvc", aarch64_uwp_windows_msvc), ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc), ("x86_64-uwp-windows-msvc", x86_64_uwp_windows_msvc), + ("x86_64-win7-windows-msvc", x86_64_win7_windows_msvc), ("i686-pc-windows-msvc", i686_pc_windows_msvc), ("i686-uwp-windows-msvc", i686_uwp_windows_msvc), + ("i686-win7-windows-msvc", i686_win7_windows_msvc), ("i586-pc-windows-msvc", i586_pc_windows_msvc), ("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc), ("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc), @@ -2191,9 +2196,6 @@ pub struct TargetOptions { /// Whether the target supports XRay instrumentation. pub supports_xray: bool, - - /// Forces the use of emulated TLS (__emutls_get_address) - pub force_emulated_tls: bool, } /// Add arguments for the given flavor and also for its "twin" flavors @@ -2409,7 +2411,6 @@ impl Default for TargetOptions { entry_name: "main".into(), entry_abi: Conv::C, supports_xray: false, - force_emulated_tls: false, } } } @@ -3113,7 +3114,6 @@ impl Target { key!(entry_name); key!(entry_abi, Conv)?; key!(supports_xray, bool); - key!(force_emulated_tls, bool); if base.is_builtin { // This can cause unfortunate ICEs later down the line. @@ -3369,7 +3369,6 @@ impl ToJson for Target { target_option_val!(entry_name); target_option_val!(entry_abi); target_option_val!(supports_xray); - target_option_val!(force_emulated_tls); if let Some(abi) = self.default_adjusted_cabi { d.insert("default-adjusted-cabi".into(), Abi::name(abi).to_json()); diff --git a/compiler/rustc_target/src/spec/targets/i686_win7_windows_msvc.rs b/compiler/rustc_target/src/spec/targets/i686_win7_windows_msvc.rs new file mode 100644 index 00000000000..ba80c23196e --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/i686_win7_windows_msvc.rs @@ -0,0 +1,32 @@ +use crate::spec::{base, LinkerFlavor, Lld, Target}; + +pub fn target() -> Target { + let mut base = base::windows_msvc::opts(); + base.cpu = "pentium4".into(); + base.max_atomic_width = Some(64); + + base.add_pre_link_args( + LinkerFlavor::Msvc(Lld::No), + &[ + // Mark all dynamic libraries and executables as compatible with the larger 4GiB address + // space available to x86 Windows binaries on x86_64. + "/LARGEADDRESSAWARE", + // Ensure the linker will only produce an image if it can also produce a table of + // the image's safe exception handlers. 
+ // https://docs.microsoft.com/en-us/cpp/build/reference/safeseh-image-has-safe-exception-handlers + "/SAFESEH", + ], + ); + // Workaround for #95429 + base.has_thread_local = false; + + Target { + llvm_target: "i686-pc-windows-msvc".into(), + pointer_width: 32, + data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\ + i64:64-f80:128-n8:16:32-a:0:32-S32" + .into(), + arch: "x86".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs new file mode 100644 index 00000000000..5a59839ebc6 --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_msvc.rs @@ -0,0 +1,18 @@ +use crate::spec::{base, Target}; + +pub fn target() -> Target { + let mut base = base::windows_msvc::opts(); + base.cpu = "x86-64".into(); + base.plt_by_default = false; + base.max_atomic_width = Some(64); + base.vendor = "win7".into(); + + Target { + llvm_target: "x86_64-win7-windows-msvc".into(), + pointer_width: 64, + data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + .into(), + arch: "x86_64".into(), + options: base, + } +} diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml index 7d098180b93..29c0d8b5ff1 100644 --- a/compiler/rustc_trait_selection/Cargo.toml +++ b/compiler/rustc_trait_selection/Cargo.toml @@ -15,6 +15,7 @@ rustc_index = { path = "../rustc_index" } rustc_infer = { path = "../rustc_infer" } rustc_macros = { path = "../rustc_macros" } rustc_middle = { path = "../rustc_middle" } +rustc_next_trait_solver = { path = "../rustc_next_trait_solver" } rustc_parse_format = { path = "../rustc_parse_format" } rustc_query_system = { path = "../rustc_query_system" } rustc_session = { path = "../rustc_session" } diff --git a/compiler/rustc_trait_selection/src/solve/alias_relate.rs b/compiler/rustc_trait_selection/src/solve/alias_relate.rs index 739bbe929b3..2e99854ddc6 100644 --- a/compiler/rustc_trait_selection/src/solve/alias_relate.rs +++ b/compiler/rustc_trait_selection/src/solve/alias_relate.rs @@ -92,7 +92,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { self.add_goal(Goal::new( self.tcx(), param_env, - ty::ProjectionPredicate { projection_ty: alias, term }, + ty::NormalizesTo { alias, term }, )); self.try_evaluate_added_goals()?; Ok(Some(self.resolve_vars_if_possible(term))) @@ -109,11 +109,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { opaque: ty::AliasTy<'tcx>, term: ty::Term<'tcx>, ) -> QueryResult<'tcx> { - self.add_goal(Goal::new( - self.tcx(), - param_env, - ty::ProjectionPredicate { projection_ty: opaque, term }, - )); + self.add_goal(Goal::new(self.tcx(), param_env, ty::NormalizesTo { alias: opaque, term })); self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) } diff --git a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs index b6861d258d1..62d62bdfd11 100644 --- a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs @@ -207,6 +207,11 @@ pub(super) trait GoalKind<'tcx>: goal: Goal<'tcx, Self>, ) -> QueryResult<'tcx>; + fn consider_builtin_async_iterator_candidate( + ecx: &mut EvalCtxt<'_, 'tcx>, + goal: Goal<'tcx, Self>, + ) -> QueryResult<'tcx>; + /// A coroutine (that doesn't come from an `async` or `gen` desugaring) is known to /// implement `Coroutine<R, Yield = Y, Return = O>`, given the resume, yield, /// and return types of the 
coroutine computed during type-checking. @@ -352,15 +357,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { num_steps: usize, ) { let tcx = self.tcx(); - let &ty::Alias(_, projection_ty) = goal.predicate.self_ty().kind() else { return }; + let &ty::Alias(_, alias) = goal.predicate.self_ty().kind() else { return }; candidates.extend(self.probe(|_| ProbeKind::NormalizedSelfTyAssembly).enter(|ecx| { if tcx.recursion_limit().value_within_limit(num_steps) { let normalized_ty = ecx.next_ty_infer(); - let normalizes_to_goal = goal.with( - tcx, - ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() }, - ); + let normalizes_to_goal = + goal.with(tcx, ty::NormalizesTo { alias, term: normalized_ty.into() }); ecx.add_goal(normalizes_to_goal); if let Err(NoSolution) = ecx.try_evaluate_added_goals() { debug!("self type normalization failed"); @@ -567,6 +570,8 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { G::consider_builtin_future_candidate(self, goal) } else if lang_items.iterator_trait() == Some(trait_def_id) { G::consider_builtin_iterator_candidate(self, goal) + } else if lang_items.async_iterator_trait() == Some(trait_def_id) { + G::consider_builtin_async_iterator_candidate(self, goal) } else if lang_items.coroutine_trait() == Some(trait_def_id) { G::consider_builtin_coroutine_candidate(self, goal) } else if lang_items.discriminant_kind_trait() == Some(trait_def_id) { diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs index da2f16e9760..7457ba837f5 100644 --- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs +++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs @@ -9,7 +9,6 @@ //! //! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html use super::{CanonicalInput, Certainty, EvalCtxt, Goal}; -use crate::solve::canonicalize::{CanonicalizeMode, Canonicalizer}; use crate::solve::{ inspect, response_no_constraints_raw, CanonicalResponse, QueryResult, Response, }; @@ -27,6 +26,7 @@ use rustc_middle::traits::solve::{ }; use rustc_middle::traits::ObligationCause; use rustc_middle::ty::{self, BoundVar, GenericArgKind, Ty, TyCtxt, TypeFoldable}; +use rustc_next_trait_solver::canonicalizer::{CanonicalizeMode, Canonicalizer}; use rustc_span::DUMMY_SP; use std::iter; use std::ops::Deref; diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/mod.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/mod.rs index 5d404d7e3e9..b3e7a63c972 100644 --- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/mod.rs @@ -103,7 +103,7 @@ pub(super) struct NestedGoals<'tcx> { /// with a fresh inference variable when we evaluate this goal. That can result /// in a trait solver cycle. This would currently result in overflow but can be /// can be unsound with more powerful coinduction in the future. - pub(super) normalizes_to_hack_goal: Option<Goal<'tcx, ty::ProjectionPredicate<'tcx>>>, + pub(super) normalizes_to_hack_goal: Option<Goal<'tcx, ty::NormalizesTo<'tcx>>>, /// The rest of the goals which have not yet processed or remain ambiguous. 
pub(super) goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>, } @@ -423,6 +423,9 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { ty::PredicateKind::ConstEquate(_, _) => { bug!("ConstEquate should not be emitted when `-Ztrait-solver=next` is active") } + ty::PredicateKind::NormalizesTo(predicate) => { + self.compute_normalizes_to_goal(Goal { param_env, predicate }) + } ty::PredicateKind::AliasRelate(lhs, rhs, direction) => self .compute_alias_relate_goal(Goal { param_env, @@ -492,10 +495,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { let unconstrained_rhs = self.next_term_infer_of_kind(goal.predicate.term); let unconstrained_goal = goal.with( tcx, - ty::ProjectionPredicate { - projection_ty: goal.predicate.projection_ty, - term: unconstrained_rhs, - }, + ty::NormalizesTo { alias: goal.predicate.alias, term: unconstrained_rhs }, ); let (_, certainty, instantiate_goals) = self.evaluate_goal( @@ -517,9 +517,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { // looking at the "has changed" return from evaluate_goal, // because we expect the `unconstrained_rhs` part of the predicate // to have changed -- that means we actually normalized successfully! - if goal.predicate.projection_ty - != self.resolve_vars_if_possible(goal.predicate.projection_ty) - { + if goal.predicate.alias != self.resolve_vars_if_possible(goal.predicate.alias) { unchanged_certainty = None; } @@ -589,9 +587,10 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { /// /// This is the case if the `term` is an inference variable in the innermost universe /// and does not occur in any other part of the predicate. + #[instrument(level = "debug", skip(self), ret)] pub(super) fn term_is_fully_unconstrained( &self, - goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, ty::NormalizesTo<'tcx>>, ) -> bool { let term_is_infer = match goal.predicate.term.unpack() { ty::TermKind::Ty(ty) => { @@ -655,7 +654,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { let mut visitor = ContainsTerm { infcx: self.infcx, term: goal.predicate.term }; term_is_infer - && goal.predicate.projection_ty.visit_with(&mut visitor).is_continue() + && goal.predicate.alias.visit_with(&mut visitor).is_continue() && goal.param_env.visit_with(&mut visitor).is_continue() } diff --git a/compiler/rustc_trait_selection/src/solve/fulfill.rs b/compiler/rustc_trait_selection/src/solve/fulfill.rs index b73ec93b824..2139210b873 100644 --- a/compiler/rustc_trait_selection/src/solve/fulfill.rs +++ b/compiler/rustc_trait_selection/src/solve/fulfill.rs @@ -108,6 +108,11 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentCtxt<'tcx> { MismatchedProjectionTypes { err: TypeError::Mismatch }, ) } + ty::PredicateKind::NormalizesTo(..) 
=> { + FulfillmentErrorCode::CodeProjectionError( + MismatchedProjectionTypes { err: TypeError::Mismatch }, + ) + } ty::PredicateKind::AliasRelate(_, _, _) => { FulfillmentErrorCode::CodeProjectionError( MismatchedProjectionTypes { err: TypeError::Mismatch }, diff --git a/compiler/rustc_trait_selection/src/solve/mod.rs b/compiler/rustc_trait_selection/src/solve/mod.rs index bf3b72caeb4..0ab099ef0c8 100644 --- a/compiler/rustc_trait_selection/src/solve/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/mod.rs @@ -30,11 +30,11 @@ use rustc_middle::ty::{ mod alias_relate; mod assembly; -mod canonicalize; mod eval_ctxt; mod fulfill; pub mod inspect; mod normalize; +mod normalizes_to; mod project_goals; mod search_graph; mod trait_goals; @@ -216,7 +216,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> { impl<'tcx> EvalCtxt<'_, 'tcx> { #[instrument(level = "debug", skip(self))] - fn set_normalizes_to_hack_goal(&mut self, goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>) { + fn set_normalizes_to_hack_goal(&mut self, goal: Goal<'tcx, ty::NormalizesTo<'tcx>>) { assert!( self.nested_goals.normalizes_to_hack_goal.is_none(), "attempted to set the projection eq hack goal when one already exists" @@ -310,17 +310,17 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { return None; } - let ty::Alias(kind, projection_ty) = *ty.kind() else { + let ty::Alias(kind, alias) = *ty.kind() else { return Some(ty); }; // We do no always define opaque types eagerly to allow non-defining uses in the defining scope. if let (DefineOpaqueTypes::No, ty::AliasKind::Opaque) = (define_opaque_types, kind) { - if let Some(def_id) = projection_ty.def_id.as_local() { + if let Some(def_id) = alias.def_id.as_local() { if self .unify_existing_opaque_tys( param_env, - OpaqueTypeKey { def_id, args: projection_ty.args }, + OpaqueTypeKey { def_id, args: alias.args }, self.next_ty_infer(), ) .is_empty() @@ -335,7 +335,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { let normalizes_to_goal = Goal::new( this.tcx(), param_env, - ty::ProjectionPredicate { projection_ty, term: normalized_ty.into() }, + ty::NormalizesTo { alias, term: normalized_ty.into() }, ); this.add_goal(normalizes_to_goal); this.try_evaluate_added_goals()?; diff --git a/compiler/rustc_trait_selection/src/solve/normalize.rs b/compiler/rustc_trait_selection/src/solve/normalize.rs index ea512ba5fa7..1e495b4d979 100644 --- a/compiler/rustc_trait_selection/src/solve/normalize.rs +++ b/compiler/rustc_trait_selection/src/solve/normalize.rs @@ -76,7 +76,7 @@ impl<'tcx> NormalizationFolder<'_, 'tcx> { tcx, self.at.cause.clone(), self.at.param_env, - ty::ProjectionPredicate { projection_ty: alias, term: new_infer_ty.into() }, + ty::NormalizesTo { alias, term: new_infer_ty.into() }, ); // Do not emit an error if normalization is known to fail but instead @@ -129,8 +129,8 @@ impl<'tcx> NormalizationFolder<'_, 'tcx> { tcx, self.at.cause.clone(), self.at.param_env, - ty::ProjectionPredicate { - projection_ty: AliasTy::new(tcx, uv.def, uv.args), + ty::NormalizesTo { + alias: AliasTy::new(tcx, uv.def, uv.args), term: new_infer_ct.into(), }, ); diff --git a/compiler/rustc_trait_selection/src/solve/project_goals/inherent_projection.rs b/compiler/rustc_trait_selection/src/solve/normalizes_to/inherent.rs index 28fe59b7f6a..c3b8ae9a943 100644 --- a/compiler/rustc_trait_selection/src/solve/project_goals/inherent_projection.rs +++ b/compiler/rustc_trait_selection/src/solve/normalizes_to/inherent.rs @@ -12,10 +12,10 @@ use super::EvalCtxt; impl<'tcx> EvalCtxt<'_, 'tcx> { pub(super) fn normalize_inherent_associated_type( &mut self, - 
goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, ty::NormalizesTo<'tcx>>, ) -> QueryResult<'tcx> { let tcx = self.tcx(); - let inherent = goal.predicate.projection_ty; + let inherent = goal.predicate.alias; let expected = goal.predicate.term.ty().expect("inherent consts are treated separately"); let impl_def_id = tcx.parent(inherent.def_id); diff --git a/compiler/rustc_trait_selection/src/solve/project_goals/mod.rs b/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs index 03823569669..2fe51b400ec 100644 --- a/compiler/rustc_trait_selection/src/solve/project_goals/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs @@ -13,20 +13,20 @@ use rustc_middle::traits::solve::{ }; use rustc_middle::traits::BuiltinImplSource; use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams}; -use rustc_middle::ty::ProjectionPredicate; +use rustc_middle::ty::NormalizesTo; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{ToPredicate, TypeVisitableExt}; use rustc_span::{sym, ErrorGuaranteed, DUMMY_SP}; -mod inherent_projection; +mod inherent; mod opaques; mod weak_types; impl<'tcx> EvalCtxt<'_, 'tcx> { #[instrument(level = "debug", skip(self), ret)] - pub(super) fn compute_projection_goal( + pub(super) fn compute_normalizes_to_goal( &mut self, - goal: Goal<'tcx, ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, NormalizesTo<'tcx>>, ) -> QueryResult<'tcx> { let def_id = goal.predicate.def_id(); match self.tcx().def_kind(def_id) { @@ -71,16 +71,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { #[instrument(level = "debug", skip(self), ret)] fn normalize_anon_const( &mut self, - goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, ty::NormalizesTo<'tcx>>, ) -> QueryResult<'tcx> { if let Some(normalized_const) = self.try_const_eval_resolve( goal.param_env, - ty::UnevaluatedConst::new( - goal.predicate.projection_ty.def_id, - goal.predicate.projection_ty.args, - ), + ty::UnevaluatedConst::new(goal.predicate.alias.def_id, goal.predicate.alias.args), self.tcx() - .type_of(goal.predicate.projection_ty.def_id) + .type_of(goal.predicate.alias.def_id) .no_bound_vars() .expect("const ty should not rely on other generics"), ) { @@ -92,13 +89,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> { } } -impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { +impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> { fn self_ty(self) -> Ty<'tcx> { self.self_ty() } fn trait_ref(self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> { - self.projection_ty.trait_ref(tcx) + self.alias.trait_ref(tcx) } fn with_self_ty(self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> Self { @@ -123,7 +120,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { ecx.instantiate_binder_with_infer(projection_pred); ecx.eq( goal.param_env, - goal.predicate.projection_ty, + goal.predicate.alias, assumption_projection_pred.projection_ty, )?; ecx.eq(goal.param_env, goal.predicate.term, assumption_projection_pred.term) @@ -132,7 +129,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { // Add GAT where clauses from the trait's definition ecx.add_goals( tcx.predicates_of(goal.predicate.def_id()) - .instantiate_own(tcx, goal.predicate.projection_ty.args) + .instantiate_own(tcx, goal.predicate.alias.args) .map(|(pred, _)| goal.with(tcx, pred)), ); @@ -148,12 +145,12 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { fn consider_impl_candidate( ecx: &mut EvalCtxt<'_, 'tcx>, - goal: Goal<'tcx, ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, 
NormalizesTo<'tcx>>, impl_def_id: DefId, ) -> Result<Candidate<'tcx>, NoSolution> { let tcx = ecx.tcx(); - let goal_trait_ref = goal.predicate.projection_ty.trait_ref(tcx); + let goal_trait_ref = goal.predicate.alias.trait_ref(tcx); let impl_trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::ForLookup }; if !drcx.args_may_unify(goal_trait_ref.args, impl_trait_ref.skip_binder().args) { @@ -177,7 +174,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { // Add GAT where clauses from the trait's definition ecx.add_goals( tcx.predicates_of(goal.predicate.def_id()) - .instantiate_own(tcx, goal.predicate.projection_ty.args) + .instantiate_own(tcx, goal.predicate.alias.args) .map(|(pred, _)| goal.with(tcx, pred)), ); @@ -202,7 +199,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { tcx, guar, tcx.type_of(goal.predicate.def_id()) - .instantiate(tcx, goal.predicate.projection_ty.args), + .instantiate(tcx, goal.predicate.alias.args), ) .into(), ty::AssocKind::Type => Ty::new_error(tcx, guar).into(), @@ -227,11 +224,8 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { // // And then map these args to the args of the defining impl of `Assoc`, going // from `[u32, u64]` to `[u32, i32, u64]`. - let impl_args_with_gat = goal.predicate.projection_ty.args.rebase_onto( - tcx, - goal_trait_ref.def_id, - impl_args, - ); + let impl_args_with_gat = + goal.predicate.alias.args.rebase_onto(tcx, goal_trait_ref.def_id, impl_args); let args = ecx.translate_args( goal.param_env, impl_def_id, @@ -516,6 +510,40 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> { ) } + fn consider_builtin_async_iterator_candidate( + ecx: &mut EvalCtxt<'_, 'tcx>, + goal: Goal<'tcx, Self>, + ) -> QueryResult<'tcx> { + let self_ty = goal.predicate.self_ty(); + let ty::Coroutine(def_id, args, _) = *self_ty.kind() else { + return Err(NoSolution); + }; + + // Coroutines are not AsyncIterators unless they come from `gen` desugaring + let tcx = ecx.tcx(); + if !tcx.coroutine_is_async_gen(def_id) { + return Err(NoSolution); + } + + ecx.probe_misc_candidate("builtin AsyncIterator kind").enter(|ecx| { + // Take `AsyncIterator<Item = I>` and turn it into the corresponding + // coroutine yield ty `Poll<Option<I>>`. 
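// Illustrative sketch, not from this diff: the `Poll<Option<Item>>` shape mentioned in the
// comment above and constructed just below, spelled with the stable `std::task::Poll` type.
// `Counter` and its inherent `poll_next` method are made up for the example; the real
// `AsyncIterator::poll_next` additionally takes a pinned receiver and a `&mut Context<'_>`.
use std::task::Poll;

struct Counter {
    next: u32,
    limit: u32,
}

impl Counter {
    // `Poll::Ready(Some(item))` yields an item, `Poll::Ready(None)` signals exhaustion,
    // and `Poll::Pending` would mean "not ready yet".
    fn poll_next(&mut self) -> Poll<Option<u32>> {
        if self.next < self.limit {
            let item = self.next;
            self.next += 1;
            Poll::Ready(Some(item))
        } else {
            Poll::Ready(None)
        }
    }
}

fn main() {
    let mut c = Counter { next: 0, limit: 3 };
    while let Poll::Ready(Some(x)) = c.poll_next() {
        println!("{x}");
    }
}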
+ let expected_ty = Ty::new_adt( + tcx, + tcx.adt_def(tcx.require_lang_item(LangItem::Poll, None)), + tcx.mk_args(&[Ty::new_adt( + tcx, + tcx.adt_def(tcx.require_lang_item(LangItem::Option, None)), + tcx.mk_args(&[goal.predicate.term.into()]), + ) + .into()]), + ); + let yield_ty = args.as_coroutine().yield_ty(); + ecx.eq(goal.param_env, expected_ty, yield_ty)?; + ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + }) + } + fn consider_builtin_coroutine_candidate( ecx: &mut EvalCtxt<'_, 'tcx>, goal: Goal<'tcx, Self>, diff --git a/compiler/rustc_trait_selection/src/solve/project_goals/opaques.rs b/compiler/rustc_trait_selection/src/solve/normalizes_to/opaques.rs index 1fde129c3a0..b5d1aa06e4e 100644 --- a/compiler/rustc_trait_selection/src/solve/project_goals/opaques.rs +++ b/compiler/rustc_trait_selection/src/solve/normalizes_to/opaques.rs @@ -12,10 +12,10 @@ use crate::solve::{EvalCtxt, SolverMode}; impl<'tcx> EvalCtxt<'_, 'tcx> { pub(super) fn normalize_opaque_type( &mut self, - goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, ty::NormalizesTo<'tcx>>, ) -> QueryResult<'tcx> { let tcx = self.tcx(); - let opaque_ty = goal.predicate.projection_ty; + let opaque_ty = goal.predicate.alias; let expected = goal.predicate.term.ty().expect("no such thing as an opaque const"); match (goal.param_env.reveal(), self.solver_mode()) { diff --git a/compiler/rustc_trait_selection/src/solve/project_goals/weak_types.rs b/compiler/rustc_trait_selection/src/solve/normalizes_to/weak_types.rs index 54de32cf618..8d2bbec6d8b 100644 --- a/compiler/rustc_trait_selection/src/solve/project_goals/weak_types.rs +++ b/compiler/rustc_trait_selection/src/solve/normalizes_to/weak_types.rs @@ -11,10 +11,10 @@ use super::EvalCtxt; impl<'tcx> EvalCtxt<'_, 'tcx> { pub(super) fn normalize_weak_type( &mut self, - goal: Goal<'tcx, ty::ProjectionPredicate<'tcx>>, + goal: Goal<'tcx, ty::NormalizesTo<'tcx>>, ) -> QueryResult<'tcx> { let tcx = self.tcx(); - let weak_ty = goal.predicate.projection_ty; + let weak_ty = goal.predicate.alias; let expected = goal.predicate.term.ty().expect("no such thing as a const alias"); let actual = tcx.type_of(weak_ty.def_id).instantiate(tcx, weak_ty.args); diff --git a/compiler/rustc_trait_selection/src/solve/project_goals.rs b/compiler/rustc_trait_selection/src/solve/project_goals.rs new file mode 100644 index 00000000000..0b80969c307 --- /dev/null +++ b/compiler/rustc_trait_selection/src/solve/project_goals.rs @@ -0,0 +1,23 @@ +use super::EvalCtxt; +use rustc_middle::traits::solve::{Certainty, Goal, QueryResult}; +use rustc_middle::ty::{self, ProjectionPredicate}; + +impl<'tcx> EvalCtxt<'_, 'tcx> { + #[instrument(level = "debug", skip(self), ret)] + pub(super) fn compute_projection_goal( + &mut self, + goal: Goal<'tcx, ProjectionPredicate<'tcx>>, + ) -> QueryResult<'tcx> { + match goal.predicate.term.unpack() { + ty::TermKind::Ty(term) => { + let alias = goal.predicate.projection_ty.to_ty(self.tcx()); + self.eq(goal.param_env, alias, term)?; + self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + } + // FIXME(associated_const_equality): actually do something here. 
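// Illustrative sketch, not from this diff: at the surface level, the projection goals handled
// above correspond to bounds such as `Iterator<Item = u32>`, i.e. the predicate
// `<T as Iterator>::Item == u32`. `sum_first_two` is a made-up example function.
fn sum_first_two<T: Iterator<Item = u32>>(mut it: T) -> u32 {
    // Because the projection predicate constrains the item type, `next()` yields `u32`s here.
    it.next().unwrap_or(0) + it.next().unwrap_or(0)
}

fn main() {
    assert_eq!(sum_first_two(vec![1u32, 2, 3].into_iter()), 3);
}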
+ ty::TermKind::Const(_) => { + self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + } + } + } +} diff --git a/compiler/rustc_trait_selection/src/solve/trait_goals.rs b/compiler/rustc_trait_selection/src/solve/trait_goals.rs index 95712da3c5e..5807f7c6153 100644 --- a/compiler/rustc_trait_selection/src/solve/trait_goals.rs +++ b/compiler/rustc_trait_selection/src/solve/trait_goals.rs @@ -370,6 +370,30 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> { ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) } + fn consider_builtin_async_iterator_candidate( + ecx: &mut EvalCtxt<'_, 'tcx>, + goal: Goal<'tcx, Self>, + ) -> QueryResult<'tcx> { + if goal.predicate.polarity != ty::ImplPolarity::Positive { + return Err(NoSolution); + } + + let ty::Coroutine(def_id, _, _) = *goal.predicate.self_ty().kind() else { + return Err(NoSolution); + }; + + // Coroutines are not iterators unless they come from `gen` desugaring + let tcx = ecx.tcx(); + if !tcx.coroutine_is_async_gen(def_id) { + return Err(NoSolution); + } + + // Gen coroutines unconditionally implement `Iterator` + // Technically, we need to check that the iterator output type is Sized, + // but that's already proven by the coroutines being WF. + ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) + } + fn consider_builtin_coroutine_candidate( ecx: &mut EvalCtxt<'_, 'tcx>, goal: Goal<'tcx, Self>, diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs index d8bf97138cd..0b8e6e2ef8b 100644 --- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs +++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs @@ -820,6 +820,7 @@ impl<'tcx> AutoTraitFinder<'tcx> { // the `ParamEnv`. ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(..)) | ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) + | ty::PredicateKind::NormalizesTo(..) | ty::PredicateKind::AliasRelate(..) | ty::PredicateKind::ObjectSafe(..) | ty::PredicateKind::Subtype(..) 
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 6b231a30ea7..95ffd07e397 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -2587,6 +2587,23 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { CoroutineKind::Async(CoroutineSource::Closure) => { format!("future created by async closure is not {trait_name}") } + CoroutineKind::AsyncGen(CoroutineSource::Fn) => self + .tcx + .parent(coroutine_did) + .as_local() + .map(|parent_did| self.tcx.local_def_id_to_hir_id(parent_did)) + .and_then(|parent_hir_id| hir.opt_name(parent_hir_id)) + .map(|name| { + format!("async iterator returned by `{name}` is not {trait_name}") + })?, + CoroutineKind::AsyncGen(CoroutineSource::Block) => { + format!("async iterator created by async gen block is not {trait_name}") + } + CoroutineKind::AsyncGen(CoroutineSource::Closure) => { + format!( + "async iterator created by async gen closure is not {trait_name}" + ) + } CoroutineKind::Gen(CoroutineSource::Fn) => self .tcx .parent(coroutine_did) @@ -3129,6 +3146,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { | Some(hir::CoroutineKind::Coroutine) | Some(hir::CoroutineKind::Gen(_)) => "yield", Some(hir::CoroutineKind::Async(..)) => "await", + Some(hir::CoroutineKind::AsyncGen(_)) => "yield`/`await", }; err.note(format!( "all values live across `{what}` must have a statically known size" diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs index 090706070eb..8fa0dceda87 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/type_err_ctxt_ext.rs @@ -854,6 +854,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> { ty::PredicateKind::Ambiguous => span_bug!(span, "ambiguous"), + ty::PredicateKind::NormalizesTo(..) => span_bug!( + span, + "NormalizesTo predicate should never be the predicate cause of a SelectionError" + ), + ty::PredicateKind::AliasRelate(..) 
=> span_bug!( span, "AliasRelate predicate should never be the predicate cause of a SelectionError" @@ -1916,6 +1921,9 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> { hir::CoroutineKind::Async(hir::CoroutineSource::Block) => "an async block", hir::CoroutineKind::Async(hir::CoroutineSource::Fn) => "an async function", hir::CoroutineKind::Async(hir::CoroutineSource::Closure) => "an async closure", + hir::CoroutineKind::AsyncGen(hir::CoroutineSource::Block) => "an async gen block", + hir::CoroutineKind::AsyncGen(hir::CoroutineSource::Fn) => "an async gen function", + hir::CoroutineKind::AsyncGen(hir::CoroutineSource::Closure) => "an async gen closure", hir::CoroutineKind::Gen(hir::CoroutineSource::Block) => "a gen block", hir::CoroutineKind::Gen(hir::CoroutineSource::Fn) => "a gen function", hir::CoroutineKind::Gen(hir::CoroutineSource::Closure) => "a gen closure", diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs index fd39fce9dd1..9cbddd2bb2b 100644 --- a/compiler/rustc_trait_selection/src/traits/fulfill.rs +++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs @@ -360,8 +360,11 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> { ProcessResult::Changed(mk_pending(vec![obligation.with(infcx.tcx, pred)])) } ty::PredicateKind::Ambiguous => ProcessResult::Unchanged, + ty::PredicateKind::NormalizesTo(..) => { + bug!("NormalizesTo is only used by the new solver") + } ty::PredicateKind::AliasRelate(..) => { - bug!("AliasRelate is only used for new solver") + bug!("AliasRelate is only used by the new solver") } }, Some(pred) => match pred { @@ -412,8 +415,11 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> { } ty::PredicateKind::Ambiguous => ProcessResult::Unchanged, + ty::PredicateKind::NormalizesTo(..) => { + bug!("NormalizesTo is only used by the new solver") + } ty::PredicateKind::AliasRelate(..) => { - bug!("AliasRelate is only used for new solver") + bug!("AliasRelate is only used by the new solver") } // General case overflow check. 
Allow `process_trait_obligation` diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index 5b0829b5732..a08e35b566f 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -1823,11 +1823,18 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( let self_ty = selcx.infcx.shallow_resolve(obligation.predicate.self_ty()); let lang_items = selcx.tcx().lang_items(); - if [lang_items.coroutine_trait(), lang_items.future_trait(), lang_items.iterator_trait()].contains(&Some(trait_ref.def_id)) - || selcx.tcx().fn_trait_kind_from_def_id(trait_ref.def_id).is_some() + if [ + lang_items.coroutine_trait(), + lang_items.future_trait(), + lang_items.iterator_trait(), + lang_items.async_iterator_trait(), + lang_items.fn_trait(), + lang_items.fn_mut_trait(), + lang_items.fn_once_trait(), + ].contains(&Some(trait_ref.def_id)) { true - } else if lang_items.discriminant_kind_trait() == Some(trait_ref.def_id) { + }else if lang_items.discriminant_kind_trait() == Some(trait_ref.def_id) { match self_ty.kind() { ty::Bool | ty::Char @@ -2042,6 +2049,8 @@ fn confirm_select_candidate<'cx, 'tcx>( confirm_future_candidate(selcx, obligation, data) } else if lang_items.iterator_trait() == Some(trait_def_id) { confirm_iterator_candidate(selcx, obligation, data) + } else if lang_items.async_iterator_trait() == Some(trait_def_id) { + confirm_async_iterator_candidate(selcx, obligation, data) } else if selcx.tcx().fn_trait_kind_from_def_id(trait_def_id).is_some() { if obligation.predicate.self_ty().is_closure() { confirm_closure_candidate(selcx, obligation, data) @@ -2203,6 +2212,57 @@ fn confirm_iterator_candidate<'cx, 'tcx>( .with_addl_obligations(obligations) } +fn confirm_async_iterator_candidate<'cx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + nested: Vec<PredicateObligation<'tcx>>, +) -> Progress<'tcx> { + let ty::Coroutine(_, args, _) = + selcx.infcx.shallow_resolve(obligation.predicate.self_ty()).kind() + else { + unreachable!() + }; + let gen_sig = args.as_coroutine().sig(); + let Normalized { value: gen_sig, obligations } = normalize_with_depth( + selcx, + obligation.param_env, + obligation.cause.clone(), + obligation.recursion_depth + 1, + gen_sig, + ); + + debug!(?obligation, ?gen_sig, ?obligations, "confirm_async_iterator_candidate"); + + let tcx = selcx.tcx(); + let iter_def_id = tcx.require_lang_item(LangItem::AsyncIterator, None); + + let (trait_ref, yield_ty) = super::util::async_iterator_trait_ref_and_outputs( + tcx, + iter_def_id, + obligation.predicate.self_ty(), + gen_sig, + ); + + debug_assert_eq!(tcx.associated_item(obligation.predicate.def_id).name, sym::Item); + + let ty::Adt(_poll_adt, args) = *yield_ty.kind() else { + bug!(); + }; + let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { + bug!(); + }; + let item_ty = args.type_at(0); + + let predicate = ty::ProjectionPredicate { + projection_ty: ty::AliasTy::new(tcx, obligation.predicate.def_id, trait_ref.args), + term: item_ty.into(), + }; + + confirm_param_env_candidate(selcx, obligation, ty::Binder::dummy(predicate), false) + .with_addl_obligations(nested) + .with_addl_obligations(obligations) +} + fn confirm_builtin_candidate<'cx, 'tcx>( selcx: &mut SelectionContext<'cx, 'tcx>, obligation: &ProjectionTyObligation<'tcx>, diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs 
b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs index 56844f39478..f9b8ea32d89 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs @@ -123,9 +123,9 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( Some(pred) => pred, }; match pred { - ty::PredicateKind::Clause(ty::ClauseKind::Trait(..)) // FIXME(const_generics): Make sure that `<'a, 'b, const N: &'a &'b u32>` is sound // if we ever support that + ty::PredicateKind::Clause(ty::ClauseKind::Trait(..)) | ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) | ty::PredicateKind::Subtype(..) | ty::PredicateKind::Coerce(..) @@ -134,8 +134,8 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( | ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(..)) | ty::PredicateKind::ConstEquate(..) | ty::PredicateKind::Ambiguous - | ty::PredicateKind::AliasRelate(..) - => {} + | ty::PredicateKind::NormalizesTo(..) + | ty::PredicateKind::AliasRelate(..) => {} // We need to search through *all* WellFormed predicates ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(arg)) => { @@ -143,10 +143,9 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( } // We need to register region relationships - ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate( - r_a, - r_b, - ))) => outlives_bounds.push(ty::OutlivesPredicate(r_a.into(), r_b)), + ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives( + ty::OutlivesPredicate(r_a, r_b), + )) => outlives_bounds.push(ty::OutlivesPredicate(r_a.into(), r_b)), ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate( ty_a, diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs index e87e585ef0b..cd09aaff2d9 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs @@ -157,18 +157,10 @@ where } let mut region_constraints = QueryRegionConstraints::default(); - let (output, error_info, mut obligations) = - Q::fully_perform_into(self, infcx, &mut region_constraints) - .map_err(|_| { - infcx.tcx.sess.span_delayed_bug(span, format!("error performing {self:?}")) - }) - .and_then(|(output, error_info, obligations, certainty)| match certainty { - Certainty::Proven => Ok((output, error_info, obligations)), - Certainty::Ambiguous => Err(infcx - .tcx - .sess - .span_delayed_bug(span, format!("ambiguity performing {self:?}"))), - })?; + let (output, error_info, mut obligations, _) = + Q::fully_perform_into(self, infcx, &mut region_constraints).map_err(|_| { + infcx.tcx.sess.span_delayed_bug(span, format!("error performing {self:?}")) + })?; // Typically, instantiating NLL query results does not // create obligations. 
However, in some cases there diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 5a559bb5fa0..c7d0ab71644 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -112,6 +112,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { self.assemble_future_candidates(obligation, &mut candidates); } else if lang_items.iterator_trait() == Some(def_id) { self.assemble_iterator_candidates(obligation, &mut candidates); + } else if lang_items.async_iterator_trait() == Some(def_id) { + self.assemble_async_iterator_candidates(obligation, &mut candidates); } self.assemble_closure_candidates(obligation, &mut candidates); @@ -258,6 +260,34 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } + fn assemble_async_iterator_candidates( + &mut self, + obligation: &PolyTraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) { + let self_ty = obligation.self_ty().skip_binder(); + if let ty::Coroutine(did, args, _) = *self_ty.kind() { + // gen constructs get lowered to a special kind of coroutine that + // should directly `impl AsyncIterator`. + if self.tcx().coroutine_is_async_gen(did) { + debug!(?self_ty, ?obligation, "assemble_iterator_candidates",); + + // Can only confirm this candidate if we have constrained + // the `Yield` type to at least `Poll<Option<?0>>`.. + let ty::Adt(_poll_def, args) = *args.as_coroutine().yield_ty().kind() else { + candidates.ambiguous = true; + return; + }; + let ty::Adt(_option_def, _) = *args.type_at(0).kind() else { + candidates.ambiguous = true; + return; + }; + + candidates.vec.push(AsyncIteratorCandidate); + } + } + } + /// Checks for the artificial impl that the compiler will create for an obligation like `X : /// FnMut<..>` where `X` is a closure type. /// @@ -872,9 +902,18 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ) { // If the predicate is `~const Destruct` in a non-const environment, we don't actually need // to check anything. We'll short-circuit checking any obligations in confirmation, too. - // FIXME(effects) - if true { - candidates.vec.push(ConstDestructCandidate(None)); + let Some(host_effect_index) = + self.tcx().generics_of(obligation.predicate.def_id()).host_effect_index + else { + candidates.vec.push(BuiltinCandidate { has_nested: false }); + return; + }; + // If the obligation has `host = true`, then the obligation is non-const and it's always + // trivially implemented. 
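// Illustrative sketch, not from this diff: a toy model of the branch structure described in
// the comment above and implemented just below. `HostEffect` and `trivially_satisfied` are
// made-up names; the real check reads the const-ness ("host effect") argument out of the
// trait's generic arguments.
#[derive(Clone, Copy, PartialEq)]
enum HostEffect {
    True,  // non-const environment: the obligation is trivially satisfied
    False, // const environment: further checking is required
}

fn trivially_satisfied(host_effect_index: Option<usize>, args: &[HostEffect]) -> bool {
    match host_effect_index {
        // The trait has no host effect parameter at all, so it is not const-aware
        // and the builtin candidate always applies.
        None => true,
        // Otherwise the obligation is trivial exactly when `host = true`.
        Some(i) => args[i] == HostEffect::True,
    }
}

fn main() {
    assert!(trivially_satisfied(None, &[]));
    assert!(trivially_satisfied(Some(0), &[HostEffect::True]));
    assert!(!trivially_satisfied(Some(0), &[HostEffect::False]));
}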
+ if obligation.predicate.skip_binder().trait_ref.args.const_at(host_effect_index) + == self.tcx().consts.true_ + { + candidates.vec.push(BuiltinCandidate { has_nested: false }); return; } diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index fc4f6f37621..4a342a7f6b1 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -98,6 +98,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ImplSource::Builtin(BuiltinImplSource::Misc, vtable_iterator) } + AsyncIteratorCandidate => { + let vtable_iterator = self.confirm_async_iterator_candidate(obligation)?; + ImplSource::Builtin(BuiltinImplSource::Misc, vtable_iterator) + } + FnPointerCandidate { is_const } => { let data = self.confirm_fn_pointer_candidate(obligation, is_const)?; ImplSource::Builtin(BuiltinImplSource::Misc, data) @@ -813,6 +818,35 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { Ok(nested) } + fn confirm_async_iterator_candidate( + &mut self, + obligation: &PolyTraitObligation<'tcx>, + ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> { + // Okay to skip binder because the args on coroutine types never + // touch bound regions, they just capture the in-scope + // type/region parameters. + let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); + let ty::Coroutine(coroutine_def_id, args, _) = *self_ty.kind() else { + bug!("closure candidate for non-closure {:?}", obligation); + }; + + debug!(?obligation, ?coroutine_def_id, ?args, "confirm_async_iterator_candidate"); + + let gen_sig = args.as_coroutine().sig(); + + let (trait_ref, _) = super::util::async_iterator_trait_ref_and_outputs( + self.tcx(), + obligation.predicate.def_id(), + obligation.predicate.no_bound_vars().expect("iterator has no bound vars").self_ty(), + gen_sig, + ); + + let nested = self.confirm_poly_trait_refs(obligation, ty::Binder::dummy(trait_ref))?; + debug!(?trait_ref, ?nested, "iterator candidate obligations"); + + Ok(nested) + } + #[instrument(skip(self), level = "debug")] fn confirm_closure_candidate( &mut self, @@ -1172,11 +1206,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligation: &PolyTraitObligation<'tcx>, impl_def_id: Option<DefId>, ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> { - // `~const Destruct` in a non-const environment is always trivially true, since our type is `Drop` - // FIXME(effects) - if true { - return Ok(vec![]); - } + let Some(host_effect_index) = + self.tcx().generics_of(obligation.predicate.def_id()).host_effect_index + else { + bug!() + }; + let host_effect_param: ty::GenericArg<'tcx> = + obligation.predicate.skip_binder().trait_ref.args.const_at(host_effect_index).into(); let drop_trait = self.tcx().require_lang_item(LangItem::Drop, None); @@ -1284,7 +1320,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { self.tcx(), LangItem::Destruct, cause.span, - [nested_ty], + [nested_ty.into(), host_effect_param], ), polarity: ty::ImplPolarity::Positive, }), @@ -1310,7 +1346,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { self.tcx(), LangItem::Destruct, cause.span, - [nested_ty], + [nested_ty.into(), host_effect_param], ), polarity: ty::ImplPolarity::Positive, }); diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 0d3ec41f511..7f31a2529f5 100644 --- 
a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -990,8 +990,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } } + ty::PredicateKind::NormalizesTo(..) => { + bug!("NormalizesTo is only used by the new solver") + } ty::PredicateKind::AliasRelate(..) => { - bug!("AliasRelate is only used for new solver") + bug!("AliasRelate is only used by the new solver") } ty::PredicateKind::Ambiguous => Ok(EvaluatedToAmbig), ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, ty)) => { @@ -1872,6 +1875,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate @@ -1901,6 +1905,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate @@ -1936,6 +1941,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate @@ -1951,6 +1957,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate @@ -2058,6 +2065,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate @@ -2069,6 +2077,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { | CoroutineCandidate | FutureCandidate | IteratorCandidate + | AsyncIteratorCandidate | FnPointerCandidate { .. } | BuiltinObjectCandidate | BuiltinUnsizeCandidate diff --git a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs index 9d6be768901..32de8feda81 100644 --- a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs +++ b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs @@ -25,8 +25,7 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> { // FIXME(-Ztrait-solver=next): correctly handle // overflow here. 
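// Illustrative sketch, not from this diff: a toy model of the bounded normalization loop that
// follows, peeling one alias layer per iteration and stopping at a rigid type, with the fixed
// `256` bound standing in for a real overflow/recursion check. The string-keyed `defs` table
// is made up for the example.
use std::collections::HashMap;

fn structurally_normalize<'a>(defs: &'a HashMap<&'a str, &'a str>, mut ty: &'a str) -> &'a str {
    for _ in 0..256 {
        match defs.get(ty) {
            Some(&next) => ty = next, // still an alias: replace it with its definition
            None => break,            // rigid type: nothing left to normalize
        }
    }
    ty
}

fn main() {
    let defs = HashMap::from([("Alias1", "Alias2"), ("Alias2", "u32")]);
    assert_eq!(structurally_normalize(&defs, "Alias1"), "u32");
}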
for _ in 0..256 { - let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) = *ty.kind() - else { + let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, alias) = *ty.kind() else { break; }; @@ -38,10 +37,7 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> { self.infcx.tcx, self.cause.clone(), self.param_env, - ty::Binder::dummy(ty::ProjectionPredicate { - projection_ty, - term: new_infer_ty.into(), - }), + ty::NormalizesTo { alias, term: new_infer_ty.into() }, ); if self.infcx.predicate_may_hold(&obligation) { fulfill_cx.register_predicate_obligation(self.infcx, obligation); diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs index 5574badf238..98da3bc2fe9 100644 --- a/compiler/rustc_trait_selection/src/traits/util.rs +++ b/compiler/rustc_trait_selection/src/traits/util.rs @@ -308,6 +308,17 @@ pub fn iterator_trait_ref_and_outputs<'tcx>( (trait_ref, sig.yield_ty) } +pub fn async_iterator_trait_ref_and_outputs<'tcx>( + tcx: TyCtxt<'tcx>, + async_iterator_def_id: DefId, + self_ty: Ty<'tcx>, + sig: ty::GenSig<'tcx>, +) -> (ty::TraitRef<'tcx>, Ty<'tcx>) { + assert!(!self_ty.has_escaping_bound_vars()); + let trait_ref = ty::TraitRef::new(tcx, async_iterator_def_id, [self_ty]); + (trait_ref, sig.yield_ty) +} + pub fn impl_item_is_final(tcx: TyCtxt<'_>, assoc_item: &ty::AssocItem) -> bool { assoc_item.defaultness(tcx).is_final() && tcx.defaultness(assoc_item.container_id(tcx)).is_final() diff --git a/compiler/rustc_traits/src/normalize_erasing_regions.rs b/compiler/rustc_traits/src/normalize_erasing_regions.rs index 06486a100a9..0576fe01027 100644 --- a/compiler/rustc_traits/src/normalize_erasing_regions.rs +++ b/compiler/rustc_traits/src/normalize_erasing_regions.rs @@ -55,6 +55,7 @@ fn not_outlives_predicate(p: ty::Predicate<'_>) -> bool { ty::PredicateKind::Clause(ty::ClauseKind::Trait(..)) | ty::PredicateKind::Clause(ty::ClauseKind::Projection(..)) | ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) + | ty::PredicateKind::NormalizesTo(..) | ty::PredicateKind::AliasRelate(..) | ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(..)) | ty::PredicateKind::ObjectSafe(..) diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index a58e98ce99c..a5f11ca23e1 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -119,9 +119,9 @@ fn fn_sig_for_fn_abi<'tcx>( // unlike for all other coroutine kinds. env_ty } - hir::CoroutineKind::Async(_) | hir::CoroutineKind::Coroutine => { - Ty::new_adt(tcx, pin_adt_ref, pin_args) - } + hir::CoroutineKind::Async(_) + | hir::CoroutineKind::AsyncGen(_) + | hir::CoroutineKind::Coroutine => Ty::new_adt(tcx, pin_adt_ref, pin_args), }; // The `FnSig` and the `ret_ty` here is for a coroutines main @@ -168,6 +168,30 @@ fn fn_sig_for_fn_abi<'tcx>( (None, ret_ty) } + hir::CoroutineKind::AsyncGen(_) => { + // The signature should be + // `AsyncIterator::poll_next(_, &mut Context<'_>) -> Poll<Option<Output>>` + assert_eq!(sig.return_ty, tcx.types.unit); + + // Yield type is already `Poll<Option<yield_ty>>` + let ret_ty = sig.yield_ty; + + // We have to replace the `ResumeTy` that is used for type and borrow checking + // with `&mut Context<'_>` which is used in codegen. 
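// Illustrative sketch, not from this diff: the shape of the `poll_next` signature described
// above, spelled with stable `std` types. `PendingForever` is a made-up type; the real method
// is generated by the compiler for `async gen` coroutines.
use std::pin::Pin;
use std::task::{Context, Poll};

struct PendingForever;

impl PendingForever {
    // `(self, &mut Context<'_>) -> Poll<Option<Item>>`, with the return type already being
    // the full `Poll<Option<_>>` rather than a bare item.
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<u32>> {
        Poll::Pending
    }
}

fn main() {
    let _ = PendingForever;
}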
+ #[cfg(debug_assertions)] + { + if let ty::Adt(resume_ty_adt, _) = sig.resume_ty.kind() { + let expected_adt = + tcx.adt_def(tcx.require_lang_item(LangItem::ResumeTy, None)); + assert_eq!(*resume_ty_adt, expected_adt); + } else { + panic!("expected `ResumeTy`, found `{:?}`", sig.resume_ty); + }; + } + let context_mut_ref = Ty::new_task_context(tcx); + + (Some(context_mut_ref), ret_ty) + } hir::CoroutineKind::Coroutine => { // The signature should be `Coroutine::resume(_, Resume) -> CoroutineState<Yield, Return>` let state_did = tcx.require_lang_item(LangItem::CoroutineState, None); diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index a0f01d9eca9..f1c9bb23e5d 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -271,6 +271,21 @@ fn resolve_associated_item<'tcx>( debug_assert!(tcx.defaultness(trait_item_id).has_value()); Some(Instance::new(trait_item_id, rcvr_args)) } + } else if Some(trait_ref.def_id) == lang_items.async_iterator_trait() { + let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { + bug!() + }; + + if cfg!(debug_assertions) && tcx.item_name(trait_item_id) != sym::poll_next { + span_bug!( + tcx.def_span(coroutine_def_id), + "no definition for `{trait_ref}::{}` for built-in coroutine type", + tcx.item_name(trait_item_id) + ) + } + + // `AsyncIterator::poll_next` is generated by the compiler. + Some(Instance { def: ty::InstanceDef::Item(coroutine_def_id), args }) } else if Some(trait_ref.def_id) == lang_items.coroutine_trait() { let ty::Coroutine(coroutine_def_id, args, _) = *rcvr_args.type_at(0).kind() else { bug!() diff --git a/compiler/rustc_type_ir/src/canonical.rs b/compiler/rustc_type_ir/src/canonical.rs index d2768703297..572c6f201d3 100644 --- a/compiler/rustc_type_ir/src/canonical.rs +++ b/compiler/rustc_type_ir/src/canonical.rs @@ -4,7 +4,7 @@ use std::ops::ControlFlow; use crate::fold::{FallibleTypeFolder, TypeFoldable}; use crate::visit::{TypeVisitable, TypeVisitor}; -use crate::{Interner, Placeholder, UniverseIndex}; +use crate::{Interner, PlaceholderLike, UniverseIndex}; /// A "canonicalized" type `V` is one where all free inference /// variables have been rewritten to "canonical vars". 
These are @@ -157,7 +157,7 @@ where } impl<I: Interner> CanonicalVarInfo<I> { - pub fn universe(&self) -> UniverseIndex { + pub fn universe(self) -> UniverseIndex { self.kind.universe() } @@ -305,11 +305,11 @@ where } impl<I: Interner> CanonicalVarKind<I> { - pub fn universe(&self) -> UniverseIndex { + pub fn universe(self) -> UniverseIndex { match self { - CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) => *ui, - CanonicalVarKind::Region(ui) => *ui, - CanonicalVarKind::Const(ui, _) => *ui, + CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) => ui, + CanonicalVarKind::Region(ui) => ui, + CanonicalVarKind::Const(ui, _) => ui, CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe(), CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe(), CanonicalVarKind::PlaceholderConst(placeholder, _) => placeholder.universe(), diff --git a/compiler/rustc_type_ir/src/const_kind.rs b/compiler/rustc_type_ir/src/const_kind.rs index 409033a2d8d..879de58f100 100644 --- a/compiler/rustc_type_ir/src/const_kind.rs +++ b/compiler/rustc_type_ir/src/const_kind.rs @@ -81,7 +81,7 @@ impl<I: Interner> DebugWithInfcx<I> for ConstKind<I> { match this.data { Param(param) => write!(f, "{param:?}"), Infer(var) => write!(f, "{:?}", &this.wrap(var)), - Bound(debruijn, var) => crate::debug_bound_var(f, *debruijn, var.clone()), + Bound(debruijn, var) => crate::debug_bound_var(f, *debruijn, var), Placeholder(placeholder) => write!(f, "{placeholder:?}"), Unevaluated(uv) => { write!(f, "{:?}", &this.wrap(uv)) @@ -146,15 +146,15 @@ impl<I: Interner> DebugWithInfcx<I> for InferConst { this: WithInfcx<'_, Infcx, &Self>, f: &mut core::fmt::Formatter<'_>, ) -> core::fmt::Result { - match this.infcx.universe_of_ct(*this.data) { - None => write!(f, "{:?}", this.data), - Some(universe) => match *this.data { - InferConst::Var(vid) => write!(f, "?{}_{}c", vid.index(), universe.index()), - InferConst::EffectVar(vid) => write!(f, "?{}_{}e", vid.index(), universe.index()), - InferConst::Fresh(_) => { - unreachable!() - } + match *this.data { + InferConst::Var(vid) => match this.infcx.universe_of_ct(vid) { + None => write!(f, "{:?}", this.data), + Some(universe) => write!(f, "?{}_{}c", vid.index(), universe.index()), }, + InferConst::EffectVar(vid) => write!(f, "?{}e", vid.index()), + InferConst::Fresh(_) => { + unreachable!() + } } } } diff --git a/compiler/rustc_type_ir/src/debug.rs b/compiler/rustc_type_ir/src/debug.rs index db29ec9da8c..8998001ec20 100644 --- a/compiler/rustc_type_ir/src/debug.rs +++ b/compiler/rustc_type_ir/src/debug.rs @@ -1,35 +1,50 @@ -use crate::{InferConst, InferTy, Interner, UniverseIndex}; +use crate::{ConstVid, InferCtxtLike, Interner, TyVid, UniverseIndex}; use core::fmt; use std::marker::PhantomData; -pub trait InferCtxtLike { - type Interner: Interner; +pub struct NoInfcx<I>(PhantomData<I>); - fn universe_of_ty(&self, ty: InferTy) -> Option<UniverseIndex>; +impl<I: Interner> InferCtxtLike for NoInfcx<I> { + type Interner = I; - fn universe_of_lt( - &self, - lt: <Self::Interner as Interner>::InferRegion, - ) -> Option<UniverseIndex>; + fn interner(&self) -> Self::Interner { + unreachable!() + } - fn universe_of_ct(&self, ct: InferConst) -> Option<UniverseIndex>; -} + fn universe_of_ty(&self, _ty: TyVid) -> Option<UniverseIndex> { + None + } -pub struct NoInfcx<I>(PhantomData<I>); + fn universe_of_lt(&self, _lt: I::InferRegion) -> Option<UniverseIndex> { + None + } -impl<I: Interner> InferCtxtLike for NoInfcx<I> { - type Interner = I; + fn universe_of_ct(&self, _ct: 
ConstVid) -> Option<UniverseIndex> { + None + } + + fn root_ty_var(&self, vid: TyVid) -> TyVid { + vid + } - fn universe_of_ty(&self, _ty: InferTy) -> Option<UniverseIndex> { + fn probe_ty_var(&self, _vid: TyVid) -> Option<I::Ty> { None } - fn universe_of_ct(&self, _ct: InferConst) -> Option<UniverseIndex> { + fn root_lt_var(&self, vid: I::InferRegion) -> I::InferRegion { + vid + } + + fn probe_lt_var(&self, _vid: I::InferRegion) -> Option<I::Region> { None } - fn universe_of_lt(&self, _lt: <I as Interner>::InferRegion) -> Option<UniverseIndex> { + fn root_ct_var(&self, vid: ConstVid) -> ConstVid { + vid + } + + fn probe_ct_var(&self, _vid: ConstVid) -> Option<I::Const> { None } } diff --git a/compiler/rustc_type_ir/src/infcx.rs b/compiler/rustc_type_ir/src/infcx.rs new file mode 100644 index 00000000000..681f129a50b --- /dev/null +++ b/compiler/rustc_type_ir/src/infcx.rs @@ -0,0 +1,40 @@ +use crate::{ConstVid, Interner, TyVid, UniverseIndex}; + +pub trait InferCtxtLike { + type Interner: Interner; + + fn interner(&self) -> Self::Interner; + + fn universe_of_ty(&self, ty: TyVid) -> Option<UniverseIndex>; + + /// Resolve `TyVid` to its root `TyVid`. + fn root_ty_var(&self, vid: TyVid) -> TyVid; + + /// Resolve `TyVid` to its inferred type, if it has been equated with a non-infer type. + fn probe_ty_var(&self, vid: TyVid) -> Option<<Self::Interner as Interner>::Ty>; + + fn universe_of_lt( + &self, + lt: <Self::Interner as Interner>::InferRegion, + ) -> Option<UniverseIndex>; + + /// Resolve `InferRegion` to its root `InferRegion`. + fn root_lt_var( + &self, + vid: <Self::Interner as Interner>::InferRegion, + ) -> <Self::Interner as Interner>::InferRegion; + + /// Resolve `InferRegion` to its inferred region, if it has been equated with a non-infer region. + fn probe_lt_var( + &self, + vid: <Self::Interner as Interner>::InferRegion, + ) -> Option<<Self::Interner as Interner>::Region>; + + fn universe_of_ct(&self, ct: ConstVid) -> Option<UniverseIndex>; + + /// Resolve `ConstVid` to its root `ConstVid`. + fn root_ct_var(&self, vid: ConstVid) -> ConstVid; + + /// Resolve `ConstVid` to its inferred type, if it has been equated with a non-infer type. 
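// Illustrative sketch, not from this diff: a toy model of the "root variable" / "probe for a
// resolved value" split that the `InferCtxtLike` methods above expose. `VarState` and `Table`
// are made-up stand-ins for the real inference tables.
#[derive(Clone, Copy)]
enum VarState {
    Unresolved,
    EquatedWith(usize),  // unified with another variable
    Known(&'static str), // resolved to a concrete type
}

struct Table {
    vars: Vec<VarState>,
}

impl Table {
    // Follow `EquatedWith` links until reaching a representative variable.
    fn root_var(&self, mut vid: usize) -> usize {
        while let VarState::EquatedWith(next) = self.vars[vid] {
            vid = next;
        }
        vid
    }

    // Return the resolved value of the root, if any.
    fn probe_var(&self, vid: usize) -> Option<&'static str> {
        match self.vars[self.root_var(vid)] {
            VarState::Known(ty) => Some(ty),
            _ => None,
        }
    }
}

fn main() {
    let t = Table {
        vars: vec![
            VarState::EquatedWith(1),
            VarState::Known("u32"),
            VarState::Unresolved,
        ],
    };
    assert_eq!(t.root_var(0), 1);
    assert_eq!(t.probe_var(0), Some("u32"));
    assert_eq!(t.probe_var(2), None);
}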
+ fn probe_ct_var(&self, vid: ConstVid) -> Option<<Self::Interner as Interner>::Const>; +} diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index 16508c1a257..c262302133b 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -2,74 +2,113 @@ use smallvec::SmallVec; use std::fmt::Debug; use std::hash::Hash; -use crate::{BoundVar, DebugWithInfcx, Mutability, UniverseIndex}; +use crate::{ + BoundVar, CanonicalVarInfo, ConstKind, DebruijnIndex, DebugWithInfcx, Mutability, RegionKind, + TyKind, UniverseIndex, +}; pub trait Interner: Sized { - type DefId: Clone + Debug + Hash + Ord; - type AdtDef: Clone + Debug + Hash + Ord; + type DefId: Copy + Debug + Hash + Ord; + type AdtDef: Copy + Debug + Hash + Ord; - type GenericArgs: Clone + type GenericArgs: Copy + DebugWithInfcx<Self> + Hash + Ord + IntoIterator<Item = Self::GenericArg>; - type GenericArg: Clone + DebugWithInfcx<Self> + Hash + Ord; - type Term: Clone + Debug + Hash + Ord; + type GenericArg: Copy + DebugWithInfcx<Self> + Hash + Ord; + type Term: Copy + Debug + Hash + Ord; type Binder<T>; - type TypeAndMut: Clone + Debug + Hash + Ord; - type CanonicalVars: Clone + Debug + Hash + Eq; + type TypeAndMut: Copy + Debug + Hash + Ord; + type CanonicalVars: Copy + Debug + Hash + Eq + IntoIterator<Item = CanonicalVarInfo<Self>>; // Kinds of tys - type Ty: Clone + DebugWithInfcx<Self> + Hash + Ord; - type Tys: Clone + Debug + Hash + Ord + IntoIterator<Item = Self::Ty>; - type AliasTy: Clone + DebugWithInfcx<Self> + Hash + Ord; - type ParamTy: Clone + Debug + Hash + Ord; - type BoundTy: Clone + Debug + Hash + Ord; - type PlaceholderTy: Clone + Debug + Hash + Ord + Placeholder; + type Ty: Copy + + DebugWithInfcx<Self> + + Hash + + Ord + + Into<Self::GenericArg> + + IntoKind<Kind = TyKind<Self>>; + type Tys: Copy + Debug + Hash + Ord + IntoIterator<Item = Self::Ty>; + type AliasTy: Copy + DebugWithInfcx<Self> + Hash + Ord; + type ParamTy: Copy + Debug + Hash + Ord; + type BoundTy: Copy + Debug + Hash + Ord; + type PlaceholderTy: Copy + Debug + Hash + Ord + PlaceholderLike; // Things stored inside of tys - type ErrorGuaranteed: Clone + Debug + Hash + Ord; - type BoundExistentialPredicates: Clone + DebugWithInfcx<Self> + Hash + Ord; - type PolyFnSig: Clone + DebugWithInfcx<Self> + Hash + Ord; - type AllocId: Clone + Debug + Hash + Ord; + type ErrorGuaranteed: Copy + Debug + Hash + Ord; + type BoundExistentialPredicates: Copy + DebugWithInfcx<Self> + Hash + Ord; + type PolyFnSig: Copy + DebugWithInfcx<Self> + Hash + Ord; + type AllocId: Copy + Debug + Hash + Ord; // Kinds of consts - type Const: Clone + DebugWithInfcx<Self> + Hash + Ord; - type AliasConst: Clone + DebugWithInfcx<Self> + Hash + Ord; - type PlaceholderConst: Clone + Debug + Hash + Ord + Placeholder; - type ParamConst: Clone + Debug + Hash + Ord; - type BoundConst: Clone + Debug + Hash + Ord; - type ValueConst: Clone + Debug + Hash + Ord; - type ExprConst: Clone + DebugWithInfcx<Self> + Hash + Ord; + type Const: Copy + + DebugWithInfcx<Self> + + Hash + + Ord + + Into<Self::GenericArg> + + IntoKind<Kind = ConstKind<Self>> + + ConstTy<Self>; + type AliasConst: Copy + DebugWithInfcx<Self> + Hash + Ord; + type PlaceholderConst: Copy + Debug + Hash + Ord + PlaceholderLike; + type ParamConst: Copy + Debug + Hash + Ord; + type BoundConst: Copy + Debug + Hash + Ord; + type ValueConst: Copy + Debug + Hash + Ord; + type ExprConst: Copy + DebugWithInfcx<Self> + Hash + Ord; // Kinds of regions - type Region: 
Clone + DebugWithInfcx<Self> + Hash + Ord; - type EarlyParamRegion: Clone + Debug + Hash + Ord; - type BoundRegion: Clone + Debug + Hash + Ord; - type LateParamRegion: Clone + Debug + Hash + Ord; - type InferRegion: Clone + DebugWithInfcx<Self> + Hash + Ord; - type PlaceholderRegion: Clone + Debug + Hash + Ord + Placeholder; + type Region: Copy + + DebugWithInfcx<Self> + + Hash + + Ord + + Into<Self::GenericArg> + + IntoKind<Kind = RegionKind<Self>>; + type EarlyParamRegion: Copy + Debug + Hash + Ord; + type LateParamRegion: Copy + Debug + Hash + Ord; + type BoundRegion: Copy + Debug + Hash + Ord; + type InferRegion: Copy + DebugWithInfcx<Self> + Hash + Ord; + type PlaceholderRegion: Copy + Debug + Hash + Ord + PlaceholderLike; // Predicates - type Predicate: Clone + Debug + Hash + Eq; - type TraitPredicate: Clone + Debug + Hash + Eq; - type RegionOutlivesPredicate: Clone + Debug + Hash + Eq; - type TypeOutlivesPredicate: Clone + Debug + Hash + Eq; - type ProjectionPredicate: Clone + Debug + Hash + Eq; - type SubtypePredicate: Clone + Debug + Hash + Eq; - type CoercePredicate: Clone + Debug + Hash + Eq; - type ClosureKind: Clone + Debug + Hash + Eq; + type Predicate: Copy + Debug + Hash + Eq; + type TraitPredicate: Copy + Debug + Hash + Eq; + type RegionOutlivesPredicate: Copy + Debug + Hash + Eq; + type TypeOutlivesPredicate: Copy + Debug + Hash + Eq; + type ProjectionPredicate: Copy + Debug + Hash + Eq; + type NormalizesTo: Copy + Debug + Hash + Eq; + type SubtypePredicate: Copy + Debug + Hash + Eq; + type CoercePredicate: Copy + Debug + Hash + Eq; + type ClosureKind: Copy + Debug + Hash + Eq; fn ty_and_mut_to_parts(ty_and_mut: Self::TypeAndMut) -> (Self::Ty, Mutability); + + fn mk_canonical_var_infos(self, infos: &[CanonicalVarInfo<Self>]) -> Self::CanonicalVars; + + // FIXME: We should not have all these constructors on `Interner`, but as functions on some trait. 
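// Illustrative sketch, not from this diff: the `Clone` -> `Copy` bound changes above lean on
// the fact that interned handles are pointer-sized references into an arena and can be
// duplicated by a plain bit-copy. `TyData`, `Ty` and `Arena` are made-up stand-ins.
#[derive(PartialEq)]
enum TyData {
    Bool,
    Int,
}

#[derive(Clone, Copy)]
struct Ty<'a>(&'a TyData);

struct Arena {
    tys: Vec<TyData>,
}

impl Arena {
    // A real interner would hash and deduplicate; this sketch just finds a pre-seeded value.
    fn intern(&self, data: TyData) -> Ty<'_> {
        Ty(self.tys.iter().find(|t| **t == data).expect("pre-seeded"))
    }
}

fn main() {
    let arena = Arena { tys: vec![TyData::Bool, TyData::Int] };
    let t = arena.intern(TyData::Int);
    let t2 = t; // bit-copy of a pointer-sized handle
    assert!(std::ptr::eq(t.0, t2.0));
}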
+ fn mk_bound_ty(self, debruijn: DebruijnIndex, var: BoundVar) -> Self::Ty; + fn mk_bound_region(self, debruijn: DebruijnIndex, var: BoundVar) -> Self::Region; + fn mk_bound_const(self, debruijn: DebruijnIndex, var: BoundVar, ty: Self::Ty) -> Self::Const; } /// Common capabilities of placeholder kinds -pub trait Placeholder { - fn universe(&self) -> UniverseIndex; - fn var(&self) -> BoundVar; +pub trait PlaceholderLike { + fn universe(self) -> UniverseIndex; + fn var(self) -> BoundVar; fn with_updated_universe(self, ui: UniverseIndex) -> Self; + + fn new(ui: UniverseIndex, var: BoundVar) -> Self; +} + +pub trait IntoKind { + type Kind; + + fn kind(self) -> Self::Kind; +} + +pub trait ConstTy<I: Interner> { + fn ty(self) -> I::Ty; } /// Imagine you have a function `F: FnOnce(&[T]) -> R`, plus an iterator `iter` diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs index 2aeb4230bb8..200963ff7c5 100644 --- a/compiler/rustc_type_ir/src/lib.rs +++ b/compiler/rustc_type_ir/src/lib.rs @@ -4,6 +4,7 @@ )] #![deny(rustc::untranslatable_diagnostic)] #![deny(rustc::diagnostic_outside_of_impl)] +#![allow(rustc::usage_of_ty_tykind)] #![cfg_attr(feature = "nightly", allow(internal_features))] #[cfg(feature = "nightly")] @@ -35,6 +36,7 @@ mod canonical; mod const_kind; mod debug; mod flags; +mod infcx; mod interner; mod predicate_kind; mod region_kind; @@ -43,13 +45,19 @@ pub use canonical::*; #[cfg(feature = "nightly")] pub use codec::*; pub use const_kind::*; -pub use debug::{DebugWithInfcx, InferCtxtLike, WithInfcx}; +pub use debug::{DebugWithInfcx, WithInfcx}; pub use flags::*; +pub use infcx::InferCtxtLike; pub use interner::*; pub use predicate_kind::*; pub use region_kind::*; pub use ty_info::*; pub use ty_kind::*; +pub use AliasKind::*; +pub use DynKind::*; +pub use InferTy::*; +pub use RegionKind::*; +pub use TyKind::*; rustc_index::newtype_index! { /// A [De Bruijn index][dbi] is a standard means of representing @@ -337,6 +345,12 @@ impl UniverseIndex { } } +impl Default for UniverseIndex { + fn default() -> Self { + Self::ROOT + } +} + rustc_index::newtype_index! { #[cfg_attr(feature = "nightly", derive(HashStable_NoContext))] #[encodable] diff --git a/compiler/rustc_type_ir/src/predicate_kind.rs b/compiler/rustc_type_ir/src/predicate_kind.rs index f6a2dca4eee..b567fa8e2f6 100644 --- a/compiler/rustc_type_ir/src/predicate_kind.rs +++ b/compiler/rustc_type_ir/src/predicate_kind.rs @@ -120,7 +120,7 @@ where } #[derive(derivative::Derivative)] -#[derivative(Clone(bound = ""), Hash(bound = ""))] +#[derivative(Clone(bound = ""), Hash(bound = ""), PartialEq(bound = ""), Eq(bound = ""))] #[cfg_attr(feature = "nightly", derive(TyEncodable, TyDecodable, HashStable_NoContext))] pub enum PredicateKind<I: Interner> { /// Prove a clause @@ -153,6 +153,15 @@ pub enum PredicateKind<I: Interner> { /// Used for coherence to mark opaque types as possibly equal to each other but ambiguous. Ambiguous, + /// The alias normalizes to `term`. Unlike `Projection`, this always fails if the alias + /// cannot be normalized in the current context. + /// + /// `Projection(<T as Trait>::Assoc, ?x)` results in `?x == <T as Trait>::Assoc` while + /// `NormalizesTo(<T as Trait>::Assoc, ?x)` results in `NoSolution`. + /// + /// Only used in the new solver. + NormalizesTo(I::NormalizesTo), + /// Separate from `ClauseKind::Projection` which is used for normalization in new solver. /// This predicate requires two terms to be equal to eachother. 
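// Illustrative sketch, not from this diff: a toy model of the contrast documented above.
// For an alias that is "rigid" (cannot be normalized further), a projection goal may still
// succeed by constraining the inference variable to the alias itself, while a normalizes-to
// goal fails. `Outcome` and both functions are made-up names.
#[derive(Debug, PartialEq)]
enum Outcome {
    ConstrainedTo(&'static str),
    NoSolution,
}

// `Projection(<T as Trait>::Assoc, ?x)`: if normalization gets stuck, fall back to
// `?x == <T as Trait>::Assoc`.
fn projection_goal(alias: &'static str, normalized: Option<&'static str>) -> Outcome {
    Outcome::ConstrainedTo(normalized.unwrap_or(alias))
}

// `NormalizesTo(<T as Trait>::Assoc, ?x)`: stuck normalization is a hard failure.
fn normalizes_to_goal(_alias: &'static str, normalized: Option<&'static str>) -> Outcome {
    match normalized {
        Some(ty) => Outcome::ConstrainedTo(ty),
        None => Outcome::NoSolution,
    }
}

fn main() {
    assert_eq!(
        projection_goal("<T as Trait>::Assoc", None),
        Outcome::ConstrainedTo("<T as Trait>::Assoc")
    );
    assert_eq!(normalizes_to_goal("<T as Trait>::Assoc", None), Outcome::NoSolution);
}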
/// @@ -168,28 +177,11 @@ where I::Term: Copy, I::CoercePredicate: Copy, I::SubtypePredicate: Copy, + I::NormalizesTo: Copy, ClauseKind<I>: Copy, { } -impl<I: Interner> PartialEq for PredicateKind<I> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Clause(l0), Self::Clause(r0)) => l0 == r0, - (Self::ObjectSafe(l0), Self::ObjectSafe(r0)) => l0 == r0, - (Self::Subtype(l0), Self::Subtype(r0)) => l0 == r0, - (Self::Coerce(l0), Self::Coerce(r0)) => l0 == r0, - (Self::ConstEquate(l0, l1), Self::ConstEquate(r0, r1)) => l0 == r0 && l1 == r1, - (Self::AliasRelate(l0, l1, l2), Self::AliasRelate(r0, r1, r2)) => { - l0 == r0 && l1 == r1 && l2 == r2 - } - _ => core::mem::discriminant(self) == core::mem::discriminant(other), - } - } -} - -impl<I: Interner> Eq for PredicateKind<I> {} - impl<I: Interner> TypeFoldable<I> for PredicateKind<I> where I::DefId: TypeFoldable<I>, @@ -198,6 +190,7 @@ where I::Term: TypeFoldable<I>, I::CoercePredicate: TypeFoldable<I>, I::SubtypePredicate: TypeFoldable<I>, + I::NormalizesTo: TypeFoldable<I>, ClauseKind<I>: TypeFoldable<I>, { fn try_fold_with<F: FallibleTypeFolder<I>>(self, folder: &mut F) -> Result<Self, F::Error> { @@ -210,6 +203,7 @@ where PredicateKind::ConstEquate(a.try_fold_with(folder)?, b.try_fold_with(folder)?) } PredicateKind::Ambiguous => PredicateKind::Ambiguous, + PredicateKind::NormalizesTo(p) => PredicateKind::NormalizesTo(p.try_fold_with(folder)?), PredicateKind::AliasRelate(a, b, d) => PredicateKind::AliasRelate( a.try_fold_with(folder)?, b.try_fold_with(folder)?, @@ -227,6 +221,7 @@ where I::Term: TypeVisitable<I>, I::CoercePredicate: TypeVisitable<I>, I::SubtypePredicate: TypeVisitable<I>, + I::NormalizesTo: TypeVisitable<I>, ClauseKind<I>: TypeVisitable<I>, { fn visit_with<V: TypeVisitor<I>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { @@ -240,6 +235,7 @@ where b.visit_with(visitor) } PredicateKind::Ambiguous => ControlFlow::Continue(()), + PredicateKind::NormalizesTo(p) => p.visit_with(visitor), PredicateKind::AliasRelate(a, b, d) => { a.visit_with(visitor)?; b.visit_with(visitor)?; @@ -294,6 +290,7 @@ impl<I: Interner> fmt::Debug for PredicateKind<I> { } PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({c1:?}, {c2:?})"), PredicateKind::Ambiguous => write!(f, "Ambiguous"), + PredicateKind::NormalizesTo(p) => p.fmt(f), PredicateKind::AliasRelate(t1, t2, dir) => { write!(f, "AliasRelate({t1:?}, {dir:?}, {t2:?})") } diff --git a/compiler/rustc_type_ir/src/ty_kind.rs b/compiler/rustc_type_ir/src/ty_kind.rs index 3d4e7f77a4f..a7a5cae254c 100644 --- a/compiler/rustc_type_ir/src/ty_kind.rs +++ b/compiler/rustc_type_ir/src/ty_kind.rs @@ -1,5 +1,3 @@ -#![allow(rustc::usage_of_ty_tykind)] - #[cfg(feature = "nightly")] use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; #[cfg(feature = "nightly")] @@ -394,7 +392,7 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> { Float(float) => write!(f, "{float:?}"), Adt(d, s) => { write!(f, "{d:?}")?; - let mut s = s.clone().into_iter(); + let mut s = s.into_iter(); let first = s.next(); match first { Some(first) => write!(f, "<{:?}", first)?, @@ -412,7 +410,7 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> { Array(t, c) => write!(f, "[{:?}; {:?}]", &this.wrap(t), &this.wrap(c)), Slice(t) => write!(f, "[{:?}]", &this.wrap(t)), RawPtr(p) => { - let (ty, mutbl) = I::ty_and_mut_to_parts(p.clone()); + let (ty, mutbl) = I::ty_and_mut_to_parts(*p); match mutbl { Mutability::Mut => write!(f, "*mut "), Mutability::Not => write!(f, "*const "), @@ -442,7 
+440,7 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> { Tuple(t) => { write!(f, "(")?; let mut count = 0; - for ty in t.clone() { + for ty in *t { if count > 0 { write!(f, ", ")?; } @@ -820,15 +818,15 @@ impl<I: Interner> DebugWithInfcx<I> for InferTy { this: WithInfcx<'_, Infcx, &Self>, f: &mut fmt::Formatter<'_>, ) -> fmt::Result { - use InferTy::*; - match this.infcx.universe_of_ty(*this.data) { - None => write!(f, "{:?}", this.data), - Some(universe) => match *this.data { - TyVar(ty_vid) => write!(f, "?{}_{}t", ty_vid.index(), universe.index()), - IntVar(_) | FloatVar(_) | FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_) => { - unreachable!() + match this.data { + InferTy::TyVar(vid) => { + if let Some(universe) = this.infcx.universe_of_ty(*vid) { + write!(f, "?{}_{}t", vid.index(), universe.index()) + } else { + write!(f, "{:?}", this.data) } - }, + } + _ => write!(f, "{:?}", this.data), } } } diff --git a/compiler/stable_mir/src/compiler_interface.rs b/compiler/stable_mir/src/compiler_interface.rs index 7916d04250d..17c5212fb9c 100644 --- a/compiler/stable_mir/src/compiler_interface.rs +++ b/compiler/stable_mir/src/compiler_interface.rs @@ -8,6 +8,7 @@ use std::cell::Cell; use crate::mir::alloc::{AllocId, GlobalAlloc}; use crate::mir::mono::{Instance, InstanceDef, StaticDef}; use crate::mir::Body; +use crate::target::MachineInfo; use crate::ty::{ AdtDef, AdtKind, Allocation, ClosureDef, ClosureKind, Const, FieldDef, FnDef, GenericArgs, GenericPredicates, Generics, ImplDef, ImplTrait, LineInfo, PolyFnSig, RigidTy, Span, TraitDecl, @@ -150,6 +151,9 @@ pub trait Context { /// Evaluate a static's initializer. fn eval_static_initializer(&self, def: StaticDef) -> Result<Allocation, Error>; + /// Try to evaluate an instance into a constant. + fn eval_instance(&self, def: InstanceDef, const_ty: Ty) -> Result<Allocation, Error>; + /// Retrieve global allocation for the given allocation ID. fn global_alloc(&self, id: AllocId) -> GlobalAlloc; @@ -157,6 +161,9 @@ pub trait Context { fn vtable_allocation(&self, global_alloc: &GlobalAlloc) -> Option<AllocId>; fn krate(&self, def_id: DefId) -> Crate; fn instance_name(&self, def: InstanceDef, trimmed: bool) -> Symbol; + + /// Return information about the target machine. + fn target_info(&self) -> MachineInfo; } // A thread local variable that stores a pointer to the tables mapping between TyCtxt diff --git a/compiler/stable_mir/src/error.rs b/compiler/stable_mir/src/error.rs index bb5e1a34180..c6da3ae41d3 100644 --- a/compiler/stable_mir/src/error.rs +++ b/compiler/stable_mir/src/error.rs @@ -6,11 +6,11 @@ use std::convert::From; use std::fmt::{Debug, Display, Formatter}; -use std::{error, fmt}; +use std::{error, fmt, io}; macro_rules! error { ($fmt: literal $(,)?) => { Error(format!($fmt)) }; - ($fmt: literal, $($arg:tt)*) => { Error(format!($fmt, $($arg:tt)*)) }; + ($fmt: literal, $($arg:tt)*) => { Error(format!($fmt, $($arg)*)) }; } /// An error type used to represent an error that has already been reported by the compiler. 
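// Illustrative sketch, not from this diff: a standalone version of the corrected `error!`
// macro above. The `:tt` fragment specifier only belongs in the matcher; repeating it in the
// expansion would paste literal `:tt` tokens into the `format!` call. `Error` here is a
// minimal stand-in for the crate's error type.
struct Error(String);

macro_rules! error {
    ($fmt:literal $(,)?) => { Error(format!($fmt)) };
    ($fmt:literal, $($arg:tt)*) => { Error(format!($fmt, $($arg)*)) };
}

fn main() {
    let e = error!("range {}..{} is out of bounds", 0, 16);
    println!("{}", e.0);
    let _ = error!("plain message");
}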
@@ -79,3 +79,9 @@ where impl error::Error for Error {} impl<T> error::Error for CompilerError<T> where T: Display + Debug {} + +impl From<io::Error> for Error { + fn from(value: io::Error) -> Self { + Error(value.to_string()) + } +} diff --git a/compiler/stable_mir/src/lib.rs b/compiler/stable_mir/src/lib.rs index 1e7495009d8..8c66bfb2e98 100644 --- a/compiler/stable_mir/src/lib.rs +++ b/compiler/stable_mir/src/lib.rs @@ -39,6 +39,7 @@ pub mod compiler_interface; #[macro_use] pub mod error; pub mod mir; +pub mod target; pub mod ty; pub mod visitor; diff --git a/compiler/stable_mir/src/mir/alloc.rs b/compiler/stable_mir/src/mir/alloc.rs index af951bcef8c..c780042ff26 100644 --- a/compiler/stable_mir/src/mir/alloc.rs +++ b/compiler/stable_mir/src/mir/alloc.rs @@ -1,7 +1,9 @@ //! This module provides methods to retrieve allocation information, such as static variables. use crate::mir::mono::{Instance, StaticDef}; +use crate::target::{Endian, MachineInfo}; use crate::ty::{Allocation, Binder, ExistentialTraitRef, IndexedVal, Ty}; -use crate::with; +use crate::{with, Error}; +use std::io::Read; /// An allocation in the SMIR global memory can be either a function pointer, /// a static, or a "real" allocation with some data in it. @@ -38,7 +40,7 @@ impl GlobalAlloc { } /// A unique identification number for each provenance -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] pub struct AllocId(usize); impl IndexedVal for AllocId { @@ -49,3 +51,33 @@ impl IndexedVal for AllocId { self.0 } } + +/// Utility function used to read an allocation data into a unassigned integer. +pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> { + let mut buf = [0u8; std::mem::size_of::<u128>()]; + match MachineInfo::target_endianess() { + Endian::Little => { + bytes.read(&mut buf)?; + Ok(u128::from_le_bytes(buf)) + } + Endian::Big => { + bytes.read(&mut buf[16 - bytes.len()..])?; + Ok(u128::from_be_bytes(buf)) + } + } +} + +/// Utility function used to read an allocation data into an assigned integer. +pub(crate) fn read_target_int(mut bytes: &[u8]) -> Result<i128, Error> { + let mut buf = [0u8; std::mem::size_of::<i128>()]; + match MachineInfo::target_endianess() { + Endian::Little => { + bytes.read(&mut buf)?; + Ok(i128::from_le_bytes(buf)) + } + Endian::Big => { + bytes.read(&mut buf[16 - bytes.len()..])?; + Ok(i128::from_be_bytes(buf)) + } + } +} diff --git a/compiler/stable_mir/src/mir/body.rs b/compiler/stable_mir/src/mir/body.rs index 90bd7aa7d18..663275d9a0f 100644 --- a/compiler/stable_mir/src/mir/body.rs +++ b/compiler/stable_mir/src/mir/body.rs @@ -21,7 +21,7 @@ pub struct Body { pub(super) arg_count: usize, /// Debug information pertaining to user variables, including captures. - pub(super) var_debug_info: Vec<VarDebugInfo>, + pub var_debug_info: Vec<VarDebugInfo>, } pub type BasicBlockIdx = usize; @@ -616,6 +616,24 @@ pub struct VarDebugInfo { pub argument_index: Option<u16>, } +impl VarDebugInfo { + /// Return a local variable if this info is related to one. + pub fn local(&self) -> Option<Local> { + match &self.value { + VarDebugInfoContents::Place(place) if place.projection.is_empty() => Some(place.local), + VarDebugInfoContents::Place(_) | VarDebugInfoContents::Const(_) => None, + } + } + + /// Return a constant if this info is related to one. 
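// Illustrative sketch, not from this diff: a standalone version of the `read_target_uint`
// helper above, showing how the bytes are padded into a 16-byte buffer depending on
// endianness. `read_uint` and the test values are made up for the example.
use std::io::Read;

fn read_uint(mut bytes: &[u8], little_endian: bool) -> std::io::Result<u128> {
    let mut buf = [0u8; std::mem::size_of::<u128>()];
    if little_endian {
        // Little-endian: copy into the low end of the buffer; the high bytes stay zero.
        bytes.read(&mut buf)?;
        Ok(u128::from_le_bytes(buf))
    } else {
        // Big-endian: copy into the high end of the buffer instead.
        bytes.read(&mut buf[16 - bytes.len()..])?;
        Ok(u128::from_be_bytes(buf))
    }
}

fn main() -> std::io::Result<()> {
    assert_eq!(read_uint(&[0x34, 0x12], true)?, 0x1234);
    assert_eq!(read_uint(&[0x12, 0x34], false)?, 0x1234);
    Ok(())
}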
+    pub fn constant(&self) -> Option<&ConstOperand> {
+        match &self.value {
+            VarDebugInfoContents::Place(_) => None,
+            VarDebugInfoContents::Const(const_op) => Some(const_op),
+        }
+    }
+}
+
 pub type SourceScope = u32;
 
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -832,7 +850,7 @@ pub enum MutBorrowKind {
     ClosureCapture,
 }
 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 pub enum Mutability {
     Not,
     Mut,
diff --git a/compiler/stable_mir/src/mir/mono.rs b/compiler/stable_mir/src/mir/mono.rs
index 5c27f9281de..bc5d4a3b8f4 100644
--- a/compiler/stable_mir/src/mir/mono.rs
+++ b/compiler/stable_mir/src/mir/mono.rs
@@ -132,6 +132,14 @@ impl Instance {
     pub fn is_empty_shim(&self) -> bool {
         self.kind == InstanceKind::Shim && with(|cx| cx.is_empty_drop_shim(self.def))
     }
+
+    /// Try to constant evaluate the instance into a constant with the given type.
+    ///
+    /// This can be used to retrieve a constant that represents an intrinsic return such as
+    /// `type_id`.
+    pub fn try_const_eval(&self, const_ty: Ty) -> Result<Allocation, Error> {
+        with(|cx| cx.eval_instance(self.def, const_ty))
+    }
 }
 
 impl Debug for Instance {
@@ -212,7 +220,7 @@ impl TryFrom<CrateItem> for StaticDef {
     type Error = crate::Error;
 
     fn try_from(value: CrateItem) -> Result<Self, Self::Error> {
-        if matches!(value.kind(), ItemKind::Static | ItemKind::Const) {
+        if matches!(value.kind(), ItemKind::Static) {
             Ok(StaticDef(value.0))
         } else {
             Err(Error::new(format!("Expected a static item, but found: {value:?}")))
diff --git a/compiler/stable_mir/src/target.rs b/compiler/stable_mir/src/target.rs
new file mode 100644
index 00000000000..bed1dbc4c00
--- /dev/null
+++ b/compiler/stable_mir/src/target.rs
@@ -0,0 +1,50 @@
+//! Provide information about the machine that this is being compiled into.
+
+use crate::compiler_interface::with;
+
+/// The properties of the target machine being compiled into.
+#[derive(Clone, PartialEq, Eq)]
+pub struct MachineInfo {
+    pub endian: Endian,
+    pub pointer_width: MachineSize,
+}
+
+impl MachineInfo {
+    pub fn target() -> MachineInfo {
+        with(|cx| cx.target_info().clone())
+    }
+
+    pub fn target_endianess() -> Endian {
+        with(|cx| cx.target_info().endian)
+    }
+
+    pub fn target_pointer_width() -> MachineSize {
+        with(|cx| cx.target_info().pointer_width)
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Endian {
+    Little,
+    Big,
+}
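To make the new `Endian` enum and the `read_target_uint`/`read_target_int` helpers from `alloc.rs` concrete, here is a hedged, standalone sketch of the same decoding scheme: the value is zero-extended into a 16-byte buffer, right-aligned when the target is big-endian. Names are illustrative and `read_exact` stands in for the crate's `read` call; this is not the crate's code.

```rust
// Standalone sketch of endianness-aware decoding (assumes `bytes.len() <= 16`).
use std::io::Read;

fn decode_uint(mut bytes: &[u8], big_endian: bool) -> std::io::Result<u128> {
    let n = bytes.len();
    let mut buf = [0u8; 16]; // wide enough for a u128
    if big_endian {
        // Right-align big-endian data so the value keeps its magnitude.
        bytes.read_exact(&mut buf[16 - n..])?;
        Ok(u128::from_be_bytes(buf))
    } else {
        // Left-align little-endian data; the unused tail stays zero.
        bytes.read_exact(&mut buf[..n])?;
        Ok(u128::from_le_bytes(buf))
    }
}

fn main() -> std::io::Result<()> {
    assert_eq!(decode_uint(&[0x2a, 0x00], false)?, 42); // little-endian u16
    assert_eq!(decode_uint(&[0x00, 0x2a], true)?, 42); // big-endian u16
    Ok(())
}
```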
+
+/// Represent the size of a component.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct MachineSize {
+    num_bits: usize,
+}
+
+impl MachineSize {
+    pub fn bytes(self) -> usize {
+        self.num_bits / 8
+    }
+
+    pub fn bits(self) -> usize {
+        self.num_bits
+    }
+
+    pub fn from_bits(num_bits: usize) -> MachineSize {
+        MachineSize { num_bits }
+    }
+}
diff --git a/compiler/stable_mir/src/ty.rs b/compiler/stable_mir/src/ty.rs
index c922264f8a3..bea7702bd34 100644
--- a/compiler/stable_mir/src/ty.rs
+++ b/compiler/stable_mir/src/ty.rs
@@ -4,9 +4,11 @@ use super::{
     with, DefId, Error, Symbol,
 };
 use crate::crate_def::CrateDef;
-use crate::mir::alloc::AllocId;
+use crate::mir::alloc::{read_target_int, read_target_uint, AllocId};
+use crate::target::MachineInfo;
 use crate::{Filename, Opaque};
 use std::fmt::{self, Debug, Display, Formatter};
+use std::ops::Range;
 
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
 pub struct Ty(pub usize);
@@ -366,6 +368,19 @@ pub enum IntTy {
     I128,
 }
 
+impl IntTy {
+    pub fn num_bytes(self) -> usize {
+        match self {
+            IntTy::Isize => crate::target::MachineInfo::target_pointer_width().bytes().into(),
+            IntTy::I8 => 1,
+            IntTy::I16 => 2,
+            IntTy::I32 => 4,
+            IntTy::I64 => 8,
+            IntTy::I128 => 16,
+        }
+    }
+}
+
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum UintTy {
     Usize,
@@ -376,6 +391,19 @@
     U128,
 }
 
+impl UintTy {
+    pub fn num_bytes(self) -> usize {
+        match self {
+            UintTy::Usize => crate::target::MachineInfo::target_pointer_width().bytes().into(),
+            UintTy::U8 => 1,
+            UintTy::U16 => 2,
+            UintTy::U32 => 4,
+            UintTy::U64 => 8,
+            UintTy::U128 => 16,
+        }
+    }
+}
+
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum FloatTy {
     F32,
@@ -821,21 +849,21 @@ pub struct BoundTy {
 pub type Bytes = Vec<Option<u8>>;
 pub type Size = usize;
 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
 pub struct Prov(pub AllocId);
 pub type Align = u64;
 pub type Promoted = u32;
 pub type InitMaskMaterialized = Vec<u64>;
 
 /// Stores the provenance information of pointers stored in memory.
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
 pub struct ProvenanceMap {
     /// Provenance in this map applies from the given offset for an entire pointer-size worth of
     /// bytes. Two entries in this map are always at least a pointer size apart.
     pub ptrs: Vec<(Size, Prov)>,
 }
 
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
 pub struct Allocation {
     pub bytes: Bytes,
     pub provenance: ProvenanceMap,
@@ -843,6 +871,74 @@ pub struct Allocation {
     pub mutability: Mutability,
 }
 
+impl Allocation {
+    /// Get a vector of bytes for an Allocation that has been fully initialized
+    pub fn raw_bytes(&self) -> Result<Vec<u8>, Error> {
+        self.bytes
+            .iter()
+            .copied()
+            .collect::<Option<Vec<_>>>()
+            .ok_or_else(|| error!("Found uninitialized bytes: `{:?}`", self.bytes))
+    }
+
+    /// Read a uint value from the specified range.
+    pub fn read_partial_uint(&self, range: Range<usize>) -> Result<u128, Error> {
+        if range.end - range.start > 16 {
+            return Err(error!("Allocation is bigger than largest integer"));
+        }
+        if range.end > self.bytes.len() {
+            return Err(error!(
+                "Range is out of bounds. Allocation length is `{}`, but requested range `{:?}`",
+                self.bytes.len(),
+                range
+            ));
+        }
+        let raw = self.bytes[range]
+            .iter()
+            .copied()
+            .collect::<Option<Vec<_>>>()
+            .ok_or_else(|| error!("Found uninitialized bytes: `{:?}`", self.bytes))?;
+        read_target_uint(&raw)
+    }
+
+    /// Read this allocation and try to convert it to an unsigned integer.
+    pub fn read_uint(&self) -> Result<u128, Error> {
+        if self.bytes.len() > 16 {
+            return Err(error!("Allocation is bigger than largest integer"));
+        }
+        let raw = self.raw_bytes()?;
+        read_target_uint(&raw)
+    }
+
+    /// Read this allocation and try to convert it to a signed integer.
+    pub fn read_int(&self) -> Result<i128, Error> {
+        if self.bytes.len() > 16 {
+            return Err(error!("Allocation is bigger than largest integer"));
+        }
+        let raw = self.raw_bytes()?;
+        read_target_int(&raw)
+    }
+
+    /// Read this allocation and try to convert it to a boolean.
+    pub fn read_bool(&self) -> Result<bool, Error> {
+        match self.read_int()? {
+            0 => Ok(false),
+            1 => Ok(true),
+            val @ _ => Err(error!("Unexpected value for bool: `{val}`")),
+        }
+    }
+
+    /// Read this allocation as a pointer and return whether it represents a `null` pointer.
+    pub fn is_null(&self) -> Result<bool, Error> {
+        let len = self.bytes.len();
+        let ptr_len = MachineInfo::target_pointer_width().bytes();
+        if len != ptr_len {
+            return Err(error!("Expected width of pointer (`{ptr_len}`), but found: `{len}`"));
+        }
+        Ok(self.read_uint()? == 0 && self.provenance.ptrs.is_empty())
+    }
+}
+
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub enum ConstantKind {
     Allocated(Allocation),
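Taken together, the new `Instance::try_const_eval` and the `Allocation` readers give StableMIR tools a way to evaluate an instance and decode the result. A hedged sketch follows; the import paths mirror the module layout above but are otherwise an assumption, and the function only works while a compiler context is installed, because the readers look up the target's endianness through `Context::target_info`.

```rust
// Hedged sketch, not part of the diff: decode a const-evaluable instance as a u128.
use stable_mir::mir::mono::Instance;
use stable_mir::ty::Ty;
use stable_mir::Error;

fn const_as_u128(instance: &Instance, const_ty: Ty) -> Result<u128, Error> {
    // `try_const_eval` routes through the new `Context::eval_instance` hook
    // and yields the constant's backing `Allocation`.
    let alloc = instance.try_const_eval(const_ty)?;
    // `read_uint` rejects allocations wider than 16 bytes or containing
    // uninitialized bytes, then decodes using the target's endianness.
    alloc.read_uint()
}
```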
