Diffstat (limited to 'compiler/rustc_parse/src/lib.rs')
| -rw-r--r-- | compiler/rustc_parse/src/lib.rs | 390 |
1 file changed, 31 insertions, 359 deletions
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 44999c9b63a..9abffbacfc3 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -7,22 +7,18 @@
 use rustc_ast as ast;
 use rustc_ast::attr::HasAttrs;
-use rustc_ast::token::{self, DelimToken, Nonterminal, Token, TokenKind};
-use rustc_ast::tokenstream::{self, LazyTokenStream, TokenStream, TokenTree};
+use rustc_ast::token::{self, Nonterminal};
+use rustc_ast::tokenstream::{self, CanSynthesizeMissingTokens, LazyTokenStream, TokenStream};
 use rustc_ast_pretty::pprust;
-use rustc_data_structures::fx::FxHashSet;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{Diagnostic, FatalError, Level, PResult};
 use rustc_session::parse::ParseSess;
-use rustc_span::{symbol::kw, FileName, SourceFile, Span, DUMMY_SP};
+use rustc_span::{FileName, SourceFile, Span};
 
-use smallvec::SmallVec;
-use std::cell::RefCell;
-use std::mem;
 use std::path::Path;
 use std::str;
-use tracing::{debug, info};
+use tracing::debug;
 
 pub const MACRO_ARGUMENTS: Option<&str> = Some("macro arguments");
 
@@ -237,7 +233,12 @@ pub fn parse_in<'a, T>(
 
 // NOTE(Centril): The following probably shouldn't be here but it acknowledges the
 // fact that architecturally, we are using parsing (read on below to understand why).
-pub fn nt_to_tokenstream(nt: &Nonterminal, sess: &ParseSess, span: Span) -> TokenStream {
+pub fn nt_to_tokenstream(
+    nt: &Nonterminal,
+    sess: &ParseSess,
+    span: Span,
+    synthesize_tokens: CanSynthesizeMissingTokens,
+) -> TokenStream {
     // A `Nonterminal` is often a parsed AST item. At this point we now
     // need to convert the parsed AST to an actual token stream, e.g.
     // un-parse it basically.
@@ -255,9 +256,11 @@ pub fn nt_to_tokenstream(nt: &Nonterminal, sess: &ParseSess, span: Span) -> Toke
         |tokens: Option<&LazyTokenStream>| tokens.as_ref().map(|t| t.create_token_stream());
 
     let tokens = match *nt {
-        Nonterminal::NtItem(ref item) => prepend_attrs(&item.attrs, item.tokens.as_ref()),
+        Nonterminal::NtItem(ref item) => {
+            prepend_attrs(sess, &item.attrs, nt, span, item.tokens.as_ref())
+        }
         Nonterminal::NtBlock(ref block) => convert_tokens(block.tokens.as_ref()),
-        Nonterminal::NtStmt(ref stmt) => prepend_attrs(stmt.attrs(), stmt.tokens()),
+        Nonterminal::NtStmt(ref stmt) => prepend_attrs(sess, stmt.attrs(), nt, span, stmt.tokens()),
         Nonterminal::NtPat(ref pat) => convert_tokens(pat.tokens.as_ref()),
         Nonterminal::NtTy(ref ty) => convert_tokens(ty.tokens.as_ref()),
         Nonterminal::NtIdent(ident, is_raw) => {
@@ -274,376 +277,45 @@ pub fn nt_to_tokenstream(nt: &Nonterminal, sess: &ParseSess, span: Span) -> Toke
             if expr.tokens.is_none() {
                 debug!("missing tokens for expr {:?}", expr);
             }
-            prepend_attrs(&expr.attrs, expr.tokens.as_ref())
+            prepend_attrs(sess, &expr.attrs, nt, span, expr.tokens.as_ref())
         }
     };
 
-    // Caches the stringification of 'good' `TokenStreams` which passed
-    // `tokenstream_probably_equal_for_proc_macro`. This allows us to avoid
-    // repeatedly stringifying and comparing the same `TokenStream` for deeply
-    // nested nonterminals.
-    //
-    // We cache by the strinification instead of the `TokenStream` to avoid
-    // needing to implement `Hash` for `TokenStream`. Note that it's possible to
-    // have two distinct `TokenStream`s that stringify to the same result
-    // (e.g. if they differ only in hygiene information). However, any
-    // information lost during the stringification process is also intentionally
-    // ignored by `tokenstream_probably_equal_for_proc_macro`, so it's fine
-    // that a single cache entry may 'map' to multiple distinct `TokenStream`s.
-    //
-    // This is a temporary hack to prevent compilation blowup on certain inputs.
-    // The entire pretty-print/retokenize process will be removed soon.
-    thread_local! {
-        static GOOD_TOKEN_CACHE: RefCell<FxHashSet<String>> = Default::default();
-    }
-
-    // FIXME(#43081): Avoid this pretty-print + reparse hack
-    // Pretty-print the AST struct without inserting any parenthesis
-    // beyond those explicitly written by the user (e.g. `ExpnKind::Paren`).
-    // The resulting stream may have incorrect precedence, but it's only
-    // ever used for a comparison against the capture tokenstream.
-    let source = pprust::nonterminal_to_string_no_extra_parens(nt);
-    let filename = FileName::macro_expansion_source_code(&source);
-    let reparsed_tokens = parse_stream_from_source_str(filename, source.clone(), sess, Some(span));
-
-    // During early phases of the compiler the AST could get modified
-    // directly (e.g., attributes added or removed) and the internal cache
-    // of tokens my not be invalidated or updated. Consequently if the
-    // "lossless" token stream disagrees with our actual stringification
-    // (which has historically been much more battle-tested) then we go
-    // with the lossy stream anyway (losing span information).
-    //
-    // Note that the comparison isn't `==` here to avoid comparing spans,
-    // but it *also* is a "probable" equality which is a pretty weird
-    // definition. We mostly want to catch actual changes to the AST
-    // like a `#[cfg]` being processed or some weird `macro_rules!`
-    // expansion.
-    //
-    // What we *don't* want to catch is the fact that a user-defined
-    // literal like `0xf` is stringified as `15`, causing the cached token
-    // stream to not be literal `==` token-wise (ignoring spans) to the
-    // token stream we got from stringification.
-    //
-    // Instead the "probably equal" check here is "does each token
-    // recursively have the same discriminant?" We basically don't look at
-    // the token values here and assume that such fine grained token stream
-    // modifications, including adding/removing typically non-semantic
-    // tokens such as extra braces and commas, don't happen.
     if let Some(tokens) = tokens {
-        if GOOD_TOKEN_CACHE.with(|cache| cache.borrow().contains(&source)) {
-            return tokens;
-        }
-
-        // Compare with a non-relaxed delim match to start.
-        if tokenstream_probably_equal_for_proc_macro(&tokens, &reparsed_tokens, sess, false) {
-            GOOD_TOKEN_CACHE.with(|cache| cache.borrow_mut().insert(source.clone()));
-            return tokens;
-        }
-
-        // The check failed. This time, we pretty-print the AST struct with parenthesis
-        // inserted to preserve precedence. This may cause `None`-delimiters in the captured
-        // token stream to match up with inserted parenthesis in the reparsed stream.
-        let source_with_parens = pprust::nonterminal_to_string(nt);
-        let filename_with_parens = FileName::macro_expansion_source_code(&source_with_parens);
-
-        if GOOD_TOKEN_CACHE.with(|cache| cache.borrow().contains(&source_with_parens)) {
-            return tokens;
-        }
-
-        let reparsed_tokens_with_parens = parse_stream_from_source_str(
-            filename_with_parens,
-            source_with_parens,
-            sess,
-            Some(span),
-        );
-
-        // Compare with a relaxed delim match - we want inserted parenthesis in the
-        // reparsed stream to match `None`-delimiters in the original stream.
-        if tokenstream_probably_equal_for_proc_macro(
-            &tokens,
-            &reparsed_tokens_with_parens,
-            sess,
-            true,
-        ) {
-            GOOD_TOKEN_CACHE.with(|cache| cache.borrow_mut().insert(source.clone()));
-            return tokens;
-        }
-
-        info!(
-            "cached tokens found, but they're not \"probably equal\", \
-            going with stringified version"
-        );
-        info!("cached tokens: {}", pprust::tts_to_string(&tokens));
-        info!("reparsed tokens: {}", pprust::tts_to_string(&reparsed_tokens_with_parens));
-
-        info!("cached tokens debug: {:?}", tokens);
-        info!("reparsed tokens debug: {:?}", reparsed_tokens_with_parens);
-    }
-    reparsed_tokens
-}
-
-// See comments in `Nonterminal::to_tokenstream` for why we care about
-// *probably* equal here rather than actual equality
-//
-// This is otherwise the same as `eq_unspanned`, only recursing with a
-// different method.
-pub fn tokenstream_probably_equal_for_proc_macro(
-    tokens: &TokenStream,
-    reparsed_tokens: &TokenStream,
-    sess: &ParseSess,
-    relaxed_delim_match: bool,
-) -> bool {
-    // When checking for `probably_eq`, we ignore certain tokens that aren't
-    // preserved in the AST. Because they are not preserved, the pretty
-    // printer arbitrarily adds or removes them when printing as token
-    // streams, making a comparison between a token stream generated from an
-    // AST and a token stream which was parsed into an AST more reliable.
-    fn semantic_tree(tree: &TokenTree) -> bool {
-        if let TokenTree::Token(token) = tree {
-            if let
-                // The pretty printer tends to add trailing commas to
-                // everything, and in particular, after struct fields.
-                | token::Comma
-                // The pretty printer collapses many semicolons into one.
-                | token::Semi
-                // We don't preserve leading `|` tokens in patterns, so
-                // we ignore them entirely
-                | token::BinOp(token::BinOpToken::Or)
-                // We don't preserve trailing '+' tokens in trait bounds,
-                // so we ignore them entirely
-                | token::BinOp(token::BinOpToken::Plus)
-                // The pretty printer can turn `$crate` into `::crate_name`
-                | token::ModSep = token.kind {
-                return false;
-            }
-        }
-        true
-    }
-
-    // When comparing two `TokenStream`s, we ignore the `IsJoint` information.
-    //
-    // However, `rustc_parse::lexer::tokentrees::TokenStreamBuilder` will
-    // use `Token.glue` on adjacent tokens with the proper `IsJoint`.
-    // Since we are ignoreing `IsJoint`, a 'glued' token (e.g. `BinOp(Shr)`)
-    // and its 'split'/'unglued' compoenents (e.g. `Gt, Gt`) are equivalent
-    // when determining if two `TokenStream`s are 'probably equal'.
-    //
-    // Therefore, we use `break_two_token_op` to convert all tokens
-    // to the 'unglued' form (if it exists). This ensures that two
-    // `TokenStream`s which differ only in how their tokens are glued
-    // will be considered 'probably equal', which allows us to keep spans.
-    //
-    // This is important when the original `TokenStream` contained
-    // extra spaces (e.g. `f :: < Vec < _ > > ( ) ;'). These extra spaces
-    // will be omitted when we pretty-print, which can cause the original
-    // and reparsed `TokenStream`s to differ in the assignment of `IsJoint`,
-    // leading to some tokens being 'glued' together in one stream but not
-    // the other. See #68489 for more details.
-    fn break_tokens(tree: TokenTree) -> impl Iterator<Item = TokenTree> {
-        // In almost all cases, we should have either zero or one levels
-        // of 'unglueing'. However, in some unusual cases, we may need
-        // to iterate breaking tokens mutliple times. For example:
-        // '[BinOpEq(Shr)] => [Gt, Ge] -> [Gt, Gt, Eq]'
-        let mut token_trees: SmallVec<[_; 2]>;
-        if let TokenTree::Token(token) = tree {
-            let mut out = SmallVec::<[_; 2]>::new();
-            out.push(token);
-            // Iterate to fixpoint:
-            // * We start off with 'out' containing our initial token, and `temp` empty
-            // * If we are able to break any tokens in `out`, then `out` will have
-            //   at least one more element than 'temp', so we will try to break tokens
-            //   again.
-            // * If we cannot break any tokens in 'out', we are done
-            loop {
-                let mut temp = SmallVec::<[_; 2]>::new();
-                let mut changed = false;
-
-                for token in out.into_iter() {
-                    if let Some((first, second)) = token.kind.break_two_token_op() {
-                        temp.push(Token::new(first, DUMMY_SP));
-                        temp.push(Token::new(second, DUMMY_SP));
-                        changed = true;
-                    } else {
-                        temp.push(token);
-                    }
-                }
-                out = temp;
-                if !changed {
-                    break;
-                }
-            }
-            token_trees = out.into_iter().map(TokenTree::Token).collect();
-        } else {
-            token_trees = SmallVec::new();
-            token_trees.push(tree);
-        }
-        token_trees.into_iter()
-    }
-
-    fn expand_token(tree: TokenTree, sess: &ParseSess) -> impl Iterator<Item = TokenTree> {
-        // When checking tokenstreams for 'probable equality', we are comparing
-        // a captured (from parsing) `TokenStream` to a reparsed tokenstream.
-        // The reparsed Tokenstream will never have `None`-delimited groups,
-        // since they are only ever inserted as a result of macro expansion.
-        // Therefore, inserting a `None`-delimtied group here (when we
-        // convert a nested `Nonterminal` to a tokenstream) would cause
-        // a mismatch with the reparsed tokenstream.
-        //
-        // Note that we currently do not handle the case where the
-        // reparsed stream has a `Parenthesis`-delimited group
-        // inserted. This will cause a spurious mismatch:
-        // issue #75734 tracks resolving this.
-
-        let expanded: SmallVec<[_; 1]> =
-            if let TokenTree::Token(Token { kind: TokenKind::Interpolated(nt), span }) = &tree {
-                nt_to_tokenstream(nt, sess, *span)
-                    .into_trees()
-                    .flat_map(|t| expand_token(t, sess))
-                    .collect()
-            } else {
-                // Filter before and after breaking tokens,
-                // since we may want to ignore both glued and unglued tokens.
-                std::iter::once(tree)
-                    .filter(semantic_tree)
-                    .flat_map(break_tokens)
-                    .filter(semantic_tree)
-                    .collect()
-            };
-        expanded.into_iter()
-    }
-
-    // Break tokens after we expand any nonterminals, so that we break tokens
-    // that are produced as a result of nonterminal expansion.
-    let tokens = tokens.trees().flat_map(|t| expand_token(t, sess));
-    let reparsed_tokens = reparsed_tokens.trees().flat_map(|t| expand_token(t, sess));
-
-    tokens.eq_by(reparsed_tokens, |t, rt| {
-        tokentree_probably_equal_for_proc_macro(&t, &rt, sess, relaxed_delim_match)
-    })
-}
-
-// See comments in `Nonterminal::to_tokenstream` for why we care about
-// *probably* equal here rather than actual equality
-//
-// This is otherwise the same as `eq_unspanned`, only recursing with a
-// different method.
-pub fn tokentree_probably_equal_for_proc_macro(
-    token: &TokenTree,
-    reparsed_token: &TokenTree,
-    sess: &ParseSess,
-    relaxed_delim_match: bool,
-) -> bool {
-    match (token, reparsed_token) {
-        (TokenTree::Token(token), TokenTree::Token(reparsed_token)) => {
-            token_probably_equal_for_proc_macro(token, reparsed_token)
-        }
-        (
-            TokenTree::Delimited(_, delim, tokens),
-            TokenTree::Delimited(_, reparsed_delim, reparsed_tokens),
-        ) if delim == reparsed_delim => tokenstream_probably_equal_for_proc_macro(
-            tokens,
-            reparsed_tokens,
-            sess,
-            relaxed_delim_match,
-        ),
-        (TokenTree::Delimited(_, DelimToken::NoDelim, tokens), reparsed_token) => {
-            if relaxed_delim_match {
-                if let TokenTree::Delimited(_, DelimToken::Paren, reparsed_tokens) = reparsed_token
-                {
-                    if tokenstream_probably_equal_for_proc_macro(
-                        tokens,
-                        reparsed_tokens,
-                        sess,
-                        relaxed_delim_match,
-                    ) {
-                        return true;
-                    }
-                }
-            }
-            tokens.len() == 1
-                && tokentree_probably_equal_for_proc_macro(
-                    &tokens.trees().next().unwrap(),
-                    reparsed_token,
-                    sess,
-                    relaxed_delim_match,
-                )
-        }
-        _ => false,
+        return tokens;
+    } else if matches!(synthesize_tokens, CanSynthesizeMissingTokens::Yes) {
+        return fake_token_stream(sess, nt, span);
+    } else {
+        let pretty = rustc_ast_pretty::pprust::nonterminal_to_string_no_extra_parens(&nt);
+        panic!("Missing tokens at {:?} for nt {:?}", span, pretty);
     }
 }
 
-// See comments in `Nonterminal::to_tokenstream` for why we care about
-// *probably* equal here rather than actual equality
-fn token_probably_equal_for_proc_macro(first: &Token, other: &Token) -> bool {
-    if mem::discriminant(&first.kind) != mem::discriminant(&other.kind) {
-        return false;
-    }
-    use rustc_ast::token::TokenKind::*;
-    match (&first.kind, &other.kind) {
-        (&Eq, &Eq)
-        | (&Lt, &Lt)
-        | (&Le, &Le)
-        | (&EqEq, &EqEq)
-        | (&Ne, &Ne)
-        | (&Ge, &Ge)
-        | (&Gt, &Gt)
-        | (&AndAnd, &AndAnd)
-        | (&OrOr, &OrOr)
-        | (&Not, &Not)
-        | (&Tilde, &Tilde)
-        | (&At, &At)
-        | (&Dot, &Dot)
-        | (&DotDot, &DotDot)
-        | (&DotDotDot, &DotDotDot)
-        | (&DotDotEq, &DotDotEq)
-        | (&Comma, &Comma)
-        | (&Semi, &Semi)
-        | (&Colon, &Colon)
-        | (&ModSep, &ModSep)
-        | (&RArrow, &RArrow)
-        | (&LArrow, &LArrow)
-        | (&FatArrow, &FatArrow)
-        | (&Pound, &Pound)
-        | (&Dollar, &Dollar)
-        | (&Question, &Question)
-        | (&Eof, &Eof) => true,
-
-        (&BinOp(a), &BinOp(b)) | (&BinOpEq(a), &BinOpEq(b)) => a == b,
-
-        (&OpenDelim(a), &OpenDelim(b)) | (&CloseDelim(a), &CloseDelim(b)) => a == b,
-
-        (&DocComment(a1, a2, a3), &DocComment(b1, b2, b3)) => a1 == b1 && a2 == b2 && a3 == b3,
-
-        (&Literal(a), &Literal(b)) => a == b,
-
-        (&Lifetime(a), &Lifetime(b)) => a == b,
-        (&Ident(a, b), &Ident(c, d)) => {
-            b == d && (a == c || a == kw::DollarCrate || c == kw::DollarCrate)
-        }
-
-        (&Interpolated(..), &Interpolated(..)) => panic!("Unexpanded Interpolated!"),
-
-        _ => panic!("forgot to add a token?"),
-    }
+pub fn fake_token_stream(sess: &ParseSess, nt: &Nonterminal, span: Span) -> TokenStream {
+    let source = pprust::nonterminal_to_string(nt);
+    let filename = FileName::macro_expansion_source_code(&source);
+    parse_stream_from_source_str(filename, source, sess, Some(span))
 }
 
 fn prepend_attrs(
+    sess: &ParseSess,
     attrs: &[ast::Attribute],
+    nt: &Nonterminal,
+    span: Span,
     tokens: Option<&tokenstream::LazyTokenStream>,
 ) -> Option<tokenstream::TokenStream> {
-    let tokens = tokens?.create_token_stream();
     if attrs.is_empty() {
-        return Some(tokens);
+        return Some(tokens?.create_token_stream());
    }
     let mut builder = tokenstream::TokenStreamBuilder::new();
     for attr in attrs {
         // FIXME: Correctly handle tokens for inner attributes.
         // For now, we fall back to reparsing the original AST node
         if attr.style == ast::AttrStyle::Inner {
-            return None;
+            return Some(fake_token_stream(sess, nt, span));
         }
         builder.push(attr.tokens());
     }
-    builder.push(tokens);
+    builder.push(tokens?.create_token_stream());
     Some(builder.build())
 }
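
Taken together, the added lines replace the pretty-print/compare machinery with a three-way decision in `nt_to_tokenstream`. The following is an illustrative condensation of that control flow, not code from the patch: `resolve_tokens` is an invented name, `captured_tokens` stands in for the `Option<TokenStream>` produced by the `match *nt` above, and `CanSynthesizeMissingTokens` is assumed to be the simple yes/no flag implied by the `matches!` check.

use rustc_ast::token::Nonterminal;
use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream};
use rustc_parse::fake_token_stream;
use rustc_session::parse::ParseSess;
use rustc_span::Span;

// Illustrative sketch only: condenses the tail of the new `nt_to_tokenstream`.
fn resolve_tokens(
    captured_tokens: Option<TokenStream>,
    synthesize_tokens: CanSynthesizeMissingTokens,
    sess: &ParseSess,
    nt: &Nonterminal,
    span: Span,
) -> TokenStream {
    match captured_tokens {
        // Tokens captured during parsing always win: they keep spans and hygiene.
        Some(tokens) => tokens,
        // No captured tokens, but the caller allows synthesis: pretty-print the
        // nonterminal and reparse it (`fake_token_stream`), trading away exact
        // spans and hygiene information.
        None if matches!(synthesize_tokens, CanSynthesizeMissingTokens::Yes) => {
            fake_token_stream(sess, nt, span)
        }
        // Otherwise the caller promised tokens exist, so missing tokens are a bug.
        None => panic!("missing tokens at {:?}", span),
    }
}

The same trade-off shows up in `prepend_attrs`: when an inner attribute makes the captured token stream unusable, it now returns a `fake_token_stream` for the whole node instead of returning `None` and forcing the caller to reparse.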

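On the caller side, every use of `nt_to_tokenstream` now has to state whether synthesis is acceptable. Below is a minimal, hypothetical call-site sketch; the wrapper name and its boolean parameter are invented for illustration, and a `No` variant of `CanSynthesizeMissingTokens` is assumed (only `Yes` appears in this diff).

use rustc_ast::token::Nonterminal;
use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream};
use rustc_session::parse::ParseSess;
use rustc_span::Span;

// Hypothetical wrapper, not part of the patch.
fn tokens_for_nt(
    nt: &Nonterminal,
    sess: &ParseSess,
    span: Span,
    allow_pretty_print_fallback: bool,
) -> TokenStream {
    let synth = if allow_pretty_print_fallback {
        CanSynthesizeMissingTokens::Yes
    } else {
        CanSynthesizeMissingTokens::No // assumed variant; see note above
    };
    // Panics inside `nt_to_tokenstream` if the node carries no captured tokens
    // and synthesis is not allowed.
    rustc_parse::nt_to_tokenstream(nt, sess, span, synth)
}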