Diffstat (limited to 'compiler/rustc_parse/src')
-rw-r--r--  compiler/rustc_parse/src/lib.rs                    |   2
-rw-r--r--  compiler/rustc_parse/src/parser/attr.rs            |   4
-rw-r--r--  compiler/rustc_parse/src/parser/attr_wrapper.rs    | 194
-rw-r--r--  compiler/rustc_parse/src/parser/mod.rs             | 229
-rw-r--r--  compiler/rustc_parse/src/parser/mut_visit/tests.rs |  65
-rw-r--r--  compiler/rustc_parse/src/parser/tests.rs           |   6
6 files changed, 69 insertions, 431 deletions
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index e73d68e2037..d06922f1e04 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -5,13 +5,13 @@
 #![allow(rustc::diagnostic_outside_of_impl)]
 #![allow(rustc::untranslatable_diagnostic)]
 #![cfg_attr(bootstrap, feature(let_chains))]
-#![feature(array_windows)]
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(debug_closure_helpers)]
 #![feature(if_let_guard)]
 #![feature(iter_intersperse)]
 #![feature(string_from_utf8_lossy_owned)]
+#![recursion_limit = "256"]
 // tidy-alphabetical-end
 
 use std::path::{Path, PathBuf};
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index 53614049f08..41d3889c448 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -1,5 +1,6 @@
 use rustc_ast as ast;
 use rustc_ast::token::{self, MetaVarKind};
+use rustc_ast::tokenstream::ParserRange;
 use rustc_ast::{Attribute, attr};
 use rustc_errors::codes::*;
 use rustc_errors::{Diag, PResult};
@@ -8,8 +9,7 @@ use thin_vec::ThinVec;
 use tracing::debug;
 
 use super::{
-    AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, ParserRange, PathStyle, Trailing,
-    UsePreAttrPos,
+    AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle, Trailing, UsePreAttrPos,
 };
 use crate::{errors, exp, fluent_generated as fluent};
 
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
index 6061c9cb485..44fdf146f9c 100644
--- a/compiler/rustc_parse/src/parser/attr_wrapper.rs
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -1,21 +1,18 @@
 use std::borrow::Cow;
-use std::{iter, mem};
+use std::mem;
 
-use rustc_ast::token::{Delimiter, Token};
+use rustc_ast::token::Token;
 use rustc_ast::tokenstream::{
-    AttrTokenStream, AttrTokenTree, AttrsTarget, DelimSpacing, DelimSpan, LazyAttrTokenStream,
-    Spacing, ToAttrTokenStream,
+    AttrsTarget, LazyAttrTokenStream, NodeRange, ParserRange, Spacing, TokenCursor,
 };
 use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, HasTokens};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::PResult;
 use rustc_session::parse::ParseSess;
-use rustc_span::{DUMMY_SP, Span, sym};
+use rustc_span::{DUMMY_SP, sym};
+use thin_vec::ThinVec;
 
-use super::{
-    Capturing, FlatToken, ForceCollect, NodeRange, NodeReplacement, Parser, ParserRange,
-    TokenCursor, Trailing,
-};
+use super::{Capturing, ForceCollect, Parser, Trailing};
 
 // When collecting tokens, this fully captures the start point. Usually its
 // just after outer attributes, but occasionally it's before.
@@ -94,95 +91,6 @@ fn has_cfg_or_cfg_attr(attrs: &[Attribute]) -> bool {
     })
 }
 
-// From a value of this type we can reconstruct the `TokenStream` seen by the
-// `f` callback passed to a call to `Parser::collect_tokens`, by
-// replaying the getting of the tokens. This saves us producing a `TokenStream`
-// if it is never needed, e.g. a captured `macro_rules!` argument that is never
-// passed to a proc macro. In practice, token stream creation happens rarely
-// compared to calls to `collect_tokens` (see some statistics in #78736) so we
-// are doing as little up-front work as possible.
-//
-// This also makes `Parser` very cheap to clone, since
-// there is no intermediate collection buffer to clone.
-struct LazyAttrTokenStreamImpl {
-    start_token: (Token, Spacing),
-    cursor_snapshot: TokenCursor,
-    num_calls: u32,
-    break_last_token: u32,
-    node_replacements: Box<[NodeReplacement]>,
-}
-
-impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        // The token produced by the final call to `{,inlined_}next` was not
-        // actually consumed by the callback. The combination of chaining the
-        // initial token and using `take` produces the desired result - we
-        // produce an empty `TokenStream` if no calls were made, and omit the
-        // final token otherwise.
-        let mut cursor_snapshot = self.cursor_snapshot.clone();
-        let tokens = iter::once(FlatToken::Token(self.start_token))
-            .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
-            .take(self.num_calls as usize);
-
-        if self.node_replacements.is_empty() {
-            make_attr_token_stream(tokens, self.break_last_token)
-        } else {
-            let mut tokens: Vec<_> = tokens.collect();
-            let mut node_replacements = self.node_replacements.to_vec();
-            node_replacements.sort_by_key(|(range, _)| range.0.start);
-
-            #[cfg(debug_assertions)]
-            for [(node_range, tokens), (next_node_range, next_tokens)] in
-                node_replacements.array_windows()
-            {
-                assert!(
-                    node_range.0.end <= next_node_range.0.start
-                        || node_range.0.end >= next_node_range.0.end,
-                    "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
-                    node_range,
-                    tokens,
-                    next_node_range,
-                    next_tokens,
-                );
-            }
-
-            // Process the replace ranges, starting from the highest start
-            // position and working our way back. If have tokens like:
-            //
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // Then we will generate replace ranges for both
-            // the `#[cfg(FALSE)] field: bool` and the entire
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // By starting processing from the replace range with the greatest
-            // start position, we ensure that any (outer) replace range which
-            // encloses another (inner) replace range will fully overwrite the
-            // inner range's replacement.
-            for (node_range, target) in node_replacements.into_iter().rev() {
-                assert!(
-                    !node_range.0.is_empty(),
-                    "Cannot replace an empty node range: {:?}",
-                    node_range.0
-                );
-
-                // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s, plus
-                // enough `FlatToken::Empty`s to fill up the rest of the range. This keeps the
-                // total length of `tokens` constant throughout the replacement process, allowing
-                // us to do all replacements without adjusting indices.
-                let target_len = target.is_some() as usize;
-                tokens.splice(
-                    (node_range.0.start as usize)..(node_range.0.end as usize),
-                    target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
-                        iter::repeat(FlatToken::Empty).take(node_range.0.len() - target_len),
-                    ),
-                );
-            }
-            make_attr_token_stream(tokens.into_iter(), self.break_last_token)
-        }
-    }
-}
-
 impl<'a> Parser<'a> {
     pub(super) fn collect_pos(&self) -> CollectPos {
         CollectPos {
@@ -387,10 +295,10 @@ impl<'a> Parser<'a> {
 
         // This is hot enough for `deep-vector` that checking the conditions for an empty iterator
        // is measurably faster than actually executing the iterator.
-        let node_replacements: Box<[_]> = if parser_replacements_start == parser_replacements_end
+        let node_replacements = if parser_replacements_start == parser_replacements_end
             && inner_attr_parser_replacements.is_empty()
         {
-            Box::new([])
+            ThinVec::new()
         } else {
             // Grab any replace ranges that occur *inside* the current AST node. Convert them
             // from `ParserRange` form to `NodeRange` form. We will perform the actual
@@ -429,13 +337,13 @@ impl<'a> Parser<'a> {
         //   - `attrs`: includes the outer and the inner attr.
         //   - `tokens`: lazy tokens for `g` (with its inner attr deleted).
-        let tokens = LazyAttrTokenStream::new(LazyAttrTokenStreamImpl {
-            start_token: collect_pos.start_token,
-            cursor_snapshot: collect_pos.cursor_snapshot,
+        let tokens = LazyAttrTokenStream::new_pending(
+            collect_pos.start_token,
+            collect_pos.cursor_snapshot,
             num_calls,
-            break_last_token: self.break_last_token,
+            self.break_last_token,
             node_replacements,
-        });
+        );
 
         let mut tokens_used = false;
 
         // If in "definite capture mode" we need to register a replace range
@@ -483,71 +391,6 @@ impl<'a> Parser<'a> {
     }
 }
 
-/// Converts a flattened iterator of tokens (including open and close delimiter tokens) into an
-/// `AttrTokenStream`, creating an `AttrTokenTree::Delimited` for each matching pair of open and
-/// close delims.
-fn make_attr_token_stream(
-    iter: impl Iterator<Item = FlatToken>,
-    break_last_token: u32,
-) -> AttrTokenStream {
-    #[derive(Debug)]
-    struct FrameData {
-        // This is `None` for the first frame, `Some` for all others.
-        open_delim_sp: Option<(Delimiter, Span, Spacing)>,
-        inner: Vec<AttrTokenTree>,
-    }
-    // The stack always has at least one element. Storing it separately makes for shorter code.
-    let mut stack_top = FrameData { open_delim_sp: None, inner: vec![] };
-    let mut stack_rest = vec![];
-    for flat_token in iter {
-        match flat_token {
-            FlatToken::Token((token @ Token { kind, span }, spacing)) => {
-                if let Some(delim) = kind.open_delim() {
-                    stack_rest.push(mem::replace(
-                        &mut stack_top,
-                        FrameData { open_delim_sp: Some((delim, span, spacing)), inner: vec![] },
-                    ));
-                } else if let Some(delim) = kind.close_delim() {
-                    let frame_data = mem::replace(&mut stack_top, stack_rest.pop().unwrap());
-                    let (open_delim, open_sp, open_spacing) = frame_data.open_delim_sp.unwrap();
-                    assert!(
-                        open_delim.eq_ignoring_invisible_origin(&delim),
-                        "Mismatched open/close delims: open={open_delim:?} close={span:?}"
-                    );
-                    let dspan = DelimSpan::from_pair(open_sp, span);
-                    let dspacing = DelimSpacing::new(open_spacing, spacing);
-                    let stream = AttrTokenStream::new(frame_data.inner);
-                    let delimited = AttrTokenTree::Delimited(dspan, dspacing, delim, stream);
-                    stack_top.inner.push(delimited);
-                } else {
-                    stack_top.inner.push(AttrTokenTree::Token(token, spacing))
-                }
-            }
-            FlatToken::AttrsTarget(target) => {
-                stack_top.inner.push(AttrTokenTree::AttrsTarget(target))
-            }
-            FlatToken::Empty => {}
-        }
-    }
-
-    if break_last_token > 0 {
-        let last_token = stack_top.inner.pop().unwrap();
-        if let AttrTokenTree::Token(last_token, spacing) = last_token {
-            let (unglued, _) = last_token.kind.break_two_token_op(break_last_token).unwrap();
-
-            // Tokens are always ASCII chars, so we can use byte arithmetic here.
-            let mut first_span = last_token.span.shrink_to_lo();
-            first_span =
-                first_span.with_hi(first_span.lo() + rustc_span::BytePos(break_last_token));
-
-            stack_top.inner.push(AttrTokenTree::Token(Token::new(unglued, first_span), spacing));
-        } else {
-            panic!("Unexpected last token {last_token:?}")
-        }
-    }
-    AttrTokenStream::new(stack_top.inner)
-}
-
 /// Tokens are needed if:
 /// - any non-single-segment attributes (other than doc comments) are present,
 ///   e.g. `rustfmt::skip`; or
@@ -562,14 +405,3 @@ fn needs_tokens(attrs: &[ast::Attribute]) -> bool {
         }
     })
 }
-
-// Some types are used a lot. Make sure they don't unintentionally get bigger.
-#[cfg(target_pointer_width = "64")]
-mod size_asserts {
-    use rustc_data_structures::static_assert_size;
-
-    use super::*;
-    // tidy-alphabetical-start
-    static_assert_size!(LazyAttrTokenStreamImpl, 96);
-    // tidy-alphabetical-end
-}
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index 48df8b59d55..968376678f3 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -12,7 +12,6 @@ pub mod token_type;
 mod ty;
 
 use std::assert_matches::debug_assert_matches;
-use std::ops::Range;
 use std::{fmt, mem, slice};
 
 use attr_wrapper::{AttrWrapper, UsePreAttrPos};
@@ -25,7 +24,9 @@ use rustc_ast::ptr::P;
 use rustc_ast::token::{
     self, IdentIsRaw, InvisibleOrigin, MetaVarKind, NtExprKind, NtPatKind, Token, TokenKind,
 };
-use rustc_ast::tokenstream::{AttrsTarget, Spacing, TokenStream, TokenTree};
+use rustc_ast::tokenstream::{
+    ParserRange, ParserReplacement, Spacing, TokenCursor, TokenStream, TokenTree, TokenTreeCursor,
+};
 use rustc_ast::util::case::Case;
 use rustc_ast::{
     self as ast, AnonConst, AttrArgs, AttrId, ByRef, Const, CoroutineKind, DUMMY_NODE_ID,
@@ -37,7 +38,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{Applicability, Diag, FatalError, MultiSpan, PResult};
 use rustc_index::interval::IntervalSet;
 use rustc_session::parse::ParseSess;
-use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym};
+use rustc_span::{Ident, Span, Symbol, kw, sym};
 use thin_vec::ThinVec;
 use token_type::TokenTypeSet;
 pub use token_type::{ExpKeywordPair, ExpTokenPair, TokenType};
@@ -55,19 +56,64 @@ mod tests;
 mod tokenstream {
     mod tests;
 }
-#[cfg(test)]
-mod mut_visit {
-    mod tests;
-}
 
 bitflags::bitflags! {
+    /// Restrictions applied while parsing.
+    ///
+    /// The parser maintains a bitset of restrictions it will honor while
+    /// parsing. This is essentially used as a way of tracking state of what
+    /// is being parsed and to change behavior based on that.
     #[derive(Clone, Copy, Debug)]
     struct Restrictions: u8 {
+        /// Restricts expressions for use in statement position.
+        ///
+        /// When expressions are used in various places, like statements or
+        /// match arms, this is used to stop parsing once certain tokens are
+        /// reached.
+        ///
+        /// For example, `if true {} & 1` with `STMT_EXPR` in effect is parsed
+        /// as two separate expression statements (`if` and a reference to 1).
+        /// Otherwise it is parsed as a bitwise AND where `if` is on the left
+        /// and 1 is on the right.
         const STMT_EXPR = 1 << 0;
+        /// Do not allow struct literals.
+        ///
+        /// There are several places in the grammar where we don't want to
+        /// allow struct literals because they can require lookahead, or
+        /// otherwise could be ambiguous or cause confusion. For example,
+        /// `if Foo {} {}` isn't clear if it is `Foo{}` struct literal, or
+        /// just `Foo` is the condition, followed by a consequent block,
+        /// followed by an empty block.
+        ///
+        /// See [RFC 92](https://rust-lang.github.io/rfcs/0092-struct-grammar.html).
         const NO_STRUCT_LITERAL = 1 << 1;
+        /// Used to provide better error messages for const generic arguments.
+        ///
+        /// An un-braced const generic argument is limited to a very small
+        /// subset of expressions. This is used to detect the situation where
+        /// an expression outside of that subset is used, and to suggest to
+        /// wrap the expression in braces.
         const CONST_EXPR = 1 << 2;
+        /// Allows `let` expressions.
+        ///
+        /// `let pattern = scrutinee` is parsed as an expression, but it is
+        /// only allowed in let chains (`if` and `while` conditions).
+        /// Otherwise it is not an expression (note that `let` in statement
+        /// positions is treated as a `StmtKind::Let` statement, which has a
+        /// slightly different grammar).
         const ALLOW_LET = 1 << 3;
+        /// Used to detect a missing `=>` in a match guard.
+        ///
+        /// This is used for error handling in a match guard to give a better
+        /// error message if the `=>` is missing. It is set when parsing the
+        /// guard expression.
         const IN_IF_GUARD = 1 << 4;
+        /// Used to detect the incorrect use of expressions in patterns.
+        ///
+        /// This is used for error handling while parsing a pattern. During
+        /// error recovery, this will be set to try to parse the pattern as an
+        /// expression, but halts parsing the expression when reaching certain
+        /// tokens like `=`.
         const IS_PAT = 1 << 5;
     }
 }
@@ -187,57 +233,6 @@ struct ClosureSpans {
     body: Span,
 }
 
-/// A token range within a `Parser`'s full token stream.
-#[derive(Clone, Debug)]
-struct ParserRange(Range<u32>);
-
-/// A token range within an individual AST node's (lazy) token stream, i.e.
-/// relative to that node's first token. Distinct from `ParserRange` so the two
-/// kinds of range can't be mixed up.
-#[derive(Clone, Debug)]
-struct NodeRange(Range<u32>);
-
-/// Indicates a range of tokens that should be replaced by an `AttrsTarget`
-/// (replacement) or be replaced by nothing (deletion). This is used in two
-/// places during token collection.
-///
-/// 1. Replacement. During the parsing of an AST node that may have a
-///    `#[derive]` attribute, when we parse a nested AST node that has `#[cfg]`
-///    or `#[cfg_attr]`, we replace the entire inner AST node with
-///    `FlatToken::AttrsTarget`. This lets us perform eager cfg-expansion on an
-///    `AttrTokenStream`.
-///
-/// 2. Deletion. We delete inner attributes from all collected token streams,
-///    and instead track them through the `attrs` field on the AST node. This
-///    lets us manipulate them similarly to outer attributes. When we create a
-///    `TokenStream`, the inner attributes are inserted into the proper place
-///    in the token stream.
-///
-/// Each replacement starts off in `ParserReplacement` form but is converted to
-/// `NodeReplacement` form when it is attached to a single AST node, via
-/// `LazyAttrTokenStreamImpl`.
-type ParserReplacement = (ParserRange, Option<AttrsTarget>);
-
-/// See the comment on `ParserReplacement`.
-type NodeReplacement = (NodeRange, Option<AttrsTarget>);
-
-impl NodeRange {
-    // Converts a range within a parser's tokens to a range within a
-    // node's tokens beginning at `start_pos`.
-    //
-    // For example, imagine a parser with 50 tokens in its token stream, a
-    // function that spans `ParserRange(20..40)` and an inner attribute within
-    // that function that spans `ParserRange(30..35)`. We would find the inner
-    // attribute's range within the function's tokens by subtracting 20, which
-    // is the position of the function's start token. This gives
-    // `NodeRange(10..15)`.
-    fn new(ParserRange(parser_range): ParserRange, start_pos: u32) -> NodeRange {
-        assert!(!parser_range.is_empty());
-        assert!(parser_range.start >= start_pos);
-        NodeRange((parser_range.start - start_pos)..(parser_range.end - start_pos))
-    }
-}
-
 /// Controls how we capture tokens. Capturing can be expensive,
 /// so we try to avoid performing capturing in cases where
 /// we will never need an `AttrTokenStream`.
@@ -260,104 +255,6 @@ struct CaptureState {
     seen_attrs: IntervalSet<AttrId>,
 }
 
-#[derive(Clone, Debug)]
-struct TokenTreeCursor {
-    stream: TokenStream,
-    /// Points to the current token tree in the stream. In `TokenCursor::curr`,
-    /// this can be any token tree. In `TokenCursor::stack`, this is always a
-    /// `TokenTree::Delimited`.
-    index: usize,
-}
-
-impl TokenTreeCursor {
-    #[inline]
-    fn new(stream: TokenStream) -> Self {
-        TokenTreeCursor { stream, index: 0 }
-    }
-
-    #[inline]
-    fn curr(&self) -> Option<&TokenTree> {
-        self.stream.get(self.index)
-    }
-
-    fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
-        self.stream.get(self.index + n)
-    }
-
-    #[inline]
-    fn bump(&mut self) {
-        self.index += 1;
-    }
-}
-
-/// A `TokenStream` cursor that produces `Token`s. It's a bit odd that
-/// we (a) lex tokens into a nice tree structure (`TokenStream`), and then (b)
-/// use this type to emit them as a linear sequence. But a linear sequence is
-/// what the parser expects, for the most part.
-#[derive(Clone, Debug)]
-struct TokenCursor {
-    // Cursor for the current (innermost) token stream. The index within the
-    // cursor can point to any token tree in the stream (or one past the end).
-    // The delimiters for this token stream are found in `self.stack.last()`;
-    // if that is `None` we are in the outermost token stream which never has
-    // delimiters.
-    curr: TokenTreeCursor,
-
-    // Token streams surrounding the current one. The index within each cursor
-    // always points to a `TokenTree::Delimited`.
-    stack: Vec<TokenTreeCursor>,
-}
-
-impl TokenCursor {
-    fn next(&mut self) -> (Token, Spacing) {
-        self.inlined_next()
-    }
-
-    /// This always-inlined version should only be used on hot code paths.
-    #[inline(always)]
-    fn inlined_next(&mut self) -> (Token, Spacing) {
-        loop {
-            // FIXME: we currently don't return `Delimiter::Invisible` open/close delims. To fix
-            // #67062 we will need to, whereupon the `delim != Delimiter::Invisible` conditions
-            // below can be removed.
-            if let Some(tree) = self.curr.curr() {
-                match tree {
-                    &TokenTree::Token(token, spacing) => {
-                        debug_assert!(!token.kind.is_delim());
-                        let res = (token, spacing);
-                        self.curr.bump();
-                        return res;
-                    }
-                    &TokenTree::Delimited(sp, spacing, delim, ref tts) => {
-                        let trees = TokenTreeCursor::new(tts.clone());
-                        self.stack.push(mem::replace(&mut self.curr, trees));
-                        if !delim.skip() {
-                            return (Token::new(delim.as_open_token_kind(), sp.open), spacing.open);
-                        }
-                        // No open delimiter to return; continue on to the next iteration.
-                    }
-                };
-            } else if let Some(parent) = self.stack.pop() {
-                // We have exhausted this token stream. Move back to its parent token stream.
-                let Some(&TokenTree::Delimited(span, spacing, delim, _)) = parent.curr() else {
-                    panic!("parent should be Delimited")
-                };
-                self.curr = parent;
-                self.curr.bump(); // move past the `Delimited`
-                if !delim.skip() {
-                    return (Token::new(delim.as_close_token_kind(), span.close), spacing.close);
-                }
-                // No close delimiter to return; continue on to the next iteration.
-            } else {
-                // We have exhausted the outermost token stream. The use of
-                // `Spacing::Alone` is arbitrary and immaterial, because the
-                // `Eof` token's spacing is never used.
-                return (Token::new(token::Eof, DUMMY_SP), Spacing::Alone);
-            }
-        }
-    }
-}
-
 /// A sequence separator.
 #[derive(Debug)]
 struct SeqSep<'a> {
@@ -1742,26 +1639,6 @@ impl<'a> Parser<'a> {
     }
 }
 
-/// A helper struct used when building an `AttrTokenStream` from
-/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
-/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
-/// is then 'parsed' to build up an `AttrTokenStream` with nested
-/// `AttrTokenTree::Delimited` tokens.
-#[derive(Debug, Clone)]
-enum FlatToken {
-    /// A token - this holds both delimiter (e.g. '{' and '}')
-    /// and non-delimiter tokens
-    Token((Token, Spacing)),
-    /// Holds the `AttrsTarget` for an AST node. The `AttrsTarget` is inserted
-    /// directly into the constructed `AttrTokenStream` as an
-    /// `AttrTokenTree::AttrsTarget`.
-    AttrsTarget(AttrsTarget),
-    /// A special 'empty' token that is ignored during the conversion
-    /// to an `AttrTokenStream`. This is used to simplify the
-    /// handling of replace ranges.
-    Empty,
-}
-
 // Metavar captures of various kinds.
 #[derive(Clone, Debug)]
 pub enum ParseNtResult {
diff --git a/compiler/rustc_parse/src/parser/mut_visit/tests.rs b/compiler/rustc_parse/src/parser/mut_visit/tests.rs
deleted file mode 100644
index 46c678c3902..00000000000
--- a/compiler/rustc_parse/src/parser/mut_visit/tests.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use rustc_ast as ast;
-use rustc_ast::mut_visit::MutVisitor;
-use rustc_ast_pretty::pprust;
-use rustc_span::{Ident, create_default_session_globals_then};
-
-use crate::parser::tests::{matches_codepattern, string_to_crate};
-
-// This version doesn't care about getting comments or doc-strings in.
-fn print_crate_items(krate: &ast::Crate) -> String {
-    krate.items.iter().map(|i| pprust::item_to_string(i)).collect::<Vec<_>>().join(" ")
-}
-
-// Change every identifier to "zz".
-struct ToZzIdentMutVisitor;
-
-impl MutVisitor for ToZzIdentMutVisitor {
-    const VISIT_TOKENS: bool = true;
-
-    fn visit_ident(&mut self, ident: &mut Ident) {
-        *ident = Ident::from_str("zz");
-    }
-}
-
-macro_rules! assert_matches_codepattern {
-    ($a:expr , $b:expr) => {{
-        let a_val = $a;
-        let b_val = $b;
-        if !matches_codepattern(&a_val, &b_val) {
-            panic!("expected args satisfying `matches_codepattern`, got {} and {}", a_val, b_val);
-        }
-    }};
-}
-
-// Make sure idents get transformed everywhere.
-#[test]
-fn ident_transformation() {
-    create_default_session_globals_then(|| {
-        let mut zz_visitor = ToZzIdentMutVisitor;
-        let mut krate =
-            string_to_crate("#[a] mod b {fn c (d : e, f : g) {h!(i,j,k);l;m}}".to_string());
-        zz_visitor.visit_crate(&mut krate);
-        assert_matches_codepattern!(
-            print_crate_items(&krate),
-            "#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string()
-        );
-    })
-}
-
-// Make sure idents get transformed even inside macro defs.
-#[test]
-fn ident_transformation_in_defs() {
-    create_default_session_globals_then(|| {
-        let mut zz_visitor = ToZzIdentMutVisitor;
-        let mut krate = string_to_crate(
-            "macro_rules! a {(b $c:expr $(d $e:token)f+ => \
-            (g $(d $d $e)+))} "
-                .to_string(),
-        );
-        zz_visitor.visit_crate(&mut krate);
-        assert_matches_codepattern!(
-            print_crate_items(&krate),
-            "macro_rules! zz{(zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+))}".to_string()
-        );
-    })
-}
diff --git a/compiler/rustc_parse/src/parser/tests.rs b/compiler/rustc_parse/src/parser/tests.rs
index 8285070839a..2a44c90abc1 100644
--- a/compiler/rustc_parse/src/parser/tests.rs
+++ b/compiler/rustc_parse/src/parser/tests.rs
@@ -95,12 +95,6 @@ pub(crate) fn string_to_stream(source_str: String) -> TokenStream {
     ))
 }
 
-/// Parses a string, returns a crate.
-pub(crate) fn string_to_crate(source_str: String) -> ast::Crate {
-    let psess = psess();
-    with_error_checking_parse(source_str, &psess, |p| p.parse_crate_mod())
-}
-
 /// Does the given string match the pattern? whitespace in the first string
 /// may be deleted or replaced with other whitespace to match the pattern.
 /// This function is relatively Unicode-ignorant; fortunately, the careful design
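
Note on the deleted `NodeRange::new` (the type is now imported from
`rustc_ast::tokenstream`): the conversion is a plain rebase of parser-relative
token positions against the node's start token. A minimal standalone sketch of
that arithmetic, with a hypothetical `node_range` helper standing in for the
real constructor:

use std::ops::Range;

// A node-relative token range, mirroring the `NodeRange` newtype in the diff.
#[derive(Debug, PartialEq)]
struct NodeRange(Range<u32>);

// Rebase a parser-wide range against the node's start position, with the same
// assertions the deleted code carried.
fn node_range(parser_range: Range<u32>, start_pos: u32) -> NodeRange {
    assert!(!parser_range.is_empty());
    assert!(parser_range.start >= start_pos);
    NodeRange((parser_range.start - start_pos)..(parser_range.end - start_pos))
}

fn main() {
    // The worked example from the deleted comment: a function spanning parser
    // tokens 20..40 with an inner attribute at 30..35 sees that attribute at
    // 10..15 in its own token stream.
    assert_eq!(node_range(30..35, 20), NodeRange(10..15));
}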
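
The deleted `to_attr_token_stream` relies on an index-stability trick that is
easy to miss: each replaced range is overwritten by at most one target token
plus enough `Empty` fillers to keep the vector's length constant, so outer
(enclosing) ranges can still be spliced using their original indices. A toy
model of just that trick, using a hypothetical `Tok` enum rather than the
compiler's `FlatToken`:

use std::iter;
use std::ops::Range;

#[derive(Clone, Debug, PartialEq)]
enum Tok {
    Plain(char),
    Target, // stands in for `FlatToken::AttrsTarget(..)`
    Empty,  // stands in for `FlatToken::Empty`
}

// Overwrite `range` with zero or one `Target` plus `Empty` padding; the
// vector's length is unchanged afterwards.
fn replace_range(tokens: &mut Vec<Tok>, range: Range<usize>, target: Option<Tok>) {
    let pad = range.len() - target.is_some() as usize;
    tokens.splice(range, target.into_iter().chain(iter::repeat(Tok::Empty).take(pad)));
}

fn main() {
    let mut toks: Vec<Tok> = "abcdef".chars().map(Tok::Plain).collect();
    // Inner replacement first (higher start position), as in the deleted loop
    // that walks replacements in reverse order of start position.
    replace_range(&mut toks, 2..5, Some(Tok::Target));
    assert_eq!(toks.len(), 6); // indices for any outer range are still valid
    // An enclosing replacement can now splice 0..6 without index adjustment.
    replace_range(&mut toks, 0..6, Some(Tok::Target));
    assert_eq!(toks, [Tok::Target, Tok::Empty, Tok::Empty, Tok::Empty, Tok::Empty, Tok::Empty]);
}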
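
The new doc comments on `Restrictions` describe flags that compose as a
bitset. For readers unfamiliar with the pattern, a minimal sketch using the
same `bitflags` crate the parser uses (only two of the documented flags are
shown, and only the combination rules are illustrated, not parser behavior):

bitflags::bitflags! {
    #[derive(Clone, Copy, Debug)]
    struct Restrictions: u8 {
        const STMT_EXPR = 1 << 0;
        const NO_STRUCT_LITERAL = 1 << 1;
    }
}

fn main() {
    // Entering an `if` condition adds NO_STRUCT_LITERAL on top of whatever
    // restrictions are already in force, and drops it again afterwards.
    let outer = Restrictions::STMT_EXPR;
    let in_cond = outer | Restrictions::NO_STRUCT_LITERAL;
    assert!(in_cond.contains(Restrictions::STMT_EXPR));
    assert!(in_cond.contains(Restrictions::NO_STRUCT_LITERAL));
    let back = in_cond - Restrictions::NO_STRUCT_LITERAL;
    assert!(!back.contains(Restrictions::NO_STRUCT_LITERAL));
}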
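
Finally, the deleted `TokenCursor` (also now imported from
`rustc_ast::tokenstream`) is the one piece with non-obvious control flow: a
tree-shaped `TokenStream` is replayed as a flat token sequence by keeping a
cursor into the current stream plus a stack of parent cursors, emitting open
and close delimiter tokens on descent and ascent. A self-contained toy version
with hypothetical `Tree` and `Event` types in place of the real ones:

use std::mem;

#[derive(Clone)]
enum Tree {
    Leaf(char),
    Delimited(Vec<Tree>),
}

#[derive(Debug, PartialEq)]
enum Event {
    Open,
    Tok(char),
    Close,
    Eof,
}

// (stream, index) pairs: `curr` is the innermost stream; in `stack`, the
// index always points at the `Delimited` we descended into.
struct Cursor {
    curr: (Vec<Tree>, usize),
    stack: Vec<(Vec<Tree>, usize)>,
}

impl Cursor {
    fn new(stream: Vec<Tree>) -> Self {
        Cursor { curr: (stream, 0), stack: Vec::new() }
    }

    fn next(&mut self) -> Event {
        match self.curr.0.get(self.curr.1) {
            Some(&Tree::Leaf(c)) => {
                self.curr.1 += 1;
                Event::Tok(c)
            }
            Some(Tree::Delimited(inner)) => {
                // Descend: the old cursor (still pointing at the Delimited)
                // is pushed so that ascent can resume just after it.
                let inner = inner.clone();
                self.stack.push(mem::replace(&mut self.curr, (inner, 0)));
                Event::Open
            }
            None => match self.stack.pop() {
                Some(parent) => {
                    self.curr = parent;
                    self.curr.1 += 1; // move past the `Delimited`
                    Event::Close
                }
                None => Event::Eof,
            },
        }
    }
}

fn main() {
    // `a ( b ) c` as a token tree:
    let stream =
        vec![Tree::Leaf('a'), Tree::Delimited(vec![Tree::Leaf('b')]), Tree::Leaf('c')];
    let mut cursor = Cursor::new(stream);
    let mut events = Vec::new();
    loop {
        let ev = cursor.next();
        let eof = ev == Event::Eof;
        events.push(ev);
        if eof {
            break;
        }
    }
    use Event::*;
    assert_eq!(events, [Tok('a'), Open, Tok('b'), Close, Tok('c'), Eof]);
}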
