about summary refs log tree commit diff
diff options
context:
space:
mode:
author	Vadim Petrochenkov <vadim.petrochenkov@gmail.com>	2020-03-07 16:34:29 +0300
committer	Vadim Petrochenkov <vadim.petrochenkov@gmail.com>	2020-03-09 12:42:41 +0300
commit	5d7f67d3b109e95fb0dca8f773a2146db4eb4a93 (patch)
tree	8babcc69a09b4ca3ebf03b4b359e340001a749a1
parent	43b27df5b211bc0fae74f34834ae84d17215c5ac (diff)
download	rust-5d7f67d3b109e95fb0dca8f773a2146db4eb4a93.tar.gz
download	rust-5d7f67d3b109e95fb0dca8f773a2146db4eb4a93.zip
rustc_parse: Remove `Parser::normalized(_prev)_token`
-rw-r--r--	src/librustc_parse/lib.rs	5
-rw-r--r--	src/librustc_parse/parser/mod.rs	42
2 files changed, 7 insertions(+), 40 deletions(-)
diff --git a/src/librustc_parse/lib.rs b/src/librustc_parse/lib.rs
index 25f9f8fd3ad..10d524776a1 100644
--- a/src/librustc_parse/lib.rs
+++ b/src/librustc_parse/lib.rs
@@ -4,7 +4,7 @@
 #![feature(crate_visibility_modifier)]
 
 use rustc_ast::ast;
-use rustc_ast::token::{self, Nonterminal, Token};
+use rustc_ast::token::{self, Nonterminal};
 use rustc_ast::tokenstream::{self, TokenStream, TokenTree};
 use rustc_ast_pretty::pprust;
 use rustc_data_structures::sync::Lrc;
@@ -171,8 +171,7 @@ fn maybe_source_file_to_parser(
     let mut parser = stream_to_parser(sess, stream, None);
     parser.unclosed_delims = unclosed_delims;
     if parser.token == token::Eof {
-        let span = Span::new(end_pos, end_pos, parser.token.span.ctxt());
-        parser.set_token(Token::new(token::Eof, span));
+        parser.token.span = Span::new(end_pos, end_pos, parser.token.span.ctxt());
     }
 
     Ok(parser)
diff --git a/src/librustc_parse/parser/mod.rs b/src/librustc_parse/parser/mod.rs
index 252a80431ac..9376c7c1c72 100644
--- a/src/librustc_parse/parser/mod.rs
+++ b/src/librustc_parse/parser/mod.rs
@@ -88,21 +88,10 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
 #[derive(Clone)]
 pub struct Parser<'a> {
     pub sess: &'a ParseSess,
-    /// The current non-normalized token.
+    /// The current token.
     pub token: Token,
-    /// The current normalized token.
-    /// "Normalized" means that some interpolated tokens
-    /// (`$i: ident` and `$l: lifetime` meta-variables) are replaced
-    /// with non-interpolated identifier and lifetime tokens they refer to.
-    /// Use this if you need to check for `token::Ident` or `token::Lifetime` specifically,
-    /// this also includes edition checks for edition-specific keyword identifiers.
-    pub normalized_token: Token,
-    /// The previous non-normalized token.
+    /// The previous token.
     pub prev_token: Token,
-    /// The previous normalized token.
-    /// Use this if you need to check for `token::Ident` or `token::Lifetime` specifically,
-    /// this also includes edition checks for edition-specific keyword identifiers.
-    pub normalized_prev_token: Token,
     restrictions: Restrictions,
     /// Used to determine the path to externally loaded source files.
     pub(super) directory: Directory,
@@ -374,9 +363,7 @@ impl<'a> Parser<'a> {
         let mut parser = Parser {
             sess,
             token: Token::dummy(),
-            normalized_token: Token::dummy(),
             prev_token: Token::dummy(),
-            normalized_prev_token: Token::dummy(),
             restrictions: Restrictions::empty(),
             recurse_into_file_modules,
             directory: Directory {
@@ -609,7 +596,7 @@ impl<'a> Parser<'a> {
             Some((first, second)) if first == expected => {
                 let first_span = self.sess.source_map().start_point(self.token.span);
                 let second_span = self.token.span.with_lo(first_span.hi());
-                self.set_token(Token::new(first, first_span));
+                self.token = Token::new(first, first_span);
                 self.bump_with(Token::new(second, second_span));
                 true
             }
@@ -817,23 +804,6 @@ impl<'a> Parser<'a> {
         self.parse_delim_comma_seq(token::Paren, f)
     }
 
-    // Interpolated identifier (`$i: ident`) and lifetime (`$l: lifetime`)
-    // tokens are replaced with usual identifier and lifetime tokens,
-    // so the former are never encountered during normal parsing.
-    crate fn set_token(&mut self, token: Token) {
-        self.token = token;
-        self.normalized_token = match &self.token.kind {
-            token::Interpolated(nt) => match **nt {
-                token::NtIdent(ident, is_raw) => {
-                    Token::new(token::Ident(ident.name, is_raw), ident.span)
-                }
-                token::NtLifetime(ident) => Token::new(token::Lifetime(ident.name), ident.span),
-                _ => self.token.clone(),
-            },
-            _ => self.token.clone(),
-        }
-    }
-
     /// Advance the parser by one token using provided token as the next one.
     fn bump_with(&mut self, next_token: Token) {
         // Bumping after EOF is a bad sign, usually an infinite loop.
@@ -843,9 +813,7 @@ impl<'a> Parser<'a> {
         }
 
         // Update the current and previous tokens.
-        self.prev_token = self.token.take();
-        self.normalized_prev_token = self.normalized_token.take();
-        self.set_token(next_token);
+        self.prev_token = mem::replace(&mut self.token, next_token);
 
         // Diagnostics.
         self.expected_tokens.clear();
@@ -1005,7 +973,7 @@ impl<'a> Parser<'a> {
                     &mut self.token_cursor.frame,
                     self.token_cursor.stack.pop().unwrap(),
                 );
-                self.set_token(Token::new(TokenKind::CloseDelim(frame.delim), frame.span.close));
+                self.token = Token::new(TokenKind::CloseDelim(frame.delim), frame.span.close);
                 self.bump();
                 TokenTree::Delimited(frame.span, frame.delim, frame.tree_cursor.stream)
             }