Diffstat (limited to 'compiler/rustc_parse/src')
-rw-r--r--   compiler/rustc_parse/src/lexer/mod.rs            | 24
-rw-r--r--   compiler/rustc_parse/src/lexer/tokentrees.rs     | 24
-rw-r--r--   compiler/rustc_parse/src/lexer/unicode_chars.rs  |  2
3 files changed, 27 insertions, 23 deletions
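
The diff below replaces the lexer's single 'a lifetime with two independent parameters: 'sess for the borrow of the ParseSess (and its DiagCtxt) and 'src for the borrow of the source text being tokenized. As a minimal sketch of the type-level shape, using hypothetical stand-in types rather than the real rustc ones:

// Hypothetical stand-in for ParseSess; illustration only.
struct Sess;

// Before: one lifetime ties the session borrow and the source borrow together,
// so both are forced down to the shorter of the two regions.
#[allow(dead_code)]
struct ReaderBefore<'a> {
    sess: &'a Sess,
    src: &'a str,
}

// After: the two borrows vary independently, mirroring StringReader<'sess, 'src>.
struct ReaderAfter<'sess, 'src> {
    sess: &'sess Sess,
    src: &'src str,
}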
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index 7db9291921f..9626d4e8171 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -42,9 +42,9 @@ pub struct UnmatchedDelim {
     pub candidate_span: Option<Span>,
 }
 
-pub(crate) fn parse_token_trees<'a>(
-    sess: &'a ParseSess,
-    mut src: &'a str,
+pub(crate) fn parse_token_trees<'sess, 'src>(
+    sess: &'sess ParseSess,
+    mut src: &'src str,
     mut start_pos: BytePos,
     override_span: Option<Span>,
 ) -> Result<TokenStream, Vec<Diagnostic>> {
@@ -90,16 +90,16 @@ pub(crate) fn parse_token_trees<'a>(
     }
 }
 
-struct StringReader<'a> {
-    sess: &'a ParseSess,
+struct StringReader<'sess, 'src> {
+    sess: &'sess ParseSess,
     /// Initial position, read-only.
     start_pos: BytePos,
     /// The absolute offset within the source_map of the current character.
     pos: BytePos,
     /// Source text to tokenize.
-    src: &'a str,
+    src: &'src str,
     /// Cursor for getting lexer tokens.
-    cursor: Cursor<'a>,
+    cursor: Cursor<'src>,
     override_span: Option<Span>,
     /// When a "unknown start of token: \u{a0}" has already been emitted earlier
     /// in this file, it's safe to treat further occurrences of the non-breaking
@@ -107,8 +107,8 @@ struct StringReader<'a> {
     nbsp_is_whitespace: bool,
 }
 
-impl<'a> StringReader<'a> {
-    pub fn dcx(&self) -> &'a DiagCtxt {
+impl<'sess, 'src> StringReader<'sess, 'src> {
+    pub fn dcx(&self) -> &'sess DiagCtxt {
         &self.sess.dcx
     }
 
@@ -526,7 +526,7 @@ impl<'a> StringReader<'a> {
 
     /// Slice of the source text from `start` up to but excluding `self.pos`,
     /// meaning the slice does not include the character `self.ch`.
-    fn str_from(&self, start: BytePos) -> &'a str {
+    fn str_from(&self, start: BytePos) -> &'src str {
         self.str_from_to(start, self.pos)
     }
 
@@ -537,12 +537,12 @@ impl<'a> StringReader<'a> {
     }
 
     /// Slice of the source text spanning from `start` up to but excluding `end`.
-    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'a str {
+    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'src str {
         &self.src[self.src_index(start)..self.src_index(end)]
     }
 
     /// Slice of the source text spanning from `start` until the end
-    fn str_from_to_end(&self, start: BytePos) -> &'a str {
+    fn str_from_to_end(&self, start: BytePos) -> &'src str {
         &self.src[self.src_index(start)..]
     }
 
diff --git a/compiler/rustc_parse/src/lexer/tokentrees.rs b/compiler/rustc_parse/src/lexer/tokentrees.rs
index 64b3928689a..c9ff2d58e2c 100644
--- a/compiler/rustc_parse/src/lexer/tokentrees.rs
+++ b/compiler/rustc_parse/src/lexer/tokentrees.rs
@@ -8,18 +8,18 @@ use rustc_ast_pretty::pprust::token_to_string;
 use rustc_errors::{Applicability, PErr};
 use rustc_span::symbol::kw;
 
-pub(super) struct TokenTreesReader<'a> {
-    string_reader: StringReader<'a>,
+pub(super) struct TokenTreesReader<'sess, 'src> {
+    string_reader: StringReader<'sess, 'src>,
     /// The "next" token, which has been obtained from the `StringReader` but
     /// not yet handled by the `TokenTreesReader`.
     token: Token,
     diag_info: TokenTreeDiagInfo,
 }
 
-impl<'a> TokenTreesReader<'a> {
+impl<'sess, 'src> TokenTreesReader<'sess, 'src> {
     pub(super) fn parse_all_token_trees(
-        string_reader: StringReader<'a>,
-    ) -> (TokenStream, Result<(), Vec<PErr<'a>>>, Vec<UnmatchedDelim>) {
+        string_reader: StringReader<'sess, 'src>,
+    ) -> (TokenStream, Result<(), Vec<PErr<'sess>>>, Vec<UnmatchedDelim>) {
         let mut tt_reader = TokenTreesReader {
             string_reader,
             token: Token::dummy(),
@@ -35,7 +35,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_trees(
         &mut self,
         is_delimited: bool,
-    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'a>>>) {
+    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'sess>>>) {
         // Move past the opening delimiter.
         let (_, open_spacing) = self.bump(false);
 
@@ -71,7 +71,7 @@ impl<'a> TokenTreesReader<'a> {
         }
     }
 
-    fn eof_err(&mut self) -> PErr<'a> {
+    fn eof_err(&mut self) -> PErr<'sess> {
         let msg = "this file contains an unclosed delimiter";
         let mut err = self.string_reader.sess.dcx.struct_span_err(self.token.span, msg);
         for &(_, sp) in &self.diag_info.open_braces {
@@ -99,7 +99,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_tree_open_delim(
         &mut self,
         open_delim: Delimiter,
-    ) -> Result<TokenTree, Vec<PErr<'a>>> {
+    ) -> Result<TokenTree, Vec<PErr<'sess>>> {
         // The span for beginning of the delimited section
         let pre_span = self.token.span;
 
@@ -229,7 +229,11 @@ impl<'a> TokenTreesReader<'a> {
         (this_tok, this_spacing)
     }
 
-    fn unclosed_delim_err(&mut self, tts: TokenStream, mut errs: Vec<PErr<'a>>) -> Vec<PErr<'a>> {
+    fn unclosed_delim_err(
+        &mut self,
+        tts: TokenStream,
+        mut errs: Vec<PErr<'sess>>,
+    ) -> Vec<PErr<'sess>> {
         // If there are unclosed delims, see if there are diff markers and if so, point them
         // out instead of complaining about the unclosed delims.
         let mut parser = crate::stream_to_parser(self.string_reader.sess, tts, None);
@@ -285,7 +289,7 @@ impl<'a> TokenTreesReader<'a> {
         return errs;
     }
 
-    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'a> {
+    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'sess> {
         // An unexpected closing delimiter (i.e., there is no
         // matching opening delimiter).
         let token_str = token_to_string(&self.token);
diff --git a/compiler/rustc_parse/src/lexer/unicode_chars.rs b/compiler/rustc_parse/src/lexer/unicode_chars.rs
index dac7569e385..a136abaa28b 100644
--- a/compiler/rustc_parse/src/lexer/unicode_chars.rs
+++ b/compiler/rustc_parse/src/lexer/unicode_chars.rs
@@ -337,7 +337,7 @@ const ASCII_ARRAY: &[(&str, &str, Option<token::TokenKind>)] = &[
 ];
 
 pub(super) fn check_for_substitution(
-    reader: &StringReader<'_>,
+    reader: &StringReader<'_, '_>,
     pos: BytePos,
     ch: char,
     count: usize,
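
Continuing the hypothetical ReaderAfter sketch above: one practical payoff of the split is that a slice of the source carries only 'src, so it remains usable after the session borrow ends, which matches how str_from, str_from_to and str_from_to_end now return &'src str while dcx returns &'sess DiagCtxt. A small demonstration under the same stand-in types:

impl<'sess, 'src> ReaderAfter<'sess, 'src> {
    // Mirrors dcx(): the result carries 'sess, not the shorter &self borrow.
    fn sess(&self) -> &'sess Sess {
        self.sess
    }

    // Mirrors str_from_to_end: the returned slice borrows only the source text.
    fn tail(&self, start: usize) -> &'src str {
        &self.src[start..]
    }
}

fn main() {
    let src = String::from("fn main() {}");
    let tail;
    {
        let sess = Sess; // deliberately shorter-lived than src
        let reader = ReaderAfter { sess: &sess, src: &src };
        let _session: &Sess = reader.sess(); // usable while sess is alive
        tail = reader.tail(3); // &'src str, tied only to src
    } // sess (and reader) go out of scope here
    println!("{tail}"); // still valid: only src is borrowed
}

With the single-lifetime ReaderBefore, the unified 'a would be capped by the sess borrow, and the final use of tail would be rejected by the borrow checker.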
