Fix lifetimes in StringReader
Two different lifetimes are conflated in `StringReader`. This doesn't matter right now, but it needs to be fixed for the next commit to work. The more descriptive lifetime names (`'sess` and `'src` instead of a single `'a`) also make the code easier to read.
parent fbe68bc40c
commit d02150fd45
3 changed files with 27 additions and 23 deletions
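Why the conflation matters is easier to see with a small example. The following is a minimal standalone sketch, not the rustc code itself: `Sess`, `DiagCtxt`, and `Reader` are illustrative stand-ins for `ParseSess`, `DiagCtxt`, and `StringReader`. With a single lifetime parameter, a reference derived from the session borrow is pinned to however long the source-text borrow lives; splitting the parameter into `'sess` and `'src` lets the two borrows vary independently, which mirrors the distinction this commit introduces.

// Illustrative sketch only: `Sess`, `DiagCtxt`, and `Reader` are stand-ins,
// not the real rustc types.

struct DiagCtxt;

struct Sess {
    dcx: DiagCtxt,
}

// Two independent lifetimes: the session borrow (`'sess`) and the source-text
// borrow (`'src`) can have different extents.
struct Reader<'sess, 'src> {
    sess: &'sess Sess,
    src: &'src str,
}

impl<'sess, 'src> Reader<'sess, 'src> {
    // Tied only to the session, not to the source text.
    fn dcx(&self) -> &'sess DiagCtxt {
        &self.sess.dcx
    }

    // Tied only to the source text, not to the session.
    fn rest(&self) -> &'src str {
        self.src
    }
}

fn main() {
    let sess = Sess { dcx: DiagCtxt };
    let dcx: &DiagCtxt;
    {
        // The source text lives only in this inner scope.
        let src = String::from("fn main() {}");
        let reader = Reader { sess: &sess, src: &src };
        assert_eq!(reader.rest(), "fn main() {}");
        // The diagnostic-context borrow escapes the scope, because it depends
        // only on `'sess`. With a single conflated lifetime `'a`, `dcx()`
        // would have to return `&'a DiagCtxt`, and this assignment would be
        // rejected ("`src` does not live long enough").
        dcx = reader.dcx();
    }
    let _ = dcx;
}

The same pattern shows up in the diff below: `dcx()`, which only touches the session, now returns `&'sess DiagCtxt`, while the `str_from*` helpers, which only touch the source text, now return `&'src str`.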
@@ -42,9 +42,9 @@ pub struct UnmatchedDelim {
     pub candidate_span: Option<Span>,
 }

-pub(crate) fn parse_token_trees<'a>(
-    sess: &'a ParseSess,
-    mut src: &'a str,
+pub(crate) fn parse_token_trees<'sess, 'src>(
+    sess: &'sess ParseSess,
+    mut src: &'src str,
     mut start_pos: BytePos,
     override_span: Option<Span>,
 ) -> Result<TokenStream, Vec<Diagnostic>> {
@@ -90,16 +90,16 @@ pub(crate) fn parse_token_trees<'a>(
     }
 }

-struct StringReader<'a> {
-    sess: &'a ParseSess,
+struct StringReader<'sess, 'src> {
+    sess: &'sess ParseSess,
     /// Initial position, read-only.
     start_pos: BytePos,
     /// The absolute offset within the source_map of the current character.
     pos: BytePos,
     /// Source text to tokenize.
-    src: &'a str,
+    src: &'src str,
     /// Cursor for getting lexer tokens.
-    cursor: Cursor<'a>,
+    cursor: Cursor<'src>,
     override_span: Option<Span>,
     /// When a "unknown start of token: \u{a0}" has already been emitted earlier
     /// in this file, it's safe to treat further occurrences of the non-breaking
@@ -107,8 +107,8 @@ struct StringReader<'a> {
     nbsp_is_whitespace: bool,
 }

-impl<'a> StringReader<'a> {
-    pub fn dcx(&self) -> &'a DiagCtxt {
+impl<'sess, 'src> StringReader<'sess, 'src> {
+    pub fn dcx(&self) -> &'sess DiagCtxt {
         &self.sess.dcx
     }

@@ -526,7 +526,7 @@ impl<'a> StringReader<'a> {

     /// Slice of the source text from `start` up to but excluding `self.pos`,
     /// meaning the slice does not include the character `self.ch`.
-    fn str_from(&self, start: BytePos) -> &'a str {
+    fn str_from(&self, start: BytePos) -> &'src str {
         self.str_from_to(start, self.pos)
     }

@@ -537,12 +537,12 @@ impl<'a> StringReader<'a> {
     }

     /// Slice of the source text spanning from `start` up to but excluding `end`.
-    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'a str {
+    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'src str {
         &self.src[self.src_index(start)..self.src_index(end)]
     }

     /// Slice of the source text spanning from `start` until the end
-    fn str_from_to_end(&self, start: BytePos) -> &'a str {
+    fn str_from_to_end(&self, start: BytePos) -> &'src str {
         &self.src[self.src_index(start)..]
     }

@@ -8,18 +8,18 @@ use rustc_ast_pretty::pprust::token_to_string;
 use rustc_errors::{Applicability, PErr};
 use rustc_span::symbol::kw;

-pub(super) struct TokenTreesReader<'a> {
-    string_reader: StringReader<'a>,
+pub(super) struct TokenTreesReader<'sess, 'src> {
+    string_reader: StringReader<'sess, 'src>,
     /// The "next" token, which has been obtained from the `StringReader` but
     /// not yet handled by the `TokenTreesReader`.
     token: Token,
     diag_info: TokenTreeDiagInfo,
 }

-impl<'a> TokenTreesReader<'a> {
+impl<'sess, 'src> TokenTreesReader<'sess, 'src> {
     pub(super) fn parse_all_token_trees(
-        string_reader: StringReader<'a>,
-    ) -> (TokenStream, Result<(), Vec<PErr<'a>>>, Vec<UnmatchedDelim>) {
+        string_reader: StringReader<'sess, 'src>,
+    ) -> (TokenStream, Result<(), Vec<PErr<'sess>>>, Vec<UnmatchedDelim>) {
         let mut tt_reader = TokenTreesReader {
             string_reader,
             token: Token::dummy(),
@@ -35,7 +35,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_trees(
         &mut self,
         is_delimited: bool,
-    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'a>>>) {
+    ) -> (Spacing, TokenStream, Result<(), Vec<PErr<'sess>>>) {
         // Move past the opening delimiter.
         let (_, open_spacing) = self.bump(false);

@@ -71,7 +71,7 @@ impl<'a> TokenTreesReader<'a> {
         }
     }

-    fn eof_err(&mut self) -> PErr<'a> {
+    fn eof_err(&mut self) -> PErr<'sess> {
         let msg = "this file contains an unclosed delimiter";
         let mut err = self.string_reader.sess.dcx.struct_span_err(self.token.span, msg);
         for &(_, sp) in &self.diag_info.open_braces {
@@ -99,7 +99,7 @@ impl<'a> TokenTreesReader<'a> {
     fn parse_token_tree_open_delim(
         &mut self,
         open_delim: Delimiter,
-    ) -> Result<TokenTree, Vec<PErr<'a>>> {
+    ) -> Result<TokenTree, Vec<PErr<'sess>>> {
         // The span for beginning of the delimited section
         let pre_span = self.token.span;

@@ -229,7 +229,11 @@ impl<'a> TokenTreesReader<'a> {
         (this_tok, this_spacing)
     }

-    fn unclosed_delim_err(&mut self, tts: TokenStream, mut errs: Vec<PErr<'a>>) -> Vec<PErr<'a>> {
+    fn unclosed_delim_err(
+        &mut self,
+        tts: TokenStream,
+        mut errs: Vec<PErr<'sess>>,
+    ) -> Vec<PErr<'sess>> {
         // If there are unclosed delims, see if there are diff markers and if so, point them
         // out instead of complaining about the unclosed delims.
         let mut parser = crate::stream_to_parser(self.string_reader.sess, tts, None);
@@ -285,7 +289,7 @@ impl<'a> TokenTreesReader<'a> {
         return errs;
     }

-    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'a> {
+    fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'sess> {
         // An unexpected closing delimiter (i.e., there is no
         // matching opening delimiter).
         let token_str = token_to_string(&self.token);
@@ -337,7 +337,7 @@ const ASCII_ARRAY: &[(&str, &str, Option<token::TokenKind>)] = &[
 ];

 pub(super) fn check_for_substitution(
-    reader: &StringReader<'_>,
+    reader: &StringReader<'_, '_>,
     pos: BytePos,
     ch: char,
     count: usize,