pub mod attr;
mod expr;
mod item;
mod module;
pub use module::{ModulePath, ModulePathSuccess};
mod pat;
mod path;
mod ty;
pub use path::PathStyle;
mod diagnostics;
mod generics;
mod stmt;
use diagnostics::Error;

use crate::lexer::UnmatchedBrace;
use crate::{Directory, DirectoryOwnership};

use log::debug;
use rustc_ast_pretty::pprust;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, FatalError, PResult};
use rustc_session::parse::ParseSess;
use rustc_span::source_map::respan;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::{FileName, Span, DUMMY_SP};
use syntax::ast::{self, AttrStyle, AttrVec, CrateSugar, Extern, Ident, Unsafety, DUMMY_NODE_ID};
use syntax::ast::{IsAsync, MacArgs, MacDelimiter, Mutability, StrLit, Visibility, VisibilityKind};
use syntax::ptr::P;
use syntax::token::{self, DelimToken, Token, TokenKind};
use syntax::tokenstream::{self, DelimSpan, TokenStream, TokenTree, TreeAndJoint};
use syntax::util::comments::{doc_comment_style, strip_doc_comment_decoration};

use std::borrow::Cow;
use std::path::PathBuf;
use std::{cmp, mem, slice};

bitflags::bitflags! {
    struct Restrictions: u8 {
        const STMT_EXPR = 1 << 0;
        const NO_STRUCT_LITERAL = 1 << 1;
    }
}

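// Commentary (illustrative, not from the original source): these flags are
// toggled while parsing particular grammar positions. For example, the parser
// sets `NO_STRUCT_LITERAL` while parsing the condition of an `if`, so that in
// `if x { ... }` the `{` is read as the start of the block rather than as a
// struct literal `x { ... }`.
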
#[derive(Clone, Copy, PartialEq, Debug)]
enum SemiColonMode {
    Break,
    Ignore,
    Comma,
}

#[derive(Clone, Copy, PartialEq, Debug)]
enum BlockMode {
    Break,
    Ignore,
}

/// Like `maybe_whole_expr`, but for things other than expressions.
#[macro_export]
macro_rules! maybe_whole {
    ($p:expr, $constructor:ident, |$x:ident| $e:expr) => {
        if let token::Interpolated(nt) = &$p.token.kind {
            if let token::$constructor(x) = &**nt {
                let $x = x.clone();
                $p.bump();
                return Ok($e);
            }
        }
    };
}

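// Illustrative use (a hedged sketch, not a call site from this file): a method
// parsing a block could start with `maybe_whole!(self, NtBlock, |x| x);` so
// that an already-parsed `$b:block` meta-variable is returned as-is instead of
// being re-parsed token by token.
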
/// If the next tokens are ill-formed `$ty::` recover them as `<$ty>::`.
#[macro_export]
macro_rules! maybe_recover_from_interpolated_ty_qpath {
    ($self: expr, $allow_qpath_recovery: expr) => {
        if $allow_qpath_recovery && $self.look_ahead(1, |t| t == &token::ModSep) {
            if let token::Interpolated(nt) = &$self.token.kind {
                if let token::NtTy(ty) = &**nt {
                    let ty = ty.clone();
                    $self.bump();
                    return $self.maybe_recover_from_bad_qpath_stage_2($self.prev_span, ty);
                }
            }
        }
    };
}

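// Illustrative recovery (hedged example): with a `ty` meta-variable `$ty`,
// input such as `$ty::CONST` is ill-formed, since an arbitrary type cannot be
// a path prefix; the macro above re-parses it as the qualified path
// `<$ty>::CONST` via `maybe_recover_from_bad_qpath_stage_2` instead of
// failing outright.
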
#[derive(Debug, Clone, Copy, PartialEq)]
enum PrevTokenKind {
    DocComment,
    Comma,
    Plus,
    Interpolated,
    Eof,
    Ident,
    BitOr,
    Other,
}

#[derive(Clone)]
pub struct Parser<'a> {
    pub sess: &'a ParseSess,
    /// The current normalized token.
    /// "Normalized" means that some interpolated tokens
    /// (`$i: ident` and `$l: lifetime` meta-variables) are replaced
    /// with non-interpolated identifier and lifetime tokens they refer to.
    /// Use span from this token if you need an isolated span.
    pub token: Token,
    /// The current non-normalized token if it's different from `token`.
    /// Prefer accessing it through the `unnormalized_token()` getter.
    /// Use span from this token if you need to concatenate it with some neighbouring spans.
    unnormalized_token: Option<Token>,
    /// The previous normalized token.
    /// Use span from this token if you need an isolated span.
    prev_token: Token,
    /// The previous non-normalized token if it's different from `prev_token`.
    /// Prefer accessing it through the `unnormalized_prev_token()` getter.
    /// Use span from this token if you need to concatenate it with some neighbouring spans.
    unnormalized_prev_token: Option<Token>,
    /// Equivalent to `prev_token.kind` in simplified form.
    /// FIXME: Remove in favor of `(unnormalized_)prev_token().kind`.
    prev_token_kind: PrevTokenKind,
    /// Equivalent to `unnormalized_prev_token().span`.
    /// FIXME: Remove in favor of `(unnormalized_)prev_token().span`.
    pub prev_span: Span,
    restrictions: Restrictions,
    /// Used to determine the path to externally loaded source files.
    pub(super) directory: Directory<'a>,
    /// `true` to parse sub-modules in other files.
    // Public for rustfmt usage.
    pub recurse_into_file_modules: bool,
    /// Name of the root module this parser originated from. If `None`, then the
    /// name is not known. This does not change while the parser is descending
    /// into modules, and sub-parsers have new values for this name.
    pub root_module_name: Option<String>,
    expected_tokens: Vec<TokenType>,
    token_cursor: TokenCursor,
    desugar_doc_comments: bool,
    /// `true` if we should configure out-of-line modules as we parse.
    // Public for rustfmt usage.
    pub cfg_mods: bool,
    /// This field is used to keep track of how many left angle brackets we have seen. This is
    /// required in order to detect extra leading left angle brackets (`<` characters) and error
    /// appropriately.
    ///
    /// See the comments in the `parse_path_segment` function for more details.
    unmatched_angle_bracket_count: u32,
    max_angle_bracket_count: u32,
    /// A list of all unclosed delimiters found by the lexer. If an entry is used for error recovery
    /// it gets removed from here. Every entry left at the end gets emitted as an independent
    /// error.
    pub(super) unclosed_delims: Vec<UnmatchedBrace>,
    last_unexpected_token_span: Option<Span>,
    pub last_type_ascription: Option<(Span, bool /* likely path typo */)>,
    /// If present, this `Parser` is not parsing Rust code but rather a macro call.
    subparser_name: Option<&'static str>,
}

impl<'a> Drop for Parser<'a> {
    fn drop(&mut self) {
        emit_unclosed_delims(&mut self.unclosed_delims, &self.sess);
    }
}

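// Commentary (rationale, hedged): emitting in `Drop` ensures the
// unclosed-delimiter errors collected by the lexer are reported even when the
// parser is discarded before finishing normally, e.g. after an unrelated
// fatal error cuts parsing short.
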
#[derive(Clone)]
struct TokenCursor {
    frame: TokenCursorFrame,
    stack: Vec<TokenCursorFrame>,
}

#[derive(Clone)]
struct TokenCursorFrame {
    delim: token::DelimToken,
    span: DelimSpan,
    open_delim: bool,
    tree_cursor: tokenstream::Cursor,
    close_delim: bool,
    last_token: LastToken,
}

/// This is used in `TokenCursorFrame` above to track tokens that are consumed
/// by the parser, and then that's transitively used to record the tokens that
/// each parsed AST item is created with.
///
/// Right now this has two states, either collecting tokens or not collecting
/// tokens. If we're collecting tokens, we just save everything off into a local
/// `Vec`. Eventually, though, this should likely save tokens from the original
/// token stream and just use slicing of token streams to avoid the creation of
/// a whole new vector.
///
/// The second state is where we're passively not recording tokens, but the last
/// token is still tracked for when we want to start recording tokens. This
/// "last token" means that when we start recording tokens we'll want to ensure
/// that this, the first token, is included in the output.
///
/// You can find some more example usage of this in the `collect_tokens` method
/// on the parser.
#[derive(Clone)]
enum LastToken {
    Collecting(Vec<TreeAndJoint>),
    Was(Option<TreeAndJoint>),
}

impl TokenCursorFrame {
    fn new(span: DelimSpan, delim: DelimToken, tts: &TokenStream) -> Self {
        TokenCursorFrame {
            delim,
            span,
            open_delim: delim == token::NoDelim,
            tree_cursor: tts.clone().into_trees(),
            close_delim: delim == token::NoDelim,
            last_token: LastToken::Was(None),
        }
    }
}

impl TokenCursor {
    fn next(&mut self) -> Token {
        loop {
            let tree = if !self.frame.open_delim {
                self.frame.open_delim = true;
                TokenTree::open_tt(self.frame.span, self.frame.delim)
            } else if let Some(tree) = self.frame.tree_cursor.next() {
                tree
            } else if !self.frame.close_delim {
                self.frame.close_delim = true;
                TokenTree::close_tt(self.frame.span, self.frame.delim)
            } else if let Some(frame) = self.stack.pop() {
                self.frame = frame;
                continue;
            } else {
                return Token::new(token::Eof, DUMMY_SP);
            };

            match self.frame.last_token {
                LastToken::Collecting(ref mut v) => v.push(tree.clone().into()),
                LastToken::Was(ref mut t) => *t = Some(tree.clone().into()),
            }

            match tree {
                TokenTree::Token(token) => return token,
                TokenTree::Delimited(sp, delim, tts) => {
                    let frame = TokenCursorFrame::new(sp, delim, &tts);
                    self.stack.push(mem::replace(&mut self.frame, frame));
                }
            }
        }
    }

    fn next_desugared(&mut self) -> Token {
        let (name, sp) = match self.next() {
            Token { kind: token::DocComment(name), span } => (name, span),
            tok => return tok,
        };

        let stripped = strip_doc_comment_decoration(&name.as_str());

        // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
        // required to wrap the text.
        let mut num_of_hashes = 0;
        let mut count = 0;
        for ch in stripped.chars() {
            count = match ch {
                '"' => 1,
                '#' if count > 0 => count + 1,
                _ => 0,
            };
            num_of_hashes = cmp::max(num_of_hashes, count);
        }

        let delim_span = DelimSpan::from_single(sp);
        let body = TokenTree::Delimited(
            delim_span,
            token::Bracket,
            [
                TokenTree::token(token::Ident(sym::doc, false), sp),
                TokenTree::token(token::Eq, sp),
                TokenTree::token(
                    TokenKind::lit(token::StrRaw(num_of_hashes), Symbol::intern(&stripped), None),
                    sp,
                ),
            ]
            .iter()
            .cloned()
            .collect::<TokenStream>()
            .into(),
        );

        self.stack.push(mem::replace(
            &mut self.frame,
            TokenCursorFrame::new(
                delim_span,
                token::NoDelim,
                &if doc_comment_style(&name.as_str()) == AttrStyle::Inner {
                    [TokenTree::token(token::Pound, sp), TokenTree::token(token::Not, sp), body]
                        .iter()
                        .cloned()
                        .collect::<TokenStream>()
                } else {
                    [TokenTree::token(token::Pound, sp), body]
                        .iter()
                        .cloned()
                        .collect::<TokenStream>()
                },
            ),
        ));

        self.next()
    }
}

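// Desugaring sketch (illustrative): with `desugar_doc_comments` enabled, a
// comment like `/// Hi "there"` reaches the parser as the attribute
// `#[doc = r#" Hi "there""#]`. The hash-counting loop above computes the
// minimal number of `#`s for the raw string so that quotes and hash runs in
// the comment text survive unescaped.
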
#[derive(Clone, PartialEq)]
enum TokenType {
    Token(TokenKind),
    Keyword(Symbol),
    Operator,
    Lifetime,
    Ident,
    Path,
    Type,
    Const,
}

impl TokenType {
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`", pprust::token_kind_to_string(t)),
            TokenType::Keyword(kw) => format!("`{}`", kw),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Lifetime => "lifetime".to_string(),
            TokenType::Ident => "identifier".to_string(),
            TokenType::Path => "path".to_string(),
            TokenType::Type => "type".to_string(),
            TokenType::Const => "const".to_string(),
        }
    }
}

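// Why the parser tracks expected token types (context preserved from the
// commit that introduced `expected_tokens`): for `let x: [int ..4];` the
// parser once reported only "expected `]`, found `..`"; accumulating every
// candidate `TokenType` lets it report "expected one of `(`, `+`, `,`, `::`,
// or `]`, found `..`" instead.
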
#[derive(Copy, Clone, Debug)]
enum TokenExpectType {
    Expect,
    NoExpect,
}

/// A sequence separator.
struct SeqSep {
    /// The separator token.
    sep: Option<TokenKind>,
    /// `true` if a trailing separator is allowed.
    trailing_sep_allowed: bool,
}

impl SeqSep {
    fn trailing_allowed(t: TokenKind) -> SeqSep {
        SeqSep { sep: Some(t), trailing_sep_allowed: true }
    }

    fn none() -> SeqSep {
        SeqSep { sep: None, trailing_sep_allowed: false }
    }
}

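// Usage sketch (illustrative): `SeqSep::trailing_allowed(token::Comma)`
// describes comma-separated lists that tolerate a trailing comma, such as
// `(a, b, c,)`, while `SeqSep::none()` describes sequences with no separator
// at all.
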
pub enum FollowedByType {
    Yes,
    No,
}

fn token_descr_opt(token: &Token) -> Option<&'static str> {
    Some(match token.kind {
        _ if token.is_special_ident() => "reserved identifier",
        _ if token.is_used_keyword() => "keyword",
        _ if token.is_unused_keyword() => "reserved keyword",
        token::DocComment(..) => "doc comment",
        _ => return None,
    })
}

pub(super) fn token_descr(token: &Token) -> String {
    let token_str = pprust::token_to_string(token);
    match token_descr_opt(token) {
        Some(prefix) => format!("{} `{}`", prefix, token_str),
        _ => format!("`{}`", token_str),
    }
}

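// Example outputs (illustrative): `token_descr` renders the keyword `fn` as
// "keyword `fn`" and a non-keyword token such as `;` as just "`;`", so a
// diagnostic can read e.g. "expected identifier, found keyword `fn`".
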
impl<'a> Parser<'a> {
    pub fn new(
        sess: &'a ParseSess,
        tokens: TokenStream,
        directory: Option<Directory<'a>>,
        recurse_into_file_modules: bool,
        desugar_doc_comments: bool,
        subparser_name: Option<&'static str>,
    ) -> Self {
        let mut parser = Parser {
            sess,
            token: Token::dummy(),
            unnormalized_token: None,
            prev_token: Token::dummy(),
            unnormalized_prev_token: None,
            prev_token_kind: PrevTokenKind::Other,
            prev_span: DUMMY_SP,
            restrictions: Restrictions::empty(),
            recurse_into_file_modules,
            directory: Directory {
                path: Cow::from(PathBuf::new()),
                ownership: DirectoryOwnership::Owned { relative: None },
            },
            root_module_name: None,
            expected_tokens: Vec::new(),
            token_cursor: TokenCursor {
                frame: TokenCursorFrame::new(DelimSpan::dummy(), token::NoDelim, &tokens.into()),
                stack: Vec::new(),
            },
            desugar_doc_comments,
            cfg_mods: true,
            unmatched_angle_bracket_count: 0,
            max_angle_bracket_count: 0,
            unclosed_delims: Vec::new(),
            last_unexpected_token_span: None,
            last_type_ascription: None,
            subparser_name,
        };

        parser.token = parser.next_tok();

        if let Some(directory) = directory {
            parser.directory = directory;
        } else if !parser.token.span.is_dummy() {
            if let Some(FileName::Real(path)) =
                &sess.source_map().lookup_char_pos(parser.token.span.lo()).file.unmapped_path
            {
                if let Some(directory_path) = path.parent() {
                    parser.directory.path = Cow::from(directory_path.to_path_buf());
                }
            }
        }

        parser.process_potential_macro_variable();
        parser
    }

    fn unnormalized_token(&self) -> &Token {
        self.unnormalized_token.as_ref().unwrap_or(&self.token)
    }

    fn unnormalized_prev_token(&self) -> &Token {
        self.unnormalized_prev_token.as_ref().unwrap_or(&self.prev_token)
    }

    fn next_tok(&mut self) -> Token {
        let mut next = if self.desugar_doc_comments {
            self.token_cursor.next_desugared()
        } else {
            self.token_cursor.next()
        };
        if next.span.is_dummy() {
            // Tweak the location for better diagnostics, but keep syntactic context intact.
            next.span = self.unnormalized_token().span.with_ctxt(next.span.ctxt());
        }
        next
    }

    crate fn unexpected<T>(&mut self) -> PResult<'a, T> {
        match self.expect_one_of(&[], &[]) {
            Err(e) => Err(e),
            // We can get `Ok(true)` from `recover_closing_delimiter`
            // which is called in `expected_one_of_not_found`.
            Ok(_) => FatalError.raise(),
        }
    }

    /// Expects and consumes the token `t`. Signals an error if the next token is not `t`.
    pub fn expect(&mut self, t: &TokenKind) -> PResult<'a, bool /* recovered */> {
        if self.expected_tokens.is_empty() {
            if self.token == *t {
                self.bump();
                Ok(false)
            } else {
                self.unexpected_try_recover(t)
            }
        } else {
            self.expect_one_of(slice::from_ref(t), &[])
        }
    }

    /// Expects the next token to be edible or inedible. If edible,
    /// consumes it; if inedible, returns without consuming
    /// anything. Signals a fatal error if the next token is unexpected.
    pub fn expect_one_of(
        &mut self,
        edible: &[TokenKind],
        inedible: &[TokenKind],
    ) -> PResult<'a, bool /* recovered */> {
        if edible.contains(&self.token.kind) {
            self.bump();
            Ok(false)
        } else if inedible.contains(&self.token.kind) {
            // Leave it in the input.
            Ok(false)
        } else if self.last_unexpected_token_span == Some(self.token.span) {
            FatalError.raise();
        } else {
            self.expected_one_of_not_found(edible, inedible)
        }
    }

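    // Design note (commentary): "inedible" tokens are accepted for the
    // purpose of error reporting but deliberately left in the stream, letting
    // the caller consume them itself; this keeps recovery paths from eating
    // tokens that a later production still needs.
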
    // Public for rustfmt usage.
    pub fn parse_ident(&mut self) -> PResult<'a, ast::Ident> {
        self.parse_ident_common(true)
    }

    fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, ast::Ident> {
        match self.token.kind {
            token::Ident(name, _) => {
                if self.token.is_reserved_ident() {
                    let mut err = self.expected_ident_found();
                    if recover {
                        err.emit();
                    } else {
                        return Err(err);
                    }
                }
                let span = self.token.span;
                self.bump();
                Ok(Ident::new(name, span))
            }
            _ => Err(if self.prev_token_kind == PrevTokenKind::DocComment {
                self.span_fatal_err(self.prev_span, Error::UselessDocComment)
            } else {
                self.expected_ident_found()
            }),
        }
    }

    /// Checks if the next token is `tok`, and returns `true` if so.
    ///
    /// This method will automatically add `tok` to `expected_tokens` if `tok` is not
    /// encountered.
    fn check(&mut self, tok: &TokenKind) -> bool {
        let is_present = self.token == *tok;
        if !is_present {
            self.expected_tokens.push(TokenType::Token(tok.clone()));
        }
        is_present
    }

    /// Consumes a token `tok` if it exists. Returns whether the given token was present.
    pub fn eat(&mut self, tok: &TokenKind) -> bool {
        let is_present = self.check(tok);
        if is_present {
            self.bump()
        }
        is_present
    }

    /// If the next token is the given keyword, returns `true` without eating it.
    /// An expectation is also added for diagnostics purposes.
    fn check_keyword(&mut self, kw: Symbol) -> bool {
        self.expected_tokens.push(TokenType::Keyword(kw));
        self.token.is_keyword(kw)
    }

    /// If the next token is the given keyword, eats it and returns `true`.
    /// Otherwise, returns `false`. An expectation is also added for diagnostics purposes.
    // Public for rustfmt usage.
    pub fn eat_keyword(&mut self, kw: Symbol) -> bool {
        if self.check_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    fn eat_keyword_noexpect(&mut self, kw: Symbol) -> bool {
        if self.token.is_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    /// If the given word is not a keyword, signals an error.
    /// If the next token is not the given word, signals an error.
    /// Otherwise, eats it.
    fn expect_keyword(&mut self, kw: Symbol) -> PResult<'a, ()> {
        if !self.eat_keyword(kw) { self.unexpected() } else { Ok(()) }
    }

    fn check_or_expected(&mut self, ok: bool, typ: TokenType) -> bool {
        if ok {
            true
        } else {
            self.expected_tokens.push(typ);
            false
        }
    }

    fn check_ident(&mut self) -> bool {
        self.check_or_expected(self.token.is_ident(), TokenType::Ident)
    }

    fn check_path(&mut self) -> bool {
        self.check_or_expected(self.token.is_path_start(), TokenType::Path)
    }

    fn check_type(&mut self) -> bool {
        self.check_or_expected(self.token.can_begin_type(), TokenType::Type)
    }

    fn check_const_arg(&mut self) -> bool {
        self.check_or_expected(self.token.can_begin_const_arg(), TokenType::Const)
    }

    /// Checks to see if the next token is either `+` or `+=`.
    /// Otherwise returns `false`.
    fn check_plus(&mut self) -> bool {
        self.check_or_expected(
            self.token.is_like_plus(),
            TokenType::Token(token::BinOp(token::Plus)),
        )
    }

    /// Expects and consumes a `+`. If `+=` is seen, replaces it with a `=`
    /// and continues. If a `+` is not seen, returns `false`.
    ///
    /// This is used when token-splitting `+=` into `+`.
    /// See issue #47856 for an example of when this may occur.
    fn eat_plus(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus)));
        match self.token.kind {
            token::BinOp(token::Plus) => {
                self.bump();
                true
            }
            token::BinOpEq(token::Plus) => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                self.bump_with(token::Eq, self.token.span.with_lo(start_point.hi()));
                true
            }
            _ => false,
        }
    }

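    // Splitting sketch (hedged example): the lexer emits `+=` as one token,
    // so source written without spaces, e.g. a bound like `T: Copy+='static`,
    // never yields a lone `+`. `eat_plus` recovers by consuming only the `+`
    // half: it swaps in an `=` token whose span starts where the `+` ended
    // (`start_point(..).hi()`).
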
    /// Expects and consumes an `&`. If `&&` is seen, replaces it with a single
    /// `&` and continues. If an `&` is not seen, signals an error.
    fn expect_and(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::And)));
        match self.token.kind {
            token::BinOp(token::And) => {
                self.bump();
                Ok(())
            }
            token::AndAnd => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                Ok(self
                    .bump_with(token::BinOp(token::And), self.token.span.with_lo(start_point.hi())))
            }
            _ => self.unexpected(),
        }
    }

    /// Expects and consumes an `|`. If `||` is seen, replaces it with a single
    /// `|` and continues. If an `|` is not seen, signals an error.
    fn expect_or(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::Or)));
        match self.token.kind {
            token::BinOp(token::Or) => {
                self.bump();
                Ok(())
            }
            token::OrOr => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                Ok(self
                    .bump_with(token::BinOp(token::Or), self.token.span.with_lo(start_point.hi())))
            }
            _ => self.unexpected(),
        }
    }

    /// Attempts to consume a `<`. If `<<` is seen, replaces it with a single
    /// `<` and continues. If `<-` is seen, consumes the `<` and leaves the
    /// `-` in the stream. If a `<` is not seen, returns `false`.
    ///
    /// This is meant to be used when parsing generics on a path to get the
    /// starting token.
    fn eat_lt(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Token(token::Lt));
        let ate = match self.token.kind {
            token::Lt => {
                self.bump();
                true
            }
            token::BinOp(token::Shl) => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                self.bump_with(token::Lt, self.token.span.with_lo(start_point.hi()));
                true
            }
            token::LArrow => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                self.bump_with(
                    token::BinOp(token::Minus),
                    self.token.span.with_lo(start_point.hi()),
                );
                true
            }
            _ => false,
        };

        if ate {
            // See doc comment for `unmatched_angle_bracket_count`.
            self.unmatched_angle_bracket_count += 1;
            self.max_angle_bracket_count += 1;
            debug!("eat_lt: (increment) count={:?}", self.unmatched_angle_bracket_count);
        }

        ate
    }

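    // Splitting sketch (illustrative): a generic argument that itself starts
    // with a qualified path, e.g. `Vec<<T as Trait>::Item>`, begins with the
    // single lexed token `<<`; `eat_lt` peels off one `<` so the
    // generic-argument grammar still sees the opening angle bracket it
    // expects.
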
    fn expect_lt(&mut self) -> PResult<'a, ()> {
        if !self.eat_lt() { self.unexpected() } else { Ok(()) }
    }

    /// Expects and consumes a single `>` token. If a `>>` is seen, replaces it
    /// with a single `>` and continues. If a `>` is not seen, signals an error.
    fn expect_gt(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::Gt));
        let ate = match self.token.kind {
            token::Gt => {
                self.bump();
                Some(())
            }
            token::BinOp(token::Shr) => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                Some(self.bump_with(token::Gt, self.token.span.with_lo(start_point.hi())))
            }
            token::BinOpEq(token::Shr) => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                Some(self.bump_with(token::Ge, self.token.span.with_lo(start_point.hi())))
            }
            token::Ge => {
                let start_point = self.sess.source_map().start_point(self.token.span);
                Some(self.bump_with(token::Eq, self.token.span.with_lo(start_point.hi())))
            }
            _ => None,
        };

        match ate {
            Some(_) => {
                // See doc comment for `unmatched_angle_bracket_count`.
                if self.unmatched_angle_bracket_count > 0 {
                    self.unmatched_angle_bracket_count -= 1;
                    debug!("expect_gt: (decrement) count={:?}", self.unmatched_angle_bracket_count);
                }

                Ok(())
            }
            None => self.unexpected(),
        }
    }

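    // Splitting sketch (illustrative): closing a nested generic such as
    // `Vec<Vec<u8>>` ends in the single token `>>`; `expect_gt` consumes one
    // `>` and leaves a synthesized `>` for the outer argument list, and
    // similarly peels a leading `>` off `>>=` and `>=`.
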
    fn expect_any_with_type(&mut self, kets: &[&TokenKind], expect: TokenExpectType) -> bool {
        kets.iter().any(|k| match expect {
            TokenExpectType::Expect => self.check(k),
            TokenExpectType::NoExpect => self.token == **k,
        })
    }

    fn parse_seq_to_before_tokens<T>(
        &mut self,
        kets: &[&TokenKind],
        sep: SeqSep,
        expect: TokenExpectType,
        mut f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool /* trailing */, bool /* recovered */)> {
        let mut first = true;
        let mut recovered = false;
        let mut trailing = false;
        let mut v = vec![];
        while !self.expect_any_with_type(kets, expect) {
            if let token::CloseDelim(..) | token::Eof = self.token.kind {
                break;
            }
            if let Some(ref t) = sep.sep {
                if first {
                    first = false;
                } else {
                    match self.expect(t) {
                        Ok(false) => {}
                        Ok(true) => {
                            recovered = true;
                            break;
                        }
                        Err(mut expect_err) => {
                            let sp = self.prev_span.shrink_to_hi();
                            let token_str = pprust::token_kind_to_string(t);

                            // Attempt to keep parsing if it was a similar separator.
                            if let Some(ref tokens) = t.similar_tokens() {
                                if tokens.contains(&self.token.kind) {
                                    self.bump();
                                }
                            }

                            // Attempt to keep parsing if it was an omitted separator.
                            match f(self) {
                                Ok(t) => {
                                    // Parsed successfully, therefore most probably the code only
                                    // misses a separator.
                                    expect_err
                                        .span_suggestion_short(
                                            sp,
                                            &format!("missing `{}`", token_str),
                                            token_str,
                                            Applicability::MaybeIncorrect,
                                        )
                                        .emit();

                                    v.push(t);
                                    continue;
                                }
                                Err(mut e) => {
                                    // Parsing failed, therefore it must be something more serious
                                    // than just a missing separator.
                                    expect_err.emit();

                                    e.cancel();
                                    break;
                                }
                            }
                        }
                    }
                }
            }
            if sep.trailing_sep_allowed && self.expect_any_with_type(kets, expect) {
                trailing = true;
                break;
            }

            let t = f(self)?;
            v.push(t);
        }

        Ok((v, trailing, recovered))
    }

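    // Recovery sketch (hedged example): for input like `foo(a b)`, the
    // missing-separator path above re-runs `f`, successfully parses `b`, and
    // attaches a short "missing `,`" suggestion; if the re-run fails too, the
    // original expectation error is emitted and the sequence is abandoned.
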
    /// Parses a sequence, not including the closing delimiter. The function
    /// `f` must consume tokens until reaching the next separator or
    /// closing bracket.
    fn parse_seq_to_before_end<T>(
        &mut self,
        ket: &TokenKind,
        sep: SeqSep,
        f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool, bool)> {
        self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
    }

    /// Parses a sequence, including the closing delimiter. The function
    /// `f` must consume tokens until reaching the next separator or
    /// closing bracket.
    fn parse_seq_to_end<T>(
        &mut self,
        ket: &TokenKind,
        sep: SeqSep,
        f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool /* trailing */)> {
        let (val, trailing, recovered) = self.parse_seq_to_before_end(ket, sep, f)?;
        if !recovered {
            self.eat(ket);
        }
        Ok((val, trailing))
    }

    /// Parses a sequence, including the closing delimiter. The function
    /// `f` must consume tokens until reaching the next separator or
    /// closing bracket.
    fn parse_unspanned_seq<T>(
        &mut self,
        bra: &TokenKind,
        ket: &TokenKind,
        sep: SeqSep,
        f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool)> {
        self.expect(bra)?;
        self.parse_seq_to_end(ket, sep, f)
    }

    fn parse_delim_comma_seq<T>(
        &mut self,
        delim: DelimToken,
        f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool)> {
        self.parse_unspanned_seq(
            &token::OpenDelim(delim),
            &token::CloseDelim(delim),
            SeqSep::trailing_allowed(token::Comma),
            f,
        )
    }

    fn parse_paren_comma_seq<T>(
        &mut self,
        f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    ) -> PResult<'a, (Vec<T>, bool)> {
        self.parse_delim_comma_seq(token::Paren, f)
    }

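    // Usage sketch (hypothetical call, not from this file): an argument-style
    // list can be parsed with `self.parse_paren_comma_seq(|p| p.parse_expr())`,
    // which expects `(`, the comma-separated items (trailing comma allowed),
    // and the closing `)`.
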
    /// Advances the parser by one token.
    pub fn bump(&mut self) {
        if self.prev_token_kind == PrevTokenKind::Eof {
            // Bumping after EOF is a bad sign, usually an infinite loop.
            let msg = "attempted to bump the parser past EOF (may be stuck in a loop)";
            self.span_bug(self.token.span, msg);
        }

        // Update the current and previous tokens.
        let next_token = self.next_tok();
        self.prev_token = mem::replace(&mut self.token, next_token);
        self.unnormalized_prev_token = self.unnormalized_token.take();

        // Update fields derived from the previous token.
        self.prev_token_kind = match self.prev_token.kind {
            token::DocComment(..) => PrevTokenKind::DocComment,
            token::Comma => PrevTokenKind::Comma,
            token::BinOp(token::Plus) => PrevTokenKind::Plus,
            token::BinOp(token::Or) => PrevTokenKind::BitOr,
            token::Interpolated(..) => PrevTokenKind::Interpolated,
            token::Eof => PrevTokenKind::Eof,
            token::Ident(..) => PrevTokenKind::Ident,
            _ => PrevTokenKind::Other,
        };
        self.prev_span = self.unnormalized_prev_token().span;

        self.expected_tokens.clear();
        // Check after each token.
        self.process_potential_macro_variable();
    }

    /// Advances the parser using the provided token as the next one. Use this
    /// when consuming a part of a token. For example a single `<` from `<<`.
    /// FIXME: this function sets the previous token data to some semi-nonsensical values
    /// which kind of work because they are currently used in very limited ways in practice.
    /// Correct token kinds and spans need to be calculated instead.
    fn bump_with(&mut self, next: TokenKind, span: Span) {
        // Update the current and previous tokens.
        let next_token = Token::new(next, span);
        self.prev_token = mem::replace(&mut self.token, next_token);
        self.unnormalized_prev_token = self.unnormalized_token.take();

        // Update fields derived from the previous token.
        self.prev_token_kind = PrevTokenKind::Other;
        self.prev_span = self.unnormalized_prev_token().span.with_hi(span.lo());

        self.expected_tokens.clear();
    }

    /// Looks ahead `dist` tokens past `self.token` and gives access to the
    /// token there. When `dist == 0`, the current token is looked at.
    pub fn look_ahead<R>(&self, dist: usize, looker: impl FnOnce(&Token) -> R) -> R {
        if dist == 0 {
            return looker(&self.token);
        }

        let frame = &self.token_cursor.frame;
        looker(&match frame.tree_cursor.look_ahead(dist - 1) {
            Some(tree) => match tree {
                TokenTree::Token(token) => token,
                TokenTree::Delimited(dspan, delim, _) => {
                    Token::new(token::OpenDelim(delim), dspan.open)
                }
            },
            None => Token::new(token::CloseDelim(frame.delim), frame.span.close),
        })
    }

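    // Example grounded in this file: `maybe_recover_from_interpolated_ty_qpath!`
    // peeks with `self.look_ahead(1, |t| t == &token::ModSep)` to check for
    // `::` after the current token without advancing the parser.
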
    /// Returns whether any of the given keywords are `dist` tokens ahead of the current one.
    fn is_keyword_ahead(&self, dist: usize, kws: &[Symbol]) -> bool {
        self.look_ahead(dist, |t| kws.iter().any(|&kw| t.is_keyword(kw)))
    }

    /// Parses asyncness: `async` or nothing.
    fn parse_asyncness(&mut self) -> IsAsync {
        if self.eat_keyword(kw::Async) {
            IsAsync::Async { closure_id: DUMMY_NODE_ID, return_impl_trait_id: DUMMY_NODE_ID }
        } else {
            IsAsync::NotAsync
        }
    }

    /// Parses unsafety: `unsafe` or nothing.
    fn parse_unsafety(&mut self) -> Unsafety {
        if self.eat_keyword(kw::Unsafe) { Unsafety::Unsafe } else { Unsafety::Normal }
    }

    /// Parses mutability (`mut` or nothing).
    fn parse_mutability(&mut self) -> Mutability {
        if self.eat_keyword(kw::Mut) { Mutability::Mut } else { Mutability::Not }
    }

    /// Possibly parses mutability (`const` or `mut`).
    fn parse_const_or_mut(&mut self) -> Option<Mutability> {
        if self.eat_keyword(kw::Mut) {
            Some(Mutability::Mut)
        } else if self.eat_keyword(kw::Const) {
            Some(Mutability::Not)
        } else {
            None
        }
    }

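    // Context sketch (commentary): this `const`/`mut` distinction is the one
    // raw pointer types need, e.g. `*const T` vs `*mut T`, where the keyword
    // after `*` determines the mutability; returning `None` lets the caller
    // report a missing keyword.
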
2018-05-31 16:53:30 -06:00
|
|
|
fn parse_field_name(&mut self) -> PResult<'a, Ident> {
|
2019-12-22 17:42:04 -05:00
|
|
|
if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = self.token.kind
|
|
|
|
{
|
2019-06-07 13:31:13 +03:00
|
|
|
self.expect_no_suffix(self.token.span, "a tuple index", suffix);
|
2016-07-29 23:47:55 +03:00
|
|
|
self.bump();
|
2019-05-19 01:04:26 +03:00
|
|
|
Ok(Ident::new(symbol, self.prev_span))
|
2016-07-29 23:47:55 +03:00
|
|
|
} else {
|
2018-01-06 14:43:20 -08:00
|
|
|
self.parse_ident_common(false)
|
2016-07-29 23:47:55 +03:00
|
|
|
}
|
|
|
|
}
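
    // Illustrative sketch (editor's addition): a field name is either an identifier
    // or an integer literal naming a tuple index, so both of these parse:
    //
    //     point.x   // identifier field
    //     pair.0    // integer "tuple index" field (suffixes like `0usize` are rejected)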

    fn parse_mac_args(&mut self) -> PResult<'a, P<MacArgs>> {
        self.parse_mac_args_common(true).map(P)
    }

    fn parse_attr_args(&mut self) -> PResult<'a, MacArgs> {
        self.parse_mac_args_common(false)
    }

    fn parse_mac_args_common(&mut self, delimited_only: bool) -> PResult<'a, MacArgs> {
        Ok(
            if self.check(&token::OpenDelim(DelimToken::Paren))
                || self.check(&token::OpenDelim(DelimToken::Bracket))
                || self.check(&token::OpenDelim(DelimToken::Brace))
            {
                match self.parse_token_tree() {
                    // We've confirmed above that there is a delimiter so unwrapping is OK.
                    TokenTree::Delimited(dspan, delim, tokens) => {
                        MacArgs::Delimited(dspan, MacDelimiter::from_token(delim).unwrap(), tokens)
                    }
                    _ => unreachable!(),
                }
            } else if !delimited_only {
                if self.eat(&token::Eq) {
                    let eq_span = self.prev_span;
                    let mut is_interpolated_expr = false;
                    if let token::Interpolated(nt) = &self.token.kind {
                        if let token::NtExpr(..) = **nt {
                            is_interpolated_expr = true;
                        }
                    }
                    let token_tree = if is_interpolated_expr {
                        // We need to accept arbitrary interpolated expressions to continue
                        // supporting things like `doc = $expr` that work on stable.
                        // Non-literal interpolated expressions are rejected after expansion.
                        self.parse_token_tree()
                    } else {
                        self.parse_unsuffixed_lit()?.token_tree()
                    };

                    MacArgs::Eq(eq_span, token_tree.into())
                } else {
                    MacArgs::Empty
                }
            } else {
                return self.unexpected();
            },
        )
    }
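
    // Illustrative sketch (editor's addition): the three `MacArgs` shapes this
    // method produces, in surface syntax:
    //
    //     mac!(a, b);          // MacArgs::Delimited (likewise with `[]` and `{}`)
    //     #[doc = "text"]      // MacArgs::Eq (attributes only, `delimited_only == false`)
    //     #[inline]            // MacArgs::Empty (attributes only)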

    fn parse_or_use_outer_attributes(
        &mut self,
        already_parsed_attrs: Option<AttrVec>,
    ) -> PResult<'a, AttrVec> {
        if let Some(attrs) = already_parsed_attrs {
            Ok(attrs)
        } else {
            self.parse_outer_attributes().map(|a| a.into())
        }
    }

    pub fn process_potential_macro_variable(&mut self) {
        let normalized_token = match self.token.kind {
            token::Dollar
                if self.token.span.from_expansion() && self.look_ahead(1, |t| t.is_ident()) =>
            {
                self.bump();
                let name = match self.token.kind {
                    token::Ident(name, _) => name,
                    _ => unreachable!(),
                };
                let span = self.prev_span.to(self.token.span);
                self.struct_span_err(span, &format!("unknown macro variable `{}`", name))
                    .span_label(span, "unknown macro variable")
                    .emit();
                self.bump();
                return;
            }
            token::Interpolated(ref nt) => {
                // Interpolated identifier and lifetime tokens are replaced with usual identifier
                // and lifetime tokens, so the former are never encountered during normal parsing.
                match **nt {
                    token::NtIdent(ident, is_raw) => {
                        Token::new(token::Ident(ident.name, is_raw), ident.span)
                    }
                    token::NtLifetime(ident) => Token::new(token::Lifetime(ident.name), ident.span),
                    _ => return,
                }
            }
            _ => return,
        };
        self.unnormalized_token = Some(mem::replace(&mut self.token, normalized_token));
    }
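
    // Editor's note (illustrative): the `Dollar` arm handles a stray `$ident` left
    // in the stream by a macro expansion (a `$var` with no matching matcher); it
    // reports "unknown macro variable `var`" and consumes both tokens. The
    // `Interpolated` arm re-tokenizes `NtIdent`/`NtLifetime` so later parsing only
    // ever sees plain identifier and lifetime tokens.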

    /// Parses a single token tree from the input.
    pub fn parse_token_tree(&mut self) -> TokenTree {
        match self.token.kind {
            token::OpenDelim(..) => {
                let frame = mem::replace(
                    &mut self.token_cursor.frame,
                    self.token_cursor.stack.pop().unwrap(),
                );
                self.token.span = frame.span.entire();
                self.bump();
                TokenTree::Delimited(frame.span, frame.delim, frame.tree_cursor.stream.into())
            }
            token::CloseDelim(_) | token::Eof => unreachable!(),
            _ => {
                let token = self.token.clone();
                self.bump();
                TokenTree::Token(token)
            }
        }
    }
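
    // Illustrative sketch (editor's addition): for the input `a + (b * c)`, repeated
    // calls yield three token trees: `Token(a)`, `Token(+)`, and one
    // `Delimited(Paren, ..)` whose stream holds `b`, `*`, `c`.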

    /// Parses a stream of tokens into a list of `TokenTree`s, up to EOF.
    pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
        let mut tts = Vec::new();
        while self.token != token::Eof {
            tts.push(self.parse_token_tree());
        }
        Ok(tts)
    }

    pub fn parse_tokens(&mut self) -> TokenStream {
        let mut result = Vec::new();
        loop {
            match self.token.kind {
                token::Eof | token::CloseDelim(..) => break,
                _ => result.push(self.parse_token_tree().into()),
            }
        }
        TokenStream::new(result)
    }

    /// Evaluates the closure with restrictions in place.
    ///
    /// After the closure is evaluated, restrictions are reset.
    fn with_res<T>(&mut self, res: Restrictions, f: impl FnOnce(&mut Self) -> T) -> T {
        let old = self.restrictions;
        self.restrictions = res;
        let res = f(self);
        self.restrictions = old;
        res
    }
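
    // Illustrative sketch (editor's addition): restrictions are scoped to the
    // closure and restored afterwards; `parse_something` is a hypothetical method
    // that consults `self.restrictions` while it runs:
    //
    //     let value = self.with_res(Restrictions::NO_STRUCT_LITERAL, |this| {
    //         this.parse_something()
    //     });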

    fn is_crate_vis(&self) -> bool {
        self.token.is_keyword(kw::Crate) && self.look_ahead(1, |t| t != &token::ModSep)
    }

    /// Parses `pub`, `pub(crate)` and `pub(in path)` plus shortcuts `crate` for `pub(crate)`,
    /// `pub(self)` for `pub(in self)` and `pub(super)` for `pub(in super)`.
    /// If the following element can't be a tuple (i.e., it's a function definition rather
    /// than a tuple struct field), then the contents within the parentheses aren't valid,
    /// so emit a proper diagnostic.
    pub fn parse_visibility(&mut self, fbt: FollowedByType) -> PResult<'a, Visibility> {
        maybe_whole!(self, NtVis, |x| x);

        self.expected_tokens.push(TokenType::Keyword(kw::Crate));
        if self.is_crate_vis() {
            self.bump(); // `crate`
            self.sess.gated_spans.gate(sym::crate_visibility_modifier, self.prev_span);
            return Ok(respan(self.prev_span, VisibilityKind::Crate(CrateSugar::JustCrate)));
        }

        if !self.eat_keyword(kw::Pub) {
            // We need a span for our `Spanned<VisibilityKind>`, but there's inherently no
            // keyword to grab a span from for inherited visibility; an empty span at the
            // beginning of the current token would seem to be the "Schelling span".
            return Ok(respan(self.token.span.shrink_to_lo(), VisibilityKind::Inherited));
        }
        let lo = self.prev_span;

        if self.check(&token::OpenDelim(token::Paren)) {
            // We don't `self.bump()` the `(` yet because this might be a struct definition where
            // `()` or a tuple might be allowed. For example, `struct Struct(pub (), pub (usize));`.
            // Because of this, we only `bump` the `(` if we're assured it is appropriate to do so
            // by the following tokens.
            if self.is_keyword_ahead(1, &[kw::Crate]) && self.look_ahead(2, |t| t != &token::ModSep)
            // account for `pub(crate::foo)`
            {
                // Parse `pub(crate)`.
                self.bump(); // `(`
                self.bump(); // `crate`
                self.expect(&token::CloseDelim(token::Paren))?; // `)`
                let vis = VisibilityKind::Crate(CrateSugar::PubCrate);
                return Ok(respan(lo.to(self.prev_span), vis));
            } else if self.is_keyword_ahead(1, &[kw::In]) {
                // Parse `pub(in path)`.
                self.bump(); // `(`
                self.bump(); // `in`
                let path = self.parse_path(PathStyle::Mod)?; // `path`
                self.expect(&token::CloseDelim(token::Paren))?; // `)`
                let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
                return Ok(respan(lo.to(self.prev_span), vis));
            } else if self.look_ahead(2, |t| t == &token::CloseDelim(token::Paren))
                && self.is_keyword_ahead(1, &[kw::Super, kw::SelfLower])
            {
                // Parse `pub(self)` or `pub(super)`.
                self.bump(); // `(`
                let path = self.parse_path(PathStyle::Mod)?; // `super`/`self`
                self.expect(&token::CloseDelim(token::Paren))?; // `)`
                let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
                return Ok(respan(lo.to(self.prev_span), vis));
            } else if let FollowedByType::No = fbt {
                // Provide this diagnostic if a type cannot follow;
                // in particular, if this is not a tuple struct.
                self.recover_incorrect_vis_restriction()?;
                // Emit diagnostic, but continue with public visibility.
            }
        }

        Ok(respan(lo, VisibilityKind::Public))
    }
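
    // Illustrative sketch (editor's addition): the visibility forms this method
    // accepts, in surface syntax:
    //
    //     pub fn f() {}                 // VisibilityKind::Public
    //     crate fn f() {}               // Crate(JustCrate), feature-gated
    //     pub(crate) fn f() {}          // Crate(PubCrate)
    //     pub(self) fn f() {}           // Restricted { path: self, .. }
    //     pub(super) fn f() {}          // Restricted { path: super, .. }
    //     pub(in crate::m) fn f() {}    // Restricted { path: crate::m, .. }
    //     fn f() {}                     // Inherited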

    /// Recovery for e.g. `pub(something) fn ...` or `struct X { pub(something) y: Z }`
    fn recover_incorrect_vis_restriction(&mut self) -> PResult<'a, ()> {
        self.bump(); // `(`
        let path = self.parse_path(PathStyle::Mod)?;
        self.expect(&token::CloseDelim(token::Paren))?; // `)`

        let msg = "incorrect visibility restriction";
        let suggestion = r##"some possible visibility restrictions are:
`pub(crate)`: visible only on the current crate
`pub(super)`: visible only in the current module's parent
`pub(in path::to::module)`: visible only on the specified path"##;

        let path_str = pprust::path_to_string(&path);

        struct_span_err!(self.sess.span_diagnostic, path.span, E0704, "{}", msg)
            .help(suggestion)
            .span_suggestion(
                path.span,
                &format!("make this visible only to module `{}` with `in`", path_str),
                format!("in {}", path_str),
                Applicability::MachineApplicable,
            )
            .emit();

        Ok(())
    }
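
    // Illustrative sketch (editor's addition): given `pub(foo::bar) fn f() {}`, this
    // recovery emits E0704 with the machine-applicable suggestion
    // `pub(in foo::bar) fn f() {}`, and parsing continues with public visibility.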

    /// Parses `extern string_literal?`.
    fn parse_extern(&mut self) -> PResult<'a, Extern> {
        Ok(if self.eat_keyword(kw::Extern) {
            Extern::from_abi(self.parse_abi())
        } else {
            Extern::None
        })
    }
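
    // Illustrative sketch (editor's addition): surface forms and their results:
    //
    //     fn f() {}             // Extern::None
    //     extern fn f() {}      // `extern` with no literal: implicit ABI ("C" by default)
    //     extern "C" fn f() {}  // explicit ABI from the string literal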

    /// Parses a string literal as an ABI spec.
    fn parse_abi(&mut self) -> Option<StrLit> {
        match self.parse_str_lit() {
            Ok(str_lit) => Some(str_lit),
            Err(Some(lit)) => match lit.kind {
                ast::LitKind::Err(_) => None,
                _ => {
                    self.struct_span_err(lit.span, "non-string ABI literal")
                        .span_suggestion(
                            lit.span,
                            "specify the ABI with a string literal",
                            "\"C\"".to_string(),
                            Applicability::MaybeIncorrect,
                        )
                        .emit();
                    None
                }
            },
            Err(None) => None,
        }
    }

    /// We are parsing `async fn`. If we are on Rust 2015, emit an error.
    fn ban_async_in_2015(&self, async_span: Span) {
        if async_span.rust_2015() {
            struct_span_err!(
                self.diagnostic(),
                async_span,
                E0670,
                "`async fn` is not permitted in the 2015 edition",
            )
            .emit();
        }
    }

    fn collect_tokens<R>(
        &mut self,
        f: impl FnOnce(&mut Self) -> PResult<'a, R>,
    ) -> PResult<'a, (R, TokenStream)> {
        // Record all tokens we parse when parsing this item.
        let mut tokens = Vec::new();
        let prev_collecting = match self.token_cursor.frame.last_token {
            LastToken::Collecting(ref mut list) => Some(mem::take(list)),
            LastToken::Was(ref mut last) => {
                tokens.extend(last.take());
                None
            }
        };
        self.token_cursor.frame.last_token = LastToken::Collecting(tokens);
        let prev = self.token_cursor.stack.len();
        let ret = f(self);
        let last_token = if self.token_cursor.stack.len() == prev {
            &mut self.token_cursor.frame.last_token
        } else if self.token_cursor.stack.get(prev).is_none() {
            // This can happen due to a bad interaction of two unrelated recovery mechanisms with
            // mismatched delimiters *and* recovery lookahead on the likely typo `pub ident(`
            // (#62881).
            return Ok((ret?, TokenStream::default()));
        } else {
            &mut self.token_cursor.stack[prev].last_token
        };

        // Pull out the tokens that we've collected from the call to `f` above.
        let mut collected_tokens = match *last_token {
            LastToken::Collecting(ref mut v) => mem::take(v),
            LastToken::Was(ref was) => {
                let msg = format!("our vector went away? - found Was({:?})", was);
                debug!("collect_tokens: {}", msg);
                self.sess.span_diagnostic.delay_span_bug(self.token.span, &msg);
                // This can happen due to a bad interaction of two unrelated recovery mechanisms
                // with mismatched delimiters *and* recovery lookahead on the likely typo
                // `pub ident(` (#62895, different but similar to the case above).
                return Ok((ret?, TokenStream::default()));
            }
        };

        // If we're not at EOF our current token wasn't actually consumed by
        // `f`, but it'll still be in our list that we pulled out. In that case
        // put it back.
        let extra_token = if self.token != token::Eof { collected_tokens.pop() } else { None };

        // If we were previously collecting tokens, then this was a recursive
        // call. In that case we need to record all the tokens we collected in
        // our parent list as well. To do that we push a clone of our stream
        // onto the previous list.
        match prev_collecting {
            Some(mut list) => {
                list.extend(collected_tokens.iter().cloned());
                list.extend(extra_token);
                *last_token = LastToken::Collecting(list);
            }
            None => {
                *last_token = LastToken::Was(extra_token);
            }
        }

        Ok((ret?, TokenStream::new(collected_tokens)))
    }
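
    // Illustrative sketch (editor's addition): how a caller might capture the exact
    // tokens of a parsed item, e.g. to attach them to the AST for later re-expansion:
    //
    //     let (item, tokens) = self.collect_tokens(|this| this.parse_item())?;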

    /// Returns `true` if the next tokens begin `::{` or `::*` (a `use`-tree "coupler").
    fn is_import_coupler(&mut self) -> bool {
        self.check(&token::ModSep)
            && self.look_ahead(1, |t| {
                *t == token::OpenDelim(token::Brace) || *t == token::BinOp(token::Star)
            })
    }
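
    // Illustrative sketch (editor's addition): the "coupler" positions in a `use`
    // tree that this check detects:
    //
    //     use std::collections::{HashMap, HashSet};  // `::{`
    //     use std::prelude::v1::*;                   // `::*`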
}

crate fn make_unclosed_delims_error(
    unmatched: UnmatchedBrace,
    sess: &ParseSess,
) -> Option<DiagnosticBuilder<'_>> {
    // `None` here means an `Eof` was found. We already emit those errors elsewhere, we add them to
    // `unmatched_braces` only for error recovery in the `Parser`.
    let found_delim = unmatched.found_delim?;
    let mut err = sess.span_diagnostic.struct_span_err(
        unmatched.found_span,
        &format!(
            "mismatched closing delimiter: `{}`",
            pprust::token_kind_to_string(&token::CloseDelim(found_delim)),
        ),
    );
    err.span_label(unmatched.found_span, "mismatched closing delimiter");
    if let Some(sp) = unmatched.candidate_span {
        err.span_label(sp, "closing delimiter possibly meant for this");
    }
    if let Some(sp) = unmatched.unclosed_span {
        err.span_label(sp, "unclosed delimiter");
    }
    Some(err)
}

pub fn emit_unclosed_delims(unclosed_delims: &mut Vec<UnmatchedBrace>, sess: &ParseSess) {
    *sess.reached_eof.borrow_mut() |=
        unclosed_delims.iter().any(|unmatched_delim| unmatched_delim.found_delim.is_none());
    for unmatched in unclosed_delims.drain(..) {
        make_unclosed_delims_error(unmatched, sess).map(|mut e| e.emit());
    }
}
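
// Illustrative sketch (editor's addition): input like `fn f() { (]` yields an
// `UnmatchedBrace` recorded while lexing token trees; `emit_unclosed_delims` then
// reports "mismatched closing delimiter: `]`" and labels the unclosed `(`.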