2020-03-08 13:36:20 +01:00
|
|
|
use crate::base::{DummyResult, ExtCtxt, MacResult, TTMacroExpander};
|
2019-10-16 10:59:30 +02:00
|
|
|
use crate::base::{SyntaxExtension, SyntaxExtensionKind};
|
2019-12-22 17:42:04 -05:00
|
|
|
use crate::expand::{ensure_complete_parse, parse_ast_fragment, AstFragment, AstFragmentKind};
|
2019-10-16 10:59:30 +02:00
|
|
|
use crate::mbe;
|
|
|
|
use crate::mbe::macro_check;
|
2020-02-05 09:44:03 +11:00
|
|
|
use crate::mbe::macro_parser::parse_tt;
|
2020-03-17 14:13:32 +01:00
|
|
|
use crate::mbe::macro_parser::{Error, ErrorReported, Failure, Success};
|
2020-02-05 09:44:03 +11:00
|
|
|
use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq};
|
2019-10-16 10:59:30 +02:00
|
|
|
use crate::mbe::transcribe::transcribe;
|
|
|
|
|
2020-04-27 23:26:11 +05:30
|
|
|
use rustc_ast as ast;
|
2020-07-27 14:04:54 +02:00
|
|
|
use rustc_ast::token::{self, NonterminalKind, NtTT, Token, TokenKind::*};
|
2020-02-29 20:37:32 +03:00
|
|
|
use rustc_ast::tokenstream::{DelimSpan, TokenStream};
|
2021-06-23 14:07:32 +00:00
|
|
|
use rustc_ast::{NodeId, DUMMY_NODE_ID};
|
2020-01-11 17:02:46 +01:00
|
|
|
use rustc_ast_pretty::pprust;
|
2020-01-11 13:15:20 +01:00
|
|
|
use rustc_attr::{self as attr, TransparencyError};
|
2020-01-09 11:18:47 +01:00
|
|
|
use rustc_data_structures::fx::FxHashMap;
|
|
|
|
use rustc_data_structures::sync::Lrc;
|
2020-03-17 12:54:57 +01:00
|
|
|
use rustc_errors::{Applicability, DiagnosticBuilder};
|
2019-11-30 00:23:38 +01:00
|
|
|
use rustc_feature::Features;
|
2021-03-25 21:42:21 +08:00
|
|
|
use rustc_lint_defs::builtin::{OR_PATTERNS_BACK_COMPAT, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS};
|
|
|
|
use rustc_lint_defs::BuiltinLintDiagnostics;
|
2019-10-15 22:48:13 +02:00
|
|
|
use rustc_parse::parser::Parser;
|
2020-01-11 15:03:15 +01:00
|
|
|
use rustc_session::parse::ParseSess;
|
2020-07-30 11:27:50 +10:00
|
|
|
use rustc_session::Session;
|
2020-01-01 19:40:49 +01:00
|
|
|
use rustc_span::edition::Edition;
|
2019-12-31 20:15:40 +03:00
|
|
|
use rustc_span::hygiene::Transparency;
|
2020-08-01 17:45:17 +02:00
|
|
|
use rustc_span::symbol::{kw, sym, Ident, MacroRulesNormalizedIdent};
|
2019-12-31 20:15:40 +03:00
|
|
|
use rustc_span::Span;
|
2019-02-07 02:33:01 +09:00
|
|
|
|
2018-07-02 19:44:01 -05:00
|
|
|
use std::borrow::Cow;
|
|
|
|
use std::collections::hash_map::Entry;
|
2019-10-30 17:34:00 +01:00
|
|
|
use std::{mem, slice};
|
2020-08-13 23:05:01 -07:00
|
|
|
use tracing::debug;
|
2018-02-27 17:11:14 +01:00
|
|
|
|
2019-09-22 17:42:17 +03:00
|
|
|
/// A parser positioned at the output of a successfully-matched macro arm,
/// together with enough context to report useful errors about the expansion.
crate struct ParserAnyMacro<'a> {
    parser: Parser<'a>,

    /// Span of the expansion site of the macro this parser is for
    site_span: Span,
    /// The ident of the macro we're parsing
    macro_ident: Ident,
    /// `NodeId` used when buffering lints against the expansion
    /// (see the `SEMICOLON_IN_EXPRESSIONS_FROM_MACROS` lint in `make`).
    lint_node_id: NodeId,
    /// Span of the macro arm (rhs) that produced this expansion.
    arm_span: Span,
}
|
|
|
|
|
2019-09-22 17:42:17 +03:00
|
|
|
/// Adds a fragment-kind-specific label to `err` at `span`, explaining that
/// the macro call did not expand to the AST fragment the caller wanted.
/// Only `Ty` and `Pat` get a dedicated label; other kinds are left untouched.
crate fn annotate_err_with_kind(
    err: &mut DiagnosticBuilder<'_>,
    kind: AstFragmentKind,
    span: Span,
) {
    match kind {
        AstFragmentKind::Ty => {
            err.span_label(span, "this macro call doesn't expand to a type");
        }
        AstFragmentKind::Pat => {
            err.span_label(span, "this macro call doesn't expand to a pattern");
        }
        _ => {}
    };
}
|
|
|
|
|
2020-03-17 08:55:28 +01:00
|
|
|
fn emit_frag_parse_err(
|
|
|
|
mut e: DiagnosticBuilder<'_>,
|
|
|
|
parser: &Parser<'_>,
|
2020-03-26 15:54:47 +01:00
|
|
|
orig_parser: &mut Parser<'_>,
|
2020-03-17 08:55:28 +01:00
|
|
|
site_span: Span,
|
|
|
|
arm_span: Span,
|
|
|
|
kind: AstFragmentKind,
|
|
|
|
) {
|
|
|
|
if parser.token == token::Eof && e.message().ends_with(", found `<eof>`") {
|
|
|
|
if !e.span.is_dummy() {
|
|
|
|
// early end of macro arm (#52866)
|
|
|
|
e.replace_span_with(parser.sess.source_map().next_point(parser.token.span));
|
|
|
|
}
|
|
|
|
let msg = &e.message[0];
|
|
|
|
e.message[0] = (
|
|
|
|
format!(
|
|
|
|
"macro expansion ends with an incomplete expression: {}",
|
|
|
|
msg.0.replace(", found `<eof>`", ""),
|
|
|
|
),
|
|
|
|
msg.1,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
if e.span.is_dummy() {
|
|
|
|
// Get around lack of span in error (#30128)
|
|
|
|
e.replace_span_with(site_span);
|
|
|
|
if !parser.sess.source_map().is_imported(arm_span) {
|
|
|
|
e.span_label(arm_span, "in this macro arm");
|
|
|
|
}
|
|
|
|
} else if parser.sess.source_map().is_imported(parser.token.span) {
|
|
|
|
e.span_label(site_span, "in this macro invocation");
|
|
|
|
}
|
|
|
|
match kind {
|
2020-03-26 15:54:47 +01:00
|
|
|
// Try a statement if an expression is wanted but failed and suggest adding `;` to call.
|
|
|
|
AstFragmentKind::Expr => match parse_ast_fragment(orig_parser, AstFragmentKind::Stmts) {
|
|
|
|
Err(mut err) => err.cancel(),
|
|
|
|
Ok(_) => {
|
|
|
|
e.note(
|
|
|
|
"the macro call doesn't expand to an expression, but it can expand to a statement",
|
|
|
|
);
|
|
|
|
e.span_suggestion_verbose(
|
|
|
|
site_span.shrink_to_hi(),
|
|
|
|
"add `;` to interpret the expansion as a statement",
|
|
|
|
";".to_string(),
|
|
|
|
Applicability::MaybeIncorrect,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
},
|
2020-03-17 08:55:28 +01:00
|
|
|
_ => annotate_err_with_kind(&mut e, kind, site_span),
|
|
|
|
};
|
|
|
|
e.emit();
|
|
|
|
}
|
|
|
|
|
2014-03-09 16:54:34 +02:00
|
|
|
impl<'a> ParserAnyMacro<'a> {
|
2019-09-22 17:42:17 +03:00
|
|
|
crate fn make(mut self: Box<ParserAnyMacro<'a>>, kind: AstFragmentKind) -> AstFragment {
|
2020-12-07 18:55:00 -05:00
|
|
|
let ParserAnyMacro { site_span, macro_ident, ref mut parser, lint_node_id, arm_span } =
|
|
|
|
*self;
|
2020-03-26 15:54:47 +01:00
|
|
|
let snapshot = &mut parser.clone();
|
2020-03-17 08:55:28 +01:00
|
|
|
let fragment = match parse_ast_fragment(parser, kind) {
|
|
|
|
Ok(f) => f,
|
|
|
|
Err(err) => {
|
2020-12-18 12:27:36 +01:00
|
|
|
emit_frag_parse_err(err, parser, snapshot, site_span, arm_span, kind);
|
2020-03-17 08:55:28 +01:00
|
|
|
return kind.dummy(site_span);
|
2018-10-23 10:07:11 -07:00
|
|
|
}
|
2020-03-17 08:55:28 +01:00
|
|
|
};
|
2015-07-25 21:54:19 -07:00
|
|
|
|
2018-11-27 02:59:49 +00:00
|
|
|
// We allow semicolons at the end of expressions -- e.g., the semicolon in
|
2016-09-23 23:09:23 +00:00
|
|
|
// `macro_rules! m { () => { panic!(); } }` isn't parsed by `.parse_expr()`,
|
2018-11-27 02:59:49 +00:00
|
|
|
// but `m!()` is allowed in expression positions (cf. issue #34706).
|
2018-06-20 02:08:08 +03:00
|
|
|
if kind == AstFragmentKind::Expr && parser.token == token::Semi {
|
2020-12-07 18:55:00 -05:00
|
|
|
parser.sess.buffer_lint(
|
|
|
|
SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
|
|
|
|
parser.token.span,
|
|
|
|
lint_node_id,
|
|
|
|
"trailing semicolon in macro used in expression position",
|
|
|
|
);
|
2016-09-23 23:09:23 +00:00
|
|
|
parser.bump();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure we don't have any tokens left to parse so we don't silently drop anything.
|
2018-03-19 03:54:56 +03:00
|
|
|
let path = ast::Path::from_ident(macro_ident.with_span_pos(site_span));
|
2019-10-16 10:59:30 +02:00
|
|
|
ensure_complete_parse(parser, &path, kind.name(), site_span);
|
2018-06-20 02:08:08 +03:00
|
|
|
fragment
|
2015-07-25 21:54:19 -07:00
|
|
|
}
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-01-25 13:34:26 -08:00
|
|
|
struct MacroRulesMacroExpander {
|
2020-04-19 13:00:18 +02:00
|
|
|
name: Ident,
|
2019-06-30 03:05:52 +03:00
|
|
|
span: Span,
|
2019-08-23 01:31:01 +03:00
|
|
|
transparency: Transparency,
|
2019-09-22 19:17:30 +03:00
|
|
|
lhses: Vec<mbe::TokenTree>,
|
|
|
|
rhses: Vec<mbe::TokenTree>,
|
2015-11-14 19:50:46 +09:00
|
|
|
valid: bool,
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-07-10 12:09:56 -07:00
|
|
|
impl TTMacroExpander for MacroRulesMacroExpander {
|
2018-10-23 17:18:14 -07:00
|
|
|
fn expand<'cx>(
|
|
|
|
&self,
|
2019-02-07 02:33:01 +09:00
|
|
|
cx: &'cx mut ExtCtxt<'_>,
|
2018-10-23 17:18:14 -07:00
|
|
|
sp: Span,
|
|
|
|
input: TokenStream,
|
2019-05-29 20:21:26 +02:00
|
|
|
) -> Box<dyn MacResult + 'cx> {
|
2015-11-14 19:50:46 +09:00
|
|
|
if !self.valid {
|
|
|
|
return DummyResult::any(sp);
|
|
|
|
}
|
2019-08-23 01:31:01 +03:00
|
|
|
generic_extension(
|
2019-12-22 17:42:04 -05:00
|
|
|
cx,
|
|
|
|
sp,
|
|
|
|
self.span,
|
|
|
|
self.name,
|
|
|
|
self.transparency,
|
|
|
|
input,
|
|
|
|
&self.lhses,
|
|
|
|
&self.rhses,
|
2019-08-23 01:31:01 +03:00
|
|
|
)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-18 13:34:11 +01:00
|
|
|
fn macro_rules_dummy_expander<'cx>(
|
|
|
|
_: &'cx mut ExtCtxt<'_>,
|
|
|
|
span: Span,
|
|
|
|
_: TokenStream,
|
|
|
|
) -> Box<dyn MacResult + 'cx> {
|
|
|
|
DummyResult::any(span)
|
2020-03-17 12:54:57 +01:00
|
|
|
}
|
|
|
|
|
2020-02-05 09:44:03 +11:00
|
|
|
/// Records a `trace_macros!` note for the expansion rooted at `sp`, keyed by
/// the outermost call site so nested expansions group under one entry.
fn trace_macros_note(cx_expansions: &mut FxHashMap<Span, Vec<String>>, sp: Span, message: String) {
    let sp = sp.macro_backtrace().last().map_or(sp, |trace| trace.call_site);
    cx_expansions.entry(sp).or_default().push(message);
}
|
|
|
|
|
2014-06-09 13:12:30 -07:00
|
|
|
/// Given `lhses` and `rhses`, this is the new macro we create
|
2019-05-29 20:21:26 +02:00
|
|
|
fn generic_extension<'cx>(
|
|
|
|
cx: &'cx mut ExtCtxt<'_>,
|
|
|
|
sp: Span,
|
2019-06-30 03:05:52 +03:00
|
|
|
def_span: Span,
|
2020-04-19 13:00:18 +02:00
|
|
|
name: Ident,
|
2019-08-23 01:31:01 +03:00
|
|
|
transparency: Transparency,
|
2019-05-29 20:21:26 +02:00
|
|
|
arg: TokenStream,
|
2019-09-22 19:17:30 +03:00
|
|
|
lhses: &[mbe::TokenTree],
|
|
|
|
rhses: &[mbe::TokenTree],
|
2019-05-29 20:21:26 +02:00
|
|
|
) -> Box<dyn MacResult + 'cx> {
|
2020-07-30 11:27:50 +10:00
|
|
|
let sess = &cx.sess.parse_sess;
|
2020-03-08 13:36:20 +01:00
|
|
|
|
2013-08-30 14:40:05 -07:00
|
|
|
if cx.trace_macros() {
|
2020-06-24 17:45:08 +03:00
|
|
|
let msg = format!("expanding `{}! {{ {} }}`", name, pprust::tts_to_string(&arg));
|
2020-02-05 09:44:03 +11:00
|
|
|
trace_macros_note(&mut cx.expansions, sp, msg);
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Which arm's failure should we report? (the one furthest along)
|
2019-06-05 22:04:52 +03:00
|
|
|
let mut best_failure: Option<(Token, &str)> = None;
|
2020-02-05 09:44:03 +11:00
|
|
|
|
|
|
|
// We create a base parser that can be used for the "black box" parts.
|
2020-02-05 09:44:03 +11:00
|
|
|
// Every iteration needs a fresh copy of that parser. However, the parser
|
|
|
|
// is not mutated on many of the iterations, particularly when dealing with
|
|
|
|
// macros like this:
|
2020-02-05 09:44:03 +11:00
|
|
|
//
|
|
|
|
// macro_rules! foo {
|
|
|
|
// ("a") => (A);
|
|
|
|
// ("b") => (B);
|
|
|
|
// ("c") => (C);
|
|
|
|
// // ... etc. (maybe hundreds more)
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// as seen in the `html5ever` benchmark. We use a `Cow` so that the base
|
|
|
|
// parser is only cloned when necessary (upon mutation). Furthermore, we
|
|
|
|
// reinitialize the `Cow` with the base parser at the start of every
|
|
|
|
// iteration, so that any mutated parsers are not reused. This is all quite
|
|
|
|
// hacky, but speeds up the `html5ever` benchmark significantly. (Issue
|
|
|
|
// 68836 suggests a more comprehensive but more complex change to deal with
|
|
|
|
// this situation.)
|
2020-03-08 13:36:20 +01:00
|
|
|
let parser = parser_from_cx(sess, arg.clone());
|
2020-02-05 09:44:03 +11:00
|
|
|
|
2019-05-29 20:21:26 +02:00
|
|
|
for (i, lhs) in lhses.iter().enumerate() {
|
|
|
|
// try each arm's matchers
|
2015-11-14 19:18:32 +09:00
|
|
|
let lhs_tt = match *lhs {
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::Delimited(_, ref delim) => &delim.tts[..],
|
2019-05-29 20:21:26 +02:00
|
|
|
_ => cx.span_bug(sp, "malformed macro lhs"),
|
2015-11-14 19:18:32 +09:00
|
|
|
};
|
2015-02-24 19:56:01 +01:00
|
|
|
|
2019-10-30 17:34:00 +01:00
|
|
|
// Take a snapshot of the state of pre-expansion gating at this point.
|
|
|
|
// This is used so that if a matcher is not `Success(..)`ful,
|
2019-11-26 22:19:54 -05:00
|
|
|
// then the spans which became gated when parsing the unsuccessful matcher
|
2019-10-30 17:34:00 +01:00
|
|
|
// are not recorded. On the first `Success(..)`ful matcher, the spans are merged.
|
2020-03-08 13:36:20 +01:00
|
|
|
let mut gated_spans_snapshot = mem::take(&mut *sess.gated_spans.spans.borrow_mut());
|
2019-10-30 17:34:00 +01:00
|
|
|
|
2021-06-07 20:17:48 -05:00
|
|
|
match parse_tt(&mut Cow::Borrowed(&parser), lhs_tt, name) {
|
2015-11-14 19:18:32 +09:00
|
|
|
Success(named_matches) => {
|
2019-10-30 17:34:00 +01:00
|
|
|
// The matcher was `Success(..)`ful.
|
|
|
|
// Merge the gated spans from parsing the matcher with the pre-existing ones.
|
2020-03-08 13:36:20 +01:00
|
|
|
sess.gated_spans.merge(gated_spans_snapshot);
|
2019-10-30 17:34:00 +01:00
|
|
|
|
2015-11-14 19:11:40 +09:00
|
|
|
let rhs = match rhses[i] {
|
2015-11-14 19:18:32 +09:00
|
|
|
// ignore delimiters
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::Delimited(_, ref delimed) => delimed.tts.clone(),
|
2016-05-17 17:39:11 +02:00
|
|
|
_ => cx.span_bug(sp, "malformed macro rhs"),
|
2013-08-30 14:40:05 -07:00
|
|
|
};
|
2018-11-05 12:25:31 -08:00
|
|
|
let arm_span = rhses[i].span();
|
2017-07-19 21:54:01 -07:00
|
|
|
|
2017-07-21 16:44:23 -07:00
|
|
|
let rhs_spans = rhs.iter().map(|t| t.span()).collect::<Vec<_>>();
|
2013-08-30 14:40:05 -07:00
|
|
|
// rhs has holes ( `$id` and `$(...)` that need filled)
|
2020-03-17 12:12:57 +01:00
|
|
|
let mut tts = match transcribe(cx, &named_matches, rhs, transparency) {
|
|
|
|
Ok(tts) => tts,
|
|
|
|
Err(mut err) => {
|
|
|
|
err.emit();
|
|
|
|
return DummyResult::any(arm_span);
|
|
|
|
}
|
|
|
|
};
|
2017-07-19 21:54:01 -07:00
|
|
|
|
|
|
|
// Replace all the tokens for the corresponding positions in the macro, to maintain
|
|
|
|
// proper positions in error reporting, while maintaining the macro_backtrace.
|
2017-07-21 16:44:23 -07:00
|
|
|
if rhs_spans.len() == tts.len() {
|
2020-09-14 01:45:10 -04:00
|
|
|
tts = tts.map_enumerated(|i, tt| {
|
|
|
|
let mut tt = tt.clone();
|
2017-07-21 16:44:23 -07:00
|
|
|
let mut sp = rhs_spans[i];
|
2017-07-31 23:04:34 +03:00
|
|
|
sp = sp.with_ctxt(tt.span().ctxt());
|
2017-07-19 21:54:01 -07:00
|
|
|
tt.set_span(sp);
|
|
|
|
tt
|
|
|
|
});
|
|
|
|
}
|
2017-05-19 13:43:06 -05:00
|
|
|
|
|
|
|
if cx.trace_macros() {
|
2020-06-24 17:45:08 +03:00
|
|
|
let msg = format!("to `{}`", pprust::tts_to_string(&tts));
|
2020-02-05 09:44:03 +11:00
|
|
|
trace_macros_note(&mut cx.expansions, sp, msg);
|
2017-05-19 13:43:06 -05:00
|
|
|
}
|
|
|
|
|
2020-03-09 10:35:35 +01:00
|
|
|
let mut p = Parser::new(sess, tts, false, None);
|
2019-07-18 18:36:19 -07:00
|
|
|
p.last_type_ascription = cx.current_expansion.prior_type_ascription;
|
2020-12-07 18:55:00 -05:00
|
|
|
let lint_node_id = cx.resolver.lint_node_id(cx.current_expansion.id);
|
2016-11-05 04:16:26 +00:00
|
|
|
|
2013-08-30 14:40:05 -07:00
|
|
|
// Let the context choose how to interpret the result.
|
|
|
|
// Weird, but useful for X-macros.
|
2015-04-15 20:56:16 -07:00
|
|
|
return Box::new(ParserAnyMacro {
|
2016-09-23 09:32:58 +00:00
|
|
|
parser: p,
|
2015-04-04 13:13:57 -07:00
|
|
|
|
|
|
|
// Pass along the original expansion site and the name of the macro
|
|
|
|
// so we can print a useful error message if the parse of the expanded
|
|
|
|
// macro leaves unparsed tokens.
|
|
|
|
site_span: sp,
|
2018-11-05 12:25:31 -08:00
|
|
|
macro_ident: name,
|
2020-12-07 18:55:00 -05:00
|
|
|
lint_node_id,
|
2018-11-05 12:25:31 -08:00
|
|
|
arm_span,
|
2019-05-29 20:21:26 +02:00
|
|
|
});
|
2015-11-14 19:18:32 +09:00
|
|
|
}
|
2019-06-05 22:04:52 +03:00
|
|
|
Failure(token, msg) => match best_failure {
|
|
|
|
Some((ref best_token, _)) if best_token.span.lo() >= token.span.lo() => {}
|
2019-05-29 20:21:26 +02:00
|
|
|
_ => best_failure = Some((token, msg)),
|
|
|
|
},
|
2020-03-17 09:37:59 +01:00
|
|
|
Error(err_sp, ref msg) => {
|
|
|
|
let span = err_sp.substitute_dummy(sp);
|
|
|
|
cx.struct_span_err(span, &msg).emit();
|
|
|
|
return DummyResult::any(span);
|
|
|
|
}
|
2020-03-17 14:13:32 +01:00
|
|
|
ErrorReported => return DummyResult::any(sp),
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
2019-10-30 17:34:00 +01:00
|
|
|
|
|
|
|
// The matcher was not `Success(..)`ful.
|
|
|
|
// Restore to the state before snapshotting and maybe try again.
|
2020-03-08 13:36:20 +01:00
|
|
|
mem::swap(&mut gated_spans_snapshot, &mut sess.gated_spans.spans.borrow_mut());
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
2020-02-05 09:44:03 +11:00
|
|
|
drop(parser);
|
2015-08-07 16:36:56 +02:00
|
|
|
|
2019-06-05 22:04:52 +03:00
|
|
|
let (token, label) = best_failure.expect("ran no matchers");
|
|
|
|
let span = token.span.substitute_dummy(sp);
|
2019-06-08 22:38:23 +03:00
|
|
|
let mut err = cx.struct_span_err(span, &parse_failure_msg(&token));
|
2019-06-05 22:04:52 +03:00
|
|
|
err.span_label(span, label);
|
2019-11-13 13:01:43 +01:00
|
|
|
if !def_span.is_dummy() && !cx.source_map().is_imported(def_span) {
|
2020-03-09 11:42:37 -07:00
|
|
|
err.span_label(cx.source_map().guess_head_span(def_span), "when calling this macro");
|
2018-10-23 17:18:14 -07:00
|
|
|
}
|
2018-07-14 23:50:08 -07:00
|
|
|
|
|
|
|
// Check whether there's a missing comma in this macro call, like `println!("{}" a);`
|
|
|
|
if let Some((arg, comma_span)) = arg.add_comma() {
|
2019-05-29 20:21:26 +02:00
|
|
|
for lhs in lhses {
|
|
|
|
// try each arm's matchers
|
2018-07-14 23:50:08 -07:00
|
|
|
let lhs_tt = match *lhs {
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::Delimited(_, ref delim) => &delim.tts[..],
|
2018-08-07 22:28:09 -07:00
|
|
|
_ => continue,
|
2018-07-14 23:50:08 -07:00
|
|
|
};
|
2020-03-22 13:36:56 +01:00
|
|
|
if let Success(_) =
|
2021-06-07 20:17:48 -05:00
|
|
|
parse_tt(&mut Cow::Borrowed(&parser_from_cx(sess, arg.clone())), lhs_tt, name)
|
2020-03-22 13:36:56 +01:00
|
|
|
{
|
|
|
|
if comma_span.is_dummy() {
|
|
|
|
err.note("you might be missing a comma");
|
|
|
|
} else {
|
|
|
|
err.span_suggestion_short(
|
|
|
|
comma_span,
|
|
|
|
"missing comma here",
|
|
|
|
", ".to_string(),
|
|
|
|
Applicability::MachineApplicable,
|
|
|
|
);
|
2018-07-14 23:50:08 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
err.emit();
|
2017-09-02 17:21:13 +02:00
|
|
|
cx.trace_macros_diag();
|
|
|
|
DummyResult::any(sp)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-10-07 00:18:24 +01:00
|
|
|
// Note that macro-by-example's input is also matched against a token tree:
|
|
|
|
// $( $lhs:tt => $rhs:tt );+
|
|
|
|
//
|
|
|
|
// Holy self-referential!
|
|
|
|
|
2019-09-22 17:42:17 +03:00
|
|
|
/// Converts a macro item into a syntax extension.
|
|
|
|
pub fn compile_declarative_macro(
|
2020-07-30 11:27:50 +10:00
|
|
|
sess: &Session,
|
2019-01-14 19:14:02 -06:00
|
|
|
features: &Features,
|
|
|
|
def: &ast::Item,
|
2019-05-29 20:21:26 +02:00
|
|
|
edition: Edition,
|
2019-01-14 19:14:02 -06:00
|
|
|
) -> SyntaxExtension {
|
2020-06-20 20:59:04 -04:00
|
|
|
debug!("compile_declarative_macro: {:?}", def);
|
2020-03-17 12:54:57 +01:00
|
|
|
let mk_syn_ext = |expander| {
|
|
|
|
SyntaxExtension::new(
|
|
|
|
sess,
|
|
|
|
SyntaxExtensionKind::LegacyBang(expander),
|
|
|
|
def.span,
|
|
|
|
Vec::new(),
|
|
|
|
edition,
|
|
|
|
def.ident.name,
|
|
|
|
&def.attrs,
|
|
|
|
)
|
|
|
|
};
|
|
|
|
|
2020-07-30 11:27:50 +10:00
|
|
|
let diag = &sess.parse_sess.span_diagnostic;
|
2020-04-19 13:00:18 +02:00
|
|
|
let lhs_nm = Ident::new(sym::lhs, def.span);
|
|
|
|
let rhs_nm = Ident::new(sym::rhs, def.span);
|
2020-12-19 16:30:56 -05:00
|
|
|
let tt_spec = Some(NonterminalKind::TT);
|
2012-07-18 16:18:02 -07:00
|
|
|
|
2017-03-18 01:55:51 +00:00
|
|
|
// Parse the macro_rules! invocation
|
2020-03-14 00:52:24 +03:00
|
|
|
let (macro_rules, body) = match &def.kind {
|
|
|
|
ast::ItemKind::MacroDef(def) => (def.macro_rules, def.body.inner_tokens()),
|
2017-03-18 01:55:51 +00:00
|
|
|
_ => unreachable!(),
|
|
|
|
};
|
|
|
|
|
2013-05-29 16:21:04 -07:00
|
|
|
// The pattern that macro_rules matches.
|
2012-07-27 17:32:15 -07:00
|
|
|
// The grammar for macro_rules! is:
|
2014-10-06 23:00:56 +01:00
|
|
|
// $( $lhs:tt => $rhs:tt );+
|
2012-07-27 17:32:15 -07:00
|
|
|
// ...quasiquoting this would be nice.
|
2014-10-06 23:00:56 +01:00
|
|
|
// These spans won't matter, anyways
|
2016-06-03 20:27:15 +00:00
|
|
|
let argument_gram = vec![
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::Sequence(
|
2019-05-29 20:21:26 +02:00
|
|
|
DelimSpan::dummy(),
|
2019-09-22 19:17:30 +03:00
|
|
|
Lrc::new(mbe::SequenceRepetition {
|
2019-05-29 20:21:26 +02:00
|
|
|
tts: vec![
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::MetaVarDecl(def.span, lhs_nm, tt_spec),
|
|
|
|
mbe::TokenTree::token(token::FatArrow, def.span),
|
|
|
|
mbe::TokenTree::MetaVarDecl(def.span, rhs_nm, tt_spec),
|
2019-05-29 20:21:26 +02:00
|
|
|
],
|
|
|
|
separator: Some(Token::new(
|
2020-03-14 00:52:24 +03:00
|
|
|
if macro_rules { token::Semi } else { token::Comma },
|
2019-05-29 20:21:26 +02:00
|
|
|
def.span,
|
|
|
|
)),
|
2019-09-22 19:17:30 +03:00
|
|
|
kleene: mbe::KleeneToken::new(mbe::KleeneOp::OneOrMore, def.span),
|
2019-05-29 20:21:26 +02:00
|
|
|
num_captures: 2,
|
|
|
|
}),
|
|
|
|
),
|
2016-06-03 20:27:15 +00:00
|
|
|
// to phase into semicolon-termination instead of semicolon-separation
|
2019-09-22 19:17:30 +03:00
|
|
|
mbe::TokenTree::Sequence(
|
2019-05-29 20:21:26 +02:00
|
|
|
DelimSpan::dummy(),
|
2019-09-22 19:17:30 +03:00
|
|
|
Lrc::new(mbe::SequenceRepetition {
|
|
|
|
tts: vec![mbe::TokenTree::token(
|
2020-03-14 00:52:24 +03:00
|
|
|
if macro_rules { token::Semi } else { token::Comma },
|
2019-08-01 21:07:47 +01:00
|
|
|
def.span,
|
|
|
|
)],
|
2019-05-29 20:21:26 +02:00
|
|
|
separator: None,
|
2019-09-22 19:17:30 +03:00
|
|
|
kleene: mbe::KleeneToken::new(mbe::KleeneOp::ZeroOrMore, def.span),
|
2019-05-29 20:21:26 +02:00
|
|
|
num_captures: 0,
|
|
|
|
}),
|
|
|
|
),
|
2016-06-03 20:27:15 +00:00
|
|
|
];
|
2012-07-27 17:32:15 -07:00
|
|
|
|
2020-07-30 11:27:50 +10:00
|
|
|
let parser = Parser::new(&sess.parse_sess, body, true, rustc_parse::MACRO_ARGUMENTS);
|
2021-06-07 20:17:48 -05:00
|
|
|
let argument_map = match parse_tt(&mut Cow::Borrowed(&parser), &argument_gram, def.ident) {
|
2015-08-07 16:36:56 +02:00
|
|
|
Success(m) => m,
|
2019-06-05 01:17:07 +03:00
|
|
|
Failure(token, msg) => {
|
2019-06-08 22:38:23 +03:00
|
|
|
let s = parse_failure_msg(&token);
|
2019-06-05 01:17:07 +03:00
|
|
|
let sp = token.span.substitute_dummy(def.span);
|
2020-07-30 11:27:50 +10:00
|
|
|
sess.parse_sess.span_diagnostic.struct_span_err(sp, &s).span_label(sp, msg).emit();
|
2020-03-18 13:34:11 +01:00
|
|
|
return mk_syn_ext(Box::new(macro_rules_dummy_expander));
|
2016-10-21 12:01:06 +11:00
|
|
|
}
|
2020-03-17 12:54:57 +01:00
|
|
|
Error(sp, msg) => {
|
2020-07-30 11:27:50 +10:00
|
|
|
sess.parse_sess
|
|
|
|
.span_diagnostic
|
|
|
|
.struct_span_err(sp.substitute_dummy(def.span), &msg)
|
|
|
|
.emit();
|
2020-03-18 13:34:11 +01:00
|
|
|
return mk_syn_ext(Box::new(macro_rules_dummy_expander));
|
2015-08-07 16:36:56 +02:00
|
|
|
}
|
2020-03-17 14:13:32 +01:00
|
|
|
ErrorReported => {
|
2020-03-18 13:34:11 +01:00
|
|
|
return mk_syn_ext(Box::new(macro_rules_dummy_expander));
|
2020-03-17 14:13:32 +01:00
|
|
|
}
|
2015-08-07 16:36:56 +02:00
|
|
|
};
|
2012-07-06 18:04:28 -07:00
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
let mut valid = true;
|
|
|
|
|
2012-07-27 17:32:15 -07:00
|
|
|
// Extract the arguments:
|
2020-03-11 20:05:19 +00:00
|
|
|
let lhses = match argument_map[&MacroRulesNormalizedIdent::new(lhs_nm)] {
|
2019-12-12 15:48:30 +11:00
|
|
|
MatchedSeq(ref s) => s
|
2019-05-29 20:21:26 +02:00
|
|
|
.iter()
|
|
|
|
.map(|m| {
|
2017-06-08 05:51:32 -06:00
|
|
|
if let MatchedNonterminal(ref nt) = *m {
|
2016-11-02 03:03:55 +00:00
|
|
|
if let NtTT(ref tt) = **nt {
|
2020-12-28 16:57:13 -06:00
|
|
|
let tt = mbe::quoted::parse(
|
|
|
|
tt.clone().into(),
|
|
|
|
true,
|
|
|
|
&sess.parse_sess,
|
|
|
|
def.id,
|
|
|
|
features,
|
Use correct edition when parsing `:pat` matchers
As described in issue #85708, we currently do not properly decode
`SyntaxContext::root()` and `ExpnId::root()` from foreign crates. As a
result, when we decode a span from a foreign crate with
`SyntaxContext::root()`, we end up up considering it to have the edition
of the *current* crate, instead of the foreign crate where it was
originally created.
A full fix for this issue will be a fairly significant undertaking.
Fortunately, it's possible to implement a partial fix, which gives us
the correct edition-dependent behavior for `:pat` matchers when the
macro is loaded from another crate. Since we have the edition of the
macro's defining crate available, we can 'recover' from seeing a
`SyntaxContext::root()` and use the edition of the macro's defining
crate.
Any solution to issue #85708 must reproduce the behavior of this
targeted fix - properly preserving a foreign `SyntaxContext::root()`
means (among other things) preserving its edition, which by definition
is the edition of the foreign crate itself. Therefore, this fix moves us
closer to the correct overall solution, and does not expose any new
incorrect behavior to macros.
2021-05-25 22:58:42 -05:00
|
|
|
edition,
|
2020-12-28 16:57:13 -06:00
|
|
|
)
|
|
|
|
.pop()
|
|
|
|
.unwrap();
|
2021-06-23 14:07:32 +00:00
|
|
|
valid &= check_lhs_nt_follows(&sess.parse_sess, features, &def, &tt);
|
2017-01-29 08:38:44 +00:00
|
|
|
return tt;
|
2016-11-02 03:03:55 +00:00
|
|
|
}
|
2016-05-19 00:38:08 +02:00
|
|
|
}
|
2020-07-30 11:27:50 +10:00
|
|
|
sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
|
2019-05-29 20:21:26 +02:00
|
|
|
})
|
2019-09-22 19:17:30 +03:00
|
|
|
.collect::<Vec<mbe::TokenTree>>(),
|
2020-07-30 11:27:50 +10:00
|
|
|
_ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs"),
|
2012-07-06 18:04:28 -07:00
|
|
|
};
|
2013-02-17 10:59:09 -08:00
|
|
|
|
2020-03-11 20:05:19 +00:00
|
|
|
let rhses = match argument_map[&MacroRulesNormalizedIdent::new(rhs_nm)] {
|
2019-12-12 15:48:30 +11:00
|
|
|
MatchedSeq(ref s) => s
|
2019-05-29 20:21:26 +02:00
|
|
|
.iter()
|
|
|
|
.map(|m| {
|
2017-06-08 05:51:32 -06:00
|
|
|
if let MatchedNonterminal(ref nt) = *m {
|
2016-11-02 03:03:55 +00:00
|
|
|
if let NtTT(ref tt) = **nt {
|
2020-07-30 11:27:50 +10:00
|
|
|
return mbe::quoted::parse(
|
|
|
|
tt.clone().into(),
|
|
|
|
false,
|
|
|
|
&sess.parse_sess,
|
|
|
|
def.id,
|
2020-12-28 16:57:13 -06:00
|
|
|
features,
|
Use correct edition when parsing `:pat` matchers
As described in issue #85708, we currently do not properly decode
`SyntaxContext::root()` and `ExpnId::root()` from foreign crates. As a
result, when we decode a span from a foreign crate with
`SyntaxContext::root()`, we end up up considering it to have the edition
of the *current* crate, instead of the foreign crate where it was
originally created.
A full fix for this issue will be a fairly significant undertaking.
Fortunately, it's possible to implement a partial fix, which gives us
the correct edition-dependent behavior for `:pat` matchers when the
macro is loaded from another crate. Since we have the edition of the
macro's defining crate available, we can 'recover' from seeing a
`SyntaxContext::root()` and use the edition of the macro's defining
crate.
Any solution to issue #85708 must reproduce the behavior of this
targeted fix - properly preserving a foreign `SyntaxContext::root()`
means (among other things) preserving its edition, which by definition
is the edition of the foreign crate itself. Therefore, this fix moves us
closer to the correct overall solution, and does not expose any new
incorrect behavior to macros.
2021-05-25 22:58:42 -05:00
|
|
|
edition,
|
2020-07-30 11:27:50 +10:00
|
|
|
)
|
|
|
|
.pop()
|
|
|
|
.unwrap();
|
2016-11-02 03:03:55 +00:00
|
|
|
}
|
|
|
|
}
|
2020-07-30 11:27:50 +10:00
|
|
|
sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
|
2019-05-29 20:21:26 +02:00
|
|
|
})
|
2019-09-22 19:17:30 +03:00
|
|
|
.collect::<Vec<mbe::TokenTree>>(),
|
2020-07-30 11:27:50 +10:00
|
|
|
_ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured rhs"),
|
2012-07-06 18:04:28 -07:00
|
|
|
};
|
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
for rhs in &rhses {
|
2020-07-30 11:27:50 +10:00
|
|
|
valid &= check_rhs(&sess.parse_sess, rhs);
|
2015-11-14 19:50:46 +09:00
|
|
|
}
|
|
|
|
|
2016-09-25 18:55:04 +02:00
|
|
|
// don't abort iteration early, so that errors for multiple lhses can be reported
|
|
|
|
for lhs in &lhses {
|
2020-07-30 11:27:50 +10:00
|
|
|
valid &= check_lhs_no_empty_seq(&sess.parse_sess, slice::from_ref(lhs));
|
2016-09-25 18:55:04 +02:00
|
|
|
}
|
|
|
|
|
2020-07-30 11:27:50 +10:00
|
|
|
valid &= macro_check::check_meta_variables(&sess.parse_sess, def.id, def.span, &lhses, &rhses);
|
2019-05-30 12:53:27 +02:00
|
|
|
|
2020-07-30 11:27:50 +10:00
|
|
|
let (transparency, transparency_error) = attr::find_transparency(sess, &def.attrs, macro_rules);
|
2019-06-30 01:12:04 +03:00
|
|
|
match transparency_error {
|
2019-12-22 17:42:04 -05:00
|
|
|
Some(TransparencyError::UnknownTransparency(value, span)) => {
|
|
|
|
diag.span_err(span, &format!("unknown macro transparency: `{}`", value))
|
|
|
|
}
|
|
|
|
Some(TransparencyError::MultipleTransparencyAttrs(old_span, new_span)) => {
|
|
|
|
diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes")
|
|
|
|
}
|
2019-06-30 01:12:04 +03:00
|
|
|
None => {}
|
|
|
|
}
|
2019-06-07 00:37:47 +03:00
|
|
|
|
2020-03-18 13:34:11 +01:00
|
|
|
mk_syn_ext(Box::new(MacroRulesMacroExpander {
|
2019-12-22 17:42:04 -05:00
|
|
|
name: def.ident,
|
|
|
|
span: def.span,
|
|
|
|
transparency,
|
|
|
|
lhses,
|
|
|
|
rhses,
|
|
|
|
valid,
|
2020-03-18 13:34:11 +01:00
|
|
|
}))
|
2012-10-15 14:56:42 -07:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
|
2019-05-29 20:21:26 +02:00
|
|
|
fn check_lhs_nt_follows(
|
|
|
|
sess: &ParseSess,
|
|
|
|
features: &Features,
|
2021-06-23 14:07:32 +00:00
|
|
|
def: &ast::Item,
|
2019-09-22 19:17:30 +03:00
|
|
|
lhs: &mbe::TokenTree,
|
2019-05-29 20:21:26 +02:00
|
|
|
) -> bool {
|
2015-11-14 19:11:40 +09:00
|
|
|
// lhs is going to be like TokenTree::Delimited(...), where the
|
2015-11-06 14:52:02 +01:00
|
|
|
// entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
|
2019-09-22 19:17:30 +03:00
|
|
|
if let mbe::TokenTree::Delimited(_, ref tts) = *lhs {
|
2021-06-23 14:07:32 +00:00
|
|
|
check_matcher(sess, features, def, &tts.tts)
|
2017-05-12 20:05:39 +02:00
|
|
|
} else {
|
|
|
|
let msg = "invalid macro matcher; matchers must be contained in balanced delimiters";
|
|
|
|
sess.span_diagnostic.span_err(lhs.span(), msg);
|
|
|
|
false
|
2016-05-18 15:08:19 +02:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
// we don't abort on errors on rejection, the driver will do that for us
|
|
|
|
// after parsing/expansion. we can report every error in every macro this way.
|
|
|
|
}
|
|
|
|
|
2019-02-08 14:53:55 +01:00
|
|
|
/// Checks that the lhs contains no repetition which could match an empty token
|
2016-09-25 18:55:04 +02:00
|
|
|
/// tree, because then the matcher would hang indefinitely.
|
2019-09-22 19:17:30 +03:00
|
|
|
fn check_lhs_no_empty_seq(sess: &ParseSess, tts: &[mbe::TokenTree]) -> bool {
|
|
|
|
use mbe::TokenTree;
|
2016-09-25 18:55:04 +02:00
|
|
|
for tt in tts {
|
|
|
|
match *tt {
|
2017-03-28 05:32:43 +00:00
|
|
|
TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => (),
|
2019-05-29 20:21:26 +02:00
|
|
|
TokenTree::Delimited(_, ref del) => {
|
|
|
|
if !check_lhs_no_empty_seq(sess, &del.tts) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2016-09-25 18:55:04 +02:00
|
|
|
TokenTree::Sequence(span, ref seq) => {
|
2019-05-29 20:21:26 +02:00
|
|
|
if seq.separator.is_none()
|
|
|
|
&& seq.tts.iter().all(|seq_tt| match *seq_tt {
|
2020-12-19 16:30:56 -05:00
|
|
|
TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Vis)) => true,
|
2019-05-29 20:21:26 +02:00
|
|
|
TokenTree::Sequence(_, ref sub_seq) => {
|
2019-09-22 19:17:30 +03:00
|
|
|
sub_seq.kleene.op == mbe::KleeneOp::ZeroOrMore
|
|
|
|
|| sub_seq.kleene.op == mbe::KleeneOp::ZeroOrOne
|
2019-05-29 20:21:26 +02:00
|
|
|
}
|
2018-07-02 19:44:01 -05:00
|
|
|
_ => false,
|
2019-05-29 20:21:26 +02:00
|
|
|
})
|
|
|
|
{
|
2018-09-08 18:07:02 -07:00
|
|
|
let sp = span.entire();
|
|
|
|
sess.span_diagnostic.span_err(sp, "repetition matches empty token tree");
|
2017-05-12 20:05:39 +02:00
|
|
|
return false;
|
2016-09-25 18:55:04 +02:00
|
|
|
}
|
|
|
|
if !check_lhs_no_empty_seq(sess, &seq.tts) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
true
|
|
|
|
}
|
|
|
|
|
2019-09-22 19:17:30 +03:00
|
|
|
/// A macro rhs is only valid when it is a delimited token tree; anything else
/// is reported as an error.
fn check_rhs(sess: &ParseSess, rhs: &mbe::TokenTree) -> bool {
    if let mbe::TokenTree::Delimited(..) = *rhs {
        return true;
    }
    sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited");
    false
}
|
|
|
|
|
2019-05-29 20:21:26 +02:00
|
|
|
fn check_matcher(
|
|
|
|
sess: &ParseSess,
|
|
|
|
features: &Features,
|
2021-06-23 14:07:32 +00:00
|
|
|
def: &ast::Item,
|
2019-09-22 19:17:30 +03:00
|
|
|
matcher: &[mbe::TokenTree],
|
2019-05-29 20:21:26 +02:00
|
|
|
) -> bool {
|
2015-11-12 20:55:28 +01:00
|
|
|
let first_sets = FirstSets::new(matcher);
|
|
|
|
let empty_suffix = TokenSet::empty();
|
2016-09-20 20:29:13 +00:00
|
|
|
let err = sess.span_diagnostic.err_count();
|
2021-06-23 14:07:32 +00:00
|
|
|
check_matcher_core(sess, features, def, &first_sets, matcher, &empty_suffix);
|
2016-09-20 20:29:13 +00:00
|
|
|
err == sess.span_diagnostic.err_count()
|
2015-11-12 20:55:28 +01:00
|
|
|
}
|
|
|
|
|
2018-11-27 02:59:49 +00:00
|
|
|
// The `FirstSets` for a matcher is a mapping from subsequences in the
// matcher to the FIRST set for that subsequence.
//
// This mapping is partially precomputed via a backwards scan over the
// token trees of the matcher, which provides a mapping from each
// repetition sequence to its *first* set.
//
// (Hypothetically, sequences should be uniquely identifiable via their
// spans, though perhaps that is false, e.g., for macro-generated macros
// that do not try to inject artificial span information. My plan is
// to try to catch such cases ahead of time and not include them in
// the precomputed mapping.)
struct FirstSets {
    // this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its
    // span in the original matcher to the First set for the inner sequence `tt ...`.
    //
    // If two sequences have the same span in a matcher, then map that
    // span to None (invalidating the mapping here and forcing the code to
    // use a slow path).
    first: FxHashMap<Span, Option<TokenSet>>,
}
|
|
|
|
|
|
|
|
impl FirstSets {
    /// Precomputes the FIRST-set table for `tts` with a single backwards scan.
    fn new(tts: &[mbe::TokenTree]) -> FirstSets {
        use mbe::TokenTree;

        let mut sets = FirstSets { first: FxHashMap::default() };
        build_recur(&mut sets, tts);
        return sets;

        // walks backward over `tts`, returning the FIRST for `tts`
        // and updating `sets` at the same time for all sequence
        // substructure we find within `tts`.
        fn build_recur(sets: &mut FirstSets, tts: &[TokenTree]) -> TokenSet {
            let mut first = TokenSet::empty();
            for tt in tts.iter().rev() {
                match *tt {
                    TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => {
                        // A leaf always starts the remaining suffix; it replaces
                        // whatever FIRST we had accumulated so far.
                        first.replace_with(tt.clone());
                    }
                    TokenTree::Delimited(span, ref delimited) => {
                        build_recur(sets, &delimited.tts[..]);
                        // The FIRST of a delimited group is its opening delimiter.
                        first.replace_with(delimited.open_tt(span));
                    }
                    TokenTree::Sequence(sp, ref seq_rep) => {
                        let subfirst = build_recur(sets, &seq_rep.tts[..]);

                        match sets.first.entry(sp.entire()) {
                            Entry::Vacant(vac) => {
                                vac.insert(Some(subfirst.clone()));
                            }
                            Entry::Occupied(mut occ) => {
                                // if there is already an entry, then a span must have collided.
                                // This should not happen with typical macro_rules macros,
                                // but syntax extensions need not maintain distinct spans,
                                // so distinct syntax trees can be assigned the same span.
                                // In such a case, the map cannot be trusted; so mark this
                                // entry as unusable.
                                occ.insert(None);
                            }
                        }

                        // If the sequence contents can be empty, then the first
                        // token could be the separator token itself.

                        if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
                            first.add_one_maybe(TokenTree::Token(sep.clone()));
                        }

                        // Reverse scan: Sequence comes before `first`.
                        if subfirst.maybe_empty
                            || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
                            || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
                        {
                            // If sequence is potentially empty, then
                            // union them (preserving first emptiness).
                            first.add_all(&TokenSet { maybe_empty: true, ..subfirst });
                        } else {
                            // Otherwise, sequence guaranteed
                            // non-empty; replace first.
                            first = subfirst;
                        }
                    }
                }
            }

            first
        }
    }

    // walks forward over `tts` until all potential FIRST tokens are
    // identified.
    fn first(&self, tts: &[mbe::TokenTree]) -> TokenSet {
        use mbe::TokenTree;

        let mut first = TokenSet::empty();
        for tt in tts.iter() {
            // Invariant: we only keep scanning while everything seen so far
            // could still match the empty sequence.
            assert!(first.maybe_empty);
            match *tt {
                TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => {
                    first.add_one(tt.clone());
                    return first;
                }
                TokenTree::Delimited(span, ref delimited) => {
                    first.add_one(delimited.open_tt(span));
                    return first;
                }
                TokenTree::Sequence(sp, ref seq_rep) => {
                    let subfirst_owned;
                    let subfirst = match self.first.get(&sp.entire()) {
                        Some(&Some(ref subfirst)) => subfirst,
                        Some(&None) => {
                            // Span collision invalidated the precomputed entry;
                            // recompute the FIRST set on the slow path.
                            subfirst_owned = self.first(&seq_rep.tts[..]);
                            &subfirst_owned
                        }
                        None => {
                            panic!("We missed a sequence during FirstSets construction");
                        }
                    };

                    // If the sequence contents can be empty, then the first
                    // token could be the separator token itself.
                    if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
                        first.add_one_maybe(TokenTree::Token(sep.clone()));
                    }

                    assert!(first.maybe_empty);
                    first.add_all(subfirst);
                    if subfirst.maybe_empty
                        || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
                        || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
                    {
                        // Continue scanning for more first
                        // tokens, but also make sure we
                        // restore empty-tracking state.
                        first.maybe_empty = true;
                        continue;
                    } else {
                        return first;
                    }
                }
            }
        }

        // we only exit the loop if `tts` was empty or if every
        // element of `tts` matches the empty sequence.
        assert!(first.maybe_empty);
        first
    }
}
|
|
|
|
|
2019-09-22 19:17:30 +03:00
|
|
|
// A set of `mbe::TokenTree`s, which may include `TokenTree::Match`s
// (for macro-by-example syntactic variables). It also carries the
// `maybe_empty` flag; that is true if and only if the matcher can
// match an empty token sequence.
//
// The First set is computed on submatchers like `$($a:expr b),* $(c)* d`,
// which has corresponding FIRST = {$a:expr, c, d}.
// Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}.
//
// (Notably, we must allow for *-op to occur zero times.)
#[derive(Clone, Debug)]
struct TokenSet {
    // The member tokens; treated as a set (insertion filters duplicates).
    tokens: Vec<mbe::TokenTree>,
    // True iff the corresponding matcher fragment can match nothing at all.
    maybe_empty: bool,
}
|
|
|
|
|
|
|
|
impl TokenSet {
|
|
|
|
// Returns a set for the empty sequence.
|
2019-05-29 20:21:26 +02:00
|
|
|
fn empty() -> Self {
|
|
|
|
TokenSet { tokens: Vec::new(), maybe_empty: true }
|
|
|
|
}
|
2015-11-12 20:55:28 +01:00
|
|
|
|
|
|
|
// Returns the set `{ tok }` for the single-token (and thus
|
|
|
|
// non-empty) sequence [tok].
|
2019-09-22 19:17:30 +03:00
|
|
|
fn singleton(tok: mbe::TokenTree) -> Self {
|
2018-07-02 19:44:01 -05:00
|
|
|
TokenSet { tokens: vec![tok], maybe_empty: false }
|
2015-11-12 20:55:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Changes self to be the set `{ tok }`.
|
|
|
|
// Since `tok` is always present, marks self as non-empty.
|
2019-09-22 19:17:30 +03:00
|
|
|
fn replace_with(&mut self, tok: mbe::TokenTree) {
|
2015-11-12 20:55:28 +01:00
|
|
|
self.tokens.clear();
|
|
|
|
self.tokens.push(tok);
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Changes self to be the empty set `{}`; meant for use when
|
|
|
|
// the particular token does not matter, but we want to
|
|
|
|
// record that it occurs.
|
|
|
|
fn replace_with_irrelevant(&mut self) {
|
|
|
|
self.tokens.clear();
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds `tok` to the set for `self`, marking sequence as non-empy.
|
2019-09-22 19:17:30 +03:00
|
|
|
fn add_one(&mut self, tok: mbe::TokenTree) {
|
2015-11-12 20:55:28 +01:00
|
|
|
if !self.tokens.contains(&tok) {
|
|
|
|
self.tokens.push(tok);
|
|
|
|
}
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.)
|
2019-09-22 19:17:30 +03:00
|
|
|
fn add_one_maybe(&mut self, tok: mbe::TokenTree) {
|
2015-11-12 20:55:28 +01:00
|
|
|
if !self.tokens.contains(&tok) {
|
|
|
|
self.tokens.push(tok);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds all elements of `other` to this.
|
|
|
|
//
|
|
|
|
// (Since this is a set, we filter out duplicates.)
|
|
|
|
//
|
|
|
|
// If `other` is potentially empty, then preserves the previous
|
|
|
|
// setting of the empty flag of `self`. If `other` is guaranteed
|
|
|
|
// non-empty, then `self` is marked non-empty.
|
|
|
|
fn add_all(&mut self, other: &Self) {
|
|
|
|
for tok in &other.tokens {
|
|
|
|
if !self.tokens.contains(tok) {
|
|
|
|
self.tokens.push(tok.clone());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !other.maybe_empty {
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Checks that `matcher` is internally consistent and that it
// can legally be followed by a token `N`, for all `N` in `follow`.
// (If `follow` is empty, then it imposes no constraint on
// the `matcher`.)
//
// Returns the set of NT tokens that could possibly come last in
// `matcher`. (If `matcher` matches the empty sequence, then
// `maybe_empty` will be set to true.)
//
// Requires that `first_sets` is pre-computed for `matcher`;
// see `FirstSets::new`.
fn check_matcher_core(
    sess: &ParseSess,
    features: &Features,
    def: &ast::Item,
    first_sets: &FirstSets,
    matcher: &[mbe::TokenTree],
    follow: &TokenSet,
) -> TokenSet {
    use mbe::TokenTree;

    let mut last = TokenSet::empty();

    // 2. For each token and suffix [T, SUFFIX] in M:
    // ensure that T can be followed by SUFFIX, and if SUFFIX may be empty,
    // then ensure T can also be followed by any element of FOLLOW.
    'each_token: for i in 0..matcher.len() {
        let token = &matcher[i];
        let suffix = &matcher[i + 1..];

        // FIRST of the remainder of the matcher; when that remainder may be
        // empty, the caller-supplied FOLLOW set applies too.
        let build_suffix_first = || {
            let mut s = first_sets.first(suffix);
            if s.maybe_empty {
                s.add_all(follow);
            }
            s
        };

        // (we build `suffix_first` on demand below; you can tell
        // which cases are supposed to fall through by looking for the
        // initialization of this variable.)
        let suffix_first;

        // First, update `last` so that it corresponds to the set
        // of NT tokens that might end the sequence `... token`.
        match *token {
            TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => {
                if token_can_be_followed_by_any(token) {
                    // don't need to track tokens that work with any,
                    last.replace_with_irrelevant();
                    // ... and don't need to check tokens that can be
                    // followed by anything against SUFFIX.
                    continue 'each_token;
                } else {
                    last.replace_with(token.clone());
                    suffix_first = build_suffix_first();
                }
            }
            TokenTree::Delimited(span, ref d) => {
                // Inside delimiters, the only follow constraint is the
                // closing delimiter itself.
                let my_suffix = TokenSet::singleton(d.close_tt(span));
                check_matcher_core(sess, features, def, first_sets, &d.tts, &my_suffix);
                // don't track non NT tokens
                last.replace_with_irrelevant();

                // also, we don't need to check delimited sequences
                // against SUFFIX
                continue 'each_token;
            }
            TokenTree::Sequence(_, ref seq_rep) => {
                suffix_first = build_suffix_first();
                // The trick here: when we check the interior, we want
                // to include the separator (if any) as a potential
                // (but not guaranteed) element of FOLLOW. So in that
                // case, we make a temp copy of suffix and stuff
                // delimiter in there.
                //
                // FIXME: Should I first scan suffix_first to see if
                // delimiter is already in it before I go through the
                // work of cloning it? But then again, this way I may
                // get a "tighter" span?
                let mut new;
                let my_suffix = if let Some(sep) = &seq_rep.separator {
                    new = suffix_first.clone();
                    new.add_one_maybe(TokenTree::Token(sep.clone()));
                    &new
                } else {
                    &suffix_first
                };

                // At this point, `suffix_first` is built, and
                // `my_suffix` is some TokenSet that we can use
                // for checking the interior of `seq_rep`.
                let next =
                    check_matcher_core(sess, features, def, first_sets, &seq_rep.tts, my_suffix);
                if next.maybe_empty {
                    last.add_all(&next);
                } else {
                    last = next;
                }

                // the recursive call to check_matcher_core already ran the 'each_last
                // check below, so we can just keep going forward here.
                continue 'each_token;
            }
        }

        // (`suffix_first` guaranteed initialized once reaching here.)

        // Now `last` holds the complete set of NT tokens that could
        // end the sequence before SUFFIX. Check that every one works with `suffix`.
        for token in &last.tokens {
            if let TokenTree::MetaVarDecl(span, name, Some(kind)) = *token {
                for next_token in &suffix_first.tokens {
                    // Check if the old pat is used and the next token is `|`
                    // to warn about incompatibility with Rust 2021.
                    // We only emit this lint if we're parsing the original
                    // definition of this macro_rules, not while (re)parsing
                    // the macro when compiling another crate that is using the
                    // macro. (See #86567.)
                    // Macros defined in the current crate have a real node id,
                    // whereas macros from an external crate have a dummy id.
                    if def.id != DUMMY_NODE_ID
                        && matches!(kind, NonterminalKind::PatParam { inferred: true })
                        && matches!(next_token, TokenTree::Token(token) if token.kind == BinOp(token::BinOpToken::Or))
                    {
                        // It is suggestion to use pat_param, for example: $x:pat -> $x:pat_param.
                        let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
                            span,
                            name,
                            Some(NonterminalKind::PatParam { inferred: false }),
                        ));
                        sess.buffer_lint_with_diagnostic(
                            &OR_PATTERNS_BACK_COMPAT,
                            span,
                            ast::CRATE_NODE_ID,
                            "the meaning of the `pat` fragment specifier is changing in Rust 2021, which may affect this macro",
                            BuiltinLintDiagnostics::OrPatternsBackCompat(span, suggestion),
                        );
                    }
                    match is_in_follow(next_token, kind) {
                        IsInFollow::Yes => {}
                        IsInFollow::No(possible) => {
                            // "is" when the error is unambiguous (exactly one
                            // ending NT and one possible next token).
                            let may_be = if last.tokens.len() == 1 && suffix_first.tokens.len() == 1
                            {
                                "is"
                            } else {
                                "may be"
                            };

                            let sp = next_token.span();
                            let mut err = sess.span_diagnostic.struct_span_err(
                                sp,
                                &format!(
                                    "`${name}:{frag}` {may_be} followed by `{next}`, which \
                                     is not allowed for `{frag}` fragments",
                                    name = name,
                                    frag = kind,
                                    next = quoted_tt_to_string(next_token),
                                    may_be = may_be
                                ),
                            );
                            err.span_label(sp, format!("not allowed after `{}` fragments", kind));
                            let msg = "allowed there are: ";
                            match possible {
                                &[] => {}
                                &[t] => {
                                    err.note(&format!(
                                        "only {} is allowed after `{}` fragments",
                                        t, kind,
                                    ));
                                }
                                ts => {
                                    err.note(&format!(
                                        "{}{} or {}",
                                        msg,
                                        ts[..ts.len() - 1]
                                            .iter()
                                            .copied()
                                            .collect::<Vec<_>>()
                                            .join(", "),
                                        ts[ts.len() - 1],
                                    ));
                                }
                            }
                            err.emit();
                        }
                    }
                }
            }
        }
    }
    last
}
|
|
|
|
|
2019-09-22 19:17:30 +03:00
|
|
|
fn token_can_be_followed_by_any(tok: &mbe::TokenTree) -> bool {
|
2020-12-19 16:30:56 -05:00
|
|
|
if let mbe::TokenTree::MetaVarDecl(_, _, Some(kind)) = *tok {
|
2020-08-01 17:45:17 +02:00
|
|
|
frag_can_be_followed_by_any(kind)
|
2015-11-12 20:55:28 +01:00
|
|
|
} else {
|
2020-03-06 12:13:55 +01:00
|
|
|
// (Non NT's can always be followed by anything in matchers.)
|
2015-11-12 20:55:28 +01:00
|
|
|
true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-08 14:53:55 +01:00
|
|
|
/// Returns `true` if a fragment of type `frag` can be followed by any sort of
|
|
|
|
/// token. We use this (among other things) as a useful approximation
|
2015-11-12 20:55:28 +01:00
|
|
|
/// for when `frag` can be followed by a repetition like `$(...)*` or
|
|
|
|
/// `$(...)+`. In general, these can be a bit tricky to reason about,
|
|
|
|
/// so we adopt a conservative position that says that any fragment
|
|
|
|
/// specifier which consumes at most one token tree can be followed by
|
|
|
|
/// a fragment specifier (indeed, these fragments can be followed by
|
|
|
|
/// ANYTHING without fear of future compatibility hazards).
|
2020-08-01 17:45:17 +02:00
|
|
|
fn frag_can_be_followed_by_any(kind: NonterminalKind) -> bool {
|
2020-10-26 20:02:06 -04:00
|
|
|
matches!(
|
|
|
|
kind,
|
2020-08-01 17:45:17 +02:00
|
|
|
NonterminalKind::Item // always terminated by `}` or `;`
|
|
|
|
| NonterminalKind::Block // exactly one token tree
|
|
|
|
| NonterminalKind::Ident // exactly one token tree
|
|
|
|
| NonterminalKind::Literal // exactly one token tree
|
|
|
|
| NonterminalKind::Meta // exactly one token tree
|
|
|
|
| NonterminalKind::Lifetime // exactly one token tree
|
2020-10-26 20:02:06 -04:00
|
|
|
| NonterminalKind::TT // exactly one token tree
|
|
|
|
)
|
2015-05-15 13:20:26 -04:00
|
|
|
}
|
|
|
|
|
2018-10-23 21:37:32 -07:00
|
|
|
/// Result of a FOLLOW-set membership test; `No` carries the human-readable
/// list of tokens that *would* have been allowed, for use in diagnostics.
enum IsInFollow {
    Yes,
    No(&'static [&'static str]),
}
|
|
|
|
|
2019-02-08 14:53:55 +01:00
|
|
|
/// Returns `true` if `frag` can legally be followed by the token `tok`. For
/// fragments that can consume an unbounded number of tokens, `tok`
/// must be within a well-defined follow set. This is intended to
/// guarantee future compatibility: for example, without this rule, if
/// we expanded `expr` to include a new binary operator, we might
/// break macros that were relying on that binary operator as a
/// separator.
// when changing this do not forget to update doc/book/macros.md!
fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow {
    use mbe::TokenTree;

    if let TokenTree::Token(Token { kind: token::CloseDelim(_), .. }) = *tok {
        // closing a token tree can never be matched by any fragment;
        // iow, we always require that `(` and `)` match, etc.
        IsInFollow::Yes
    } else {
        match kind {
            NonterminalKind::Item => {
                // since items *must* be followed by either a `;` or a `}`, we can
                // accept anything after them
                IsInFollow::Yes
            }
            NonterminalKind::Block => {
                // anything can follow block, the braces provide an easy boundary to
                // maintain
                IsInFollow::Yes
            }
            NonterminalKind::Stmt | NonterminalKind::Expr => {
                const TOKENS: &[&str] = &["`=>`", "`,`", "`;`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Semi => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::PatParam { .. } => {
                // 2015/2018-style `pat` (no top-level `|`): `|` is allowed after it.
                const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Eq | BinOp(token::Or) => IsInFollow::Yes,
                        Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::PatWithOr { .. } => {
                // 2021-style `pat` consumes top-level `|`, so `|` is NOT in its
                // follow set (note its absence from TOKENS below).
                const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`if`", "`in`"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        FatArrow | Comma | Eq => IsInFollow::Yes,
                        Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
                        _ => IsInFollow::No(TOKENS),
                    },
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::Path | NonterminalKind::Ty => {
                const TOKENS: &[&str] = &[
                    "`{`", "`[`", "`=>`", "`,`", "`>`", "`=`", "`:`", "`;`", "`|`", "`as`",
                    "`where`",
                ];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        OpenDelim(token::DelimToken::Brace)
                        | OpenDelim(token::DelimToken::Bracket)
                        | Comma
                        | FatArrow
                        | Colon
                        | Eq
                        | Gt
                        | BinOp(token::Shr)
                        | Semi
                        | BinOp(token::Or) => IsInFollow::Yes,
                        Ident(name, false) if name == kw::As || name == kw::Where => {
                            IsInFollow::Yes
                        }
                        _ => IsInFollow::No(TOKENS),
                    },
                    TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Block)) => IsInFollow::Yes,
                    _ => IsInFollow::No(TOKENS),
                }
            }
            NonterminalKind::Ident | NonterminalKind::Lifetime => {
                // being a single token, idents and lifetimes are harmless
                IsInFollow::Yes
            }
            NonterminalKind::Literal => {
                // literals may be of a single token, or two tokens (negative numbers)
                IsInFollow::Yes
            }
            NonterminalKind::Meta | NonterminalKind::TT => {
                // being either a single token or a delimited sequence, tt is
                // harmless
                IsInFollow::Yes
            }
            NonterminalKind::Vis => {
                // Explicitly disallow `priv`, on the off chance it comes back.
                const TOKENS: &[&str] = &["`,`", "an ident", "a type"];
                match tok {
                    TokenTree::Token(token) => match token.kind {
                        Comma => IsInFollow::Yes,
                        Ident(name, is_raw) if is_raw || name != kw::Priv => IsInFollow::Yes,
                        _ => {
                            if token.can_begin_type() {
                                IsInFollow::Yes
                            } else {
                                IsInFollow::No(TOKENS)
                            }
                        }
                    },
                    TokenTree::MetaVarDecl(
                        _,
                        _,
                        Some(NonterminalKind::Ident | NonterminalKind::Ty | NonterminalKind::Path),
                    ) => IsInFollow::Yes,
                    _ => IsInFollow::No(TOKENS),
                }
            }
        }
    }
}
|
2015-11-12 20:55:28 +01:00
|
|
|
|
2019-09-22 19:17:30 +03:00
|
|
|
/// Renders a single matcher token tree back to source-like text for use in
/// diagnostics (e.g. `$x:expr` for a metavariable declaration).
fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String {
    match *tt {
        mbe::TokenTree::Token(ref token) => pprust::token_to_string(&token),
        mbe::TokenTree::MetaVar(_, name) => format!("${}", name),
        mbe::TokenTree::MetaVarDecl(_, name, Some(kind)) => format!("${}:{}", name, kind),
        mbe::TokenTree::MetaVarDecl(_, name, None) => format!("${}:", name),
        // Sequences and delimited groups never occur in the follow-set
        // checker's token sets; reaching here is a bug.
        // (`panic!("{}", ...)` keeps the literal braces in the message from
        // being interpreted as format placeholders.)
        _ => panic!(
            "{}",
            "unexpected mbe::TokenTree::{Sequence or Delimited} \
             in follow set checker"
        ),
    }
}
|
2019-09-22 19:41:04 +03:00
|
|
|
|
2020-03-09 10:35:35 +01:00
|
|
|
/// Builds a `Parser` over `tts` configured for parsing macro arguments
/// (uses the dedicated `MACRO_ARGUMENTS` subparser name).
fn parser_from_cx(sess: &ParseSess, tts: TokenStream) -> Parser<'_> {
    Parser::new(sess, tts, true, rustc_parse::MACRO_ARGUMENTS)
}
|
2019-09-25 10:17:59 +03:00
|
|
|
|
|
|
|
/// Generates an appropriate parsing failure message. For EOF, this is "unexpected end...". For
|
|
|
|
/// other tokens, this is "unexpected token...".
|
|
|
|
fn parse_failure_msg(tok: &Token) -> String {
|
|
|
|
match tok.kind {
|
|
|
|
token::Eof => "unexpected end of macro invocation".to_string(),
|
2019-12-22 17:42:04 -05:00
|
|
|
_ => format!("no rules expected the token `{}`", pprust::token_to_string(tok),),
|
2019-09-25 10:17:59 +03:00
|
|
|
}
|
|
|
|
}
|