2015-01-02 16:41:24 -05:00
|
|
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
|
2012-12-03 16:48:01 -08:00
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
2015-11-06 14:52:02 +01:00
|
|
|
use ast::{self, TokenTree};
|
2014-10-06 23:00:56 +01:00
|
|
|
use codemap::{Span, DUMMY_SP};
|
2015-11-14 19:50:46 +09:00
|
|
|
use ext::base::{DummyResult, ExtCtxt, MacResult, SyntaxExtension};
|
2014-07-10 12:09:56 -07:00
|
|
|
use ext::base::{NormalTT, TTMacroExpander};
|
2014-01-09 15:05:33 +02:00
|
|
|
use ext::tt::macro_parser::{Success, Error, Failure};
|
2015-11-14 19:11:40 +09:00
|
|
|
use ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
|
2015-08-07 16:36:56 +02:00
|
|
|
use ext::tt::macro_parser::parse;
|
2015-02-24 19:56:01 +01:00
|
|
|
use parse::lexer::new_tt_reader;
|
2016-03-01 09:28:42 +00:00
|
|
|
use parse::parser::{Parser, Restrictions};
|
2016-04-16 18:05:06 +03:00
|
|
|
use parse::token::{self, gensym_ident, NtTT, Token};
|
2015-01-02 16:41:24 -05:00
|
|
|
use parse::token::Token::*;
|
2012-12-23 17:41:37 -05:00
|
|
|
use print;
|
2014-09-13 19:06:01 +03:00
|
|
|
use ptr::P;
|
|
|
|
|
2013-11-24 23:08:53 -08:00
|
|
|
use util::small_vector::SmallVector;
|
2012-12-23 17:41:37 -05:00
|
|
|
|
2014-02-28 12:54:01 -08:00
|
|
|
use std::cell::RefCell;
|
2015-11-12 20:55:28 +01:00
|
|
|
use std::collections::{HashMap};
|
|
|
|
use std::collections::hash_map::{Entry};
|
2014-03-27 16:40:35 +02:00
|
|
|
use std::rc::Rc;
|
2014-02-28 12:54:01 -08:00
|
|
|
|
2014-03-09 16:54:34 +02:00
|
|
|
/// Wraps a `Parser` positioned over the token stream produced by one
/// successful `macro_rules!` arm expansion, plus enough context to report a
/// useful error if parsing that expansion leaves tokens behind.
struct ParserAnyMacro<'a> {
    // RefCell gives interior mutability: `ensure_complete_parse` takes
    // `&self` while the `make_*` methods also need `borrow_mut()` access.
    parser: RefCell<Parser<'a>>,

    /// Span of the expansion site of the macro this parser is for
    site_span: Span,
    /// The ident of the macro we're parsing
    macro_ident: ast::Ident
}
|
|
|
|
|
2014-03-09 16:54:34 +02:00
|
|
|
impl<'a> ParserAnyMacro<'a> {
    /// Make sure we don't have any tokens left to parse, so we don't
    /// silently drop anything. `allow_semi` is so that "optional"
    /// semicolons at the end of normal expressions aren't complained
    /// about e.g. the semicolon in `macro_rules! kapow { () => {
    /// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
    /// allowed to be there.
    ///
    /// `context` names the grammatical position ("expression", "item", ...)
    /// and is interpolated into the diagnostic note.
    fn ensure_complete_parse(&self, allow_semi: bool, context: &str) {
        let mut parser = self.parser.borrow_mut();
        // Swallow one trailing semicolon when the caller permits it.
        if allow_semi && parser.token == token::Semi {
            parser.bump();
        }
        // Anything other than EOF here means the expansion produced tokens
        // the parse did not consume; report them rather than dropping them.
        if parser.token != token::Eof {
            let token_str = parser.this_token_to_string();
            let msg = format!("macro expansion ignores token `{}` and any \
                               following",
                              token_str);
            let span = parser.span;
            let mut err = parser.diagnostic().struct_span_err(span, &msg[..]);
            // Point back at the invocation site so the user can see which
            // use of the macro triggered the leftover tokens.
            let msg = format!("caused by the macro expansion here; the usage \
                               of `{}!` is likely invalid in {} context",
                              self.macro_ident, context);
            err.span_note(self.site_span, &msg[..])
               .emit();
        }
    }
}
|
|
|
|
|
2014-04-15 22:00:14 +10:00
|
|
|
/// Each `make_*` method drives the wrapped parser to produce one kind of AST
/// fragment from the expanded tokens, then verifies nothing was left over.
impl<'a> MacResult for ParserAnyMacro<'a> {
    fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
        let ret = panictry!(self.parser.borrow_mut().parse_expr());
        // `true`: a trailing semicolon is tolerated after an expression.
        self.ensure_complete_parse(true, "expression");
        Some(ret)
    }
    fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
        let ret = panictry!(self.parser.borrow_mut().parse_pat());
        self.ensure_complete_parse(false, "pattern");
        Some(ret)
    }
    fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
        let mut ret = SmallVector::zero();
        // Keep parsing items until `parse_item` yields None (no more items).
        while let Some(item) = panictry!(self.parser.borrow_mut().parse_item()) {
            ret.push(item);
        }
        self.ensure_complete_parse(false, "item");
        Some(ret)
    }

    fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
                       -> Option<SmallVector<ast::ImplItem>> {
        let mut ret = SmallVector::zero();
        loop {
            // Re-borrow each iteration so the borrow is released between
            // `parse_impl_item` calls.
            let mut parser = self.parser.borrow_mut();
            match parser.token {
                token::Eof => break,
                _ => ret.push(panictry!(parser.parse_impl_item()))
            }
        }
        self.ensure_complete_parse(false, "item");
        Some(ret)
    }

    fn make_stmts(self: Box<ParserAnyMacro<'a>>)
                  -> Option<SmallVector<ast::Stmt>> {
        let mut ret = SmallVector::zero();
        loop {
            let mut parser = self.parser.borrow_mut();
            match parser.token {
                token::Eof => break,
                // `parse_stmt` returns Ok(None) for e.g. stray semicolons;
                // those are simply skipped rather than treated as errors.
                _ => match parser.parse_stmt() {
                    Ok(maybe_stmt) => match maybe_stmt {
                        Some(stmt) => ret.push(stmt),
                        None => (),
                    },
                    // On a parse error, emit it and stop collecting rather
                    // than panicking: partial results are still returned.
                    Err(mut e) => {
                        e.emit();
                        break;
                    }
                }
            }
        }
        self.ensure_complete_parse(false, "statement");
        Some(ret)
    }

    fn make_ty(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Ty>> {
        let ret = panictry!(self.parser.borrow_mut().parse_ty());
        self.ensure_complete_parse(false, "type");
        Some(ret)
    }
}
|
|
|
|
|
2014-01-25 13:34:26 -08:00
|
|
|
/// The expander built by `compile` from a `macro_rules!` definition: the
/// macro's name, its matcher/transcriber arms, and whether the definition
/// passed validation.
struct MacroRulesMacroExpander {
    name: ast::Ident,
    // Set when the macro definition came from another crate — TODO confirm
    // against `ast::MacroDef` docs; threaded through to `new_tt_reader`.
    imported_from: Option<ast::Ident>,
    // Parallel vectors: `lhses[i] => rhses[i]` is one arm of the macro.
    lhses: Vec<TokenTree>,
    rhses: Vec<TokenTree>,
    // False if any arm failed `check_lhs_nt_follows`/`check_rhs`; an
    // invalid macro expands to a `DummyResult` instead of being matched.
    valid: bool,
}
|
|
|
|
|
2014-07-10 12:09:56 -07:00
|
|
|
impl TTMacroExpander for MacroRulesMacroExpander {
|
2014-08-27 21:46:52 -04:00
|
|
|
fn expand<'cx>(&self,
|
|
|
|
cx: &'cx mut ExtCtxt,
|
|
|
|
sp: Span,
|
2015-11-14 19:11:40 +09:00
|
|
|
arg: &[TokenTree])
|
2014-08-27 21:46:52 -04:00
|
|
|
-> Box<MacResult+'cx> {
|
2015-11-14 19:50:46 +09:00
|
|
|
if !self.valid {
|
|
|
|
return DummyResult::any(sp);
|
|
|
|
}
|
2014-02-28 12:54:01 -08:00
|
|
|
generic_extension(cx,
|
|
|
|
sp,
|
|
|
|
self.name,
|
2014-09-15 18:27:28 -07:00
|
|
|
self.imported_from,
|
2014-02-28 12:54:01 -08:00
|
|
|
arg,
|
2015-02-18 15:58:07 -08:00
|
|
|
&self.lhses,
|
|
|
|
&self.rhses)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-09 13:12:30 -07:00
|
|
|
/// Given `lhses` and `rhses`, this is the new macro we create
///
/// Tries each arm's matcher in order against `arg`; the first arm that
/// matches yields a `ParserAnyMacro` over the transcribed right-hand side.
/// If no arm matches, reports the failure from the arm that got furthest.
fn generic_extension<'cx>(cx: &'cx ExtCtxt,
                          sp: Span,
                          name: ast::Ident,
                          imported_from: Option<ast::Ident>,
                          arg: &[TokenTree],
                          lhses: &[TokenTree],
                          rhses: &[TokenTree])
                          -> Box<MacResult+'cx> {
    if cx.trace_macros() {
        // `trace_macros!` support: print each invocation as it expands.
        println!("{}! {{ {} }}",
                 name,
                 print::pprust::tts_to_string(arg));
    }

    // Which arm's failure should we report? (the one furthest along)
    let mut best_fail_spot = DUMMY_SP;
    let mut best_fail_msg = "internal error: ran no matchers".to_string();

    for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
        // `compile` guarantees every lhs is delimited; the matcher proper
        // is the token trees inside the delimiters.
        let lhs_tt = match *lhs {
            TokenTree::Delimited(_, ref delim) => &delim.tts[..],
            _ => cx.span_bug(sp, "malformed macro lhs")
        };

        match TokenTree::parse(cx, lhs_tt, arg) {
            Success(named_matches) => {
                let rhs = match rhses[i] {
                    // ignore delimiters
                    TokenTree::Delimited(_, ref delimed) => delimed.tts.clone(),
                    _ => cx.span_bug(sp, "malformed macro rhs"),
                };
                // rhs has holes ( `$id` and `$(...)` that need filled)
                let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
                                           Some(named_matches),
                                           imported_from,
                                           rhs);
                let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
                // Inherit file/module context from the invocation so that
                // e.g. `mod foo;` inside the expansion resolves correctly.
                p.filename = cx.filename.clone();
                p.mod_path_stack = cx.mod_path_stack.clone();
                p.restrictions = match cx.in_block {
                    true => Restrictions::NO_NONINLINE_MOD,
                    false => Restrictions::empty(),
                };
                p.check_unknown_macro_variable();
                // Let the context choose how to interpret the result.
                // Weird, but useful for X-macros.
                return Box::new(ParserAnyMacro {
                    parser: RefCell::new(p),

                    // Pass along the original expansion site and the name of the macro
                    // so we can print a useful error message if the parse of the expanded
                    // macro leaves unparsed tokens.
                    site_span: sp,
                    macro_ident: name
                })
            }
            // Remember the failure whose span got furthest into the input;
            // that arm's message is the most useful one to report.
            Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
                best_fail_spot = sp;
                best_fail_msg = (*msg).clone();
            },
            Error(err_sp, ref msg) => {
                // Hard error during matching: report at the error span,
                // falling back to the invocation span if it is a dummy.
                cx.span_fatal(err_sp.substitute_dummy(sp), &msg[..])
            }
        }
    }

    // No arm matched: report the best failure we recorded above.
    cx.span_fatal(best_fail_spot.substitute_dummy(sp), &best_fail_msg[..]);
}
|
|
|
|
|
2014-10-07 00:18:24 +01:00
|
|
|
// Note that macro-by-example's input is also matched against a token tree:
|
|
|
|
// $( $lhs:tt => $rhs:tt );+
|
|
|
|
//
|
|
|
|
// Holy self-referential!
|
|
|
|
|
2014-12-30 19:10:46 -08:00
|
|
|
/// Converts a `macro_rules!` invocation into a syntax extension.
///
/// Parses the definition body against the built-in `$($lhs:tt => $rhs:tt);+`
/// grammar, validates each arm, and packages the result as a `NormalTT`
/// extension driven by `MacroRulesMacroExpander`.
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
                    def: &ast::MacroDef) -> SyntaxExtension {

    // Gensymmed so the metavariable names cannot collide with user idents.
    let lhs_nm = gensym_ident("lhs");
    let rhs_nm = gensym_ident("rhs");

    // The pattern that macro_rules matches.
    // The grammar for macro_rules! is:
    // $( $lhs:tt => $rhs:tt );+
    // ...quasiquoting this would be nice.
    // These spans won't matter, anyways
    let match_lhs_tok = MatchNt(lhs_nm, token::str_to_ident("tt"));
    let match_rhs_tok = MatchNt(rhs_nm, token::str_to_ident("tt"));
    let argument_gram = vec!(
        // `$( $lhs:tt => $rhs:tt );+` — one or more semicolon-separated arms.
        TokenTree::Sequence(DUMMY_SP,
                            Rc::new(ast::SequenceRepetition {
                                tts: vec![
                                    TokenTree::Token(DUMMY_SP, match_lhs_tok),
                                    TokenTree::Token(DUMMY_SP, token::FatArrow),
                                    TokenTree::Token(DUMMY_SP, match_rhs_tok)],
                                separator: Some(token::Semi),
                                op: ast::KleeneOp::OneOrMore,
                                num_captures: 2
                            })),
        //to phase into semicolon-termination instead of
        //semicolon-separation
        TokenTree::Sequence(DUMMY_SP,
                            Rc::new(ast::SequenceRepetition {
                                tts: vec![TokenTree::Token(DUMMY_SP, token::Semi)],
                                separator: None,
                                op: ast::KleeneOp::ZeroOrMore,
                                num_captures: 0
                            })));

    // Parse the macro_rules! invocation (`none` is for no interpolations):
    let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
                                   None,
                                   None,
                                   def.body.clone());

    let argument_map = match parse(cx.parse_sess(),
                                   cx.cfg(),
                                   arg_reader,
                                   &argument_gram) {
        Success(m) => m,
        Failure(sp, str) | Error(sp, str) => {
            // A definition that doesn't fit the arms grammar is fatal.
            panic!(cx.parse_sess().span_diagnostic
                     .span_fatal(sp.substitute_dummy(def.span), &str[..]));
        }
    };

    // Accumulates validation results across all arms; errors are reported
    // as they are found but only disable the macro, they don't abort here.
    let mut valid = true;

    // Extract the arguments:
    let lhses = match **argument_map.get(&lhs_nm.name).unwrap() {
        MatchedSeq(ref s, _) => {
            s.iter().map(|m| match **m {
                MatchedNonterminal(NtTT(ref tt)) => {
                    valid &= check_lhs_nt_follows(cx, tt);
                    (**tt).clone()
                }
                _ => cx.span_bug(def.span, "wrong-structured lhs")
            }).collect()
        }
        _ => cx.span_bug(def.span, "wrong-structured lhs")
    };

    let rhses = match **argument_map.get(&rhs_nm.name).unwrap() {
        MatchedSeq(ref s, _) => {
            s.iter().map(|m| match **m {
                MatchedNonterminal(NtTT(ref tt)) => (**tt).clone(),
                _ => cx.span_bug(def.span, "wrong-structured rhs")
            }).collect()
        }
        _ => cx.span_bug(def.span, "wrong-structured rhs")
    };

    for rhs in &rhses {
        valid &= check_rhs(cx, rhs);
    }

    let exp: Box<_> = Box::new(MacroRulesMacroExpander {
        name: def.ident,
        imported_from: def.imported_from,
        lhses: lhses,
        rhses: rhses,
        valid: valid,
    });

    NormalTT(exp, Some(def.span), def.allow_internal_unstable)
}
|
2015-01-02 16:41:24 -05:00
|
|
|
|
2016-05-18 15:08:19 +02:00
|
|
|
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &TokenTree) -> bool {
|
2015-11-14 19:11:40 +09:00
|
|
|
// lhs is going to be like TokenTree::Delimited(...), where the
|
2015-11-06 14:52:02 +01:00
|
|
|
// entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
|
2015-01-18 10:19:47 -05:00
|
|
|
match lhs {
|
2016-05-18 15:08:19 +02:00
|
|
|
&TokenTree::Delimited(_, ref tts) => check_matcher(cx, &tts.tts),
|
|
|
|
_ => {
|
2016-05-17 17:39:11 +02:00
|
|
|
cx.span_err(lhs.get_span(), "invalid macro matcher; matchers must \
|
|
|
|
be contained in balanced delimiters");
|
2016-05-18 15:08:19 +02:00
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
// we don't abort on errors on rejection, the driver will do that for us
|
|
|
|
// after parsing/expansion. we can report every error in every macro this way.
|
|
|
|
}
|
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
fn check_rhs(cx: &mut ExtCtxt, rhs: &TokenTree) -> bool {
|
|
|
|
match *rhs {
|
|
|
|
TokenTree::Delimited(..) => return true,
|
|
|
|
_ => cx.span_err(rhs.get_span(), "macro rhs must be delimited")
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2016-05-18 15:08:19 +02:00
|
|
|
fn check_matcher(cx: &mut ExtCtxt, matcher: &[TokenTree]) -> bool {
|
2015-11-12 20:55:28 +01:00
|
|
|
let first_sets = FirstSets::new(matcher);
|
|
|
|
let empty_suffix = TokenSet::empty();
|
2016-05-31 16:51:52 +02:00
|
|
|
let err = cx.parse_sess.span_diagnostic.err_count();
|
|
|
|
check_matcher_core(cx, &first_sets, matcher, &empty_suffix);
|
|
|
|
err == cx.parse_sess.span_diagnostic.err_count()
|
2015-11-12 20:55:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// The FirstSets for a matcher is a mapping from subsequences in the
|
|
|
|
// matcher to the FIRST set for that subsequence.
|
|
|
|
//
|
|
|
|
// This mapping is partially precomputed via a backwards scan over the
|
|
|
|
// token trees of the matcher, which provides a mapping from each
|
|
|
|
// repetition sequence to its FIRST set.
|
|
|
|
//
|
|
|
|
// (Hypothetically sequences should be uniquely identifiable via their
|
|
|
|
// spans, though perhaps that is false e.g. for macro-generated macros
|
|
|
|
// that do not try to inject artificial span information. My plan is
|
|
|
|
// to try to catch such cases ahead of time and not include them in
|
|
|
|
// the precomputed mapping.)
|
|
|
|
/// Precomputed FIRST sets for the repetition sequences of one matcher;
/// see the module comment above for the construction strategy.
struct FirstSets {
    // this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its
    // span in the original matcher to the First set for the inner sequence `tt ...`.
    //
    // If two sequences have the same span in a matcher, then map that
    // span to None (invalidating the mapping here and forcing the code to
    // use a slow path).
    first: HashMap<Span, Option<TokenSet>>,
}
|
|
|
|
|
|
|
|
impl FirstSets {
    /// Builds the sequence-span -> FIRST-set map for `tts` via a single
    /// backwards scan (see `build_recur`).
    fn new(tts: &[TokenTree]) -> FirstSets {
        let mut sets = FirstSets { first: HashMap::new() };
        build_recur(&mut sets, tts);
        return sets;

        // walks backward over `tts`, returning the FIRST for `tts`
        // and updating `sets` at the same time for all sequence
        // substructure we find within `tts`.
        fn build_recur(sets: &mut FirstSets, tts: &[TokenTree]) -> TokenSet {
            // Scanning in reverse: `first` always holds the FIRST set of
            // the suffix of `tts` processed so far.
            let mut first = TokenSet::empty();
            for tt in tts.iter().rev() {
                match *tt {
                    TokenTree::Token(sp, ref tok) => {
                        // A concrete token always starts the suffix.
                        first.replace_with((sp, tok.clone()));
                    }
                    TokenTree::Delimited(_, ref delimited) => {
                        // Recurse to record inner sequences; the FIRST of a
                        // delimited group is its opening delimiter.
                        build_recur(sets, &delimited.tts[..]);
                        first.replace_with((delimited.open_span,
                                            Token::OpenDelim(delimited.delim)));
                    }
                    TokenTree::Sequence(sp, ref seq_rep) => {
                        let subfirst = build_recur(sets, &seq_rep.tts[..]);

                        match sets.first.entry(sp) {
                            Entry::Vacant(vac) => {
                                vac.insert(Some(subfirst.clone()));
                            }
                            Entry::Occupied(mut occ) => {
                                // if there is already an entry, then a span must have collided.
                                // This should not happen with typical macro_rules macros,
                                // but syntax extensions need not maintain distinct spans,
                                // so distinct syntax trees can be assigned the same span.
                                // In such a case, the map cannot be trusted; so mark this
                                // entry as unusable.
                                occ.insert(None);
                            }
                        }

                        // If the sequence contents can be empty, then the first
                        // token could be the separator token itself.

                        if let (Some(ref sep), true) = (seq_rep.separator.clone(),
                                                        subfirst.maybe_empty) {
                            first.add_one_maybe((sp, sep.clone()));
                        }

                        // Reverse scan: Sequence comes before `first`.
                        if subfirst.maybe_empty || seq_rep.op == ast::KleeneOp::ZeroOrMore {
                            // If sequence is potentially empty, then
                            // union them (preserving first emptiness).
                            first.add_all(&TokenSet { maybe_empty: true, ..subfirst });
                        } else {
                            // Otherwise, sequence guaranteed
                            // non-empty; replace first.
                            first = subfirst;
                        }
                    }
                }
            }

            return first;
        }
    }

    // walks forward over `tts` until all potential FIRST tokens are
    // identified.
    fn first(&self, tts: &[TokenTree]) -> TokenSet {
        let mut first = TokenSet::empty();
        for tt in tts.iter() {
            // Invariant: we only keep scanning while everything seen so
            // far could match the empty sequence.
            assert!(first.maybe_empty);
            match *tt {
                TokenTree::Token(sp, ref tok) => {
                    // A concrete token terminates the scan.
                    first.add_one((sp, tok.clone()));
                    return first;
                }
                TokenTree::Delimited(_, ref delimited) => {
                    first.add_one((delimited.open_span,
                                   Token::OpenDelim(delimited.delim)));
                    return first;
                }
                TokenTree::Sequence(sp, ref seq_rep) => {
                    // Look up the set precomputed by `new` for this span.
                    match self.first.get(&sp) {
                        Some(&Some(ref subfirst)) => {

                            // If the sequence contents can be empty, then the first
                            // token could be the separator token itself.

                            if let (Some(ref sep), true) = (seq_rep.separator.clone(),
                                                            subfirst.maybe_empty) {
                                first.add_one_maybe((sp, sep.clone()));
                            }

                            assert!(first.maybe_empty);
                            first.add_all(subfirst);
                            if subfirst.maybe_empty || seq_rep.op == ast::KleeneOp::ZeroOrMore {
                                // continue scanning for more first
                                // tokens, but also make sure we
                                // restore empty-tracking state
                                first.maybe_empty = true;
                                continue;
                            } else {
                                return first;
                            }
                        }

                        Some(&None) => {
                            // Colliding spans were detected during `new`;
                            // the slow path is not implemented yet.
                            panic!("assume all sequences have (unique) spans for now");
                        }

                        None => {
                            panic!("We missed a sequence during FirstSets construction");
                        }
                    }
                }
            }
        }

        // we only exit the loop if `tts` was empty or if every
        // element of `tts` matches the empty sequence.
        assert!(first.maybe_empty);
        return first;
    }
}
|
|
|
|
|
|
|
|
// A set of Tokens, which may include MatchNt tokens (for
|
|
|
|
// macro-by-example syntactic variables). It also carries the
|
|
|
|
// `maybe_empty` flag; that is true if and only if the matcher can
|
|
|
|
// match an empty token sequence.
|
|
|
|
//
|
|
|
|
// The First set is computed on submatchers like `$($a:expr b),* $(c)* d`,
|
|
|
|
// which has corresponding FIRST = {$a:expr, c, d}.
|
|
|
|
// Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}.
|
|
|
|
//
|
|
|
|
// (Notably, we must allow for *-op to occur zero times.)
|
|
|
|
#[derive(Clone, Debug)]
struct TokenSet {
    // The distinct tokens of the set, each paired with a span from the
    // matcher where it was observed (used for error reporting).
    tokens: Vec<(Span, Token)>,
    // True iff the (sub)matcher this set was computed for can match the
    // empty token sequence.
    maybe_empty: bool,
}
|
|
|
|
|
|
|
|
impl TokenSet {
    // Returns a set for the empty sequence.
    fn empty() -> Self { TokenSet { tokens: Vec::new(), maybe_empty: true } }

    // Returns the set `{ tok }` for the single-token (and thus
    // non-empty) sequence [tok].
    fn singleton(tok: (Span, Token)) -> Self {
        TokenSet { tokens: vec![tok], maybe_empty: false }
    }

    // Changes self to be the set `{ tok }`.
    // Since `tok` is always present, marks self as non-empty.
    fn replace_with(&mut self, tok: (Span, Token)) {
        self.tokens.clear();
        self.tokens.push(tok);
        self.maybe_empty = false;
    }

    // Changes self to be the empty set `{}`; meant for use when
    // the particular token does not matter, but we want to
    // record that it occurs.
    fn replace_with_irrelevant(&mut self) {
        self.tokens.clear();
        // Deliberately non-empty: a token occurred, we just don't track it.
        self.maybe_empty = false;
    }

    // Adds `tok` to the set for `self`, marking sequence as non-empty.
    fn add_one(&mut self, tok: (Span, Token)) {
        // Set semantics: skip duplicates.
        if !self.tokens.contains(&tok) {
            self.tokens.push(tok);
        }
        self.maybe_empty = false;
    }

    // Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.)
    fn add_one_maybe(&mut self, tok: (Span, Token)) {
        if !self.tokens.contains(&tok) {
            self.tokens.push(tok);
        }
    }

    // Adds all elements of `other` to this.
    //
    // (Since this is a set, we filter out duplicates.)
    //
    // If `other` is potentially empty, then preserves the previous
    // setting of the empty flag of `self`. If `other` is guaranteed
    // non-empty, then `self` is marked non-empty.
    fn add_all(&mut self, other: &Self) {
        for tok in &other.tokens {
            if !self.tokens.contains(tok) {
                self.tokens.push(tok.clone());
            }
        }
        if !other.maybe_empty {
            self.maybe_empty = false;
        }
    }
}
|
|
|
|
|
|
|
|
// Checks that `matcher` is internally consistent and that it
|
|
|
|
// can legally by followed by a token N, for all N in `follow`.
|
|
|
|
// (If `follow` is empty, then it imposes no constraint on
|
|
|
|
// the `matcher`.)
|
|
|
|
//
|
|
|
|
// Returns the set of NT tokens that could possibly come last in
|
|
|
|
// `matcher`. (If `matcher` matches the empty sequence, then
|
|
|
|
// `maybe_empty` will be set to true.)
|
|
|
|
//
|
|
|
|
// Requires that `first_sets` is pre-computed for `matcher`;
|
|
|
|
// see `FirstSets::new`.
|
|
|
|
fn check_matcher_core(cx: &mut ExtCtxt,
|
|
|
|
first_sets: &FirstSets,
|
|
|
|
matcher: &[TokenTree],
|
2016-05-31 16:51:52 +02:00
|
|
|
follow: &TokenSet) -> TokenSet {
|
2015-11-12 20:55:28 +01:00
|
|
|
use print::pprust::token_to_string;
|
|
|
|
|
|
|
|
let mut last = TokenSet::empty();
|
|
|
|
|
|
|
|
// 2. For each token and suffix [T, SUFFIX] in M:
|
|
|
|
// ensure that T can be followed by SUFFIX, and if SUFFIX may be empty,
|
|
|
|
// then ensure T can also be followed by any element of FOLLOW.
|
|
|
|
'each_token: for i in 0..matcher.len() {
|
|
|
|
let token = &matcher[i];
|
|
|
|
let suffix = &matcher[i+1..];
|
|
|
|
|
|
|
|
let build_suffix_first = || {
|
|
|
|
let mut s = first_sets.first(suffix);
|
|
|
|
if s.maybe_empty { s.add_all(follow); }
|
|
|
|
return s;
|
|
|
|
};
|
|
|
|
|
|
|
|
// (we build `suffix_first` on demand below; you can tell
|
|
|
|
// which cases are supposed to fall through by looking for the
|
|
|
|
// initialization of this variable.)
|
|
|
|
let suffix_first;
|
|
|
|
|
|
|
|
// First, update `last` so that it corresponds to the set
|
|
|
|
// of NT tokens that might end the sequence `... token`.
|
|
|
|
match *token {
|
|
|
|
TokenTree::Token(sp, ref tok) => {
|
|
|
|
let can_be_followed_by_any;
|
|
|
|
if let Err(bad_frag) = has_legal_fragment_specifier(tok) {
|
2016-05-31 16:51:52 +02:00
|
|
|
cx.struct_span_err(sp, &format!("invalid fragment specifier `{}`", bad_frag))
|
|
|
|
.help("valid fragment specifiers are `ident`, `block`, \
|
|
|
|
`stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
|
|
|
|
and `item`")
|
|
|
|
.emit();
|
2015-11-12 20:55:28 +01:00
|
|
|
// (This eliminates false positives and duplicates
|
|
|
|
// from error messages.)
|
|
|
|
can_be_followed_by_any = true;
|
|
|
|
} else {
|
|
|
|
can_be_followed_by_any = token_can_be_followed_by_any(tok);
|
|
|
|
}
|
|
|
|
|
|
|
|
if can_be_followed_by_any {
|
|
|
|
// don't need to track tokens that work with any,
|
|
|
|
last.replace_with_irrelevant();
|
|
|
|
// ... and don't need to check tokens that can be
|
|
|
|
// followed by anything against SUFFIX.
|
|
|
|
continue 'each_token;
|
|
|
|
} else {
|
|
|
|
last.replace_with((sp, tok.clone()));
|
|
|
|
suffix_first = build_suffix_first();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TokenTree::Delimited(_, ref d) => {
|
|
|
|
let my_suffix = TokenSet::singleton((d.close_span, Token::CloseDelim(d.delim)));
|
2016-05-31 16:51:52 +02:00
|
|
|
check_matcher_core(cx, first_sets, &d.tts, &my_suffix);
|
2015-11-12 20:55:28 +01:00
|
|
|
// don't track non NT tokens
|
|
|
|
last.replace_with_irrelevant();
|
|
|
|
|
|
|
|
// also, we don't need to check delimited sequences
|
|
|
|
// against SUFFIX
|
|
|
|
continue 'each_token;
|
|
|
|
}
|
|
|
|
TokenTree::Sequence(sp, ref seq_rep) => {
|
|
|
|
suffix_first = build_suffix_first();
|
|
|
|
// The trick here: when we check the interior, we want
|
|
|
|
// to include the separator (if any) as a potential
|
|
|
|
// (but not guaranteed) element of FOLLOW. So in that
|
|
|
|
// case, we make a temp copy of suffix and stuff
|
|
|
|
// delimiter in there.
|
|
|
|
//
|
|
|
|
// FIXME: Should I first scan suffix_first to see if
|
|
|
|
// delimiter is already in it before I go through the
|
|
|
|
// work of cloning it? But then again, this way I may
|
|
|
|
// get a "tighter" span?
|
|
|
|
let mut new;
|
|
|
|
let my_suffix = if let Some(ref u) = seq_rep.separator {
|
|
|
|
new = suffix_first.clone();
|
|
|
|
new.add_one_maybe((sp, u.clone()));
|
|
|
|
&new
|
|
|
|
} else {
|
|
|
|
&suffix_first
|
|
|
|
};
|
|
|
|
|
|
|
|
// At this point, `suffix_first` is built, and
|
|
|
|
// `my_suffix` is some TokenSet that we can use
|
|
|
|
// for checking the interior of `seq_rep`.
|
2016-05-31 16:51:52 +02:00
|
|
|
let next = check_matcher_core(cx, first_sets, &seq_rep.tts, my_suffix);
|
2015-11-12 20:55:28 +01:00
|
|
|
if next.maybe_empty {
|
|
|
|
last.add_all(&next);
|
|
|
|
} else {
|
|
|
|
last = next;
|
|
|
|
}
|
|
|
|
|
|
|
|
// the recursive call to check_matcher_core already ran the 'each_last
|
|
|
|
// check below, so we can just keep going forward here.
|
|
|
|
continue 'each_token;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// (`suffix_first` guaranteed initialized once reaching here.)
|
|
|
|
|
|
|
|
// Now `last` holds the complete set of NT tokens that could
|
|
|
|
// end the sequence before SUFFIX. Check that every one works with `suffix`.
|
|
|
|
'each_last: for &(_sp, ref t) in &last.tokens {
|
2016-04-16 04:12:02 +03:00
|
|
|
if let MatchNt(ref name, ref frag_spec) = *t {
|
2015-11-12 20:55:28 +01:00
|
|
|
for &(sp, ref next_token) in &suffix_first.tokens {
|
|
|
|
match is_in_follow(cx, next_token, &frag_spec.name.as_str()) {
|
2016-05-18 15:08:19 +02:00
|
|
|
Err((msg, help)) => {
|
2016-05-31 16:51:52 +02:00
|
|
|
cx.struct_span_err(sp, &msg).help(help).emit();
|
2015-11-12 20:55:28 +01:00
|
|
|
// don't bother reporting every source of
|
|
|
|
// conflict for a particular element of `last`.
|
|
|
|
continue 'each_last;
|
|
|
|
}
|
|
|
|
Ok(true) => {}
|
|
|
|
Ok(false) => {
|
|
|
|
let may_be = if last.tokens.len() == 1 &&
|
|
|
|
suffix_first.tokens.len() == 1
|
|
|
|
{
|
|
|
|
"is"
|
|
|
|
} else {
|
|
|
|
"may be"
|
|
|
|
};
|
|
|
|
|
2016-05-31 16:51:52 +02:00
|
|
|
cx.span_err(
|
|
|
|
sp,
|
2015-11-12 20:55:28 +01:00
|
|
|
&format!("`${name}:{frag}` {may_be} followed by `{next}`, which \
|
|
|
|
is not allowed for `{frag}` fragments",
|
|
|
|
name=name,
|
|
|
|
frag=frag_spec,
|
|
|
|
next=token_to_string(next_token),
|
2016-05-31 16:51:52 +02:00
|
|
|
may_be=may_be)
|
2016-05-18 15:08:19 +02:00
|
|
|
);
|
2015-11-12 20:55:28 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
last
|
|
|
|
}
|
|
|
|
|
|
|
|
fn token_can_be_followed_by_any(tok: &Token) -> bool {
|
2016-04-16 04:12:02 +03:00
|
|
|
if let &MatchNt(_, ref frag_spec) = tok {
|
2015-11-12 20:55:28 +01:00
|
|
|
frag_can_be_followed_by_any(&frag_spec.name.as_str())
|
|
|
|
} else {
|
|
|
|
// (Non NT's can always be followed by anthing in matchers.)
|
|
|
|
true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// True if a fragment of type `frag` can be followed by any sort of
/// token. We use this (among other things) as a useful approximation
/// for when `frag` can be followed by a repetition like `$(...)*` or
/// `$(...)+`. In general, these can be a bit tricky to reason about,
/// so we adopt a conservative position that says that any fragment
/// specifier which consumes at most one token tree can be followed by
/// a fragment specifier (indeed, these fragments can be followed by
/// ANYTHING without fear of future compatibility hazards).
fn frag_can_be_followed_by_any(frag: &str) -> bool {
    // Each of these fragments consumes at most a single token tree
    // (`item` is always terminated by `}` or `;`), so there is no
    // follow-set hazard in placing anything after them.
    matches!(frag, "item" | "block" | "ident" | "meta" | "tt")
}
|
|
|
|
|
|
|
|
/// True if `frag` can legally be followed by the token `tok`. For
|
2015-11-12 20:55:28 +01:00
|
|
|
/// fragments that can consume an unbounded number of tokens, `tok`
|
2015-05-15 13:20:26 -04:00
|
|
|
/// must be within a well-defined follow set. This is intended to
|
|
|
|
/// guarantee future compatibility: for example, without this rule, if
|
|
|
|
/// we expanded `expr` to include a new binary operator, we might
|
|
|
|
/// break macros that were relying on that binary operator as a
|
|
|
|
/// separator.
|
2016-01-27 13:26:47 -05:00
|
|
|
// when changing this do not forget to update doc/book/macros.md!
|
2016-05-18 15:08:19 +02:00
|
|
|
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, (String, &'static str)> {
|
2015-01-02 16:41:24 -05:00
|
|
|
if let &CloseDelim(_) = tok {
|
2015-05-15 13:20:26 -04:00
|
|
|
// closing a token tree can never be matched by any fragment;
|
|
|
|
// iow, we always require that `(` and `)` match, etc.
|
2015-01-19 22:43:15 +01:00
|
|
|
Ok(true)
|
|
|
|
} else {
|
|
|
|
match frag {
|
|
|
|
"item" => {
|
|
|
|
// since items *must* be followed by either a `;` or a `}`, we can
|
|
|
|
// accept anything after them
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"block" => {
|
2015-10-07 23:11:25 +01:00
|
|
|
// anything can follow block, the braces provide an easy boundary to
|
2015-01-19 22:43:15 +01:00
|
|
|
// maintain
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"stmt" | "expr" => {
|
|
|
|
match *tok {
|
|
|
|
FatArrow | Comma | Semi => Ok(true),
|
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"pat" => {
|
|
|
|
match *tok {
|
2015-11-18 00:17:48 +01:00
|
|
|
FatArrow | Comma | Eq | BinOp(token::Or) => Ok(true),
|
2016-04-16 04:12:02 +03:00
|
|
|
Ident(i) if (i.name.as_str() == "if" ||
|
|
|
|
i.name.as_str() == "in") => Ok(true),
|
2015-01-19 22:43:15 +01:00
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"path" | "ty" => {
|
|
|
|
match *tok {
|
2016-01-23 13:38:18 -05:00
|
|
|
OpenDelim(token::DelimToken::Brace) | OpenDelim(token::DelimToken::Bracket) |
|
2015-11-18 00:17:48 +01:00
|
|
|
Comma | FatArrow | Colon | Eq | Gt | Semi | BinOp(token::Or) => Ok(true),
|
2016-04-16 04:12:02 +03:00
|
|
|
MatchNt(_, ref frag) if frag.name.as_str() == "block" => Ok(true),
|
|
|
|
Ident(i) if i.name.as_str() == "as" || i.name.as_str() == "where" => Ok(true),
|
2015-01-19 22:43:15 +01:00
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"ident" => {
|
|
|
|
// being a single token, idents are harmless
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"meta" | "tt" => {
|
|
|
|
// being either a single token or a delimited sequence, tt is
|
|
|
|
// harmless
|
|
|
|
Ok(true)
|
|
|
|
},
|
2016-05-18 15:08:19 +02:00
|
|
|
_ => Err((format!("invalid fragment specifier `{}`", frag),
|
|
|
|
"valid fragment specifiers are `ident`, `block`, \
|
|
|
|
`stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \
|
|
|
|
and `item`"))
|
2015-01-19 22:43:15 +01:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
}
|
2015-11-12 20:55:28 +01:00
|
|
|
|
|
|
|
fn has_legal_fragment_specifier(tok: &Token) -> Result<(), String> {
|
|
|
|
debug!("has_legal_fragment_specifier({:?})", tok);
|
2016-04-16 04:12:02 +03:00
|
|
|
if let &MatchNt(_, ref frag_spec) = tok {
|
2015-11-12 20:55:28 +01:00
|
|
|
let s = &frag_spec.name.as_str();
|
|
|
|
if !is_legal_fragment_specifier(s) {
|
|
|
|
return Err(s.to_string());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// True if `frag` names one of the fragment specifiers that
/// `macro_rules!` matchers understand.
fn is_legal_fragment_specifier(frag: &str) -> bool {
    // The complete set of recognized specifiers.
    const LEGAL: &'static [&'static str] = &["item", "block", "stmt", "expr", "pat",
                                             "path", "ty", "ident", "meta", "tt"];
    LEGAL.contains(&frag)
}
|