2015-01-02 16:41:24 -05:00
|
|
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
|
2012-12-03 16:48:01 -08:00
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
2015-11-06 14:52:02 +01:00
|
|
|
use ast::{self, TokenTree};
|
2014-10-06 23:00:56 +01:00
|
|
|
use codemap::{Span, DUMMY_SP};
|
2015-11-14 19:50:46 +09:00
|
|
|
use ext::base::{DummyResult, ExtCtxt, MacResult, SyntaxExtension};
|
2014-07-10 12:09:56 -07:00
|
|
|
use ext::base::{NormalTT, TTMacroExpander};
|
2014-01-09 15:05:33 +02:00
|
|
|
use ext::tt::macro_parser::{Success, Error, Failure};
|
2015-11-14 19:11:40 +09:00
|
|
|
use ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
|
2015-08-07 16:36:56 +02:00
|
|
|
use ext::tt::macro_parser::parse;
|
2015-02-24 19:56:01 +01:00
|
|
|
use parse::lexer::new_tt_reader;
|
2012-11-18 15:55:03 -08:00
|
|
|
use parse::parser::Parser;
|
2015-01-18 17:18:29 +01:00
|
|
|
use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
|
2015-01-02 16:41:24 -05:00
|
|
|
use parse::token::Token::*;
|
2012-12-23 17:41:37 -05:00
|
|
|
use print;
|
2014-09-13 19:06:01 +03:00
|
|
|
use ptr::P;
|
|
|
|
|
2013-11-24 23:08:53 -08:00
|
|
|
use util::small_vector::SmallVector;
|
2012-12-23 17:41:37 -05:00
|
|
|
|
2014-02-28 12:54:01 -08:00
|
|
|
use std::cell::RefCell;
|
2015-11-12 20:55:28 +01:00
|
|
|
use std::collections::{HashMap};
|
|
|
|
use std::collections::hash_map::{Entry};
|
2014-03-27 16:40:35 +02:00
|
|
|
use std::rc::Rc;
|
2014-02-28 12:54:01 -08:00
|
|
|
|
2014-03-09 16:54:34 +02:00
|
|
|
struct ParserAnyMacro<'a> {
|
|
|
|
parser: RefCell<Parser<'a>>,
|
2015-04-04 13:13:57 -07:00
|
|
|
|
|
|
|
/// Span of the expansion site of the macro this parser is for
|
|
|
|
site_span: Span,
|
|
|
|
/// The ident of the macro we're parsing
|
|
|
|
macro_ident: ast::Ident
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-03-09 16:54:34 +02:00
|
|
|
impl<'a> ParserAnyMacro<'a> {
|
2013-10-02 14:43:15 +10:00
|
|
|
/// Make sure we don't have any tokens left to parse, so we don't
|
|
|
|
/// silently drop anything. `allow_semi` is so that "optional"
|
2014-07-10 17:46:09 -07:00
|
|
|
/// semicolons at the end of normal expressions aren't complained
|
2015-01-02 14:44:21 -08:00
|
|
|
/// about e.g. the semicolon in `macro_rules! kapow { () => {
|
|
|
|
/// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
|
2013-10-02 14:43:15 +10:00
|
|
|
/// allowed to be there.
|
2015-11-23 21:06:51 +01:00
|
|
|
fn ensure_complete_parse(&self, allow_semi: bool, context: &str) {
|
2013-12-30 14:04:00 -08:00
|
|
|
let mut parser = self.parser.borrow_mut();
|
2014-10-27 19:22:52 +11:00
|
|
|
if allow_semi && parser.token == token::Semi {
|
2015-12-31 12:11:53 +13:00
|
|
|
parser.bump();
|
2013-10-02 14:43:15 +10:00
|
|
|
}
|
2014-10-27 19:22:52 +11:00
|
|
|
if parser.token != token::Eof {
|
2014-06-21 03:39:03 -07:00
|
|
|
let token_str = parser.this_token_to_string();
|
2013-12-30 14:04:00 -08:00
|
|
|
let msg = format!("macro expansion ignores token `{}` and any \
|
|
|
|
following",
|
|
|
|
token_str);
|
2014-03-20 15:05:37 -07:00
|
|
|
let span = parser.span;
|
2015-12-21 10:00:43 +13:00
|
|
|
let mut err = parser.diagnostic().struct_span_err(span, &msg[..]);
|
2015-04-04 13:13:57 -07:00
|
|
|
let msg = format!("caused by the macro expansion here; the usage \
|
2015-11-24 16:34:48 +01:00
|
|
|
of `{}!` is likely invalid in {} context",
|
2015-11-23 21:06:51 +01:00
|
|
|
self.macro_ident, context);
|
2015-12-21 10:00:43 +13:00
|
|
|
err.span_note(self.site_span, &msg[..])
|
|
|
|
.emit();
|
2013-10-02 14:43:15 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-15 22:00:14 +10:00
|
|
|
impl<'a> MacResult for ParserAnyMacro<'a> {
|
2014-09-13 19:06:01 +03:00
|
|
|
fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
|
2015-11-10 16:08:26 -08:00
|
|
|
let ret = panictry!(self.parser.borrow_mut().parse_expr());
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(true, "expression");
|
2014-04-15 22:00:14 +10:00
|
|
|
Some(ret)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
2014-09-13 19:06:01 +03:00
|
|
|
fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
|
2015-11-10 16:08:26 -08:00
|
|
|
let ret = panictry!(self.parser.borrow_mut().parse_pat());
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(false, "pattern");
|
2014-05-19 13:32:51 -07:00
|
|
|
Some(ret)
|
|
|
|
}
|
2014-09-13 19:06:01 +03:00
|
|
|
fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
|
2013-11-24 23:08:53 -08:00
|
|
|
let mut ret = SmallVector::zero();
|
2015-11-10 16:08:26 -08:00
|
|
|
while let Some(item) = panictry!(self.parser.borrow_mut().parse_item()) {
|
2015-03-13 11:34:51 +02:00
|
|
|
ret.push(item);
|
2013-11-24 23:08:53 -08:00
|
|
|
}
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(false, "item");
|
2014-04-15 22:00:14 +10:00
|
|
|
Some(ret)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
2014-07-10 17:46:09 -07:00
|
|
|
|
2015-03-11 23:38:58 +02:00
|
|
|
fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
|
|
|
|
-> Option<SmallVector<P<ast::ImplItem>>> {
|
2014-07-10 17:46:09 -07:00
|
|
|
let mut ret = SmallVector::zero();
|
|
|
|
loop {
|
|
|
|
let mut parser = self.parser.borrow_mut();
|
|
|
|
match parser.token {
|
2014-10-27 19:22:52 +11:00
|
|
|
token::Eof => break,
|
2015-03-28 21:58:51 +00:00
|
|
|
_ => ret.push(panictry!(parser.parse_impl_item()))
|
2014-07-10 17:46:09 -07:00
|
|
|
}
|
|
|
|
}
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(false, "item");
|
2014-07-10 17:46:09 -07:00
|
|
|
Some(ret)
|
|
|
|
}
|
|
|
|
|
2015-04-07 08:21:18 -05:00
|
|
|
fn make_stmts(self: Box<ParserAnyMacro<'a>>)
|
|
|
|
-> Option<SmallVector<P<ast::Stmt>>> {
|
|
|
|
let mut ret = SmallVector::zero();
|
|
|
|
loop {
|
|
|
|
let mut parser = self.parser.borrow_mut();
|
|
|
|
match parser.token {
|
|
|
|
token::Eof => break,
|
2015-11-10 16:08:26 -08:00
|
|
|
_ => match parser.parse_stmt() {
|
2015-04-07 08:21:18 -05:00
|
|
|
Ok(maybe_stmt) => match maybe_stmt {
|
|
|
|
Some(stmt) => ret.push(stmt),
|
|
|
|
None => (),
|
|
|
|
},
|
2015-12-21 10:00:43 +13:00
|
|
|
Err(mut e) => {
|
|
|
|
e.emit();
|
|
|
|
break;
|
|
|
|
}
|
2015-04-07 08:21:18 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(false, "statement");
|
2015-04-07 08:21:18 -05:00
|
|
|
Some(ret)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
2015-07-25 21:54:19 -07:00
|
|
|
|
|
|
|
fn make_ty(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Ty>> {
|
2015-11-10 16:08:26 -08:00
|
|
|
let ret = panictry!(self.parser.borrow_mut().parse_ty());
|
2015-11-23 21:06:51 +01:00
|
|
|
self.ensure_complete_parse(false, "type");
|
2015-07-25 21:54:19 -07:00
|
|
|
Some(ret)
|
|
|
|
}
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-01-25 13:34:26 -08:00
|
|
|
struct MacroRulesMacroExpander {
|
2015-01-02 16:41:24 -05:00
|
|
|
name: ast::Ident,
|
|
|
|
imported_from: Option<ast::Ident>,
|
2015-11-14 19:11:40 +09:00
|
|
|
lhses: Vec<TokenTree>,
|
|
|
|
rhses: Vec<TokenTree>,
|
2015-11-14 19:50:46 +09:00
|
|
|
valid: bool,
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-07-10 12:09:56 -07:00
|
|
|
impl TTMacroExpander for MacroRulesMacroExpander {
|
2014-08-27 21:46:52 -04:00
|
|
|
fn expand<'cx>(&self,
|
|
|
|
cx: &'cx mut ExtCtxt,
|
|
|
|
sp: Span,
|
2015-11-14 19:11:40 +09:00
|
|
|
arg: &[TokenTree])
|
2014-08-27 21:46:52 -04:00
|
|
|
-> Box<MacResult+'cx> {
|
2015-11-14 19:50:46 +09:00
|
|
|
if !self.valid {
|
|
|
|
return DummyResult::any(sp);
|
|
|
|
}
|
2014-02-28 12:54:01 -08:00
|
|
|
generic_extension(cx,
|
|
|
|
sp,
|
|
|
|
self.name,
|
2014-09-15 18:27:28 -07:00
|
|
|
self.imported_from,
|
2014-02-28 12:54:01 -08:00
|
|
|
arg,
|
2015-02-18 15:58:07 -08:00
|
|
|
&self.lhses,
|
|
|
|
&self.rhses)
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-09 13:12:30 -07:00
|
|
|
/// Given `lhses` and `rhses`, this is the new macro we create
|
2014-08-27 21:46:52 -04:00
|
|
|
fn generic_extension<'cx>(cx: &'cx ExtCtxt,
|
|
|
|
sp: Span,
|
2015-01-02 16:41:24 -05:00
|
|
|
name: ast::Ident,
|
|
|
|
imported_from: Option<ast::Ident>,
|
2015-11-14 19:11:40 +09:00
|
|
|
arg: &[TokenTree],
|
|
|
|
lhses: &[TokenTree],
|
|
|
|
rhses: &[TokenTree])
|
2014-08-27 21:46:52 -04:00
|
|
|
-> Box<MacResult+'cx> {
|
2013-08-30 14:40:05 -07:00
|
|
|
if cx.trace_macros() {
|
2014-10-22 16:37:20 +11:00
|
|
|
println!("{}! {{ {} }}",
|
2015-07-28 18:07:20 +02:00
|
|
|
name,
|
2014-10-22 16:37:20 +11:00
|
|
|
print::pprust::tts_to_string(arg));
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Which arm's failure should we report? (the one furthest along)
|
2014-01-01 15:53:22 +09:00
|
|
|
let mut best_fail_spot = DUMMY_SP;
|
2014-05-25 03:17:19 -07:00
|
|
|
let mut best_fail_msg = "internal error: ran no matchers".to_string();
|
2013-08-30 14:40:05 -07:00
|
|
|
|
|
|
|
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
|
2015-11-14 19:18:32 +09:00
|
|
|
let lhs_tt = match *lhs {
|
|
|
|
TokenTree::Delimited(_, ref delim) => &delim.tts[..],
|
2016-01-04 01:11:54 +01:00
|
|
|
_ => cx.span_fatal(sp, "malformed macro lhs")
|
2015-11-14 19:18:32 +09:00
|
|
|
};
|
2015-02-24 19:56:01 +01:00
|
|
|
|
2015-11-14 19:18:32 +09:00
|
|
|
match TokenTree::parse(cx, lhs_tt, arg) {
|
|
|
|
Success(named_matches) => {
|
2015-11-14 19:11:40 +09:00
|
|
|
let rhs = match rhses[i] {
|
2015-11-14 19:18:32 +09:00
|
|
|
// ignore delimiters
|
|
|
|
TokenTree::Delimited(_, ref delimed) => delimed.tts.clone(),
|
2016-01-04 01:11:54 +01:00
|
|
|
_ => cx.span_fatal(sp, "malformed macro rhs"),
|
2013-08-30 14:40:05 -07:00
|
|
|
};
|
|
|
|
// rhs has holes ( `$id` and `$(...)` that need filled)
|
2014-03-16 20:56:24 +02:00
|
|
|
let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
|
|
|
|
Some(named_matches),
|
2014-09-15 18:27:28 -07:00
|
|
|
imported_from,
|
2013-08-30 14:40:05 -07:00
|
|
|
rhs);
|
2015-02-15 09:52:21 +01:00
|
|
|
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
|
2015-12-31 12:11:53 +13:00
|
|
|
p.check_unknown_macro_variable();
|
2013-08-30 14:40:05 -07:00
|
|
|
// Let the context choose how to interpret the result.
|
|
|
|
// Weird, but useful for X-macros.
|
2015-04-15 20:56:16 -07:00
|
|
|
return Box::new(ParserAnyMacro {
|
2013-12-30 14:04:00 -08:00
|
|
|
parser: RefCell::new(p),
|
2015-04-04 13:13:57 -07:00
|
|
|
|
|
|
|
// Pass along the original expansion site and the name of the macro
|
|
|
|
// so we can print a useful error message if the parse of the expanded
|
|
|
|
// macro leaves unparsed tokens.
|
|
|
|
site_span: sp,
|
|
|
|
macro_ident: name
|
2015-04-15 20:56:16 -07:00
|
|
|
})
|
2015-11-14 19:18:32 +09:00
|
|
|
}
|
|
|
|
Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
|
2013-08-30 14:40:05 -07:00
|
|
|
best_fail_spot = sp;
|
|
|
|
best_fail_msg = (*msg).clone();
|
2015-11-14 19:18:32 +09:00
|
|
|
},
|
|
|
|
Error(err_sp, ref msg) => {
|
2015-11-24 07:23:53 +05:30
|
|
|
cx.span_fatal(err_sp.substitute_dummy(sp), &msg[..])
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-08-07 16:36:56 +02:00
|
|
|
|
2015-11-24 07:23:53 +05:30
|
|
|
cx.span_fatal(best_fail_spot.substitute_dummy(sp), &best_fail_msg[..]);
|
2013-08-30 14:40:05 -07:00
|
|
|
}
|
|
|
|
|
2014-10-07 00:18:24 +01:00
|
|
|
// Note that macro-by-example's input is also matched against a token tree:
|
|
|
|
// $( $lhs:tt => $rhs:tt );+
|
|
|
|
//
|
|
|
|
// Holy self-referential!
|
|
|
|
|
2014-12-30 19:10:46 -08:00
|
|
|
/// Converts a `macro_rules!` invocation into a syntax extension.
|
|
|
|
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
|
|
|
|
def: &ast::MacroDef) -> SyntaxExtension {
|
2012-07-06 18:04:28 -07:00
|
|
|
|
2013-06-04 12:34:25 -07:00
|
|
|
let lhs_nm = gensym_ident("lhs");
|
|
|
|
let rhs_nm = gensym_ident("rhs");
|
2012-07-18 16:18:02 -07:00
|
|
|
|
2013-05-29 16:21:04 -07:00
|
|
|
// The pattern that macro_rules matches.
|
2012-07-27 17:32:15 -07:00
|
|
|
// The grammar for macro_rules! is:
|
2014-10-06 23:00:56 +01:00
|
|
|
// $( $lhs:tt => $rhs:tt );+
|
2012-07-27 17:32:15 -07:00
|
|
|
// ...quasiquoting this would be nice.
|
2014-10-06 23:00:56 +01:00
|
|
|
// These spans won't matter, anyways
|
|
|
|
let match_lhs_tok = MatchNt(lhs_nm, special_idents::tt, token::Plain, token::Plain);
|
|
|
|
let match_rhs_tok = MatchNt(rhs_nm, special_idents::tt, token::Plain, token::Plain);
|
2014-02-28 13:09:09 -08:00
|
|
|
let argument_gram = vec!(
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Sequence(DUMMY_SP,
|
2014-11-02 12:21:16 +01:00
|
|
|
Rc::new(ast::SequenceRepetition {
|
|
|
|
tts: vec![
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Token(DUMMY_SP, match_lhs_tok),
|
|
|
|
TokenTree::Token(DUMMY_SP, token::FatArrow),
|
|
|
|
TokenTree::Token(DUMMY_SP, match_rhs_tok)],
|
2014-11-02 12:21:16 +01:00
|
|
|
separator: Some(token::Semi),
|
|
|
|
op: ast::OneOrMore,
|
|
|
|
num_captures: 2
|
|
|
|
})),
|
2012-08-01 14:34:35 -07:00
|
|
|
//to phase into semicolon-termination instead of
|
|
|
|
//semicolon-separation
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Sequence(DUMMY_SP,
|
2014-11-02 12:21:16 +01:00
|
|
|
Rc::new(ast::SequenceRepetition {
|
2015-11-06 14:52:02 +01:00
|
|
|
tts: vec![TokenTree::Token(DUMMY_SP, token::Semi)],
|
2014-11-02 12:21:16 +01:00
|
|
|
separator: None,
|
|
|
|
op: ast::ZeroOrMore,
|
|
|
|
num_captures: 0
|
|
|
|
})));
|
2012-07-06 18:04:28 -07:00
|
|
|
|
2012-07-27 17:32:15 -07:00
|
|
|
|
|
|
|
// Parse the macro_rules! invocation (`none` is for no interpolations):
|
2014-03-16 20:56:24 +02:00
|
|
|
let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
|
2014-09-15 18:27:28 -07:00
|
|
|
None,
|
2013-06-27 17:41:35 -07:00
|
|
|
None,
|
2014-12-30 19:10:46 -08:00
|
|
|
def.body.clone());
|
2015-08-07 16:36:56 +02:00
|
|
|
|
|
|
|
let argument_map = match parse(cx.parse_sess(),
|
|
|
|
cx.cfg(),
|
|
|
|
arg_reader,
|
|
|
|
&argument_gram) {
|
|
|
|
Success(m) => m,
|
2015-08-10 20:40:46 +02:00
|
|
|
Failure(sp, str) | Error(sp, str) => {
|
|
|
|
panic!(cx.parse_sess().span_diagnostic
|
|
|
|
.span_fatal(sp.substitute_dummy(def.span), &str[..]));
|
2015-08-07 16:36:56 +02:00
|
|
|
}
|
|
|
|
};
|
2012-07-06 18:04:28 -07:00
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
let mut valid = true;
|
|
|
|
|
2012-07-27 17:32:15 -07:00
|
|
|
// Extract the arguments:
|
2015-09-24 23:05:02 +03:00
|
|
|
let lhses = match **argument_map.get(&lhs_nm.name).unwrap() {
|
2015-11-14 19:11:40 +09:00
|
|
|
MatchedSeq(ref s, _) => {
|
|
|
|
s.iter().map(|m| match **m {
|
|
|
|
MatchedNonterminal(NtTT(ref tt)) => (**tt).clone(),
|
|
|
|
_ => cx.span_bug(def.span, "wrong-structured lhs")
|
|
|
|
}).collect()
|
|
|
|
}
|
2014-12-30 19:10:46 -08:00
|
|
|
_ => cx.span_bug(def.span, "wrong-structured lhs")
|
2012-07-06 18:04:28 -07:00
|
|
|
};
|
2013-02-17 10:59:09 -08:00
|
|
|
|
2015-01-31 12:20:46 -05:00
|
|
|
for lhs in &lhses {
|
2015-11-10 20:16:28 +09:00
|
|
|
check_lhs_nt_follows(cx, lhs, def.span);
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
|
2015-09-24 23:05:02 +03:00
|
|
|
let rhses = match **argument_map.get(&rhs_nm.name).unwrap() {
|
2015-11-14 19:11:40 +09:00
|
|
|
MatchedSeq(ref s, _) => {
|
|
|
|
s.iter().map(|m| match **m {
|
|
|
|
MatchedNonterminal(NtTT(ref tt)) => (**tt).clone(),
|
|
|
|
_ => cx.span_bug(def.span, "wrong-structured rhs")
|
|
|
|
}).collect()
|
|
|
|
}
|
2014-12-30 19:10:46 -08:00
|
|
|
_ => cx.span_bug(def.span, "wrong-structured rhs")
|
2012-07-06 18:04:28 -07:00
|
|
|
};
|
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
for rhs in &rhses {
|
|
|
|
valid &= check_rhs(cx, rhs);
|
|
|
|
}
|
|
|
|
|
2015-04-15 20:56:16 -07:00
|
|
|
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
|
2014-12-30 19:10:46 -08:00
|
|
|
name: def.ident,
|
|
|
|
imported_from: def.imported_from,
|
2013-08-30 14:40:05 -07:00
|
|
|
lhses: lhses,
|
|
|
|
rhses: rhses,
|
2015-11-14 19:50:46 +09:00
|
|
|
valid: valid,
|
2015-04-15 20:56:16 -07:00
|
|
|
});
|
2012-07-06 18:04:28 -07:00
|
|
|
|
Add #[allow_internal_unstable] to track stability for macros better.
Unstable items used in a macro expansion will now always trigger
stability warnings, *unless* the unstable items are directly inside a
macro marked with `#[allow_internal_unstable]`. IOW, the compiler warns
unless the span of the unstable item is a subspan of the definition of a
macro marked with that attribute.
E.g.
#[allow_internal_unstable]
macro_rules! foo {
($e: expr) => {{
$e;
unstable(); // no warning
only_called_by_foo!();
}}
}
macro_rules! only_called_by_foo {
() => { unstable() } // warning
}
foo!(unstable()) // warning
The unstable inside `foo` is fine, due to the attribute. But the
`unstable` inside `only_called_by_foo` is not, since that macro doesn't
have the attribute, and the `unstable` passed into `foo` is also not
fine since it isn't contained in the macro itself (that is, even though
it is only used directly in the macro).
In the process this makes the stability tracking much more precise,
e.g. previously `println!("{}", unstable())` got no warning, but now it
does. As such, this is a bug fix that may cause [breaking-change]s.
The attribute is definitely feature gated, since it explicitly allows
side-stepping the feature gating system.
2015-03-01 14:09:28 +11:00
|
|
|
NormalTT(exp, Some(def.span), def.allow_internal_unstable)
|
2012-10-15 14:56:42 -07:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
|
2015-11-12 20:55:28 +01:00
|
|
|
// why is this here? because of https://github.com/rust-lang/rust/issues/27774
/// Views a single value as a one-element slice with the same lifetime.
fn ref_slice<A>(s: &A) -> &[A] {
    use std::slice::from_raw_parts;
    // SAFETY: `s` is a valid, aligned reference to exactly one `A`, so a
    // slice of length 1 starting at its address is in-bounds for the
    // borrow's lifetime, which the elided output lifetime preserves.
    unsafe { from_raw_parts(s, 1) }
}
|
|
|
|
|
2015-11-14 19:11:40 +09:00
|
|
|
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &TokenTree, sp: Span) {
|
|
|
|
// lhs is going to be like TokenTree::Delimited(...), where the
|
2015-11-06 14:52:02 +01:00
|
|
|
// entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
|
2015-01-18 10:19:47 -05:00
|
|
|
match lhs {
|
2015-11-14 19:18:32 +09:00
|
|
|
&TokenTree::Delimited(_, ref tts) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
check_matcher(cx, &tts.tts);
|
2015-11-14 19:18:32 +09:00
|
|
|
},
|
|
|
|
tt @ &TokenTree::Sequence(..) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
check_matcher(cx, ref_slice(tt));
|
2015-01-02 16:41:24 -05:00
|
|
|
},
|
2015-11-23 21:06:51 +01:00
|
|
|
_ => cx.span_err(sp, "invalid macro matcher; matchers must be contained \
|
2015-11-14 19:18:32 +09:00
|
|
|
in balanced delimiters or a repetition indicator")
|
2015-01-02 16:41:24 -05:00
|
|
|
};
|
|
|
|
// we don't abort on errors on rejection, the driver will do that for us
|
|
|
|
// after parsing/expansion. we can report every error in every macro this way.
|
|
|
|
}
|
|
|
|
|
2015-11-14 19:50:46 +09:00
|
|
|
fn check_rhs(cx: &mut ExtCtxt, rhs: &TokenTree) -> bool {
|
|
|
|
match *rhs {
|
|
|
|
TokenTree::Delimited(..) => return true,
|
|
|
|
_ => cx.span_err(rhs.get_span(), "macro rhs must be delimited")
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2015-11-12 20:55:28 +01:00
|
|
|
// Issue 30450: when we are through a warning cycle, we can just error
|
|
|
|
// on all failure conditions and remove this struct and enum.
|
|
|
|
|
|
|
|
#[derive(Debug)]
|
|
|
|
struct OnFail {
|
|
|
|
saw_failure: bool,
|
|
|
|
action: OnFailAction,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Copy, Clone, Debug)]
/// What `OnFail::react` does with a reported matcher problem.
enum OnFailAction { Warn, Error, DoNothing }
|
|
|
|
|
|
|
|
impl OnFail {
|
|
|
|
fn warn() -> OnFail { OnFail { saw_failure: false, action: OnFailAction::Warn } }
|
|
|
|
fn error() -> OnFail { OnFail { saw_failure: false, action: OnFailAction::Error } }
|
|
|
|
fn do_nothing() -> OnFail { OnFail { saw_failure: false, action: OnFailAction::DoNothing } }
|
|
|
|
fn react(&mut self, cx: &mut ExtCtxt, sp: Span, msg: &str) {
|
|
|
|
match self.action {
|
|
|
|
OnFailAction::DoNothing => {}
|
|
|
|
OnFailAction::Error => cx.span_err(sp, msg),
|
|
|
|
OnFailAction::Warn => {
|
|
|
|
cx.struct_span_warn(sp, msg)
|
|
|
|
.span_note(sp, "The above warning will be a hard error in the next release.")
|
|
|
|
.emit();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
self.saw_failure = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn check_matcher(cx: &mut ExtCtxt, matcher: &[TokenTree]) {
|
|
|
|
// Issue 30450: when we are through a warning cycle, we can just
|
|
|
|
// error on all failure conditions (and remove check_matcher_old).
|
|
|
|
|
|
|
|
// First run the old-pass, but *only* to find out if it would have failed.
|
|
|
|
let mut on_fail = OnFail::do_nothing();
|
|
|
|
check_matcher_old(cx, matcher.iter(), &Eof, &mut on_fail);
|
|
|
|
// Then run the new pass, but merely warn if the old pass accepts and new pass rejects.
|
|
|
|
// (Note this silently accepts code if new pass accepts.)
|
|
|
|
let mut on_fail = if on_fail.saw_failure {
|
|
|
|
OnFail::error()
|
|
|
|
} else {
|
|
|
|
OnFail::warn()
|
|
|
|
};
|
|
|
|
check_matcher_new(cx, matcher, &mut on_fail);
|
|
|
|
}
|
|
|
|
|
|
|
|
// returns the last token that was checked, for TokenTree::Sequence.
|
|
|
|
// return value is used by recursive calls.
|
|
|
|
fn check_matcher_old<'a, I>(cx: &mut ExtCtxt, matcher: I, follow: &Token, on_fail: &mut OnFail)
|
2015-01-06 18:02:00 -05:00
|
|
|
-> Option<(Span, Token)> where I: Iterator<Item=&'a TokenTree> {
|
2015-01-02 16:41:24 -05:00
|
|
|
use print::pprust::token_to_string;
|
2015-11-12 20:55:28 +01:00
|
|
|
use std::iter::once;
|
2015-01-02 16:41:24 -05:00
|
|
|
|
2015-01-06 18:02:00 -05:00
|
|
|
let mut last = None;
|
2015-01-02 16:41:24 -05:00
|
|
|
|
|
|
|
// 2. For each token T in M:
|
2015-01-06 18:02:00 -05:00
|
|
|
let mut tokens = matcher.peekable();
|
2015-01-02 16:41:24 -05:00
|
|
|
while let Some(token) = tokens.next() {
|
2015-01-06 18:02:00 -05:00
|
|
|
last = match *token {
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Token(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
|
2015-01-02 16:41:24 -05:00
|
|
|
// ii. If T is a simple NT, look ahead to the next token T' in
|
2015-05-15 13:20:26 -04:00
|
|
|
// M. If T' is in the set FOLLOW(NT), continue. Else; reject.
|
2015-07-28 18:07:20 +02:00
|
|
|
if can_be_followed_by_any(&frag_spec.name.as_str()) {
|
2015-05-15 13:20:26 -04:00
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
let next_token = match tokens.peek() {
|
|
|
|
// If T' closes a complex NT, replace T' with F
|
2015-11-06 14:52:02 +01:00
|
|
|
Some(&&TokenTree::Token(_, CloseDelim(_))) => follow.clone(),
|
|
|
|
Some(&&TokenTree::Token(_, ref tok)) => tok.clone(),
|
|
|
|
Some(&&TokenTree::Sequence(sp, _)) => {
|
2015-05-15 13:20:26 -04:00
|
|
|
// Be conservative around sequences: to be
|
|
|
|
// more specific, we would need to
|
|
|
|
// consider FIRST sets, but also the
|
|
|
|
// possibility that the sequence occurred
|
|
|
|
// zero times (in which case we need to
|
|
|
|
// look at the token that follows the
|
2015-07-28 18:07:20 +02:00
|
|
|
// sequence, which may itself be a sequence,
|
2015-05-15 13:20:26 -04:00
|
|
|
// and so on).
|
2015-11-12 20:55:28 +01:00
|
|
|
on_fail.react(cx, sp,
|
2015-05-15 13:20:26 -04:00
|
|
|
&format!("`${0}:{1}` is followed by a \
|
|
|
|
sequence repetition, which is not \
|
|
|
|
allowed for `{1}` fragments",
|
2015-07-28 18:07:20 +02:00
|
|
|
name, frag_spec)
|
2015-02-01 21:53:25 -05:00
|
|
|
);
|
2015-05-15 13:20:26 -04:00
|
|
|
Eof
|
|
|
|
},
|
|
|
|
// die next iteration
|
2015-11-06 14:52:02 +01:00
|
|
|
Some(&&TokenTree::Delimited(_, ref delim)) => delim.close_token(),
|
2015-05-15 13:20:26 -04:00
|
|
|
// else, we're at the end of the macro or sequence
|
|
|
|
None => follow.clone()
|
|
|
|
};
|
|
|
|
|
2015-11-06 14:52:02 +01:00
|
|
|
let tok = if let TokenTree::Token(_, ref tok) = *token {
|
|
|
|
tok
|
|
|
|
} else {
|
|
|
|
unreachable!()
|
|
|
|
};
|
2015-05-15 13:20:26 -04:00
|
|
|
|
|
|
|
// If T' is in the set FOLLOW(NT), continue. Else, reject.
|
2015-07-28 18:07:20 +02:00
|
|
|
match (&next_token, is_in_follow(cx, &next_token, &frag_spec.name.as_str())) {
|
2015-05-15 13:20:26 -04:00
|
|
|
(_, Err(msg)) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
on_fail.react(cx, sp, &msg);
|
2015-05-15 13:20:26 -04:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
(&Eof, _) => return Some((sp, tok.clone())),
|
|
|
|
(_, Ok(true)) => continue,
|
|
|
|
(next, Ok(false)) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
on_fail.react(cx, sp, &format!("`${0}:{1}` is followed by `{2}`, which \
|
2015-05-15 13:20:26 -04:00
|
|
|
is not allowed for `{1}` fragments",
|
2015-07-28 18:07:20 +02:00
|
|
|
name, frag_spec,
|
2015-05-15 13:20:26 -04:00
|
|
|
token_to_string(next)));
|
|
|
|
continue
|
|
|
|
},
|
2015-02-25 17:11:36 -08:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
},
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Sequence(sp, ref seq) => {
|
2015-01-02 16:41:24 -05:00
|
|
|
// iii. Else, T is a complex NT.
|
|
|
|
match seq.separator {
|
|
|
|
// If T has the form $(...)U+ or $(...)U* for some token U,
|
|
|
|
// run the algorithm on the contents with F set to U. If it
|
|
|
|
// accepts, continue, else, reject.
|
2015-01-06 18:02:00 -05:00
|
|
|
Some(ref u) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
let last = check_matcher_old(cx, seq.tts.iter(), u, on_fail);
|
2015-01-06 18:02:00 -05:00
|
|
|
match last {
|
2015-01-06 15:57:50 -08:00
|
|
|
// Since the delimiter isn't required after the last
|
|
|
|
// repetition, make sure that the *next* token is
|
|
|
|
// sane. This doesn't actually compute the FIRST of
|
|
|
|
// the rest of the matcher yet, it only considers
|
|
|
|
// single tokens and simple NTs. This is imprecise,
|
|
|
|
// but conservatively correct.
|
2015-01-06 18:02:00 -05:00
|
|
|
Some((span, tok)) => {
|
|
|
|
let fol = match tokens.peek() {
|
2015-11-06 14:52:02 +01:00
|
|
|
Some(&&TokenTree::Token(_, ref tok)) => tok.clone(),
|
|
|
|
Some(&&TokenTree::Delimited(_, ref delim)) =>
|
|
|
|
delim.close_token(),
|
2015-01-06 18:02:00 -05:00
|
|
|
Some(_) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
on_fail.react(cx, sp, "sequence repetition followed by \
|
2015-01-06 18:02:00 -05:00
|
|
|
another sequence repetition, which is not allowed");
|
|
|
|
Eof
|
|
|
|
},
|
|
|
|
None => Eof
|
|
|
|
};
|
2015-11-12 20:55:28 +01:00
|
|
|
check_matcher_old(cx, once(&TokenTree::Token(span, tok.clone())),
|
|
|
|
&fol, on_fail)
|
2015-01-06 18:02:00 -05:00
|
|
|
},
|
|
|
|
None => last,
|
|
|
|
}
|
|
|
|
},
|
2015-01-06 15:57:50 -08:00
|
|
|
// If T has the form $(...)+ or $(...)*, run the algorithm
|
|
|
|
// on the contents with F set to the token following the
|
|
|
|
// sequence. If it accepts, continue, else, reject.
|
2015-01-06 18:02:00 -05:00
|
|
|
None => {
|
|
|
|
let fol = match tokens.peek() {
|
2015-11-06 14:52:02 +01:00
|
|
|
Some(&&TokenTree::Token(_, ref tok)) => tok.clone(),
|
|
|
|
Some(&&TokenTree::Delimited(_, ref delim)) => delim.close_token(),
|
2015-01-06 18:02:00 -05:00
|
|
|
Some(_) => {
|
2015-11-12 20:55:28 +01:00
|
|
|
on_fail.react(cx, sp, "sequence repetition followed by another \
|
2015-01-06 18:02:00 -05:00
|
|
|
sequence repetition, which is not allowed");
|
|
|
|
Eof
|
|
|
|
},
|
|
|
|
None => Eof
|
|
|
|
};
|
2015-11-12 20:55:28 +01:00
|
|
|
check_matcher_old(cx, seq.tts.iter(), &fol, on_fail)
|
2015-01-06 18:02:00 -05:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
},
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Token(..) => {
|
2015-01-02 16:41:24 -05:00
|
|
|
// i. If T is not an NT, continue.
|
|
|
|
continue
|
|
|
|
},
|
2015-11-06 14:52:02 +01:00
|
|
|
TokenTree::Delimited(_, ref tts) => {
|
2015-01-02 16:41:24 -05:00
|
|
|
// if we don't pass in that close delimiter, we'll incorrectly consider the matcher
|
2015-01-06 18:02:00 -05:00
|
|
|
// `{ $foo:ty }` as having a follow that isn't `RBrace`
|
2015-11-12 20:55:28 +01:00
|
|
|
check_matcher_old(cx, tts.tts.iter(), &tts.close_token(), on_fail)
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-01-06 18:02:00 -05:00
|
|
|
last
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
|
2015-11-12 20:55:28 +01:00
|
|
|
fn check_matcher_new(cx: &mut ExtCtxt, matcher: &[TokenTree], on_fail: &mut OnFail) {
|
|
|
|
let first_sets = FirstSets::new(matcher);
|
|
|
|
let empty_suffix = TokenSet::empty();
|
|
|
|
check_matcher_core(cx, &first_sets, matcher, &empty_suffix, on_fail);
|
|
|
|
}
|
|
|
|
|
|
|
|
// The FirstSets for a matcher is a mapping from subsequences in the
|
|
|
|
// matcher to the FIRST set for that subsequence.
|
|
|
|
//
|
|
|
|
// This mapping is partially precomputed via a backwards scan over the
|
|
|
|
// token trees of the matcher, which provides a mapping from each
|
|
|
|
// repetition sequence to its FIRST set.
|
|
|
|
//
|
|
|
|
// (Hypothetically sequences should be uniquely identifiable via their
|
|
|
|
// spans, though perhaps that is false e.g. for macro-generated macros
|
|
|
|
// that do not try to inject artificial span information. My plan is
|
|
|
|
// to try to catch such cases ahead of time and not include them in
|
|
|
|
// the precomputed mapping.)
|
|
|
|
struct FirstSets {
|
|
|
|
// this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its
|
|
|
|
// span in the original matcher to the First set for the inner sequence `tt ...`.
|
|
|
|
//
|
|
|
|
// If two sequences have the same span in a matcher, then map that
|
|
|
|
// span to None (invalidating the mapping here and forcing the code to
|
|
|
|
// use a slow path).
|
|
|
|
first: HashMap<Span, Option<TokenSet>>,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl FirstSets {
|
|
|
|
/// Builds the `FirstSets` table for `tts`: for every `Sequence` node
/// encountered (keyed by its span), records the set of tokens that can
/// begin a match of that sequence's contents. Entries whose spans
/// collide are poisoned with `None` (see below).
fn new(tts: &[TokenTree]) -> FirstSets {
    let mut sets = FirstSets { first: HashMap::new() };
    build_recur(&mut sets, tts);
    return sets;

    // walks backward over `tts`, returning the FIRST for `tts`
    // and updating `sets` at the same time for all sequence
    // substructure we find within `tts`.
    fn build_recur(sets: &mut FirstSets, tts: &[TokenTree]) -> TokenSet {
        // FIRST of the suffix scanned so far; the empty suffix can
        // trivially match the empty sequence.
        let mut first = TokenSet::empty();
        for tt in tts.iter().rev() {
            match *tt {
                TokenTree::Token(sp, ref tok) => {
                    // A concrete token always begins the remaining suffix.
                    first.replace_with((sp, tok.clone()));
                }
                TokenTree::Delimited(_, ref delimited) => {
                    // Record FIRST sets for sequences nested inside the
                    // delimiters; the tree itself always begins with its
                    // opening delimiter.
                    build_recur(sets, &delimited.tts[..]);
                    first.replace_with((delimited.open_span,
                                        Token::OpenDelim(delimited.delim)));
                }
                TokenTree::Sequence(sp, ref seq_rep) => {
                    let subfirst = build_recur(sets, &seq_rep.tts[..]);

                    match sets.first.entry(sp) {
                        Entry::Vacant(vac) => {
                            vac.insert(Some(subfirst.clone()));
                        }
                        Entry::Occupied(mut occ) => {
                            // if there is already an entry, then a span must have collided.
                            // This should not happen with typical macro_rules macros,
                            // but syntax extensions need not maintain distinct spans,
                            // so distinct syntax trees can be assigned the same span.
                            // In such a case, the map cannot be trusted; so mark this
                            // entry as unusable.
                            occ.insert(None);
                        }
                    }

                    // If the sequence contents can be empty, then the first
                    // token could be the separator token itself.
                    if let (Some(ref sep), true) = (seq_rep.separator.clone(),
                                                    subfirst.maybe_empty) {
                        first.add_one_maybe((sp, sep.clone()));
                    }

                    // Reverse scan: Sequence comes before `first`.
                    if subfirst.maybe_empty || seq_rep.op == ast::KleeneOp::ZeroOrMore {
                        // If sequence is potentially empty, then
                        // union them (preserving first emptiness).
                        first.add_all(&TokenSet { maybe_empty: true, ..subfirst });
                    } else {
                        // Otherwise, sequence guaranteed
                        // non-empty; replace first.
                        first = subfirst;
                    }
                }
            }
        }

        return first;
    }
}
|
|
|
|
|
|
|
|
// walks forward over `tts` until all potential FIRST tokens are
|
|
|
|
// identified.
|
|
|
|
// walks forward over `tts` until all potential FIRST tokens are
// identified, i.e. until some element is guaranteed non-empty.
// Sequence FIRST sets are looked up in the precomputed `self.first`
// map (see `FirstSets::new`); panics if a sequence is missing or its
// entry was poisoned by a span collision.
fn first(&self, tts: &[TokenTree]) -> TokenSet {
    let mut first = TokenSet::empty();
    for tt in tts.iter() {
        // loop invariant: everything scanned so far could match empty.
        assert!(first.maybe_empty);
        match *tt {
            TokenTree::Token(sp, ref tok) => {
                // A concrete token is guaranteed non-empty: done.
                first.add_one((sp, tok.clone()));
                return first;
            }
            TokenTree::Delimited(_, ref delimited) => {
                // A delimited tree always starts with its opening
                // delimiter and is guaranteed non-empty: done.
                first.add_one((delimited.open_span,
                               Token::OpenDelim(delimited.delim)));
                return first;
            }
            TokenTree::Sequence(sp, ref seq_rep) => {
                match self.first.get(&sp) {
                    Some(&Some(ref subfirst)) => {

                        // If the sequence contents can be empty, then the first
                        // token could be the separator token itself.

                        if let (Some(ref sep), true) = (seq_rep.separator.clone(),
                                                        subfirst.maybe_empty) {
                            first.add_one_maybe((sp, sep.clone()));
                        }

                        assert!(first.maybe_empty);
                        first.add_all(subfirst);
                        if subfirst.maybe_empty || seq_rep.op == ast::KleeneOp::ZeroOrMore {
                            // continue scanning for more first
                            // tokens, but also make sure we
                            // restore empty-tracking state
                            first.maybe_empty = true;
                            continue;
                        } else {
                            return first;
                        }
                    }

                    Some(&None) => {
                        // entry poisoned by a span collision in `new`.
                        panic!("assume all sequences have (unique) spans for now");
                    }

                    None => {
                        panic!("We missed a sequence during FirstSets construction");
                    }
                }
            }
        }
    }

    // we only exit the loop if `tts` was empty or if every
    // element of `tts` matches the empty sequence.
    assert!(first.maybe_empty);
    return first;
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// A set of Tokens, which may include MatchNt tokens (for
// macro-by-example syntactic variables). It also carries the
// `maybe_empty` flag; that is true if and only if the matcher can
// match an empty token sequence.
//
// The First set is computed on submatchers like `$($a:expr b),* $(c)* d`,
// which has corresponding FIRST = {$a:expr, c, d}.
// Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}.
//
// (Notably, we must allow for *-op to occur zero times.)
#[derive(Clone, Debug)]
struct TokenSet {
    // `(span, token)` pairs; the span records where the token was
    // seen, for use in diagnostics.
    tokens: Vec<(Span, Token)>,
    // true iff the matcher described by this set can match the empty
    // token sequence.
    maybe_empty: bool,
}
|
|
|
|
|
|
|
|
impl TokenSet {
|
|
|
|
// Returns a set for the empty sequence.
|
|
|
|
fn empty() -> Self { TokenSet { tokens: Vec::new(), maybe_empty: true } }
|
|
|
|
|
|
|
|
// Returns the set `{ tok }` for the single-token (and thus
|
|
|
|
// non-empty) sequence [tok].
|
|
|
|
fn singleton(tok: (Span, Token)) -> Self {
|
|
|
|
TokenSet { tokens: vec![tok], maybe_empty: false }
|
|
|
|
}
|
|
|
|
|
|
|
|
// Changes self to be the set `{ tok }`.
|
|
|
|
// Since `tok` is always present, marks self as non-empty.
|
|
|
|
fn replace_with(&mut self, tok: (Span, Token)) {
|
|
|
|
self.tokens.clear();
|
|
|
|
self.tokens.push(tok);
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Changes self to be the empty set `{}`; meant for use when
|
|
|
|
// the particular token does not matter, but we want to
|
|
|
|
// record that it occurs.
|
|
|
|
fn replace_with_irrelevant(&mut self) {
|
|
|
|
self.tokens.clear();
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds `tok` to the set for `self`, marking sequence as non-empy.
|
|
|
|
fn add_one(&mut self, tok: (Span, Token)) {
|
|
|
|
if !self.tokens.contains(&tok) {
|
|
|
|
self.tokens.push(tok);
|
|
|
|
}
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.)
|
|
|
|
fn add_one_maybe(&mut self, tok: (Span, Token)) {
|
|
|
|
if !self.tokens.contains(&tok) {
|
|
|
|
self.tokens.push(tok);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adds all elements of `other` to this.
|
|
|
|
//
|
|
|
|
// (Since this is a set, we filter out duplicates.)
|
|
|
|
//
|
|
|
|
// If `other` is potentially empty, then preserves the previous
|
|
|
|
// setting of the empty flag of `self`. If `other` is guaranteed
|
|
|
|
// non-empty, then `self` is marked non-empty.
|
|
|
|
fn add_all(&mut self, other: &Self) {
|
|
|
|
for tok in &other.tokens {
|
|
|
|
if !self.tokens.contains(tok) {
|
|
|
|
self.tokens.push(tok.clone());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !other.maybe_empty {
|
|
|
|
self.maybe_empty = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Checks that `matcher` is internally consistent and that it
// can legally be followed by a token N, for all N in `follow`.
// (If `follow` is empty, then it imposes no constraint on
// the `matcher`.)
//
// Returns the set of NT tokens that could possibly come last in
// `matcher`. (If `matcher` matches the empty sequence, then
// `maybe_empty` will be set to true.)
//
// Requires that `first_sets` is pre-computed for `matcher`;
// see `FirstSets::new`.
//
// Violations are reported through `on_fail`; checking continues past
// each failure so multiple problems can be reported in one pass.
fn check_matcher_core(cx: &mut ExtCtxt,
                      first_sets: &FirstSets,
                      matcher: &[TokenTree],
                      follow: &TokenSet,
                      on_fail: &mut OnFail) -> TokenSet {
    use print::pprust::token_to_string;

    // The set of tokens that could come last in the matcher scanned so far.
    let mut last = TokenSet::empty();

    // 2. For each token and suffix [T, SUFFIX] in M:
    // ensure that T can be followed by SUFFIX, and if SUFFIX may be empty,
    // then ensure T can also be followed by any element of FOLLOW.
    // (Step 1 -- precomputing the FIRST sets -- was the caller's job;
    // see the `first_sets` requirement in the header comment.)
    'each_token: for i in 0..matcher.len() {
        let token = &matcher[i];
        let suffix = &matcher[i+1..];

        // Computes FIRST(SUFFIX), extended with FOLLOW when SUFFIX can
        // match the empty sequence.
        let build_suffix_first = || {
            let mut s = first_sets.first(suffix);
            if s.maybe_empty { s.add_all(follow); }
            return s;
        };

        // (we build `suffix_first` on demand below; you can tell
        // which cases are supposed to fall through by looking for the
        // initialization of this variable.)
        let suffix_first;

        // First, update `last` so that it corresponds to the set
        // of NT tokens that might end the sequence `... token`.
        match *token {
            TokenTree::Token(sp, ref tok) => {
                let can_be_followed_by_any;
                if let Err(bad_frag) = has_legal_fragment_specifier(tok) {
                    on_fail.react(cx, sp, &format!("invalid fragment specifier `{}`", bad_frag));
                    // (This eliminates false positives and duplicates
                    // from error messages.)
                    can_be_followed_by_any = true;
                } else {
                    can_be_followed_by_any = token_can_be_followed_by_any(tok);
                }

                if can_be_followed_by_any {
                    // don't need to track tokens that work with any,
                    last.replace_with_irrelevant();
                    // ... and don't need to check tokens that can be
                    // followed by anything against SUFFIX.
                    continue 'each_token;
                } else {
                    last.replace_with((sp, tok.clone()));
                    suffix_first = build_suffix_first();
                }
            }
            TokenTree::Delimited(_, ref d) => {
                // Check the interior of the delimited tree, using its own
                // closing delimiter as the follow set.
                let my_suffix = TokenSet::singleton((d.close_span, Token::CloseDelim(d.delim)));
                check_matcher_core(cx, first_sets, &d.tts, &my_suffix, on_fail);
                // don't track non NT tokens
                last.replace_with_irrelevant();

                // also, we don't need to check delimited sequences
                // against SUFFIX
                continue 'each_token;
            }
            TokenTree::Sequence(sp, ref seq_rep) => {
                suffix_first = build_suffix_first();
                // The trick here: when we check the interior, we want
                // to include the separator (if any) as a potential
                // (but not guaranteed) element of FOLLOW. So in that
                // case, we make a temp copy of suffix and stuff
                // delimiter in there.
                //
                // FIXME: Should I first scan suffix_first to see if
                // delimiter is already in it before I go through the
                // work of cloning it? But then again, this way I may
                // get a "tighter" span?
                let mut new;
                let my_suffix = if let Some(ref u) = seq_rep.separator {
                    new = suffix_first.clone();
                    new.add_one_maybe((sp, u.clone()));
                    &new
                } else {
                    &suffix_first
                };

                // At this point, `suffix_first` is built, and
                // `my_suffix` is some TokenSet that we can use
                // for checking the interior of `seq_rep`.
                let next = check_matcher_core(cx, first_sets, &seq_rep.tts, my_suffix, on_fail);
                if next.maybe_empty {
                    // the sequence may repeat zero times, so its interior's
                    // LAST tokens augment (rather than replace) `last`.
                    last.add_all(&next);
                } else {
                    last = next;
                }

                // the recursive call to check_matcher_core already ran the 'each_last
                // check below, so we can just keep going forward here.
                continue 'each_token;
            }
        }

        // (`suffix_first` guaranteed initialized once reaching here.)

        // Now `last` holds the complete set of NT tokens that could
        // end the sequence before SUFFIX. Check that every one works with `suffix`.
        'each_last: for &(_sp, ref t) in &last.tokens {
            if let MatchNt(ref name, ref frag_spec, _, _) = *t {
                for &(sp, ref next_token) in &suffix_first.tokens {
                    match is_in_follow(cx, next_token, &frag_spec.name.as_str()) {
                        Err(msg) => {
                            on_fail.react(cx, sp, &msg);
                            // don't bother reporting every source of
                            // conflict for a particular element of `last`.
                            continue 'each_last;
                        }
                        Ok(true) => {}
                        Ok(false) => {
                            let may_be = if last.tokens.len() == 1 &&
                                suffix_first.tokens.len() == 1
                            {
                                "is"
                            } else {
                                "may be"
                            };

                            on_fail.react(
                                cx, sp,
                                &format!("`${name}:{frag}` {may_be} followed by `{next}`, which \
                                          is not allowed for `{frag}` fragments",
                                         name=name,
                                         frag=frag_spec,
                                         next=token_to_string(next_token),
                                         may_be=may_be));
                        }
                    }
                }
            }
        }
    }
    last
}
|
|
|
|
|
|
|
|
|
|
|
|
fn token_can_be_followed_by_any(tok: &Token) -> bool {
|
|
|
|
if let &MatchNt(_, ref frag_spec, _, _) = tok {
|
|
|
|
frag_can_be_followed_by_any(&frag_spec.name.as_str())
|
|
|
|
} else {
|
|
|
|
// (Non NT's can always be followed by anthing in matchers.)
|
|
|
|
true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// True if a fragment of type `frag` can be followed by any sort of
/// token. We use this (among other things) as a useful approximation
/// for when `frag` can be followed by a repetition like `$(...)*` or
/// `$(...)+`. In general, these can be a bit tricky to reason about,
/// so we adopt a conservative position that says that any fragment
/// specifier which consumes at most one token tree can be followed by
/// a fragment specifier (indeed, these fragments can be followed by
/// ANYTHING without fear of future compatibility hazards).
fn frag_can_be_followed_by_any(frag: &str) -> bool {
    match frag {
        // `item` is always terminated by `}` or `;`; each of the other
        // listed kinds consumes exactly one token tree.
        "item" | "block" | "ident" | "meta" | "tt" => true,

        // everything else may consume an unbounded number of tokens.
        _ => false,
    }
}
|
|
|
|
|
2015-05-15 13:20:26 -04:00
|
|
|
/// True if a fragment of type `frag` can be followed by any sort of
/// token; conservative approximation used when reasoning about what
/// may follow a repetition like `$(...)*` or `$(...)+`.
///
/// NOTE(review): this function is a semantic duplicate of
/// `frag_can_be_followed_by_any` above and has no caller in this
/// section of the file — it looks like dead code left behind by a
/// rename. Consider removing it (or delegating) once callers are
/// confirmed absent.
fn can_be_followed_by_any(frag: &str) -> bool {
    match frag {
        // `item` is always terminated by `}` or `;`; each of the other
        // listed kinds consumes exactly one token tree.
        "item" | "block" | "ident" | "meta" | "tt" => true,

        // everything else may consume an unbounded number of tokens.
        _ => false,
    }
}
|
|
|
|
|
|
|
|
/// True if `frag` can legally be followed by the token `tok`. For
|
2015-11-12 20:55:28 +01:00
|
|
|
/// fragments that can consume an unbounded number of tokens, `tok`
|
2015-05-15 13:20:26 -04:00
|
|
|
/// must be within a well-defined follow set. This is intended to
|
|
|
|
/// guarantee future compatibility: for example, without this rule, if
|
|
|
|
/// we expanded `expr` to include a new binary operator, we might
|
|
|
|
/// break macros that were relying on that binary operator as a
|
|
|
|
/// separator.
|
2015-01-18 17:18:29 +01:00
|
|
|
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, String> {
|
2015-01-02 16:41:24 -05:00
|
|
|
if let &CloseDelim(_) = tok {
|
2015-05-15 13:20:26 -04:00
|
|
|
// closing a token tree can never be matched by any fragment;
|
|
|
|
// iow, we always require that `(` and `)` match, etc.
|
2015-01-19 22:43:15 +01:00
|
|
|
Ok(true)
|
|
|
|
} else {
|
|
|
|
match frag {
|
|
|
|
"item" => {
|
|
|
|
// since items *must* be followed by either a `;` or a `}`, we can
|
|
|
|
// accept anything after them
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"block" => {
|
2015-10-07 23:11:25 +01:00
|
|
|
// anything can follow block, the braces provide an easy boundary to
|
2015-01-19 22:43:15 +01:00
|
|
|
// maintain
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"stmt" | "expr" => {
|
|
|
|
match *tok {
|
|
|
|
FatArrow | Comma | Semi => Ok(true),
|
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"pat" => {
|
|
|
|
match *tok {
|
|
|
|
FatArrow | Comma | Eq => Ok(true),
|
2015-09-24 23:05:02 +03:00
|
|
|
Ident(i, _) if i.name.as_str() == "if" || i.name.as_str() == "in" => Ok(true),
|
2015-01-19 22:43:15 +01:00
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"path" | "ty" => {
|
|
|
|
match *tok {
|
2015-07-12 15:53:04 -07:00
|
|
|
Comma | FatArrow | Colon | Eq | Gt | Semi => Ok(true),
|
2015-09-24 23:05:02 +03:00
|
|
|
Ident(i, _) if i.name.as_str() == "as" => Ok(true),
|
2015-01-19 22:43:15 +01:00
|
|
|
_ => Ok(false)
|
|
|
|
}
|
|
|
|
},
|
|
|
|
"ident" => {
|
|
|
|
// being a single token, idents are harmless
|
|
|
|
Ok(true)
|
|
|
|
},
|
|
|
|
"meta" | "tt" => {
|
|
|
|
// being either a single token or a delimited sequence, tt is
|
|
|
|
// harmless
|
|
|
|
Ok(true)
|
|
|
|
},
|
2015-01-23 18:51:12 +03:00
|
|
|
_ => Err(format!("invalid fragment specifier `{}`", frag))
|
2015-01-19 22:43:15 +01:00
|
|
|
}
|
2015-01-02 16:41:24 -05:00
|
|
|
}
|
|
|
|
}
|
2015-11-12 20:55:28 +01:00
|
|
|
|
|
|
|
fn has_legal_fragment_specifier(tok: &Token) -> Result<(), String> {
|
|
|
|
debug!("has_legal_fragment_specifier({:?})", tok);
|
|
|
|
if let &MatchNt(_, ref frag_spec, _, _) = tok {
|
|
|
|
let s = &frag_spec.name.as_str();
|
|
|
|
if !is_legal_fragment_specifier(s) {
|
|
|
|
return Err(s.to_string());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// True iff `frag` names one of the fragment specifiers understood in
/// `$name:frag` matchers.
fn is_legal_fragment_specifier(frag: &str) -> bool {
    // The closed set of fragment specifiers accepted by the parser.
    ["item", "block", "stmt", "expr", "pat",
     "path", "ty", "ident", "meta", "tt"].contains(&frag)
}
|