// rust/src/libsyntax/ext/tt/macro_parser.rs

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This is an Earley-like parser, without support for in-grammar
//! nonterminals: it handles named nonterminals only by calling out to the
//! main Rust parser (and it commits to a nonterminal fully when it hits one
//! in a grammar). This means that there are no completer or predictor rules,
//! and therefore no need to store one column per token: instead, there's a
//! set of current Earley items and a set of next ones. Instead of NTs, we
//! have a special case for Kleene star. The big-O, in pathological cases, is
//! worse than traditional Earley parsing, but it's an easier fit for
//! Macro-by-Example-style rules, and I think the overhead is lower. (In
//! order to prevent the pathological case, we'd need to lazily construct the
//! resulting `NamedMatch`es at the very end. It'd be a pain, and require
//! more memory to keep around old items, but it would also save overhead.)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a spot in the middle of a matcher, written here as a dot.
//! For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a token at a time, maintaining a list
//! of items consistent with the current position in the input: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be
//! valid if the macro invocation is now over, `bb_eis` with items that are
//! waiting on a Rust nonterminal like `$e:expr`, and `next_eis` with items
//! that are waiting on a particular token. Most of the logic concerns moving
//! the · through the repetitions indicated by Kleene stars. It only advances
//! or calls out to the real Rust parser when no `cur_eis` items remain.
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
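//!
//! For orientation, here is a sketch (hypothetical macro, not part of this
//! module) of what the matcher from the walkthrough above looks like in
//! surface syntax:
//!
//! ```rust,ignore
//! macro_rules! m {
//!     // matcher `a $( a )* a b`, as traced step by step above
//!     (a $( a )* a b) => { () };
//! }
//! m!(a a a a b); // the `$( a )*` repetition absorbs the two middle `a`s
//! ```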
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast::Ident;
use syntax_pos::{self, BytePos, mk_sp, Span};
use codemap::Spanned;
use errors::FatalError;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::parser::{PathStyle, Parser};
use parse::token::{DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use tokenstream::{self, TokenTree};
use util::small_vector::SmallVector;

use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};

// To avoid costly uniqueness checks, we require that `MatchedSeq` always has
// a nonempty body.

#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
    Tt(tokenstream::TokenTree),
    TtSeq(Vec<tokenstream::TokenTree>),
}

impl TokenTreeOrTokenTreeVec {
    fn len(&self) -> usize {
        match *self {
            TtSeq(ref v) => v.len(),
            Tt(ref tt) => tt.len(),
        }
    }

    fn get_tt(&self, index: usize) -> TokenTree {
        match *self {
            TtSeq(ref v) => v[index].clone(),
            Tt(ref tt) => tt.get_tt(index),
        }
    }
}
/// An unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
    elts: TokenTreeOrTokenTreeVec,
    idx: usize,
}

#[derive(Clone)]
pub struct MatcherPos {
    stack: Vec<MatcherTtFrame>,
    top_elts: TokenTreeOrTokenTreeVec,
    sep: Option<Token>,
    idx: usize,
    up: Option<Box<MatcherPos>>,
    matches: Vec<Vec<Rc<NamedMatch>>>,
    match_lo: usize,
    match_cur: usize,
    match_hi: usize,
    sp_lo: BytePos,
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;

pub fn count_names(ms: &[TokenTree]) -> usize {
    ms.iter().fold(0, |count, elt| {
        count + match *elt {
            TokenTree::Sequence(_, ref seq) => {
                seq.num_captures
            }
            TokenTree::Delimited(_, ref delim) => {
                count_names(&delim.tts)
            }
            TokenTree::Token(_, MatchNt(..)) => {
                1
            }
            TokenTree::Token(..) => 0,
        }
    })
}
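// Illustrative example (hypothetical matcher, not from this file): for the
// matcher `$e:expr , $( $i:ident ),*`, `count_names` returns 2: one for the
// `MatchNt` binder `$e`, plus the sequence's `num_captures` of 1 for `$i`.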
pub fn initial_matcher_pos(ms: Vec<TokenTree>, sep: Option<Token>, lo: BytePos)
                           -> Box<MatcherPos> {
    let match_idx_hi = count_names(&ms[..]);
    let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
    Box::new(MatcherPos {
        stack: vec![],
        top_elts: TtSeq(ms),
        sep: sep,
        idx: 0,
        up: None,
        matches: matches,
        match_lo: 0,
        match_cur: 0,
        match_hi: match_idx_hi,
        sp_lo: lo
    })
}
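// Illustrative note: `matches` holds one inner `Vec` per named capture
// counted by `count_names`, indexed by binder position; `match_lo..match_hi`
// is the half-open range of binders a position may bind (for the initial
// position above, all of them).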
/// `NamedMatch` is a pattern-match result for a single `token::MatchNt`:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the `NamedMatch` have the same nonterminal type
/// (expr, item, etc). Each leaf in a single `NamedMatch` corresponds to a
/// single `token::MatchNt` in the `TokenTree` that produced it.
///
/// The in-memory structure of a particular `NamedMatch` represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
/// the `MatchedNonterminal`s, will depend on the token tree it was applied
/// to: each `MatchedSeq` corresponds to a single `TokenTree::Sequence` in
/// the originating token tree. The depth of the `NamedMatch` structure will
/// therefore depend only on the nesting depth of `TokenTree::Sequence`s in
/// the originating token tree it was derived from.
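///
/// Illustrative sketch (hypothetical matcher, not from this module): matching
/// the input `x, y` against the matcher `$( $i:ident ),*` binds `i` to a
/// match of roughly this shape:
///
/// ```text
/// MatchedSeq([MatchedNonterminal(NtIdent(x)), MatchedNonterminal(NtIdent(y))],
///            <span of `x, y`>)
/// ```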
pub enum NamedMatch {
    MatchedSeq(Vec<Rc<NamedMatch>>, syntax_pos::Span),
    MatchedNonterminal(Rc<Nonterminal>)
}
fn nameize(ms: &[TokenTree], res: &[Rc<NamedMatch>]) -> NamedParseResult {
    fn n_rec(m: &TokenTree, res: &[Rc<NamedMatch>],
             ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize)
             -> Result<(), (syntax_pos::Span, String)> {
        match *m {
            TokenTree::Sequence(_, ref seq) => {
                for next_m in &seq.tts {
                    n_rec(next_m, res, ret_val, idx)?
                }
            }
            TokenTree::Delimited(_, ref delim) => {
                for next_m in &delim.tts {
                    n_rec(next_m, res, ret_val, idx)?;
                }
            }
            TokenTree::Token(sp, MatchNt(bind_name, _)) => {
                match ret_val.entry(bind_name) {
                    Vacant(spot) => {
                        spot.insert(res[*idx].clone());
                        *idx += 1;
                    }
                    Occupied(..) => {
                        return Err((sp, format!("duplicated bind name: {}", bind_name)))
                    }
                }
            }
            TokenTree::Token(sp, SubstNt(..)) => {
                return Err((sp, "missing fragment specifier".to_string()))
            }
            TokenTree::Token(..) => (),
        }

        Ok(())
    }

    let mut ret_val = HashMap::new();
    let mut idx = 0;
    for m in ms {
        match n_rec(m, res, &mut ret_val, &mut idx) {
            Ok(_) => {},
            Err((sp, msg)) => return Error(sp, msg),
        }
    }

    Success(ret_val)
}
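// Illustrative note: `nameize` pairs the n-th `MatchNt` binder encountered in
// matcher order with `res[n]`, so for a hypothetical matcher
// `$a:expr + $b:expr` the returned map is `{ a => res[0], b => res[1] }`.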
pub enum ParseResult<T> {
    Success(T),
    /// Arm failed to match. If the second parameter is `token::Eof`, it
    /// indicates an unexpected end of macro invocation. Otherwise, it
    /// indicates that no rules expected the given token.
    Failure(syntax_pos::Span, Token),
    /// Fatal error (malformed macro?). Abort compilation.
    Error(syntax_pos::Span, String)
}
pub fn parse_failure_msg(tok: Token) -> String {
    match tok {
        token::Eof => "unexpected end of macro invocation".to_string(),
        _ => format!("no rules expected the token `{}`", pprust::token_to_string(&tok)),
    }
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1: &Token, t2: &Token) -> bool {
    match (t1, t2) {
        (&token::Ident(id1), &token::Ident(id2)) |
        (&token::Lifetime(id1), &token::Lifetime(id2)) =>
            id1.name == id2.name,
        _ => *t1 == *t2
    }
}
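// Illustrative example: two `foo` identifiers produced by different macro
// expansions carry different syntax contexts, but `token_name_eq` treats them
// as equal because it compares only the underlying names.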
pub fn parse(sess: &ParseSess, rdr: TtReader, ms: &[TokenTree]) -> NamedParseResult {
    let mut parser = Parser::new_with_doc_flag(sess, Box::new(rdr), true);
    let mut cur_eis = SmallVector::one(initial_matcher_pos(ms.to_owned(), None, parser.span.lo));

    loop {
        let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
        let mut next_eis = Vec::new(); // or proceed normally
        let mut eof_eis = Vec::new();

        // for each Earley item
        while let Some(mut ei) = cur_eis.pop() {
            // When unzipped trees end, remove them
            while ei.idx >= ei.top_elts.len() {
                match ei.stack.pop() {
                    Some(MatcherTtFrame { elts, idx }) => {
                        ei.top_elts = elts;
                        ei.idx = idx + 1;
                    }
                    None => break
                }
            }

            let idx = ei.idx;
            let len = ei.top_elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if ei.up.is_some() {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)

                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position
                        let mut new_pos = ei.up.clone().unwrap();

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for idx in ei.match_lo..ei.match_hi {
                            let sub = ei.matches[idx].clone();
                            new_pos.matches[idx]
                                   .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
                                                                       parser.span.hi))));
                        }

                        new_pos.match_cur = ei.match_hi;
                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?

                    // Check if we need a separator
                    if idx == len && ei.sep.is_some() {
                        if ei.sep.as_ref().map(|ref sep| token_name_eq(&parser.token, sep))
                                          .unwrap_or(false) {
                            // i'm conflicted about whether this should be hygienic....
                            // though in this case, if the separators are never legal
                            // idents, it shouldn't matter.
                            // ei.match_cur = ei.match_lo;
                            ei.idx += 1;
                            next_eis.push(ei);
                        }
                    } else { // we don't need a separator
                        ei.match_cur = ei.match_lo;
                        ei.idx = 0;
                        cur_eis.push(ei);
                    }
                } else {
                    eof_eis.push(ei);
                }
            } else {
                match ei.top_elts.get_tt(idx) {
                    /* need to descend into sequence */
                    TokenTree::Sequence(sp, seq) => {
                        if seq.op == tokenstream::KleeneOp::ZeroOrMore {
                            let mut new_ei = ei.clone();
                            new_ei.match_cur += seq.num_captures;
                            new_ei.idx += 1;
                            // we specifically matched zero repeats.
                            for idx in ei.match_cur..ei.match_cur + seq.num_captures {
                                new_ei.matches[idx].push(Rc::new(MatchedSeq(vec![], sp)));
                            }

                            cur_eis.push(new_ei);
                        }

                        let matches: Vec<_> = (0..ei.matches.len())
                            .map(|_| Vec::new()).collect();
                        cur_eis.push(Box::new(MatcherPos {
                            stack: vec![],
                            sep: seq.separator.clone(),
                            idx: 0,
                            matches: matches,
                            match_lo: ei.match_cur,
                            match_cur: ei.match_cur,
                            match_hi: ei.match_cur + seq.num_captures,
                            up: Some(ei),
                            sp_lo: sp.lo,
                            top_elts: Tt(TokenTree::Sequence(sp, seq)),
                        }));
                    }
                    TokenTree::Token(_, MatchNt(..)) => {
                        // Built-in nonterminals never start with these tokens,
                        // so we can eliminate them from consideration.
                        match parser.token {
                            token::CloseDelim(_) => {},
                            _ => bb_eis.push(ei),
                        }
                    }
                    TokenTree::Token(sp, SubstNt(..)) => {
                        return Error(sp, "missing fragment specifier".to_string())
                    }
                    seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => {
                        let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
                        let idx = ei.idx;
                        ei.stack.push(MatcherTtFrame {
                            elts: lower_elts,
                            idx: idx,
                        });
                        ei.idx = 0;
                        cur_eis.push(ei);
                    }
                    TokenTree::Token(_, ref t) => {
                        if token_name_eq(t, &parser.token) {
                            ei.idx += 1;
                            next_eis.push(ei);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if token_name_eq(&parser.token, &token::Eof) {
            if eof_eis.len() == 1 {
                let v = eof_eis[0].matches.iter_mut()
                    .map(|dv| dv.pop().unwrap()).collect::<Vec<_>>();
                return nameize(ms, &v[..]);
            } else if eof_eis.len() > 1 {
                return Error(parser.span, "ambiguity: multiple successful parses".to_string());
            } else {
                return Failure(parser.span, token::Eof);
            }
        } else {
            if (!bb_eis.is_empty() && !next_eis.is_empty())
                || bb_eis.len() > 1 {
                let nts = bb_eis.iter().map(|ei| match ei.top_elts.get_tt(ei.idx) {
                    TokenTree::Token(_, MatchNt(bind, name)) => {
                        format!("{} ('{}')", name, bind)
                    }
                    _ => panic!()
                }).collect::<Vec<String>>().join(" or ");
                return Error(parser.span, format!(
                    "local ambiguity: multiple parsing options: {}",
                    match next_eis.len() {
                        0 => format!("built-in NTs {}.", nts),
                        1 => format!("built-in NTs {} or 1 other option.", nts),
                        n => format!("built-in NTs {} or {} other options.", nts, n),
                    }
                ))
            } else if bb_eis.is_empty() && next_eis.is_empty() {
                return Failure(parser.span, parser.token);
            } else if !next_eis.is_empty() {
                /* Now process the next token */
                cur_eis.extend(next_eis.drain(..));
                parser.bump();
            } else /* bb_eis.len() == 1 */ {
                let mut ei = bb_eis.pop().unwrap();
                if let TokenTree::Token(span, MatchNt(_, ident)) = ei.top_elts.get_tt(ei.idx) {
                    let match_cur = ei.match_cur;
                    ei.matches[match_cur].push(Rc::new(MatchedNonterminal(
                        Rc::new(parse_nt(&mut parser, span, &ident.name.as_str())))));
                    ei.idx += 1;
                    ei.match_cur += 1;
                } else {
                    unreachable!()
                }
                cur_eis.push(ei);
            }
        }

        assert!(!cur_eis.is_empty());
    }
}
pub fn parse_nt<'a>(p: &mut Parser<'a>, sp: Span, name: &str) -> Nonterminal {
    match name {
        "tt" => {
            p.quote_depth += 1; // but in theory, non-quoted tts might be useful
            let mut tt = panictry!(p.parse_token_tree());
            p.quote_depth -= 1;
            while let TokenTree::Token(sp, token::Interpolated(nt)) = tt {
                if let token::NtTT(..) = *nt {
                    match Rc::try_unwrap(nt) {
                        Ok(token::NtTT(sub_tt)) => tt = sub_tt,
                        Ok(_) => unreachable!(),
                        Err(nt_rc) => match *nt_rc {
                            token::NtTT(ref sub_tt) => tt = sub_tt.clone(),
                            _ => unreachable!(),
                        },
                    }
                } else {
                    tt = TokenTree::Token(sp, token::Interpolated(nt.clone()));
                    break
                }
            }
            return token::NtTT(tt);
        }
        _ => {}
    }
    // check at the beginning and the parser checks after each bump
    p.check_unknown_macro_variable();
    match name {
        "item" => match panictry!(p.parse_item()) {
            Some(i) => token::NtItem(i),
            None => {
                p.fatal("expected an item keyword").emit();
                panic!(FatalError);
            }
        },
        "block" => token::NtBlock(panictry!(p.parse_block())),
        "stmt" => match panictry!(p.parse_stmt()) {
            Some(s) => token::NtStmt(s),
            None => {
                p.fatal("expected a statement").emit();
                panic!(FatalError);
            }
        },
        "pat" => token::NtPat(panictry!(p.parse_pat())),
        "expr" => token::NtExpr(panictry!(p.parse_expr())),
        "ty" => token::NtTy(panictry!(p.parse_ty())),
        // this could be handled like a token, since it is one
        "ident" => match p.token {
            token::Ident(sn) => {
                p.bump();
                token::NtIdent(Spanned::<Ident>{node: sn, span: p.span})
            }
            _ => {
                let token_str = pprust::token_to_string(&p.token);
                p.fatal(&format!("expected ident, found {}",
                                 &token_str[..])).emit();
                panic!(FatalError)
            }
        },
        "path" => {
            token::NtPath(panictry!(p.parse_path(PathStyle::Type)))
        },
        "meta" => token::NtMeta(panictry!(p.parse_meta_item())),
        // this is not supposed to happen, since it has been checked
        // when compiling the macro.
        _ => p.span_bug(sp, "invalid fragment specifier")
    }
}
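// Illustrative note: a matcher fragment like `$e:expr` reaches `parse_nt`
// with `name == "expr"`, so the black-box step in `parse` above ends up
// calling `p.parse_expr()` and wrapping the result in `token::NtExpr`.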