rust/src/libsyntax/ext/tt/macro_parser.rs

447 lines
16 KiB
Rust
Raw Normal View History

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
2012-06-12 10:59:50 -07:00
// Earley-like parser for macros.
use ast;
use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident};
2013-01-30 09:56:33 -08:00
use codemap::{BytePos, mk_sp};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Token, EOF, Nonterminal};
use parse::token;
use collections::HashMap;
2013-06-24 20:40:33 -04:00
use std::vec;
2012-06-12 10:59:50 -07:00
/* This is an Earley-like parser, without support for in-grammar nonterminals,
only by calling out to the main rust parser for named nonterminals (which it
commits to fully when it hits one in a grammar). This means that there are no
completer or predictor rules, and therefore no need to store one column per
token: instead, there's a set of current Earley items and a set of next
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
pathological cases, is worse than traditional Earley parsing, but it's an
easier fit for Macro-by-Example-style rules, and I think the overhead is
lower. (In order to prevent the pathological case, we'd need to lazily
construct the resulting `NamedMatch`es at the very end. It'd be a pain,
and require more memory to keep around old items, but it would also save
overhead)*/
/* Quick intro to how the parser works:
A 'position' is a dot in the middle of a matcher, represented here as
a `·`. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
The parser walks through the input a character at a time, maintaining a list
of items consistent with the current position in the input string: `cur_eis`.
As it processes them, it fills up `eof_eis` with items that would be valid if
the macro invocation is now over, `bb_eis` with items that are waiting on
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
on a particular token. Most of the logic concerns moving the · through the
repetitions indicated by Kleene stars. It only advances or calls out to the
real Rust parser when no `cur_eis` items remain.
Example: Start parsing `a a a a b` against [· a $( a )* a b].
Remaining input: `a a a a b`
next_eis: [· a $( a )* a b]
- - - Advance over an `a`. - - -
Remaining input: `a a a b`
cur: [a · $( a )* a b]
Descend/Skip (first item).
next: [a $( · a )* a b] [a $( a )* · a b].
- - - Advance over an `a`. - - -
Remaining input: `a a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b]
- - - Advance over a `b`. - - -
Remaining input: ``
eof: [a $( a )* a b ·]
*/
2012-06-12 10:59:50 -07:00
/* to avoid costly uniqueness checks, we require that `MatchSeq` always has a
2012-06-12 10:59:50 -07:00
nonempty body. */
2013-07-02 12:47:32 -07:00
#[deriving(Clone)]
// A single Earley "item": the position of the dot within a matcher
// sequence, together with the partial matches accumulated so far for the
// names this item can bind.
pub struct MatcherPos {
elts: ~[ast::Matcher], // maybe should be <'>? Need to understand regions.
sep: Option<Token>, // separator expected between repetitions, if any
idx: uint, // position of the dot within `elts`
up: Option<~MatcherPos>, // enclosing position when inside a MatchSeq; None at top level
matches: ~[~[@NamedMatch]], // one growing list of match trees per bindable name
match_lo: uint, match_hi: uint, // half-open range of binder indices this item may touch
sp_lo: BytePos, // low end of this item's span, used when building MatchedSeq spans
}
2012-06-12 10:59:50 -07:00
pub fn count_names(ms: &[Matcher]) -> uint {
ms.iter().fold(0, |ct, m| {
2012-08-06 12:34:08 -07:00
ct + match m.node {
MatchTok(_) => 0u,
MatchSeq(ref more_ms, _, _, _, _) => count_names((*more_ms)),
MatchNonterminal(_, _, _) => 1u
}})
2012-06-12 10:59:50 -07:00
}
// Build the starting `MatcherPos` for matching against `ms`: the dot sits
// at index 0, there is no enclosing repetition, and one empty list of
// match trees is allocated per bindable name. `sep` is the separator of
// the (non-existent) enclosing sequence and `lo` the span's low end.
pub fn initial_matcher_pos(ms: ~[Matcher], sep: Option<Token>, lo: BytePos)
                           -> ~MatcherPos {
    // Binder indices increase monotonically through the matcher list, so
    // the last MatchSeq/MatchNonterminal seen carries the highest bound.
    let mut highest = 0u;
    for elt in ms.iter() {
        match elt.node {
            MatchTok(_) => {}
            MatchSeq(_, _, _, _, seq_hi) => { highest = seq_hi; }
            MatchNonterminal(_, _, nt_pos) => { highest = nt_pos + 1u; }
        }
    }
    // One (initially empty) match-tree list per bindable name.
    let empty_matches = vec::from_fn(count_names(ms), |_i| ~[]);
    ~MatcherPos {
        elts: ms,
        sep: sep,
        idx: 0u,
        up: None,
        matches: empty_matches,
        match_lo: 0u,
        match_hi: highest,
        sp_lo: lo
    }
}
// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
// so it is associated with a single ident in a parse, and all
// MatchedNonterminal's in the NamedMatch have the same nonterminal type
// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
// single matcher_nonterminal in the ast::Matcher that produced it.
//
// It should probably be renamed, it has more or less exact correspondence to
// ast::match nodes, and the in-memory structure of a particular NamedMatch
// represents the match that occurred when a particular subset of an
// ast::match -- those ast::Matcher nodes leading to a single
// MatchNonterminal -- was applied to a particular token tree.
//
// The width of each MatchedSeq in the NamedMatch, and the identity of the
// MatchedNonterminal's, will depend on the token tree it was applied to: each
// MatchedSeq corresponds to a single MatchSeq in the originating
// ast::Matcher. The depth of the NamedMatch structure will therefore depend
// only on the nesting depth of ast::MatchSeq's in the originating
// ast::Matcher it was derived from.
pub enum NamedMatch {
// one subtree per repetition of the originating MatchSeq, plus the span
// the whole repetition covered
MatchedSeq(~[@NamedMatch], codemap::Span),
// a single nonterminal parsed by calling back into the real Rust parser
MatchedNonterminal(Nonterminal)
}
2012-06-12 10:59:50 -07:00
// Turn the flat list of match results `res` (indexed by binder position)
// into a map keyed by the bound identifiers declared in `ms`. Fatally
// errors if the same name is bound twice.
pub fn nameize(p_s: @ParseSess, ms: &[Matcher], res: &[@NamedMatch])
-> HashMap<Ident, @NamedMatch> {
// walk one matcher, recursing into sequences, inserting each
// nonterminal's result into `ret_val` under its bind name
fn n_rec(p_s: @ParseSess, m: &Matcher, res: &[@NamedMatch],
ret_val: &mut HashMap<Ident, @NamedMatch>) {
match *m {
codemap::Spanned {node: MatchTok(_), .. } => (),
codemap::Spanned {node: MatchSeq(ref more_ms, _, _, _, _), .. } => {
for next_m in more_ms.iter() {
n_rec(p_s, next_m, res, ret_val)
2012-12-04 21:13:02 -08:00
};
}
codemap::Spanned {
node: MatchNonterminal(bind_name, _, idx),
span
} => {
// the same ident bound in two places is a hard error
if ret_val.contains_key(&bind_name) {
let string = token::get_ident(bind_name);
p_s.span_diagnostic
.span_fatal(span, "duplicated bind name: " + string.get())
}
ret_val.insert(bind_name, res[idx]);
}
}
}
let mut ret_val = HashMap::new();
for m in ms.iter() { n_rec(p_s, m, res, &mut ret_val) }
ret_val
}
pub enum ParseResult {
// the whole input matched; each bound name maps to its match
Success(HashMap<Ident, @NamedMatch>),
// the input did not match this matcher (e.g. unexpected token or EOF)
Failure(codemap::Span, ~str),
// an unrecoverable problem, e.g. an ambiguous parse
Error(codemap::Span, ~str)
}
2014-02-07 00:38:33 +02:00
// Run `parse` and unwrap the result: a successful parse yields the map of
// bound names, while a failure or error is reported as a fatal diagnostic
// on the offending span.
pub fn parse_or_else<R: Reader>(sess: @ParseSess,
                                cfg: ast::CrateConfig,
                                rdr: R,
                                ms: ~[Matcher])
                                -> HashMap<Ident, @NamedMatch> {
    match parse(sess, cfg, rdr, ms) {
        Success(m) => m,
        Failure(sp, str) | Error(sp, str) =>
            sess.span_diagnostic.span_fatal(sp, str)
    }
}
// Compare two tokens for equality by name only, ignoring syntax context
// (that is, an unhygienic comparison). Identifiers and lifetimes compare
// equal whenever their interned names agree; all other tokens fall back
// to structural equality.
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
    match (t1, t2) {
        (&token::IDENT(id1, _), &token::IDENT(id2, _)) => id1.name == id2.name,
        (&token::LIFETIME(id1), &token::LIFETIME(id2)) => id1.name == id2.name,
        _ => *t1 == *t2
    }
}
2014-02-07 00:38:33 +02:00
// Match the token stream from `rdr` against the matcher sequence `ms`,
// Earley-style. Each pass over the current token classifies the live
// items (`cur_eis`) into `eof_eis` (complete), `bb_eis` (waiting on a
// Rust nonterminal such as `$e:expr`) and `next_eis` (waiting on the next
// concrete token), then decides how to make progress. Returns Success
// with the named matches, Failure if the input doesn't fit, or Error on
// ambiguity.
pub fn parse<R: Reader>(sess: @ParseSess,
cfg: ast::CrateConfig,
rdr: R,
ms: &[Matcher])
-> ParseResult {
let mut cur_eis = ~[];
cur_eis.push(initial_matcher_pos(ms.to_owned(), None, rdr.peek().sp.lo));
2012-06-12 10:59:50 -07:00
// one iteration per input token
loop {
let mut bb_eis = ~[]; // black-box parsed by parser.rs
let mut next_eis = ~[]; // or proceed normally
let mut eof_eis = ~[];
2012-06-12 10:59:50 -07:00
2013-01-30 09:56:33 -08:00
// peek (don't consume) the token the current items are matched against
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
2012-06-12 10:59:50 -07:00
/* we append new items to this while we go */
loop {
let ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
2012-06-12 10:59:50 -07:00
let idx = ei.idx;
let len = ei.elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
2012-06-12 10:59:50 -07:00
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
2012-06-12 10:59:50 -07:00
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in range(ei.match_lo, ei.match_hi) {
2013-07-02 12:47:32 -07:00
let sub = ei.matches[idx].clone();
new_pos.matches[idx]
.push(@MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi)));
2012-06-12 10:59:50 -07:00
}
2012-09-10 18:28:00 -07:00
new_pos.idx += 1;
cur_eis.push(new_pos);
2012-06-12 10:59:50 -07:00
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
2013-07-02 12:47:32 -07:00
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
2013-07-02 12:47:32 -07:00
let mut ei_t = ei.clone();
2012-09-10 18:28:00 -07:00
ei_t.idx += 1;
next_eis.push(ei_t);
2012-06-12 10:59:50 -07:00
}
}
2012-08-03 19:59:04 -07:00
_ => { // we don't need a separator
// rewind the dot to start another repetition
let mut ei_t = ei;
2012-09-10 18:28:00 -07:00
ei_t.idx = 0;
cur_eis.push(ei_t);
2012-06-12 10:59:50 -07:00
}
}
} else {
// no parent: the top-level matcher is complete
eof_eis.push(ei);
2012-06-12 10:59:50 -07:00
}
} else {
2013-07-02 12:47:32 -07:00
match ei.elts[idx].node.clone() {
2012-06-12 10:59:50 -07:00
/* need to descend into sequence */
MatchSeq(ref matchers, ref sep, zero_ok,
match_idx_lo, match_idx_hi) => {
2012-06-12 10:59:50 -07:00
if zero_ok {
2013-07-02 12:47:32 -07:00
let mut new_ei = ei.clone();
2012-06-12 10:59:50 -07:00
new_ei.idx += 1u;
//we specifically matched zero repeats.
for idx in range(match_idx_lo, match_idx_hi) {
new_ei.matches[idx].push(@MatchedSeq(~[], sp));
}
cur_eis.push(new_ei);
2012-06-12 10:59:50 -07:00
}
// descend: a child position whose `up` is this item
let matches = vec::from_elem(ei.matches.len(), ~[]);
let ei_t = ei;
cur_eis.push(~MatcherPos {
2013-07-02 12:47:32 -07:00
elts: (*matchers).clone(),
sep: (*sep).clone(),
idx: 0u,
up: Some(ei_t),
matches: matches,
match_lo: match_idx_lo, match_hi: match_idx_hi,
sp_lo: sp.lo
2012-06-12 10:59:50 -07:00
});
}
// nonterminals are parsed by the real parser, below
MatchNonterminal(_,_,_) => { bb_eis.push(ei) }
MatchTok(ref t) => {
2013-07-02 12:47:32 -07:00
let mut ei_t = ei.clone();
2013-09-05 14:15:00 -07:00
//if (token_name_eq(t,&tok)) {
2014-01-19 19:21:14 +11:00
if token::mtwt_token_eq(t,&tok) {
2012-09-10 18:28:00 -07:00
ei_t.idx += 1;
next_eis.push(ei_t);
2012-09-10 18:28:00 -07:00
}
2012-06-12 10:59:50 -07:00
}
}
}
}
// all items classified for this token: decide how to advance
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &EOF) {
if eof_eis.len() == 1u {
2013-03-07 18:37:22 -05:00
// exactly one complete parse: collect one tree per binder
let mut v = ~[];
for dv in eof_eis[0u].matches.mut_iter() {
v.push(dv.pop().unwrap());
2013-03-07 18:37:22 -05:00
}
return Success(nameize(sess, ms, v));
2012-06-12 10:59:50 -07:00
} else if eof_eis.len() > 1u {
return Error(sp, ~"ambiguity: multiple successful parses");
2012-06-12 10:59:50 -07:00
} else {
return Failure(sp, ~"unexpected end of macro invocation");
2012-06-12 10:59:50 -07:00
}
} else {
// a nonterminal item may not coexist with other options
if (bb_eis.len() > 0u && next_eis.len() > 0u)
|| bb_eis.len() > 1u {
2013-06-11 02:34:14 +10:00
let nts = bb_eis.map(|ei| {
2012-08-06 12:34:08 -07:00
match ei.elts[ei.idx].node {
MatchNonterminal(bind, name, _) => {
format!("{} ('{}')",
token::get_ident(name),
token::get_ident(bind))
2012-07-24 11:44:17 -07:00
}
_ => fail!()
2013-06-11 02:34:14 +10:00
} }).connect(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
2013-09-27 21:01:58 -07:00
built-in NTs {} or {} other options.",
2012-08-22 17:24:52 -07:00
nts, next_eis.len()));
2014-01-19 19:21:14 +11:00
} else if bb_eis.len() == 0u && next_eis.len() == 0u {
return Failure(sp, format!("no rules expected the token `{}`",
token::to_str(&tok)));
2014-01-19 19:21:14 +11:00
} else if next_eis.len() > 0u {
2012-06-12 10:59:50 -07:00
/* Now process the next token */
2014-01-19 19:21:14 +11:00
while next_eis.len() > 0u {
cur_eis.push(next_eis.pop().unwrap());
2012-06-12 10:59:50 -07:00
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
2013-12-30 14:04:00 -08:00
// exactly one item waits on a nonterminal: hand off to the
// real Rust parser, then resync our reader by skipping the
// tokens it consumed
let mut rust_parser = Parser(sess, cfg.clone(), rdr.dup());
2012-06-12 10:59:50 -07:00
let mut ei = bb_eis.pop().unwrap();
2012-08-06 12:34:08 -07:00
match ei.elts[ei.idx].node {
MatchNonterminal(_, name, idx) => {
let name_string = token::get_ident(name);
ei.matches[idx].push(@MatchedNonterminal(
parse_nt(&mut rust_parser, name_string.get())));
2012-06-12 10:59:50 -07:00
ei.idx += 1u;
}
_ => fail!()
2012-06-12 10:59:50 -07:00
}
cur_eis.push(ei);
2012-06-12 10:59:50 -07:00
for _ in range(0, rust_parser.tokens_consumed) {
let _ = rdr.next_token();
}
2012-06-12 10:59:50 -07:00
}
}
2013-03-28 18:39:09 -07:00
assert!(cur_eis.len() > 0u);
2012-06-12 10:59:50 -07:00
}
}
// Parse one nonterminal of the kind named by `name` ("item", "expr",
// "ty", "ident", ...) by delegating to the real Rust parser `p`, and wrap
// the result in the corresponding `Nonterminal` variant. Unknown names
// and inputs that don't start the requested nonterminal are fatal errors
// reported through the parser's diagnostics.
pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
2012-08-06 12:34:08 -07:00
match name {
"item" => match p.parse_item(~[]) {
Some(i) => token::NtItem(i),
None => p.fatal("expected an item keyword")
},
"block" => token::NtBlock(p.parse_block()),
"stmt" => token::NtStmt(p.parse_stmt(~[])),
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)),
2012-06-12 10:59:50 -07:00
// this could be handled like a token, since it is one
"ident" => match p.token {
token::IDENT(sn,b) => { p.bump(); token::NtIdent(~sn,b) }
_ => {
let token_str = token::to_str(&p.token);
p.fatal(~"expected ident, found " + token_str)
}
},
"path" => {
token::NtPath(~p.parse_path(LifetimeAndTypesWithoutColons).path)
}
"attr" => token::NtAttr(@p.parse_attribute(false)),
"tt" => {
// token trees nested in quotes get special treatment in the parser
p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
let res = token::NtTT(@p.parse_token_tree());
p.quote_depth -= 1u;
2012-07-06 14:48:01 -07:00
res
}
"matchers" => token::NtMatchers(p.parse_matchers()),
_ => p.fatal(~"unsupported builtin nonterminal parser: " + name)
2012-06-12 10:59:50 -07:00
}
}