2012-12-03 16:48:01 -08:00
|
|
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
|
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
2012-06-12 10:59:50 -07:00
|
|
|
// Earley-like parser for macros.
|
2013-05-17 15:28:44 -07:00
|
|
|
|
2013-02-25 14:11:21 -05:00
|
|
|
use ast;
|
2013-09-02 02:50:59 +02:00
|
|
|
use ast::{matcher, match_tok, match_seq, match_nonterminal, Ident};
|
2013-01-30 09:56:33 -08:00
|
|
|
use codemap::{BytePos, mk_sp};
|
2012-12-23 17:41:37 -05:00
|
|
|
use codemap;
|
|
|
|
use parse::lexer::*; //resolve bug?
|
2013-02-21 00:16:31 -08:00
|
|
|
use parse::ParseSess;
|
2013-08-08 13:28:06 -04:00
|
|
|
use parse::attr::parser_attr;
|
2013-08-07 09:47:28 -07:00
|
|
|
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
|
2013-06-04 12:34:25 -07:00
|
|
|
use parse::token::{Token, EOF, to_str, nonterminal, get_ident_interner, ident_to_str};
|
2012-12-23 17:41:37 -05:00
|
|
|
use parse::token;
|
|
|
|
|
2013-06-24 20:40:33 -04:00
|
|
|
use std::hashmap::HashMap;
|
|
|
|
use std::vec;
|
2012-06-12 10:59:50 -07:00
|
|
|
|
2012-07-27 19:14:46 -07:00
|
|
|
/* This is an Earley-like parser, without support for in-grammar nonterminals,
|
2012-08-24 18:16:56 -07:00
|
|
|
only by calling out to the main rust parser for named nonterminals (which it
|
2012-07-27 19:14:46 -07:00
|
|
|
commits to fully when it hits one in a grammar). This means that there are no
|
|
|
|
completer or predictor rules, and therefore no need to store one column per
|
|
|
|
token: instead, there's a set of current Earley items and a set of next
|
|
|
|
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
|
|
|
|
pathological cases, is worse than traditional Earley parsing, but it's an
|
|
|
|
easier fit for Macro-by-Example-style rules, and I think the overhead is
|
2012-08-24 18:16:56 -07:00
|
|
|
lower. (In order to prevent the pathological case, we'd need to lazily
|
|
|
|
construct the resulting `named_match`es at the very end. It'd be a pain,
|
|
|
|
and require more memory to keep around old items, but it would also save
|
|
|
|
overhead)*/
|
|
|
|
|
|
|
|
/* Quick intro to how the parser works:
|
|
|
|
|
|
|
|
A 'position' is a dot in the middle of a matcher, usually represented as a
|
|
|
|
dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
|
|
|
|
|
|
|
|
The parser walks through the input a character at a time, maintaining a list
|
|
|
|
of items consistent with the current position in the input string: `cur_eis`.
|
|
|
|
|
|
|
|
As it processes them, it fills up `eof_eis` with items that would be valid if
|
|
|
|
the macro invocation is now over, `bb_eis` with items that are waiting on
|
|
|
|
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
|
|
|
|
on a particular token. Most of the logic concerns moving the · through the
|
|
|
|
repetitions indicated by Kleene stars. It only advances or calls out to the
|
|
|
|
real Rust parser when no `cur_eis` items remain
|
|
|
|
|
|
|
|
Example: Start parsing `a a a a b` against [· a $( a )* a b].
|
|
|
|
|
|
|
|
Remaining input: `a a a a b`
|
|
|
|
next_eis: [· a $( a )* a b]
|
|
|
|
|
|
|
|
- - - Advance over an `a`. - - -
|
|
|
|
|
|
|
|
Remaining input: `a a a b`
|
|
|
|
cur: [a · $( a )* a b]
|
|
|
|
Descend/Skip (first item).
|
|
|
|
next: [a $( · a )* a b] [a $( a )* · a b].
|
|
|
|
|
|
|
|
- - - Advance over an `a`. - - -
|
|
|
|
|
|
|
|
Remaining input: `a a b`
|
|
|
|
cur: [a $( a · )* a b] next: [a $( a )* a · b]
|
|
|
|
Finish/Repeat (first item)
|
|
|
|
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
|
|
|
|
|
|
|
|
- - - Advance over an `a`. - - - (this looks exactly like the last step)
|
|
|
|
|
|
|
|
Remaining input: `a b`
|
|
|
|
cur: [a $( a · )* a b] next: [a $( a )* a · b]
|
|
|
|
Finish/Repeat (first item)
|
|
|
|
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
|
|
|
|
|
|
|
|
- - - Advance over an `a`. - - - (this looks exactly like the last step)
|
|
|
|
|
|
|
|
Remaining input: `b`
|
|
|
|
cur: [a $( a · )* a b] next: [a $( a )* a · b]
|
|
|
|
Finish/Repeat (first item)
|
|
|
|
next: [a $( a )* · a b] [a $( · a )* a b]
|
|
|
|
|
|
|
|
- - - Advance over a `b`. - - -
|
|
|
|
|
|
|
|
Remaining input: ``
|
|
|
|
eof: [a $( a )* a b ·]
|
|
|
|
|
|
|
|
*/
|
2012-06-12 10:59:50 -07:00
|
|
|
|
|
|
|
|
2012-07-27 19:14:46 -07:00
|
|
|
/* to avoid costly uniqueness checks, we require that `match_seq` always has a
|
2012-06-12 10:59:50 -07:00
|
|
|
nonempty body. */
|
|
|
|
|
2013-07-02 12:47:32 -07:00
|
|
|
#[deriving(Clone)]
// Newtype around Option<~MatcherPos>: the nominal indirection breaks the
// type-level cycle between MatcherPos and its parent pointer (`up`).
pub enum matcher_pos_up { /* to break a circularity */
    matcher_pos_up(Option<~MatcherPos>)
}
|
|
|
|
|
2013-04-17 12:15:08 -04:00
|
|
|
pub fn is_some(mpu: &matcher_pos_up) -> bool {
|
|
|
|
match *mpu {
|
|
|
|
matcher_pos_up(None) => false,
|
2012-08-03 19:59:04 -07:00
|
|
|
_ => true
|
2012-06-12 10:59:50 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-02 12:47:32 -07:00
|
|
|
#[deriving(Clone)]
// One Earley item: a matcher sequence together with the position of the
// "dot" inside it and the bindings accumulated so far.
pub struct MatcherPos {
    elts: ~[ast::matcher], // maybe should be <'>? Need to understand regions.
    sep: Option<Token>, // separator token for the enclosing repetition, if any
    idx: uint, // index of the dot within `elts`
    up: matcher_pos_up, // parent position (None at top level); mutable for swapping only
    matches: ~[~[@named_match]], // accumulated bindings, one list per match slot
    // half-open range [match_lo, match_hi) of match slots this position binds
    match_lo: uint, match_hi: uint,
    sp_lo: BytePos, // byte offset where this position started matching
}
|
2012-06-12 10:59:50 -07:00
|
|
|
|
2013-04-17 12:15:08 -04:00
|
|
|
// Clone the parent MatcherPos out of a parent link.
// Fails if the link is empty, so callers must check `is_some` first.
pub fn copy_up(mpu: &matcher_pos_up) -> ~MatcherPos {
    match *mpu {
        matcher_pos_up(Some(ref parent)) => (*parent).clone(),
        matcher_pos_up(None) => fail!()
    }
}
|
|
|
|
|
2013-01-29 14:41:40 -08:00
|
|
|
pub fn count_names(ms: &[matcher]) -> uint {
|
2013-11-20 16:23:04 -08:00
|
|
|
ms.iter().fold(0, |ct, m| {
|
2012-08-06 12:34:08 -07:00
|
|
|
ct + match m.node {
|
2012-08-03 19:59:04 -07:00
|
|
|
match_tok(_) => 0u,
|
2012-12-04 10:50:00 -08:00
|
|
|
match_seq(ref more_ms, _, _, _, _) => count_names((*more_ms)),
|
2012-08-03 19:59:04 -07:00
|
|
|
match_nonterminal(_,_,_) => 1u
|
2013-11-20 16:23:04 -08:00
|
|
|
}})
|
2012-06-12 10:59:50 -07:00
|
|
|
}
|
|
|
|
|
2013-04-17 12:15:08 -04:00
|
|
|
// Build the root MatcherPos for matching `ms` against input starting at byte
// position `lo`. `sep` is the optional separator; the root has no parent.
pub fn initial_matcher_pos(ms: ~[matcher], sep: Option<Token>, lo: BytePos)
                        -> ~MatcherPos {
    // Find the highest match-slot index used anywhere in `ms`, so the root
    // position covers every named nonterminal.
    let mut match_idx_hi = 0u;
    for elt in ms.iter() {
        match elt.node {
            match_tok(_) => (),
            match_seq(_,_,_,_,hi) => {
                match_idx_hi = hi; // it is monotonic...
            }
            match_nonterminal(_,_,pos) => {
                match_idx_hi = pos+1u; // ...so latest is highest
            }
        }
    }
    // One (initially empty) list of bindings per named nonterminal.
    let matches = vec::from_fn(count_names(ms), |_i| ~[]);
    ~MatcherPos {
        elts: ms,
        sep: sep,
        idx: 0u, // dot starts at the beginning
        up: matcher_pos_up(None), // top level: no parent
        matches: matches,
        match_lo: 0u,
        match_hi: match_idx_hi,
        sp_lo: lo
    }
}
|
|
|
|
|
2012-07-27 19:14:46 -07:00
|
|
|
// named_match is a pattern-match result for a single ast::match_nonterminal:
|
|
|
|
// so it is associated with a single ident in a parse, and all
|
|
|
|
// matched_nonterminals in the named_match have the same nonterminal type
|
|
|
|
// (expr, item, etc). All the leaves in a single named_match correspond to a
|
|
|
|
// single matcher_nonterminal in the ast::matcher that produced it.
|
2012-07-27 17:42:32 -07:00
|
|
|
//
|
|
|
|
// It should probably be renamed, it has more or less exact correspondence to
|
2012-07-27 19:14:46 -07:00
|
|
|
// ast::match nodes, and the in-memory structure of a particular named_match
|
2012-07-27 17:42:32 -07:00
|
|
|
// represents the match that occurred when a particular subset of an
|
2012-07-27 19:14:46 -07:00
|
|
|
// ast::match -- those ast::matcher nodes leading to a single
|
|
|
|
// match_nonterminal -- was applied to a particular token tree.
|
2012-07-27 17:42:32 -07:00
|
|
|
//
|
2012-07-27 19:14:46 -07:00
|
|
|
// The width of each matched_seq in the named_match, and the identity of the
|
|
|
|
// matched_nonterminals, will depend on the token tree it was applied to: each
|
|
|
|
// matched_seq corresponds to a single match_seq in the originating
|
|
|
|
// ast::matcher. The depth of the named_match structure will therefore depend
|
|
|
|
// only on the nesting depth of ast::match_seqs in the originating
|
|
|
|
// ast::matcher it was derived from.
|
|
|
|
|
2013-01-29 14:41:40 -08:00
|
|
|
// The match tree bound to a single name; see the comment block above for the
// full description of its structure.
pub enum named_match {
    // one entry per repetition of the originating match_seq, plus the span
    // the repetitions covered
    matched_seq(~[@named_match], codemap::Span),
    // a leaf: one nonterminal parsed by the real Rust parser
    matched_nonterminal(nonterminal)
}
|
2012-06-12 10:59:50 -07:00
|
|
|
|
2013-02-21 00:16:31 -08:00
|
|
|
// An in-flight Earley item; owned so it can move between worklists.
pub type earley_item = ~MatcherPos;
|
2012-06-12 10:59:50 -07:00
|
|
|
|
2013-12-27 11:56:29 -08:00
|
|
|
// Turn the flat, slot-indexed match results `res` into a map keyed by the
// bound identifiers appearing in `ms`. Fatal error on a duplicate bind name.
pub fn nameize(p_s: @ParseSess, ms: &[matcher], res: &[@named_match])
            -> HashMap<Ident,@named_match> {
    // Walk one matcher, inserting any nonterminal bindings it (transitively)
    // contains into `ret_val`.
    fn n_rec(p_s: @ParseSess, m: &matcher, res: &[@named_match],
             ret_val: &mut HashMap<Ident, @named_match>) {
        match *m {
            // literal tokens bind nothing
            codemap::Spanned {node: match_tok(_), .. } => (),
            // recurse into each matcher of the sequence; note the slot
            // indices already account for nesting, so `res` is passed as-is
            codemap::Spanned {node: match_seq(ref more_ms, _, _, _, _), .. } => {
                for next_m in more_ms.iter() {
                    n_rec(p_s, next_m, res, ret_val)
                };
            }
            codemap::Spanned {
                node: match_nonterminal(ref bind_name, _, idx), span: sp
            } => {
                // each name may be bound only once per rule
                if ret_val.contains_key(bind_name) {
                    p_s.span_diagnostic.span_fatal(sp, ~"Duplicated bind name: "+
                                                   ident_to_str(bind_name))
                }
                ret_val.insert(*bind_name, res[idx]);
            }
        }
    }
    let mut ret_val = HashMap::new();
    for m in ms.iter() { n_rec(p_s, m, res, &mut ret_val) }
    ret_val
}
|
|
|
|
|
2013-01-29 14:41:40 -08:00
|
|
|
// Outcome of matching a token stream against a matcher.
pub enum parse_result {
    // everything matched; bindings keyed by name
    success(HashMap<Ident, @named_match>),
    // the input did not fit this matcher
    // NOTE(review): `failure` appears to be the "try another rule" case and
    // `error` the unrecoverable one — confirm against callers
    failure(codemap::Span, ~str),
    error(codemap::Span, ~str)
}
|
|
|
|
|
2013-12-27 13:40:07 -08:00
|
|
|
// Match `ms` against the tokens from `rdr`, turning either kind of
// unsuccessful parse into a fatal diagnostic at the offending span.
pub fn parse_or_else(sess: @ParseSess,
                     cfg: ast::CrateConfig,
                     rdr: @reader,
                     ms: ~[matcher])
                  -> HashMap<Ident, @named_match> {
    match parse(sess, cfg, rdr, ms) {
        success(m) => m,
        // both non-success variants are treated as fatal here
        failure(sp, str) => sess.span_diagnostic.span_fatal(sp, str),
        error(sp, str) => sess.span_diagnostic.span_fatal(sp, str)
    }
}
|
|
|
|
|
2013-07-10 11:52:39 -07:00
|
|
|
// perform a token equality check, ignoring syntax context (that is, an unhygienic comparison)
|
2013-09-05 14:14:31 -07:00
|
|
|
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
|
2013-07-10 11:52:39 -07:00
|
|
|
match (t1,t2) {
|
|
|
|
(&token::IDENT(id1,_),&token::IDENT(id2,_)) =>
|
|
|
|
id1.name == id2.name,
|
|
|
|
_ => *t1 == *t2
|
2013-09-05 14:14:31 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-27 11:56:29 -08:00
|
|
|
// The main matching loop: run the Earley-like algorithm described at the top
// of this file, matching the tokens from `rdr` against the matchers `ms`.
// Maintains a worklist of current items (`cur_eis`) per input token; calls
// out to the real Rust parser when exactly one item is blocked on a
// nonterminal. Returns `success` with the named bindings, or
// `failure`/`error` with a span and message.
pub fn parse(sess: @ParseSess,
             cfg: ast::CrateConfig,
             rdr: @reader,
             ms: &[matcher])
             -> parse_result {
    let mut cur_eis = ~[];
    cur_eis.push(initial_matcher_pos(ms.to_owned(), None, rdr.peek().sp.lo));

    loop {
        // one iteration per input token
        let mut bb_eis = ~[]; // black-box parsed by parser.rs
        let mut next_eis = ~[]; // or proceed normally
        let mut eof_eis = ~[];

        let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();

        /* we append new items to this while we go */
        while !cur_eis.is_empty() { /* for each Earley Item */
            let ei = cur_eis.pop();

            let idx = ei.idx;
            let len = ei.elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if is_some(&ei.up) {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)

                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position

                        let mut new_pos = copy_up(&ei.up);

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for idx in range(ei.match_lo, ei.match_hi) {
                            let sub = ei.matches[idx].clone();
                            new_pos.matches[idx]
                                   .push(@matched_seq(sub,
                                                      mk_sp(ei.sp_lo,
                                                            sp.hi)));
                        }

                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?

                    // the *_t vars are workarounds for the lack of unary move
                    match ei.sep {
                        Some(ref t) if idx == len => { // we need a separator
                            // i'm conflicted about whether this should be hygienic....
                            // though in this case, if the separators are never legal
                            // idents, it shouldn't matter.
                            if token_name_eq(&tok, t) { //pass the separator
                                let mut ei_t = ei.clone();
                                ei_t.idx += 1;
                                next_eis.push(ei_t);
                            }
                        }
                        _ => { // we don't need a separator
                            // rewind the dot to the start of the sequence
                            // so it can match another repetition
                            let mut ei_t = ei;
                            ei_t.idx = 0;
                            cur_eis.push(ei_t);
                        }
                    }
                } else {
                    // top-level matcher exhausted: valid only if input ends here
                    eof_eis.push(ei);
                }
            } else {
                match ei.elts[idx].node.clone() {
                    /* need to descend into sequence */
                    match_seq(ref matchers, ref sep, zero_ok,
                              match_idx_lo, match_idx_hi) => {
                        if zero_ok {
                            let mut new_ei = ei.clone();
                            new_ei.idx += 1u;
                            //we specifically matched zero repeats.
                            for idx in range(match_idx_lo, match_idx_hi) {
                                new_ei.matches[idx].push(@matched_seq(~[], sp));
                            }

                            cur_eis.push(new_ei);
                        }

                        // also try entering the sequence body: a child
                        // position whose parent is the current item
                        let matches = vec::from_elem(ei.matches.len(), ~[]);
                        let ei_t = ei;
                        cur_eis.push(~MatcherPos {
                            elts: (*matchers).clone(),
                            sep: (*sep).clone(),
                            idx: 0u,
                            up: matcher_pos_up(Some(ei_t)),
                            matches: matches,
                            match_lo: match_idx_lo, match_hi: match_idx_hi,
                            sp_lo: sp.lo
                        });
                    }
                    // blocked on the real Rust parser
                    match_nonterminal(_,_,_) => { bb_eis.push(ei) }
                    match_tok(ref t) => {
                        let mut ei_t = ei.clone();
                        //if (token_name_eq(t,&tok)) {
                        // NOTE(review): literal tokens are compared with the
                        // hygienic mtwt comparison here, unlike separators
                        // above — confirm this asymmetry is intended
                        if (token::mtwt_token_eq(t,&tok)) {
                            ei_t.idx += 1;
                            next_eis.push(ei_t);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if token_name_eq(&tok, &EOF) {
            if eof_eis.len() == 1u {
                // unambiguous success: collect one binding per match slot
                let mut v = ~[];
                for dv in eof_eis[0u].matches.mut_iter() {
                    v.push(dv.pop());
                }
                return success(nameize(sess, ms, v));
            } else if eof_eis.len() > 1u {
                return error(sp, ~"Ambiguity: multiple successful parses");
            } else {
                return failure(sp, ~"Unexpected end of macro invocation");
            }
        } else {
            // ambiguity: can't have both a nonterminal and ordinary tokens
            // wanting this position, nor two nonterminals
            if (bb_eis.len() > 0u && next_eis.len() > 0u)
                || bb_eis.len() > 1u {
                let nts = bb_eis.map(|ei| {
                    match ei.elts[ei.idx].node {
                        match_nonterminal(ref bind,ref name,_) => {
                            format!("{} ('{}')", ident_to_str(name),
                                    ident_to_str(bind))
                        }
                        _ => fail!()
                    } }).connect(" or ");
                return error(sp, format!(
                    "Local ambiguity: multiple parsing options: \
                     built-in NTs {} or {} other options.",
                    nts, next_eis.len()));
            } else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
                return failure(sp, ~"No rules expected the token: "
                               + to_str(get_ident_interner(), &tok));
            } else if (next_eis.len() > 0u) {
                /* Now process the next token */
                while(next_eis.len() > 0u) {
                    cur_eis.push(next_eis.pop());
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                // exactly one item is blocked on a nonterminal: hand a
                // duplicated reader to the real Rust parser
                let mut rust_parser = Parser(sess, cfg.clone(), rdr.dup());

                let mut ei = bb_eis.pop();
                match ei.elts[ei.idx].node {
                    match_nonterminal(_, ref name, idx) => {
                        ei.matches[idx].push(@matched_nonterminal(
                            parse_nt(&mut rust_parser, ident_to_str(name))));
                        ei.idx += 1u;
                    }
                    _ => fail!()
                }
                cur_eis.push(ei);

                // resync our reader with however far the Rust parser got
                rust_parser.tokens_consumed.times(|| {
                    let _ = rdr.next_token();
                });
            }
        }

        // some branch above must always have produced work for the next token
        assert!(cur_eis.len() > 0u);
    }
}
|
|
|
|
|
2013-12-30 14:04:00 -08:00
|
|
|
// Parse one named nonterminal (e.g. "expr", "ty") by dispatching to the
// corresponding method of the real Rust parser. Fatal error on an
// unrecognized name or on input that doesn't fit the nonterminal.
pub fn parse_nt(p: &mut Parser, name: &str) -> nonterminal {
    match name {
        "item" => match p.parse_item(~[]) {
            Some(i) => token::nt_item(i),
            None => p.fatal("expected an item keyword")
        },
        "block" => token::nt_block(p.parse_block()),
        "stmt" => token::nt_stmt(p.parse_stmt(~[])),
        "pat" => token::nt_pat(p.parse_pat()),
        "expr" => token::nt_expr(p.parse_expr()),
        "ty" => token::nt_ty(p.parse_ty(false /* no need to disambiguate*/)),
        // this could be handled like a token, since it is one
        "ident" => match p.token {
            token::IDENT(sn,b) => { p.bump(); token::nt_ident(~sn,b) }
            _ => {
                let token_str = token::to_str(get_ident_interner(), &p.token);
                p.fatal(~"expected ident, found " + token_str)
            }
        },
        "path" => {
            token::nt_path(~p.parse_path(LifetimeAndTypesWithoutColons).path)
        }
        "attr" => token::nt_attr(@p.parse_attribute(false)),
        "tt" => {
            p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
            let res = token::nt_tt(@p.parse_token_tree());
            p.quote_depth -= 1u;
            res
        }
        "matchers" => token::nt_matchers(p.parse_matchers()),
        _ => p.fatal(~"Unsupported builtin nonterminal parser: " + name)
    }
}
|