rust/src/libsyntax/ext/tt/macro_parser.rs

439 lines
16 KiB
Rust
Raw Normal View History

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
2012-06-12 10:59:50 -07:00
// Earley-like parser for macros.
2012-09-04 11:37:29 -07:00
use ast::{matcher, match_tok, match_seq, match_nonterminal, ident};
2013-01-30 09:56:33 -08:00
use codemap::{BytePos, mk_sp};
use codemap;
use parse::common::*; //resolve bug?
use parse::lexer::*; //resolve bug?
use parse::parse_sess;
use parse::parser::Parser;
use parse::token::{Token, EOF, to_str, nonterminal};
use parse::token;
use core::dvec::DVec;
use core::dvec;
use core::io;
use core::option;
use core::str;
use core::uint;
use core::vec;
use std::oldmap::HashMap;
2012-06-12 10:59:50 -07:00
/* This is an Earley-like parser, without support for in-grammar nonterminals,
only by calling out to the main rust parser for named nonterminals (which it
commits to fully when it hits one in a grammar). This means that there are no
completer or predictor rules, and therefore no need to store one column per
token: instead, there's a set of current Earley items and a set of next
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
pathological cases, is worse than traditional Earley parsing, but it's an
easier fit for Macro-by-Example-style rules, and I think the overhead is
lower. (In order to prevent the pathological case, we'd need to lazily
construct the resulting `named_match`es at the very end. It'd be a pain,
and require more memory to keep around old items, but it would also save
overhead)*/
/* Quick intro to how the parser works:
A 'position' is a dot in the middle of a matcher, usually represented as a
dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
The parser walks through the input a character at a time, maintaining a list
of items consistent with the current position in the input string: `cur_eis`.
As it processes them, it fills up `eof_eis` with items that would be valid if
the macro invocation is now over, `bb_eis` with items that are waiting on
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
on a particular token. Most of the logic concerns moving the · through the
repetitions indicated by Kleene stars. It only advances or calls out to the
real Rust parser when no `cur_eis` items remain
Example: Start parsing `a a a a b` against [· a $( a )* a b].
Remaining input: `a a a a b`
next_eis: [· a $( a )* a b]
- - - Advance over an `a`. - - -
Remaining input: `a a a b`
cur: [a · $( a )* a b]
Descend/Skip (first item).
next: [a $( · a )* a b] [a $( a )* · a b].
- - - Advance over an `a`. - - -
Remaining input: `a a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b]
- - - Advance over a `b`. - - -
Remaining input: ``
eof: [a $( a )* a b ·]
*/
2012-06-12 10:59:50 -07:00
/* to avoid costly uniqueness checks, we require that `match_seq` always has a
2012-06-12 10:59:50 -07:00
nonempty body. */
// The "up" link from a nested matcher position back to its enclosing
// sequence. Wrapping the Option in a one-variant enum exists only to break
// the type circularity with `matcher_pos`; `None` marks the top level.
pub enum matcher_pos_up { /* to break a circularity */
2012-08-20 12:23:37 -07:00
matcher_pos_up(Option<matcher_pos>)
2012-06-12 10:59:50 -07:00
}
// Returns true iff this up-link actually points at a parent position,
// i.e. the wrapped Option is Some. Used below to decide whether an item
// that reached the end of its matchers is inside a repetition.
pub fn is_some(&&mpu: matcher_pos_up) -> bool {
match &mpu {
&matcher_pos_up(None) => false,
2012-08-03 19:59:04 -07:00
_ => true
2012-06-12 10:59:50 -07:00
}
}
// One Earley item: a dot position within a list of matchers, plus the
// bindings accumulated so far and a link to the enclosing repetition (if
// any). `match_lo`/`match_hi` delimit which slots of `matches` this
// (sub)sequence may bind; `sp_lo` is where its span started in the input.
pub type matcher_pos = ~{
elts: ~[ast::matcher], // maybe should be /&? Need to understand regions.
sep: Option<Token>,
2012-06-12 10:59:50 -07:00
mut idx: uint,
mut up: matcher_pos_up, // mutable for swapping only
2012-08-14 16:54:13 -07:00
matches: ~[DVec<@named_match>],
match_lo: uint, match_hi: uint,
sp_lo: BytePos,
2012-06-12 10:59:50 -07:00
};
// Extracts (by copy) the parent position out of an up-link. Calling this
// on a top-level position (up-link is None) is a bug, hence the fail!().
pub fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos {
match &mpu {
&matcher_pos_up(Some(ref mp)) => copy (*mp),
_ => fail!()
2012-06-12 10:59:50 -07:00
}
}
// Counts the named bindings (match_nonterminal leaves) in a matcher list,
// recursing into nested sequences. This determines how many slots the
// `matches` vector of a matcher_pos needs.
pub fn count_names(ms: &[matcher]) -> uint {
2012-06-30 16:19:07 -07:00
vec::foldl(0u, ms, |ct, m| {
2012-08-06 12:34:08 -07:00
ct + match m.node {
2012-08-03 19:59:04 -07:00
match_tok(_) => 0u, // literal tokens bind nothing
match_seq(ref more_ms, _, _, _, _) => count_names((*more_ms)),
2012-08-03 19:59:04 -07:00
match_nonterminal(_,_,_) => 1u // one binding per nonterminal
2012-06-12 10:59:50 -07:00
}})
}
2012-08-01 13:35:33 -07:00
// Builds the starting Earley item for `ms`: dot at index 0, no parent
// (top level), one empty DVec per named binding, and the binder range
// [0, match_idx_hi) computed by scanning the top-level matchers.
#[allow(non_implicitly_copyable_typarams)]
pub fn initial_matcher_pos(ms: ~[matcher], sep: Option<Token>, lo: BytePos)
-> matcher_pos {
let mut match_idx_hi = 0u;
for ms.each() |elt| {
2012-08-06 12:34:08 -07:00
match elt.node {
2012-08-03 19:59:04 -07:00
match_tok(_) => (),
match_seq(_,_,_,_,hi) => {
match_idx_hi = hi; // it is monotonic...
}
2012-08-03 19:59:04 -07:00
match_nonterminal(_,_,pos) => {
match_idx_hi = pos+1u; // ...so latest is highest
}
}
}
2012-08-20 12:23:37 -07:00
~{elts: ms, sep: sep, mut idx: 0u, mut up: matcher_pos_up(None),
2012-08-27 14:22:25 -07:00
matches: copy vec::from_fn(count_names(ms), |_i| dvec::DVec()),
match_lo: 0u, match_hi: match_idx_hi, sp_lo: lo}
2012-06-12 10:59:50 -07:00
}
// named_match is a pattern-match result for a single ast::match_nonterminal:
// so it is associated with a single ident in a parse, and all
// matched_nonterminals in the named_match have the same nonterminal type
// (expr, item, etc). All the leaves in a single named_match correspond to a
// single matcher_nonterminal in the ast::matcher that produced it.
//
// It should probably be renamed, it has more or less exact correspondence to
// ast::match nodes, and the in-memory structure of a particular named_match
// represents the match that occurred when a particular subset of an
// ast::match -- those ast::matcher nodes leading to a single
// match_nonterminal -- was applied to a particular token tree.
//
// The width of each matched_seq in the named_match, and the identity of the
// matched_nonterminals, will depend on the token tree it was applied to: each
// matched_seq corresponds to a single match_seq in the originating
// ast::matcher. The depth of the named_match structure will therefore depend
// only on the nesting depth of ast::match_seqs in the originating
// ast::matcher it was derived from.
// The match result bound to one `$name`: either a parsed nonterminal leaf,
// or (for `$(...)∗` repetitions) a sequence of sub-matches with the span
// the repetition covered. See the long comment above for the full story.
pub enum named_match {
matched_seq(~[@named_match], codemap::span),
matched_nonterminal(nonterminal)
}
2012-06-12 10:59:50 -07:00
// An Earley item is exactly a matcher position; the alias just names the role.
pub type earley_item = matcher_pos;
2012-06-12 10:59:50 -07:00
// Turns the flat, index-addressed match results `res` into a map keyed by
// the `$name` idents, by walking the matcher structure and pairing each
// match_nonterminal's bind-name with res[idx]. Fatals on a duplicated
// bind name.
pub fn nameize(p_s: parse_sess, ms: ~[matcher], res: ~[@named_match])
-> HashMap<ident,@named_match> {
// Recursive worker: inserts every binding under `m` into `ret_val`.
fn n_rec(p_s: parse_sess, m: matcher, res: ~[@named_match],
2012-09-10 15:38:28 -07:00
ret_val: HashMap<ident, @named_match>) {
2012-08-06 12:34:08 -07:00
match m {
2013-01-30 09:56:33 -08:00
codemap::spanned {node: match_tok(_), _} => (),
codemap::spanned {node: match_seq(ref more_ms, _, _, _, _), _} => {
2012-12-04 21:13:02 -08:00
for (*more_ms).each() |next_m| {
n_rec(p_s, *next_m, res, ret_val)
};
}
2013-01-30 09:56:33 -08:00
codemap::spanned {
node: match_nonterminal(bind_name, _, idx), span: sp
} => {
if ret_val.contains_key(&bind_name) {
2012-07-18 16:18:02 -07:00
p_s.span_diagnostic.span_fatal(sp, ~"Duplicated bind name: "+
*p_s.interner.get(bind_name))
}
ret_val.insert(bind_name, res[idx]);
}
}
}
2012-09-19 09:41:06 -07:00
let ret_val = HashMap();
for ms.each() |m| { n_rec(p_s, *m, res, ret_val) }
2012-08-01 17:30:05 -07:00
return ret_val;
}
// Outcome of matching a token stream against a matcher list.
// `failure` means this rule didn't match (callers may try another rule);
// `error` means the parse is irrecoverably ambiguous or broken.
pub enum parse_result {
2012-09-10 15:38:28 -07:00
success(HashMap<ident, @named_match>),
2012-08-10 10:46:04 -07:00
failure(codemap::span, ~str),
error(codemap::span, ~str)
}
// Convenience wrapper around `parse` that treats both failure and error
// as fatal diagnostics, returning the bindings map only on success.
pub fn parse_or_else(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader,
ms: ~[matcher]) -> HashMap<ident, @named_match> {
2012-08-06 12:34:08 -07:00
match parse(sess, cfg, rdr, ms) {
2012-08-03 19:59:04 -07:00
success(m) => m,
failure(sp, ref str) => sess.span_diagnostic.span_fatal(sp, (*str)),
error(sp, ref str) => sess.span_diagnostic.span_fatal(sp, (*str))
}
}
// The main Earley-style matching loop (see the module comment above).
// Each outer-loop iteration consumes one input token: it drains `cur_eis`,
// sorting items into `next_eis` (waiting on a token), `bb_eis` (waiting on
// a Rust nonterminal, parsed "black-box" via parser.rs), and `eof_eis`
// (complete). Kleene-star descent/ascent happens while draining `cur_eis`.
pub fn parse(sess: parse_sess,
cfg: ast::crate_cfg,
rdr: reader,
ms: ~[matcher])
-> parse_result {
let mut cur_eis = ~[];
cur_eis.push(initial_matcher_pos(ms, None, rdr.peek().sp.lo));
2012-06-12 10:59:50 -07:00
loop {
let mut bb_eis = ~[]; // black-box parsed by parser.rs
let mut next_eis = ~[]; // or proceed normally
let mut eof_eis = ~[];
2012-06-12 10:59:50 -07:00
2013-01-30 09:56:33 -08:00
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
2012-06-12 10:59:50 -07:00
/* we append new items to this while we go */
while cur_eis.len() > 0u { /* for each Earley Item */
2012-09-27 22:20:47 -07:00
let mut ei = cur_eis.pop();
2012-06-12 10:59:50 -07:00
let idx = ei.idx;
let len = ei.elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
2012-06-12 10:59:50 -07:00
if is_some(ei.up) {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let new_pos = copy_up(ei.up);
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for uint::range(ei.match_lo, ei.match_hi) |idx| {
let sub = ei.matches[idx].get();
new_pos.matches[idx]
.push(@matched_seq(sub,
mk_sp(ei.sp_lo,
sp.hi)));
2012-06-12 10:59:50 -07:00
}
2012-09-10 18:28:00 -07:00
new_pos.idx += 1;
cur_eis.push(new_pos);
2012-06-12 10:59:50 -07:00
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
2012-08-06 12:34:08 -07:00
match copy ei.sep {
Some(ref t) if idx == len => { // we need a separator
if tok == (*t) { //pass the separator
let ei_t = ei;
2012-09-10 18:28:00 -07:00
ei_t.idx += 1;
next_eis.push(ei_t);
2012-06-12 10:59:50 -07:00
}
}
2012-08-03 19:59:04 -07:00
_ => { // we don't need a separator
// restart the sequence body for another repetition
let ei_t = ei;
2012-09-10 18:28:00 -07:00
ei_t.idx = 0;
cur_eis.push(ei_t);
2012-06-12 10:59:50 -07:00
}
}
} else {
// top-level item ran out of matchers: valid only at EOF
eof_eis.push(ei);
2012-06-12 10:59:50 -07:00
}
} else {
2012-08-06 12:34:08 -07:00
match copy ei.elts[idx].node {
2012-06-12 10:59:50 -07:00
/* need to descend into sequence */
match_seq(ref matchers, ref sep, zero_ok,
2012-08-03 19:59:04 -07:00
match_idx_lo, match_idx_hi) => {
2012-06-12 10:59:50 -07:00
if zero_ok {
let new_ei = copy ei;
new_ei.idx += 1u;
//we specifically matched zero repeats.
for uint::range(match_idx_lo, match_idx_hi) |idx| {
new_ei.matches[idx].push(@matched_seq(~[], sp));
}
cur_eis.push(new_ei);
2012-06-12 10:59:50 -07:00
}
let matches = vec::map(ei.matches, // fresh, same size:
2012-08-27 14:22:25 -07:00
|_m| DVec::<@named_match>());
let ei_t = ei;
// child item for the sequence body, linked up to ei_t
cur_eis.push(~{
elts: (*matchers), sep: (*sep), mut idx: 0u,
mut up: matcher_pos_up(Some(ei_t)),
matches: matches,
match_lo: match_idx_lo, match_hi: match_idx_hi,
sp_lo: sp.lo
2012-06-12 10:59:50 -07:00
});
}
match_nonterminal(_,_,_) => { bb_eis.push(ei) }
match_tok(ref t) => {
let ei_t = ei;
if (*t) == tok {
2012-09-10 18:28:00 -07:00
ei_t.idx += 1;
next_eis.push(ei_t);
2012-09-10 18:28:00 -07:00
}
2012-06-12 10:59:50 -07:00
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if tok == EOF {
if eof_eis.len() == 1u {
2012-08-01 17:30:05 -07:00
return success(
nameize(sess, ms,
eof_eis[0u].matches.map(|dv| dv.pop())));
2012-06-12 10:59:50 -07:00
} else if eof_eis.len() > 1u {
2012-08-10 10:46:04 -07:00
return error(sp, ~"Ambiguity: multiple successful parses");
2012-06-12 10:59:50 -07:00
} else {
2012-08-01 17:30:05 -07:00
return failure(sp, ~"Unexpected end of macro invocation");
2012-06-12 10:59:50 -07:00
}
} else {
// ambiguity: can't mix nonterminal parses with token advances,
// and can't have two pending nonterminal parses at once
if (bb_eis.len() > 0u && next_eis.len() > 0u)
|| bb_eis.len() > 1u {
2012-06-30 16:19:07 -07:00
let nts = str::connect(vec::map(bb_eis, |ei| {
2012-08-06 12:34:08 -07:00
match ei.elts[ei.idx].node {
2012-08-03 19:59:04 -07:00
match_nonterminal(bind,name,_) => {
2012-08-22 17:24:52 -07:00
fmt!("%s ('%s')", *sess.interner.get(name),
*sess.interner.get(bind))
2012-07-24 11:44:17 -07:00
}
_ => fail!()
2012-08-03 19:59:04 -07:00
} }), ~" or ");
2012-08-22 17:24:52 -07:00
return error(sp, fmt!(
"Local ambiguity: multiple parsing options: \
built-in NTs %s or %u other options.",
2012-08-22 17:24:52 -07:00
nts, next_eis.len()));
2012-06-12 10:59:50 -07:00
} else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
return failure(sp, ~"No rules expected the token: "
+ to_str(rdr.interner(), tok));
2012-06-12 10:59:50 -07:00
} else if (next_eis.len() > 0u) {
/* Now process the next token */
while(next_eis.len() > 0u) {
2012-09-27 22:20:47 -07:00
cur_eis.push(next_eis.pop());
2012-06-12 10:59:50 -07:00
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
// exactly one nonterminal pending: commit to the real
// Rust parser on a duplicate of the reader
let rust_parser = Parser(sess, cfg, rdr.dup());
2012-06-12 10:59:50 -07:00
2012-09-27 22:20:47 -07:00
let ei = bb_eis.pop();
2012-08-06 12:34:08 -07:00
match ei.elts[ei.idx].node {
2012-08-03 19:59:04 -07:00
match_nonterminal(_, name, idx) => {
ei.matches[idx].push(@matched_nonterminal(
2012-07-18 16:18:02 -07:00
parse_nt(rust_parser, *sess.interner.get(name))));
2012-06-12 10:59:50 -07:00
ei.idx += 1u;
}
_ => fail!()
2012-06-12 10:59:50 -07:00
}
cur_eis.push(ei);
2012-06-12 10:59:50 -07:00
// re-sync our reader with what the black-box parser ate
for rust_parser.tokens_consumed.times() || {
rdr.next_token();
2012-06-12 10:59:50 -07:00
}
}
}
assert cur_eis.len() > 0u;
}
}
// Dispatches on a nonterminal name ("expr", "ty", "ident", ...) and parses
// one instance of that nonterminal with the real Rust parser, wrapping the
// result in the corresponding token::nt_* variant. Fatals on an unknown name.
pub fn parse_nt(p: Parser, name: ~str) -> nonterminal {
2012-08-06 12:34:08 -07:00
match name {
~"item" => match p.parse_item(~[]) {
2012-08-20 12:23:37 -07:00
Some(i) => token::nt_item(i),
None => p.fatal(~"expected an item keyword")
},
2012-08-03 19:59:04 -07:00
~"block" => token::nt_block(p.parse_block()),
~"stmt" => token::nt_stmt(p.parse_stmt(~[])),
~"pat" => token::nt_pat(p.parse_pat(true)),
~"expr" => token::nt_expr(p.parse_expr()),
~"ty" => token::nt_ty(p.parse_ty(false /* no need to disambiguate*/)),
2012-06-12 10:59:50 -07:00
// this could be handled like a token, since it is one
2012-08-06 12:34:08 -07:00
~"ident" => match copy p.token {
2012-08-03 19:59:04 -07:00
token::IDENT(sn,b) => { p.bump(); token::nt_ident(sn,b) }
_ => p.fatal(~"expected ident, found "
+ token::to_str(p.reader.interner(), copy p.token))
},
2012-08-03 19:59:04 -07:00
~"path" => token::nt_path(p.parse_path_with_tps(false)),
~"tt" => {
p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
let res = token::nt_tt(@p.parse_token_tree());
2012-07-06 14:48:01 -07:00
p.quote_depth -= 1u;
res
}
2012-08-03 19:59:04 -07:00
~"matchers" => token::nt_matchers(p.parse_matchers()),
_ => p.fatal(~"Unsupported builtin nonterminal parser: " + name)
2012-06-12 10:59:50 -07:00
}
}
// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End: