// rust/src/libsyntax/ext/tt/macro_parser.rs

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679

//! This is an Earley-like parser, without support for in-grammar
//! nonterminals: it handles named nonterminals only by calling out to the
//! main Rust parser (and commits to a nonterminal fully when it hits one in
//! a grammar). This means that there are no completer or predictor rules,
//! and therefore no need to store one column per token: instead, there's a
//! set of current Earley items and a set of next ones. Instead of NTs, we
//! have a special case for Kleene star. The big-O, in pathological cases,
//! is worse than traditional Earley parsing, but it's an easier fit for
//! Macro-by-Example-style rules, and I think the overhead is lower. (In
//! order to prevent the pathological case, we'd need to lazily construct
//! the resulting `NamedMatch`es at the very end. It'd be a pain, and
//! require more memory to keep around old items, but it would also save
//! overhead.)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a spot in the middle of a matcher, represented here by a
//! dot (`·`). For example `· a $( a )* a b` is a position, as is
//! `a $( · a )* a b`.
//!
//! The parser walks through the input a token at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances the input or calls
//! out to the real Rust parser when no `cur_eis` items remain.
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
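//!
//! As a minimal, self-contained sketch of this item-set discipline (an
//! illustration only, not part of this module's API), the matcher
//! `a $( a )* a b` can be run as a tiny NFA whose states are eagerly
//! epsilon-closed dot positions; `cur` and `next` below play the roles of
//! `cur_eis` and `next_eis` above:
//!
//! ```rust
//! fn toy_parse(input: &[char]) -> bool {
//!     // States: 0 = `· a (a)* a b`; 1 = {`(· a)*`, `(a)* · a b`}, i.e. the
//!     // closure after consuming an `a`; 2 = `(a)* a · b`; 3 = accept.
//!     let mut cur = vec![0u];
//!     for &c in input.iter() {
//!         let mut next = Vec::new();
//!         for &pos in cur.iter() {
//!             match (pos, c) {
//!                 (0, 'a') => next.push(1),                   // leading `a`
//!                 (1, 'a') => { next.push(1); next.push(2); } // repeat, or last `a`
//!                 (2, 'b') => next.push(3),                   // trailing `b`
//!                 _ => {}
//!             }
//!         }
//!         if next.is_empty() { return false }
//!         cur = next;
//!     }
//!     cur.iter().any(|&pos| pos == 3)
//! }
//! ```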
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;

use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;

use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};

// To avoid costly uniqueness checks, we require that `MatchedSeq` always has
// a nonempty body.

#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
    Tt(ast::TokenTree),
    TtSeq(Rc<Vec<ast::TokenTree>>),
}

impl TokenTreeOrTokenTreeVec {
    fn len(&self) -> uint {
        match self {
            &TtSeq(ref v) => v.len(),
            &Tt(ref tt) => tt.len(),
        }
    }

    fn get_tt(&self, index: uint) -> TokenTree {
        match self {
            &TtSeq(ref v) => v[index].clone(),
            &Tt(ref tt) => tt.get_tt(index),
        }
    }
}
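
// `Tt` wraps a single token tree, whose children can be indexed via
// `get_tt`; `TtSeq` wraps the top-level matcher itself. Both expose the
// same `len`/`get_tt` interface, so a `MatcherPos` can use either as its
// `top_elts` without caring which it has.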

/// An unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
    elts: TokenTreeOrTokenTreeVec,
    idx: uint,
}

#[derive(Clone)]
pub struct MatcherPos {
    /// Frames of enclosing token trees that have been "unzipped" into.
    stack: Vec<MatcherTtFrame>,
    /// The token trees the · is currently walking through.
    top_elts: TokenTreeOrTokenTreeVec,
    /// The separator of the repetition being matched, if any.
    sep: Option<Token>,
    /// The position of the · within `top_elts`.
    idx: uint,
    /// The enclosing matcher position; present iff this is a repetition.
    up: Option<Box<MatcherPos>>,

    /// One accumulated list of matches per named capture.
    matches: Vec<Vec<Rc<NamedMatch>>>,
    /// This position may bind the captures numbered `match_lo..match_hi`;
    /// `match_cur` is the next one to be bound.
    match_lo: uint,
    match_cur: uint,
    match_hi: uint,
    /// The low end of the span covered by this (partial) match.
    sp_lo: BytePos,
}

pub fn count_names(ms: &[TokenTree]) -> uint {
    ms.iter().fold(0, |count, elt| {
        count + match elt {
            &TtSequence(_, ref seq) => {
                seq.num_captures
            }
            &TtDelimited(_, ref delim) => {
                count_names(&delim.tts[])
            }
            &TtToken(_, MatchNt(..)) => {
                1
            }
            &TtToken(_, _) => 0,
        }
    })
}
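
// For example, the matcher `$i:ident $( $e:expr ),*` contains two named
// captures: the bare `MatchNt` for `i` counts as one, and the sequence
// contributes its precomputed `num_captures` (one, for `e`) rather than
// being re-walked.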

pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
                           -> Box<MatcherPos> {
    let match_idx_hi = count_names(&ms[]);
    let matches: Vec<_> = range(0, match_idx_hi).map(|_| Vec::new()).collect();
    box MatcherPos {
        stack: vec![],
        top_elts: TtSeq(ms),
        sep: sep,
        idx: 0u,
        up: None,
        matches: matches,
        match_lo: 0u,
        match_cur: 0u,
        match_hi: match_idx_hi,
        sp_lo: lo
    }
}

/// `NamedMatch` is a pattern-match result for a single `token::MatchNt`:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single `token::MatchNt` in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single `TtSequence` in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `TtSequence`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
    MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
    MatchedNonterminal(Nonterminal)
}
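
// For example (shape only), matching `$( $x:ident ),*` against the tokens
// `a, b` yields, for the single binder `x`, roughly:
//
//     MatchedSeq([MatchedNonterminal(NtIdent(a)),
//                 MatchedNonterminal(NtIdent(b))], <span of `a, b`>)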

pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
               -> HashMap<Ident, Rc<NamedMatch>> {
    fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
             ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut uint) {
        match m {
            &TtSequence(_, ref seq) => {
                for next_m in seq.tts.iter() {
                    n_rec(p_s, next_m, res, ret_val, idx)
                }
            }
            &TtDelimited(_, ref delim) => {
                for next_m in delim.tts.iter() {
                    n_rec(p_s, next_m, res, ret_val, idx)
                }
            }
            &TtToken(sp, MatchNt(bind_name, _, _, _)) => {
                match ret_val.entry(bind_name) {
                    Vacant(spot) => {
                        spot.insert(res[*idx].clone());
                        *idx += 1;
                    }
                    Occupied(..) => {
                        let string = token::get_ident(bind_name);
                        p_s.span_diagnostic
                           .span_fatal(sp,
                                       &format!("duplicated bind name: {}",
                                                string.get())[])
                    }
                }
            }
            &TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
            &TtToken(_, _) => (),
        }
    }
    let mut ret_val = HashMap::new();
    let mut idx = 0u;
    for m in ms.iter() { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
    ret_val
}
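
// `nameize` pairs binders with results positionally: it walks the matcher
// in the same left-to-right order in which `parse` filled `res`, so for
// `$a:expr $b:ty`, `a` maps to `res[0]` and `b` maps to `res[1]`, and a
// binder name used twice is reported as "duplicated bind name".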

pub enum ParseResult {
    Success(HashMap<Ident, Rc<NamedMatch>>),
    Failure(codemap::Span, String),
    Error(codemap::Span, String)
}

pub fn parse_or_else(sess: &ParseSess,
                     cfg: ast::CrateConfig,
                     rdr: TtReader,
                     ms: Vec<TokenTree>)
                     -> HashMap<Ident, Rc<NamedMatch>> {
    match parse(sess, cfg, rdr, &ms[]) {
        Success(m) => m,
        Failure(sp, str) => {
            sess.span_diagnostic.span_fatal(sp, &str[])
        }
        Error(sp, str) => {
            sess.span_diagnostic.span_fatal(sp, &str[])
        }
    }
}

/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
    match (t1, t2) {
        (&token::Ident(id1,_), &token::Ident(id2,_))
        | (&token::Lifetime(id1), &token::Lifetime(id2)) =>
            id1.name == id2.name,
        _ => *t1 == *t2
    }
}
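
// For example, two `x` identifiers introduced by different macro expansions
// have distinct syntax contexts, but they compare equal here because only
// the underlying names are checked; matching a macro's LHS is deliberately
// unhygienic in this respect.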

pub fn parse(sess: &ParseSess,
             cfg: ast::CrateConfig,
             mut rdr: TtReader,
             ms: &[TokenTree])
             -> ParseResult {
    let mut cur_eis = Vec::new();
    cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
                                               .map(|x| (*x).clone())
                                               .collect()),
                                     None,
                                     rdr.peek().sp.lo));

    loop {
        let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
        let mut next_eis = Vec::new(); // or proceed normally
        let mut eof_eis = Vec::new();

        let TokenAndSpan { tok, sp } = rdr.peek();

        /* we append new items to this while we go */
        loop {
            let mut ei = match cur_eis.pop() {
                None => break, /* for each Earley Item */
                Some(ei) => ei,
            };

            // When unzipped trees end, remove them
            while ei.idx >= ei.top_elts.len() {
                match ei.stack.pop() {
                    Some(MatcherTtFrame { elts, idx }) => {
                        ei.top_elts = elts;
                        ei.idx = idx + 1;
                    }
                    None => break
                }
            }

            let idx = ei.idx;
            let len = ei.top_elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if ei.up.is_some() {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)

                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position
                        let mut new_pos = ei.up.clone().unwrap();

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for idx in range(ei.match_lo, ei.match_hi) {
                            let sub = (ei.matches[idx]).clone();
                            (&mut new_pos.matches[idx])
                                .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
                                                                    sp.hi))));
                        }

                        new_pos.match_cur = ei.match_hi;
                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?
                    // the *_t vars are workarounds for the lack of unary move
                    match ei.sep {
                        Some(ref t) if idx == len => { // we need a separator
                            // i'm conflicted about whether this should be hygienic....
                            // though in this case, if the separators are never legal
                            // idents, it shouldn't matter.
                            if token_name_eq(&tok, t) { //pass the separator
                                let mut ei_t = ei.clone();
                                // ei_t.match_cur = ei_t.match_lo;
                                ei_t.idx += 1;
                                next_eis.push(ei_t);
                            }
                        }
                        _ => { // we don't need a separator
                            let mut ei_t = ei;
                            ei_t.match_cur = ei_t.match_lo;
                            ei_t.idx = 0;
                            cur_eis.push(ei_t);
                        }
                    }
                } else {
                    eof_eis.push(ei);
                }
            } else {
                match ei.top_elts.get_tt(idx) {
                    /* need to descend into sequence */
                    TtSequence(sp, seq) => {
                        if seq.op == ast::ZeroOrMore {
                            let mut new_ei = ei.clone();
                            new_ei.match_cur += seq.num_captures;
                            new_ei.idx += 1u;
                            //we specifically matched zero repeats.
                            for idx in range(ei.match_cur, ei.match_cur + seq.num_captures) {
                                (&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
                            }

                            cur_eis.push(new_ei);
                        }

                        let matches: Vec<_> = range(0, ei.matches.len())
                            .map(|_| Vec::new()).collect();
                        let ei_t = ei;
                        cur_eis.push(box MatcherPos {
                            stack: vec![],
                            sep: seq.separator.clone(),
                            idx: 0u,
                            matches: matches,
                            match_lo: ei_t.match_cur,
                            match_cur: ei_t.match_cur,
                            match_hi: ei_t.match_cur + seq.num_captures,
                            up: Some(ei_t),
                            sp_lo: sp.lo,
                            top_elts: Tt(TtSequence(sp, seq)),
                        });
                    }
                    TtToken(_, MatchNt(..)) => {
                        // Built-in nonterminals never start with these tokens,
                        // so we can eliminate them from consideration.
                        match tok {
                            token::CloseDelim(_) => {},
                            _ => bb_eis.push(ei),
                        }
                    }
                    TtToken(sp, SubstNt(..)) => {
                        return Error(sp, "Cannot transcribe in macro LHS".to_string())
                    }
                    seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
                        let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
                        let idx = ei.idx;
                        ei.stack.push(MatcherTtFrame {
                            elts: lower_elts,
                            idx: idx,
                        });
                        ei.idx = 0;
                        cur_eis.push(ei);
                    }
                    TtToken(_, ref t) => {
                        let mut ei_t = ei.clone();
                        if token_name_eq(t, &tok) {
                            ei_t.idx += 1;
                            next_eis.push(ei_t);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if token_name_eq(&tok, &token::Eof) {
            if eof_eis.len() == 1u {
                let mut v = Vec::new();
                for dv in (&mut eof_eis[0]).matches.iter_mut() {
                    v.push(dv.pop().unwrap());
                }
                return Success(nameize(sess, ms, &v[]));
            } else if eof_eis.len() > 1u {
                return Error(sp, "ambiguity: multiple successful parses".to_string());
            } else {
                return Failure(sp, "unexpected end of macro invocation".to_string());
            }
        } else {
            if (bb_eis.len() > 0u && next_eis.len() > 0u)
                || bb_eis.len() > 1u {
                let nts = bb_eis.iter().map(|ei| {
                    match ei.top_elts.get_tt(ei.idx) {
                        TtToken(_, MatchNt(bind, name, _, _)) => {
                            (format!("{} ('{}')",
                                     token::get_ident(name),
                                     token::get_ident(bind))).to_string()
                        }
                        _ => panic!()
                    }
                }).collect::<Vec<String>>().connect(" or ");
                return Error(sp, format!(
                    "local ambiguity: multiple parsing options: \
                     built-in NTs {} or {} other options.",
                    nts, next_eis.len()).to_string());
            } else if bb_eis.len() == 0u && next_eis.len() == 0u {
                return Failure(sp, format!("no rules expected the token `{}`",
                                           pprust::token_to_string(&tok)).to_string());
            } else if next_eis.len() > 0u {
                /* Now process the next token */
                while next_eis.len() > 0u {
                    cur_eis.push(next_eis.pop().unwrap());
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                let mut rust_parser = Parser::new(sess, cfg.clone(), box rdr.clone());

                let mut ei = bb_eis.pop().unwrap();
                match ei.top_elts.get_tt(ei.idx) {
                    TtToken(_, MatchNt(_, name, _, _)) => {
                        let name_string = token::get_ident(name);
                        let match_cur = ei.match_cur;
                        (&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
                            parse_nt(&mut rust_parser, name_string.get()))));
                        ei.idx += 1u;
                        ei.match_cur += 1;
                    }
                    _ => panic!()
                }
                cur_eis.push(ei);

                for _ in range(0, rust_parser.tokens_consumed) {
                    let _ = rdr.next_token();
                }
            }
        }

        assert!(cur_eis.len() > 0u);
    }
}

pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
    match name {
        "tt" => {
            p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
            let res = token::NtTT(P(p.parse_token_tree()));
            p.quote_depth -= 1u;
            return res;
        }
        _ => {}
    }
    // check at the beginning and the parser checks after each bump
    p.check_unknown_macro_variable();
    match name {
        "item" => match p.parse_item(Vec::new()) {
            Some(i) => token::NtItem(i),
            None => p.fatal("expected an item keyword")
        },
        "block" => token::NtBlock(p.parse_block()),
        "stmt" => token::NtStmt(p.parse_stmt(Vec::new())),
        "pat" => token::NtPat(p.parse_pat()),
        "expr" => token::NtExpr(p.parse_expr()),
        "ty" => token::NtTy(p.parse_ty()),
        // this could be handled like a token, since it is one
        "ident" => match p.token {
            token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
            _ => {
                let token_str = pprust::token_to_string(&p.token);
                p.fatal(&format!("expected ident, found {}",
                                 &token_str[])[])
            }
        },
        "path" => {
            token::NtPath(box p.parse_path(LifetimeAndTypesWithoutColons))
        }
        "meta" => token::NtMeta(p.parse_meta_item()),
        _ => {
            p.fatal(&format!("unsupported builtin nonterminal parser: {}", name)[])
        }
    }
}
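
// For example, when `parse` reaches the matcher fragment `$e:expr`, it
// calls `parse_nt(p, "expr")`, which consumes one expression from the
// token stream and returns it wrapped as `token::NtExpr`.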