Use PascalCase for token variants
parent bd7138dd69
commit d8b1fa0ae0
26 changed files with 1193 additions and 1148 deletions
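The diff below is a mechanical rename: the SCREAMING_CASE variants of syntax::parse::token::Token (IDENT, EOF, BINOP(..), ...) become PascalCase (Ident, Eof, BinOp(..)), the BinOp enum of operator tokens becomes BinOpToken, and call sites switch from glob imports to the token::-qualified path. A minimal before/after sketch of the pattern, using a simplified stand-in enum rather than the real libsyntax definitions; the variant payloads and the classify helper are illustrative assumptions, not code from this commit:

mod token {
    // Stand-in for the real libsyntax types, illustrative only.
    pub enum BinOpToken { Plus, Shr }   // formerly PLUS, SHR in the BinOp enum

    pub enum Token {
        Eof,                 // was EOF
        Ident(String, bool), // was IDENT(..)
        BinOp(BinOpToken),   // was BINOP(..)
    }
}

// Call sites now match on module-qualified PascalCase variants
// (token::Ident, token::Eof) instead of glob-imported SCREAMING_CASE names.
fn classify(t: &token::Token) -> &'static str {
    match *t {
        token::Token::Eof => "eof",
        token::Token::Ident(..) => "ident",
        token::Token::BinOp(token::BinOpToken::Shr) => "shift-right",
        token::Token::BinOp(..) => "other binop",
    }
}

fn main() {
    assert_eq!(classify(&token::Token::BinOp(token::BinOpToken::Plus)), "other binop");
}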
@@ -55,7 +55,7 @@ extern crate syntax;
 extern crate rustc;

 use syntax::codemap::Span;
-use syntax::parse::token::{IDENT, get_ident};
+use syntax::parse::token;
 use syntax::ast::{TokenTree, TtToken};
 use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
 use syntax::ext::build::AstBuilder; // trait for expr_uint
@@ -71,7 +71,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
 ("I", 1)];

 let text = match args {
-[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
+[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
 _ => {
 cx.span_err(sp, "argument should be a single identifier");
 return DummyResult::any(sp);
@@ -30,12 +30,12 @@ use rustc::driver::{session, config};

 use syntax::ast;
 use syntax::ast::Name;
-use syntax::parse::token::*;
+use syntax::parse::token;
 use syntax::parse::lexer::TokenAndSpan;

 fn parse_token_list(file: &str) -> HashMap<String, Token> {
 fn id() -> Token {
-IDENT(ast::Ident { name: Name(0), ctxt: 0, }, false)
+token::Ident(ast::Ident { name: Name(0), ctxt: 0, }, false)
 }

 let mut res = HashMap::new();
@@ -52,64 +52,64 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
 let num = line.slice_from(eq + 1);

 let tok = match val {
-"SHR" => BINOP(SHR),
+"SHR" => token::BinOp(token::Shr),
-"DOLLAR" => DOLLAR,
+"DOLLAR" => token::Dollar,
-"LT" => LT,
+"LT" => token::Lt,
-"STAR" => BINOP(STAR),
+"STAR" => token::BinOp(token::Star),
 "FLOAT_SUFFIX" => id(),
 "INT_SUFFIX" => id(),
-"SHL" => BINOP(SHL),
+"SHL" => token::BinOp(token::Shl),
-"LBRACE" => LBRACE,
+"LBRACE" => token::LBrace,
-"RARROW" => RARROW,
+"RARROW" => token::RArrow,
-"LIT_STR" => LIT_STR(Name(0)),
+"LIT_STR" => token::LitStr(Name(0)),
-"DOTDOT" => DOTDOT,
+"DOTDOT" => token::DotDot,
-"MOD_SEP" => MOD_SEP,
+"MOD_SEP" => token::ModSep,
-"DOTDOTDOT" => DOTDOTDOT,
+"DOTDOTDOT" => token::DotDotDot,
-"NOT" => NOT,
+"NOT" => token::Not,
-"AND" => BINOP(AND),
+"AND" => token::BinOp(token::And),
-"LPAREN" => LPAREN,
+"LPAREN" => token::LParen,
-"ANDAND" => ANDAND,
+"ANDAND" => token::AndAnd,
-"AT" => AT,
+"AT" => token::At,
-"LBRACKET" => LBRACKET,
+"LBRACKET" => token::LBracket,
-"LIT_STR_RAW" => LIT_STR_RAW(Name(0), 0),
+"LIT_STR_RAW" => token::LitStrRaw(Name(0), 0),
-"RPAREN" => RPAREN,
+"RPAREN" => token::RParen,
-"SLASH" => BINOP(SLASH),
+"SLASH" => token::BinOp(token::Slash),
-"COMMA" => COMMA,
+"COMMA" => token::Comma,
-"LIFETIME" => LIFETIME(ast::Ident { name: Name(0), ctxt: 0 }),
+"LIFETIME" => token::Lifetime(ast::Ident { name: Name(0), ctxt: 0 }),
-"CARET" => BINOP(CARET),
+"CARET" => token::BinOp(token::Caret),
-"TILDE" => TILDE,
+"TILDE" => token::Tilde,
 "IDENT" => id(),
-"PLUS" => BINOP(PLUS),
+"PLUS" => token::BinOp(token::Plus),
-"LIT_CHAR" => LIT_CHAR(Name(0)),
+"LIT_CHAR" => token::LitChar(Name(0)),
-"LIT_BYTE" => LIT_BYTE(Name(0)),
+"LIT_BYTE" => token::LitByte(Name(0)),
-"EQ" => EQ,
+"EQ" => token::Eq,
-"RBRACKET" => RBRACKET,
+"RBRACKET" => token::RBracket,
-"COMMENT" => COMMENT,
+"COMMENT" => token::Comment,
-"DOC_COMMENT" => DOC_COMMENT(Name(0)),
+"DOC_COMMENT" => token::DocComment(Name(0)),
-"DOT" => DOT,
+"DOT" => token::Dot,
-"EQEQ" => EQEQ,
+"EQEQ" => token::EqEq,
-"NE" => NE,
+"NE" => token::Ne,
-"GE" => GE,
+"GE" => token::Ge,
-"PERCENT" => BINOP(PERCENT),
+"PERCENT" => token::BinOp(token::Percent),
-"RBRACE" => RBRACE,
+"RBRACE" => token::RBrace,
-"BINOP" => BINOP(PLUS),
+"BINOP" => token::BinOp(token::Plus),
-"POUND" => POUND,
+"POUND" => token::Pound,
-"OROR" => OROR,
+"OROR" => token::OrOr,
-"LIT_INTEGER" => LIT_INTEGER(Name(0)),
+"LIT_INTEGER" => token::LitInteger(Name(0)),
-"BINOPEQ" => BINOPEQ(PLUS),
+"BINOPEQ" => token::BinOpEq(token::Plus),
-"LIT_FLOAT" => LIT_FLOAT(Name(0)),
+"LIT_FLOAT" => token::LitFloat(Name(0)),
-"WHITESPACE" => WS,
+"WHITESPACE" => token::Whitespace,
-"UNDERSCORE" => UNDERSCORE,
+"UNDERSCORE" => token::Underscore,
-"MINUS" => BINOP(MINUS),
+"MINUS" => token::BinOp(token::Minus),
-"SEMI" => SEMI,
+"SEMI" => token::Semi,
-"COLON" => COLON,
+"COLON" => token::Colon,
-"FAT_ARROW" => FAT_ARROW,
+"FAT_ARROW" => token::FatArrow,
-"OR" => BINOP(OR),
+"OR" => token::BinOp(token::Or),
-"GT" => GT,
+"GT" => token::Gt,
-"LE" => LE,
+"LE" => token::Le,
-"LIT_BINARY" => LIT_BINARY(Name(0)),
+"LIT_BINARY" => token::LitBinary(Name(0)),
-"LIT_BINARY_RAW" => LIT_BINARY_RAW(Name(0), 0),
+"LIT_BINARY_RAW" => token::LitBinaryRaw(Name(0), 0),
-_ => continue
+_ => continue,
 };

 res.insert(num.to_string(), tok);
@@ -119,19 +119,19 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
 res
 }

-fn str_to_binop(s: &str) -> BinOp {
+fn str_to_binop(s: &str) -> BinOpToken {
 match s {
-"+" => PLUS,
+"+" => token::Plus,
-"/" => SLASH,
+"/" => token::Slash,
-"-" => MINUS,
+"-" => token::Minus,
-"*" => STAR,
+"*" => token::Star,
-"%" => PERCENT,
+"%" => token::Percent,
-"^" => CARET,
+"^" => token::Caret,
-"&" => AND,
+"&" => token::And,
-"|" => OR,
+"|" => token::Or,
-"<<" => SHL,
+"<<" => token::Shl,
-">>" => SHR,
+">>" => token::Shr,
-_ => fail!("Bad binop str `{}`", s)
+_ => fail!("Bad binop str `{}`", s),
 }
 }

@@ -186,19 +186,20 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
 debug!("What we got: content (`{}`), proto: {}", content, proto_tok);

 let real_tok = match *proto_tok {
-BINOP(..) => BINOP(str_to_binop(content)),
-BINOPEQ(..) => BINOPEQ(str_to_binop(content.slice_to(content.len() - 1))),
-LIT_STR(..) => LIT_STR(fix(content)),
-LIT_STR_RAW(..) => LIT_STR_RAW(fix(content), count(content)),
-LIT_CHAR(..) => LIT_CHAR(fixchar(content)),
-LIT_BYTE(..) => LIT_BYTE(fixchar(content)),
-DOC_COMMENT(..) => DOC_COMMENT(nm),
-LIT_INTEGER(..) => LIT_INTEGER(nm),
-LIT_FLOAT(..) => LIT_FLOAT(nm),
-LIT_BINARY(..) => LIT_BINARY(nm),
-LIT_BINARY_RAW(..) => LIT_BINARY_RAW(fix(content), count(content)),
-IDENT(..) => IDENT(ast::Ident { name: nm, ctxt: 0 }, true),
-LIFETIME(..) => LIFETIME(ast::Ident { name: nm, ctxt: 0 }),
+token::BinOp(..) => token::BinOp(str_to_binop(content)),
+token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to(
+content.len() - 1))),
+token::LitStr(..) => token::LitStr(fix(content)),
+token::LitStrRaw(..) => token::LitStrRaw(fix(content), count(content)),
+token::LitChar(..) => token::LitChar(fixchar(content)),
+token::LitByte(..) => token::LitByte(fixchar(content)),
+token::DocComment(..) => token::DocComment(nm),
+token::LitInteger(..) => token::LitInteger(nm),
+token::LitFloat(..) => token::LitFloat(nm),
+token::LitBinary(..) => token::LitBinary(nm),
+token::LitBinaryRaw(..) => token::LitBinaryRaw(fix(content), count(content)),
+token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 }, true),
+token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }),
 ref t => t.clone()
 };

@@ -222,8 +223,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {

 fn tok_cmp(a: &Token, b: &Token) -> bool {
 match a {
-&IDENT(id, _) => match b {
+&token::Ident(id, _) => match b {
-&IDENT(id2, _) => id == id2,
+&token::Ident(id2, _) => id == id2,
 _ => false
 },
 _ => a == b
@@ -281,19 +282,20 @@ fn main() {
 )
 )

-matches!(LIT_BYTE(..),
-LIT_CHAR(..),
-LIT_INTEGER(..),
-LIT_FLOAT(..),
-LIT_STR(..),
-LIT_STR_RAW(..),
-LIT_BINARY(..),
-LIT_BINARY_RAW(..),
-IDENT(..),
-LIFETIME(..),
-INTERPOLATED(..),
-DOC_COMMENT(..),
-SHEBANG(..)
+matches!(
+LitByte(..),
+LitChar(..),
+LitInteger(..),
+LitFloat(..),
+LitStr(..),
+LitStrRaw(..),
+LitBinary(..),
+LitBinaryRaw(..),
+Ident(..),
+Lifetime(..),
+Interpolated(..),
+DocComment(..),
+Shebang(..)
 );
 }
 }
@@ -634,7 +634,7 @@ fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
 return None
 }
 };
-if !parser.eat(&token::EOF) {
+if !parser.eat(&token::Eof) {
 cx.span_err(parser.span, "only one string literal allowed");
 return None;
 }
@@ -428,7 +428,7 @@ impl <'l, 'tcx> DxrVisitor<'l, 'tcx> {
 let qualname = format!("{}::{}", qualname, name);
 let typ = ppaux::ty_to_string(&self.analysis.ty_cx,
 (*self.analysis.ty_cx.node_types.borrow())[field.node.id as uint]);
-match self.span.sub_span_before_token(field.span, token::COLON) {
+match self.span.sub_span_before_token(field.span, token::Colon) {
 Some(sub_span) => self.fmt.field_str(field.span,
 Some(sub_span),
 field.node.id,
@@ -1175,7 +1175,7 @@ impl<'l, 'tcx, 'v> Visitor<'v> for DxrVisitor<'l, 'tcx> {
 // 'use' always introduces an alias, if there is not an explicit
 // one, there is an implicit one.
 let sub_span =
-match self.span.sub_span_before_token(path.span, token::EQ) {
+match self.span.sub_span_before_token(path.span, token::Eq) {
 Some(sub_span) => Some(sub_span),
 None => sub_span,
 };
@@ -93,7 +93,7 @@ impl<'a> SpanUtils<'a> {
 let mut bracket_count = 0u;
 loop {
 let ts = toks.next_token();
-if ts.tok == token::EOF {
+if ts.tok == token::Eof {
 return self.make_sub_span(span, result)
 }
 if bracket_count == 0 &&
@@ -102,9 +102,9 @@ impl<'a> SpanUtils<'a> {
 }

 bracket_count += match ts.tok {
-token::LT => 1,
+token::Lt => 1,
-token::GT => -1,
+token::Gt => -1,
-token::BINOP(token::SHR) => -2,
+token::BinOp(token::Shr) => -2,
 _ => 0
 }
 }
@@ -116,7 +116,7 @@ impl<'a> SpanUtils<'a> {
 let mut bracket_count = 0u;
 loop {
 let ts = toks.next_token();
-if ts.tok == token::EOF {
+if ts.tok == token::Eof {
 return None;
 }
 if bracket_count == 0 &&
@@ -125,9 +125,9 @@ impl<'a> SpanUtils<'a> {
 }

 bracket_count += match ts.tok {
-token::LT => 1,
+token::Lt => 1,
-token::GT => -1,
+token::Gt => -1,
-token::BINOP(token::SHR) => -2,
+token::BinOp(token::Shr) => -2,
 _ => 0
 }
 }
@@ -141,32 +141,32 @@ impl<'a> SpanUtils<'a> {
 let mut result = None;
 let mut bracket_count = 0u;
 let mut last_span = None;
-while prev.tok != token::EOF {
+while prev.tok != token::Eof {
 last_span = None;
 let mut next = toks.next_token();

-if (next.tok == token::LPAREN ||
+if (next.tok == token::LParen ||
-next.tok == token::LT) &&
+next.tok == token::Lt) &&
 bracket_count == 0 &&
 is_ident(&prev.tok) {
 result = Some(prev.sp);
 }

 if bracket_count == 0 &&
-next.tok == token::MOD_SEP {
+next.tok == token::ModSep {
 let old = prev;
 prev = next;
 next = toks.next_token();
-if next.tok == token::LT &&
+if next.tok == token::Lt &&
 is_ident(&old.tok) {
 result = Some(old.sp);
 }
 }

 bracket_count += match prev.tok {
-token::LPAREN | token::LT => 1,
+token::LParen | token::Lt => 1,
-token::RPAREN | token::GT => -1,
+token::RParen | token::Gt => -1,
-token::BINOP(token::SHR) => -2,
+token::BinOp(token::Shr) => -2,
 _ => 0
 };

@@ -191,21 +191,21 @@ impl<'a> SpanUtils<'a> {
 loop {
 let next = toks.next_token();

-if (next.tok == token::LT ||
+if (next.tok == token::Lt ||
-next.tok == token::COLON) &&
+next.tok == token::Colon) &&
 bracket_count == 0 &&
 is_ident(&prev.tok) {
 result = Some(prev.sp);
 }

 bracket_count += match prev.tok {
-token::LT => 1,
+token::Lt => 1,
-token::GT => -1,
+token::Gt => -1,
-token::BINOP(token::SHR) => -2,
+token::BinOp(token::Shr) => -2,
 _ => 0
 };

-if next.tok == token::EOF {
+if next.tok == token::Eof {
 break;
 }
 prev = next;
@@ -235,7 +235,7 @@ impl<'a> SpanUtils<'a> {
 let mut bracket_count = 0i;
 loop {
 let ts = toks.next_token();
-if ts.tok == token::EOF {
+if ts.tok == token::Eof {
 if bracket_count != 0 {
 let loc = self.sess.codemap().lookup_char_pos(span.lo);
 self.sess.span_bug(span, format!(
@@ -248,10 +248,10 @@ impl<'a> SpanUtils<'a> {
 return result;
 }
 bracket_count += match ts.tok {
-token::LT => 1,
+token::Lt => 1,
-token::GT => -1,
+token::Gt => -1,
-token::BINOP(token::SHL) => 2,
+token::BinOp(token::Shl) => 2,
-token::BINOP(token::SHR) => -2,
+token::BinOp(token::Shr) => -2,
 _ => 0
 };
 if is_ident(&ts.tok) &&
@@ -265,7 +265,7 @@ impl<'a> SpanUtils<'a> {
 let mut toks = self.retokenise_span(span);
 let mut prev = toks.next_token();
 loop {
-if prev.tok == token::EOF {
+if prev.tok == token::Eof {
 return None;
 }
 let next = toks.next_token();
@@ -282,12 +282,12 @@ impl<'a> SpanUtils<'a> {
 let mut toks = self.retokenise_span(span);
 loop {
 let ts = toks.next_token();
-if ts.tok == token::EOF {
+if ts.tok == token::Eof {
 return None;
 }
 if is_keyword(keyword, &ts.tok) {
 let ts = toks.next_token();
-if ts.tok == token::EOF {
+if ts.tok == token::Eof {
 return None
 } else {
 return self.make_sub_span(span, Some(ts.sp));
@@ -17,7 +17,7 @@ use html::escape::Escape;

 use std::io;
 use syntax::parse::lexer;
-use syntax::parse::token as t;
+use syntax::parse::token;
 use syntax::parse;

 /// Highlights some source code, returning the HTML output.
@@ -63,19 +63,19 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,

 let snip = |sp| sess.span_diagnostic.cm.span_to_snippet(sp).unwrap();

-if next.tok == t::EOF { break }
+if next.tok == token::Eof { break }

 let klass = match next.tok {
-t::WS => {
+token::Whitespace => {
 try!(write!(out, "{}", Escape(snip(next.sp).as_slice())));
 continue
 },
-t::COMMENT => {
+token::Comment => {
 try!(write!(out, "<span class='comment'>{}</span>",
 Escape(snip(next.sp).as_slice())));
 continue
 },
-t::SHEBANG(s) => {
+token::Shebang(s) => {
 try!(write!(out, "{}", Escape(s.as_str())));
 continue
 },
@@ -83,24 +83,25 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 // that it's the address-of operator instead of the and-operator.
 // This allows us to give all pointers their own class (`Box` and
 // `@` are below).
-t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
+token::BinOp(token::And) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
-t::AT | t::TILDE => "kw-2",
+token::At | token::Tilde => "kw-2",

 // consider this as part of a macro invocation if there was a
 // leading identifier
-t::NOT if is_macro => { is_macro = false; "macro" }
+token::Not if is_macro => { is_macro = false; "macro" }

 // operators
-t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
+token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt |
-t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
+token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow |
-t::BINOPEQ(..) | t::FAT_ARROW => "op",
+token::BinOpEq(..) | token::FatArrow => "op",

 // miscellaneous, no highlighting
-t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
+token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi |
-t::COLON | t::MOD_SEP | t::LARROW | t::LPAREN |
+token::Colon | token::ModSep | token::LArrow | token::LParen |
-t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE | t::QUESTION => "",
-t::DOLLAR => {
-if t::is_ident(&lexer.peek().tok) {
+token::RParen | token::LBracket | token::LBrace | token::RBrace |
+token::Question => "",
+token::Dollar => {
+if token::is_ident(&lexer.peek().tok) {
 is_macro_nonterminal = true;
 "macro-nonterminal"
 } else {
@@ -112,12 +113,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 // continue highlighting it as an attribute until the ending ']' is
 // seen, so skip out early. Down below we terminate the attribute
 // span when we see the ']'.
-t::POUND => {
+token::Pound => {
 is_attribute = true;
 try!(write!(out, r"<span class='attribute'>#"));
 continue
 }
-t::RBRACKET => {
+token::RBracket => {
 if is_attribute {
 is_attribute = false;
 try!(write!(out, "]</span>"));
@@ -128,15 +129,15 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 }

 // text literals
-t::LIT_BYTE(..) | t::LIT_BINARY(..) | t::LIT_BINARY_RAW(..) |
+token::LitByte(..) | token::LitBinary(..) | token::LitBinaryRaw(..) |
-t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",
+token::LitChar(..) | token::LitStr(..) | token::LitStrRaw(..) => "string",

 // number literals
-t::LIT_INTEGER(..) | t::LIT_FLOAT(..) => "number",
+token::LitInteger(..) | token::LitFloat(..) => "number",

 // keywords are also included in the identifier set
-t::IDENT(ident, _is_mod_sep) => {
+token::Ident(ident, _is_mod_sep) => {
-match t::get_ident(ident).get() {
+match token::get_ident(ident).get() {
 "ref" | "mut" => "kw-2",

 "self" => "self",
@@ -145,12 +146,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 "Option" | "Result" => "prelude-ty",
 "Some" | "None" | "Ok" | "Err" => "prelude-val",

-_ if t::is_any_keyword(&next.tok) => "kw",
+_ if token::is_any_keyword(&next.tok) => "kw",
 _ => {
 if is_macro_nonterminal {
 is_macro_nonterminal = false;
 "macro-nonterminal"
-} else if lexer.peek().tok == t::NOT {
+} else if lexer.peek().tok == token::Not {
 is_macro = true;
 "macro"
 } else {
@@ -160,9 +161,9 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
 }
 }

-t::LIFETIME(..) => "lifetime",
+token::Lifetime(..) => "lifetime",
-t::DOC_COMMENT(..) => "doccomment",
+token::DocComment(..) => "doccomment",
-t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
+token::Underscore | token::Eof | token::Interpolated(..) => "",
 };

 // as mentioned above, use the original source code instead of
@@ -50,7 +50,7 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt,
 token_tree: &[TokenTree])
 -> Box<MacResult+'cx> {
 let code = match token_tree {
-[ast::TtToken(_, token::IDENT(code, _))] => code,
+[ast::TtToken(_, token::Ident(code, _))] => code,
 _ => unreachable!()
 };
 with_registered_diagnostics(|diagnostics| {
@@ -82,12 +82,12 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
 token_tree: &[TokenTree])
 -> Box<MacResult+'cx> {
 let (code, description) = match token_tree {
-[ast::TtToken(_, token::IDENT(ref code, _))] => {
+[ast::TtToken(_, token::Ident(ref code, _))] => {
 (code, None)
 },
-[ast::TtToken(_, token::IDENT(ref code, _)),
+[ast::TtToken(_, token::Ident(ref code, _)),
-ast::TtToken(_, token::COMMA),
+ast::TtToken(_, token::Comma),
-ast::TtToken(_, token::LIT_STR_RAW(description, _))] => {
+ast::TtToken(_, token::LitStrRaw(description, _))] => {
 (code, Some(description))
 }
 _ => unreachable!()
@@ -110,7 +110,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt,
 token_tree: &[TokenTree])
 -> Box<MacResult+'cx> {
 let name = match token_tree {
-[ast::TtToken(_, token::IDENT(ref name, _))] => name,
+[ast::TtToken(_, token::Ident(ref name, _))] => name,
 _ => unreachable!()
 };

@@ -72,21 +72,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 asm_str_style = Some(style);
 }
 Outputs => {
-while p.token != token::EOF &&
+while p.token != token::Eof &&
-p.token != token::COLON &&
+p.token != token::Colon &&
-p.token != token::MOD_SEP {
+p.token != token::ModSep {

 if outputs.len() != 0 {
-p.eat(&token::COMMA);
+p.eat(&token::Comma);
 }

 let (constraint, _str_style) = p.parse_str();

 let span = p.last_span;

-p.expect(&token::LPAREN);
+p.expect(&token::LParen);
 let out = p.parse_expr();
-p.expect(&token::RPAREN);
+p.expect(&token::RParen);

 // Expands a read+write operand into two operands.
 //
@@ -113,12 +113,12 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 }
 }
 Inputs => {
-while p.token != token::EOF &&
+while p.token != token::Eof &&
-p.token != token::COLON &&
+p.token != token::Colon &&
-p.token != token::MOD_SEP {
+p.token != token::ModSep {

 if inputs.len() != 0 {
-p.eat(&token::COMMA);
+p.eat(&token::Comma);
 }

 let (constraint, _str_style) = p.parse_str();
@@ -129,21 +129,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 cx.span_err(p.last_span, "input operand constraint contains '+'");
 }

-p.expect(&token::LPAREN);
+p.expect(&token::LParen);
 let input = p.parse_expr();
-p.expect(&token::RPAREN);
+p.expect(&token::RParen);

 inputs.push((constraint, input));
 }
 }
 Clobbers => {
 let mut clobs = Vec::new();
-while p.token != token::EOF &&
+while p.token != token::Eof &&
-p.token != token::COLON &&
+p.token != token::Colon &&
-p.token != token::MOD_SEP {
+p.token != token::ModSep {

 if clobs.len() != 0 {
-p.eat(&token::COMMA);
+p.eat(&token::Comma);
 }

 let (s, _str_style) = p.parse_str();
@@ -172,8 +172,8 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 cx.span_warn(p.last_span, "unrecognized option");
 }

-if p.token == token::COMMA {
+if p.token == token::Comma {
-p.eat(&token::COMMA);
+p.eat(&token::Comma);
 }
 }
 StateNone => ()
@@ -183,17 +183,17 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 // MOD_SEP is a double colon '::' without space in between.
 // When encountered, the state must be advanced twice.
 match (&p.token, state.next(), state.next().next()) {
-(&token::COLON, StateNone, _) |
+(&token::Colon, StateNone, _) |
-(&token::MOD_SEP, _, StateNone) => {
+(&token::ModSep, _, StateNone) => {
 p.bump();
 break 'statement;
 }
-(&token::COLON, st, _) |
+(&token::Colon, st, _) |
-(&token::MOD_SEP, _, st) => {
+(&token::ModSep, _, st) => {
 p.bump();
 state = st;
 }
-(&token::EOF, _, _) => break 'statement,
+(&token::Eof, _, _) => break 'statement,
 _ => break
 }
 }
@@ -684,8 +684,8 @@ pub fn get_single_str_from_tts(cx: &ExtCtxt,
 cx.span_err(sp, format!("{} takes 1 argument.", name).as_slice());
 } else {
 match tts[0] {
-ast::TtToken(_, token::LIT_STR(ident)) => return Some(parse::str_lit(ident.as_str())),
+ast::TtToken(_, token::LitStr(ident)) => return Some(parse::str_lit(ident.as_str())),
-ast::TtToken(_, token::LIT_STR_RAW(ident, _)) => {
+ast::TtToken(_, token::LitStrRaw(ident, _)) => {
 return Some(parse::raw_str_lit(ident.as_str()))
 }
 _ => {
@@ -704,12 +704,12 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
 tts: &[ast::TokenTree]) -> Option<Vec<P<ast::Expr>>> {
 let mut p = cx.new_parser_from_tts(tts);
 let mut es = Vec::new();
-while p.token != token::EOF {
+while p.token != token::Eof {
 es.push(cx.expander().fold_expr(p.parse_expr()));
-if p.eat(&token::COMMA) {
+if p.eat(&token::Comma) {
 continue;
 }
-if p.token != token::EOF {
+if p.token != token::Eof {
 cx.span_err(sp, "expected token: `,`");
 return None;
 }
@@ -29,7 +29,7 @@ pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
 let mut p = cx.new_parser_from_tts(tts);
 let cfg = p.parse_meta_item();

-if !p.eat(&token::EOF) {
+if !p.eat(&token::Eof) {
 cx.span_err(sp, "expected 1 cfg-pattern");
 return DummyResult::expr(sp);
 }
@@ -23,21 +23,21 @@ pub fn expand_syntax_ext<'cx>(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]
 for (i, e) in tts.iter().enumerate() {
 if i & 1 == 1 {
 match *e {
-ast::TtToken(_, token::COMMA) => (),
+ast::TtToken(_, token::Comma) => {},
 _ => {
 cx.span_err(sp, "concat_idents! expecting comma.");
 return DummyResult::expr(sp);
-}
+},
 }
 } else {
 match *e {
-ast::TtToken(_, token::IDENT(ident,_)) => {
+ast::TtToken(_, token::Ident(ident, _)) => {
 res_str.push_str(token::get_ident(ident).get())
-}
+},
 _ => {
 cx.span_err(sp, "concat_idents! requires ident args.");
 return DummyResult::expr(sp);
-}
+},
 }
 }
 }
 }
@@ -91,7 +91,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
 // Parse the leading function expression (maybe a block, maybe a path)
 let invocation = if allow_method {
 let e = p.parse_expr();
-if !p.eat(&token::COMMA) {
+if !p.eat(&token::Comma) {
 ecx.span_err(sp, "expected token: `,`");
 return (Call(e), None);
 }
@@ -99,28 +99,28 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
 } else {
 Call(p.parse_expr())
 };
-if !p.eat(&token::COMMA) {
+if !p.eat(&token::Comma) {
 ecx.span_err(sp, "expected token: `,`");
 return (invocation, None);
 }

-if p.token == token::EOF {
+if p.token == token::Eof {
 ecx.span_err(sp, "requires at least a format string argument");
 return (invocation, None);
 }
 let fmtstr = p.parse_expr();
 let mut named = false;
-while p.token != token::EOF {
+while p.token != token::Eof {
-if !p.eat(&token::COMMA) {
+if !p.eat(&token::Comma) {
 ecx.span_err(sp, "expected token: `,`");
 return (invocation, None);
 }
-if p.token == token::EOF { break } // accept trailing commas
+if p.token == token::Eof { break } // accept trailing commas
 if named || (token::is_ident(&p.token) &&
-p.look_ahead(1, |t| *t == token::EQ)) {
+p.look_ahead(1, |t| *t == token::Eq)) {
 named = true;
 let ident = match p.token {
-token::IDENT(i, _) => {
+token::Ident(i, _) => {
 p.bump();
 i
 }
@@ -139,7 +139,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
 };
 let interned_name = token::get_ident(ident);
 let name = interned_name.get();
-p.expect(&token::EQ);
+p.expect(&token::Eq);
 let e = p.parse_expr();
 match names.find_equiv(&name) {
 None => {}
@@ -515,123 +515,122 @@ fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
 cx.expr_path(cx.path_global(sp, idents))
 }

-fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> P<ast::Expr> {
+fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
 let name = match bop {
-PLUS => "PLUS",
+token::Plus => "Plus",
-MINUS => "MINUS",
+token::Minus => "Minus",
-STAR => "STAR",
+token::Star => "Star",
-SLASH => "SLASH",
+token::Slash => "Slash",
-PERCENT => "PERCENT",
+token::Percent => "Percent",
-CARET => "CARET",
+token::Caret => "Caret",
-AND => "AND",
+token::And => "And",
-OR => "OR",
+token::Or => "Or",
-SHL => "SHL",
+token::Shl => "Shl",
-SHR => "SHR"
+token::Shr => "Shr"
 };
 mk_token_path(cx, sp, name)
 }

 fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {

 match *tok {
-BINOP(binop) => {
+token::BinOp(binop) => {
-return cx.expr_call(sp, mk_token_path(cx, sp, "BINOP"), vec!(mk_binop(cx, sp, binop)));
+return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
 }
-BINOPEQ(binop) => {
+token::BinOpEq(binop) => {
-return cx.expr_call(sp, mk_token_path(cx, sp, "BINOPEQ"),
+return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
 vec!(mk_binop(cx, sp, binop)));
 }

-LIT_BYTE(i) => {
+token::LitByte(i) => {
 let e_byte = mk_name(cx, sp, i.ident());

-return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_BYTE"), vec!(e_byte));
+return cx.expr_call(sp, mk_token_path(cx, sp, "LitByte"), vec!(e_byte));
 }

-LIT_CHAR(i) => {
+token::LitChar(i) => {
 let e_char = mk_name(cx, sp, i.ident());

-return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_CHAR"), vec!(e_char));
+return cx.expr_call(sp, mk_token_path(cx, sp, "LitChar"), vec!(e_char));
 }

-LIT_INTEGER(i) => {
+token::LitInteger(i) => {
 let e_int = mk_name(cx, sp, i.ident());
-return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_INTEGER"), vec!(e_int));
+return cx.expr_call(sp, mk_token_path(cx, sp, "LitInteger"), vec!(e_int));
 }

-LIT_FLOAT(fident) => {
+token::LitFloat(fident) => {
 let e_fident = mk_name(cx, sp, fident.ident());
-return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_FLOAT"), vec!(e_fident));
+return cx.expr_call(sp, mk_token_path(cx, sp, "LitFloat"), vec!(e_fident));
 }

-LIT_STR(ident) => {
+token::LitStr(ident) => {
 return cx.expr_call(sp,
-mk_token_path(cx, sp, "LIT_STR"),
+mk_token_path(cx, sp, "LitStr"),
 vec!(mk_name(cx, sp, ident.ident())));
 }

-LIT_STR_RAW(ident, n) => {
+token::LitStrRaw(ident, n) => {
 return cx.expr_call(sp,
-mk_token_path(cx, sp, "LIT_STR_RAW"),
+mk_token_path(cx, sp, "LitStrRaw"),
 vec!(mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n)));
 }

-IDENT(ident, b) => {
+token::Ident(ident, b) => {
 return cx.expr_call(sp,
-mk_token_path(cx, sp, "IDENT"),
+mk_token_path(cx, sp, "Ident"),
 vec!(mk_ident(cx, sp, ident), cx.expr_bool(sp, b)));
 }

-LIFETIME(ident) => {
+token::Lifetime(ident) => {
 return cx.expr_call(sp,
-mk_token_path(cx, sp, "LIFETIME"),
+mk_token_path(cx, sp, "Lifetime"),
 vec!(mk_ident(cx, sp, ident)));
 }

-DOC_COMMENT(ident) => {
+token::DocComment(ident) => {
 return cx.expr_call(sp,
-mk_token_path(cx, sp, "DOC_COMMENT"),
+mk_token_path(cx, sp, "DocComment"),
 vec!(mk_name(cx, sp, ident.ident())));
 }

-INTERPOLATED(_) => fail!("quote! with interpolated token"),
+token::Interpolated(_) => fail!("quote! with interpolated token"),

 _ => ()
 }

 let name = match *tok {
-EQ => "EQ",
+token::Eq => "Eq",
-LT => "LT",
+token::Lt => "Lt",
-LE => "LE",
+token::Le => "Le",
-EQEQ => "EQEQ",
+token::EqEq => "EqEq",
-NE => "NE",
+token::Ne => "Ne",
-GE => "GE",
+token::Ge => "Ge",
-GT => "GT",
+token::Gt => "Gt",
-ANDAND => "ANDAND",
+token::AndAnd => "AndAnd",
-OROR => "OROR",
+token::OrOr => "OrOr",
-NOT => "NOT",
+token::Not => "Not",
-TILDE => "TILDE",
+token::Tilde => "Tilde",
-AT => "AT",
+token::At => "At",
-DOT => "DOT",
+token::Dot => "Dot",
-DOTDOT => "DOTDOT",
+token::DotDot => "DotDot",
-COMMA => "COMMA",
+token::Comma => "Comma",
-SEMI => "SEMI",
+token::Semi => "Semi",
-COLON => "COLON",
+token::Colon => "Colon",
-MOD_SEP => "MOD_SEP",
+token::ModSep => "ModSep",
-RARROW => "RARROW",
+token::RArrow => "RArrow",
-LARROW => "LARROW",
+token::LArrow => "LArrow",
-FAT_ARROW => "FAT_ARROW",
+token::FatArrow => "FatArrow",
-LPAREN => "LPAREN",
+token::LParen => "LParen",
-RPAREN => "RPAREN",
+token::RParen => "RParen",
-LBRACKET => "LBRACKET",
+token::LBracket => "LBracket",
-RBRACKET => "RBRACKET",
+token::RBracket => "RBracket",
-LBRACE => "LBRACE",
+token::LBrace => "LBrace",
-RBRACE => "RBRACE",
+token::RBrace => "RBrace",
-POUND => "POUND",
+token::Pound => "Pound",
-DOLLAR => "DOLLAR",
+token::Dollar => "Dollar",
-UNDERSCORE => "UNDERSCORE",
+token::Underscore => "Underscore",
-EOF => "EOF",
+token::Eof => "Eof",
-_ => fail!()
+_ => fail!(),
 };
 mk_token_path(cx, sp, name)
 }
@@ -702,7 +701,7 @@ fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
 p.quote_depth += 1u;

 let cx_expr = p.parse_expr();
-if !p.eat(&token::COMMA) {
+if !p.eat(&token::Comma) {
 p.fatal("expected token `,`");
 }

@@ -85,7 +85,7 @@ use parse::lexer::*; //resolve bug?
 use parse::ParseSess;
 use parse::attr::ParserAttr;
 use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
-use parse::token::{Token, EOF, Nonterminal};
+use parse::token::{Token, Nonterminal};
 use parse::token;
 use ptr::P;

@@ -226,8 +226,8 @@ pub fn parse_or_else(sess: &ParseSess,
 /// unhygienic comparison)
 pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
 match (t1,t2) {
-(&token::IDENT(id1,_),&token::IDENT(id2,_))
+(&token::Ident(id1,_),&token::Ident(id2,_))
-| (&token::LIFETIME(id1),&token::LIFETIME(id2)) =>
+| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
 id1.name == id2.name,
 _ => *t1 == *t2
 }
@@ -354,9 +354,9 @@ pub fn parse(sess: &ParseSess,
 // Built-in nonterminals never start with these tokens,
 // so we can eliminate them from consideration.
 match tok {
-token::RPAREN |
+token::RParen |
-token::RBRACE |
+token::RBrace |
-token::RBRACKET => {},
+token::RBracket => {},
 _ => bb_eis.push(ei)
 }
 }
@@ -372,7 +372,7 @@ pub fn parse(sess: &ParseSess,
 }

 /* error messages here could be improved with links to orig. rules */
-if token_name_eq(&tok, &EOF) {
+if token_name_eq(&tok, &token::Eof) {
 if eof_eis.len() == 1u {
 let mut v = Vec::new();
 for dv in eof_eis.get_mut(0).matches.iter_mut() {
@@ -447,7 +447,7 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
 "ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)),
 // this could be handled like a token, since it is one
 "ident" => match p.token {
-token::IDENT(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
+token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
 _ => {
 let token_str = token::to_string(&p.token);
 p.fatal((format!("expected ident, found {}",
@@ -20,7 +20,7 @@ use parse::lexer::new_tt_reader;
 use parse::parser::Parser;
 use parse::attr::ParserAttr;
 use parse::token::{special_idents, gensym_ident};
-use parse::token::{FAT_ARROW, SEMI, NtMatchers, NtTT, EOF};
+use parse::token::{NtMatchers, NtTT};
 use parse::token;
 use print;
 use ptr::P;
@@ -43,10 +43,10 @@ impl<'a> ParserAnyMacro<'a> {
 /// allowed to be there.
 fn ensure_complete_parse(&self, allow_semi: bool) {
 let mut parser = self.parser.borrow_mut();
-if allow_semi && parser.token == SEMI {
+if allow_semi && parser.token == token::Semi {
 parser.bump()
 }
-if parser.token != EOF {
+if parser.token != token::Eof {
 let token_str = parser.this_token_to_string();
 let msg = format!("macro expansion ignores token `{}` and any \
 following",
@@ -89,7 +89,7 @@ impl<'a> MacResult for ParserAnyMacro<'a> {
 loop {
 let mut parser = self.parser.borrow_mut();
 match parser.token {
-EOF => break,
+token::Eof => break,
 _ => {
 let attrs = parser.parse_outer_attributes();
 ret.push(parser.parse_method(attrs, ast::Inherited))
@@ -231,12 +231,13 @@ pub fn add_new_extension<'cx>(cx: &'cx mut ExtCtxt,
 let argument_gram = vec!(
 ms(MatchSeq(vec!(
 ms(MatchNonterminal(lhs_nm, special_idents::matchers, 0u)),
-ms(MatchTok(FAT_ARROW)),
+ms(MatchTok(token::FatArrow)),
-ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))), Some(SEMI),
-ast::OneOrMore, 0u, 2u)),
+ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))),
+Some(token::Semi), ast::OneOrMore, 0u, 2u)),
 //to phase into semicolon-termination instead of
 //semicolon-separation
-ms(MatchSeq(vec!(ms(MatchTok(SEMI))), None, ast::ZeroOrMore, 2u, 2u)));
+ms(MatchSeq(vec!(ms(MatchTok(token::Semi))), None,
+ast::ZeroOrMore, 2u, 2u)));


 // Parse the macro_rules! invocation (`none` is for no interpolations):
@@ -13,7 +13,7 @@ use ast::{TokenTree, TtDelimited, TtToken, TtSequence, TtNonterminal, Ident};
 use codemap::{Span, DUMMY_SP};
 use diagnostic::SpanHandler;
 use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
-use parse::token::{EOF, INTERPOLATED, IDENT, Token, NtIdent};
+use parse::token::{Token, NtIdent};
 use parse::token;
 use parse::lexer::TokenAndSpan;

@@ -66,7 +66,7 @@ pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
 repeat_idx: Vec::new(),
 repeat_len: Vec::new(),
 /* dummy values, never read: */
-cur_tok: EOF,
+cur_tok: token::Eof,
 cur_span: DUMMY_SP,
 };
 tt_next_token(&mut r); /* get cur_tok and cur_span set up */
@@ -158,7 +158,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
 loop {
 let should_pop = match r.stack.last() {
 None => {
-assert_eq!(ret_val.tok, EOF);
+assert_eq!(ret_val.tok, token::Eof);
 return ret_val;
 }
 Some(frame) => {
@@ -175,7 +175,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
 let prev = r.stack.pop().unwrap();
 match r.stack.last_mut() {
 None => {
-r.cur_tok = EOF;
+r.cur_tok = token::Eof;
 return ret_val;
 }
 Some(frame) => {
@@ -272,13 +272,13 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
 (b) we actually can, since it's a token. */
 MatchedNonterminal(NtIdent(box sn, b)) => {
 r.cur_span = sp;
-r.cur_tok = IDENT(sn,b);
+r.cur_tok = token::Ident(sn,b);
 return ret_val;
 }
 MatchedNonterminal(ref other_whole_nt) => {
 // FIXME(pcwalton): Bad copy.
 r.cur_span = sp;
-r.cur_tok = INTERPOLATED((*other_whole_nt).clone());
+r.cur_tok = token::Interpolated((*other_whole_nt).clone());
 return ret_val;
 }
 MatchedSeq(..) => {
@@ -602,11 +602,11 @@ pub fn noop_fold_tts<T: Folder>(tts: &[TokenTree], fld: &mut T) -> Vec<TokenTree
 // apply ident folder if it's an ident, apply other folds to interpolated nodes
 pub fn noop_fold_token<T: Folder>(t: token::Token, fld: &mut T) -> token::Token {
 match t {
-token::IDENT(id, followed_by_colons) => {
+token::Ident(id, followed_by_colons) => {
-token::IDENT(fld.fold_ident(id), followed_by_colons)
+token::Ident(fld.fold_ident(id), followed_by_colons)
 }
-token::LIFETIME(id) => token::LIFETIME(fld.fold_ident(id)),
+token::Lifetime(id) => token::Lifetime(fld.fold_ident(id)),
-token::INTERPOLATED(nt) => token::INTERPOLATED(fld.fold_interpolated(nt)),
+token::Interpolated(nt) => token::Interpolated(fld.fold_interpolated(nt)),
 _ => t
 }
 }
@@ -14,7 +14,6 @@ use codemap::{spanned, Spanned, mk_sp, Span};
 use parse::common::*; //resolve bug?
 use parse::token;
 use parse::parser::Parser;
-use parse::token::INTERPOLATED;
 use ptr::P;

 /// A parser that can parse attributes.
@@ -36,10 +35,10 @@ impl<'a> ParserAttr for Parser<'a> {
 debug!("parse_outer_attributes: self.token={}",
 self.token);
 match self.token {
-token::POUND => {
+token::Pound => {
 attrs.push(self.parse_attribute(false));
 }
-token::DOC_COMMENT(s) => {
+token::DocComment(s) => {
 let attr = ::attr::mk_sugared_doc_attr(
 attr::mk_attr_id(),
 self.id_to_interned_str(s.ident()),
@@ -66,11 +65,11 @@ impl<'a> ParserAttr for Parser<'a> {
 debug!("parse_attributes: permit_inner={} self.token={}",
 permit_inner, self.token);
 let (span, value, mut style) = match self.token {
-token::POUND => {
+token::Pound => {
 let lo = self.span.lo;
 self.bump();

-let style = if self.eat(&token::NOT) {
+let style = if self.eat(&token::Not) {
 if !permit_inner {
 let span = self.span;
 self.span_err(span,
@@ -82,10 +81,10 @@ impl<'a> ParserAttr for Parser<'a> {
 ast::AttrOuter
 };

-self.expect(&token::LBRACKET);
+self.expect(&token::LBracket);
 let meta_item = self.parse_meta_item();
 let hi = self.span.hi;
-self.expect(&token::RBRACKET);
+self.expect(&token::RBracket);

 (mk_sp(lo, hi), meta_item, style)
 }
@@ -96,7 +95,7 @@ impl<'a> ParserAttr for Parser<'a> {
 }
 };

-if permit_inner && self.eat(&token::SEMI) {
+if permit_inner && self.eat(&token::Semi) {
 self.span_warn(span, "this inner attribute syntax is deprecated. \
 The new syntax is `#![foo]`, with a bang and no semicolon.");
 style = ast::AttrInner;
@@ -130,10 +129,10 @@ impl<'a> ParserAttr for Parser<'a> {
 let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new();
 loop {
 let attr = match self.token {
-token::POUND => {
+token::Pound => {
 self.parse_attribute(true)
 }
-token::DOC_COMMENT(s) => {
+token::DocComment(s) => {
 // we need to get the position of this token before we bump.
 let Span { lo, hi, .. } = self.span;
 self.bump();
@@ -161,7 +160,7 @@ impl<'a> ParserAttr for Parser<'a> {
 /// | IDENT meta_seq
 fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
 let nt_meta = match self.token {
-token::INTERPOLATED(token::NtMeta(ref e)) => {
+token::Interpolated(token::NtMeta(ref e)) => {
 Some(e.clone())
 }
 _ => None
@@ -179,7 +178,7 @@ impl<'a> ParserAttr for Parser<'a> {
 let ident = self.parse_ident();
 let name = self.id_to_interned_str(ident);
 match self.token {
-token::EQ => {
+token::Eq => {
 self.bump();
 let lit = self.parse_lit();
 // FIXME #623 Non-string meta items are not serialized correctly;
@@ -195,7 +194,7 @@ impl<'a> ParserAttr for Parser<'a> {
 let hi = self.span.hi;
 P(spanned(lo, hi, ast::MetaNameValue(name, lit)))
 }
-token::LPAREN => {
+token::LParen => {
 let inner_items = self.parse_meta_seq();
 let hi = self.span.hi;
 P(spanned(lo, hi, ast::MetaList(name, inner_items)))
@@ -209,15 +208,15 @@ impl<'a> ParserAttr for Parser<'a> {

 /// matches meta_seq = ( COMMASEP(meta_item) )
 fn parse_meta_seq(&mut self) -> Vec<P<ast::MetaItem>> {
-self.parse_seq(&token::LPAREN,
+self.parse_seq(&token::LParen,
-&token::RPAREN,
+&token::RParen,
-seq_sep_trailing_disallowed(token::COMMA),
+seq_sep_trailing_disallowed(token::Comma),
 |p| p.parse_meta_item()).node
 }

 fn parse_optional_meta(&mut self) -> Vec<P<ast::MetaItem>> {
 match self.token {
-token::LPAREN => self.parse_meta_seq(),
+token::LParen => self.parse_meta_seq(),
 _ => Vec::new()
 }
 }
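The `parse_seq` call above abstracts the common "open delimiter, separator-delimited items, close delimiter" pattern behind a pair of delimiter tokens and a separator policy. A self-contained sketch of that pattern over a toy token stream (the `Tok` type and this `parse_seq` are illustrative stand-ins, not libsyntax's API, and the sketch does not enforce the real helper's separator-placement rules):

```rust
// Toy illustration of the parse_seq pattern used above: consume an opening
// delimiter, then separator-delimited items until the closing delimiter.
#[derive(Debug, PartialEq)]
enum Tok {
    LParen,
    RParen,
    Comma,
    Ident(String),
}

fn parse_seq(toks: &[Tok]) -> Option<Vec<String>> {
    let mut it = toks.iter();
    if it.next()? != &Tok::LParen {
        return None; // sequences must start with the open delimiter
    }
    let mut items = Vec::new();
    loop {
        match it.next()? {
            Tok::RParen => return Some(items), // close delimiter ends the sequence
            Tok::Ident(name) => items.push(name.clone()),
            Tok::Comma => {}            // separator; items continue
            Tok::LParen => return None, // nesting is out of scope for this sketch
        }
    }
}

fn main() {
    let toks = [
        Tok::LParen,
        Tok::Ident("unix".into()),
        Tok::Comma,
        Tok::Ident("windows".into()),
        Tok::RParen,
    ];
    assert_eq!(parse_seq(&toks), Some(vec!["unix".into(), "windows".into()]));
}
```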
@@ -69,7 +69,7 @@ impl<'a> Reader for StringReader<'a> {
 /// Return the next token. EFFECT: advances the string_reader.
 fn next_token(&mut self) -> TokenAndSpan {
 let ret_val = TokenAndSpan {
-tok: replace(&mut self.peek_tok, token::UNDERSCORE),
+tok: replace(&mut self.peek_tok, token::Underscore),
 sp: self.peek_span,
 };
 self.advance_token();
@@ -92,7 +92,7 @@ impl<'a> Reader for StringReader<'a> {

 impl<'a> Reader for TtReader<'a> {
 fn is_eof(&self) -> bool {
-self.cur_tok == token::EOF
+self.cur_tok == token::Eof
 }
 fn next_token(&mut self) -> TokenAndSpan {
 let r = tt_next_token(self);
@@ -136,7 +136,7 @@ impl<'a> StringReader<'a> {
 curr: Some('\n'),
 filemap: filemap,
 /* dummy values; not read */
-peek_tok: token::EOF,
+peek_tok: token::Eof,
 peek_span: codemap::DUMMY_SP,
 read_embedded_ident: false,
 };
@@ -213,7 +213,7 @@ impl<'a> StringReader<'a> {
 },
 None => {
 if self.is_eof() {
-self.peek_tok = token::EOF;
+self.peek_tok = token::Eof;
 } else {
 let start_bytepos = self.last_pos;
 self.peek_tok = self.next_token_inner();
@@ -396,9 +396,9 @@ impl<'a> StringReader<'a> {
 return self.with_str_from(start_bpos, |string| {
 // but comments with only more "/"s are not
 let tok = if is_doc_comment(string) {
-token::DOC_COMMENT(token::intern(string))
+token::DocComment(token::intern(string))
 } else {
-token::COMMENT
+token::Comment
 };

 return Some(TokenAndSpan{
@@ -410,7 +410,7 @@ impl<'a> StringReader<'a> {
 let start_bpos = self.last_pos - BytePos(2);
 while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
 return Some(TokenAndSpan {
-tok: token::COMMENT,
+tok: token::Comment,
 sp: codemap::mk_sp(start_bpos, self.last_pos)
 });
 }
@@ -440,7 +440,7 @@ impl<'a> StringReader<'a> {
 let start = self.last_pos;
 while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
 return Some(TokenAndSpan {
-tok: token::SHEBANG(self.name_from(start)),
+tok: token::Shebang(self.name_from(start)),
 sp: codemap::mk_sp(start, self.last_pos)
 });
 }
@@ -466,7 +466,7 @@ impl<'a> StringReader<'a> {
 let start_bpos = self.last_pos;
 while is_whitespace(self.curr) { self.bump(); }
 let c = Some(TokenAndSpan {
-tok: token::WS,
+tok: token::Whitespace,
 sp: codemap::mk_sp(start_bpos, self.last_pos)
 });
 debug!("scanning whitespace: {}", c);
@@ -519,9 +519,9 @@ impl<'a> StringReader<'a> {
 self.translate_crlf(start_bpos, string,
 "bare CR not allowed in block doc-comment")
 } else { string.into_maybe_owned() };
-token::DOC_COMMENT(token::intern(string.as_slice()))
+token::DocComment(token::intern(string.as_slice()))
 } else {
-token::COMMENT
+token::Comment
 };

 Some(TokenAndSpan{
@@ -642,17 +642,17 @@ impl<'a> StringReader<'a> {
 }
 'u' | 'i' => {
 self.scan_int_suffix();
-return token::LIT_INTEGER(self.name_from(start_bpos));
+return token::LitInteger(self.name_from(start_bpos));
 },
 'f' => {
 let last_pos = self.last_pos;
 self.scan_float_suffix();
 self.check_float_base(start_bpos, last_pos, base);
-return token::LIT_FLOAT(self.name_from(start_bpos));
+return token::LitFloat(self.name_from(start_bpos));
 }
 _ => {
 // just a 0
-return token::LIT_INTEGER(self.name_from(start_bpos));
+return token::LitInteger(self.name_from(start_bpos));
 }
 }
 } else if c.is_digit_radix(10) {
@@ -665,7 +665,7 @@ impl<'a> StringReader<'a> {
 self.err_span_(start_bpos, self.last_pos, "no valid digits found for number");
 // eat any suffix
 self.scan_int_suffix();
-return token::LIT_INTEGER(token::intern("0"));
+return token::LitInteger(token::intern("0"));
 }

 // might be a float, but don't be greedy if this is actually an
@@ -683,13 +683,13 @@ impl<'a> StringReader<'a> {
 }
 let last_pos = self.last_pos;
 self.check_float_base(start_bpos, last_pos, base);
-return token::LIT_FLOAT(self.name_from(start_bpos));
+return token::LitFloat(self.name_from(start_bpos));
 } else if self.curr_is('f') {
 // or it might be an integer literal suffixed as a float
 self.scan_float_suffix();
 let last_pos = self.last_pos;
 self.check_float_base(start_bpos, last_pos, base);
-return token::LIT_FLOAT(self.name_from(start_bpos));
+return token::LitFloat(self.name_from(start_bpos));
 } else {
 // it might be a float if it has an exponent
 if self.curr_is('e') || self.curr_is('E') {
@@ -697,11 +697,11 @@ impl<'a> StringReader<'a> {
 self.scan_float_suffix();
 let last_pos = self.last_pos;
 self.check_float_base(start_bpos, last_pos, base);
-return token::LIT_FLOAT(self.name_from(start_bpos));
+return token::LitFloat(self.name_from(start_bpos));
 }
 // but we certainly have an integer!
 self.scan_int_suffix();
-return token::LIT_INTEGER(self.name_from(start_bpos));
+return token::LitInteger(self.name_from(start_bpos));
 }
 }

@@ -889,13 +889,13 @@ impl<'a> StringReader<'a> {
 }
 }

-fn binop(&mut self, op: token::BinOp) -> token::Token {
+fn binop(&mut self, op: token::BinOpToken) -> token::Token {
 self.bump();
 if self.curr_is('=') {
 self.bump();
-return token::BINOPEQ(op);
+return token::BinOpEq(op);
 } else {
-return token::BINOP(op);
+return token::BinOp(op);
 }
 }

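The `binop` helper above is one-character lookahead in miniature: after a binary-operator character, a trailing `=` upgrades the token to its compound-assignment form. A standalone sketch of the same decision, with toy types instead of the lexer's real state:

```rust
// Toy sketch of the lookahead in `binop` above:
// `+` becomes BinOp(Plus), while `+=` becomes BinOpEq(Plus).
#[derive(Clone, Copy, Debug, PartialEq)]
enum BinOpToken {
    Plus,
    Minus,
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Token {
    BinOp(BinOpToken),
    BinOpEq(BinOpToken),
}

fn binop(op: BinOpToken, next_char: Option<char>) -> Token {
    if next_char == Some('=') {
        Token::BinOpEq(op) // compound assignment spelling, e.g. `+=`
    } else {
        Token::BinOp(op) // plain operator, e.g. `+`
    }
}

fn main() {
    assert_eq!(binop(BinOpToken::Plus, Some('=')), Token::BinOpEq(BinOpToken::Plus));
    assert_eq!(binop(BinOpToken::Minus, Some('1')), Token::BinOp(BinOpToken::Minus));
}
```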
@@ -919,12 +919,12 @@ impl<'a> StringReader<'a> {

 return self.with_str_from(start, |string| {
 if string == "_" {
-token::UNDERSCORE
+token::Underscore
 } else {
 let is_mod_name = self.curr_is(':') && self.nextch_is(':');

 // FIXME: perform NFKC normalization here. (Issue #2253)
-token::IDENT(str_to_ident(string), is_mod_name)
+token::Ident(str_to_ident(string), is_mod_name)
 }
 })
 }
@@ -938,7 +938,7 @@ impl<'a> StringReader<'a> {
 ('\x00', Some('n'), Some('a')) => {
 let ast_ident = self.scan_embedded_hygienic_ident();
 let is_mod_name = self.curr_is(':') && self.nextch_is(':');
-return token::IDENT(ast_ident, is_mod_name);
+return token::Ident(ast_ident, is_mod_name);
 }
 _ => {}
 }
@@ -946,84 +946,84 @@ impl<'a> StringReader<'a> {

 match c.expect("next_token_inner called at EOF") {
 // One-byte tokens.
-';' => { self.bump(); return token::SEMI; }
+';' => { self.bump(); return token::Semi; }
-',' => { self.bump(); return token::COMMA; }
+',' => { self.bump(); return token::Comma; }
 '.' => {
 self.bump();
 return if self.curr_is('.') {
 self.bump();
 if self.curr_is('.') {
 self.bump();
-token::DOTDOTDOT
+token::DotDotDot
 } else {
-token::DOTDOT
+token::DotDot
 }
 } else {
-token::DOT
+token::Dot
 };
 }
-'(' => { self.bump(); return token::LPAREN; }
+'(' => { self.bump(); return token::LParen; }
-')' => { self.bump(); return token::RPAREN; }
+')' => { self.bump(); return token::RParen; }
-'{' => { self.bump(); return token::LBRACE; }
+'{' => { self.bump(); return token::LBrace; }
-'}' => { self.bump(); return token::RBRACE; }
+'}' => { self.bump(); return token::RBrace; }
-'[' => { self.bump(); return token::LBRACKET; }
+'[' => { self.bump(); return token::LBracket; }
-']' => { self.bump(); return token::RBRACKET; }
+']' => { self.bump(); return token::RBracket; }
-'@' => { self.bump(); return token::AT; }
+'@' => { self.bump(); return token::At; }
-'#' => { self.bump(); return token::POUND; }
+'#' => { self.bump(); return token::Pound; }
-'~' => { self.bump(); return token::TILDE; }
+'~' => { self.bump(); return token::Tilde; }
-'?' => { self.bump(); return token::QUESTION; }
+'?' => { self.bump(); return token::Question; }
 ':' => {
 self.bump();
 if self.curr_is(':') {
 self.bump();
-return token::MOD_SEP;
+return token::ModSep;
 } else {
-return token::COLON;
+return token::Colon;
 }
 }

-'$' => { self.bump(); return token::DOLLAR; }
+'$' => { self.bump(); return token::Dollar; }

 // Multi-byte tokens.
 '=' => {
 self.bump();
 if self.curr_is('=') {
 self.bump();
-return token::EQEQ;
+return token::EqEq;
 } else if self.curr_is('>') {
 self.bump();
-return token::FAT_ARROW;
+return token::FatArrow;
 } else {
-return token::EQ;
+return token::Eq;
 }
 }
 '!' => {
 self.bump();
 if self.curr_is('=') {
 self.bump();
-return token::NE;
+return token::Ne;
-} else { return token::NOT; }
+} else { return token::Not; }
 }
 '<' => {
 self.bump();
 match self.curr.unwrap_or('\x00') {
-'=' => { self.bump(); return token::LE; }
+'=' => { self.bump(); return token::Le; }
-'<' => { return self.binop(token::SHL); }
+'<' => { return self.binop(token::Shl); }
 '-' => {
 self.bump();
 match self.curr.unwrap_or('\x00') {
-_ => { return token::LARROW; }
+_ => { return token::LArrow; }
 }
 }
-_ => { return token::LT; }
+_ => { return token::Lt; }
 }
 }
 '>' => {
 self.bump();
 match self.curr.unwrap_or('\x00') {
-'=' => { self.bump(); return token::GE; }
+'=' => { self.bump(); return token::Ge; }
-'>' => { return self.binop(token::SHR); }
+'>' => { return self.binop(token::Shr); }
-_ => { return token::GT; }
+_ => { return token::Gt; }
 }
 }
 '\'' => {
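The `'='` arm above shows the general shape of every multi-byte case: bump past the first character, then branch on the next one to pick `Eq`, `EqEq`, or `FatArrow`. A compact standalone sketch of that dispatch (a simplified scanner over a character iterator, not the real `StringReader`):

```rust
// Toy sketch of the multi-byte dispatch above: after consuming '=',
// peek at the next character to decide between Eq, EqEq and FatArrow.
#[derive(Debug, PartialEq)]
enum Token {
    Eq,
    EqEq,
    FatArrow,
}

fn scan_eq(rest: &mut std::str::Chars) -> Token {
    // `rest` holds the characters after the initial '='.
    // Cloning the iterator is a cheap way to peek without consuming.
    match rest.clone().next() {
        Some('=') => {
            rest.next();
            Token::EqEq // "=="
        }
        Some('>') => {
            rest.next();
            Token::FatArrow // "=>"
        }
        _ => Token::Eq, // plain "="
    }
}

fn main() {
    let mut it = ">rest".chars();
    assert_eq!(scan_eq(&mut it), Token::FatArrow);
    assert_eq!(it.as_str(), "rest");
}
```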
@@ -1056,7 +1056,7 @@ impl<'a> StringReader<'a> {
 str_to_ident(lifetime_name)
 });
 let keyword_checking_token =
-&token::IDENT(keyword_checking_ident, false);
+&token::Ident(keyword_checking_ident, false);
 let last_bpos = self.last_pos;
 if token::is_keyword(token::keywords::Self,
 keyword_checking_token) {
@@ -1071,7 +1071,7 @@ impl<'a> StringReader<'a> {
 last_bpos,
 "invalid lifetime name");
 }
-return token::LIFETIME(ident);
+return token::Lifetime(ident);
 }

 // Otherwise it is a character constant:
@@ -1087,7 +1087,7 @@ impl<'a> StringReader<'a> {
 }
 let id = if valid { self.name_from(start) } else { token::intern("0") };
 self.bump(); // advance curr past token
-return token::LIT_CHAR(id);
+return token::LitChar(id);
 }
 'b' => {
 self.bump();
@@ -1095,7 +1095,7 @@ impl<'a> StringReader<'a> {
 Some('\'') => self.scan_byte(),
 Some('"') => self.scan_byte_string(),
 Some('r') => self.scan_raw_byte_string(),
-_ => unreachable!() // Should have been a token::IDENT above.
+_ => unreachable!() // Should have been a token::Ident above.
 };

 }
@@ -1118,7 +1118,7 @@ impl<'a> StringReader<'a> {
 let id = if valid { self.name_from(start_bpos + BytePos(1)) }
 else { token::intern("??") };
 self.bump();
-return token::LIT_STR(id);
+return token::LitStr(id);
 }
 'r' => {
 let start_bpos = self.last_pos;
@@ -1185,33 +1185,33 @@ impl<'a> StringReader<'a> {
 } else {
 token::intern("??")
 };
-return token::LIT_STR_RAW(id, hash_count);
+return token::LitStrRaw(id, hash_count);
 }
 '-' => {
 if self.nextch_is('>') {
 self.bump();
 self.bump();
-return token::RARROW;
+return token::RArrow;
-} else { return self.binop(token::MINUS); }
+} else { return self.binop(token::Minus); }
 }
 '&' => {
 if self.nextch_is('&') {
 self.bump();
 self.bump();
-return token::ANDAND;
+return token::AndAnd;
-} else { return self.binop(token::AND); }
+} else { return self.binop(token::And); }
 }
 '|' => {
 match self.nextch() {
-Some('|') => { self.bump(); self.bump(); return token::OROR; }
+Some('|') => { self.bump(); self.bump(); return token::OrOr; }
-_ => { return self.binop(token::OR); }
+_ => { return self.binop(token::Or); }
 }
 }
-'+' => { return self.binop(token::PLUS); }
+'+' => { return self.binop(token::Plus); }
-'*' => { return self.binop(token::STAR); }
+'*' => { return self.binop(token::Star); }
-'/' => { return self.binop(token::SLASH); }
+'/' => { return self.binop(token::Slash); }
-'^' => { return self.binop(token::CARET); }
+'^' => { return self.binop(token::Caret); }
-'%' => { return self.binop(token::PERCENT); }
+'%' => { return self.binop(token::Percent); }
 c => {
 let last_bpos = self.last_pos;
 let bpos = self.pos;
|
||||||
|
|
||||||
let id = if valid { self.name_from(start) } else { token::intern("??") };
|
let id = if valid { self.name_from(start) } else { token::intern("??") };
|
||||||
self.bump(); // advance curr past token
|
self.bump(); // advance curr past token
|
||||||
return token::LIT_BYTE(id);
|
return token::LitByte(id);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn scan_byte_string(&mut self) -> token::Token {
|
fn scan_byte_string(&mut self) -> token::Token {
|
||||||
|
@ -1297,7 +1297,7 @@ impl<'a> StringReader<'a> {
|
||||||
}
|
}
|
||||||
let id = if valid { self.name_from(start) } else { token::intern("??") };
|
let id = if valid { self.name_from(start) } else { token::intern("??") };
|
||||||
self.bump();
|
self.bump();
|
||||||
return token::LIT_BINARY(id);
|
return token::LitBinary(id);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn scan_raw_byte_string(&mut self) -> token::Token {
|
fn scan_raw_byte_string(&mut self) -> token::Token {
|
||||||
|
@ -1348,7 +1348,7 @@ impl<'a> StringReader<'a> {
|
||||||
self.bump();
|
self.bump();
|
||||||
}
|
}
|
||||||
self.bump();
|
self.bump();
|
||||||
return token::LIT_BINARY_RAW(self.name_from_to(content_start_bpos, content_end_bpos),
|
return token::LitBinaryRaw(self.name_from_to(content_start_bpos, content_end_bpos),
|
||||||
hash_count);
|
hash_count);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1431,20 +1431,20 @@ mod test {
|
||||||
"/* my source file */ \
|
"/* my source file */ \
|
||||||
fn main() { println!(\"zebra\"); }\n".to_string());
|
fn main() { println!(\"zebra\"); }\n".to_string());
|
||||||
let id = str_to_ident("fn");
|
let id = str_to_ident("fn");
|
||||||
assert_eq!(string_reader.next_token().tok, token::COMMENT);
|
assert_eq!(string_reader.next_token().tok, token::Comment);
|
||||||
assert_eq!(string_reader.next_token().tok, token::WS);
|
assert_eq!(string_reader.next_token().tok, token::Whitespace);
|
||||||
let tok1 = string_reader.next_token();
|
let tok1 = string_reader.next_token();
|
||||||
let tok2 = TokenAndSpan{
|
let tok2 = TokenAndSpan{
|
||||||
tok:token::IDENT(id, false),
|
tok:token::Ident(id, false),
|
||||||
sp:Span {lo:BytePos(21),hi:BytePos(23),expn_id: NO_EXPANSION}};
|
sp:Span {lo:BytePos(21),hi:BytePos(23),expn_id: NO_EXPANSION}};
|
||||||
assert_eq!(tok1,tok2);
|
assert_eq!(tok1,tok2);
|
||||||
assert_eq!(string_reader.next_token().tok, token::WS);
|
assert_eq!(string_reader.next_token().tok, token::Whitespace);
|
||||||
// the 'main' id is already read:
|
// the 'main' id is already read:
|
||||||
assert_eq!(string_reader.last_pos.clone(), BytePos(28));
|
assert_eq!(string_reader.last_pos.clone(), BytePos(28));
|
||||||
// read another token:
|
// read another token:
|
||||||
let tok3 = string_reader.next_token();
|
let tok3 = string_reader.next_token();
|
||||||
let tok4 = TokenAndSpan{
|
let tok4 = TokenAndSpan{
|
||||||
tok:token::IDENT(str_to_ident("main"), false),
|
tok:token::Ident(str_to_ident("main"), false),
|
||||||
sp:Span {lo:BytePos(24),hi:BytePos(28),expn_id: NO_EXPANSION}};
|
sp:Span {lo:BytePos(24),hi:BytePos(28),expn_id: NO_EXPANSION}};
|
||||||
assert_eq!(tok3,tok4);
|
assert_eq!(tok3,tok4);
|
||||||
// the lparen is already read:
|
// the lparen is already read:
|
||||||
|
@ -1461,64 +1461,64 @@ mod test {
|
||||||
|
|
||||||
// make the identifier by looking up the string in the interner
|
// make the identifier by looking up the string in the interner
|
||||||
fn mk_ident (id: &str, is_mod_name: bool) -> token::Token {
|
fn mk_ident (id: &str, is_mod_name: bool) -> token::Token {
|
||||||
token::IDENT (str_to_ident(id),is_mod_name)
|
token::Ident (str_to_ident(id),is_mod_name)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn doublecolonparsing () {
|
#[test] fn doublecolonparsing () {
|
||||||
check_tokenization(setup(&mk_sh(), "a b".to_string()),
|
check_tokenization(setup(&mk_sh(), "a b".to_string()),
|
||||||
vec!(mk_ident("a",false),
|
vec!(mk_ident("a",false),
|
||||||
token::WS,
|
token::Whitespace,
|
||||||
mk_ident("b",false)));
|
mk_ident("b",false)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn dcparsing_2 () {
|
#[test] fn dcparsing_2 () {
|
||||||
check_tokenization(setup(&mk_sh(), "a::b".to_string()),
|
check_tokenization(setup(&mk_sh(), "a::b".to_string()),
|
||||||
vec!(mk_ident("a",true),
|
vec!(mk_ident("a",true),
|
||||||
token::MOD_SEP,
|
token::ModSep,
|
||||||
mk_ident("b",false)));
|
mk_ident("b",false)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn dcparsing_3 () {
|
#[test] fn dcparsing_3 () {
|
||||||
check_tokenization(setup(&mk_sh(), "a ::b".to_string()),
|
check_tokenization(setup(&mk_sh(), "a ::b".to_string()),
|
||||||
vec!(mk_ident("a",false),
|
vec!(mk_ident("a",false),
|
||||||
token::WS,
|
token::Whitespace,
|
||||||
token::MOD_SEP,
|
token::ModSep,
|
||||||
mk_ident("b",false)));
|
mk_ident("b",false)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn dcparsing_4 () {
|
#[test] fn dcparsing_4 () {
|
||||||
check_tokenization(setup(&mk_sh(), "a:: b".to_string()),
|
check_tokenization(setup(&mk_sh(), "a:: b".to_string()),
|
||||||
vec!(mk_ident("a",true),
|
vec!(mk_ident("a",true),
|
||||||
token::MOD_SEP,
|
token::ModSep,
|
||||||
token::WS,
|
token::Whitespace,
|
||||||
mk_ident("b",false)));
|
mk_ident("b",false)));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn character_a() {
|
#[test] fn character_a() {
|
||||||
assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok,
|
assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok,
|
||||||
token::LIT_CHAR(token::intern("a")));
|
token::LitChar(token::intern("a")));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn character_space() {
|
#[test] fn character_space() {
|
||||||
assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok,
|
assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok,
|
||||||
token::LIT_CHAR(token::intern(" ")));
|
token::LitChar(token::intern(" ")));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn character_escaped() {
|
#[test] fn character_escaped() {
|
||||||
assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok,
|
assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok,
|
||||||
token::LIT_CHAR(token::intern("\\n")));
|
token::LitChar(token::intern("\\n")));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn lifetime_name() {
|
#[test] fn lifetime_name() {
|
||||||
assert_eq!(setup(&mk_sh(), "'abc".to_string()).next_token().tok,
|
assert_eq!(setup(&mk_sh(), "'abc".to_string()).next_token().tok,
|
||||||
token::LIFETIME(token::str_to_ident("'abc")));
|
token::Lifetime(token::str_to_ident("'abc")));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn raw_string() {
|
#[test] fn raw_string() {
|
||||||
assert_eq!(setup(&mk_sh(),
|
assert_eq!(setup(&mk_sh(),
|
||||||
"r###\"\"#a\\b\x00c\"\"###".to_string()).next_token()
|
"r###\"\"#a\\b\x00c\"\"###".to_string()).next_token()
|
||||||
.tok,
|
.tok,
|
||||||
token::LIT_STR_RAW(token::intern("\"#a\\b\x00c\""), 3));
|
token::LitStrRaw(token::intern("\"#a\\b\x00c\""), 3));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test] fn line_doc_comments() {
|
#[test] fn line_doc_comments() {
|
||||||
|
@ -1531,10 +1531,10 @@ mod test {
|
||||||
let sh = mk_sh();
|
let sh = mk_sh();
|
||||||
let mut lexer = setup(&sh, "/* /* */ */'a'".to_string());
|
let mut lexer = setup(&sh, "/* /* */ */'a'".to_string());
|
||||||
match lexer.next_token().tok {
|
match lexer.next_token().tok {
|
||||||
token::COMMENT => { },
|
token::Comment => { },
|
||||||
_ => fail!("expected a comment!")
|
_ => fail!("expected a comment!")
|
||||||
}
|
}
|
||||||
assert_eq!(lexer.next_token().tok, token::LIT_CHAR(token::intern("a")));
|
assert_eq!(lexer.next_token().tok, token::LitChar(token::intern("a")));
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -793,34 +793,34 @@ mod test {
 let tts = string_to_tts("macro_rules! zip (($a)=>($a))".to_string());
 let tts: &[ast::TokenTree] = tts.as_slice();
 match tts {
-[ast::TtToken(_, token::IDENT(name_macro_rules, false)),
+[ast::TtToken(_, token::Ident(name_macro_rules, false)),
-ast::TtToken(_, token::NOT),
+ast::TtToken(_, token::Not),
-ast::TtToken(_, token::IDENT(name_zip, false)),
+ast::TtToken(_, token::Ident(name_zip, false)),
 ast::TtDelimited(_, ref macro_delimed)]
 if name_macro_rules.as_str() == "macro_rules"
 && name_zip.as_str() == "zip" => {
 let (ref macro_open, ref macro_tts, ref macro_close) = **macro_delimed;
 match (macro_open, macro_tts.as_slice(), macro_close) {
-(&ast::Delimiter { token: token::LPAREN, .. },
+(&ast::Delimiter { token: token::LParen, .. },
 [ast::TtDelimited(_, ref first_delimed),
-ast::TtToken(_, token::FAT_ARROW),
+ast::TtToken(_, token::FatArrow),
 ast::TtDelimited(_, ref second_delimed)],
-&ast::Delimiter { token: token::RPAREN, .. }) => {
+&ast::Delimiter { token: token::RParen, .. }) => {
 let (ref first_open, ref first_tts, ref first_close) = **first_delimed;
 match (first_open, first_tts.as_slice(), first_close) {
-(&ast::Delimiter { token: token::LPAREN, .. },
+(&ast::Delimiter { token: token::LParen, .. },
-[ast::TtToken(_, token::DOLLAR),
+[ast::TtToken(_, token::Dollar),
-ast::TtToken(_, token::IDENT(name, false))],
+ast::TtToken(_, token::Ident(name, false))],
-&ast::Delimiter { token: token::RPAREN, .. })
+&ast::Delimiter { token: token::RParen, .. })
 if name.as_str() == "a" => {},
 _ => fail!("value 3: {}", **first_delimed),
 }
 let (ref second_open, ref second_tts, ref second_close) = **second_delimed;
 match (second_open, second_tts.as_slice(), second_close) {
-(&ast::Delimiter { token: token::LPAREN, .. },
+(&ast::Delimiter { token: token::LParen, .. },
-[ast::TtToken(_, token::DOLLAR),
+[ast::TtToken(_, token::Dollar),
-ast::TtToken(_, token::IDENT(name, false))],
+ast::TtToken(_, token::Ident(name, false))],
-&ast::Delimiter { token: token::RPAREN, .. })
+&ast::Delimiter { token: token::RParen, .. })
 if name.as_str() == "a" => {},
 _ => fail!("value 4: {}", **second_delimed),
 }
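The test above leans on slice patterns to destructure a fixed-shape token stream in a single `match`. A small self-contained illustration of the same technique (toy token type and modern slice-pattern syntax, not libsyntax's `TokenTree`):

```rust
// Toy illustration of the slice-pattern matching used in the test above:
// destructure a fixed-shape token sequence in one match arm.
#[derive(Debug, PartialEq)]
enum Tok {
    Ident(String),
    Not,
    LParen,
    RParen,
}

fn is_macro_invocation(toks: &[Tok]) -> bool {
    match toks {
        // name ! ( ... ): an identifier, a bang, then a delimited group.
        // `..` absorbs whatever sits between the delimiters.
        [Tok::Ident(_), Tok::Not, Tok::LParen, .., Tok::RParen] => true,
        _ => false,
    }
}

fn main() {
    let toks = [Tok::Ident("zip".into()), Tok::Not, Tok::LParen, Tok::RParen];
    assert!(is_macro_invocation(&toks));
    assert!(!is_macro_invocation(&[Tok::Not]));
}
```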
@@ -842,7 +842,7 @@ mod test {
 \"fields\":[\
 null,\
 {\
-\"variant\":\"IDENT\",\
+\"variant\":\"Ident\",\
 \"fields\":[\
 \"fn\",\
 false\
@@ -855,7 +855,7 @@ mod test {
 \"fields\":[\
 null,\
 {\
-\"variant\":\"IDENT\",\
+\"variant\":\"Ident\",\
 \"fields\":[\
 \"a\",\
 false\
@@ -870,7 +870,7 @@ mod test {
 [\
 {\
 \"span\":null,\
-\"token\":\"LPAREN\"\
+\"token\":\"LParen\"\
 },\
 [\
 {\
@@ -878,7 +878,7 @@ mod test {
 \"fields\":[\
 null,\
 {\
-\"variant\":\"IDENT\",\
+\"variant\":\"Ident\",\
 \"fields\":[\
 \"b\",\
 false\
@@ -890,7 +890,7 @@ mod test {
 \"variant\":\"TtToken\",\
 \"fields\":[\
 null,\
-\"COLON\"\
+\"Colon\"\
 ]\
 },\
 {\
@@ -898,7 +898,7 @@ mod test {
 \"fields\":[\
 null,\
 {\
-\"variant\":\"IDENT\",\
+\"variant\":\"Ident\",\
 \"fields\":[\
 \"int\",\
 false\
@@ -909,7 +909,7 @@ mod test {
 ],\
 {\
 \"span\":null,\
-\"token\":\"RPAREN\"\
+\"token\":\"RParen\"\
 }\
 ]\
 ]\
@@ -921,7 +921,7 @@ mod test {
 [\
 {\
 \"span\":null,\
-\"token\":\"LBRACE\"\
+\"token\":\"LBrace\"\
 },\
 [\
 {\
@@ -929,7 +929,7 @@ mod test {
 \"fields\":[\
 null,\
 {\
-\"variant\":\"IDENT\",\
+\"variant\":\"Ident\",\
 \"fields\":[\
 \"b\",\
 false\
@@ -941,13 +941,13 @@ mod test {
 \"variant\":\"TtToken\",\
 \"fields\":[\
 null,\
-\"SEMI\"\
+\"Semi\"\
 ]\
 }\
 ],\
 {\
 \"span\":null,\
-\"token\":\"RBRACE\"\
+\"token\":\"RBrace\"\
 }\
 ]\
 ]\
@@ -1002,7 +1002,7 @@ mod test {
 }

 fn parser_done(p: Parser){
-assert_eq!(p.token.clone(), token::EOF);
+assert_eq!(p.token.clone(), token::Eof);
 }

 #[test] fn parse_ident_pat () {
@@ -118,7 +118,7 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> {

 fn is_obsolete_ident(&mut self, ident: &str) -> bool {
 match self.token {
-token::IDENT(sid, _) => {
+token::Ident(sid, _) => {
 token::get_ident(sid).equiv(&ident)
 }
 _ => false

(File diff suppressed because it is too large)
@@ -9,9 +9,7 @@
 // except according to those terms.

 use ast;
-use ast::{Ident, Name, Mrk};
 use ext::mtwt;
-use parse::token;
 use ptr::P;
 use util::interner::{RcStr, StrInterner};
 use util::interner;
@@ -22,94 +20,157 @@ use std::mem;
 use std::path::BytesContainer;
 use std::rc::Rc;

+// NOTE(stage0): remove these re-exports after the next snapshot
+// (needed to allow quotations to pass stage0)
+#[cfg(stage0)] pub use self::Plus as PLUS;
+#[cfg(stage0)] pub use self::Minus as MINUS;
+#[cfg(stage0)] pub use self::Star as STAR;
+#[cfg(stage0)] pub use self::Slash as SLASH;
+#[cfg(stage0)] pub use self::Percent as PERCENT;
+#[cfg(stage0)] pub use self::Caret as CARET;
+#[cfg(stage0)] pub use self::And as AND;
+#[cfg(stage0)] pub use self::Or as OR;
+#[cfg(stage0)] pub use self::Shl as SHL;
+#[cfg(stage0)] pub use self::Shr as SHR;
+#[cfg(stage0)] pub use self::Eq as EQ;
+#[cfg(stage0)] pub use self::Lt as LT;
+#[cfg(stage0)] pub use self::Le as LE;
+#[cfg(stage0)] pub use self::EqEq as EQEQ;
+#[cfg(stage0)] pub use self::Ne as NE;
+#[cfg(stage0)] pub use self::Ge as GE;
+#[cfg(stage0)] pub use self::Gt as GT;
+#[cfg(stage0)] pub use self::AndAnd as ANDAND;
+#[cfg(stage0)] pub use self::OrOr as OROR;
+#[cfg(stage0)] pub use self::Not as NOT;
+#[cfg(stage0)] pub use self::Tilde as TILDE;
+#[cfg(stage0)] pub use self::BinOp as BINOP;
+#[cfg(stage0)] pub use self::BinOpEq as BINOPEQ;
+#[cfg(stage0)] pub use self::At as AT;
+#[cfg(stage0)] pub use self::Dot as DOT;
+#[cfg(stage0)] pub use self::DotDot as DOTDOT;
+#[cfg(stage0)] pub use self::DotDotDot as DOTDOTDOT;
+#[cfg(stage0)] pub use self::Comma as COMMA;
+#[cfg(stage0)] pub use self::Semi as SEMI;
+#[cfg(stage0)] pub use self::Colon as COLON;
+#[cfg(stage0)] pub use self::ModSep as MOD_SEP;
+#[cfg(stage0)] pub use self::RArrow as RARROW;
+#[cfg(stage0)] pub use self::LArrow as LARROW;
+#[cfg(stage0)] pub use self::FatArrow as FAT_ARROW;
+#[cfg(stage0)] pub use self::LParen as LPAREN;
+#[cfg(stage0)] pub use self::RParen as RPAREN;
+#[cfg(stage0)] pub use self::LBracket as LBRACKET;
+#[cfg(stage0)] pub use self::RBracket as RBRACKET;
+#[cfg(stage0)] pub use self::LBrace as LBRACE;
+#[cfg(stage0)] pub use self::RBrace as RBRACE;
+#[cfg(stage0)] pub use self::Pound as POUND;
+#[cfg(stage0)] pub use self::Dollar as DOLLAR;
+#[cfg(stage0)] pub use self::Question as QUESTION;
+#[cfg(stage0)] pub use self::LitByte as LIT_BYTE;
+#[cfg(stage0)] pub use self::LitChar as LIT_CHAR;
+#[cfg(stage0)] pub use self::LitInteger as LIT_INTEGER;
+#[cfg(stage0)] pub use self::LitFloat as LIT_FLOAT;
+#[cfg(stage0)] pub use self::LitStr as LIT_STR;
+#[cfg(stage0)] pub use self::LitStrRaw as LIT_STR_RAW;
+#[cfg(stage0)] pub use self::LitBinary as LIT_BINARY;
+#[cfg(stage0)] pub use self::LitBinaryRaw as LIT_BINARY_RAW;
+#[cfg(stage0)] pub use self::Ident as IDENT;
+#[cfg(stage0)] pub use self::Underscore as UNDERSCORE;
+#[cfg(stage0)] pub use self::Lifetime as LIFETIME;
+#[cfg(stage0)] pub use self::Interpolated as INTERPOLATED;
+#[cfg(stage0)] pub use self::DocComment as DOC_COMMENT;
+#[cfg(stage0)] pub use self::Whitespace as WS;
+#[cfg(stage0)] pub use self::Comment as COMMENT;
+#[cfg(stage0)] pub use self::Shebang as SHEBANG;
+#[cfg(stage0)] pub use self::Eof as EOF;

 #[allow(non_camel_case_types)]
 #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
-pub enum BinOp {
+pub enum BinOpToken {
-PLUS,
+Plus,
-MINUS,
+Minus,
-STAR,
+Star,
-SLASH,
+Slash,
-PERCENT,
+Percent,
-CARET,
+Caret,
-AND,
+And,
-OR,
+Or,
-SHL,
+Shl,
-SHR,
+Shr,
 }

 #[allow(non_camel_case_types)]
 #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
 pub enum Token {
 /* Expression-operator symbols. */
-EQ,
+Eq,
-LT,
+Lt,
-LE,
+Le,
-EQEQ,
+EqEq,
-NE,
+Ne,
-GE,
+Ge,
-GT,
+Gt,
-ANDAND,
+AndAnd,
-OROR,
+OrOr,
-NOT,
+Not,
-TILDE,
+Tilde,
-BINOP(BinOp),
+BinOp(BinOpToken),
-BINOPEQ(BinOp),
+BinOpEq(BinOpToken),

 /* Structural symbols */
-AT,
+At,
-DOT,
+Dot,
-DOTDOT,
+DotDot,
-DOTDOTDOT,
+DotDotDot,
-COMMA,
+Comma,
-SEMI,
+Semi,
-COLON,
+Colon,
-MOD_SEP,
+ModSep,
-RARROW,
+RArrow,
-LARROW,
+LArrow,
-FAT_ARROW,
+FatArrow,
-LPAREN,
+LParen,
-RPAREN,
+RParen,
-LBRACKET,
+LBracket,
-RBRACKET,
+RBracket,
-LBRACE,
+LBrace,
-RBRACE,
+RBrace,
-POUND,
+Pound,
-DOLLAR,
+Dollar,
-QUESTION,
+Question,

 /* Literals */
-LIT_BYTE(Name),
+LitByte(ast::Name),
-LIT_CHAR(Name),
+LitChar(ast::Name),
-LIT_INTEGER(Name),
+LitInteger(ast::Name),
-LIT_FLOAT(Name),
+LitFloat(ast::Name),
-LIT_STR(Name),
+LitStr(ast::Name),
-LIT_STR_RAW(Name, uint), /* raw str delimited by n hash symbols */
+LitStrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */
-LIT_BINARY(Name),
+LitBinary(ast::Name),
-LIT_BINARY_RAW(Name, uint), /* raw binary str delimited by n hash symbols */
+LitBinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */

 /* Name components */
 /// An identifier contains an "is_mod_name" boolean,
 /// indicating whether :: follows this token with no
 /// whitespace in between.
-IDENT(Ident, bool),
+Ident(ast::Ident, bool),
-UNDERSCORE,
+Underscore,
-LIFETIME(Ident),
+Lifetime(ast::Ident),

 /* For interpolation */
-INTERPOLATED(Nonterminal),
+Interpolated(Nonterminal),
-DOC_COMMENT(Name),
+DocComment(ast::Name),

 // Junk. These carry no data because we don't really care about the data
 // they *would* carry, and don't really want to allocate a new ident for
 // them. Instead, users could extract that from the associated span.

 /// Whitespace
-WS,
+Whitespace,
 /// Comment
-COMMENT,
+Comment,
-SHEBANG(Name),
+Shebang(ast::Name),

-EOF,
+Eof,
 }

 #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)]
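The `#[cfg(stage0)] pub use self::Plus as PLUS;` block above is the standard trick for renaming public enum variants without breaking the bootstrap compiler: the old SCREAMING_CASE spellings stay importable as re-exports until the next snapshot, then the whole block can be deleted. A tiny self-contained demonstration of the aliasing idea (without the `cfg(stage0)` gating, which only matters inside rustc's bootstrap; note that in 2014 Rust the variants lived directly in the module namespace, hence `self::Plus`, whereas modern Rust needs the `BinOpToken::` path):

```rust
// Minimal demonstration of renaming variants while keeping the old
// names alive as re-exports, as the stage0 block above does.
#[allow(dead_code)]
mod token {
    pub enum BinOpToken {
        Plus,
        Minus,
    }

    // Transitional aliases: the old SCREAMING_CASE spellings keep working.
    pub use self::BinOpToken::Plus as PLUS;
    pub use self::BinOpToken::Minus as MINUS;
}

fn main() {
    // New and old spellings denote the same variant.
    let _new = token::BinOpToken::Plus;
    let _old = token::PLUS;
}
```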
@@ -122,7 +183,7 @@ pub enum Nonterminal {
 NtExpr( P<ast::Expr>),
 NtTy( P<ast::Ty>),
 /// See IDENT, above, for meaning of bool in NtIdent:
-NtIdent(Box<Ident>, bool),
+NtIdent(Box<ast::Ident>, bool),
 /// Stuff inside brackets for attributes
 NtMeta( P<ast::MetaItem>),
 NtPath(Box<ast::Path>),
@@ -148,161 +209,131 @@ impl fmt::Show for Nonterminal {
 }
 }

-pub fn binop_to_string(o: BinOp) -> &'static str {
+pub fn binop_to_string(o: BinOpToken) -> &'static str {
 match o {
-PLUS => "+",
+Plus => "+",
-MINUS => "-",
+Minus => "-",
-STAR => "*",
+Star => "*",
-SLASH => "/",
+Slash => "/",
-PERCENT => "%",
+Percent => "%",
-CARET => "^",
+Caret => "^",
-AND => "&",
+And => "&",
-OR => "|",
+Or => "|",
-SHL => "<<",
+Shl => "<<",
-SHR => ">>"
+Shr => ">>",
 }
 }

 pub fn to_string(t: &Token) -> String {
 match *t {
-EQ => "=".into_string(),
+Eq => "=".into_string(),
-LT => "<".into_string(),
+Lt => "<".into_string(),
-LE => "<=".into_string(),
+Le => "<=".into_string(),
-EQEQ => "==".into_string(),
+EqEq => "==".into_string(),
-NE => "!=".into_string(),
+Ne => "!=".into_string(),
-GE => ">=".into_string(),
+Ge => ">=".into_string(),
-GT => ">".into_string(),
+Gt => ">".into_string(),
-NOT => "!".into_string(),
+Not => "!".into_string(),
-TILDE => "~".into_string(),
+Tilde => "~".into_string(),
-OROR => "||".into_string(),
+OrOr => "||".into_string(),
-ANDAND => "&&".into_string(),
+AndAnd => "&&".into_string(),
-BINOP(op) => binop_to_string(op).into_string(),
+BinOp(op) => binop_to_string(op).into_string(),
-BINOPEQ(op) => {
-let mut s = binop_to_string(op).into_string();
-s.push_str("=");
-s
-}
+BinOpEq(op) => format!("{}=", binop_to_string(op)),

 /* Structural symbols */
-AT => "@".into_string(),
+At => "@".into_string(),
-DOT => ".".into_string(),
+Dot => ".".into_string(),
-DOTDOT => "..".into_string(),
+DotDot => "..".into_string(),
-DOTDOTDOT => "...".into_string(),
+DotDotDot => "...".into_string(),
-COMMA => ",".into_string(),
+Comma => ",".into_string(),
-SEMI => ";".into_string(),
+Semi => ";".into_string(),
-COLON => ":".into_string(),
+Colon => ":".into_string(),
-MOD_SEP => "::".into_string(),
+ModSep => "::".into_string(),
-RARROW => "->".into_string(),
+RArrow => "->".into_string(),
-LARROW => "<-".into_string(),
+LArrow => "<-".into_string(),
-FAT_ARROW => "=>".into_string(),
+FatArrow => "=>".into_string(),
-LPAREN => "(".into_string(),
+LParen => "(".into_string(),
-RPAREN => ")".into_string(),
+RParen => ")".into_string(),
-LBRACKET => "[".into_string(),
+LBracket => "[".into_string(),
-RBRACKET => "]".into_string(),
+RBracket => "]".into_string(),
-LBRACE => "{".into_string(),
+LBrace => "{".into_string(),
-RBRACE => "}".into_string(),
+RBrace => "}".into_string(),
-POUND => "#".into_string(),
+Pound => "#".into_string(),
-DOLLAR => "$".into_string(),
+Dollar => "$".into_string(),
-QUESTION => "?".into_string(),
+Question => "?".into_string(),

 /* Literals */
-LIT_BYTE(b) => {
-format!("b'{}'", b.as_str())
-}
-LIT_CHAR(c) => {
-format!("'{}'", c.as_str())
-}
-LIT_INTEGER(c) | LIT_FLOAT(c) => {
-c.as_str().into_string()
-}
+LitByte(b) => format!("b'{}'", b.as_str()),
+LitChar(c) => format!("'{}'", c.as_str()),
+LitFloat(c) => c.as_str().into_string(),
+LitInteger(c) => c.as_str().into_string(),
+LitStr(s) => format!("\"{}\"", s.as_str()),
+LitStrRaw(s, n) => format!("r{delim}\"{string}\"{delim}",
+delim="#".repeat(n),
+string=s.as_str()),
+LitBinary(v) => format!("b\"{}\"", v.as_str()),
+LitBinaryRaw(s, n) => format!("br{delim}\"{string}\"{delim}",
+delim="#".repeat(n),
+string=s.as_str()),

-LIT_STR(s) => {
-format!("\"{}\"", s.as_str())
-}
-LIT_STR_RAW(s, n) => {
-format!("r{delim}\"{string}\"{delim}",
-delim="#".repeat(n), string=s.as_str())
-}
-LIT_BINARY(v) => {
-format!("b\"{}\"", v.as_str())
-}
-LIT_BINARY_RAW(s, n) => {
-format!("br{delim}\"{string}\"{delim}",
-delim="#".repeat(n), string=s.as_str())
-}
+/* Name components */
+Ident(s, _) => get_ident(s).get().into_string(),
+Lifetime(s) => format!("{}", get_ident(s)),
+Underscore => "_".into_string(),

-/* Name components */
-IDENT(s, _) => get_ident(s).get().into_string(),
-LIFETIME(s) => {
-format!("{}", get_ident(s))
-}
-UNDERSCORE => "_".into_string(),
+/* Other */
+DocComment(s) => s.as_str().into_string(),
+Eof => "<eof>".into_string(),
+Whitespace => " ".into_string(),
+Comment => "/* */".into_string(),
+Shebang(s) => format!("/* shebang: {}*/", s.as_str()),

-/* Other */
-DOC_COMMENT(s) => s.as_str().into_string(),
-EOF => "<eof>".into_string(),
-WS => " ".into_string(),
-COMMENT => "/* */".into_string(),
-SHEBANG(s) => format!("/* shebang: {}*/", s.as_str()),
-INTERPOLATED(ref nt) => {
-match nt {
-&NtExpr(ref e) => ::print::pprust::expr_to_string(&**e),
-&NtMeta(ref e) => ::print::pprust::meta_item_to_string(&**e),
-&NtTy(ref e) => ::print::pprust::ty_to_string(&**e),
-&NtPath(ref e) => ::print::pprust::path_to_string(&**e),
-_ => {
-let mut s = "an interpolated ".into_string();
-match *nt {
-NtItem(..) => s.push_str("item"),
-NtBlock(..) => s.push_str("block"),
-NtStmt(..) => s.push_str("statement"),
-NtPat(..) => s.push_str("pattern"),
-NtMeta(..) => fail!("should have been handled"),
-NtExpr(..) => fail!("should have been handled"),
-NtTy(..) => fail!("should have been handled"),
-NtIdent(..) => s.push_str("identifier"),
-NtPath(..) => fail!("should have been handled"),
-NtTT(..) => s.push_str("tt"),
-NtMatchers(..) => s.push_str("matcher sequence")
-};
-s
-}
+Interpolated(ref nt) => match *nt {
+NtExpr(ref e) => ::print::pprust::expr_to_string(&**e),
+NtMeta(ref e) => ::print::pprust::meta_item_to_string(&**e),
+NtTy(ref e) => ::print::pprust::ty_to_string(&**e),
+NtPath(ref e) => ::print::pprust::path_to_string(&**e),
+NtItem(..) => "an interpolated item".into_string(),
+NtBlock(..) => "an interpolated block".into_string(),
+NtStmt(..) => "an interpolated statement".into_string(),
+NtPat(..) => "an interpolated pattern".into_string(),
+NtIdent(..) => "an interpolated identifier".into_string(),
+NtTT(..) => "an interpolated tt".into_string(),
+NtMatchers(..) => "an interpolated matcher sequence".into_string(),
 }
-}
 }
 }

 pub fn can_begin_expr(t: &Token) -> bool {
 match *t {
-LPAREN => true,
+LParen => true,
-LBRACE => true,
+LBrace => true,
-LBRACKET => true,
+LBracket => true,
-IDENT(_, _) => true,
+Ident(_, _) => true,
-UNDERSCORE => true,
+Underscore => true,
-TILDE => true,
+Tilde => true,
-LIT_BYTE(_) => true,
+LitByte(_) => true,
-LIT_CHAR(_) => true,
+LitChar(_) => true,
-LIT_INTEGER(_) => true,
+LitInteger(_) => true,
-LIT_FLOAT(_) => true,
+LitFloat(_) => true,
-LIT_STR(_) => true,
+LitStr(_) => true,
-LIT_STR_RAW(_, _) => true,
+LitStrRaw(_, _) => true,
-LIT_BINARY(_) => true,
+LitBinary(_) => true,
-LIT_BINARY_RAW(_, _) => true,
+LitBinaryRaw(_, _) => true,
-POUND => true,
+Pound => true,
-AT => true,
+At => true,
-NOT => true,
+Not => true,
-BINOP(MINUS) => true,
+BinOp(Minus) => true,
-BINOP(STAR) => true,
+BinOp(Star) => true,
-BINOP(AND) => true,
+BinOp(And) => true,
-BINOP(OR) => true, // in lambda syntax
+BinOp(Or) => true, // in lambda syntax
-OROR => true, // in lambda syntax
+OrOr => true, // in lambda syntax
-MOD_SEP => true,
+ModSep => true,
-INTERPOLATED(NtExpr(..))
-| INTERPOLATED(NtIdent(..))
-| INTERPOLATED(NtBlock(..))
-| INTERPOLATED(NtPath(..)) => true,
+Interpolated(NtExpr(..)) => true,
+Interpolated(NtIdent(..)) => true,
+Interpolated(NtBlock(..)) => true,
+Interpolated(NtPath(..)) => true,
-_ => false
+_ => false,
 }
 }

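Two small refactors ride along with the rename in `to_string`: the `BINOPEQ` arm's allocate-then-mutate dance collapses into a single `format!`, and the nested match over interpolated nonterminals flattens into one arm list that returns strings directly. A standalone before/after sketch of the first simplification (plain strings instead of the real token types):

```rust
// Toy before/after of the BinOpEq simplification above: build the
// compound-assignment spelling in one expression instead of via push_str.
fn binop_to_string(op: &str) -> String {
    op.to_string()
}

// Before: allocate a String, then mutate it.
fn binop_eq_before(op: &str) -> String {
    let mut s = binop_to_string(op);
    s.push_str("=");
    s
}

// After: one `format!` call, same result.
fn binop_eq_after(op: &str) -> String {
    format!("{}=", binop_to_string(op))
}

fn main() {
    assert_eq!(binop_eq_before("+"), binop_eq_after("+"));
    assert_eq!(binop_eq_after("<<"), "<<=");
}
```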
@ -310,40 +341,47 @@ pub fn can_begin_expr(t: &Token) -> bool {
|
||||||
/// otherwise `None`.
|
/// otherwise `None`.
|
||||||
pub fn close_delimiter_for(t: &Token) -> Option<Token> {
|
pub fn close_delimiter_for(t: &Token) -> Option<Token> {
|
||||||
match *t {
|
match *t {
|
||||||
LPAREN => Some(RPAREN),
|
LParen => Some(RParen),
|
||||||
LBRACE => Some(RBRACE),
|
LBrace => Some(RBrace),
|
||||||
LBRACKET => Some(RBRACKET),
|
LBracket => Some(RBracket),
|
||||||
_ => None
|
_ => None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||

pub fn is_lit(t: &Token) -> bool {
pub fn is_lit(t: &Token) -> bool {
match *t {
match *t {
LIT_BYTE(_) => true,
LitByte(_) => true,
LIT_CHAR(_) => true,
LitChar(_) => true,
LIT_INTEGER(_) => true,
LitInteger(_) => true,
LIT_FLOAT(_) => true,
LitFloat(_) => true,
LIT_STR(_) => true,
LitStr(_) => true,
LIT_STR_RAW(_, _) => true,
LitStrRaw(_, _) => true,
LIT_BINARY(_) => true,
LitBinary(_) => true,
LIT_BINARY_RAW(_, _) => true,
LitBinaryRaw(_, _) => true,
_ => false
_ => false,
}
}
}
}

pub fn is_ident(t: &Token) -> bool {
pub fn is_ident(t: &Token) -> bool {
match *t { IDENT(_, _) => true, _ => false }
match *t {
Ident(_, _) => true,
_ => false,
}
}
}

pub fn is_ident_or_path(t: &Token) -> bool {
pub fn is_ident_or_path(t: &Token) -> bool {
match *t {
match *t {
IDENT(_, _) | INTERPOLATED(NtPath(..)) => true,
Ident(_, _) => true,
_ => false
Interpolated(NtPath(..)) => true,
_ => false,
}
}
}
}

pub fn is_plain_ident(t: &Token) -> bool {
pub fn is_plain_ident(t: &Token) -> bool {
match *t { IDENT(_, false) => true, _ => false }
match *t {
Ident(_, false) => true,
_ => false,
}
}
}
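The three identifier predicates above differ only in which payloads they accept: is_ident takes any Ident, is_plain_ident additionally requires the bool flag to be false, and is_ident_or_path also admits an interpolated path. A small sketch of how they nest (hypothetical, same assumptions as above):

    // Hypothetical example, not part of this diff.
    use syntax::parse::token;

    fn check(t: &token::Token) {
        // Every plain ident is an ident, and every ident is ident-or-path.
        if token::is_plain_ident(t) {
            assert!(token::is_ident(t));
        }
        if token::is_ident(t) {
            assert!(token::is_ident_or_path(t));
        }
    }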

// Get the first "argument"
// Get the first "argument"

@@ -376,22 +414,28 @@ macro_rules! declare_special_idents_and_keywords {(
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
}
) => {
) => {
static STRICT_KEYWORD_START: Name = first!($( Name($sk_name), )*);
static STRICT_KEYWORD_START: ast::Name = first!($( ast::Name($sk_name), )*);
static STRICT_KEYWORD_FINAL: Name = last!($( Name($sk_name), )*);
static STRICT_KEYWORD_FINAL: ast::Name = last!($( ast::Name($sk_name), )*);
static RESERVED_KEYWORD_START: Name = first!($( Name($rk_name), )*);
static RESERVED_KEYWORD_START: ast::Name = first!($( ast::Name($rk_name), )*);
static RESERVED_KEYWORD_FINAL: Name = last!($( Name($rk_name), )*);
static RESERVED_KEYWORD_FINAL: ast::Name = last!($( ast::Name($rk_name), )*);

pub mod special_idents {
pub mod special_idents {
use ast::{Ident, Name};
use ast;
$(
$(
#[allow(non_uppercase_statics)]
#[allow(non_uppercase_statics)]
pub const $si_static: Ident = Ident { name: Name($si_name), ctxt: 0 };
pub const $si_static: ast::Ident = ast::Ident {
name: ast::Name($si_name),
ctxt: 0,
};
)*
)*
}
}

pub mod special_names {
pub mod special_names {
use ast::Name;
use ast;
$( #[allow(non_uppercase_statics)] pub const $si_static: Name = Name($si_name); )*
$(
#[allow(non_uppercase_statics)]
pub const $si_static: ast::Name = ast::Name($si_name);
)*
}
}

/**
/**

@@ -402,7 +446,7 @@ macro_rules! declare_special_idents_and_keywords {(
* the language and may not appear as identifiers.
* the language and may not appear as identifiers.
*/
*/
pub mod keywords {
pub mod keywords {
use ast::Name;
use ast;

pub enum Keyword {
pub enum Keyword {
$( $sk_variant, )*
$( $sk_variant, )*

@@ -410,10 +454,10 @@ macro_rules! declare_special_idents_and_keywords {(
}
}

impl Keyword {
impl Keyword {
pub fn to_name(&self) -> Name {
pub fn to_name(&self) -> ast::Name {
match *self {
match *self {
$( $sk_variant => Name($sk_name), )*
$( $sk_variant => ast::Name($sk_name), )*
$( $rk_variant => Name($rk_name), )*
$( $rk_variant => ast::Name($rk_name), )*
}
}
}
}
}
}

@@ -432,9 +476,9 @@ macro_rules! declare_special_idents_and_keywords {(
}}
}}

// If the special idents get renumbered, remember to modify these two as appropriate
// If the special idents get renumbered, remember to modify these two as appropriate
pub const SELF_KEYWORD_NAME: Name = Name(SELF_KEYWORD_NAME_NUM);
pub const SELF_KEYWORD_NAME: ast::Name = ast::Name(SELF_KEYWORD_NAME_NUM);
const STATIC_KEYWORD_NAME: Name = Name(STATIC_KEYWORD_NAME_NUM);
const STATIC_KEYWORD_NAME: ast::Name = ast::Name(STATIC_KEYWORD_NAME_NUM);
const SUPER_KEYWORD_NAME: Name = Name(SUPER_KEYWORD_NAME_NUM);
const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM);

pub const SELF_KEYWORD_NAME_NUM: u32 = 1;
pub const SELF_KEYWORD_NAME_NUM: u32 = 1;
const STATIC_KEYWORD_NAME_NUM: u32 = 2;
const STATIC_KEYWORD_NAME_NUM: u32 = 2;

@@ -531,27 +575,27 @@ declare_special_idents_and_keywords! {
* operator
* operator
*/
*/
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
match *tok {
match *tok {
BINOP(STAR) => Some(ast::BiMul),
BinOp(Star) => Some(ast::BiMul),
BINOP(SLASH) => Some(ast::BiDiv),
BinOp(Slash) => Some(ast::BiDiv),
BINOP(PERCENT) => Some(ast::BiRem),
BinOp(Percent) => Some(ast::BiRem),
BINOP(PLUS) => Some(ast::BiAdd),
BinOp(Plus) => Some(ast::BiAdd),
BINOP(MINUS) => Some(ast::BiSub),
BinOp(Minus) => Some(ast::BiSub),
BINOP(SHL) => Some(ast::BiShl),
BinOp(Shl) => Some(ast::BiShl),
BINOP(SHR) => Some(ast::BiShr),
BinOp(Shr) => Some(ast::BiShr),
BINOP(AND) => Some(ast::BiBitAnd),
BinOp(And) => Some(ast::BiBitAnd),
BINOP(CARET) => Some(ast::BiBitXor),
BinOp(Caret) => Some(ast::BiBitXor),
BINOP(OR) => Some(ast::BiBitOr),
BinOp(Or) => Some(ast::BiBitOr),
LT => Some(ast::BiLt),
Lt => Some(ast::BiLt),
LE => Some(ast::BiLe),
Le => Some(ast::BiLe),
GE => Some(ast::BiGe),
Ge => Some(ast::BiGe),
GT => Some(ast::BiGt),
Gt => Some(ast::BiGt),
EQEQ => Some(ast::BiEq),
EqEq => Some(ast::BiEq),
NE => Some(ast::BiNe),
Ne => Some(ast::BiNe),
ANDAND => Some(ast::BiAnd),
AndAnd => Some(ast::BiAnd),
OROR => Some(ast::BiOr),
OrOr => Some(ast::BiOr),
_ => None
_ => None
}
}
}
}
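token_to_binop is the bridge from operator tokens to AST binop nodes; an expression parser can consult it to decide whether the next token continues a binary expression. A minimal sketch (hypothetical helper, same assumptions as above):

    // Hypothetical example, not part of this diff.
    use syntax::parse::token;

    fn continues_binary_expr(tok: &token::Token) -> bool {
        // BinOp(Star) (formerly BINOP(STAR)) maps to ast::BiMul, Lt to
        // ast::BiLt, and so on; anything else returns None.
        token::token_to_binop(tok).is_some()
    }

    fn demo() {
        assert!(continues_binary_expr(&token::BinOp(token::Star)));
        assert!(!continues_binary_expr(&token::Semi));
    }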

// looks like we can get rid of this completely...
// looks like we can get rid of this completely...

@@ -646,7 +690,7 @@ impl<S:Encoder<E>, E> Encodable<S, E> for InternedString {

/// Returns the string contents of a name, using the task-local interner.
/// Returns the string contents of a name, using the task-local interner.
#[inline]
#[inline]
pub fn get_name(name: Name) -> InternedString {
pub fn get_name(name: ast::Name) -> InternedString {
let interner = get_ident_interner();
let interner = get_ident_interner();
InternedString::new_from_rc_str(interner.get(name))
InternedString::new_from_rc_str(interner.get(name))
}
}

@@ -654,7 +698,7 @@ pub fn get_name(name: Name) -> InternedString {
/// Returns the string contents of an identifier, using the task-local
/// Returns the string contents of an identifier, using the task-local
/// interner.
/// interner.
#[inline]
#[inline]
pub fn get_ident(ident: Ident) -> InternedString {
pub fn get_ident(ident: ast::Ident) -> InternedString {
get_name(ident.name)
get_name(ident.name)
}
}

@@ -667,32 +711,32 @@ pub fn intern_and_get_ident(s: &str) -> InternedString {

/// Maps a string to its interned representation.
/// Maps a string to its interned representation.
#[inline]
#[inline]
pub fn intern(s: &str) -> Name {
pub fn intern(s: &str) -> ast::Name {
get_ident_interner().intern(s)
get_ident_interner().intern(s)
}
}

/// gensym's a new uint, using the current interner.
/// gensym's a new uint, using the current interner.
#[inline]
#[inline]
pub fn gensym(s: &str) -> Name {
pub fn gensym(s: &str) -> ast::Name {
get_ident_interner().gensym(s)
get_ident_interner().gensym(s)
}
}

/// Maps a string to an identifier with an empty syntax context.
/// Maps a string to an identifier with an empty syntax context.
#[inline]
#[inline]
pub fn str_to_ident(s: &str) -> Ident {
pub fn str_to_ident(s: &str) -> ast::Ident {
Ident::new(intern(s))
ast::Ident::new(intern(s))
}
}

/// Maps a string to a gensym'ed identifier.
/// Maps a string to a gensym'ed identifier.
#[inline]
#[inline]
pub fn gensym_ident(s: &str) -> Ident {
pub fn gensym_ident(s: &str) -> ast::Ident {
Ident::new(gensym(s))
ast::Ident::new(gensym(s))
}
}
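The distinction between intern and gensym above is the foundation of hygiene: intern is idempotent per string, while gensym mints a fresh Name on every call even for identical text. A sketch of that contract (hypothetical, same assumptions as above; the InternedString::get accessor is assumed from this era's API):

    // Hypothetical example, not part of this diff.
    use syntax::parse::token;

    fn demo() {
        // Interning the same string twice yields the same Name...
        assert!(token::intern("x") == token::intern("x"));
        // ...while gensym returns a fresh Name each time,
        assert!(token::gensym("x") != token::gensym("x"));
        // even though both still render as "x".
        assert!(token::get_name(token::gensym("x")).get() == "x");
    }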

// create a fresh name that maps to the same string as the old one.
// create a fresh name that maps to the same string as the old one.
// note that this guarantees that str_ptr_eq(ident_to_string(src),interner_get(fresh_name(src)));
// note that this guarantees that str_ptr_eq(ident_to_string(src),interner_get(fresh_name(src)));
// that is, that the new name and the old one are connected to ptr_eq strings.
// that is, that the new name and the old one are connected to ptr_eq strings.
pub fn fresh_name(src: &Ident) -> Name {
pub fn fresh_name(src: &ast::Ident) -> ast::Name {
let interner = get_ident_interner();
let interner = get_ident_interner();
interner.gensym_copy(src.name)
interner.gensym_copy(src.name)
// following: debug version. Could work in final except that it's incompatible with
// following: debug version. Could work in final except that it's incompatible with

@@ -703,7 +747,7 @@ pub fn fresh_name(src: &Ident) -> Name {
}
}

// create a fresh mark.
// create a fresh mark.
pub fn fresh_mark() -> Mrk {
pub fn fresh_mark() -> ast::Mrk {
gensym("mark").uint() as u32
gensym("mark").uint() as u32
}
}

@@ -711,14 +755,14 @@ pub fn fresh_mark() -> Mrk {

pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
match *tok {
match *tok {
token::IDENT(sid, false) => { kw.to_name() == sid.name }
Ident(sid, false) => { kw.to_name() == sid.name }
_ => { false }
_ => { false }
}
}
}
}
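is_keyword reduces to a single Name comparison against the keyword's fixed interned id, and per the match above it only fires for an identifier token whose bool flag is false. A usage sketch (hypothetical caller, same assumptions; keywords::Fn is assumed to be one of the macro-declared variants):

    // Hypothetical example, not part of this diff.
    use syntax::parse::token;
    use syntax::parse::token::keywords;

    fn expect_fn_keyword(tok: &token::Token) -> bool {
        // True only for a plain `fn` identifier token (flag false),
        // per the match arm in is_keyword above.
        token::is_keyword(keywords::Fn, tok)
    }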

pub fn is_any_keyword(tok: &Token) -> bool {
pub fn is_any_keyword(tok: &Token) -> bool {
match *tok {
match *tok {
token::IDENT(sid, false) => {
Ident(sid, false) => {
let n = sid.name;
let n = sid.name;

n == SELF_KEYWORD_NAME
n == SELF_KEYWORD_NAME

@@ -733,7 +777,7 @@ pub fn is_any_keyword(tok: &Token) -> bool {

pub fn is_strict_keyword(tok: &Token) -> bool {
pub fn is_strict_keyword(tok: &Token) -> bool {
match *tok {
match *tok {
token::IDENT(sid, false) => {
Ident(sid, false) => {
let n = sid.name;
let n = sid.name;

n == SELF_KEYWORD_NAME
n == SELF_KEYWORD_NAME

@@ -742,7 +786,7 @@ pub fn is_strict_keyword(tok: &Token) -> bool {
|| STRICT_KEYWORD_START <= n
|| STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
&& n <= STRICT_KEYWORD_FINAL
},
},
token::IDENT(sid, true) => {
Ident(sid, true) => {
let n = sid.name;
let n = sid.name;

n != SELF_KEYWORD_NAME
n != SELF_KEYWORD_NAME

@@ -756,7 +800,7 @@ pub fn is_strict_keyword(tok: &Token) -> bool {

pub fn is_reserved_keyword(tok: &Token) -> bool {
pub fn is_reserved_keyword(tok: &Token) -> bool {
match *tok {
match *tok {
token::IDENT(sid, false) => {
Ident(sid, false) => {
let n = sid.name;
let n = sid.name;

RESERVED_KEYWORD_START <= n
RESERVED_KEYWORD_START <= n

@@ -768,7 +812,7 @@ pub fn is_reserved_keyword(tok: &Token) -> bool {

pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool {
pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
match (t1,t2) {
(&IDENT(id1,_),&IDENT(id2,_)) | (&LIFETIME(id1),&LIFETIME(id2)) =>
(&Ident(id1,_),&Ident(id2,_)) | (&Lifetime(id1),&Lifetime(id2)) =>
mtwt::resolve(id1) == mtwt::resolve(id2),
mtwt::resolve(id1) == mtwt::resolve(id2),
_ => *t1 == *t2
_ => *t1 == *t2
}
}

@@ -786,9 +830,9 @@ mod test {
}
}

#[test] fn mtwt_token_eq_test() {
#[test] fn mtwt_token_eq_test() {
assert!(mtwt_token_eq(&GT,&GT));
assert!(mtwt_token_eq(&Gt,&Gt));
let a = str_to_ident("bac");
let a = str_to_ident("bac");
let a1 = mark_ident(a,92);
let a1 = mark_ident(a,92);
assert!(mtwt_token_eq(&IDENT(a,true),&IDENT(a1,false)));
assert!(mtwt_token_eq(&Ident(a,true),&Ident(a1,false)));
}
}
}
}

@@ -1035,7 +1035,7 @@ impl<'a> State<'a> {
ast::TtToken(_, ref tk) => {
ast::TtToken(_, ref tk) => {
try!(word(&mut self.s, parse::token::to_string(tk).as_slice()));
try!(word(&mut self.s, parse::token::to_string(tk).as_slice()));
match *tk {
match *tk {
parse::token::DOC_COMMENT(..) => {
parse::token::DocComment(..) => {
hardbreak(&mut self.s)
hardbreak(&mut self.s)
}
}
_ => Ok(())
_ => Ok(())

@@ -17,7 +17,7 @@ extern crate syntax;
extern crate rustc;
extern crate rustc;

use syntax::codemap::Span;
use syntax::codemap::Span;
use syntax::parse::token::{IDENT, get_ident};
use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
use syntax::ast::{TokenTree, TtToken};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::build::AstBuilder; // trait for expr_uint
use syntax::ext::build::AstBuilder; // trait for expr_uint

@@ -39,7 +39,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
("I", 1)];
|
("I", 1)];
|
||||||
|
|
||||||
let text = match args {
|
let text = match args {
|
||||||
[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
|
[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
|
||||||
_ => {
|
_ => {
|
||||||
cx.span_err(sp, "argument should be a single identifier");
|
cx.span_err(sp, "argument should be a single identifier");
|
||||||
return DummyResult::any(sp);
|
return DummyResult::any(sp);
|
||||||
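For context, this arm shows how a plugin destructures a single-identifier argument after the rename. Invoking the macro from client code would look roughly like the sketch below; the crate name and the rn! macro name are assumptions (the diff does not show the registration), and the phase attribute is this era's plugin-loading mechanism:

    // Hypothetical example, not part of this diff.
    #![feature(phase)]
    #[phase(plugin)]
    extern crate roman_numerals; // assumed name of the plugin crate

    fn main() {
        // Expands at compile time via expand_rn; rn! is the assumed
        // name under which the plugin registers the macro.
        assert_eq!(rn!(MMXIV), 2014u);
    }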

@@ -8,4 +8,4 @@
// option. This file may not be copied, modified, or distributed
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// except according to those terms.

type t = { f: () }; //~ ERROR expected type, found token LBRACE
type t = { f: () }; //~ ERROR expected type, found token LBrace