Auto merge of #99887 - nnethercote:rm-TreeAndSpacing, r=petrochenkov
Remove `TreeAndSpacing`.

A `TokenStream` contains a `Lrc<Vec<(TokenTree, Spacing)>>`. But this is not quite right. `Spacing` makes sense for `TokenTree::Token`, but it does not make sense for `TokenTree::Delimited`, because a `TokenTree::Delimited` cannot be joined with another `TokenTree`.

This commit fixes the problem by adding `Spacing` to `TokenTree::Token`, changing `TokenStream` to contain a `Lrc<Vec<TokenTree>>`, and removing the `TreeAndSpacing` typedef.

The commit removes these two impls:
- `impl From<TokenTree> for TokenStream`
- `impl From<TokenTree> for TreeAndSpacing`

These were useful, but they also resulted in code with many `.into()` calls that was hard to read, particularly for anyone not highly familiar with the relevant types. This commit makes some other changes to compensate:
- `TokenTree::token()` becomes `TokenTree::token_{alone,joint}()`.
- `TokenStream::token_{alone,joint}()` are added.
- `TokenStream::delimited` is added.

As a result, code like this:
```rust
TokenTree::token(token::Semi, stmt.span).into()
```
becomes this:
```rust
TokenStream::token_alone(token::Semi, stmt.span)
```
which makes the type of the result, and its spacing, clearer.

These changes also simplify `Cursor` and `CursorRef`, because they no longer need to distinguish between `next` and `next_with_spacing`.

r? `@petrochenkov`
commit 1202bbaf48
23 changed files with 317 additions and 307 deletions
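Before the hunks below, here is a minimal, self-contained sketch of the shape this change moves to. It is not the real `rustc_ast` API: `Token` is a plain string stub, `Rc` stands in for `Lrc`, and the constructors drop the span parameter the real ones take. It only models the structural point of the commit message: `Spacing` lives on `TokenTree::Token`, and `TokenStream` holds a shared `Vec<TokenTree>`.

```rust
// A simplified model, not the real rustc_ast API: `Token` is a string stub,
// `Rc` stands in for `Lrc`, and spans/delimiters are omitted.
#![allow(dead_code)]

use std::rc::Rc;

/// Whether a token is immediately followed by the next token (`Joint`,
/// e.g. the two `>`s of `>>`) or not (`Alone`).
#[derive(Clone, Copy, Debug, PartialEq)]
enum Spacing {
    Alone,
    Joint,
}

#[derive(Clone, Debug)]
struct Token(String); // stand-in for the real `Token` (kind + span)

#[derive(Clone, Debug)]
enum TokenTree {
    // `Spacing` belongs here: an individual token may or may not be joined
    // to the token that follows it.
    Token(Token, Spacing),
    // A delimited group can never be joined to a following tree, so it
    // carries no `Spacing`.
    Delimited(TokenStream),
}

// Previously `Lrc<Vec<(TokenTree, Spacing)>>`; now just a shared vec of trees.
#[derive(Clone, Debug)]
struct TokenStream(Rc<Vec<TokenTree>>);

impl TokenTree {
    fn token_alone(tok: Token) -> TokenTree {
        TokenTree::Token(tok, Spacing::Alone)
    }
    fn token_joint(tok: Token) -> TokenTree {
        TokenTree::Token(tok, Spacing::Joint)
    }
}

impl TokenStream {
    // Replaces the old `TokenTree::token(..).into()` idiom with an explicit
    // constructor that names the spacing.
    fn token_alone(tok: Token) -> TokenStream {
        TokenStream(Rc::new(vec![TokenTree::token_alone(tok)]))
    }
}

fn main() {
    // Roughly what `TokenStream::token_alone(token::Semi, stmt.span)` builds.
    let semi = TokenStream::token_alone(Token(";".to_string()));

    // Consumers now destructure (and usually ignore) the spacing field,
    // as the concat_idents and trace_macros hunks below do.
    for tree in semi.0.iter() {
        match tree {
            TokenTree::Token(Token(text), _spacing) => println!("token {text:?}"),
            TokenTree::Delimited(_) => println!("delimited group"),
        }
    }
}
```

Putting `Spacing` inside the `Token` variant means every consumer that matches on `TokenTree::Token` now binds or ignores a second field, which is exactly the mechanical change visible in the last two hunks below.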
```diff
@@ -152,7 +152,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
     fn build_panic(&self, expr_str: &str, panic_path: Path) -> P<Expr> {
         let escaped_expr_str = escape_to_fmt(expr_str);
         let initial = [
-            TokenTree::token(
+            TokenTree::token_alone(
                 token::Literal(token::Lit {
                     kind: token::LitKind::Str,
                     symbol: Symbol::intern(&if self.fmt_string.is_empty() {
@@ -167,12 +167,12 @@ impl<'cx, 'a> Context<'cx, 'a> {
                 }),
                 self.span,
             ),
-            TokenTree::token(token::Comma, self.span),
+            TokenTree::token_alone(token::Comma, self.span),
         ];
         let captures = self.capture_decls.iter().flat_map(|cap| {
             [
-                TokenTree::token(token::Ident(cap.ident.name, false), cap.ident.span),
-                TokenTree::token(token::Comma, self.span),
+                TokenTree::token_alone(token::Ident(cap.ident.name, false), cap.ident.span),
+                TokenTree::token_alone(token::Comma, self.span),
             ]
         });
         self.cx.expr(
```
```diff
@@ -20,14 +20,14 @@ pub fn expand_concat_idents<'cx>(
     for (i, e) in tts.into_trees().enumerate() {
         if i & 1 == 1 {
             match e {
-                TokenTree::Token(Token { kind: token::Comma, .. }) => {}
+                TokenTree::Token(Token { kind: token::Comma, .. }, _) => {}
                 _ => {
                     cx.span_err(sp, "concat_idents! expecting comma");
                     return DummyResult::any(sp);
                 }
             }
         } else {
-            if let TokenTree::Token(token) = e {
+            if let TokenTree::Token(token, _) = e {
                 if let Some((ident, _)) = token.ident() {
                     res_str.push_str(ident.name.as_str());
                     continue;
```
```diff
@@ -11,8 +11,8 @@ pub fn expand_trace_macros(
     let mut cursor = tt.into_trees();
     let mut err = false;
     let value = match &cursor.next() {
-        Some(TokenTree::Token(token)) if token.is_keyword(kw::True) => true,
-        Some(TokenTree::Token(token)) if token.is_keyword(kw::False) => false,
+        Some(TokenTree::Token(token, _)) if token.is_keyword(kw::True) => true,
+        Some(TokenTree::Token(token, _)) if token.is_keyword(kw::False) => false,
         _ => {
             err = true;
             false
```