about summary refs log tree commit diff
diff options
context:
space:
mode:
author: bors <bors@rust-lang.org> 2020-05-24 07:46:13 +0000
committer: bors <bors@rust-lang.org> 2020-05-24 07:46:13 +0000
commit: 7726070fa755f660b5da3f82f46e07d9c6866f69 (patch)
tree: c04ccb3e3e40d84659db6a65bcd51561b5511b94
parent: 52b605c8cb2f730e607de0777a694cd1b9bb3e15 (diff)
parent: 6cb1c0eb64c28742e9d228027d04fe3ebc190e46 (diff)
download: rust-7726070fa755f660b5da3f82f46e07d9c6866f69.tar.gz
          rust-7726070fa755f660b5da3f82f46e07d9c6866f69.zip
Auto merge of #72524 - RalfJung:rollup-s9f1pcc, r=RalfJung
Rollup of 2 pull requests

Successful merges:

 - #72388 (Recursively expand `TokenKind::Interpolated` in `probably_equal_for_proc_macro`)
 - #72517 (small select cleanup)

Failed merges:

r? @ghost
-rw-r--r--Cargo.lock1
-rw-r--r--src/librustc_ast/token.rs56
-rw-r--r--src/librustc_ast/tokenstream.rs125
-rw-r--r--src/librustc_parse/Cargo.toml1
-rw-r--r--src/librustc_parse/lib.rs214
-rw-r--r--src/librustc_trait_selection/traits/select.rs25
-rw-r--r--src/test/ui/proc-macro/macro-rules-capture.rs18
-rw-r--r--src/test/ui/proc-macro/macro-rules-capture.stderr12
8 files changed, 247 insertions, 205 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 6ce5458ed7a..19ecd2023c6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4178,6 +4178,7 @@ dependencies = [
  "rustc_lexer",
  "rustc_session",
  "rustc_span",
+ "smallvec 1.4.0",
  "unicode-normalization",
 ]
 
diff --git a/src/librustc_ast/token.rs b/src/librustc_ast/token.rs
index a5b9c2a95bb..2e2bc380e84 100644
--- a/src/librustc_ast/token.rs
+++ b/src/librustc_ast/token.rs
@@ -673,62 +673,6 @@ impl Token {
 
         Some(Token::new(kind, self.span.to(joint.span)))
     }
-
-    // See comments in `Nonterminal::to_tokenstream` for why we care about
-    // *probably* equal here rather than actual equality
-    crate fn probably_equal_for_proc_macro(&self, other: &Token) -> bool {
-        if mem::discriminant(&self.kind) != mem::discriminant(&other.kind) {
-            return false;
-        }
-        match (&self.kind, &other.kind) {
-            (&Eq, &Eq)
-            | (&Lt, &Lt)
-            | (&Le, &Le)
-            | (&EqEq, &EqEq)
-            | (&Ne, &Ne)
-            | (&Ge, &Ge)
-            | (&Gt, &Gt)
-            | (&AndAnd, &AndAnd)
-            | (&OrOr, &OrOr)
-            | (&Not, &Not)
-            | (&Tilde, &Tilde)
-            | (&At, &At)
-            | (&Dot, &Dot)
-            | (&DotDot, &DotDot)
-            | (&DotDotDot, &DotDotDot)
-            | (&DotDotEq, &DotDotEq)
-            | (&Comma, &Comma)
-            | (&Semi, &Semi)
-            | (&Colon, &Colon)
-            | (&ModSep, &ModSep)
-            | (&RArrow, &RArrow)
-            | (&LArrow, &LArrow)
-            | (&FatArrow, &FatArrow)
-            | (&Pound, &Pound)
-            | (&Dollar, &Dollar)
-            | (&Question, &Question)
-            | (&Whitespace, &Whitespace)
-            | (&Comment, &Comment)
-            | (&Eof, &Eof) => true,
-
-            (&BinOp(a), &BinOp(b)) | (&BinOpEq(a), &BinOpEq(b)) => a == b,
-
-            (&OpenDelim(a), &OpenDelim(b)) | (&CloseDelim(a), &CloseDelim(b)) => a == b,
-
-            (&DocComment(a), &DocComment(b)) | (&Shebang(a), &Shebang(b)) => a == b,
-
-            (&Literal(a), &Literal(b)) => a == b,
-
-            (&Lifetime(a), &Lifetime(b)) => a == b,
-            (&Ident(a, b), &Ident(c, d)) => {
-                b == d && (a == c || a == kw::DollarCrate || c == kw::DollarCrate)
-            }
-
-            (&Interpolated(_), &Interpolated(_)) => false,
-
-            _ => panic!("forgot to add a token?"),
-        }
-    }
 }
 
 impl PartialEq<TokenKind> for Token {
diff --git a/src/librustc_ast/tokenstream.rs b/src/librustc_ast/tokenstream.rs
index 075aaa7e5bc..9d0199078fa 100644
--- a/src/librustc_ast/tokenstream.rs
+++ b/src/librustc_ast/tokenstream.rs
@@ -21,8 +21,6 @@ use rustc_macros::HashStable_Generic;
 use rustc_span::{Span, DUMMY_SP};
 use smallvec::{smallvec, SmallVec};
 
-use log::debug;
-
 use std::{iter, mem};
 
 /// When the main rust parser encounters a syntax-extension invocation, it
@@ -68,23 +66,6 @@ impl TokenTree {
         }
     }
 
-    // See comments in `Nonterminal::to_tokenstream` for why we care about
-    // *probably* equal here rather than actual equality
-    //
-    // This is otherwise the same as `eq_unspanned`, only recursing with a
-    // different method.
-    pub fn probably_equal_for_proc_macro(&self, other: &TokenTree) -> bool {
-        match (self, other) {
-            (TokenTree::Token(token), TokenTree::Token(token2)) => {
-                token.probably_equal_for_proc_macro(token2)
-            }
-            (TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
-                delim == delim2 && tts.probably_equal_for_proc_macro(&tts2)
-            }
-            _ => false,
-        }
-    }
-
     /// Retrieves the TokenTree's span.
     pub fn span(&self) -> Span {
         match self {
@@ -307,112 +288,6 @@ impl TokenStream {
         t1.next().is_none() && t2.next().is_none()
     }
 
-    // See comments in `Nonterminal::to_tokenstream` for why we care about
-    // *probably* equal here rather than actual equality
-    //
-    // This is otherwise the same as `eq_unspanned`, only recursing with a
-    // different method.
-    pub fn probably_equal_for_proc_macro(&self, other: &TokenStream) -> bool {
-        // When checking for `probably_eq`, we ignore certain tokens that aren't
-        // preserved in the AST. Because they are not preserved, the pretty
-        // printer arbitrarily adds or removes them when printing as token
-        // streams, making a comparison between a token stream generated from an
-        // AST and a token stream which was parsed into an AST more reliable.
-        fn semantic_tree(tree: &TokenTree) -> bool {
-            if let TokenTree::Token(token) = tree {
-                if let
-                    // The pretty printer tends to add trailing commas to
-                    // everything, and in particular, after struct fields.
-                    | token::Comma
-                    // The pretty printer emits `NoDelim` as whitespace.
-                    | token::OpenDelim(DelimToken::NoDelim)
-                    | token::CloseDelim(DelimToken::NoDelim)
-                    // The pretty printer collapses many semicolons into one.
-                    | token::Semi
-                    // The pretty printer collapses whitespace arbitrarily and can
-                    // introduce whitespace from `NoDelim`.
-                    | token::Whitespace
-                    // The pretty printer can turn `$crate` into `::crate_name`
-                    | token::ModSep = token.kind {
-                    return false;
-                }
-            }
-            true
-        }
-
-        // When comparing two `TokenStream`s, we ignore the `IsJoint` information.
-        //
-        // However, `rustc_parse::lexer::tokentrees::TokenStreamBuilder` will
-        // use `Token.glue` on adjacent tokens with the proper `IsJoint`.
-        // Since we are ignoreing `IsJoint`, a 'glued' token (e.g. `BinOp(Shr)`)
-        // and its 'split'/'unglued' compoenents (e.g. `Gt, Gt`) are equivalent
-        // when determining if two `TokenStream`s are 'probably equal'.
-        //
-        // Therefore, we use `break_two_token_op` to convert all tokens
-        // to the 'unglued' form (if it exists). This ensures that two
-        // `TokenStream`s which differ only in how their tokens are glued
-        // will be considered 'probably equal', which allows us to keep spans.
-        //
-        // This is important when the original `TokenStream` contained
-        // extra spaces (e.g. `f :: < Vec < _ > > ( ) ;'). These extra spaces
-        // will be omitted when we pretty-print, which can cause the original
-        // and reparsed `TokenStream`s to differ in the assignment of `IsJoint`,
-        // leading to some tokens being 'glued' together in one stream but not
-        // the other. See #68489 for more details.
-        fn break_tokens(tree: TokenTree) -> impl Iterator<Item = TokenTree> {
-            // In almost all cases, we should have either zero or one levels
-            // of 'unglueing'. However, in some unusual cases, we may need
-            // to iterate breaking tokens mutliple times. For example:
-            // '[BinOpEq(Shr)] => [Gt, Ge] -> [Gt, Gt, Eq]'
-            let mut token_trees: SmallVec<[_; 2]>;
-            if let TokenTree::Token(token) = &tree {
-                let mut out = SmallVec::<[_; 2]>::new();
-                out.push(token.clone());
-                // Iterate to fixpoint:
-                // * We start off with 'out' containing our initial token, and `temp` empty
-                // * If we are able to break any tokens in `out`, then `out` will have
-                //   at least one more element than 'temp', so we will try to break tokens
-                //   again.
-                // * If we cannot break any tokens in 'out', we are done
-                loop {
-                    let mut temp = SmallVec::<[_; 2]>::new();
-                    let mut changed = false;
-
-                    for token in out.into_iter() {
-                        if let Some((first, second)) = token.kind.break_two_token_op() {
-                            temp.push(Token::new(first, DUMMY_SP));
-                            temp.push(Token::new(second, DUMMY_SP));
-                            changed = true;
-                        } else {
-                            temp.push(token);
-                        }
-                    }
-                    out = temp;
-                    if !changed {
-                        break;
-                    }
-                }
-                token_trees = out.into_iter().map(|t| TokenTree::Token(t)).collect();
-                if token_trees.len() != 1 {
-                    debug!("break_tokens: broke {:?} to {:?}", tree, token_trees);
-                }
-            } else {
-                token_trees = SmallVec::new();
-                token_trees.push(tree);
-            }
-            token_trees.into_iter()
-        }
-
-        let mut t1 = self.trees().filter(semantic_tree).flat_map(break_tokens);
-        let mut t2 = other.trees().filter(semantic_tree).flat_map(break_tokens);
-        for (t1, t2) in t1.by_ref().zip(t2.by_ref()) {
-            if !t1.probably_equal_for_proc_macro(&t2) {
-                return false;
-            }
-        }
-        t1.next().is_none() && t2.next().is_none()
-    }
-
     pub fn map_enumerated<F: FnMut(usize, TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
         TokenStream(Lrc::new(
             self.0
diff --git a/src/librustc_parse/Cargo.toml b/src/librustc_parse/Cargo.toml
index 7164c678808..0d31a8c7bc1 100644
--- a/src/librustc_parse/Cargo.toml
+++ b/src/librustc_parse/Cargo.toml
@@ -12,6 +12,7 @@ doctest = false
 [dependencies]
 bitflags = "1.0"
 log = "0.4"
+smallvec = { version = "1.0", features = ["union", "may_dangle"] }
 rustc_ast_pretty = { path = "../librustc_ast_pretty" }
 rustc_data_structures = { path = "../librustc_data_structures" }
 rustc_feature = { path = "../librustc_feature" }
diff --git a/src/librustc_parse/lib.rs b/src/librustc_parse/lib.rs
index 8e2a9513d6b..0c817a71281 100644
--- a/src/librustc_parse/lib.rs
+++ b/src/librustc_parse/lib.rs
@@ -7,18 +7,22 @@
 #![feature(or_patterns)]
 
 use rustc_ast::ast;
-use rustc_ast::token::{self, Nonterminal};
-use rustc_ast::tokenstream::{self, TokenStream, TokenTree};
+use rustc_ast::token::{self, DelimToken, Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::{self, IsJoint, TokenStream, TokenTree};
 use rustc_ast_pretty::pprust;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{Diagnostic, FatalError, Level, PResult};
 use rustc_session::parse::ParseSess;
-use rustc_span::{FileName, SourceFile, Span};
+use rustc_span::symbol::kw;
+use rustc_span::{FileName, SourceFile, Span, DUMMY_SP};
 
+use smallvec::SmallVec;
+
+use std::mem;
 use std::path::Path;
 use std::str;
 
-use log::info;
+use log::{debug, info};
 
 pub const MACRO_ARGUMENTS: Option<&'static str> = Some("macro arguments");
 
@@ -300,7 +304,7 @@ pub fn nt_to_tokenstream(nt: &Nonterminal, sess: &ParseSess, span: Span) -> Toke
     // modifications, including adding/removing typically non-semantic
     // tokens such as extra braces and commas, don't happen.
     if let Some(tokens) = tokens {
-        if tokens.probably_equal_for_proc_macro(&tokens_for_real) {
+        if tokenstream_probably_equal_for_proc_macro(&tokens, &tokens_for_real, sess) {
             return tokens;
         }
         info!(
@@ -373,3 +377,203 @@ fn prepend_attrs(
     builder.push(tokens.clone());
     Some(builder.build())
 }
+
+// See comments in `Nonterminal::to_tokenstream` for why we care about
+// *probably* equal here rather than actual equality
+//
+// This is otherwise the same as `eq_unspanned`, only recursing with a
+// different method.
+pub fn tokenstream_probably_equal_for_proc_macro(
+    first: &TokenStream,
+    other: &TokenStream,
+    sess: &ParseSess,
+) -> bool {
+    // When checking for `probably_eq`, we ignore certain tokens that aren't
+    // preserved in the AST. Because they are not preserved, the pretty
+    // printer arbitrarily adds or removes them when printing as token
+    // streams, making a comparison between a token stream generated from an
+    // AST and a token stream which was parsed into an AST more reliable.
+    fn semantic_tree(tree: &TokenTree) -> bool {
+        if let TokenTree::Token(token) = tree {
+            if let
+                // The pretty printer tends to add trailing commas to
+                // everything, and in particular, after struct fields.
+                | token::Comma
+                // The pretty printer emits `NoDelim` as whitespace.
+                | token::OpenDelim(DelimToken::NoDelim)
+                | token::CloseDelim(DelimToken::NoDelim)
+                // The pretty printer collapses many semicolons into one.
+                | token::Semi
+                // The pretty printer collapses whitespace arbitrarily and can
+                // introduce whitespace from `NoDelim`.
+                | token::Whitespace
+                // The pretty printer can turn `$crate` into `::crate_name`
+                | token::ModSep = token.kind {
+                return false;
+            }
+        }
+        true
+    }
+
+    // When comparing two `TokenStream`s, we ignore the `IsJoint` information.
+    //
+    // However, `rustc_parse::lexer::tokentrees::TokenStreamBuilder` will
+    // use `Token.glue` on adjacent tokens with the proper `IsJoint`.
+    // Since we are ignoring `IsJoint`, a 'glued' token (e.g. `BinOp(Shr)`)
+    // and its 'split'/'unglued' components (e.g. `Gt, Gt`) are equivalent
+    // when determining if two `TokenStream`s are 'probably equal'.
+    //
+    // Therefore, we use `break_two_token_op` to convert all tokens
+    // to the 'unglued' form (if it exists). This ensures that two
+    // `TokenStream`s which differ only in how their tokens are glued
+    // will be considered 'probably equal', which allows us to keep spans.
+    //
+    // This is important when the original `TokenStream` contained
+    // extra spaces (e.g. `f :: < Vec < _ > > ( ) ;'). These extra spaces
+    // will be omitted when we pretty-print, which can cause the original
+    // and reparsed `TokenStream`s to differ in the assignment of `IsJoint`,
+    // leading to some tokens being 'glued' together in one stream but not
+    // the other. See #68489 for more details.
+    fn break_tokens(tree: TokenTree) -> impl Iterator<Item = TokenTree> {
+        // In almost all cases, we should have either zero or one levels
+        // of 'unglueing'. However, in some unusual cases, we may need
+        // to iterate breaking tokens multiple times. For example:
+        // '[BinOpEq(Shr)] => [Gt, Ge] -> [Gt, Gt, Eq]'
+        let mut token_trees: SmallVec<[_; 2]>;
+        if let TokenTree::Token(token) = &tree {
+            let mut out = SmallVec::<[_; 2]>::new();
+            out.push(token.clone());
+            // Iterate to fixpoint:
+            // * We start off with 'out' containing our initial token, and `temp` empty
+            // * If we are able to break any tokens in `out`, then `out` will have
+            //   at least one more element than 'temp', so we will try to break tokens
+            //   again.
+            // * If we cannot break any tokens in 'out', we are done
+            loop {
+                let mut temp = SmallVec::<[_; 2]>::new();
+                let mut changed = false;
+
+                for token in out.into_iter() {
+                    if let Some((first, second)) = token.kind.break_two_token_op() {
+                        temp.push(Token::new(first, DUMMY_SP));
+                        temp.push(Token::new(second, DUMMY_SP));
+                        changed = true;
+                    } else {
+                        temp.push(token);
+                    }
+                }
+                out = temp;
+                if !changed {
+                    break;
+                }
+            }
+            token_trees = out.into_iter().map(|t| TokenTree::Token(t)).collect();
+            if token_trees.len() != 1 {
+                debug!("break_tokens: broke {:?} to {:?}", tree, token_trees);
+            }
+        } else {
+            token_trees = SmallVec::new();
+            token_trees.push(tree);
+        }
+        token_trees.into_iter()
+    }
+
+    let expand_nt = |tree: TokenTree| {
+        if let TokenTree::Token(Token { kind: TokenKind::Interpolated(nt), span }) = &tree {
+            nt_to_tokenstream(nt, sess, *span).into_trees()
+        } else {
+            TokenStream::new(vec![(tree, IsJoint::NonJoint)]).into_trees()
+        }
+    };
+
+    // Break tokens after we expand any nonterminals, so that we break tokens
+    // that are produced as a result of nonterminal expansion.
+    let mut t1 = first.trees().filter(semantic_tree).flat_map(expand_nt).flat_map(break_tokens);
+    let mut t2 = other.trees().filter(semantic_tree).flat_map(expand_nt).flat_map(break_tokens);
+    for (t1, t2) in t1.by_ref().zip(t2.by_ref()) {
+        if !tokentree_probably_equal_for_proc_macro(&t1, &t2, sess) {
+            return false;
+        }
+    }
+    t1.next().is_none() && t2.next().is_none()
+}
+
+// See comments in `Nonterminal::to_tokenstream` for why we care about
+// *probably* equal here rather than actual equality
+crate fn token_probably_equal_for_proc_macro(first: &Token, other: &Token) -> bool {
+    use TokenKind::*;
+
+    if mem::discriminant(&first.kind) != mem::discriminant(&other.kind) {
+        return false;
+    }
+    match (&first.kind, &other.kind) {
+        (&Eq, &Eq)
+        | (&Lt, &Lt)
+        | (&Le, &Le)
+        | (&EqEq, &EqEq)
+        | (&Ne, &Ne)
+        | (&Ge, &Ge)
+        | (&Gt, &Gt)
+        | (&AndAnd, &AndAnd)
+        | (&OrOr, &OrOr)
+        | (&Not, &Not)
+        | (&Tilde, &Tilde)
+        | (&At, &At)
+        | (&Dot, &Dot)
+        | (&DotDot, &DotDot)
+        | (&DotDotDot, &DotDotDot)
+        | (&DotDotEq, &DotDotEq)
+        | (&Comma, &Comma)
+        | (&Semi, &Semi)
+        | (&Colon, &Colon)
+        | (&ModSep, &ModSep)
+        | (&RArrow, &RArrow)
+        | (&LArrow, &LArrow)
+        | (&FatArrow, &FatArrow)
+        | (&Pound, &Pound)
+        | (&Dollar, &Dollar)
+        | (&Question, &Question)
+        | (&Whitespace, &Whitespace)
+        | (&Comment, &Comment)
+        | (&Eof, &Eof) => true,
+
+        (&BinOp(a), &BinOp(b)) | (&BinOpEq(a), &BinOpEq(b)) => a == b,
+
+        (&OpenDelim(a), &OpenDelim(b)) | (&CloseDelim(a), &CloseDelim(b)) => a == b,
+
+        (&DocComment(a), &DocComment(b)) | (&Shebang(a), &Shebang(b)) => a == b,
+
+        (&Literal(a), &Literal(b)) => a == b,
+
+        (&Lifetime(a), &Lifetime(b)) => a == b,
+        (&Ident(a, b), &Ident(c, d)) => {
+            b == d && (a == c || a == kw::DollarCrate || c == kw::DollarCrate)
+        }
+
+        // Expanded by `tokenstream_probably_equal_for_proc_macro`
+        (&Interpolated(_), &Interpolated(_)) => unreachable!(),
+
+        _ => panic!("forgot to add a token?"),
+    }
+}
+
+// See comments in `Nonterminal::to_tokenstream` for why we care about
+// *probably* equal here rather than actual equality
+//
+// This is otherwise the same as `eq_unspanned`, only recursing with a
+// different method.
+pub fn tokentree_probably_equal_for_proc_macro(
+    first: &TokenTree,
+    other: &TokenTree,
+    sess: &ParseSess,
+) -> bool {
+    match (first, other) {
+        (TokenTree::Token(token), TokenTree::Token(token2)) => {
+            token_probably_equal_for_proc_macro(token, token2)
+        }
+        (TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
+            delim == delim2 && tokenstream_probably_equal_for_proc_macro(&tts, &tts2, sess)
+        }
+        _ => false,
+    }
+}
diff --git a/src/librustc_trait_selection/traits/select.rs b/src/librustc_trait_selection/traits/select.rs
index b402aba65cd..9b3381066a1 100644
--- a/src/librustc_trait_selection/traits/select.rs
+++ b/src/librustc_trait_selection/traits/select.rs
@@ -1058,20 +1058,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                 // Heuristics: show the diagnostics when there are no candidates in crate.
                 if let Ok(candidate_set) = self.assemble_candidates(stack) {
                     let mut no_candidates_apply = true;
-                    {
-                        let evaluated_candidates =
-                            candidate_set.vec.iter().map(|c| self.evaluate_candidate(stack, &c));
-
-                        for ec in evaluated_candidates {
-                            match ec {
-                                Ok(c) => {
-                                    if c.may_apply() {
-                                        no_candidates_apply = false;
-                                        break;
-                                    }
-                                }
-                                Err(e) => return Err(e.into()),
-                            }
+
+                    for c in candidate_set.vec.iter() {
+                        if self.evaluate_candidate(stack, &c)?.may_apply() {
+                            no_candidates_apply = false;
+                            break;
                         }
                     }
 
@@ -3182,11 +3173,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                 assert_eq!(tys_a.len(), tys_b.len());
 
                 // The last field of the tuple has to exist.
-                let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() {
-                    x
-                } else {
-                    return Err(Unimplemented);
-                };
+                let (&a_last, a_mid) = tys_a.split_last().ok_or(Unimplemented)?;
                 let &b_last = tys_b.last().unwrap();
 
                 // Check that the source tuple with the target's
diff --git a/src/test/ui/proc-macro/macro-rules-capture.rs b/src/test/ui/proc-macro/macro-rules-capture.rs
new file mode 100644
index 00000000000..37436567d70
--- /dev/null
+++ b/src/test/ui/proc-macro/macro-rules-capture.rs
@@ -0,0 +1,18 @@
+// aux-build: test-macros.rs
+
+extern crate test_macros;
+use test_macros::recollect_attr;
+
+macro_rules! reemit {
+    ($name:ident => $($token:expr)*) => {
+
+        #[recollect_attr]
+        pub fn $name() {
+            $($token)*;
+        }
+    }
+}
+
+reemit! { foo => 45u32.into() } //~ ERROR type annotations
+
+fn main() {}
diff --git a/src/test/ui/proc-macro/macro-rules-capture.stderr b/src/test/ui/proc-macro/macro-rules-capture.stderr
new file mode 100644
index 00000000000..6d512846ff7
--- /dev/null
+++ b/src/test/ui/proc-macro/macro-rules-capture.stderr
@@ -0,0 +1,12 @@
+error[E0282]: type annotations needed
+  --> $DIR/macro-rules-capture.rs:16:24
+   |
+LL | reemit! { foo => 45u32.into() }
+   |                  ------^^^^--
+   |                  |     |
+   |                  |     cannot infer type for type parameter `T` declared on the trait `Into`
+   |                  this method call resolves to `T`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0282`.