author     Eduard Burtescu <edy.burt@gmail.com>  2014-03-17 09:55:41 +0200
committer  Eduard Burtescu <edy.burt@gmail.com>  2014-03-17 09:55:41 +0200
commit     e2ebc8f81138bcad019f43a3af0cddb0dc0dcfbc (patch)
tree       1d780574526f91999a3a73bebe5112c2fb8ef136 /src/libsyntax/parse
parent     e02aa722aace1112ba2a7927ef76abe79ba9dae6 (diff)
Fix rustdoc and tests.
Diffstat (limited to 'src/libsyntax/parse')
-rw-r--r--  src/libsyntax/parse/lexer.rs  | 84
-rw-r--r--  src/libsyntax/parse/mod.rs    |  3
2 files changed, 32 insertions, 55 deletions
diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs
index 43e1f8756fa..546aefc1297 100644
--- a/src/libsyntax/parse/lexer.rs
+++ b/src/libsyntax/parse/lexer.rs
@@ -1007,28 +1007,24 @@ mod test {
     use std::io::util;
     use std::vec_ng::Vec;
 
-    // represents a testing reader (incl. both reader and interner)
-    struct Env {
-        string_reader: StringReader
+    fn mk_sh() -> diagnostic::SpanHandler {
+        let emitter = diagnostic::EmitterWriter::new(~util::NullWriter);
+        let handler = diagnostic::mk_handler(~emitter);
+        diagnostic::mk_span_handler(handler, CodeMap::new())
     }
 
     // open a string reader for the given string
-    fn setup(teststr: ~str) -> Env {
-        let cm = CodeMap::new();
-        let fm = cm.new_filemap(~"zebra.rs", teststr);
-        let writer = ~util::NullWriter;
-        let emitter = diagnostic::EmitterWriter::new(writer);
-        let handler = diagnostic::mk_handler(~emitter);
-        let span_handler = diagnostic::mk_span_handler(handler, cm);
-        Env {
-            string_reader: new_string_reader(span_handler,fm)
-        }
+    fn setup<'a>(span_handler: &'a diagnostic::SpanHandler,
+                 teststr: ~str) -> StringReader<'a> {
+        let fm = span_handler.cm.new_filemap(~"zebra.rs", teststr);
+        new_string_reader(span_handler, fm)
     }
 
     #[test] fn t1 () {
-        let Env {string_reader} =
-            setup(~"/* my source file */ \
-                    fn main() { println!(\"zebra\"); }\n");
+        let span_handler = mk_sh();
+        let string_reader = setup(&span_handler,
+            ~"/* my source file */ \
+              fn main() { println!(\"zebra\"); }\n");
         let id = str_to_ident("fn");
         let tok1 = string_reader.next_token();
         let tok2 = TokenAndSpan{
@@ -1049,11 +1045,9 @@ mod test {
 
     // check that the given reader produces the desired stream
     // of tokens (stop checking after exhausting the expected vec)
-    fn check_tokenization (env: Env, expected: Vec<token::Token> ) {
+    fn check_tokenization (string_reader: StringReader, expected: Vec<token::Token> ) {
         for expected_tok in expected.iter() {
-            let TokenAndSpan {tok:actual_tok, sp: _} =
-                env.string_reader.next_token();
-            assert_eq!(&actual_tok,expected_tok);
+            assert_eq!(&string_reader.next_token().tok, expected_tok);
         }
     }
 
@@ -1063,71 +1057,55 @@ mod test {
     }
 
     #[test] fn doublecolonparsing () {
-        let env = setup (~"a b");
-        check_tokenization (env,
+        check_tokenization(setup(&mk_sh(), ~"a b"),
                            vec!(mk_ident("a",false),
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_2 () {
-        let env = setup (~"a::b");
-        check_tokenization (env,
+        check_tokenization(setup(&mk_sh(), ~"a::b"),
                            vec!(mk_ident("a",true),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_3 () {
-        let env = setup (~"a ::b");
-        check_tokenization (env,
+        check_tokenization(setup(&mk_sh(), ~"a ::b"),
                            vec!(mk_ident("a",false),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_4 () {
-        let env = setup (~"a:: b");
-        check_tokenization (env,
+        check_tokenization(setup(&mk_sh(), ~"a:: b"),
                            vec!(mk_ident("a",true),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn character_a() {
-        let env = setup(~"'a'");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        assert_eq!(tok,token::LIT_CHAR('a' as u32));
+        assert_eq!(setup(&mk_sh(), ~"'a'").next_token().tok,
+                   token::LIT_CHAR('a' as u32));
     }
 
     #[test] fn character_space() {
-        let env = setup(~"' '");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        assert_eq!(tok, token::LIT_CHAR(' ' as u32));
+        assert_eq!(setup(&mk_sh(), ~"' '").next_token().tok,
+                   token::LIT_CHAR(' ' as u32));
     }
 
     #[test] fn character_escaped() {
-        let env = setup(~"'\\n'");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        assert_eq!(tok, token::LIT_CHAR('\n' as u32));
+        assert_eq!(setup(&mk_sh(), ~"'\\n'").next_token().tok,
+                   token::LIT_CHAR('\n' as u32));
     }
 
     #[test] fn lifetime_name() {
-        let env = setup(~"'abc");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        let id = token::str_to_ident("abc");
-        assert_eq!(tok, token::LIFETIME(id));
+        assert_eq!(setup(&mk_sh(), ~"'abc").next_token().tok,
+                   token::LIFETIME(token::str_to_ident("abc")));
     }
 
     #[test] fn raw_string() {
-        let env = setup(~"r###\"\"#a\\b\x00c\"\"###");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        let id = token::str_to_ident("\"#a\\b\x00c\"");
-        assert_eq!(tok, token::LIT_STR_RAW(id, 3));
+        assert_eq!(setup(&mk_sh(), ~"r###\"\"#a\\b\x00c\"\"###").next_token().tok,
+                   token::LIT_STR_RAW(token::str_to_ident("\"#a\\b\x00c\""), 3));
     }
 
     #[test] fn line_doc_comments() {
@@ -1137,10 +1115,8 @@ mod test {
     }
 
     #[test] fn nested_block_comments() {
-        let env = setup(~"/* /* */ */'a'");
-        let TokenAndSpan {tok, sp: _} =
-            env.string_reader.next_token();
-        assert_eq!(tok,token::LIT_CHAR('a' as u32));
+        assert_eq!(setup(&mk_sh(), ~"/* /* */ */'a'").next_token().tok,
+                   token::LIT_CHAR('a' as u32));
     }
 
 }
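
The lexer.rs hunk above drops the owned `Env` wrapper: each test now builds a `diagnostic::SpanHandler` once via `mk_sh()` and gets back a `StringReader<'a>` that merely borrows it. Below is a minimal, self-contained analogue of that shape in current Rust; the names `SpanHandler`, `StringReader`, `mk_sh`, and `setup` mirror the diff, but the struct bodies are simplified stand-ins, not the real libsyntax types.

```rust
// Simplified stand-in types; the real SpanHandler/StringReader in
// libsyntax carry a CodeMap, a diagnostic emitter, and lexer state.
struct SpanHandler {
    name: &'static str,
}

struct StringReader<'a> {
    handler: &'a SpanHandler, // borrowed, not owned, as in the patch
    src: String,
    pos: usize,
}

fn mk_sh() -> SpanHandler {
    SpanHandler { name: "test-handler" }
}

// The reader borrows the handler, so its lifetime is tied to the
// handler the test creates and keeps alive in its own body.
fn setup<'a>(handler: &'a SpanHandler, src: &str) -> StringReader<'a> {
    StringReader { handler, src: src.to_string(), pos: 0 }
}

fn main() {
    let handler = mk_sh();
    let reader = setup(&handler, "fn main() { println!(\"zebra\"); }");
    assert_eq!(reader.handler.name, "test-handler");
    assert_eq!(reader.pos, 0);
    assert!(reader.src.starts_with("fn main"));
}
```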
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index 79fedf82798..062bc100863 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -584,7 +584,8 @@ mod test {
     }
 
     #[test] fn parse_ident_pat () {
-        let mut parser = string_to_parser(&new_parse_sess(), ~"b");
+        let sess = new_parse_sess();
+        let mut parser = string_to_parser(&sess, ~"b");
         assert!(parser.parse_pat() ==
                    @ast::Pat{id: ast::DUMMY_NODE_ID,
                              node: ast::PatIdent(
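
The mod.rs hunk makes the same adjustment on the parser side: `string_to_parser` now returns a parser that borrows the parse session, so the session can no longer be a temporary created in the argument list and must be bound to a local first. A small, self-contained sketch of that constraint follows; `Sess` and `Parser` here are hypothetical stand-ins for `ParseSess` and the libsyntax parser.

```rust
struct Sess {
    count: u32,
}

struct Parser<'a> {
    sess: &'a Sess, // the parser borrows the session
}

fn new_sess() -> Sess {
    Sess { count: 0 }
}

fn string_to_parser(sess: &Sess) -> Parser<'_> {
    Parser { sess }
}

fn main() {
    // As in the patch: bind the session to a local first, so the borrow
    // held by `parser` stays valid for the rest of the test body.
    let sess = new_sess();
    let parser = string_to_parser(&sess);
    assert_eq!(parser.sess.count, 0);

    // `let parser = string_to_parser(&new_sess());` followed by a later
    // use of `parser` would not compile: the temporary session is dropped
    // at the end of that statement while `parser` still borrows it.
}
```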