Diffstat (limited to 'src/libsyntax/parse')
-rw-r--r--  src/libsyntax/parse/comments.rs |   4
-rw-r--r--  src/libsyntax/parse/lexer.rs    | 126
-rw-r--r--  src/libsyntax/parse/mod.rs      |  66
-rw-r--r--  src/libsyntax/parse/parser.rs   |   2
4 files changed, 94 insertions, 104 deletions
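
The recurring pattern across these files is the removal of garbage-collected `@` pointers: the diagnostic handler is passed around as a plain borrow (`@SpanHandler` becomes `&'a SpanHandler`), while the shared filemap becomes reference-counted (`@FileMap` becomes `Rc<FileMap>`). A minimal sketch of the borrow half of that pattern, written in current Rust syntax with a hypothetical stand-in `SpanHandler` rather than the real libsyntax type:

    use std::cell::RefCell;

    // Hypothetical stand-in for diagnostic::SpanHandler.
    struct SpanHandler {
        messages: RefCell<Vec<String>>,
    }

    // Borrowing the handler instead of holding an @-pointer means the
    // reader gains a lifetime parameter tying it to the handler's owner.
    struct StringReader<'a> {
        span_diagnostic: &'a SpanHandler,
    }

    impl<'a> StringReader<'a> {
        // Returning the stored borrow re-exposes it at lifetime 'a.
        fn span_diag(&self) -> &'a SpanHandler {
            self.span_diagnostic
        }
    }

    fn main() {
        let handler = SpanHandler { messages: RefCell::new(Vec::new()) };
        let reader = StringReader { span_diagnostic: &handler };
        reader.span_diag().messages.borrow_mut().push("ok".to_string());
    }
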
diff --git a/src/libsyntax/parse/comments.rs b/src/libsyntax/parse/comments.rs
index c2a2097de24..ed74fd416d1 100644
--- a/src/libsyntax/parse/comments.rs
+++ b/src/libsyntax/parse/comments.rs
@@ -346,10 +346,10 @@ pub struct Literal {
 // it appears this function is called only from pprust... that's
 // probably not a good thing.
 pub fn gather_comments_and_literals(span_diagnostic:
-                                        @diagnostic::SpanHandler,
+                                        &diagnostic::SpanHandler,
                                     path: ~str,
                                     srdr: &mut io::Reader)
-                                 -> (Vec<Comment> , Vec<Literal> ) {
+                                 -> (Vec<Comment>, Vec<Literal>) {
     let src = srdr.read_to_end().unwrap();
     let src = str::from_utf8_owned(src).unwrap();
     let cm = CodeMap::new();
diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs
index 884fc306f22..43e1f8756fa 100644
--- a/src/libsyntax/parse/lexer.rs
+++ b/src/libsyntax/parse/lexer.rs
@@ -18,6 +18,7 @@ use parse::token::{str_to_ident};
 
 use std::cell::{Cell, RefCell};
 use std::char;
+use std::rc::Rc;
 use std::mem::replace;
 use std::num::from_str_radix;
 
@@ -27,7 +28,7 @@ pub trait Reader {
     fn is_eof(&self) -> bool;
     fn next_token(&self) -> TokenAndSpan;
     fn fatal(&self, ~str) -> !;
-    fn span_diag(&self) -> @SpanHandler;
+    fn span_diag<'a>(&'a self) -> &'a SpanHandler;
     fn peek(&self) -> TokenAndSpan;
     fn dup(&self) -> ~Reader:;
 }
@@ -38,8 +39,8 @@ pub struct TokenAndSpan {
     sp: Span,
 }
 
-pub struct StringReader {
-    span_diagnostic: @SpanHandler,
+pub struct StringReader<'a> {
+    span_diagnostic: &'a SpanHandler,
     // The absolute offset within the codemap of the next character to read
     pos: Cell<BytePos>,
     // The absolute offset within the codemap of the last character read(curr)
@@ -48,36 +49,36 @@ pub struct StringReader {
     col: Cell<CharPos>,
     // The last character to be read
     curr: Cell<Option<char>>,
-    filemap: @codemap::FileMap,
+    filemap: Rc<codemap::FileMap>,
     /* cached: */
     peek_tok: RefCell<token::Token>,
     peek_span: RefCell<Span>,
 }
 
-impl StringReader {
+impl<'a> StringReader<'a> {
     pub fn curr_is(&self, c: char) -> bool {
         self.curr.get() == Some(c)
     }
 }
 
-pub fn new_string_reader(span_diagnostic: @SpanHandler,
-                         filemap: @codemap::FileMap)
-                      -> StringReader {
+pub fn new_string_reader<'a>(span_diagnostic: &'a SpanHandler,
+                             filemap: Rc<codemap::FileMap>)
+                             -> StringReader<'a> {
     let r = new_low_level_string_reader(span_diagnostic, filemap);
     string_advance_token(&r); /* fill in peek_* */
     r
 }
 
 /* For comments.rs, which hackily pokes into 'pos' and 'curr' */
-pub fn new_low_level_string_reader(span_diagnostic: @SpanHandler,
-                                   filemap: @codemap::FileMap)
-                                -> StringReader {
+pub fn new_low_level_string_reader<'a>(span_diagnostic: &'a SpanHandler,
+                                       filemap: Rc<codemap::FileMap>)
+                                       -> StringReader<'a> {
     // Force the initial reader bump to start on a fresh line
     let initial_char = '\n';
     let r = StringReader {
         span_diagnostic: span_diagnostic,
-        pos: Cell::new(filemap.start_pos),
-        last_pos: Cell::new(filemap.start_pos),
+        pos: Cell::new(filemap.deref().start_pos),
+        last_pos: Cell::new(filemap.deref().start_pos),
         col: Cell::new(CharPos(0)),
         curr: Cell::new(Some(initial_char)),
         filemap: filemap,
@@ -92,20 +93,20 @@ pub fn new_low_level_string_reader(span_diagnostic: @SpanHandler,
 // duplicating the string reader is probably a bad idea, in
 // that using them will cause interleaved pushes of line
 // offsets to the underlying filemap...
-fn dup_string_reader(r: &StringReader) -> StringReader {
+fn dup_string_reader<'a>(r: &StringReader<'a>) -> StringReader<'a> {
     StringReader {
         span_diagnostic: r.span_diagnostic,
         pos: Cell::new(r.pos.get()),
         last_pos: Cell::new(r.last_pos.get()),
         col: Cell::new(r.col.get()),
         curr: Cell::new(r.curr.get()),
-        filemap: r.filemap,
+        filemap: r.filemap.clone(),
         peek_tok: r.peek_tok.clone(),
         peek_span: r.peek_span.clone(),
     }
 }
 
-impl Reader for StringReader {
+impl<'a> Reader for StringReader<'a> {
     fn is_eof(&self) -> bool { is_eof(self) }
     // return the next token. EFFECT: advances the string_reader.
     fn next_token(&self) -> TokenAndSpan {
@@ -122,7 +123,7 @@ impl Reader for StringReader {
     fn fatal(&self, m: ~str) -> ! {
         self.span_diagnostic.span_fatal(self.peek_span.get(), m)
     }
-    fn span_diag(&self) -> @SpanHandler { self.span_diagnostic }
+    fn span_diag<'a>(&'a self) -> &'a SpanHandler { self.span_diagnostic }
     fn peek(&self) -> TokenAndSpan {
         // FIXME(pcwalton): Bad copy!
         TokenAndSpan {
@@ -133,7 +134,7 @@ impl Reader for StringReader {
     fn dup(&self) -> ~Reader: { ~dup_string_reader(self) as ~Reader: }
 }
 
-impl Reader for TtReader {
+impl<'a> Reader for TtReader<'a> {
     fn is_eof(&self) -> bool {
         let cur_tok = self.cur_tok.borrow();
         *cur_tok.get() == token::EOF
@@ -146,7 +147,7 @@ impl Reader for TtReader {
     fn fatal(&self, m: ~str) -> ! {
         self.sp_diag.span_fatal(self.cur_span.get(), m);
     }
-    fn span_diag(&self) -> @SpanHandler { self.sp_diag }
+    fn span_diag<'a>(&'a self) -> &'a SpanHandler { self.sp_diag }
     fn peek(&self) -> TokenAndSpan {
         TokenAndSpan {
             tok: self.cur_tok.get(),
@@ -189,7 +190,7 @@ fn fatal_span_verbose(rdr: &StringReader,
                    -> ! {
     let mut m = m;
     m.push_str(": ");
-    let s = rdr.filemap.src.slice(
+    let s = rdr.filemap.deref().src.slice(
                   byte_offset(rdr, from_pos).to_uint(),
                   byte_offset(rdr, to_pos).to_uint());
     m.push_str(s);
@@ -218,7 +219,7 @@ fn string_advance_token(r: &StringReader) {
 }
 
 fn byte_offset(rdr: &StringReader, pos: BytePos) -> BytePos {
-    (pos - rdr.filemap.start_pos)
+    (pos - rdr.filemap.deref().start_pos)
 }
 
 /// Calls `f` with a string slice of the source text spanning from `start`
@@ -240,7 +241,7 @@ fn with_str_from_to<T>(
                     end: BytePos,
                     f: |s: &str| -> T)
                     -> T {
-    f(rdr.filemap.src.slice(
+    f(rdr.filemap.deref().src.slice(
             byte_offset(rdr, start).to_uint(),
             byte_offset(rdr, end).to_uint()))
 }
@@ -250,21 +251,21 @@ fn with_str_from_to<T>(
 pub fn bump(rdr: &StringReader) {
     rdr.last_pos.set(rdr.pos.get());
     let current_byte_offset = byte_offset(rdr, rdr.pos.get()).to_uint();
-    if current_byte_offset < (rdr.filemap.src).len() {
+    if current_byte_offset < rdr.filemap.deref().src.len() {
         assert!(rdr.curr.get().is_some());
         let last_char = rdr.curr.get().unwrap();
-        let next = rdr.filemap.src.char_range_at(current_byte_offset);
+        let next = rdr.filemap.deref().src.char_range_at(current_byte_offset);
         let byte_offset_diff = next.next - current_byte_offset;
         rdr.pos.set(rdr.pos.get() + Pos::from_uint(byte_offset_diff));
         rdr.curr.set(Some(next.ch));
         rdr.col.set(rdr.col.get() + CharPos(1u));
         if last_char == '\n' {
-            rdr.filemap.next_line(rdr.last_pos.get());
+            rdr.filemap.deref().next_line(rdr.last_pos.get());
             rdr.col.set(CharPos(0u));
         }
 
         if byte_offset_diff > 1 {
-            rdr.filemap.record_multibyte_char(rdr.last_pos.get(), byte_offset_diff);
+            rdr.filemap.deref().record_multibyte_char(rdr.last_pos.get(), byte_offset_diff);
         }
     } else {
         rdr.curr.set(None);
@@ -275,8 +276,8 @@ pub fn is_eof(rdr: &StringReader) -> bool {
 }
 pub fn nextch(rdr: &StringReader) -> Option<char> {
     let offset = byte_offset(rdr, rdr.pos.get()).to_uint();
-    if offset < (rdr.filemap.src).len() {
-        Some(rdr.filemap.src.char_at(offset))
+    if offset < rdr.filemap.deref().src.len() {
+        Some(rdr.filemap.deref().src.char_at(offset))
     } else {
         None
     }
@@ -334,56 +335,55 @@ fn consume_any_line_comment(rdr: &StringReader)
                          -> Option<TokenAndSpan> {
     if rdr.curr_is('/') {
         match nextch(rdr) {
-          Some('/') => {
-            bump(rdr);
-            bump(rdr);
-            // line comments starting with "///" or "//!" are doc-comments
-            if rdr.curr_is('/') || rdr.curr_is('!') {
-                let start_bpos = rdr.pos.get() - BytePos(3);
-                while !rdr.curr_is('\n') && !is_eof(rdr) {
-                    bump(rdr);
-                }
-                let ret = with_str_from(rdr, start_bpos, |string| {
-                    // but comments with only more "/"s are not
-                    if !is_line_non_doc_comment(string) {
-                        Some(TokenAndSpan{
-                            tok: token::DOC_COMMENT(str_to_ident(string)),
-                            sp: codemap::mk_sp(start_bpos, rdr.pos.get())
-                        })
-                    } else {
-                        None
+            Some('/') => {
+                bump(rdr);
+                bump(rdr);
+                // line comments starting with "///" or "//!" are doc-comments
+                if rdr.curr_is('/') || rdr.curr_is('!') {
+                    let start_bpos = rdr.pos.get() - BytePos(3);
+                    while !rdr.curr_is('\n') && !is_eof(rdr) {
+                        bump(rdr);
                     }
-                });
+                    let ret = with_str_from(rdr, start_bpos, |string| {
+                        // but comments with only more "/"s are not
+                        if !is_line_non_doc_comment(string) {
+                            Some(TokenAndSpan{
+                                tok: token::DOC_COMMENT(str_to_ident(string)),
+                                sp: codemap::mk_sp(start_bpos, rdr.pos.get())
+                            })
+                        } else {
+                            None
+                        }
+                    });
 
-                if ret.is_some() {
-                    return ret;
+                    if ret.is_some() {
+                        return ret;
+                    }
+                } else {
+                    while !rdr.curr_is('\n') && !is_eof(rdr) { bump(rdr); }
                 }
-            } else {
-                while !rdr.curr_is('\n') && !is_eof(rdr) { bump(rdr); }
+                // Restart whitespace munch.
+                consume_whitespace_and_comments(rdr)
             }
-            // Restart whitespace munch.
-            return consume_whitespace_and_comments(rdr);
-          }
-          Some('*') => { bump(rdr); bump(rdr); return consume_block_comment(rdr); }
-          _ => ()
+            Some('*') => { bump(rdr); bump(rdr); consume_block_comment(rdr) }
+            _ => None
         }
     } else if rdr.curr_is('#') {
         if nextch_is(rdr, '!') {
             // I guess this is the only way to figure out if
             // we're at the beginning of the file...
-            let cmap = @CodeMap::new();
-            {
-                let mut files = cmap.files.borrow_mut();
-                files.get().push(rdr.filemap);
-            }
+            let cmap = CodeMap::new();
+            cmap.files.borrow_mut().get().push(rdr.filemap.clone());
             let loc = cmap.lookup_char_pos_adj(rdr.last_pos.get());
             if loc.line == 1u && loc.col == CharPos(0u) {
                 while !rdr.curr_is('\n') && !is_eof(rdr) { bump(rdr); }
                 return consume_whitespace_and_comments(rdr);
             }
         }
+        None
+    } else {
+        None
     }
-    return None;
 }
 
 pub fn is_block_non_doc_comment(s: &str) -> bool {
@@ -1019,7 +1019,7 @@ mod test {
         let writer = ~util::NullWriter;
         let emitter = diagnostic::EmitterWriter::new(writer);
         let handler = diagnostic::mk_handler(~emitter);
-        let span_handler = diagnostic::mk_span_handler(handler, @cm);
+        let span_handler = diagnostic::mk_span_handler(handler, cm);
         Env {
             string_reader: new_string_reader(span_handler,fm)
         }
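
The other half of the pattern shows up in `dup_string_reader` and the `filemap` field: an `Rc<FileMap>` handle is cloned, which bumps a reference count instead of copying a GC pointer, while the explicit `.deref()` calls reach the data inside (Rust of this era did not auto-deref `Rc`). A sketch under the same stand-in assumption:

    use std::cell::Cell;
    use std::rc::Rc;

    // Hypothetical stand-in for codemap::FileMap.
    struct FileMap {
        src: String,
    }

    struct Reader {
        filemap: Rc<FileMap>,
        pos: Cell<usize>,
    }

    // Duplicating a reader clones the Rc handle (a refcount bump),
    // not the underlying source text.
    fn dup_reader(r: &Reader) -> Reader {
        Reader {
            filemap: r.filemap.clone(),
            pos: Cell::new(r.pos.get()),
        }
    }

    fn main() {
        let fm = Rc::new(FileMap { src: "fn main() {}".to_string() });
        let r1 = Reader { filemap: fm, pos: Cell::new(0) };
        let r2 = dup_reader(&r1);
        assert_eq!(Rc::strong_count(&r1.filemap), 2);
        assert_eq!(r1.filemap.src, r2.filemap.src); // same shared text
    }
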
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index 19291f72101..79fedf82798 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -13,13 +13,13 @@
 
 use ast;
 use codemap::{Span, CodeMap, FileMap};
-use codemap;
 use diagnostic::{SpanHandler, mk_span_handler, default_handler};
 use parse::attr::ParserAttr;
 use parse::parser::Parser;
 
 use std::cell::RefCell;
 use std::io::File;
+use std::rc::Rc;
 use std::str;
 use std::vec_ng::Vec;
 
@@ -40,26 +40,20 @@ pub mod obsolete;
 
 // info about a parsing session.
 pub struct ParseSess {
-    cm: @codemap::CodeMap, // better be the same as the one in the reader!
-    span_diagnostic: @SpanHandler, // better be the same as the one in the reader!
+    span_diagnostic: SpanHandler, // better be the same as the one in the reader!
     /// Used to determine and report recursive mod inclusions
-    included_mod_stack: RefCell<Vec<Path> >,
+    included_mod_stack: RefCell<Vec<Path>>,
 }
 
 pub fn new_parse_sess() -> ParseSess {
-    let cm = @CodeMap::new();
     ParseSess {
-        cm: cm,
-        span_diagnostic: mk_span_handler(default_handler(), cm),
+        span_diagnostic: mk_span_handler(default_handler(), CodeMap::new()),
         included_mod_stack: RefCell::new(Vec::new()),
     }
 }
 
-pub fn new_parse_sess_special_handler(sh: @SpanHandler,
-                                      cm: @codemap::CodeMap)
-                                      -> ParseSess {
+pub fn new_parse_sess_special_handler(sh: SpanHandler) -> ParseSess {
     ParseSess {
-        cm: cm,
         span_diagnostic: sh,
         included_mod_stack: RefCell::new(Vec::new()),
     }
@@ -175,40 +169,36 @@ pub fn parse_tts_from_source_str(name: ~str,
 
 // Create a new parser from a source string
 pub fn new_parser_from_source_str<'a>(sess: &'a ParseSess,
-                                     cfg: ast::CrateConfig,
-                                     name: ~str,
-                                     source: ~str)
-                                     -> Parser<'a> {
-    filemap_to_parser(sess,string_to_filemap(sess,source,name),cfg)
+                                      cfg: ast::CrateConfig,
+                                      name: ~str,
+                                      source: ~str)
+                                      -> Parser<'a> {
+    filemap_to_parser(sess, string_to_filemap(sess, source, name), cfg)
 }
 
 /// Create a new parser, handling errors as appropriate
 /// if the file doesn't exist
-pub fn new_parser_from_file<'a>(
-    sess: &'a ParseSess,
-    cfg: ast::CrateConfig,
-    path: &Path
-) -> Parser<'a> {
-    filemap_to_parser(sess,file_to_filemap(sess,path,None),cfg)
+pub fn new_parser_from_file<'a>(sess: &'a ParseSess,
+                                cfg: ast::CrateConfig,
+                                path: &Path) -> Parser<'a> {
+    filemap_to_parser(sess, file_to_filemap(sess, path, None), cfg)
 }
 
 /// Given a session, a crate config, a path, and a span, add
 /// the file at the given path to the codemap, and return a parser.
 /// On an error, use the given span as the source of the problem.
-pub fn new_sub_parser_from_file<'a>(
-    sess: &'a ParseSess,
-    cfg: ast::CrateConfig,
-    path: &Path,
-    sp: Span
-) -> Parser<'a> {
-    filemap_to_parser(sess,file_to_filemap(sess,path,Some(sp)),cfg)
+pub fn new_sub_parser_from_file<'a>(sess: &'a ParseSess,
+                                    cfg: ast::CrateConfig,
+                                    path: &Path,
+                                    sp: Span) -> Parser<'a> {
+    filemap_to_parser(sess, file_to_filemap(sess, path, Some(sp)), cfg)
 }
 
 /// Given a filemap and config, return a parser
 pub fn filemap_to_parser<'a>(sess: &'a ParseSess,
-                             filemap: @FileMap,
+                             filemap: Rc<FileMap>,
                              cfg: ast::CrateConfig) -> Parser<'a> {
-    tts_to_parser(sess,filemap_to_tts(sess,filemap),cfg)
+    tts_to_parser(sess, filemap_to_tts(sess, filemap), cfg)
 }
 
 // must preserve old name for now, because quote! from the *existing*
@@ -216,7 +206,7 @@ pub fn filemap_to_parser<'a>(sess: &'a ParseSess,
 pub fn new_parser_from_tts<'a>(sess: &'a ParseSess,
                                cfg: ast::CrateConfig,
                                tts: Vec<ast::TokenTree>) -> Parser<'a> {
-    tts_to_parser(sess,tts,cfg)
+    tts_to_parser(sess, tts, cfg)
 }
 
 
@@ -225,7 +215,7 @@ pub fn new_parser_from_tts<'a>(sess: &'a ParseSess,
 /// Given a session and a path and an optional span (for error reporting),
 /// add the path to the session's codemap and return the new filemap.
 pub fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
-    -> @FileMap {
+    -> Rc<FileMap> {
     let err = |msg: &str| {
         match spanopt {
             Some(sp) => sess.span_diagnostic.span_fatal(sp, msg),
@@ -251,17 +241,17 @@ pub fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
 // given a session and a string, add the string to
 // the session's codemap and return the new filemap
 pub fn string_to_filemap(sess: &ParseSess, source: ~str, path: ~str)
-                         -> @FileMap {
-    sess.cm.new_filemap(path, source)
+                         -> Rc<FileMap> {
+    sess.span_diagnostic.cm.new_filemap(path, source)
 }
 
 // given a filemap, produce a sequence of token-trees
-pub fn filemap_to_tts(sess: &ParseSess, filemap: @FileMap)
+pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
     -> Vec<ast::TokenTree> {
     // it appears to me that the cfg doesn't matter here... indeed,
     // parsing tt's probably shouldn't require a parser at all.
     let cfg = Vec::new();
-    let srdr = lexer::new_string_reader(sess.span_diagnostic, filemap);
+    let srdr = lexer::new_string_reader(&sess.span_diagnostic, filemap);
     let mut p1 = Parser(sess, cfg, ~srdr);
     p1.parse_all_token_trees()
 }
@@ -270,7 +260,7 @@ pub fn filemap_to_tts(sess: &ParseSess, filemap: @FileMap)
 pub fn tts_to_parser<'a>(sess: &'a ParseSess,
                          tts: Vec<ast::TokenTree>,
                          cfg: ast::CrateConfig) -> Parser<'a> {
-    let trdr = lexer::new_tt_reader(sess.span_diagnostic, None, tts);
+    let trdr = lexer::new_tt_reader(&sess.span_diagnostic, None, tts);
     Parser(sess, cfg, ~trdr)
 }
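
In mod.rs the session stops storing the codemap and the handler as two separate `@` boxes: `ParseSess` now owns its `SpanHandler` by value, and the codemap is reached through it (as `sess.span_diagnostic.cm` in parser.rs below). A sketch of that ownership shape, again with hypothetical stand-in types:

    // Hypothetical stand-ins mirroring the ownership shape after this change.
    struct CodeMap;
    struct SpanHandler { cm: CodeMap }

    // The session owns the handler (and, through it, the codemap) by value;
    // readers and parsers borrow it for the session's lifetime.
    struct ParseSess { span_diagnostic: SpanHandler }

    fn new_parse_sess() -> ParseSess {
        ParseSess { span_diagnostic: SpanHandler { cm: CodeMap } }
    }

    // Mirrors `sess.span_diagnostic.cm.span_to_filename(..)` in parser.rs.
    fn codemap_of(sess: &ParseSess) -> &CodeMap {
        &sess.span_diagnostic.cm
    }

    fn main() {
        let sess = new_parse_sess();
        let _cm = codemap_of(&sess);
    }
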
 
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index d183eb44cc2..27c86956499 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -4150,7 +4150,7 @@ impl<'a> Parser<'a> {
                     outer_attrs: &[ast::Attribute],
                     id_sp: Span)
                     -> (ast::Item_, Vec<ast::Attribute> ) {
-        let mut prefix = Path::new(self.sess.cm.span_to_filename(self.span));
+        let mut prefix = Path::new(self.sess.span_diagnostic.cm.span_to_filename(self.span));
         prefix.pop();
         let mod_path = Path::new(".").join_many(self.mod_path_stack.as_slice());
         let dir_path = prefix.join(&mod_path);