about summary refs log tree commit diff
path: root/src/libsyntax/parse
diff options
context:
space:
mode:
author	bors <bors@rust-lang.org>	2014-01-03 22:36:53 -0800
committer	bors <bors@rust-lang.org>	2014-01-03 22:36:53 -0800
commit	3dd7c49faf5ae3a9158ab242a264c0f0eb99f657 (patch)
tree	0393c0b2e10c7579d86c222071bb9c64b0451b60 /src/libsyntax/parse
parent	0ff6c12ce94993dae702d597a213eee6b969231a (diff)
parent	80921536343e87d2f7d7f19ad90d63f50b557e06 (diff)
download	rust-3dd7c49faf5ae3a9158ab242a264c0f0eb99f657.tar.gz
	rust-3dd7c49faf5ae3a9158ab242a264c0f0eb99f657.zip
auto merge of #11251 : pcwalton/rust/remove-at-mut, r=pcwalton
r? @nikomatsakis 

for the borrow checker changes. Write guards are now eliminated.
Diffstat (limited to 'src/libsyntax/parse')
-rw-r--r--	src/libsyntax/parse/comments.rs	85
-rw-r--r--	src/libsyntax/parse/lexer.rs	373
-rw-r--r--	src/libsyntax/parse/mod.rs	65
-rw-r--r--	src/libsyntax/parse/parser.rs	73
4 files changed, 313 insertions, 283 deletions
diff --git a/src/libsyntax/parse/comments.rs b/src/libsyntax/parse/comments.rs
index b1390253d19..e0ab7f1535d 100644
--- a/src/libsyntax/parse/comments.rs
+++ b/src/libsyntax/parse/comments.rs
@@ -135,39 +135,44 @@ pub fn strip_doc_comment_decoration(comment: &str) -> ~str {
     fail!("not a doc-comment: {}", comment);
 }
 
-fn read_to_eol(rdr: @mut StringReader) -> ~str {
+fn read_to_eol(rdr: @StringReader) -> ~str {
     let mut val = ~"";
-    while rdr.curr != '\n' && !is_eof(rdr) {
-        val.push_char(rdr.curr);
+    while rdr.curr.get() != '\n' && !is_eof(rdr) {
+        val.push_char(rdr.curr.get());
         bump(rdr);
     }
-    if rdr.curr == '\n' { bump(rdr); }
+    if rdr.curr.get() == '\n' { bump(rdr); }
     return val;
 }
 
-fn read_one_line_comment(rdr: @mut StringReader) -> ~str {
+fn read_one_line_comment(rdr: @StringReader) -> ~str {
     let val = read_to_eol(rdr);
     assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
                  (val[0] == '#' as u8 && val[1] == '!' as u8));
     return val;
 }
 
-fn consume_non_eol_whitespace(rdr: @mut StringReader) {
-    while is_whitespace(rdr.curr) && rdr.curr != '\n' && !is_eof(rdr) {
+fn consume_non_eol_whitespace(rdr: @StringReader) {
+    while is_whitespace(rdr.curr.get()) && rdr.curr.get() != '\n' &&
+            !is_eof(rdr) {
         bump(rdr);
     }
 }
 
-fn push_blank_line_comment(rdr: @mut StringReader, comments: &mut ~[cmnt]) {
+fn push_blank_line_comment(rdr: @StringReader, comments: &mut ~[cmnt]) {
     debug!(">>> blank-line comment");
     let v: ~[~str] = ~[];
-    comments.push(cmnt {style: blank_line, lines: v, pos: rdr.last_pos});
+    comments.push(cmnt {
+        style: blank_line,
+        lines: v,
+        pos: rdr.last_pos.get(),
+    });
 }
 
-fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
+fn consume_whitespace_counting_blank_lines(rdr: @StringReader,
                                            comments: &mut ~[cmnt]) {
-    while is_whitespace(rdr.curr) && !is_eof(rdr) {
-        if rdr.col == CharPos(0u) && rdr.curr == '\n' {
+    while is_whitespace(rdr.curr.get()) && !is_eof(rdr) {
+        if rdr.col.get() == CharPos(0u) && rdr.curr.get() == '\n' {
             push_blank_line_comment(rdr, &mut *comments);
         }
         bump(rdr);
@@ -175,10 +180,10 @@ fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
 }
 
 
-fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
+fn read_shebang_comment(rdr: @StringReader, code_to_the_left: bool,
                                             comments: &mut ~[cmnt]) {
     debug!(">>> shebang comment");
-    let p = rdr.last_pos;
+    let p = rdr.last_pos.get();
     debug!("<<< shebang comment");
     comments.push(cmnt {
         style: if code_to_the_left { trailing } else { isolated },
@@ -187,12 +192,12 @@ fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
     });
 }
 
-fn read_line_comments(rdr: @mut StringReader, code_to_the_left: bool,
+fn read_line_comments(rdr: @StringReader, code_to_the_left: bool,
                                           comments: &mut ~[cmnt]) {
     debug!(">>> line comments");
-    let p = rdr.last_pos;
+    let p = rdr.last_pos.get();
     let mut lines: ~[~str] = ~[];
-    while rdr.curr == '/' && nextch(rdr) == '/' {
+    while rdr.curr.get() == '/' && nextch(rdr) == '/' {
         let line = read_one_line_comment(rdr);
         debug!("{}", line);
         if is_doc_comment(line) { // doc-comments are not put in comments
@@ -244,22 +249,22 @@ fn trim_whitespace_prefix_and_push_line(lines: &mut ~[~str],
     lines.push(s1);
 }
 
-fn read_block_comment(rdr: @mut StringReader,
+fn read_block_comment(rdr: @StringReader,
                       code_to_the_left: bool,
                       comments: &mut ~[cmnt]) {
     debug!(">>> block comment");
-    let p = rdr.last_pos;
+    let p = rdr.last_pos.get();
     let mut lines: ~[~str] = ~[];
-    let col: CharPos = rdr.col;
+    let col: CharPos = rdr.col.get();
     bump(rdr);
     bump(rdr);
 
     let mut curr_line = ~"/*";
 
     // doc-comments are not really comments, they are attributes
-    if rdr.curr == '*' || rdr.curr == '!' {
-        while !(rdr.curr == '*' && nextch(rdr) == '/') && !is_eof(rdr) {
-            curr_line.push_char(rdr.curr);
+    if rdr.curr.get() == '*' || rdr.curr.get() == '!' {
+        while !(rdr.curr.get() == '*' && nextch(rdr) == '/') && !is_eof(rdr) {
+            curr_line.push_char(rdr.curr.get());
             bump(rdr);
         }
         if !is_eof(rdr) {
@@ -275,22 +280,22 @@ fn read_block_comment(rdr: @mut StringReader,
         while level > 0 {
             debug!("=== block comment level {}", level);
             if is_eof(rdr) {
-                (rdr as @mut reader).fatal(~"unterminated block comment");
+                (rdr as @reader).fatal(~"unterminated block comment");
             }
-            if rdr.curr == '\n' {
+            if rdr.curr.get() == '\n' {
                 trim_whitespace_prefix_and_push_line(&mut lines, curr_line,
                                                      col);
                 curr_line = ~"";
                 bump(rdr);
             } else {
-                curr_line.push_char(rdr.curr);
-                if rdr.curr == '/' && nextch(rdr) == '*' {
+                curr_line.push_char(rdr.curr.get());
+                if rdr.curr.get() == '/' && nextch(rdr) == '*' {
                     bump(rdr);
                     bump(rdr);
                     curr_line.push_char('*');
                     level += 1;
                 } else {
-                    if rdr.curr == '*' && nextch(rdr) == '/' {
+                    if rdr.curr.get() == '*' && nextch(rdr) == '/' {
                         bump(rdr);
                         bump(rdr);
                         curr_line.push_char('/');
@@ -306,28 +311,28 @@ fn read_block_comment(rdr: @mut StringReader,
 
     let mut style = if code_to_the_left { trailing } else { isolated };
     consume_non_eol_whitespace(rdr);
-    if !is_eof(rdr) && rdr.curr != '\n' && lines.len() == 1u {
+    if !is_eof(rdr) && rdr.curr.get() != '\n' && lines.len() == 1u {
         style = mixed;
     }
     debug!("<<< block comment");
     comments.push(cmnt {style: style, lines: lines, pos: p});
 }
 
-fn peeking_at_comment(rdr: @mut StringReader) -> bool {
-    return ((rdr.curr == '/' && nextch(rdr) == '/') ||
-         (rdr.curr == '/' && nextch(rdr) == '*')) ||
-         (rdr.curr == '#' && nextch(rdr) == '!');
+fn peeking_at_comment(rdr: @StringReader) -> bool {
+    return ((rdr.curr.get() == '/' && nextch(rdr) == '/') ||
+         (rdr.curr.get() == '/' && nextch(rdr) == '*')) ||
+         (rdr.curr.get() == '#' && nextch(rdr) == '!');
 }
 
-fn consume_comment(rdr: @mut StringReader,
+fn consume_comment(rdr: @StringReader,
                    code_to_the_left: bool,
                    comments: &mut ~[cmnt]) {
     debug!(">>> consume comment");
-    if rdr.curr == '/' && nextch(rdr) == '/' {
+    if rdr.curr.get() == '/' && nextch(rdr) == '/' {
         read_line_comments(rdr, code_to_the_left, comments);
-    } else if rdr.curr == '/' && nextch(rdr) == '*' {
+    } else if rdr.curr.get() == '/' && nextch(rdr) == '*' {
         read_block_comment(rdr, code_to_the_left, comments);
-    } else if rdr.curr == '#' && nextch(rdr) == '!' {
+    } else if rdr.curr.get() == '#' && nextch(rdr) == '!' {
         read_shebang_comment(rdr, code_to_the_left, comments);
     } else { fail!(); }
     debug!("<<< consume comment");
@@ -342,7 +347,7 @@ pub struct lit {
 // it appears this function is called only from pprust... that's
 // probably not a good thing.
 pub fn gather_comments_and_literals(span_diagnostic:
-                                    @mut diagnostic::SpanHandler,
+                                        @diagnostic::SpanHandler,
                                     path: @str,
                                     srdr: &mut io::Reader)
                                  -> (~[cmnt], ~[lit]) {
@@ -358,7 +363,7 @@ pub fn gather_comments_and_literals(span_diagnostic:
         loop {
             let mut code_to_the_left = !first_read;
             consume_non_eol_whitespace(rdr);
-            if rdr.curr == '\n' {
+            if rdr.curr.get() == '\n' {
                 code_to_the_left = false;
                 consume_whitespace_counting_blank_lines(rdr, &mut comments);
             }
@@ -370,7 +375,7 @@ pub fn gather_comments_and_literals(span_diagnostic:
         }
 
 
-        let bstart = rdr.last_pos;
+        let bstart = rdr.last_pos.get();
         rdr.next_token();
         //discard, and look ahead; we're working with internal state
         let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs
index be93c962137..3b81b09112b 100644
--- a/src/libsyntax/parse/lexer.rs
+++ b/src/libsyntax/parse/lexer.rs
@@ -18,6 +18,7 @@ use parse::token;
 use parse::token::{str_to_ident};
 
 use std::cast::transmute;
+use std::cell::{Cell, RefCell};
 use std::char;
 use std::num::from_str_radix;
 use std::util;
@@ -25,12 +26,12 @@ use std::util;
 pub use ext::tt::transcribe::{TtReader, new_tt_reader};
 
 pub trait reader {
-    fn is_eof(@mut self) -> bool;
-    fn next_token(@mut self) -> TokenAndSpan;
-    fn fatal(@mut self, ~str) -> !;
-    fn span_diag(@mut self) -> @mut SpanHandler;
-    fn peek(@mut self) -> TokenAndSpan;
-    fn dup(@mut self) -> @mut reader;
+    fn is_eof(@self) -> bool;
+    fn next_token(@self) -> TokenAndSpan;
+    fn fatal(@self, ~str) -> !;
+    fn span_diag(@self) -> @SpanHandler;
+    fn peek(@self) -> TokenAndSpan;
+    fn dup(@self) -> @reader;
 }
 
 #[deriving(Clone, Eq)]
@@ -40,47 +41,47 @@ pub struct TokenAndSpan {
 }
 
 pub struct StringReader {
-    span_diagnostic: @mut SpanHandler,
+    span_diagnostic: @SpanHandler,
     src: @str,
     // The absolute offset within the codemap of the next character to read
-    pos: BytePos,
+    pos: Cell<BytePos>,
     // The absolute offset within the codemap of the last character read(curr)
-    last_pos: BytePos,
+    last_pos: Cell<BytePos>,
     // The column of the next character to read
-    col: CharPos,
+    col: Cell<CharPos>,
     // The last character to be read
-    curr: char,
+    curr: Cell<char>,
     filemap: @codemap::FileMap,
     /* cached: */
-    peek_tok: token::Token,
-    peek_span: Span
+    peek_tok: RefCell<token::Token>,
+    peek_span: RefCell<Span>,
 }
 
-pub fn new_string_reader(span_diagnostic: @mut SpanHandler,
+pub fn new_string_reader(span_diagnostic: @SpanHandler,
                          filemap: @codemap::FileMap)
-                      -> @mut StringReader {
+                      -> @StringReader {
     let r = new_low_level_string_reader(span_diagnostic, filemap);
     string_advance_token(r); /* fill in peek_* */
     return r;
 }
 
 /* For comments.rs, which hackily pokes into 'pos' and 'curr' */
-pub fn new_low_level_string_reader(span_diagnostic: @mut SpanHandler,
+pub fn new_low_level_string_reader(span_diagnostic: @SpanHandler,
                                    filemap: @codemap::FileMap)
-                                -> @mut StringReader {
+                                -> @StringReader {
     // Force the initial reader bump to start on a fresh line
     let initial_char = '\n';
-    let r = @mut StringReader {
+    let r = @StringReader {
         span_diagnostic: span_diagnostic,
         src: filemap.src,
-        pos: filemap.start_pos,
-        last_pos: filemap.start_pos,
-        col: CharPos(0),
-        curr: initial_char,
+        pos: Cell::new(filemap.start_pos),
+        last_pos: Cell::new(filemap.start_pos),
+        col: Cell::new(CharPos(0)),
+        curr: Cell::new(initial_char),
         filemap: filemap,
         /* dummy values; not read */
-        peek_tok: token::EOF,
-        peek_span: codemap::DUMMY_SP
+        peek_tok: RefCell::new(token::EOF),
+        peek_span: RefCell::new(codemap::DUMMY_SP),
     };
     bump(r);
     return r;
@@ -89,78 +90,84 @@ pub fn new_low_level_string_reader(span_diagnostic: @mut SpanHandler,
 // duplicating the string reader is probably a bad idea, in
 // that using them will cause interleaved pushes of line
 // offsets to the underlying filemap...
-fn dup_string_reader(r: @mut StringReader) -> @mut StringReader {
-    @mut StringReader {
+fn dup_string_reader(r: @StringReader) -> @StringReader {
+    @StringReader {
         span_diagnostic: r.span_diagnostic,
         src: r.src,
-        pos: r.pos,
-        last_pos: r.last_pos,
-        col: r.col,
-        curr: r.curr,
+        pos: Cell::new(r.pos.get()),
+        last_pos: Cell::new(r.last_pos.get()),
+        col: Cell::new(r.col.get()),
+        curr: Cell::new(r.curr.get()),
         filemap: r.filemap,
         peek_tok: r.peek_tok.clone(),
-        peek_span: r.peek_span
+        peek_span: r.peek_span.clone(),
     }
 }
 
 impl reader for StringReader {
-    fn is_eof(@mut self) -> bool { is_eof(self) }
+    fn is_eof(@self) -> bool { is_eof(self) }
     // return the next token. EFFECT: advances the string_reader.
-    fn next_token(@mut self) -> TokenAndSpan {
-        let ret_val = TokenAndSpan {
-            tok: util::replace(&mut self.peek_tok, token::UNDERSCORE),
-            sp: self.peek_span,
+    fn next_token(@self) -> TokenAndSpan {
+        let ret_val = {
+            let mut peek_tok = self.peek_tok.borrow_mut();
+            TokenAndSpan {
+                tok: util::replace(peek_tok.get(), token::UNDERSCORE),
+                sp: self.peek_span.get(),
+            }
         };
         string_advance_token(self);
         ret_val
     }
-    fn fatal(@mut self, m: ~str) -> ! {
-        self.span_diagnostic.span_fatal(self.peek_span, m)
+    fn fatal(@self, m: ~str) -> ! {
+        self.span_diagnostic.span_fatal(self.peek_span.get(), m)
     }
-    fn span_diag(@mut self) -> @mut SpanHandler { self.span_diagnostic }
-    fn peek(@mut self) -> TokenAndSpan {
+    fn span_diag(@self) -> @SpanHandler { self.span_diagnostic }
+    fn peek(@self) -> TokenAndSpan {
         // XXX(pcwalton): Bad copy!
         TokenAndSpan {
-            tok: self.peek_tok.clone(),
-            sp: self.peek_span,
+            tok: self.peek_tok.get(),
+            sp: self.peek_span.get(),
         }
     }
-    fn dup(@mut self) -> @mut reader { dup_string_reader(self) as @mut reader }
+    fn dup(@self) -> @reader { dup_string_reader(self) as @reader }
 }
 
 impl reader for TtReader {
-    fn is_eof(@mut self) -> bool { self.cur_tok == token::EOF }
-    fn next_token(@mut self) -> TokenAndSpan {
+    fn is_eof(@self) -> bool {
+        let cur_tok = self.cur_tok.borrow();
+        *cur_tok.get() == token::EOF
+    }
+    fn next_token(@self) -> TokenAndSpan {
         let r = tt_next_token(self);
         debug!("TtReader: r={:?}", r);
         return r;
     }
-    fn fatal(@mut self, m: ~str) -> ! {
-        self.sp_diag.span_fatal(self.cur_span, m);
+    fn fatal(@self, m: ~str) -> ! {
+        self.sp_diag.span_fatal(self.cur_span.get(), m);
     }
-    fn span_diag(@mut self) -> @mut SpanHandler { self.sp_diag }
-    fn peek(@mut self) -> TokenAndSpan {
+    fn span_diag(@self) -> @SpanHandler { self.sp_diag }
+    fn peek(@self) -> TokenAndSpan {
         TokenAndSpan {
-            tok: self.cur_tok.clone(),
-            sp: self.cur_span,
+            tok: self.cur_tok.get(),
+            sp: self.cur_span.get(),
         }
     }
-    fn dup(@mut self) -> @mut reader { dup_tt_reader(self) as @mut reader }
+    fn dup(@self) -> @reader { dup_tt_reader(self) as @reader }
 }
 
 // report a lexical error spanning [`from_pos`, `to_pos`)
-fn fatal_span(rdr: @mut StringReader,
+fn fatal_span(rdr: @StringReader,
               from_pos: BytePos,
               to_pos: BytePos,
               m: ~str)
            -> ! {
-    rdr.peek_span = codemap::mk_sp(from_pos, to_pos);
+    rdr.peek_span.set(codemap::mk_sp(from_pos, to_pos));
     rdr.fatal(m);
 }
 
 // report a lexical error spanning [`from_pos`, `to_pos`), appending an
 // escaped character to the error message
-fn fatal_span_char(rdr: @mut StringReader,
+fn fatal_span_char(rdr: @StringReader,
                    from_pos: BytePos,
                    to_pos: BytePos,
                    m: ~str,
@@ -174,7 +181,7 @@ fn fatal_span_char(rdr: @mut StringReader,
 
 // report a lexical error spanning [`from_pos`, `to_pos`), appending the
 // offending string to the error message
-fn fatal_span_verbose(rdr: @mut StringReader,
+fn fatal_span_verbose(rdr: @StringReader,
                       from_pos: BytePos,
                       to_pos: BytePos,
                       m: ~str)
@@ -190,19 +197,20 @@ fn fatal_span_verbose(rdr: @mut StringReader,
 
 // EFFECT: advance peek_tok and peek_span to refer to the next token.
 // EFFECT: update the interner, maybe.
-fn string_advance_token(r: @mut StringReader) {
+fn string_advance_token(r: @StringReader) {
     match (consume_whitespace_and_comments(r)) {
         Some(comment) => {
-            r.peek_span = comment.sp;
-            r.peek_tok = comment.tok;
+            r.peek_span.set(comment.sp);
+            r.peek_tok.set(comment.tok);
         },
         None => {
             if is_eof(r) {
-                r.peek_tok = token::EOF;
+                r.peek_tok.set(token::EOF);
             } else {
-                let start_bytepos = r.last_pos;
-                r.peek_tok = next_token_inner(r);
-                r.peek_span = codemap::mk_sp(start_bytepos, r.last_pos);
+                let start_bytepos = r.last_pos.get();
+                r.peek_tok.set(next_token_inner(r));
+                r.peek_span.set(codemap::mk_sp(start_bytepos,
+                                               r.last_pos.get()));
             };
         }
     }
@@ -216,17 +224,17 @@ fn byte_offset(rdr: &StringReader, pos: BytePos) -> BytePos {
 /// up to but excluding `rdr.last_pos`, meaning the slice does not include
 /// the character `rdr.curr`.
 pub fn with_str_from<T>(
-                     rdr: @mut StringReader,
+                     rdr: @StringReader,
                      start: BytePos,
                      f: |s: &str| -> T)
                      -> T {
-    with_str_from_to(rdr, start, rdr.last_pos, f)
+    with_str_from_to(rdr, start, rdr.last_pos.get(), f)
 }
 
 /// Calls `f` with a string slice of the source text spanning from `start`
 /// up to but excluding `end`.
 fn with_str_from_to<T>(
-                    rdr: @mut StringReader,
+                    rdr: @StringReader,
                     start: BytePos,
                     end: BytePos,
                     f: |s: &str| -> T)
@@ -238,20 +246,22 @@ fn with_str_from_to<T>(
 
 // EFFECT: advance the StringReader by one character. If a newline is
 // discovered, add it to the FileMap's list of line start offsets.
-pub fn bump(rdr: &mut StringReader) {
-    rdr.last_pos = rdr.pos;
-    let current_byte_offset = byte_offset(rdr, rdr.pos).to_uint();
+pub fn bump(rdr: &StringReader) {
+    rdr.last_pos.set(rdr.pos.get());
+    let current_byte_offset = byte_offset(rdr, rdr.pos.get()).to_uint();
     if current_byte_offset < (rdr.src).len() {
-        assert!(rdr.curr != unsafe { transmute(-1u32) }); // FIXME: #8971: unsound
-        let last_char = rdr.curr;
+        assert!(rdr.curr.get() != unsafe {
+            transmute(-1u32)
+        }); // FIXME: #8971: unsound
+        let last_char = rdr.curr.get();
         let next = rdr.src.char_range_at(current_byte_offset);
         let byte_offset_diff = next.next - current_byte_offset;
-        rdr.pos = rdr.pos + Pos::from_uint(byte_offset_diff);
-        rdr.curr = next.ch;
-        rdr.col = rdr.col + CharPos(1u);
+        rdr.pos.set(rdr.pos.get() + Pos::from_uint(byte_offset_diff));
+        rdr.curr.set(next.ch);
+        rdr.col.set(rdr.col.get() + CharPos(1u));
         if last_char == '\n' {
-            rdr.filemap.next_line(rdr.last_pos);
-            rdr.col = CharPos(0u);
+            rdr.filemap.next_line(rdr.last_pos.get());
+            rdr.col.set(CharPos(0u));
         }
 
         if byte_offset_diff > 1 {
@@ -259,14 +269,14 @@ pub fn bump(rdr: &mut StringReader) {
                 Pos::from_uint(current_byte_offset), byte_offset_diff);
         }
     } else {
-        rdr.curr = unsafe { transmute(-1u32) }; // FIXME: #8971: unsound
+        rdr.curr.set(unsafe { transmute(-1u32) }); // FIXME: #8971: unsound
     }
 }
-pub fn is_eof(rdr: @mut StringReader) -> bool {
-    rdr.curr == unsafe { transmute(-1u32) } // FIXME: #8971: unsound
+pub fn is_eof(rdr: @StringReader) -> bool {
+    rdr.curr.get() == unsafe { transmute(-1u32) } // FIXME: #8971: unsound
 }
-pub fn nextch(rdr: @mut StringReader) -> char {
-    let offset = byte_offset(rdr, rdr.pos).to_uint();
+pub fn nextch(rdr: @StringReader) -> char {
+    let offset = byte_offset(rdr, rdr.pos.get()).to_uint();
     if offset < (rdr.src).len() {
         return rdr.src.char_at(offset);
     } else { return unsafe { transmute(-1u32) }; } // FIXME: #8971: unsound
@@ -296,9 +306,9 @@ fn is_hex_digit(c: char) -> bool {
 
 // EFFECT: eats whitespace and comments.
 // returns a Some(sugared-doc-attr) if one exists, None otherwise.
-fn consume_whitespace_and_comments(rdr: @mut StringReader)
+fn consume_whitespace_and_comments(rdr: @StringReader)
                                 -> Option<TokenAndSpan> {
-    while is_whitespace(rdr.curr) { bump(rdr); }
+    while is_whitespace(rdr.curr.get()) { bump(rdr); }
     return consume_any_line_comment(rdr);
 }
 
@@ -309,17 +319,17 @@ pub fn is_line_non_doc_comment(s: &str) -> bool {
 // PRECONDITION: rdr.curr is not whitespace
 // EFFECT: eats any kind of comment.
 // returns a Some(sugared-doc-attr) if one exists, None otherwise
-fn consume_any_line_comment(rdr: @mut StringReader)
+fn consume_any_line_comment(rdr: @StringReader)
                          -> Option<TokenAndSpan> {
-    if rdr.curr == '/' {
+    if rdr.curr.get() == '/' {
         match nextch(rdr) {
           '/' => {
             bump(rdr);
             bump(rdr);
             // line comments starting with "///" or "//!" are doc-comments
-            if rdr.curr == '/' || rdr.curr == '!' {
-                let start_bpos = rdr.pos - BytePos(3);
-                while rdr.curr != '\n' && !is_eof(rdr) {
+            if rdr.curr.get() == '/' || rdr.curr.get() == '!' {
+                let start_bpos = rdr.pos.get() - BytePos(3);
+                while rdr.curr.get() != '\n' && !is_eof(rdr) {
                     bump(rdr);
                 }
                 let ret = with_str_from(rdr, start_bpos, |string| {
@@ -327,7 +337,7 @@ fn consume_any_line_comment(rdr: @mut StringReader)
                     if !is_line_non_doc_comment(string) {
                         Some(TokenAndSpan{
                             tok: token::DOC_COMMENT(str_to_ident(string)),
-                            sp: codemap::mk_sp(start_bpos, rdr.pos)
+                            sp: codemap::mk_sp(start_bpos, rdr.pos.get())
                         })
                     } else {
                         None
@@ -338,7 +348,7 @@ fn consume_any_line_comment(rdr: @mut StringReader)
                     return ret;
                 }
             } else {
-                while rdr.curr != '\n' && !is_eof(rdr) { bump(rdr); }
+                while rdr.curr.get() != '\n' && !is_eof(rdr) { bump(rdr); }
             }
             // Restart whitespace munch.
             return consume_whitespace_and_comments(rdr);
@@ -346,15 +356,18 @@ fn consume_any_line_comment(rdr: @mut StringReader)
           '*' => { bump(rdr); bump(rdr); return consume_block_comment(rdr); }
           _ => ()
         }
-    } else if rdr.curr == '#' {
+    } else if rdr.curr.get() == '#' {
         if nextch(rdr) == '!' {
             // I guess this is the only way to figure out if
             // we're at the beginning of the file...
             let cmap = @CodeMap::new();
-            (*cmap).files.push(rdr.filemap);
-            let loc = cmap.lookup_char_pos_adj(rdr.last_pos);
+            {
+                let mut files = cmap.files.borrow_mut();
+                files.get().push(rdr.filemap);
+            }
+            let loc = cmap.lookup_char_pos_adj(rdr.last_pos.get());
             if loc.line == 1u && loc.col == CharPos(0u) {
-                while rdr.curr != '\n' && !is_eof(rdr) { bump(rdr); }
+                while rdr.curr.get() != '\n' && !is_eof(rdr) { bump(rdr); }
                 return consume_whitespace_and_comments(rdr);
             }
         }
@@ -367,11 +380,10 @@ pub fn is_block_non_doc_comment(s: &str) -> bool {
 }
 
 // might return a sugared-doc-attr
-fn consume_block_comment(rdr: @mut StringReader)
-                      -> Option<TokenAndSpan> {
+fn consume_block_comment(rdr: @StringReader) -> Option<TokenAndSpan> {
     // block comments starting with "/**" or "/*!" are doc-comments
-    let is_doc_comment = rdr.curr == '*' || rdr.curr == '!';
-    let start_bpos = rdr.pos - BytePos(if is_doc_comment {3} else {2});
+    let is_doc_comment = rdr.curr.get() == '*' || rdr.curr.get() == '!';
+    let start_bpos = rdr.pos.get() - BytePos(if is_doc_comment {3} else {2});
 
     let mut level: int = 1;
     while level > 0 {
@@ -381,12 +393,12 @@ fn consume_block_comment(rdr: @mut StringReader)
             } else {
                 ~"unterminated block comment"
             };
-            fatal_span(rdr, start_bpos, rdr.last_pos, msg);
-        } else if rdr.curr == '/' && nextch(rdr) == '*' {
+            fatal_span(rdr, start_bpos, rdr.last_pos.get(), msg);
+        } else if rdr.curr.get() == '/' && nextch(rdr) == '*' {
             level += 1;
             bump(rdr);
             bump(rdr);
-        } else if rdr.curr == '*' && nextch(rdr) == '/' {
+        } else if rdr.curr.get() == '*' && nextch(rdr) == '/' {
             level -= 1;
             bump(rdr);
             bump(rdr);
@@ -401,7 +413,7 @@ fn consume_block_comment(rdr: @mut StringReader)
             if !is_block_non_doc_comment(string) {
                 Some(TokenAndSpan{
                         tok: token::DOC_COMMENT(str_to_ident(string)),
-                        sp: codemap::mk_sp(start_bpos, rdr.pos)
+                        sp: codemap::mk_sp(start_bpos, rdr.pos.get())
                     })
             } else {
                 None
@@ -415,13 +427,13 @@ fn consume_block_comment(rdr: @mut StringReader)
     if res.is_some() { res } else { consume_whitespace_and_comments(rdr) }
 }
 
-fn scan_exponent(rdr: @mut StringReader, start_bpos: BytePos) -> Option<~str> {
-    let mut c = rdr.curr;
+fn scan_exponent(rdr: @StringReader, start_bpos: BytePos) -> Option<~str> {
+    let mut c = rdr.curr.get();
     let mut rslt = ~"";
     if c == 'e' || c == 'E' {
         rslt.push_char(c);
         bump(rdr);
-        c = rdr.curr;
+        c = rdr.curr.get();
         if c == '-' || c == '+' {
             rslt.push_char(c);
             bump(rdr);
@@ -430,16 +442,16 @@ fn scan_exponent(rdr: @mut StringReader, start_bpos: BytePos) -> Option<~str> {
         if exponent.len() > 0u {
             return Some(rslt + exponent);
         } else {
-            fatal_span(rdr, start_bpos, rdr.last_pos,
+            fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                        ~"scan_exponent: bad fp literal");
         }
     } else { return None::<~str>; }
 }
 
-fn scan_digits(rdr: @mut StringReader, radix: uint) -> ~str {
+fn scan_digits(rdr: @StringReader, radix: uint) -> ~str {
     let mut rslt = ~"";
     loop {
-        let c = rdr.curr;
+        let c = rdr.curr.get();
         if c == '_' { bump(rdr); continue; }
         match char::to_digit(c, radix) {
           Some(_) => {
@@ -451,12 +463,12 @@ fn scan_digits(rdr: @mut StringReader, radix: uint) -> ~str {
     };
 }
 
-fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
+fn scan_number(c: char, rdr: @StringReader) -> token::Token {
     let mut num_str;
     let mut base = 10u;
     let mut c = c;
     let mut n = nextch(rdr);
-    let start_bpos = rdr.last_pos;
+    let start_bpos = rdr.last_pos.get();
     if c == '0' && n == 'x' {
         bump(rdr);
         bump(rdr);
@@ -471,7 +483,7 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
         base = 2u;
     }
     num_str = scan_digits(rdr, base);
-    c = rdr.curr;
+    c = rdr.curr.get();
     nextch(rdr);
     if c == 'u' || c == 'i' {
         enum Result { Signed(ast::int_ty), Unsigned(ast::uint_ty) }
@@ -481,7 +493,7 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
             else { Unsigned(ast::ty_u) }
         };
         bump(rdr);
-        c = rdr.curr;
+        c = rdr.curr.get();
         if c == '8' {
             bump(rdr);
             tp = if signed { Signed(ast::ty_i8) }
@@ -505,12 +517,12 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
                       else { Unsigned(ast::ty_u64) };
         }
         if num_str.len() == 0u {
-            fatal_span(rdr, start_bpos, rdr.last_pos,
+            fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                        ~"no valid digits found for number");
         }
         let parsed = match from_str_radix::<u64>(num_str, base as uint) {
             Some(p) => p,
-            None => fatal_span(rdr, start_bpos, rdr.last_pos,
+            None => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                                ~"int literal is too large")
         };
 
@@ -520,7 +532,8 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
         }
     }
     let mut is_float = false;
-    if rdr.curr == '.' && !(ident_start(nextch(rdr)) || nextch(rdr) == '.') {
+    if rdr.curr.get() == '.' && !(ident_start(nextch(rdr)) || nextch(rdr) ==
+                                  '.') {
         is_float = true;
         bump(rdr);
         let dec_part = scan_digits(rdr, 10u);
@@ -529,11 +542,11 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
     }
     if is_float {
         match base {
-          16u => fatal_span(rdr, start_bpos, rdr.last_pos,
+          16u => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                             ~"hexadecimal float literal is not supported"),
-          8u => fatal_span(rdr, start_bpos, rdr.last_pos,
+          8u => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                            ~"octal float literal is not supported"),
-          2u => fatal_span(rdr, start_bpos, rdr.last_pos,
+          2u => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                            ~"binary float literal is not supported"),
           _ => ()
         }
@@ -546,9 +559,9 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
       None => ()
     }
 
-    if rdr.curr == 'f' {
+    if rdr.curr.get() == 'f' {
         bump(rdr);
-        c = rdr.curr;
+        c = rdr.curr.get();
         n = nextch(rdr);
         if c == '3' && n == '2' {
             bump(rdr);
@@ -564,19 +577,20 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
             32-bit or 64-bit float, it won't be noticed till the
             back-end.  */
         } else {
-            fatal_span(rdr, start_bpos, rdr.last_pos, ~"expected `f32` or `f64` suffix");
+            fatal_span(rdr, start_bpos, rdr.last_pos.get(),
+                       ~"expected `f32` or `f64` suffix");
         }
     }
     if is_float {
         return token::LIT_FLOAT_UNSUFFIXED(str_to_ident(num_str));
     } else {
         if num_str.len() == 0u {
-            fatal_span(rdr, start_bpos, rdr.last_pos,
+            fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                        ~"no valid digits found for number");
         }
         let parsed = match from_str_radix::<u64>(num_str, base as uint) {
             Some(p) => p,
-            None => fatal_span(rdr, start_bpos, rdr.last_pos,
+            None => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                                ~"int literal is too large")
         };
 
@@ -585,14 +599,14 @@ fn scan_number(c: char, rdr: @mut StringReader) -> token::Token {
     }
 }
 
-fn scan_numeric_escape(rdr: @mut StringReader, n_hex_digits: uint) -> char {
+fn scan_numeric_escape(rdr: @StringReader, n_hex_digits: uint) -> char {
     let mut accum_int = 0;
     let mut i = n_hex_digits;
-    let start_bpos = rdr.last_pos;
+    let start_bpos = rdr.last_pos.get();
     while i != 0u {
-        let n = rdr.curr;
+        let n = rdr.curr.get();
         if !is_hex_digit(n) {
-            fatal_span_char(rdr, rdr.last_pos, rdr.pos,
+            fatal_span_char(rdr, rdr.last_pos.get(), rdr.pos.get(),
                             ~"illegal character in numeric character escape",
                             n);
         }
@@ -603,7 +617,7 @@ fn scan_numeric_escape(rdr: @mut StringReader, n_hex_digits: uint) -> char {
     }
     match char::from_u32(accum_int as u32) {
         Some(x) => x,
-        None => fatal_span(rdr, start_bpos, rdr.last_pos,
+        None => fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                            ~"illegal numeric character escape")
     }
 }
@@ -626,14 +640,14 @@ fn ident_continue(c: char) -> bool {
 // return the next token from the string
 // EFFECT: advances the input past that token
 // EFFECT: updates the interner
-fn next_token_inner(rdr: @mut StringReader) -> token::Token {
-    let c = rdr.curr;
+fn next_token_inner(rdr: @StringReader) -> token::Token {
+    let c = rdr.curr.get();
     if ident_start(c) && nextch(rdr) != '"' && nextch(rdr) != '#' {
         // Note: r as in r" or r#" is part of a raw string literal,
         // not an identifier, and is handled further down.
 
-        let start = rdr.last_pos;
-        while ident_continue(rdr.curr) {
+        let start = rdr.last_pos.get();
+        while ident_continue(rdr.curr.get()) {
             bump(rdr);
         }
 
@@ -641,7 +655,7 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
             if string == "_" {
                 token::UNDERSCORE
             } else {
-                let is_mod_name = rdr.curr == ':' && nextch(rdr) == ':';
+                let is_mod_name = rdr.curr.get() == ':' && nextch(rdr) == ':';
 
                 // FIXME: perform NFKC normalization here. (Issue #2253)
                 token::IDENT(str_to_ident(string), is_mod_name)
@@ -651,9 +665,9 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
     if is_dec_digit(c) {
         return scan_number(c, rdr);
     }
-    fn binop(rdr: @mut StringReader, op: token::binop) -> token::Token {
+    fn binop(rdr: @StringReader, op: token::binop) -> token::Token {
         bump(rdr);
-        if rdr.curr == '=' {
+        if rdr.curr.get() == '=' {
             bump(rdr);
             return token::BINOPEQ(op);
         } else { return token::BINOP(op); }
@@ -669,9 +683,9 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       ',' => { bump(rdr); return token::COMMA; }
       '.' => {
           bump(rdr);
-          return if rdr.curr == '.' {
+          return if rdr.curr.get() == '.' {
               bump(rdr);
-              if rdr.curr == '.' {
+              if rdr.curr.get() == '.' {
                   bump(rdr);
                   token::DOTDOTDOT
               } else {
@@ -692,7 +706,7 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       '~' => { bump(rdr); return token::TILDE; }
       ':' => {
         bump(rdr);
-        if rdr.curr == ':' {
+        if rdr.curr.get() == ':' {
             bump(rdr);
             return token::MOD_SEP;
         } else { return token::COLON; }
@@ -707,10 +721,10 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       // Multi-byte tokens.
       '=' => {
         bump(rdr);
-        if rdr.curr == '=' {
+        if rdr.curr.get() == '=' {
             bump(rdr);
             return token::EQEQ;
-        } else if rdr.curr == '>' {
+        } else if rdr.curr.get() == '>' {
             bump(rdr);
             return token::FAT_ARROW;
         } else {
@@ -719,19 +733,19 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       }
       '!' => {
         bump(rdr);
-        if rdr.curr == '=' {
+        if rdr.curr.get() == '=' {
             bump(rdr);
             return token::NE;
         } else { return token::NOT; }
       }
       '<' => {
         bump(rdr);
-        match rdr.curr {
+        match rdr.curr.get() {
           '=' => { bump(rdr); return token::LE; }
           '<' => { return binop(rdr, token::SHL); }
           '-' => {
             bump(rdr);
-            match rdr.curr {
+            match rdr.curr.get() {
               '>' => { bump(rdr); return token::DARROW; }
               _ => { return token::LARROW; }
             }
@@ -741,7 +755,7 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       }
       '>' => {
         bump(rdr);
-        match rdr.curr {
+        match rdr.curr.get() {
           '=' => { bump(rdr); return token::GE; }
           '>' => { return binop(rdr, token::SHR); }
           _ => { return token::GT; }
@@ -750,14 +764,14 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       '\'' => {
         // Either a character constant 'a' OR a lifetime name 'abc
         bump(rdr);
-        let start = rdr.last_pos;
-        let mut c2 = rdr.curr;
+        let start = rdr.last_pos.get();
+        let mut c2 = rdr.curr.get();
         bump(rdr);
 
         // If the character is an ident start not followed by another single
         // quote, then this is a lifetime name:
-        if ident_start(c2) && rdr.curr != '\'' {
-            while ident_continue(rdr.curr) {
+        if ident_start(c2) && rdr.curr.get() != '\'' {
+            while ident_continue(rdr.curr.get()) {
                 bump(rdr);
             }
             return with_str_from(rdr, start, |lifetime_name| {
@@ -765,11 +779,12 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
                 let tok = &token::IDENT(ident, false);
 
                 if token::is_keyword(token::keywords::Self, tok) {
-                    fatal_span(rdr, start, rdr.last_pos,
+                    fatal_span(rdr, start, rdr.last_pos.get(),
                                ~"invalid lifetime name: 'self is no longer a special lifetime");
                 } else if token::is_any_keyword(tok) &&
                     !token::is_keyword(token::keywords::Static, tok) {
-                    fatal_span(rdr, start, rdr.last_pos, ~"invalid lifetime name");
+                    fatal_span(rdr, start, rdr.last_pos.get(),
+                               ~"invalid lifetime name");
                 } else {
                     token::LIFETIME(ident)
                 }
@@ -780,8 +795,8 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
         match c2 {
             '\\' => {
                 // '\X' for some X must be a character constant:
-                let escaped = rdr.curr;
-                let escaped_pos = rdr.last_pos;
+                let escaped = rdr.curr.get();
+                let escaped_pos = rdr.last_pos.get();
                 bump(rdr);
                 match escaped {
                     'n' => { c2 = '\n'; }
@@ -795,24 +810,24 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
                     'u' => { c2 = scan_numeric_escape(rdr, 4u); }
                     'U' => { c2 = scan_numeric_escape(rdr, 8u); }
                     c2 => {
-                        fatal_span_char(rdr, escaped_pos, rdr.last_pos,
+                        fatal_span_char(rdr, escaped_pos, rdr.last_pos.get(),
                                         ~"unknown character escape", c2);
                     }
                 }
             }
             '\t' | '\n' | '\r' | '\'' => {
-                fatal_span_char(rdr, start, rdr.last_pos,
+                fatal_span_char(rdr, start, rdr.last_pos.get(),
                                 ~"character constant must be escaped", c2);
             }
             _ => {}
         }
-        if rdr.curr != '\'' {
+        if rdr.curr.get() != '\'' {
             fatal_span_verbose(rdr,
                                // Byte offsetting here is okay because the
                                // character before position `start` is an
                                // ascii single quote.
                                start - BytePos(1),
-                               rdr.last_pos,
+                               rdr.last_pos.get(),
                                ~"unterminated character constant");
         }
         bump(rdr); // advance curr past token
@@ -820,20 +835,20 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       }
       '"' => {
         let mut accum_str = ~"";
-        let start_bpos = rdr.last_pos;
+        let start_bpos = rdr.last_pos.get();
         bump(rdr);
-        while rdr.curr != '"' {
+        while rdr.curr.get() != '"' {
             if is_eof(rdr) {
-                fatal_span(rdr, start_bpos, rdr.last_pos,
+                fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                            ~"unterminated double quote string");
             }
 
-            let ch = rdr.curr;
+            let ch = rdr.curr.get();
             bump(rdr);
             match ch {
               '\\' => {
-                let escaped = rdr.curr;
-                let escaped_pos = rdr.last_pos;
+                let escaped = rdr.curr.get();
+                let escaped_pos = rdr.last_pos.get();
                 bump(rdr);
                 match escaped {
                   'n' => accum_str.push_char('\n'),
@@ -854,7 +869,7 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
                     accum_str.push_char(scan_numeric_escape(rdr, 8u));
                   }
                   c2 => {
-                    fatal_span_char(rdr, escaped_pos, rdr.last_pos,
+                    fatal_span_char(rdr, escaped_pos, rdr.last_pos.get(),
                                     ~"unknown string escape", c2);
                   }
                 }
@@ -866,32 +881,32 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
         return token::LIT_STR(str_to_ident(accum_str));
       }
       'r' => {
-        let start_bpos = rdr.last_pos;
+        let start_bpos = rdr.last_pos.get();
         bump(rdr);
         let mut hash_count = 0u;
-        while rdr.curr == '#' {
+        while rdr.curr.get() == '#' {
             bump(rdr);
             hash_count += 1;
         }
-        if rdr.curr != '"' {
-            fatal_span_char(rdr, start_bpos, rdr.last_pos,
+        if rdr.curr.get() != '"' {
+            fatal_span_char(rdr, start_bpos, rdr.last_pos.get(),
                             ~"only `#` is allowed in raw string delimitation; \
                               found illegal character",
-                            rdr.curr);
+                            rdr.curr.get());
         }
         bump(rdr);
-        let content_start_bpos = rdr.last_pos;
+        let content_start_bpos = rdr.last_pos.get();
         let mut content_end_bpos;
         'outer: loop {
             if is_eof(rdr) {
-                fatal_span(rdr, start_bpos, rdr.last_pos,
+                fatal_span(rdr, start_bpos, rdr.last_pos.get(),
                            ~"unterminated raw string");
             }
-            if rdr.curr == '"' {
-                content_end_bpos = rdr.last_pos;
+            if rdr.curr.get() == '"' {
+                content_end_bpos = rdr.last_pos.get();
                 for _ in range(0, hash_count) {
                     bump(rdr);
-                    if rdr.curr != '#' {
+                    if rdr.curr.get() != '#' {
                         continue 'outer;
                     }
                 }
@@ -932,14 +947,14 @@ fn next_token_inner(rdr: @mut StringReader) -> token::Token {
       '^' => { return binop(rdr, token::CARET); }
       '%' => { return binop(rdr, token::PERCENT); }
       c => {
-          fatal_span_char(rdr, rdr.last_pos, rdr.pos,
+          fatal_span_char(rdr, rdr.last_pos.get(), rdr.pos.get(),
                           ~"unknown start of token", c);
       }
     }
 }
 
-fn consume_whitespace(rdr: @mut StringReader) {
-    while is_whitespace(rdr.curr) && !is_eof(rdr) { bump(rdr); }
+fn consume_whitespace(rdr: @StringReader) {
+    while is_whitespace(rdr.curr.get()) && !is_eof(rdr) { bump(rdr); }
 }
 
 #[cfg(test)]
@@ -953,7 +968,7 @@ mod test {
 
     // represents a testing reader (incl. both reader and interner)
     struct Env {
-        string_reader: @mut StringReader
+        string_reader: @StringReader
     }
 
     // open a string reader for the given string
@@ -978,7 +993,7 @@ mod test {
             sp:Span {lo:BytePos(21),hi:BytePos(23),expn_info: None}};
         assert_eq!(tok1,tok2);
         // the 'main' id is already read:
-        assert_eq!(string_reader.last_pos.clone(), BytePos(28));
+        assert_eq!(string_reader.last_pos.get().clone(), BytePos(28));
         // read another token:
         let tok3 = string_reader.next_token();
         let tok4 = TokenAndSpan{
@@ -986,7 +1001,7 @@ mod test {
             sp:Span {lo:BytePos(24),hi:BytePos(28),expn_info: None}};
         assert_eq!(tok3,tok4);
         // the lparen is already read:
-        assert_eq!(string_reader.last_pos.clone(), BytePos(29))
+        assert_eq!(string_reader.last_pos.get().clone(), BytePos(29))
     }
 
     // check that the given reader produces the desired stream
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index 5a8444518aa..c20e7f4aaec 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -19,6 +19,7 @@ use parse::attr::parser_attr;
 use parse::lexer::reader;
 use parse::parser::Parser;
 
+use std::cell::RefCell;
 use std::io;
 use std::io::File;
 use std::str;
@@ -41,27 +42,27 @@ pub mod obsolete;
 // info about a parsing session.
 pub struct ParseSess {
     cm: @codemap::CodeMap, // better be the same as the one in the reader!
-    span_diagnostic: @mut SpanHandler, // better be the same as the one in the reader!
+    span_diagnostic: @SpanHandler, // better be the same as the one in the reader!
     /// Used to determine and report recursive mod inclusions
-    included_mod_stack: ~[Path],
+    included_mod_stack: RefCell<~[Path]>,
 }
 
-pub fn new_parse_sess(demitter: Option<@Emitter>) -> @mut ParseSess {
+pub fn new_parse_sess(demitter: Option<@Emitter>) -> @ParseSess {
     let cm = @CodeMap::new();
-    @mut ParseSess {
+    @ParseSess {
         cm: cm,
         span_diagnostic: mk_span_handler(mk_handler(demitter), cm),
-        included_mod_stack: ~[],
+        included_mod_stack: RefCell::new(~[]),
     }
 }
 
-pub fn new_parse_sess_special_handler(sh: @mut SpanHandler,
+pub fn new_parse_sess_special_handler(sh: @SpanHandler,
                                       cm: @codemap::CodeMap)
-                                   -> @mut ParseSess {
-    @mut ParseSess {
+                                      -> @ParseSess {
+    @ParseSess {
         cm: cm,
         span_diagnostic: sh,
-        included_mod_stack: ~[],
+        included_mod_stack: RefCell::new(~[]),
     }
 }
 
@@ -73,7 +74,7 @@ pub fn new_parse_sess_special_handler(sh: @mut SpanHandler,
 pub fn parse_crate_from_file(
     input: &Path,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> ast::Crate {
     new_parser_from_file(sess, /*bad*/ cfg.clone(), input).parse_crate_mod()
     // why is there no p.abort_if_errors here?
@@ -82,7 +83,7 @@ pub fn parse_crate_from_file(
 pub fn parse_crate_attrs_from_file(
     input: &Path,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> ~[ast::Attribute] {
     let mut parser = new_parser_from_file(sess, cfg, input);
     let (inner, _) = parser.parse_inner_attrs_and_next();
@@ -93,7 +94,7 @@ pub fn parse_crate_from_source_str(
     name: @str,
     source: @str,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> ast::Crate {
     let mut p = new_parser_from_source_str(sess,
                                            /*bad*/ cfg.clone(),
@@ -106,7 +107,7 @@ pub fn parse_crate_attrs_from_source_str(
     name: @str,
     source: @str,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> ~[ast::Attribute] {
     let mut p = new_parser_from_source_str(sess,
                                            /*bad*/ cfg.clone(),
@@ -120,7 +121,7 @@ pub fn parse_expr_from_source_str(
     name: @str,
     source: @str,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> @ast::Expr {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
     maybe_aborted(p.parse_expr(), p)
@@ -131,7 +132,7 @@ pub fn parse_item_from_source_str(
     source: @str,
     cfg: ast::CrateConfig,
     attrs: ~[ast::Attribute],
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> Option<@ast::item> {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
     maybe_aborted(p.parse_item(attrs),p)
@@ -141,7 +142,7 @@ pub fn parse_meta_from_source_str(
     name: @str,
     source: @str,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> @ast::MetaItem {
     let mut p = new_parser_from_source_str(sess, cfg, name, source);
     maybe_aborted(p.parse_meta_item(),p)
@@ -152,7 +153,7 @@ pub fn parse_stmt_from_source_str(
     source: @str,
     cfg: ast::CrateConfig,
     attrs: ~[ast::Attribute],
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> @ast::Stmt {
     let mut p = new_parser_from_source_str(
         sess,
@@ -167,7 +168,7 @@ pub fn parse_tts_from_source_str(
     name: @str,
     source: @str,
     cfg: ast::CrateConfig,
-    sess: @mut ParseSess
+    sess: @ParseSess
 ) -> ~[ast::token_tree] {
     let mut p = new_parser_from_source_str(
         sess,
@@ -191,7 +192,7 @@ pub fn parse_from_source_str<T>(
                              ss: codemap::FileSubstr,
                              source: @str,
                              cfg: ast::CrateConfig,
-                             sess: @mut ParseSess)
+                             sess: @ParseSess)
                              -> T {
     let mut p = new_parser_from_source_substr(sess, cfg, name, ss, source);
     let r = f(&mut p);
@@ -202,7 +203,7 @@ pub fn parse_from_source_str<T>(
 }
 
 // Create a new parser from a source string
-pub fn new_parser_from_source_str(sess: @mut ParseSess,
+pub fn new_parser_from_source_str(sess: @ParseSess,
                                   cfg: ast::CrateConfig,
                                   name: @str,
                                   source: @str)
@@ -212,7 +213,7 @@ pub fn new_parser_from_source_str(sess: @mut ParseSess,
 
 // Create a new parser from a source string where the origin
 // is specified as a substring of another file.
-pub fn new_parser_from_source_substr(sess: @mut ParseSess,
+pub fn new_parser_from_source_substr(sess: @ParseSess,
                                   cfg: ast::CrateConfig,
                                   name: @str,
                                   ss: codemap::FileSubstr,
@@ -224,7 +225,7 @@ pub fn new_parser_from_source_substr(sess: @mut ParseSess,
 /// Create a new parser, handling errors as appropriate
 /// if the file doesn't exist
 pub fn new_parser_from_file(
-    sess: @mut ParseSess,
+    sess: @ParseSess,
     cfg: ast::CrateConfig,
     path: &Path
 ) -> Parser {
@@ -235,7 +236,7 @@ pub fn new_parser_from_file(
 /// the file at the given path to the codemap, and return a parser.
 /// On an error, use the given span as the source of the problem.
 pub fn new_sub_parser_from_file(
-    sess: @mut ParseSess,
+    sess: @ParseSess,
     cfg: ast::CrateConfig,
     path: &Path,
     sp: Span
@@ -244,7 +245,7 @@ pub fn new_sub_parser_from_file(
 }
 
 /// Given a filemap and config, return a parser
-pub fn filemap_to_parser(sess: @mut ParseSess,
+pub fn filemap_to_parser(sess: @ParseSess,
                          filemap: @FileMap,
                          cfg: ast::CrateConfig) -> Parser {
     tts_to_parser(sess,filemap_to_tts(sess,filemap),cfg)
@@ -252,7 +253,7 @@ pub fn filemap_to_parser(sess: @mut ParseSess,
 
 // must preserve old name for now, because quote! from the *existing*
 // compiler expands into it
-pub fn new_parser_from_tts(sess: @mut ParseSess,
+pub fn new_parser_from_tts(sess: @ParseSess,
                      cfg: ast::CrateConfig,
                      tts: ~[ast::token_tree]) -> Parser {
     tts_to_parser(sess,tts,cfg)
@@ -263,7 +264,7 @@ pub fn new_parser_from_tts(sess: @mut ParseSess,
 
 /// Given a session and a path and an optional span (for error reporting),
 /// add the path to the session's codemap and return the new filemap.
-pub fn file_to_filemap(sess: @mut ParseSess, path: &Path, spanopt: Option<Span>)
+pub fn file_to_filemap(sess: @ParseSess, path: &Path, spanopt: Option<Span>)
     -> @FileMap {
     let err = |msg: &str| {
         match spanopt {
@@ -292,35 +293,35 @@ pub fn file_to_filemap(sess: @mut ParseSess, path: &Path, spanopt: Option<Span>)
 
 // given a session and a string, add the string to
 // the session's codemap and return the new filemap
-pub fn string_to_filemap(sess: @mut ParseSess, source: @str, path: @str)
+pub fn string_to_filemap(sess: @ParseSess, source: @str, path: @str)
     -> @FileMap {
     sess.cm.new_filemap(path, source)
 }
 
 // given a session and a string and a path and a FileSubStr, add
 // the string to the CodeMap and return the new FileMap
-pub fn substring_to_filemap(sess: @mut ParseSess, source: @str, path: @str,
+pub fn substring_to_filemap(sess: @ParseSess, source: @str, path: @str,
                            filesubstr: FileSubstr) -> @FileMap {
     sess.cm.new_filemap_w_substr(path,filesubstr,source)
 }
 
 // given a filemap, produce a sequence of token-trees
-pub fn filemap_to_tts(sess: @mut ParseSess, filemap: @FileMap)
+pub fn filemap_to_tts(sess: @ParseSess, filemap: @FileMap)
     -> ~[ast::token_tree] {
     // it appears to me that the cfg doesn't matter here... indeed,
     // parsing tt's probably shouldn't require a parser at all.
     let cfg = ~[];
     let srdr = lexer::new_string_reader(sess.span_diagnostic, filemap);
-    let mut p1 = Parser(sess, cfg, srdr as @mut reader);
+    let mut p1 = Parser(sess, cfg, srdr as @reader);
     p1.parse_all_token_trees()
 }
 
 // given tts and cfg, produce a parser
-pub fn tts_to_parser(sess: @mut ParseSess,
+pub fn tts_to_parser(sess: @ParseSess,
                      tts: ~[ast::token_tree],
                      cfg: ast::CrateConfig) -> Parser {
     let trdr = lexer::new_tt_reader(sess.span_diagnostic, None, tts);
-    Parser(sess, cfg, trdr as @mut reader)
+    Parser(sess, cfg, trdr as @reader)
 }
 
 // abort if necessary
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index 2428710087f..40a2ef86e4f 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -29,8 +29,7 @@ use ast::{ExprField, ExprFnBlock, ExprIf, ExprIndex};
 use ast::{ExprLit, ExprLogLevel, ExprLoop, ExprMac};
 use ast::{ExprMethodCall, ExprParen, ExprPath, ExprProc, ExprRepeat};
 use ast::{ExprRet, ExprSelf, ExprStruct, ExprTup, ExprUnary};
-use ast::{ExprVec, ExprVstore, ExprVstoreMutBox};
-use ast::{ExprVstoreSlice, ExprVstoreBox};
+use ast::{ExprVec, ExprVstore, ExprVstoreSlice, ExprVstoreBox};
 use ast::{ExprVstoreMutSlice, ExprWhile, ExprForLoop, extern_fn, Field, fn_decl};
 use ast::{ExprVstoreUniq, Onceness, Once, Many};
 use ast::{foreign_item, foreign_item_static, foreign_item_fn, foreign_mod};
@@ -81,6 +80,7 @@ use parse::{new_sub_parser_from_file, ParseSess};
 use opt_vec;
 use opt_vec::OptVec;
 
+use std::cell::Cell;
 use std::hashmap::HashSet;
 use std::util;
 use std::vec;
@@ -286,8 +286,8 @@ struct ParsedItemsAndViewItems {
 
 /* ident is handled by common.rs */
 
-pub fn Parser(sess: @mut ParseSess, cfg: ast::CrateConfig, rdr: @mut reader)
-           -> Parser {
+pub fn Parser(sess: @ParseSess, cfg: ast::CrateConfig, rdr: @reader)
+              -> Parser {
     let tok0 = rdr.next_token();
     let interner = get_ident_interner();
     let span = tok0.sp;
@@ -324,7 +324,7 @@ pub fn Parser(sess: @mut ParseSess, cfg: ast::CrateConfig, rdr: @mut reader)
 }
 
 pub struct Parser {
-    sess: @mut ParseSess,
+    sess: @ParseSess,
     cfg: CrateConfig,
     // the current token:
     token: token::Token,
@@ -340,7 +340,7 @@ pub struct Parser {
     tokens_consumed: uint,
     restriction: restriction,
     quote_depth: uint, // not (yet) related to the quasiquoter
-    reader: @mut reader,
+    reader: @reader,
     interner: @token::ident_interner,
     /// The set of seen errors about obsolete syntax. Used to suppress
     /// extra detail when the same error is seen twice
@@ -1299,7 +1299,7 @@ impl Parser {
         if sigil == OwnedSigil {
             ty_uniq(self.parse_ty(false))
         } else {
-            ty_box(self.parse_mt())
+            ty_box(self.parse_ty(false))
         }
     }
 
@@ -2185,7 +2185,7 @@ impl Parser {
         // unification of matchers and token_trees would vastly improve
         // the interpolation of matchers
         maybe_whole!(self, nt_matchers);
-        let name_idx = @mut 0u;
+        let name_idx = @Cell::new(0u);
         match self.token {
             token::LBRACE | token::LPAREN | token::LBRACKET => {
                 let other_delimiter = token::flip_delimiter(&self.token);
@@ -2200,7 +2200,7 @@ impl Parser {
     // Otherwise, `$( ( )` would be a valid matcher, and `$( () )` would be
     // invalid. It's similar to common::parse_seq.
     pub fn parse_matcher_subseq_upto(&mut self,
-                                     name_idx: @mut uint,
+                                     name_idx: @Cell<uint>,
                                      ket: &token::Token)
                                      -> ~[matcher] {
         let mut ret_val = ~[];
@@ -2217,13 +2217,13 @@ impl Parser {
         return ret_val;
     }
 
-    pub fn parse_matcher(&mut self, name_idx: @mut uint) -> matcher {
+    pub fn parse_matcher(&mut self, name_idx: @Cell<uint>) -> matcher {
         let lo = self.span.lo;
 
         let m = if self.token == token::DOLLAR {
             self.bump();
             if self.token == token::LPAREN {
-                let name_idx_lo = *name_idx;
+                let name_idx_lo = name_idx.get();
                 self.bump();
                 let ms = self.parse_matcher_subseq_upto(name_idx,
                                                         &token::RPAREN);
@@ -2231,13 +2231,13 @@ impl Parser {
                     self.fatal("repetition body must be nonempty");
                 }
                 let (sep, zerok) = self.parse_sep_and_zerok();
-                match_seq(ms, sep, zerok, name_idx_lo, *name_idx)
+                match_seq(ms, sep, zerok, name_idx_lo, name_idx.get())
             } else {
                 let bound_to = self.parse_ident();
                 self.expect(&token::COLON);
                 let nt_name = self.parse_ident();
-                let m = match_nonterminal(bound_to, nt_name, *name_idx);
-                *name_idx += 1u;
+                let m = match_nonterminal(bound_to, nt_name, name_idx.get());
+                name_idx.set(name_idx.get() + 1u);
                 m
             }
         } else {
@@ -2299,17 +2299,14 @@ impl Parser {
           }
           token::AT => {
             self.bump();
-            let m = self.parse_mutability();
             let e = self.parse_prefix_expr();
             hi = e.span.hi;
             // HACK: turn @[...] into a @-evec
             ex = match e.node {
-              ExprVec(..) | ExprRepeat(..) if m == MutMutable =>
-                ExprVstore(e, ExprVstoreMutBox),
               ExprVec(..) |
               ExprLit(@codemap::Spanned { node: lit_str(..), span: _}) |
-              ExprRepeat(..) if m == MutImmutable => ExprVstore(e, ExprVstoreBox),
-              _ => self.mk_unary(UnBox(m), e)
+              ExprRepeat(..) => ExprVstore(e, ExprVstoreBox),
+              _ => self.mk_unary(UnBox, e)
             };
           }
           token::TILDE => {
@@ -4264,21 +4261,28 @@ impl Parser {
                               path: Path,
                               outer_attrs: ~[ast::Attribute],
                               id_sp: Span) -> (ast::item_, ~[ast::Attribute]) {
-        let maybe_i = self.sess.included_mod_stack.iter().position(|p| *p == path);
-        match maybe_i {
-            Some(i) => {
-                let stack = &self.sess.included_mod_stack;
-                let mut err = ~"circular modules: ";
-                for p in stack.slice(i, stack.len()).iter() {
-                    p.display().with_str(|s| err.push_str(s));
-                    err.push_str(" -> ");
+        {
+            let mut included_mod_stack = self.sess
+                                             .included_mod_stack
+                                             .borrow_mut();
+            let maybe_i = included_mod_stack.get()
+                                            .iter()
+                                            .position(|p| *p == path);
+            match maybe_i {
+                Some(i) => {
+                    let mut err = ~"circular modules: ";
+                    let len = included_mod_stack.get().len();
+                    for p in included_mod_stack.get().slice(i, len).iter() {
+                        p.display().with_str(|s| err.push_str(s));
+                        err.push_str(" -> ");
+                    }
+                    path.display().with_str(|s| err.push_str(s));
+                    self.span_fatal(id_sp, err);
                 }
-                path.display().with_str(|s| err.push_str(s));
-                self.span_fatal(id_sp, err);
+                None => ()
             }
-            None => ()
+            included_mod_stack.get().push(path.clone());
         }
-        self.sess.included_mod_stack.push(path.clone());
 
         let mut p0 =
             new_sub_parser_from_file(self.sess,
@@ -4289,7 +4293,12 @@ impl Parser {
         let mod_attrs = vec::append(outer_attrs, inner);
         let first_item_outer_attrs = next;
         let m0 = p0.parse_mod_items(token::EOF, first_item_outer_attrs);
-        self.sess.included_mod_stack.pop();
+        {
+            let mut included_mod_stack = self.sess
+                                             .included_mod_stack
+                                             .borrow_mut();
+            included_mod_stack.get().pop();
+        }
         return (ast::item_mod(m0), mod_attrs);
     }