about summary refs log tree commit diff
path: root/src/rustc/syntax/parse
diff options
context:
space:
mode:
author    Graydon Hoare <graydon@mozilla.com>  2012-02-29 11:46:23 -0800
committer Graydon Hoare <graydon@mozilla.com>  2012-03-02 18:46:13 -0800
commit87c14f1e3d85751bffffda0b1920be5e726172c4 (patch)
tree371d86e9a7c65b06df5c8f5e6d499cf4730324fc /src/rustc/syntax/parse
parent9228947fe15af96593abf4745d91802b56c205e8 (diff)
downloadrust-87c14f1e3d85751bffffda0b1920be5e726172c4.tar.gz
rust-87c14f1e3d85751bffffda0b1920be5e726172c4.zip
Move src/comp to src/rustc
Diffstat (limited to 'src/rustc/syntax/parse')
-rw-r--r--src/rustc/syntax/parse/eval.rs150
-rw-r--r--src/rustc/syntax/parse/lexer.rs748
-rw-r--r--src/rustc/syntax/parse/parser.rs2747
-rw-r--r--src/rustc/syntax/parse/token.rs199
4 files changed, 3844 insertions, 0 deletions
diff --git a/src/rustc/syntax/parse/eval.rs b/src/rustc/syntax/parse/eval.rs
new file mode 100644
index 00000000000..06f26905550
--- /dev/null
+++ b/src/rustc/syntax/parse/eval.rs
@@ -0,0 +1,150 @@
+
+import front::attr;
+import std::{io, fs};
+import syntax::ast;
+import syntax::parse::token;
+import syntax::parse::parser::{parser, new_parser_from_file,
+                               parse_inner_attrs_and_next,
+                               parse_mod_items, SOURCE_FILE};
+
+export eval_crate_directives_to_mod;
+
+type ctx =
+    @{p: parser,
+      sess: parser::parse_sess,
+      cfg: ast::crate_cfg};
+
+fn eval_crate_directives(cx: ctx, cdirs: [@ast::crate_directive], prefix: str,
+                         &view_items: [@ast::view_item],
+                         &items: [@ast::item]) {
+    for sub_cdir: @ast::crate_directive in cdirs {
+        eval_crate_directive(cx, sub_cdir, prefix, view_items, items);
+    }
+}
+
+fn eval_crate_directives_to_mod(cx: ctx, cdirs: [@ast::crate_directive],
+                                prefix: str, suffix: option<str>)
+    -> (ast::_mod, [ast::attribute]) {
+    #debug("eval crate prefix: %s", prefix);
+    #debug("eval crate suffix: %s",
+           option::from_maybe("none", suffix));
+    let (cview_items, citems, cattrs)
+        = parse_companion_mod(cx, prefix, suffix);
+    let view_items: [@ast::view_item] = [];
+    let items: [@ast::item] = [];
+    eval_crate_directives(cx, cdirs, prefix, view_items, items);
+    ret ({view_items: view_items + cview_items,
+          items: items + citems},
+         cattrs);
+}
+
+/*
+The 'companion mod'. So .rc crates and directory mod crate directives define
+modules but not a .rs file to fill those mods with stuff. The companion mod is
+a convention for locating a .rs file to go with them.  For .rc files the
+companion mod is a .rs file with the same name; for directory mods the
+companion mod is a .rs file with the same name as the directory.
+
+We build the path to the companion mod by combining the prefix and the
+optional suffix then adding the .rs extension.
+*/
+fn parse_companion_mod(cx: ctx, prefix: str, suffix: option<str>)
+    -> ([@ast::view_item], [@ast::item], [ast::attribute]) {
+
+    fn companion_file(prefix: str, suffix: option<str>) -> str {
+        ret alt suffix {
+          option::some(s) { fs::connect(prefix, s) }
+          option::none { prefix }
+        } + ".rs";
+    }
+
+    fn file_exists(path: str) -> bool {
+        // Crude, but there's no lib function for this and I'm not
+        // up to writing it just now
+        alt io::file_reader(path) {
+          result::ok(_) { true }
+          result::err(_) { false }
+        }
+    }
+
+    let modpath = companion_file(prefix, suffix);
+    #debug("looking for companion mod %s", modpath);
+    if file_exists(modpath) {
+        #debug("found companion mod");
+        let p0 = new_parser_from_file(cx.sess, cx.cfg, modpath,
+                                     SOURCE_FILE);
+        let inner_attrs = parse_inner_attrs_and_next(p0);
+        let first_item_outer_attrs = inner_attrs.next;
+        let m0 = parse_mod_items(p0, token::EOF, first_item_outer_attrs);
+        cx.sess.chpos = p0.reader.chpos;
+        cx.sess.byte_pos = cx.sess.byte_pos + p0.reader.pos;
+        ret (m0.view_items, m0.items, inner_attrs.inner);
+    } else {
+        ret ([], [], []);
+    }
+}
+
+fn cdir_path_opt(id: str, attrs: [ast::attribute]) -> str {
+    alt attr::get_meta_item_value_str_by_name(attrs, "path") {
+      some(d) {
+        ret d;
+      }
+      none { ret id; }
+    }
+}
+
+fn eval_crate_directive(cx: ctx, cdir: @ast::crate_directive, prefix: str,
+                        &view_items: [@ast::view_item],
+                        &items: [@ast::item]) {
+    alt cdir.node {
+      ast::cdir_src_mod(id, attrs) {
+        let file_path = cdir_path_opt(id + ".rs", attrs);
+        let full_path =
+            if std::fs::path_is_absolute(file_path) {
+                file_path
+            } else { prefix + std::fs::path_sep() + file_path };
+        let p0 =
+            new_parser_from_file(cx.sess, cx.cfg, full_path, SOURCE_FILE);
+        let inner_attrs = parse_inner_attrs_and_next(p0);
+        let mod_attrs = attrs + inner_attrs.inner;
+        let first_item_outer_attrs = inner_attrs.next;
+        let m0 = parse_mod_items(p0, token::EOF, first_item_outer_attrs);
+
+        let i =
+            syntax::parse::parser::mk_item(p0, cdir.span.lo, cdir.span.hi, id,
+                                           ast::item_mod(m0), mod_attrs);
+        // Thread defids, chpos and byte_pos through the parsers
+        cx.sess.chpos = p0.reader.chpos;
+        cx.sess.byte_pos = cx.sess.byte_pos + p0.reader.pos;
+        items += [i];
+      }
+      ast::cdir_dir_mod(id, cdirs, attrs) {
+        let path = cdir_path_opt(id, attrs);
+        let full_path =
+            if std::fs::path_is_absolute(path) {
+                path
+            } else { prefix + std::fs::path_sep() + path };
+        let (m0, a0) = eval_crate_directives_to_mod(
+            cx, cdirs, full_path, none);
+        let i =
+            @{ident: id,
+              attrs: attrs + a0,
+              id: cx.sess.next_id,
+              node: ast::item_mod(m0),
+              span: cdir.span};
+        cx.sess.next_id += 1;
+        items += [i];
+      }
+      ast::cdir_view_item(vi) { view_items += [vi]; }
+      ast::cdir_syntax(pth) { }
+    }
+}
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End:
+//
diff --git a/src/rustc/syntax/parse/lexer.rs b/src/rustc/syntax/parse/lexer.rs
new file mode 100644
index 00000000000..3350d7a7c95
--- /dev/null
+++ b/src/rustc/syntax/parse/lexer.rs
@@ -0,0 +1,748 @@
+
+import std::io;
+import io::reader_util;
+import util::interner;
+import util::interner::intern;
+import driver::diagnostic;
+
+type reader = @{
+    cm: codemap::codemap,
+    span_diagnostic: diagnostic::span_handler,
+    src: @str,
+    len: uint,
+    mutable col: uint,
+    mutable pos: uint,
+    mutable curr: char,
+    mutable chpos: uint,
+    mutable strs: [str],
+    filemap: codemap::filemap,
+    interner: @interner::interner<str>
+};
+
+impl reader for reader {
+    fn is_eof() -> bool { self.curr == -1 as char }
+    fn get_str_from(start: uint) -> str unsafe {
+        // I'm pretty skeptical about this subtraction. What if there's a
+        // multi-byte character before the mark?
+        ret str::slice(*self.src, start - 1u, self.pos - 1u);
+    }
+    fn next() -> char {
+        if self.pos < self.len {
+            ret str::char_at(*self.src, self.pos);
+        } else { ret -1 as char; }
+    }
+    fn bump() {
+        if self.pos < self.len {
+            self.col += 1u;
+            self.chpos += 1u;
+            if self.curr == '\n' {
+                codemap::next_line(self.filemap, self.chpos, self.pos +
+                                   self.filemap.start_pos.byte);
+                self.col = 0u;
+            }
+            let next = str::char_range_at(*self.src, self.pos);
+            self.pos = next.next;
+            self.curr = next.ch;
+        } else {
+            if (self.curr != -1 as char) {
+                self.col += 1u;
+                self.chpos += 1u;
+                self.curr = -1 as char;
+            }
+        }
+    }
+    fn fatal(m: str) -> ! {
+        self.span_diagnostic.span_fatal(
+            ast_util::mk_sp(self.chpos, self.chpos),
+            m)
+    }
+}
+
+fn new_reader(cm: codemap::codemap,
+              span_diagnostic: diagnostic::span_handler,
+              filemap: codemap::filemap,
+              itr: @interner::interner<str>) -> reader {
+    let r = @{cm: cm,
+              span_diagnostic: span_diagnostic,
+              src: filemap.src, len: str::len(*filemap.src),
+              mutable col: 0u, mutable pos: 0u, mutable curr: -1 as char,
+              mutable chpos: filemap.start_pos.ch, mutable strs: [],
+              filemap: filemap, interner: itr};
+    if r.pos < r.len {
+        let next = str::char_range_at(*r.src, r.pos);
+        r.pos = next.next;
+        r.curr = next.ch;
+    }
+    ret r;
+}
+
+fn dec_digit_val(c: char) -> int { ret (c as int) - ('0' as int); }
+
+fn hex_digit_val(c: char) -> int {
+    if in_range(c, '0', '9') { ret (c as int) - ('0' as int); }
+    if in_range(c, 'a', 'f') { ret (c as int) - ('a' as int) + 10; }
+    if in_range(c, 'A', 'F') { ret (c as int) - ('A' as int) + 10; }
+    fail;
+}
+
+fn bin_digit_value(c: char) -> int { if c == '0' { ret 0; } ret 1; }
+
+fn is_whitespace(c: char) -> bool {
+    ret c == ' ' || c == '\t' || c == '\r' || c == '\n';
+}
+
+fn may_begin_ident(c: char) -> bool { ret is_alpha(c) || c == '_'; }
+
+fn in_range(c: char, lo: char, hi: char) -> bool { ret lo <= c && c <= hi; }
+
+fn is_alpha(c: char) -> bool {
+    ret in_range(c, 'a', 'z') || in_range(c, 'A', 'Z');
+}
+
+fn is_dec_digit(c: char) -> bool { ret in_range(c, '0', '9'); }
+
+fn is_alnum(c: char) -> bool { ret is_alpha(c) || is_dec_digit(c); }
+
+fn is_hex_digit(c: char) -> bool {
+    ret in_range(c, '0', '9') || in_range(c, 'a', 'f') ||
+            in_range(c, 'A', 'F');
+}
+
+fn is_bin_digit(c: char) -> bool { ret c == '0' || c == '1'; }
+
+fn consume_whitespace_and_comments(rdr: reader) {
+    while is_whitespace(rdr.curr) { rdr.bump(); }
+    be consume_any_line_comment(rdr);
+}
+
+fn consume_any_line_comment(rdr: reader) {
+    if rdr.curr == '/' {
+        alt rdr.next() {
+          '/' {
+            while rdr.curr != '\n' && !rdr.is_eof() { rdr.bump(); }
+            // Restart whitespace munch.
+
+            be consume_whitespace_and_comments(rdr);
+          }
+          '*' { rdr.bump(); rdr.bump(); be consume_block_comment(rdr); }
+          _ { ret; }
+        }
+    }
+}
+
+fn consume_block_comment(rdr: reader) {
+    let level: int = 1;
+    while level > 0 {
+        if rdr.is_eof() { rdr.fatal("unterminated block comment"); }
+        if rdr.curr == '/' && rdr.next() == '*' {
+            rdr.bump();
+            rdr.bump();
+            level += 1;
+        } else {
+            if rdr.curr == '*' && rdr.next() == '/' {
+                rdr.bump();
+                rdr.bump();
+                level -= 1;
+            } else { rdr.bump(); }
+        }
+    }
+    // restart whitespace munch.
+
+    be consume_whitespace_and_comments(rdr);
+}
+
+fn scan_exponent(rdr: reader) -> option<str> {
+    let c = rdr.curr;
+    let rslt = "";
+    if c == 'e' || c == 'E' {
+        str::push_char(rslt, c);
+        rdr.bump();
+        c = rdr.curr;
+        if c == '-' || c == '+' {
+            str::push_char(rslt, c);
+            rdr.bump();
+        }
+        let exponent = scan_digits(rdr, 10u);
+        if str::len(exponent) > 0u {
+            ret some(rslt + exponent);
+        } else { rdr.fatal("scan_exponent: bad fp literal"); }
+    } else { ret none::<str>; }
+}
+
+fn scan_digits(rdr: reader, radix: uint) -> str {
+    let rslt = "";
+    while true {
+        let c = rdr.curr;
+        if c == '_' { rdr.bump(); cont; }
+        alt char::to_digit(c, radix) {
+          some(d) {
+            str::push_char(rslt, c);
+            rdr.bump();
+          }
+          _ { break; }
+        }
+    }
+    ret rslt;
+}
+
+fn scan_number(c: char, rdr: reader) -> token::token {
+    let num_str, base = 10u, c = c, n = rdr.next();
+    if c == '0' && n == 'x' {
+        rdr.bump();
+        rdr.bump();
+        base = 16u;
+    } else if c == '0' && n == 'b' {
+        rdr.bump();
+        rdr.bump();
+        base = 2u;
+    }
+    num_str = scan_digits(rdr, base);
+    c = rdr.curr;
+    n = rdr.next();
+    if c == 'u' || c == 'i' {
+        let signed = c == 'i', tp = if signed { either::left(ast::ty_i) }
+                                         else { either::right(ast::ty_u) };
+        rdr.bump();
+        c = rdr.curr;
+        if c == '8' {
+            rdr.bump();
+            tp = if signed { either::left(ast::ty_i8) }
+                      else { either::right(ast::ty_u8) };
+        }
+        n = rdr.next();
+        if c == '1' && n == '6' {
+            rdr.bump();
+            rdr.bump();
+            tp = if signed { either::left(ast::ty_i16) }
+                      else { either::right(ast::ty_u16) };
+        } else if c == '3' && n == '2' {
+            rdr.bump();
+            rdr.bump();
+            tp = if signed { either::left(ast::ty_i32) }
+                      else { either::right(ast::ty_u32) };
+        } else if c == '6' && n == '4' {
+            rdr.bump();
+            rdr.bump();
+            tp = if signed { either::left(ast::ty_i64) }
+                      else { either::right(ast::ty_u64) };
+        }
+        if str::len(num_str) == 0u {
+            rdr.fatal("no valid digits found for number");
+        }
+        let parsed = option::get(u64::from_str(num_str, base as u64));
+        alt tp {
+          either::left(t) { ret token::LIT_INT(parsed as i64, t); }
+          either::right(t) { ret token::LIT_UINT(parsed, t); }
+        }
+    }
+    let is_float = false;
+    if rdr.curr == '.' && !(is_alpha(rdr.next()) || rdr.next() == '_') {
+        is_float = true;
+        rdr.bump();
+        let dec_part = scan_digits(rdr, 10u);
+        num_str += "." + dec_part;
+    }
+    alt scan_exponent(rdr) {
+      some(s) {
+        is_float = true;
+        num_str += s;
+      }
+      none {}
+    }
+    if rdr.curr == 'f' {
+        rdr.bump();
+        c = rdr.curr;
+        n = rdr.next();
+        if c == '3' && n == '2' {
+            rdr.bump();
+            rdr.bump();
+            ret token::LIT_FLOAT(intern(*rdr.interner, num_str),
+                                 ast::ty_f32);
+        } else if c == '6' && n == '4' {
+            rdr.bump();
+            rdr.bump();
+            ret token::LIT_FLOAT(intern(*rdr.interner, num_str),
+                                 ast::ty_f64);
+            /* FIXME: if this is out of range for either a 32-bit or
+            64-bit float, it won't be noticed till the back-end */
+        } else {
+            is_float = true;
+        }
+    }
+    if is_float {
+        ret token::LIT_FLOAT(interner::intern(*rdr.interner, num_str),
+                             ast::ty_f);
+    } else {
+        if str::len(num_str) == 0u {
+            rdr.fatal("no valid digits found for number");
+        }
+        let parsed = option::get(u64::from_str(num_str, base as u64));
+        ret token::LIT_INT(parsed as i64, ast::ty_i);
+    }
+}
+
+fn scan_numeric_escape(rdr: reader, n_hex_digits: uint) -> char {
+    let accum_int = 0, i = n_hex_digits;
+    while i != 0u {
+        let n = rdr.curr;
+        rdr.bump();
+        if !is_hex_digit(n) {
+            rdr.fatal(#fmt["illegal numeric character escape: %d", n as int]);
+        }
+        accum_int *= 16;
+        accum_int += hex_digit_val(n);
+        i -= 1u;
+    }
+    ret accum_int as char;
+}
+
+fn next_token(rdr: reader) -> {tok: token::token, chpos: uint, bpos: uint} {
+    consume_whitespace_and_comments(rdr);
+    let start_chpos = rdr.chpos;
+    let start_bpos = rdr.pos;
+    let tok = if rdr.is_eof() { token::EOF } else { next_token_inner(rdr) };
+    ret {tok: tok, chpos: start_chpos, bpos: start_bpos};
+}
+
+fn next_token_inner(rdr: reader) -> token::token {
+    let accum_str = "";
+    let c = rdr.curr;
+    if (c >= 'a' && c <= 'z')
+        || (c >= 'A' && c <= 'Z')
+        || c == '_'
+        || (c > 'z' && char::is_XID_start(c)) {
+        while (c >= 'a' && c <= 'z')
+            || (c >= 'A' && c <= 'Z')
+            || (c >= '0' && c <= '9')
+            || c == '_'
+            || (c > 'z' && char::is_XID_continue(c)) {
+            str::push_char(accum_str, c);
+            rdr.bump();
+            c = rdr.curr;
+        }
+        if str::eq(accum_str, "_") { ret token::UNDERSCORE; }
+        let is_mod_name = c == ':' && rdr.next() == ':';
+
+        // FIXME: perform NFKC normalization here.
+        ret token::IDENT(interner::intern::<str>(*rdr.interner,
+                                                 accum_str), is_mod_name);
+    }
+    if is_dec_digit(c) {
+        ret scan_number(c, rdr);
+    }
+    fn binop(rdr: reader, op: token::binop) -> token::token {
+        rdr.bump();
+        if rdr.curr == '=' {
+            rdr.bump();
+            ret token::BINOPEQ(op);
+        } else { ret token::BINOP(op); }
+    }
+    alt c {
+
+
+
+
+
+      // One-byte tokens.
+      ';' { rdr.bump(); ret token::SEMI; }
+      ',' { rdr.bump(); ret token::COMMA; }
+      '.' {
+        rdr.bump();
+        if rdr.curr == '.' && rdr.next() == '.' {
+            rdr.bump();
+            rdr.bump();
+            ret token::ELLIPSIS;
+        }
+        ret token::DOT;
+      }
+      '(' { rdr.bump(); ret token::LPAREN; }
+      ')' { rdr.bump(); ret token::RPAREN; }
+      '{' { rdr.bump(); ret token::LBRACE; }
+      '}' { rdr.bump(); ret token::RBRACE; }
+      '[' { rdr.bump(); ret token::LBRACKET; }
+      ']' { rdr.bump(); ret token::RBRACKET; }
+      '@' { rdr.bump(); ret token::AT; }
+      '#' {
+        rdr.bump();
+        if rdr.curr == '<' { rdr.bump(); ret token::POUND_LT; }
+        if rdr.curr == '{' { rdr.bump(); ret token::POUND_LBRACE; }
+        ret token::POUND;
+      }
+      '~' { rdr.bump(); ret token::TILDE; }
+      ':' {
+        rdr.bump();
+        if rdr.curr == ':' {
+            rdr.bump();
+            ret token::MOD_SEP;
+        } else { ret token::COLON; }
+      }
+
+      '$' {
+        rdr.bump();
+        if is_dec_digit(rdr.curr) {
+            let val = dec_digit_val(rdr.curr) as uint;
+            while is_dec_digit(rdr.next()) {
+                rdr.bump();
+                val = val * 10u + (dec_digit_val(rdr.curr) as uint);
+            }
+            rdr.bump();
+            ret token::DOLLAR_NUM(val);
+        } else if rdr.curr == '(' {
+            rdr.bump();
+            ret token::DOLLAR_LPAREN;
+        } else {
+            rdr.fatal("expected digit");
+        }
+      }
+
+
+
+
+
+      // Multi-byte tokens.
+      '=' {
+        rdr.bump();
+        if rdr.curr == '=' {
+            rdr.bump();
+            ret token::EQEQ;
+        } else { ret token::EQ; }
+      }
+      '!' {
+        rdr.bump();
+        if rdr.curr == '=' {
+            rdr.bump();
+            ret token::NE;
+        } else { ret token::NOT; }
+      }
+      '<' {
+        rdr.bump();
+        alt rdr.curr {
+          '=' { rdr.bump(); ret token::LE; }
+          '<' { ret binop(rdr, token::LSL); }
+          '-' {
+            rdr.bump();
+            alt rdr.curr {
+              '>' { rdr.bump(); ret token::DARROW; }
+              _ { ret token::LARROW; }
+            }
+          }
+          _ { ret token::LT; }
+        }
+      }
+      '>' {
+        rdr.bump();
+        alt rdr.curr {
+          '=' { rdr.bump(); ret token::GE; }
+          '>' {
+            if rdr.next() == '>' {
+                rdr.bump();
+                ret binop(rdr, token::ASR);
+            } else { ret binop(rdr, token::LSR); }
+          }
+          _ { ret token::GT; }
+        }
+      }
+      '\'' {
+        rdr.bump();
+        let c2 = rdr.curr;
+        rdr.bump();
+        if c2 == '\\' {
+            let escaped = rdr.curr;
+            rdr.bump();
+            alt escaped {
+              'n' { c2 = '\n'; }
+              'r' { c2 = '\r'; }
+              't' { c2 = '\t'; }
+              '\\' { c2 = '\\'; }
+              '\'' { c2 = '\''; }
+              'x' { c2 = scan_numeric_escape(rdr, 2u); }
+              'u' { c2 = scan_numeric_escape(rdr, 4u); }
+              'U' { c2 = scan_numeric_escape(rdr, 8u); }
+              c2 {
+                rdr.fatal(#fmt["unknown character escape: %d", c2 as int]);
+              }
+            }
+        }
+        if rdr.curr != '\'' {
+            rdr.fatal("unterminated character constant");
+        }
+        rdr.bump(); // advance curr past token
+        ret token::LIT_INT(c2 as i64, ast::ty_char);
+      }
+      '"' {
+        let n = rdr.chpos;
+        rdr.bump();
+        while rdr.curr != '"' {
+            if rdr.is_eof() {
+                rdr.fatal(#fmt["unterminated double quote string: %s",
+                             rdr.get_str_from(n)]);
+            }
+
+            let ch = rdr.curr;
+            rdr.bump();
+            alt ch {
+              '\\' {
+                let escaped = rdr.curr;
+                rdr.bump();
+                alt escaped {
+                  'n' { str::push_char(accum_str, '\n'); }
+                  'r' { str::push_char(accum_str, '\r'); }
+                  't' { str::push_char(accum_str, '\t'); }
+                  '\\' { str::push_char(accum_str, '\\'); }
+                  '"' { str::push_char(accum_str, '"'); }
+                  '\n' { consume_whitespace(rdr); }
+                  'x' {
+                    str::push_char(accum_str, scan_numeric_escape(rdr, 2u));
+                  }
+                  'u' {
+                    str::push_char(accum_str, scan_numeric_escape(rdr, 4u));
+                  }
+                  'U' {
+                    str::push_char(accum_str, scan_numeric_escape(rdr, 8u));
+                  }
+                  c2 {
+                    rdr.fatal(#fmt["unknown string escape: %d", c2 as int]);
+                  }
+                }
+              }
+              _ { str::push_char(accum_str, ch); }
+            }
+        }
+        rdr.bump();
+        ret token::LIT_STR(interner::intern::<str>(*rdr.interner,
+                                                   accum_str));
+      }
+      '-' {
+        if rdr.next() == '>' {
+            rdr.bump();
+            rdr.bump();
+            ret token::RARROW;
+        } else { ret binop(rdr, token::MINUS); }
+      }
+      '&' {
+        if rdr.next() == '&' {
+            rdr.bump();
+            rdr.bump();
+            ret token::ANDAND;
+        } else { ret binop(rdr, token::AND); }
+      }
+      '|' {
+        alt rdr.next() {
+          '|' { rdr.bump(); rdr.bump(); ret token::OROR; }
+          _ { ret binop(rdr, token::OR); }
+        }
+      }
+      '+' { ret binop(rdr, token::PLUS); }
+      '*' { ret binop(rdr, token::STAR); }
+      '/' { ret binop(rdr, token::SLASH); }
+      '^' { ret binop(rdr, token::CARET); }
+      '%' { ret binop(rdr, token::PERCENT); }
+      c { rdr.fatal(#fmt["unkown start of token: %d", c as int]); }
+    }
+}
+
+enum cmnt_style {
+    isolated, // No code on either side of each line of the comment
+    trailing, // Code exists to the left of the comment
+    mixed, // Code before /* foo */ and after the comment
+    blank_line, // Just a manual blank line "\n\n", for layout
+}
+
+type cmnt = {style: cmnt_style, lines: [str], pos: uint};
+
+fn read_to_eol(rdr: reader) -> str {
+    let val = "";
+    while rdr.curr != '\n' && !rdr.is_eof() {
+        str::push_char(val, rdr.curr);
+        rdr.bump();
+    }
+    if rdr.curr == '\n' { rdr.bump(); }
+    ret val;
+}
+
+fn read_one_line_comment(rdr: reader) -> str {
+    let val = read_to_eol(rdr);
+    assert (val[0] == '/' as u8 && val[1] == '/' as u8);
+    ret val;
+}
+
+fn consume_whitespace(rdr: reader) {
+    while is_whitespace(rdr.curr) && !rdr.is_eof() { rdr.bump(); }
+}
+
+fn consume_non_eol_whitespace(rdr: reader) {
+    while is_whitespace(rdr.curr) && rdr.curr != '\n' && !rdr.is_eof() {
+        rdr.bump();
+    }
+}
+
+fn push_blank_line_comment(rdr: reader, &comments: [cmnt]) {
+    #debug(">>> blank-line comment");
+    let v: [str] = [];
+    comments += [{style: blank_line, lines: v, pos: rdr.chpos}];
+}
+
+fn consume_whitespace_counting_blank_lines(rdr: reader, &comments: [cmnt]) {
+    while is_whitespace(rdr.curr) && !rdr.is_eof() {
+        if rdr.col == 0u && rdr.curr == '\n' {
+            push_blank_line_comment(rdr, comments);
+        }
+        rdr.bump();
+    }
+}
+
+fn read_line_comments(rdr: reader, code_to_the_left: bool) -> cmnt {
+    #debug(">>> line comments");
+    let p = rdr.chpos;
+    let lines: [str] = [];
+    while rdr.curr == '/' && rdr.next() == '/' {
+        let line = read_one_line_comment(rdr);
+        log(debug, line);
+        lines += [line];
+        consume_non_eol_whitespace(rdr);
+    }
+    #debug("<<< line comments");
+    ret {style: if code_to_the_left { trailing } else { isolated },
+         lines: lines,
+         pos: p};
+}
+
+fn all_whitespace(s: str, begin: uint, end: uint) -> bool {
+    let i: uint = begin;
+    while i != end { if !is_whitespace(s[i] as char) { ret false; } i += 1u; }
+    ret true;
+}
+
+fn trim_whitespace_prefix_and_push_line(&lines: [str],
+                                        s: str, col: uint) unsafe {
+    let s1;
+    if all_whitespace(s, 0u, col) {
+        if col < str::len(s) {
+            s1 = str::slice(s, col, str::len(s));
+        } else { s1 = ""; }
+    } else { s1 = s; }
+    log(debug, "pushing line: " + s1);
+    lines += [s1];
+}
+
+fn read_block_comment(rdr: reader, code_to_the_left: bool) -> cmnt {
+    #debug(">>> block comment");
+    let p = rdr.chpos;
+    let lines: [str] = [];
+    let col: uint = rdr.col;
+    rdr.bump();
+    rdr.bump();
+    let curr_line = "/*";
+    let level: int = 1;
+    while level > 0 {
+        #debug("=== block comment level %d", level);
+        if rdr.is_eof() { rdr.fatal("unterminated block comment"); }
+        if rdr.curr == '\n' {
+            trim_whitespace_prefix_and_push_line(lines, curr_line, col);
+            curr_line = "";
+            rdr.bump();
+        } else {
+            str::push_char(curr_line, rdr.curr);
+            if rdr.curr == '/' && rdr.next() == '*' {
+                rdr.bump();
+                rdr.bump();
+                curr_line += "*";
+                level += 1;
+            } else {
+                if rdr.curr == '*' && rdr.next() == '/' {
+                    rdr.bump();
+                    rdr.bump();
+                    curr_line += "/";
+                    level -= 1;
+                } else { rdr.bump(); }
+            }
+        }
+    }
+    if str::len(curr_line) != 0u {
+        trim_whitespace_prefix_and_push_line(lines, curr_line, col);
+    }
+    let style = if code_to_the_left { trailing } else { isolated };
+    consume_non_eol_whitespace(rdr);
+    if !rdr.is_eof() && rdr.curr != '\n' && vec::len(lines) == 1u {
+        style = mixed;
+    }
+    #debug("<<< block comment");
+    ret {style: style, lines: lines, pos: p};
+}
+
+fn peeking_at_comment(rdr: reader) -> bool {
+    ret rdr.curr == '/' && rdr.next() == '/' ||
+            rdr.curr == '/' && rdr.next() == '*';
+}
+
+fn consume_comment(rdr: reader, code_to_the_left: bool, &comments: [cmnt]) {
+    #debug(">>> consume comment");
+    if rdr.curr == '/' && rdr.next() == '/' {
+        comments += [read_line_comments(rdr, code_to_the_left)];
+    } else if rdr.curr == '/' && rdr.next() == '*' {
+        comments += [read_block_comment(rdr, code_to_the_left)];
+    } else { fail; }
+    #debug("<<< consume comment");
+}
+
+fn is_lit(t: token::token) -> bool {
+    ret alt t {
+          token::LIT_INT(_, _) { true }
+          token::LIT_UINT(_, _) { true }
+          token::LIT_FLOAT(_, _) { true }
+          token::LIT_STR(_) { true }
+          token::LIT_BOOL(_) { true }
+          _ { false }
+        }
+}
+
+type lit = {lit: str, pos: uint};
+
+fn gather_comments_and_literals(cm: codemap::codemap,
+                                span_diagnostic: diagnostic::span_handler,
+                                path: str,
+                                srdr: io::reader) ->
+   {cmnts: [cmnt], lits: [lit]} {
+    let src = @str::from_bytes(srdr.read_whole_stream());
+    let itr = @interner::mk::<str>(str::hash, str::eq);
+    let rdr = new_reader(cm, span_diagnostic,
+                         codemap::new_filemap(path, src, 0u, 0u), itr);
+    let comments: [cmnt] = [];
+    let literals: [lit] = [];
+    let first_read: bool = true;
+    while !rdr.is_eof() {
+        while true {
+            let code_to_the_left = !first_read;
+            consume_non_eol_whitespace(rdr);
+            if rdr.curr == '\n' {
+                code_to_the_left = false;
+                consume_whitespace_counting_blank_lines(rdr, comments);
+            }
+            while peeking_at_comment(rdr) {
+                consume_comment(rdr, code_to_the_left, comments);
+                consume_whitespace_counting_blank_lines(rdr, comments);
+            }
+            break;
+        }
+        let tok = next_token(rdr);
+        if is_lit(tok.tok) {
+            let s = rdr.get_str_from(tok.bpos);
+            literals += [{lit: s, pos: tok.chpos}];
+            log(debug, "tok lit: " + s);
+        } else {
+            log(debug, "tok: " + token::to_str(rdr, tok.tok));
+        }
+        first_read = false;
+    }
+    ret {cmnts: comments, lits: literals};
+}
+
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End:
+//
diff --git a/src/rustc/syntax/parse/parser.rs b/src/rustc/syntax/parse/parser.rs
new file mode 100644
index 00000000000..5837baf1ff9
--- /dev/null
+++ b/src/rustc/syntax/parse/parser.rs
@@ -0,0 +1,2747 @@
+import std::{io, fs};
+import either::{left, right};
+import std::map::{hashmap, new_str_hash};
+import token::can_begin_expr;
+import codemap::{span,fss_none};
+import util::interner;
+import ast::{node_id, spanned};
+import ast_util::{mk_sp, ident_to_path};
+import front::attr;
+import lexer::reader;
+import driver::diagnostic;
+
+// Context-sensitive restrictions on what may appear in expression position.
+// Consulted by expression-parsing helpers (see permits_call / the
+// `restriction` field on the parser record).
+enum restriction {
+    UNRESTRICTED,
+    RESTRICT_STMT_EXPR,
+    RESTRICT_NO_CALL_EXPRS,
+    RESTRICT_NO_BAR_OP,
+}
+
+// Whether the file being parsed is the crate root or an ordinary source file.
+enum file_type { CRATE_FILE, SOURCE_FILE, }
+
+// Mutable state shared by every parser created within one compilation
+// session: the codemap, the node-id counter, and the diagnostic handler.
+type parse_sess = @{
+    cm: codemap::codemap,
+    mutable next_id: node_id,
+    span_diagnostic: diagnostic::span_handler,
+    // these two must be kept up to date
+    // (they are the global char/byte offsets handed to each new filemap --
+    //  see new_parser_from_file / new_parser_from_source_str)
+    mutable chpos: uint,
+    mutable byte_pos: uint
+};
+
+// Allocate the next fresh AST node id from the session-wide counter.
+fn next_node_id(sess: parse_sess) -> node_id {
+    let rv = sess.next_id;
+    sess.next_id += 1;
+    // ID 0 is reserved for the crate and doesn't actually exist in the AST
+    assert rv != 0;
+    ret rv;
+}
+
+// The parser: one current token of state plus a lookahead buffer over the
+// lexer, the shared session, and per-parse configuration.
+type parser = @{
+    sess: parse_sess,
+    cfg: ast::crate_cfg,
+    file_type: file_type,
+    mutable token: token::token,   // current (unconsumed) token
+    mutable span: span,            // span of the current token
+    mutable last_span: span,       // span of the previously consumed token
+    // Lookahead tokens; new entries are prepended, so newest-first.
+    mutable buffer: [{tok: token::token, span: span}],
+    mutable restriction: restriction,
+    reader: reader,
+    precs: @[op_spec],
+    bad_expr_words: hashmap<str, ()>
+};
+
+impl parser for parser {
+    // Advance one token, draining the lookahead buffer before touching the
+    // lexer.
+    fn bump() {
+        self.last_span = self.span;
+        if vec::len(self.buffer) == 0u {
+            let next = lexer::next_token(self.reader);
+            self.token = next.tok;
+            self.span = ast_util::mk_sp(next.chpos, self.reader.chpos);
+        } else {
+            // vec::pop removes the LAST element, i.e. the oldest buffered
+            // token (buffer is newest-first), preserving token order.
+            let next = vec::pop(self.buffer);
+            self.token = next.tok;
+            self.span = next.span;
+        }
+    }
+    // Replace the current token in place; used by expect_gt to split a
+    // `>>` / `>>>` token into successive `>`s.
+    fn swap(next: token::token, lo: uint, hi: uint) {
+        self.token = next;
+        self.span = ast_util::mk_sp(lo, hi);
+    }
+    // Peek `distance` tokens ahead without consuming anything.
+    fn look_ahead(distance: uint) -> token::token {
+        while vec::len(self.buffer) < distance {
+            let next = lexer::next_token(self.reader);
+            let sp = ast_util::mk_sp(next.chpos, self.reader.chpos);
+            self.buffer = [{tok: next.tok, span: sp}] + self.buffer;
+        }
+        // NOTE(review): with a newest-first buffer, buffer[distance - 1u]
+        // names the requested token only for distance == 1 (the only depth
+        // used in this file); deeper or mixed-depth lookahead looks like it
+        // would index the wrong entry -- TODO confirm.
+        ret self.buffer[distance - 1u].tok;
+    }
+    // Fatal diagnostic at the current span; does not return.
+    fn fatal(m: str) -> ! {
+        self.sess.span_diagnostic.span_fatal(self.span, m)
+    }
+    // Fatal diagnostic at an explicit span; does not return.
+    fn span_fatal(sp: span, m: str) -> ! {
+        self.sess.span_diagnostic.span_fatal(sp, m)
+    }
+    // Non-fatal warning at the current span.
+    fn warn(m: str) {
+        self.sess.span_diagnostic.span_warn(self.span, m)
+    }
+    // Resolve an interned string number back to its text.
+    fn get_str(i: token::str_num) -> str {
+        interner::get(*self.reader.interner, i)
+    }
+    // Fresh AST node id from the session counter.
+    fn get_id() -> node_id { next_node_id(self.sess) }
+}
+
+// Build a parser reading from a file on disk.  Registers a new filemap with
+// the session codemap at the session's current char/byte offsets; a failed
+// read is reported through the session's diagnostic handler (fatal).
+fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: str,
+                        ftype: file_type) ->
+   parser {
+    let src = alt io::read_whole_file_str(path) {
+      result::ok(src) {
+        // FIXME: This copy is unfortunate
+        @src
+      }
+      result::err(e) {
+        sess.span_diagnostic.handler().fatal(e)
+      }
+    };
+    let filemap = codemap::new_filemap(path, src,
+                                       sess.chpos, sess.byte_pos);
+    sess.cm.files += [filemap];
+    let itr = @interner::mk(str::hash, str::eq);
+    let rdr = lexer::new_reader(sess.cm, sess.span_diagnostic, filemap, itr);
+    ret new_parser(sess, cfg, rdr, ftype);
+}
+
+// Build a parser over an in-memory source string (identified by `name`,
+// optionally a substring of another file via `ss`).  Always a SOURCE_FILE.
+fn new_parser_from_source_str(sess: parse_sess, cfg: ast::crate_cfg,
+                              name: str, ss: codemap::file_substr,
+                              source: @str) -> parser {
+    let ftype = SOURCE_FILE;
+    let filemap = codemap::new_filemap_w_substr
+        (name, ss, source, sess.chpos, sess.byte_pos);
+    sess.cm.files += [filemap];
+    let itr = @interner::mk(str::hash, str::eq);
+    let rdr = lexer::new_reader(sess.cm, sess.span_diagnostic,
+                                filemap, itr);
+    ret new_parser(sess, cfg, rdr, ftype);
+}
+
+// Construct the parser record itself, priming `token`/`span` with the first
+// token lexed from `rdr`.
+fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader,
+              ftype: file_type) -> parser {
+    let tok0 = lexer::next_token(rdr);
+    let span0 = ast_util::mk_sp(tok0.chpos, rdr.chpos);
+    @{sess: sess,
+      cfg: cfg,
+      file_type: ftype,
+      mutable token: tok0.tok,
+      mutable span: span0,
+      mutable last_span: span0,
+      mutable buffer: [],
+      mutable restriction: UNRESTRICTED,
+      reader: rdr,
+      precs: prec_table(),
+      bad_expr_words: bad_expr_word_table()}
+}
+
+// These are the words that shouldn't be allowed as value identifiers,
+// because, if used at the start of a line, they will cause the line to be
+// interpreted as a specific kind of statement, which would be confusing.
+// Stored on the parser as `bad_expr_words`; consulted by check_bad_word
+// and parse_value_path.
+fn bad_expr_word_table() -> hashmap<str, ()> {
+    let words = new_str_hash();
+    for word in ["alt", "assert", "be", "break", "check", "claim",
+                 "class", "const", "cont", "copy", "do", "else", "enum",
+                 "export", "fail", "fn", "for", "if",  "iface", "impl",
+                 "import", "let", "log", "mod", "mutable", "native", "pure",
+                 "resource", "ret", "trait", "type", "unchecked", "unsafe",
+                 "while", "crust", "mut"] {
+        words.insert(word, ());
+    }
+    words
+}
+
+// Abort with an "unexpected token" diagnostic at the current span.
+fn unexpected(p: parser, t: token::token) -> ! {
+    let s: str = "unexpected token: '" + token::to_str(p.reader, t) +
+        "'";
+    p.fatal(s);
+}
+
+// Consume exactly token `t`, or abort with an expected/found diagnostic.
+fn expect(p: parser, t: token::token) {
+    if p.token == t {
+        p.bump();
+    } else {
+        let s: str = "expecting '";
+        s += token::to_str(p.reader, t);
+        s += "' but found '";
+        s += token::to_str(p.reader, p.token);
+        p.fatal(s + "'");
+    }
+}
+
+// Consume a single `>`.  The lexer may have produced `>>` (LSR) or `>>>`
+// (ASR) as one token; those are split in place via p.swap() so that nested
+// type-parameter lists (e.g. `a<b<c>>`) close one angle bracket at a time.
+fn expect_gt(p: parser) {
+    if p.token == token::GT {
+        p.bump();
+    } else if p.token == token::BINOP(token::LSR) {
+        // `>>` -> consume one `>`, leave a `>` whose span starts one char in.
+        p.swap(token::GT, p.span.lo + 1u, p.span.hi);
+    } else if p.token == token::BINOP(token::ASR) {
+        // `>>>` -> consume one `>`, leave `>>`.
+        p.swap(token::BINOP(token::LSR), p.span.lo + 1u, p.span.hi);
+    } else {
+        let s: str = "expecting ";
+        s += token::to_str(p.reader, token::GT);
+        s += ", found ";
+        s += token::to_str(p.reader, p.token);
+        p.fatal(s);
+    }
+}
+
+// Wrap `node` in a spanned record covering [lo, hi).
+fn spanned<T: copy>(lo: uint, hi: uint, node: T) -> spanned<T> {
+    ret {node: node, span: ast_util::mk_sp(lo, hi)};
+}
+
+// Consume and return an identifier token's text; fatal otherwise.
+fn parse_ident(p: parser) -> ast::ident {
+    alt p.token {
+      token::IDENT(i, _) { p.bump(); ret p.get_str(i); }
+      _ { p.fatal("expecting ident, found "
+                  + token::to_str(p.reader, p.token)); }
+    }
+}
+
+// Parse one identifier of an import list, giving it a fresh node id.
+fn parse_path_list_ident(p: parser) -> ast::path_list_ident {
+    let lo = p.span.lo;
+    let ident = parse_ident(p);
+    let hi = p.span.hi;
+    ret spanned(lo, hi, {name: ident, id: p.get_id()});
+}
+
+// Like parse_ident, but rejects reserved words (see bad_expr_word_table).
+fn parse_value_ident(p: parser) -> ast::ident {
+    check_bad_word(p);
+    ret parse_ident(p);
+}
+
+// Consume `tok` if it is the current token; report whether it was consumed.
+fn eat(p: parser, tok: token::token) -> bool {
+    ret if p.token == tok { p.bump(); true } else { false };
+}
+
+// True when the current token is the plain identifier `word` (the false
+// flag on IDENT marks a plain ident -- cf. is_plain_ident).  Does not
+// consume anything.
+fn is_word(p: parser, word: str) -> bool {
+    ret alt p.token {
+          token::IDENT(sid, false) { str::eq(word, p.get_str(sid)) }
+          _ { false }
+        };
+}
+
+// Consume the plain identifier `word` if it is the current token; report
+// whether it was consumed.
+fn eat_word(p: parser, word: str) -> bool {
+    alt p.token {
+      token::IDENT(sid, false) {
+        if str::eq(word, p.get_str(sid)) {
+            p.bump();
+            ret true;
+        } else { ret false; }
+      }
+      _ { ret false; }
+    }
+}
+
+// Require and consume the plain identifier `word`; fatal otherwise.
+fn expect_word(p: parser, word: str) {
+    if !eat_word(p, word) {
+        p.fatal("expecting " + word + ", found " +
+                    token::to_str(p.reader, p.token));
+    }
+}
+
+// Fatal error if the current token is one of the reserved statement-leading
+// words that must not appear in expression position.
+fn check_bad_word(p: parser) {
+    alt p.token {
+      token::IDENT(sid, false) {
+        let w = p.get_str(sid);
+        if p.bad_expr_words.contains_key(w) {
+            p.fatal("found " + w + " in expression position");
+        }
+      }
+      _ { }
+    }
+}
+
+// Parse the declaration part of a fn type: `(args...) -> ret`.  Purity is
+// fixed to impure_fn here; callers override it (see parse_ty_methods).
+fn parse_ty_fn(p: parser) -> ast::fn_decl {
+    fn parse_fn_input_ty(p: parser) -> ast::arg {
+        let mode = parse_arg_mode(p);
+        // An argument name is optional in fn types; present only as
+        // `ident: ty`.  An unnamed argument gets the empty string.
+        let name = if is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
+            let name = parse_value_ident(p);
+            p.bump();
+            name
+        } else { "" };
+        ret {mode: mode, ty: parse_ty(p, false), ident: name, id: p.get_id()};
+    }
+    let inputs =
+        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                  parse_fn_input_ty, p);
+    // FIXME: there's no syntax for this right now anyway
+    //  auto constrs = parse_constrs(~[], p);
+    let constrs: [@ast::constr] = [];
+    let (ret_style, ret_ty) = parse_ret_ty(p);
+    ret {inputs: inputs.node, output: ret_ty,
+         purity: ast::impure_fn, cf: ret_style,
+         constraints: constrs};
+}
+
+// Parse a `{ ... }` block of iface method signatures, each terminated by a
+// semicolon.  The parsed purity overrides the impure default of parse_ty_fn.
+fn parse_ty_methods(p: parser) -> [ast::ty_method] {
+    parse_seq(token::LBRACE, token::RBRACE, seq_sep_none(), {|p|
+        let attrs = parse_outer_attributes(p);
+        let flo = p.span.lo;
+        let pur = parse_fn_purity(p);
+        let ident = parse_method_name(p);
+        let tps = parse_ty_params(p);
+        let d = parse_ty_fn(p), fhi = p.last_span.hi;
+        expect(p, token::SEMI);
+        {ident: ident, attrs: attrs, decl: {purity: pur with d}, tps: tps,
+         span: ast_util::mk_sp(flo, fhi)}
+    }, p).node
+}
+
+// Parse an optionally-mutable type: `[mutable|mut|const] ty`.
+fn parse_mt(p: parser) -> ast::mt {
+    let mutbl = parse_mutability(p);
+    let t = parse_ty(p, false);
+    ret {ty: t, mutbl: mutbl};
+}
+
+// Parse one record-type field: `[mutability] ident : ty`.
+fn parse_ty_field(p: parser) -> ast::ty_field {
+    let lo = p.span.lo;
+    let mutbl = parse_mutability(p);
+    let id = parse_ident(p);
+    expect(p, token::COLON);
+    let ty = parse_ty(p, false);
+    ret spanned(lo, ty.span.hi, {ident: id, mt: {ty: ty, mutbl: mutbl}});
+}
+
+// if i is the jth ident in args, return j
+// otherwise, fail
+fn ident_index(p: parser, args: [ast::arg], i: ast::ident) -> uint {
+    let j = 0u;
+    for a: ast::arg in args { if a.ident == i { ret j; } j += 1u; }
+    p.fatal("Unbound variable " + i + " in constraint arg");
+}
+
+// Parse one argument of a type constraint: `*` (the whole value) or
+// `*.path` (a record field of it).
+fn parse_type_constr_arg(p: parser) -> @ast::ty_constr_arg {
+    let sp = p.span;
+    let carg = ast::carg_base;
+    expect(p, token::BINOP(token::STAR));
+    if p.token == token::DOT {
+        // "*..." notation for record fields
+        p.bump();
+        let pth = parse_path(p);
+        carg = ast::carg_ident(pth);
+    }
+    // No literals yet, I guess?
+    ret @{node: carg, span: sp};
+}
+
+// Parse one argument of a fn constraint: `*` or a named fn argument, which
+// is resolved to its positional index in `args`.
+fn parse_constr_arg(args: [ast::arg], p: parser) -> @ast::constr_arg {
+    let sp = p.span;
+    let carg = ast::carg_base;
+    if p.token == token::BINOP(token::STAR) {
+        p.bump();
+    } else {
+        let i: ast::ident = parse_value_ident(p);
+        carg = ast::carg_ident(ident_index(p, args, i));
+    }
+    ret @{node: carg, span: sp};
+}
+
+// Parse a fn-declaration constraint: `path(args...)`, where args refer to
+// the enclosing fn's parameters (`fn_args`).
+fn parse_ty_constr(fn_args: [ast::arg], p: parser) -> @ast::constr {
+    let lo = p.span.lo;
+    let path = parse_path(p);
+    let args: {node: [@ast::constr_arg], span: span} =
+        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                  {|p| parse_constr_arg(fn_args, p)}, p);
+    ret @spanned(lo, args.span.hi,
+                 {path: path, args: args.node, id: p.get_id()});
+}
+
+// Parse a constraint appearing inside a type: `path(*...)`.
+fn parse_constr_in_type(p: parser) -> @ast::ty_constr {
+    let lo = p.span.lo;
+    let path = parse_path(p);
+    let args: [@ast::ty_constr_arg] =
+        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                  parse_type_constr_arg, p).node;
+    // NOTE(review): hi is taken from the *lo* of the token after the
+    // closing paren, not from last_span.hi -- looks deliberate (end just
+    // before the next token) but worth confirming.
+    let hi = p.span.lo;
+    let tc: ast::ty_constr_ = {path: path, args: args, id: p.get_id()};
+    ret @spanned(lo, hi, tc);
+}
+
+
+// Parse one or more comma-separated constraints using `pser` for each.
+fn parse_constrs<T: copy>(pser: fn(parser) -> @ast::constr_general<T>,
+                         p: parser) ->
+   [@ast::constr_general<T>] {
+    let constrs: [@ast::constr_general<T>] = [];
+    while true {
+        let constr = pser(p);
+        constrs += [constr];
+        if p.token == token::COMMA { p.bump(); } else { break; }
+    }
+    constrs
+}
+
+// Constraint list in type position (after `:` on a record type).
+fn parse_type_constraints(p: parser) -> [@ast::ty_constr] {
+    ret parse_constrs(parse_constr_in_type, p);
+}
+
+// After a base type has been parsed, optionally parse explicit type
+// parameters: `::<T, ...>` when colons_before_params (expression position)
+// or `<T, ...>` otherwise (type position).  Only path types may take them.
+fn parse_ty_postfix(orig_t: ast::ty_, p: parser, colons_before_params: bool,
+                    lo: uint) -> @ast::ty {
+    if colons_before_params && p.token == token::MOD_SEP {
+        p.bump();
+        expect(p, token::LT);
+    } else if !colons_before_params && p.token == token::LT {
+        p.bump();
+    } else { ret @spanned(lo, p.last_span.hi, orig_t); }
+
+    // If we're here, we have explicit type parameter instantiation.
+    let seq = parse_seq_to_gt(some(token::COMMA), {|p| parse_ty(p, false)},
+                              p);
+
+    alt orig_t {
+      ast::ty_path(pth, ann) {
+        // Rebuild the path with the parsed type arguments attached.
+        ret @spanned(lo, p.last_span.hi,
+                     ast::ty_path(@spanned(lo, p.last_span.hi,
+                                           {global: pth.node.global,
+                                            idents: pth.node.idents,
+                                            types: seq}), ann));
+      }
+      _ { p.fatal("type parameter instantiation only allowed for paths"); }
+    }
+}
+
+// Parse an optional return type.  `-> !` is a diverging fn; no arrow at all
+// yields an implicit nil return with a zero-width span.
+fn parse_ret_ty(p: parser) -> (ast::ret_style, @ast::ty) {
+    ret if eat(p, token::RARROW) {
+        let lo = p.span.lo;
+        if eat(p, token::NOT) {
+            (ast::noreturn, @spanned(lo, p.last_span.hi, ast::ty_bot))
+        } else { (ast::return_val, parse_ty(p, false)) }
+    } else {
+        let pos = p.span.lo;
+        (ast::return_val, @spanned(pos, pos, ast::ty_nil))
+    }
+}
+
+// Parse a type.  `colons_before_params` is forwarded to parse_ty_postfix:
+// true in expression position (params spelled `::<T>`), false in type
+// position (`<T>`).
+fn parse_ty(p: parser, colons_before_params: bool) -> @ast::ty {
+    let lo = p.span.lo;
+
+    // `$(...)` / `$n` macro forms may stand in for a type.
+    alt have_dollar(p) {
+      some(e) {ret @spanned(lo, p.span.hi,
+                            ast::ty_mac(spanned(lo, p.span.hi, e)))}
+      none {}
+    }
+
+    let t = if p.token == token::LPAREN {
+        p.bump();
+        if p.token == token::RPAREN {
+            // `()` is nil.
+            p.bump();
+            ast::ty_nil
+        } else {
+            // One type in parens is just grouping; two or more is a tuple.
+            let ts = [parse_ty(p, false)];
+            while p.token == token::COMMA {
+                p.bump();
+                ts += [parse_ty(p, false)];
+            }
+            let t = if vec::len(ts) == 1u { ts[0].node }
+                    else { ast::ty_tup(ts) };
+            expect(p, token::RPAREN);
+            t
+        }
+    } else if p.token == token::AT {
+        p.bump();
+        ast::ty_box(parse_mt(p))        // @T shared box
+    } else if p.token == token::TILDE {
+        p.bump();
+        ast::ty_uniq(parse_mt(p))       // ~T unique box
+    } else if p.token == token::BINOP(token::STAR) {
+        p.bump();
+        ast::ty_ptr(parse_mt(p))        // *T unsafe pointer
+    } else if p.token == token::LBRACE {
+        // Record type `{f: T, ...}`, optionally followed by `: constraints`.
+        let elems =
+            parse_seq(token::LBRACE, token::RBRACE, seq_sep_opt(token::COMMA),
+                      parse_ty_field, p);
+        if vec::len(elems.node) == 0u { unexpected(p, token::RBRACE); }
+        let hi = elems.span.hi;
+
+        let t = ast::ty_rec(elems.node);
+        if p.token == token::COLON {
+            p.bump();
+            ast::ty_constr(@spanned(lo, hi, t), parse_type_constraints(p))
+        } else { t }
+    } else if p.token == token::LBRACKET {
+        expect(p, token::LBRACKET);
+        let t = ast::ty_vec(parse_mt(p));
+        expect(p, token::RBRACKET);
+        t
+    } else if eat_word(p, "fn") {
+        let proto = parse_fn_ty_proto(p);
+        alt proto {
+          ast::proto_bare { p.warn("fn is deprecated, use native fn"); }
+          _ { /* fallthrough */ }
+        }
+        ast::ty_fn(proto, parse_ty_fn(p))
+    } else if eat_word(p, "native") {
+        expect_word(p, "fn");
+        ast::ty_fn(ast::proto_bare, parse_ty_fn(p))
+    } else if p.token == token::MOD_SEP || is_ident(p.token) {
+        // Named type: a (possibly global `::`-prefixed) path.
+        let path = parse_path(p);
+        ast::ty_path(path, p.get_id())
+    } else { p.fatal("expecting type"); };
+    ret parse_ty_postfix(t, p, colons_before_params, lo);
+}
+
+// Parse an argument-passing mode sigil:
+//   &  by mutable reference, -  by move, &&  by reference,
+//   +  by copy, ++  by value; no sigil leaves the mode to inference.
+fn parse_arg_mode(p: parser) -> ast::mode {
+    if eat(p, token::BINOP(token::AND)) {
+        ast::expl(ast::by_mutbl_ref)
+    } else if eat(p, token::BINOP(token::MINUS)) {
+        ast::expl(ast::by_move)
+    } else if eat(p, token::ANDAND) {
+        ast::expl(ast::by_ref)
+    } else if eat(p, token::BINOP(token::PLUS)) {
+        if eat(p, token::BINOP(token::PLUS)) {
+            ast::expl(ast::by_val)
+        } else {
+            ast::expl(ast::by_copy)
+        }
+    } else { ast::infer(p.get_id()) }
+}
+
+// Parse one fn argument: `[mode] ident : ty` (type required).
+fn parse_arg(p: parser) -> ast::arg {
+    let m = parse_arg_mode(p);
+    let i = parse_value_ident(p);
+    expect(p, token::COLON);
+    let t = parse_ty(p, false);
+    ret {mode: m, ty: t, ident: i, id: p.get_id()};
+}
+
+// Parse one fn-block (closure) argument; the `: ty` part is optional and
+// defaults to an inferred type with a zero-width span.
+fn parse_fn_block_arg(p: parser) -> ast::arg {
+    let m = parse_arg_mode(p);
+    let i = parse_value_ident(p);
+    let t = if eat(p, token::COLON) {
+                parse_ty(p, false)
+            } else {
+                @spanned(p.span.lo, p.span.hi, ast::ty_infer)
+            };
+    ret {mode: m, ty: t, ident: i, id: p.get_id()};
+}
+
+// Parse `sep`-separated items until just before a closing angle bracket.
+// `>`, `>>` and `>>>` all terminate, since expect_gt later splits the
+// multi-character tokens.
+fn parse_seq_to_before_gt<T: copy>(sep: option<token::token>,
+                                  f: fn(parser) -> T,
+                                  p: parser) -> [T] {
+    let first = true;
+    let v = [];
+    while p.token != token::GT && p.token != token::BINOP(token::LSR) &&
+              p.token != token::BINOP(token::ASR) {
+        alt sep {
+          some(t) { if first { first = false; } else { expect(p, t); } }
+          _ { }
+        }
+        v += [f(p)];
+    }
+
+    ret v;
+}
+
+// As above, but also consume the closing `>`.
+fn parse_seq_to_gt<T: copy>(sep: option<token::token>,
+                           f: fn(parser) -> T, p: parser) -> [T] {
+    let v = parse_seq_to_before_gt(sep, f, p);
+    expect_gt(p);
+
+    ret v;
+}
+
+// Parse a complete `< ... >` sequence, returning it with its span.
+fn parse_seq_lt_gt<T: copy>(sep: option<token::token>,
+                           f: fn(parser) -> T,
+                           p: parser) -> spanned<[T]> {
+    let lo = p.span.lo;
+    expect(p, token::LT);
+    let result = parse_seq_to_before_gt::<T>(sep, f, p);
+    let hi = p.span.hi;
+    expect_gt(p);
+    ret spanned(lo, hi, result);
+}
+
+// Parse items up to (and consume) the closing token `ket`.
+fn parse_seq_to_end<T: copy>(ket: token::token, sep: seq_sep,
+                            f: fn(parser) -> T, p: parser) -> [T] {
+    let val = parse_seq_to_before_end(ket, sep, f, p);
+    p.bump();
+    ret val;
+}
+
+// Separator policy for bracketed sequences.
+type seq_sep = {
+    sep: option<token::token>,
+    trailing_opt: bool   // is trailing separator optional?
+};
+
+// Required separator, no trailing separator allowed.
+fn seq_sep(t: token::token) -> seq_sep {
+    ret {sep: option::some(t), trailing_opt: false};
+}
+// Required separator, trailing separator tolerated.
+fn seq_sep_opt(t: token::token) -> seq_sep {
+    ret {sep: option::some(t), trailing_opt: true};
+}
+// No separator between items at all.
+fn seq_sep_none() -> seq_sep {
+    ret {sep: option::none, trailing_opt: false};
+}
+
+// Core sequence loop: parse items separated per `sep` until `ket` is the
+// current token (which is NOT consumed).
+fn parse_seq_to_before_end<T: copy>(ket: token::token,
+                                   sep: seq_sep,
+                                   f: fn(parser) -> T, p: parser) -> [T] {
+    let first: bool = true;
+    let v: [T] = [];
+    while p.token != ket {
+        alt sep.sep {
+          some(t) { if first { first = false; } else { expect(p, t); } }
+          _ { }
+        }
+        // After a separator, an immediate `ket` is a trailing separator.
+        if sep.trailing_opt && p.token == ket { break; }
+        v += [f(p)];
+    }
+    ret v;
+}
+
+
+// Parse a complete bracketed sequence `bra ... ket`, consuming both
+// delimiters and returning the items with the bracketed span.
+fn parse_seq<T: copy>(bra: token::token, ket: token::token,
+                     sep: seq_sep, f: fn(parser) -> T,
+                     p: parser) -> spanned<[T]> {
+    let lo = p.span.lo;
+    expect(p, bra);
+    let result = parse_seq_to_before_end::<T>(ket, sep, f, p);
+    let hi = p.span.hi;
+    p.bump();
+    ret spanned(lo, hi, result);
+}
+
+// If the current token starts a `$` macro form, consume it and return the
+// corresponding mac_ node: `$NUM` is a macro variable, `$( expr )` an
+// anti-quotation.  Otherwise consume nothing and return none.
+fn have_dollar(p: parser) -> option::t<ast::mac_> {
+    alt p.token {
+      token::DOLLAR_NUM(num) {
+        p.bump();
+        some(ast::mac_var(num))
+      }
+      token::DOLLAR_LPAREN {
+        let lo = p.span.lo;
+        p.bump();
+        let e = parse_expr(p);
+        expect(p, token::RPAREN);
+        let hi = p.last_span.hi;
+        some(ast::mac_aq(ast_util::mk_sp(lo,hi), e))
+      }
+      _ {none}
+    }
+}
+
+// Convert an already-consumed literal token into an AST literal.  `(`
+// appears here because `()` is the nil literal; anything else is fatal.
+fn lit_from_token(p: parser, tok: token::token) -> ast::lit_ {
+    alt tok {
+      token::LIT_INT(i, it) { ast::lit_int(i, it) }
+      token::LIT_UINT(u, ut) { ast::lit_uint(u, ut) }
+      token::LIT_FLOAT(s, ft) { ast::lit_float(p.get_str(s), ft) }
+      token::LIT_STR(s) { ast::lit_str(p.get_str(s)) }
+      token::LPAREN { expect(p, token::RPAREN); ast::lit_nil }
+      _ { unexpected(p, tok); }
+    }
+}
+
+// Parse a literal.  `true`/`false` are identifiers at the token level, so
+// they are handled here rather than in lit_from_token.
+fn parse_lit(p: parser) -> ast::lit {
+    let sp = p.span;
+    let lit = if eat_word(p, "true") {
+        ast::lit_bool(true)
+    } else if eat_word(p, "false") {
+        ast::lit_bool(false)
+    } else {
+        let tok = p.token;
+        p.bump();
+        lit_from_token(p, tok)
+    };
+    ret {node: lit, span: sp};
+}
+
+// Token-level test: is `t` any identifier?
+fn is_ident(t: token::token) -> bool {
+    alt t { token::IDENT(_, _) { ret true; } _ { } }
+    ret false;
+}
+
+// Is the current token a plain (false-flagged) identifier?
+fn is_plain_ident(p: parser) -> bool {
+    ret alt p.token { token::IDENT(_, false) { true } _ { false } };
+}
+
+// Parse a `::`-separated path (no type parameters).  The lookahead guard
+// stops before a trailing `::<`, leaving the type-argument list to
+// parse_path_and_ty_param_substs.
+fn parse_path(p: parser) -> @ast::path {
+    let lo = p.span.lo;
+    let global = eat(p, token::MOD_SEP), ids = [parse_ident(p)];
+    while p.look_ahead(1u) != token::LT && eat(p, token::MOD_SEP) {
+        ids += [parse_ident(p)];
+    }
+    ret @spanned(lo, p.last_span.hi,
+                 {global: global, idents: ids, types: []});
+}
+
+// Parse a path in value position, rejecting paths whose final segment is a
+// reserved statement-leading word (cf. bad_expr_word_table).
+fn parse_value_path(p: parser) -> @ast::path {
+    let pt = parse_path(p);
+    let last_word = pt.node.idents[vec::len(pt.node.idents)-1u];
+    if p.bad_expr_words.contains_key(last_word) {
+        p.fatal("found " + last_word + " in expression position");
+    }
+    pt
+}
+
+// Parse a path optionally followed by explicit type arguments: `path::<T>`
+// when `colons` (expression position), `path<T>` otherwise.
+fn parse_path_and_ty_param_substs(p: parser, colons: bool) -> @ast::path {
+    let lo = p.span.lo;
+    let path = parse_path(p);
+    let b = if colons {
+                eat(p, token::MOD_SEP)
+            } else {
+                p.token == token::LT
+            };
+    if b {
+        let seq = parse_seq_lt_gt(some(token::COMMA),
+                                  {|p| parse_ty(p, false)}, p);
+        // Functional record update: same path, types replaced.
+        @spanned(lo, seq.span.hi, {types: seq.node with path.node})
+    } else { path }
+}
+
+// Parse an optional mutability keyword; `mutable` and `mut` are synonyms,
+// `const` is the read-only mode, default is immutable.
+fn parse_mutability(p: parser) -> ast::mutability {
+    if eat_word(p, "mutable") {
+        ast::m_mutbl
+    } else if eat_word(p, "mut") {
+        ast::m_mutbl
+    } else if eat_word(p, "const") {
+        ast::m_const
+    } else {
+        ast::m_imm
+    }
+}
+
+// Parse one record-expression field: `[mutability] ident <sep> expr`, where
+// the caller chooses the separator token (e.g. COLON).
+fn parse_field(p: parser, sep: token::token) -> ast::field {
+    let lo = p.span.lo;
+    let m = parse_mutability(p);
+    let i = parse_ident(p);
+    expect(p, sep);
+    let e = parse_expr(p);
+    ret spanned(lo, e.span.hi, {mutbl: m, ident: i, expr: e});
+}
+
+// Allocate an expression node with a fresh id spanning [lo, hi).
+fn mk_expr(p: parser, lo: uint, hi: uint, node: ast::expr_) -> @ast::expr {
+    ret @{id: p.get_id(), node: node, span: ast_util::mk_sp(lo, hi)};
+}
+
+// Allocate a macro-invocation expression; the mac node and the expression
+// share the same span.
+fn mk_mac_expr(p: parser, lo: uint, hi: uint, m: ast::mac_) -> @ast::expr {
+    ret @{id: p.get_id(),
+          node: ast::expr_mac({node: m, span: ast_util::mk_sp(lo, hi)}),
+          span: ast_util::mk_sp(lo, hi)};
+}
+
+// Does `t` begin a fn-block: `|` or `||`?
+fn is_bar(t: token::token) -> bool {
+    alt t { token::BINOP(token::OR) | token::OROR { true } _ { false } }
+}
+
+// Build a u32 literal expression at the current span.
+fn mk_lit_u32(p: parser, i: u32) -> @ast::expr {
+    let span = p.span;
+    let lv_lit = @{node: ast::lit_uint(i as u64, ast::ty_u32),
+                   span: span};
+
+    ret @{id: p.get_id(), node: ast::expr_lit(lv_lit), span: span};
+}
+
+// We don't allow single-entry tuples in the true AST; that indicates a
+// parenthesized expression.  However, we preserve them temporarily while
+// parsing because `(while{...})+3` parses differently from `while{...}+3`.
+//
+// To reflect the fact that the @ast::expr is not a true expr that should be
+// part of the AST, we wrap such expressions in the pexpr enum.  They
+// can then be converted to true expressions by a call to `to_expr()`.
+enum pexpr {
+    pexpr(@ast::expr),
+}
+
+// Wrap a freshly built expression in the pexpr marker.
+fn mk_pexpr(p: parser, lo: uint, hi: uint, node: ast::expr_) -> pexpr {
+    ret pexpr(mk_expr(p, lo, hi, node));
+}
+
+// Unwrap a pexpr, collapsing a one-element tuple (i.e. a parenthesized
+// expression) back to its inner expression.  (`e.node` / `*e` rely on this
+// dialect's auto-deref of single-variant enum wrappers.)
+fn to_expr(e: pexpr) -> @ast::expr {
+    alt e.node {
+      ast::expr_tup(es) if vec::len(es) == 1u { es[0u] }
+      _ { *e }
+    }
+}
+
+// Parse a "bottom" (primary) expression: the highest-precedence expression
+// forms, dispatched on the leading token or keyword.  Several arms return
+// early with a fully built pexpr; the rest set `ex`/`hi` and fall through
+// to the single mk_pexpr at the bottom.
+fn parse_bottom_expr(p: parser) -> pexpr {
+    let lo = p.span.lo;
+    let hi = p.span.hi;
+
+    let ex: ast::expr_;
+
+    // `$n` / `$( ... )` macro forms.
+    alt have_dollar(p) {
+      some(x) {ret pexpr(mk_mac_expr(p, lo, p.span.hi, x));}
+      _ {}
+    }
+
+    if p.token == token::LPAREN {
+        p.bump();
+        if p.token == token::RPAREN {
+            // `()` -- the nil literal.
+            hi = p.span.hi;
+            p.bump();
+            let lit = @spanned(lo, hi, ast::lit_nil);
+            ret mk_pexpr(p, lo, hi, ast::expr_lit(lit));
+        }
+        let es = [parse_expr(p)];
+        while p.token == token::COMMA { p.bump(); es += [parse_expr(p)]; }
+        hi = p.span.hi;
+        expect(p, token::RPAREN);
+
+        // Note: we retain the expr_tup() even for simple
+        // parenthesized expressions, but only for a "little while".
+        // This is so that wrappers around parse_bottom_expr()
+        // can tell whether the expression was parenthesized or not,
+        // which affects expr_is_complete().
+        ret mk_pexpr(p, lo, hi, ast::expr_tup(es));
+    } else if p.token == token::LBRACE {
+        p.bump();
+        // `{` is ambiguous: record expression, fn-block, or plain block.
+        if is_word(p, "mut") || is_word(p, "mutable") ||
+               is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
+            // Record literal, with optional `with base` and an optional
+            // trailing comma before `}`.
+            let fields = [parse_field(p, token::COLON)];
+            let base = none;
+            while p.token != token::RBRACE {
+                if eat_word(p, "with") { base = some(parse_expr(p)); break; }
+                expect(p, token::COMMA);
+                if p.token == token::RBRACE {
+                    // record ends by an optional trailing comma
+                    break;
+                }
+                fields += [parse_field(p, token::COLON)];
+            }
+            hi = p.span.hi;
+            expect(p, token::RBRACE);
+            ex = ast::expr_rec(fields, base);
+        } else if is_bar(p.token) {
+            // `{|args| ...}` fn-block.
+            ret pexpr(parse_fn_block_expr(p));
+        } else {
+            // Ordinary block expression.
+            let blk = parse_block_tail(p, lo, ast::default_blk);
+            ret mk_pexpr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
+        }
+    } else if eat_word(p, "if") {
+        ret pexpr(parse_if_expr(p));
+    } else if eat_word(p, "for") {
+        ret pexpr(parse_for_expr(p));
+    } else if eat_word(p, "while") {
+        ret pexpr(parse_while_expr(p));
+    } else if eat_word(p, "do") {
+        ret pexpr(parse_do_while_expr(p));
+    } else if eat_word(p, "alt") {
+        ret pexpr(parse_alt_expr(p));
+    } else if eat_word(p, "fn") {
+        let proto = parse_fn_ty_proto(p);
+        alt proto {
+          ast::proto_bare { p.fatal("fn expr are deprecated, use fn@"); }
+          ast::proto_any { p.fatal("fn* cannot be used in an expression"); }
+          _ { /* fallthrough */ }
+        }
+        ret pexpr(parse_fn_expr(p, proto));
+    } else if eat_word(p, "unchecked") {
+        ret pexpr(parse_block_expr(p, lo, ast::unchecked_blk));
+    } else if eat_word(p, "unsafe") {
+        ret pexpr(parse_block_expr(p, lo, ast::unsafe_blk));
+    } else if p.token == token::LBRACKET {
+        // Vector literal `[mutability? e, ...]`.
+        p.bump();
+        let mutbl = parse_mutability(p);
+        let es =
+            parse_seq_to_end(token::RBRACKET, seq_sep(token::COMMA),
+                             parse_expr, p);
+        ex = ast::expr_vec(es, mutbl);
+    } else if p.token == token::POUND_LT {
+        // `#<ty>` embedded-type macro argument.
+        p.bump();
+        let ty = parse_ty(p, false);
+        expect(p, token::GT);
+
+        /* hack: early return to take advantage of specialized function */
+        ret pexpr(mk_mac_expr(p, lo, p.span.hi,
+                              ast::mac_embed_type(ty)));
+    } else if p.token == token::POUND_LBRACE {
+        // `#{ ... }` embedded-block macro argument.
+        p.bump();
+        let blk = ast::mac_embed_block(
+            parse_block_tail(p, lo, ast::default_blk));
+        ret pexpr(mk_mac_expr(p, lo, p.span.hi, blk));
+    } else if p.token == token::ELLIPSIS {
+        p.bump();
+        ret pexpr(mk_mac_expr(p, lo, p.span.hi, ast::mac_ellipsis));
+    } else if eat_word(p, "bind") {
+        // `bind f(args-or-holes)` partial application; the callee itself
+        // must not be a call, hence the restriction.
+        let e = parse_expr_res(p, RESTRICT_NO_CALL_EXPRS);
+        let es =
+            parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                      parse_expr_or_hole, p);
+        hi = es.span.hi;
+        ex = ast::expr_bind(e, es.node);
+    } else if p.token == token::POUND {
+        // `#name[...]` / `#name(...)` syntax extension.
+        let ex_ext = parse_syntax_ext(p);
+        hi = ex_ext.span.hi;
+        ex = ex_ext.node;
+    } else if eat_word(p, "fail") {
+        // `fail` with an optional message expression.
+        if can_begin_expr(p.token) {
+            let e = parse_expr(p);
+            hi = e.span.hi;
+            ex = ast::expr_fail(some(e));
+        } else { ex = ast::expr_fail(none); }
+    } else if eat_word(p, "log") {
+        // Bare `log(lvl, e)`; level 2 is recorded as the log "kind" here --
+        // presumably the plain-log marker, TODO confirm against ast defs.
+        expect(p, token::LPAREN);
+        let lvl = parse_expr(p);
+        expect(p, token::COMMA);
+        let e = parse_expr(p);
+        ex = ast::expr_log(2, lvl, e);
+        hi = p.span.hi;
+        expect(p, token::RPAREN);
+    } else if eat_word(p, "assert") {
+        let e = parse_expr(p);
+        ex = ast::expr_assert(e);
+        hi = e.span.hi;
+    } else if eat_word(p, "check") {
+        /* Should be a predicate (pure boolean function) applied to
+           arguments that are all either slot variables or literals.
+           but the typechecker enforces that. */
+        let e = parse_expr(p);
+        hi = e.span.hi;
+        ex = ast::expr_check(ast::checked_expr, e);
+    } else if eat_word(p, "claim") {
+        /* Same rules as check, except that if check-claims
+         is enabled (a command-line flag), then the parser turns
+        claims into check */
+
+        let e = parse_expr(p);
+        hi = e.span.hi;
+        ex = ast::expr_check(ast::claimed_expr, e);
+    } else if eat_word(p, "ret") {
+        // `ret` with an optional value expression.
+        if can_begin_expr(p.token) {
+            let e = parse_expr(p);
+            hi = e.span.hi;
+            ex = ast::expr_ret(some(e));
+        } else { ex = ast::expr_ret(none); }
+    } else if eat_word(p, "break") {
+        ex = ast::expr_break;
+        hi = p.span.hi;
+    } else if eat_word(p, "cont") {
+        ex = ast::expr_cont;
+        hi = p.span.hi;
+    } else if eat_word(p, "be") {
+        // `be` tail call: operand must itself be a call expression.
+        let e = parse_expr(p);
+
+        // FIXME: Is this the right place for this check?
+        if /*check*/ast_util::is_call_expr(e) {
+            hi = e.span.hi;
+            ex = ast::expr_be(e);
+        } else { p.fatal("Non-call expression in tail call"); }
+    } else if eat_word(p, "copy") {
+        let e = parse_expr(p);
+        ex = ast::expr_copy(e);
+        hi = e.span.hi;
+    } else if p.token == token::MOD_SEP ||
+                  is_ident(p.token) && !is_word(p, "true") &&
+                      !is_word(p, "false") {
+        // A path expression (true/false fall through to the literal arm).
+        check_bad_word(p);
+        let pth = parse_path_and_ty_param_substs(p, true);
+        hi = pth.span.hi;
+        ex = ast::expr_path(pth);
+    } else {
+        // Anything else must be a literal.
+        let lit = parse_lit(p);
+        hi = lit.span.hi;
+        ex = ast::expr_lit(@lit);
+    }
+    ret mk_pexpr(p, lo, hi, ex);
+}
+
+// Parse a `{ ... }` block as an expression with the given check mode
+// (default / unchecked / unsafe).
+fn parse_block_expr(p: parser,
+                    lo: uint,
+                    blk_mode: ast::blk_check_mode) -> @ast::expr {
+    expect(p, token::LBRACE);
+    let blk = parse_block_tail(p, lo, blk_mode);
+    ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
+}
+
+// Parse a `#name...` syntax extension; the `#` is consumed here.
+fn parse_syntax_ext(p: parser) -> @ast::expr {
+    let lo = p.span.lo;
+    expect(p, token::POUND);
+    ret parse_syntax_ext_naked(p, lo);
+}
+
+// Parse a syntax extension after the `#`: an extender path, then optionally
+// a parenthesized/bracketed argument list, then optionally a raw `{...}`
+// body whose tokens are skipped (only its span is kept).
+fn parse_syntax_ext_naked(p: parser, lo: uint) -> @ast::expr {
+    alt p.token {
+      token::IDENT(_, _) {}
+      _ { p.fatal("expected a syntax expander name"); }
+    }
+    let pth = parse_path(p);
+    //temporary for a backwards-compatible cycle:
+    let sep = seq_sep(token::COMMA);
+    let e = none;
+    if (p.token == token::LPAREN || p.token == token::LBRACKET) {
+        // Arguments are collected into a single immutable vec expression.
+        let es =
+            if p.token == token::LPAREN {
+                parse_seq(token::LPAREN, token::RPAREN,
+                          sep, parse_expr, p)
+            } else {
+                parse_seq(token::LBRACKET, token::RBRACKET,
+                          sep, parse_expr, p)
+            };
+        let hi = es.span.hi;
+        e = some(mk_expr(p, es.span.lo, hi,
+                         ast::expr_vec(es.node, ast::m_imm)));
+    }
+    let b = none;
+    if p.token == token::LBRACE {
+        // Skip the body by brace counting; the body is re-lexed later from
+        // the recorded span.
+        p.bump();
+        let lo = p.span.lo;
+        let depth = 1u;
+        while (depth > 0u) {
+            alt (p.token) {
+              token::LBRACE {depth += 1u;}
+              token::RBRACE {depth -= 1u;}
+              token::EOF {p.fatal("unexpected EOF in macro body");}
+              _ {}
+            }
+            p.bump();
+        }
+        let hi = p.last_span.lo;
+        b = some({span: mk_sp(lo,hi)});
+    }
+    ret mk_mac_expr(p, lo, p.span.hi, ast::mac_invoc(pth, e, b));
+}
+
+// Parse a primary ("bottom") expression and then any trailing postfix
+// operators (field access, call, index) attached to it.
+fn parse_dot_or_call_expr(p: parser) -> pexpr {
+    let b = parse_bottom_expr(p);
+    parse_dot_or_call_expr_with(p, b)
+}
+
+// True unless the current parser restriction forbids call expressions.
+fn permits_call(p: parser) -> bool {
+    ret p.restriction != RESTRICT_NO_CALL_EXPRS;
+}
+
+// Given an already-parsed expression `e0`, repeatedly absorb trailing
+// postfix forms: `.field` (with optional `::<tys>`), call `(...)`,
+// trailing-block call `{|| ...}`, and index `[...]`. Loops until no
+// postfix form applies or the expression is complete under the current
+// statement restriction.
+fn parse_dot_or_call_expr_with(p: parser, e0: pexpr) -> pexpr {
+    let e = e0;
+    let lo = e.span.lo;
+    let hi = e.span.hi;
+    while true {
+        // expr.f
+        if eat(p, token::DOT) {
+            alt p.token {
+              token::IDENT(i, _) {
+                hi = p.span.hi;
+                p.bump();
+                // Optional explicit type arguments: expr.f::<T, ...>
+                let tys = if eat(p, token::MOD_SEP) {
+                    expect(p, token::LT);
+                    parse_seq_to_gt(some(token::COMMA),
+                                    {|p| parse_ty(p, false)}, p)
+                } else { [] };
+                e = mk_pexpr(p, lo, hi,
+                             ast::expr_field(to_expr(e),
+                                             p.get_str(i),
+                                             tys));
+              }
+              t { unexpected(p, t); }
+            }
+            cont;
+        }
+        if expr_is_complete(p, e) { break; }
+        alt p.token {
+          // expr(...)
+          token::LPAREN if permits_call(p) {
+            // Arguments may be '_' holes (parse_expr_or_hole yields none);
+            // any hole turns the call into an expr_bind instead of a call.
+            let es_opt =
+                parse_seq(token::LPAREN, token::RPAREN,
+                          seq_sep(token::COMMA), parse_expr_or_hole, p);
+            hi = es_opt.span.hi;
+
+            let nd =
+                if vec::any(es_opt.node, {|e| option::is_none(e) }) {
+                    ast::expr_bind(to_expr(e), es_opt.node)
+                } else {
+                    let es = vec::map(es_opt.node) {|e| option::get(e) };
+                    ast::expr_call(to_expr(e), es, false)
+                };
+            e = mk_pexpr(p, lo, hi, nd);
+          }
+
+          // expr {|| ... }
+          token::LBRACE if is_bar(p.look_ahead(1u)) && permits_call(p) {
+            p.bump();
+            let blk = parse_fn_block_expr(p);
+            alt e.node {
+              // If the preceding expression was already a call, append the
+              // block as its final argument (the `true` flag marks a
+              // trailing-block call).
+              ast::expr_call(f, args, false) {
+                e = pexpr(@{node: ast::expr_call(f, args + [blk], true)
+                            with *to_expr(e)});
+              }
+              _ {
+                e = mk_pexpr(p, lo, p.last_span.hi,
+                            ast::expr_call(to_expr(e), [blk], true));
+              }
+            }
+          }
+
+          // expr[...]
+          token::LBRACKET {
+            p.bump();
+            let ix = parse_expr(p);
+            hi = ix.span.hi;
+            expect(p, token::RBRACKET);
+            p.get_id(); // see ast_util::op_expr_callee_id
+            e = mk_pexpr(p, lo, hi, ast::expr_index(to_expr(e), ix));
+          }
+
+          _ { ret e; }
+        }
+    }
+    ret e;
+}
+
+// Parse a prefix-operator expression: `!e`, `-e`, `*e`, `@[mut] e`, or
+// `~[mut] e`. Prefix operators are right-recursive (each one parses
+// another prefix expression as its operand); anything else falls through
+// to parse_dot_or_call_expr.
+fn parse_prefix_expr(p: parser) -> pexpr {
+    let lo = p.span.lo;
+    let hi = p.span.hi;
+
+    let ex;
+    alt p.token {
+      token::NOT {
+        p.bump();
+        let e = to_expr(parse_prefix_expr(p));
+        hi = e.span.hi;
+        p.get_id(); // see ast_util::op_expr_callee_id
+        ex = ast::expr_unary(ast::not, e);
+      }
+      token::BINOP(b) {
+        // MINUS and STAR double as prefix operators (negation, deref);
+        // other binary operators cannot start an expression here.
+        alt b {
+          token::MINUS {
+            p.bump();
+            let e = to_expr(parse_prefix_expr(p));
+            hi = e.span.hi;
+            p.get_id(); // see ast_util::op_expr_callee_id
+            ex = ast::expr_unary(ast::neg, e);
+          }
+          token::STAR {
+            p.bump();
+            let e = to_expr(parse_prefix_expr(p));
+            hi = e.span.hi;
+            ex = ast::expr_unary(ast::deref, e);
+          }
+          _ { ret parse_dot_or_call_expr(p); }
+        }
+      }
+      token::AT {
+        // Shared box allocation, with optional mutability qualifier.
+        p.bump();
+        let m = parse_mutability(p);
+        let e = to_expr(parse_prefix_expr(p));
+        hi = e.span.hi;
+        ex = ast::expr_unary(ast::box(m), e);
+      }
+      token::TILDE {
+        // Unique box allocation, with optional mutability qualifier.
+        p.bump();
+        let m = parse_mutability(p);
+        let e = to_expr(parse_prefix_expr(p));
+        hi = e.span.hi;
+        ex = ast::expr_unary(ast::uniq(m), e);
+      }
+      _ { ret parse_dot_or_call_expr(p); }
+    }
+    ret mk_pexpr(p, lo, hi, ex);
+}
+
+// One entry of the binary-operator precedence table: the token that
+// triggers the operator, the AST operator it maps to, and its precedence
+// (higher binds tighter).
+type op_spec = {tok: token::token, op: ast::binop, prec: int};
+
+
+// FIXME make this a const, don't store it in parser state
+// Build the binary-operator precedence table consumed by
+// parse_more_binops. 'as' is not in the table; it is special-cased at
+// precedence 12 (see as_prec).
+fn prec_table() -> @[op_spec] {
+    ret @[// 'as' sits between here with 12
+          {tok: token::BINOP(token::STAR), op: ast::mul, prec: 11},
+          {tok: token::BINOP(token::SLASH), op: ast::div, prec: 11},
+          {tok: token::BINOP(token::PERCENT), op: ast::rem, prec: 11},
+          {tok: token::BINOP(token::PLUS), op: ast::add, prec: 10},
+          {tok: token::BINOP(token::MINUS), op: ast::subtract, prec: 10},
+          {tok: token::BINOP(token::LSL), op: ast::lsl, prec: 9},
+          {tok: token::BINOP(token::LSR), op: ast::lsr, prec: 9},
+          {tok: token::BINOP(token::ASR), op: ast::asr, prec: 9},
+          {tok: token::BINOP(token::AND), op: ast::bitand, prec: 8},
+          {tok: token::BINOP(token::CARET), op: ast::bitxor, prec: 7},
+          {tok: token::BINOP(token::OR), op: ast::bitor, prec: 6},
+          {tok: token::LT, op: ast::lt, prec: 4},
+          {tok: token::LE, op: ast::le, prec: 4},
+          {tok: token::GE, op: ast::ge, prec: 4},
+          {tok: token::GT, op: ast::gt, prec: 4},
+          {tok: token::EQEQ, op: ast::eq, prec: 3},
+          {tok: token::NE, op: ast::ne, prec: 3},
+          {tok: token::ANDAND, op: ast::and, prec: 2},
+          {tok: token::OROR, op: ast::or, prec: 1}];
+}
+
+// Parse a full binary-operator expression, starting from minimum
+// precedence 0.
+fn parse_binops(p: parser) -> @ast::expr {
+    ret parse_more_binops(p, parse_prefix_expr(p), 0);
+}
+
+// Precedence assigned to unary operators (tighter than any binop).
+const unop_prec: int = 100;
+
+// Precedence of the 'as' cast operator; sits above the table's max of 11.
+const as_prec: int = 12;
+
+// Precedence-climbing loop over binary operators: given a parsed
+// left-hand side, keep consuming operators whose precedence exceeds
+// `min_prec`, recursing for each right-hand side. 'as' casts are handled
+// outside the table at as_prec.
+fn parse_more_binops(p: parser, plhs: pexpr, min_prec: int) ->
+   @ast::expr {
+    let lhs = to_expr(plhs);
+    if expr_is_complete(p, plhs) { ret lhs; }
+    let peeked = p.token;
+    // Under RESTRICT_NO_BAR_OP, a bare '|' is not a binary operator
+    // (e.g. it delimits something else in the surrounding context).
+    if peeked == token::BINOP(token::OR) &&
+       p.restriction == RESTRICT_NO_BAR_OP { ret lhs; }
+    for cur: op_spec in *p.precs {
+        if cur.prec > min_prec && cur.tok == peeked {
+            p.bump();
+            let expr = parse_prefix_expr(p);
+            // Right-hand side absorbs any tighter-binding operators first.
+            let rhs = parse_more_binops(p, expr, cur.prec);
+            p.get_id(); // see ast_util::op_expr_callee_id
+            let bin = mk_pexpr(p, lhs.span.lo, rhs.span.hi,
+                              ast::expr_binary(cur.op, lhs, rhs));
+            ret parse_more_binops(p, bin, min_prec);
+        }
+    }
+    if as_prec > min_prec && eat_word(p, "as") {
+        let rhs = parse_ty(p, true);
+        let _as =
+            mk_pexpr(p, lhs.span.lo, rhs.span.hi, ast::expr_cast(lhs, rhs));
+        ret parse_more_binops(p, _as, min_prec);
+    }
+    ret lhs;
+}
+
+// Parse an assignment-level expression: a binop expression optionally
+// followed by '=', a compound 'op=' assignment, '<-' (move), or '<->'
+// (swap). Plain binop expressions fall through unchanged.
+fn parse_assign_expr(p: parser) -> @ast::expr {
+    let lo = p.span.lo;
+    let lhs = parse_binops(p);
+    alt p.token {
+      token::EQ {
+        p.bump();
+        let rhs = parse_expr(p);
+        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign(lhs, rhs));
+      }
+      token::BINOPEQ(op) {
+        p.bump();
+        let rhs = parse_expr(p);
+        // Map the compound-assignment token to its AST binop; `aop`'s
+        // initial value is overwritten by every arm below.
+        let aop = ast::add;
+        alt op {
+          token::PLUS { aop = ast::add; }
+          token::MINUS { aop = ast::subtract; }
+          token::STAR { aop = ast::mul; }
+          token::SLASH { aop = ast::div; }
+          token::PERCENT { aop = ast::rem; }
+          token::CARET { aop = ast::bitxor; }
+          token::AND { aop = ast::bitand; }
+          token::OR { aop = ast::bitor; }
+          token::LSL { aop = ast::lsl; }
+          token::LSR { aop = ast::lsr; }
+          token::ASR { aop = ast::asr; }
+        }
+        p.get_id(); // see ast_util::op_expr_callee_id
+        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign_op(aop, lhs, rhs));
+      }
+      token::LARROW {
+        p.bump();
+        let rhs = parse_expr(p);
+        ret mk_expr(p, lo, rhs.span.hi, ast::expr_move(lhs, rhs));
+      }
+      token::DARROW {
+        p.bump();
+        let rhs = parse_expr(p);
+        ret mk_expr(p, lo, rhs.span.hi, ast::expr_swap(lhs, rhs));
+      }
+      _ {/* fall through */ }
+    }
+    ret lhs;
+}
+
+// Shared worker for 'if' parsing: parses the condition, the 'then'
+// block, and an optional 'else' tail, returning the pieces plus the
+// overall span bounds for the caller to wrap in the right AST node.
+fn parse_if_expr_1(p: parser) ->
+   {cond: @ast::expr,
+    then: ast::blk,
+    els: option<@ast::expr>,
+    lo: uint,
+    hi: uint} {
+    let lo = p.last_span.lo;
+    let cond = parse_expr(p);
+    let thn = parse_block(p);
+    let els: option<@ast::expr> = none;
+    let hi = thn.span.hi;
+    if eat_word(p, "else") {
+        let elexpr = parse_else_expr(p);
+        els = some(elexpr);
+        hi = elexpr.span.hi;
+    }
+    ret {cond: cond, then: thn, els: els, lo: lo, hi: hi};
+}
+
+// Parse an 'if' expression, distinguishing 'if check ...' (expr_if_check)
+// from the ordinary form (expr_if). The 'if' keyword has already been
+// consumed by the caller.
+fn parse_if_expr(p: parser) -> @ast::expr {
+    if eat_word(p, "check") {
+        let q = parse_if_expr_1(p);
+        ret mk_expr(p, q.lo, q.hi, ast::expr_if_check(q.cond, q.then, q.els));
+    } else {
+        let q = parse_if_expr_1(p);
+        ret mk_expr(p, q.lo, q.hi, ast::expr_if(q.cond, q.then, q.els));
+    }
+}
+
+// Parses:
+//
+//   CC := [copy ID*; move ID*]
+//
+// where any part is optional and trailing ; is permitted.
+// Returns the (possibly empty) lists of copied and moved capture items.
+fn parse_capture_clause(p: parser) -> @ast::capture_clause {
+    // After an ident list, either a ';' separator or the closing ']'
+    // must follow.
+    fn expect_opt_trailing_semi(p: parser) {
+        if !eat(p, token::SEMI) {
+            if p.token != token::RBRACKET {
+                p.fatal("expecting ; or ]");
+            }
+        }
+    }
+
+    // Consume a comma-separated run of identifiers, building a capture
+    // item (with fresh node id and span) for each; stops at the first
+    // non-identifier token.
+    fn eat_ident_list(p: parser) -> [@ast::capture_item] {
+        let res = [];
+        while true {
+            alt p.token {
+              token::IDENT(_, _) {
+                let id = p.get_id();
+                let sp = ast_util::mk_sp(p.span.lo, p.span.hi);
+                let ident = parse_ident(p);
+                res += [@{id:id, name:ident, span:sp}];
+                if !eat(p, token::COMMA) {
+                    ret res;
+                }
+              }
+
+              _ { ret res; }
+            }
+        }
+        std::util::unreachable();
+    }
+
+    let copies = [];
+    let moves = [];
+
+    // The whole clause is optional; absent '[' means no captures.
+    if eat(p, token::LBRACKET) {
+        while !eat(p, token::RBRACKET) {
+            if eat_word(p, "copy") {
+                copies += eat_ident_list(p);
+                expect_opt_trailing_semi(p);
+            } else if eat_word(p, "move") {
+                moves += eat_ident_list(p);
+                expect_opt_trailing_semi(p);
+            } else {
+                let s: str = "expecting send, copy, or move clause";
+                p.fatal(s);
+            }
+        }
+    }
+
+    ret @{copies: copies, moves: moves};
+}
+
+// Parse a 'fn' expression for the given proto: optional capture clause,
+// then a full fn declaration and body block.
+fn parse_fn_expr(p: parser, proto: ast::proto) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let capture_clause = parse_capture_clause(p);
+    let decl = parse_fn_decl(p, ast::impure_fn);
+    let body = parse_block(p);
+    ret mk_expr(p, lo, body.span.hi,
+                ast::expr_fn(proto, decl, body, capture_clause));
+}
+
+// Parse a block-lambda ({|args| ...}) after the opening '{' has been
+// consumed: the |...| decl, then the block tail.
+fn parse_fn_block_expr(p: parser) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let decl = parse_fn_block_decl(p);
+    let body = parse_block_tail(p, lo, ast::default_blk);
+    ret mk_expr(p, lo, body.span.hi, ast::expr_fn_block(decl, body));
+}
+
+// Parse the expression after 'else': either a chained 'else if' or a
+// plain block (wrapped as expr_block).
+fn parse_else_expr(p: parser) -> @ast::expr {
+    if eat_word(p, "if") {
+        ret parse_if_expr(p);
+    } else {
+        let blk = parse_block(p);
+        ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
+    }
+}
+
+// Parse 'for <local> in <seq> <block>' (the 'for' keyword has already
+// been consumed; note the loop local takes no initializer).
+fn parse_for_expr(p: parser) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let decl = parse_local(p, false, false);
+    expect_word(p, "in");
+    let seq = parse_expr(p);
+    let body = parse_block_no_value(p);
+    let hi = body.span.hi;
+    ret mk_expr(p, lo, hi, ast::expr_for(decl, seq, body));
+}
+
+// Parse 'while <cond> <block>' ('while' already consumed).
+fn parse_while_expr(p: parser) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let cond = parse_expr(p);
+    let body = parse_block_no_value(p);
+    let hi = body.span.hi;
+    ret mk_expr(p, lo, hi, ast::expr_while(cond, body));
+}
+
+// Parse 'do <block> while <cond>' ('do' already consumed).
+fn parse_do_while_expr(p: parser) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let body = parse_block_no_value(p);
+    expect_word(p, "while");
+    let cond = parse_expr(p);
+    let hi = cond.span.hi;
+    ret mk_expr(p, lo, hi, ast::expr_do_while(body, cond));
+}
+
+// Parse an 'alt' (match) expression: an optional 'check' mode word, the
+// discriminant expression, then a brace-delimited list of arms, each
+// being patterns, an optional 'if' guard, and a block.
+fn parse_alt_expr(p: parser) -> @ast::expr {
+    let lo = p.last_span.lo;
+    let mode = if eat_word(p, "check") { ast::alt_check }
+               else { ast::alt_exhaustive };
+    let discriminant = parse_expr(p);
+    expect(p, token::LBRACE);
+    let arms: [ast::arm] = [];
+    while p.token != token::RBRACE {
+        let pats = parse_pats(p);
+        let guard = none;
+        if eat_word(p, "if") { guard = some(parse_expr(p)); }
+        let blk = parse_block(p);
+        arms += [{pats: pats, guard: guard, body: blk}];
+    }
+    let hi = p.span.hi;
+    p.bump();
+    ret mk_expr(p, lo, hi, ast::expr_alt(discriminant, arms, mode));
+}
+
+// Parse a full expression with no parsing restriction in effect.
+fn parse_expr(p: parser) -> @ast::expr {
+    ret parse_expr_res(p, UNRESTRICTED);
+}
+
+// Parse either an expression or a '_' hole (used in call argument lists,
+// where a hole turns the call into a bind; see parse_dot_or_call_expr_with).
+fn parse_expr_or_hole(p: parser) -> option<@ast::expr> {
+    alt p.token {
+      token::UNDERSCORE { p.bump(); ret none; }
+      _ { ret some(parse_expr(p)); }
+    }
+}
+
+// Parse an expression under restriction `r`, saving and restoring the
+// parser's previous restriction around the parse.
+fn parse_expr_res(p: parser, r: restriction) -> @ast::expr {
+    let old = p.restriction;
+    p.restriction = r;
+    let e = parse_assign_expr(p);
+    p.restriction = old;
+    ret e;
+}
+
+// Parse an optional local-variable initializer: '=' (assign) or '<-'
+// (move), each followed by an expression. Returns none when the current
+// token starts neither form.
+fn parse_initializer(p: parser) -> option<ast::initializer> {
+    alt p.token {
+      token::EQ {
+        p.bump();
+        ret some({op: ast::init_assign, expr: parse_expr(p)});
+      }
+      token::LARROW {
+        p.bump();
+        ret some({op: ast::init_move, expr: parse_expr(p)});
+      }
+      // Now that the the channel is the first argument to receive,
+      // combining it with an initializer doesn't really make sense.
+      // case (token::RECV) {
+      //     p.bump();
+      //     ret some(rec(op = ast::init_recv,
+      //                  expr = parse_expr(p)));
+      // }
+      _ {
+        ret none;
+      }
+    }
+}
+
+// Parse one or more '|'-separated patterns (an alt arm's pattern list).
+fn parse_pats(p: parser) -> [@ast::pat] {
+    let pats = [];
+    while true {
+        pats += [parse_pat(p)];
+        if p.token == token::BINOP(token::OR) { p.bump(); } else { break; }
+    }
+    ret pats;
+}
+
+// Parse a single pattern. Handles: '_' wildcard, '@'/'~' box and unique
+// sub-patterns, '{...}' record patterns (with optional trailing '_' for
+// "rest"), '(...)' nil-literal / tuple patterns, literal and 'to' range
+// patterns, plain ident bindings (with optional '@' sub-pattern), and
+// enum-variant patterns with optional argument lists.
+fn parse_pat(p: parser) -> @ast::pat {
+    let lo = p.span.lo;
+    let hi = p.span.hi;
+    let pat;
+    alt p.token {
+      token::UNDERSCORE { p.bump(); pat = ast::pat_wild; }
+      token::AT {
+        p.bump();
+        let sub = parse_pat(p);
+        pat = ast::pat_box(sub);
+        hi = sub.span.hi;
+      }
+      token::TILDE {
+        p.bump();
+        let sub = parse_pat(p);
+        pat = ast::pat_uniq(sub);
+        hi = sub.span.hi;
+      }
+      token::LBRACE {
+        // Record pattern: comma-separated fields, each either
+        // 'name: subpat' or a bare 'name' binding; a final '_' sets the
+        // "etc" flag and must be immediately followed by '}'.
+        p.bump();
+        let fields = [];
+        let etc = false;
+        let first = true;
+        while p.token != token::RBRACE {
+            if first { first = false; } else { expect(p, token::COMMA); }
+
+            if p.token == token::UNDERSCORE {
+                p.bump();
+                if p.token != token::RBRACE {
+                    p.fatal("expecting }, found " +
+                                token::to_str(p.reader, p.token));
+                }
+                etc = true;
+                break;
+            }
+
+            let lo1 = p.last_span.lo;
+            let fieldname = parse_ident(p);
+            let hi1 = p.last_span.lo;
+            let fieldpath = ast_util::ident_to_path(ast_util::mk_sp(lo1, hi1),
+                                          fieldname);
+            let subpat;
+            if p.token == token::COLON {
+                p.bump();
+                subpat = parse_pat(p);
+            } else {
+                // Bare field name: shorthand binding of the same name.
+                // Reserved words cannot appear in binding position.
+                if p.bad_expr_words.contains_key(fieldname) {
+                    p.fatal("found " + fieldname + " in binding position");
+                }
+                subpat = @{id: p.get_id(),
+                           node: ast::pat_ident(fieldpath, none),
+                           span: ast_util::mk_sp(lo, hi)};
+            }
+            fields += [{ident: fieldname, pat: subpat}];
+        }
+        hi = p.span.hi;
+        p.bump();
+        pat = ast::pat_rec(fields, etc);
+      }
+      token::LPAREN {
+        p.bump();
+        if p.token == token::RPAREN {
+            // '()' matches the nil literal.
+            hi = p.span.hi;
+            p.bump();
+            let lit = @{node: ast::lit_nil, span: ast_util::mk_sp(lo, hi)};
+            let expr = mk_expr(p, lo, hi, ast::expr_lit(lit));
+            pat = ast::pat_lit(expr);
+        } else {
+            // Tuple pattern; a one-element tuple requires a trailing comma.
+            let fields = [parse_pat(p)];
+            while p.token == token::COMMA {
+                p.bump();
+                fields += [parse_pat(p)];
+            }
+            if vec::len(fields) == 1u { expect(p, token::COMMA); }
+            hi = p.span.hi;
+            expect(p, token::RPAREN);
+            pat = ast::pat_tup(fields);
+        }
+      }
+      tok {
+        // Not a structural token: distinguish literals/ranges, plain
+        // ident bindings, and enum-variant patterns.
+        if !is_ident(tok) || is_word(p, "true") || is_word(p, "false") {
+            // Literal pattern, optionally 'lit to lit' for a range.
+            let val = parse_expr_res(p, RESTRICT_NO_BAR_OP);
+            if eat_word(p, "to") {
+                let end = parse_expr_res(p, RESTRICT_NO_BAR_OP);
+                hi = end.span.hi;
+                pat = ast::pat_range(val, end);
+            } else {
+                hi = val.span.hi;
+                pat = ast::pat_lit(val);
+            }
+        } else if is_plain_ident(p) &&
+            // A plain ident NOT followed by '(', '[' or '<' is a binding.
+            alt p.look_ahead(1u) {
+              token::LPAREN | token::LBRACKET | token::LT { false }
+              _ { true }
+            } {
+            let name = parse_value_path(p);
+            let sub = if eat(p, token::AT) { some(parse_pat(p)) }
+                      else { none };
+            pat = ast::pat_ident(name, sub);
+        } else {
+            // Enum-variant pattern: a path with optional type-param
+            // substitutions and an optional parenthesized argument list.
+            let enum_path = parse_path_and_ty_param_substs(p, true);
+            hi = enum_path.span.hi;
+            let args: [@ast::pat];
+            alt p.token {
+              token::LPAREN {
+                let a =
+                    parse_seq(token::LPAREN, token::RPAREN,
+                              seq_sep(token::COMMA), parse_pat, p);
+                args = a.node;
+                hi = a.span.hi;
+              }
+              _ { args = []; }
+            }
+            // at this point, we're not sure whether it's a enum or a bind
+            if vec::len(args) == 0u &&
+               vec::len(enum_path.node.idents) == 1u {
+                pat = ast::pat_ident(enum_path, none);
+            }
+            else {
+                pat = ast::pat_enum(enum_path, args);
+            }
+        }
+      }
+    }
+    ret @{id: p.get_id(), node: pat, span: ast_util::mk_sp(lo, hi)};
+}
+
+// Parse one local declaration: pattern, optional ':' type annotation
+// (defaults to ty_infer), and — when allowed — an optional initializer.
+fn parse_local(p: parser, is_mutbl: bool,
+               allow_init: bool) -> @ast::local {
+    let lo = p.span.lo;
+    let pat = parse_pat(p);
+    let ty = @spanned(lo, lo, ast::ty_infer);
+    if eat(p, token::COLON) { ty = parse_ty(p, false); }
+    let init = if allow_init { parse_initializer(p) } else { none };
+    ret @spanned(lo, p.last_span.hi,
+                 {is_mutbl: is_mutbl, ty: ty, pat: pat,
+                  init: init, id: p.get_id()});
+}
+
+// Parse a 'let' declaration ('let' already consumed): optional 'mut',
+// then one or more comma-separated locals sharing that mutability.
+fn parse_let(p: parser) -> @ast::decl {
+    let is_mutbl = eat_word(p, "mut");
+    let lo = p.span.lo;
+    let locals = [parse_local(p, is_mutbl, true)];
+    while eat(p, token::COMMA) {
+        locals += [parse_local(p, is_mutbl, true)];
+    }
+    ret @spanned(lo, p.last_span.hi, ast::decl_local(locals));
+}
+
+// Parse a class instance variable: 'let [mut|mutable] name: ty'.
+// Both the 'mut' and legacy 'mutable' spellings are accepted.
+fn parse_instance_var(p:parser) -> ast::class_member {
+    let is_mutbl = ast::class_immutable;
+    expect_word(p, "let");
+    if eat_word(p, "mut") || eat_word(p, "mutable") {
+            is_mutbl = ast::class_mutable;
+    }
+    if !is_plain_ident(p) {
+        p.fatal("expecting ident");
+    }
+    let name = parse_ident(p);
+    expect(p, token::COLON);
+    let ty = parse_ty(p, false);
+    ret ast::instance_var(name, ty, is_mutbl, p.get_id());
+}
+
+// Parse one statement. In order, a statement can be: a 'let'
+// declaration, an attribute-prefixed item (or a syntax extension, via
+// parse_outer_attrs_or_ext), a plain item, or — as a fallback — an
+// expression parsed under RESTRICT_STMT_EXPR. `first_item_attrs` are
+// attributes already consumed by the caller that must attach to an item.
+fn parse_stmt(p: parser, first_item_attrs: [ast::attribute]) -> @ast::stmt {
+    fn check_expected_item(p: parser, current_attrs: [ast::attribute]) {
+        // If we have attributes then we should have an item
+        if vec::is_not_empty(current_attrs) {
+            p.fatal("expected item");
+        }
+    }
+
+    let lo = p.span.lo;
+    if is_word(p, "let") {
+        // Attributes cannot precede a let statement.
+        check_expected_item(p, first_item_attrs);
+        expect_word(p, "let");
+        let decl = parse_let(p);
+        ret @spanned(lo, decl.span.hi, ast::stmt_decl(decl, p.get_id()));
+    } else {
+        let item_attrs;
+        alt parse_outer_attrs_or_ext(p, first_item_attrs) {
+          none { item_attrs = []; }
+          some(left(attrs)) { item_attrs = attrs; }
+          some(right(ext)) {
+            // A syntax extension parsed directly into an expression stmt.
+            ret @spanned(lo, ext.span.hi, ast::stmt_expr(ext, p.get_id()));
+          }
+        }
+
+        let item_attrs = first_item_attrs + item_attrs;
+
+        alt parse_item(p, item_attrs) {
+          some(i) {
+            let hi = i.span.hi;
+            let decl = @spanned(lo, hi, ast::decl_item(i));
+            ret @spanned(lo, hi, ast::stmt_decl(decl, p.get_id()));
+          }
+          none() { /* fallthrough */ }
+        }
+
+        // If attributes were parsed but no item followed, that's an error.
+        check_expected_item(p, item_attrs);
+
+        // Remainder are line-expr stmts.
+        let e = parse_expr_res(p, RESTRICT_STMT_EXPR);
+        ret @spanned(lo, e.span.hi, ast::stmt_expr(e, p.get_id()));
+    }
+}
+
+// In statement position (RESTRICT_STMT_EXPR), an expression that does
+// not require a semicolon to be a statement (if/alt/block/loops and
+// trailing-block calls) is already complete and must not absorb further
+// operators.
+fn expr_is_complete(p: parser, e: pexpr) -> bool {
+    log(debug, ("expr_is_complete", p.restriction,
+                print::pprust::expr_to_str(*e),
+                expr_requires_semi_to_be_stmt(*e)));
+    ret p.restriction == RESTRICT_STMT_EXPR &&
+        !expr_requires_semi_to_be_stmt(*e);
+}
+
+// Block-like expressions (if, if-check, alt, plain blocks, loops, and
+// calls whose last argument is a trailing block) can stand as statements
+// without a terminating semicolon; everything else needs one.
+fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool {
+    alt e.node {
+      ast::expr_if(_, _, _) | ast::expr_if_check(_, _, _)
+      | ast::expr_alt(_, _, _) | ast::expr_block(_)
+      | ast::expr_do_while(_, _) | ast::expr_while(_, _)
+      | ast::expr_for(_, _, _)
+      | ast::expr_call(_, _, true) {
+        false
+      }
+      _ { true }
+    }
+}
+
+// Whether a parsed statement must be followed by ';': local decls and
+// semicolon-requiring expressions do; items and already-semi'd
+// statements do not.
+fn stmt_ends_with_semi(stmt: ast::stmt) -> bool {
+    alt stmt.node {
+      ast::stmt_decl(d, _) {
+        ret alt d.node {
+              ast::decl_local(_) { true }
+              ast::decl_item(_) { false }
+            }
+      }
+      ast::stmt_expr(e, _) {
+        ret expr_requires_semi_to_be_stmt(e);
+      }
+      ast::stmt_semi(e, _) {
+        // Already carries its semicolon.
+        ret false;
+      }
+    }
+}
+
+// Parse a block with no inner attributes allowed; asserts that none
+// were found.
+fn parse_block(p: parser) -> ast::blk {
+    let (attrs, blk) = parse_inner_attrs_and_block(p, false);
+    assert vec::is_empty(attrs);
+    ret blk;
+}
+
+// Parse a block, handling an optional 'unchecked'/'unsafe' check-mode
+// keyword before the '{'. When `parse_attrs` is set, inner attributes
+// at the top of the block are collected and returned alongside it;
+// "next" attributes are forwarded to the block tail's first item.
+fn parse_inner_attrs_and_block(
+    p: parser, parse_attrs: bool) -> ([ast::attribute], ast::blk) {
+
+    fn maybe_parse_inner_attrs_and_next(
+        p: parser, parse_attrs: bool) ->
+        {inner: [ast::attribute], next: [ast::attribute]} {
+        if parse_attrs {
+            parse_inner_attrs_and_next(p)
+        } else {
+            {inner: [], next: []}
+        }
+    }
+
+    let lo = p.span.lo;
+    if eat_word(p, "unchecked") {
+        expect(p, token::LBRACE);
+        let {inner, next} = maybe_parse_inner_attrs_and_next(p, parse_attrs);
+        ret (inner, parse_block_tail_(p, lo, ast::unchecked_blk, next));
+    } else if eat_word(p, "unsafe") {
+        expect(p, token::LBRACE);
+        let {inner, next} = maybe_parse_inner_attrs_and_next(p, parse_attrs);
+        ret (inner, parse_block_tail_(p, lo, ast::unsafe_blk, next));
+    } else {
+        expect(p, token::LBRACE);
+        let {inner, next} = maybe_parse_inner_attrs_and_next(p, parse_attrs);
+        ret (inner, parse_block_tail_(p, lo, ast::default_blk, next));
+    }
+}
+
+fn parse_block_no_value(p: parser) -> ast::blk {
+    // We parse blocks that cannot have a value the same as any other block;
+    // the type checker will make sure that the tail expression (if any) has
+    // unit type.
+    ret parse_block(p);
+}
+
+// Precondition: already parsed the '{' or '#{'
+// I guess that also means "already parsed the 'impure'" if
+// necessary, and this should take a qualifier.
+// some blocks start with "#{"...
+fn parse_block_tail(p: parser, lo: uint, s: ast::blk_check_mode) -> ast::blk {
+    // Delegates with no attributes for the first item.
+    parse_block_tail_(p, lo, s, [])
+}
+
+// Parse the interior of a block (the opening '{' is already consumed):
+// leading view items, then statements until '}'. Expressions without a
+// semicolon either become stmt_semi (if ';' follows), the block's tail
+// expression (if '}' follows), or an error when a semicolon was
+// required. `first_item_attrs` must attach to the first item, else it
+// is an error.
+fn parse_block_tail_(p: parser, lo: uint, s: ast::blk_check_mode,
+                     first_item_attrs: [ast::attribute]) -> ast::blk {
+    let stmts = [];
+    let expr = none;
+    let view_items = maybe_parse_view_import_only(p, first_item_attrs);
+    let initial_attrs = first_item_attrs;
+
+    // Attributes with no following item are an error.
+    if p.token == token::RBRACE && !vec::is_empty(initial_attrs) {
+        p.fatal("expected item");
+    }
+
+    while p.token != token::RBRACE {
+        alt p.token {
+          token::SEMI {
+            p.bump(); // empty
+          }
+          _ {
+            let stmt = parse_stmt(p, initial_attrs);
+            // Attributes only apply to the first statement parsed.
+            initial_attrs = [];
+            alt stmt.node {
+              ast::stmt_expr(e, stmt_id) { // Expression without semicolon:
+                alt p.token {
+                  token::SEMI {
+                    p.bump();
+                    stmts += [@{node: ast::stmt_semi(e, stmt_id) with *stmt}];
+                  }
+                  token::RBRACE {
+                    // Last expression of the block: its value.
+                    expr = some(e);
+                  }
+                  t {
+                    if stmt_ends_with_semi(*stmt) {
+                        p.fatal("expected ';' or '}' after expression but \
+                                 found '" + token::to_str(p.reader, t) +
+                                "'");
+                    }
+                    stmts += [stmt];
+                  }
+                }
+              }
+
+              _ { // All other kinds of statements:
+                stmts += [stmt];
+
+                if stmt_ends_with_semi(*stmt) {
+                    expect(p, token::SEMI);
+                }
+              }
+            }
+          }
+        }
+    }
+    let hi = p.span.hi;
+    p.bump();
+    let bloc = {view_items: view_items, stmts: stmts, expr: expr,
+                id: p.get_id(), rules: s};
+    ret spanned(lo, hi, bloc);
+}
+
+// Parse one type parameter: an ident optionally followed by ':' and a
+// run of bounds ('send', 'copy', or an iface type) up to ',' or '>'.
+fn parse_ty_param(p: parser) -> ast::ty_param {
+    let bounds = [];
+    let ident = parse_ident(p);
+    if eat(p, token::COLON) {
+        while p.token != token::COMMA && p.token != token::GT {
+            if eat_word(p, "send") { bounds += [ast::bound_send]; }
+            else if eat_word(p, "copy") { bounds += [ast::bound_copy]; }
+            else { bounds += [ast::bound_iface(parse_ty(p, false))]; }
+        }
+    }
+    ret {ident: ident, id: p.get_id(), bounds: @bounds};
+}
+
+// Parse an optional '<...>' type-parameter list; absent '<' yields [].
+fn parse_ty_params(p: parser) -> [ast::ty_param] {
+    if eat(p, token::LT) {
+        parse_seq_to_gt(some(token::COMMA), parse_ty_param, p)
+    } else { [] }
+}
+
+// Parse a fn declaration: parenthesized argument list, an optional ':'
+// constraint list (resolved against the argument names), and the return
+// type/style.
+fn parse_fn_decl(p: parser, purity: ast::purity)
+    -> ast::fn_decl {
+    let inputs: ast::spanned<[ast::arg]> =
+        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                  parse_arg, p);
+    // Use the args list to translate each bound variable
+    // mentioned in a constraint to an arg index.
+    // Seems weird to do this in the parser, but I'm not sure how else to.
+    let constrs = [];
+    if p.token == token::COLON {
+        p.bump();
+        constrs = parse_constrs({|x| parse_ty_constr(inputs.node, x) }, p);
+    }
+    let (ret_style, ret_ty) = parse_ret_ty(p);
+    ret {inputs: inputs.node,
+         output: ret_ty,
+         purity: purity,
+         cf: ret_style,
+         constraints: constrs};
+}
+
+// Parse a block-lambda's declaration: '||' for an empty argument list or
+// '|arg, ...|' otherwise, plus an optional '->' return type (defaulting
+// to ty_infer). Always impure with value-return control flow and no
+// constraints.
+fn parse_fn_block_decl(p: parser) -> ast::fn_decl {
+    let inputs = if eat(p, token::OROR) {
+                     []
+                 } else {
+                     parse_seq(token::BINOP(token::OR),
+                               token::BINOP(token::OR),
+                               seq_sep(token::COMMA),
+                               parse_fn_block_arg, p).node
+                 };
+    let output = if eat(p, token::RARROW) {
+                     parse_ty(p, false)
+                 } else {
+                     @spanned(p.span.lo, p.span.hi, ast::ty_infer)
+                 };
+    ret {inputs: inputs,
+         output: output,
+         purity: ast::impure_fn,
+         cf: ast::return_val,
+         constraints: []};
+}
+
+// Parse a fn header: its name and optional type-parameter list.
+fn parse_fn_header(p: parser) -> {ident: ast::ident, tps: [ast::ty_param]} {
+    let id = parse_value_ident(p);
+    let ty_params = parse_ty_params(p);
+    ret {ident: id, tps: ty_params};
+}
+
+// Assemble an item record with a fresh node id and the given span bounds.
+fn mk_item(p: parser, lo: uint, hi: uint, ident: ast::ident, node: ast::item_,
+           attrs: [ast::attribute]) -> @ast::item {
+    ret @{ident: ident,
+          attrs: attrs,
+          id: p.get_id(),
+          node: node,
+          span: ast_util::mk_sp(lo, hi)};
+}
+
+// Parse a fn item ('fn' keyword already consumed): header, decl, and a
+// body block whose inner attributes are appended to the item's.
+fn parse_item_fn(p: parser, purity: ast::purity,
+                 attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let t = parse_fn_header(p);
+    let decl = parse_fn_decl(p, purity);
+    let (inner_attrs, body) = parse_inner_attrs_and_block(p, true);
+    let attrs = attrs + inner_attrs;
+    ret mk_item(p, lo, body.span.hi, t.ident,
+                ast::item_fn(decl, t.tps, body), attrs);
+}
+
+// Parse a method name, which may be an operator symbol: a binary
+// operator token, '!', '[]' (index), the special 'unary-' spelling, or
+// an ordinary identifier.
+fn parse_method_name(p: parser) -> ast::ident {
+    alt p.token {
+      token::BINOP(op) { p.bump(); token::binop_to_str(op) }
+      token::NOT { p.bump(); "!" }
+      token::LBRACKET { p.bump(); expect(p, token::RBRACKET); "[]" }
+      _ {
+          let id = parse_value_ident(p);
+          // 'unary' followed by '-' names the unary-negation method.
+          if id == "unary" && eat(p, token::BINOP(token::MINUS)) { "unary-" }
+          else { id }
+      }
+    }
+}
+
+// Parse one method within an impl: outer attributes, purity, name
+// (possibly an operator), type params, decl, and body; the body's inner
+// attributes are appended to the method's attributes.
+fn parse_method(p: parser) -> @ast::method {
+    let attrs = parse_outer_attributes(p);
+    let lo = p.span.lo, pur = parse_fn_purity(p);
+    let ident = parse_method_name(p);
+    let tps = parse_ty_params(p);
+    let decl = parse_fn_decl(p, pur);
+    let (inner_attrs, body) = parse_inner_attrs_and_block(p, true);
+    let attrs = attrs + inner_attrs;
+    @{ident: ident, attrs: attrs, tps: tps, decl: decl, body: body,
+      id: p.get_id(), span: ast_util::mk_sp(lo, body.span.hi)}
+}
+
+// Parse an iface item ('iface' keyword already consumed): name, type
+// params, and its method signatures.
+fn parse_item_iface(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo, ident = parse_ident(p),
+        tps = parse_ty_params(p), meths = parse_ty_methods(p);
+    ret mk_item(p, lo, p.last_span.hi, ident,
+                ast::item_iface(tps, meths), attrs);
+}
+
+// Parses three variants (with the initial params always optional):
+//    impl <T: copy> of to_str for [T] { ... }
+//    impl name<T> of to_str for [T] { ... }
+//    impl name<T> for [T] { ... }
+fn parse_item_impl(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    // Wrap an already-parsed path as a ty_path type node.
+    fn wrap_path(p: parser, pt: @ast::path) -> @ast::ty {
+        @{node: ast::ty_path(pt, p.get_id()), span: pt.span}
+    }
+    let (ident, tps) = if !is_word(p, "of") {
+        if p.token == token::LT { (none, parse_ty_params(p)) }
+        else { (some(parse_ident(p)), parse_ty_params(p)) }
+    } else { (none, []) };
+    let ifce = if eat_word(p, "of") {
+        let path = parse_path_and_ty_param_substs(p, false);
+        // An anonymous impl borrows its name from the iface path's last
+        // segment.
+        if option::is_none(ident) {
+            ident = some(path.node.idents[vec::len(path.node.idents) - 1u]);
+        }
+        some(wrap_path(p, path))
+    } else { none };
+    let ident = alt ident {
+        some(name) { name }
+        // No name and no 'of' clause: report the missing 'of'.
+        none { expect_word(p, "of"); fail; }
+    };
+    expect_word(p, "for");
+    let ty = parse_ty(p, false), meths = [];
+    expect(p, token::LBRACE);
+    while !eat(p, token::RBRACE) { meths += [parse_method(p)]; }
+    ret mk_item(p, lo, p.last_span.hi, ident,
+                ast::item_impl(tps, ifce, ty, meths), attrs);
+}
+
+// Parse a resource item: "resource name<T>(arg: ty) { dtor }" (the
+// "resource" keyword was already consumed).  The synthesized destructor
+// declaration takes the single by-ref argument and returns nil.
+fn parse_item_res(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let name = parse_value_ident(p);
+    let tps = parse_ty_params(p);
+    expect(p, token::LPAREN);
+    let arg_name = parse_value_ident(p);
+    expect(p, token::COLON);
+    let arg_ty = parse_ty(p, false);
+    expect(p, token::RPAREN);
+    let dtor = parse_block_no_value(p);
+    let arg = {mode: ast::expl(ast::by_ref), ty: arg_ty,
+               ident: arg_name, id: p.get_id()};
+    let decl = {inputs: [arg],
+                output: @spanned(lo, lo, ast::ty_nil),
+                purity: ast::impure_fn,
+                cf: ast::return_val,
+                constraints: []};
+    ret mk_item(p, lo, dtor.span.hi, name,
+                ast::item_res(decl, tps, dtor, p.get_id(), p.get_id()),
+                attrs);
+}
+
+// Parse "class name<T> { ... }" ("class" already consumed).  The body
+// mixes a constructor ("new"), plain (public) members, and "priv"
+// sections; exactly one constructor is required.
+fn parse_item_class(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let class_name = parse_value_ident(p);
+    // The class's own path, used as the ctor's result type.
+    let class_path = ident_to_path(p.last_span, class_name);
+    let ty_params = parse_ty_params(p);
+    expect(p, token::LBRACE);
+    let items: [@ast::class_item] = [];
+    let ctor_id = p.get_id();
+    let the_ctor : option<(ast::fn_decl, ast::blk)> = none;
+    while p.token != token::RBRACE {
+        alt parse_class_item(p, class_path) {
+            ctor_decl(a_fn_decl, blk) {
+                // NOTE(review): a second ctor silently replaces the first.
+                the_ctor = some((a_fn_decl, blk));
+            }
+            plain_decl(a_decl) {
+                items += [@{node: {privacy: ast::pub, decl: a_decl},
+                            span: p.last_span}];
+            }
+            priv_decls(some_decls) {
+                items += vec::map(some_decls, {|d|
+                            @{node: {privacy: ast::priv, decl: d},
+                                span: p.last_span}});
+            }
+       }
+    }
+    // Consume the closing brace.
+    p.bump();
+    alt the_ctor {
+       some((ct_d, ct_b)) { ret mk_item(p, lo, p.last_span.hi, class_name,
+         ast::item_class(ty_params, items, ctor_id, ct_d, ct_b), attrs); }
+       /*
+         Is it strange for the parser to check this?
+       */
+       none { /* parse error */ fail "Class with no ctor"; }
+    }
+}
+
+// Lets us identify the constructor declaration at parse time; we don't
+// really want just the fn_decl...
+enum class_contents { ctor_decl(ast::fn_decl, ast::blk),
+                      // assumed to be public
+                      plain_decl(ast::class_member),
+                      // contents of a priv section --
+                      // parse_class_item ensures that
+                      // none of these are a ctor decl
+                      priv_decls([ast::class_member])}
+
+    // Parse one entry of a class body: the constructor ("new"), a
+    // "priv { ... }" section, or a single public member (either a nested
+    // item used as a method, or an instance variable).
+    fn parse_class_item(p:parser, class_name:@ast::path) -> class_contents {
+    if eat_word(p, "new") {
+        // Can ctors have attrs?
+            // result type is always the type of the class
+        let decl_ = parse_fn_decl(p, ast::impure_fn);
+        let decl = {output: @{node: ast::ty_path(class_name, p.get_id()),
+                                  span: decl_.output.span}
+                    with decl_};
+        let body = parse_block(p);
+        ret ctor_decl(decl, body);
+    }
+    // FIXME: refactor
+    else if eat_word(p, "priv") {
+            expect(p, token::LBRACE);
+            let results = [];
+            while p.token != token::RBRACE {
+               alt parse_item(p, []) {
+                 some(i) {
+                     results += [ast::class_method(i)];
+                 }
+                 // Not an item: must be an instance variable.
+                 _ {
+                     let a_var = parse_instance_var(p);
+                     expect(p, token::SEMI);
+                     results += [a_var];
+                 }
+               }
+            }
+            // Consume the closing brace of the priv section.
+            p.bump();
+            ret priv_decls(results);
+    }
+    else {
+        // Probably need to parse attrs
+        alt parse_item(p, []) {
+         some(i) {
+             ret plain_decl(ast::class_method(i));
+         }
+         _ {
+             let a_var = parse_instance_var(p);
+             expect(p, token::SEMI);
+             ret plain_decl(a_var);
+         }
+        }
+    }
+}
+
+// Parse the items of a module body up to (but not consuming) `term`.
+// `first_item_attrs` are outer attributes already consumed by the caller;
+// they must attach to the first item parsed here.
+fn parse_mod_items(p: parser, term: token::token,
+                   first_item_attrs: [ast::attribute]) -> ast::_mod {
+    // Shouldn't be any view items since we've already parsed an item attr
+    let view_items = maybe_parse_view(p, first_item_attrs);
+    let items: [@ast::item] = [];
+    let initial_attrs = first_item_attrs;
+    while p.token != term {
+        let attrs = initial_attrs + parse_outer_attributes(p);
+        #debug["parse_mod_items: parse_item(attrs=%?)", attrs];
+        alt parse_item(p, attrs) {
+          some(i) { items += [i]; }
+          _ {
+            p.fatal("expected item but found '" +
+                    token::to_str(p.reader, p.token) + "'");
+          }
+        }
+        #debug["parse_mod_items: attrs=%?", attrs];
+        // Pre-parsed attributes only apply to the first item.
+        initial_attrs = [];
+    }
+
+    if vec::is_not_empty(initial_attrs) {
+        // We parsed attributes for the first item but didn't find the item
+        p.fatal("expected item");
+    }
+
+    ret {view_items: view_items, items: items};
+}
+
+// Parse a const item: "name: ty = expr;" ("const" already consumed).
+fn parse_item_const(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let name = parse_value_ident(p);
+    expect(p, token::COLON);
+    let ty = parse_ty(p, false);
+    expect(p, token::EQ);
+    let value = parse_expr(p);
+    let hi = p.span.hi;
+    expect(p, token::SEMI);
+    ret mk_item(p, lo, hi, name, ast::item_const(ty, value), attrs);
+}
+
+// Parse "mod name { ... }" ("mod" already consumed).  Inner attributes of
+// the module body are appended to the item's own attributes.
+fn parse_item_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let name = parse_ident(p);
+    expect(p, token::LBRACE);
+    let body_attrs = parse_inner_attrs_and_next(p);
+    let m = parse_mod_items(p, token::RBRACE, body_attrs.next);
+    let hi = p.span.hi;
+    expect(p, token::RBRACE);
+    ret mk_item(p, lo, hi, name, ast::item_mod(m), attrs + body_attrs.inner);
+}
+
+// Parse a native fn declaration: header and decl only, terminated by ';'
+// (native fns have no body).
+fn parse_item_native_fn(p: parser, attrs: [ast::attribute],
+                        purity: ast::purity) -> @ast::native_item {
+    let lo = p.last_span.lo;
+    let header = parse_fn_header(p);
+    let decl = parse_fn_decl(p, purity);
+    let hi = p.span.hi;
+    expect(p, token::SEMI);
+    ret @{ident: header.ident,
+          attrs: attrs,
+          node: ast::native_item_fn(decl, header.tps),
+          id: p.get_id(),
+          span: ast_util::mk_sp(lo, hi)};
+}
+
+// Consume a fn-purity prefix: "fn", "pure fn", or "unsafe fn"; anything
+// else is a parse error.
+fn parse_fn_purity(p: parser) -> ast::purity {
+    if eat_word(p, "fn") { ast::impure_fn }
+    else if eat_word(p, "pure") { expect_word(p, "fn"); ast::pure_fn }
+    else if eat_word(p, "unsafe") { expect_word(p, "fn"); ast::unsafe_fn }
+    else { unexpected(p, p.token); }
+}
+
+// Parse one item inside a native mod; only fn declarations are accepted.
+fn parse_native_item(p: parser, attrs: [ast::attribute]) ->
+   @ast::native_item {
+    let purity = parse_fn_purity(p);
+    ret parse_item_native_fn(p, attrs, purity);
+}
+
+// Parse the contents of a native mod up to (but not consuming) the
+// closing brace.
+fn parse_native_mod_items(p: parser, first_item_attrs: [ast::attribute]) ->
+   ast::native_mod {
+    // Shouldn't be any view items since we've already parsed an item attr
+    let view_items = if vec::len(first_item_attrs) == 0u {
+        parse_native_view(p)
+    } else { [] };
+    let items: [@ast::native_item] = [];
+    let pending_attrs = first_item_attrs;
+    while p.token != token::RBRACE {
+        let attrs = pending_attrs + parse_outer_attributes(p);
+        // Pre-parsed attributes only apply to the first item.
+        pending_attrs = [];
+        items += [parse_native_item(p, attrs)];
+    }
+    ret {view_items: view_items, items: items};
+}
+
+// Parse "native mod name { ... }" ("native" already consumed).
+fn parse_item_native_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    expect_word(p, "mod");
+    let name = parse_ident(p);
+    expect(p, token::LBRACE);
+    let body_attrs = parse_inner_attrs_and_next(p);
+    let m = parse_native_mod_items(p, body_attrs.next);
+    let hi = p.span.hi;
+    expect(p, token::RBRACE);
+    ret mk_item(p, lo, hi, name, ast::item_native_mod(m),
+                attrs + body_attrs.inner);
+}
+
+// Parse the start of a type declaration: just the identifier, along with
+// the position at which the item began (the already-consumed keyword).
+fn parse_type_decl(p: parser) -> {lo: uint, ident: ast::ident} {
+    let lo = p.last_span.lo;
+    let name = parse_ident(p);
+    ret {lo: lo, ident: name};
+}
+
+// Parse a type alias: "type name<T> = ty;" ("type" already consumed).
+fn parse_item_type(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let decl = parse_type_decl(p);
+    let tps = parse_ty_params(p);
+    expect(p, token::EQ);
+    let ty = parse_ty(p, false);
+    let hi = p.span.hi;
+    expect(p, token::SEMI);
+    ret mk_item(p, decl.lo, hi, decl.ident, ast::item_ty(ty, tps), attrs);
+}
+
+// Parse "enum name<T> ..." ("enum" already consumed).  Two forms: the
+// single-variant newtype "enum name = ty;", and a braced variant list
+// where each variant may carry argument types or an "= expr"
+// discriminator (the latter only if every variant is nullary).
+fn parse_item_enum(p: parser, attrs: [ast::attribute]) -> @ast::item {
+    let lo = p.last_span.lo;
+    let id = parse_ident(p);
+    let ty_params = parse_ty_params(p);
+    let variants: [ast::variant] = [];
+    // Newtype syntax
+    if p.token == token::EQ {
+        // The enum name doubles as the constructor name, so reserved
+        // words are rejected here.
+        if p.bad_expr_words.contains_key(id) {
+            p.fatal("found " + id + " in enum constructor position");
+        }
+        p.bump();
+        let ty = parse_ty(p, false);
+        expect(p, token::SEMI);
+        let variant =
+            spanned(ty.span.lo, ty.span.hi,
+                    {name: id,
+                     attrs: [],
+                     args: [{ty: ty, id: p.get_id()}],
+                     id: p.get_id(),
+                     disr_expr: none});
+        ret mk_item(p, lo, ty.span.hi, id,
+                    ast::item_enum([variant], ty_params), attrs);
+    }
+    expect(p, token::LBRACE);
+
+    let all_nullary = true, have_disr = false;
+
+    while p.token != token::RBRACE {
+        let variant_attrs = parse_outer_attributes(p);
+        let vlo = p.span.lo;
+        let ident = parse_value_ident(p);
+        let args = [], disr_expr = none;
+        if p.token == token::LPAREN {
+            all_nullary = false;
+            let arg_tys = parse_seq(token::LPAREN, token::RPAREN,
+                                    seq_sep(token::COMMA),
+                                    {|p| parse_ty(p, false)}, p);
+            for ty in arg_tys.node {
+                args += [{ty: ty, id: p.get_id()}];
+            }
+        } else if eat(p, token::EQ) {
+            have_disr = true;
+            disr_expr = some(parse_expr(p));
+        }
+
+        let vr = {name: ident, attrs: variant_attrs,
+                  args: args, id: p.get_id(),
+                  disr_expr: disr_expr};
+        variants += [spanned(vlo, p.last_span.hi, vr)];
+
+        if !eat(p, token::COMMA) { break; }
+    }
+    expect(p, token::RBRACE);
+    // Discriminators are only meaningful when every variant is nullary.
+    if (have_disr && !all_nullary) {
+        p.fatal("discriminator values can only be used with a c-like enum");
+    }
+    ret mk_item(p, lo, p.last_span.hi, id,
+                ast::item_enum(variants, ty_params), attrs);
+}
+
+// Parse an optional fn-type sigil selecting the closure kind: "@" (boxed),
+// "~" (unique), "&" (block); no sigil leaves the kind unconstrained.
+fn parse_fn_ty_proto(p: parser) -> ast::proto {
+    if eat(p, token::AT) { ast::proto_box }
+    else if eat(p, token::TILDE) { ast::proto_uniq }
+    else if eat(p, token::BINOP(token::AND)) { ast::proto_block }
+    else { ast::proto_any }
+}
+
+// True if the token after "fn" indicates a fn *expression* rather than a
+// fn item: an argument list, a proto sigil, or any binop token (which
+// covers "&").
+fn fn_expr_lookahead(tok: token::token) -> bool {
+    alt tok {
+      token::LPAREN { true }
+      token::AT { true }
+      token::TILDE { true }
+      token::BINOP(_) { true }
+      _ { false }
+    }
+}
+
+// Parse an item, dispatching on the leading keyword.  Returns none when
+// the current token does not begin an item; the caller decides whether
+// that is an error.
+fn parse_item(p: parser, attrs: [ast::attribute]) -> option<@ast::item> {
+    if eat_word(p, "const") {
+        ret some(parse_item_const(p, attrs));
+    } else if is_word(p, "fn") && !fn_expr_lookahead(p.look_ahead(1u)) {
+        // "fn" only begins an item when it isn't a fn expression.
+        p.bump();
+        ret some(parse_item_fn(p, ast::impure_fn, attrs));
+    } else if eat_word(p, "pure") {
+        expect_word(p, "fn");
+        ret some(parse_item_fn(p, ast::pure_fn, attrs));
+    } else if is_word(p, "unsafe") && p.look_ahead(1u) != token::LBRACE {
+        // "unsafe {" is an unsafe block expression, not an item.
+        p.bump();
+        expect_word(p, "fn");
+        ret some(parse_item_fn(p, ast::unsafe_fn, attrs));
+    } else if eat_word(p, "crust") {
+        expect_word(p, "fn");
+        ret some(parse_item_fn(p, ast::crust_fn, attrs));
+    } else if eat_word(p, "mod") {
+        ret some(parse_item_mod(p, attrs));
+    } else if eat_word(p, "native") {
+        ret some(parse_item_native_mod(p, attrs));
+    } else if eat_word(p, "type") {
+        // This branch previously read "} if ...": a dropped "else" that
+        // only behaved correctly because every branch above returns.
+        ret some(parse_item_type(p, attrs));
+    } else if eat_word(p, "enum") {
+        ret some(parse_item_enum(p, attrs));
+    } else if eat_word(p, "iface") {
+        ret some(parse_item_iface(p, attrs));
+    } else if eat_word(p, "impl") {
+        ret some(parse_item_impl(p, attrs));
+    } else if eat_word(p, "resource") {
+        ret some(parse_item_res(p, attrs));
+    } else if eat_word(p, "class") {
+        ret some(parse_item_class(p, attrs));
+    } else { ret none; }
+}
+
+// A type to distinguish between the parsing of item attributes and syntax
+// extensions, both of which begin with token::POUND.
+type attr_or_ext = option<either::t<[ast::attribute], @ast::expr>>;
+
+// At a '#' token, decide between outer attributes ("#[...]"; any run of
+// them is collected) and a syntax extension ("#name(...)").  Returns none
+// when neither interpretation applies.
+fn parse_outer_attrs_or_ext(
+    p: parser,
+    first_item_attrs: [ast::attribute]) -> attr_or_ext {
+    // Attributes already parsed commit us to an item next.
+    let expect_item_next = vec::is_not_empty(first_item_attrs);
+    if p.token == token::POUND {
+        let lo = p.span.lo;
+        if p.look_ahead(1u) == token::LBRACKET {
+            p.bump();
+            let first_attr = parse_attribute_naked(p, ast::attr_outer, lo);
+            ret some(left([first_attr] + parse_outer_attributes(p)));
+        } else if !(p.look_ahead(1u) == token::LT
+                    // NOTE(review): this LBRACKET test is always false --
+                    // the branch above already handled that lookahead.
+                    || p.look_ahead(1u) == token::LBRACKET
+                    || expect_item_next) {
+            p.bump();
+            ret some(right(parse_syntax_ext_naked(p, lo)));
+        } else { ret none; }
+    } else { ret none; }
+}
+
+// Parse any number of outer attributes preceding an item.
+fn parse_outer_attributes(p: parser) -> [ast::attribute] {
+    let attrs: [ast::attribute] = [];
+    while p.token == token::POUND {
+        attrs += [parse_attribute(p, ast::attr_outer)];
+    }
+    ret attrs;
+}
+
+// Parse one attribute, starting at the '#' token.
+fn parse_attribute(p: parser, style: ast::attr_style) -> ast::attribute {
+    let start = p.span.lo;
+    expect(p, token::POUND);
+    parse_attribute_naked(p, style, start)
+}
+
+// Parse the "[meta_item]" part of an attribute; `lo` is the position of
+// the already-consumed '#'.
+fn parse_attribute_naked(p: parser, style: ast::attr_style, lo: uint) ->
+   ast::attribute {
+    expect(p, token::LBRACKET);
+    let item = parse_meta_item(p);
+    expect(p, token::RBRACKET);
+    ret spanned(lo, p.span.hi, {style: style, value: *item});
+}
+
+// Parse attributes that appear after the opening of an item, each terminated
+// by a semicolon. In addition to a vector of inner attributes, this function
+// also returns a vector that may contain the first outer attribute of the
+// next item (since we can't know whether the attribute is an inner attribute
+// of the containing item or an outer attribute of the first contained item
+// until we see the semi).
+fn parse_inner_attrs_and_next(p: parser) ->
+   {inner: [ast::attribute], next: [ast::attribute]} {
+    let inner_attrs: [ast::attribute] = [];
+    let next_outer_attrs: [ast::attribute] = [];
+    while p.token == token::POUND {
+        if p.look_ahead(1u) != token::LBRACKET {
+            // This is an extension
+            break;
+        }
+        let attr = parse_attribute(p, ast::attr_inner);
+        if p.token == token::SEMI {
+            p.bump();
+            inner_attrs += [attr];
+        } else {
+            // It's not really an inner attribute
+            let outer_attr =
+                spanned(attr.span.lo, attr.span.hi,
+                        {style: ast::attr_outer, value: attr.node.value});
+            next_outer_attrs += [outer_attr];
+            // Once one outer attribute is found, the rest belong to the
+            // next item as well; stop here.
+            break;
+        }
+    }
+    ret {inner: inner_attrs, next: next_outer_attrs};
+}
+
+// Parse a single meta item: "name", "name = lit", or "name(...)".
+fn parse_meta_item(p: parser) -> @ast::meta_item {
+    let lo = p.span.lo;
+    let name = parse_ident(p);
+    alt p.token {
+      token::EQ {
+        // name = literal
+        p.bump();
+        let value = parse_lit(p);
+        ret @spanned(lo, p.span.hi, ast::meta_name_value(name, value));
+      }
+      token::LPAREN {
+        // name(nested, meta, items)
+        let inner = parse_meta_seq(p);
+        ret @spanned(lo, p.span.hi, ast::meta_list(name, inner));
+      }
+      _ {
+        // bare word
+        ret @spanned(lo, p.span.hi, ast::meta_word(name));
+      }
+    }
+}
+
+// Parse a parenthesized, comma-separated sequence of meta items.
+fn parse_meta_seq(p: parser) -> [@ast::meta_item] {
+    let seq = parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
+                        parse_meta_item, p);
+    ret seq.node;
+}
+
+// Parse a meta-item sequence if one follows, else yield the empty list.
+fn parse_optional_meta(p: parser) -> [@ast::meta_item] {
+    if p.token == token::LPAREN { parse_meta_seq(p) } else { [] }
+}
+
+// Parse "use ident (meta);" -- "use" was already eaten and the trailing
+// ';' is left for the caller (parse_view_item).
+fn parse_use(p: parser) -> ast::view_item_ {
+    let name = parse_ident(p);
+    ret ast::view_item_use(name, parse_optional_meta(p), p.get_id());
+}
+
+// Parse one view path, in any of these shapes:
+//    x = foo::bar      (rename)
+//    foo::bar          (simple; bound to its last segment)
+//    foo::bar::{a,b,c} (list)
+//    foo::bar::*       (glob)
+fn parse_view_path(p: parser) -> @ast::view_path {
+    let lo = p.span.lo;
+    let first_ident = parse_ident(p);
+    let path = [first_ident];
+    #debug("parsed view_path: %s", first_ident);
+    alt p.token {
+      token::EQ {
+        // x = foo::bar
+        p.bump();
+        // The bound name is first_ident; the path starts over here.
+        path = [parse_ident(p)];
+        while p.token == token::MOD_SEP {
+            p.bump();
+            let id = parse_ident(p);
+            path += [id];
+        }
+        let hi = p.span.hi;
+        ret @spanned(lo, hi,
+                     ast::view_path_simple(first_ident,
+                                           @path, p.get_id()));
+      }
+
+      token::MOD_SEP {
+        // foo::bar or foo::{a,b,c} or foo::*
+        while p.token == token::MOD_SEP {
+            p.bump();
+
+            alt p.token {
+
+              token::IDENT(i, _) {
+                p.bump();
+                path += [p.get_str(i)];
+              }
+
+              // foo::bar::{a,b,c}
+              token::LBRACE {
+                let idents =
+                    parse_seq(token::LBRACE, token::RBRACE,
+                              seq_sep(token::COMMA),
+                              parse_path_list_ident, p).node;
+                let hi = p.span.hi;
+                ret @spanned(lo, hi,
+                             ast::view_path_list(@path, idents,
+                                                 p.get_id()));
+              }
+
+              // foo::bar::*
+              token::BINOP(token::STAR) {
+                p.bump();
+                let hi = p.span.hi;
+                ret @spanned(lo, hi,
+                             ast::view_path_glob(@path,
+                                                 p.get_id()));
+              }
+
+              _ { break; }
+            }
+        }
+      }
+      _ { }
+    }
+    // Fallthrough: a plain path, bound to the name of its last segment.
+    let hi = p.span.hi;
+    let last = path[vec::len(path) - 1u];
+    ret @spanned(lo, hi,
+                 ast::view_path_simple(last, @path,
+                                       p.get_id()));
+}
+
+// Parse one or more comma-separated view paths.
+fn parse_view_paths(p: parser) -> [@ast::view_path] {
+    let paths = [parse_view_path(p)];
+    while eat(p, token::COMMA) {
+        paths += [parse_view_path(p)];
+    }
+    ret paths;
+}
+
+// Parse a view item -- use / import / export -- terminated by ';'.
+fn parse_view_item(p: parser) -> @ast::view_item {
+    let lo = p.span.lo;
+    let the_item =
+        if eat_word(p, "use") {
+            parse_use(p)
+        } else if eat_word(p, "import") {
+            ast::view_item_import(parse_view_paths(p))
+        } else if eat_word(p, "export") {
+            ast::view_item_export(parse_view_paths(p))
+        } else {
+            // Reached only when the current token is none of the three
+            // keywords; callers are expected to check is_view_item first.
+            fail
+    };
+    // NOTE(review): this reads span.lo, not span.hi -- the item's span
+    // ends just before the ';'.  Looks deliberate, but confirm it isn't
+    // a typo for p.span.hi.
+    let hi = p.span.lo;
+    expect(p, token::SEMI);
+    ret @spanned(lo, hi, the_item);
+}
+
+// True if the current token begins a view item (use/import/export).
+fn is_view_item(p: parser) -> bool {
+    alt p.token {
+      token::IDENT(sid, false) {
+        let word = p.get_str(sid);
+        ret word == "use" || word == "import" || word == "export";
+      }
+      _ { ret false; }
+    }
+}
+
+// Parse the view items leading a module body, unless attributes already
+// committed us to an item.
+fn maybe_parse_view(
+    p: parser,
+    first_item_attrs: [ast::attribute]) -> [@ast::view_item] {
+    ret maybe_parse_view_while(p, first_item_attrs, is_view_item);
+}
+
+// Like maybe_parse_view, but accepts only "import" view items.
+fn maybe_parse_view_import_only(
+    p: parser,
+    first_item_attrs: [ast::attribute]) -> [@ast::view_item] {
+
+    maybe_parse_view_while(p, first_item_attrs, bind is_word(_, "import"))
+}
+
+// Collect view items for as long as `f` says the current token starts
+// one.  If attributes were already parsed for the first item, view items
+// may no longer appear, so the result is empty.
+fn maybe_parse_view_while(
+    p: parser,
+    first_item_attrs: [ast::attribute],
+    f: fn@(parser) -> bool) -> [@ast::view_item] {
+
+    // Shouldn't be any view items since we've already parsed an item attr
+    if vec::len(first_item_attrs) > 0u { ret []; }
+    let items = [];
+    while f(p) {
+        items += [parse_view_item(p)];
+    }
+    ret items;
+}
+
+// Parse the view items at the start of a native mod body.
+fn parse_native_view(p: parser) -> [@ast::view_item] {
+    ret maybe_parse_view_while(p, [], is_view_item);
+}
+
+// Parse a .rs file as a crate, propagating the reader's final positions
+// back into the session.
+fn parse_crate_from_source_file(input: str, cfg: ast::crate_cfg,
+                                sess: parse_sess) -> @ast::crate {
+    let p = new_parser_from_file(sess, cfg, input, SOURCE_FILE);
+    let result = parse_crate_mod(p, cfg);
+    sess.chpos = p.reader.chpos;
+    sess.byte_pos = sess.byte_pos + p.reader.pos;
+    ret result;
+}
+
+
+// Parse a single expression from an in-memory source string, propagating
+// the reader's final positions back into the session.
+fn parse_expr_from_source_str(name: str, source: @str, cfg: ast::crate_cfg,
+                              sess: parse_sess) -> @ast::expr {
+    let p = new_parser_from_source_str(sess, cfg, name, fss_none, source);
+    let result = parse_expr(p);
+    sess.chpos = p.reader.chpos;
+    sess.byte_pos = sess.byte_pos + p.reader.pos;
+    ret result;
+}
+
+// Run parse function `f` over an in-memory source string, requiring that
+// it consume the entire input; propagates the reader's final positions
+// back into the session.
+fn parse_from_source_str<T>(f: fn (p: parser) -> T,
+                            name: str, ss: codemap::file_substr,
+                            source: @str, cfg: ast::crate_cfg,
+                            sess: parse_sess)
+    -> T
+{
+    let p = new_parser_from_source_str(sess, cfg, name, ss, source);
+    let result = f(p);
+    if !p.reader.is_eof() {
+        p.reader.fatal("expected end-of-string");
+    }
+    sess.chpos = p.reader.chpos;
+    sess.byte_pos = sess.byte_pos + p.reader.pos;
+    ret result;
+}
+
+// Parse a whole crate from an in-memory source string, propagating the
+// reader's final positions back into the session.
+fn parse_crate_from_source_str(name: str, source: @str, cfg: ast::crate_cfg,
+                               sess: parse_sess) -> @ast::crate {
+    let p = new_parser_from_source_str(sess, cfg, name, fss_none, source);
+    let result = parse_crate_mod(p, cfg);
+    sess.chpos = p.reader.chpos;
+    sess.byte_pos = sess.byte_pos + p.reader.pos;
+    ret result;
+}
+
+// Parses a source module as a crate
+fn parse_crate_mod(p: parser, _cfg: ast::crate_cfg) -> @ast::crate {
+    let lo = p.span.lo;
+    let crate_attrs = parse_inner_attrs_and_next(p);
+    let first_item_outer_attrs = crate_attrs.next;
+    let m = parse_mod_items(p, token::EOF, first_item_outer_attrs);
+    ret @spanned(lo, p.span.lo,
+                 {directives: [],
+                  module: m,
+                  attrs: crate_attrs.inner,
+                  config: p.cfg});
+}
+
+// Expect a string-literal token and return its interned contents.
+fn parse_str(p: parser) -> str {
+    alt p.token {
+      token::LIT_STR(id) { p.bump(); p.get_str(id) }
+      _ {
+        p.fatal("expected string literal")
+      }
+    }
+}
+
+// Logic for parsing crate files (.rc)
+//
+// Each crate file is a sequence of directives.
+//
+// Each directive imperatively extends its environment with 0 or more items.
+
+// Parse one crate directive: "mod x;" (source-file mod),
+// "mod x { ...directives... }" (directory mod), or a view item.
+fn parse_crate_directive(p: parser, first_outer_attr: [ast::attribute]) ->
+   ast::crate_directive {
+
+    // Collect the next attributes
+    let outer_attrs = first_outer_attr + parse_outer_attributes(p);
+    // In a crate file outer attributes are only going to apply to mods
+    let expect_mod = vec::len(outer_attrs) > 0u;
+
+    let lo = p.span.lo;
+    if expect_mod || is_word(p, "mod") {
+        expect_word(p, "mod");
+        let id = parse_ident(p);
+        alt p.token {
+          // mod x = "foo.rs";
+          token::SEMI {
+            let hi = p.span.hi;
+            p.bump();
+            ret spanned(lo, hi, ast::cdir_src_mod(id, outer_attrs));
+          }
+          // mod x = "foo_dir" { ...directives... }
+          token::LBRACE {
+            p.bump();
+            let inner_attrs = parse_inner_attrs_and_next(p);
+            let mod_attrs = outer_attrs + inner_attrs.inner;
+            let next_outer_attr = inner_attrs.next;
+            let cdirs =
+                parse_crate_directives(p, token::RBRACE, next_outer_attr);
+            let hi = p.span.hi;
+            expect(p, token::RBRACE);
+            ret spanned(lo, hi,
+                        ast::cdir_dir_mod(id, cdirs, mod_attrs));
+          }
+          t { unexpected(p, t); }
+        }
+    } else if is_view_item(p) {
+        let vi = parse_view_item(p);
+        ret spanned(lo, vi.span.hi, ast::cdir_view_item(vi));
+    } else { ret p.fatal("expected crate directive"); }
+}
+
+// Parse crate directives until `term`.  `first_outer_attr` holds any
+// outer attribute already consumed for the first directive.
+fn parse_crate_directives(p: parser, term: token::token,
+                          first_outer_attr: [ast::attribute]) ->
+   [@ast::crate_directive] {
+
+    // This is pretty ugly. If we have an outer attribute then we can't accept
+    // seeing the terminator next, so if we do see it then fail the same way
+    // parse_crate_directive would
+    if vec::len(first_outer_attr) > 0u && p.token == term {
+        expect_word(p, "mod");
+    }
+
+    let cdirs: [@ast::crate_directive] = [];
+    let first_outer_attr = first_outer_attr;
+    while p.token != term {
+        let cdir = @parse_crate_directive(p, first_outer_attr);
+        cdirs += [cdir];
+        // The pre-parsed attribute only applies to the first directive.
+        first_outer_attr = [];
+    }
+    ret cdirs;
+}
+
+// Parse a .rc crate file: leading attributes, then crate directives,
+// which are then evaluated into a module by loading the files they name
+// (see eval::eval_crate_directives_to_mod).
+fn parse_crate_from_crate_file(input: str, cfg: ast::crate_cfg,
+                               sess: parse_sess) -> @ast::crate {
+    let p = new_parser_from_file(sess, cfg, input, CRATE_FILE);
+    let lo = p.span.lo;
+    // Directive-relative paths resolve against the crate file's directory.
+    let prefix = std::fs::dirname(p.reader.filemap.name);
+    let leading_attrs = parse_inner_attrs_and_next(p);
+    let crate_attrs = leading_attrs.inner;
+    let first_cdir_attr = leading_attrs.next;
+    let cdirs = parse_crate_directives(p, token::EOF, first_cdir_attr);
+    sess.chpos = p.reader.chpos;
+    sess.byte_pos = sess.byte_pos + p.reader.pos;
+    let cx =
+        @{p: p,
+          sess: sess,
+          cfg: p.cfg};
+    // The "companion module" shares the crate file's basename.
+    let (companionmod, _) = fs::splitext(fs::basename(input));
+    let (m, attrs) = eval::eval_crate_directives_to_mod(
+        cx, cdirs, prefix, option::some(companionmod));
+    let hi = p.span.hi;
+    expect(p, token::EOF);
+    ret @spanned(lo, hi,
+                 {directives: cdirs,
+                  module: m,
+                  attrs: crate_attrs + attrs,
+                  config: p.cfg});
+}
+
+// Parse a crate from a file path, dispatching on its extension:
+// ".rc" crate files vs ".rs" source files.
+fn parse_crate_from_file(input: str, cfg: ast::crate_cfg, sess: parse_sess) ->
+   @ast::crate {
+    if str::ends_with(input, ".rc") {
+        ret parse_crate_from_crate_file(input, cfg, sess);
+    }
+    if str::ends_with(input, ".rs") {
+        ret parse_crate_from_source_file(input, cfg, sess);
+    }
+    sess.span_diagnostic.handler().fatal("unknown input file type: " +
+                                         input)
+}
+
+//
+// Local Variables:
+// mode: rust
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End:
+//
diff --git a/src/rustc/syntax/parse/token.rs b/src/rustc/syntax/parse/token.rs
new file mode 100644
index 00000000000..60949f7793c
--- /dev/null
+++ b/src/rustc/syntax/parse/token.rs
@@ -0,0 +1,199 @@
+
+import util::interner;
+import lexer::reader;
+
+// Index of an interned string in the reader's string interner; tokens
+// carry these instead of owned strings (see to_str below).
+type str_num = uint;
+
+// Binary-operator tokens; used both bare (BINOP) and as the operator
+// of a compound assignment (BINOPEQ).  Source text for each variant is
+// given by binop_to_str.
+enum binop {
+    PLUS,
+    MINUS,
+    STAR,
+    SLASH,
+    PERCENT,
+    CARET,
+    AND,
+    OR,
+    LSL,     // "<<"
+    LSR,     // ">>"
+    ASR,     // ">>>"
+}
+
+// The token type produced by the lexer.  Variants carrying a str_num
+// refer to strings stored in the reader's interner rather than owning
+// the text themselves.
+enum token {
+    /* Expression-operator symbols. */
+    EQ,
+    LT,
+    LE,
+    EQEQ,
+    NE,
+    GE,
+    GT,
+    ANDAND,
+    OROR,
+    NOT,
+    TILDE,
+    BINOP(binop),
+    BINOPEQ(binop),
+
+    /* Structural symbols */
+    AT,
+    DOT,
+    ELLIPSIS,
+    COMMA,
+    SEMI,
+    COLON,
+    MOD_SEP,
+    RARROW,
+    LARROW,
+    DARROW,
+    LPAREN,
+    RPAREN,
+    LBRACKET,
+    RBRACKET,
+    LBRACE,
+    RBRACE,
+    POUND,
+    POUND_LBRACE,   // "#{"
+    POUND_LT,       // "#<"
+
+    DOLLAR_LPAREN,      // "$("
+    DOLLAR_NUM(uint),   // "$<n>"
+
+    /* Literals */
+    // LIT_INT doubles as the char-literal token when its int_ty is
+    // ast::ty_char (see the special-cased arm in to_str).
+    LIT_INT(i64, ast::int_ty),
+    LIT_UINT(u64, ast::uint_ty),
+    LIT_FLOAT(str_num, ast::float_ty),
+    LIT_STR(str_num),
+    LIT_BOOL(bool),
+
+    /* Name components */
+    // NOTE(review): the bool's meaning isn't evident from this file --
+    // confirm against the lexer before relying on it.
+    IDENT(str_num, bool),
+    IDX(int),
+    UNDERSCORE,
+    BRACEQUOTE(str_num),
+    EOF,
+
+}
+
+// Map a binary-operator token back to the source text it denotes.
+fn binop_to_str(o: binop) -> str {
+    // alt is an expression; yield the string directly from each arm.
+    ret alt o {
+      PLUS { "+" }
+      MINUS { "-" }
+      STAR { "*" }
+      SLASH { "/" }
+      PERCENT { "%" }
+      CARET { "^" }
+      AND { "&" }
+      OR { "|" }
+      LSL { "<<" }
+      LSR { ">>" }
+      ASR { ">>>" }
+    };
+}
+
+// Render token `t` back to (approximate) source text.  The reader `r`
+// supplies the interner used to resolve str_num indices carried by
+// float/string/ident tokens.
+fn to_str(r: reader, t: token) -> str {
+    alt t {
+      EQ { ret "="; }
+      LT { ret "<"; }
+      LE { ret "<="; }
+      EQEQ { ret "=="; }
+      NE { ret "!="; }
+      GE { ret ">="; }
+      GT { ret ">"; }
+      NOT { ret "!"; }
+      TILDE { ret "~"; }
+      OROR { ret "||"; }
+      ANDAND { ret "&&"; }
+      BINOP(op) { ret binop_to_str(op); }
+      BINOPEQ(op) { ret binop_to_str(op) + "="; }
+
+      /* Structural symbols */
+      AT {
+        ret "@";
+      }
+      DOT { ret "."; }
+      ELLIPSIS { ret "..."; }
+      COMMA { ret ","; }
+      SEMI { ret ";"; }
+      COLON { ret ":"; }
+      MOD_SEP { ret "::"; }
+      RARROW { ret "->"; }
+      LARROW { ret "<-"; }
+      DARROW { ret "<->"; }
+      LPAREN { ret "("; }
+      RPAREN { ret ")"; }
+      LBRACKET { ret "["; }
+      RBRACKET { ret "]"; }
+      LBRACE { ret "{"; }
+      RBRACE { ret "}"; }
+      POUND { ret "#"; }
+      POUND_LBRACE { ret "#{"; }
+      POUND_LT { ret "#<"; }
+
+      DOLLAR_LPAREN { ret "$("; }
+      DOLLAR_NUM(u) {
+        ret "$" + uint::to_str(u as uint, 10u);
+      }
+
+      /* Literals */
+      // Arms are tried in order: a char-typed LIT_INT is rendered as a
+      // character literal here, before the general LIT_INT arm below.
+      LIT_INT(c, ast::ty_char) {
+        // FIXME: escape.
+        let tmp = "'";
+        str::push_char(tmp, c as char);
+        str::push_char(tmp, '\'');
+        ret tmp;
+      }
+      LIT_INT(i, t) {
+        ret int::to_str(i as int, 10u) + ast_util::int_ty_to_str(t);
+      }
+      LIT_UINT(u, t) {
+        ret uint::to_str(u as uint, 10u) + ast_util::uint_ty_to_str(t);
+      }
+      // Float literals keep their original spelling in the interner.
+      LIT_FLOAT(s, t) {
+        ret interner::get::<str>(*r.interner, s) +
+            ast_util::float_ty_to_str(t);
+      }
+      LIT_STR(s) { // FIXME: escape.
+        ret "\"" + interner::get::<str>(*r.interner, s) + "\"";
+      }
+      LIT_BOOL(b) { if b { ret "true"; } else { ret "false"; } }
+
+      /* Name components */
+      IDENT(s, _) {
+        ret interner::get::<str>(*r.interner, s);
+      }
+      IDX(i) { ret "_" + int::to_str(i, 10u); }
+      UNDERSCORE { ret "_"; }
+      BRACEQUOTE(_) { ret "<bracequote>"; }
+      EOF { ret "<eof>"; }
+    }
+}
+
+
+// Whether token `t` may appear as the first token of an expression.
+pure fn can_begin_expr(t: token) -> bool {
+    alt t {
+      LPAREN { ret true; }
+      LBRACE { ret true; }
+      LBRACKET { ret true; }
+      IDENT(_, _) { ret true; }
+      UNDERSCORE { ret true; }
+      TILDE { ret true; }
+      LIT_INT(_, _) { ret true; }
+      LIT_UINT(_, _) { ret true; }
+      LIT_FLOAT(_, _) { ret true; }
+      LIT_STR(_) { ret true; }
+      POUND { ret true; }
+      AT { ret true; }
+      NOT { ret true; }
+      // Unary minus and deref can also start an expression.
+      BINOP(MINUS) { ret true; }
+      BINOP(STAR) { ret true; }
+      MOD_SEP { ret true; }
+      _ { ret false; }
+    }
+}
+
+// Local Variables:
+// fill-column: 78;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End: