diff options
| author | Patrick Walton <pcwalton@mimiga.net> | 2013-01-29 13:54:06 -0800 |
|---|---|---|
| committer | Patrick Walton <pcwalton@mimiga.net> | 2013-01-29 13:55:30 -0800 |
| commit | 95b892c8a723ad88e868e3914c3d29a585008ac2 (patch) | |
| tree | fba7ee4b435c020fb862bda4c932f80bdbf1caf1 /src/libsyntax/parse | |
| parent | 66b07f1e5dd5767ae6aa238a00490f1493c5d443 (diff) | |
| download | rust-95b892c8a723ad88e868e3914c3d29a585008ac2.tar.gz rust-95b892c8a723ad88e868e3914c3d29a585008ac2.zip | |
libsyntax: De-export a lot of libsyntax. rs=deëxporting
Diffstat (limited to 'src/libsyntax/parse')
| -rw-r--r-- | src/libsyntax/parse/attr.rs | 4 | ||||
| -rw-r--r-- | src/libsyntax/parse/classify.rs | 6 | ||||
| -rw-r--r-- | src/libsyntax/parse/comments.rs | 25 | ||||
| -rw-r--r-- | src/libsyntax/parse/common.rs | 16 | ||||
| -rw-r--r-- | src/libsyntax/parse/lexer.rs | 37 | ||||
| -rw-r--r-- | src/libsyntax/parse/mod.rs | 10 | ||||
| -rw-r--r-- | src/libsyntax/parse/obsolete.rs | 4 | ||||
| -rw-r--r-- | src/libsyntax/parse/parser.rs | 72 | ||||
| -rw-r--r-- | src/libsyntax/parse/prec.rs | 10 | ||||
| -rw-r--r-- | src/libsyntax/parse/token.rs | 123 |
10 files changed, 142 insertions, 165 deletions
diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index 375fefa64b4..3ed31c0953c 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -18,9 +18,7 @@ use parse::token; use core::either::{Either, Left, Right}; -export parser_attr; - -trait parser_attr { +pub trait parser_attr { fn parse_outer_attributes() -> ~[ast::attribute]; fn parse_attribute(style: ast::attr_style) -> ast::attribute; fn parse_attribute_naked(style: ast::attr_style, lo: BytePos) -> diff --git a/src/libsyntax/parse/classify.rs b/src/libsyntax/parse/classify.rs index f0cff630b11..afe6823e76d 100644 --- a/src/libsyntax/parse/classify.rs +++ b/src/libsyntax/parse/classify.rs @@ -15,7 +15,7 @@ use ast; use ast_util::operator_prec; -fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool { +pub fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool { match e.node { ast::expr_if(*) | ast::expr_match(*) @@ -28,7 +28,7 @@ fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool { } } -fn expr_is_simple_block(e: @ast::expr) -> bool { +pub fn expr_is_simple_block(e: @ast::expr) -> bool { match e.node { ast::expr_block( ast::spanned { node: ast::blk_ { rules: ast::default_blk, _ }, _ } @@ -37,7 +37,7 @@ fn expr_is_simple_block(e: @ast::expr) -> bool { } } -fn stmt_ends_with_semi(stmt: ast::stmt) -> bool { +pub fn stmt_ends_with_semi(stmt: ast::stmt) -> bool { return match stmt.node { ast::stmt_decl(d, _) => { match d.node { diff --git a/src/libsyntax/parse/comments.rs b/src/libsyntax/parse/comments.rs index fbe258852e2..26de85548e1 100644 --- a/src/libsyntax/parse/comments.rs +++ b/src/libsyntax/parse/comments.rs @@ -27,14 +27,7 @@ use core::str; use core::uint; use core::vec; -export cmnt; -export lit; -export cmnt_style; -export gather_comments_and_literals; -export is_doc_comment, doc_comment_style, strip_doc_comment_decoration; -export isolated, trailing, mixed, blank_line; - -enum cmnt_style { +pub enum cmnt_style { isolated, // No code on either side of each 
line of the comment trailing, // Code exists to the left of the comment mixed, // Code before /* foo */ and after the comment @@ -50,16 +43,16 @@ impl cmnt_style : cmp::Eq { } } -type cmnt = {style: cmnt_style, lines: ~[~str], pos: BytePos}; +pub type cmnt = {style: cmnt_style, lines: ~[~str], pos: BytePos}; -fn is_doc_comment(s: ~str) -> bool { +pub fn is_doc_comment(s: ~str) -> bool { s.starts_with(~"///") || s.starts_with(~"//!") || s.starts_with(~"/**") || s.starts_with(~"/*!") } -fn doc_comment_style(comment: ~str) -> ast::attr_style { +pub fn doc_comment_style(comment: ~str) -> ast::attr_style { assert is_doc_comment(comment); if comment.starts_with(~"//!") || comment.starts_with(~"/*!") { ast::attr_inner @@ -68,7 +61,7 @@ fn doc_comment_style(comment: ~str) -> ast::attr_style { } } -fn strip_doc_comment_decoration(comment: ~str) -> ~str { +pub fn strip_doc_comment_decoration(comment: ~str) -> ~str { /// remove whitespace-only lines from the start/end of lines fn vertical_trim(lines: ~[~str]) -> ~[~str] { @@ -306,11 +299,11 @@ fn consume_comment(rdr: string_reader, code_to_the_left: bool, debug!("<<< consume comment"); } -type lit = {lit: ~str, pos: BytePos}; +pub type lit = {lit: ~str, pos: BytePos}; -fn gather_comments_and_literals(span_diagnostic: diagnostic::span_handler, - path: ~str, - srdr: io::Reader) -> +pub fn gather_comments_and_literals(span_diagnostic: diagnostic::span_handler, + path: ~str, + srdr: io::Reader) -> {cmnts: ~[cmnt], lits: ~[lit]} { let src = @str::from_bytes(srdr.read_whole_stream()); let itr = parse::token::mk_fake_ident_interner(); diff --git a/src/libsyntax/parse/common.rs b/src/libsyntax/parse/common.rs index a7af8500f49..583ad982000 100644 --- a/src/libsyntax/parse/common.rs +++ b/src/libsyntax/parse/common.rs @@ -21,26 +21,26 @@ use core::option::{None, Option, Some}; use core::option; use std::map::HashMap; -type seq_sep = { +pub type seq_sep = { sep: Option<token::Token>, trailing_sep_allowed: bool }; -fn 
seq_sep_trailing_disallowed(t: token::Token) -> seq_sep { +pub fn seq_sep_trailing_disallowed(t: token::Token) -> seq_sep { return {sep: option::Some(t), trailing_sep_allowed: false}; } -fn seq_sep_trailing_allowed(t: token::Token) -> seq_sep { +pub fn seq_sep_trailing_allowed(t: token::Token) -> seq_sep { return {sep: option::Some(t), trailing_sep_allowed: true}; } -fn seq_sep_none() -> seq_sep { +pub fn seq_sep_none() -> seq_sep { return {sep: option::None, trailing_sep_allowed: false}; } -fn token_to_str(reader: reader, ++token: token::Token) -> ~str { +pub fn token_to_str(reader: reader, ++token: token::Token) -> ~str { token::to_str(reader.interner(), token) } -impl Parser { +pub impl Parser { fn unexpected_last(t: token::Token) -> ! { self.span_fatal( copy self.last_span, @@ -229,7 +229,7 @@ impl Parser { } fn parse_seq_lt_gt<T: Copy>(sep: Option<token::Token>, - f: fn(Parser) -> T) -> spanned<~[T]> { + f: fn(Parser) -> T) -> ast::spanned<~[T]> { let lo = self.span.lo; self.expect(token::LT); let result = self.parse_seq_to_before_gt::<T>(sep, f); @@ -277,7 +277,7 @@ impl Parser { // NB: Do not use this function unless you actually plan to place the // spanned list in the AST. 
fn parse_seq<T: Copy>(bra: token::Token, ket: token::Token, sep: seq_sep, - f: fn(Parser) -> T) -> spanned<~[T]> { + f: fn(Parser) -> T) -> ast::spanned<~[T]> { let lo = self.span.lo; self.expect(bra); let result = self.parse_seq_to_before_end::<T>(ket, sep, f); diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs index 5a0f40f3c12..c3b94182cc2 100644 --- a/src/libsyntax/parse/lexer.rs +++ b/src/libsyntax/parse/lexer.rs @@ -16,7 +16,7 @@ use codemap::{BytePos, CharPos, CodeMap, Pos, span}; use codemap; use diagnostic::span_handler; use ext::tt::transcribe::{tt_next_token}; -use ext::tt::transcribe::{tt_reader, new_tt_reader, dup_tt_reader}; +use ext::tt::transcribe::{dup_tt_reader}; use parse::token; use core::char; @@ -24,14 +24,11 @@ use core::either; use core::str; use core::u64; -use std; +pub use ext::tt::transcribe::{tt_reader, new_tt_reader}; -export reader, string_reader, new_string_reader, is_whitespace; -export tt_reader, new_tt_reader; -export nextch, is_eof, bump, get_str_from, new_low_level_string_reader; -export string_reader_as_reader, tt_reader_as_reader; +use std; -trait reader { +pub trait reader { fn is_eof() -> bool; fn next_token() -> {tok: token::Token, sp: span}; fn fatal(~str) -> !; @@ -41,7 +38,7 @@ trait reader { fn dup() -> reader; } -type string_reader = @{ +pub type string_reader = @{ span_diagnostic: span_handler, src: @~str, // The absolute offset within the codemap of the next character to read @@ -59,18 +56,18 @@ type string_reader = @{ mut peek_span: span }; -fn new_string_reader(span_diagnostic: span_handler, - filemap: @codemap::FileMap, - itr: @token::ident_interner) -> string_reader { +pub fn new_string_reader(span_diagnostic: span_handler, + filemap: @codemap::FileMap, + itr: @token::ident_interner) -> string_reader { let r = new_low_level_string_reader(span_diagnostic, filemap, itr); string_advance_token(r); /* fill in peek_* */ return r; } /* For comments.rs, which hackily pokes into 'pos' and 'curr' */ 
-fn new_low_level_string_reader(span_diagnostic: span_handler, - filemap: @codemap::FileMap, - itr: @token::ident_interner) +pub fn new_low_level_string_reader(span_diagnostic: span_handler, + filemap: @codemap::FileMap, + itr: @token::ident_interner) -> string_reader { // Force the initial reader bump to start on a fresh line let initial_char = '\n'; @@ -114,7 +111,7 @@ impl string_reader: reader { fn dup() -> reader { dup_string_reader(self) as reader } } -impl tt_reader: reader { +pub impl tt_reader: reader { fn is_eof() -> bool { self.cur_tok == token::EOF } fn next_token() -> {tok: token::Token, sp: span} { /* weird resolve bug: if the following `if`, or any of its @@ -157,7 +154,7 @@ fn byte_offset(rdr: string_reader) -> BytePos { (rdr.pos - rdr.filemap.start_pos) } -fn get_str_from(rdr: string_reader, start: BytePos) -> ~str { +pub fn get_str_from(rdr: string_reader, start: BytePos) -> ~str { unsafe { // I'm pretty skeptical about this subtraction. What if there's a // multi-byte character before the mark? 
@@ -166,7 +163,7 @@ fn get_str_from(rdr: string_reader, start: BytePos) -> ~str { } } -fn bump(rdr: string_reader) { +pub fn bump(rdr: string_reader) { rdr.last_pos = rdr.pos; let current_byte_offset = byte_offset(rdr).to_uint();; if current_byte_offset < (*rdr.src).len() { @@ -190,10 +187,10 @@ fn bump(rdr: string_reader) { rdr.curr = -1 as char; } } -fn is_eof(rdr: string_reader) -> bool { +pub fn is_eof(rdr: string_reader) -> bool { rdr.curr == -1 as char } -fn nextch(rdr: string_reader) -> char { +pub fn nextch(rdr: string_reader) -> char { let offset = byte_offset(rdr).to_uint(); if offset < (*rdr.src).len() { return str::char_at(*rdr.src, offset); @@ -211,7 +208,7 @@ fn hex_digit_val(c: char) -> int { fn bin_digit_value(c: char) -> int { if c == '0' { return 0; } return 1; } -fn is_whitespace(c: char) -> bool { +pub fn is_whitespace(c: char) -> bool { return c == ' ' || c == '\t' || c == '\r' || c == '\n'; } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index b14b60af134..f9088bfd635 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -27,32 +27,22 @@ use core::path::Path; use core::result::{Err, Ok, Result}; use core::result; -#[legacy_exports] pub mod lexer; -#[legacy_exports] pub mod parser; -#[legacy_exports] pub mod token; -#[legacy_exports] pub mod comments; -#[legacy_exports] pub mod attr; -#[legacy_exports] /// Common routines shared by parser mods -#[legacy_exports] pub mod common; /// Functions dealing with operator precedence -#[legacy_exports] pub mod prec; /// Routines the parser uses to classify AST nodes -#[legacy_exports] pub mod classify; /// Reporting obsolete syntax -#[legacy_exports] pub mod obsolete; pub type parse_sess = @{ diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs index 86dea693f8a..7dad15f9dfc 100644 --- a/src/libsyntax/parse/obsolete.rs +++ b/src/libsyntax/parse/obsolete.rs @@ -50,14 +50,14 @@ pub enum ObsoleteSyntax { ObsoleteUnenforcedBound } -impl 
ObsoleteSyntax: to_bytes::IterBytes { +pub impl ObsoleteSyntax: to_bytes::IterBytes { #[inline(always)] pure fn iter_bytes(&self, +lsb0: bool, f: to_bytes::Cb) { (*self as uint).iter_bytes(lsb0, f); } } -impl Parser { +pub impl Parser { /// Reports an obsolete syntax non-fatal error. fn obsolete(sp: span, kind: ObsoleteSyntax) { let (kind_str, desc) = match kind { diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 19a52c3550f..25fd13a5999 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -88,10 +88,6 @@ use core::vec::push; use core::vec; use std::map::HashMap; -export Parser; - -export item_or_view_item, iovi_none, iovi_view_item, iovi_item; - enum restriction { UNRESTRICTED, RESTRICT_STMT_EXPR, @@ -108,7 +104,7 @@ enum class_contents { dtor_decl(blk, ~[attribute], codemap::span), type arg_or_capture_item = Either<arg, capture_item>; type item_info = (ident, item_, Option<~[attribute]>); -enum item_or_view_item { +pub enum item_or_view_item { iovi_none, iovi_item(@item), iovi_foreign_item(@foreign_item), @@ -180,8 +176,9 @@ pure fn maybe_append(+lhs: ~[attribute], rhs: Option<~[attribute]>) /* ident is handled by common.rs */ -fn Parser(sess: parse_sess, cfg: ast::crate_cfg, - +rdr: reader) -> Parser { +pub fn Parser(sess: parse_sess, + cfg: ast::crate_cfg, + +rdr: reader) -> Parser { let tok0 = rdr.next_token(); let span0 = tok0.sp; @@ -209,7 +206,7 @@ fn Parser(sess: parse_sess, cfg: ast::crate_cfg, } } -struct Parser { +pub struct Parser { sess: parse_sess, cfg: crate_cfg, mut token: token::Token, @@ -235,7 +232,7 @@ struct Parser { drop {} /* do not copy the parser; its state is tied to outside state */ } -impl Parser { +pub impl Parser { fn bump() { self.last_span = self.span; let next = if self.buffer_start == self.buffer_end { @@ -813,7 +810,7 @@ impl Parser { self.bump(); self.lit_from_token(tok) }; - spanned { node: lit, span: mk_sp(lo, self.last_span.hi) } + ast::spanned { node: lit, span: 
mk_sp(lo, self.last_span.hi) } } fn parse_path_without_tps() -> @path { @@ -888,7 +885,7 @@ impl Parser { self.parse_seq_lt_gt(Some(token::COMMA), |p| p.parse_ty(false)) } else { - spanned {node: ~[], span: path.span} + ast::spanned {node: ~[], span: path.span} } }; @@ -930,15 +927,15 @@ impl Parser { @expr { id: self.get_id(), callee_id: self.get_id(), - node: expr_mac(spanned {node: m, span: mk_sp(lo, hi)}), + node: expr_mac(ast::spanned {node: m, span: mk_sp(lo, hi)}), span: mk_sp(lo, hi), } } fn mk_lit_u32(i: u32) -> @expr { let span = self.span; - let lv_lit = @spanned { node: lit_uint(i as u64, ty_u32), - span: span }; + let lv_lit = @ast::spanned { node: lit_uint(i as u64, ty_u32), + span: span }; @expr { id: self.get_id(), @@ -1418,7 +1415,9 @@ impl Parser { hi = e.span.hi; // HACK: turn &[...] into a &-evec ex = match e.node { - expr_vec(*) | expr_lit(@spanned {node: lit_str(_), span: _}) + expr_vec(*) | expr_lit(@ast::spanned { + node: lit_str(_), span: _ + }) if m == m_imm => { expr_vstore(e, expr_vstore_slice) } @@ -1441,7 +1440,8 @@ impl Parser { expr_vec(*) if m == m_mutbl => expr_vstore(e, expr_vstore_mut_box), expr_vec(*) if m == m_imm => expr_vstore(e, expr_vstore_box), - expr_lit(@spanned {node: lit_str(_), span: _}) if m == m_imm => + expr_lit(@ast::spanned { + node: lit_str(_), span: _}) if m == m_imm => expr_vstore(e, expr_vstore_box), _ => expr_unary(box(m), e) }; @@ -1453,7 +1453,8 @@ impl Parser { hi = e.span.hi; // HACK: turn ~[...] into a ~-evec ex = match e.node { - expr_vec(*) | expr_lit(@spanned {node: lit_str(_), span: _}) + expr_vec(*) | expr_lit(@ast::spanned { + node: lit_str(_), span: _}) if m == m_imm => expr_vstore(e, expr_vstore_uniq), _ => expr_unary(uniq(m), e) }; @@ -1808,7 +1809,7 @@ impl Parser { self.eat(token::COMMA); } - let blk = spanned { + let blk = ast::spanned { node: ast::blk_ { view_items: ~[], stmts: ~[], @@ -1957,7 +1958,7 @@ impl Parser { // HACK: parse @"..." 
as a literal of a vstore @str pat = match sub.node { pat_lit(e@@expr { - node: expr_lit(@spanned {node: lit_str(_), span: _}), _ + node: expr_lit(@ast::spanned {node: lit_str(_), span: _}), _ }) => { let vst = @expr { id: self.get_id(), @@ -1977,7 +1978,7 @@ impl Parser { // HACK: parse ~"..." as a literal of a vstore ~str pat = match sub.node { pat_lit(e@@expr { - node: expr_lit(@spanned {node: lit_str(_), span: _}), _ + node: expr_lit(@ast::spanned {node: lit_str(_), span: _}), _ }) => { let vst = @expr { id: self.get_id(), @@ -1999,7 +2000,8 @@ impl Parser { // HACK: parse &"..." as a literal of a borrowed str pat = match sub.node { pat_lit(e@@expr { - node: expr_lit(@spanned {node: lit_str(_), span: _}), _ + node: expr_lit(@ast::spanned { + node: lit_str(_), span: _}), _ }) => { let vst = @expr { id: self.get_id(), @@ -2024,7 +2026,7 @@ impl Parser { if self.token == token::RPAREN { hi = self.span.hi; self.bump(); - let lit = @spanned {node: lit_nil, span: mk_sp(lo, hi)}; + let lit = @ast::spanned {node: lit_nil, span: mk_sp(lo, hi)}; let expr = self.mk_expr(lo, hi, expr_lit(lit)); pat = pat_lit(expr); } else { @@ -2400,7 +2402,7 @@ impl Parser { match self.token { token::SEMI => { self.bump(); - stmts.push(@spanned { + stmts.push(@ast::spanned { node: stmt_semi(e, stmt_id), .. *stmt}); } @@ -2425,7 +2427,7 @@ impl Parser { match self.token { token::SEMI => { self.bump(); - stmts.push(@spanned { + stmts.push(@ast::spanned { node: stmt_mac((*m), true), .. *stmt}); } @@ -2967,10 +2969,10 @@ impl Parser { let actual_dtor = do the_dtor.map |dtor| { let (d_body, d_attrs, d_s) = *dtor; - spanned { node: ast::struct_dtor_ { id: self.get_id(), - attrs: d_attrs, - self_id: self.get_id(), - body: d_body}, + ast::spanned { node: ast::struct_dtor_ { id: self.get_id(), + attrs: d_attrs, + self_id: self.get_id(), + body: d_body}, span: d_s}}; let _ = self.get_id(); // XXX: Workaround for crazy bug. 
let new_id = self.get_id(); @@ -3472,10 +3474,10 @@ impl Parser { self.bump(); let mut actual_dtor = do the_dtor.map |dtor| { let (d_body, d_attrs, d_s) = *dtor; - spanned { node: ast::struct_dtor_ { id: self.get_id(), - attrs: d_attrs, - self_id: self.get_id(), - body: d_body }, + ast::spanned { node: ast::struct_dtor_ { id: self.get_id(), + attrs: d_attrs, + self_id: self.get_id(), + body: d_body }, span: d_s } }; @@ -3773,9 +3775,9 @@ impl Parser { _ => self.fatal(~"expected open delimiter") }; let m = ast::mac_invoc_tt(pth, tts); - let m: ast::mac = spanned { node: m, - span: mk_sp(self.span.lo, - self.span.hi) }; + let m: ast::mac = ast::spanned { node: m, + span: mk_sp(self.span.lo, + self.span.hi) }; let item_ = item_mac(m); return iovi_item(self.mk_item(lo, self.last_span.hi, id, item_, visibility, attrs)); diff --git a/src/libsyntax/parse/prec.rs b/src/libsyntax/parse/prec.rs index 10754777129..fff222876aa 100644 --- a/src/libsyntax/parse/prec.rs +++ b/src/libsyntax/parse/prec.rs @@ -8,10 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -export as_prec; -export unop_prec; -export token_to_binop; - use ast::*; use parse::token::*; use parse::token::Token; @@ -19,19 +15,19 @@ use parse::token::Token; use core::prelude::*; /// Unary operators have higher precedence than binary -const unop_prec: uint = 100u; +pub const unop_prec: uint = 100u; /** * Precedence of the `as` operator, which is a binary operator * but is not represented in the precedence table. 
*/ -const as_prec: uint = 11u; +pub const as_prec: uint = 11u; /** * Maps a token to a record specifying the corresponding binary * operator and its precedence */ -fn token_to_binop(tok: Token) -> Option<ast::binop> { +pub fn token_to_binop(tok: Token) -> Option<ast::binop> { match tok { BINOP(STAR) => Some(mul), BINOP(SLASH) => Some(div), diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index a8c0c074588..30d2489a5ee 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -25,7 +25,7 @@ use std::map::HashMap; #[auto_encode] #[auto_decode] -enum binop { +pub enum binop { PLUS, MINUS, STAR, @@ -40,7 +40,7 @@ enum binop { #[auto_encode] #[auto_decode] -enum Token { +pub enum Token { /* Expression-operator symbols. */ EQ, LT, @@ -99,7 +99,7 @@ enum Token { #[auto_encode] #[auto_decode] /// For interpolation during macro expansion. -enum nonterminal { +pub enum nonterminal { nt_item(@ast::item), nt_block(ast::blk), nt_stmt(@ast::stmt), @@ -112,7 +112,7 @@ enum nonterminal { nt_matchers(~[ast::matcher]) } -fn binop_to_str(o: binop) -> ~str { +pub fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", @@ -127,7 +127,7 @@ fn binop_to_str(o: binop) -> ~str { } } -fn to_str(in: @ident_interner, t: Token) -> ~str { +pub fn to_str(in: @ident_interner, t: Token) -> ~str { match t { EQ => ~"=", LT => ~"<", @@ -222,7 +222,7 @@ fn to_str(in: @ident_interner, t: Token) -> ~str { } } -pure fn can_begin_expr(t: Token) -> bool { +pub pure fn can_begin_expr(t: Token) -> bool { match t { LPAREN => true, LBRACE => true, @@ -254,7 +254,7 @@ pure fn can_begin_expr(t: Token) -> bool { } /// what's the opposite delimiter? 
-fn flip_delimiter(t: token::Token) -> token::Token { +pub fn flip_delimiter(t: token::Token) -> token::Token { match t { token::LPAREN => token::RPAREN, token::LBRACE => token::RBRACE, @@ -268,7 +268,7 @@ fn flip_delimiter(t: token::Token) -> token::Token { -fn is_lit(t: Token) -> bool { +pub fn is_lit(t: Token) -> bool { match t { LIT_INT(_, _) => true, LIT_UINT(_, _) => true, @@ -280,79 +280,80 @@ fn is_lit(t: Token) -> bool { } } -pure fn is_ident(t: Token) -> bool { +pub pure fn is_ident(t: Token) -> bool { match t { IDENT(_, _) => true, _ => false } } -pure fn is_ident_or_path(t: Token) -> bool { +pub pure fn is_ident_or_path(t: Token) -> bool { match t { IDENT(_, _) | INTERPOLATED(nt_path(*)) => true, _ => false } } -pure fn is_plain_ident(t: Token) -> bool { +pub pure fn is_plain_ident(t: Token) -> bool { match t { IDENT(_, false) => true, _ => false } } -pure fn is_bar(t: Token) -> bool { +pub pure fn is_bar(t: Token) -> bool { match t { BINOP(OR) | OROR => true, _ => false } } -mod special_idents { - #[legacy_exports]; +pub mod special_idents { use ast::ident; - const underscore : ident = ident { repr: 0u }; - const anon : ident = ident { repr: 1u }; - const dtor : ident = ident { repr: 2u }; // 'drop', but that's reserved - const invalid : ident = ident { repr: 3u }; // '' - const unary : ident = ident { repr: 4u }; - const not_fn : ident = ident { repr: 5u }; - const idx_fn : ident = ident { repr: 6u }; - const unary_minus_fn : ident = ident { repr: 7u }; - const clownshoes_extensions : ident = ident { repr: 8u }; - - const self_ : ident = ident { repr: 9u }; // 'self' + + pub const underscore : ident = ident { repr: 0u }; + pub const anon : ident = ident { repr: 1u }; + pub const dtor : ident = ident { repr: 2u }; // 'drop', but that's + // reserved + pub const invalid : ident = ident { repr: 3u }; // '' + pub const unary : ident = ident { repr: 4u }; + pub const not_fn : ident = ident { repr: 5u }; + pub const idx_fn : ident = ident { repr: 6u }; + 
pub const unary_minus_fn : ident = ident { repr: 7u }; + pub const clownshoes_extensions : ident = ident { repr: 8u }; + + pub const self_ : ident = ident { repr: 9u }; // 'self' /* for matcher NTs */ - const item : ident = ident { repr: 10u }; - const block : ident = ident { repr: 11u }; - const stmt : ident = ident { repr: 12u }; - const pat : ident = ident { repr: 13u }; - const expr : ident = ident { repr: 14u }; - const ty : ident = ident { repr: 15u }; - const ident : ident = ident { repr: 16u }; - const path : ident = ident { repr: 17u }; - const tt : ident = ident { repr: 18u }; - const matchers : ident = ident { repr: 19u }; - - const str : ident = ident { repr: 20u }; // for the type + pub const item : ident = ident { repr: 10u }; + pub const block : ident = ident { repr: 11u }; + pub const stmt : ident = ident { repr: 12u }; + pub const pat : ident = ident { repr: 13u }; + pub const expr : ident = ident { repr: 14u }; + pub const ty : ident = ident { repr: 15u }; + pub const ident : ident = ident { repr: 16u }; + pub const path : ident = ident { repr: 17u }; + pub const tt : ident = ident { repr: 18u }; + pub const matchers : ident = ident { repr: 19u }; + + pub const str : ident = ident { repr: 20u }; // for the type /* outside of libsyntax */ - const ty_visitor : ident = ident { repr: 21u }; - const arg : ident = ident { repr: 22u }; - const descrim : ident = ident { repr: 23u }; - const clownshoe_abi : ident = ident { repr: 24u }; - const clownshoe_stack_shim : ident = ident { repr: 25u }; - const tydesc : ident = ident { repr: 26u }; - const literally_dtor : ident = ident { repr: 27u }; - const main : ident = ident { repr: 28u }; - const opaque : ident = ident { repr: 29u }; - const blk : ident = ident { repr: 30u }; - const static : ident = ident { repr: 31u }; - const intrinsic : ident = ident { repr: 32u }; - const clownshoes_foreign_mod: ident = ident { repr: 33 }; - const unnamed_field: ident = ident { repr: 34 }; - const c_abi: ident = ident { 
repr: 35 }; - const type_self: ident = ident { repr: 36 }; // `Self` + pub const ty_visitor : ident = ident { repr: 21u }; + pub const arg : ident = ident { repr: 22u }; + pub const descrim : ident = ident { repr: 23u }; + pub const clownshoe_abi : ident = ident { repr: 24u }; + pub const clownshoe_stack_shim : ident = ident { repr: 25u }; + pub const tydesc : ident = ident { repr: 26u }; + pub const literally_dtor : ident = ident { repr: 27u }; + pub const main : ident = ident { repr: 28u }; + pub const opaque : ident = ident { repr: 29u }; + pub const blk : ident = ident { repr: 30u }; + pub const static : ident = ident { repr: 31u }; + pub const intrinsic : ident = ident { repr: 32u }; + pub const clownshoes_foreign_mod: ident = ident { repr: 33 }; + pub const unnamed_field: ident = ident { repr: 34 }; + pub const c_abi: ident = ident { repr: 35 }; + pub const type_self: ident = ident { repr: 36 }; // `Self` } -struct ident_interner { +pub struct ident_interner { priv interner: Interner<@~str>, } -impl ident_interner { +pub impl ident_interner { fn intern(val: @~str) -> ast::ident { ast::ident { repr: self.interner.intern(val) } } @@ -377,7 +378,7 @@ macro_rules! interner_key ( (-3 as uint, 0u))) ) -fn mk_ident_interner() -> @ident_interner { +pub fn mk_ident_interner() -> @ident_interner { unsafe { match task::local_data::local_data_get(interner_key!()) { Some(interner) => *interner, @@ -438,7 +439,7 @@ fn mk_ident_interner() -> @ident_interner { /* for when we don't care about the contents; doesn't interact with TLD or serialization */ -fn mk_fake_ident_interner() -> @ident_interner { +pub fn mk_fake_ident_interner() -> @ident_interner { @ident_interner { interner: interner::mk::<@~str>() } } @@ -451,7 +452,7 @@ fn mk_fake_ident_interner() -> @ident_interner { * appear as identifiers at all. Reserved keywords are not used anywhere in * the language and may not appear as identifiers. 
*/ -fn keyword_table() -> HashMap<~str, ()> { +pub fn keyword_table() -> HashMap<~str, ()> { let keywords = HashMap(); for temporary_keyword_table().each_key |word| { keywords.insert(word, ()); @@ -466,7 +467,7 @@ fn keyword_table() -> HashMap<~str, ()> { } /// Keywords that may be used as identifiers -fn temporary_keyword_table() -> HashMap<~str, ()> { +pub fn temporary_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"self", ~"static", @@ -478,7 +479,7 @@ fn temporary_keyword_table() -> HashMap<~str, ()> { } /// Full keywords. May not appear anywhere else. -fn strict_keyword_table() -> HashMap<~str, ()> { +pub fn strict_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"as", ~"assert", @@ -504,7 +505,7 @@ fn strict_keyword_table() -> HashMap<~str, ()> { words } -fn reserved_keyword_table() -> HashMap<~str, ()> { +pub fn reserved_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"be" |
