about summary refs log tree commit diff
path: root/src/libsyntax/ext/tt
diff options
context:
space:
mode:
author	Brian Anderson <banderson@mozilla.com>	2012-08-06 12:34:08 -0700
committer	Brian Anderson <banderson@mozilla.com>	2012-08-06 15:36:30 -0700
commit	ecaf9e39c9435fa2de4fe393c4b263be36eb2d99 (patch)
tree	775f69be65adff65551d96173dd797e32e2c3157 /src/libsyntax/ext/tt
parent	d3a9bb1bd4a1d510bbaca2ab1121e4c85a239247 (diff)
downloadrust-ecaf9e39c9435fa2de4fe393c4b263be36eb2d99.tar.gz
rust-ecaf9e39c9435fa2de4fe393c4b263be36eb2d99.zip
Convert alt to match. Stop parsing alt
Diffstat (limited to 'src/libsyntax/ext/tt')
-rw-r--r--	src/libsyntax/ext/tt/earley_parser.rs	26
-rw-r--r--	src/libsyntax/ext/tt/macro_rules.rs	11
-rw-r--r--	src/libsyntax/ext/tt/transcribe.rs	24
3 files changed, 31 insertions, 30 deletions
diff --git a/src/libsyntax/ext/tt/earley_parser.rs b/src/libsyntax/ext/tt/earley_parser.rs
index 6a801f33aa6..f1c7ebb7dad 100644
--- a/src/libsyntax/ext/tt/earley_parser.rs
+++ b/src/libsyntax/ext/tt/earley_parser.rs
@@ -31,7 +31,7 @@ enum matcher_pos_up { /* to break a circularity */
 }
 
 fn is_some(&&mpu: matcher_pos_up) -> bool {
-    alt mpu {
+    match mpu {
       matcher_pos_up(none) => false,
       _ => true
     }
@@ -48,7 +48,7 @@ type matcher_pos = ~{
 };
 
 fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos {
-    alt mpu {
+    match mpu {
       matcher_pos_up(some(mp)) => copy mp,
       _ => fail
     }
@@ -56,7 +56,7 @@ fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos {
 
 fn count_names(ms: &[matcher]) -> uint {
     vec::foldl(0u, ms, |ct, m| {
-        ct + alt m.node {
+        ct + match m.node {
           match_tok(_) => 0u,
           match_seq(more_ms, _, _, _, _) => count_names(more_ms),
           match_nonterminal(_,_,_) => 1u
@@ -68,7 +68,7 @@ fn initial_matcher_pos(ms: ~[matcher], sep: option<token>, lo: uint)
     -> matcher_pos {
     let mut match_idx_hi = 0u;
     for ms.each() |elt| {
-        alt elt.node {
+        match elt.node {
           match_tok(_) => (),
           match_seq(_,_,_,_,hi) => {
             match_idx_hi = hi;       // it is monotonic...
@@ -113,7 +113,7 @@ fn nameize(p_s: parse_sess, ms: ~[matcher], res: ~[@named_match])
     -> hashmap<ident,@named_match> {
     fn n_rec(p_s: parse_sess, m: matcher, res: ~[@named_match],
              ret_val: hashmap<ident, @named_match>) {
-        alt m {
+        match m {
           {node: match_tok(_), span: _} => (),
           {node: match_seq(more_ms, _, _, _, _), span: _} => {
             for more_ms.each() |next_m| { n_rec(p_s, next_m, res, ret_val) };
@@ -139,7 +139,7 @@ enum parse_result {
 
 fn parse_or_else(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader,
                  ms: ~[matcher]) -> hashmap<ident, @named_match> {
-    alt parse(sess, cfg, rdr, ms) {
+    match parse(sess, cfg, rdr, ms) {
       success(m) => m,
       failure(sp, str) => sess.span_diagnostic.span_fatal(sp, str)
     }
@@ -202,7 +202,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                     // can we go around again?
 
                     // the *_t vars are workarounds for the lack of unary move
-                    alt copy ei.sep {
+                    match copy ei.sep {
                       some(t) if idx == len => { // we need a separator
                         if tok == t { //pass the separator
                             let ei_t <- ei;
@@ -220,7 +220,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                     vec::push(eof_eis, ei);
                 }
             } else {
-                alt copy ei.elts[idx].node {
+                match copy ei.elts[idx].node {
                   /* need to descend into sequence */
                   match_seq(matchers, sep, zero_ok,
                             match_idx_lo, match_idx_hi) => {
@@ -270,7 +270,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
             if (bb_eis.len() > 0u && next_eis.len() > 0u)
                 || bb_eis.len() > 1u {
                 let nts = str::connect(vec::map(bb_eis, |ei| {
-                    alt ei.elts[ei.idx].node {
+                    match ei.elts[ei.idx].node {
                       match_nonterminal(bind,name,_) => {
                         fmt!{"%s ('%s')", *name, *bind}
                       }
@@ -293,7 +293,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                 let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
 
                 let ei = vec::pop(bb_eis);
-                alt ei.elts[ei.idx].node {
+                match ei.elts[ei.idx].node {
                   match_nonterminal(_, name, idx) => {
                     ei.matches[idx].push(@matched_nonterminal(
                         parse_nt(rust_parser, *name)));
@@ -318,8 +318,8 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
 }
 
 fn parse_nt(p: parser, name: ~str) -> nonterminal {
-    alt name {
-      ~"item" => alt p.parse_item(~[]) {
+    match name {
+      ~"item" => match p.parse_item(~[]) {
         some(i) => token::nt_item(i),
         none => p.fatal(~"expected an item keyword")
       }
@@ -329,7 +329,7 @@ fn parse_nt(p: parser, name: ~str) -> nonterminal {
       ~"expr" => token::nt_expr(p.parse_expr()),
       ~"ty" => token::nt_ty(p.parse_ty(false /* no need to disambiguate*/)),
       // this could be handled like a token, since it is one
-      ~"ident" => alt copy p.token {
+      ~"ident" => match copy p.token {
         token::IDENT(sn,b) => { p.bump(); token::nt_ident(sn,b) }
         _ => p.fatal(~"expected ident, found "
                      + token::to_str(*p.reader.interner(), copy p.token))
diff --git a/src/libsyntax/ext/tt/macro_rules.rs b/src/libsyntax/ext/tt/macro_rules.rs
index b4fc1f5c484..a870928d50b 100644
--- a/src/libsyntax/ext/tt/macro_rules.rs
+++ b/src/libsyntax/ext/tt/macro_rules.rs
@@ -37,11 +37,11 @@ fn add_new_extension(cx: ext_ctxt, sp: span, name: ident,
                                      arg_reader as reader, argument_gram);
 
     // Extract the arguments:
-    let lhses:~[@named_match] = alt argument_map.get(@~"lhs") {
+    let lhses:~[@named_match] = match argument_map.get(@~"lhs") {
       @matched_seq(s, sp) => s,
       _ => cx.span_bug(sp, ~"wrong-structured lhs")
     };
-    let rhses:~[@named_match] = alt argument_map.get(@~"rhs") {
+    let rhses:~[@named_match] = match argument_map.get(@~"rhs") {
       @matched_seq(s, sp) => s,
       _ => cx.span_bug(sp, ~"wrong-structured rhs")
     };
@@ -58,13 +58,14 @@ fn add_new_extension(cx: ext_ctxt, sp: span, name: ident,
         let itr = cx.parse_sess().interner;
 
         for lhses.eachi() |i, lhs| { // try each arm's matchers
-            alt lhs {
+            match lhs {
               @matched_nonterminal(nt_matchers(mtcs)) => {
                 // `none` is because we're not interpolating
                 let arg_rdr = new_tt_reader(s_d, itr, none, arg) as reader;
-                alt parse(cx.parse_sess(), cx.cfg(), arg_rdr, mtcs) {
+                match parse(cx.parse_sess(), cx.cfg(), arg_rdr, mtcs) {
                   success(named_matches) => {
-                    let rhs = alt rhses[i] { // okay, what's your transcriber?
+                    let rhs = match rhses[i] {
+                        // okay, what's your transcriber?
                       @matched_nonterminal(nt_tt(@tt)) => tt,
                       _ => cx.span_bug(sp, ~"bad thing in rhs")
                     };
diff --git a/src/libsyntax/ext/tt/transcribe.rs b/src/libsyntax/ext/tt/transcribe.rs
index c704fd351ec..693b538ec6d 100644
--- a/src/libsyntax/ext/tt/transcribe.rs
+++ b/src/libsyntax/ext/tt/transcribe.rs
@@ -46,7 +46,7 @@ fn new_tt_reader(sp_diag: span_handler, itr: @interner<@~str>,
     let r = @{sp_diag: sp_diag, interner: itr,
               mut cur: @{readme: src, mut idx: 0u, dotdotdoted: false,
                          sep: none, up: tt_frame_up(option::none)},
-              interpolations: alt interp { /* just a convienience */
+              interpolations: match interp { /* just a convienience */
                 none => std::map::box_str_hash::<@named_match>(),
                 some(x) => x
               },
@@ -61,7 +61,7 @@ fn new_tt_reader(sp_diag: span_handler, itr: @interner<@~str>,
 
 pure fn dup_tt_frame(&&f: tt_frame) -> tt_frame {
     @{readme: f.readme, mut idx: f.idx, dotdotdoted: f.dotdotdoted,
-      sep: f.sep, up: alt f.up {
+      sep: f.sep, up: match f.up {
         tt_frame_up(some(up_frame)) => {
           tt_frame_up(some(dup_tt_frame(up_frame)))
         }
@@ -82,7 +82,7 @@ pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
 pure fn lookup_cur_matched_by_matched(r: tt_reader,
                                       start: @named_match) -> @named_match {
     pure fn red(&&ad: @named_match, &&idx: uint) -> @named_match {
-        alt *ad {
+        match *ad {
           matched_nonterminal(_) => {
             // end of the line; duplicate henceforth
             ad
@@ -102,10 +102,10 @@ enum lis {
 
 fn lockstep_iter_size(&&t: token_tree, &&r: tt_reader) -> lis {
     fn lis_merge(lhs: lis, rhs: lis) -> lis {
-        alt lhs {
+        match lhs {
           lis_unconstrained => rhs,
           lis_contradiction(_) => lhs,
-          lis_constraint(l_len, l_id) => alt rhs {
+          lis_constraint(l_len, l_id) => match rhs {
             lis_unconstrained => lhs,
             lis_contradiction(_) => rhs,
             lis_constraint(r_len, _) if l_len == r_len => lhs,
@@ -117,13 +117,13 @@ fn lockstep_iter_size(&&t: token_tree, &&r: tt_reader) -> lis {
           }
         }
     }
-    alt t {
+    match t {
       tt_delim(tts) | tt_seq(_, tts, _, _) => {
         vec::foldl(lis_unconstrained, tts, {|lis, tt|
             lis_merge(lis, lockstep_iter_size(tt, r)) })
       }
       tt_tok(*) => lis_unconstrained,
-      tt_nonterminal(_, name) => alt *lookup_cur_matched(r, name) {
+      tt_nonterminal(_, name) => match *lookup_cur_matched(r, name) {
         matched_nonterminal(_) => lis_unconstrained,
         matched_seq(ads, _) => lis_constraint(ads.len(), name)
       }
@@ -138,7 +138,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
         if ! r.cur.dotdotdoted
             || r.repeat_idx.last() == r.repeat_len.last() - 1 {
 
-            alt r.cur.up {
+            match r.cur.up {
               tt_frame_up(none) => {
                 r.cur_tok = EOF;
                 return ret_val;
@@ -156,7 +156,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
         } else { /* repeat */
             r.cur.idx = 0u;
             r.repeat_idx[r.repeat_idx.len() - 1u] += 1u;
-            alt r.cur.sep {
+            match r.cur.sep {
               some(tk) => {
                 r.cur_tok = tk; /* repeat same span, I guess */
                 return ret_val;
@@ -167,7 +167,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
     }
     loop { /* because it's easiest, this handles `tt_delim` not starting
     with a `tt_tok`, even though it won't happen */
-        alt r.cur.readme[r.cur.idx] {
+        match r.cur.readme[r.cur.idx] {
           tt_delim(tts) => {
             r.cur = @{readme: tts, mut idx: 0u, dotdotdoted: false,
                       sep: none, up: tt_frame_up(option::some(r.cur)) };
@@ -179,7 +179,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
             return ret_val;
           }
           tt_seq(sp, tts, sep, zerok) => {
-            alt lockstep_iter_size(tt_seq(sp, tts, sep, zerok), r) {
+            match lockstep_iter_size(tt_seq(sp, tts, sep, zerok), r) {
               lis_unconstrained => {
                 r.sp_diag.span_fatal(
                     sp, /* blame macro writer */
@@ -212,7 +212,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
           }
           // FIXME #2887: think about span stuff here
           tt_nonterminal(sp, ident) => {
-            alt *lookup_cur_matched(r, ident) {
+            match *lookup_cur_matched(r, ident) {
               /* sidestep the interpolation tricks for ident because
               (a) idents can be in lots of places, so it'd be a pain
               (b) we actually can, since it's a token. */