1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
|
// Earley-like parser for macros.
import parse::token;
import parse::token::{token, EOF, to_str, whole_nt};
import parse::lexer::*; //resolve bug?
//import parse::lexer::{reader, tt_reader, tt_reader_as_reader};
import parse::parser::{parser,SOURCE_FILE};
//import parse::common::parser_common;
import parse::common::*; //resolve bug?
import parse::parse_sess;
import dvec::{dvec, extensions};
import ast::{matcher, mtc_tok, mtc_rep, mtc_bb, ident};
import ast_util::mk_sp;
import std::map::{hashmap, box_str_hash};
/* This is an Earley-like parser, without support for nonterminals. This
means that there are no completer or predictor rules, and therefore no need to
store one column per token: instead, there's a set of current Earley items and
a set of next ones. Instead of NTs, we have a special case for Kleene
star. The big-O, in pathological cases, is worse than traditional Earley
parsing, but it's an easier fit for Macro-by-Example-style rules, and I think
the overhead is lower. */
/* to avoid costly uniqueness checks, we require that `mtc_rep` always has a
nonempty body. */
/* Wraps `option<matcher_pos>` in a nominal type so that `matcher_pos` can
hold a pointer to its parent position without making the type definition
infinitely recursive. */
enum matcher_pos_up { /* to break a circularity */
    matcher_pos_up(option<matcher_pos>)
}
// `true` iff this `matcher_pos_up` actually wraps a parent position.
fn is_some(&&mpu: matcher_pos_up) -> bool {
    alt mpu {
      matcher_pos_up(some(_)) { true }
      matcher_pos_up(none) { false }
    }
}
/* One Earley item: a position inside a sequence of matchers, together with
the partial matches accumulated so far and a link to the enclosing
(repetition) position, if any. */
type matcher_pos = ~{
    elts: ~[ast::matcher], // maybe should be /&? Need to understand regions.
    sep: option<token>, // separator between repetitions, if this is a seq
    mut idx: uint, // current index into `elts`
    mut up: matcher_pos_up, // mutable for swapping only
    matches: ~[dvec<@arb_depth>], // one growable result list per bound name
    sp_lo: uint, // low end of the span where this position started matching
};
// Return a copy of the parent matcher position; fails if there is none
// (callers must check `is_some` first).
fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos {
    alt mpu {
      matcher_pos_up(none) { fail }
      matcher_pos_up(some(mp)) { copy mp }
    }
}
fn count_names(ms: &[matcher]) -> uint {
vec::foldl(0u, ms, |ct, m| {
ct + alt m.node {
mtc_tok(_) { 0u }
mtc_rep(more_ms, _, _) { count_names(more_ms) }
mtc_bb(_,_,_) { 1u }
}})
}
/* Build a fresh Earley item at the start of `ms`, with no parent, empty
match lists (one per bound name), and `lo` as the starting span position. */
fn new_matcher_pos(ms: ~[matcher], sep: option<token>, lo: uint)
    -> matcher_pos {
    ~{elts: ms, sep: sep, mut idx: 0u, mut up: matcher_pos_up(none),
      matches: copy vec::from_fn(count_names(ms), |_i| dvec::dvec()),
      sp_lo: lo}
}
/* logically, an arb_depth should contain only one kind of nonterminal */
// `leaf` is a single parsed nonterminal; `seq` holds the results of one
// repetition, nesting one level deeper per enclosing `*`.
enum arb_depth { leaf(whole_nt), seq(~[@arb_depth], codemap::span) }

// An Earley item is just a position within a matcher sequence.
type earley_item = matcher_pos;
/* Turn the flat vector `res` of parse results (indexed by the `idx` stored
in each `mtc_bb`) into a map from binder name to result, walking `ms` to
recover the names. Reports a fatal error on duplicate binder names. */
fn nameize(p_s: parse_sess, ms: ~[matcher], res: ~[@arb_depth])
    -> hashmap<ident,@arb_depth> {
    // Record every binder in `m` (recursing into repetitions) into ret_val.
    fn n_rec(p_s: parse_sess, m: matcher, res: ~[@arb_depth],
             ret_val: hashmap<ident, @arb_depth>) {
        alt m {
          {node: mtc_tok(_), span: _} { } // plain tokens bind nothing
          {node: mtc_rep(more_ms, _, _), span: _} {
            for more_ms.each() |next_m| { n_rec(p_s, next_m, res, ret_val) };
          }
          {node: mtc_bb(bind_name, _, idx), span: sp} {
            if ret_val.contains_key(bind_name) {
                p_s.span_diagnostic.span_fatal(sp, "Duplicated bind name: "
                                               + *bind_name)
            }
            ret_val.insert(bind_name, res[idx]);
          }
        }
    }
    let ret_val = box_str_hash::<@arb_depth>();
    for ms.each() |m| { n_rec(p_s, m, res, ret_val) }
    ret ret_val;
}
/* Outcome of `parse`: either the full binder → match map, or the span and
message describing where matching failed. */
enum parse_result {
    success(hashmap<ident, @arb_depth>),
    failure(codemap::span, str)
}
/* Run the Earley-like matcher: try to match the token stream from `rdr`
against the matcher sequence `ms`. On success, returns the map of binder
names to matched fragments; otherwise returns a failure span and message.
Each iteration of the outer `loop` consumes (at most) one token, advancing
every live Earley item (`cur_eis`) and sorting them into: items wanting
black-box (nonterminal) parsing (`bb_eis`), items that matched the current
token (`next_eis`), and completed parses (`eof_eis`). */
fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
    -> parse_result {
    let mut cur_eis = ~[];
    vec::push(cur_eis, new_matcher_pos(ms, none, rdr.peek().sp.lo));
    loop {
        let mut bb_eis = ~[]; // black-box parsed by parser.rs
        let mut next_eis = ~[]; // or proceed normally
        let mut eof_eis = ~[]; // completed at the top level (valid at EOF)
        let {tok: tok, sp: sp} = rdr.peek();
        /* we append new items to this while we go */
        while cur_eis.len() > 0u { /* for each Earley Item */
            let mut ei = vec::pop(cur_eis);
            let idx = ei.idx;
            let len = ei.elts.len();
            /* at end of sequence */
            if idx >= len {
                // can't move out of `alt`s, so:
                if is_some(ei.up) {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)
                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position
                        let new_pos = copy_up(ei.up);
                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.
                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.
                        for ei.matches.eachi() |idx, elt| {
                            let sub = elt.get();
                            // Some subtrees don't contain the name at all
                            if sub.len() == 0u { again; }
                            new_pos.matches[idx]
                                .push(@seq(sub, mk_sp(ei.sp_lo,sp.hi)));
                        }
                        new_pos.idx += 1u;
                        vec::push(cur_eis, new_pos);
                    }
                    // can we go around again?
                    // the *_t vars are workarounds for the lack of unary move
                    alt copy ei.sep {
                      some(t) if idx == len { // we need a separator
                        if tok == t { //pass the separator
                            let ei_t <- ei;
                            ei_t.idx += 1u;
                            vec::push(next_eis, ei_t);
                        }
                      }
                      _ { // we don't need a separator
                        let ei_t <- ei;
                        ei_t.idx = 0u; // rewind to start another repetition
                        vec::push(cur_eis, ei_t);
                      }
                    }
                } else {
                    // no parent: the whole top-level matcher is consumed,
                    // so this is a complete parse (checked against EOF below)
                    vec::push(eof_eis, ei);
                }
            } else {
                alt copy ei.elts[idx].node {
                  /* need to descend into sequence */
                  mtc_rep(matchers, sep, zero_ok) {
                    if zero_ok {
                        // also try skipping the repetition entirely
                        let new_ei = copy ei;
                        new_ei.idx += 1u;
                        vec::push(cur_eis, new_ei);
                    }
                    let matches = vec::map(ei.matches, // fresh, same size:
                                           |_m| dvec::<@arb_depth>());
                    let ei_t <- ei;
                    vec::push(cur_eis, ~{
                        elts: matchers, sep: sep, mut idx: 0u,
                        mut up: matcher_pos_up(some(ei_t)),
                        matches: matches, sp_lo: sp.lo
                    });
                  }
                  mtc_bb(_,_,_) { vec::push(bb_eis, ei) }
                  mtc_tok(t) {
                    let ei_t <- ei;
                    if t == tok { ei_t.idx += 1u; vec::push(next_eis, ei_t)}
                  }
                }
            }
        }
        /* error messages here could be improved with links to orig. rules */
        if tok == EOF {
            // at EOF, exactly one completed parse means success
            if eof_eis.len() == 1u {
                ret success(
                    nameize(sess, ms,
                            vec::map(eof_eis[0u].matches, |dv| dv.pop())));
            } else if eof_eis.len() > 1u {
                ret failure(sp, "Ambiguity: multiple successful parses");
            } else {
                ret failure(sp, "Unexpected end of macro invocation");
            }
        } else {
            // nonterminal parses can't be disambiguated against each other
            // or against token matches, so more than one option is an error
            if (bb_eis.len() > 0u && next_eis.len() > 0u)
                || bb_eis.len() > 1u {
                let nts = str::connect(vec::map(bb_eis, |ei| {
                    alt ei.elts[ei.idx].node
                    { mtc_bb(_,name,_) { *name } _ { fail; } }
                }), " or ");
                ret failure(sp, #fmt[
                    "Local ambiguity: multiple parsing options: \
                     built-in NTs %s or %u other options.",
                    nts, next_eis.len()]);
            } else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
                ret failure(sp, "No rules expected the token "
                            + to_str(*rdr.interner(), tok));
            } else if (next_eis.len() > 0u) {
                /* Now process the next token */
                while(next_eis.len() > 0u) {
                    vec::push(cur_eis, vec::pop(next_eis));
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                // exactly one nonterminal wanted: hand the token stream to
                // the real Rust parser, then re-sync `rdr` past what it ate
                let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
                let ei = vec::pop(bb_eis);
                alt ei.elts[ei.idx].node {
                  mtc_bb(_, name, idx) {
                    ei.matches[idx].push(@leaf(
                        parse_nt(rust_parser, *name)));
                    ei.idx += 1u;
                  }
                  _ { fail; }
                }
                vec::push(cur_eis,ei);
                /* this would fail if zero-length tokens existed */
                while rdr.peek().sp.lo < rust_parser.span.lo {
                    rdr.next_token();
                } /* except for EOF... */
                while rust_parser.token == EOF && rdr.peek().tok != EOF {
                    rdr.next_token();
                }
            }
        }
        assert cur_eis.len() > 0u;
    }
}
/* Parse a single built-in nonterminal named `name` (e.g. "expr", "ty")
using the ordinary Rust parser `p`, wrapping the result as a `whole_nt`
token. Issues a fatal parse error if the input doesn't match. */
fn parse_nt(p: parser, name: str) -> whole_nt {
    alt name {
      "item" { alt p.parse_item(~[], ast::public) {
        some(i) { token::w_item(i) }
        none { p.fatal("expected an item keyword") }
      }}
      "block" { token::w_block(p.parse_block()) }
      "stmt" { token::w_stmt(p.parse_stmt(~[])) }
      "pat" { token::w_pat(p.parse_pat()) }
      "expr" { token::w_expr(p.parse_expr()) }
      "ty" { token::w_ty(p.parse_ty(false /* no need to disambiguate*/)) }
      // this could be handled like a token, since it is one
      "ident" { alt copy p.token {
        token::IDENT(sn,b) { p.bump(); token::w_ident(sn,b) }
        _ { p.fatal("expected ident, found "
                    + token::to_str(*p.reader.interner(), copy p.token)) }
      } }
      "path" { token::w_path(p.parse_path_with_tps(false)) }
      "tt" {
        // bump quote_depth so the parser accepts a token tree here
        p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
        let res = token::w_tt(@p.parse_token_tree());
        p.quote_depth -= 1u;
        res
      }
      "mtcs" { token::w_mtcs(p.parse_matchers()) }
      _ { p.fatal("Unsupported builtin nonterminal parser: " + name)}
    }
}
// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
|