diff options
Diffstat (limited to 'src')
614 files changed, 8485 insertions, 7508 deletions
diff --git a/src/cargo/cargo.rs b/src/cargo/cargo.rs index 6f89e0a8fdf..8cb2b0cc21b 100644 --- a/src/cargo/cargo.rs +++ b/src/cargo/cargo.rs @@ -21,15 +21,15 @@ type package = { method: str, description: str, ref: option<str>, - tags: [str]/~, - versions: [(str, str)]/~ + tags: ~[str], + versions: ~[(str, str)] }; type local_package = { name: str, metaname: str, version: str, - files: [str]/~ + files: ~[str] }; type source = @{ @@ -38,7 +38,7 @@ type source = @{ mut method: str, mut key: option<str>, mut keyfp: option<str>, - mut packages: [mut package]/~ + mut packages: ~[mut package] }; type cargo = { @@ -62,21 +62,21 @@ type crate = { desc: option<str>, sigs: option<str>, crate_type: option<str>, - deps: [str]/~ + deps: ~[str] }; type options = { test: bool, mode: mode, - free: [str]/~, + free: ~[str], help: bool, }; enum mode { system_mode, user_mode, local_mode } -fn opts() -> [getopts::opt]/~ { - [optflag("g"), optflag("G"), optflag("test"), - optflag("h"), optflag("help")]/~ +fn opts() -> ~[getopts::opt] { + ~[optflag("g"), optflag("G"), optflag("test"), + optflag("h"), optflag("help")] } fn info(msg: str) { @@ -117,7 +117,7 @@ fn is_uuid(id: str) -> bool { let parts = str::split_str(id, "-"); if vec::len(parts) == 5u { let mut correct = 0u; - for vec::eachi(parts) { |i, part| + for vec::eachi(parts) |i, part| { fn is_hex_digit(ch: char) -> bool { ('0' <= ch && ch <= '9') || ('a' <= ch && ch <= 'f') || @@ -216,13 +216,13 @@ fn assume_source_method(url: str) -> str { "curl" } -fn load_link(mis: [@ast::meta_item]/~) -> (option<str>, +fn load_link(mis: ~[@ast::meta_item]) -> (option<str>, option<str>, option<str>) { let mut name = none; let mut vers = none; let mut uuid = none; - for mis.each {|a| + for mis.each |a| { alt a.node { ast::meta_name_value(v, {node: ast::lit_str(s), span: _}) { alt *v { @@ -240,7 +240,7 @@ fn load_link(mis: [@ast::meta_item]/~) -> (option<str>, fn load_crate(filename: str) -> option<crate> { let sess = parse::new_parse_sess(none); - let c = parse::parse_crate_from_crate_file(filename, []/~, sess); + let c = parse::parse_crate_from_crate_file(filename, ~[], sess); let mut name = none; let mut vers = none; @@ -249,7 +249,7 @@ fn load_crate(filename: str) -> option<crate> { let mut sigs = none; let mut crate_type = none; - for c.node.attrs.each {|a| + for c.node.attrs.each |a| { alt a.node.value.node { ast::meta_name_value(v, {node: ast::lit_str(s), span: _}) { alt *v { @@ -275,7 +275,7 @@ fn load_crate(filename: str) -> option<crate> { } type env = @{ - mut deps: [str]/~ + mut deps: ~[str] }; fn goto_view_item(e: env, i: @ast::view_item) { @@ -283,7 +283,7 @@ fn load_crate(filename: str) -> option<crate> { ast::view_item_use(ident, metas, id) { let name_items = attr::find_meta_items_by_name(metas, "name"); let m = if name_items.is_empty() { - metas + [attr::mk_name_value_item_str(@"name", *ident)]/~ + metas + ~[attr::mk_name_value_item_str(@"name", *ident)] } else { metas }; @@ -291,7 +291,7 @@ fn load_crate(filename: str) -> option<crate> { let mut attr_vers = ""; let mut attr_from = ""; - for m.each { |item| + for m.each |item| { alt attr::get_meta_item_value_str(item) { some(value) { let name = attr::get_meta_item_name(item); @@ -326,11 +326,11 @@ fn load_crate(filename: str) -> option<crate> { } let e = @{ - mut deps: []/~ + mut deps: ~[] }; let v = visit::mk_simple_visitor(@{ - visit_view_item: {|a|goto_view_item(e, a)}, - visit_item: {|a|goto_item(e, a)}, + visit_view_item: |a| goto_view_item(e, a), + visit_item: |a| goto_item(e, a), with *visit::default_simple_visitor() }); @@ -424,7 +424,7 @@ fn parse_source(name: str, j: json::json) -> source { mut method: method, mut key: key, mut keyfp: keyfp, - mut packages: [mut]/~ }; + mut packages: ~[mut] }; } _ { fail "needed dict value in source"; } }; @@ -435,7 +435,7 @@ fn try_parse_sources(filename: str, sources: map::hashmap<str, source>) { let c = io::read_whole_file_str(filename); alt json::from_str(result::get(c)) { ok(json::dict(j)) { - for j.each { |k, v| + for j.each |k, v| { sources.insert(k, parse_source(k, v)); #debug("source: %s", k); } @@ -498,10 +498,10 @@ fn load_one_source_package(src: source, p: map::hashmap<str, json::json>) { _ { none } }; - let mut tags = []/~; + let mut tags = ~[]; alt p.find("tags") { some(json::list(js)) { - for (*js).each {|j| + for (*js).each |j| { alt j { json::string(j) { vec::grow(tags, 1u, *j); } _ { } @@ -528,10 +528,10 @@ fn load_one_source_package(src: source, p: map::hashmap<str, json::json>) { description: description, ref: ref, tags: tags, - versions: []/~ + versions: ~[] }; - alt src.packages.position({ |pkg| pkg.uuid == uuid }) { + alt src.packages.position(|pkg| pkg.uuid == uuid ) { some(idx) { src.packages[idx] = newpkg; log(debug, " updated package: " + src.name + "/" + name); @@ -573,7 +573,7 @@ fn load_source_packages(c: cargo, src: source) { let pkgstr = io::read_whole_file_str(pkgfile); alt json::from_str(result::get(pkgstr)) { ok(json::list(js)) { - for (*js).each {|j| + for (*js).each |j| { alt j { json::dict(p) { load_one_source_package(src, p); @@ -595,7 +595,7 @@ fn load_source_packages(c: cargo, src: source) { }; } -fn build_cargo_options(argv: [str]/~) -> options { +fn build_cargo_options(argv: ~[str]) -> options { let match = alt getopts::getopts(argv, opts()) { result::ok(m) { m } result::err(f) { @@ -667,7 +667,7 @@ fn configure(opts: options) -> cargo { need_dir(c.libdir); need_dir(c.bindir); - for sources.each_key { |k| + for sources.each_key |k| { let mut s = sources.get(k); load_source_packages(c, s); sources.insert(k, s); @@ -685,11 +685,11 @@ fn configure(opts: options) -> cargo { } fn for_each_package(c: cargo, b: fn(source, package)) { - for c.sources.each_value {|v| + for c.sources.each_value |v| { // FIXME (#2280): this temporary shouldn't be // necessary, but seems to be, for borrowing. let pks = copy v.packages; - for vec::each(pks) {|p| + for vec::each(pks) |p| { b(v, p); } } @@ -698,20 +698,20 @@ fn for_each_package(c: cargo, b: fn(source, package)) { // Runs all programs in directory <buildpath> fn run_programs(buildpath: str) { let newv = os::list_dir_path(buildpath); - for newv.each {|ct| - run::run_program(ct, []/~); + for newv.each |ct| { + run::run_program(ct, ~[]); } } // Runs rustc in <path + subdir> with the given flags // and returns <path + subdir> fn run_in_buildpath(what: str, path: str, subdir: str, cf: str, - extra_flags: [str]/~) -> option<str> { + extra_flags: ~[str]) -> option<str> { let buildpath = path::connect(path, subdir); need_dir(buildpath); #debug("%s: %s -> %s", what, cf, buildpath); let p = run::program_output(rustc_sysroot(), - ["--out-dir", buildpath, cf]/~ + extra_flags); + ~["--out-dir", buildpath, cf] + extra_flags); if p.status != 0 { error(#fmt["rustc failed: %d\n%s\n%s", p.status, p.err, p.out]); ret none; @@ -721,7 +721,7 @@ fn run_in_buildpath(what: str, path: str, subdir: str, cf: str, fn test_one_crate(_c: cargo, path: str, cf: str) { let buildpath = alt run_in_buildpath("testing", path, "/test", cf, - [ "--test"]/~) { + ~[ "--test"]) { none { ret; } some(bp) { bp } }; @@ -730,13 +730,13 @@ fn test_one_crate(_c: cargo, path: str, cf: str) { fn install_one_crate(c: cargo, path: str, cf: str) { let buildpath = alt run_in_buildpath("installing", path, - "/build", cf, []/~) { + "/build", cf, ~[]) { none { ret; } some(bp) { bp } }; let newv = os::list_dir_path(buildpath); let exec_suffix = os::exe_suffix(); - for newv.each {|ct| + for newv.each |ct| { if (exec_suffix != "" && str::ends_with(ct, exec_suffix)) || (exec_suffix == "" && !str::starts_with(path::basename(ct), "lib")) { @@ -758,7 +758,7 @@ fn install_one_crate(c: cargo, path: str, cf: str) { fn rustc_sysroot() -> str { alt os::self_exe_path() { some(path) { - let path = [path, "..", "bin", "rustc"]/~; + let path = ~[path, "..", "bin", "rustc"]; check vec::is_not_empty(path); let rustc = path::normalize(path::connect_many(path)); #debug(" rustc: %s", rustc); @@ -772,8 +772,8 @@ fn install_source(c: cargo, path: str) { #debug("source: %s", path); os::change_dir(path); - let mut cratefiles = []/~; - for os::walk_dir(".") {|p| + let mut cratefiles = ~[]; + for os::walk_dir(".") |p| { if str::ends_with(p, ".rc") { vec::push(cratefiles, p); } @@ -783,11 +783,11 @@ fn install_source(c: cargo, path: str) { fail "this doesn't look like a rust package (no .rc files)"; } - for cratefiles.each {|cf| + for cratefiles.each |cf| { alt load_crate(cf) { none { cont; } some(crate) { - for crate.deps.each { |query| + for crate.deps.each |query| { // TODO: handle cyclic dependencies let wd_base = c.workdir + path::path_sep(); @@ -811,11 +811,11 @@ fn install_source(c: cargo, path: str) { } fn install_git(c: cargo, wd: str, url: str, ref: option<str>) { - run::program_output("git", ["clone", url, wd]/~); + run::program_output("git", ~["clone", url, wd]); if option::is_some(ref) { let r = option::get(ref); os::change_dir(wd); - run::run_program("git", ["checkout", r]/~); + run::run_program("git", ~["checkout", r]); } install_source(c, wd); @@ -823,19 +823,19 @@ fn install_git(c: cargo, wd: str, url: str, ref: option<str>) { fn install_curl(c: cargo, wd: str, url: str) { let tarpath = path::connect(wd, "pkg.tar"); - let p = run::program_output("curl", ["-f", "-s", "-o", - tarpath, url]/~); + let p = run::program_output("curl", ~["-f", "-s", "-o", + tarpath, url]); if p.status != 0 { fail #fmt["fetch of %s failed: %s", url, p.err]; } - run::run_program("tar", ["-x", "--strip-components=1", - "-C", wd, "-f", tarpath]/~); + run::run_program("tar", ~["-x", "--strip-components=1", + "-C", wd, "-f", tarpath]); install_source(c, wd); } fn install_file(c: cargo, wd: str, path: str) { - run::program_output("tar", ["-x", "--strip-components=1", - "-C", wd, "-f", path]/~); + run::program_output("tar", ~["-x", "--strip-components=1", + "-C", wd, "-f", path]); install_source(c, wd); } @@ -868,8 +868,8 @@ fn cargo_suggestion(c: cargo, fallback: fn()) } fn install_uuid(c: cargo, wd: str, uuid: str) { - let mut ps = []/~; - for_each_package(c, { |s, p| + let mut ps = ~[]; + for_each_package(c, |s, p| { if p.uuid == uuid { vec::grow(ps, 1u, (s.name, copy p)); } @@ -879,21 +879,21 @@ fn install_uuid(c: cargo, wd: str, uuid: str) { install_package(c, sname, wd, p); ret; } else if vec::len(ps) == 0u { - cargo_suggestion(c, { || + cargo_suggestion(c, || { error("can't find package: " + uuid); }); ret; } error("found multiple packages:"); - for ps.each {|elt| + for ps.each |elt| { let (sname,p) = copy elt; info(" " + sname + "/" + p.uuid + " (" + p.name + ")"); } } fn install_named(c: cargo, wd: str, name: str) { - let mut ps = []/~; - for_each_package(c, { |s, p| + let mut ps = ~[]; + for_each_package(c, |s, p| { if p.name == name { vec::grow(ps, 1u, (s.name, copy p)); } @@ -903,13 +903,13 @@ fn install_named(c: cargo, wd: str, name: str) { install_package(c, sname, wd, p); ret; } else if vec::len(ps) == 0u { - cargo_suggestion(c, { || + cargo_suggestion(c, || { error("can't find package: " + name); }); ret; } error("found multiple packages:"); - for ps.each {|elt| + for ps.each |elt| { let (sname,p) = copy elt; info(" " + sname + "/" + p.uuid + " (" + p.name + ")"); } @@ -919,7 +919,7 @@ fn install_uuid_specific(c: cargo, wd: str, src: str, uuid: str) { alt c.sources.find(src) { some(s) { let packages = copy s.packages; - if vec::any(packages, { |p| + if vec::any(packages, |p| { if p.uuid == uuid { install_package(c, src, wd, p); true @@ -935,7 +935,7 @@ fn install_named_specific(c: cargo, wd: str, src: str, name: str) { alt c.sources.find(src) { some(s) { let packages = copy s.packages; - if vec::any(packages, { |p| + if vec::any(packages, |p| { if p.name == name { install_package(c, src, wd, p); true @@ -962,7 +962,7 @@ fn cmd_uninstall(c: cargo) { // cache instead of looking for it (binaries can be uninstalled by // name only) if is_uuid(target) { - for os::list_dir(lib).each { |file| + for os::list_dir(lib).each |file| { alt str::find_str(file, "-" + target + "-") { some(idx) { let full = path::normalize(path::connect(lib, file)); @@ -979,7 +979,7 @@ fn cmd_uninstall(c: cargo) { error("can't find package with uuid: " + target); } else { - for os::list_dir(lib).each { |file| + for os::list_dir(lib).each |file| { alt str::find_str(file, "lib" + target + "-") { some(idx) { let full = path::normalize(path::connect(lib, @@ -994,7 +994,7 @@ fn cmd_uninstall(c: cargo) { none { cont; } } } - for os::list_dir(bin).each { |file| + for os::list_dir(bin).each |file| { alt str::find_str(file, target) { some(idx) { let full = path::normalize(path::connect(bin, file)); @@ -1065,7 +1065,7 @@ fn install_query(c: cargo, wd: str, target: str) { // a bit of a hack. It should be cleaned up in the future. if target == c.current_install { - for c.dep_cache.each { |k, _v| + for c.dep_cache.each |k, _v| { c.dep_cache.remove(k); } @@ -1082,7 +1082,7 @@ fn cmd_install(c: cargo) unsafe { if vec::len(c.opts.free) == 2u { let cwd = os::getcwd(); - let status = run::run_program("cp", ["-R", cwd, wd]/~); + let status = run::run_program("cp", ~["-R", cwd, wd]); if status != 0 { fail #fmt("could not copy directory: %s", cwd); @@ -1101,7 +1101,7 @@ fn cmd_install(c: cargo) unsafe { } fn sync(c: cargo) { - for c.sources.each_key { |k| + for c.sources.each_key |k| { let mut s = c.sources.get(k); sync_one(c, s); c.sources.insert(k, s); @@ -1134,8 +1134,8 @@ fn sync_one_file(c: cargo, dir: str, src: source) -> bool { alt copy src.key { some(u) { - let p = run::program_output("curl", ["-f", "-s", "-o", keyfile, - u]/~); + let p = run::program_output("curl", ~["-f", "-s", "-o", keyfile, + u]); if p.status != 0 { error(#fmt["fetch for source %s (key %s) failed", name, u]); ret false; @@ -1208,8 +1208,8 @@ fn sync_one_git(c: cargo, dir: str, src: source) -> bool { msg(name, insecure); } else { - let p = run::program_output("git", ["reset", "--hard", - "HEAD@{1}"]/~); + let p = run::program_output("git", ~["reset", "--hard", + "HEAD@{1}"]); if p.status != 0 { msg(name, insecure); @@ -1218,7 +1218,7 @@ fn sync_one_git(c: cargo, dir: str, src: source) -> bool { } if !os::path_exists(path::connect(dir, ".git")) { - let p = run::program_output("git", ["clone", url, dir]/~); + let p = run::program_output("git", ~["clone", url, dir]); if p.status != 0 { error(#fmt["fetch for source %s (url %s) failed", name, url]); @@ -1231,7 +1231,7 @@ fn sync_one_git(c: cargo, dir: str, src: source) -> bool { ret false; } - let p = run::program_output("git", ["pull"]/~); + let p = run::program_output("git", ~["pull"]); if p.status != 0 { error(#fmt["fetch for source %s (url %s) failed", name, url]); @@ -1243,8 +1243,8 @@ fn sync_one_git(c: cargo, dir: str, src: source) -> bool { alt copy src.key { some(u) { - let p = run::program_output("curl", ["-f", "-s", "-o", keyfile, - u]/~); + let p = run::program_output("curl", ~["-f", "-s", "-o", keyfile, + u]); if p.status != 0 { error(#fmt["fetch for source %s (key %s) failed", name, u]); rollback(name, dir, false); @@ -1303,7 +1303,7 @@ fn sync_one_curl(c: cargo, dir: str, src: source) -> bool { url += "/packages.json"; } - let p = run::program_output("curl", ["-f", "-s", "-o", pkgfile, url]/~); + let p = run::program_output("curl", ~["-f", "-s", "-o", pkgfile, url]); if p.status != 0 { error(#fmt["fetch for source %s (url %s) failed", name, url]); @@ -1312,7 +1312,7 @@ fn sync_one_curl(c: cargo, dir: str, src: source) -> bool { if smart { url = src.url + "/source.json"; let p = - run::program_output("curl", ["-f", "-s", "-o", srcfile, url]/~); + run::program_output("curl", ~["-f", "-s", "-o", srcfile, url]); if p.status == 0 { has_src_file = true; @@ -1321,8 +1321,8 @@ fn sync_one_curl(c: cargo, dir: str, src: source) -> bool { alt copy src.key { some(u) { - let p = run::program_output("curl", ["-f", "-s", "-o", keyfile, - u]/~); + let p = run::program_output("curl", ~["-f", "-s", "-o", keyfile, + u]); if p.status != 0 { error(#fmt["fetch for source %s (key %s) failed", name, u]); ret false; @@ -1340,8 +1340,8 @@ fn sync_one_curl(c: cargo, dir: str, src: source) -> bool { url = src.url + ".sig"; } - let mut p = run::program_output("curl", ["-f", "-s", "-o", - sigfile, url]/~); + let mut p = run::program_output("curl", ~["-f", "-s", "-o", + sigfile, url]); if p.status != 0 { error(#fmt["fetch for source %s (sig %s) failed", name, url]); ret false; @@ -1358,8 +1358,8 @@ fn sync_one_curl(c: cargo, dir: str, src: source) -> bool { if smart && has_src_file { url = src.url + "/source.json.sig"; - p = run::program_output("curl", ["-f", "-s", "-o", srcsigfile, - url]/~); + p = run::program_output("curl", + ~["-f", "-s", "-o", srcsigfile, url]); if p.status != 0 { error(#fmt["fetch for source %s (sig %s) failed", name, url]); @@ -1424,14 +1424,14 @@ fn cmd_init(c: cargo) { let destsrcfile = path::connect(c.root, "sources.json"); let p = - run::program_output("curl", ["-f", "-s", "-o", srcfile, srcurl]/~); + run::program_output("curl", ~["-f", "-s", "-o", srcfile, srcurl]); if p.status != 0 { error(#fmt["fetch of sources.json failed: %s", p.out]); ret; } let p = - run::program_output("curl", ["-f", "-s", "-o", sigfile, sigurl]/~); + run::program_output("curl", ~["-f", "-s", "-o", sigfile, sigurl]); if p.status != 0 { error(#fmt["fetch of sources.json.sig failed: %s", p.out]); ret; @@ -1464,15 +1464,13 @@ fn print_pkg(s: source, p: package) { fn print_source(s: source) { info(s.name + " (" + s.url + ")"); - let pks = sort::merge_sort({ |a, b| - a < b - }, copy s.packages); + let pks = sort::merge_sort(|a, b| a < b, copy s.packages); let l = vec::len(pks); - print(io::with_str_writer() { |writer| + print(io::with_str_writer(|writer| { let mut list = " >> "; - vec::iteri(pks) { |i, pk| + do vec::iteri(pks) |i, pk| { if str::len(list) > 78u { writer.write_line(list); list = " >> "; @@ -1481,14 +1479,14 @@ fn print_source(s: source) { } writer.write_line(list); - }); + })); } fn cmd_list(c: cargo) { sync(c); if vec::len(c.opts.free) >= 3u { - vec::iter_between(c.opts.free, 2u, vec::len(c.opts.free)) { |name| + do vec::iter_between(c.opts.free, 2u, vec::len(c.opts.free)) |name| { if !valid_pkg_name(name) { error(#fmt("'%s' is an invalid source name", name)); } else { @@ -1503,7 +1501,7 @@ fn cmd_list(c: cargo) { } } } else { - for c.sources.each_value { |v| + for c.sources.each_value |v| { print_source(v); } } @@ -1520,9 +1518,9 @@ fn cmd_search(c: cargo) { let mut n = 0; let name = c.opts.free[2]; let tags = vec::slice(c.opts.free, 3u, vec::len(c.opts.free)); - for_each_package(c, { |s, p| + for_each_package(c, |s, p| { if (str::contains(p.name, name) || name == "*") && - vec::all(tags, { |t| vec::contains(p.tags, t) }) { + vec::all(tags, |t| vec::contains(p.tags, t) ) { print_pkg(s, p); n += 1; } @@ -1533,7 +1531,7 @@ fn cmd_search(c: cargo) { fn install_to_dir(srcfile: str, destdir: str) { let newfile = path::connect(destdir, path::basename(srcfile)); - let status = run::run_program("cp", ["-r", srcfile, newfile]/~); + let status = run::run_program("cp", ~["-r", srcfile, newfile]); if status == 0 { info(#fmt["installed: '%s'", newfile]); } else { @@ -1569,7 +1567,7 @@ fn dump_sources(c: cargo) { let hash = map::str_hash(); let root = json::dict(hash); - for c.sources.each { |k, v| + for c.sources.each |k, v| { let chash = map::str_hash(); let child = json::dict(chash); @@ -1608,7 +1606,7 @@ fn copy_warn(srcfile: str, destfile: str) { fn cmd_sources(c: cargo) { if vec::len(c.opts.free) < 3u { - for c.sources.each_value { |v| + for c.sources.each_value |v| { info(#fmt("%s (%s) via %s", copy v.name, copy v.url, copy v.method)); } @@ -1619,7 +1617,7 @@ fn cmd_sources(c: cargo) { alt action { "clear" { - for c.sources.each_key { |k| + for c.sources.each_key |k| { c.sources.remove(k); } @@ -1650,7 +1648,7 @@ fn cmd_sources(c: cargo) { mut method: assume_source_method(url), mut key: none, mut keyfp: none, - mut packages: [mut]/~ + mut packages: ~[mut] }); info(#fmt("added source: %s", name)); } @@ -1868,7 +1866,7 @@ Commands: set-method Change the method for a source."); } -fn main(argv: [str]/~) { +fn main(argv: ~[str]) { let o = build_cargo_options(argv); if vec::len(o.free) < 2u { diff --git a/src/cargo/pgp.rs b/src/cargo/pgp.rs index 8c0eb7f6ba2..9078e943cf2 100644 --- a/src/cargo/pgp.rs +++ b/src/cargo/pgp.rs @@ -1,4 +1,4 @@ -fn gpg(args: [str]/~) -> { status: int, out: str, err: str } { +fn gpg(args: ~[str]) -> { status: int, out: str, err: str } { ret run::program_output("gpg", args); } @@ -59,7 +59,7 @@ fn signing_key_fp() -> str { } fn supported() -> bool { - let r = gpg(["--version"]/~); + let r = gpg(~["--version"]); r.status == 0 } @@ -67,7 +67,7 @@ fn init(root: str) { let p = path::connect(root, "gpg"); if !os::path_is_dir(p) { os::make_dir(p, 0x1c0i32); - let p = run::start_program("gpg", ["--homedir", p, "--import"]/~); + let p = run::start_program("gpg", ~["--homedir", p, "--import"]); p.input().write_str(signing_key()); let s = p.finish(); if s != 0 { @@ -79,7 +79,7 @@ fn init(root: str) { fn add(root: str, key: str) { let path = path::connect(root, "gpg"); let p = - run::program_output("gpg", ["--homedir", path, "--import", key]/~); + run::program_output("gpg", ~["--homedir", path, "--import", key]); if p.status != 0 { fail "pgp add failed: " + p.out; } @@ -87,10 +87,10 @@ fn add(root: str, key: str) { fn verify(root: str, data: str, sig: str, keyfp: str) -> bool { let path = path::connect(root, "gpg"); - let p = gpg(["--homedir", path, "--with-fingerprint", "--verify", sig, - data]/~); + let p = gpg(~["--homedir", path, "--with-fingerprint", "--verify", sig, + data]); let res = "Primary key fingerprint: " + keyfp; - for str::split_char(p.err, '\n').each {|line| + for str::split_char(p.err, '\n').each |line| { if line == res { ret true; } } ret false; diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 07bb35dc292..359ca0c5bcc 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -21,22 +21,23 @@ import common::mode_pretty; import common::mode; import util::logv; -fn main(args: [str]/~) { +fn main(args: ~[str]) { let config = parse_config(args); log_config(config); run_tests(config); } -fn parse_config(args: [str]/~) -> config { +fn parse_config(args: ~[str]) -> config { let opts = - [getopts::reqopt("compile-lib-path"), getopts::reqopt("run-lib-path"), - getopts::reqopt("rustc-path"), getopts::reqopt("src-base"), - getopts::reqopt("build-base"), getopts::reqopt("aux-base"), - getopts::reqopt("stage-id"), - getopts::reqopt("mode"), getopts::optflag("ignored"), - getopts::optopt("runtool"), getopts::optopt("rustcflags"), - getopts::optflag("verbose"), - getopts::optopt("logfile")]/~; + ~[getopts::reqopt("compile-lib-path"), + getopts::reqopt("run-lib-path"), + getopts::reqopt("rustc-path"), getopts::reqopt("src-base"), + getopts::reqopt("build-base"), getopts::reqopt("aux-base"), + getopts::reqopt("stage-id"), + getopts::reqopt("mode"), getopts::optflag("ignored"), + getopts::optopt("runtool"), getopts::optopt("rustcflags"), + getopts::optflag("verbose"), + getopts::optopt("logfile")]; check (vec::is_not_empty(args)); let args_ = vec::tail(args); @@ -132,10 +133,10 @@ fn test_opts(config: config) -> test::test_opts { } } -fn make_tests(config: config) -> [test::test_desc]/~ { +fn make_tests(config: config) -> ~[test::test_desc] { #debug("making tests from %s", config.src_base); - let mut tests = []/~; - for os::list_dir_path(config.src_base).each {|file| + let mut tests = ~[]; + for os::list_dir_path(config.src_base).each |file| { let file = file; #debug("inspecting file %s", file); if is_test(config, file) { @@ -148,17 +149,17 @@ fn make_tests(config: config) -> [test::test_desc]/~ { fn is_test(config: config, testfile: str) -> bool { // Pretty-printer does not work with .rc files yet let valid_extensions = - alt config.mode { mode_pretty { [".rs"]/~ } _ { [".rc", ".rs"]/~ } }; - let invalid_prefixes = [".", "#", "~"]/~; + alt config.mode { mode_pretty { ~[".rs"] } _ { ~[".rc", ".rs"] } }; + let invalid_prefixes = ~[".", "#", "~"]; let name = path::basename(testfile); let mut valid = false; - for valid_extensions.each {|ext| + for valid_extensions.each |ext| { if str::ends_with(name, ext) { valid = true; } } - for invalid_prefixes.each {|pre| + for invalid_prefixes.each |pre| { if str::starts_with(name, pre) { valid = false; } } @@ -180,9 +181,7 @@ fn make_test_name(config: config, testfile: str) -> str { } fn make_test_closure(config: config, testfile: str) -> test::test_fn { - ret {|| - runtest::run(config, copy testfile); - }; + fn~() { runtest::run(config, copy testfile) } } // Local Variables: diff --git a/src/compiletest/errors.rs b/src/compiletest/errors.rs index 83ce87b61e7..3ccedc60921 100644 --- a/src/compiletest/errors.rs +++ b/src/compiletest/errors.rs @@ -8,8 +8,8 @@ export expected_error; type expected_error = { line: uint, kind: str, msg: str }; // Load any test directives embedded in the file -fn load_errors(testfile: str) -> [expected_error]/~ { - let mut error_patterns = []/~; +fn load_errors(testfile: str) -> ~[expected_error] { + let mut error_patterns = ~[]; let rdr = result::get(io::file_reader(testfile)); let mut line_num = 1u; while !rdr.eof() { @@ -20,11 +20,11 @@ fn load_errors(testfile: str) -> [expected_error]/~ { ret error_patterns; } -fn parse_expected(line_num: uint, line: str) -> [expected_error]/~ unsafe { +fn parse_expected(line_num: uint, line: str) -> ~[expected_error] unsafe { let error_tag = "//~"; let mut idx; alt str::find_str(line, error_tag) { - option::none { ret []/~; } + option::none { ret ~[]; } option::some(nn) { idx = (nn as uint) + str::len(error_tag); } } @@ -49,5 +49,5 @@ fn parse_expected(line_num: uint, line: str) -> [expected_error]/~ unsafe { #debug("line=%u kind=%s msg=%s", line_num - adjust_line, kind, msg); - ret [{line: line_num - adjust_line, kind: kind, msg: msg}]/~; + ret ~[{line: line_num - adjust_line, kind: kind, msg: msg}]; } diff --git a/src/compiletest/header.rs b/src/compiletest/header.rs index 7d286211acb..53f3876d9f6 100644 --- a/src/compiletest/header.rs +++ b/src/compiletest/header.rs @@ -10,26 +10,26 @@ export is_test_ignored; type test_props = { // Lines that should be expected, in order, on standard out - error_patterns: [str]/~, + error_patterns: ~[str], // Extra flags to pass to the compiler compile_flags: option<str>, // If present, the name of a file that this test should match when // pretty-printed pp_exact: option<str>, // Modules from aux directory that should be compiled - aux_builds: [str]/~, + aux_builds: ~[str], // Environment settings to use during execution - exec_env: [(str,str)]/~ + exec_env: ~[(str,str)] }; // Load any test directives embedded in the file fn load_props(testfile: str) -> test_props { - let mut error_patterns = []/~; - let mut aux_builds = []/~; - let mut exec_env = []/~; + let mut error_patterns = ~[]; + let mut aux_builds = ~[]; + let mut exec_env = ~[]; let mut compile_flags = option::none; let mut pp_exact = option::none; - for iter_header(testfile) {|ln| + for iter_header(testfile) |ln| { alt parse_error_pattern(ln) { option::some(ep) { vec::push(error_patterns, ep) } option::none { } @@ -43,11 +43,11 @@ fn load_props(testfile: str) -> test_props { pp_exact = parse_pp_exact(ln, testfile); } - option::iter(parse_aux_build(ln)) {|ab| + do option::iter(parse_aux_build(ln)) |ab| { vec::push(aux_builds, ab); } - option::iter(parse_exec_env(ln)) {|ee| + do option::iter(parse_exec_env(ln)) |ee| { vec::push(exec_env, ee); } }; @@ -62,7 +62,7 @@ fn load_props(testfile: str) -> test_props { fn is_test_ignored(config: config, testfile: str) -> bool { let mut found = false; - for iter_header(testfile) {|ln| + for iter_header(testfile) |ln| { if parse_name_directive(ln, "xfail-test") { ret true; } if parse_name_directive(ln, xfail_target()) { ret true; } if config.mode == common::mode_pretty && @@ -104,7 +104,7 @@ fn parse_compile_flags(line: str) -> option<str> { } fn parse_exec_env(line: str) -> option<(str, str)> { - parse_name_value_directive(line, "exec-env").map {|nv| + do parse_name_value_directive(line, "exec-env").map |nv| { // nv is either FOO or FOO=BAR let strs = str::splitn_char(nv, '=', 1u); alt strs.len() { diff --git a/src/compiletest/procsrv.rs b/src/compiletest/procsrv.rs index 374ad1cda08..d2e8b019bfd 100644 --- a/src/compiletest/procsrv.rs +++ b/src/compiletest/procsrv.rs @@ -5,7 +5,7 @@ import libc::{c_int, pid_t}; export run; #[cfg(target_os = "win32")] -fn target_env(lib_path: str, prog: str) -> [(str,str)]/~ { +fn target_env(lib_path: str, prog: str) -> ~[(str,str)] { let mut env = os::env(); @@ -13,7 +13,7 @@ fn target_env(lib_path: str, prog: str) -> [(str,str)]/~ { assert prog.ends_with(".exe"); let aux_path = prog.slice(0u, prog.len() - 4u) + ".libaux"; - env = vec::map(env) {|pair| + env = do vec::map(env) |pair| { let (k,v) = pair; if k == "PATH" { ("PATH", v + ";" + lib_path + ";" + aux_path) } else { (k,v) } @@ -27,16 +27,16 @@ fn target_env(lib_path: str, prog: str) -> [(str,str)]/~ { #[cfg(target_os = "linux")] #[cfg(target_os = "macos")] #[cfg(target_os = "freebsd")] -fn target_env(_lib_path: str, _prog: str) -> [(str,str)]/~ { - []/~ +fn target_env(_lib_path: str, _prog: str) -> ~[(str,str)] { + ~[] } // FIXME (#2659): This code is duplicated in core::run::program_output fn run(lib_path: str, prog: str, - args: [str]/~, - env: [(str, str)]/~, + args: ~[str], + env: ~[(str, str)], input: option<str>) -> {status: int, out: str, err: str} { let pipe_in = os::pipe(); @@ -60,14 +60,14 @@ fn run(lib_path: str, writeclose(pipe_in.out, input); let p = comm::port(); let ch = comm::chan(p); - task::spawn_sched(task::single_threaded) {|| + do task::spawn_sched(task::single_threaded) || { let errput = readclose(pipe_err.in); comm::send(ch, (2, errput)); - }; - task::spawn_sched(task::single_threaded) {|| + } + do task::spawn_sched(task::single_threaded) || { let output = readclose(pipe_out.in); comm::send(ch, (1, output)); - }; + } let status = run::waitpid(pid); let mut errs = ""; let mut outs = ""; diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs index 7d33843c0d3..9c131f18564 100644 --- a/src/compiletest/runtest.rs +++ b/src/compiletest/runtest.rs @@ -92,7 +92,7 @@ fn run_pretty_test(config: config, props: test_props, testfile: str) { let rounds = alt props.pp_exact { option::some(_) { 1 } option::none { 2 } }; - let mut srcs = [result::get(io::read_whole_file_str(testfile))]/~; + let mut srcs = ~[result::get(io::read_whole_file_str(testfile))]; let mut round = 0; while round < rounds { @@ -139,12 +139,12 @@ fn run_pretty_test(config: config, props: test_props, testfile: str) { fn print_source(config: config, testfile: str, src: str) -> procres { compose_and_run(config, testfile, make_pp_args(config, testfile), - []/~, config.compile_lib_path, option::some(src)) + ~[], config.compile_lib_path, option::some(src)) } fn make_pp_args(config: config, _testfile: str) -> procargs { let prog = config.rustc_path; - let args = ["-", "--pretty", "normal"]/~; + let args = ~["-", "--pretty", "normal"]; ret {prog: prog, args: args}; } @@ -178,8 +178,8 @@ actual:\n\ fn make_typecheck_args(config: config, testfile: str) -> procargs { let prog = config.rustc_path; - let mut args = ["-", "--no-trans", "--lib", "-L", config.build_base, - "-L", aux_output_dir_name(config, testfile)]/~; + let mut args = ~["-", "--no-trans", "--lib", "-L", config.build_base, + "-L", aux_output_dir_name(config, testfile)]; args += split_maybe_args(config.rustcflags); ret {prog: prog, args: args}; } @@ -199,7 +199,7 @@ fn check_error_patterns(props: test_props, let mut next_err_idx = 0u; let mut next_err_pat = props.error_patterns[next_err_idx]; let mut done = false; - for str::split_char(procres.stderr, '\n').each {|line| + for str::split_char(procres.stderr, '\n').each |line| { if str::contains(line, next_err_pat) { #debug("found error pattern %s", next_err_pat); next_err_idx += 1u; @@ -220,14 +220,14 @@ fn check_error_patterns(props: test_props, fatal_procres(#fmt["error pattern '%s' not found!", missing_patterns[0]], procres); } else { - for missing_patterns.each {|pattern| + for missing_patterns.each |pattern| { error(#fmt["error pattern '%s' not found!", pattern]); } fatal_procres("multiple error patterns not found", procres); } } -fn check_expected_errors(expected_errors: [errors::expected_error]/~, +fn check_expected_errors(expected_errors: ~[errors::expected_error], testfile: str, procres: procres) { @@ -239,7 +239,7 @@ fn check_expected_errors(expected_errors: [errors::expected_error]/~, fatal("process did not return an error status"); } - let prefixes = vec::map(expected_errors, {|ee| + let prefixes = vec::map(expected_errors, |ee| { #fmt("%s:%u:", testfile, ee.line) }); @@ -249,9 +249,9 @@ fn check_expected_errors(expected_errors: [errors::expected_error]/~, // filename:line1:col1: line2:col2: *warning:* msg // where line1:col1: is the starting point, line2:col2: // is the ending point, and * represents ANSI color codes. - for str::split_char(procres.stderr, '\n').each {|line| + for str::split_char(procres.stderr, '\n').each |line| { let mut was_expected = false; - for vec::eachi(expected_errors) {|i, ee| + for vec::eachi(expected_errors) |i, ee| { if !found_flags[i] { #debug["prefix=%s ee.kind=%s ee.msg=%s line=%s", prefixes[i], ee.kind, ee.msg, line]; @@ -277,7 +277,7 @@ fn check_expected_errors(expected_errors: [errors::expected_error]/~, } } - for uint::range(0u, vec::len(found_flags)) {|i| + for uint::range(0u, vec::len(found_flags)) |i| { if !found_flags[i] { let ee = expected_errors[i]; fatal_procres(#fmt["expected %s on line %u not found: %s", @@ -286,13 +286,13 @@ fn check_expected_errors(expected_errors: [errors::expected_error]/~, } } -type procargs = {prog: str, args: [str]/~}; +type procargs = {prog: str, args: ~[str]}; type procres = {status: int, stdout: str, stderr: str, cmdline: str}; fn compile_test(config: config, props: test_props, testfile: str) -> procres { - let link_args = ["-L", aux_output_dir_name(config, testfile)]/~; + let link_args = ~["-L", aux_output_dir_name(config, testfile)]; compose_and_run_compiler( config, props, testfile, make_compile_args(config, props, link_args, @@ -319,14 +319,14 @@ fn compose_and_run_compiler( ensure_dir(aux_output_dir_name(config, testfile)); } - let extra_link_args = ["-L", aux_output_dir_name(config, testfile)]/~; + let extra_link_args = ~["-L", aux_output_dir_name(config, testfile)]; - vec::iter(props.aux_builds) {|rel_ab| + do vec::iter(props.aux_builds) |rel_ab| { let abs_ab = path::connect(config.aux_base, rel_ab); let aux_args = - make_compile_args(config, props, ["--lib"]/~ + extra_link_args, - {|a,b|make_lib_name(a, b, testfile)}, abs_ab); - let auxres = compose_and_run(config, abs_ab, aux_args, []/~, + make_compile_args(config, props, ~["--lib"] + extra_link_args, + |a,b| make_lib_name(a, b, testfile), abs_ab); + let auxres = compose_and_run(config, abs_ab, aux_args, ~[], config.compile_lib_path, option::none); if auxres.status != 0 { fatal_procres( @@ -335,7 +335,7 @@ fn compose_and_run_compiler( } } - compose_and_run(config, testfile, args, []/~, + compose_and_run(config, testfile, args, ~[], config.compile_lib_path, input) } @@ -348,19 +348,19 @@ fn ensure_dir(path: path) { fn compose_and_run(config: config, testfile: str, procargs: procargs, - procenv: [(str, str)]/~, + procenv: ~[(str, str)], lib_path: str, input: option<str>) -> procres { ret program_output(config, testfile, lib_path, procargs.prog, procargs.args, procenv, input); } -fn make_compile_args(config: config, props: test_props, extras: [str]/~, +fn make_compile_args(config: config, props: test_props, extras: ~[str], xform: fn(config, str) -> str, testfile: str) -> procargs { let prog = config.rustc_path; - let mut args = [testfile, "-o", xform(config, testfile), - "-L", config.build_base]/~ + extras; + let mut args = ~[testfile, "-o", xform(config, testfile), + "-L", config.build_base] + extras; args += split_maybe_args(config.rustcflags); args += split_maybe_args(props.compile_flags); ret {prog: prog, args: args}; @@ -390,12 +390,12 @@ fn make_run_args(config: config, _props: test_props, testfile: str) -> split_maybe_args(runtool) }; - let args = toolargs + [make_exe_name(config, testfile)]/~; + let args = toolargs + ~[make_exe_name(config, testfile)]; ret {prog: args[0], args: vec::slice(args, 1u, vec::len(args))}; } -fn split_maybe_args(argstr: option<str>) -> [str]/~ { - fn rm_whitespace(v: [str]/~) -> [str]/~ { +fn split_maybe_args(argstr: option<str>) -> ~[str] { + fn rm_whitespace(v: ~[str]) -> ~[str] { fn flt(&&s: str) -> option<str> { if !str::is_whitespace(s) { option::some(s) } else { option::none } } @@ -404,12 +404,12 @@ fn split_maybe_args(argstr: option<str>) -> [str]/~ { alt argstr { option::some(s) { rm_whitespace(str::split_char(s, ' ')) } - option::none { []/~ } + option::none { ~[] } } } fn program_output(config: config, testfile: str, lib_path: str, prog: str, - args: [str]/~, env: [(str, str)]/~, + args: ~[str], env: ~[(str, str)], input: option<str>) -> procres { let cmdline = { @@ -429,12 +429,12 @@ fn program_output(config: config, testfile: str, lib_path: str, prog: str, #[cfg(target_os = "linux")] #[cfg(target_os = "macos")] #[cfg(target_os = "freebsd")] -fn make_cmdline(_libpath: str, prog: str, args: [str]/~) -> str { +fn make_cmdline(_libpath: str, prog: str, args: ~[str]) -> str { #fmt["%s %s", prog, str::connect(args, " ")] } #[cfg(target_os = "win32")] -fn make_cmdline(libpath: str, prog: str, args: [str]/~) -> str { +fn make_cmdline(libpath: str, prog: str, args: ~[str]) -> str { #fmt["%s %s %s", lib_path_cmd_prefix(libpath), prog, str::connect(args, " ")] } @@ -454,7 +454,7 @@ fn dump_output(config: config, testfile: str, out: str, err: str) { fn dump_output_file(config: config, testfile: str, out: str, extension: str) { let outfile = make_out_name(config, testfile, extension); let writer = result::get( - io::file_writer(outfile, [io::create, io::truncate]/~)); + io::file_writer(outfile, ~[io::create, io::truncate])); writer.write_str(out); } diff --git a/src/etc/combine-tests.py b/src/etc/combine-tests.py index 6810e6fec4d..64d09b1266c 100755 --- a/src/etc/combine-tests.py +++ b/src/etc/combine-tests.py @@ -34,6 +34,8 @@ for t in os.listdir(run_pass): take_args[t] = True if "main(args: [str])" in s: take_args[t] = True + if "main(args: ~[str])" in s: + take_args[t] = True f.close() stage2_tests.sort() diff --git a/src/etc/emacs/rust-mode.el b/src/etc/emacs/rust-mode.el index e8240ae7faf..86e5f867cba 100644 --- a/src/etc/emacs/rust-mode.el +++ b/src/etc/emacs/rust-mode.el @@ -59,7 +59,7 @@ (dolist (word '("assert" "break" "check" "claim" "cont" "copy" - "drop" + "do" "drop" "else" "export" "extern" "fail" "for" "if" "import" diff --git a/src/etc/vim/ftdetect/rust.vim b/src/etc/vim/ftdetect/rust.vim index b2fb35911a9..10b616277c8 100644 --- a/src/etc/vim/ftdetect/rust.vim +++ b/src/etc/vim/ftdetect/rust.vim @@ -1 +1 @@ -au BufRead,BufNewFile *.rs set filetype=rust +au BufRead,BufNewFile *.rs,*.rc set filetype=rust diff --git a/src/etc/vim/syntax/rust.vim b/src/etc/vim/syntax/rust.vim index 47c69463d60..be20020549c 100644 --- a/src/etc/vim/syntax/rust.vim +++ b/src/etc/vim/syntax/rust.vim @@ -3,21 +3,17 @@ " Maintainer: Patrick Walton <pcwalton@mozilla.com> " Last Change: 2010 Oct 13 -" Quit when a syntax file was already loaded -if !exists("main_syntax") - if version < 600 - syntax clear - elseif exists("b:current_syntax") - finish - endif - " we define it here so that included files can test for it - let main_syntax='rust' +if version < 600 + syntax clear +elseif exists("b:current_syntax") + finish endif syn keyword rustAssert assert +syn match rustAssert "assert\(\w\)*" syn keyword rustKeyword alt as break syn keyword rustKeyword check claim cont const copy else export extern fail -syn keyword rustKeyword for if impl import in let log +syn keyword rustKeyword do for if impl import in let log syn keyword rustKeyword loop mod mut of pure syn keyword rustKeyword ret self to unchecked syn match rustKeyword "unsafe" " Allows also matching unsafe::foo() @@ -38,15 +34,15 @@ syn keyword rustType f64 i8 i16 i32 i64 str syn keyword rustBoolean true false syn keyword rustConstant some none " option -" syn keyword rustConstant left right " either -" syn keyword rustConstant ok err " result -" syn keyword rustConstant success failure " task +syn keyword rustConstant left right " either +syn keyword rustConstant ok err " result +syn keyword rustConstant success failure " task " syn keyword rustConstant cons nil " list " syn keyword rustConstant empty node " tree " If foo::bar changes to foo.bar, change this ("::" to "\."). " If foo::bar changes to Foo::bar, change this (first "\w" to "\u"). -syn match rustModPath "\w\(\w\)*::"he=e-2,me=e-2 +syn match rustModPath "\w\(\w\)*::[^<]"he=e-3,me=e-3 syn match rustModPathSep "::" syn region rustString start=+L\="+ skip=+\\\\\|\\"+ end=+"+ diff --git a/src/fuzzer/ast_match.rs b/src/fuzzer/ast_match.rs index 3eee008d376..2b995501769 100644 --- a/src/fuzzer/ast_match.rs +++ b/src/fuzzer/ast_match.rs @@ -1,7 +1,7 @@ use std; import vec; -fn vec_equal<T>(v: [T]/~, u: [T]/~, +fn vec_equal<T>(v: ~[T], u: ~[T], element_equality_test: fn@(&&T, &&T) -> bool) -> bool { let Lv = vec::len(v); @@ -20,11 +20,11 @@ pure fn builtin_equal_int(&&a: int, &&b: int) -> bool { ret a == b; } fn main() { assert (builtin_equal(5, 5)); assert (!builtin_equal(5, 4)); - assert (!vec_equal([5, 5]/~, [5]/~, bind builtin_equal(_, _))); - assert (!vec_equal([5, 5]/~, [5]/~, builtin_equal_int)); - assert (!vec_equal([5, 5]/~, [5, 4]/~, builtin_equal_int)); - assert (!vec_equal([5, 5]/~, [4, 5]/~, builtin_equal_int)); - assert (vec_equal([5, 5]/~, [5, 5]/~, builtin_equal_int)); + assert (!vec_equal(~[5, 5], ~[5], bind builtin_equal(_, _))); + assert (!vec_equal(~[5, 5], ~[5], builtin_equal_int)); + assert (!vec_equal(~[5, 5], ~[5, 4], builtin_equal_int)); + assert (!vec_equal(~[5, 5], ~[4, 5], builtin_equal_int)); + assert (vec_equal(~[5, 5], ~[5, 5], builtin_equal_int)); #error("Pass"); } diff --git a/src/fuzzer/cycles.rs b/src/fuzzer/cycles.rs index 6a456d39d70..9ccef3227fc 100644 --- a/src/fuzzer/cycles.rs +++ b/src/fuzzer/cycles.rs @@ -8,7 +8,7 @@ fn under(r : rand::rng, n : uint) -> uint { } // random choice from a vec -fn choice<T: copy>(r : rand::rng, v : [const T]/~) -> T { +fn choice<T: copy>(r : rand::rng, v : ~[const T]) -> T { assert vec::len(v) != 0u; v[under(r, vec::len(v))] } @@ -32,8 +32,8 @@ type pointy = { mut f : fn@()->(), mut g : fn~()->(), - mut m : [maybe_pointy]/~, - mut n : [mut maybe_pointy]/~, + mut m : ~[maybe_pointy], + mut n : ~[mut maybe_pointy], mut o : {x : int, y : maybe_pointy} }; // To add: objects; ifaces; anything type-parameterized? @@ -47,8 +47,8 @@ fn empty_pointy() -> @pointy { mut f : fn@()->(){}, mut g : fn~()->(){}, - mut m : []/~, - mut n : [mut]/~, + mut m : ~[], + mut n : ~[mut], mut o : {x : 0, y : none} } } @@ -58,7 +58,7 @@ fn nop<T>(_x: T) { } fn test_cycles(r : rand::rng, k: uint, n: uint) { - let v : [mut @pointy]/~ = [mut]/~; + let v : ~[mut @pointy] = ~[mut]; // Create a graph with no edges range(0u, vlen) {|_i| diff --git a/src/fuzzer/fuzzer.rs b/src/fuzzer/fuzzer.rs index 72f7ac493b8..d052e0739de 100644 --- a/src/fuzzer/fuzzer.rs +++ b/src/fuzzer/fuzzer.rs @@ -10,7 +10,7 @@ type context = { mode: test_mode }; // + rng fn write_file(filename: str, content: str) { result::get( - io::file_writer(filename, [io::create, io::truncate]/~)) + io::file_writer(filename, ~[io::create, io::truncate])) .write_str(content); } @@ -18,21 +18,21 @@ fn contains(haystack: str, needle: str) -> bool { str::contains(haystack, needle) } -fn find_rust_files(&files: [str]/~, path: str) { +fn find_rust_files(&files: ~[str], path: str) { if str::ends_with(path, ".rs") && !contains(path, "utf8") { // ignoring "utf8" tests because something is broken - files += [path]/~; + files += ~[path]; } else if os::path_is_dir(path) && !contains(path, "compile-fail") && !contains(path, "build") { - for os::list_dir_path(path).each {|p| + for os::list_dir_path(path).each |p| { find_rust_files(files, p); } } } -fn common_exprs() -> [ast::expr]/~ { +fn common_exprs() -> ~[ast::expr] { fn dse(e: ast::expr_) -> ast::expr { { id: 0, node: e, span: ast_util::dummy_sp() } } @@ -41,7 +41,7 @@ fn common_exprs() -> [ast::expr]/~ { { node: l, span: ast_util::dummy_sp() } } - [dse(ast::expr_break), + ~[dse(ast::expr_break), dse(ast::expr_cont), dse(ast::expr_fail(option::none)), dse(ast::expr_fail(option::some( @@ -54,7 +54,7 @@ fn common_exprs() -> [ast::expr]/~ { @dse(ast::expr_lit(@dsl(ast::lit_bool(true)))))), dse(ast::expr_unary(ast::uniq(ast::m_imm), @dse(ast::expr_lit(@dsl(ast::lit_bool(true)))))) - ]/~ + ] } pure fn safe_to_steal_expr(e: @ast::expr, tm: test_mode) -> bool { @@ -116,16 +116,16 @@ fn safe_to_steal_ty(t: @ast::ty, tm: test_mode) -> bool { // Not type-parameterized: https://github.com/mozilla/rust/issues/898 (FIXED) fn stash_expr_if(c: fn@(@ast::expr, test_mode)->bool, - es: @mut [ast::expr]/~, + es: @mut ~[ast::expr], e: @ast::expr, tm: test_mode) { if c(e, tm) { - *es += [*e]/~; + *es += ~[*e]; } else {/* now my indices are wrong :( */ } } fn stash_ty_if(c: fn@(@ast::ty, test_mode)->bool, - es: @mut [ast::ty]/~, + es: @mut ~[ast::ty], e: @ast::ty, tm: test_mode) { if c(e, tm) { @@ -133,14 +133,14 @@ fn stash_ty_if(c: fn@(@ast::ty, test_mode)->bool, } else {/* now my indices are wrong :( */ } } -type stolen_stuff = {exprs: [ast::expr]/~, tys: [ast::ty]/~}; +type stolen_stuff = {exprs: ~[ast::expr], tys: ~[ast::ty]}; fn steal(crate: ast::crate, tm: test_mode) -> stolen_stuff { - let exprs = @mut []/~; - let tys = @mut []/~; + let exprs = @mut ~[]; + let tys = @mut ~[]; let v = visit::mk_simple_visitor(@{ - visit_expr: {|a|stash_expr_if(safe_to_steal_expr, exprs, a, tm)}, - visit_ty: {|a|stash_ty_if(safe_to_steal_ty, tys, a, tm)} + visit_expr: |a| stash_expr_if(safe_to_steal_expr, exprs, a, tm), + visit_ty: |a| stash_ty_if(safe_to_steal_ty, tys, a, tm) with *visit::default_simple_visitor() }); visit::visit_crate(crate, (), v); @@ -187,10 +187,12 @@ fn replace_expr_in_crate(crate: ast::crate, i: uint, fold::noop_fold_expr(original, fld) } } - let afp = - @{fold_expr: fold::wrap({|a,b| - fold_expr_rep(j, i, newexpr.node, a, b, tm)}) - with *fold::default_ast_fold()}; + let afp = @{ + fold_expr: fold::wrap(|a,b| { + fold_expr_rep(j, i, newexpr.node, a, b, tm) + }) + with *fold::default_ast_fold() + }; let af = fold::make_fold(afp); let crate2: @ast::crate = @af.fold_crate(crate); *crate2 @@ -210,9 +212,10 @@ fn replace_ty_in_crate(crate: ast::crate, i: uint, newty: ast::ty, newty_ } else { fold::noop_fold_ty(original, fld) } } - let afp = - @{fold_ty: fold::wrap({|a,b|fold_ty_rep(j, i, newty.node, a, b, tm)}) - with *fold::default_ast_fold()}; + let afp = @{ + fold_ty: fold::wrap(|a,b| fold_ty_rep(j, i, newty.node, a, b, tm) ) + with *fold::default_ast_fold() + }; let af = fold::make_fold(afp); let crate2: @ast::crate = @af.fold_crate(crate); *crate2 @@ -235,7 +238,7 @@ fn check_variants_of_ast(crate: ast::crate, codemap: codemap::codemap, filename: str, cx: context) { let stolen = steal(crate, cx.mode); let extra_exprs = vec::filter(common_exprs(), - {|a|safe_to_use_expr(a, cx.mode)}); + |a| safe_to_use_expr(a, cx.mode) ); check_variants_T(crate, codemap, filename, "expr", extra_exprs + stolen.exprs, pprust::expr_to_str, replace_expr_in_crate, cx); @@ -248,7 +251,7 @@ fn check_variants_T<T: copy>( codemap: codemap::codemap, filename: str, thing_label: str, - things: [T]/~, + things: ~[T], stringifier: fn@(@T) -> str, replacer: fn@(ast::crate, uint, T, test_mode) -> ast::crate, cx: context @@ -259,23 +262,23 @@ fn check_variants_T<T: copy>( let L = vec::len(things); if L < 100u { - under(uint::min(L, 20u)) {|i| + do under(uint::min(L, 20u)) |i| { log(error, "Replacing... #" + uint::str(i)); - under(uint::min(L, 30u)) {|j| + do under(uint::min(L, 30u)) |j| { log(error, "With... " + stringifier(@things[j])); let crate2 = @replacer(crate, i, things[j], cx.mode); // It would be best to test the *crate* for stability, but // testing the string for stability is easier and ok for now. let handler = diagnostic::mk_handler(none); let str3 = - @as_str({|a|pprust::print_crate( + @as_str(|a|pprust::print_crate( codemap, diagnostic::mk_span_handler(handler, codemap), crate2, filename, io::str_reader(""), a, pprust::no_ann(), - false)}); + false)); alt cx.mode { tm_converge { check_roundtrip_convergence(str3, 1u); @@ -334,19 +337,19 @@ fn check_whole_compiler(code: str, suggested_filename_prefix: str, fn removeIfExists(filename: str) { // So sketchy! assert !contains(filename, " "); - run::program_output("bash", ["-c", "rm " + filename]/~); + run::program_output("bash", ~["-c", "rm " + filename]); } fn removeDirIfExists(filename: str) { // So sketchy! assert !contains(filename, " "); - run::program_output("bash", ["-c", "rm -r " + filename]/~); + run::program_output("bash", ~["-c", "rm -r " + filename]); } fn check_running(exe_filename: str) -> happiness { let p = run::program_output( "/Users/jruderman/scripts/timed_run_rust_program.py", - [exe_filename]/~); + ~[exe_filename]); let comb = p.out + "\n" + p.err; if str::len(comb) > 1u { log(error, "comb comb comb: " + comb); @@ -386,7 +389,7 @@ fn check_compiling(filename: str) -> happiness { let p = run::program_output( "/Users/jruderman/code/rust/build/x86_64-apple-darwin/\ stage1/bin/rustc", - [filename]/~); + ~[filename]); //#error("Status: %d", p.status); if p.status == 0 { @@ -420,16 +423,16 @@ fn parse_and_print(code: @str) -> str { let sess = parse::new_parse_sess(option::none); write_file(filename, *code); let crate = parse::parse_crate_from_source_str( - filename, code, []/~, sess); - io::with_str_reader(*code) { |rdr| - as_str({|a|pprust::print_crate(sess.cm, + filename, code, ~[], sess); + io::with_str_reader(*code, |rdr| { + as_str(|a| pprust::print_crate(sess.cm, sess.span_diagnostic, crate, filename, rdr, a, pprust::no_ann(), - false)}) - } + false) ) + }) } fn has_raw_pointers(c: ast::crate) -> bool { @@ -441,7 +444,7 @@ fn has_raw_pointers(c: ast::crate) -> bool { } } let v = - visit::mk_simple_visitor(@{visit_ty: {|a|visit_ty(has_rp, a)} + visit::mk_simple_visitor(@{visit_ty: |a| visit_ty(has_rp, a) with *visit::default_simple_visitor()}); visit::visit_crate(c, (), v); ret *has_rp; @@ -449,27 +452,27 @@ fn has_raw_pointers(c: ast::crate) -> bool { fn content_is_dangerous_to_run(code: str) -> bool { let dangerous_patterns = - ["xfail-test", + ~["xfail-test", "import", // espeically fs, run "native", "unsafe", - "log"]/~; // python --> rust pipe deadlock? + "log"]; // python --> rust pipe deadlock? - for dangerous_patterns.each {|p| if contains(code, p) { ret true; } } + for dangerous_patterns.each |p| { if contains(code, p) { ret true; } } ret false; } fn content_is_dangerous_to_compile(code: str) -> bool { let dangerous_patterns = - ["xfail-test"]/~; + ~["xfail-test"]; - for dangerous_patterns.each {|p| if contains(code, p) { ret true; } } + for dangerous_patterns.each |p| { if contains(code, p) { ret true; } } ret false; } fn content_might_not_converge(code: str) -> bool { let confusing_patterns = - ["xfail-test", + ~["xfail-test", "xfail-pretty", "self", // crazy rules enforced by parser not typechecker? "spawn", // precedence issues? @@ -477,23 +480,23 @@ fn content_might_not_converge(code: str) -> bool { " be ", // don't want to replace its child with a non-call: // "Non-call expression in tail call" "\n\n\n\n\n" // https://github.com/mozilla/rust/issues/850 - ]/~; + ]; - for confusing_patterns.each {|p| if contains(code, p) { ret true; } } + for confusing_patterns.each |p| { if contains(code, p) { ret true; } } ret false; } fn file_might_not_converge(filename: str) -> bool { - let confusing_files = [ + let confusing_files = ~[ "expr-alt.rs", // pretty-printing "(a = b) = c" // vs "a = b = c" and wrapping "block-arg-in-ternary.rs", // wrapping "move-3-unique.rs", // 0 becomes (0), but both seem reasonable. wtf? "move-3.rs" // 0 becomes (0), but both seem reasonable. wtf? - ]/~; + ]; - for confusing_files.each {|f| if contains(filename, f) { ret true; } } + for confusing_files.each |f| { if contains(filename, f) { ret true; } } ret false; } @@ -519,15 +522,15 @@ fn check_roundtrip_convergence(code: @str, maxIters: uint) { write_file("round-trip-a.rs", *oldv); write_file("round-trip-b.rs", *newv); run::run_program("diff", - ["-w", "-u", "round-trip-a.rs", - "round-trip-b.rs"]/~); + ~["-w", "-u", "round-trip-a.rs", + "round-trip-b.rs"]); fail "Mismatch"; } } -fn check_convergence(files: [str]/~) { +fn check_convergence(files: ~[str]) { #error("pp convergence tests: %u files", vec::len(files)); - for files.each {|file| + for files.each |file| { if !file_might_not_converge(file) { let s = @result::get(io::read_whole_file_str(file)); if !content_might_not_converge(*s) { @@ -540,8 +543,8 @@ fn check_convergence(files: [str]/~) { } } -fn check_variants(files: [str]/~, cx: context) { - for files.each {|file| +fn check_variants(files: ~[str], cx: context) { + for files.each |file| { if cx.mode == tm_converge && file_might_not_converge(file) { #error("Skipping convergence test based on\ file_might_not_converge"); @@ -564,27 +567,27 @@ fn check_variants(files: [str]/~, cx: context) { let crate = parse::parse_crate_from_source_str( file, - s, []/~, sess); - io::with_str_reader(*s) { |rdr| + s, ~[], sess); + io::with_str_reader(*s, |rdr| { #error("%s", - as_str({|a|pprust::print_crate(sess.cm, + as_str(|a| pprust::print_crate(sess.cm, sess.span_diagnostic, crate, file, rdr, a, pprust::no_ann(), - false)})); - } + false) )) + }); check_variants_of_ast(*crate, sess.cm, file, cx); } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { if vec::len(args) != 2u { #error("usage: %s <testdir>", args[0]); ret; } - let mut files = []/~; + let mut files = ~[]; let root = args[1]; find_rust_files(files, root); diff --git a/src/fuzzer/ivec_fuzz.rs b/src/fuzzer/ivec_fuzz.rs index 0f5e95a7d73..002654b6f6e 100644 --- a/src/fuzzer/ivec_fuzz.rs +++ b/src/fuzzer/ivec_fuzz.rs @@ -8,8 +8,8 @@ Idea: provide functions for 'exhaustive' and 'random' modification of vecs. It would be nice if this could be data-driven, so the two functions could share information: - type vec_modifier = rec(fn (<T> v, uint i) -> [T]/~ fun, uint lo, uint di); - const [vec_modifier]/~ vec_modifiers = ~[rec(fun=vec_omit, 0u, 1u), ...]/~; + type vec_modifier = rec(fn (<T> v, uint i) -> ~[T] fun, uint lo, uint di); + const ~[vec_modifier] vec_modifiers = ~[rec(fun=vec_omit, 0u, 1u), ...]/~; But that gives me "error: internal compiler error unimplemented consts that's not a plain literal". https://github.com/graydon/rust/issues/570 @@ -24,23 +24,23 @@ import vec::slice; import vec::len; import int; -fn vec_omit<T: copy>(v: [T]/~, i: uint) -> [T]/~ { +fn vec_omit<T: copy>(v: ~[T], i: uint) -> ~[T] { slice(v, 0u, i) + slice(v, i + 1u, len(v)) } -fn vec_dup<T: copy>(v: [T]/~, i: uint) -> [T]/~ { +fn vec_dup<T: copy>(v: ~[T], i: uint) -> ~[T] { slice(v, 0u, i) + [v[i]] + slice(v, i, len(v)) } -fn vec_swadj<T: copy>(v: [T]/~, i: uint) -> [T]/~ { +fn vec_swadj<T: copy>(v: ~[T], i: uint) -> ~[T] { slice(v, 0u, i) + [v[i + 1u], v[i]] + slice(v, i + 2u, len(v)) } -fn vec_prefix<T: copy>(v: [T]/~, i: uint) -> [T]/~ { slice(v, 0u, i) } -fn vec_suffix<T: copy>(v: [T]/~, i: uint) -> [T]/~ { slice(v, i, len(v)) } +fn vec_prefix<T: copy>(v: ~[T], i: uint) -> ~[T] { slice(v, 0u, i) } +fn vec_suffix<T: copy>(v: ~[T], i: uint) -> ~[T] { slice(v, i, len(v)) } -fn vec_poke<T: copy>(v: [T]/~, i: uint, x: T) -> [T]/~ { - slice(v, 0u, i) + [x]/~ + slice(v, i + 1u, len(v)) +fn vec_poke<T: copy>(v: ~[T], i: uint, x: T) -> ~[T] { + slice(v, 0u, i) + ~[x] + slice(v, i + 1u, len(v)) } -fn vec_insert<T: copy>(v: [T]/~, i: uint, x: T) -> [T]/~ { - slice(v, 0u, i) + [x]/~ + slice(v, i, len(v)) +fn vec_insert<T: copy>(v: ~[T], i: uint, x: T) -> ~[T] { + slice(v, 0u, i) + ~[x] + slice(v, i, len(v)) } // Iterates over 0...length, skipping the specified number on each side. @@ -51,23 +51,23 @@ fn ix(skip_low: uint, skip_high: uint, length: uint, it: block(uint)) { // Returns a bunch of modified versions of v, some of which introduce // new elements (borrowed from xs). -fn vec_edits<T: copy>(v: [T]/~, xs: [T]/~) -> [[T]/~]/~ { - let edits: [[T]/~]/~ = []/~; +fn vec_edits<T: copy>(v: ~[T], xs: ~[T]) -> ~[~[T]] { + let edits: ~[~[T]] = ~[]; let Lv: uint = len(v); if Lv != 1u { // When Lv == 1u, this is redundant with omit. - vec::push(edits, []/~); + vec::push(edits, ~[]); } if Lv >= 3u { // When Lv == 2u, this is redundant with swap. vec::push(edits, vec::reversed(v)); } - ix(0u, 1u, Lv) {|i| edits += [vec_omit(v, i)]/~; } - ix(0u, 1u, Lv) {|i| edits += [vec_dup(v, i)]/~; } - ix(0u, 2u, Lv) {|i| edits += [vec_swadj(v, i)]/~; } - ix(1u, 2u, Lv) {|i| edits += [vec_prefix(v, i)]/~; } - ix(2u, 1u, Lv) {|i| edits += [vec_suffix(v, i)]/~; } + ix(0u, 1u, Lv) {|i| edits += ~[vec_omit(v, i)]; } + ix(0u, 1u, Lv) {|i| edits += ~[vec_dup(v, i)]; } + ix(0u, 2u, Lv) {|i| edits += ~[vec_swadj(v, i)]; } + ix(1u, 2u, Lv) {|i| edits += ~[vec_prefix(v, i)]; } + ix(2u, 1u, Lv) {|i| edits += ~[vec_suffix(v, i)]; } ix(0u, 1u, len(xs)) {|j| ix(0u, 1u, Lv) {|i| @@ -83,7 +83,7 @@ fn vec_edits<T: copy>(v: [T]/~, xs: [T]/~) -> [[T]/~]/~ { // Would be nice if this were built in: // https://github.com/graydon/rust/issues/424 -fn vec_to_str(v: [int]/~) -> str { +fn vec_to_str(v: ~[int]) -> str { let i = 0u; let s = "["; while i < len(v) { @@ -94,19 +94,19 @@ fn vec_to_str(v: [int]/~) -> str { ret s + "]"; } -fn show_edits(a: [int]/~, xs: [int]/~) { +fn show_edits(a: ~[int], xs: ~[int]) { log(error, "=== Edits of " + vec_to_str(a) + " ==="); let b = vec_edits(a, xs); ix(0u, 1u, len(b)) {|i| log(error, vec_to_str(b[i])); } } fn demo_edits() { - let xs = [7, 8]/~; - show_edits([]/~, xs); - show_edits([1]/~, xs); - show_edits([1, 2]/~, xs); - show_edits([1, 2, 3]/~, xs); - show_edits([1, 2, 3, 4]/~, xs); + let xs = ~[7, 8]; + show_edits(~[], xs); + show_edits(~[1], xs); + show_edits(~[1, 2], xs); + show_edits(~[1, 2, 3], xs); + show_edits(~[1, 2, 3, 4], xs); } fn main() { demo_edits(); } diff --git a/src/fuzzer/rand_util.rs b/src/fuzzer/rand_util.rs index 1ef3d140c22..ab3c4185e0a 100644 --- a/src/fuzzer/rand_util.rs +++ b/src/fuzzer/rand_util.rs @@ -8,7 +8,7 @@ fn under(r : rand::rng, n : uint) -> uint { } // random choice from a vec -fn choice<T: copy>(r : rand::rng, v : [T]/~) -> T { +fn choice<T: copy>(r : rand::rng, v : ~[T]) -> T { assert vec::len(v) != 0u; v[under(r, vec::len(v))] } @@ -16,7 +16,7 @@ fn choice<T: copy>(r : rand::rng, v : [T]/~) -> T { fn unlikely(r : rand::rng, n : uint) -> bool { under(r, n) == 0u } // shuffle a vec in place -fn shuffle<T>(r : rand::rng, &v : [mut T]/~) { +fn shuffle<T>(r : rand::rng, &v : ~[mut T]) { let i = vec::len(v); while i >= 2u { // Loop invariant: elements with index >= i have been locked in place. @@ -26,20 +26,20 @@ fn shuffle<T>(r : rand::rng, &v : [mut T]/~) { } // create a shuffled copy of a vec -fn shuffled<T: copy>(r : rand::rng, v : [T]/~) -> [T]/~ { +fn shuffled<T: copy>(r : rand::rng, v : ~[T]) -> ~[T] { let w = vec::to_mut(v); shuffle(r, w); vec::from_mut(w) // Shouldn't this happen automatically? } // sample from a population without replacement -//fn sample<T>(r : rand::rng, pop : [T]/~, k : uint) -> [T]/~ { fail } +//fn sample<T>(r : rand::rng, pop : ~[T], k : uint) -> ~[T] { fail } // Two ways to make a weighted choice. // * weighted_choice is O(number of choices) time // * weighted_vec is O(total weight) space type weighted<T> = { weight: uint, item: T }; -fn weighted_choice<T: copy>(r : rand::rng, v : [weighted<T>]/~) -> T { +fn weighted_choice<T: copy>(r : rand::rng, v : ~[weighted<T>]) -> T { assert vec::len(v) != 0u; let total = 0u; for {weight: weight, item: _} in v { @@ -57,8 +57,8 @@ fn weighted_choice<T: copy>(r : rand::rng, v : [weighted<T>]/~) -> T { core::unreachable(); } -fn weighted_vec<T: copy>(v : [weighted<T>]/~) -> [T]/~ { - let r = []/~; +fn weighted_vec<T: copy>(v : ~[weighted<T>]) -> ~[T] { + let r = ~[]; for {weight: weight, item: item} in v { let i = 0u; while i < weight { @@ -74,19 +74,19 @@ fn main() let r = rand::mk_rng(); log(error, under(r, 5u)); - log(error, choice(r, [10, 20, 30]/~)); + log(error, choice(r, ~[10, 20, 30])); log(error, if unlikely(r, 5u) { "unlikely" } else { "likely" }); - let a = [mut 1, 2, 3]/~; + let a = ~[mut 1, 2, 3]; shuffle(r, a); log(error, a); let i = 0u; - let v = [ + let v = ~[ {weight:1u, item:"low"}, {weight:8u, item:"middle"}, {weight:1u, item:"high"} - ]/~; + ]; let w = weighted_vec(v); while i < 1000u { diff --git a/src/libcore/arc.rs b/src/libcore/arc.rs index 12ada1b9b18..84f8ca8700e 100644 --- a/src/libcore/arc.rs +++ b/src/libcore/arc.rs @@ -109,9 +109,7 @@ impl methods<T: send> for exclusive<T> { unsafe::reinterpret_cast(self.data); let r = { let rec: &ex_data<T> = &(*ptr).data; - rec.lock.lock_cond() {|c| - f(c, &rec.data) - } + rec.lock.lock_cond(|c| f(c, &rec.data)) }; unsafe::forget(ptr); r @@ -135,7 +133,7 @@ fn shared_arc<T: send const>(-data: T) -> shared_arc<T> { let a = arc::arc(data); let p = port(); let c = chan(p); - task::spawn() {|move a| + do task::spawn() |move a| { let mut live = true; let terminate = port(); let get = port(); @@ -168,19 +166,19 @@ mod tests { #[test] fn manually_share_arc() { - let v = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]/~; + let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let arc_v = arc::arc(v); let p = port(); let c = chan(p); - task::spawn() {|| + do task::spawn() || { let p = port(); c.send(chan(p)); let arc_v = p.recv(); - let v = *arc::get::<[int]/~>(&arc_v); + let v = *arc::get::<~[int]>(&arc_v); assert v[3] == 4; }; @@ -194,13 +192,13 @@ mod tests { #[test] fn auto_share_arc() { - let v = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]/~; + let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let (_res, arc_c) = shared_arc(v); let p = port(); let c = chan(p); - task::spawn() {|| + do task::spawn() || { let arc_v = get_arc(arc_c); let v = *get(&arc_v); assert v[2] == 3; @@ -214,27 +212,27 @@ mod tests { #[test] #[ignore] // this can probably infinite loop too. fn exclusive_arc() { - let mut futures = []/~; + let mut futures = ~[]; let num_tasks = 10u; let count = 1000u; let total = exclusive(~mut 0u); - for uint::range(0u, num_tasks) {|_i| + for uint::range(0u, num_tasks) |_i| { let total = total.clone(); - futures += [future::spawn({|| - for uint::range(0u, count) {|_i| - total.with {|_cond, count| + futures += ~[future::spawn(|| { + for uint::range(0u, count) |_i| { + do total.with |_cond, count| { **count += 1u; } } - })]/~; + })]; }; - for futures.each {|f| f.get() }; + for futures.each |f| { f.get() } - total.with {|_cond, total| + do total.with |_cond, total| { assert **total == num_tasks * count }; } diff --git a/src/libcore/bool.rs b/src/libcore/bool.rs index 42df0e29a99..cf7689c57ac 100644 --- a/src/libcore/bool.rs +++ b/src/libcore/bool.rs @@ -65,7 +65,7 @@ pure fn to_bit(v: bool) -> u8 { if v { 1u8 } else { 0u8 } } #[test] fn test_bool_from_str() { - all_values { |v| + do all_values |v| { assert some(v) == from_str(bool::to_str(v)) } } @@ -78,7 +78,7 @@ fn test_bool_to_str() { #[test] fn test_bool_to_bit() { - all_values { |v| + do all_values |v| { assert to_bit(v) == if is_true(v) { 1u8 } else { 0u8 }; } } diff --git a/src/libcore/char.rs b/src/libcore/char.rs index ffead7a523b..8e204d89f3f 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -140,7 +140,7 @@ fn escape_unicode(c: char) -> str { assert str::len(s) <= pad; let mut out = "\\"; out += str::from_char(c); - for uint::range(str::len(s), pad) {|_i| out += "0"; } + for uint::range(str::len(s), pad) |_i| { out += "0"; } out += s; ret out; } diff --git a/src/libcore/comm.rs b/src/libcore/comm.rs index 4a01ccea324..2d1c4dd978c 100644 --- a/src/libcore/comm.rs +++ b/src/libcore/comm.rs @@ -98,7 +98,7 @@ class port_ptr<T:send> { let po: *rust_port; new(po: *rust_port) { self.po = po; } drop unsafe { - task::unkillable {|| + do task::unkillable || { // Once the port is detached it's guaranteed not to receive further // messages let yield = 0u; @@ -184,11 +184,11 @@ fn peek<T: send>(p: port<T>) -> bool { peek_((**p).po) } #[doc(hidden)] fn recv_chan<T: send>(ch: comm::chan<T>) -> T { - as_raw_port(ch, {|x|recv_(x)}) + as_raw_port(ch, |x|recv_(x)) } fn peek_chan<T: send>(ch: comm::chan<T>) -> bool { - as_raw_port(ch, {|x|peek_(x)}) + as_raw_port(ch, |x|peek_(x)) } #[doc = "Receive on a raw port pointer"] @@ -217,13 +217,13 @@ fn peek_(p: *rust_port) -> bool { #[doc = "Receive on one of two ports"] fn select2<A: send, B: send>(p_a: port<A>, p_b: port<B>) -> either<A, B> { - let ports = [(**p_a).po, (**p_b).po]/~; + let ports = ~[(**p_a).po, (**p_b).po]; let n_ports = 2 as libc::size_t; let yield = 0u, yieldp = ptr::addr_of(yield); let mut resport: *rust_port; resport = rusti::init::<*rust_port>(); - vec::as_buf(ports) {|ports| + do vec::as_buf(ports) |ports| { rustrt::rust_port_select(ptr::addr_of(resport), ports, n_ports, yieldp); } @@ -364,16 +364,16 @@ fn test_select2_rendezvous() { let ch_a = chan(po_a); let ch_b = chan(po_b); - iter::repeat(10u) {|| - task::spawn {|| - iter::repeat(10u) {|| task::yield() } + do iter::repeat(10u) || { + do task::spawn || { + iter::repeat(10u, || task::yield()); send(ch_a, "a"); }; assert select2(po_a, po_b) == either::left("a"); - task::spawn {|| - iter::repeat(10u) {|| task::yield() } + do task::spawn || { + iter::repeat(10u, || task::yield()); send(ch_b, "b"); }; @@ -391,14 +391,14 @@ fn test_select2_stress() { let msgs = 100u; let times = 4u; - iter::repeat(times) {|| - task::spawn {|| - iter::repeat(msgs) {|| + do iter::repeat(times) || { + do task::spawn || { + do iter::repeat(msgs) || { send(ch_a, "a") } }; - task::spawn {|| - iter::repeat(msgs) {|| + do task::spawn || { + do iter::repeat(msgs) || { send(ch_b, "b") } }; @@ -406,7 +406,7 @@ fn test_select2_stress() { let mut as = 0; let mut bs = 0; - iter::repeat(msgs * times * 2u) {|| + do iter::repeat(msgs * times * 2u) || { alt check select2(po_a, po_b) { either::left("a") { as += 1 } either::right("b") { bs += 1 } @@ -440,9 +440,9 @@ fn test_recv_chan_wrong_task() { let po = port(); let ch = chan(po); send(ch, "flower"); - assert result::is_err(task::try {|| + assert result::is_err(task::try(|| recv_chan(ch) - }) + )) } #[test] @@ -462,8 +462,8 @@ fn test_chan_peek() { #[test] fn test_listen() { - listen {|parent| - task::spawn {|| + do listen |parent| { + do task::spawn || { parent.send("oatmeal-salad"); } assert parent.recv() == "oatmeal-salad"; @@ -473,18 +473,18 @@ fn test_listen() { #[test] #[ignore(cfg(windows))] fn test_port_detach_fail() { - iter::repeat(100u) {|| + do iter::repeat(100u) || { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { let po = port(); let ch = po.chan(); - task::spawn {|| + do task::spawn || { fail; } - task::spawn {|| + do task::spawn || { ch.send(()); } } diff --git a/src/libcore/core.rc b/src/libcore/core.rc index 8f4b301adaf..fd1d20f9d6e 100644 --- a/src/libcore/core.rc +++ b/src/libcore/core.rc @@ -173,6 +173,12 @@ mod dvec_iter { #[path = "dvec.rs"] mod inst; } +mod dlist; +#[path="iter-trait"] +mod dlist_iter { + #[path ="dlist.rs"] + mod inst; +} // Concurrency mod arc; diff --git a/src/libcore/dlist.rs b/src/libcore/dlist.rs new file mode 100644 index 00000000000..ba71e6b21ca --- /dev/null +++ b/src/libcore/dlist.rs @@ -0,0 +1,643 @@ +#[doc = " +A doubly-linked list. Supports O(1) head, tail, count, push, pop, etc. + +Do not use ==, !=, <, etc on doubly-linked lists -- it may not terminate. +"] + +import dlist_iter::extensions; + +export dlist, dlist_node; +export create, from_elt, from_vec, extensions; + +type dlist_link<T> = option<dlist_node<T>>; + +enum dlist_node<T> = @{ + data: T, + mut root: option<dlist<T>>, + mut prev: dlist_link<T>, + mut next: dlist_link<T> +}; + +// Needs to be an @-box so nodes can back-reference it. +enum dlist<T> = @{ + mut size: uint, + mut hd: dlist_link<T>, + mut tl: dlist_link<T> +}; + +impl private_methods<T> for dlist_node<T> { + pure fn assert_links() { + alt self.next { + some(neighbour) { + alt neighbour.prev { + some(me) { + if !box::ptr_eq(*self, *me) { + fail "Asymmetric next-link in dlist node." + } + } + none { fail "One-way next-link in dlist node." } + } + } + none { } + } + alt self.prev { + some(neighbour) { + alt neighbour.next { + some(me) { + if !box::ptr_eq(*me, *self) { + fail "Asymmetric prev-link in dlist node." + } + } + none { fail "One-way prev-link in dlist node." } + } + } + none { } + } + } +} + +impl extensions<T> for dlist_node<T> { + #[doc = "Get the next node in the list, if there is one."] + pure fn next_link() -> option<dlist_node<T>> { + self.assert_links(); + self.next + } + #[doc = "Get the next node in the list, failing if there isn't one."] + pure fn next_node() -> dlist_node<T> { + alt self.next_link() { + some(nobe) { nobe } + none { fail "This dlist node has no next neighbour." } + } + } + #[doc = "Get the previous node in the list, if there is one."] + pure fn prev_link() -> option<dlist_node<T>> { + self.assert_links(); + self.prev + } + #[doc = "Get the previous node in the list, failing if there isn't one."] + pure fn prev_node() -> dlist_node<T> { + alt self.prev_link() { + some(nobe) { nobe } + none { fail "This dlist node has no previous neighbour." } + } + } + + #[doc = "Remove a node from whatever dlist it's on (failing if none)."] + fn remove() { + if option::is_some(self.root) { + option::get(self.root).remove(self); + } else { + fail "Removing an orphaned dlist node - what do I remove from?" + } + } +} + +#[doc = "Creates a new dlist node with the given data."] +pure fn create_node<T>(+data: T) -> dlist_node<T> { + dlist_node(@{data: data, mut root: none, mut prev: none, mut next: none}) +} + +#[doc = "Creates a new, empty dlist."] +pure fn create<T>() -> dlist<T> { + dlist(@{mut size: 0, mut hd: none, mut tl: none}) +} + +#[doc = "Creates a new dlist with a single element"] +fn from_elt<T>(+data: T) -> dlist<T> { + let list = create(); + list.push(data); + list +} + +fn from_vec<T: copy>(+vec: &[T]) -> dlist<T> { + do vec::foldl(create(), vec) |list,data| { + list.push(data); // Iterating left-to-right -- add newly to the tail. + list + } +} + +impl private_methods<T> for dlist<T> { + pure fn new_link(-data: T) -> dlist_link<T> { + some(dlist_node(@{data: data, mut root: some(self), + mut prev: none, mut next: none})) + } + pure fn assert_mine(nobe: dlist_node<T>) { + alt nobe.root { + some(me) { assert box::ptr_eq(*self, *me); } + none { fail "This node isn't on this dlist." } + } + } + fn make_mine(nobe: dlist_node<T>) { + if option::is_some(nobe.root) { + fail "Cannot insert node that's already on a dlist!" + } + nobe.root = some(self); + } + // Link two nodes together. If either of them are 'none', also sets + // the head and/or tail pointers appropriately. + #[inline(always)] + fn link(+before: dlist_link<T>, +after: dlist_link<T>) { + alt before { + some(neighbour) { neighbour.next = after; } + none { self.hd = after; } + } + alt after { + some(neighbour) { neighbour.prev = before; } + none { self.tl = before; } + } + } + // Remove a node from the list. + fn unlink(nobe: dlist_node<T>) { + self.assert_mine(nobe); + assert self.size > 0; + self.link(nobe.prev, nobe.next); + nobe.prev = none; // Release extraneous references. + nobe.next = none; + nobe.root = none; + self.size -= 1; + } + + fn add_head(+nobe: dlist_link<T>) { + self.link(nobe, self.hd); // Might set tail too. + self.hd = nobe; + self.size += 1; + } + fn add_tail(+nobe: dlist_link<T>) { + self.link(self.tl, nobe); // Might set head too. + self.tl = nobe; + self.size += 1; + } + fn insert_left(nobe: dlist_link<T>, neighbour: dlist_node<T>) { + self.assert_mine(neighbour); + assert self.size > 0; + self.link(neighbour.prev, nobe); + self.link(nobe, some(neighbour)); + self.size += 1; + } + fn insert_right(neighbour: dlist_node<T>, nobe: dlist_link<T>) { + self.assert_mine(neighbour); + assert self.size > 0; + self.link(nobe, neighbour.next); + self.link(some(neighbour), nobe); + self.size += 1; + } +} + +impl extensions<T> for dlist<T> { + #[doc = "Get the size of the list. O(1)."] + pure fn len() -> uint { self.size } + #[doc = "Returns true if the list is empty. O(1)."] + pure fn is_empty() -> bool { self.len() == 0 } + #[doc = "Returns true if the list is not empty. O(1)."] + pure fn is_not_empty() -> bool { self.len() != 0 } + + #[doc = "Add data to the head of the list. O(1)."] + fn push_head(+data: T) { + self.add_head(self.new_link(data)); + } + #[doc = "Add data to the head of the list, and get the new containing + node. O(1)."] + fn push_head_n(+data: T) -> dlist_node<T> { + let mut nobe = self.new_link(data); + self.add_head(nobe); + option::get(nobe) + } + #[doc = "Add data to the tail of the list. O(1)."] + fn push(+data: T) { + self.add_tail(self.new_link(data)); + } + #[doc = "Add data to the tail of the list, and get the new containing + node. O(1)."] + fn push_n(+data: T) -> dlist_node<T> { + let mut nobe = self.new_link(data); + self.add_tail(nobe); + option::get(nobe) + } + #[doc = "Insert data into the middle of the list, left of the given node. + O(1)."] + fn insert_before(+data: T, neighbour: dlist_node<T>) { + self.insert_left(self.new_link(data), neighbour); + } + #[doc = "Insert an existing node in the middle of the list, left of the + given node. O(1)."] + fn insert_n_before(nobe: dlist_node<T>, neighbour: dlist_node<T>) { + self.make_mine(nobe); + self.insert_left(some(nobe), neighbour); + } + #[doc = "Insert data in the middle of the list, left of the given node, + and get its containing node. O(1)."] + fn insert_before_n(+data: T, neighbour: dlist_node<T>) -> dlist_node<T> { + let mut nobe = self.new_link(data); + self.insert_left(nobe, neighbour); + option::get(nobe) + } + #[doc = "Insert data into the middle of the list, right of the given node. + O(1)."] + fn insert_after(+data: T, neighbour: dlist_node<T>) { + self.insert_right(neighbour, self.new_link(data)); + } + #[doc = "Insert an existing node in the middle of the list, right of the + given node. O(1)."] + fn insert_n_after(nobe: dlist_node<T>, neighbour: dlist_node<T>) { + self.make_mine(nobe); + self.insert_right(neighbour, some(nobe)); + } + #[doc = "Insert data in the middle of the list, right of the given node, + and get its containing node. O(1)."] + fn insert_after_n(+data: T, neighbour: dlist_node<T>) -> dlist_node<T> { + let mut nobe = self.new_link(data); + self.insert_right(neighbour, nobe); + option::get(nobe) + } + + #[doc = "Remove a node from the head of the list. O(1)."] + fn pop_n() -> option<dlist_node<T>> { + let hd = self.peek_n(); + hd.map(|nobe| self.unlink(nobe)); + hd + } + #[doc = "Remove a node from the tail of the list. O(1)."] + fn pop_tail_n() -> option<dlist_node<T>> { + let tl = self.peek_tail_n(); + tl.map(|nobe| self.unlink(nobe)); + tl + } + #[doc = "Get the node at the list's head. O(1)."] + pure fn peek_n() -> option<dlist_node<T>> { self.hd } + #[doc = "Get the node at the list's tail. O(1)."] + pure fn peek_tail_n() -> option<dlist_node<T>> { self.tl } + + #[doc = "Get the node at the list's head, failing if empty. O(1)."] + pure fn head_n() -> dlist_node<T> { + alt self.hd { + some(nobe) { nobe } + none { fail "Attempted to get the head of an empty dlist." } + } + } + #[doc = "Get the node at the list's tail, failing if empty. O(1)."] + pure fn tail_n() -> dlist_node<T> { + alt self.tl { + some(nobe) { nobe } + none { fail "Attempted to get the tail of an empty dlist." } + } + } + + #[doc = "Remove a node from anywhere in the list. O(1)."] + fn remove(nobe: dlist_node<T>) { self.unlink(nobe); } + + #[doc = "Check data structure integrity. O(n)."] + fn assert_consistent() { + if option::is_none(self.hd) || option::is_none(self.tl) { + assert option::is_none(self.hd) && option::is_none(self.tl); + } + // iterate forwards + let mut count = 0; + let mut link = self.peek_n(); + let mut rabbit = link; + while option::is_some(link) { + let nobe = option::get(link); + // check self on this list + assert option::is_some(nobe.root) && + box::ptr_eq(*option::get(nobe.root), *self); + // check cycle + if option::is_some(rabbit) { rabbit = option::get(rabbit).next; } + if option::is_some(rabbit) { rabbit = option::get(rabbit).next; } + if option::is_some(rabbit) { + assert !box::ptr_eq(*option::get(rabbit), *nobe); + } + // advance + link = nobe.next_link(); + count += 1; + } + assert count == self.len(); + // iterate backwards - some of this is probably redundant. + link = self.peek_tail_n(); + rabbit = link; + while option::is_some(link) { + let nobe = option::get(link); + // check self on this list + assert option::is_some(nobe.root) && + box::ptr_eq(*option::get(nobe.root), *self); + // check cycle + if option::is_some(rabbit) { rabbit = option::get(rabbit).prev; } + if option::is_some(rabbit) { rabbit = option::get(rabbit).prev; } + if option::is_some(rabbit) { + assert !box::ptr_eq(*option::get(rabbit), *nobe); + } + // advance + link = nobe.prev_link(); + count -= 1; + } + assert count == 0; + } +} + +impl extensions<T: copy> for dlist<T> { + #[doc = "Remove data from the head of the list. O(1)."] + fn pop() -> option<T> { self.pop_n().map (|nobe| nobe.data) } + #[doc = "Remove data from the tail of the list. O(1)."] + fn pop_tail() -> option<T> { self.pop_tail_n().map (|nobe| nobe.data) } + #[doc = "Get data at the list's head. O(1)."] + fn peek() -> option<T> { self.peek_n().map (|nobe| nobe.data) } + #[doc = "Get data at the list's tail. O(1)."] + fn peek_tail() -> option<T> { self.peek_tail_n().map (|nobe| nobe.data) } + #[doc = "Get data at the list's head, failing if empty. O(1)."] + pure fn head() -> T { self.head_n().data } + #[doc = "Get data at the list's tail, failing if empty. O(1)."] + pure fn tail() -> T { self.tail_n().data } +} + +#[cfg(test)] +mod tests { + #[test] + fn test_dlist_is_empty() { + let empty = create::<int>(); + let full1 = from_vec(~[1,2,3]); + assert empty.is_empty(); + assert !full1.is_empty(); + assert !empty.is_not_empty(); + assert full1.is_not_empty(); + } + #[test] + fn test_dlist_head_tail() { + let l = from_vec(~[1,2,3]); + assert l.head() == 1; + assert l.tail() == 3; + assert l.len() == 3; + } + #[test] + fn test_dlist_pop() { + let l = from_vec(~[1,2,3]); + assert l.pop().get() == 1; + assert l.tail() == 3; + assert l.head() == 2; + assert l.pop().get() == 2; + assert l.tail() == 3; + assert l.head() == 3; + assert l.pop().get() == 3; + assert l.is_empty(); + assert l.pop().is_none(); + } + #[test] + fn test_dlist_pop_tail() { + let l = from_vec(~[1,2,3]); + assert l.pop_tail().get() == 3; + assert l.tail() == 2; + assert l.head() == 1; + assert l.pop_tail().get() == 2; + assert l.tail() == 1; + assert l.head() == 1; + assert l.pop_tail().get() == 1; + assert l.is_empty(); + assert l.pop_tail().is_none(); + } + #[test] + fn test_dlist_push() { + let l = create::<int>(); + l.push(1); + assert l.head() == 1; + assert l.tail() == 1; + l.push(2); + assert l.head() == 1; + assert l.tail() == 2; + l.push(3); + assert l.head() == 1; + assert l.tail() == 3; + assert l.len() == 3; + } + #[test] + fn test_dlist_push_head() { + let l = create::<int>(); + l.push_head(3); + assert l.head() == 3; + assert l.tail() == 3; + l.push_head(2); + assert l.head() == 2; + assert l.tail() == 3; + l.push_head(1); + assert l.head() == 1; + assert l.tail() == 3; + assert l.len() == 3; + } + #[test] + fn test_dlist_foldl() { + let l = from_vec(vec::from_fn(101, |x|x)); + assert iter::foldl(l, 0, |accum,elem| accum+elem) == 5050; + } + #[test] + fn test_dlist_remove_head() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let _two = l.push_n(2); + l.assert_consistent(); let _three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(one); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); assert l.head() == 2; + l.assert_consistent(); assert l.tail() == 3; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_mid() { + let l = create::<int>(); + l.assert_consistent(); let _one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); let _three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(two); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 3; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_tail() { + let l = create::<int>(); + l.assert_consistent(); let _one = l.push_n(1); + l.assert_consistent(); let _two = l.push_n(2); + l.assert_consistent(); let three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(three); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 2; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_one_two() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); let _three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(one); + l.assert_consistent(); l.remove(two); + // and through and through, the vorpal blade went snicker-snack + l.assert_consistent(); assert l.len() == 1; + l.assert_consistent(); assert l.head() == 3; + l.assert_consistent(); assert l.tail() == 3; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_one_three() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let _two = l.push_n(2); + l.assert_consistent(); let three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(one); + l.assert_consistent(); l.remove(three); + l.assert_consistent(); assert l.len() == 1; + l.assert_consistent(); assert l.head() == 2; + l.assert_consistent(); assert l.tail() == 2; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_two_three() { + let l = create::<int>(); + l.assert_consistent(); let _one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); let three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(two); + l.assert_consistent(); l.remove(three); + l.assert_consistent(); assert l.len() == 1; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 1; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_remove_all() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); let three = l.push_n(3); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); l.remove(two); + l.assert_consistent(); l.remove(three); + l.assert_consistent(); l.remove(one); // Twenty-three is number one! + l.assert_consistent(); assert l.peek() == none; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_insert_n_before() { + let l = create::<int>(); + l.assert_consistent(); let _one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); let three = create_node(3); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); l.insert_n_before(three, two); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 2; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_insert_n_after() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let _two = l.push_n(2); + l.assert_consistent(); let three = create_node(3); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); l.insert_n_after(three, one); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 2; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_insert_before_head() { + let l = create::<int>(); + l.assert_consistent(); let one = l.push_n(1); + l.assert_consistent(); let _two = l.push_n(2); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); l.insert_before(3, one); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); assert l.head() == 3; + l.assert_consistent(); assert l.tail() == 2; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.is_empty(); + } + #[test] + fn test_dlist_insert_after_tail() { + let l = create::<int>(); + l.assert_consistent(); let _one = l.push_n(1); + l.assert_consistent(); let two = l.push_n(2); + l.assert_consistent(); assert l.len() == 2; + l.assert_consistent(); l.insert_after(3, two); + l.assert_consistent(); assert l.len() == 3; + l.assert_consistent(); assert l.head() == 1; + l.assert_consistent(); assert l.tail() == 3; + l.assert_consistent(); assert l.pop().get() == 1; + l.assert_consistent(); assert l.pop().get() == 2; + l.assert_consistent(); assert l.pop().get() == 3; + l.assert_consistent(); assert l.is_empty(); + } + #[test] #[should_fail] + fn test_asymmetric_link() { + let l = create::<int>(); + let one = l.push_n(1); + let two = l.push_n(2); + two.prev = none; + l.assert_consistent(); + } + #[test] #[should_fail] + fn test_cyclic_list() { + let l = create::<int>(); + let one = l.push_n(1); + let _two = l.push_n(2); + let three = l.push_n(3); + three.next = some(one); + one.prev = some(three); + l.assert_consistent(); + } + #[test] #[should_fail] + fn test_headless() { + create::<int>().head(); + } + #[test] #[should_fail] + fn test_insert_already_present_before() { + let l = create::<int>(); + let one = l.push_n(1); + let two = l.push_n(2); + l.insert_n_before(two, one); + } + #[test] #[should_fail] + fn test_insert_already_present_after() { + let l = create::<int>(); + let one = l.push_n(1); + let two = l.push_n(2); + l.insert_n_after(one, two); + } + #[test] #[should_fail] + fn test_insert_before_orphan() { + let l = create::<int>(); + let one = create_node(1); + let two = create_node(2); + l.insert_n_before(one, two); + } + #[test] #[should_fail] + fn test_insert_after_orphan() { + let l = create::<int>(); + let one = create_node(1); + let two = create_node(2); + l.insert_n_after(two, one); + } +} diff --git a/src/libcore/dvec.rs b/src/libcore/dvec.rs index 85bde3b7c59..ea67947073e 100644 --- a/src/libcore/dvec.rs +++ b/src/libcore/dvec.rs @@ -48,26 +48,26 @@ type could only produce 47 million pushes/second. "] type dvec<A> = { - mut data: [mut A]/~ + mut data: ~[mut A] }; #[doc = "Creates a new, empty dvec"] fn dvec<A>() -> dvec<A> { - {mut data: [mut]/~} + {mut data: ~[mut]} } #[doc = "Creates a new dvec with a single element"] fn from_elt<A>(+e: A) -> dvec<A> { - {mut data: [mut e]/~} + {mut data: ~[mut e]} } #[doc = "Creates a new dvec with the contents of a vector"] -fn from_vec<A>(+v: [mut A]/~) -> dvec<A> { +fn from_vec<A>(+v: ~[mut A]) -> dvec<A> { {mut data: v} } #[doc = "Consumes the vector and returns its contents"] -fn unwrap<A>(-d: dvec<A>) -> [mut A]/~ { +fn unwrap<A>(-d: dvec<A>) -> ~[mut A] { let {data: v} <- d; ret v; } @@ -83,7 +83,7 @@ impl private_methods<A> for dvec<A> { } #[inline(always)] - fn borrow<B>(f: fn(-[mut A]/~) -> B) -> B { + fn borrow<B>(f: fn(-~[mut A]) -> B) -> B { unsafe { let mut data = unsafe::reinterpret_cast(null::<()>()); data <-> self.data; @@ -94,7 +94,7 @@ impl private_methods<A> for dvec<A> { } #[inline(always)] - fn return(-data: [mut A]/~) { + fn return(-data: ~[mut A]) { unsafe { self.data <- data; } @@ -113,13 +113,13 @@ impl extensions<A> for dvec<A> { "] #[inline(always)] - fn swap(f: fn(-[mut A]/~) -> [mut A]/~) { - self.borrow { |v| self.return(f(v)) } + fn swap(f: fn(-~[mut A]) -> ~[mut A]) { + self.borrow(|v| self.return(f(v))) } #[doc = "Returns the number of elements currently in the dvec"] fn len() -> uint { - self.borrow { |v| + do self.borrow |v| { let l = v.len(); self.return(v); l @@ -127,14 +127,14 @@ impl extensions<A> for dvec<A> { } #[doc = "Overwrite the current contents"] - fn set(+w: [mut A]/~) { + fn set(+w: ~[mut A]) { self.check_not_borrowed(); self.data <- w; } #[doc = "Remove and return the last element"] fn pop() -> A { - self.borrow { |v| + do self.borrow |v| { let mut v <- v; let result = vec::pop(v); self.return(v); @@ -150,7 +150,7 @@ impl extensions<A> for dvec<A> { let data_ptr: *() = unsafe::reinterpret_cast(data); if data_ptr.is_null() { fail "Recursive use of dvec"; } log(error, "a"); - self.data <- [mut t]/~; + self.data <- ~[mut t]; vec::push_all_move(self.data, data); log(error, "b"); } @@ -164,7 +164,7 @@ impl extensions<A> for dvec<A> { #[doc = "Remove and return the first element"] fn shift() -> A { - self.borrow { |v| + do self.borrow |v| { let mut v = vec::from_mut(v); let result = vec::shift(v); self.return(vec::to_mut(v)); @@ -179,15 +179,15 @@ impl extensions<A:copy> for dvec<A> { Equivalent to `append_iter()` but potentially more efficient. "] - fn push_all(ts: [const A]/&) { + fn push_all(ts: &[const A]) { self.push_slice(ts, 0u, vec::len(ts)); } #[doc = " Appends elements from `from_idx` to `to_idx` (exclusive) "] - fn push_slice(ts: [const A]/&, from_idx: uint, to_idx: uint) { - self.swap { |v| + fn push_slice(ts: &[const A], from_idx: uint, to_idx: uint) { + do self.swap |v| { let mut v <- v; let new_len = vec::len(v) + to_idx - from_idx; vec::reserve(v, new_len); @@ -207,7 +207,7 @@ impl extensions<A:copy> for dvec<A> { attempts to access this vector. "] fn append_iter<A, I:iter::base_iter<A>>(ts: I) { - self.swap { |v| + do self.swap |v| { let mut v = alt ts.size_hint() { none { v } some(h) { @@ -218,7 +218,7 @@ impl extensions<A:copy> for dvec<A> { } }; - for ts.each { |t| vec::push(v, t) }; + for ts.each |t| { vec::push(v, t) }; v } } @@ -228,8 +228,8 @@ impl extensions<A:copy> for dvec<A> { See `unwrap()` if you do not wish to copy the contents. "] - fn get() -> [A]/~ { - self.borrow { |v| + fn get() -> ~[A] { + do self.borrow |v| { let w = vec::from_mut(copy v); self.return(v); w @@ -259,7 +259,7 @@ impl extensions<A:copy> for dvec<A> { growing the vector if necessary. New elements will be initialized with `initval`"] fn grow_set_elt(idx: uint, initval: A, val: A) { - self.swap { |v| + do self.swap |v| { let mut v <- v; vec::grow_set(v, idx, initval, val); v diff --git a/src/libcore/either.rs b/src/libcore/either.rs index 0a1c9ce9e6b..9dadd848415 100644 --- a/src/libcore/either.rs +++ b/src/libcore/either.rs @@ -21,28 +21,28 @@ fn either<T, U, V>(f_left: fn(T) -> V, alt value { left(l) { f_left(l) } right(r) { f_right(r) } } } -fn lefts<T: copy, U>(eithers: [either<T, U>]/~) -> [T]/~ { +fn lefts<T: copy, U>(eithers: ~[either<T, U>]) -> ~[T] { #[doc = "Extracts from a vector of either all the left values"]; - let mut result: [T]/~ = []/~; - for vec::each(eithers) {|elt| + let mut result: ~[T] = ~[]; + for vec::each(eithers) |elt| { alt elt { left(l) { vec::push(result, l); } _ {/* fallthrough */ } } } ret result; } -fn rights<T, U: copy>(eithers: [either<T, U>]/~) -> [U]/~ { +fn rights<T, U: copy>(eithers: ~[either<T, U>]) -> ~[U] { #[doc = "Extracts from a vector of either all the right values"]; - let mut result: [U]/~ = []/~; - for vec::each(eithers) {|elt| + let mut result: ~[U] = ~[]; + for vec::each(eithers) |elt| { alt elt { right(r) { vec::push(result, r); } _ {/* fallthrough */ } } } ret result; } -fn partition<T: copy, U: copy>(eithers: [either<T, U>]/~) - -> {lefts: [T]/~, rights: [U]/~} { +fn partition<T: copy, U: copy>(eithers: ~[either<T, U>]) + -> {lefts: ~[T], rights: ~[U]} { #[doc = " Extracts from a vector of either all the left values and right values @@ -50,9 +50,9 @@ fn partition<T: copy, U: copy>(eithers: [either<T, U>]/~) right values. "]; - let mut lefts: [T]/~ = []/~; - let mut rights: [U]/~ = []/~; - for vec::each(eithers) {|elt| + let mut lefts: ~[T] = ~[]; + let mut rights: ~[U] = ~[]; + for vec::each(eithers) |elt| { alt elt { left(l) { vec::push(lefts, l); } right(r) { vec::push(rights, r); } @@ -115,49 +115,49 @@ fn test_either_right() { #[test] fn test_lefts() { - let input = [left(10), right(11), left(12), right(13), left(14)]/~; + let input = ~[left(10), right(11), left(12), right(13), left(14)]; let result = lefts(input); - assert (result == [10, 12, 14]/~); + assert (result == ~[10, 12, 14]); } #[test] fn test_lefts_none() { - let input: [either<int, int>]/~ = [right(10), right(10)]/~; + let input: ~[either<int, int>] = ~[right(10), right(10)]; let result = lefts(input); assert (vec::len(result) == 0u); } #[test] fn test_lefts_empty() { - let input: [either<int, int>]/~ = []/~; + let input: ~[either<int, int>] = ~[]; let result = lefts(input); assert (vec::len(result) == 0u); } #[test] fn test_rights() { - let input = [left(10), right(11), left(12), right(13), left(14)]/~; + let input = ~[left(10), right(11), left(12), right(13), left(14)]; let result = rights(input); - assert (result == [11, 13]/~); + assert (result == ~[11, 13]); } #[test] fn test_rights_none() { - let input: [either<int, int>]/~ = [left(10), left(10)]/~; + let input: ~[either<int, int>] = ~[left(10), left(10)]; let result = rights(input); assert (vec::len(result) == 0u); } #[test] fn test_rights_empty() { - let input: [either<int, int>]/~ = []/~; + let input: ~[either<int, int>] = ~[]; let result = rights(input); assert (vec::len(result) == 0u); } #[test] fn test_partition() { - let input = [left(10), right(11), left(12), right(13), left(14)]/~; + let input = ~[left(10), right(11), left(12), right(13), left(14)]; let result = partition(input); assert (result.lefts[0] == 10); assert (result.lefts[1] == 12); @@ -168,7 +168,7 @@ fn test_partition() { #[test] fn test_partition_no_lefts() { - let input: [either<int, int>]/~ = [right(10), right(11)]/~; + let input: ~[either<int, int>] = ~[right(10), right(11)]; let result = partition(input); assert (vec::len(result.lefts) == 0u); assert (vec::len(result.rights) == 2u); @@ -176,7 +176,7 @@ fn test_partition_no_lefts() { #[test] fn test_partition_no_rights() { - let input: [either<int, int>]/~ = [left(10), left(11)]/~; + let input: ~[either<int, int>] = ~[left(10), left(11)]; let result = partition(input); assert (vec::len(result.lefts) == 2u); assert (vec::len(result.rights) == 0u); @@ -184,7 +184,7 @@ fn test_partition_no_rights() { #[test] fn test_partition_empty() { - let input: [either<int, int>]/~ = []/~; + let input: ~[either<int, int>] = ~[]; let result = partition(input); assert (vec::len(result.lefts) == 0u); assert (vec::len(result.rights) == 0u); diff --git a/src/libcore/extfmt.rs b/src/libcore/extfmt.rs index d738b4f6615..e156e595490 100644 --- a/src/libcore/extfmt.rs +++ b/src/libcore/extfmt.rs @@ -9,12 +9,12 @@ The 'fmt' extension is modeled on the posix printf system. A posix conversion ostensibly looks like this -> %[parameter]/~[flags]/~[width]/~[.precision]/~[length]/~type +> %~[parameter]~[flags]~[width]~[.precision]~[length]type Given the different numeric type bestiary we have, we omit the 'length' parameter and support slightly different conversions for 'type' -> %[parameter]/~[flags]/~[width]/~[.precision]/~type +> %~[parameter]~[flags]~[width]~[.precision]type we also only support translating-to-rust a tiny subset of the possible combinations at the moment. @@ -71,7 +71,7 @@ mod ct { // A formatted conversion from an expression to a string type conv = {param: option<int>, - flags: [flag]/~, + flags: ~[flag], width: count, precision: count, ty: ty}; @@ -81,11 +81,11 @@ mod ct { enum piece { piece_string(str), piece_conv(conv), } type error_fn = fn@(str) -> ! ; - fn parse_fmt_string(s: str, error: error_fn) -> [piece]/~ { - let mut pieces: [piece]/~ = []/~; + fn parse_fmt_string(s: str, error: error_fn) -> ~[piece] { + let mut pieces: ~[piece] = ~[]; let lim = str::len(s); let mut buf = ""; - fn flush_buf(buf: str, &pieces: [piece]/~) -> str { + fn flush_buf(buf: str, &pieces: ~[piece]) -> str { if str::len(buf) > 0u { let piece = piece_string(buf); vec::push(pieces, piece); @@ -162,19 +162,19 @@ mod ct { }; } fn parse_flags(s: str, i: uint, lim: uint) -> - {flags: [flag]/~, next: uint} { - let noflags: [flag]/~ = []/~; + {flags: ~[flag], next: uint} { + let noflags: ~[flag] = ~[]; if i >= lim { ret {flags: noflags, next: i}; } fn more_(f: flag, s: str, i: uint, lim: uint) -> - {flags: [flag]/~, next: uint} { + {flags: ~[flag], next: uint} { let next = parse_flags(s, i + 1u, lim); let rest = next.flags; let j = next.next; - let curr: [flag]/~ = [f]/~; + let curr: ~[flag] = ~[f]; ret {flags: vec::append(curr, rest), next: j}; } - let more = {|x|more_(x, s, i, lim)}; + let more = |x| more_(x, s, i, lim); let f = s[i]; ret if f == '-' as u8 { more(flag_left_justify) @@ -264,19 +264,17 @@ mod ct { // conditions can be evaluated at compile-time. For now though it's cleaner to // implement it 0this way, I think. mod rt { - enum flag { - flag_left_justify, - flag_left_zero_pad, - flag_space_for_sign, - flag_sign_always, - flag_alternate, - } + const flag_none : u32 = 0u32; + const flag_left_justify : u32 = 0b00000000000000000000000000000001u32; + const flag_left_zero_pad : u32 = 0b00000000000000000000000000000010u32; + const flag_space_for_sign : u32 = 0b00000000000000000000000000000100u32; + const flag_sign_always : u32 = 0b00000000000000000000000000001000u32; + const flag_alternate : u32 = 0b00000000000000000000000000010000u32; + enum count { count_is(int), count_implied, } enum ty { ty_default, ty_bits, ty_hex_upper, ty_hex_lower, ty_octal, } - // FIXME (#1993): May not want to use a vector here for flags; instead - // just use a bool per flag. - type conv = {flags: [flag]/~, width: count, precision: count, ty: ty}; + type conv = {flags: u32, width: count, precision: count, ty: ty}; fn conv_int(cv: conv, i: int) -> str { let radix = 10u; @@ -307,7 +305,6 @@ mod rt { let s = if b { "true" } else { "false" }; // run the boolean conversion through the string conversion logic, // giving it the same rules for precision, etc. - ret conv_str(cv, s); } fn conv_char(cv: conv, c: char) -> str { @@ -430,9 +427,8 @@ mod rt { } ret padstr + s; } - fn have_flag(flags: [flag]/~, f: flag) -> bool { - for vec::each(flags) {|candidate| if candidate == f { ret true; } } - ret false; + fn have_flag(flags: u32, f: u32) -> bool { + flags & f != 0 } } diff --git a/src/libcore/float.rs b/src/libcore/float.rs index 5f6da4c1674..fcca8e420e4 100644 --- a/src/libcore/float.rs +++ b/src/libcore/float.rs @@ -116,10 +116,10 @@ fn to_str_common(num: float, digits: uint, exact: bool) -> str { let mut frac = num - (trunc as float); // stack of digits - let mut fractionalParts = []/~; + let mut fractionalParts = ~[]; // FIXME: (#2608) - // This used to return right away without rounding, as "[-]/~num", + // This used to return right away without rounding, as "~[-]num", // but given epsilon like in f64.rs, I don't see how the comparison // to epsilon did much when only used there. // if (frac < epsilon && !exact) || digits == 0u { ret accum; } @@ -261,7 +261,7 @@ fn from_str(num: str) -> option<float> { _ { ret none; } } - //Determine if first char is '-'/'+'. Set [pos]/~ and [neg]/~ accordingly. + //Determine if first char is '-'/'+'. Set ~[pos] and ~[neg] accordingly. let mut neg = false; //Sign of the result alt str::char_at(num, 0u) { '-' { @@ -345,7 +345,7 @@ fn from_str(num: str) -> option<float> { pos = char_range.next; } let multiplier = pow_with_uint(10u, exponent); - //Note: not [int::pow]/~, otherwise, we'll quickly + //Note: not ~[int::pow], otherwise, we'll quickly //end up with a nice overflow if neg_exponent { total = total / multiplier; diff --git a/src/libcore/future.rs b/src/libcore/future.rs index a164f69ae5c..51aeb3a354c 100644 --- a/src/libcore/future.rs +++ b/src/libcore/future.rs @@ -64,7 +64,7 @@ fn from_port<A:send>(-port: comm::port<A>) -> future<A> { waiting for the result to be received on the port. "]; - from_fn {|| + do from_fn || { comm::recv(port) } } @@ -93,7 +93,7 @@ fn spawn<A:send>(+blk: fn~() -> A) -> future<A> { let mut po = comm::port(); let ch = comm::chan(po); - task::spawn {|| + do task::spawn || { comm::send(ch, blk()) }; from_port(po) @@ -102,7 +102,7 @@ fn spawn<A:send>(+blk: fn~() -> A) -> future<A> { fn get<A:copy>(future: future<A>) -> A { #[doc = "Get the value of the future"]; - with(future) {|v| v } + do with(future) |v| { v } } fn with<A,B>(future: future<A>, blk: fn(A) -> B) -> B { @@ -150,18 +150,18 @@ fn test_iface_get() { #[test] fn test_with() { let f = from_value("nail"); - assert with(f) {|v| v} == "nail"; + assert with(f, |v| v) == "nail"; } #[test] fn test_iface_with() { let f = from_value("kale"); - assert f.with {|v| v} == "kale"; + assert f.with(|v| v) == "kale"; } #[test] fn test_spawn() { - let f = spawn {|| "bale" }; + let f = spawn(|| "bale"); assert get(f) == "bale"; } @@ -169,6 +169,6 @@ fn test_spawn() { #[should_fail] #[ignore(cfg(target_os = "win32"))] fn test_futurefail() { - let f = spawn {|| fail }; + let f = spawn(|| fail); let _x: str = get(f); } diff --git a/src/libcore/int-template.rs b/src/libcore/int-template.rs index 5721ab750b4..ac11f2f1102 100644 --- a/src/libcore/int-template.rs +++ b/src/libcore/int-template.rs @@ -66,7 +66,7 @@ Parse a buffer of bytes * buf - A byte buffer * radix - The base of the number "] -fn parse_buf(buf: [u8]/~, radix: uint) -> option<T> { +fn parse_buf(buf: ~[u8], radix: uint) -> option<T> { if vec::len(buf) == 0u { ret none; } let mut i = vec::len(buf) - 1u; let mut start = 0u; @@ -93,14 +93,14 @@ fn from_str(s: str) -> option<T> { parse_buf(str::bytes(s), 10u) } #[doc = "Convert to a string in a given base"] fn to_str(n: T, radix: uint) -> str { - to_str_bytes(n, radix) {|slice| - vec::unpack_slice(slice) {|p, len| + do to_str_bytes(n, radix) |slice| { + do vec::unpack_slice(slice) |p, len| { unsafe { str::unsafe::from_buf_len(p, len) } } } } -fn to_str_bytes<U>(n: T, radix: uint, f: fn([u8]/&) -> U) -> U { +fn to_str_bytes<U>(n: T, radix: uint, f: fn(v: &[u8]) -> U) -> U { if n < 0 as T { uint::to_str_bytes(true, -n as uint, radix, f) } else { diff --git a/src/libcore/int-template/int.rs b/src/libcore/int-template/int.rs index 2557b1253b7..51149d7e1fb 100644 --- a/src/libcore/int-template/int.rs +++ b/src/libcore/int-template/int.rs @@ -11,7 +11,7 @@ pure fn hash(&&x: int) -> uint { ret x as uint; } #[doc = "Returns `base` raised to the power of `exponent`"] fn pow(base: int, exponent: uint) -> int { - if exponent == 0u { ret 1; } //Not mathemtically true if [base == 0]/~ + if exponent == 0u { ret 1; } //Not mathemtically true if ~[base == 0] if base == 0 { ret 0; } let mut my_pow = exponent; let mut acc = 1; diff --git a/src/libcore/io.rs b/src/libcore/io.rs index 39322f68ad7..8527f58ca6c 100644 --- a/src/libcore/io.rs +++ b/src/libcore/io.rs @@ -30,7 +30,7 @@ enum seek_style { seek_set, seek_end, seek_cur, } // The raw underlying reader iface. All readers must implement this. iface reader { // FIXME (#2004): Seekable really should be orthogonal. - fn read_bytes(uint) -> [u8]/~; + fn read_bytes(uint) -> ~[u8]; fn read_byte() -> int; fn unread_byte(int); fn eof() -> bool; @@ -41,9 +41,9 @@ iface reader { // Generic utility functions defined on readers impl reader_util for reader { - fn read_chars(n: uint) -> [char]/~ { + fn read_chars(n: uint) -> ~[char] { // returns the (consumed offset, n_req), appends characters to &chars - fn chars_from_buf(buf: [u8]/~, &chars: [char]/~) -> (uint, uint) { + fn chars_from_buf(buf: ~[u8], &chars: ~[char]) -> (uint, uint) { let mut i = 0u; while i < vec::len(buf) { let b0 = buf[i]; @@ -75,8 +75,8 @@ impl reader_util for reader { } ret (i, 0u); } - let mut buf: [u8]/~ = []/~; - let mut chars: [char]/~ = []/~; + let mut buf: ~[u8] = ~[]; + let mut chars: ~[char] = ~[]; // might need more bytes, but reading n will never over-read let mut nbread = n; while nbread > 0u { @@ -110,7 +110,7 @@ impl reader_util for reader { } fn read_line() -> str { - let mut buf = []/~; + let mut buf = ~[]; loop { let ch = self.read_byte(); if ch == -1 || ch == 10 { break; } @@ -120,7 +120,7 @@ impl reader_util for reader { } fn read_c_str() -> str { - let mut buf: [u8]/~ = []/~; + let mut buf: ~[u8] = ~[]; loop { let ch = self.read_byte(); if ch < 1 { break; } else { vec::push(buf, ch as u8); } @@ -156,8 +156,8 @@ impl reader_util for reader { val } - fn read_whole_stream() -> [u8]/~ { - let mut buf: [u8]/~ = []/~; + fn read_whole_stream() -> ~[u8] { + let mut buf: ~[u8] = ~[]; while !self.eof() { vec::push_all(buf, self.read_bytes(2048u)); } buf } @@ -192,10 +192,10 @@ fn convert_whence(whence: seek_style) -> i32 { } impl of reader for *libc::FILE { - fn read_bytes(len: uint) -> [u8]/~ { - let mut buf : [mut u8]/~ = [mut]/~; + fn read_bytes(len: uint) -> ~[u8] { + let mut buf : ~[mut u8] = ~[mut]; vec::reserve(buf, len); - vec::as_mut_buf(buf) {|b| + do vec::as_mut_buf(buf) |b| { let read = libc::fread(b as *mut c_void, 1u as size_t, len as size_t, self); unsafe { vec::unsafe::set_len(buf, read as uint) }; @@ -216,7 +216,7 @@ impl of reader for *libc::FILE { // duration of its lifetime. // FIXME there really should be a better way to do this // #2004 impl <T: reader, C> of reader for {base: T, cleanup: C} { - fn read_bytes(len: uint) -> [u8]/~ { self.base.read_bytes(len) } + fn read_bytes(len: uint) -> ~[u8] { self.base.read_bytes(len) } fn read_byte() -> int { self.base.read_byte() } fn unread_byte(byte: int) { self.base.unread_byte(byte); } fn eof() -> bool { self.base.eof() } @@ -245,10 +245,10 @@ fn FILE_reader(f: *libc::FILE, cleanup: bool) -> reader { fn stdin() -> reader { rustrt::rust_get_stdin() as reader } fn file_reader(path: str) -> result<reader, str> { - let f = os::as_c_charp(path, {|pathbuf| - os::as_c_charp("r", {|modebuf| + let f = os::as_c_charp(path, |pathbuf| { + os::as_c_charp("r", |modebuf| libc::fopen(pathbuf, modebuf) - }) + ) }); ret if f as uint == 0u { result::err("error opening " + path) } else { @@ -260,10 +260,10 @@ fn file_reader(path: str) -> result<reader, str> { // Byte buffer readers // TODO: const u8, but this fails with rustboot. -type byte_buf = {buf: [u8]/~, mut pos: uint, len: uint}; +type byte_buf = {buf: ~[u8], mut pos: uint, len: uint}; impl of reader for byte_buf { - fn read_bytes(len: uint) -> [u8]/~ { + fn read_bytes(len: uint) -> ~[u8] { let rest = self.len - self.pos; let mut to_read = len; if rest < to_read { to_read = rest; } @@ -286,19 +286,19 @@ impl of reader for byte_buf { fn tell() -> uint { self.pos } } -fn bytes_reader(bytes: [u8]/~) -> reader { +fn bytes_reader(bytes: ~[u8]) -> reader { bytes_reader_between(bytes, 0u, vec::len(bytes)) } -fn bytes_reader_between(bytes: [u8]/~, start: uint, end: uint) -> reader { +fn bytes_reader_between(bytes: ~[u8], start: uint, end: uint) -> reader { {buf: bytes, mut pos: start, len: end} as reader } -fn with_bytes_reader<t>(bytes: [u8]/~, f: fn(reader) -> t) -> t { +fn with_bytes_reader<t>(bytes: ~[u8], f: fn(reader) -> t) -> t { f(bytes_reader(bytes)) } -fn with_bytes_reader_between<t>(bytes: [u8]/~, start: uint, end: uint, +fn with_bytes_reader_between<t>(bytes: ~[u8], start: uint, end: uint, f: fn(reader) -> t) -> t { f(bytes_reader_between(bytes, start, end)) } @@ -308,7 +308,7 @@ fn str_reader(s: str) -> reader { } fn with_str_reader<T>(s: str, f: fn(reader) -> T) -> T { - str::as_bytes(s) { |bytes| + do str::as_bytes(s) |bytes| { with_bytes_reader_between(bytes, 0u, str::len(s), f) } } @@ -319,22 +319,22 @@ enum fileflag { append, create, truncate, no_flag, } // FIXME (#2004): Seekable really should be orthogonal. // FIXME (#2004): eventually u64 iface writer { - fn write([const u8]/&); + fn write(v: &[const u8]); fn seek(int, seek_style); fn tell() -> uint; fn flush() -> int; } impl <T: writer, C> of writer for {base: T, cleanup: C} { - fn write(bs: [const u8]/&) { self.base.write(bs); } + fn write(bs: &[const u8]) { self.base.write(bs); } fn seek(off: int, style: seek_style) { self.base.seek(off, style); } fn tell() -> uint { self.base.tell() } fn flush() -> int { self.base.flush() } } impl of writer for *libc::FILE { - fn write(v: [const u8]/&) { - vec::unpack_const_slice(v) {|vbuf, len| + fn write(v: &[const u8]) { + do vec::unpack_const_slice(v) |vbuf, len| { let nout = libc::fwrite(vbuf as *c_void, len as size_t, 1u as size_t, self); if nout < 1 as size_t { @@ -361,9 +361,9 @@ fn FILE_writer(f: *libc::FILE, cleanup: bool) -> writer { } impl of writer for fd_t { - fn write(v: [const u8]/&) { + fn write(v: &[const u8]) { let mut count = 0u; - vec::unpack_const_slice(v) {|vbuf, len| + do vec::unpack_const_slice(v) |vbuf, len| { while count < len { let vb = ptr::const_offset(vbuf, count) as *c_void; let nout = libc::write(self, vb, len as size_t); @@ -402,7 +402,7 @@ fn fd_writer(fd: fd_t, cleanup: bool) -> writer { } -fn mk_file_writer(path: str, flags: [fileflag]/~) +fn mk_file_writer(path: str, flags: ~[fileflag]) -> result<writer, str> { #[cfg(windows)] @@ -412,7 +412,7 @@ fn mk_file_writer(path: str, flags: [fileflag]/~) fn wb() -> c_int { O_WRONLY as c_int } let mut fflags: c_int = wb(); - for vec::each(flags) {|f| + for vec::each(flags) |f| { alt f { append { fflags |= O_APPEND as c_int; } create { fflags |= O_CREAT as c_int; } @@ -420,7 +420,7 @@ fn mk_file_writer(path: str, flags: [fileflag]/~) no_flag { } } } - let fd = os::as_c_charp(path) {|pathbuf| + let fd = do os::as_c_charp(path) |pathbuf| { libc::open(pathbuf, fflags, (S_IRUSR | S_IWUSR) as c_int) }; @@ -431,27 +431,27 @@ fn mk_file_writer(path: str, flags: [fileflag]/~) } } -fn u64_to_le_bytes<T>(n: u64, size: uint, f: fn([u8]/&) -> T) -> T { +fn u64_to_le_bytes<T>(n: u64, size: uint, f: fn(v: &[u8]) -> T) -> T { assert size <= 8u; alt size { - 1u { f([n as u8]/&) } - 2u { f([n as u8, - (n >> 8) as u8]/&) } - 4u { f([n as u8, + 1u { f(&[n as u8]) } + 2u { f(&[n as u8, + (n >> 8) as u8]) } + 4u { f(&[n as u8, (n >> 8) as u8, (n >> 16) as u8, - (n >> 24) as u8]/&) } - 8u { f([n as u8, + (n >> 24) as u8]) } + 8u { f(&[n as u8, (n >> 8) as u8, (n >> 16) as u8, (n >> 24) as u8, (n >> 32) as u8, (n >> 40) as u8, (n >> 48) as u8, - (n >> 56) as u8]/&) } + (n >> 56) as u8]) } _ { - let mut bytes: [u8]/~ = []/~, i = size, n = n; + let mut bytes: ~[u8] = ~[], i = size, n = n; while i > 0u { vec::push(bytes, (n & 255_u64) as u8); n >>= 8_u64; @@ -462,26 +462,26 @@ fn u64_to_le_bytes<T>(n: u64, size: uint, f: fn([u8]/&) -> T) -> T { } } -fn u64_to_be_bytes<T>(n: u64, size: uint, f: fn([u8]/&) -> T) -> T { +fn u64_to_be_bytes<T>(n: u64, size: uint, f: fn(v: &[u8]) -> T) -> T { assert size <= 8u; alt size { - 1u { f([n as u8]/&) } - 2u { f([(n >> 8) as u8, - n as u8]/&) } - 4u { f([(n >> 24) as u8, + 1u { f(&[n as u8]) } + 2u { f(&[(n >> 8) as u8, + n as u8]) } + 4u { f(&[(n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, - n as u8]/&) } - 8u { f([(n >> 56) as u8, + n as u8]) } + 8u { f(&[(n >> 56) as u8, (n >> 48) as u8, (n >> 40) as u8, (n >> 32) as u8, (n >> 24) as u8, (n >> 16) as u8, (n >> 8) as u8, - n as u8]/&) } + n as u8]) } _ { - let mut bytes: [u8]/~ = []/~; + let mut bytes: ~[u8] = ~[]; let mut i = size; while i > 0u { let shift = ((i - 1u) * 8u) as u64; @@ -493,7 +493,7 @@ fn u64_to_be_bytes<T>(n: u64, size: uint, f: fn([u8]/&) -> T) -> T { } } -fn u64_from_be_bytes(data: [u8]/~, start: uint, size: uint) -> u64 { +fn u64_from_be_bytes(data: ~[u8], start: uint, size: uint) -> u64 { let mut sz = size; assert (sz <= 8u); let mut val = 0_u64; @@ -509,83 +509,83 @@ fn u64_from_be_bytes(data: [u8]/~, start: uint, size: uint) -> u64 { impl writer_util for writer { fn write_char(ch: char) { if ch as uint < 128u { - self.write([ch as u8]/&); + self.write(&[ch as u8]); } else { self.write_str(str::from_char(ch)); } } - fn write_str(s: str/&) { str::byte_slice(s) {|v| self.write(v); } } + fn write_str(s: str/&) { str::byte_slice(s, |v| self.write(v)) } fn write_line(s: str/&) { self.write_str(s); self.write_str("\n"/&); } fn write_int(n: int) { - int::to_str_bytes(n, 10u) {|buf| self.write(buf) } + int::to_str_bytes(n, 10u, |buf| self.write(buf)) } fn write_uint(n: uint) { - uint::to_str_bytes(false, n, 10u) {|buf| self.write(buf) } + uint::to_str_bytes(false, n, 10u, |buf| self.write(buf)) } fn write_le_uint(n: uint, size: uint) { - u64_to_le_bytes(n as u64, size) {|v| self.write(v); } + u64_to_le_bytes(n as u64, size, |v| self.write(v)) } fn write_le_int(n: int, size: uint) { - u64_to_le_bytes(n as u64, size) {|v| self.write(v); } + u64_to_le_bytes(n as u64, size, |v| self.write(v)) } fn write_be_uint(n: uint, size: uint) { - u64_to_be_bytes(n as u64, size) {|v| self.write(v); } + u64_to_be_bytes(n as u64, size, |v| self.write(v)) } fn write_be_int(n: int, size: uint) { - u64_to_be_bytes(n as u64, size) {|v| self.write(v); } + u64_to_be_bytes(n as u64, size, |v| self.write(v)) } fn write_be_u64(n: u64) { - u64_to_be_bytes(n, 8u) {|v| self.write(v); } + u64_to_be_bytes(n, 8u, |v| self.write(v)) } fn write_be_u32(n: u32) { - u64_to_be_bytes(n as u64, 4u) {|v| self.write(v); } + u64_to_be_bytes(n as u64, 4u, |v| self.write(v)) } fn write_be_u16(n: u16) { - u64_to_be_bytes(n as u64, 2u) {|v| self.write(v); } + u64_to_be_bytes(n as u64, 2u, |v| self.write(v)) } fn write_be_i64(n: i64) { - u64_to_be_bytes(n as u64, 8u) {|v| self.write(v); } + u64_to_be_bytes(n as u64, 8u, |v| self.write(v)) } fn write_be_i32(n: i32) { - u64_to_be_bytes(n as u64, 4u) {|v| self.write(v); } + u64_to_be_bytes(n as u64, 4u, |v| self.write(v)) } fn write_be_i16(n: i16) { - u64_to_be_bytes(n as u64, 2u) {|v| self.write(v); } + u64_to_be_bytes(n as u64, 2u, |v| self.write(v)) } fn write_le_u64(n: u64) { - u64_to_le_bytes(n, 8u) {|v| self.write(v); } + u64_to_le_bytes(n, 8u, |v| self.write(v)) } fn write_le_u32(n: u32) { - u64_to_le_bytes(n as u64, 4u) {|v| self.write(v); } + u64_to_le_bytes(n as u64, 4u, |v| self.write(v)) } fn write_le_u16(n: u16) { - u64_to_le_bytes(n as u64, 2u) {|v| self.write(v); } + u64_to_le_bytes(n as u64, 2u, |v| self.write(v)) } fn write_le_i64(n: i64) { - u64_to_le_bytes(n as u64, 8u) {|v| self.write(v); } + u64_to_le_bytes(n as u64, 8u, |v| self.write(v)) } fn write_le_i32(n: i32) { - u64_to_le_bytes(n as u64, 4u) {|v| self.write(v); } + u64_to_le_bytes(n as u64, 4u, |v| self.write(v)) } fn write_le_i16(n: i16) { - u64_to_le_bytes(n as u64, 2u) {|v| self.write(v); } + u64_to_le_bytes(n as u64, 2u, |v| self.write(v)) } - fn write_u8(n: u8) { self.write([n]/&) } + fn write_u8(n: u8) { self.write(&[n]) } } -fn file_writer(path: str, flags: [fileflag]/~) -> result<writer, str> { - result::chain(mk_file_writer(path, flags), { |w| result::ok(w)}) +fn file_writer(path: str, flags: ~[fileflag]) -> result<writer, str> { + result::chain(mk_file_writer(path, flags), |w| result::ok(w)) } // FIXME: fileflags // #2004 fn buffered_file_writer(path: str) -> result<writer, str> { - let f = os::as_c_charp(path) {|pathbuf| - os::as_c_charp("w") {|modebuf| + let f = do os::as_c_charp(path) |pathbuf| { + do os::as_c_charp("w") |modebuf| { libc::fopen(pathbuf, modebuf) } }; @@ -605,7 +605,7 @@ fn println(s: str) { stdout().write_line(s); } type mem_buffer = @{buf: dvec<u8>, mut pos: uint}; impl of writer for mem_buffer { - fn write(v: [const u8]/&) { + fn write(v: &[const u8]) { // Fast path. let vlen = vec::len(v); let buf_len = self.buf.len(); @@ -638,7 +638,7 @@ fn mem_buffer() -> mem_buffer { @{buf: dvec(), mut pos: 0u} } fn mem_buffer_writer(b: mem_buffer) -> writer { b as writer } -fn mem_buffer_buf(b: mem_buffer) -> [u8]/~ { b.buf.get() } +fn mem_buffer_buf(b: mem_buffer) -> ~[u8] { b.buf.get() } fn mem_buffer_str(b: mem_buffer) -> str { str::from_bytes(b.buf.get()) } @@ -650,7 +650,7 @@ fn with_str_writer(f: fn(writer)) -> str { io::mem_buffer_str(buf) } -fn with_buf_writer(f: fn(writer)) -> [u8]/~ { +fn with_buf_writer(f: fn(writer)) -> ~[u8] { let buf = mem_buffer(); let wr = mem_buffer_writer(buf); f(wr); @@ -672,15 +672,15 @@ fn seek_in_buf(offset: int, pos: uint, len: uint, whence: seek_style) -> } fn read_whole_file_str(file: str) -> result<str, str> { - result::chain(read_whole_file(file), { |bytes| + result::chain(read_whole_file(file), |bytes| { result::ok(str::from_bytes(bytes)) }) } // FIXME (#2004): implement this in a low-level way. Going through the // abstractions is pointless. -fn read_whole_file(file: str) -> result<[u8]/~, str> { - result::chain(file_reader(file), { |rdr| +fn read_whole_file(file: str) -> result<~[u8], str> { + result::chain(file_reader(file), |rdr| { result::ok(rdr.read_whole_stream()) }) } @@ -772,7 +772,7 @@ mod tests { { let out: io::writer = result::get( - io::file_writer(tmpfile, [io::create, io::truncate]/~)); + io::file_writer(tmpfile, ~[io::create, io::truncate])); out.write_str(frood); } let inp: io::reader = result::get(io::file_reader(tmpfile)); @@ -784,27 +784,27 @@ mod tests { #[test] fn test_readchars_empty() { let inp : io::reader = io::str_reader(""); - let res : [char]/~ = inp.read_chars(128u); + let res : ~[char] = inp.read_chars(128u); assert(vec::len(res) == 0u); } #[test] fn test_readchars_wide() { let wide_test = "生锈的汤匙切肉汤hello生锈的汤匙切肉汤"; - let ivals : [int]/~ = [ + let ivals : ~[int] = ~[ 29983, 38152, 30340, 27748, 21273, 20999, 32905, 27748, 104, 101, 108, 108, 111, 29983, 38152, 30340, 27748, - 21273, 20999, 32905, 27748]/~; - fn check_read_ln(len : uint, s: str, ivals: [int]/~) { + 21273, 20999, 32905, 27748]; + fn check_read_ln(len : uint, s: str, ivals: ~[int]) { let inp : io::reader = io::str_reader(s); - let res : [char]/~ = inp.read_chars(len); + let res : ~[char] = inp.read_chars(len); if (len <= vec::len(ivals)) { assert(vec::len(res) == len); } assert(vec::slice(ivals, 0u, vec::len(res)) == - vec::map(res, {|x| x as int})); + vec::map(res, |x| x as int)); } let mut i = 0u; while i < 8u { @@ -841,7 +841,7 @@ mod tests { #[test] fn file_writer_bad_name() { - alt io::file_writer("?/?", []/~) { + alt io::file_writer("?/?", ~[]) { result::err(e) { assert str::starts_with(e, "error opening ?/?"); } @@ -862,16 +862,16 @@ mod tests { #[test] fn mem_buffer_overwrite() { let mbuf = mem_buffer(); - mbuf.write([0u8, 1u8, 2u8, 3u8]/~); - assert mem_buffer_buf(mbuf) == [0u8, 1u8, 2u8, 3u8]/~; + mbuf.write(~[0u8, 1u8, 2u8, 3u8]); + assert mem_buffer_buf(mbuf) == ~[0u8, 1u8, 2u8, 3u8]; mbuf.seek(-2, seek_cur); - mbuf.write([4u8, 5u8, 6u8, 7u8]/~); - assert mem_buffer_buf(mbuf) == [0u8, 1u8, 4u8, 5u8, 6u8, 7u8]/~; + mbuf.write(~[4u8, 5u8, 6u8, 7u8]); + assert mem_buffer_buf(mbuf) == ~[0u8, 1u8, 4u8, 5u8, 6u8, 7u8]; mbuf.seek(-2, seek_end); - mbuf.write([8u8]/~); + mbuf.write(~[8u8]); mbuf.seek(1, seek_set); - mbuf.write([9u8]/~); - assert mem_buffer_buf(mbuf) == [0u8, 9u8, 4u8, 5u8, 8u8, 7u8]/~; + mbuf.write(~[9u8]); + assert mem_buffer_buf(mbuf) == ~[0u8, 9u8, 4u8, 5u8, 8u8, 7u8]; } } diff --git a/src/libcore/iter-trait.rs b/src/libcore/iter-trait.rs index 468bebce4b3..1fae52b4afe 100644 --- a/src/libcore/iter-trait.rs +++ b/src/libcore/iter-trait.rs @@ -22,14 +22,14 @@ impl extensions<A> of iter::base_iter<A> for IMPL_T<A> { } impl extensions<A:copy> for IMPL_T<A> { - fn filter_to_vec(pred: fn(A) -> bool) -> [A]/~ { + fn filter_to_vec(pred: fn(A) -> bool) -> ~[A] { iter::filter_to_vec(self, pred) } - fn map_to_vec<B>(op: fn(A) -> B) -> [B]/~ { iter::map_to_vec(self, op) } - fn to_vec() -> [A]/~ { iter::to_vec(self) } + fn map_to_vec<B>(op: fn(A) -> B) -> ~[B] { iter::map_to_vec(self, op) } + fn to_vec() -> ~[A] { iter::to_vec(self) } // FIXME--bug in resolve prevents this from working (#2611) - // fn flat_map_to_vec<B:copy,IB:base_iter<B>>(op: fn(A) -> IB) -> [B]/~ { + // fn flat_map_to_vec<B:copy,IB:base_iter<B>>(op: fn(A) -> IB) -> ~[B] { // iter::flat_map_to_vec(self, op) // } diff --git a/src/libcore/iter-trait/dlist.rs b/src/libcore/iter-trait/dlist.rs new file mode 100644 index 00000000000..f97dce5854d --- /dev/null +++ b/src/libcore/iter-trait/dlist.rs @@ -0,0 +1,36 @@ +type IMPL_T<A> = dlist::dlist<A>; + +#[doc = " +Iterates through the current contents. + +Attempts to access this dlist during iteration are allowed (to allow for e.g. +breadth-first search with in-place enqueues), but removing the current node +is forbidden. +"] +fn EACH<A>(self: IMPL_T<A>, f: fn(A) -> bool) { + import dlist::extensions; + + let mut link = self.peek_n(); + while option::is_some(link) { + let nobe = option::get(link); + // Check dlist invariant. + if !option::is_some(nobe.root) || + !box::ptr_eq(*option::get(nobe.root), *self) { + fail "Iteration encountered a dlist node not on this dlist." + } + f(nobe.data); + // Check that the user didn't do a remove. + // Note that this makes it ok for the user to remove the node and then + // immediately put it back in a different position. I allow this. + if !option::is_some(nobe.root) || + !box::ptr_eq(*option::get(nobe.root), *self) { + fail "Removing a dlist node during iteration is forbidden!" + } + link = nobe.next_link(); + } +} + +fn SIZE_HINT<A>(self: IMPL_T<A>) -> option<uint> { + import dlist::extensions; + some(self.len()) +} diff --git a/src/libcore/iter-trait/dvec.rs b/src/libcore/iter-trait/dvec.rs index c68134153cd..3f1f4db6a4d 100644 --- a/src/libcore/iter-trait/dvec.rs +++ b/src/libcore/iter-trait/dvec.rs @@ -7,7 +7,7 @@ Attempts to access this dvec during iteration will fail. "] fn EACH<A>(self: IMPL_T<A>, f: fn(A) -> bool) { import dvec::extensions; - self.swap { |v| vec::each(v, f); v } + self.swap(|v| { vec::each(v, f); v }) } fn SIZE_HINT<A>(self: IMPL_T<A>) -> option<uint> { diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs index f08fa20fc53..c5f395d376c 100644 --- a/src/libcore/iter.rs +++ b/src/libcore/iter.rs @@ -5,51 +5,51 @@ iface base_iter<A> { fn eachi<A,IA:base_iter<A>>(self: IA, blk: fn(uint, A) -> bool) { let mut i = 0u; - for self.each {|a| + for self.each |a| { if !blk(i, a) { break; } i += 1u; } } fn all<A,IA:base_iter<A>>(self: IA, blk: fn(A) -> bool) -> bool { - for self.each {|a| + for self.each |a| { if !blk(a) { ret false; } } ret true; } fn any<A,IA:base_iter<A>>(self: IA, blk: fn(A) -> bool) -> bool { - for self.each {|a| + for self.each |a| { if blk(a) { ret true; } } ret false; } fn filter_to_vec<A:copy,IA:base_iter<A>>(self: IA, - prd: fn(A) -> bool) -> [A]/~ { - let mut result = []/~; - self.size_hint().iter {|hint| vec::reserve(result, hint); } - for self.each {|a| + prd: fn(A) -> bool) -> ~[A] { + let mut result = ~[]; + self.size_hint().iter(|hint| vec::reserve(result, hint)); + for self.each |a| { if prd(a) { vec::push(result, a); } } ret result; } -fn map_to_vec<A:copy,B,IA:base_iter<A>>(self: IA, op: fn(A) -> B) -> [B]/~ { - let mut result = []/~; - self.size_hint().iter {|hint| vec::reserve(result, hint); } - for self.each {|a| +fn map_to_vec<A:copy,B,IA:base_iter<A>>(self: IA, op: fn(A) -> B) -> ~[B] { + let mut result = ~[]; + self.size_hint().iter(|hint| vec::reserve(result, hint)); + for self.each |a| { vec::push(result, op(a)); } ret result; } fn flat_map_to_vec<A:copy,B:copy,IA:base_iter<A>,IB:base_iter<B>>( - self: IA, op: fn(A) -> IB) -> [B]/~ { + self: IA, op: fn(A) -> IB) -> ~[B] { - let mut result = []/~; - for self.each {|a| - for op(a).each {|b| + let mut result = ~[]; + for self.each |a| { + for op(a).each |b| { vec::push(result, b); } } @@ -58,25 +58,25 @@ fn flat_map_to_vec<A:copy,B:copy,IA:base_iter<A>,IB:base_iter<B>>( fn foldl<A,B,IA:base_iter<A>>(self: IA, +b0: B, blk: fn(B, A) -> B) -> B { let mut b <- b0; - for self.each {|a| + for self.each |a| { b = blk(b, a); } ret b; } -fn to_vec<A:copy,IA:base_iter<A>>(self: IA) -> [A]/~ { - foldl::<A,[A]/~,IA>(self, []/~, {|r, a| vec::append(r, [a]/~) }) +fn to_vec<A:copy,IA:base_iter<A>>(self: IA) -> ~[A] { + foldl::<A,~[A],IA>(self, ~[], |r, a| vec::append(r, ~[a])) } fn contains<A,IA:base_iter<A>>(self: IA, x: A) -> bool { - for self.each {|a| + for self.each |a| { if a == x { ret true; } } ret false; } fn count<A,IA:base_iter<A>>(self: IA, x: A) -> uint { - foldl(self, 0u) {|count, value| + do foldl(self, 0u) |count, value| { if value == x { count + 1u } else { @@ -88,7 +88,7 @@ fn count<A,IA:base_iter<A>>(self: IA, x: A) -> uint { fn position<A,IA:base_iter<A>>(self: IA, f: fn(A) -> bool) -> option<uint> { let mut i = 0; - for self.each {|a| + for self.each |a| { if f(a) { ret some(i); } i += 1; } @@ -108,7 +108,7 @@ fn repeat(times: uint, blk: fn()) { } fn min<A:copy,IA:base_iter<A>>(self: IA) -> A { - alt foldl::<A,option<A>,IA>(self, none) {|a, b| + alt do foldl::<A,option<A>,IA>(self, none) |a, b| { alt a { some(a_) if a_ < b { // FIXME (#2005): Not sure if this is successfully optimized to @@ -124,7 +124,7 @@ fn min<A:copy,IA:base_iter<A>>(self: IA) -> A { } fn max<A:copy,IA:base_iter<A>>(self: IA) -> A { - alt foldl::<A,option<A>,IA>(self, none) {|a, b| + alt do foldl::<A,option<A>,IA>(self, none) |a, b| { alt a { some(a_) if a_ > b { // FIXME (#2005): Not sure if this is successfully optimized to @@ -149,17 +149,17 @@ fn test_enumerate() { #[test] fn test_map_and_to_vec() { - let a = bind vec::iter([0, 1, 2]/~, _); + let a = bind vec::iter(~[0, 1, 2], _); let b = bind map(a, {|i| 2*i}, _); let c = to_vec(b); - assert c == [0, 2, 4]/~; + assert c == ~[0, 2, 4]; } #[test] fn test_map_directly_on_vec() { - let b = bind map([0, 1, 2]/~, {|i| 2*i}, _); + let b = bind map(~[0, 1, 2], {|i| 2*i}, _); let c = to_vec(b); - assert c == [0, 2, 4]/~; + assert c == ~[0, 2, 4]; } #[test] @@ -169,7 +169,7 @@ fn test_filter_on_int_range() { } let l = to_vec(bind filter(bind int::range(0, 10, _), is_even, _)); - assert l == [0, 2, 4, 6, 8]/~; + assert l == ~[0, 2, 4, 6, 8]; } #[test] @@ -179,7 +179,7 @@ fn test_filter_on_uint_range() { } let l = to_vec(bind filter(bind uint::range(0u, 10u, _), is_even, _)); - assert l == [0u, 2u, 4u, 6u, 8u]/~; + assert l == ~[0u, 2u, 4u, 6u, 8u]; } #[test] @@ -194,7 +194,7 @@ fn test_filter_map() { let l = to_vec(bind filter_map( bind int::range(0, 5, _), negativate_the_evens, _)); - assert l == [0, -2, -4]/~; + assert l == ~[0, -2, -4]; } #[test] @@ -204,70 +204,70 @@ fn test_flat_map_with_option() { else { none } } - let a = bind vec::iter([0, 1, 2]/~, _); + let a = bind vec::iter(~[0, 1, 2], _); let b = bind flat_map(a, if_even, _); let c = to_vec(b); - assert c == [0, 2]/~; + assert c == ~[0, 2]; } #[test] fn test_flat_map_with_list() { - fn repeat(&&i: int) -> [int]/~ { - let mut r = []/~; - int::range(0, i) {|_j| r += [i]/~; } + fn repeat(&&i: int) -> ~[int] { + let mut r = ~[]; + int::range(0, i) {|_j| r += ~[i]; } r } - let a = bind vec::iter([0, 1, 2, 3]/~, _); + let a = bind vec::iter(~[0, 1, 2, 3], _); let b = bind flat_map(a, repeat, _); let c = to_vec(b); #debug["c = %?", c]; - assert c == [1, 2, 2, 3, 3, 3]/~; + assert c == ~[1, 2, 2, 3, 3, 3]; } #[test] fn test_repeat() { - let mut c = []/~, i = 0u; + let mut c = ~[], i = 0u; repeat(5u) {|| - c += [(i * i)]/~; + c += ~[(i * i)]; i += 1u; }; #debug["c = %?", c]; - assert c == [0u, 1u, 4u, 9u, 16u]/~; + assert c == ~[0u, 1u, 4u, 9u, 16u]; } #[test] fn test_min() { - assert min([5, 4, 1, 2, 3]/~) == 1; + assert min(~[5, 4, 1, 2, 3]) == 1; } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_min_empty() { - min::<int, [int]/~>([]/~); + min::<int, ~[int]>(~[]); } #[test] fn test_max() { - assert max([1, 2, 4, 2, 3]/~) == 4; + assert max(~[1, 2, 4, 2, 3]) == 4; } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_max_empty() { - max::<int, [int]/~>([]/~); + max::<int, ~[int]>(~[]); } #[test] fn test_reversed() { - assert to_vec(bind reversed([1, 2, 3]/~, _)) == [3, 2, 1]/~; + assert to_vec(bind reversed(~[1, 2, 3], _)) == ~[3, 2, 1]; } #[test] fn test_count() { - assert count([1, 2, 1, 2, 1]/~, 1) == 3u; + assert count(~[1, 2, 1, 2, 1], 1) == 3u; } #[test] @@ -275,7 +275,7 @@ fn test_foldr() { fn sub(&&a: int, &&b: int) -> int { a - b } - let sum = foldr([1, 2, 3, 4]/~, 0, sub); + let sum = foldr(~[1, 2, 3, 4], 0, sub); assert sum == -2; } */ diff --git a/src/libcore/newcomm.rs b/src/libcore/newcomm.rs index 24c4c1cbd9c..c97420646c9 100644 --- a/src/libcore/newcomm.rs +++ b/src/libcore/newcomm.rs @@ -31,7 +31,7 @@ fn chan<T: send>(p: port<T>) -> chan<T> { fn send<T: send>(c: chan<T>, -x: T) { let mut x <- some(x); - (*c).with {|cond, data| + do (*c).with |cond, data| { let mut xx = none; xx <-> x; (*data).push(option::unwrap(xx)); @@ -40,7 +40,7 @@ fn send<T: send>(c: chan<T>, -x: T) { } fn recv<T: send>(p: port<T>) -> T { - (*p).with {|cond, data| + do (*p).with |cond, data| { if (*data).len() == 0u { cond.wait(); } diff --git a/src/libcore/option.rs b/src/libcore/option.rs index fe07198b422..3d47c51a73f 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -133,10 +133,10 @@ fn test_unwrap_ptr() { #[test] fn test_unwrap_str() { let x = "test"; - let addr_x = str::as_buf(x) {|buf| ptr::addr_of(buf) }; + let addr_x = str::as_buf(x, |buf| ptr::addr_of(buf)); let opt = some(x); let y = unwrap(opt); - let addr_y = str::as_buf(y) {|buf| ptr::addr_of(buf) }; + let addr_y = str::as_buf(y, |buf| ptr::addr_of(buf)); assert addr_x == addr_y; } diff --git a/src/libcore/os.rs b/src/libcore/os.rs index 2c562f179f2..b945a998084 100644 --- a/src/libcore/os.rs +++ b/src/libcore/os.rs @@ -40,20 +40,20 @@ export walk_dir; export as_c_charp, fill_charp_buf; native mod rustrt { - fn rust_env_pairs() -> [str]/~; + fn rust_env_pairs() -> ~[str]; fn rust_getcwd() -> str; fn rust_path_is_dir(path: *libc::c_char) -> c_int; fn rust_path_exists(path: *libc::c_char) -> c_int; - fn rust_list_files(path: str) -> [str]/~; + fn rust_list_files(path: str) -> ~[str]; fn rust_process_wait(handle: c_int) -> c_int; fn last_os_error() -> str; fn rust_set_exit_status(code: libc::intptr_t); } -fn env() -> [(str,str)]/~ { - let mut pairs = []/~; - for vec::each(rustrt::rust_env_pairs()) {|p| +fn env() -> ~[(str,str)] { + let mut pairs = ~[]; + for vec::each(rustrt::rust_env_pairs()) |p| { let vs = str::splitn_char(p, '=', 1u); assert vec::len(vs) == 2u; vec::push(pairs, (vs[0], vs[1])); @@ -64,13 +64,13 @@ fn env() -> [(str,str)]/~ { const tmpbuf_sz : uint = 1000u; fn as_c_charp<T>(s: str, f: fn(*c_char) -> T) -> T { - str::as_c_str(s) {|b| f(b as *c_char) } + str::as_c_str(s, |b| f(b as *c_char)) } fn fill_charp_buf(f: fn(*mut c_char, size_t) -> bool) -> option<str> { let buf = vec::to_mut(vec::from_elem(tmpbuf_sz, 0u8 as c_char)); - vec::as_mut_buf(buf) { |b| + do vec::as_mut_buf(buf) |b| { if f(b, tmpbuf_sz as size_t) unsafe { some(str::unsafe::from_buf(b as *u8)) } else { @@ -95,7 +95,7 @@ mod win32 { let mut done = false; while !done { let buf = vec::to_mut(vec::from_elem(n as uint, 0u16)); - vec::as_mut_buf(buf) {|b| + do vec::as_mut_buf(buf) |b| { let k : dword = f(b, tmpbuf_sz as dword); if k == (0 as dword) { done = true; @@ -116,7 +116,7 @@ mod win32 { fn as_utf16_p<T>(s: str, f: fn(*u16) -> T) -> T { let mut t = str::to_utf16(s); // Null terminate before passing on. - t += [0u16]/~; + t += ~[0u16]; vec::as_buf(t, f) } } @@ -160,7 +160,7 @@ mod global_env { fn get_global_env_chan() -> comm::chan<msg> { let global_ptr = rustrt::rust_global_env_chan_ptr(); - let builder_fn = {|| + let builder_fn = || { let builder = task::builder(); task::unsupervise(builder); task::set_opts(builder, { @@ -182,7 +182,7 @@ mod global_env { fn global_env_task(msg_po: comm::port<msg>) { unsafe { - priv::weaken_task {|weak_po| + do priv::weaken_task |weak_po| { loop { alt comm::select2(msg_po, weak_po) { either::left(msg_getenv(n, resp_ch)) { @@ -220,8 +220,8 @@ mod global_env { import libc::types::os::arch::extra::*; import libc::funcs::extra::kernel32::*; import win32::*; - as_utf16_p(n) {|u| - fill_utf16_buf_and_decode() {|buf, sz| + do as_utf16_p(n) |u| { + do fill_utf16_buf_and_decode() |buf, sz| { GetEnvironmentVariableW(u, buf, sz) } } @@ -233,8 +233,8 @@ mod global_env { // FIXME: remove this when export globs work properly. #1238 import libc::funcs::posix01::unistd::setenv; - str::as_c_str(n) {|nbuf| - str::as_c_str(v) {|vbuf| + do str::as_c_str(n) |nbuf| { + do str::as_c_str(v) |vbuf| { setenv(nbuf, vbuf, 1i32); } } @@ -246,8 +246,8 @@ mod global_env { // FIXME: remove imports when export globs work properly. #1238 import libc::funcs::extra::kernel32::*; import win32::*; - as_utf16_p(n) {|nbuf| - as_utf16_p(v) {|vbuf| + do as_utf16_p(n) |nbuf| { + do as_utf16_p(v) |vbuf| { SetEnvironmentVariableW(nbuf, vbuf); } } @@ -257,7 +257,7 @@ mod global_env { } fn fdopen(fd: c_int) -> *FILE { - ret as_c_charp("r") {|modebuf| + ret do as_c_charp("r") |modebuf| { libc::fdopen(fd, modebuf) }; } @@ -370,10 +370,10 @@ fn self_exe_path() -> option<path> { unsafe { import libc::funcs::bsd44::*; import libc::consts::os::extra::*; - fill_charp_buf() {|buf, sz| - let mib = [CTL_KERN as c_int, + do fill_charp_buf() |buf, sz| { + let mib = ~[CTL_KERN as c_int, KERN_PROC as c_int, - KERN_PROC_PATHNAME as c_int, -1 as c_int]/~; + KERN_PROC_PATHNAME as c_int, -1 as c_int]; sysctl(vec::unsafe::to_ptr(mib), vec::len(mib) as c_uint, buf as *mut c_void, ptr::mut_addr_of(sz), ptr::null(), 0u as size_t) == (0 as c_int) @@ -384,8 +384,8 @@ fn self_exe_path() -> option<path> { #[cfg(target_os = "linux")] fn load_self() -> option<path> { import libc::funcs::posix01::unistd::readlink; - fill_charp_buf() {|buf, sz| - as_c_charp("/proc/self/exe") { |proc_self_buf| + do fill_charp_buf() |buf, sz| { + do as_c_charp("/proc/self/exe") |proc_self_buf| { readlink(proc_self_buf, buf, sz) != (-1 as ssize_t) } } @@ -395,8 +395,7 @@ fn self_exe_path() -> option<path> { fn load_self() -> option<path> { // FIXME: remove imports when export globs work properly. #1238 import libc::funcs::extra::*; - - fill_charp_buf() {|buf, sz| + do fill_charp_buf() |buf, sz| { _NSGetExecutablePath(buf, ptr::mut_addr_of(sz as u32)) == (0 as c_int) } @@ -408,12 +407,12 @@ fn self_exe_path() -> option<path> { import libc::types::os::arch::extra::*; import libc::funcs::extra::kernel32::*; import win32::*; - fill_utf16_buf_and_decode() {|buf, sz| + do fill_utf16_buf_and_decode() |buf, sz| { GetModuleFileNameW(0u as dword, buf, sz) } } - option::map(load_self()) {|pth| + do option::map(load_self()) |pth| { path::dirname(pth) + path::path_sep() } } @@ -453,7 +452,7 @@ fn homedir() -> option<path> { #[cfg(windows)] fn secondary() -> option<path> { - option::chain(getenv("USERPROFILE")) {|p| + do option::chain(getenv("USERPROFILE")) |p| { if !str::is_empty(p) { some(p) } else { @@ -470,7 +469,7 @@ fn walk_dir(p: path, f: fn(path) -> bool) { fn walk_dir_(p: path, f: fn(path) -> bool) -> bool { let mut keepgoing = true; - list_dir(p).each {|q| + do list_dir(p).each |q| { let path = path::connect(p, q); if !f(path) { keepgoing = false; @@ -494,14 +493,14 @@ fn walk_dir(p: path, f: fn(path) -> bool) { #[doc = "Indicates whether a path represents a directory"] fn path_is_dir(p: path) -> bool { - str::as_c_str(p) {|buf| + do str::as_c_str(p) |buf| { rustrt::rust_path_is_dir(buf) != 0 as c_int } } #[doc = "Indicates whether a path exists"] fn path_exists(p: path) -> bool { - str::as_c_str(p) {|buf| + do str::as_c_str(p) |buf| { rustrt::rust_path_exists(buf) != 0 as c_int } } @@ -538,7 +537,7 @@ fn make_dir(p: path, mode: c_int) -> bool { import libc::funcs::extra::kernel32::*; import win32::*; // FIXME: turn mode into something useful? #2623 - as_utf16_p(p) {|buf| + do as_utf16_p(p) |buf| { CreateDirectoryW(buf, unsafe { unsafe::reinterpret_cast(0) }) != (0 as BOOL) } @@ -546,14 +545,14 @@ fn make_dir(p: path, mode: c_int) -> bool { #[cfg(unix)] fn mkdir(p: path, mode: c_int) -> bool { - as_c_charp(p) {|c| + do as_c_charp(p) |c| { libc::mkdir(c, mode as mode_t) == (0 as c_int) } } } #[doc = "Lists the contents of a directory"] -fn list_dir(p: path) -> [str]/~ { +fn list_dir(p: path) -> ~[str] { #[cfg(unix)] fn star(p: str) -> str { p } @@ -569,7 +568,7 @@ fn list_dir(p: path) -> [str]/~ { } } - rustrt::rust_list_files(star(p)).filter {|filename| + do rustrt::rust_list_files(star(p)).filter |filename| { !str::eq(filename, ".") && !str::eq(filename, "..") } } @@ -579,14 +578,14 @@ Lists the contents of a directory This version prepends each entry with the directory. "] -fn list_dir_path(p: path) -> [str]/~ { +fn list_dir_path(p: path) -> ~[str] { let mut p = p; let pl = str::len(p); if pl == 0u || (p[pl - 1u] as char != path::consts::path_sep && p[pl - 1u] as char != path::consts::alt_path_sep) { p += path::path_sep(); } - os::list_dir(p).map {|f| p + f} + os::list_dir(p).map(|f| p + f) } #[doc = "Removes a directory at the specified path"] @@ -599,14 +598,14 @@ fn remove_dir(p: path) -> bool { import libc::funcs::extra::kernel32::*; import libc::types::os::arch::extra::*; import win32::*; - ret as_utf16_p(p) {|buf| + ret do as_utf16_p(p) |buf| { RemoveDirectoryW(buf) != (0 as BOOL) }; } #[cfg(unix)] fn rmdir(p: path) -> bool { - ret as_c_charp(p) {|buf| + ret do as_c_charp(p) |buf| { libc::rmdir(buf) == (0 as c_int) }; } @@ -621,14 +620,14 @@ fn change_dir(p: path) -> bool { import libc::funcs::extra::kernel32::*; import libc::types::os::arch::extra::*; import win32::*; - ret as_utf16_p(p) {|buf| + ret do as_utf16_p(p) |buf| { SetCurrentDirectoryW(buf) != (0 as BOOL) }; } #[cfg(unix)] fn chdir(p: path) -> bool { - ret as_c_charp(p) {|buf| + ret do as_c_charp(p) |buf| { libc::chdir(buf) == (0 as c_int) }; } @@ -644,8 +643,8 @@ fn copy_file(from: path, to: path) -> bool { import libc::funcs::extra::kernel32::*; import libc::types::os::arch::extra::*; import win32::*; - ret as_utf16_p(from) {|fromp| - as_utf16_p(to) {|top| + ret do as_utf16_p(from) |fromp| { + do as_utf16_p(to) |top| { CopyFileW(fromp, top, (0 as BOOL)) != (0 as BOOL) } } @@ -653,16 +652,16 @@ fn copy_file(from: path, to: path) -> bool { #[cfg(unix)] fn do_copy_file(from: path, to: path) -> bool { - let istream = as_c_charp(from) {|fromp| - as_c_charp("rb") {|modebuf| + let istream = do as_c_charp(from) |fromp| { + do as_c_charp("rb") |modebuf| { libc::fopen(fromp, modebuf) } }; if istream as uint == 0u { ret false; } - let ostream = as_c_charp(to) {|top| - as_c_charp("w+b") {|modebuf| + let ostream = do as_c_charp(to) |top| { + do as_c_charp("w+b") |modebuf| { libc::fopen(top, modebuf) } }; @@ -670,13 +669,13 @@ fn copy_file(from: path, to: path) -> bool { fclose(istream); ret false; } - let mut buf : [mut u8]/~ = [mut]/~; + let mut buf : ~[mut u8] = ~[mut]; let bufsize = 8192u; vec::reserve(buf, bufsize); let mut done = false; let mut ok = true; while !done { - vec::as_mut_buf(buf) {|b| + do vec::as_mut_buf(buf) |b| { let nread = libc::fread(b as *mut c_void, 1u as size_t, bufsize as size_t, istream); @@ -708,14 +707,14 @@ fn remove_file(p: path) -> bool { import libc::funcs::extra::kernel32::*; import libc::types::os::arch::extra::*; import win32::*; - ret as_utf16_p(p) {|buf| + ret do as_utf16_p(p) |buf| { DeleteFileW(buf) != (0 as BOOL) }; } #[cfg(unix)] fn unlink(p: path) -> bool { - ret as_c_charp(p) {|buf| + ret do as_c_charp(p) |buf| { libc::unlink(buf) == (0 as c_int) }; } @@ -851,7 +850,7 @@ mod tests { fn test_env_getenv() { let e = env(); assert vec::len(e) > 0u; - for vec::each(e) {|p| + for vec::each(e) |p| { let (n, v) = p; log(debug, n); let v2 = getenv(n); @@ -895,7 +894,7 @@ mod tests { setenv("HOME", ""); assert os::homedir() == none; - option::iter(oldhome, {|s| setenv("HOME", s)}); + option::iter(oldhome, |s| setenv("HOME", s)); } #[test] @@ -925,9 +924,9 @@ mod tests { setenv("USERPROFILE", "/home/PaloAlto"); assert os::homedir() == some("/home/MountainView"); - option::iter(oldhome, {|s| setenv("HOME", s)}); + option::iter(oldhome, |s| setenv("HOME", s)); option::iter(olduserprofile, - {|s| setenv("USERPROFILE", s)}); + |s| setenv("USERPROFILE", s)); } // Issue #712 @@ -940,7 +939,7 @@ mod tests { // Just assuming that we've got some contents in the current directory assert (vec::len(dirs) > 0u); - for vec::each(dirs) {|dir| log(debug, dir); } + for vec::each(dirs) |dir| { log(debug, dir); } } #[test] @@ -971,15 +970,15 @@ mod tests { let out = tempdir + path::path_sep() + "out.txt"; /* Write the temp input file */ - let ostream = as_c_charp(in) {|fromp| - as_c_charp("w+b") {|modebuf| + let ostream = do as_c_charp(in) |fromp| { + do as_c_charp("w+b") |modebuf| { libc::fopen(fromp, modebuf) } }; assert (ostream as uint != 0u); let s = "hello"; - let mut buf = vec::to_mut(str::bytes(s) + [0 as u8]/~); - vec::as_mut_buf(buf) {|b| + let mut buf = vec::to_mut(str::bytes(s) + ~[0 as u8]); + do vec::as_mut_buf(buf) |b| { assert (libc::fwrite(b as *c_void, 1u as size_t, (str::len(s) + 1u) as size_t, ostream) == buf.len() as size_t)}; @@ -989,7 +988,7 @@ mod tests { fail (#fmt("%s doesn't exist", in)); } assert(rs); - let rslt = run::run_program("diff", [in, out]/~); + let rslt = run::run_program("diff", ~[in, out]); assert (rslt == 0); assert (remove_file(in)); assert (remove_file(out)); diff --git a/src/libcore/path.rs b/src/libcore/path.rs index e75f104bea4..1b514b00759 100644 --- a/src/libcore/path.rs +++ b/src/libcore/path.rs @@ -61,9 +61,9 @@ fn path_is_absolute(p: str) -> bool { fn path_sep() -> str { ret str::from_char(consts::path_sep); } fn split_dirname_basename (pp: path) -> {dirname: str, basename: str} { - alt str::rfind(pp, {|ch| + alt str::rfind(pp, |ch| ch == consts::path_sep || ch == consts::alt_path_sep - }) { + ) { some(i) { {dirname: str::slice(pp, 0u, i), basename: str::slice(pp, i + 1u, str::len(pp))} @@ -127,7 +127,7 @@ Connects a vector of path segments into a single path. Inserts path separators as needed. "] -fn connect_many(paths: [path]/~) -> path { +fn connect_many(paths: ~[path]) -> path { ret if vec::len(paths) == 1u { paths[0] } else { @@ -144,8 +144,8 @@ each piece of the path. On Windows, if the path is absolute then the first element of the returned vector will be the drive letter followed by a colon. "] -fn split(p: path) -> [path]/~ { - str::split_nonempty(p, {|c| +fn split(p: path) -> ~[path] { + str::split_nonempty(p, |c| { c == consts::path_sep || c == consts::alt_path_sep }) } @@ -234,22 +234,21 @@ fn normalize(p: path) -> path { ret s; - fn strip_dots(s: [path]/~) -> [path]/~ { - vec::filter_map(s, { |elem| + fn strip_dots(s: ~[path]) -> ~[path] { + vec::filter_map(s, |elem| if elem == "." { option::none } else { option::some(elem) - } - }) + }) } - fn rollup_doubledots(s: [path]/~) -> [path]/~ { + fn rollup_doubledots(s: ~[path]) -> ~[path] { if vec::is_empty(s) { - ret []/~; + ret ~[]; } - let mut t = []/~; + let mut t = ~[]; let mut i = vec::len(s); let mut skip = 0; while i != 0u { @@ -322,28 +321,28 @@ mod tests { #[test] fn split1() { let actual = split("a" + ps() + "b"); - let expected = ["a", "b"]/~; + let expected = ~["a", "b"]; assert actual == expected; } #[test] fn split2() { let actual = split("a" + aps() + "b"); - let expected = ["a", "b"]/~; + let expected = ~["a", "b"]; assert actual == expected; } #[test] fn split3() { let actual = split(ps() + "a" + ps() + "b"); - let expected = ["a", "b"]/~; + let expected = ~["a", "b"]; assert actual == expected; } #[test] fn split4() { let actual = split("a" + ps() + "b" + aps() + "c"); - let expected = ["a", "b", "c"]/~; + let expected = ~["a", "b", "c"]; assert actual == expected; } diff --git a/src/libcore/priv.rs b/src/libcore/priv.rs index 5b59496a488..8d8ce9a9d72 100644 --- a/src/libcore/priv.rs +++ b/src/libcore/priv.rs @@ -41,7 +41,7 @@ unsafe fn chan_from_global_ptr<T: send>( let setup_po = comm::port(); let setup_ch = comm::chan(setup_po); - let setup_ch = task::run_listener(builder()) {|setup_po| + let setup_ch = do task::run_listener(builder()) |setup_po| { let po = comm::port::<T>(); let ch = comm::chan(po); comm::send(setup_ch, ch); @@ -92,7 +92,7 @@ fn test_from_global_chan1() { // Create the global channel, attached to a new task let ch = unsafe { - chan_from_global_ptr(globchanp, task::builder) {|po| + do chan_from_global_ptr(globchanp, task::builder) |po| { let ch = comm::recv(po); comm::send(ch, true); let ch = comm::recv(po); @@ -106,7 +106,7 @@ fn test_from_global_chan1() { // This one just reuses the previous channel let ch = unsafe { - chan_from_global_ptr(globchanp, task::builder) {|po| + do chan_from_global_ptr(globchanp, task::builder) |po| { let ch = comm::recv(po); comm::send(ch, false); } @@ -121,7 +121,7 @@ fn test_from_global_chan1() { #[test] fn test_from_global_chan2() { - iter::repeat(100u) {|| + do iter::repeat(100u) || { // The global channel let globchan = 0u; let globchanp = ptr::addr_of(globchan); @@ -131,13 +131,13 @@ fn test_from_global_chan2() { // Spawn a bunch of tasks that all want to compete to // create the global channel - for uint::range(0u, 10u) {|i| - task::spawn() {|| + for uint::range(0u, 10u) |i| { + do task::spawn || { let ch = unsafe { - chan_from_global_ptr( - globchanp, task::builder) {|po| + do chan_from_global_ptr( + globchanp, task::builder) |po| { - for uint::range(0u, 10u) {|_j| + for uint::range(0u, 10u) |_j| { let ch = comm::recv(po); comm::send(ch, {i}); } @@ -153,7 +153,7 @@ fn test_from_global_chan2() { } // There should be only one winner let mut winners = 0u; - for uint::range(0u, 10u) {|_i| + for uint::range(0u, 10u) |_i| { let res = comm::recv(resultpo); if res { winners += 1u }; } @@ -200,9 +200,9 @@ unsafe fn weaken_task(f: fn(comm::port<()>)) { #[test] fn test_weaken_task_then_unweaken() { - task::try {|| + do task::try || { unsafe { - weaken_task {|_po| + do weaken_task |_po| { } } }; @@ -212,9 +212,9 @@ fn test_weaken_task_then_unweaken() { fn test_weaken_task_wait() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { unsafe { - weaken_task {|po| + do weaken_task |po| { comm::recv(po); } } @@ -224,18 +224,18 @@ fn test_weaken_task_wait() { #[test] fn test_weaken_task_stress() { // Create a bunch of weak tasks - iter::repeat(100u) {|| - task::spawn {|| + do iter::repeat(100u) || { + do task::spawn || { unsafe { - weaken_task {|_po| + do weaken_task |_po| { } } } let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { unsafe { - weaken_task {|po| + do weaken_task |po| { // Wait for it to tell us to die comm::recv(po); } @@ -247,9 +247,9 @@ fn test_weaken_task_stress() { #[test] #[ignore(cfg(windows))] fn test_weaken_task_fail() { - let res = task::try {|| + let res = do task::try || { unsafe { - weaken_task {|_po| + do weaken_task |_po| { fail; } } diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index bda418d428c..b3eaa3bd16d 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -70,7 +70,7 @@ fn mut_offset<T>(ptr: *mut T, count: uint) -> *mut T { #[doc = "Return the offset of the first null pointer in `buf`."] #[inline(always)] unsafe fn buf_len<T>(buf: **T) -> uint { - position(buf) {|i| i == null() } + position(buf, |i| i == null()) } #[doc = "Return the first offset `i` such that `f(buf[i]) == true`."] @@ -149,8 +149,8 @@ fn test() { assert (p.fst == 50); assert (p.snd == 60); - let v0 = [32000u16, 32001u16, 32002u16]/~; - let v1 = [0u16, 0u16, 0u16]/~; + let v0 = ~[32000u16, 32001u16, 32002u16]; + let v1 = ~[0u16, 0u16, 0u16]; ptr::memcpy(ptr::offset(vec::unsafe::to_ptr(v1), 1u), ptr::offset(vec::unsafe::to_ptr(v0), 1u), 1u); @@ -171,9 +171,9 @@ fn test_position() { let s = "hello"; unsafe { - assert 2u == as_c_str(s) {|p| position(p) {|c| c == 'l' as c_char} }; - assert 4u == as_c_str(s) {|p| position(p) {|c| c == 'o' as c_char} }; - assert 5u == as_c_str(s) {|p| position(p) {|c| c == 0 as c_char } }; + assert 2u == as_c_str(s, |p| position(p, |c| c == 'l' as c_char)); + assert 4u == as_c_str(s, |p| position(p, |c| c == 'o' as c_char)); + assert 5u == as_c_str(s, |p| position(p, |c| c == 0 as c_char)); } } @@ -182,11 +182,11 @@ fn test_buf_len() { let s0 = "hello"; let s1 = "there"; let s2 = "thing"; - str::as_c_str(s0) {|p0| - str::as_c_str(s1) {|p1| - str::as_c_str(s2) {|p2| - let v = [p0, p1, p2, null()]/~; - vec::as_buf(v) {|vp| + do str::as_c_str(s0) |p0| { + do str::as_c_str(s1) |p1| { + do str::as_c_str(s2) |p2| { + let v = ~[p0, p1, p2, null()]; + do vec::as_buf(v) |vp| { assert unsafe { buf_len(vp) } == 3u; } } diff --git a/src/libcore/rand.rs b/src/libcore/rand.rs index 059529a79f7..4724c00941c 100644 --- a/src/libcore/rand.rs +++ b/src/libcore/rand.rs @@ -7,9 +7,9 @@ enum rctx {} #[abi = "cdecl"] native mod rustrt { - fn rand_seed() -> [u8]/~; + fn rand_seed() -> ~[u8]; fn rand_new() -> *rctx; - fn rand_new_seeded(seed: [u8]/~) -> *rctx; + fn rand_new_seeded(seed: ~[u8]) -> *rctx; fn rand_next(c: *rctx) -> u32; fn rand_free(c: *rctx); } @@ -151,19 +151,19 @@ impl extensions for rng { } #[doc = "Return a random byte string of the specified length"] - fn gen_bytes(len: uint) -> [u8]/~ { - vec::from_fn(len) {|_i| + fn gen_bytes(len: uint) -> ~[u8] { + do vec::from_fn(len) |_i| { self.gen_u8() } } #[doc = "Choose an item randomly, failing if values is empty"] - fn choose<T:copy>(values: [T]/~) -> T { + fn choose<T:copy>(values: ~[T]) -> T { self.choose_option(values).get() } #[doc = "Choose some(item) randomly, returning none if values is empty"] - fn choose_option<T:copy>(values: [T]/~) -> option<T> { + fn choose_option<T:copy>(values: ~[T]) -> option<T> { if values.is_empty() { none } else { @@ -173,15 +173,15 @@ impl extensions for rng { #[doc = "Choose an item respecting the relative weights, failing if \ the sum of the weights is 0"] - fn choose_weighted<T: copy>(v : [weighted<T>]/~) -> T { + fn choose_weighted<T: copy>(v : ~[weighted<T>]) -> T { self.choose_weighted_option(v).get() } #[doc = "Choose some(item) respecting the relative weights, returning \ none if the sum of the weights is 0"] - fn choose_weighted_option<T:copy>(v: [weighted<T>]/~) -> option<T> { + fn choose_weighted_option<T:copy>(v: ~[weighted<T>]) -> option<T> { let mut total = 0u; - for v.each {|item| + for v.each |item| { total += item.weight; } if total == 0u { @@ -189,7 +189,7 @@ impl extensions for rng { } let chosen = self.gen_uint_range(0u, total); let mut so_far = 0u; - for v.each {|item| + for v.each |item| { so_far += item.weight; if so_far > chosen { ret some(item.item); @@ -200,10 +200,10 @@ impl extensions for rng { #[doc = "Return a vec containing copies of the items, in order, where \ the weight of the item determines how many copies there are"] - fn weighted_vec<T:copy>(v: [weighted<T>]/~) -> [T]/~ { - let mut r = []/~; - for v.each {|item| - for uint::range(0u, item.weight) {|_i| + fn weighted_vec<T:copy>(v: ~[weighted<T>]) -> ~[T] { + let mut r = ~[]; + for v.each |item| { + for uint::range(0u, item.weight) |_i| { vec::push(r, item.item); } } @@ -211,14 +211,14 @@ impl extensions for rng { } #[doc = "Shuffle a vec"] - fn shuffle<T:copy>(values: [T]/~) -> [T]/~ { + fn shuffle<T:copy>(values: ~[T]) -> ~[T] { let mut m = vec::to_mut(values); self.shuffle_mut(m); ret vec::from_mut(m); } #[doc = "Shuffle a mutable vec in place"] - fn shuffle_mut<T>(&&values: [mut T]/~) { + fn shuffle_mut<T>(&&values: ~[mut T]) { let mut i = values.len(); while i >= 2u { // invariant: elements with index >= i have been locked in place. @@ -241,7 +241,7 @@ impl of rng for @rand_res { } #[doc = "Create a new random seed for seeded_rng"] -fn seed() -> [u8]/~ { +fn seed() -> ~[u8] { rustrt::rand_seed() } @@ -254,7 +254,7 @@ fn rng() -> rng { generator constructed with a given seed will generate the same \ sequence of values as all other generators constructed with the \ same seed. The seed may be any length."] -fn seeded_rng(seed: [u8]/~) -> rng { +fn seeded_rng(seed: ~[u8]) -> rng { @rand_res(rustrt::rand_new_seeded(seed)) as rng } @@ -301,7 +301,7 @@ mod tests { #[test] fn rng_seeded_custom_seed() { // much shorter than generated seeds which are 1024 bytes - let seed = [2u8, 32u8, 4u8, 32u8, 51u8]/~; + let seed = ~[2u8, 32u8, 4u8, 32u8, 51u8]; let ra = rand::seeded_rng(seed); let rb = rand::seeded_rng(seed); assert ra.gen_str(100u) == rb.gen_str(100u); @@ -309,7 +309,7 @@ mod tests { #[test] fn rng_seeded_custom_seed2() { - let seed = [2u8, 32u8, 4u8, 32u8, 51u8]/~; + let seed = ~[2u8, 32u8, 4u8, 32u8, 51u8]; let ra = rand::seeded_rng(seed); // Regression test that isaac is actually using the above vector let r = ra.next(); @@ -387,56 +387,56 @@ mod tests { #[test] fn choose() { let r = rand::rng(); - assert r.choose([1, 1, 1]/~) == 1; + assert r.choose(~[1, 1, 1]) == 1; } #[test] fn choose_option() { let r = rand::rng(); - assert r.choose_option([]/~) == none::<int>; - assert r.choose_option([1, 1, 1]/~) == some(1); + assert r.choose_option(~[]) == none::<int>; + assert r.choose_option(~[1, 1, 1]) == some(1); } #[test] fn choose_weighted() { let r = rand::rng(); - assert r.choose_weighted([{weight: 1u, item: 42}]/~) == 42; - assert r.choose_weighted([ + assert r.choose_weighted(~[{weight: 1u, item: 42}]) == 42; + assert r.choose_weighted(~[ {weight: 0u, item: 42}, {weight: 1u, item: 43} - ]/~) == 43; + ]) == 43; } #[test] fn choose_weighted_option() { let r = rand::rng(); - assert r.choose_weighted_option([{weight: 1u, item: 42}]/~) == + assert r.choose_weighted_option(~[{weight: 1u, item: 42}]) == some(42); - assert r.choose_weighted_option([ + assert r.choose_weighted_option(~[ {weight: 0u, item: 42}, {weight: 1u, item: 43} - ]/~) == some(43); - assert r.choose_weighted_option([]/~) == none::<int>; + ]) == some(43); + assert r.choose_weighted_option(~[]) == none::<int>; } #[test] fn weighted_vec() { let r = rand::rng(); - let empty: [int]/~ = []/~; - assert r.weighted_vec([]/~) == empty; - assert r.weighted_vec([ + let empty: ~[int] = ~[]; + assert r.weighted_vec(~[]) == empty; + assert r.weighted_vec(~[ {weight: 0u, item: 3u}, {weight: 1u, item: 2u}, {weight: 2u, item: 1u} - ]/~) == [2u, 1u, 1u]/~; + ]) == ~[2u, 1u, 1u]; } #[test] fn shuffle() { let r = rand::rng(); - let empty: [int]/~ = []/~; - assert r.shuffle([]/~) == empty; - assert r.shuffle([1, 1, 1]/~) == [1, 1, 1]/~; + let empty: ~[int] = ~[]; + assert r.shuffle(~[]) == empty; + assert r.shuffle(~[1, 1, 1]) == ~[1, 1, 1]; } } diff --git a/src/libcore/result.rs b/src/libcore/result.rs index e9d3847ab93..64d5ff9c73c 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -250,11 +250,11 @@ checking for overflow: } "] fn map_vec<T,U:copy,V:copy>( - ts: [T]/~, op: fn(T) -> result<V,U>) -> result<[V]/~,U> { + ts: ~[T], op: fn(T) -> result<V,U>) -> result<~[V],U> { - let mut vs: [V]/~ = []/~; + let mut vs: ~[V] = ~[]; vec::reserve(vs, vec::len(ts)); - for vec::each(ts) {|t| + for vec::each(ts) |t| { alt op(t) { ok(v) { vec::push(vs, v); } err(u) { ret err(u); } @@ -284,12 +284,12 @@ length. While we do not often use preconditions in the standard library, a precondition is used here because result::t is generally used in 'careful' code contexts where it is both appropriate and easy to accommodate an error like the vectors being of different lengths."] -fn map_vec2<S,T,U:copy,V:copy>(ss: [S]/~, ts: [T]/~, +fn map_vec2<S,T,U:copy,V:copy>(ss: ~[S], ts: ~[T], op: fn(S,T) -> result<V,U>) - : vec::same_length(ss, ts) -> result<[V]/~,U> { + : vec::same_length(ss, ts) -> result<~[V],U> { let n = vec::len(ts); - let mut vs = []/~; + let mut vs = ~[]; vec::reserve(vs, n); let mut i = 0u; while i < n { @@ -307,7 +307,7 @@ Applies op to the pairwise elements from `ss` and `ts`, aborting on error. This could be implemented using `map2()` but it is more efficient on its own as no result vector is built. "] -fn iter_vec2<S,T,U:copy>(ss: [S]/~, ts: [T]/~, +fn iter_vec2<S,T,U:copy>(ss: ~[S], ts: ~[T], op: fn(S,T) -> result<(),U>) : vec::same_length(ss, ts) -> result<(),U> { @@ -362,33 +362,33 @@ mod tests { #[test] fn test_impl_iter() { let mut valid = false; - ok::<str, str>("a").iter { |_x| valid = true; }; + ok::<str, str>("a").iter(|_x| valid = true); assert valid; - err::<str, str>("b").iter { |_x| valid = false; }; + err::<str, str>("b").iter(|_x| valid = false); assert valid; } #[test] fn test_impl_iter_err() { let mut valid = true; - ok::<str, str>("a").iter_err { |_x| valid = false; }; + ok::<str, str>("a").iter_err(|_x| valid = false); assert valid; valid = false; - err::<str, str>("b").iter_err { |_x| valid = true; }; + err::<str, str>("b").iter_err(|_x| valid = true); assert valid; } #[test] fn test_impl_map() { - assert ok::<str, str>("a").map { |_x| "b" } == ok("b"); - assert err::<str, str>("a").map { |_x| "b" } == err("a"); + assert ok::<str, str>("a").map(|_x| "b") == ok("b"); + assert err::<str, str>("a").map(|_x| "b") == err("a"); } #[test] fn test_impl_map_err() { - assert ok::<str, str>("a").map_err { |_x| "b" } == ok("a"); - assert err::<str, str>("a").map_err { |_x| "b" } == err("b"); + assert ok::<str, str>("a").map_err(|_x| "b") == ok("a"); + assert err::<str, str>("a").map_err(|_x| "b") == err("b"); } } diff --git a/src/libcore/run.rs b/src/libcore/run.rs index f51e5993bbc..7bb3b56fa1b 100644 --- a/src/libcore/run.rs +++ b/src/libcore/run.rs @@ -62,14 +62,14 @@ Run a program, providing stdin, stdout and stderr handles The process id of the spawned process "] -fn spawn_process(prog: str, args: [str]/~, - env: option<[(str,str)]/~>, +fn spawn_process(prog: str, args: ~[str], + env: option<~[(str,str)]>, dir: option<str>, in_fd: c_int, out_fd: c_int, err_fd: c_int) -> pid_t { - with_argv(prog, args) {|argv| - with_envp(env) { |envp| - with_dirp(dir) { |dirp| + do with_argv(prog, args) |argv| { + do with_envp(env) |envp| { + do with_dirp(dir) |dirp| { rustrt::rust_run_program(argv, envp, dirp, in_fd, out_fd, err_fd) } @@ -77,39 +77,39 @@ fn spawn_process(prog: str, args: [str]/~, } } -fn with_argv<T>(prog: str, args: [str]/~, +fn with_argv<T>(prog: str, args: ~[str], cb: fn(**libc::c_char) -> T) -> T { - let mut argptrs = str::as_c_str(prog) {|b| [b]/~ }; - let mut tmps = []/~; - for vec::each(args) {|arg| + let mut argptrs = str::as_c_str(prog, |b| ~[b]); + let mut tmps = ~[]; + for vec::each(args) |arg| { let t = @arg; vec::push(tmps, t); - vec::push_all(argptrs, str::as_c_str(*t) {|b| [b]/~ }); + vec::push_all(argptrs, str::as_c_str(*t, |b| ~[b])); } vec::push(argptrs, ptr::null()); vec::as_buf(argptrs, cb) } #[cfg(unix)] -fn with_envp<T>(env: option<[(str,str)]/~>, +fn with_envp<T>(env: option<~[(str,str)]>, cb: fn(*c_void) -> T) -> T { // On posixy systems we can pass a char** for envp, which is // a null-terminated array of "k=v\n" strings. alt env { some(es) if !vec::is_empty(es) { - let mut tmps = []/~; - let mut ptrs = []/~; + let mut tmps = ~[]; + let mut ptrs = ~[]; - for vec::each(es) {|e| + for vec::each(es) |e| { let (k,v) = e; let t = @(#fmt("%s=%s", k, v)); vec::push(tmps, t); - vec::push_all(ptrs, str::as_c_str(*t) {|b| [b]/~}); + vec::push_all(ptrs, str::as_c_str(*t, |b| ~[b])); } vec::push(ptrs, ptr::null()); - vec::as_buf(ptrs) { |p| + vec::as_buf(ptrs, |p| unsafe { cb(::unsafe::reinterpret_cast(p)) } - } + ) } _ { cb(ptr::null()) @@ -118,7 +118,7 @@ fn with_envp<T>(env: option<[(str,str)]/~>, } #[cfg(windows)] -fn with_envp<T>(env: option<[(str,str)]/~>, +fn with_envp<T>(env: option<~[(str,str)]>, cb: fn(*c_void) -> T) -> T { // On win32 we pass an "environment block" which is not a char**, but // rather a concatenation of null-terminated k=v\0 sequences, with a final @@ -126,16 +126,16 @@ fn with_envp<T>(env: option<[(str,str)]/~>, unsafe { alt env { some(es) if !vec::is_empty(es) { - let mut blk : [u8]/~ = []/~; - for vec::each(es) {|e| + let mut blk : ~[u8] = ~[]; + for vec::each(es) |e| { let (k,v) = e; let t = #fmt("%s=%s", k, v); - let mut v : [u8]/~ = ::unsafe::reinterpret_cast(t); + let mut v : ~[u8] = ::unsafe::reinterpret_cast(t); blk += v; ::unsafe::forget(v); } - blk += [0_u8]/~; - vec::as_buf(blk) {|p| cb(::unsafe::reinterpret_cast(p)) } + blk += ~[0_u8]; + vec::as_buf(blk, |p| cb(::unsafe::reinterpret_cast(p))) } _ { cb(ptr::null()) @@ -164,7 +164,7 @@ Spawns a process and waits for it to terminate The process id "] -fn run_program(prog: str, args: [str]/~) -> int { +fn run_program(prog: str, args: ~[str]) -> int { let pid = spawn_process(prog, args, none, none, 0i32, 0i32, 0i32); if pid == -1 as pid_t { fail; } @@ -187,7 +187,7 @@ The class will ensure that file descriptors are closed properly. A class with a <program> field "] -fn start_program(prog: str, args: [str]/~) -> program { +fn start_program(prog: str, args: ~[str]) -> program { let pipe_input = os::pipe(); let pipe_output = os::pipe(); let pipe_err = os::pipe(); @@ -271,7 +271,7 @@ contents of stdout and stderr. A record, {status: int, out: str, err: str} containing the exit code, the contents of stdout and the contents of stderr. "] -fn program_output(prog: str, args: [str]/~) -> +fn program_output(prog: str, args: ~[str]) -> {status: int, out: str, err: str} { let pipe_in = os::pipe(); @@ -298,11 +298,11 @@ fn program_output(prog: str, args: [str]/~) -> // clever way to do this. let p = comm::port(); let ch = comm::chan(p); - task::spawn_sched(task::single_threaded) {|| + do task::spawn_sched(task::single_threaded) || { let errput = readclose(pipe_err.in); comm::send(ch, (2, errput)); }; - task::spawn_sched(task::single_threaded) {|| + do task::spawn_sched(task::single_threaded) || { let output = readclose(pipe_out.in); comm::send(ch, (1, output)); }; @@ -397,9 +397,9 @@ mod tests { // Regression test for memory leaks #[ignore(cfg(windows))] // FIXME (#2626) fn test_leaks() { - run::run_program("echo", []/~); - run::start_program("echo", []/~); - run::program_output("echo", []/~); + run::run_program("echo", ~[]); + run::start_program("echo", ~[]); + run::program_output("echo", ~[]); } #[test] @@ -410,7 +410,7 @@ mod tests { let pid = run::spawn_process( - "cat", []/~, none, none, + "cat", ~[], none, none, pipe_in.in, pipe_out.out, pipe_err.out); os::close(pipe_in.in); os::close(pipe_out.out); @@ -430,7 +430,7 @@ mod tests { #[test] fn waitpid() { - let pid = run::spawn_process("false", []/~, + let pid = run::spawn_process("false", ~[], none, none, 0i32, 0i32, 0i32); let status = run::waitpid(pid); diff --git a/src/libcore/stackwalk.rs b/src/libcore/stackwalk.rs index 0041dde319d..4af007b14dd 100644 --- a/src/libcore/stackwalk.rs +++ b/src/libcore/stackwalk.rs @@ -18,7 +18,7 @@ fn walk_stack(visit: fn(frame) -> bool) { #debug("beginning stack walk"); - frame_address { |frame_pointer| + do frame_address |frame_pointer| { let mut frame_address: *word = unsafe { reinterpret_cast(frame_pointer) }; @@ -44,7 +44,7 @@ fn walk_stack(visit: fn(frame) -> bool) { #[test] fn test_simple() { - for walk_stack { |_frame| + for walk_stack |_frame| { } } @@ -53,7 +53,7 @@ fn test_simple_deep() { fn run(i: int) { if i == 0 { ret } - for walk_stack { |_frame| + for walk_stack |_frame| { unsafe { breakpoint(); } diff --git a/src/libcore/str.rs b/src/libcore/str.rs index 8d98e39791c..3618772027e 100644 --- a/src/libcore/str.rs +++ b/src/libcore/str.rs @@ -122,7 +122,7 @@ Convert a vector of bytes to a UTF-8 string Fails if invalid UTF-8 "] -pure fn from_bytes(+vv: [u8]/~) -> str { +pure fn from_bytes(+vv: ~[u8]) -> str { assert is_utf8(vv); ret unsafe { unsafe::from_bytes(vv) }; } @@ -136,7 +136,7 @@ Fails if invalid UTF-8 "] pure fn from_byte(b: u8) -> str { assert b < 128u8; - let mut v = [b, 0u8]/~; + let mut v = ~[b, 0u8]; unsafe { ::unsafe::transmute(v) } } @@ -154,7 +154,7 @@ fn push_char(&s: str, ch: char) { let new_len = len + nb; reserve_at_least(s, new_len); let off = len; - as_buf(s) {|buf| + do as_buf(s) |buf| { let buf: *mut u8 = ::unsafe::reinterpret_cast(buf); if nb == 1u { *ptr::mut_offset(buf, off) = @@ -208,8 +208,8 @@ fn push_char(&s: str, ch: char) { *ptr::mut_offset(buf, off + nb) = 0u8; } - as_bytes(s) {|bytes| - let mut mut_bytes: [u8]/~ = ::unsafe::reinterpret_cast(bytes); + do as_bytes(s) |bytes| { + let mut mut_bytes: ~[u8] = ::unsafe::reinterpret_cast(bytes); vec::unsafe::set_len(mut_bytes, new_len + 1u); ::unsafe::forget(mut_bytes); } @@ -224,28 +224,28 @@ pure fn from_char(ch: char) -> str { } #[doc = "Convert a vector of chars to a string"] -pure fn from_chars(chs: [const char]/&) -> str { +pure fn from_chars(chs: &[const char]) -> str { let mut buf = ""; unchecked { reserve(buf, chs.len()); - for vec::each(chs) {|ch| push_char(buf, ch); } + for vec::each(chs) |ch| { push_char(buf, ch); } } ret buf; } #[doc = "Concatenate a vector of strings"] -pure fn concat(v: [const str]/&) -> str { +pure fn concat(v: &[const str]) -> str { let mut s: str = ""; - for vec::each(v) {|ss| s += ss; } + for vec::each(v) |ss| { s += ss; } ret s; } #[doc = " Concatenate a vector of strings, placing a given separator between each "] -pure fn connect(v: [const str]/&a, sep: str) -> str { +pure fn connect(v: &[const str], sep: str) -> str { let mut s = "", first = true; - for vec::each(v) {|ss| + for vec::each(v) |ss| { if first { first = false; } else { s += sep; } s += ss; } @@ -289,7 +289,7 @@ fn unshift_char(&s: str, ch: char) { s = from_char(ch) + s; } #[doc = "Returns a string with leading whitespace removed"] pure fn trim_left(+s: str) -> str { - alt find(s, {|c| !char::is_whitespace(c)}) { + alt find(s, |c| !char::is_whitespace(c)) { none { "" } some(first) { if first == 0u { s } @@ -300,7 +300,7 @@ pure fn trim_left(+s: str) -> str { #[doc = "Returns a string with trailing whitespace removed"] pure fn trim_right(+s: str) -> str { - alt rfind(s, {|c| !char::is_whitespace(c)}) { + alt rfind(s, |c| !char::is_whitespace(c)) { none { "" } some(last) { let {next, _} = char_range_at(s, last); @@ -322,10 +322,10 @@ Converts a string to a vector of bytes The result vector is not null-terminated. "] -pure fn bytes(s: str) -> [u8]/~ { +pure fn bytes(s: str) -> ~[u8] { unsafe { let mut s_copy = s; - let mut v: [u8]/~ = ::unsafe::transmute(s_copy); + let mut v: ~[u8] = ::unsafe::transmute(s_copy); vec::unsafe::set_len(v, len(s)); ret v; } @@ -335,15 +335,15 @@ pure fn bytes(s: str) -> [u8]/~ { Work with the string as a byte slice, not including trailing null. "] #[inline(always)] -pure fn byte_slice<T>(s: str/&, f: fn([u8]/&) -> T) -> T { - unpack_slice(s) {|p,n| +pure fn byte_slice<T>(s: str/&, f: fn(v: &[u8]) -> T) -> T { + do unpack_slice(s) |p,n| { unsafe { vec::unsafe::form_slice(p, n-1u, f) } } } #[doc = "Convert a string to a vector of characters"] -pure fn chars(s: str/&) -> [char]/~ { - let mut buf = []/~, i = 0u; +pure fn chars(s: str/&) -> ~[char] { + let mut buf = ~[], i = 0u; let len = len(s); while i < len { let {ch, next} = char_range_at(s, i); @@ -378,7 +378,7 @@ pure fn slice(s: str/&, begin: uint, end: uint) -> str { #[doc = " Splits a string into substrings at each occurrence of a given character "] -pure fn split_char(s: str/&, sep: char) -> [str]/~ { +pure fn split_char(s: str/&, sep: char) -> ~[str] { split_char_inner(s, sep, len(s), true) } @@ -388,22 +388,22 @@ character up to 'count' times The byte must be a valid UTF-8/ASCII byte "] -pure fn splitn_char(s: str/&, sep: char, count: uint) -> [str]/~ { +pure fn splitn_char(s: str/&, sep: char, count: uint) -> ~[str] { split_char_inner(s, sep, count, true) } #[doc = " Like `split_char`, but omits empty strings from the returned vector "] -pure fn split_char_nonempty(s: str/&, sep: char) -> [str]/~ { +pure fn split_char_nonempty(s: str/&, sep: char) -> ~[str] { split_char_inner(s, sep, len(s), false) } pure fn split_char_inner(s: str/&, sep: char, count: uint, allow_empty: bool) - -> [str]/~ { + -> ~[str] { if sep < 128u as char { let b = sep as u8, l = len(s); - let mut result = []/~, done = 0u; + let mut result = ~[], done = 0u; let mut i = 0u, start = 0u; while i < l && done < count { if s[i] == b { @@ -421,13 +421,13 @@ pure fn split_char_inner(s: str/&, sep: char, count: uint, allow_empty: bool) } result } else { - splitn(s, {|cur| cur == sep}, count) + splitn(s, |cur| cur == sep, count) } } #[doc = "Splits a string into substrings using a character function"] -pure fn split(s: str/&, sepfn: fn(char) -> bool) -> [str]/~ { +pure fn split(s: str/&, sepfn: fn(char) -> bool) -> ~[str] { split_inner(s, sepfn, len(s), true) } @@ -435,19 +435,19 @@ pure fn split(s: str/&, sepfn: fn(char) -> bool) -> [str]/~ { Splits a string into substrings using a character function, cutting at most `count` times. "] -pure fn splitn(s: str/&, sepfn: fn(char) -> bool, count: uint) -> [str]/~ { +pure fn splitn(s: str/&, sepfn: fn(char) -> bool, count: uint) -> ~[str] { split_inner(s, sepfn, count, true) } #[doc = "Like `split`, but omits empty strings from the returned vector"] -pure fn split_nonempty(s: str/&, sepfn: fn(char) -> bool) -> [str]/~ { +pure fn split_nonempty(s: str/&, sepfn: fn(char) -> bool) -> ~[str] { split_inner(s, sepfn, len(s), false) } pure fn split_inner(s: str/&, sepfn: fn(cc: char) -> bool, count: uint, - allow_empty: bool) -> [str]/~ { + allow_empty: bool) -> ~[str] { let l = len(s); - let mut result = []/~, i = 0u, start = 0u, done = 0u; + let mut result = ~[], i = 0u, start = 0u, done = 0u; while i < l && done < count { let {ch, next} = char_range_at(s, i); if sepfn(ch) { @@ -495,7 +495,7 @@ pure fn iter_matches(s: str/&a, sep: str/&b, f: fn(uint, uint)) { pure fn iter_between_matches(s: str/&a, sep: str/&b, f: fn(uint, uint)) { let mut last_end = 0u; - iter_matches(s, sep) {|from, to| + do iter_matches(s, sep) |from, to| { f(last_end, from); last_end = to; } @@ -511,17 +511,17 @@ Splits a string into a vector of the substrings separated by a given string assert [\"\", \"XXX\", \"YYY\", \"\"] == split_str(\".XXX.YYY.\", \".\") ~~~ "] -pure fn split_str(s: str/&a, sep: str/&b) -> [str]/~ { - let mut result = []/~; - iter_between_matches(s, sep) {|from, to| +pure fn split_str(s: str/&a, sep: str/&b) -> ~[str] { + let mut result = ~[]; + do iter_between_matches(s, sep) |from, to| { unsafe { vec::push(result, unsafe::slice_bytes(s, from, to)); } } result } -pure fn split_str_nonempty(s: str/&a, sep: str/&b) -> [str]/~ { - let mut result = []/~; - iter_between_matches(s, sep) {|from, to| +pure fn split_str_nonempty(s: str/&a, sep: str/&b) -> ~[str] { + let mut result = ~[]; + do iter_between_matches(s, sep) |from, to| { if to > from { unsafe { vec::push(result, unsafe::slice_bytes(s, from, to)); } } @@ -532,14 +532,14 @@ pure fn split_str_nonempty(s: str/&a, sep: str/&b) -> [str]/~ { #[doc = " Splits a string into a vector of the substrings separated by LF ('\\n') "] -pure fn lines(s: str/&) -> [str]/~ { split_char(s, '\n') } +pure fn lines(s: str/&) -> ~[str] { split_char(s, '\n') } #[doc = " Splits a string into a vector of the substrings separated by LF ('\\n') and/or CR LF ('\\r\\n') "] -pure fn lines_any(s: str/&) -> [str]/~ { - vec::map(lines(s), {|s| +pure fn lines_any(s: str/&) -> ~[str] { + vec::map(lines(s), |s| { let l = len(s); let mut cp = s; if l > 0u && s[l - 1u] == '\r' as u8 { @@ -552,22 +552,22 @@ pure fn lines_any(s: str/&) -> [str]/~ { #[doc = " Splits a string into a vector of the substrings separated by whitespace "] -pure fn words(s: str/&) -> [str]/~ { - split_nonempty(s, {|c| char::is_whitespace(c)}) +pure fn words(s: str/&) -> ~[str] { + split_nonempty(s, |c| char::is_whitespace(c)) } #[doc = "Convert a string to lowercase. ASCII only"] pure fn to_lower(s: str/&) -> str { - map(s, {|c| - unchecked{(libc::tolower(c as libc::c_char)) as char} - }) + map(s, + |c| unchecked{(libc::tolower(c as libc::c_char)) as char} + ) } #[doc = "Convert a string to uppercase. ASCII only"] pure fn to_upper(s: str/&) -> str { - map(s, {|c| - unchecked{(libc::toupper(c as libc::c_char)) as char} - }) + map(s, + |c| unchecked{(libc::toupper(c as libc::c_char)) as char} + ) } #[doc = " @@ -585,7 +585,7 @@ The original string with all occurances of `from` replaced with `to` "] pure fn replace(s: str, from: str, to: str) -> str { let mut result = "", first = true; - iter_between_matches(s, from) {|start, end| + do iter_between_matches(s, from) |start, end| { if first { first = false; } else { result += to; } unsafe { result += unsafe::slice_bytes(s, start, end); } } @@ -622,7 +622,7 @@ pure fn hash(&&s: str) -> uint { // djb hash. // FIXME: replace with murmur. (see #859 and #1616) let mut u: uint = 5381u; - for each(s) {|c| u *= 33u; u += c as uint; } + for each(s) |c| { u *= 33u; u += c as uint; } ret u; } @@ -643,7 +643,7 @@ Return true if a predicate matches any character (and false if it matches none or there are no characters) "] pure fn any(ss: str/&, pred: fn(char) -> bool) -> bool { - !all(ss, {|cc| !pred(cc)}) + !all(ss, |cc| !pred(cc)) } #[doc = "Apply a function to each character"] @@ -651,7 +651,7 @@ pure fn map(ss: str/&, ff: fn(char) -> char) -> str { let mut result = ""; unchecked { reserve(result, len(ss)); - chars_iter(ss) {|cc| + do chars_iter(ss) |cc| { str::push_char(result, ff(cc)); } } @@ -672,7 +672,7 @@ pure fn bytes_iter(ss: str/&, it: fn(u8)) { #[doc = "Iterate over the bytes in a string"] #[inline(always)] pure fn each(s: str/&, it: fn(u8) -> bool) { - eachi(s, {|_i, b| it(b)}) + eachi(s, |_i, b| it(b) ) } #[doc = "Iterate over the bytes in a string, with indices"] @@ -688,7 +688,7 @@ pure fn eachi(s: str/&, it: fn(uint, u8) -> bool) { #[doc = "Iterates over the chars in a string"] #[inline(always)] pure fn each_char(s: str/&, it: fn(char) -> bool) { - each_chari(s, {|_i, c| it(c)}) + each_chari(s, |_i, c| it(c)) } #[doc = "Iterates over the chars in a string, with indices"] @@ -820,7 +820,7 @@ pure fn find_char_between(s: str/&, c: char, start: uint, end: uint) } ret none; } else { - find_between(s, start, end, {|x| x == c}) + find_between(s, start, end, |x| x == c) } } @@ -899,7 +899,7 @@ pure fn rfind_char_between(s: str/&, c: char, start: uint, end: uint) } ret none; } else { - rfind_between(s, start, end, {|x| x == c}) + rfind_between(s, start, end, |x| x == c) } } @@ -1064,7 +1064,7 @@ pure fn rfind_between(s: str/&, start: uint, end: uint, f: fn(char) -> bool) // Utility used by various searching functions pure fn match_at(haystack: str/&a, needle: str/&b, at: uint) -> bool { let mut i = at; - for each(needle) {|c| if haystack[i] != c { ret false; } i += 1u; } + for each(needle) |c| { if haystack[i] != c { ret false; } i += 1u; } ret true; } @@ -1240,7 +1240,7 @@ fn is_alphanumeric(s: str/&) -> bool { Returns the string length/size in bytes not counting the null terminator "] pure fn len(s: str/&) -> uint { - unpack_slice(s) { |_p, n| n - 1u } + do unpack_slice(s) |_p, n| { n - 1u } } #[doc = "Returns the number of characters that a string holds"] @@ -1251,7 +1251,7 @@ Section: Misc */ #[doc = "Determines if a vector of bytes contains valid UTF-8"] -pure fn is_utf8(v: [const u8]/&) -> bool { +pure fn is_utf8(v: &[const u8]) -> bool { let mut i = 0u; let total = vec::len::<u8>(v); while i < total { @@ -1269,7 +1269,7 @@ pure fn is_utf8(v: [const u8]/&) -> bool { } #[doc = "Determines if a vector of `u16` contains valid UTF-16"] -pure fn is_utf16(v: [const u16]/&) -> bool { +pure fn is_utf16(v: &[const u16]) -> bool { let len = vec::len(v); let mut i = 0u; while (i < len) { @@ -1290,9 +1290,9 @@ pure fn is_utf16(v: [const u16]/&) -> bool { } #[doc = "Converts to a vector of `u16` encoded as UTF-16"] -pure fn to_utf16(s: str/&) -> [u16]/~ { - let mut u = []/~; - chars_iter(s) {|cch| +pure fn to_utf16(s: str/&) -> ~[u16] { + let mut u = ~[]; + do chars_iter(s) |cch| { // Arithmetic with u32 literals is easier on the eyes than chars. let mut ch = cch as u32; @@ -1306,13 +1306,13 @@ pure fn to_utf16(s: str/&) -> [u16]/~ { ch -= 0x1_0000_u32; let w1 = 0xD800_u16 | ((ch >> 10) as u16); let w2 = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16); - vec::push_all(u, [w1, w2]/~) + vec::push_all(u, ~[w1, w2]) } } ret u; } -pure fn utf16_chars(v: [const u16]/&, f: fn(char)) { +pure fn utf16_chars(v: &[const u16], f: fn(char)) { let len = vec::len(v); let mut i = 0u; while (i < len && v[i] != 0u16) { @@ -1337,11 +1337,11 @@ pure fn utf16_chars(v: [const u16]/&, f: fn(char)) { } -pure fn from_utf16(v: [const u16]/&) -> str { +pure fn from_utf16(v: &[const u16]) -> str { let mut buf = ""; unchecked { reserve(buf, vec::len(v)); - utf16_chars(v) {|ch| push_char(buf, ch); } + utf16_chars(v, |ch| push_char(buf, ch)); } ret buf; } @@ -1564,7 +1564,7 @@ Loop through a substring, char by char "] pure fn any_between(s: str/&, start: uint, end: uint, it: fn(char) -> bool) -> bool { - !all_between(s, start, end, {|c| !it(c)}) + !all_between(s, start, end, |c| !it(c)) } // UTF-8 tags and ranges @@ -1594,9 +1594,9 @@ interop. let i = str::as_bytes(\"Hello World\") { |bytes| vec::len(bytes) }; ~~~ "] -pure fn as_bytes<T>(s: str, f: fn([u8]/~) -> T) -> T { +pure fn as_bytes<T>(s: str, f: fn(~[u8]) -> T) -> T { unsafe { - let v: *[u8]/~ = ::unsafe::reinterpret_cast(ptr::addr_of(s)); + let v: *~[u8] = ::unsafe::reinterpret_cast(ptr::addr_of(s)); f(*v) } } @@ -1608,7 +1608,7 @@ Allows for unsafe manipulation of strings, which is useful for native interop. "] pure fn as_buf<T>(s: str, f: fn(*u8) -> T) -> T { - as_bytes(s) { |v| unsafe { vec::as_buf(v, f) } } + as_bytes(s, |v| unsafe { vec::as_buf(v, f) }) } #[doc = " @@ -1624,7 +1624,7 @@ let s = str::as_buf(\"PATH\", { |path_buf| libc::getenv(path_buf) }); ~~~ "] pure fn as_c_str<T>(s: str, f: fn(*libc::c_char) -> T) -> T { - as_buf(s) {|buf| f(buf as *libc::c_char) } + as_buf(s, |buf| f(buf as *libc::c_char)) } @@ -1696,7 +1696,7 @@ Returns the number of single-byte characters the string can hold without reallocating "] pure fn capacity(&&s: str) -> uint { - as_bytes(s) {|buf| + do as_bytes(s) |buf| { let vcap = vec::capacity(buf); assert vcap > 0u; vcap - 1u @@ -1708,7 +1708,7 @@ pure fn escape_default(s: str/&) -> str { let mut out: str = ""; unchecked { reserve_at_least(out, str::len(s)); - chars_iter(s) {|c| out += char::escape_default(c); } + chars_iter(s, |c| out += char::escape_default(c)); } ret out; } @@ -1718,7 +1718,7 @@ pure fn escape_unicode(s: str/&) -> str { let mut out: str = ""; unchecked { reserve_at_least(out, str::len(s)); - chars_iter(s) {|c| out += char::escape_unicode(c); } + chars_iter(s, |c| out += char::escape_unicode(c)); } ret out; } @@ -1749,9 +1749,9 @@ mod unsafe { #[doc = "Create a Rust string from a *u8 buffer of the given length"] unsafe fn from_buf_len(buf: *u8, len: uint) -> str { - let mut v: [u8]/~ = []/~; + let mut v: ~[u8] = ~[]; vec::reserve(v, len + 1u); - vec::as_buf(v) {|b| ptr::memcpy(b, buf, len); } + vec::as_buf(v, |b| ptr::memcpy(b, buf, len)); vec::unsafe::set_len(v, len); vec::push(v, 0u8); @@ -1776,7 +1776,7 @@ mod unsafe { Does not verify that the vector contains valid UTF-8. "] - unsafe fn from_bytes(+v: [const u8]/~) -> str { + unsafe fn from_bytes(+v: ~[const u8]) -> str { unsafe { let mut vcopy = ::unsafe::transmute(v); vec::push(vcopy, 0u8); @@ -1789,7 +1789,7 @@ mod unsafe { Does not verify that the byte is valid UTF-8. "] - unsafe fn from_byte(u: u8) -> str { unsafe::from_bytes([u]/~) } + unsafe fn from_byte(u: u8) -> str { unsafe::from_bytes(~[u]) } #[doc = " Takes a bytewise (not UTF-8) slice from a string. @@ -1802,14 +1802,14 @@ mod unsafe { If end is greater than the length of the string. "] unsafe fn slice_bytes(s: str/&, begin: uint, end: uint) -> str { - unpack_slice(s) { |sbuf, n| + do unpack_slice(s) |sbuf, n| { assert (begin <= end); assert (end <= n); - let mut v = []/~; + let mut v = ~[]; vec::reserve(v, end - begin + 1u); unsafe { - vec::as_buf(v) { |vbuf| + do vec::as_buf(v) |vbuf| { let src = ptr::offset(sbuf, begin); ptr::memcpy(vbuf, src, end - begin); } @@ -1826,8 +1826,8 @@ mod unsafe { } #[doc = "Appends a vector of bytes to a string. (Not UTF-8 safe)."] - unsafe fn push_bytes(&s: str, bytes: [u8]/~) { - for vec::each(bytes) {|byte| rustrt::rust_str_push(s, byte); } + unsafe fn push_bytes(&s: str, bytes: ~[u8]) { + for vec::each(bytes) |byte| { rustrt::rust_str_push(s, byte); } } #[doc = " @@ -1865,7 +1865,7 @@ mod unsafe { #[test] fn test_from_buf_len() { unsafe { - let a = [65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8]/~; + let a = ~[65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8]; let b = vec::unsafe::to_ptr(a); let c = from_buf_len(b, 3u); assert (c == "AAA"); @@ -1955,18 +1955,18 @@ impl extensions/& for str/& { fn slice(begin: uint, end: uint) -> str { slice(self, begin, end) } #[doc = "Splits a string into substrings using a character function"] #[inline] - fn split(sepfn: fn(char) -> bool) -> [str]/~ { split(self, sepfn) } + fn split(sepfn: fn(char) -> bool) -> ~[str] { split(self, sepfn) } #[doc = " Splits a string into substrings at each occurrence of a given character "] #[inline] - fn split_char(sep: char) -> [str]/~ { split_char(self, sep) } + fn split_char(sep: char) -> ~[str] { split_char(self, sep) } #[doc = " Splits a string into a vector of the substrings separated by a given string "] #[inline] - fn split_str(sep: str/&a) -> [str]/~ { split_str(self, sep) } + fn split_str(sep: str/&a) -> ~[str] { split_str(self, sep) } #[doc = "Returns true if one string starts with another"] #[inline] fn starts_with(needle: str/&a) -> bool { starts_with(self, needle) } @@ -2067,79 +2067,79 @@ mod tests { #[test] fn test_split_char() { - fn t(s: str, c: char, u: [str]/~) { + fn t(s: str, c: char, u: ~[str]) { log(debug, "split_byte: " + s); let v = split_char(s, c); #debug("split_byte to: %?", v); - assert vec::all2(v, u, { |a,b| a == b }); + assert vec::all2(v, u, |a,b| a == b); } - t("abc.hello.there", '.', ["abc", "hello", "there"]/~); - t(".hello.there", '.', ["", "hello", "there"]/~); - t("...hello.there.", '.', ["", "", "", "hello", "there", ""]/~); + t("abc.hello.there", '.', ~["abc", "hello", "there"]); + t(".hello.there", '.', ~["", "hello", "there"]); + t("...hello.there.", '.', ~["", "", "", "hello", "there", ""]); - assert ["", "", "", "hello", "there", ""]/~ + assert ~["", "", "", "hello", "there", ""] == split_char("...hello.there.", '.'); - assert [""]/~ == split_char("", 'z'); - assert ["",""]/~ == split_char("z", 'z'); - assert ["ok"]/~ == split_char("ok", 'z'); + assert ~[""] == split_char("", 'z'); + assert ~["",""] == split_char("z", 'z'); + assert ~["ok"] == split_char("ok", 'z'); } #[test] fn test_split_char_2() { let data = "ประเทศไทย中华Việt Nam"; - assert ["ประเทศไทย中华", "iệt Nam"]/~ + assert ~["ประเทศไทย中华", "iệt Nam"] == split_char(data, 'V'); - assert ["ประเ", "ศไ", "ย中华Việt Nam"]/~ + assert ~["ประเ", "ศไ", "ย中华Việt Nam"] == split_char(data, 'ท'); } #[test] fn test_splitn_char() { - fn t(s: str, c: char, n: uint, u: [str]/~) { + fn t(s: str, c: char, n: uint, u: ~[str]) { log(debug, "splitn_byte: " + s); let v = splitn_char(s, c, n); #debug("split_byte to: %?", v); #debug("comparing vs. %?", u); - assert vec::all2(v, u, { |a,b| a == b }); + assert vec::all2(v, u, |a,b| a == b); } - t("abc.hello.there", '.', 0u, ["abc.hello.there"]/~); - t("abc.hello.there", '.', 1u, ["abc", "hello.there"]/~); - t("abc.hello.there", '.', 2u, ["abc", "hello", "there"]/~); - t("abc.hello.there", '.', 3u, ["abc", "hello", "there"]/~); - t(".hello.there", '.', 0u, [".hello.there"]/~); - t(".hello.there", '.', 1u, ["", "hello.there"]/~); - t("...hello.there.", '.', 3u, ["", "", "", "hello.there."]/~); - t("...hello.there.", '.', 5u, ["", "", "", "hello", "there", ""]/~); + t("abc.hello.there", '.', 0u, ~["abc.hello.there"]); + t("abc.hello.there", '.', 1u, ~["abc", "hello.there"]); + t("abc.hello.there", '.', 2u, ~["abc", "hello", "there"]); + t("abc.hello.there", '.', 3u, ~["abc", "hello", "there"]); + t(".hello.there", '.', 0u, ~[".hello.there"]); + t(".hello.there", '.', 1u, ~["", "hello.there"]); + t("...hello.there.", '.', 3u, ~["", "", "", "hello.there."]); + t("...hello.there.", '.', 5u, ~["", "", "", "hello", "there", ""]); - assert [""]/~ == splitn_char("", 'z', 5u); - assert ["",""]/~ == splitn_char("z", 'z', 5u); - assert ["ok"]/~ == splitn_char("ok", 'z', 5u); - assert ["z"]/~ == splitn_char("z", 'z', 0u); - assert ["w.x.y"]/~ == splitn_char("w.x.y", '.', 0u); - assert ["w","x.y"]/~ == splitn_char("w.x.y", '.', 1u); + assert ~[""] == splitn_char("", 'z', 5u); + assert ~["",""] == splitn_char("z", 'z', 5u); + assert ~["ok"] == splitn_char("ok", 'z', 5u); + assert ~["z"] == splitn_char("z", 'z', 0u); + assert ~["w.x.y"] == splitn_char("w.x.y", '.', 0u); + assert ~["w","x.y"] == splitn_char("w.x.y", '.', 1u); } #[test] fn test_splitn_char_2 () { let data = "ประเทศไทย中华Việt Nam"; - assert ["ประเทศไทย中", "Việt Nam"]/~ + assert ~["ประเทศไทย中", "Việt Nam"] == splitn_char(data, '华', 1u); - assert ["", "", "XXX", "YYYzWWWz"]/~ + assert ~["", "", "XXX", "YYYzWWWz"] == splitn_char("zzXXXzYYYzWWWz", 'z', 3u); - assert ["",""]/~ == splitn_char("z", 'z', 5u); - assert [""]/~ == splitn_char("", 'z', 5u); - assert ["ok"]/~ == splitn_char("ok", 'z', 5u); + assert ~["",""] == splitn_char("z", 'z', 5u); + assert ~[""] == splitn_char("", 'z', 5u); + assert ~["ok"] == splitn_char("ok", 'z', 5u); } #[test] fn test_splitn_char_3() { let data = "ประเทศไทย中华Việt Nam"; - assert ["ประเทศไทย中华", "iệt Nam"]/~ + assert ~["ประเทศไทย中华", "iệt Nam"] == splitn_char(data, 'V', 1u); - assert ["ประเ", "ศไทย中华Việt Nam"]/~ + assert ~["ประเ", "ศไทย中华Việt Nam"] == splitn_char(data, 'ท', 1u); } @@ -2160,40 +2160,40 @@ mod tests { t("::hello::there::", "::", 3, ""); let data = "ประเทศไทย中华Việt Nam"; - assert ["ประเทศไทย", "Việt Nam"]/~ + assert ~["ประเทศไทย", "Việt Nam"] == split_str (data, "中华"); - assert ["", "XXX", "YYY", ""]/~ + assert ~["", "XXX", "YYY", ""] == split_str("zzXXXzzYYYzz", "zz"); - assert ["zz", "zYYYz"]/~ + assert ~["zz", "zYYYz"] == split_str("zzXXXzYYYz", "XXX"); - assert ["", "XXX", "YYY", ""]/~ == split_str(".XXX.YYY.", "."); - assert [""]/~ == split_str("", "."); - assert ["",""]/~ == split_str("zz", "zz"); - assert ["ok"]/~ == split_str("ok", "z"); - assert ["","z"]/~ == split_str("zzz", "zz"); - assert ["","","z"]/~ == split_str("zzzzz", "zz"); + assert ~["", "XXX", "YYY", ""] == split_str(".XXX.YYY.", "."); + assert ~[""] == split_str("", "."); + assert ~["",""] == split_str("zz", "zz"); + assert ~["ok"] == split_str("ok", "z"); + assert ~["","z"] == split_str("zzz", "zz"); + assert ~["","","z"] == split_str("zzzzz", "zz"); } #[test] fn test_split() { let data = "ประเทศไทย中华Việt Nam"; - assert ["ประเทศไทย中", "Việt Nam"]/~ - == split (data, {|cc| cc == '华'}); + assert ~["ประเทศไทย中", "Việt Nam"] + == split (data, |cc| cc == '华'); - assert ["", "", "XXX", "YYY", ""]/~ + assert ~["", "", "XXX", "YYY", ""] == split("zzXXXzYYYz", char::is_lowercase); - assert ["zz", "", "", "z", "", "", "z"]/~ + assert ~["zz", "", "", "z", "", "", "z"] == split("zzXXXzYYYz", char::is_uppercase); - assert ["",""]/~ == split("z", {|cc| cc == 'z'}); - assert [""]/~ == split("", {|cc| cc == 'z'}); - assert ["ok"]/~ == split("ok", {|cc| cc == 'z'}); + assert ~["",""] == split("z", |cc| cc == 'z'); + assert ~[""] == split("", |cc| cc == 'z'); + assert ~["ok"] == split("ok", |cc| cc == 'z'); } #[test] @@ -2201,34 +2201,34 @@ mod tests { let lf = "\nMary had a little lamb\nLittle lamb\n"; let crlf = "\r\nMary had a little lamb\r\nLittle lamb\r\n"; - assert ["", "Mary had a little lamb", "Little lamb", ""]/~ + assert ~["", "Mary had a little lamb", "Little lamb", ""] == lines(lf); - assert ["", "Mary had a little lamb", "Little lamb", ""]/~ + assert ~["", "Mary had a little lamb", "Little lamb", ""] == lines_any(lf); - assert ["\r", "Mary had a little lamb\r", "Little lamb\r", ""]/~ + assert ~["\r", "Mary had a little lamb\r", "Little lamb\r", ""] == lines(crlf); - assert ["", "Mary had a little lamb", "Little lamb", ""]/~ + assert ~["", "Mary had a little lamb", "Little lamb", ""] == lines_any(crlf); - assert [""]/~ == lines (""); - assert [""]/~ == lines_any(""); - assert ["",""]/~ == lines ("\n"); - assert ["",""]/~ == lines_any("\n"); - assert ["banana"]/~ == lines ("banana"); - assert ["banana"]/~ == lines_any("banana"); + assert ~[""] == lines (""); + assert ~[""] == lines_any(""); + assert ~["",""] == lines ("\n"); + assert ~["",""] == lines_any("\n"); + assert ~["banana"] == lines ("banana"); + assert ~["banana"] == lines_any("banana"); } #[test] fn test_words () { let data = "\nMary had a little lamb\nLittle lamb\n"; - assert ["Mary","had","a","little","lamb","Little","lamb"]/~ + assert ~["Mary","had","a","little","lamb","Little","lamb"] == words(data); - assert ["ok"]/~ == words("ok"); - assert []/~ == words(""); + assert ~["ok"] == words("ok"); + assert ~[] == words(""); } #[test] @@ -2285,23 +2285,23 @@ mod tests { #[test] fn test_concat() { - fn t(v: [str]/~, s: str) { assert (eq(concat(v), s)); } - t(["you", "know", "I'm", "no", "good"]/~, "youknowI'mnogood"); - let v: [str]/~ = []/~; + fn t(v: ~[str], s: str) { assert (eq(concat(v), s)); } + t(~["you", "know", "I'm", "no", "good"], "youknowI'mnogood"); + let v: ~[str] = ~[]; t(v, ""); - t(["hi"]/~, "hi"); + t(~["hi"], "hi"); } #[test] fn test_connect() { - fn t(v: [str]/~, sep: str, s: str) { + fn t(v: ~[str], sep: str, s: str) { assert (eq(connect(v, sep), s)); } - t(["you", "know", "I'm", "no", "good"]/~, + t(~["you", "know", "I'm", "no", "good"], " ", "you know I'm no good"); - let v: [str]/~ = []/~; + let v: ~[str] = ~[]; t(v, " ", ""); - t(["hi"]/~, " ", "hi"); + t(~["hi"], " ", "hi"); } #[test] @@ -2318,9 +2318,9 @@ mod tests { #[test] fn test_to_lower() { - assert "" == map("", {|c| libc::tolower(c as c_char) as char}); + assert "" == map("", |c| libc::tolower(c as c_char) as char); assert "ymca" == map("YMCA", - {|c| libc::tolower(c as c_char) as char}); + |c| libc::tolower(c as c_char) as char); } #[test] @@ -2553,7 +2553,7 @@ mod tests { #[test] fn test_unsafe_from_bytes() { - let a = [65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8]/~; + let a = ~[65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8]; let b = unsafe { unsafe::from_bytes(a) }; assert (b == "AAAAAAA"); } @@ -2561,7 +2561,7 @@ mod tests { #[test] fn test_from_bytes() { let ss = "ศไทย中华Việt Nam"; - let bb = [0xe0_u8, 0xb8_u8, 0xa8_u8, + let bb = ~[0xe0_u8, 0xb8_u8, 0xa8_u8, 0xe0_u8, 0xb9_u8, 0x84_u8, 0xe0_u8, 0xb8_u8, 0x97_u8, 0xe0_u8, 0xb8_u8, 0xa2_u8, @@ -2570,7 +2570,7 @@ mod tests { 0x56_u8, 0x69_u8, 0xe1_u8, 0xbb_u8, 0x87_u8, 0x74_u8, 0x20_u8, 0x4e_u8, 0x61_u8, - 0x6d_u8]/~; + 0x6d_u8]; assert ss == from_bytes(bb); } @@ -2579,7 +2579,7 @@ mod tests { #[should_fail] #[ignore(cfg(windows))] fn test_from_bytes_fail() { - let bb = [0xff_u8, 0xb8_u8, 0xa8_u8, + let bb = ~[0xff_u8, 0xb8_u8, 0xa8_u8, 0xe0_u8, 0xb9_u8, 0x84_u8, 0xe0_u8, 0xb8_u8, 0x97_u8, 0xe0_u8, 0xb8_u8, 0xa2_u8, @@ -2588,7 +2588,7 @@ mod tests { 0x56_u8, 0x69_u8, 0xe1_u8, 0xbb_u8, 0x87_u8, 0x74_u8, 0x20_u8, 0x4e_u8, 0x61_u8, - 0x6d_u8]/~; + 0x6d_u8]; let _x = from_bytes(bb); } @@ -2596,7 +2596,7 @@ mod tests { #[test] fn test_from_buf() { unsafe { - let a = [65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8]/~; + let a = ~[65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 65u8, 0u8]; let b = vec::unsafe::to_ptr(a); let c = unsafe::from_buf(b); assert (c == "AAAAAAA"); @@ -2608,13 +2608,13 @@ mod tests { #[should_fail] fn test_as_bytes_fail() { // Don't double free - as_bytes("") {|_bytes| fail } + as_bytes::<()>("", |_bytes| fail ); } #[test] fn test_as_buf() { let a = "Abcdefg"; - let b = as_buf(a, {|buf| + let b = as_buf(a, |buf| { assert unsafe { *buf } == 65u8; 100 }); @@ -2624,7 +2624,7 @@ mod tests { #[test] fn test_as_buf_small() { let a = "A"; - let b = as_buf(a, {|buf| + let b = as_buf(a, |buf| { assert unsafe { *buf } == 65u8; 100 }); @@ -2635,7 +2635,7 @@ mod tests { fn test_as_buf2() { unsafe { let s = "hello"; - let sb = as_buf(s, {|b| b }); + let sb = as_buf(s, |b| b); let s_cstr = unsafe::from_buf(sb); assert (eq(s_cstr, s)); } @@ -2645,7 +2645,7 @@ mod tests { fn vec_str_conversions() { let s1: str = "All mimsy were the borogoves"; - let v: [u8]/~ = bytes(s1); + let v: ~[u8] = bytes(s1); let s2: str = from_bytes(v); let mut i: uint = 0u; let n1: uint = len(s1); @@ -2689,7 +2689,7 @@ mod tests { #[test] fn test_chars_iter() { let mut i = 0; - chars_iter("x\u03c0y") {|ch| + do chars_iter("x\u03c0y") |ch| { alt check i { 0 { assert ch == 'x'; } 1 { assert ch == '\u03c0'; } @@ -2698,14 +2698,14 @@ mod tests { i += 1; } - chars_iter("") {|_ch| fail; } // should not fail + chars_iter("", |_ch| fail ); // should not fail } #[test] fn test_bytes_iter() { let mut i = 0; - bytes_iter("xyz") {|bb| + do bytes_iter("xyz") |bb| { alt check i { 0 { assert bb == 'x' as u8; } 1 { assert bb == 'y' as u8; } @@ -2714,7 +2714,7 @@ mod tests { i += 1; } - bytes_iter("") {|bb| assert bb == 0u8; } + bytes_iter("", |bb| assert bb == 0u8); } #[test] @@ -2723,7 +2723,7 @@ mod tests { let mut ii = 0; - split_char_iter(data, ' ') {|xx| + do split_char_iter(data, ' ') |xx| { alt ii { 0 { assert "\nMary" == xx; } 1 { assert "had" == xx; } @@ -2741,7 +2741,7 @@ mod tests { let mut ii = 0; - splitn_char_iter(data, ' ', 2u) {|xx| + do splitn_char_iter(data, ' ', 2u) |xx| { alt ii { 0 { assert "\nMary" == xx; } 1 { assert "had" == xx; } @@ -2758,7 +2758,7 @@ mod tests { let mut ii = 0; - words_iter(data) {|ww| + do words_iter(data) |ww| { alt ii { 0 { assert "Mary" == ww; } 1 { assert "had" == ww; } @@ -2769,7 +2769,7 @@ mod tests { ii += 1; } - words_iter("") {|_x| fail; } // should not fail + words_iter("", |_x| fail); // should not fail } #[test] @@ -2778,7 +2778,7 @@ mod tests { let mut ii = 0; - lines_iter(lf) {|x| + do lines_iter(lf) |x| { alt ii { 0 { assert "" == x; } 1 { assert "Mary had a little lamb" == x; } @@ -2792,9 +2792,8 @@ mod tests { #[test] fn test_map() { - assert "" == map("", {|c| libc::toupper(c as c_char) as char}); - assert "YMCA" == map("ymca", {|c| libc::toupper(c as c_char) - as char}); + assert "" == map("", |c| libc::toupper(c as c_char) as char); + assert "YMCA" == map("ymca", |c| libc::toupper(c as c_char) as char); } #[test] @@ -2818,38 +2817,38 @@ mod tests { #[test] fn test_chars() { let ss = "ศไทย中华Việt Nam"; - assert ['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']/~ + assert ~['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m'] == chars(ss); } #[test] fn test_utf16() { let pairs = - [("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n", - [0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16, + ~[("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n", + ~[0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf46_u16, 0xd800_u16, 0xdf39_u16, 0xd800_u16, 0xdf3b_u16, - 0xd800_u16, 0xdf30_u16, 0x000a_u16]/~), + 0xd800_u16, 0xdf30_u16, 0x000a_u16]), ("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n", - [0xd801_u16, 0xdc12_u16, 0xd801_u16, + ~[0xd801_u16, 0xdc12_u16, 0xd801_u16, 0xdc49_u16, 0xd801_u16, 0xdc2e_u16, 0xd801_u16, 0xdc40_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4b_u16, 0x0020_u16, 0xd801_u16, 0xdc0f_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4d_u16, - 0x000a_u16]/~), + 0x000a_u16]), ("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n", - [0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16, + ~[0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf11_u16, 0xd800_u16, 0xdf09_u16, 0x00b7_u16, 0xd800_u16, 0xdf0c_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf15_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, - 0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]/~), + 0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]), ("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n", - [0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16, + ~[0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc91_u16, 0xd801_u16, 0xdc9b_u16, 0xd801_u16, 0xdc92_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, @@ -2859,9 +2858,9 @@ mod tests { 0xdc9c_u16, 0xd801_u16, 0xdc92_u16, 0xd801_u16, 0xdc96_u16, 0xd801_u16, 0xdc86_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc86_u16, - 0x000a_u16 ]/~) ]/~; + 0x000a_u16 ]) ]; - for vec::each(pairs) {|p| + for vec::each(pairs) |p| { let (s, u) = p; assert to_utf16(s) == u; assert from_utf16(u) == s; @@ -2874,7 +2873,7 @@ mod tests { fn test_each_char() { let s = "abc"; let mut found_b = false; - for each_char(s) {|ch| + for each_char(s) |ch| { if ch == 'b' { found_b = true; break; @@ -2886,7 +2885,7 @@ mod tests { #[test] fn test_unpack_slice() { let a = "hello"; - unpack_slice(a) {|buf, len| + do unpack_slice(a) |buf, len| { unsafe { assert a[0] == 'h' as u8; assert *buf == 'h' as u8; diff --git a/src/libcore/sys.rs b/src/libcore/sys.rs index c90b48a9342..28ef48240ee 100644 --- a/src/libcore/sys.rs +++ b/src/libcore/sys.rs @@ -183,16 +183,16 @@ mod tests { let lock = arc::arc(create_lock()); let lock2 = arc::clone(&lock); - task::spawn {|move lock2| + do task::spawn |move lock2| { let lock = arc::get(&lock2); - (*lock).lock_cond {|c| + do (*lock).lock_cond |c| { c.wait(); } } let mut signaled = false; while !signaled { - (*arc::get(&lock)).lock_cond {|c| + do (*arc::get(&lock)).lock_cond |c| { signaled = c.signal() } } diff --git a/src/libcore/task.rs b/src/libcore/task.rs index 9f379bbd94a..2d4c2b44226 100644 --- a/src/libcore/task.rs +++ b/src/libcore/task.rs @@ -295,7 +295,7 @@ fn future_result(builder: builder) -> future::future<task_result> { with get_opts(builder) }); - future::from_fn {|| + do future::from_fn || { alt comm::recv(po) { exit(_, result) { result } } @@ -307,7 +307,7 @@ fn future_task(builder: builder) -> future::future<task> { let mut po = comm::port(); let ch = comm::chan(po); - add_wrapper(builder) {|body| + do add_wrapper(builder) |body| { fn~() { comm::send(ch, get_task()); body(); @@ -342,7 +342,7 @@ fn run_listener<A:send>(-builder: builder, let setup_po = comm::port(); let setup_ch = comm::chan(setup_po); - run(builder) {|| + do run(builder) || { let po = comm::port(); let mut ch = comm::chan(po); comm::send(setup_ch, ch); @@ -439,7 +439,7 @@ fn try<T:send>(+f: fn~() -> T) -> result<T,()> { let mut builder = builder(); unsupervise(builder); let result = future_result(builder); - run(builder) {|| + do run(builder) || { comm::send(ch, f()); } alt future::get(result) { @@ -540,7 +540,7 @@ fn spawn_raw(opts: task_opts, +f: fn~()) { }; assert !new_task.is_null(); - option::iter(opts.notify_chan) {|c| + do option::iter(opts.notify_chan) |c| { // FIXME (#1087): Would like to do notification in Rust rustrt::rust_task_config_notify(new_task, c); } @@ -615,7 +615,7 @@ crust fn cleanup_task_local_map(map_ptr: *libc::c_void) unsafe { assert !map_ptr.is_null(); // Get and keep the single reference that was created at the beginning. let map: task_local_map = unsafe::reinterpret_cast(map_ptr); - for (*map).each {|entry| + for (*map).each |entry| { alt entry { // Finaliser drops data. We drop the finaliser implicitly here. some((_key, data, finalise_fn)) { finalise_fn(data); } @@ -657,10 +657,10 @@ unsafe fn key_to_key_value<T>(key: local_data_key<T>) -> *libc::c_void { unsafe fn local_data_lookup<T>(map: task_local_map, key: local_data_key<T>) -> option<(uint, *libc::c_void, fn@(+*libc::c_void))> { let key_value = key_to_key_value(key); - let map_pos = (*map).position {|entry| + let map_pos = (*map).position(|entry| alt entry { some((k,_,_)) { k == key_value } none { false } } - }; - map_pos.map {|index| + ); + do map_pos.map |index| { // .get() is guaranteed because of "none { false }" above. let (_, data_ptr, finaliser) = (*map)[index].get(); (index, data_ptr, finaliser) @@ -671,7 +671,7 @@ unsafe fn local_get_helper<T>(task: *rust_task, key: local_data_key<T>, do_pop: bool) -> option<@T> { let map = get_task_local_map(task); // Interpret our findings from the map - local_data_lookup(map, key).map {|result| + do local_data_lookup(map, key).map |result| { // A reference count magically appears on 'data' out of thin air. // 'data' has the reference we originally stored it with. We either // need to erase it from the map or artificially bump the count. @@ -718,7 +718,7 @@ unsafe fn local_set<T>(task: *rust_task, key: local_data_key<T>, -data: @T) { } none { // Find an empty slot. If not, grow the vector. - alt (*map).position({|x| x == none}) { + alt (*map).position(|x| x == none) { some(empty_index) { (*map).set_elt(empty_index, new_entry); } @@ -799,7 +799,7 @@ native mod rustrt { fn test_spawn_raw_simple() { let po = comm::port(); let ch = comm::chan(po); - spawn_raw(default_task_opts()) {|| + do spawn_raw(default_task_opts()) || { comm::send(ch, ()); } comm::recv(po); @@ -812,7 +812,7 @@ fn test_spawn_raw_unsupervise() { supervise: false with default_task_opts() }; - spawn_raw(opts) {|| + do spawn_raw(opts) || { fail; } } @@ -829,7 +829,7 @@ fn test_spawn_raw_notify() { notify_chan: some(notify_ch) with default_task_opts() }; - spawn_raw(opts) {|| + do spawn_raw(opts) || { comm::send(task_ch, get_task()); } let task_ = comm::recv(task_po); @@ -840,7 +840,7 @@ fn test_spawn_raw_notify() { notify_chan: some(notify_ch) with default_task_opts() }; - spawn_raw(opts) {|| + do spawn_raw(opts) || { comm::send(task_ch, get_task()); fail; } @@ -853,7 +853,7 @@ fn test_run_basic() { let po = comm::port(); let ch = comm::chan(po); let buildr = builder(); - run(buildr) {|| + do run(buildr) || { comm::send(ch, ()); } comm::recv(po); @@ -864,13 +864,13 @@ fn test_add_wrapper() { let po = comm::port(); let ch = comm::chan(po); let buildr = builder(); - add_wrapper(buildr) {|body| + do add_wrapper(buildr) |body| { fn~() { body(); comm::send(ch, ()); } } - run(buildr) {||} + do run(buildr) || { } comm::recv(po); } @@ -879,13 +879,13 @@ fn test_add_wrapper() { fn test_future_result() { let buildr = builder(); let result = future_result(buildr); - run(buildr) {||} + do run(buildr) || { } assert future::get(result) == success; let buildr = builder(); let result = future_result(buildr); unsupervise(buildr); - run(buildr) {|| fail } + do run(buildr) || { fail } assert future::get(result) == failure; } @@ -895,7 +895,7 @@ fn test_future_task() { let ch = comm::chan(po); let buildr = builder(); let task1 = future_task(buildr); - run(buildr) {|| comm::send(ch, get_task()) } + do run(buildr) || { comm::send(ch, get_task()) } assert future::get(task1) == comm::recv(po); } @@ -903,7 +903,7 @@ fn test_future_task() { fn test_spawn_listiner_bidi() { let po = comm::port(); let ch = comm::chan(po); - let ch = spawn_listener {|po| + let ch = do spawn_listener |po| { // Now the child has a port called 'po' to read from and // an environment-captured channel called 'ch'. let res = comm::recv(po); @@ -918,7 +918,7 @@ fn test_spawn_listiner_bidi() { #[test] fn test_try_success() { - alt try {|| + alt do try || { "Success!" } { result::ok("Success!") { } @@ -929,7 +929,7 @@ fn test_try_success() { #[test] #[ignore(cfg(windows))] fn test_try_fail() { - alt try {|| + alt do try || { fail } { result::err(()) { } @@ -941,7 +941,7 @@ fn test_try_fail() { #[should_fail] #[ignore(cfg(windows))] fn test_spawn_sched_no_threads() { - spawn_sched(manual_threads(0u)) {|| }; + do spawn_sched(manual_threads(0u)) || { } } #[test] @@ -952,7 +952,7 @@ fn test_spawn_sched() { fn f(i: int, ch: comm::chan<()>) { let parent_sched_id = rustrt::rust_get_sched_id(); - spawn_sched(single_threaded) {|| + do spawn_sched(single_threaded) || { let child_sched_id = rustrt::rust_get_sched_id(); assert parent_sched_id != child_sched_id; @@ -973,9 +973,9 @@ fn test_spawn_sched_childs_on_same_sched() { let po = comm::port(); let ch = comm::chan(po); - spawn_sched(single_threaded) {|| + do spawn_sched(single_threaded) || { let parent_sched_id = rustrt::rust_get_sched_id(); - spawn {|| + do spawn || { let child_sched_id = rustrt::rust_get_sched_id(); // This should be on the same scheduler assert parent_sched_id == child_sched_id; @@ -1002,7 +1002,7 @@ fn test_spawn_sched_blocking() { // Testing that a task in one scheduler can block in foreign code // without affecting other schedulers - iter::repeat(20u) {|| + do iter::repeat(20u) || { let start_po = comm::port(); let start_ch = comm::chan(start_po); @@ -1011,7 +1011,7 @@ fn test_spawn_sched_blocking() { let lock = testrt::rust_dbg_lock_create(); - spawn_sched(single_threaded) {|| + do spawn_sched(single_threaded) || { testrt::rust_dbg_lock_lock(lock); comm::send(start_ch, ()); @@ -1038,7 +1038,7 @@ fn test_spawn_sched_blocking() { let setup_ch = comm::chan(setup_po); let parent_po = comm::port(); let parent_ch = comm::chan(parent_po); - spawn {|| + do spawn || { let child_po = comm::port(); comm::send(setup_ch, comm::chan(child_po)); pingpong(child_po, parent_ch); @@ -1063,7 +1063,7 @@ fn avoid_copying_the_body(spawnfn: fn(+fn~())) { let x = ~1; let x_in_parent = ptr::addr_of(*x) as uint; - spawnfn {|| + do spawnfn || { let x_in_child = ptr::addr_of(*x) as uint; comm::send(ch, x_in_child); } @@ -1079,7 +1079,7 @@ fn test_avoid_copying_the_body_spawn() { #[test] fn test_avoid_copying_the_body_spawn_listener() { - avoid_copying_the_body {|f| + do avoid_copying_the_body |f| { spawn_listener(fn~(move f, _po: comm::port<int>) { f(); }); @@ -1088,9 +1088,9 @@ fn test_avoid_copying_the_body_spawn_listener() { #[test] fn test_avoid_copying_the_body_run() { - avoid_copying_the_body {|f| + do avoid_copying_the_body |f| { let buildr = builder(); - run(buildr) {|| + do run(buildr) || { f(); } } @@ -1098,7 +1098,7 @@ fn test_avoid_copying_the_body_run() { #[test] fn test_avoid_copying_the_body_run_listener() { - avoid_copying_the_body {|f| + do avoid_copying_the_body |f| { let buildr = builder(); run_listener(buildr, fn~(move f, _po: comm::port<int>) { f(); @@ -1108,8 +1108,8 @@ fn test_avoid_copying_the_body_run_listener() { #[test] fn test_avoid_copying_the_body_try() { - avoid_copying_the_body {|f| - try {|| + do avoid_copying_the_body |f| { + do try || { f() }; } @@ -1117,10 +1117,10 @@ fn test_avoid_copying_the_body_try() { #[test] fn test_avoid_copying_the_body_future_task() { - avoid_copying_the_body {|f| + do avoid_copying_the_body |f| { let buildr = builder(); future_task(buildr); - run(buildr) {|| + do run(buildr) || { f(); } } @@ -1128,10 +1128,10 @@ fn test_avoid_copying_the_body_future_task() { #[test] fn test_avoid_copying_the_body_unsupervise() { - avoid_copying_the_body {|f| + do avoid_copying_the_body |f| { let buildr = builder(); unsupervise(buildr); - run(buildr) {|| + do run(buildr) || { f(); } } @@ -1151,7 +1151,7 @@ fn test_osmain() { let po = comm::port(); let ch = comm::chan(po); - run(buildr) {|| + do run(buildr) || { comm::send(ch, ()); } comm::recv(po); @@ -1166,12 +1166,12 @@ fn test_unkillable() { let ch = po.chan(); // We want to do this after failing - spawn {|| + do spawn || { iter::repeat(10u, yield); ch.send(()); } - spawn {|| + do spawn || { yield(); // We want to fail after the unkillable task // blocks on recv @@ -1179,7 +1179,7 @@ fn test_unkillable() { } unsafe { - unkillable {|| + do unkillable || { let p = ~0; let pp: *uint = unsafe::transmute(p); @@ -1198,7 +1198,7 @@ fn test_unkillable() { fn test_tls_multitask() unsafe { fn my_key(+_x: @str) { } local_data_set(my_key, @"parent data"); - task::spawn {|| + do task::spawn || { assert local_data_get(my_key) == none; // TLS shouldn't carry over. local_data_set(my_key, @"child data"); assert *(local_data_get(my_key).get()) == "child data"; @@ -1230,19 +1230,19 @@ fn test_tls_pop() unsafe { #[test] fn test_tls_modify() unsafe { fn my_key(+_x: @str) { } - local_data_modify(my_key) {|data| + local_data_modify(my_key, |data| { alt data { some(@val) { fail "unwelcome value: " + val } none { some(@"first data") } } - } - local_data_modify(my_key) {|data| + }); + local_data_modify(my_key, |data| { alt data { some(@"first data") { some(@"next data") } some(@val) { fail "wrong value: " + val } none { fail "missing value" } } - } + }); assert *(local_data_pop(my_key).get()) == "next data"; } @@ -1254,7 +1254,7 @@ fn test_tls_crust_automorestack_memorial_bug() unsafe { // something within a rust stack segment. Then a subsequent upcall (esp. // for logging, think vsnprintf) would run on a stack smaller than 1 MB. fn my_key(+_x: @str) { } - task::spawn {|| + do task::spawn || { unsafe { local_data_set(my_key, @"hax"); } } } @@ -1264,7 +1264,7 @@ fn test_tls_multiple_types() unsafe { fn str_key(+_x: @str) { } fn box_key(+_x: @@()) { } fn int_key(+_x: @int) { } - task::spawn{|| + do task::spawn || { local_data_set(str_key, @"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); @@ -1276,7 +1276,7 @@ fn test_tls_overwrite_multiple_types() unsafe { fn str_key(+_x: @str) { } fn box_key(+_x: @@()) { } fn int_key(+_x: @int) { } - task::spawn{|| + do task::spawn || { local_data_set(str_key, @"string data"); local_data_set(int_key, @42); // This could cause a segfault if overwriting-destruction is done with @@ -1294,7 +1294,7 @@ fn test_tls_cleanup_on_failure() unsafe { fn int_key(+_x: @int) { } local_data_set(str_key, @"parent data"); local_data_set(box_key, @@()); - task::spawn{|| // spawn_linked + do task::spawn || { // spawn_linked local_data_set(str_key, @"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); diff --git a/src/libcore/to_str.rs b/src/libcore/to_str.rs index 229afc810e5..359f5ea4a9c 100644 --- a/src/libcore/to_str.rs +++ b/src/libcore/to_str.rs @@ -56,10 +56,10 @@ impl <A: to_str copy, B: to_str copy, C: to_str copy> of to_str for (A, B, C){ } } -impl <A: to_str> of to_str for [A]/~ { +impl <A: to_str> of to_str for ~[A] { fn to_str() -> str { let mut acc = "[", first = true; - for vec::each(self) {|elt| + for vec::each(self) |elt| { if first { first = false; } else { acc += ", "; } acc += elt.to_str(); @@ -98,12 +98,12 @@ mod tests { } fn test_vectors() { - let x: [int]/~ = []/~; - assert x.to_str() == "[]/~"; - assert [1]/~.to_str() == "[1]/~"; - assert [1, 2, 3]/~.to_str() == "[1, 2, 3]/~"; - assert [[]/~, [1]/~, [1, 1]/~]/~.to_str() == - "[[]/~, [1]/~, [1, 1]/~]/~"; + let x: ~[int] = ~[]; + assert x.to_str() == "~[]"; + assert (~[1]).to_str() == "~[1]"; + assert (~[1, 2, 3]).to_str() == "~[1, 2, 3]"; + assert (~[~[], ~[1], ~[1, 1]]).to_str() == + "~[~[], ~[1], ~[1, 1]]"; } fn test_pointer_types() { diff --git a/src/libcore/uint-template.rs b/src/libcore/uint-template.rs index 6ec8d2e8789..7f4ffe97c01 100644 --- a/src/libcore/uint-template.rs +++ b/src/libcore/uint-template.rs @@ -88,7 +88,7 @@ Parse a buffer of bytes `buf` must not be empty "] -fn parse_buf(buf: [u8]/~, radix: uint) -> option<T> { +fn parse_buf(buf: ~[u8], radix: uint) -> option<T> { if vec::len(buf) == 0u { ret none; } let mut i = vec::len(buf) - 1u; let mut power = 1u as T; @@ -131,8 +131,8 @@ Convert to a string in a given base Fails if `radix` < 2 or `radix` > 16 "] fn to_str(num: T, radix: uint) -> str { - to_str_bytes(false, num, radix) {|slice| - vec::unpack_slice(slice) {|p, len| + do to_str_bytes(false, num, radix) |slice| { + do vec::unpack_slice(slice) |p, len| { unsafe { str::unsafe::from_buf_len(p, len) } } } @@ -140,7 +140,7 @@ fn to_str(num: T, radix: uint) -> str { #[doc = "Low-level helper routine for string conversion."] fn to_str_bytes<U>(neg: bool, num: T, radix: uint, - f: fn([u8]/&) -> U) -> U { + f: fn(v: &[u8]) -> U) -> U { #[inline(always)] fn digit(n: T) -> u8 { @@ -177,7 +177,7 @@ fn to_str_bytes<U>(neg: bool, num: T, radix: uint, // in-bounds, no extra cost. unsafe { - vec::unpack_slice(buf) {|p, len| + do vec::unpack_slice(buf) |p, len| { let mp = p as *mut u8; let mut i = len; let mut n = num; diff --git a/src/libcore/unsafe.rs b/src/libcore/unsafe.rs index 999609a537d..98bfa5bbfc4 100644 --- a/src/libcore/unsafe.rs +++ b/src/libcore/unsafe.rs @@ -83,7 +83,7 @@ mod tests { #[test] fn test_transmute2() { unsafe { - assert transmute("L") == [76u8, 0u8]/~; + assert transmute("L") == ~[76u8, 0u8]; } } } diff --git a/src/libcore/vec.rs b/src/libcore/vec.rs index e12a62fc41d..e95598a08c8 100644 --- a/src/libcore/vec.rs +++ b/src/libcore/vec.rs @@ -102,17 +102,17 @@ native mod rusti { type init_op<T> = fn(uint) -> T; #[doc = "Returns true if a vector contains no elements"] -pure fn is_empty<T>(v: [const T]/&) -> bool { - unpack_const_slice(v) {|_p, len| len == 0u} +pure fn is_empty<T>(v: &[const T]) -> bool { + unpack_const_slice(v, |_p, len| len == 0u) } #[doc = "Returns true if a vector contains some elements"] -pure fn is_not_empty<T>(v: [const T]/&) -> bool { - unpack_const_slice(v) {|_p, len| len > 0u} +pure fn is_not_empty<T>(v: &[const T]) -> bool { + unpack_const_slice(v, |_p, len| len > 0u) } #[doc = "Returns true if two vectors have the same length"] -pure fn same_length<T, U>(xs: [const T]/&, ys: [const U]/&) -> bool { +pure fn same_length<T, U>(xs: &[const T], ys: &[const U]) -> bool { len(xs) == len(ys) } @@ -127,7 +127,7 @@ capacity, then no action is taken. * v - A vector * n - The number of elements to reserve space for "] -fn reserve<T>(&v: [const T]/~, n: uint) { +fn reserve<T>(&v: ~[const T], n: uint) { // Only make the (slow) call into the runtime if we have to if capacity(v) < n { let ptr = ptr::addr_of(v) as **unsafe::vec_repr; @@ -151,7 +151,7 @@ capacity, then no action is taken. * v - A vector * n - The number of elements to reserve space for "] -fn reserve_at_least<T>(&v: [const T]/~, n: uint) { +fn reserve_at_least<T>(&v: ~[const T], n: uint) { reserve(v, uint::next_power_of_two(n)); } @@ -159,7 +159,7 @@ fn reserve_at_least<T>(&v: [const T]/~, n: uint) { Returns the number of elements the vector can hold without reallocating "] #[inline(always)] -pure fn capacity<T>(&&v: [const T]/~) -> uint { +pure fn capacity<T>(&&v: ~[const T]) -> uint { unsafe { let repr: **unsafe::vec_repr = ::unsafe::reinterpret_cast(addr_of(v)); (**repr).alloc / sys::size_of::<T>() @@ -168,8 +168,8 @@ pure fn capacity<T>(&&v: [const T]/~) -> uint { #[doc = "Returns the length of a vector"] #[inline(always)] -pure fn len<T>(&&v: [const T]/&) -> uint { - unpack_const_slice(v) {|_p, len| len} +pure fn len<T>(&&v: &[const T]) -> uint { + unpack_const_slice(v, |_p, len| len) } #[doc = " @@ -178,8 +178,8 @@ Creates and initializes an immutable vector. Creates an immutable vector of size `n_elts` and initializes the elements to the value returned by the function `op`. "] -pure fn from_fn<T>(n_elts: uint, op: init_op<T>) -> [T]/~ { - let mut v = []/~; +pure fn from_fn<T>(n_elts: uint, op: init_op<T>) -> ~[T] { + let mut v = ~[]; unchecked{reserve(v, n_elts);} let mut i: uint = 0u; while i < n_elts unsafe { push(v, op(i)); i += 1u; } @@ -192,8 +192,8 @@ Creates and initializes an immutable vector. Creates an immutable vector of size `n_elts` and initializes the elements to the value `t`. "] -pure fn from_elem<T: copy>(n_elts: uint, t: T) -> [T]/~ { - let mut v = []/~; +pure fn from_elem<T: copy>(n_elts: uint, t: T) -> ~[T] { + let mut v = ~[]; unchecked{reserve(v, n_elts)} let mut i: uint = 0u; unsafe { // because push is impure @@ -203,33 +203,33 @@ pure fn from_elem<T: copy>(n_elts: uint, t: T) -> [T]/~ { } #[doc = "Produces a mut vector from an immutable vector."] -fn to_mut<T>(+v: [T]/~) -> [mut T]/~ { +fn to_mut<T>(+v: ~[T]) -> ~[mut T] { unsafe { ::unsafe::transmute(v) } } #[doc = "Produces an immutable vector from a mut vector."] -fn from_mut<T>(+v: [mut T]/~) -> [T]/~ { +fn from_mut<T>(+v: ~[mut T]) -> ~[T] { unsafe { ::unsafe::transmute(v) } } // Accessors #[doc = "Returns the first element of a vector"] -pure fn head<T: copy>(v: [const T]/&) -> T { v[0] } +pure fn head<T: copy>(v: &[const T]) -> T { v[0] } #[doc = "Returns a vector containing all but the first element of a slice"] -pure fn tail<T: copy>(v: [const T]/&) -> [T]/~ { +pure fn tail<T: copy>(v: &[const T]) -> ~[T] { ret slice(v, 1u, len(v)); } #[doc = "Returns a vector containing all but the first `n` \ elements of a slice"] -pure fn tailn<T: copy>(v: [const T]/&, n: uint) -> [T]/~ { +pure fn tailn<T: copy>(v: &[const T], n: uint) -> ~[T] { slice(v, n, len(v)) } #[doc = "Returns a vector containing all but the last element of a slice"] -pure fn init<T: copy>(v: [const T]/&) -> [T]/~ { +pure fn init<T: copy>(v: &[const T]) -> ~[T] { assert len(v) != 0u; slice(v, 0u, len(v) - 1u) } @@ -237,7 +237,7 @@ pure fn init<T: copy>(v: [const T]/&) -> [T]/~ { #[doc = " Returns the last element of the slice `v`, failing if the slice is empty. "] -pure fn last<T: copy>(v: [const T]/&) -> T { +pure fn last<T: copy>(v: &[const T]) -> T { if len(v) == 0u { fail "last_unsafe: empty vector" } v[len(v) - 1u] } @@ -246,16 +246,16 @@ pure fn last<T: copy>(v: [const T]/&) -> T { Returns `some(x)` where `x` is the last element of the slice `v`, or `none` if the vector is empty. "] -pure fn last_opt<T: copy>(v: [const T]/&) -> option<T> { +pure fn last_opt<T: copy>(v: &[const T]) -> option<T> { if len(v) == 0u { ret none; } some(v[len(v) - 1u]) } #[doc = "Returns a copy of the elements from [`start`..`end`) from `v`."] -pure fn slice<T: copy>(v: [const T]/&, start: uint, end: uint) -> [T]/~ { +pure fn slice<T: copy>(v: &[const T], start: uint, end: uint) -> ~[T] { assert (start <= end); assert (end <= len(v)); - let mut result = []/~; + let mut result = ~[]; unchecked { push_all(result, view(v, start, end)); } @@ -263,10 +263,10 @@ pure fn slice<T: copy>(v: [const T]/&, start: uint, end: uint) -> [T]/~ { } #[doc = "Return a slice that points into another slice."] -pure fn view<T: copy>(v: [const T]/&, start: uint, end: uint) -> [T]/&a { +pure fn view<T: copy>(v: &[const T], start: uint, end: uint) -> &a.[T] { assert (start <= end); assert (end <= len(v)); - unpack_slice(v) {|p, _len| + do unpack_slice(v) |p, _len| { unsafe { ::unsafe::reinterpret_cast( (ptr::offset(p, start), (end - start) * sys::size_of::<T>())) @@ -277,12 +277,12 @@ pure fn view<T: copy>(v: [const T]/&, start: uint, end: uint) -> [T]/&a { #[doc = " Split the vector `v` by applying each element against the predicate `f`. "] -fn split<T: copy>(v: [T]/&, f: fn(T) -> bool) -> [[T]/~]/~ { +fn split<T: copy>(v: &[T], f: fn(T) -> bool) -> ~[~[T]] { let ln = len(v); - if (ln == 0u) { ret []/~ } + if (ln == 0u) { ret ~[] } let mut start = 0u; - let mut result = []/~; + let mut result = ~[]; while start < ln { alt position_between(v, start, ln, f) { none { break } @@ -300,13 +300,13 @@ fn split<T: copy>(v: [T]/&, f: fn(T) -> bool) -> [[T]/~]/~ { Split the vector `v` by applying each element against the predicate `f` up to `n` times. "] -fn splitn<T: copy>(v: [T]/&, n: uint, f: fn(T) -> bool) -> [[T]/~]/~ { +fn splitn<T: copy>(v: &[T], n: uint, f: fn(T) -> bool) -> ~[~[T]] { let ln = len(v); - if (ln == 0u) { ret []/~ } + if (ln == 0u) { ret ~[] } let mut start = 0u; let mut count = n; - let mut result = []/~; + let mut result = ~[]; while start < ln && count > 0u { alt position_between(v, start, ln, f) { none { break } @@ -326,12 +326,12 @@ fn splitn<T: copy>(v: [T]/&, n: uint, f: fn(T) -> bool) -> [[T]/~]/~ { Reverse split the vector `v` by applying each element against the predicate `f`. "] -fn rsplit<T: copy>(v: [T]/&, f: fn(T) -> bool) -> [[T]/~]/~ { +fn rsplit<T: copy>(v: &[T], f: fn(T) -> bool) -> ~[~[T]] { let ln = len(v); - if (ln == 0u) { ret []/~ } + if (ln == 0u) { ret ~[] } let mut end = ln; - let mut result = []/~; + let mut result = ~[]; while end > 0u { alt rposition_between(v, 0u, end, f) { none { break } @@ -349,13 +349,13 @@ fn rsplit<T: copy>(v: [T]/&, f: fn(T) -> bool) -> [[T]/~]/~ { Reverse split the vector `v` by applying each element against the predicate `f` up to `n times. "] -fn rsplitn<T: copy>(v: [T]/&, n: uint, f: fn(T) -> bool) -> [[T]/~]/~ { +fn rsplitn<T: copy>(v: &[T], n: uint, f: fn(T) -> bool) -> ~[~[T]] { let ln = len(v); - if (ln == 0u) { ret []/~ } + if (ln == 0u) { ret ~[] } let mut end = ln; let mut count = n; - let mut result = []/~; + let mut result = ~[]; while end > 0u && count > 0u { alt rposition_between(v, 0u, end, f) { none { break } @@ -374,11 +374,11 @@ fn rsplitn<T: copy>(v: [T]/&, n: uint, f: fn(T) -> bool) -> [[T]/~]/~ { // Mutators #[doc = "Removes the first element from a vector and return it"] -fn shift<T>(&v: [T]/~) -> T { +fn shift<T>(&v: ~[T]) -> T { let ln = len::<T>(v); assert (ln > 0u); - let mut vv = []/~; + let mut vv = ~[]; v <-> vv; unsafe { @@ -387,7 +387,7 @@ fn shift<T>(&v: [T]/~) -> T { let vv = unsafe::to_ptr(vv); rr <- *vv; - for uint::range(1u, ln) {|i| + for uint::range(1u, ln) |i| { let r <- *ptr::offset(vv, i); push(v, r); } @@ -399,8 +399,8 @@ fn shift<T>(&v: [T]/~) -> T { } #[doc = "Prepend an element to the vector"] -fn unshift<T>(&v: [T]/~, +x: T) { - let mut vv = [x]/~; +fn unshift<T>(&v: ~[T], +x: T) { + let mut vv = ~[x]; v <-> vv; while len(vv) > 0 { push(v, shift(vv)); @@ -408,7 +408,7 @@ fn unshift<T>(&v: [T]/~, +x: T) { } #[doc = "Remove the last element from a vector and return it"] -fn pop<T>(&v: [const T]/~) -> T { +fn pop<T>(&v: ~[const T]) -> T { let ln = len(v); assert ln > 0u; let valptr = ptr::mut_addr_of(v[ln - 1u]); @@ -421,7 +421,7 @@ fn pop<T>(&v: [const T]/~) -> T { #[doc = "Append an element to a vector"] #[inline(always)] -fn push<T>(&v: [const T]/~, +initval: T) { +fn push<T>(&v: ~[const T], +initval: T) { unsafe { let repr: **unsafe::vec_repr = ::unsafe::reinterpret_cast(addr_of(v)); let fill = (**repr).fill; @@ -438,7 +438,7 @@ fn push<T>(&v: [const T]/~, +initval: T) { } } -fn push_slow<T>(&v: [const T]/~, +initval: T) { +fn push_slow<T>(&v: ~[const T], +initval: T) { unsafe { let ln = v.len(); reserve_at_least(v, ln + 1u); @@ -454,27 +454,25 @@ fn push_slow<T>(&v: [const T]/~, +initval: T) { // Unchecked vector indexing #[inline(always)] -unsafe fn ref<T: copy>(v: [const T]/&, i: uint) -> T { - unpack_slice(v) {|p, _len| - *ptr::offset(p, i) - } +unsafe fn ref<T: copy>(v: &[const T], i: uint) -> T { + unpack_slice(v, |p, _len| *ptr::offset(p, i)) } #[inline(always)] -fn push_all<T: copy>(&v: [const T]/~, rhs: [const T]/&) { +fn push_all<T: copy>(&v: ~[const T], rhs: &[const T]) { reserve(v, v.len() + rhs.len()); - for uint::range(0u, rhs.len()) {|i| + for uint::range(0u, rhs.len()) |i| { push(v, unsafe { ref(rhs, i) }) } } #[inline(always)] -fn push_all_move<T>(&v: [const T]/~, -rhs: [const T]/~) { +fn push_all_move<T>(&v: ~[const T], -rhs: ~[const T]) { reserve(v, v.len() + rhs.len()); unsafe { - unpack_slice(rhs) {|p, len| - for uint::range(0, len) {|i| + do unpack_slice(rhs) |p, len| { + for uint::range(0, len) |i| { let x <- *ptr::offset(p, i); push(v, x); } @@ -485,7 +483,7 @@ fn push_all_move<T>(&v: [const T]/~, -rhs: [const T]/~) { // Appending #[inline(always)] -pure fn append<T: copy>(+lhs: [T]/~, rhs: [const T]/&) -> [T]/~ { +pure fn append<T: copy>(+lhs: ~[T], rhs: &[const T]) -> ~[T] { let mut v <- lhs; unchecked { push_all(v, rhs); @@ -494,15 +492,15 @@ pure fn append<T: copy>(+lhs: [T]/~, rhs: [const T]/&) -> [T]/~ { } #[inline(always)] -pure fn append_one<T>(+lhs: [T]/~, +x: T) -> [T]/~ { +pure fn append_one<T>(+lhs: ~[T], +x: T) -> ~[T] { let mut v <- lhs; unchecked { push(v, x); } v } #[inline(always)] -pure fn append_mut<T: copy>(lhs: [mut T]/&, rhs: [const T]/&) -> [mut T]/~ { - let mut v = [mut]/~; +pure fn append_mut<T: copy>(lhs: &[mut T], rhs: &[const T]) -> ~[mut T] { + let mut v = ~[mut]; let mut i = 0u; while i < lhs.len() { unsafe { // This is impure, but it appears pure to the caller. @@ -529,7 +527,7 @@ Expands a vector in place, initializing the new elements to a given value * n - The number of elements to add * initval - The value for the new elements "] -fn grow<T: copy>(&v: [const T]/~, n: uint, initval: T) { +fn grow<T: copy>(&v: ~[const T], n: uint, initval: T) { reserve_at_least(v, len(v) + n); let mut i: uint = 0u; @@ -549,7 +547,7 @@ Function `init_op` is called `n` times with the values [0..`n`) * init_op - A function to call to retreive each appended element's value "] -fn grow_fn<T>(&v: [const T]/~, n: uint, op: init_op<T>) { +fn grow_fn<T>(&v: ~[const T], n: uint, op: init_op<T>) { reserve_at_least(v, len(v) + n); let mut i: uint = 0u; while i < n { push(v, op(i)); i += 1u; } @@ -564,7 +562,7 @@ of the vector, expands the vector by replicating `initval` to fill the intervening space. "] #[inline(always)] -fn grow_set<T: copy>(&v: [mut T]/~, index: uint, initval: T, val: T) { +fn grow_set<T: copy>(&v: ~[mut T], index: uint, initval: T, val: T) { if index >= len(v) { grow(v, index - len(v) + 1u, initval); } v[index] = val; } @@ -575,20 +573,20 @@ fn grow_set<T: copy>(&v: [mut T]/~, index: uint, initval: T, val: T) { #[doc = " Apply a function to each element of a vector and return the results "] -pure fn map<T, U>(v: [T]/&, f: fn(T) -> U) -> [U]/~ { - let mut result = []/~; +pure fn map<T, U>(v: &[T], f: fn(T) -> U) -> ~[U] { + let mut result = ~[]; unchecked{reserve(result, len(v));} - for each(v) {|elem| unsafe { push(result, f(elem)); } } + for each(v) |elem| { unsafe { push(result, f(elem)); } } ret result; } #[doc = " Apply a function to each element of a vector and return the results "] -pure fn mapi<T, U>(v: [T]/&, f: fn(uint, T) -> U) -> [U]/~ { - let mut result = []/~; +pure fn mapi<T, U>(v: &[T], f: fn(uint, T) -> U) -> ~[U] { + let mut result = ~[]; unchecked{reserve(result, len(v));} - for eachi(v) {|i, elem| unsafe { push(result, f(i, elem)); } } + for eachi(v) |i, elem| { unsafe { push(result, f(i, elem)); } } ret result; } @@ -596,20 +594,20 @@ pure fn mapi<T, U>(v: [T]/&, f: fn(uint, T) -> U) -> [U]/~ { Apply a function to each element of a vector and return a concatenation of each result vector "] -pure fn flat_map<T, U>(v: [T]/&, f: fn(T) -> [U]/~) -> [U]/~ { - let mut result = []/~; - for each(v) {|elem| unchecked{ push_all_move(result, f(elem)); } } +pure fn flat_map<T, U>(v: &[T], f: fn(T) -> ~[U]) -> ~[U] { + let mut result = ~[]; + for each(v) |elem| { unchecked{ push_all_move(result, f(elem)); } } ret result; } #[doc = " Apply a function to each pair of elements and return the results "] -pure fn map2<T: copy, U: copy, V>(v0: [T]/&, v1: [U]/&, - f: fn(T, U) -> V) -> [V]/~ { +pure fn map2<T: copy, U: copy, V>(v0: &[T], v1: &[U], + f: fn(T, U) -> V) -> ~[V] { let v0_len = len(v0); if v0_len != len(v1) { fail; } - let mut u: [V]/~ = []/~; + let mut u: ~[V] = ~[]; let mut i = 0u; while i < v0_len { unsafe { push(u, f(copy v0[i], copy v1[i])) }; @@ -624,10 +622,10 @@ Apply a function to each element of a vector and return the results If function `f` returns `none` then that element is excluded from the resulting vector. "] -pure fn filter_map<T, U: copy>(v: [T]/&, f: fn(T) -> option<U>) - -> [U]/~ { - let mut result = []/~; - for each(v) {|elem| +pure fn filter_map<T, U: copy>(v: &[T], f: fn(T) -> option<U>) + -> ~[U] { + let mut result = ~[]; + for each(v) |elem| { alt f(elem) { none {/* no-op */ } some(result_elem) { unsafe { push(result, result_elem); } } @@ -643,9 +641,9 @@ holds. Apply function `f` to each element of `v` and return a vector containing only those elements for which `f` returned true. "] -pure fn filter<T: copy>(v: [T]/&, f: fn(T) -> bool) -> [T]/~ { - let mut result = []/~; - for each(v) {|elem| +pure fn filter<T: copy>(v: &[T], f: fn(T) -> bool) -> ~[T] { + let mut result = ~[]; + for each(v) |elem| { if f(elem) { unsafe { push(result, elem); } } } ret result; @@ -656,19 +654,19 @@ Concatenate a vector of vectors. Flattens a vector of vectors of T into a single vector of T. "] -pure fn concat<T: copy>(v: [[T]/~]/&) -> [T]/~ { - let mut r = []/~; - for each(v) {|inner| unsafe { push_all(r, inner); } } +pure fn concat<T: copy>(v: &[[T]/~]) -> ~[T] { + let mut r = ~[]; + for each(v) |inner| { unsafe { push_all(r, inner); } } ret r; } #[doc = " Concatenate a vector of vectors, placing a given separator between each "] -pure fn connect<T: copy>(v: [[T]/~]/&, sep: T) -> [T]/~ { - let mut r: [T]/~ = []/~; +pure fn connect<T: copy>(v: &[[T]/~], sep: T) -> ~[T] { + let mut r: ~[T] = ~[]; let mut first = true; - for each(v) {|inner| + for each(v) |inner| { if first { first = false; } else { unsafe { push(r, sep); } } unchecked { push_all(r, inner) }; } @@ -676,18 +674,18 @@ pure fn connect<T: copy>(v: [[T]/~]/&, sep: T) -> [T]/~ { } #[doc = "Reduce a vector from left to right"] -pure fn foldl<T: copy, U>(z: T, v: [U]/&, p: fn(T, U) -> T) -> T { +pure fn foldl<T: copy, U>(z: T, v: &[U], p: fn(T, U) -> T) -> T { let mut accum = z; - iter(v) { |elt| + do iter(v) |elt| { accum = p(accum, elt); } ret accum; } #[doc = "Reduce a vector from right to left"] -pure fn foldr<T, U: copy>(v: [T]/&, z: U, p: fn(T, U) -> U) -> U { +pure fn foldr<T, U: copy>(v: &[T], z: U, p: fn(T, U) -> U) -> U { let mut accum = z; - riter(v) { |elt| + do riter(v) |elt| { accum = p(elt, accum); } ret accum; @@ -698,8 +696,8 @@ Return true if a predicate matches any elements If the vector contains no elements then false is returned. "] -pure fn any<T>(v: [T]/&, f: fn(T) -> bool) -> bool { - for each(v) {|elem| if f(elem) { ret true; } } +pure fn any<T>(v: &[T], f: fn(T) -> bool) -> bool { + for each(v) |elem| { if f(elem) { ret true; } } ret false; } @@ -708,7 +706,7 @@ Return true if a predicate matches any elements in both vectors. If the vectors contains no elements then false is returned. "] -pure fn any2<T, U>(v0: [T]/&, v1: [U]/&, +pure fn any2<T, U>(v0: &[T], v1: &[U], f: fn(T, U) -> bool) -> bool { let v0_len = len(v0); let v1_len = len(v1); @@ -725,8 +723,8 @@ Return true if a predicate matches all elements If the vector contains no elements then true is returned. "] -pure fn all<T>(v: [T]/&, f: fn(T) -> bool) -> bool { - for each(v) {|elem| if !f(elem) { ret false; } } +pure fn all<T>(v: &[T], f: fn(T) -> bool) -> bool { + for each(v) |elem| { if !f(elem) { ret false; } } ret true; } @@ -735,8 +733,8 @@ Return true if a predicate matches all elements If the vector contains no elements then true is returned. "] -pure fn alli<T>(v: [T]/&, f: fn(uint, T) -> bool) -> bool { - for eachi(v) {|i, elem| if !f(i, elem) { ret false; } } +pure fn alli<T>(v: &[T], f: fn(uint, T) -> bool) -> bool { + for eachi(v) |i, elem| { if !f(i, elem) { ret false; } } ret true; } @@ -745,7 +743,7 @@ Return true if a predicate matches all elements in both vectors. If the vectors are not the same size then false is returned. "] -pure fn all2<T, U>(v0: [T]/&, v1: [U]/&, +pure fn all2<T, U>(v0: &[T], v1: &[U], f: fn(T, U) -> bool) -> bool { let v0_len = len(v0); if v0_len != len(v1) { ret false; } @@ -755,15 +753,15 @@ pure fn all2<T, U>(v0: [T]/&, v1: [U]/&, } #[doc = "Return true if a vector contains an element with the given value"] -pure fn contains<T>(v: [T]/&, x: T) -> bool { - for each(v) {|elt| if x == elt { ret true; } } +pure fn contains<T>(v: &[T], x: T) -> bool { + for each(v) |elt| { if x == elt { ret true; } } ret false; } #[doc = "Returns the number of elements that are equal to a given value"] -pure fn count<T>(v: [T]/&, x: T) -> uint { +pure fn count<T>(v: &[T], x: T) -> uint { let mut cnt = 0u; - for each(v) {|elt| if x == elt { cnt += 1u; } } + for each(v) |elt| { if x == elt { cnt += 1u; } } ret cnt; } @@ -774,7 +772,7 @@ Apply function `f` to each element of `v`, starting from the first. When function `f` returns true then an option containing the element is returned. If `f` matches no elements then none is returned. "] -pure fn find<T: copy>(v: [T]/&, f: fn(T) -> bool) -> option<T> { +pure fn find<T: copy>(v: &[T], f: fn(T) -> bool) -> option<T> { find_between(v, 0u, len(v), f) } @@ -785,9 +783,9 @@ Apply function `f` to each element of `v` within the range [`start`, `end`). When function `f` returns true then an option containing the element is returned. If `f` matches no elements then none is returned. "] -pure fn find_between<T: copy>(v: [T]/&, start: uint, end: uint, +pure fn find_between<T: copy>(v: &[T], start: uint, end: uint, f: fn(T) -> bool) -> option<T> { - option::map(position_between(v, start, end, f)) { |i| v[i] } + option::map(position_between(v, start, end, f), |i| v[i]) } #[doc = " @@ -797,7 +795,7 @@ Apply function `f` to each element of `v` in reverse order. When function `f` returns true then an option containing the element is returned. If `f` matches no elements then none is returned. "] -pure fn rfind<T: copy>(v: [T]/&, f: fn(T) -> bool) -> option<T> { +pure fn rfind<T: copy>(v: &[T], f: fn(T) -> bool) -> option<T> { rfind_between(v, 0u, len(v), f) } @@ -808,14 +806,14 @@ Apply function `f` to each element of `v` in reverse order within the range [`start`, `end`). When function `f` returns true then an option containing the element is returned. If `f` matches no elements then none is returned. "] -pure fn rfind_between<T: copy>(v: [T]/&, start: uint, end: uint, +pure fn rfind_between<T: copy>(v: &[T], start: uint, end: uint, f: fn(T) -> bool) -> option<T> { - option::map(rposition_between(v, start, end, f)) { |i| v[i] } + option::map(rposition_between(v, start, end, f), |i| v[i]) } #[doc = "Find the first index containing a matching value"] -pure fn position_elem<T>(v: [T]/&, x: T) -> option<uint> { - position(v) { |y| x == y } +pure fn position_elem<T>(v: &[T], x: T) -> option<uint> { + position(v, |y| x == y) } #[doc = " @@ -825,7 +823,7 @@ Apply function `f` to each element of `v`. When function `f` returns true then an option containing the index is returned. If `f` matches no elements then none is returned. "] -pure fn position<T>(v: [T]/&, f: fn(T) -> bool) -> option<uint> { +pure fn position<T>(v: &[T], f: fn(T) -> bool) -> option<uint> { position_between(v, 0u, len(v), f) } @@ -836,7 +834,7 @@ Apply function `f` to each element of `v` between the range [`start`, `end`). When function `f` returns true then an option containing the index is returned. If `f` matches no elements then none is returned. "] -pure fn position_between<T>(v: [T]/&, start: uint, end: uint, +pure fn position_between<T>(v: &[T], start: uint, end: uint, f: fn(T) -> bool) -> option<uint> { assert start <= end; assert end <= len(v); @@ -846,8 +844,8 @@ pure fn position_between<T>(v: [T]/&, start: uint, end: uint, } #[doc = "Find the last index containing a matching value"] -pure fn rposition_elem<T>(v: [T]/&, x: T) -> option<uint> { - rposition(v) { |y| x == y } +pure fn rposition_elem<T>(v: &[T], x: T) -> option<uint> { + rposition(v, |y| x == y) } #[doc = " @@ -857,7 +855,7 @@ Apply function `f` to each element of `v` in reverse order. When function `f` returns true then an option containing the index is returned. If `f` matches no elements then none is returned. "] -pure fn rposition<T>(v: [T]/&, f: fn(T) -> bool) -> option<uint> { +pure fn rposition<T>(v: &[T], f: fn(T) -> bool) -> option<uint> { rposition_between(v, 0u, len(v), f) } @@ -868,7 +866,7 @@ Apply function `f` to each element of `v` in reverse order between the range [`start`, `end`). When function `f` returns true then an option containing the index is returned. If `f` matches no elements then none is returned. "] -pure fn rposition_between<T>(v: [T]/&, start: uint, end: uint, +pure fn rposition_between<T>(v: &[T], start: uint, end: uint, f: fn(T) -> bool) -> option<uint> { assert start <= end; assert end <= len(v); @@ -892,9 +890,9 @@ vector contains the first element of the i-th tuple of the input vector, and the i-th element of the second vector contains the second element of the i-th tuple of the input vector. "] -pure fn unzip<T: copy, U: copy>(v: [(T, U)]/&) -> ([T]/~, [U]/~) { - let mut as = []/~, bs = []/~; - for each(v) {|p| +pure fn unzip<T: copy, U: copy>(v: &[(T, U)]) -> (~[T], ~[U]) { + let mut as = ~[], bs = ~[]; + for each(v) |p| { let (a, b) = p; unchecked { vec::push(as, a); @@ -910,8 +908,8 @@ Convert two vectors to a vector of pairs Returns a vector of tuples, where the i-th tuple contains contains the i-th elements from each of the input vectors. "] -pure fn zip<T: copy, U: copy>(v: [const T]/&, u: [const U]/&) -> [(T, U)]/~ { - let mut zipped = []/~; +pure fn zip<T: copy, U: copy>(v: &[const T], u: &[const U]) -> ~[(T, U)] { + let mut zipped = ~[]; let sz = len(v); let mut i = 0u; assert sz == len(u); @@ -928,12 +926,12 @@ Swaps two elements in a vector * a - The index of the first element * b - The index of the second element "] -fn swap<T>(&&v: [mut T]/~, a: uint, b: uint) { +fn swap<T>(&&v: ~[mut T], a: uint, b: uint) { v[a] <-> v[b]; } #[doc = "Reverse the order of elements in a vector, in place"] -fn reverse<T>(v: [mut T]/~) { +fn reverse<T>(v: ~[mut T]) { let mut i: uint = 0u; let ln = len::<T>(v); while i < ln / 2u { v[i] <-> v[ln - i - 1u]; i += 1u; } @@ -941,8 +939,8 @@ fn reverse<T>(v: [mut T]/~) { #[doc = "Returns a vector with the order of elements reversed"] -pure fn reversed<T: copy>(v: [const T]/&) -> [T]/~ { - let mut rs: [T]/~ = []/~; +pure fn reversed<T: copy>(v: &[const T]) -> ~[T] { + let mut rs: ~[T] = ~[]; let mut i = len::<T>(v); if i == 0u { ret rs; } else { i -= 1u; } unchecked { @@ -959,7 +957,7 @@ Iterates over slice `v` and, for each element, calls function `f` with the element's value. "] #[inline(always)] -pure fn iter<T>(v: [T]/&, f: fn(T)) { +pure fn iter<T>(v: &[T], f: fn(T)) { iter_between(v, 0u, vec::len(v), f) } @@ -973,8 +971,8 @@ element's value. */ #[inline(always)] -pure fn iter_between<T>(v: [T]/&, start: uint, end: uint, f: fn(T)) { - unpack_slice(v) { |base_ptr, len| +pure fn iter_between<T>(v: &[T], start: uint, end: uint, f: fn(T)) { + do unpack_slice(v) |base_ptr, len| { assert start <= end; assert end <= len; unsafe { @@ -995,8 +993,8 @@ Iterates over a vector, with option to break Return true to continue, false to break. "] #[inline(always)] -pure fn each<T>(v: [const T]/&, f: fn(T) -> bool) { - vec::unpack_slice(v) {|p, n| +pure fn each<T>(v: &[const T], f: fn(T) -> bool) { + do vec::unpack_slice(v) |p, n| { let mut n = n; let mut p = p; while n > 0u { @@ -1015,8 +1013,8 @@ Iterates over a vector's elements and indices Return true to continue, false to break. "] #[inline(always)] -pure fn eachi<T>(v: [const T]/&, f: fn(uint, T) -> bool) { - vec::unpack_slice(v) {|p, n| +pure fn eachi<T>(v: &[const T], f: fn(uint, T) -> bool) { + do vec::unpack_slice(v) |p, n| { let mut i = 0u; let mut p = p; while i < n { @@ -1037,9 +1035,9 @@ Iterates over two vectors simultaneously Both vectors must have the same length "] #[inline] -fn iter2<U, T>(v1: [U]/&, v2: [T]/&, f: fn(U, T)) { +fn iter2<U, T>(v1: &[U], v2: &[T], f: fn(U, T)) { assert len(v1) == len(v2); - for uint::range(0u, len(v1)) {|i| + for uint::range(0u, len(v1)) |i| { f(v1[i], v2[i]) } } @@ -1051,7 +1049,7 @@ Iterates over vector `v` and, for each element, calls function `f` with the element's value and index. "] #[inline(always)] -pure fn iteri<T>(v: [T]/&, f: fn(uint, T)) { +pure fn iteri<T>(v: &[T], f: fn(uint, T)) { let mut i = 0u; let l = len(v); while i < l { f(i, v[i]); i += 1u; } @@ -1063,8 +1061,8 @@ Iterates over a vector in reverse Iterates over vector `v` and, for each element, calls function `f` with the element's value. "] -pure fn riter<T>(v: [T]/&, f: fn(T)) { - riteri(v) { |_i, v| f(v) } +pure fn riter<T>(v: &[T], f: fn(T)) { + riteri(v, |_i, v| f(v)) } #[doc =" @@ -1073,7 +1071,7 @@ Iterates over a vector's elements and indexes in reverse Iterates over vector `v` and, for each element, calls function `f` with the element's value and index. "] -pure fn riteri<T>(v: [T]/&, f: fn(uint, T)) { +pure fn riteri<T>(v: &[T], f: fn(uint, T)) { let mut i = len(v); while 0u < i { i -= 1u; @@ -1091,10 +1089,10 @@ lexicographically sorted). The total number of permutations produced is `len(v)!`. If `v` contains repeated elements, then some permutations are repeated. "] -pure fn permute<T: copy>(v: [T]/&, put: fn([T]/~)) { +pure fn permute<T: copy>(v: &[T], put: fn(~[T])) { let ln = len(v); if ln == 0u { - put([]/~); + put(~[]); } else { let mut i = 0u; while i < ln { @@ -1102,19 +1100,19 @@ pure fn permute<T: copy>(v: [T]/&, put: fn([T]/~)) { let mut rest = slice(v, 0u, i); unchecked { push_all(rest, view(v, i+1u, ln)); - permute(rest) {|permutation| - put(append([elt]/~, permutation)) - } + permute(rest, |permutation| { + put(append(~[elt], permutation)) + }) } i += 1u; } } } -pure fn windowed<TT: copy>(nn: uint, xx: [TT]/&) -> [[TT]/~]/~ { - let mut ww = []/~; +pure fn windowed<TT: copy>(nn: uint, xx: &[TT]) -> ~[~[TT]] { + let mut ww = ~[]; assert 1u <= nn; - vec::iteri (xx, {|ii, _x| + vec::iteri (xx, |ii, _x| { let len = vec::len(xx); if ii+nn <= len unchecked { vec::push(ww, vec::slice(xx, ii, ii+nn)); @@ -1129,19 +1127,19 @@ Work with the buffer of a vector. Allows for unsafe manipulation of vector contents, which is useful for native interop. "] -fn as_buf<E,T>(v: [E]/&, f: fn(*E) -> T) -> T { - unpack_slice(v) { |buf, _len| f(buf) } +fn as_buf<E,T>(v: &[E], f: fn(*E) -> T) -> T { + unpack_slice(v, |buf, _len| f(buf)) } -fn as_mut_buf<E,T>(v: [mut E]/&, f: fn(*mut E) -> T) -> T { - unpack_mut_slice(v) { |buf, _len| f(buf) } +fn as_mut_buf<E,T>(v: &[mut E], f: fn(*mut E) -> T) -> T { + unpack_mut_slice(v, |buf, _len| f(buf)) } #[doc = " Work with the buffer and length of a slice. "] #[inline(always)] -pure fn unpack_slice<T,U>(s: [const T]/&, +pure fn unpack_slice<T,U>(s: &[const T], f: fn(*T, uint) -> U) -> U { unsafe { let v : *(*T,uint) = ::unsafe::reinterpret_cast(ptr::addr_of(s)); @@ -1154,7 +1152,7 @@ pure fn unpack_slice<T,U>(s: [const T]/&, Work with the buffer and length of a slice. "] #[inline(always)] -pure fn unpack_const_slice<T,U>(s: [const T]/&, +pure fn unpack_const_slice<T,U>(s: &[const T], f: fn(*const T, uint) -> U) -> U { unsafe { let v : *(*const T,uint) = @@ -1168,7 +1166,7 @@ pure fn unpack_const_slice<T,U>(s: [const T]/&, Work with the buffer and length of a slice. "] #[inline(always)] -pure fn unpack_mut_slice<T,U>(s: [mut T]/&, +pure fn unpack_mut_slice<T,U>(s: &[mut T], f: fn(*mut T, uint) -> U) -> U { unsafe { let v : *(*const T,uint) = @@ -1178,22 +1176,22 @@ pure fn unpack_mut_slice<T,U>(s: [mut T]/&, } } -impl extensions<T: copy> for [T]/~ { +impl extensions<T: copy> for ~[T] { #[inline(always)] - pure fn +(rhs: [T]/&) -> [T]/~ { + pure fn +(rhs: &[T]) -> ~[T] { append(self, rhs) } } -impl extensions<T: copy> for [mut T]/~ { +impl extensions<T: copy> for ~[mut T] { #[inline(always)] - pure fn +(rhs: [mut T]/&) -> [mut T]/~ { + pure fn +(rhs: &[mut T]) -> ~[mut T] { append_mut(self, rhs) } } #[doc = "Extension methods for vectors"] -impl extensions/&<T> for [const T]/& { +impl extensions/&<T> for &[const T] { #[doc = "Returns true if a vector contains no elements"] #[inline] pure fn is_empty() -> bool { is_empty(self) } @@ -1206,13 +1204,13 @@ impl extensions/&<T> for [const T]/& { } #[doc = "Extension methods for vectors"] -impl extensions/&<T: copy> for [const T]/& { +impl extensions/&<T: copy> for &[const T] { #[doc = "Returns the first element of a vector"] #[inline] pure fn head() -> T { head(self) } #[doc = "Returns all but the last elemnt of a vector"] #[inline] - pure fn init() -> [T]/~ { init(self) } + pure fn init() -> ~[T] { init(self) } #[doc = " Returns the last element of a `v`, failing if the vector is empty. "] @@ -1220,14 +1218,14 @@ impl extensions/&<T: copy> for [const T]/& { pure fn last() -> T { last(self) } #[doc = "Returns a copy of the elements from [`start`..`end`) from `v`."] #[inline] - pure fn slice(start: uint, end: uint) -> [T]/~ { slice(self, start, end) } + pure fn slice(start: uint, end: uint) -> ~[T] { slice(self, start, end) } #[doc = "Returns all but the first element of a vector"] #[inline] - pure fn tail() -> [T]/~ { tail(self) } + pure fn tail() -> ~[T] { tail(self) } } #[doc = "Extension methods for vectors"] -impl extensions/&<T> for [T]/& { +impl extensions/&<T> for &[T] { #[doc = "Reduce a vector from right to left"] #[inline] pure fn foldr<U: copy>(z: U, p: fn(T, U) -> U) -> U { foldr(self, z, p) } @@ -1291,12 +1289,12 @@ impl extensions/&<T> for [T]/& { Apply a function to each element of a vector and return the results "] #[inline] - pure fn map<U>(f: fn(T) -> U) -> [U]/~ { map(self, f) } + pure fn map<U>(f: fn(T) -> U) -> ~[U] { map(self, f) } #[doc = " Apply a function to the index and value of each element in the vector and return the results "] - pure fn mapi<U>(f: fn(uint, T) -> U) -> [U]/~ { + pure fn mapi<U>(f: fn(uint, T) -> U) -> ~[U] { mapi(self, f) } #[doc = "Returns true if the function returns true for all elements. @@ -1310,7 +1308,7 @@ impl extensions/&<T> for [T]/& { of each result vector "] #[inline] - pure fn flat_map<U>(f: fn(T) -> [U]/~) -> [U]/~ { flat_map(self, f) } + pure fn flat_map<U>(f: fn(T) -> ~[U]) -> ~[U] { flat_map(self, f) } #[doc = " Apply a function to each element of a vector and return the results @@ -1318,13 +1316,13 @@ impl extensions/&<T> for [T]/& { the resulting vector. "] #[inline] - pure fn filter_map<U: copy>(f: fn(T) -> option<U>) -> [U]/~ { + pure fn filter_map<U: copy>(f: fn(T) -> option<U>) -> ~[U] { filter_map(self, f) } } #[doc = "Extension methods for vectors"] -impl extensions/&<T: copy> for [T]/& { +impl extensions/&<T: copy> for &[T] { #[doc = " Construct a new vector from the elements of a vector for which some predicate holds. @@ -1333,7 +1331,7 @@ impl extensions/&<T: copy> for [T]/& { only those elements for which `f` returned true. "] #[inline] - pure fn filter(f: fn(T) -> bool) -> [T]/~ { filter(self, f) } + pure fn filter(f: fn(T) -> bool) -> ~[T] { filter(self, f) } #[doc = " Search for the first element that matches a given predicate @@ -1374,7 +1372,7 @@ mod unsafe { * elts - The number of elements in the buffer "] #[inline(always)] - unsafe fn from_buf<T>(ptr: *T, elts: uint) -> [T]/~ { + unsafe fn from_buf<T>(ptr: *T, elts: uint) -> ~[T] { ret ::unsafe::reinterpret_cast( rustrt::vec_from_buf_shared(sys::get_type_desc::<T>(), ptr as *(), @@ -1389,7 +1387,7 @@ mod unsafe { the vector is actually the specified size. "] #[inline(always)] - unsafe fn set_len<T>(&&v: [const T]/~, new_len: uint) { + unsafe fn set_len<T>(&&v: ~[const T], new_len: uint) { let repr: **vec_repr = ::unsafe::reinterpret_cast(addr_of(v)); (**repr).fill = new_len * sys::size_of::<T>(); } @@ -1404,7 +1402,7 @@ mod unsafe { would also make any pointers to it invalid. "] #[inline(always)] - unsafe fn to_ptr<T>(v: [const T]/~) -> *T { + unsafe fn to_ptr<T>(v: ~[const T]) -> *T { let repr: **vec_repr = ::unsafe::reinterpret_cast(addr_of(v)); ret ::unsafe::reinterpret_cast(addr_of((**repr).data)); } @@ -1414,9 +1412,9 @@ mod unsafe { Form a slice from a pointer and length (as a number of units, not bytes). "] #[inline(always)] - unsafe fn form_slice<T,U>(p: *T, len: uint, f: fn([T]/&) -> U) -> U { + unsafe fn form_slice<T,U>(p: *T, len: uint, f: fn(&& &[T]) -> U) -> U { let pair = (p, len * sys::size_of::<T>()); - let v : *([T]/&blk) = + let v : *(&blk.[T]) = ::unsafe::reinterpret_cast(ptr::addr_of(pair)); f(*v) } @@ -1429,7 +1427,7 @@ mod u8 { export hash; #[doc = "Bytewise string comparison"] - pure fn cmp(&&a: [u8]/~, &&b: [u8]/~) -> int { + pure fn cmp(&&a: ~[u8], &&b: ~[u8]) -> int { let a_len = len(a); let b_len = len(b); let n = uint::min(a_len, b_len) as libc::size_t; @@ -1450,31 +1448,31 @@ mod u8 { } #[doc = "Bytewise less than or equal"] - pure fn lt(&&a: [u8]/~, &&b: [u8]/~) -> bool { cmp(a, b) < 0 } + pure fn lt(&&a: ~[u8], &&b: ~[u8]) -> bool { cmp(a, b) < 0 } #[doc = "Bytewise less than or equal"] - pure fn le(&&a: [u8]/~, &&b: [u8]/~) -> bool { cmp(a, b) <= 0 } + pure fn le(&&a: ~[u8], &&b: ~[u8]) -> bool { cmp(a, b) <= 0 } #[doc = "Bytewise equality"] - pure fn eq(&&a: [u8]/~, &&b: [u8]/~) -> bool { unsafe { cmp(a, b) == 0 } } + pure fn eq(&&a: ~[u8], &&b: ~[u8]) -> bool { unsafe { cmp(a, b) == 0 } } #[doc = "Bytewise inequality"] - pure fn ne(&&a: [u8]/~, &&b: [u8]/~) -> bool { unsafe { cmp(a, b) != 0 } } + pure fn ne(&&a: ~[u8], &&b: ~[u8]) -> bool { unsafe { cmp(a, b) != 0 } } #[doc ="Bytewise greater than or equal"] - pure fn ge(&&a: [u8]/~, &&b: [u8]/~) -> bool { cmp(a, b) >= 0 } + pure fn ge(&&a: ~[u8], &&b: ~[u8]) -> bool { cmp(a, b) >= 0 } #[doc = "Bytewise greater than"] - pure fn gt(&&a: [u8]/~, &&b: [u8]/~) -> bool { cmp(a, b) > 0 } + pure fn gt(&&a: ~[u8], &&b: ~[u8]) -> bool { cmp(a, b) > 0 } #[doc = "String hash function"] - fn hash(&&s: [u8]/~) -> uint { + fn hash(&&s: ~[u8]) -> uint { /* Seems to have been tragically copy/pasted from str.rs, or vice versa. But I couldn't figure out how to abstract it out. -- tjc */ let mut u: uint = 5381u; - vec::iter(s, { |c| u *= 33u; u += c as uint; }); + vec::iter(s, |c| {u *= 33u; u += c as uint;}); ret u; } } @@ -1484,7 +1482,7 @@ mod u8 { // // This cannot be used with iter-trait.rs because of the region pointer // required in the slice. -impl extensions/&<A> of iter::base_iter<A> for [const A]/& { +impl extensions/&<A> of iter::base_iter<A> for &[const A] { fn each(blk: fn(A) -> bool) { each(self, blk) } fn size_hint() -> option<uint> { some(len(self)) } fn eachi(blk: fn(uint, A) -> bool) { iter::eachi(self, blk) } @@ -1496,15 +1494,15 @@ impl extensions/&<A> of iter::base_iter<A> for [const A]/& { fn contains(x: A) -> bool { iter::contains(self, x) } fn count(x: A) -> uint { iter::count(self, x) } } -impl extensions/&<A:copy> for [const A]/& { - fn filter_to_vec(pred: fn(A) -> bool) -> [A]/~ { +impl extensions/&<A:copy> for &[const A] { + fn filter_to_vec(pred: fn(A) -> bool) -> ~[A] { iter::filter_to_vec(self, pred) } - fn map_to_vec<B>(op: fn(A) -> B) -> [B]/~ { iter::map_to_vec(self, op) } - fn to_vec() -> [A]/~ { iter::to_vec(self) } + fn map_to_vec<B>(op: fn(A) -> B) -> ~[B] { iter::map_to_vec(self, op) } + fn to_vec() -> ~[A] { iter::to_vec(self) } // FIXME--bug in resolve prevents this from working (#2611) - // fn flat_map_to_vec<B:copy,IB:base_iter<B>>(op: fn(A) -> IB) -> [B]/~ { + // fn flat_map_to_vec<B:copy,IB:base_iter<B>>(op: fn(A) -> IB) -> ~[B] { // iter::flat_map_to_vec(self, op) // } @@ -1536,7 +1534,7 @@ mod tests { fn test_unsafe_ptrs() { unsafe { // Test on-stack copy-from-buf. - let a = [1, 2, 3]/~; + let a = ~[1, 2, 3]; let mut ptr = unsafe::to_ptr(a); let b = unsafe::from_buf(ptr, 3u); assert (len(b) == 3u); @@ -1545,7 +1543,7 @@ mod tests { assert (b[2] == 3); // Test on-heap copy-from-buf. - let c = [1, 2, 3, 4, 5]/~; + let c = ~[1, 2, 3, 4, 5]; ptr = unsafe::to_ptr(c); let d = unsafe::from_buf(ptr, 5u); assert (len(d) == 5u); @@ -1596,58 +1594,58 @@ mod tests { #[test] fn test_is_empty() { - assert (is_empty::<int>([]/~)); - assert (!is_empty([0]/~)); + assert (is_empty::<int>(~[])); + assert (!is_empty(~[0])); } #[test] fn test_is_not_empty() { - assert (is_not_empty([0]/~)); - assert (!is_not_empty::<int>([]/~)); + assert (is_not_empty(~[0])); + assert (!is_not_empty::<int>(~[])); } #[test] fn test_head() { - let a = [11, 12]/~; + let a = ~[11, 12]; assert (head(a) == 11); } #[test] fn test_tail() { - let mut a = [11]/~; - assert (tail(a) == []/~); + let mut a = ~[11]; + assert (tail(a) == ~[]); - a = [11, 12]/~; - assert (tail(a) == [12]/~); + a = ~[11, 12]; + assert (tail(a) == ~[12]); } #[test] fn test_last() { - let mut n = last_opt([]/~); + let mut n = last_opt(~[]); assert (n == none); - n = last_opt([1, 2, 3]/~); + n = last_opt(~[1, 2, 3]); assert (n == some(3)); - n = last_opt([1, 2, 3, 4, 5]/~); + n = last_opt(~[1, 2, 3, 4, 5]); assert (n == some(5)); } #[test] fn test_slice() { // Test on-stack -> on-stack slice. - let mut v = slice([1, 2, 3]/~, 1u, 3u); + let mut v = slice(~[1, 2, 3], 1u, 3u); assert (len(v) == 2u); assert (v[0] == 2); assert (v[1] == 3); // Test on-heap -> on-stack slice. - v = slice([1, 2, 3, 4, 5]/~, 0u, 3u); + v = slice(~[1, 2, 3, 4, 5], 0u, 3u); assert (len(v) == 3u); assert (v[0] == 1); assert (v[1] == 2); assert (v[2] == 3); // Test on-heap -> on-heap slice. - v = slice([1, 2, 3, 4, 5, 6]/~, 1u, 6u); + v = slice(~[1, 2, 3, 4, 5, 6], 1u, 6u); assert (len(v) == 5u); assert (v[0] == 2); assert (v[1] == 3); @@ -1659,7 +1657,7 @@ mod tests { #[test] fn test_pop() { // Test on-stack pop. - let mut v = [1, 2, 3]/~; + let mut v = ~[1, 2, 3]; let mut e = pop(v); assert (len(v) == 2u); assert (v[0] == 1); @@ -1667,7 +1665,7 @@ mod tests { assert (e == 3); // Test on-heap pop. - v = [1, 2, 3, 4, 5]/~; + v = ~[1, 2, 3, 4, 5]; e = pop(v); assert (len(v) == 4u); assert (v[0] == 1); @@ -1680,7 +1678,7 @@ mod tests { #[test] fn test_push() { // Test on-stack push(). - let mut v = []/~; + let mut v = ~[]; push(v, 1); assert (len(v) == 1u); assert (v[0] == 1); @@ -1695,7 +1693,7 @@ mod tests { #[test] fn test_grow() { // Test on-stack grow(). - let mut v = []/~; + let mut v = ~[]; grow(v, 2u, 1); assert (len(v) == 2u); assert (v[0] == 1); @@ -1713,7 +1711,7 @@ mod tests { #[test] fn test_grow_fn() { - let mut v = []/~; + let mut v = ~[]; grow_fn(v, 3u, square); assert (len(v) == 3u); assert (v[0] == 0u); @@ -1723,7 +1721,7 @@ mod tests { #[test] fn test_grow_set() { - let mut v = [mut 1, 2, 3]/~; + let mut v = ~[mut 1, 2, 3]; grow_set(v, 4u, 4, 5); assert (len(v) == 5u); assert (v[0] == 1); @@ -1736,7 +1734,7 @@ mod tests { #[test] fn test_map() { // Test on-stack map. - let mut v = [1u, 2u, 3u]/~; + let mut v = ~[1u, 2u, 3u]; let mut w = map(v, square_ref); assert (len(w) == 3u); assert (w[0] == 1u); @@ -1744,7 +1742,7 @@ mod tests { assert (w[2] == 9u); // Test on-heap map. - v = [1u, 2u, 3u, 4u, 5u]/~; + v = ~[1u, 2u, 3u, 4u, 5u]; w = map(v, square_ref); assert (len(w) == 5u); assert (w[0] == 1u); @@ -1758,8 +1756,8 @@ mod tests { fn test_map2() { fn times(&&x: int, &&y: int) -> int { ret x * y; } let f = times; - let v0 = [1, 2, 3, 4, 5]/~; - let v1 = [5, 4, 3, 2, 1]/~; + let v0 = ~[1, 2, 3, 4, 5]; + let v1 = ~[5, 4, 3, 2, 1]; let u = map2::<int, int, int>(v0, v1, f); let mut i = 0; while i < 5 { assert (v0[i] * v1[i] == u[i]); i += 1; } @@ -1768,14 +1766,14 @@ mod tests { #[test] fn test_filter_map() { // Test on-stack filter-map. - let mut v = [1u, 2u, 3u]/~; + let mut v = ~[1u, 2u, 3u]; let mut w = filter_map(v, square_if_odd); assert (len(w) == 2u); assert (w[0] == 1u); assert (w[1] == 9u); // Test on-heap filter-map. - v = [1u, 2u, 3u, 4u, 5u]/~; + v = ~[1u, 2u, 3u, 4u, 5u]; w = filter_map(v, square_if_odd); assert (len(w) == 3u); assert (w[0] == 1u); @@ -1788,32 +1786,32 @@ mod tests { } else { ret option::none::<int>; } } fn halve_for_sure(&&i: int) -> int { ret i / 2; } - let all_even: [int]/~ = [0, 2, 8, 6]/~; - let all_odd1: [int]/~ = [1, 7, 3]/~; - let all_odd2: [int]/~ = []/~; - let mix: [int]/~ = [9, 2, 6, 7, 1, 0, 0, 3]/~; - let mix_dest: [int]/~ = [1, 3, 0, 0]/~; + let all_even: ~[int] = ~[0, 2, 8, 6]; + let all_odd1: ~[int] = ~[1, 7, 3]; + let all_odd2: ~[int] = ~[]; + let mix: ~[int] = ~[9, 2, 6, 7, 1, 0, 0, 3]; + let mix_dest: ~[int] = ~[1, 3, 0, 0]; assert (filter_map(all_even, halve) == map(all_even, halve_for_sure)); - assert (filter_map(all_odd1, halve) == []/~); - assert (filter_map(all_odd2, halve) == []/~); + assert (filter_map(all_odd1, halve) == ~[]); + assert (filter_map(all_odd2, halve) == ~[]); assert (filter_map(mix, halve) == mix_dest); } #[test] fn test_filter() { - assert filter([1u, 2u, 3u]/~, is_odd) == [1u, 3u]/~; - assert filter([1u, 2u, 4u, 8u, 16u]/~, is_three) == []/~; + assert filter(~[1u, 2u, 3u], is_odd) == ~[1u, 3u]; + assert filter(~[1u, 2u, 4u, 8u, 16u], is_three) == ~[]; } #[test] fn test_foldl() { // Test on-stack fold. - let mut v = [1u, 2u, 3u]/~; + let mut v = ~[1u, 2u, 3u]; let mut sum = foldl(0u, v, add); assert (sum == 6u); // Test on-heap fold. - v = [1u, 2u, 3u, 4u, 5u]/~; + v = ~[1u, 2u, 3u, 4u, 5u]; sum = foldl(0u, v, add); assert (sum == 15u); } @@ -1823,7 +1821,7 @@ mod tests { fn sub(&&a: int, &&b: int) -> int { a - b } - let mut v = [1, 2, 3, 4]/~; + let mut v = ~[1, 2, 3, 4]; let sum = foldl(0, v, sub); assert sum == -10; } @@ -1833,7 +1831,7 @@ mod tests { fn sub(&&a: int, &&b: int) -> int { a - b } - let mut v = [1, 2, 3, 4]/~; + let mut v = ~[1, 2, 3, 4]; let sum = foldr(v, 0, sub); assert sum == -2; } @@ -1841,21 +1839,21 @@ mod tests { #[test] fn test_iter_empty() { let mut i = 0; - iter::<int>([]/~, { |_v| i += 1 }); + iter::<int>(~[], |_v| i += 1); assert i == 0; } #[test] fn test_iter_nonempty() { let mut i = 0; - iter([1, 2, 3]/~, { |v| i += v }); + iter(~[1, 2, 3], |v| i += v); assert i == 6; } #[test] fn test_iteri() { let mut i = 0; - iteri([1, 2, 3]/~, { |j, v| + iteri(~[1, 2, 3], |j, v| { if i == 0 { assert v == 1; } assert j + 1u == v as uint; i += v; @@ -1866,14 +1864,14 @@ mod tests { #[test] fn test_riter_empty() { let mut i = 0; - riter::<int>([]/~, { |_v| i += 1 }); + riter::<int>(~[], |_v| i += 1); assert i == 0; } #[test] fn test_riter_nonempty() { let mut i = 0; - riter([1, 2, 3]/~, { |v| + riter(~[1, 2, 3], |v| { if i == 0 { assert v == 3; } i += v }); @@ -1883,7 +1881,7 @@ mod tests { #[test] fn test_riteri() { let mut i = 0; - riteri([0, 1, 2]/~, { |j, v| + riteri(~[0, 1, 2], |j, v| { if i == 0 { assert v == 2; } assert j == v as uint; i += v; @@ -1893,57 +1891,57 @@ mod tests { #[test] fn test_permute() { - let mut results: [[int]/~]/~; + let mut results: ~[~[int]]; - results = []/~; - permute([]/~) {|v| vec::push(results, v); } - assert results == [[]/~]/~; + results = ~[]; + permute(~[], |v| vec::push(results, v)); + assert results == ~[~[]]; - results = []/~; - permute([7]/~) {|v| results += [v]/~; } - assert results == [[7]/~]/~; + results = ~[]; + permute(~[7], |v| results += ~[v]); + assert results == ~[~[7]]; - results = []/~; - permute([1,1]/~) {|v| results += [v]/~; } - assert results == [[1,1]/~,[1,1]/~]/~; + results = ~[]; + permute(~[1,1], |v| results += ~[v]); + assert results == ~[~[1,1],~[1,1]]; - results = []/~; - permute([5,2,0]/~) {|v| results += [v]/~; } + results = ~[]; + permute(~[5,2,0], |v| results += ~[v]); assert results == - [[5,2,0]/~,[5,0,2]/~,[2,5,0]/~,[2,0,5]/~,[0,5,2]/~,[0,2,5]/~]/~; + ~[~[5,2,0],~[5,0,2],~[2,5,0],~[2,0,5],~[0,5,2],~[0,2,5]]; } #[test] fn test_any_and_all() { - assert (any([1u, 2u, 3u]/~, is_three)); - assert (!any([0u, 1u, 2u]/~, is_three)); - assert (any([1u, 2u, 3u, 4u, 5u]/~, is_three)); - assert (!any([1u, 2u, 4u, 5u, 6u]/~, is_three)); + assert (any(~[1u, 2u, 3u], is_three)); + assert (!any(~[0u, 1u, 2u], is_three)); + assert (any(~[1u, 2u, 3u, 4u, 5u], is_three)); + assert (!any(~[1u, 2u, 4u, 5u, 6u], is_three)); - assert (all([3u, 3u, 3u]/~, is_three)); - assert (!all([3u, 3u, 2u]/~, is_three)); - assert (all([3u, 3u, 3u, 3u, 3u]/~, is_three)); - assert (!all([3u, 3u, 0u, 1u, 2u]/~, is_three)); + assert (all(~[3u, 3u, 3u], is_three)); + assert (!all(~[3u, 3u, 2u], is_three)); + assert (all(~[3u, 3u, 3u, 3u, 3u], is_three)); + assert (!all(~[3u, 3u, 0u, 1u, 2u], is_three)); } #[test] fn test_any2_and_all2() { - assert (any2([2u, 4u, 6u]/~, [2u, 4u, 6u]/~, is_equal)); - assert (any2([1u, 2u, 3u]/~, [4u, 5u, 3u]/~, is_equal)); - assert (!any2([1u, 2u, 3u]/~, [4u, 5u, 6u]/~, is_equal)); - assert (any2([2u, 4u, 6u]/~, [2u, 4u]/~, is_equal)); + assert (any2(~[2u, 4u, 6u], ~[2u, 4u, 6u], is_equal)); + assert (any2(~[1u, 2u, 3u], ~[4u, 5u, 3u], is_equal)); + assert (!any2(~[1u, 2u, 3u], ~[4u, 5u, 6u], is_equal)); + assert (any2(~[2u, 4u, 6u], ~[2u, 4u], is_equal)); - assert (all2([2u, 4u, 6u]/~, [2u, 4u, 6u]/~, is_equal)); - assert (!all2([1u, 2u, 3u]/~, [4u, 5u, 3u]/~, is_equal)); - assert (!all2([1u, 2u, 3u]/~, [4u, 5u, 6u]/~, is_equal)); - assert (!all2([2u, 4u, 6u]/~, [2u, 4u]/~, is_equal)); + assert (all2(~[2u, 4u, 6u], ~[2u, 4u, 6u], is_equal)); + assert (!all2(~[1u, 2u, 3u], ~[4u, 5u, 3u], is_equal)); + assert (!all2(~[1u, 2u, 3u], ~[4u, 5u, 6u], is_equal)); + assert (!all2(~[2u, 4u, 6u], ~[2u, 4u], is_equal)); } #[test] fn test_zip_unzip() { - let v1 = [1, 2, 3]/~; - let v2 = [4, 5, 6]/~; + let v1 = ~[1, 2, 3]; + let v2 = ~[4, 5, 6]; let z1 = zip(v1, v2); @@ -1960,9 +1958,9 @@ mod tests { #[test] fn test_position_elem() { - assert position_elem([]/~, 1) == none; + assert position_elem(~[], 1) == none; - let v1 = [1, 2, 3, 3, 2, 5]/~; + let v1 = ~[1, 2, 3, 3, 2, 5]; assert position_elem(v1, 1) == some(0u); assert position_elem(v1, 2) == some(1u); assert position_elem(v1, 5) == some(5u); @@ -1974,19 +1972,19 @@ mod tests { fn less_than_three(&&i: int) -> bool { ret i < 3; } fn is_eighteen(&&i: int) -> bool { ret i == 18; } - assert position([]/~, less_than_three) == none; + assert position(~[], less_than_three) == none; - let v1 = [5, 4, 3, 2, 1]/~; + let v1 = ~[5, 4, 3, 2, 1]; assert position(v1, less_than_three) == some(3u); assert position(v1, is_eighteen) == none; } #[test] fn test_position_between() { - assert position_between([]/~, 0u, 0u, f) == none; + assert position_between(~[], 0u, 0u, f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert position_between(v, 0u, 0u, f) == none; assert position_between(v, 0u, 1u, f) == none; @@ -2011,11 +2009,11 @@ mod tests { #[test] fn test_find() { - assert find([]/~, f) == none; + assert find(~[], f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } fn g(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'd' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert find(v, f) == some((1, 'b')); assert find(v, g) == none; @@ -2023,10 +2021,10 @@ mod tests { #[test] fn test_find_between() { - assert find_between([]/~, 0u, 0u, f) == none; + assert find_between(~[], 0u, 0u, f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert find_between(v, 0u, 0u, f) == none; assert find_between(v, 0u, 1u, f) == none; @@ -2051,11 +2049,11 @@ mod tests { #[test] fn test_rposition() { - assert find([]/~, f) == none; + assert find(~[], f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } fn g(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'd' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert position(v, f) == some(1u); assert position(v, g) == none; @@ -2063,10 +2061,10 @@ mod tests { #[test] fn test_rposition_between() { - assert rposition_between([]/~, 0u, 0u, f) == none; + assert rposition_between(~[], 0u, 0u, f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert rposition_between(v, 0u, 0u, f) == none; assert rposition_between(v, 0u, 1u, f) == none; @@ -2091,11 +2089,11 @@ mod tests { #[test] fn test_rfind() { - assert rfind([]/~, f) == none; + assert rfind(~[], f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } fn g(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'd' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert rfind(v, f) == some((3, 'b')); assert rfind(v, g) == none; @@ -2103,10 +2101,10 @@ mod tests { #[test] fn test_rfind_between() { - assert rfind_between([]/~, 0u, 0u, f) == none; + assert rfind_between(~[], 0u, 0u, f) == none; fn f(xy: (int, char)) -> bool { let (_x, y) = xy; y == 'b' } - let mut v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]/~; + let mut v = ~[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert rfind_between(v, 0u, 0u, f) == none; assert rfind_between(v, 0u, 1u, f) == none; @@ -2131,123 +2129,123 @@ mod tests { #[test] fn reverse_and_reversed() { - let v: [mut int]/~ = [mut 10, 20]/~; + let v: ~[mut int] = ~[mut 10, 20]; assert (v[0] == 10); assert (v[1] == 20); reverse(v); assert (v[0] == 20); assert (v[1] == 10); - let v2 = reversed::<int>([10, 20]/~); + let v2 = reversed::<int>(~[10, 20]); assert (v2[0] == 20); assert (v2[1] == 10); v[0] = 30; assert (v2[0] == 20); // Make sure they work with 0-length vectors too. - let v4 = reversed::<int>([]/~); - assert (v4 == []/~); - let v3: [mut int]/~ = [mut]/~; + let v4 = reversed::<int>(~[]); + assert (v4 == ~[]); + let v3: ~[mut int] = ~[mut]; reverse::<int>(v3); } #[test] fn reversed_mut() { - let v2 = reversed::<int>([mut 10, 20]/~); + let v2 = reversed::<int>(~[mut 10, 20]); assert (v2[0] == 20); assert (v2[1] == 10); } #[test] fn test_init() { - let v = init([1, 2, 3]/~); - assert v == [1, 2]/~; + let v = init(~[1, 2, 3]); + assert v == ~[1, 2]; } #[test] fn test_split() { fn f(&&x: int) -> bool { x == 3 } - assert split([]/~, f) == []/~; - assert split([1, 2]/~, f) == [[1, 2]/~]/~; - assert split([3, 1, 2]/~, f) == [[]/~, [1, 2]/~]/~; - assert split([1, 2, 3]/~, f) == [[1, 2]/~, []/~]/~; - assert split([1, 2, 3, 4, 3, 5]/~, f) == [[1, 2]/~, [4]/~, [5]/~]/~; + assert split(~[], f) == ~[]; + assert split(~[1, 2], f) == ~[~[1, 2]]; + assert split(~[3, 1, 2], f) == ~[~[], ~[1, 2]]; + assert split(~[1, 2, 3], f) == ~[~[1, 2], ~[]]; + assert split(~[1, 2, 3, 4, 3, 5], f) == ~[~[1, 2], ~[4], ~[5]]; } #[test] fn test_splitn() { fn f(&&x: int) -> bool { x == 3 } - assert splitn([]/~, 1u, f) == []/~; - assert splitn([1, 2]/~, 1u, f) == [[1, 2]/~]/~; - assert splitn([3, 1, 2]/~, 1u, f) == [[]/~, [1, 2]/~]/~; - assert splitn([1, 2, 3]/~, 1u, f) == [[1, 2]/~, []/~]/~; - assert splitn([1, 2, 3, 4, 3, 5]/~, 1u, f) == - [[1, 2]/~, [4, 3, 5]/~]/~; + assert splitn(~[], 1u, f) == ~[]; + assert splitn(~[1, 2], 1u, f) == ~[~[1, 2]]; + assert splitn(~[3, 1, 2], 1u, f) == ~[~[], ~[1, 2]]; + assert splitn(~[1, 2, 3], 1u, f) == ~[~[1, 2], ~[]]; + assert splitn(~[1, 2, 3, 4, 3, 5], 1u, f) == + ~[~[1, 2], ~[4, 3, 5]]; } #[test] fn test_rsplit() { fn f(&&x: int) -> bool { x == 3 } - assert rsplit([]/~, f) == []/~; - assert rsplit([1, 2]/~, f) == [[1, 2]/~]/~; - assert rsplit([1, 2, 3]/~, f) == [[1, 2]/~, []/~]/~; - assert rsplit([1, 2, 3, 4, 3, 5]/~, f) == [[1, 2]/~, [4]/~, [5]/~]/~; + assert rsplit(~[], f) == ~[]; + assert rsplit(~[1, 2], f) == ~[~[1, 2]]; + assert rsplit(~[1, 2, 3], f) == ~[~[1, 2], ~[]]; + assert rsplit(~[1, 2, 3, 4, 3, 5], f) == ~[~[1, 2], ~[4], ~[5]]; } #[test] fn test_rsplitn() { fn f(&&x: int) -> bool { x == 3 } - assert rsplitn([]/~, 1u, f) == []/~; - assert rsplitn([1, 2]/~, 1u, f) == [[1, 2]/~]/~; - assert rsplitn([1, 2, 3]/~, 1u, f) == [[1, 2]/~, []/~]/~; - assert rsplitn([1, 2, 3, 4, 3, 5]/~, 1u, f) == - [[1, 2, 3, 4]/~, [5]/~]/~; + assert rsplitn(~[], 1u, f) == ~[]; + assert rsplitn(~[1, 2], 1u, f) == ~[~[1, 2]]; + assert rsplitn(~[1, 2, 3], 1u, f) == ~[~[1, 2], ~[]]; + assert rsplitn(~[1, 2, 3, 4, 3, 5], 1u, f) == + ~[~[1, 2, 3, 4], ~[5]]; } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_init_empty() { - init::<int>([]/~); + init::<int>(~[]); } #[test] fn test_concat() { - assert concat([[1]/~, [2,3]/~]/~) == [1, 2, 3]/~; + assert concat(~[~[1], ~[2,3]]) == ~[1, 2, 3]; } #[test] fn test_connect() { - assert connect([]/~, 0) == []/~; - assert connect([[1]/~, [2, 3]/~]/~, 0) == [1, 0, 2, 3]/~; - assert connect([[1]/~, [2]/~, [3]/~]/~, 0) == [1, 0, 2, 0, 3]/~; + assert connect(~[], 0) == ~[]; + assert connect(~[~[1], ~[2, 3]], 0) == ~[1, 0, 2, 3]; + assert connect(~[~[1], ~[2], ~[3]], 0) == ~[1, 0, 2, 0, 3]; } #[test] fn test_windowed () { - assert [[1u,2u,3u]/~,[2u,3u,4u]/~,[3u,4u,5u]/~,[4u,5u,6u]/~]/~ - == windowed (3u, [1u,2u,3u,4u,5u,6u]/~); + assert ~[~[1u,2u,3u],~[2u,3u,4u],~[3u,4u,5u],~[4u,5u,6u]] + == windowed (3u, ~[1u,2u,3u,4u,5u,6u]); - assert [[1u,2u,3u,4u]/~,[2u,3u,4u,5u]/~,[3u,4u,5u,6u]/~]/~ - == windowed (4u, [1u,2u,3u,4u,5u,6u]/~); + assert ~[~[1u,2u,3u,4u],~[2u,3u,4u,5u],~[3u,4u,5u,6u]] + == windowed (4u, ~[1u,2u,3u,4u,5u,6u]); - assert []/~ == windowed (7u, [1u,2u,3u,4u,5u,6u]/~); + assert ~[] == windowed (7u, ~[1u,2u,3u,4u,5u,6u]); } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_windowed_() { - let _x = windowed (0u, [1u,2u,3u,4u,5u,6u]/~); + let _x = windowed (0u, ~[1u,2u,3u,4u,5u,6u]); } #[test] fn to_mut_no_copy() { unsafe { - let x = [1, 2, 3]/~; + let x = ~[1, 2, 3]; let addr = unsafe::to_ptr(x); let x_mut = to_mut(x); let addr_mut = unsafe::to_ptr(x_mut); @@ -2258,7 +2256,7 @@ mod tests { #[test] fn from_mut_no_copy() { unsafe { - let x = [mut 1, 2, 3]/~; + let x = ~[mut 1, 2, 3]; let addr = unsafe::to_ptr(x); let x_imm = from_mut(x); let addr_imm = unsafe::to_ptr(x_imm); @@ -2268,24 +2266,24 @@ mod tests { #[test] fn test_unshift() { - let mut x = [1, 2, 3]/~; + let mut x = ~[1, 2, 3]; unshift(x, 0); - assert x == [0, 1, 2, 3]/~; + assert x == ~[0, 1, 2, 3]; } #[test] fn test_capacity() { - let mut v = [0u64]/~; + let mut v = ~[0u64]; reserve(v, 10u); assert capacity(v) == 10u; - let mut v = [0u32]/~; + let mut v = ~[0u32]; reserve(v, 10u); assert capacity(v) == 10u; } #[test] fn test_view() { - let v = [1, 2, 3, 4, 5]/~; + let v = ~[1, 2, 3, 4, 5]; let v = view(v, 1u, 3u); assert(len(v) == 2u); assert(v[0] == 2); diff --git a/src/libstd/arena.rs b/src/libstd/arena.rs index ede119bb459..fed358d9388 100644 --- a/src/libstd/arena.rs +++ b/src/libstd/arena.rs @@ -5,11 +5,11 @@ export arena, arena_with_size; import list; import list::{list, cons, nil}; -type chunk = {data: [u8]/~, mut fill: uint}; +type chunk = {data: ~[u8], mut fill: uint}; type arena = {mut chunks: @list<@chunk>}; fn chunk(size: uint) -> @chunk { - let mut v = []/~; + let mut v = ~[]; vec::reserve(v, size); @{ data: v, mut fill: 0u } } diff --git a/src/libstd/bitv.rs b/src/libstd/bitv.rs index 2cbc9ae0187..adc3a7969d6 100644 --- a/src/libstd/bitv.rs +++ b/src/libstd/bitv.rs @@ -22,7 +22,7 @@ export eq_vec; // for the case where nbits <= 32. #[doc = "The bitvector type"] -type bitv = @{storage: [mut uint]/~, nbits: uint}; +type bitv = @{storage: ~[mut uint], nbits: uint}; const uint_bits: uint = 32u + (1u << 32u >> 27u); @@ -45,7 +45,7 @@ fn process(v0: bitv, v1: bitv, op: fn(uint, uint) -> uint) -> bool { assert (vec::len(v0.storage) == len); assert (v0.nbits == v1.nbits); let mut changed = false; - for uint::range(0u, len) {|i| + for uint::range(0u, len) |i| { let w0 = v0.storage[i]; let w1 = v1.storage[i]; let w = op(w0, w1); @@ -89,7 +89,7 @@ fn assign(v0: bitv, v1: bitv) -> bool { fn clone(v: bitv) -> bitv { let storage = vec::to_mut(vec::from_elem(v.nbits / uint_bits + 1u, 0u)); let len = vec::len(v.storage); - for uint::range(0u, len) {|i| storage[i] = v.storage[i]; }; + for uint::range(0u, len) |i| { storage[i] = v.storage[i]; }; ret @{storage: storage, nbits: v.nbits}; } @@ -113,22 +113,22 @@ contain identical elements. fn equal(v0: bitv, v1: bitv) -> bool { if v0.nbits != v1.nbits { ret false; } let len = vec::len(v1.storage); - for uint::iterate(0u, len) {|i| + for uint::iterate(0u, len) |i| { if v0.storage[i] != v1.storage[i] { ret false; } } } #[doc = "Set all bits to 0"] #[inline(always)] -fn clear(v: bitv) { for each_storage(v) {|w| w = 0u } } +fn clear(v: bitv) { for each_storage(v) |w| { w = 0u } } #[doc = "Set all bits to 1"] #[inline(always)] -fn set_all(v: bitv) { for each_storage(v) {|w| w = !0u } } +fn set_all(v: bitv) { for each_storage(v) |w| { w = !0u } } #[doc = "Invert all bits"] #[inline(always)] -fn invert(v: bitv) { for each_storage(v) {|w| w = !w } } +fn invert(v: bitv) { for each_storage(v) |w| { w = !w } } #[doc = " Calculate the difference between two bitvectors @@ -163,14 +163,14 @@ fn set(v: bitv, i: uint, x: bool) { #[doc = "Returns true if all bits are 1"] fn is_true(v: bitv) -> bool { - for each(v) {|i| if !i { ret false; } } + for each(v) |i| { if !i { ret false; } } ret true; } #[doc = "Returns true if all bits are 0"] fn is_false(v: bitv) -> bool { - for each(v) {|i| if i { ret false; } } + for each(v) |i| { if i { ret false; } } ret true; } @@ -183,8 +183,8 @@ Converts the bitvector to a vector of uint with the same length. Each uint in the resulting vector has either value 0u or 1u. "] -fn to_vec(v: bitv) -> [uint]/~ { - let sub = {|x|init_to_vec(v, x)}; +fn to_vec(v: bitv) -> ~[uint] { + let sub = |x| init_to_vec(v, x); ret vec::from_fn::<uint>(v.nbits, sub); } @@ -199,7 +199,7 @@ fn each(v: bitv, f: fn(bool) -> bool) { #[inline(always)] fn each_storage(v: bitv, op: fn(&uint) -> bool) { - for uint::range(0u, vec::len(v.storage)) {|i| + for uint::range(0u, vec::len(v.storage)) |i| { let mut w = v.storage[i]; let b = !op(w); v.storage[i] = w; @@ -215,7 +215,7 @@ is either '0' or '1'. "] fn to_str(v: bitv) -> str { let mut rs = ""; - for each(v) {|i| if i { rs += "1"; } else { rs += "0"; } } + for each(v) |i| { if i { rs += "1"; } else { rs += "0"; } } ret rs; } @@ -225,7 +225,7 @@ Compare a bitvector to a vector of uint The uint vector is expected to only contain the values 0u and 1u. Both the bitvector and vector must have the same length "] -fn eq_vec(v0: bitv, v1: [uint]/~) -> bool { +fn eq_vec(v0: bitv, v1: ~[uint]) -> bool { assert (v0.nbits == vec::len::<uint>(v1)); let len = v0.nbits; let mut i = 0u; @@ -262,9 +262,9 @@ mod tests { fn test_1_element() { let mut act; act = bitv(1u, false); - assert (eq_vec(act, [0u]/~)); + assert (eq_vec(act, ~[0u])); act = bitv(1u, true); - assert (eq_vec(act, [1u]/~)); + assert (eq_vec(act, ~[1u])); } #[test] @@ -273,11 +273,11 @@ mod tests { // all 0 act = bitv(10u, false); - assert (eq_vec(act, [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u]/~)); + assert (eq_vec(act, ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u])); // all 1 act = bitv(10u, true); - assert (eq_vec(act, [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u]/~)); + assert (eq_vec(act, ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(10u, false); @@ -286,7 +286,7 @@ mod tests { set(act, 2u, true); set(act, 3u, true); set(act, 4u, true); - assert (eq_vec(act, [1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u]/~)); + assert (eq_vec(act, ~[1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(10u, false); @@ -295,7 +295,7 @@ mod tests { set(act, 7u, true); set(act, 8u, true); set(act, 9u, true); - assert (eq_vec(act, [0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u]/~)); + assert (eq_vec(act, ~[0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(10u, false); @@ -303,7 +303,7 @@ mod tests { set(act, 3u, true); set(act, 6u, true); set(act, 9u, true); - assert (eq_vec(act, [1u, 0u, 0u, 1u, 0u, 0u, 1u, 0u, 0u, 1u]/~)); + assert (eq_vec(act, ~[1u, 0u, 0u, 1u, 0u, 0u, 1u, 0u, 0u, 1u])); } #[test] @@ -313,16 +313,16 @@ mod tests { act = bitv(31u, false); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u])); // all 1 act = bitv(31u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u]/~)); + 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(31u, false); @@ -335,9 +335,9 @@ mod tests { set(act, 6u, true); set(act, 7u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(31u, false); @@ -350,9 +350,9 @@ mod tests { set(act, 22u, true); set(act, 23u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(31u, false); @@ -364,9 +364,9 @@ mod tests { set(act, 29u, true); set(act, 30u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u]/~)); + 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(31u, false); @@ -374,9 +374,9 @@ mod tests { set(act, 17u, true); set(act, 30u, true); assert (eq_vec(act, - [0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 1u]/~)); + 0u, 0u, 0u, 0u, 1u])); } #[test] @@ -386,16 +386,16 @@ mod tests { act = bitv(32u, false); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u])); // all 1 act = bitv(32u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u, 1u]/~)); + 1u, 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(32u, false); @@ -408,9 +408,9 @@ mod tests { set(act, 6u, true); set(act, 7u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(32u, false); @@ -423,9 +423,9 @@ mod tests { set(act, 22u, true); set(act, 23u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(32u, false); @@ -438,9 +438,9 @@ mod tests { set(act, 30u, true); set(act, 31u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u, 1u]/~)); + 1u, 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(32u, false); @@ -449,9 +449,9 @@ mod tests { set(act, 30u, true); set(act, 31u, true); assert (eq_vec(act, - [0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 1u, 1u]/~)); + 0u, 0u, 0u, 0u, 1u, 1u])); } #[test] @@ -461,16 +461,16 @@ mod tests { act = bitv(33u, false); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u, 0u])); // all 1 act = bitv(33u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u, 1u, 1u]/~)); + 1u, 1u, 1u, 1u, 1u, 1u, 1u])); // mixed act = bitv(33u, false); @@ -483,9 +483,9 @@ mod tests { set(act, 6u, true); set(act, 7u, true); assert (eq_vec(act, - [1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, + ~[1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(33u, false); @@ -498,9 +498,9 @@ mod tests { set(act, 22u, true); set(act, 23u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 0u, 0u, - 0u, 0u, 0u, 0u, 0u, 0u, 0u]/~)); + 0u, 0u, 0u, 0u, 0u, 0u, 0u])); // mixed act = bitv(33u, false); @@ -513,9 +513,9 @@ mod tests { set(act, 30u, true); set(act, 31u, true); assert (eq_vec(act, - [0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u, - 1u, 1u, 1u, 1u, 1u, 1u, 0u]/~)); + 1u, 1u, 1u, 1u, 1u, 1u, 0u])); // mixed act = bitv(33u, false); @@ -525,9 +525,9 @@ mod tests { set(act, 31u, true); set(act, 32u, true); assert (eq_vec(act, - [0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, + ~[0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, - 0u, 0u, 0u, 0u, 1u, 1u, 1u]/~)); + 0u, 0u, 0u, 0u, 1u, 1u, 1u])); } #[test] diff --git a/src/libstd/c_vec.rs b/src/libstd/c_vec.rs index 3c89adfb1aa..02ed63981b9 100644 --- a/src/libstd/c_vec.rs +++ b/src/libstd/c_vec.rs @@ -140,7 +140,7 @@ mod tests { assert mem as int != 0; ret unsafe { c_vec_with_dtor(mem as *mut u8, n as uint, - {||free(mem)}) }; + ||free(mem)) }; } #[test] diff --git a/src/libstd/deque.rs b/src/libstd/deque.rs index 2a9e663a08c..501a9dabc5a 100644 --- a/src/libstd/deque.rs +++ b/src/libstd/deque.rs @@ -24,10 +24,10 @@ fn create<T: copy>() -> t<T> { * Grow is only called on full elts, so nelts is also len(elts), unlike * elsewhere. */ - fn grow<T: copy>(nelts: uint, lo: uint, -elts: [mut cell<T>]/~) -> - [mut cell<T>]/~ { + fn grow<T: copy>(nelts: uint, lo: uint, -elts: ~[mut cell<T>]) -> + ~[mut cell<T>] { assert (nelts == vec::len(elts)); - let mut rv = [mut]/~; + let mut rv = ~[mut]; let mut i = 0u; let nalloc = uint::next_power_of_two(nelts + 1u); @@ -57,7 +57,7 @@ fn create<T: copy>() -> t<T> { self.lo = self.elts.len() - 1u; } else { self.lo -= 1u; } if self.lo == self.hi { - self.elts.swap({ |v| grow(self.nelts, oldlo, v) }); + self.elts.swap(|v| grow(self.nelts, oldlo, v)); self.lo = self.elts.len() - 1u; self.hi = self.nelts; } @@ -66,7 +66,7 @@ fn create<T: copy>() -> t<T> { } fn add_back(t: T) { if self.lo == self.hi && self.nelts != 0u { - self.elts.swap({ |v| grow(self.nelts, self.lo, v) }); + self.elts.swap(|v| grow(self.nelts, self.lo, v)); self.lo = 0u; self.hi = self.nelts; } @@ -292,7 +292,7 @@ mod tests { two(17, 42)); #debug("*** test parameterized: taggypar<int>"); - let eq4: eqfn<taggypar<int>> = {|x,y|taggypareq::<int>(x, y)}; + let eq4: eqfn<taggypar<int>> = |x,y| taggypareq::<int>(x, y); test_parameterized::<taggypar<int>>(eq4, onepar::<int>(1), twopar::<int>(1, 2), threepar::<int>(1, 2, 3), diff --git a/src/libstd/ebml.rs b/src/libstd/ebml.rs index 3a7b88ae7a9..70d974f255f 100644 --- a/src/libstd/ebml.rs +++ b/src/libstd/ebml.rs @@ -35,11 +35,11 @@ type ebml_state = {ebml_tag: ebml_tag, tag_pos: uint, data_pos: uint}; // modules within this file. // ebml reading -type doc = {data: @[u8]/~, start: uint, end: uint}; +type doc = {data: @~[u8], start: uint, end: uint}; type tagged_doc = {tag: uint, doc: doc}; -fn vuint_at(data: [u8]/&, start: uint) -> {val: uint, next: uint} { +fn vuint_at(data: &[u8], start: uint) -> {val: uint, next: uint} { let a = data[start]; if a & 0x80u8 != 0u8 { ret {val: (a & 0x7fu8) as uint, next: start + 1u}; @@ -62,11 +62,11 @@ fn vuint_at(data: [u8]/&, start: uint) -> {val: uint, next: uint} { } else { #error("vint too big"); fail; } } -fn doc(data: @[u8]/~) -> doc { +fn doc(data: @~[u8]) -> doc { ret {data: data, start: 0u, end: vec::len::<u8>(*data)}; } -fn doc_at(data: @[u8]/~, start: uint) -> tagged_doc { +fn doc_at(data: @~[u8], start: uint) -> tagged_doc { let elt_tag = vuint_at(*data, start); let elt_size = vuint_at(*data, elt_tag.next); let end = elt_size.next + elt_size.val; @@ -119,7 +119,7 @@ fn tagged_docs(d: doc, tg: uint, it: fn(doc)) { } } -fn doc_data(d: doc) -> [u8]/~ { vec::slice::<u8>(*d.data, d.start, d.end) } +fn doc_data(d: doc) -> ~[u8] { vec::slice::<u8>(*d.data, d.start, d.end) } fn doc_as_str(d: doc) -> str { ret str::from_bytes(doc_data(d)); } @@ -149,23 +149,23 @@ fn doc_as_i32(d: doc) -> i32 { doc_as_u32(d) as i32 } fn doc_as_i64(d: doc) -> i64 { doc_as_u64(d) as i64 } // ebml writing -type writer = {writer: io::writer, mut size_positions: [uint]/~}; +type writer = {writer: io::writer, mut size_positions: ~[uint]}; fn write_sized_vuint(w: io::writer, n: uint, size: uint) { alt size { 1u { - w.write([0x80u8 | (n as u8)]/&); + w.write(&[0x80u8 | (n as u8)]); } 2u { - w.write([0x40u8 | ((n >> 8_u) as u8), n as u8]/&); + w.write(&[0x40u8 | ((n >> 8_u) as u8), n as u8]); } 3u { - w.write([0x20u8 | ((n >> 16_u) as u8), (n >> 8_u) as u8, - n as u8]/&); + w.write(&[0x20u8 | ((n >> 16_u) as u8), (n >> 8_u) as u8, + n as u8]); } 4u { - w.write([0x10u8 | ((n >> 24_u) as u8), (n >> 16_u) as u8, - (n >> 8_u) as u8, n as u8]/&); + w.write(&[0x10u8 | ((n >> 24_u) as u8), (n >> 16_u) as u8, + (n >> 8_u) as u8, n as u8]); } _ { fail #fmt("vint to write too big: %?", n); } }; @@ -180,7 +180,7 @@ fn write_vuint(w: io::writer, n: uint) { } fn writer(w: io::writer) -> writer { - let size_positions: [uint]/~ = []/~; + let size_positions: ~[uint] = ~[]; ret {writer: w, mut size_positions: size_positions}; } @@ -194,7 +194,7 @@ impl writer for writer { // Write a placeholder four-byte size. vec::push(self.size_positions, self.writer.tell()); - let zeroes: [u8]/& = [0u8, 0u8, 0u8, 0u8]/&; + let zeroes: &[u8] = &[0u8, 0u8, 0u8, 0u8]; self.writer.write(zeroes); } @@ -215,54 +215,54 @@ impl writer for writer { self.end_tag(); } - fn wr_tagged_bytes(tag_id: uint, b: [u8]/&) { + fn wr_tagged_bytes(tag_id: uint, b: &[u8]) { write_vuint(self.writer, tag_id); write_vuint(self.writer, vec::len(b)); self.writer.write(b); } fn wr_tagged_u64(tag_id: uint, v: u64) { - io::u64_to_be_bytes(v, 8u) {|v| + do io::u64_to_be_bytes(v, 8u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_u32(tag_id: uint, v: u32) { - io::u64_to_be_bytes(v as u64, 4u) {|v| + do io::u64_to_be_bytes(v as u64, 4u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_u16(tag_id: uint, v: u16) { - io::u64_to_be_bytes(v as u64, 2u) {|v| + do io::u64_to_be_bytes(v as u64, 2u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_u8(tag_id: uint, v: u8) { - self.wr_tagged_bytes(tag_id, [v]/&); + self.wr_tagged_bytes(tag_id, &[v]); } fn wr_tagged_i64(tag_id: uint, v: i64) { - io::u64_to_be_bytes(v as u64, 8u) {|v| + do io::u64_to_be_bytes(v as u64, 8u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_i32(tag_id: uint, v: i32) { - io::u64_to_be_bytes(v as u64, 4u) {|v| + do io::u64_to_be_bytes(v as u64, 4u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_i16(tag_id: uint, v: i16) { - io::u64_to_be_bytes(v as u64, 2u) {|v| + do io::u64_to_be_bytes(v as u64, 2u) |v| { self.wr_tagged_bytes(tag_id, v); } } fn wr_tagged_i8(tag_id: uint, v: i8) { - self.wr_tagged_bytes(tag_id, [v as u8]/&); + self.wr_tagged_bytes(tag_id, &[v as u8]); } fn wr_tagged_str(tag_id: uint, v: str) { @@ -275,7 +275,7 @@ impl writer for writer { self.wr_tagged_bytes(tag_id, str::bytes(v)); } - fn wr_bytes(b: [u8]/&) { + fn wr_bytes(b: &[u8]) { #debug["Write %u bytes", vec::len(b)]; self.writer.write(b); } @@ -355,7 +355,7 @@ impl serializer of serialization::serializer for ebml::writer { fn emit_enum_variant_arg(_idx: uint, f: fn()) { f() } fn emit_vec(len: uint, f: fn()) { - self.wr_tag(es_vec as uint) {|| + do self.wr_tag(es_vec as uint) || { self._emit_tagged_uint(es_vec_len, len); f() } @@ -482,7 +482,7 @@ impl deserializer of serialization::deserializer for ebml_deserializer { #debug["read_enum_variant()"]; let idx = self._next_uint(es_enum_vid); #debug[" idx=%u", idx]; - self.push_doc(self.next_doc(es_enum_body)) {|| + do self.push_doc(self.next_doc(es_enum_body)) || { f(idx) } } @@ -494,7 +494,7 @@ impl deserializer of serialization::deserializer for ebml_deserializer { fn read_vec<T:copy>(f: fn(uint) -> T) -> T { #debug["read_vec()"]; - self.push_doc(self.next_doc(es_vec)) {|| + do self.push_doc(self.next_doc(es_vec)) || { let len = self._next_uint(es_vec_len); #debug[" len=%u", len]; f(len) @@ -549,14 +549,14 @@ fn test_option_int() { } fn serialize_0<S: serialization::serializer>(s: S, v: option<int>) { - s.emit_enum("core::option::t") {|| + do s.emit_enum("core::option::t") || { alt v { none { - s.emit_enum_variant("core::option::none", 0u, 0u) {||} + s.emit_enum_variant("core::option::none", 0u, 0u, || { } ); } some(v0) { - s.emit_enum_variant("core::option::some", 1u, 1u) {|| - s.emit_enum_variant_arg(0u) {|| serialize_1(s, v0) } + do s.emit_enum_variant("core::option::some", 1u, 1u) || { + s.emit_enum_variant_arg(0u, || serialize_1(s, v0)); } } } @@ -568,12 +568,12 @@ fn test_option_int() { } fn deserialize_0<S: serialization::deserializer>(s: S) -> option<int> { - s.read_enum("core::option::t") {|| - s.read_enum_variant {|i| + do s.read_enum("core::option::t") || { + do s.read_enum_variant |i| { alt check i { 0u { none } 1u { - let v0 = s.read_enum_variant_arg(0u) {|| + let v0 = do s.read_enum_variant_arg(0u) || { deserialize_1(s) }; some(v0) diff --git a/src/libstd/getopts.rs b/src/libstd/getopts.rs index 803457f5af7..a82d131f516 100644 --- a/src/libstd/getopts.rs +++ b/src/libstd/getopts.rs @@ -134,7 +134,7 @@ enum optval { val(str), given, } The result of checking command line arguments. Contains a vector of matches and a vector of free strings. "] -type match = {opts: [opt]/~, vals: [[optval]/~]/~, free: [str]/~}; +type match = {opts: ~[opt], vals: ~[~[optval]], free: ~[str]}; fn is_arg(arg: str) -> bool { ret str::len(arg) > 1u && arg[0] == '-' as u8; @@ -144,8 +144,8 @@ fn name_str(nm: name) -> str { ret alt nm { short(ch) { str::from_char(ch) } long(s) { s } }; } -fn find_opt(opts: [opt]/~, nm: name) -> option<uint> { - vec::position(opts, { |opt| opt.name == nm }) +fn find_opt(opts: ~[opt], nm: name) -> option<uint> { + vec::position(opts, |opt| opt.name == nm) } #[doc = " @@ -188,11 +188,11 @@ On success returns `ok(opt)`. Use functions such as `opt_present` `opt_str`, etc. to interrogate results. Returns `err(fail_)` on failure. Use <fail_str> to get an error message. "] -fn getopts(args: [str]/~, opts: [opt]/~) -> result unsafe { +fn getopts(args: ~[str], opts: ~[opt]) -> result unsafe { let n_opts = vec::len::<opt>(opts); - fn f(_x: uint) -> [optval]/~ { ret []/~; } + fn f(_x: uint) -> ~[optval] { ret ~[]; } let vals = vec::to_mut(vec::from_fn(n_opts, f)); - let mut free: [str]/~ = []/~; + let mut free: ~[str] = ~[]; let l = vec::len(args); let mut i = 0u; while i < l { @@ -211,16 +211,16 @@ fn getopts(args: [str]/~, opts: [opt]/~) -> result unsafe { let tail = str::slice(cur, 2u, curlen); let tail_eq = str::splitn_char(tail, '=', 1u); if vec::len(tail_eq) <= 1u { - names = [long(tail)]/~; + names = ~[long(tail)]; } else { names = - [long(tail_eq[0])]/~; + ~[long(tail_eq[0])]; i_arg = option::some::<str>(tail_eq[1]); } } else { let mut j = 1u; - names = []/~; + names = ~[]; while j < curlen { let range = str::char_range_at(cur, j); vec::push(names, short(range.ch)); @@ -228,7 +228,7 @@ fn getopts(args: [str]/~, opts: [opt]/~) -> result unsafe { } } let mut name_pos = 0u; - for vec::each(names) {|nm| + for vec::each(names) |nm| { name_pos += 1u; let optid = alt find_opt(opts, nm) { some(id) { id } @@ -281,7 +281,7 @@ fn getopts(args: [str]/~, opts: [opt]/~) -> result unsafe { ret ok({opts: opts, vals: vec::from_mut(vals), free: free}); } -fn opt_vals(m: match, nm: str) -> [optval]/~ { +fn opt_vals(m: match, nm: str) -> ~[optval] { ret alt find_opt(m.opts, mkname(nm)) { some(id) { m.vals[id] } none { #error("No option '%s' defined", nm); fail } @@ -296,8 +296,8 @@ fn opt_present(m: match, nm: str) -> bool { } #[doc = "Returns true if any of several options were matched"] -fn opts_present(m: match, names: [str]/~) -> bool { - for vec::each(names) {|nm| +fn opts_present(m: match, names: ~[str]) -> bool { + for vec::each(names) |nm| { alt find_opt(m.opts, mkname(nm)) { some(_) { ret true; } _ { } @@ -322,8 +322,8 @@ Returns the string argument supplied to one of several matching options Fails if the no option was provided from the given list, or if the no such option took an argument "] -fn opts_str(m: match, names: [str]/~) -> str { - for vec::each(names) {|nm| +fn opts_str(m: match, names: ~[str]) -> str { + for vec::each(names) |nm| { alt opt_val(m, nm) { val(s) { ret s } _ { } @@ -338,9 +338,9 @@ Returns a vector of the arguments provided to all matches of the given option. Used when an option accepts multiple values. "] -fn opt_strs(m: match, nm: str) -> [str]/~ { - let mut acc: [str]/~ = []/~; - for vec::each(opt_vals(m, nm)) {|v| +fn opt_strs(m: match, nm: str) -> ~[str] { + let mut acc: ~[str] = ~[]; + for vec::each(opt_vals(m, nm)) |v| { alt v { val(s) { vec::push(acc, s); } _ { } } } ret acc; @@ -396,8 +396,8 @@ mod tests { // Tests for reqopt #[test] fn test_reqopt_long() { - let args = ["--test=20"]/~; - let opts = [reqopt("test")]/~; + let args = ~["--test=20"]; + let opts = ~[reqopt("test")]; let rs = getopts(args, opts); alt check rs { ok(m) { @@ -409,8 +409,8 @@ mod tests { #[test] fn test_reqopt_long_missing() { - let args = ["blah"]/~; - let opts = [reqopt("test")]/~; + let args = ~["blah"]; + let opts = ~[reqopt("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_missing_); } @@ -420,8 +420,8 @@ mod tests { #[test] fn test_reqopt_long_no_arg() { - let args = ["--test"]/~; - let opts = [reqopt("test")]/~; + let args = ~["--test"]; + let opts = ~[reqopt("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -431,8 +431,8 @@ mod tests { #[test] fn test_reqopt_long_multi() { - let args = ["--test=20", "--test=30"]/~; - let opts = [reqopt("test")]/~; + let args = ~["--test=20", "--test=30"]; + let opts = ~[reqopt("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -442,8 +442,8 @@ mod tests { #[test] fn test_reqopt_short() { - let args = ["-t", "20"]/~; - let opts = [reqopt("t")]/~; + let args = ~["-t", "20"]; + let opts = ~[reqopt("t")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -456,8 +456,8 @@ mod tests { #[test] fn test_reqopt_short_missing() { - let args = ["blah"]/~; - let opts = [reqopt("t")]/~; + let args = ~["blah"]; + let opts = ~[reqopt("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_missing_); } @@ -467,8 +467,8 @@ mod tests { #[test] fn test_reqopt_short_no_arg() { - let args = ["-t"]/~; - let opts = [reqopt("t")]/~; + let args = ~["-t"]; + let opts = ~[reqopt("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -478,8 +478,8 @@ mod tests { #[test] fn test_reqopt_short_multi() { - let args = ["-t", "20", "-t", "30"]/~; - let opts = [reqopt("t")]/~; + let args = ~["-t", "20", "-t", "30"]; + let opts = ~[reqopt("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -491,8 +491,8 @@ mod tests { // Tests for optopt #[test] fn test_optopt_long() { - let args = ["--test=20"]/~; - let opts = [optopt("test")]/~; + let args = ~["--test=20"]; + let opts = ~[optopt("test")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -505,8 +505,8 @@ mod tests { #[test] fn test_optopt_long_missing() { - let args = ["blah"]/~; - let opts = [optopt("test")]/~; + let args = ~["blah"]; + let opts = ~[optopt("test")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "test")); } @@ -516,8 +516,8 @@ mod tests { #[test] fn test_optopt_long_no_arg() { - let args = ["--test"]/~; - let opts = [optopt("test")]/~; + let args = ~["--test"]; + let opts = ~[optopt("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -527,8 +527,8 @@ mod tests { #[test] fn test_optopt_long_multi() { - let args = ["--test=20", "--test=30"]/~; - let opts = [optopt("test")]/~; + let args = ~["--test=20", "--test=30"]; + let opts = ~[optopt("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -538,8 +538,8 @@ mod tests { #[test] fn test_optopt_short() { - let args = ["-t", "20"]/~; - let opts = [optopt("t")]/~; + let args = ~["-t", "20"]; + let opts = ~[optopt("t")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -552,8 +552,8 @@ mod tests { #[test] fn test_optopt_short_missing() { - let args = ["blah"]/~; - let opts = [optopt("t")]/~; + let args = ~["blah"]; + let opts = ~[optopt("t")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "t")); } @@ -563,8 +563,8 @@ mod tests { #[test] fn test_optopt_short_no_arg() { - let args = ["-t"]/~; - let opts = [optopt("t")]/~; + let args = ~["-t"]; + let opts = ~[optopt("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -574,8 +574,8 @@ mod tests { #[test] fn test_optopt_short_multi() { - let args = ["-t", "20", "-t", "30"]/~; - let opts = [optopt("t")]/~; + let args = ~["-t", "20", "-t", "30"]; + let opts = ~[optopt("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -587,8 +587,8 @@ mod tests { // Tests for optflag #[test] fn test_optflag_long() { - let args = ["--test"]/~; - let opts = [optflag("test")]/~; + let args = ~["--test"]; + let opts = ~[optflag("test")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (opt_present(m, "test")); } @@ -598,8 +598,8 @@ mod tests { #[test] fn test_optflag_long_missing() { - let args = ["blah"]/~; - let opts = [optflag("test")]/~; + let args = ~["blah"]; + let opts = ~[optflag("test")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "test")); } @@ -609,8 +609,8 @@ mod tests { #[test] fn test_optflag_long_arg() { - let args = ["--test=20"]/~; - let opts = [optflag("test")]/~; + let args = ~["--test=20"]; + let opts = ~[optflag("test")]; let rs = getopts(args, opts); alt rs { err(f) { @@ -623,8 +623,8 @@ mod tests { #[test] fn test_optflag_long_multi() { - let args = ["--test", "--test"]/~; - let opts = [optflag("test")]/~; + let args = ~["--test", "--test"]; + let opts = ~[optflag("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -634,8 +634,8 @@ mod tests { #[test] fn test_optflag_short() { - let args = ["-t"]/~; - let opts = [optflag("t")]/~; + let args = ~["-t"]; + let opts = ~[optflag("t")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (opt_present(m, "t")); } @@ -645,8 +645,8 @@ mod tests { #[test] fn test_optflag_short_missing() { - let args = ["blah"]/~; - let opts = [optflag("t")]/~; + let args = ~["blah"]; + let opts = ~[optflag("t")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "t")); } @@ -656,8 +656,8 @@ mod tests { #[test] fn test_optflag_short_arg() { - let args = ["-t", "20"]/~; - let opts = [optflag("t")]/~; + let args = ~["-t", "20"]; + let opts = ~[optflag("t")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -671,8 +671,8 @@ mod tests { #[test] fn test_optflag_short_multi() { - let args = ["-t", "-t"]/~; - let opts = [optflag("t")]/~; + let args = ~["-t", "-t"]; + let opts = ~[optflag("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, option_duplicated_); } @@ -684,8 +684,8 @@ mod tests { // Tests for optmulti #[test] fn test_optmulti_long() { - let args = ["--test=20"]/~; - let opts = [optmulti("test")]/~; + let args = ~["--test=20"]; + let opts = ~[optmulti("test")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -698,8 +698,8 @@ mod tests { #[test] fn test_optmulti_long_missing() { - let args = ["blah"]/~; - let opts = [optmulti("test")]/~; + let args = ~["blah"]; + let opts = ~[optmulti("test")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "test")); } @@ -709,8 +709,8 @@ mod tests { #[test] fn test_optmulti_long_no_arg() { - let args = ["--test"]/~; - let opts = [optmulti("test")]/~; + let args = ~["--test"]; + let opts = ~[optmulti("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -720,8 +720,8 @@ mod tests { #[test] fn test_optmulti_long_multi() { - let args = ["--test=20", "--test=30"]/~; - let opts = [optmulti("test")]/~; + let args = ~["--test=20", "--test=30"]; + let opts = ~[optmulti("test")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -736,8 +736,8 @@ mod tests { #[test] fn test_optmulti_short() { - let args = ["-t", "20"]/~; - let opts = [optmulti("t")]/~; + let args = ~["-t", "20"]; + let opts = ~[optmulti("t")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -750,8 +750,8 @@ mod tests { #[test] fn test_optmulti_short_missing() { - let args = ["blah"]/~; - let opts = [optmulti("t")]/~; + let args = ~["blah"]; + let opts = ~[optmulti("t")]; let rs = getopts(args, opts); alt rs { ok(m) { assert (!opt_present(m, "t")); } @@ -761,8 +761,8 @@ mod tests { #[test] fn test_optmulti_short_no_arg() { - let args = ["-t"]/~; - let opts = [optmulti("t")]/~; + let args = ~["-t"]; + let opts = ~[optmulti("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, argument_missing_); } @@ -772,8 +772,8 @@ mod tests { #[test] fn test_optmulti_short_multi() { - let args = ["-t", "20", "-t", "30"]/~; - let opts = [optmulti("t")]/~; + let args = ~["-t", "20", "-t", "30"]; + let opts = ~[optmulti("t")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -788,8 +788,8 @@ mod tests { #[test] fn test_unrecognized_option_long() { - let args = ["--untest"]/~; - let opts = [optmulti("t")]/~; + let args = ~["--untest"]; + let opts = ~[optmulti("t")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, unrecognized_option_); } @@ -799,8 +799,8 @@ mod tests { #[test] fn test_unrecognized_option_short() { - let args = ["-t"]/~; - let opts = [optmulti("test")]/~; + let args = ~["-t"]; + let opts = ~[optmulti("test")]; let rs = getopts(args, opts); alt rs { err(f) { check_fail_type(f, unrecognized_option_); } @@ -811,12 +811,12 @@ mod tests { #[test] fn test_combined() { let args = - ["prog", "free1", "-s", "20", "free2", "--flag", "--long=30", - "-f", "-m", "40", "-m", "50", "-n", "-A B", "-n", "-60 70"]/~; + ~["prog", "free1", "-s", "20", "free2", "--flag", "--long=30", + "-f", "-m", "40", "-m", "50", "-n", "-A B", "-n", "-60 70"]; let opts = - [optopt("s"), optflag("flag"), reqopt("long"), + ~[optopt("s"), optflag("flag"), reqopt("long"), optflag("f"), optmulti("m"), optmulti("n"), - optopt("notpresent")]/~; + optopt("notpresent")]; let rs = getopts(args, opts); alt rs { ok(m) { @@ -839,23 +839,23 @@ mod tests { #[test] fn test_multi() { - let args = ["-e", "foo", "--encrypt", "foo"]/~; - let opts = [optopt("e"), optopt("encrypt")]/~; + let args = ~["-e", "foo", "--encrypt", "foo"]; + let opts = ~[optopt("e"), optopt("encrypt")]; let match = alt getopts(args, opts) { result::ok(m) { m } result::err(f) { fail; } }; - assert opts_present(match, ["e"]/~); - assert opts_present(match, ["encrypt"]/~); - assert opts_present(match, ["encrypt", "e"]/~); - assert opts_present(match, ["e", "encrypt"]/~); - assert !opts_present(match, ["thing"]/~); - assert !opts_present(match, []/~); - - assert opts_str(match, ["e"]/~) == "foo"; - assert opts_str(match, ["encrypt"]/~) == "foo"; - assert opts_str(match, ["e", "encrypt"]/~) == "foo"; - assert opts_str(match, ["encrypt", "e"]/~) == "foo"; + assert opts_present(match, ~["e"]); + assert opts_present(match, ~["encrypt"]); + assert opts_present(match, ~["encrypt", "e"]); + assert opts_present(match, ~["e", "encrypt"]); + assert !opts_present(match, ~["thing"]); + assert !opts_present(match, ~[]); + + assert opts_str(match, ~["e"]) == "foo"; + assert opts_str(match, ~["encrypt"]) == "foo"; + assert opts_str(match, ~["e", "encrypt"]) == "foo"; + assert opts_str(match, ~["encrypt", "e"]) == "foo"; } } diff --git a/src/libstd/json.rs b/src/libstd/json.rs index 947b037841e..859349d4170 100644 --- a/src/libstd/json.rs +++ b/src/libstd/json.rs @@ -30,7 +30,7 @@ enum json { num(float), string(@str), boolean(bool), - list(@[json]/~), + list(@~[json]), dict(map::hashmap<str, json>), null, } @@ -54,7 +54,7 @@ fn to_writer(wr: io::writer, j: json) { list(v) { wr.write_char('['); let mut first = true; - for (*v).each { |item| + for (*v).each |item| { if !first { wr.write_str(", "); } @@ -71,7 +71,7 @@ fn to_writer(wr: io::writer, j: json) { wr.write_str("{ "); let mut first = true; - for d.each { |key, value| + for d.each |key, value| { if !first { wr.write_str(", "); } @@ -90,7 +90,7 @@ fn to_writer(wr: io::writer, j: json) { fn escape_str(s: str) -> str { let mut escaped = "\""; - str::chars_iter(s) { |c| + do str::chars_iter(s) |c| { alt c { '"' { escaped += "\\\""; } '\\' { escaped += "\\\\"; } @@ -110,7 +110,7 @@ fn escape_str(s: str) -> str { #[doc = "Serializes a json value into a string"] fn to_str(j: json) -> str { - io::with_str_writer { |wr| to_writer(wr, j) } + io::with_str_writer(|wr| to_writer(wr, j)) } type parser = { @@ -186,7 +186,7 @@ impl parser for parser { } fn parse_ident(ident: str, value: json) -> result<json, error> { - if str::all(ident, { |c| c == self.next_char() }) { + if str::all(ident, |c| c == self.next_char()) { self.bump(); ok(value) } else { @@ -383,7 +383,7 @@ impl parser for parser { self.bump(); self.parse_whitespace(); - let mut values = []/~; + let mut values = ~[]; if self.ch == ']' { self.bump(); @@ -487,7 +487,7 @@ fn eq(value0: json, value1: json) -> bool { (dict(d0), dict(d1)) { if d0.size() == d1.size() { let mut equal = true; - for d0.each { |k, v0| + for d0.each |k, v0| { alt d1.find(k) { some(v1) { if !eq(v0, v1) { equal = false; } } @@ -585,7 +585,7 @@ impl of to_json for @str { impl <A: to_json copy, B: to_json copy> of to_json for (A, B) { fn to_json() -> json { let (a, b) = self; - list(@[a.to_json(), b.to_json()]/~) + list(@~[a.to_json(), b.to_json()]) } } @@ -593,18 +593,18 @@ impl <A: to_json copy, B: to_json copy, C: to_json copy> of to_json for (A, B, C) { fn to_json() -> json { let (a, b, c) = self; - list(@[a.to_json(), b.to_json(), c.to_json()]/~) + list(@~[a.to_json(), b.to_json(), c.to_json()]) } } -impl <A: to_json> of to_json for [A]/~ { - fn to_json() -> json { list(@self.map { |elt| elt.to_json() }) } +impl <A: to_json> of to_json for ~[A] { + fn to_json() -> json { list(@self.map(|elt| elt.to_json())) } } impl <A: to_json copy> of to_json for hashmap<str, A> { fn to_json() -> json { let d = map::str_hash(); - for self.each() { |key, value| + for self.each() |key, value| { d.insert(copy key, value.to_json()); } dict(d) @@ -632,10 +632,10 @@ impl of to_str::to_str for error { #[cfg(test)] mod tests { - fn mk_dict(items: [(str, json)]/~) -> json { + fn mk_dict(items: ~[(str, json)]) -> json { let d = map::str_hash(); - vec::iter(items) { |item| + do vec::iter(items) |item| { let (key, value) = copy item; d.insert(key, value); }; @@ -670,26 +670,26 @@ mod tests { #[test] fn test_write_list() { - assert to_str(list(@[]/~)) == "[]"; - assert to_str(list(@[boolean(true)]/~)) == "[true]"; - assert to_str(list(@[ + assert to_str(list(@~[])) == "[]"; + assert to_str(list(@~[boolean(true)])) == "[true]"; + assert to_str(list(@~[ boolean(false), null, - list(@[string(@"foo\nbar"), num(3.5f)]/~) - ]/~)) == "[false, null, [\"foo\\nbar\", 3.5]]"; + list(@~[string(@"foo\nbar"), num(3.5f)]) + ])) == "[false, null, [\"foo\\nbar\", 3.5]]"; } #[test] fn test_write_dict() { - assert to_str(mk_dict([]/~)) == "{}"; - assert to_str(mk_dict([("a", boolean(true))]/~)) == "{ \"a\": true }"; - assert to_str(mk_dict([ + assert to_str(mk_dict(~[])) == "{}"; + assert to_str(mk_dict(~[("a", boolean(true))])) == "{ \"a\": true }"; + assert to_str(mk_dict(~[ ("a", boolean(true)), - ("b", list(@[ - mk_dict([("c", string(@"\x0c\r"))]/~), - mk_dict([("d", string(@""))]/~) - ]/~)) - ]/~)) == + ("b", list(@~[ + mk_dict(~[("c", string(@"\x0c\r"))]), + mk_dict(~[("d", string(@""))]) + ])) + ])) == "{ " + "\"a\": true, " + "\"b\": [" + @@ -709,7 +709,7 @@ mod tests { err({line: 1u, col: 6u, msg: @"trailing characters"}); assert from_str("1a") == err({line: 1u, col: 2u, msg: @"trailing characters"}); - assert from_str("[]/~a") == + assert from_str("[]a") == err({line: 1u, col: 3u, msg: @"trailing characters"}); assert from_str("{}a") == err({line: 1u, col: 3u, msg: @"trailing characters"}); @@ -798,15 +798,15 @@ mod tests { assert from_str("[6 7]") == err({line: 1u, col: 4u, msg: @"expecting ',' or ']'"}); - assert from_str("[]") == ok(list(@[]/~)); - assert from_str("[ ]") == ok(list(@[]/~)); - assert from_str("[true]") == ok(list(@[boolean(true)]/~)); - assert from_str("[ false ]") == ok(list(@[boolean(false)]/~)); - assert from_str("[null]") == ok(list(@[null]/~)); - assert from_str("[3, 1]") == ok(list(@[num(3f), num(1f)]/~)); - assert from_str("\n[3, 2]\n") == ok(list(@[num(3f), num(2f)]/~)); + assert from_str("[]") == ok(list(@~[])); + assert from_str("[ ]") == ok(list(@~[])); + assert from_str("[true]") == ok(list(@~[boolean(true)])); + assert from_str("[ false ]") == ok(list(@~[boolean(false)])); + assert from_str("[null]") == ok(list(@~[null])); + assert from_str("[3, 1]") == ok(list(@~[num(3f), num(1f)])); + assert from_str("\n[3, 2]\n") == ok(list(@~[num(3f), num(2f)])); assert from_str("[2, [4, 1]]") == - ok(list(@[num(2f), list(@[num(4f), num(1f)]/~)]/~)); + ok(list(@~[num(2f), list(@~[num(4f), num(1f)])])); } #[test] @@ -835,23 +835,23 @@ mod tests { assert from_str("{\"a\":1,") == err({line: 1u, col: 8u, msg: @"EOF while parsing object"}); - assert eq(result::get(from_str("{}")), mk_dict([]/~)); + assert eq(result::get(from_str("{}")), mk_dict(~[])); assert eq(result::get(from_str("{\"a\": 3}")), - mk_dict([("a", num(3.0f))]/~)); + mk_dict(~[("a", num(3.0f))])); assert eq(result::get(from_str("{ \"a\": null, \"b\" : true }")), - mk_dict([ + mk_dict(~[ ("a", null), - ("b", boolean(true))]/~)); + ("b", boolean(true))])); assert eq(result::get(from_str("\n{ \"a\": null, \"b\" : true }\n")), - mk_dict([ + mk_dict(~[ ("a", null), - ("b", boolean(true))]/~)); + ("b", boolean(true))])); assert eq(result::get(from_str("{\"a\" : 1.0 ,\"b\": [ true ]}")), - mk_dict([ + mk_dict(~[ ("a", num(1.0)), - ("b", list(@[boolean(true)]/~)) - ]/~)); + ("b", list(@~[boolean(true)])) + ])); assert eq(result::get(from_str( "{" + "\"a\": 1.0, " + @@ -861,16 +861,16 @@ mod tests { "{ \"c\": {\"d\": null} } " + "]" + "}")), - mk_dict([ + mk_dict(~[ ("a", num(1.0f)), - ("b", list(@[ + ("b", list(@~[ boolean(true), string(@"foo\nbar"), - mk_dict([ - ("c", mk_dict([("d", null)]/~)) - ]/~) - ]/~)) - ]/~)); + mk_dict(~[ + ("c", mk_dict(~[("d", null)])) + ]) + ])) + ])); } #[test] diff --git a/src/libstd/list.rs b/src/libstd/list.rs index ad7828da8c6..95fc53f49b4 100644 --- a/src/libstd/list.rs +++ b/src/libstd/list.rs @@ -10,8 +10,8 @@ enum list<T> { } #[doc = "Create a list from a vector"] -fn from_vec<T: copy>(v: [T]/~) -> @list<T> { - vec::foldr(v, @nil::<T>, { |h, t| @cons(h, t) }) +fn from_vec<T: copy>(v: &[T]) -> @list<T> { + vec::foldr(v, @nil::<T>, |h, t| @cons(h, t)) } #[doc = " @@ -29,7 +29,7 @@ accumulated result. "] fn foldl<T: copy, U>(z: T, ls: @list<U>, f: fn(T, U) -> T) -> T { let mut accum: T = z; - iter(ls) {|elt| accum = f(accum, elt);} + do iter(ls) |elt| { accum = f(accum, elt);} accum } @@ -55,7 +55,7 @@ fn find<T: copy>(ls: @list<T>, f: fn(T) -> bool) -> option<T> { #[doc = "Returns true if a list contains an element with the given value"] fn has<T: copy>(ls: @list<T>, elt: T) -> bool { - for each(ls) { |e| + for each(ls) |e| { if e == elt { ret true; } } ret false; @@ -77,7 +77,7 @@ pure fn is_not_empty<T: copy>(ls: @list<T>) -> bool { #[doc = "Returns the length of a list"] fn len<T>(ls: @list<T>) -> uint { let mut count = 0u; - iter(ls) {|_e| count += 1u;} + iter(ls, |_e| count += 1u); count } @@ -135,9 +135,9 @@ mod tests { #[test] fn test_is_empty() { - let empty : @list::list<int> = from_vec([]/~); - let full1 = from_vec([1]/~); - let full2 = from_vec(['r', 'u']/~); + let empty : @list::list<int> = from_vec(~[]); + let full1 = from_vec(~[1]); + let full2 = from_vec(~['r', 'u']); assert is_empty(empty); assert !is_empty(full1); @@ -150,7 +150,7 @@ mod tests { #[test] fn test_from_vec() { - let l = from_vec([0, 1, 2]/~); + let l = from_vec(~[0, 1, 2]); assert (head(l) == 0); @@ -163,14 +163,14 @@ mod tests { #[test] fn test_from_vec_empty() { - let empty : @list::list<int> = from_vec([]/~); + let empty : @list::list<int> = from_vec(~[]); assert (empty == @list::nil::<int>); } #[test] fn test_foldl() { fn add(&&a: uint, &&b: int) -> uint { ret a + (b as uint); } - let l = from_vec([0, 1, 2, 3, 4]/~); + let l = from_vec(~[0, 1, 2, 3, 4]); let empty = @list::nil::<int>; assert (list::foldl(0u, l, add) == 10u); assert (list::foldl(0u, empty, add) == 0u); @@ -181,21 +181,21 @@ mod tests { fn sub(&&a: int, &&b: int) -> int { a - b } - let l = from_vec([1, 2, 3, 4]/~); + let l = from_vec(~[1, 2, 3, 4]); assert (list::foldl(0, l, sub) == -10); } #[test] fn test_find_success() { fn match(&&i: int) -> bool { ret i == 2; } - let l = from_vec([0, 1, 2]/~); + let l = from_vec(~[0, 1, 2]); assert (list::find(l, match) == option::some(2)); } #[test] fn test_find_fail() { fn match(&&_i: int) -> bool { ret false; } - let l = from_vec([0, 1, 2]/~); + let l = from_vec(~[0, 1, 2]); let empty = @list::nil::<int>; assert (list::find(l, match) == option::none::<int>); assert (list::find(empty, match) == option::none::<int>); @@ -203,7 +203,7 @@ mod tests { #[test] fn test_has() { - let l = from_vec([5, 8, 6]/~); + let l = from_vec(~[5, 8, 6]); let empty = @list::nil::<int>; assert (list::has(l, 5)); assert (!list::has(l, 7)); @@ -213,7 +213,7 @@ mod tests { #[test] fn test_len() { - let l = from_vec([0, 1, 2]/~); + let l = from_vec(~[0, 1, 2]); let empty = @list::nil::<int>; assert (list::len(l) == 3u); assert (list::len(empty) == 0u); diff --git a/src/libstd/map.rs b/src/libstd/map.rs index 21fe5163bb5..6163f6154a1 100644 --- a/src/libstd/map.rs +++ b/src/libstd/map.rs @@ -89,7 +89,7 @@ mod chained { type t<K, V> = @{ mut count: uint, - mut chains: [mut chain<K,V>]/~, + mut chains: ~[mut chain<K,V>], hasher: hashfn<K>, eqer: eqfn<K> }; @@ -151,7 +151,7 @@ mod chained { let n_old_chains = vec::len(self.chains); let n_new_chains: uint = uint::next_power_of_two(n_old_chains+1u); let new_chains = chains(n_new_chains); - for self.each_entry {|entry| + for self.each_entry |entry| { let idx = entry.hash % n_new_chains; entry.next = new_chains[idx]; new_chains[idx] = present(entry); @@ -256,17 +256,17 @@ mod chained { } fn each(blk: fn(K,V) -> bool) { - for self.each_entry { |entry| + for self.each_entry |entry| { if !blk(entry.key, copy entry.value) { break; } } } - fn each_key(blk: fn(K) -> bool) { self.each { |k, _v| blk(k)} } + fn each_key(blk: fn(K) -> bool) { self.each(|k, _v| blk(k)) } - fn each_value(blk: fn(V) -> bool) { self.each { |_k, v| blk(v)} } + fn each_value(blk: fn(V) -> bool) { self.each(|_k, v| blk(v)) } } - fn chains<K,V>(nchains: uint) -> [mut chain<K,V>]/~ { + fn chains<K,V>(nchains: uint) -> ~[mut chain<K,V>] { ret vec::to_mut(vec::from_elem(nchains, absent)); } @@ -302,11 +302,11 @@ fn str_hash<V: copy>() -> hashmap<str, V> { #[doc = "Construct a hashmap for boxed string keys"] fn box_str_hash<V: copy>() -> hashmap<@str, V> { - ret hashmap({|x: @str|str::hash(*x)}, {|x,y|str::eq(*x,*y)}); + ret hashmap(|x: @str| str::hash(*x), |x,y| str::eq(*x,*y)); } #[doc = "Construct a hashmap for byte string keys"] -fn bytes_hash<V: copy>() -> hashmap<[u8]/~, V> { +fn bytes_hash<V: copy>() -> hashmap<~[u8], V> { ret hashmap(vec::u8::hash, vec::u8::eq); } @@ -330,9 +330,9 @@ fn set_add<K: const copy>(set: set<K>, key: K) -> bool { #[doc = " Convert a set into a vector. "] -fn vec_from_set<T: copy>(s: set<T>) -> [T]/~ { - let mut v = []/~; - s.each_key() {|k| +fn vec_from_set<T: copy>(s: set<T>) -> ~[T] { + let mut v = ~[]; + do s.each_key() |k| { vec::push(v, k); true }; @@ -341,9 +341,9 @@ fn vec_from_set<T: copy>(s: set<T>) -> [T]/~ { #[doc = "Construct a hashmap from a vector"] fn hash_from_vec<K: const copy, V: copy>(hasher: hashfn<K>, eqer: eqfn<K>, - items: [(K, V)]/~) -> hashmap<K, V> { + items: ~[(K, V)]) -> hashmap<K, V> { let map = hashmap(hasher, eqer); - vec::iter(items) { |item| + do vec::iter(items) |item| { let (key, value) = item; map.insert(key, value); } @@ -351,22 +351,22 @@ fn hash_from_vec<K: const copy, V: copy>(hasher: hashfn<K>, eqer: eqfn<K>, } #[doc = "Construct a hashmap from a vector with string keys"] -fn hash_from_strs<V: copy>(items: [(str, V)]/~) -> hashmap<str, V> { +fn hash_from_strs<V: copy>(items: ~[(str, V)]) -> hashmap<str, V> { hash_from_vec(str::hash, str::eq, items) } #[doc = "Construct a hashmap from a vector with byte keys"] -fn hash_from_bytes<V: copy>(items: [([u8]/~, V)]/~) -> hashmap<[u8]/~, V> { +fn hash_from_bytes<V: copy>(items: ~[(~[u8], V)]) -> hashmap<~[u8], V> { hash_from_vec(vec::u8::hash, vec::u8::eq, items) } #[doc = "Construct a hashmap from a vector with int keys"] -fn hash_from_ints<V: copy>(items: [(int, V)]/~) -> hashmap<int, V> { +fn hash_from_ints<V: copy>(items: ~[(int, V)]) -> hashmap<int, V> { hash_from_vec(int::hash, int::eq, items) } #[doc = "Construct a hashmap from a vector with uint keys"] -fn hash_from_uints<V: copy>(items: [(uint, V)]/~) -> hashmap<uint, V> { +fn hash_from_uints<V: copy>(items: ~[(uint, V)]) -> hashmap<uint, V> { hash_from_vec(uint::hash, uint::eq, items) } @@ -615,11 +615,11 @@ mod tests { #[test] fn test_hash_from_vec() { - let map = map::hash_from_strs([ + let map = map::hash_from_strs(~[ ("a", 1), ("b", 2), ("c", 3) - ]/~); + ]); assert map.size() == 3u; assert map.get("a") == 1; assert map.get("b") == 2; diff --git a/src/libstd/md4.rs b/src/libstd/md4.rs index 0b7c3e34d40..d7bd7ed811a 100644 --- a/src/libstd/md4.rs +++ b/src/libstd/md4.rs @@ -1,11 +1,11 @@ -fn md4(msg: [u8]/~) -> {a: u32, b: u32, c: u32, d: u32} { +fn md4(msg: ~[u8]) -> {a: u32, b: u32, c: u32, d: u32} { // subtle: if orig_len is merely uint, then the code below // which performs shifts by 32 bits or more has undefined // results. let orig_len: u64 = (vec::len(msg) * 8u) as u64; // pad message - let mut msg = vec::append(msg, [0x80u8]/~); + let mut msg = vec::append(msg, ~[0x80u8]); let mut bitlen = orig_len + 8u64; while (bitlen + 64u64) % 512u64 > 0u64 { vec::push(msg, 0u8); @@ -82,13 +82,13 @@ fn md4(msg: [u8]/~) -> {a: u32, b: u32, c: u32, d: u32} { ret {a: a, b: b, c: c, d: d}; } -fn md4_str(msg: [u8]/~) -> str { +fn md4_str(msg: ~[u8]) -> str { let {a, b, c, d} = md4(msg); fn app(a: u32, b: u32, c: u32, d: u32, f: fn(u32)) { f(a); f(b); f(c); f(d); } let mut result = ""; - app(a, b, c, d) {|u| + do app(a, b, c, d) |u| { let mut i = 0u32; while i < 4u32 { let byte = (u >> (i * 8u32)) as u8; diff --git a/src/libstd/net_ip.rs b/src/libstd/net_ip.rs index 64cded848d1..f1e15dec6de 100644 --- a/src/libstd/net_ip.rs +++ b/src/libstd/net_ip.rs @@ -66,7 +66,7 @@ j Fails if the string is not a valid IPv4 address } } fn try_parse_addr(ip: str) -> result::result<ip_addr,parse_addr_err> { - let parts = vec::map(str::split_char(ip, '.'), {|s| + let parts = vec::map(str::split_char(ip, '.'), |s| { alt uint::from_str(s) { some(n) if n <= 255u { n } _ { 256u } diff --git a/src/libstd/net_tcp.rs b/src/libstd/net_tcp.rs index 8c637f0089e..c995e021f5f 100644 --- a/src/libstd/net_tcp.rs +++ b/src/libstd/net_tcp.rs @@ -48,7 +48,7 @@ class tcp_socket { }; let close_data_ptr = ptr::addr_of(close_data); let stream_handle_ptr = (*(self.socket_data)).stream_handle_ptr; - iotask::interact((*(self.socket_data)).iotask) {|loop_ptr| + do iotask::interact((*(self.socket_data)).iotask) |loop_ptr| { log(debug, #fmt("interact dtor for tcp_socket stream %? loop %?", stream_handle_ptr, loop_ptr)); uv::ll::set_data_for_uv_handle(stream_handle_ptr, @@ -72,7 +72,7 @@ class tcp_conn_port { let server_stream_ptr = ptr::addr_of((*conn_data_ptr).server_stream); let stream_closed_po = (*(self.conn_data)).stream_closed_po; let iotask = (*conn_data_ptr).iotask; - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { log(debug, #fmt("dtor for tcp_conn_port loop: %?", loop_ptr)); uv::ll::close(server_stream_ptr, tcp_nl_close_cb); @@ -114,7 +114,7 @@ fn connect(input_ip: ip::ip_addr, port: uint, closed_signal_ch: comm::chan(closed_signal_po) }; let conn_data_ptr = ptr::addr_of(conn_data); - let reader_po = comm::port::<result::result<[u8]/~, tcp_err_data>>(); + let reader_po = comm::port::<result::result<~[u8], tcp_err_data>>(); let stream_handle_ptr = malloc_uv_tcp_t(); *(stream_handle_ptr as *mut uv::ll::uv_tcp_t) = uv::ll::tcp_t(); let socket_data = @{ @@ -131,7 +131,7 @@ fn connect(input_ip: ip::ip_addr, port: uint, // we can send into the interact cb to be handled in libuv.. log(debug, #fmt("stream_handle_ptr outside interact %?", stream_handle_ptr)); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { log(debug, "in interact cb for tcp client connect.."); log(debug, #fmt("stream_handle_ptr in interact %?", stream_handle_ptr)); @@ -215,7 +215,7 @@ This value must remain valid for the duration of the `write` call A `result` object with a `nil` value as the `ok` variant, or a `tcp_err_data` value as the `err` variant "] -fn write(sock: tcp_socket, raw_write_data: [u8]/~) +fn write(sock: tcp_socket, raw_write_data: ~[u8]) -> result::result<(), tcp_err_data> unsafe { let socket_data_ptr = ptr::addr_of(*(sock.socket_data)); write_common_impl(socket_data_ptr, raw_write_data) @@ -248,10 +248,10 @@ A `future` value that, once the `write` operation completes, resolves to a `result` object with a `nil` value as the `ok` variant, or a `tcp_err_data` value as the `err` variant "] -fn write_future(sock: tcp_socket, raw_write_data: [u8]/~) +fn write_future(sock: tcp_socket, raw_write_data: ~[u8]) -> future<result::result<(), tcp_err_data>> unsafe { let socket_data_ptr = ptr::addr_of(*(sock.socket_data)); - future_spawn {|| + do future_spawn || { write_common_impl(socket_data_ptr, raw_write_data) } } @@ -271,7 +271,7 @@ on) from until `read_stop` is called, or a `tcp_err_data` record "] fn read_start(sock: tcp_socket) -> result::result<comm::port< - result::result<[u8]/~, tcp_err_data>>, tcp_err_data> unsafe { + result::result<~[u8], tcp_err_data>>, tcp_err_data> unsafe { let socket_data = ptr::addr_of(*(sock.socket_data)); read_start_common_impl(socket_data) } @@ -304,7 +304,7 @@ data received. read attempt. Pass `0u` to wait indefinitely "] fn read(sock: tcp_socket, timeout_msecs: uint) - -> result::result<[u8]/~,tcp_err_data> { + -> result::result<~[u8],tcp_err_data> { let socket_data = ptr::addr_of(*(sock.socket_data)); read_common_impl(socket_data, timeout_msecs) } @@ -338,9 +338,9 @@ Otherwise, use the blocking `tcp::read` function instead. read attempt. Pass `0u` to wait indefinitely "] fn read_future(sock: tcp_socket, timeout_msecs: uint) - -> future<result::result<[u8]/~,tcp_err_data>> { + -> future<result::result<~[u8],tcp_err_data>> { let socket_data = ptr::addr_of(*(sock.socket_data)); - future_spawn {|| + do future_spawn || { read_common_impl(socket_data, timeout_msecs) } } @@ -387,7 +387,7 @@ fn new_listener(host_ip: ip::ip_addr, port: uint, backlog: uint, let setup_po = comm::port::<option<tcp_err_data>>(); let setup_ch = comm::chan(setup_po); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { let tcp_addr = ipv4_ip_addr_to_sockaddr_in(host_ip, port); alt uv::ll::tcp_init(loop_ptr, server_stream_ptr) { @@ -488,7 +488,7 @@ fn conn_recv_spawn(server_port: tcp_conn_port, let new_conn_po = (*(server_port.conn_data)).new_conn_po; let iotask = (*(server_port.conn_data)).iotask; let new_conn_result = comm::recv(new_conn_po); - task::spawn {|| + do task::spawn || { let sock_create_result = alt new_conn_result { ok(client_stream_ptr) { conn_port_new_tcp_socket(client_stream_ptr, iotask) @@ -591,7 +591,7 @@ fn accept(new_conn: tcp_new_connection) new_tcp_conn(server_handle_ptr) { let server_data_ptr = uv::ll::get_data_for_uv_handle( server_handle_ptr) as *tcp_listen_fc_data; - let reader_po = comm::port::<result::result<[u8]/~, tcp_err_data>>(); + let reader_po = comm::port::<result::result<~[u8], tcp_err_data>>(); let iotask = (*server_data_ptr).iotask; let stream_handle_ptr = malloc_uv_tcp_t(); *(stream_handle_ptr as *mut uv::ll::uv_tcp_t) = uv::ll::tcp_t(); @@ -709,7 +709,7 @@ fn listen_for_conn(host_ip: ip::ip_addr, port: uint, backlog: uint, let setup_po = comm::port::<option<tcp_err_data>>(); let setup_ch = comm::chan(setup_po); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { let tcp_addr = ipv4_ip_addr_to_sockaddr_in(host_ip, port); alt uv::ll::tcp_init(loop_ptr, server_stream_ptr) { @@ -755,7 +755,7 @@ fn listen_for_conn(host_ip: ip::ip_addr, port: uint, backlog: uint, none { on_establish_cb(kill_ch); let kill_result = comm::recv(kill_po); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { log(debug, #fmt("tcp::listen post-kill recv hl interact %?", loop_ptr)); (*server_data_ptr).active = false; @@ -791,7 +791,7 @@ Convenience methods extending `net::tcp::tcp_socket` "] impl sock_methods for tcp_socket { fn read_start() -> result::result<comm::port< - result::result<[u8]/~, tcp_err_data>>, tcp_err_data> { + result::result<~[u8], tcp_err_data>>, tcp_err_data> { read_start(self) } fn read_stop() -> @@ -799,18 +799,18 @@ impl sock_methods for tcp_socket { read_stop(self) } fn read(timeout_msecs: uint) -> - result::result<[u8]/~, tcp_err_data> { + result::result<~[u8], tcp_err_data> { read(self, timeout_msecs) } fn read_future(timeout_msecs: uint) -> - future<result::result<[u8]/~, tcp_err_data>> { + future<result::result<~[u8], tcp_err_data>> { read_future(self, timeout_msecs) } - fn write(raw_write_data: [u8]/~) + fn write(raw_write_data: ~[u8]) -> result::result<(), tcp_err_data> { write(self, raw_write_data) } - fn write_future(raw_write_data: [u8]/~) + fn write_future(raw_write_data: ~[u8]) -> future<result::result<(), tcp_err_data>> { write_future(self, raw_write_data) } @@ -819,7 +819,7 @@ impl sock_methods for tcp_socket { // shared implementation for tcp::read fn read_common_impl(socket_data: *tcp_socket_data, timeout_msecs: uint) - -> result::result<[u8]/~,tcp_err_data> unsafe { + -> result::result<~[u8],tcp_err_data> unsafe { log(debug, "starting tcp::read"); let iotask = (*socket_data).iotask; let rs_result = read_start_common_impl(socket_data); @@ -861,7 +861,7 @@ fn read_stop_common_impl(socket_data: *tcp_socket_data) -> let stream_handle_ptr = (*socket_data).stream_handle_ptr; let stop_po = comm::port::<option<tcp_err_data>>(); let stop_ch = comm::chan(stop_po); - iotask::interact((*socket_data).iotask) {|loop_ptr| + do iotask::interact((*socket_data).iotask) |loop_ptr| { log(debug, "in interact cb for tcp::read_stop"); alt uv::ll::read_stop(stream_handle_ptr as *uv::ll::uv_stream_t) { 0i32 { @@ -888,12 +888,12 @@ fn read_stop_common_impl(socket_data: *tcp_socket_data) -> // shared impl for read_start fn read_start_common_impl(socket_data: *tcp_socket_data) -> result::result<comm::port< - result::result<[u8]/~, tcp_err_data>>, tcp_err_data> unsafe { + result::result<~[u8], tcp_err_data>>, tcp_err_data> unsafe { let stream_handle_ptr = (*socket_data).stream_handle_ptr; let start_po = comm::port::<option<uv::ll::uv_err_data>>(); let start_ch = comm::chan(start_po); log(debug, "in tcp::read_start before interact loop"); - iotask::interact((*socket_data).iotask) {|loop_ptr| + do iotask::interact((*socket_data).iotask) |loop_ptr| { log(debug, #fmt("in tcp::read_start interact cb %?", loop_ptr)); alt uv::ll::read_start(stream_handle_ptr as *uv::ll::uv_stream_t, on_alloc_cb, @@ -921,21 +921,21 @@ fn read_start_common_impl(socket_data: *tcp_socket_data) // shared implementation used by write and write_future fn write_common_impl(socket_data_ptr: *tcp_socket_data, - raw_write_data: [u8]/~) + raw_write_data: ~[u8]) -> result::result<(), tcp_err_data> unsafe { let write_req_ptr = ptr::addr_of((*socket_data_ptr).write_req); let stream_handle_ptr = (*socket_data_ptr).stream_handle_ptr; - let write_buf_vec = [ uv::ll::buf_init( + let write_buf_vec = ~[ uv::ll::buf_init( vec::unsafe::to_ptr(raw_write_data), - vec::len(raw_write_data)) ]/~; + vec::len(raw_write_data)) ]; let write_buf_vec_ptr = ptr::addr_of(write_buf_vec); let result_po = comm::port::<tcp_write_result>(); let write_data = { result_ch: comm::chan(result_po) }; let write_data_ptr = ptr::addr_of(write_data); - iotask::interact((*socket_data_ptr).iotask) {|loop_ptr| + do iotask::interact((*socket_data_ptr).iotask) |loop_ptr| { log(debug, #fmt("in interact cb for tcp::write %?", loop_ptr)); alt uv::ll::write(write_req_ptr, stream_handle_ptr, @@ -969,7 +969,7 @@ fn conn_port_new_tcp_socket( iotask: iotask) -> result::result<tcp_socket,tcp_err_data> unsafe { // tcp_nl_on_connection_cb - let reader_po = comm::port::<result::result<[u8]/~, tcp_err_data>>(); + let reader_po = comm::port::<result::result<~[u8], tcp_err_data>>(); let client_socket_data = @{ reader_po : reader_po, reader_ch : comm::chan(reader_po), @@ -979,8 +979,8 @@ fn conn_port_new_tcp_socket( iotask : iotask }; let client_socket_data_ptr = ptr::addr_of(*client_socket_data); - comm::listen {|cont_ch| - iotask::interact(iotask) {|loop_ptr| + do comm::listen |cont_ch| { + do iotask::interact(iotask) |loop_ptr| { log(debug, #fmt("in interact cb 4 conn_port_new_tcp.. loop %?", loop_ptr)); uv::ll::set_data_for_uv_handle(stream_handle_ptr, @@ -1121,7 +1121,7 @@ enum tcp_read_start_result { } enum tcp_read_result { - tcp_read_data([u8]/~), + tcp_read_data(~[u8]), tcp_read_done, tcp_read_err(tcp_err_data) } @@ -1265,8 +1265,8 @@ enum conn_attempt { } type tcp_socket_data = { - reader_po: comm::port<result::result<[u8]/~, tcp_err_data>>, - reader_ch: comm::chan<result::result<[u8]/~, tcp_err_data>>, + reader_po: comm::port<result::result<~[u8], tcp_err_data>>, + reader_ch: comm::chan<result::result<~[u8], tcp_err_data>>, stream_handle_ptr: *uv::ll::uv_tcp_t, connect_req: uv::ll::uv_connect_t, write_req: uv::ll::uv_write_t, @@ -1332,8 +1332,8 @@ mod test { let cont_po = comm::port::<()>(); let cont_ch = comm::chan(cont_po); // server - task::spawn_sched(task::manual_threads(1u)) {|| - let actual_req = comm::listen {|server_ch| + do task::spawn_sched(task::manual_threads(1u)) || { + let actual_req = do comm::listen |server_ch| { run_tcp_test_server( server_ip, server_port, @@ -1347,7 +1347,7 @@ mod test { comm::recv(cont_po); // client log(debug, "server started, firing up client.."); - let actual_resp = comm::listen {|client_ch| + let actual_resp = do comm::listen |client_ch| { run_tcp_test_client( server_ip, server_port, @@ -1376,8 +1376,8 @@ mod test { let cont_po = comm::port::<()>(); let cont_ch = comm::chan(cont_po); // server - task::spawn_sched(task::manual_threads(1u)) {|| - let actual_req = comm::listen {|server_ch| + do task::spawn_sched(task::manual_threads(1u)) || { + let actual_req = do comm::listen |server_ch| { run_tcp_test_server_listener( server_ip, server_port, @@ -1391,7 +1391,7 @@ mod test { comm::recv(cont_po); // client log(debug, "server started, firing up client.."); - let actual_resp = comm::listen {|client_ch| + let actual_resp = do comm::listen |client_ch| { run_tcp_test_client( server_ip, server_port, @@ -1413,23 +1413,21 @@ mod test { cont_ch: comm::chan<()>, iotask: iotask) -> str { - task::spawn_sched(task::manual_threads(1u)) {|| + do task::spawn_sched(task::manual_threads(1u)) || { let server_ip_addr = ip::v4::parse_addr(server_ip); let listen_result = listen_for_conn(server_ip_addr, server_port, 128u, - iotask, - // on_establish_cb -- called when listener is set up - {|kill_ch| + iotask, |kill_ch| { + // on_establish_cb -- called when listener is set up log(debug, #fmt("establish_cb %?", kill_ch)); comm::send(cont_ch, ()); - }, + }, |new_conn, kill_ch| { // risky to run this on the loop, but some users // will want the POWER - {|new_conn, kill_ch| log(debug, "SERVER: new connection!"); - comm::listen {|cont_ch| - task::spawn_sched(task::manual_threads(1u)) {|| + do comm::listen |cont_ch| { + do task::spawn_sched(task::manual_threads(1u)) || { log(debug, "SERVER: starting worker for new req"); let accept_result = accept(new_conn); @@ -1492,7 +1490,7 @@ mod test { cont_ch: comm::chan<()>, iotask: iotask) -> str { - task::spawn_sched(task::manual_threads(1u)) {|| + do task::spawn_sched(task::manual_threads(1u)) || { let server_ip_addr = ip::v4::parse_addr(server_ip); let new_listener_result = new_listener(server_ip_addr, server_port, 128u, iotask); @@ -1571,7 +1569,7 @@ mod test { } } - fn tcp_write_single(sock: tcp_socket, val: [u8]/~) { + fn tcp_write_single(sock: tcp_socket, val: ~[u8]) { let write_result_future = sock.write_future(val); let write_result = write_result_future.get(); if result::is_err(write_result) { diff --git a/src/libstd/par.rs b/src/libstd/par.rs index 83ccdb5c3aa..3447d55827a 100644 --- a/src/libstd/par.rs +++ b/src/libstd/par.rs @@ -20,36 +20,36 @@ return the intermediate results. This is used to build most of the other parallel vector functions, like map or alli."] fn map_slices<A: copy send, B: copy send>( - xs: [A]/~, - f: fn() -> fn~(uint, [A]/&) -> B) - -> [B]/~ { + xs: ~[A], + f: fn() -> fn~(uint, v: &[A]) -> B) + -> ~[B] { let len = xs.len(); if len < min_granularity { log(info, "small slice"); // This is a small vector, fall back on the normal map. - [f()(0u, xs)]/~ + ~[f()(0u, xs)] } else { let num_tasks = uint::min(max_tasks, len / min_granularity); let items_per_task = len / num_tasks; - let mut futures = []/~; + let mut futures = ~[]; let mut base = 0u; log(info, "spawning tasks"); while base < len { let end = uint::min(len, base + items_per_task); // FIXME: why is the ::<A, ()> annotation required here? (#2617) - vec::unpack_slice::<A, ()>(xs) {|p, _len| + do vec::unpack_slice::<A, ()>(xs) |p, _len| { let f = f(); - let f = future_spawn() {|copy base| + let f = do future_spawn() |copy base| { unsafe { let len = end - base; let slice = (ptr::offset(p, base), len * sys::size_of::<A>()); log(info, #fmt("pre-slice: %?", (base, slice))); - let slice : [A]/& = + let slice : &[A] = unsafe::reinterpret_cast(slice); log(info, #fmt("slice: %?", (base, vec::len(slice), end - base))); @@ -66,7 +66,7 @@ fn map_slices<A: copy send, B: copy send>( log(info, #fmt("num_tasks: %?", (num_tasks, futures.len()))); assert(num_tasks == futures.len()); - let r = futures.map() {|ys| + let r = do futures.map() |ys| { ys.get() }; assert(r.len() == futures.len()); @@ -75,24 +75,24 @@ fn map_slices<A: copy send, B: copy send>( } #[doc="A parallel version of map."] -fn map<A: copy send, B: copy send>(xs: [A]/~, f: fn~(A) -> B) -> [B]/~ { - vec::concat(map_slices(xs) {|| - fn~(_base: uint, slice : [A]/&, copy f) -> [B]/~ { +fn map<A: copy send, B: copy send>(xs: ~[A], f: fn~(A) -> B) -> ~[B] { + vec::concat(map_slices(xs, || { + fn~(_base: uint, slice : &[A], copy f) -> ~[B] { vec::map(slice, f) } - }) + })) } #[doc="A parallel version of mapi."] -fn mapi<A: copy send, B: copy send>(xs: [A]/~, - f: fn~(uint, A) -> B) -> [B]/~ { - let slices = map_slices(xs) {|| - fn~(base: uint, slice : [A]/&, copy f) -> [B]/~ { - vec::mapi(slice) {|i, x| +fn mapi<A: copy send, B: copy send>(xs: ~[A], + f: fn~(uint, A) -> B) -> ~[B] { + let slices = map_slices(xs, || { + fn~(base: uint, slice : &[A], copy f) -> ~[B] { + vec::mapi(slice, |i, x| { f(i + base, x) - } + }) } - }; + }); let r = vec::concat(slices); log(info, (r.len(), xs.len())); assert(r.len() == xs.len()); @@ -104,15 +104,15 @@ fn mapi<A: copy send, B: copy send>(xs: [A]/~, In this case, f is a function that creates functions to run over the inner elements. This is to skirt the need for copy constructors."] fn mapi_factory<A: copy send, B: copy send>( - xs: [A]/~, f: fn() -> fn~(uint, A) -> B) -> [B]/~ { - let slices = map_slices(xs) {|| + xs: ~[A], f: fn() -> fn~(uint, A) -> B) -> ~[B] { + let slices = map_slices(xs, || { let f = f(); - fn~(base: uint, slice : [A]/&, move f) -> [B]/~ { - vec::mapi(slice) {|i, x| + fn~(base: uint, slice : &[A], move f) -> ~[B] { + vec::mapi(slice, |i, x| { f(i + base, x) - } + }) } - }; + }); let r = vec::concat(slices); log(info, (r.len(), xs.len())); assert(r.len() == xs.len()); @@ -120,21 +120,21 @@ fn mapi_factory<A: copy send, B: copy send>( } #[doc="Returns true if the function holds for all elements in the vector."] -fn alli<A: copy send>(xs: [A]/~, f: fn~(uint, A) -> bool) -> bool { - vec::all(map_slices(xs) {|| - fn~(base: uint, slice : [A]/&, copy f) -> bool { - vec::alli(slice) {|i, x| +fn alli<A: copy send>(xs: ~[A], f: fn~(uint, A) -> bool) -> bool { + do vec::all(map_slices(xs, || { + fn~(base: uint, slice : &[A], copy f) -> bool { + vec::alli(slice, |i, x| { f(i + base, x) - } + }) } - }) {|x| x } + })) |x| { x } } #[doc="Returns true if the function holds for any elements in the vector."] -fn any<A: copy send>(xs: [A]/~, f: fn~(A) -> bool) -> bool { - vec::any(map_slices(xs) {|| - fn~(_base : uint, slice: [A]/&, copy f) -> bool { +fn any<A: copy send>(xs: ~[A], f: fn~(A) -> bool) -> bool { + do vec::any(map_slices(xs, || { + fn~(_base : uint, slice: &[A], copy f) -> bool { vec::any(slice, f) } - }) {|x| x } + })) |x| { x } } diff --git a/src/libstd/rope.rs b/src/libstd/rope.rs index 3799e7cdd28..a7d54daba9a 100644 --- a/src/libstd/rope.rs +++ b/src/libstd/rope.rs @@ -97,7 +97,7 @@ Add one char to the end of the rope * this function executes in near-constant time "] fn append_char(rope: rope, char: char) -> rope { - ret append_str(rope, @str::from_chars([char]/~)); + ret append_str(rope, @str::from_chars(~[char])); } #[doc = " @@ -118,7 +118,7 @@ Add one char to the beginning of the rope * this function executes in near-constant time "] fn prepend_char(rope: rope, char: char) -> rope { - ret prepend_str(rope, @str::from_chars([char]/~)); + ret prepend_str(rope, @str::from_chars(~[char])); } #[doc = " @@ -153,18 +153,18 @@ If the ropes are balanced initially and have the same height, the resulting rope remains balanced. However, this function does not take any further measure to ensure that the result is balanced. "] -fn concat(v: [rope]/~) -> rope { +fn concat(v: ~[rope]) -> rope { //Copy `v` into a mut vector let mut len = vec::len(v); if len == 0u { ret node::empty; } let ropes = vec::to_mut(vec::from_elem(len, v[0])); - for uint::range(1u, len) {|i| + for uint::range(1u, len) |i| { ropes[i] = v[i]; } //Merge progresively while len > 1u { - for uint::range(0u, len/2u) {|i| + for uint::range(0u, len/2u) |i| { ropes[i] = append_rope(ropes[2u*i], ropes[2u*i+1u]); } if len%2u != 0u { @@ -397,7 +397,7 @@ Loop through a rope, char by char, until the end. * it - A block to execute with each consecutive character of the rope. "] fn iter_chars(rope: rope, it: fn(char)) { - loop_chars(rope) {|x| + do loop_chars(rope) |x| { it(x); true }; @@ -663,7 +663,7 @@ mod node { * byte_start - The byte offset where the slice of `str` starts. * byte_len - The number of bytes from `str` to use. * char_len - The number of chars in `str` in the interval - [byte_start, byte_start+byte_len( + [byte_start, byte_start+byte_len) # Safety notes @@ -752,7 +752,7 @@ mod node { * forest - The forest. This vector is progressively rewritten during execution and should be discarded as meaningless afterwards. "] - fn tree_from_forest_destructive(forest: [mut @node]/~) -> @node { + fn tree_from_forest_destructive(forest: ~[mut @node]) -> @node { let mut i; let mut len = vec::len(forest); while len > 1u { @@ -805,7 +805,7 @@ mod node { option::none { break; } option::some(x) { //TODO: Replace with memcpy or something similar - let mut local_buf: [u8]/~ = + let mut local_buf: ~[u8] = unsafe::reinterpret_cast(*x.content); let mut i = x.byte_offset; while i < x.byte_len { @@ -859,7 +859,7 @@ mod node { fn bal(node: @node) -> option<@node> { if height(node) < hint_max_node_height { ret option::none; } //1. Gather all leaves as a forest - let mut forest = [mut]/~; + let mut forest = ~[mut]; let it = leaf_iterator::start(node); loop { alt (leaf_iterator::next(it)) { @@ -1038,11 +1038,11 @@ mod node { } fn loop_chars(node: @node, it: fn(char) -> bool) -> bool { - ret loop_leaves(node, {|leaf| + ret loop_leaves(node,|leaf| { str::all_between(*leaf.content, leaf.byte_offset, leaf.byte_len, it) - }) + }); } #[doc =" @@ -1113,12 +1113,12 @@ mod node { mod leaf_iterator { type t = { - stack: [mut @node]/~, + stack: ~[mut @node], mut stackpos: int }; fn empty() -> t { - let stack : [mut @node]/~ = [mut]/~; + let stack : ~[mut @node] = ~[mut]; ret {stack: stack, mut stackpos: -1} } @@ -1350,19 +1350,19 @@ mod tests { fn char_at1() { //Generate a large rope let mut r = of_str(@ "123456789"); - for uint::range(0u, 10u){|_i| + for uint::range(0u, 10u) |_i| { r = append_rope(r, r); } //Copy it in the slowest possible way let mut r2 = empty(); - for uint::range(0u, char_len(r)){|i| + for uint::range(0u, char_len(r)) |i| { r2 = append_char(r2, char_at(r, i)); } assert eq(r, r2); let mut r3 = empty(); - for uint::range(0u, char_len(r)){|i| + for uint::range(0u, char_len(r)) |i| { r3 = prepend_char(r3, char_at(r, char_len(r) - i - 1u)); } assert eq(r, r3); @@ -1383,7 +1383,7 @@ mod tests { //Generate a reasonable rope let chunk = of_str(@ "123456789"); let mut r = empty(); - for uint::range(0u, 10u){|_i| + for uint::range(0u, 10u) |_i| { r = append_rope(r, chunk); } diff --git a/src/libstd/serialization.rs b/src/libstd/serialization.rs index 5c1d7f00b62..a5d56bda0ef 100644 --- a/src/libstd/serialization.rs +++ b/src/libstd/serialization.rs @@ -83,32 +83,32 @@ iface deserializer { // // In some cases, these should eventually be coded as traits. -fn emit_from_vec<S: serializer, T>(s: S, v: [T]/~, f: fn(T)) { - s.emit_vec(vec::len(v)) {|| - vec::iteri(v) {|i,e| - s.emit_vec_elt(i) {|| +fn emit_from_vec<S: serializer, T>(s: S, v: ~[T], f: fn(T)) { + do s.emit_vec(vec::len(v)) || { + do vec::iteri(v) |i,e| { + do s.emit_vec_elt(i) || { f(e) } } } } -fn read_to_vec<D: deserializer, T: copy>(d: D, f: fn() -> T) -> [T]/~ { - d.read_vec {|len| - vec::from_fn(len) {|i| - d.read_vec_elt(i) {|| f() } +fn read_to_vec<D: deserializer, T: copy>(d: D, f: fn() -> T) -> ~[T] { + do d.read_vec |len| { + do vec::from_fn(len) |i| { + d.read_vec_elt(i, || f()) } } } impl serializer_helpers<S: serializer> for S { - fn emit_from_vec<T>(v: [T]/~, f: fn(T)) { + fn emit_from_vec<T>(v: ~[T], f: fn(T)) { emit_from_vec(self, v, f) } } impl deserializer_helpers<D: deserializer> for D { - fn read_to_vec<T: copy>(f: fn() -> T) -> [T]/~ { + fn read_to_vec<T: copy>(f: fn() -> T) -> ~[T] { read_to_vec(self, f) } } @@ -234,16 +234,16 @@ fn deserialize_bool<D: deserializer>(d: D) -> bool { } fn serialize_option<S: serializer,T>(s: S, v: option<T>, st: fn(T)) { - s.emit_enum("option") {|| + do s.emit_enum("option") || { alt v { none { - s.emit_enum_variant("none", 0u, 0u) {|| + do s.emit_enum_variant("none", 0u, 0u) || { } } some(v) { - s.emit_enum_variant("some", 1u, 1u) {|| - s.emit_enum_variant_arg(0u) {|| + do s.emit_enum_variant("some", 1u, 1u) || { + do s.emit_enum_variant_arg(0u) || { st(v) } } @@ -254,16 +254,16 @@ fn serialize_option<S: serializer,T>(s: S, v: option<T>, st: fn(T)) { fn deserialize_option<D: deserializer,T: copy>(d: D, st: fn() -> T) -> option<T> { - d.read_enum("option") {|| - d.read_enum_variant {|i| + do d.read_enum("option") || { + do d.read_enum_variant |i| { alt check i { 0u { // none none } 1u { // some(v) - some(d.read_enum_variant_arg(0u) {|| + some(d.read_enum_variant_arg(0u, || { st() - }) + })) } } } diff --git a/src/libstd/sha1.rs b/src/libstd/sha1.rs index e4007341aed..6aed645283d 100644 --- a/src/libstd/sha1.rs +++ b/src/libstd/sha1.rs @@ -22,14 +22,14 @@ export sha1; #[doc = "The SHA-1 interface"] iface sha1 { #[doc = "Provide message input as bytes"] - fn input([u8]/~); + fn input(~[u8]); #[doc = "Provide message input as string"] fn input_str(str); #[doc = " Read the digest as a vector of 20 bytes. After calling this no further input may be provided until reset is called. "] - fn result() -> [u8]/~; + fn result() -> ~[u8]; #[doc = " Read the digest as a hex string. After calling this no further input may be provided until reset is called. @@ -52,18 +52,18 @@ const k3: u32 = 0xCA62C1D6u32; #[doc = "Construct a `sha` object"] fn sha1() -> sha1 { type sha1state = - {h: [mut u32]/~, + {h: ~[mut u32], mut len_low: u32, mut len_high: u32, - msg_block: [mut u8]/~, + msg_block: ~[mut u8], mut msg_block_idx: uint, mut computed: bool, - work_buf: @[mut u32]/~}; + work_buf: @~[mut u32]}; - fn add_input(st: sha1state, msg: [u8]/~) { + fn add_input(st: sha1state, msg: ~[u8]) { /* FIXME: Should be typestate precondition (#2345) */ assert (!st.computed); - for vec::each(msg) {|element| + for vec::each(msg) |element| { st.msg_block[st.msg_block_idx] = element; st.msg_block_idx += 1u; st.len_low += 8u32; @@ -157,15 +157,15 @@ fn sha1() -> sha1 { fn circular_shift(bits: u32, word: u32) -> u32 { ret word << bits | word >> 32u32 - bits; } - fn mk_result(st: sha1state) -> [u8]/~ { + fn mk_result(st: sha1state) -> ~[u8] { if !st.computed { pad_msg(st); st.computed = true; } - let mut rs: [u8]/~ = []/~; - for vec::each(st.h) {|hpart| + let mut rs: ~[u8] = ~[]; + for vec::each(st.h) |hpart| { let a = (hpart >> 24u32 & 0xFFu32) as u8; let b = (hpart >> 16u32 & 0xFFu32) as u8; let c = (hpart >> 8u32 & 0xFFu32) as u8; let d = (hpart & 0xFFu32) as u8; - rs = vec::append(rs, [a, b, c, d]/~); + rs = vec::append(rs, ~[a, b, c, d]); } ret rs; } @@ -231,13 +231,13 @@ fn sha1() -> sha1 { self.h[4] = 0xC3D2E1F0u32; self.computed = false; } - fn input(msg: [u8]/~) { add_input(self, msg); } + fn input(msg: ~[u8]) { add_input(self, msg); } fn input_str(msg: str) { add_input(self, str::bytes(msg)); } - fn result() -> [u8]/~ { ret mk_result(self); } + fn result() -> ~[u8] { ret mk_result(self); } fn result_str() -> str { let r = mk_result(self); let mut s = ""; - for vec::each(r) {|b| s += uint::to_str(b as uint, 16u); } + for vec::each(r) |b| { s += uint::to_str(b as uint, 16u); } ret s; } } @@ -260,7 +260,7 @@ mod tests { #[test] fn test() unsafe { - type test = {input: str, output: [u8]/~}; + type test = {input: str, output: ~[u8]}; fn a_million_letter_a() -> str { let mut i = 0; @@ -270,49 +270,49 @@ mod tests { } // Test messages from FIPS 180-1 - let fips_180_1_tests: [test]/~ = - [{input: "abc", + let fips_180_1_tests: ~[test] = + ~[{input: "abc", output: - [0xA9u8, 0x99u8, 0x3Eu8, 0x36u8, + ~[0xA9u8, 0x99u8, 0x3Eu8, 0x36u8, 0x47u8, 0x06u8, 0x81u8, 0x6Au8, 0xBAu8, 0x3Eu8, 0x25u8, 0x71u8, 0x78u8, 0x50u8, 0xC2u8, 0x6Cu8, - 0x9Cu8, 0xD0u8, 0xD8u8, 0x9Du8]/~}, + 0x9Cu8, 0xD0u8, 0xD8u8, 0x9Du8]}, {input: "abcdbcdecdefdefgefghfghighij" + "hijkijkljklmklmnlmnomnopnopq", output: - [0x84u8, 0x98u8, 0x3Eu8, 0x44u8, + ~[0x84u8, 0x98u8, 0x3Eu8, 0x44u8, 0x1Cu8, 0x3Bu8, 0xD2u8, 0x6Eu8, 0xBAu8, 0xAEu8, 0x4Au8, 0xA1u8, 0xF9u8, 0x51u8, 0x29u8, 0xE5u8, - 0xE5u8, 0x46u8, 0x70u8, 0xF1u8]/~}, + 0xE5u8, 0x46u8, 0x70u8, 0xF1u8]}, {input: a_million_letter_a(), output: - [0x34u8, 0xAAu8, 0x97u8, 0x3Cu8, + ~[0x34u8, 0xAAu8, 0x97u8, 0x3Cu8, 0xD4u8, 0xC4u8, 0xDAu8, 0xA4u8, 0xF6u8, 0x1Eu8, 0xEBu8, 0x2Bu8, 0xDBu8, 0xADu8, 0x27u8, 0x31u8, - 0x65u8, 0x34u8, 0x01u8, 0x6Fu8]/~}]/~; + 0x65u8, 0x34u8, 0x01u8, 0x6Fu8]}]; // Examples from wikipedia - let wikipedia_tests: [test]/~ = - [{input: "The quick brown fox jumps over the lazy dog", + let wikipedia_tests: ~[test] = + ~[{input: "The quick brown fox jumps over the lazy dog", output: - [0x2fu8, 0xd4u8, 0xe1u8, 0xc6u8, + ~[0x2fu8, 0xd4u8, 0xe1u8, 0xc6u8, 0x7au8, 0x2du8, 0x28u8, 0xfcu8, 0xedu8, 0x84u8, 0x9eu8, 0xe1u8, 0xbbu8, 0x76u8, 0xe7u8, 0x39u8, - 0x1bu8, 0x93u8, 0xebu8, 0x12u8]/~}, + 0x1bu8, 0x93u8, 0xebu8, 0x12u8]}, {input: "The quick brown fox jumps over the lazy cog", output: - [0xdeu8, 0x9fu8, 0x2cu8, 0x7fu8, + ~[0xdeu8, 0x9fu8, 0x2cu8, 0x7fu8, 0xd2u8, 0x5eu8, 0x1bu8, 0x3au8, 0xfau8, 0xd3u8, 0xe8u8, 0x5au8, 0x0bu8, 0xd1u8, 0x7du8, 0x9bu8, - 0x10u8, 0x0du8, 0xb4u8, 0xb3u8]/~}]/~; + 0x10u8, 0x0du8, 0xb4u8, 0xb3u8]}]; let tests = fips_180_1_tests + wikipedia_tests; - fn check_vec_eq(v0: [u8]/~, v1: [u8]/~) { + fn check_vec_eq(v0: ~[u8], v1: ~[u8]) { assert (vec::len::<u8>(v0) == vec::len::<u8>(v1)); let len = vec::len::<u8>(v0); let mut i = 0u; @@ -326,7 +326,7 @@ mod tests { // Test that it works when accepting the message all at once let sh = sha1::sha1(); - for vec::each(tests) {|t| + for vec::each(tests) |t| { sh.input_str(t.input); let out = sh.result(); check_vec_eq(t.output, out); @@ -335,7 +335,7 @@ mod tests { // Test that it works when accepting the message in pieces - for vec::each(tests) {|t| + for vec::each(tests) |t| { let len = str::len(t.input); let mut left = len; while left > 0u { diff --git a/src/libstd/smallintmap.rs b/src/libstd/smallintmap.rs index 3369e95f1a3..6583d97908d 100644 --- a/src/libstd/smallintmap.rs +++ b/src/libstd/smallintmap.rs @@ -58,7 +58,7 @@ fn contains_key<T: copy>(self: smallintmap<T>, key: uint) -> bool { impl <V: copy> of map::map<uint, V> for smallintmap<V> { fn size() -> uint { let mut sz = 0u; - for self.v.each {|item| + for self.v.each |item| { alt item { some(_) { sz += 1u; } _ {} } } sz @@ -102,7 +102,7 @@ impl <V: copy> of map::map<uint, V> for smallintmap<V> { } } fn each_value(it: fn(V) -> bool) { - self.each {|_i, v| it(v)} + self.each(|_i, v| it(v)); } } diff --git a/src/libstd/sort.rs b/src/libstd/sort.rs index 4bf26afc2ae..7de8f0cfab8 100644 --- a/src/libstd/sort.rs +++ b/src/libstd/sort.rs @@ -15,19 +15,19 @@ Merge sort. Returns a new vector containing the sorted list. Has worst case O(n log n) performance, best case O(n), but is not space efficient. This is a stable sort. "] -fn merge_sort<T: copy>(le: le<T>, v: [const T]/~) -> [T]/~ { +fn merge_sort<T: copy>(le: le<T>, v: ~[const T]) -> ~[T] { type slice = (uint, uint); ret merge_sort_(le, v, (0u, len(v))); - fn merge_sort_<T: copy>(le: le<T>, v: [const T]/~, slice: slice) - -> [T]/~ { + fn merge_sort_<T: copy>(le: le<T>, v: ~[const T], slice: slice) + -> ~[T] { let begin = tuple::first(slice); let end = tuple::second(slice); let v_len = end - begin; - if v_len == 0u { ret []/~; } - if v_len == 1u { ret [v[begin]]/~; } + if v_len == 0u { ret ~[]; } + if v_len == 1u { ret ~[v[begin]]; } let mid = v_len / 2u + begin; let a = (begin, mid); @@ -35,8 +35,8 @@ fn merge_sort<T: copy>(le: le<T>, v: [const T]/~) -> [T]/~ { ret merge(le, merge_sort_(le, v, a), merge_sort_(le, v, b)); } - fn merge<T: copy>(le: le<T>, a: [T]/~, b: [T]/~) -> [T]/~ { - let mut rs = []/~; + fn merge<T: copy>(le: le<T>, a: ~[T], b: ~[T]) -> ~[T] { + let mut rs = ~[]; vec::reserve(rs, len(a) + len(b)); let a_len = len(a); let mut a_ix = 0u; @@ -54,7 +54,7 @@ fn merge_sort<T: copy>(le: le<T>, v: [const T]/~) -> [T]/~ { } } -fn part<T: copy>(compare_func: le<T>, arr: [mut T]/~, left: uint, +fn part<T: copy>(compare_func: le<T>, arr: ~[mut T], left: uint, right: uint, pivot: uint) -> uint { let pivot_value = arr[pivot]; arr[pivot] <-> arr[right]; @@ -71,7 +71,7 @@ fn part<T: copy>(compare_func: le<T>, arr: [mut T]/~, left: uint, ret storage_index; } -fn qsort<T: copy>(compare_func: le<T>, arr: [mut T]/~, left: uint, +fn qsort<T: copy>(compare_func: le<T>, arr: ~[mut T], left: uint, right: uint) { if right > left { let pivot = (left + right) / 2u; @@ -90,13 +90,13 @@ Quicksort. Sorts a mut vector in place. Has worst case O(n^2) performance, average case O(n log n). This is an unstable sort. "] -fn quick_sort<T: copy>(compare_func: le<T>, arr: [mut T]/~) { +fn quick_sort<T: copy>(compare_func: le<T>, arr: ~[mut T]) { if len::<T>(arr) == 0u { ret; } qsort::<T>(compare_func, arr, 0u, len::<T>(arr) - 1u); } fn qsort3<T: copy>(compare_func_lt: le<T>, compare_func_eq: le<T>, - arr: [mut T]/~, left: int, right: int) { + arr: ~[mut T], left: int, right: int) { if right <= left { ret; } let v: T = arr[right]; let mut i: int = left - 1; @@ -153,15 +153,15 @@ According to these slides this is the algorithm of choice for This is an unstable sort. "] -fn quick_sort3<T: copy ord eq>(arr: [mut T]/~) { +fn quick_sort3<T: copy ord eq>(arr: ~[mut T]) { if len::<T>(arr) == 0u { ret; } - qsort3::<T>({ |x, y| x.lt(y) }, { |x, y| x.eq(y) }, arr, 0, + qsort3::<T>(|x, y| x.lt(y), |x, y| x.eq(y), arr, 0, (len::<T>(arr) as int) - 1); } #[cfg(test)] mod test_qsort3 { - fn check_sort(v1: [mut int]/~, v2: [mut int]/~) { + fn check_sort(v1: ~[mut int], v2: ~[mut int]) { let len = vec::len::<int>(v1); quick_sort3::<int>(v1); let mut i = 0u; @@ -175,24 +175,24 @@ mod test_qsort3 { #[test] fn test() { { - let v1 = [mut 3, 7, 4, 5, 2, 9, 5, 8]/~; - let v2 = [mut 2, 3, 4, 5, 5, 7, 8, 9]/~; + let v1 = ~[mut 3, 7, 4, 5, 2, 9, 5, 8]; + let v2 = ~[mut 2, 3, 4, 5, 5, 7, 8, 9]; check_sort(v1, v2); } { - let v1 = [mut 1, 1, 1]/~; - let v2 = [mut 1, 1, 1]/~; + let v1 = ~[mut 1, 1, 1]; + let v2 = ~[mut 1, 1, 1]; check_sort(v1, v2); } { - let v1: [mut int]/~ = [mut]/~; - let v2: [mut int]/~ = [mut]/~; + let v1: ~[mut int] = ~[mut]; + let v2: ~[mut int] = ~[mut]; check_sort(v1, v2); } - { let v1 = [mut 9]/~; let v2 = [mut 9]/~; check_sort(v1, v2); } + { let v1 = ~[mut 9]; let v2 = ~[mut 9]; check_sort(v1, v2); } { - let v1 = [mut 9, 3, 3, 3, 9]/~; - let v2 = [mut 3, 3, 3, 9, 9]/~; + let v1 = ~[mut 9, 3, 3, 3, 9]; + let v2 = ~[mut 3, 3, 3, 9, 9]; check_sort(v1, v2); } } @@ -200,7 +200,7 @@ mod test_qsort3 { #[cfg(test)] mod test_qsort { - fn check_sort(v1: [mut int]/~, v2: [mut int]/~) { + fn check_sort(v1: ~[mut int], v2: ~[mut int]) { let len = vec::len::<int>(v1); fn leual(&&a: int, &&b: int) -> bool { ret a <= b; } let f = leual; @@ -216,24 +216,24 @@ mod test_qsort { #[test] fn test() { { - let v1 = [mut 3, 7, 4, 5, 2, 9, 5, 8]/~; - let v2 = [mut 2, 3, 4, 5, 5, 7, 8, 9]/~; + let v1 = ~[mut 3, 7, 4, 5, 2, 9, 5, 8]; + let v2 = ~[mut 2, 3, 4, 5, 5, 7, 8, 9]; check_sort(v1, v2); } { - let v1 = [mut 1, 1, 1]/~; - let v2 = [mut 1, 1, 1]/~; + let v1 = ~[mut 1, 1, 1]; + let v2 = ~[mut 1, 1, 1]; check_sort(v1, v2); } { - let v1: [mut int]/~ = [mut]/~; - let v2: [mut int]/~ = [mut]/~; + let v1: ~[mut int] = ~[mut]; + let v2: ~[mut int] = ~[mut]; check_sort(v1, v2); } - { let v1 = [mut 9]/~; let v2 = [mut 9]/~; check_sort(v1, v2); } + { let v1 = ~[mut 9]; let v2 = ~[mut 9]; check_sort(v1, v2); } { - let v1 = [mut 9, 3, 3, 3, 9]/~; - let v2 = [mut 3, 3, 3, 9, 9]/~; + let v1 = ~[mut 9, 3, 3, 3, 9]; + let v2 = ~[mut 3, 3, 3, 9, 9]; check_sort(v1, v2); } } @@ -241,9 +241,9 @@ mod test_qsort { // Regression test for #750 #[test] fn test_simple() { - let names = [mut 2, 1, 3]/~; + let names = ~[mut 2, 1, 3]; - let expected = [1, 2, 3]/~; + let expected = ~[1, 2, 3]; fn le(&&a: int, &&b: int) -> bool { int::le(a, b) } sort::quick_sort(le, names); @@ -251,7 +251,7 @@ mod test_qsort { let immut_names = vec::from_mut(names); let pairs = vec::zip(expected, immut_names); - for vec::each(pairs) {|p| + for vec::each(pairs) |p| { let (a, b) = p; #debug("%d %d", a, b); assert (a == b); @@ -262,7 +262,7 @@ mod test_qsort { #[cfg(test)] mod tests { - fn check_sort(v1: [int]/~, v2: [int]/~) { + fn check_sort(v1: ~[int], v2: ~[int]) { let len = vec::len::<int>(v1); fn le(&&a: int, &&b: int) -> bool { ret a <= b; } let f = le; @@ -278,16 +278,16 @@ mod tests { #[test] fn test() { { - let v1 = [3, 7, 4, 5, 2, 9, 5, 8]/~; - let v2 = [2, 3, 4, 5, 5, 7, 8, 9]/~; + let v1 = ~[3, 7, 4, 5, 2, 9, 5, 8]; + let v2 = ~[2, 3, 4, 5, 5, 7, 8, 9]; check_sort(v1, v2); } - { let v1 = [1, 1, 1]/~; let v2 = [1, 1, 1]/~; check_sort(v1, v2); } - { let v1:[int]/~ = []/~; let v2:[int]/~ = []/~; check_sort(v1, v2); } - { let v1 = [9]/~; let v2 = [9]/~; check_sort(v1, v2); } + { let v1 = ~[1, 1, 1]; let v2 = ~[1, 1, 1]; check_sort(v1, v2); } + { let v1:~[int] = ~[]; let v2:~[int] = ~[]; check_sort(v1, v2); } + { let v1 = ~[9]; let v2 = ~[9]; check_sort(v1, v2); } { - let v1 = [9, 3, 3, 3, 9]/~; - let v2 = [3, 3, 3, 9, 9]/~; + let v1 = ~[9, 3, 3, 3, 9]; + let v2 = ~[3, 3, 3, 9, 9]; check_sort(v1, v2); } } @@ -295,9 +295,9 @@ mod tests { #[test] fn test_merge_sort_mutable() { fn le(&&a: int, &&b: int) -> bool { ret a <= b; } - let v1 = [mut 3, 2, 1]/~; + let v1 = ~[mut 3, 2, 1]; let v2 = merge_sort(le, v1); - assert v2 == [1, 2, 3]/~; + assert v2 == ~[1, 2, 3]; } } diff --git a/src/libstd/term.rs b/src/libstd/term.rs index fdbdc7205da..43e1765f50b 100644 --- a/src/libstd/term.rs +++ b/src/libstd/term.rs @@ -28,16 +28,16 @@ fn esc(writer: io::writer) { writer.write([0x1bu8, '[' as u8]/~); } #[doc = "Reset the foreground and background colors to default"] fn reset(writer: io::writer) { esc(writer); - writer.write(['0' as u8, 'm' as u8]/~); + writer.write(~['0' as u8, 'm' as u8]); } #[doc = "Returns true if the terminal supports color"] fn color_supported() -> bool { - let supported_terms = ["xterm-color", "xterm", - "screen-bce", "xterm-256color"]/~; + let supported_terms = ~["xterm-color", "xterm", + "screen-bce", "xterm-256color"]; ret alt os::getenv("TERM") { option::some(env) { - for vec::each(supported_terms) {|term| + for vec::each(supported_terms) |term| { if str::eq(term, env) { ret true; } } false @@ -50,8 +50,8 @@ fn set_color(writer: io::writer, first_char: u8, color: u8) { assert (color < 16u8); esc(writer); let mut color = color; - if color >= 8u8 { writer.write(['1' as u8, ';' as u8]/~); color -= 8u8; } - writer.write([first_char, ('0' as u8) + color, 'm' as u8]/~); + if color >= 8u8 { writer.write(~['1' as u8, ';' as u8]); color -= 8u8; } + writer.write(~[first_char, ('0' as u8) + color, 'm' as u8]); } #[doc = "Set the foreground color"] diff --git a/src/libstd/test.rs b/src/libstd/test.rs index b8e3b6da4a1..42ff9f6366a 100644 --- a/src/libstd/test.rs +++ b/src/libstd/test.rs @@ -49,7 +49,7 @@ type test_desc = { // The default console test runner. It accepts the command line // arguments and a vector of test_descs (generated at compile time). -fn test_main(args: [str]/~, tests: [test_desc]/~) { +fn test_main(args: ~[str], tests: ~[test_desc]) { let opts = alt parse_opts(args) { either::left(o) { o } @@ -64,9 +64,9 @@ type test_opts = {filter: option<str>, run_ignored: bool, type opt_res = either<test_opts, str>; // Parses command line arguments into test options -fn parse_opts(args: [str]/~) -> opt_res { +fn parse_opts(args: ~[str]) -> opt_res { let args_ = vec::tail(args); - let opts = [getopts::optflag("ignored"), getopts::optopt("logfile")]/~; + let opts = ~[getopts::optflag("ignored"), getopts::optopt("logfile")]; let match = alt getopts::getopts(args_, opts) { ok(m) { m } @@ -97,11 +97,11 @@ type console_test_state = mut passed: uint, mut failed: uint, mut ignored: uint, - mut failures: [test_desc]/~}; + mut failures: ~[test_desc]}; // A simple console test runner fn run_tests_console(opts: test_opts, - tests: [test_desc]/~) -> bool { + tests: ~[test_desc]) -> bool { fn callback(event: testevent, st: console_test_state) { alt event { @@ -142,7 +142,7 @@ fn run_tests_console(opts: test_opts, let log_out = alt opts.logfile { some(path) { - alt io::file_writer(path, [io::create, io::truncate]/~) { + alt io::file_writer(path, ~[io::create, io::truncate]) { result::ok(w) { some(w) } result::err(s) { fail(#fmt("can't open output file: %s", s)) @@ -160,9 +160,9 @@ fn run_tests_console(opts: test_opts, mut passed: 0u, mut failed: 0u, mut ignored: 0u, - mut failures: []/~}; + mut failures: ~[]}; - run_tests(opts, tests, {|x|callback(x, st)}); + run_tests(opts, tests, |x| callback(x, st)); assert (st.passed + st.failed + st.ignored == st.total); let success = st.failed == 0u; @@ -216,9 +216,9 @@ fn run_tests_console(opts: test_opts, fn print_failures(st: console_test_state) { st.out.write_line("\nfailures:"); let failures = copy st.failures; - let failures = vec::map(failures) {|test| test.name}; + let failures = vec::map(failures, |test| test.name); let failures = sort::merge_sort(str::le, failures); - for vec::each(failures) {|name| + for vec::each(failures) |name| { st.out.write_line(#fmt[" %s", name]); } } @@ -250,7 +250,7 @@ fn should_sort_failures_before_printing_them() { mut passed: 0u, mut failed: 0u, mut ignored: 0u, - mut failures: [test_b, test_a]/~}; + mut failures: ~[test_b, test_a]}; print_failures(st); @@ -264,14 +264,14 @@ fn should_sort_failures_before_printing_them() { fn use_color() -> bool { ret get_concurrency() == 1u; } enum testevent { - te_filtered([test_desc]/~), + te_filtered(~[test_desc]), te_wait(test_desc), te_result(test_desc, test_result), } type monitor_msg = (test_desc, test_result); -fn run_tests(opts: test_opts, tests: [test_desc]/~, +fn run_tests(opts: test_opts, tests: ~[test_desc], callback: fn@(testevent)) { let mut filtered_tests = filter_tests(opts, tests); @@ -329,7 +329,7 @@ fn get_concurrency() -> uint { #[warn(no_non_implicitly_copyable_typarams)] fn filter_tests(opts: test_opts, - tests: [test_desc]/~) -> [test_desc]/~ { + tests: ~[test_desc]) -> ~[test_desc] { let mut filtered = copy tests; // Remove tests that don't match the test filter @@ -349,7 +349,7 @@ fn filter_tests(opts: test_opts, } else { ret option::none; } } - let filter = {|x|filter_fn(x, filter_str)}; + let filter = |x| filter_fn(x, filter_str); vec::filter_map(filtered, filter) }; @@ -367,7 +367,7 @@ fn filter_tests(opts: test_opts, } else { ret option::none; } }; - vec::filter_map(filtered, {|x|filter(x)}) + vec::filter_map(filtered, |x| filter(x)) }; // Sort the tests alphabetically @@ -376,7 +376,7 @@ fn filter_tests(opts: test_opts, fn lteq(t1: test_desc, t2: test_desc) -> bool { str::le(t1.name, t2.name) } - sort::merge_sort({|x,y|lteq(x, y)}, filtered) + sort::merge_sort(|x,y| lteq(x, y), filtered) }; ret filtered; @@ -390,7 +390,7 @@ fn run_test(+test: test_desc, monitor_ch: comm::chan<monitor_msg>) { ret; } - task::spawn {|| + do task::spawn || { let testfn = copy test.fn; let mut builder = task::builder(); let result_future = task::future_result(builder); @@ -482,7 +482,7 @@ mod tests { #[test] fn first_free_arg_should_be_a_filter() { - let args = ["progname", "filter"]/~; + let args = ~["progname", "filter"]; let opts = alt parse_opts(args) { either::left(o) { o } _ { fail "Malformed arg in first_free_arg_should_be_a_filter"; } }; assert (str::eq("filter", option::get(opts.filter))); @@ -490,7 +490,7 @@ mod tests { #[test] fn parse_ignored_flag() { - let args = ["progname", "filter", "--ignored"]/~; + let args = ~["progname", "filter", "--ignored"]; let opts = alt parse_opts(args) { either::left(o) { o } _ { fail "Malformed arg in parse_ignored_flag"; } }; assert (opts.run_ignored); @@ -504,8 +504,8 @@ mod tests { let opts = {filter: option::none, run_ignored: true, logfile: option::none}; let tests = - [{name: "1", fn: fn~() { }, ignore: true, should_fail: false}, - {name: "2", fn: fn~() { }, ignore: false, should_fail: false}]/~; + ~[{name: "1", fn: fn~() { }, ignore: true, should_fail: false}, + {name: "2", fn: fn~() { }, ignore: false, should_fail: false}]; let filtered = filter_tests(opts, tests); assert (vec::len(filtered) == 1u); @@ -519,35 +519,35 @@ mod tests { logfile: option::none}; let names = - ["sha1::test", "int::test_to_str", "int::test_pow", + ~["sha1::test", "int::test_to_str", "int::test_pow", "test::do_not_run_ignored_tests", "test::ignored_tests_result_in_ignored", "test::first_free_arg_should_be_a_filter", "test::parse_ignored_flag", "test::filter_for_ignored_option", - "test::sort_tests"]/~; + "test::sort_tests"]; let tests = { let testfn = fn~() { }; - let mut tests = []/~; - for vec::each(names) {|name| + let mut tests = ~[]; + for vec::each(names) |name| { let test = {name: name, fn: copy testfn, ignore: false, should_fail: false}; - tests += [test]/~; + tests += ~[test]; } tests }; let filtered = filter_tests(opts, tests); let expected = - ["int::test_pow", "int::test_to_str", "sha1::test", + ~["int::test_pow", "int::test_to_str", "sha1::test", "test::do_not_run_ignored_tests", "test::filter_for_ignored_option", "test::first_free_arg_should_be_a_filter", "test::ignored_tests_result_in_ignored", "test::parse_ignored_flag", - "test::sort_tests"]/~; + "test::sort_tests"]; let pairs = vec::zip(expected, filtered); - for vec::each(pairs) {|p| let (a, b) = copy p; assert (a == b.name); } + for vec::each(pairs) |p| { let (a, b) = copy p; assert (a == b.name); } } } diff --git a/src/libstd/time.rs b/src/libstd/time.rs index b2d5ea64a98..fa1597925bd 100644 --- a/src/libstd/time.rs +++ b/src/libstd/time.rs @@ -66,14 +66,14 @@ fn tzset() { } type tm = { - tm_sec: i32, // seconds after the minute [0-60]/~ - tm_min: i32, // minutes after the hour [0-59]/~ - tm_hour: i32, // hours after midnight [0-23]/~ - tm_mday: i32, // days of the month [1-31]/~ - tm_mon: i32, // months since January [0-11]/~ + tm_sec: i32, // seconds after the minute ~[0-60] + tm_min: i32, // minutes after the hour ~[0-59] + tm_hour: i32, // hours after midnight ~[0-23] + tm_mday: i32, // days of the month ~[1-31] + tm_mon: i32, // months since January ~[0-11] tm_year: i32, // years since 1900 - tm_wday: i32, // days since Sunday [0-6]/~ - tm_yday: i32, // days since January 1 [0-365]/~ + tm_wday: i32, // days since Sunday ~[0-6] + tm_yday: i32, // days since January 1 ~[0-365] tm_isdst: i32, // Daylight Savings Time flag tm_gmtoff: i32, // offset from UTC in seconds tm_zone: str, // timezone abbreviation @@ -142,7 +142,7 @@ fn strptime(s: str, format: str) -> result<tm, str> { fn match_str(s: str, pos: uint, needle: str) -> bool { let mut i = pos; - for str::each(needle) {|ch| + for str::each(needle) |ch| { if s[i] != ch { ret false; } @@ -151,7 +151,7 @@ fn strptime(s: str, format: str) -> result<tm, str> { ret true; } - fn match_strs(s: str, pos: uint, strs: [(str, i32)]/~) + fn match_strs(s: str, pos: uint, strs: ~[(str, i32)]) -> option<(i32, uint)> { let mut i = 0u; let len = vec::len(strs); @@ -206,7 +206,7 @@ fn strptime(s: str, format: str) -> result<tm, str> { -> result<uint, str> { alt ch { 'A' { - alt match_strs(s, pos, [ + alt match_strs(s, pos, ~[ ("Sunday", 0_i32), ("Monday", 1_i32), ("Tuesday", 2_i32), @@ -214,13 +214,13 @@ fn strptime(s: str, format: str) -> result<tm, str> { ("Thursday", 4_i32), ("Friday", 5_i32), ("Saturday", 6_i32) - ]/~) { + ]) { some(item) { let (v, pos) = item; tm.tm_wday = v; ok(pos) } none { err("Invalid day") } } } 'a' { - alt match_strs(s, pos, [ + alt match_strs(s, pos, ~[ ("Sun", 0_i32), ("Mon", 1_i32), ("Tue", 2_i32), @@ -228,13 +228,13 @@ fn strptime(s: str, format: str) -> result<tm, str> { ("Thu", 4_i32), ("Fri", 5_i32), ("Sat", 6_i32) - ]/~) { + ]) { some(item) { let (v, pos) = item; tm.tm_wday = v; ok(pos) } none { err("Invalid day") } } } 'B' { - alt match_strs(s, pos, [ + alt match_strs(s, pos, ~[ ("January", 0_i32), ("February", 1_i32), ("March", 2_i32), @@ -247,13 +247,13 @@ fn strptime(s: str, format: str) -> result<tm, str> { ("October", 9_i32), ("November", 10_i32), ("December", 11_i32) - ]/~) { + ]) { some(item) { let (v, pos) = item; tm.tm_mon = v; ok(pos) } none { err("Invalid month") } } } 'b' | 'h' { - alt match_strs(s, pos, [ + alt match_strs(s, pos, ~[ ("Jan", 0_i32), ("Feb", 1_i32), ("Mar", 2_i32), @@ -266,7 +266,7 @@ fn strptime(s: str, format: str) -> result<tm, str> { ("Oct", 9_i32), ("Nov", 10_i32), ("Dec", 11_i32) - ]/~) { + ]) { some(item) { let (v, pos) = item; tm.tm_mon = v; ok(pos) } none { err("Invalid month") } } @@ -283,21 +283,21 @@ fn strptime(s: str, format: str) -> result<tm, str> { } 'c' { parse_type(s, pos, 'a', tm) - .chain { |pos| parse_char(s, pos, ' ') } - .chain { |pos| parse_type(s, pos, 'b', tm) } - .chain { |pos| parse_char(s, pos, ' ') } - .chain { |pos| parse_type(s, pos, 'e', tm) } - .chain { |pos| parse_char(s, pos, ' ') } - .chain { |pos| parse_type(s, pos, 'T', tm) } - .chain { |pos| parse_char(s, pos, ' ') } - .chain { |pos| parse_type(s, pos, 'Y', tm) } + .chain(|pos| parse_char(s, pos, ' ')) + .chain(|pos| parse_type(s, pos, 'b', tm)) + .chain(|pos| parse_char(s, pos, ' ')) + .chain(|pos| parse_type(s, pos, 'e', tm)) + .chain(|pos| parse_char(s, pos, ' ')) + .chain(|pos| parse_type(s, pos, 'T', tm)) + .chain(|pos| parse_char(s, pos, ' ')) + .chain(|pos| parse_type(s, pos, 'Y', tm)) } 'D' | 'x' { parse_type(s, pos, 'm', tm) - .chain { |pos| parse_char(s, pos, '/') } - .chain { |pos| parse_type(s, pos, 'd', tm) } - .chain { |pos| parse_char(s, pos, '/') } - .chain { |pos| parse_type(s, pos, 'y', tm) } + .chain(|pos| parse_char(s, pos, '/')) + .chain(|pos| parse_type(s, pos, 'd', tm)) + .chain(|pos| parse_char(s, pos, '/')) + .chain(|pos| parse_type(s, pos, 'y', tm)) } 'd' { alt match_digits(s, pos, 2u, false) { @@ -313,10 +313,10 @@ fn strptime(s: str, format: str) -> result<tm, str> { } 'F' { parse_type(s, pos, 'Y', tm) - .chain { |pos| parse_char(s, pos, '-') } - .chain { |pos| parse_type(s, pos, 'm', tm) } - .chain { |pos| parse_char(s, pos, '-') } - .chain { |pos| parse_type(s, pos, 'd', tm) } + .chain(|pos| parse_char(s, pos, '-')) + .chain(|pos| parse_type(s, pos, 'm', tm)) + .chain(|pos| parse_char(s, pos, '-')) + .chain(|pos| parse_type(s, pos, 'd', tm)) } 'H' { // FIXME (#2350): range check. @@ -385,30 +385,30 @@ fn strptime(s: str, format: str) -> result<tm, str> { } 'n' { parse_char(s, pos, '\n') } 'P' { - alt match_strs(s, pos, [("am", 0_i32), ("pm", 12_i32)]/~) { + alt match_strs(s, pos, ~[("am", 0_i32), ("pm", 12_i32)]) { some(item) { let (v, pos) = item; tm.tm_hour += v; ok(pos) } none { err("Invalid hour") } } } 'p' { - alt match_strs(s, pos, [("AM", 0_i32), ("PM", 12_i32)]/~) { + alt match_strs(s, pos, ~[("AM", 0_i32), ("PM", 12_i32)]) { some(item) { let (v, pos) = item; tm.tm_hour += v; ok(pos) } none { err("Invalid hour") } } } 'R' { parse_type(s, pos, 'H', tm) - .chain { |pos| parse_char(s, pos, ':') } - .chain { |pos| parse_type(s, pos, 'M', tm) } + .chain(|pos| parse_char(s, pos, ':')) + .chain(|pos| parse_type(s, pos, 'M', tm)) } 'r' { parse_type(s, pos, 'I', tm) - .chain { |pos| parse_char(s, pos, ':') } - .chain { |pos| parse_type(s, pos, 'M', tm) } - .chain { |pos| parse_char(s, pos, ':') } - .chain { |pos| parse_type(s, pos, 'S', tm) } - .chain { |pos| parse_char(s, pos, ' ') } - .chain { |pos| parse_type(s, pos, 'p', tm) } + .chain(|pos| parse_char(s, pos, ':')) + .chain(|pos| parse_type(s, pos, 'M', tm)) + .chain(|pos| parse_char(s, pos, ':')) + .chain(|pos| parse_type(s, pos, 'S', tm)) + .chain(|pos| parse_char(s, pos, ' ')) + .chain(|pos| parse_type(s, pos, 'p', tm)) } 'S' { // FIXME (#2350): range check. @@ -424,10 +424,10 @@ fn strptime(s: str, format: str) -> result<tm, str> { //'s' {} 'T' | 'X' { parse_type(s, pos, 'H', tm) - .chain { |pos| parse_char(s, pos, ':') } - .chain { |pos| parse_type(s, pos, 'M', tm) } - .chain { |pos| parse_char(s, pos, ':') } - .chain { |pos| parse_type(s, pos, 'S', tm) } + .chain(|pos| parse_char(s, pos, ':')) + .chain(|pos| parse_type(s, pos, 'M', tm)) + .chain(|pos| parse_char(s, pos, ':')) + .chain(|pos| parse_type(s, pos, 'S', tm)) } 't' { parse_char(s, pos, '\t') } 'u' { @@ -443,10 +443,10 @@ fn strptime(s: str, format: str) -> result<tm, str> { } 'v' { parse_type(s, pos, 'e', tm) - .chain { |pos| parse_char(s, pos, '-') } - .chain { |pos| parse_type(s, pos, 'b', tm) } - .chain { |pos| parse_char(s, pos, '-') } - .chain { |pos| parse_type(s, pos, 'Y', tm) } + .chain(|pos| parse_char(s, pos, '-')) + .chain(|pos| parse_type(s, pos, 'b', tm)) + .chain(|pos| parse_char(s, pos, '-')) + .chain(|pos| parse_type(s, pos, 'Y', tm)) } //'W' {} 'w' { @@ -526,7 +526,7 @@ fn strptime(s: str, format: str) -> result<tm, str> { } } - io::with_str_reader(format) { |rdr| + do io::with_str_reader(format) |rdr| { let tm = { mut tm_sec: 0_i32, mut tm_min: 0_i32, @@ -738,7 +738,7 @@ fn strftime(format: str, tm: tm) -> str { let mut buf = ""; - io::with_str_reader(format) { |rdr| + do io::with_str_reader(format) |rdr| { while !rdr.eof() { alt rdr.read_char() { '%' { buf += parse_type(rdr.read_char(), tm); } @@ -1010,7 +1010,7 @@ mod tests { "Thursday", "Friday", "Saturday" - ]/~.iter { |day| assert test(day, "%A"); } + ]/_.iter(|day| assert test(day, "%A")); [ "Sun", @@ -1020,7 +1020,7 @@ mod tests { "Thu", "Fri", "Sat" - ]/~.iter { |day| assert test(day, "%a"); } + ]/_.iter(|day| assert test(day, "%a")); [ "January", @@ -1035,7 +1035,7 @@ mod tests { "October", "November", "December" - ]/~.iter { |day| assert test(day, "%B"); } + ]/_.iter(|day| assert test(day, "%B")); [ "Jan", @@ -1050,7 +1050,7 @@ mod tests { "Oct", "Nov", "Dec" - ]/~.iter { |day| assert test(day, "%b"); } + ]/_.iter(|day| assert test(day, "%b")); assert test("19", "%C"); assert test("Fri Feb 13 23:31:30 2009", "%c"); diff --git a/src/libstd/timer.rs b/src/libstd/timer.rs index f6981ce39e0..6365c9bd953 100644 --- a/src/libstd/timer.rs +++ b/src/libstd/timer.rs @@ -31,7 +31,7 @@ fn delayed_send<T: copy send>(iotask: iotask, let timer_done_ch_ptr = ptr::addr_of(timer_done_ch); let timer = uv::ll::timer_t(); let timer_ptr = ptr::addr_of(timer); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { let init_result = uv::ll::timer_init(loop_ptr, timer_ptr); if (init_result == 0i32) { let start_result = uv::ll::timer_start( @@ -105,11 +105,11 @@ fn recv_timeout<T: copy send>(iotask: iotask, delayed_send(iotask, msecs, timeout_ch, ()); // FIXME: This could be written clearer (#2618) either::either( - {|left_val| + |left_val| { log(debug, #fmt("recv_time .. left_val %?", left_val)); none - }, {|right_val| + }, |right_val| { some(right_val) }, comm::select2(timeout_po, wait_po) ) @@ -151,7 +151,7 @@ mod test { #[test] fn test_gl_timer_sleep_stress1() { let hl_loop = uv::global_loop::get(); - iter::repeat(200u) {|| + do iter::repeat(200u) || { sleep(hl_loop, 1u); } } @@ -165,20 +165,20 @@ mod test { let repeat = 20u; let spec = { - [(1u, 20u), + ~[(1u, 20u), (10u, 10u), - (20u, 2u)]/~ + (20u, 2u)] }; - iter::repeat(repeat) {|| + do iter::repeat(repeat) || { - for spec.each {|spec| + for spec.each |spec| { let (times, maxms) = spec; - task::spawn {|| + do task::spawn || { import rand::*; let rng = rng(); - iter::repeat(times) {|| + do iter::repeat(times) || { sleep(hl_loop, rng.next() as uint % maxms); } comm::send(ch, ()); @@ -186,7 +186,7 @@ mod test { } } - iter::repeat(repeat * spec.len()) {|| + do iter::repeat(repeat * spec.len()) || { comm::recv(po) } } @@ -204,14 +204,14 @@ mod test { let mut failures = 0; let hl_loop = uv::global_loop::get(); - iter::repeat(times as uint) {|| + do iter::repeat(times as uint) || { task::yield(); let expected = rand::rng().gen_str(16u); let test_po = comm::port::<str>(); let test_ch = comm::chan(test_po); - task::spawn() {|| + do task::spawn() || { delayed_send(hl_loop, 1u, test_ch, expected); }; @@ -231,12 +231,12 @@ mod test { let mut failures = 0; let hl_loop = uv::global_loop::get(); - iter::repeat(times as uint) {|| + do iter::repeat(times as uint) || { let expected = rand::rng().gen_str(16u); let test_po = comm::port::<str>(); let test_ch = comm::chan(test_po); - task::spawn() {|| + do task::spawn() || { delayed_send(hl_loop, 1000u, test_ch, expected); }; diff --git a/src/libstd/treemap.rs b/src/libstd/treemap.rs index 6cf98f93fe3..066d2a6501f 100644 --- a/src/libstd/treemap.rs +++ b/src/libstd/treemap.rs @@ -125,7 +125,7 @@ mod tests { fn t(n: @mut int, &&k: int, &&_v: ()) { assert (*n == k); *n += 1; } - traverse(m, {|x,y|t(n, x, y)}); + traverse(m, |x,y| t(n, x, y)); } #[test] diff --git a/src/libstd/uv_global_loop.rs b/src/libstd/uv_global_loop.rs index ec2ce9b684c..aab8040c0a4 100644 --- a/src/libstd/uv_global_loop.rs +++ b/src/libstd/uv_global_loop.rs @@ -40,7 +40,7 @@ fn get_monitor_task_gl() -> iotask unsafe { #debug("ENTERING global_loop::get() loop chan: %?", monitor_loop_chan_ptr); - let builder_fn = {|| + let builder_fn = || { let builder = task::builder(); task::set_opts(builder, { supervise: false, @@ -56,12 +56,12 @@ fn get_monitor_task_gl() -> iotask unsafe { #debug("before priv::chan_from_global_ptr"); type monchan = chan<iotask>; - let monitor_ch = chan_from_global_ptr::<monchan>(monitor_loop_chan_ptr, - builder_fn) {|msg_po| + let monitor_ch = do chan_from_global_ptr::<monchan>(monitor_loop_chan_ptr, + builder_fn) |msg_po| { #debug("global monitor task starting"); // As a weak task the runtime will notify us when to exit - weaken_task() {|weak_exit_po| + do weaken_task() |weak_exit_po| { #debug("global monitor task is now weak"); let hl_loop = spawn_loop(); loop { @@ -87,7 +87,7 @@ fn get_monitor_task_gl() -> iotask unsafe { // once we have a chan to the monitor loop, we ask it for // the libuv loop's async handle - listen { |fetch_ch| + do listen |fetch_ch| { monitor_ch.send(fetch_ch); fetch_ch.recv() } @@ -95,11 +95,11 @@ fn get_monitor_task_gl() -> iotask unsafe { fn spawn_loop() -> iotask unsafe { let builder = task::builder(); - task::add_wrapper(builder) {|task_body| + do task::add_wrapper(builder) |task_body| { fn~(move task_body) { // The I/O loop task also needs to be weak so it doesn't keep // the runtime alive - weaken_task {|weak_exit_po| + do weaken_task |weak_exit_po| { #debug("global libuv task is now weak %?", weak_exit_po); task_body(); @@ -129,7 +129,7 @@ mod test { log(debug, "in simple timer cb"); ll::timer_stop(timer_ptr); let hl_loop = get_gl(); - iotask::interact(hl_loop) {|_loop_ptr| + do iotask::interact(hl_loop) |_loop_ptr| { log(debug, "closing timer"); ll::close(timer_ptr, simple_timer_close_cb); log(debug, "about to deref exit_ch_ptr"); @@ -146,7 +146,7 @@ mod test { exit_ch_ptr)); let timer_handle = ll::timer_t(); let timer_ptr = ptr::addr_of(timer_handle); - iotask::interact(iotask) {|loop_ptr| + do iotask::interact(iotask) |loop_ptr| { log(debug, "user code inside interact loop!!!"); let init_status = ll::timer_init(loop_ptr, timer_ptr); if(init_status == 0i32) { @@ -174,7 +174,7 @@ mod test { let hl_loop = get_gl(); let exit_po = comm::port::<()>(); let exit_ch = comm::chan(exit_po); - task::spawn_sched(task::manual_threads(1u), {|| + task::spawn_sched(task::manual_threads(1u), || { impl_uv_hl_simple_timer(hl_loop); comm::send(exit_ch, ()); }); @@ -191,13 +191,13 @@ mod test { let exit_po = comm::port::<()>(); let exit_ch = comm::chan(exit_po); let cycles = 5000u; - iter::repeat(cycles) {|| - task::spawn_sched(task::manual_threads(1u), {|| + do iter::repeat(cycles) || { + task::spawn_sched(task::manual_threads(1u), || { impl_uv_hl_simple_timer(hl_loop); comm::send(exit_ch, ()); }); }; - iter::repeat(cycles) {|| + do iter::repeat(cycles) || { comm::recv(exit_po); }; log(debug, "test_stress_gl_uv_global_loop_high_level_global_timer"+ diff --git a/src/libstd/uv_iotask.rs b/src/libstd/uv_iotask.rs index 3833cafe15e..c24a3bf8170 100644 --- a/src/libstd/uv_iotask.rs +++ b/src/libstd/uv_iotask.rs @@ -39,9 +39,9 @@ fn spawn_iotask(-builder: task::builder) -> iotask { with get_opts(builder) }); - listen {|iotask_ch| + do listen |iotask_ch| { - run(copy(builder)) {|| + do run(copy(builder)) || { #debug("entering libuv task"); run_loop(iotask_ch); #debug("libuv task exiting"); @@ -211,7 +211,7 @@ mod test { exit_ch: exit_ch }; let ah_data_ptr = ptr::addr_of(ah_data); - interact(iotask) {|loop_ptr| + do interact(iotask) |loop_ptr| { ll::async_init(loop_ptr, ah_ptr, async_handle_cb); ll::set_data_for_uv_handle(ah_ptr, ah_data_ptr as *libc::c_void); ll::async_send(ah_ptr); @@ -224,7 +224,7 @@ mod test { unsafe fn spawn_test_loop(exit_ch: comm::chan<()>) -> iotask { let iotask_port = comm::port::<iotask>(); let iotask_ch = comm::chan(iotask_port); - task::spawn_sched(task::manual_threads(1u)) {|| + do task::spawn_sched(task::manual_threads(1u)) || { run_loop(iotask_ch); exit_ch.send(()); }; @@ -255,13 +255,13 @@ mod test { // called, at least. let work_exit_po = comm::port::<()>(); let work_exit_ch = comm::chan(work_exit_po); - iter::repeat(7u) {|| - task::spawn_sched(task::manual_threads(1u), {|| + do iter::repeat(7u) || { + do task::spawn_sched(task::manual_threads(1u)) || { impl_uv_iotask_async(iotask); comm::send(work_exit_ch, ()); - }); + }; }; - iter::repeat(7u) {|| + do iter::repeat(7u) || { comm::recv(work_exit_po); }; log(debug, "sending teardown_loop msg.."); diff --git a/src/libstd/uv_ll.rs b/src/libstd/uv_ll.rs index db0dcf7f4f4..6004b0cf62e 100644 --- a/src/libstd/uv_ll.rs +++ b/src/libstd/uv_ll.rs @@ -24,7 +24,7 @@ import libc::size_t; // libuv struct mappings type uv_ip4_addr = { - ip: [u8]/~, + ip: ~[u8], port: int }; type uv_ip6_addr = uv_ip4_addr; @@ -616,7 +616,7 @@ unsafe fn accept(server: *libc::c_void, client: *libc::c_void) } unsafe fn write<T>(req: *uv_write_t, stream: *T, - buf_in: *[uv_buf_t]/~, cb: *u8) -> libc::c_int { + buf_in: *~[uv_buf_t], cb: *u8) -> libc::c_int { let buf_ptr = vec::unsafe::to_ptr(*buf_in); let buf_cnt = vec::len(*buf_in) as i32; ret rustrt::rust_uv_write(req as *libc::c_void, @@ -795,13 +795,13 @@ type uv_err_data = { mod test { enum tcp_read_data { tcp_read_eof, - tcp_read_more([u8]/~), + tcp_read_more(~[u8]), tcp_read_error } type request_wrapper = { write_req: *uv_write_t, - req_buf: *[uv_buf_t]/~, + req_buf: *~[uv_buf_t], read_chan: *comm::chan<str> }; @@ -915,9 +915,9 @@ mod test { let req_str_bytes = str::bytes(req_str); let req_msg_ptr: *u8 = vec::unsafe::to_ptr(req_str_bytes); log(debug, #fmt("req_msg ptr: %u", req_msg_ptr as uint)); - let req_msg = [ + let req_msg = ~[ buf_init(req_msg_ptr, vec::len(req_str_bytes)) - ]/~; + ]; // this is the enclosing record, we'll pass a ptr to // this to C.. let write_handle = write_t(); @@ -1115,7 +1115,7 @@ mod test { client: *uv_tcp_t, server: *uv_tcp_t, server_kill_msg: str, - server_resp_buf: *[uv_buf_t]/~, + server_resp_buf: *~[uv_buf_t], server_chan: *comm::chan<str>, server_write_req: *uv_write_t }; @@ -1162,9 +1162,9 @@ mod test { let resp_str_bytes = str::bytes(server_resp_msg); let resp_msg_ptr: *u8 = vec::unsafe::to_ptr(resp_str_bytes); log(debug, #fmt("resp_msg ptr: %u", resp_msg_ptr as uint)); - let resp_msg = [ + let resp_msg = ~[ buf_init(resp_msg_ptr, vec::len(resp_str_bytes)) - ]/~; + ]; let continue_async_handle = async_t(); let continue_async_handle_ptr = @@ -1262,7 +1262,7 @@ mod test { let continue_chan = comm::chan::<bool>(continue_port); let continue_chan_ptr = ptr::addr_of(continue_chan); - task::spawn_sched(task::manual_threads(1u)) {|| + do task::spawn_sched(task::manual_threads(1u)) || { impl_uv_tcp_server(bind_ip, port, kill_server_msg, server_resp_msg, @@ -1275,7 +1275,7 @@ mod test { comm::recv(continue_port); log(debug, "received on continue port, set up tcp client"); - task::spawn_sched(task::manual_threads(1u)) {|| + do task::spawn_sched(task::manual_threads(1u)) || { impl_uv_tcp_request(request_ip, port, kill_server_msg, ptr::addr_of(client_chan)); diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index d0876dd1062..90dbc1a82f2 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -41,9 +41,9 @@ type fn_ident = option<ident>; #[auto_serialize] type path = {span: span, global: bool, - idents: [ident]/~, + idents: ~[ident], rp: option<@region>, - types: [@ty]/~}; + types: ~[@ty]}; #[auto_serialize] type crate_num = int; @@ -66,7 +66,7 @@ enum ty_param_bound { } #[auto_serialize] -type ty_param = {ident: ident, id: node_id, bounds: @[ty_param_bound]/~}; +type ty_param = {ident: ident, id: node_id, bounds: @~[ty_param_bound]}; #[auto_serialize] enum def { @@ -92,19 +92,19 @@ enum def { // The set of meta_items that define the compilation environment of the crate, // used to drive conditional compilation -type crate_cfg = [@meta_item]/~; +type crate_cfg = ~[@meta_item]; type crate = spanned<crate_>; type crate_ = - {directives: [@crate_directive]/~, + {directives: ~[@crate_directive], module: _mod, - attrs: [attribute]/~, + attrs: ~[attribute], config: crate_cfg}; enum crate_directive_ { - cdir_src_mod(ident, [attribute]/~), - cdir_dir_mod(ident, [@crate_directive]/~, [attribute]/~), + cdir_src_mod(ident, ~[attribute]), + cdir_dir_mod(ident, ~[@crate_directive], ~[attribute]), // NB: cdir_view_item is *not* processed by the rest of the compiler, the // attached view_items are sunk into the crate's module during parsing, @@ -124,7 +124,7 @@ type meta_item = spanned<meta_item_>; #[auto_serialize] enum meta_item_ { meta_word(ident), - meta_list(ident, [@meta_item]/~), + meta_list(ident, ~[@meta_item]), meta_name_value(ident, lit), } @@ -132,8 +132,8 @@ enum meta_item_ { type blk = spanned<blk_>; #[auto_serialize] -type blk_ = {view_items: [@view_item]/~, - stmts: [@stmt]/~, +type blk_ = {view_items: ~[@view_item], + stmts: ~[@stmt], expr: option<@expr>, id: node_id, rules: blk_check_mode}; @@ -155,10 +155,10 @@ enum pat_ { // records this pattern's node_id in an auxiliary // set (of "pat_idents that refer to nullary enums") pat_ident(@path, option<@pat>), - pat_enum(@path, option<[@pat]/~>), // "none" means a * pattern where + pat_enum(@path, option<~[@pat]>), // "none" means a * pattern where // we don't bind the fields to names - pat_rec([field_pat]/~, bool), - pat_tup([@pat]/~), + pat_rec(~[field_pat], bool), + pat_tup(~[@pat]), pat_box(@pat), pat_uniq(@pat), pat_lit(@expr), @@ -181,9 +181,9 @@ enum proto { enum vstore { // FIXME (#2112): Change uint to @expr (actually only constant exprs) vstore_fixed(option<uint>), // [1,2,3,4]/_ or 4 - vstore_uniq, // [1,2,3,4]/~ - vstore_box, // [1,2,3,4]/@ - vstore_slice(@region) // [1,2,3,4]/&(foo)? + vstore_uniq, // ~[1,2,3,4] + vstore_box, // @[1,2,3,4] + vstore_slice(@region) // &[1,2,3,4](foo)? } pure fn is_blockish(p: ast::proto) -> bool { @@ -270,10 +270,10 @@ type local = spanned<local_>; type decl = spanned<decl_>; #[auto_serialize] -enum decl_ { decl_local([@local]/~), decl_item(@item), } +enum decl_ { decl_local(~[@local]), decl_item(@item), } #[auto_serialize] -type arm = {pats: [@pat]/~, guard: option<@expr>, body: blk}; +type arm = {pats: ~[@pat], guard: option<@expr>, body: blk}; #[auto_serialize] type field_ = {mutbl: mutability, ident: ident, expr: @expr}; @@ -296,10 +296,10 @@ enum alt_mode { alt_check, alt_exhaustive, } #[auto_serialize] enum expr_ { expr_vstore(@expr, vstore), - expr_vec([@expr]/~, mutability), - expr_rec([field]/~, option<@expr>), - expr_call(@expr, [@expr]/~, bool), // True iff last argument is a block - expr_tup([@expr]/~), + expr_vec(~[@expr], mutability), + expr_rec(~[field], option<@expr>), + expr_call(@expr, ~[@expr], bool), // True iff last argument is a block + expr_tup(~[@expr]), expr_binary(binop, @expr, @expr), expr_unary(unop, @expr), expr_lit(@lit), @@ -310,7 +310,7 @@ enum expr_ { Same semantics as while(true) { body }, but typestate knows that the (implicit) condition is always true. */ expr_loop(blk), - expr_alt(@expr, [arm]/~, alt_mode), + expr_alt(@expr, ~[arm], alt_mode), expr_fn(proto, fn_decl, blk, capture_clause), expr_fn_block(fn_decl, blk, capture_clause), // Inner expr is always an expr_fn_block. We need the wrapping node to @@ -330,7 +330,7 @@ enum expr_ { expr_assign(@expr, @expr), expr_swap(@expr, @expr), expr_assign_op(binop, @expr, @expr), - expr_field(@expr, ident, [@ty]/~), + expr_field(@expr, ident, ~[@ty]), expr_index(@expr, @expr), expr_path(@path), expr_addr_of(mutability, @expr), @@ -362,7 +362,7 @@ type capture_item = @{ }; #[auto_serialize] -type capture_clause = @[capture_item]/~; +type capture_clause = @~[capture_item]; /* // Says whether this is a block the user marked as @@ -376,7 +376,7 @@ enum blk_sort { #[auto_serialize] enum token_tree { /* for macro invocations; parsing is the macro's job */ - tt_delim([token_tree]/~), + tt_delim(~[token_tree]), tt_flat(span, token::token) } @@ -387,7 +387,7 @@ type matcher = spanned<matcher_>; enum matcher_ { mtc_tok(token::token), /* body, separator, zero ok? : */ - mtc_rep([matcher]/~, option<token::token>, bool), + mtc_rep(~[matcher], option<token::token>, bool), mtc_bb(ident, ident, uint) } @@ -441,8 +441,8 @@ type ty_field_ = {ident: ident, mt: mt}; type ty_field = spanned<ty_field_>; #[auto_serialize] -type ty_method = {ident: ident, attrs: [attribute]/~, - decl: fn_decl, tps: [ty_param]/~, span: span}; +type ty_method = {ident: ident, attrs: ~[attribute], + decl: fn_decl, tps: ~[ty_param], span: span}; #[auto_serialize] enum int_ty { ty_i, ty_char, ty_i8, ty_i16, ty_i32, ty_i64, } @@ -481,11 +481,11 @@ enum ty_ { ty_vec(mt), ty_ptr(mt), ty_rptr(@region, mt), - ty_rec([ty_field]/~), + ty_rec(~[ty_field]), ty_fn(proto, fn_decl), - ty_tup([@ty]/~), + ty_tup(~[@ty]), ty_path(@path, node_id), - ty_constr(@ty, [@ty_constr]/~), + ty_constr(@ty, ~[@ty_constr]), ty_vstore(@ty, vstore), ty_mac(mac), // ty_infer means the type should be inferred instead of it having been @@ -525,7 +525,7 @@ type constr_arg = spanned<fn_constr_arg>; #[auto_serialize] type constr_general_<ARG, ID> = - {path: @path, args: [@sp_constr_arg<ARG>]/~, id: ID}; + {path: @path, args: ~[@sp_constr_arg<ARG>], id: ID}; // In the front end, constraints have a node ID attached. // Typeck turns this to a def_id, using the output of resolve. @@ -552,11 +552,11 @@ type arg = {mode: mode, ty: @ty, ident: ident, id: node_id}; #[auto_serialize] type fn_decl = - {inputs: [arg]/~, + {inputs: ~[arg], output: @ty, purity: purity, cf: ret_style, - constraints: [@constr]/~}; + constraints: ~[@constr]}; #[auto_serialize] enum purity { @@ -574,14 +574,14 @@ enum ret_style { } #[auto_serialize] -type method = {ident: ident, attrs: [attribute]/~, - tps: [ty_param]/~, decl: fn_decl, body: blk, +type method = {ident: ident, attrs: ~[attribute], + tps: ~[ty_param], decl: fn_decl, body: blk, id: node_id, span: span, self_id: node_id, vis: visibility}; // always public, unless it's a // class method #[auto_serialize] -type _mod = {view_items: [@view_item]/~, items: [@item]/~}; +type _mod = {view_items: ~[@view_item], items: ~[@item]}; #[auto_serialize] enum foreign_abi { @@ -592,14 +592,14 @@ enum foreign_abi { #[auto_serialize] type foreign_mod = - {view_items: [@view_item]/~, - items: [@foreign_item]/~}; + {view_items: ~[@view_item], + items: ~[@foreign_item]}; #[auto_serialize] type variant_arg = {ty: @ty, id: node_id}; #[auto_serialize] -type variant_ = {name: ident, attrs: [attribute]/~, args: [variant_arg]/~, +type variant_ = {name: ident, attrs: ~[attribute], args: ~[variant_arg], id: node_id, disr_expr: option<@expr>, vis: visibility}; #[auto_serialize] @@ -628,18 +628,18 @@ enum view_path_ { view_path_glob(@path, node_id), // foo::bar::{a,b,c} - view_path_list(@path, [path_list_ident]/~, node_id) + view_path_list(@path, ~[path_list_ident], node_id) } #[auto_serialize] -type view_item = {node: view_item_, attrs: [attribute]/~, +type view_item = {node: view_item_, attrs: ~[attribute], vis: visibility, span: span}; #[auto_serialize] enum view_item_ { - view_item_use(ident, [@meta_item]/~, node_id), - view_item_import([@view_path]/~), - view_item_export([@view_path]/~) + view_item_use(ident, ~[@meta_item], node_id), + view_item_import(~[@view_path]), + view_item_export(~[@view_path]) } // Meta-data associated with an item @@ -667,7 +667,7 @@ type iface_ref = {path: @path, id: node_id}; enum visibility { public, private } #[auto_serialize] -type item = {ident: ident, attrs: [attribute]/~, +type item = {ident: ident, attrs: ~[attribute], id: node_id, node: item_, vis: visibility, span: span}; @@ -680,23 +680,23 @@ enum region_param { #[auto_serialize] enum item_ { item_const(@ty, @expr), - item_fn(fn_decl, [ty_param]/~, blk), + item_fn(fn_decl, ~[ty_param], blk), item_mod(_mod), item_foreign_mod(foreign_mod), - item_ty(@ty, [ty_param]/~, region_param), - item_enum([variant]/~, [ty_param]/~, region_param), - item_class([ty_param]/~, /* ty params for class */ - [@iface_ref]/~, /* ifaces this class implements */ - [@class_member]/~, /* methods, etc. */ + item_ty(@ty, ~[ty_param], region_param), + item_enum(~[variant], ~[ty_param], region_param), + item_class(~[ty_param], /* ty params for class */ + ~[@iface_ref], /* ifaces this class implements */ + ~[@class_member], /* methods, etc. */ /* (not including ctor or dtor) */ class_ctor, /* dtor is optional */ option<class_dtor>, region_param ), - item_iface([ty_param]/~, region_param, [ty_method]/~), - item_impl([ty_param]/~, region_param, option<@iface_ref> /* iface */, - @ty /* self */, [@method]/~), + item_iface(~[ty_param], region_param, ~[ty_method]), + item_impl(~[ty_param], region_param, option<@iface_ref> /* iface */, + @ty /* self */, ~[@method]), } #[auto_serialize] @@ -731,14 +731,14 @@ type class_dtor_ = {id: node_id, #[auto_serialize] type foreign_item = {ident: ident, - attrs: [attribute]/~, + attrs: ~[attribute], node: foreign_item_, id: node_id, span: span}; #[auto_serialize] enum foreign_item_ { - foreign_item_fn(fn_decl, [ty_param]/~), + foreign_item_fn(fn_decl, ~[ty_param]), } // The data we save and restore about an inlined item or method. This is not @@ -749,8 +749,8 @@ enum inlined_item { ii_item(@item), ii_method(def_id /* impl id */, @method), ii_foreign(@foreign_item), - ii_ctor(class_ctor, ident, [ty_param]/~, def_id /* parent id */), - ii_dtor(class_dtor, ident, [ty_param]/~, def_id /* parent id */) + ii_ctor(class_ctor, ident, ~[ty_param], def_id /* parent id */), + ii_dtor(class_dtor, ident, ~[ty_param], def_id /* parent id */) } // diff --git a/src/libsyntax/ast_map.rs b/src/libsyntax/ast_map.rs index 295f3416d36..3b4fe5f91ee 100644 --- a/src/libsyntax/ast_map.rs +++ b/src/libsyntax/ast_map.rs @@ -7,11 +7,11 @@ import ast_util::inlined_item_methods; import diagnostic::span_handler; enum path_elt { path_mod(ident), path_name(ident) } -type path = [path_elt]/~; +type path = ~[path_elt]; /* FIXMEs that say "bad" are as per #2543 */ fn path_to_str_with_sep(p: path, sep: str) -> str { - let strs = vec::map(p) {|e| + let strs = do vec::map(p) |e| { alt e { path_mod(s) { /* FIXME (#2543) */ copy *s } path_name(s) { /* FIXME (#2543) */ copy *s } @@ -45,9 +45,9 @@ enum ast_node { node_local(uint), // Constructor for a class // def_id is parent id - node_ctor(ident, [ty_param]/~, @class_ctor, def_id, @path), + node_ctor(ident, ~[ty_param], @class_ctor, def_id, @path), // Destructor for a class - node_dtor([ty_param]/~, @class_dtor, def_id, @path), + node_dtor(~[ty_param], @class_dtor, def_id, @path), node_block(blk), } @@ -57,7 +57,7 @@ type ctx = {map: map, mut path: path, type vt = visit::vt<ctx>; fn extend(cx: ctx, +elt: ident) -> @path { - @(vec::append(cx.path, [path_name(elt)]/~)) + @(vec::append(cx.path, ~[path_name(elt)])) } fn mk_ast_map_visitor() -> vt { @@ -75,7 +75,7 @@ fn mk_ast_map_visitor() -> vt { fn map_crate(diag: span_handler, c: crate) -> map { let cx = {map: std::map::int_hash(), - mut path: []/~, + mut path: ~[], mut local_id: 0u, diag: diag}; visit::visit_crate(c, cx, mk_ast_map_visitor()); @@ -119,7 +119,7 @@ fn map_decoded_item(diag: span_handler, fn map_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, sp: codemap::span, id: node_id, cx: ctx, v: vt) { - for decl.inputs.each {|a| + for decl.inputs.each |a| { cx.map.insert(a.id, node_arg(/* FIXME (#2543) */ copy a, cx.local_id)); @@ -156,7 +156,7 @@ fn map_block(b: blk, cx: ctx, v: vt) { } fn number_pat(cx: ctx, pat: @pat) { - ast_util::walk_pat(pat) {|p| + do ast_util::walk_pat(pat) |p| { alt p.node { pat_ident(_, _) { cx.map.insert(p.id, node_local(cx.local_id)); @@ -190,13 +190,13 @@ fn map_item(i: @item, cx: ctx, v: vt) { alt i.node { item_impl(_, _, _, _, ms) { let impl_did = ast_util::local_def(i.id); - for ms.each {|m| + for ms.each |m| { map_method(impl_did, extend(cx, i.ident), m, cx); } } item_enum(vs, _, _) { - for vs.each {|v| + for vs.each |v| { cx.map.insert(v.node.id, node_variant( /* FIXME (#2543) */ copy v, i, extend(cx, i.ident))); @@ -207,7 +207,7 @@ fn map_item(i: @item, cx: ctx, v: vt) { either::left(msg) { cx.diag.span_fatal(i.span, msg); } either::right(abi) { abi } }; - for nm.items.each {|nitem| + for nm.items.each |nitem| { cx.map.insert(nitem.id, node_foreign_item(nitem, abi, /* FIXME (#2543) */ @@ -218,12 +218,12 @@ fn map_item(i: @item, cx: ctx, v: vt) { let (_, ms) = ast_util::split_class_items(items); // Map iface refs to their parent classes. This is // so we can find the self_ty - vec::iter(ifces) {|p| cx.map.insert(p.id, + do vec::iter(ifces) |p| { cx.map.insert(p.id, node_item(i, item_path)); }; let d_id = ast_util::local_def(i.id); let p = extend(cx, i.ident); // only need to handle methods - vec::iter(ms) {|m| map_method(d_id, p, m, cx); } + do vec::iter(ms) |m| { map_method(d_id, p, m, cx); } } _ { } } @@ -240,7 +240,7 @@ fn map_item(i: @item, cx: ctx, v: vt) { fn map_view_item(vi: @view_item, cx: ctx, _v: vt) { alt vi.node { view_item_export(vps) { - for vps.each {|vp| + for vps.each |vp| { let (id, name) = alt vp.node { view_path_simple(nm, _, id) { (id, /* FIXME (#2543) */ copy nm) diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs index 23326343d38..0115ffb0331 100644 --- a/src/libsyntax/ast_util.rs +++ b/src/libsyntax/ast_util.rs @@ -23,9 +23,9 @@ pure fn dummy_sp() -> span { ret mk_sp(0u, 0u); } pure fn path_name(p: @path) -> str { path_name_i(p.idents) } -pure fn path_name_i(idents: [ident]/~) -> str { +pure fn path_name_i(idents: ~[ident]) -> str { // FIXME: Bad copies (#2543 -- same for everything else that says "bad") - str::connect(idents.map({|i|*i}), "::") + str::connect(idents.map(|i|*i), "::") } pure fn path_to_ident(p: @path) -> ident { vec::last(p.idents) } @@ -152,11 +152,11 @@ pure fn float_ty_to_str(t: float_ty) -> str { fn is_exported(i: ident, m: _mod) -> bool { let mut local = false; let mut parent_enum : option<ident> = none; - for m.items.each {|it| + for m.items.each |it| { if it.ident == i { local = true; } alt it.node { item_enum(variants, _, _) { - for variants.each {|v| + for variants.each |v| { if v.node.name == i { local = true; parent_enum = some(/* FIXME (#2543) */ copy it.ident); @@ -168,11 +168,11 @@ fn is_exported(i: ident, m: _mod) -> bool { if local { break; } } let mut has_explicit_exports = false; - for m.view_items.each {|vi| + for m.view_items.each |vi| { alt vi.node { view_item_export(vps) { has_explicit_exports = true; - for vps.each {|vp| + for vps.each |vp| { alt vp.node { ast::view_path_simple(id, _, _) { if id == i { ret true; } @@ -187,7 +187,7 @@ fn is_exported(i: ident, m: _mod) -> bool { ast::view_path_list(path, ids, _) { if vec::len(path.idents) == 1u { if i == path.idents[0] { ret true; } - for ids.each {|id| + for ids.each |id| { if id.node.name == i { ret true; } } } else { @@ -246,19 +246,19 @@ fn new_def_hash<V: copy>() -> std::map::hashmap<ast::def_id, V> { } fn block_from_expr(e: @expr) -> blk { - let blk_ = default_block([]/~, option::some::<@expr>(e), e.id); + let blk_ = default_block(~[], option::some::<@expr>(e), e.id); ret {node: blk_, span: e.span}; } -fn default_block(+stmts1: [@stmt]/~, expr1: option<@expr>, id1: node_id) -> +fn default_block(+stmts1: ~[@stmt], expr1: option<@expr>, id1: node_id) -> blk_ { - {view_items: []/~, stmts: stmts1, + {view_items: ~[], stmts: stmts1, expr: expr1, id: id1, rules: default_blk} } fn ident_to_path(s: span, +i: ident) -> @path { - @{span: s, global: false, idents: [i]/~, - rp: none, types: []/~} + @{span: s, global: false, idents: ~[i], + rp: none, types: ~[]} } pure fn is_unguarded(&&a: arm) -> bool { @@ -268,7 +268,7 @@ pure fn is_unguarded(&&a: arm) -> bool { } } -pure fn unguarded_pat(a: arm) -> option<[@pat]/~> { +pure fn unguarded_pat(a: arm) -> option<~[@pat]> { if is_unguarded(a) { some(/* FIXME (#2543) */ copy a.pats) } else { none } } @@ -287,15 +287,17 @@ pure fn class_item_ident(ci: @class_member) -> ident { type ivar = {ident: ident, ty: @ty, cm: class_mutability, id: node_id, vis: visibility}; -fn public_methods(ms: [@method]/~) -> [@method]/~ { - vec::filter(ms, {|m| alt m.vis { +fn public_methods(ms: ~[@method]) -> ~[@method] { + vec::filter(ms, + |m| alt m.vis { public { true } - _ { false }}}) + _ { false } + }) } -fn split_class_items(cs: [@class_member]/~) -> ([ivar]/~, [@method]/~) { - let mut vs = []/~, ms = []/~; - for cs.each {|c| +fn split_class_items(cs: ~[@class_member]) -> (~[ivar], ~[@method]) { + let mut vs = ~[], ms = ~[]; + for cs.each |c| { alt c.node { instance_var(i, t, cm, id, vis) { vec::push(vs, {ident: /* FIXME (#2543) */ copy i, @@ -383,9 +385,9 @@ fn operator_prec(op: ast::binop) -> uint { fn dtor_dec() -> fn_decl { let nil_t = @{id: 0, node: ty_nil, span: dummy_sp()}; // dtor has one argument, of type () - {inputs: [{mode: ast::expl(ast::by_ref), - ty: nil_t, ident: @"_", id: 0}]/~, - output: nil_t, purity: impure_fn, cf: return_val, constraints: []/~} + {inputs: ~[{mode: ast::expl(ast::by_ref), + ty: nil_t, ident: @"_", id: 0}], + output: nil_t, purity: impure_fn, cf: return_val, constraints: ~[]} } // ______________________________________________________________________ @@ -408,7 +410,7 @@ fn id_visitor(vfn: fn@(node_id)) -> visit::vt<()> { alt vi.node { view_item_use(_, _, id) { vfn(id) } view_item_import(vps) | view_item_export(vps) { - vec::iter(vps) {|vp| + do vec::iter(vps) |vp| { alt vp.node { view_path_simple(_, _, id) { vfn(id) } view_path_glob(_, id) { vfn(id) } @@ -426,7 +428,7 @@ fn id_visitor(vfn: fn@(node_id)) -> visit::vt<()> { visit_item: fn@(i: @item) { vfn(i.id); alt i.node { - item_enum(vs, _, _) { for vs.each {|v| vfn(v.node.id); } } + item_enum(vs, _, _) { for vs.each |v| { vfn(v.node.id); } } _ {} } }, @@ -472,8 +474,8 @@ fn id_visitor(vfn: fn@(node_id)) -> visit::vt<()> { } }, - visit_ty_params: fn@(ps: [ty_param]/~) { - vec::iter(ps) {|p| vfn(p.id) } + visit_ty_params: fn@(ps: ~[ty_param]) { + vec::iter(ps, |p| vfn(p.id)) }, visit_constr: fn@(_p: @path, _sp: span, id: node_id) { @@ -486,33 +488,33 @@ fn id_visitor(vfn: fn@(node_id)) -> visit::vt<()> { alt fk { visit::fk_ctor(nm, tps, self_id, parent_id) { - vec::iter(tps) {|tp| vfn(tp.id)} + vec::iter(tps, |tp| vfn(tp.id)); vfn(id); vfn(self_id); vfn(parent_id.node); } visit::fk_dtor(tps, self_id, parent_id) { - vec::iter(tps) {|tp| vfn(tp.id)} + vec::iter(tps, |tp| vfn(tp.id)); vfn(id); vfn(self_id); vfn(parent_id.node); } visit::fk_item_fn(_, tps) { - vec::iter(tps) {|tp| vfn(tp.id)} + vec::iter(tps, |tp| vfn(tp.id)); } visit::fk_method(_, tps, m) { vfn(m.self_id); - vec::iter(tps) {|tp| vfn(tp.id)} + vec::iter(tps, |tp| vfn(tp.id)); } visit::fk_anon(_, capture_clause) | visit::fk_fn_block(capture_clause) { - for vec::each(*capture_clause) {|clause| + for vec::each(*capture_clause) |clause| { vfn(clause.id); } } } - vec::iter(d.inputs) {|arg| + do vec::iter(d.inputs) |arg| { vfn(arg.id) } }, @@ -536,7 +538,7 @@ fn visit_ids_for_inlined_item(item: inlined_item, vfn: fn@(node_id)) { fn compute_id_range(visit_ids_fn: fn(fn@(node_id))) -> id_range { let min = @mut int::max_value; let max = @mut int::min_value; - visit_ids_fn { |id| + do visit_ids_fn |id| { *min = int::min(*min, id); *max = int::max(*max, id + 1); } @@ -544,7 +546,7 @@ fn compute_id_range(visit_ids_fn: fn(fn@(node_id))) -> id_range { } fn compute_id_range_for_inlined_item(item: inlined_item) -> id_range { - compute_id_range { |f| visit_ids_for_inlined_item(item, f) } + compute_id_range(|f| visit_ids_for_inlined_item(item, f)) } pure fn is_item_impl(item: @ast::item) -> bool { @@ -558,8 +560,12 @@ fn walk_pat(pat: @pat, it: fn(@pat)) { it(pat); alt pat.node { pat_ident(pth, some(p)) { walk_pat(p, it); } - pat_rec(fields, _) { for fields.each {|f| walk_pat(f.pat, it); } } - pat_enum(_, some(s)) | pat_tup(s) { for s.each {|p| walk_pat(p, it); } } + pat_rec(fields, _) { + for fields.each |f| { walk_pat(f.pat, it); } + } + pat_enum(_, some(s)) | pat_tup(s) { + for s.each |p| { walk_pat(p, it); } + } pat_box(s) | pat_uniq(s) { walk_pat(s, it); } pat_wild | pat_lit(_) | pat_range(_, _) | pat_ident(_, _) | pat_enum(_, _) {} diff --git a/src/libsyntax/attr.rs b/src/libsyntax/attr.rs index 64aacf8f042..e1cfa4830ae 100644 --- a/src/libsyntax/attr.rs +++ b/src/libsyntax/attr.rs @@ -59,7 +59,7 @@ fn mk_name_value_item(+name: ast::ident, +value: ast::lit) ret @dummy_spanned(ast::meta_name_value(name, value)); } -fn mk_list_item(+name: ast::ident, +items: [@ast::meta_item]/~) -> +fn mk_list_item(+name: ast::ident, +items: ~[@ast::meta_item]) -> @ast::meta_item { ret @dummy_spanned(ast::meta_list(name, items)); } @@ -88,9 +88,9 @@ fn mk_sugared_doc_attr(text: str, lo: uint, hi: uint) -> ast::attribute { fn attr_meta(attr: ast::attribute) -> @ast::meta_item { @attr.node.value } // Get the meta_items from inside a vector of attributes -fn attr_metas(attrs: [ast::attribute]/~) -> [@ast::meta_item]/~ { - let mut mitems = []/~; - for attrs.each {|a| vec::push(mitems, attr_meta(a)); } +fn attr_metas(attrs: ~[ast::attribute]) -> ~[@ast::meta_item] { + let mut mitems = ~[]; + for attrs.each |a| { vec::push(mitems, attr_meta(a)); } ret mitems; } @@ -141,7 +141,7 @@ fn get_meta_item_value_str(meta: @ast::meta_item) -> option<@str> { } #[doc = "Gets a list of inner meta items from a list meta_item type"] -fn get_meta_item_list(meta: @ast::meta_item) -> option<[@ast::meta_item]/~> { +fn get_meta_item_list(meta: @ast::meta_item) -> option<~[@ast::meta_item]> { alt meta.node { ast::meta_list(_, l) { option::some(/* FIXME (#2543) */ copy l) } _ { option::none } @@ -170,8 +170,8 @@ fn get_name_value_str_pair( #[doc = " Search a list of attributes and return only those with a specific name "] -fn find_attrs_by_name(attrs: [ast::attribute]/~, +name: str) -> - [ast::attribute]/~ { +fn find_attrs_by_name(attrs: ~[ast::attribute], +name: str) -> + ~[ast::attribute] { let filter = ( fn@(a: ast::attribute) -> option<ast::attribute> { if *get_attr_name(a) == name { @@ -185,8 +185,8 @@ fn find_attrs_by_name(attrs: [ast::attribute]/~, +name: str) -> #[doc = " Searcha list of meta items and return only those with a specific name "] -fn find_meta_items_by_name(metas: [@ast::meta_item]/~, +name: str) -> - [@ast::meta_item]/~ { +fn find_meta_items_by_name(metas: ~[@ast::meta_item], +name: str) -> + ~[@ast::meta_item] { let filter = fn@(&&m: @ast::meta_item) -> option<@ast::meta_item> { if *get_meta_item_name(m) == name { option::some(m) @@ -199,10 +199,10 @@ fn find_meta_items_by_name(metas: [@ast::meta_item]/~, +name: str) -> Returns true if a list of meta items contains another meta item. The comparison is performed structurally. "] -fn contains(haystack: [@ast::meta_item]/~, needle: @ast::meta_item) -> bool { +fn contains(haystack: ~[@ast::meta_item], needle: @ast::meta_item) -> bool { #debug("looking for %s", print::pprust::meta_item_to_str(*needle)); - for haystack.each {|item| + for haystack.each |item| { #debug("looking in %s", print::pprust::meta_item_to_str(*item)); if eq(item, needle) { #debug("found it!"); ret true; } @@ -224,7 +224,7 @@ fn eq(a: @ast::meta_item, b: @ast::meta_item) -> bool { } ast::meta_list(na, la) { - // [Fixme-sorting]/~ + // ~[Fixme-sorting] // FIXME (#607): Needs implementing // This involves probably sorting the list by name and // meta_item variant @@ -233,16 +233,16 @@ fn eq(a: @ast::meta_item, b: @ast::meta_item) -> bool { } } -fn contains_name(metas: [@ast::meta_item]/~, +name: str) -> bool { +fn contains_name(metas: ~[@ast::meta_item], +name: str) -> bool { let matches = find_meta_items_by_name(metas, name); ret vec::len(matches) > 0u; } -fn attrs_contains_name(attrs: [ast::attribute]/~, +name: str) -> bool { +fn attrs_contains_name(attrs: ~[ast::attribute], +name: str) -> bool { vec::is_not_empty(find_attrs_by_name(attrs, name)) } -fn first_attr_value_str_by_name(attrs: [ast::attribute]/~, +name: str) +fn first_attr_value_str_by_name(attrs: ~[ast::attribute], +name: str) -> option<@str> { let mattrs = find_attrs_by_name(attrs, name); if vec::len(mattrs) > 0u { @@ -252,7 +252,7 @@ fn first_attr_value_str_by_name(attrs: [ast::attribute]/~, +name: str) } fn last_meta_item_by_name( - items: [@ast::meta_item]/~, + items: ~[@ast::meta_item], +name: str ) -> option<@ast::meta_item> { let items = attr::find_meta_items_by_name(items, name); @@ -260,7 +260,7 @@ fn last_meta_item_by_name( } fn last_meta_item_value_str_by_name( - items: [@ast::meta_item]/~, + items: ~[@ast::meta_item], +name: str ) -> option<@str> { alt last_meta_item_by_name(items, name) { @@ -275,9 +275,9 @@ fn last_meta_item_value_str_by_name( } fn last_meta_item_list_by_name( - items: [@ast::meta_item]/~, + items: ~[@ast::meta_item], +name: str -) -> option<[@ast::meta_item]/~> { +) -> option<~[@ast::meta_item]> { alt last_meta_item_by_name(items, name) { some(item) { attr::get_meta_item_list(item) @@ -291,7 +291,7 @@ fn last_meta_item_list_by_name( // FIXME (#607): This needs to sort by meta_item variant in addition to // the item name (See [Fixme-sorting]) -fn sort_meta_items(+items: [@ast::meta_item]/~) -> [@ast::meta_item]/~ { +fn sort_meta_items(+items: ~[@ast::meta_item]) -> ~[@ast::meta_item] { fn lteq(&&ma: @ast::meta_item, &&mb: @ast::meta_item) -> bool { fn key(m: @ast::meta_item) -> ast::ident { alt m.node { @@ -304,16 +304,15 @@ fn sort_meta_items(+items: [@ast::meta_item]/~) -> [@ast::meta_item]/~ { } // This is sort of stupid here, converting to a vec of mutables and back - let v: [mut @ast::meta_item]/~ = vec::to_mut(items); + let v: ~[mut @ast::meta_item] = vec::to_mut(items); std::sort::quick_sort(lteq, v); ret vec::from_mut(v); } -fn remove_meta_items_by_name(items: [@ast::meta_item]/~, name: ast::ident) -> - [@ast::meta_item]/~ { +fn remove_meta_items_by_name(items: ~[@ast::meta_item], name: ast::ident) -> + ~[@ast::meta_item] { - ret vec::filter_map(items, { - |item| + ret vec::filter_map(items, |item| { if get_meta_item_name(item) != name { option::some(/* FIXME (#2543) */ copy item) } else { @@ -322,9 +321,9 @@ fn remove_meta_items_by_name(items: [@ast::meta_item]/~, name: ast::ident) -> }); } -fn find_linkage_attrs(attrs: [ast::attribute]/~) -> [ast::attribute]/~ { - let mut found = []/~; - for find_attrs_by_name(attrs, "link").each {|attr| +fn find_linkage_attrs(attrs: ~[ast::attribute]) -> ~[ast::attribute] { + let mut found = ~[]; + for find_attrs_by_name(attrs, "link").each |attr| { alt attr.node.value.node { ast::meta_list(_, _) { vec::push(found, attr) } _ { #debug("ignoring link attribute that has incorrect type"); } @@ -337,15 +336,15 @@ fn find_linkage_attrs(attrs: [ast::attribute]/~) -> [ast::attribute]/~ { From a list of crate attributes get only the meta_items that impact crate linkage "] -fn find_linkage_metas(attrs: [ast::attribute]/~) -> [@ast::meta_item]/~ { - find_linkage_attrs(attrs).flat_map {|attr| +fn find_linkage_metas(attrs: ~[ast::attribute]) -> ~[@ast::meta_item] { + do find_linkage_attrs(attrs).flat_map |attr| { alt check attr.node.value.node { ast::meta_list(_, items) { /* FIXME (#2543) */ copy items } } } } -fn foreign_abi(attrs: [ast::attribute]/~) -> either<str, ast::foreign_abi> { +fn foreign_abi(attrs: ~[ast::attribute]) -> either<str, ast::foreign_abi> { ret alt attr::first_attr_value_str_by_name(attrs, "abi") { option::none { either::right(ast::foreign_abi_cdecl) @@ -372,9 +371,9 @@ enum inline_attr { } #[doc = "True if something like #[inline] is found in the list of attrs."] -fn find_inline_attr(attrs: [ast::attribute]/~) -> inline_attr { +fn find_inline_attr(attrs: ~[ast::attribute]) -> inline_attr { // TODO---validate the usage of #[inline] and #[inline(always)] - vec::foldl(ia_none, attrs) {|ia,attr| + do vec::foldl(ia_none, attrs) |ia,attr| { alt attr.node.value.node { ast::meta_word(@"inline") { ia_hint } ast::meta_list(@"inline", items) { @@ -391,9 +390,9 @@ fn find_inline_attr(attrs: [ast::attribute]/~) -> inline_attr { fn require_unique_names(diagnostic: span_handler, - metas: [@ast::meta_item]/~) { + metas: ~[@ast::meta_item]) { let map = map::str_hash(); - for metas.each {|meta| + for metas.each |meta| { let name = get_meta_item_name(meta); // FIXME: How do I silence the warnings? --pcw (#2619) diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index a84b6897556..4c30016fdc8 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -43,7 +43,7 @@ enum file_substr { type filemap = @{name: filename, substr: file_substr, src: @str, - start_pos: file_pos, mut lines: [file_pos]/~}; + start_pos: file_pos, mut lines: ~[file_pos]}; type codemap = @{files: dvec<filemap>}; @@ -57,7 +57,7 @@ fn new_filemap_w_substr(+filename: filename, +substr: file_substr, -> filemap { ret @{name: filename, substr: substr, src: src, start_pos: {ch: start_pos_ch, byte: start_pos_byte}, - mut lines: [{ch: start_pos_ch, byte: start_pos_byte}]/~}; + mut lines: ~[{ch: start_pos_ch, byte: start_pos_byte}]}; } fn new_filemap(+filename: filename, src: @str, @@ -174,7 +174,7 @@ fn span_to_str(sp: span, cm: codemap) -> str { lo.line, lo.col, hi.line, hi.col) } -type file_lines = {file: filemap, lines: [uint]/~}; +type file_lines = {file: filemap, lines: ~[uint]}; fn span_to_filename(sp: span, cm: codemap::codemap) -> filename { let lo = lookup_char_pos(cm, sp.lo); @@ -184,8 +184,8 @@ fn span_to_filename(sp: span, cm: codemap::codemap) -> filename { fn span_to_lines(sp: span, cm: codemap::codemap) -> @file_lines { let lo = lookup_char_pos(cm, sp.lo); let hi = lookup_char_pos(cm, sp.hi); - let mut lines = []/~; - for uint::range(lo.line - 1u, hi.line as uint) {|i| + let mut lines = ~[]; + for uint::range(lo.line - 1u, hi.line as uint) |i| { vec::push(lines, i); }; ret @{file: lo.file, lines: lines}; @@ -224,7 +224,7 @@ fn get_snippet(cm: codemap::codemap, fidx: uint, lo: uint, hi: uint) -> str } fn get_filemap(cm: codemap, filename: str) -> filemap { - for cm.files.each {|fm| if fm.name == filename { ret fm; } } + for cm.files.each |fm| { if fm.name == filename { ret fm; } } //XXjdm the following triggers a mismatched type bug // (or expected function, found _|_) fail; // ("asking for " + filename + " which we don't know about"); diff --git a/src/libsyntax/diagnostic.rs b/src/libsyntax/diagnostic.rs index b8ebb27f51b..4e1d8f824e1 100644 --- a/src/libsyntax/diagnostic.rs +++ b/src/libsyntax/diagnostic.rs @@ -207,7 +207,7 @@ fn highlight_lines(cm: codemap::codemap, sp: span, elided = true; } // Print the offending lines - for display_lines.each {|line| + for display_lines.each |line| { io::stderr().write_str(#fmt["%s:%u ", fm.name, line + 1u]); let s = codemap::get_line(fm, line as int) + "\n"; io::stderr().write_str(s); @@ -249,11 +249,9 @@ fn highlight_lines(cm: codemap::codemap, sp: span, } fn print_macro_backtrace(cm: codemap::codemap, sp: span) { - option::iter (sp.expn_info) {|ei| - let ss = option::map_default(ei.callie.span, @"", { - |span| - @codemap::span_to_str(span, cm) - }); + do option::iter (sp.expn_info) |ei| { + let ss = option::map_default(ei.callie.span, @"", + |span| @codemap::span_to_str(span, cm)); print_diagnostic(*ss, note, #fmt("in expansion of #%s", ei.callie.name)); let ss = codemap::span_to_str(ei.call_site, cm); diff --git a/src/libsyntax/ext/auto_serialize.rs b/src/libsyntax/ext/auto_serialize.rs index b89be203e16..d2d685f8f7d 100644 --- a/src/libsyntax/ext/auto_serialize.rs +++ b/src/libsyntax/ext/auto_serialize.rs @@ -84,13 +84,13 @@ mod syntax { export parse; } -type ser_tps_map = map::hashmap<str, fn@(@ast::expr) -> [@ast::stmt]/~>; +type ser_tps_map = map::hashmap<str, fn@(@ast::expr) -> ~[@ast::stmt]>; type deser_tps_map = map::hashmap<str, fn@() -> @ast::expr>; fn expand(cx: ext_ctxt, span: span, _mitem: ast::meta_item, - in_items: [@ast::item]/~) -> [@ast::item]/~ { + in_items: ~[@ast::item]) -> ~[@ast::item] { fn not_auto_serialize(a: ast::attribute) -> bool { attr::get_attr_name(a) != @"auto_serialize" } @@ -100,15 +100,15 @@ fn expand(cx: ext_ctxt, with *item} } - vec::flat_map(in_items) {|in_item| + do vec::flat_map(in_items) |in_item| { alt in_item.node { ast::item_ty(ty, tps, _) { - vec::append([filter_attrs(in_item)]/~, + vec::append(~[filter_attrs(in_item)], ty_fns(cx, in_item.ident, ty, tps)) } ast::item_enum(variants, tps, _) { - vec::append([filter_attrs(in_item)]/~, + vec::append(~[filter_attrs(in_item)], enum_fns(cx, in_item.ident, in_item.span, variants, tps)) } @@ -117,7 +117,7 @@ fn expand(cx: ext_ctxt, cx.span_err(span, "#[auto_serialize] can only be \ applied to type and enum \ definitions"); - [in_item]/~ + ~[in_item] } } } @@ -129,29 +129,29 @@ impl helpers for ext_ctxt { let head = vec::init(base_path.idents); let tail = vec::last(base_path.idents); self.path(base_path.span, - vec::append(head, [@(helper_name + "_" + *tail)]/~)) + vec::append(head, ~[@(helper_name + "_" + *tail)])) } - fn path(span: span, strs: [ast::ident]/~) -> @ast::path { - @{span: span, global: false, idents: strs, rp: none, types: []/~} + fn path(span: span, strs: ~[ast::ident]) -> @ast::path { + @{span: span, global: false, idents: strs, rp: none, types: ~[]} } - fn path_tps(span: span, strs: [ast::ident]/~, - tps: [@ast::ty]/~) -> @ast::path { + fn path_tps(span: span, strs: ~[ast::ident], + tps: ~[@ast::ty]) -> @ast::path { @{span: span, global: false, idents: strs, rp: none, types: tps} } - fn ty_path(span: span, strs: [ast::ident]/~, - tps: [@ast::ty]/~) -> @ast::ty { + fn ty_path(span: span, strs: ~[ast::ident], + tps: ~[@ast::ty]) -> @ast::ty { @{id: self.next_id(), node: ast::ty_path(self.path_tps(span, strs, tps), self.next_id()), span: span} } fn ty_fn(span: span, - -input_tys: [@ast::ty]/~, + -input_tys: ~[@ast::ty], -output: @ast::ty) -> @ast::ty { - let args = vec::map(input_tys) {|ty| + let args = do vec::map(input_tys) |ty| { {mode: ast::expl(ast::by_ref), ty: ty, ident: @"", @@ -163,7 +163,7 @@ impl helpers for ext_ctxt { output: output, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~}), + constraints: ~[]}), span: span} } @@ -176,11 +176,11 @@ impl helpers for ext_ctxt { } fn var_ref(span: span, name: ast::ident) -> @ast::expr { - self.expr(span, ast::expr_path(self.path(span, [name]/~))) + self.expr(span, ast::expr_path(self.path(span, ~[name]))) } - fn blk(span: span, stmts: [@ast::stmt]/~) -> ast::blk { - {node: {view_items: []/~, + fn blk(span: span, stmts: ~[@ast::stmt]) -> ast::blk { + {node: {view_items: ~[], stmts: stmts, expr: none, id: self.next_id(), @@ -189,8 +189,8 @@ impl helpers for ext_ctxt { } fn expr_blk(expr: @ast::expr) -> ast::blk { - {node: {view_items: []/~, - stmts: []/~, + {node: {view_items: ~[], + stmts: ~[], expr: some(expr), id: self.next_id(), rules: ast::default_blk}, @@ -198,8 +198,8 @@ impl helpers for ext_ctxt { } fn binder_pat(span: span, nm: ast::ident) -> @ast::pat { - let path = @{span: span, global: false, idents: [nm]/~, - rp: none, types: []/~}; + let path = @{span: span, global: false, idents: ~[nm], + rp: none, types: ~[]}; @{id: self.next_id(), node: ast::pat_ident(path, none), span: span} @@ -210,7 +210,7 @@ impl helpers for ext_ctxt { span: expr.span} } - fn alt_stmt(arms: [ast::arm]/~, + fn alt_stmt(arms: ~[ast::arm], span: span, -v: @ast::expr) -> @ast::stmt { self.stmt( self.expr( @@ -237,12 +237,12 @@ impl helpers for ext_ctxt { fn lambda(blk: ast::blk) -> @ast::expr { let ext_cx = self; let blk_e = self.expr(blk.span, ast::expr_block(blk)); - #ast{ {|| $(blk_e) } } + #ast{ || $(blk_e) } } fn clone_folder() -> fold::ast_fold { fold::make_fold(@{ - new_id: {|_id| self.next_id()} + new_id: |_id| self.next_id() with *fold::default_ast_fold() }) } @@ -272,7 +272,7 @@ impl helpers for ext_ctxt { } let fld = fold::make_fold(@{ - new_span: {|a|repl_sp(a, ast_util::dummy_sp(), span)} + new_span: |a| repl_sp(a, ast_util::dummy_sp(), span) with *fold::default_ast_fold() }); @@ -282,7 +282,7 @@ impl helpers for ext_ctxt { fn ser_path(cx: ext_ctxt, tps: ser_tps_map, path: @ast::path, -s: @ast::expr, -v: @ast::expr) - -> [@ast::stmt]/~ { + -> ~[@ast::stmt] { let ext_cx = cx; // required for #ast{} // We want to take a path like a::b::c<...> and generate a call @@ -294,34 +294,36 @@ fn ser_path(cx: ext_ctxt, tps: ser_tps_map, path: @ast::path, ast::expr_path( cx.helper_path(path, "serialize"))); - let ty_args = vec::map(path.types) {|ty| + let ty_args = do vec::map(path.types) |ty| { let sv_stmts = ser_ty(cx, tps, ty, cx.clone(s), #ast{ __v }); let sv = cx.expr(path.span, ast::expr_block(cx.blk(path.span, sv_stmts))); - cx.at(ty.span, #ast{ {|__v| $(sv)} }) + cx.at(ty.span, #ast{ |__v| $(sv) }) }; - [cx.stmt( + ~[cx.stmt( cx.expr( path.span, - ast::expr_call(callee, vec::append([s, v]/~, ty_args), false)))]/~ + ast::expr_call(callee, vec::append(~[s, v], ty_args), false)))] } fn ser_variant(cx: ext_ctxt, tps: ser_tps_map, - tys: [@ast::ty]/~, + tys: ~[@ast::ty], span: span, -s: @ast::expr, - pfn: fn([@ast::pat]/~) -> ast::pat_, + pfn: fn(~[@ast::pat]) -> ast::pat_, bodyfn: fn(-@ast::expr, ast::blk) -> @ast::expr, argfn: fn(-@ast::expr, uint, ast::blk) -> @ast::expr) -> ast::arm { - let vnames = vec::from_fn(vec::len(tys)) {|i| @#fmt["__v%u", i]}; - let pats = vec::from_fn(vec::len(tys)) {|i| + let vnames = do vec::from_fn(vec::len(tys)) |i| { + @#fmt["__v%u", i] + }; + let pats = do vec::from_fn(vec::len(tys)) |i| { cx.binder_pat(tys[i].span, vnames[i]) }; let pat: @ast::pat = @{id: cx.next_id(), node: pfn(pats), span: span}; - let stmts = vec::from_fn(vec::len(tys)) {|i| + let stmts = do vec::from_fn(vec::len(tys)) |i| { let v = cx.var_ref(span, vnames[i]); let arg_blk = cx.blk( @@ -331,9 +333,9 @@ fn ser_variant(cx: ext_ctxt, }; let body_blk = cx.blk(span, stmts); - let body = cx.blk(span, [cx.stmt(bodyfn(s, body_blk))]/~); + let body = cx.blk(span, ~[cx.stmt(bodyfn(s, body_blk))]); - {pats: [pat]/~, guard: none, body: body} + {pats: ~[pat], guard: none, body: body} } fn ser_lambda(cx: ext_ctxt, tps: ser_tps_map, ty: @ast::ty, @@ -343,43 +345,43 @@ fn ser_lambda(cx: ext_ctxt, tps: ser_tps_map, ty: @ast::ty, fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, ty: @ast::ty, -s: @ast::expr, -v: @ast::expr) - -> [@ast::stmt]/~ { + -> ~[@ast::stmt] { let ext_cx = cx; // required for #ast{} alt ty.node { ast::ty_nil { - [#ast[stmt]{$(s).emit_nil()}]/~ + ~[#ast[stmt]{$(s).emit_nil()}] } ast::ty_bot { cx.span_err( ty.span, #fmt["Cannot serialize bottom type"]); - []/~ + ~[] } ast::ty_box(mt) { let l = ser_lambda(cx, tps, mt.ty, cx.clone(s), #ast{ *$(v) }); - [#ast(stmt){$(s).emit_box($(l));}]/~ + ~[#ast(stmt){$(s).emit_box($(l));}] } ast::ty_uniq(mt) { let l = ser_lambda(cx, tps, mt.ty, cx.clone(s), #ast{ *$(v) }); - [#ast(stmt){$(s).emit_uniq($(l));}]/~ + ~[#ast(stmt){$(s).emit_uniq($(l));}] } ast::ty_ptr(_) | ast::ty_rptr(_, _) { cx.span_err(ty.span, "cannot serialize pointer types"); - []/~ + ~[] } ast::ty_rec(flds) { - let fld_stmts = vec::from_fn(vec::len(flds)) {|fidx| + let fld_stmts = do vec::from_fn(vec::len(flds)) |fidx| { let fld = flds[fidx]; let vf = cx.expr(fld.span, ast::expr_field(cx.clone(v), fld.node.ident, - []/~)); + ~[])); let s = cx.clone(s); let f = cx.lit_str(fld.span, fld.node.ident); let i = cx.lit_uint(fld.span, fidx); @@ -387,12 +389,12 @@ fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, #ast(stmt){$(s).emit_rec_field($(f), $(i), $(l));} }; let fld_lambda = cx.lambda(cx.blk(ty.span, fld_stmts)); - [#ast(stmt){$(s).emit_rec($(fld_lambda));}]/~ + ~[#ast(stmt){$(s).emit_rec($(fld_lambda));}] } ast::ty_fn(_, _) { cx.span_err(ty.span, "cannot serialize function types"); - []/~ + ~[] } ast::ty_tup(tys) { @@ -404,29 +406,29 @@ fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, // } // }; - let arms = [ + let arms = ~[ ser_variant( cx, tps, tys, ty.span, s, // Generate pattern (v1, v2, v3) - {|pats| ast::pat_tup(pats)}, + |pats| ast::pat_tup(pats), // Generate body s.emit_tup(3, {|| blk }) - {|-s, blk| + |-s, blk| { let sz = cx.lit_uint(ty.span, vec::len(tys)); let body = cx.lambda(blk); #ast{ $(s).emit_tup($(sz), $(body)) } }, // Generate s.emit_tup_elt(i, {|| blk }) - {|-s, i, blk| + |-s, i, blk| { let idx = cx.lit_uint(ty.span, i); let body = cx.lambda(blk); #ast{ $(s).emit_tup_elt($(idx), $(body)) } }) - ]/~; - [cx.alt_stmt(arms, ty.span, v)]/~ + ]; + ~[cx.alt_stmt(arms, ty.span, v)] } ast::ty_path(path, _) { @@ -449,12 +451,12 @@ fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, ast::ty_mac(_) { cx.span_err(ty.span, "cannot serialize macro types"); - []/~ + ~[] } ast::ty_infer { cx.span_err(ty.span, "cannot serialize inferred types"); - []/~ + ~[] } ast::ty_vstore(@{node: ast::ty_vec(mt),_}, ast::vstore_uniq) | @@ -470,9 +472,9 @@ fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, cx.clone(s), cx.at(ty.span, #ast{ __e }))))); - [#ast(stmt){ - std::serialization::emit_from_vec($(s), $(v), {|__e| $(ser_e) }) - }]/~ + ~[#ast(stmt){ + std::serialization::emit_from_vec($(s), $(v), |__e| $(ser_e)) + }] } ast::ty_vstore(_, _) { @@ -483,61 +485,61 @@ fn ser_ty(cx: ext_ctxt, tps: ser_tps_map, } fn mk_ser_fn(cx: ext_ctxt, span: span, name: ast::ident, - tps: [ast::ty_param]/~, + tps: ~[ast::ty_param], f: fn(ext_ctxt, ser_tps_map, - -@ast::expr, -@ast::expr) -> [@ast::stmt]/~) + -@ast::expr, -@ast::expr) -> ~[@ast::stmt]) -> @ast::item { let ext_cx = cx; // required for #ast - let tp_types = vec::map(tps, {|tp| cx.ty_path(span, [tp.ident]/~, []/~)}); - let v_ty = cx.ty_path(span, [name]/~, tp_types); + let tp_types = vec::map(tps, |tp| cx.ty_path(span, ~[tp.ident], ~[])); + let v_ty = cx.ty_path(span, ~[name], tp_types); let tp_inputs = - vec::map(tps, {|tp| + vec::map(tps, |tp| {mode: ast::expl(ast::by_ref), ty: cx.ty_fn(span, - [cx.ty_path(span, [tp.ident]/~, []/~)]/~, + ~[cx.ty_path(span, ~[tp.ident], ~[])], cx.ty_nil(span)), ident: @("__s" + *tp.ident), - id: cx.next_id()}}); + id: cx.next_id()}); #debug["tp_inputs = %?", tp_inputs]; - let ser_inputs: [ast::arg]/~ = - vec::append([{mode: ast::expl(ast::by_ref), - ty: cx.ty_path(span, [@"__S"]/~, []/~), + let ser_inputs: ~[ast::arg] = + vec::append(~[{mode: ast::expl(ast::by_ref), + ty: cx.ty_path(span, ~[@"__S"], ~[]), ident: @"__s", id: cx.next_id()}, {mode: ast::expl(ast::by_ref), ty: v_ty, ident: @"__v", - id: cx.next_id()}]/~, + id: cx.next_id()}], tp_inputs); let tps_map = map::str_hash(); - vec::iter2(tps, tp_inputs) {|tp, arg| + do vec::iter2(tps, tp_inputs) |tp, arg| { let arg_ident = arg.ident; tps_map.insert( *tp.ident, - fn@(v: @ast::expr) -> [@ast::stmt]/~ { + fn@(v: @ast::expr) -> ~[@ast::stmt] { let f = cx.var_ref(span, arg_ident); #debug["serializing type arg %s", *arg_ident]; - [#ast(stmt){$(f)($(v));}]/~ + ~[#ast(stmt){$(f)($(v));}] }); } - let ser_bnds = @[ + let ser_bnds = @~[ ast::bound_iface(cx.ty_path(span, - [@"std", @"serialization", - @"serializer"]/~, - []/~))]/~; + ~[@"std", @"serialization", + @"serializer"], + ~[]))]; - let ser_tps: [ast::ty_param]/~ = - vec::append([{ident: @"__S", + let ser_tps: ~[ast::ty_param] = + vec::append(~[{ident: @"__S", id: cx.next_id(), - bounds: ser_bnds}]/~, - vec::map(tps) {|tp| cx.clone_ty_param(tp) }); + bounds: ser_bnds}], + vec::map(tps, |tp| cx.clone_ty_param(tp))); let ser_output: @ast::ty = @{id: cx.next_id(), node: ast::ty_nil, @@ -547,13 +549,13 @@ fn mk_ser_fn(cx: ext_ctxt, span: span, name: ast::ident, f(cx, tps_map, #ast{ __s }, #ast{ __v })); @{ident: @("serialize_" + *name), - attrs: []/~, + attrs: ~[], id: cx.next_id(), node: ast::item_fn({inputs: ser_inputs, output: ser_output, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~}, + constraints: ~[]}, ser_tps, ser_blk), vis: ast::public, @@ -573,12 +575,12 @@ fn deser_path(cx: ext_ctxt, tps: deser_tps_map, path: @ast::path, ast::expr_path( cx.helper_path(path, "deserialize"))); - let ty_args = vec::map(path.types) {|ty| + let ty_args = do vec::map(path.types) |ty| { let dv_expr = deser_ty(cx, tps, ty, cx.clone(d)); cx.lambda(cx.expr_blk(dv_expr)) }; - cx.expr(path.span, ast::expr_call(callee, vec::append([d]/~, ty_args), + cx.expr(path.span, ast::expr_call(callee, vec::append(~[d], ty_args), false)) } @@ -616,7 +618,7 @@ fn deser_ty(cx: ext_ctxt, tps: deser_tps_map, } ast::ty_rec(flds) { - let fields = vec::from_fn(vec::len(flds)) {|fidx| + let fields = do vec::from_fn(vec::len(flds)) |fidx| { let fld = flds[fidx]; let d = cx.clone(d); let f = cx.lit_str(fld.span, fld.node.ident); @@ -645,7 +647,7 @@ fn deser_ty(cx: ext_ctxt, tps: deser_tps_map, // d.read_tup_elt(2u, {||...})) // } - let arg_exprs = vec::from_fn(vec::len(tys)) {|i| + let arg_exprs = do vec::from_fn(vec::len(tys)) |i| { let idx = cx.lit_uint(ty.span, i); let body = deser_lambda(cx, tps, tys[i], cx.clone(d)); #ast{ $(d).read_tup_elt($(idx), $(body)) } @@ -696,34 +698,34 @@ fn deser_ty(cx: ext_ctxt, tps: deser_tps_map, } fn mk_deser_fn(cx: ext_ctxt, span: span, - name: ast::ident, tps: [ast::ty_param]/~, + name: ast::ident, tps: ~[ast::ty_param], f: fn(ext_ctxt, deser_tps_map, -@ast::expr) -> @ast::expr) -> @ast::item { let ext_cx = cx; // required for #ast - let tp_types = vec::map(tps, {|tp| cx.ty_path(span, [tp.ident]/~, []/~)}); - let v_ty = cx.ty_path(span, [name]/~, tp_types); + let tp_types = vec::map(tps, |tp| cx.ty_path(span, ~[tp.ident], ~[])); + let v_ty = cx.ty_path(span, ~[name], tp_types); let tp_inputs = - vec::map(tps, {|tp| + vec::map(tps, |tp| {mode: ast::expl(ast::by_ref), ty: cx.ty_fn(span, - []/~, - cx.ty_path(span, [tp.ident]/~, []/~)), + ~[], + cx.ty_path(span, ~[tp.ident], ~[])), ident: @("__d" + *tp.ident), - id: cx.next_id()}}); + id: cx.next_id()}); #debug["tp_inputs = %?", tp_inputs]; - let deser_inputs: [ast::arg]/~ = - vec::append([{mode: ast::expl(ast::by_ref), - ty: cx.ty_path(span, [@"__D"]/~, []/~), + let deser_inputs: ~[ast::arg] = + vec::append(~[{mode: ast::expl(ast::by_ref), + ty: cx.ty_path(span, ~[@"__D"], ~[]), ident: @"__d", - id: cx.next_id()}]/~, + id: cx.next_id()}], tp_inputs); let tps_map = map::str_hash(); - vec::iter2(tps, tp_inputs) {|tp, arg| + do vec::iter2(tps, tp_inputs) |tp, arg| { let arg_ident = arg.ident; tps_map.insert( *tp.ident, @@ -733,33 +735,33 @@ fn mk_deser_fn(cx: ext_ctxt, span: span, }); } - let deser_bnds = @[ + let deser_bnds = @~[ ast::bound_iface(cx.ty_path( span, - [@"std", @"serialization", @"deserializer"]/~, - []/~))]/~; + ~[@"std", @"serialization", @"deserializer"], + ~[]))]; - let deser_tps: [ast::ty_param]/~ = - vec::append([{ident: @"__D", + let deser_tps: ~[ast::ty_param] = + vec::append(~[{ident: @"__D", id: cx.next_id(), - bounds: deser_bnds}]/~, - vec::map(tps) {|tp| + bounds: deser_bnds}], + vec::map(tps, |tp| { let cloned = cx.clone_ty_param(tp); {bounds: @(vec::append(*cloned.bounds, - [ast::bound_copy]/~)) + ~[ast::bound_copy])) with cloned} - }); + })); let deser_blk = cx.expr_blk(f(cx, tps_map, #ast(expr){__d})); @{ident: @("deserialize_" + *name), - attrs: []/~, + attrs: ~[], id: cx.next_id(), node: ast::item_fn({inputs: deser_inputs, output: v_ty, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~}, + constraints: ~[]}, deser_tps, deser_blk), vis: ast::public, @@ -767,41 +769,41 @@ fn mk_deser_fn(cx: ext_ctxt, span: span, } fn ty_fns(cx: ext_ctxt, name: ast::ident, - ty: @ast::ty, tps: [ast::ty_param]/~) - -> [@ast::item]/~ { + ty: @ast::ty, tps: ~[ast::ty_param]) + -> ~[@ast::item] { let span = ty.span; - [ - mk_ser_fn(cx, span, name, tps, {|a,b,c,d|ser_ty(a, b, ty, c, d)}), - mk_deser_fn(cx, span, name, tps, {|a,b,c|deser_ty(a, b, ty, c)}) - ]/~ + ~[ + mk_ser_fn(cx, span, name, tps, |a,b,c,d| ser_ty(a, b, ty, c, d)), + mk_deser_fn(cx, span, name, tps, |a,b,c| deser_ty(a, b, ty, c)) + ] } fn ser_enum(cx: ext_ctxt, tps: ser_tps_map, e_name: ast::ident, - e_span: span, variants: [ast::variant]/~, - -s: @ast::expr, -v: @ast::expr) -> [@ast::stmt]/~ { + e_span: span, variants: ~[ast::variant], + -s: @ast::expr, -v: @ast::expr) -> ~[@ast::stmt] { let ext_cx = cx; - let arms = vec::from_fn(vec::len(variants)) {|vidx| + let arms = do vec::from_fn(vec::len(variants)) |vidx| { let variant = variants[vidx]; let v_span = variant.span; let v_name = variant.node.name; - let variant_tys = vec::map(variant.node.args) {|a| a.ty }; + let variant_tys = vec::map(variant.node.args, |a| a.ty); ser_variant( cx, tps, variant_tys, v_span, cx.clone(s), // Generate pattern var(v1, v2, v3) - {|pats| + |pats| { if vec::is_empty(pats) { - ast::pat_ident(cx.path(v_span, [v_name]/~), none) + ast::pat_ident(cx.path(v_span, ~[v_name]), none) } else { - ast::pat_enum(cx.path(v_span, [v_name]/~), some(pats)) + ast::pat_enum(cx.path(v_span, ~[v_name]), some(pats)) } }, // Generate body s.emit_enum_variant("foo", 0u, // 3u, {|| blk }) - {|-s, blk| + |-s, blk| { let v_name = cx.lit_str(v_span, v_name); let v_id = cx.lit_uint(v_span, vidx); let sz = cx.lit_uint(v_span, vec::len(variant_tys)); @@ -813,7 +815,7 @@ fn ser_enum(cx: ext_ctxt, tps: ser_tps_map, e_name: ast::ident, }, // Generate s.emit_enum_variant_arg(i, {|| blk }) - {|-s, i, blk| + |-s, i, blk| { let idx = cx.lit_uint(v_span, i); let body = cx.lambda(blk); #ast[expr]{ @@ -821,22 +823,22 @@ fn ser_enum(cx: ext_ctxt, tps: ser_tps_map, e_name: ast::ident, } }) }; - let lam = cx.lambda(cx.blk(e_span, [cx.alt_stmt(arms, e_span, v)]/~)); + let lam = cx.lambda(cx.blk(e_span, ~[cx.alt_stmt(arms, e_span, v)])); let e_name = cx.lit_str(e_span, e_name); - [#ast(stmt){ $(s).emit_enum($(e_name), $(lam)) }]/~ + ~[#ast(stmt){ $(s).emit_enum($(e_name), $(lam)) }] } fn deser_enum(cx: ext_ctxt, tps: deser_tps_map, e_name: ast::ident, - e_span: span, variants: [ast::variant]/~, + e_span: span, variants: ~[ast::variant], -d: @ast::expr) -> @ast::expr { let ext_cx = cx; - let arms: [ast::arm]/~ = vec::from_fn(vec::len(variants)) {|vidx| + let arms: ~[ast::arm] = do vec::from_fn(vec::len(variants)) |vidx| { let variant = variants[vidx]; let v_span = variant.span; let v_name = variant.node.name; - let tys = vec::map(variant.node.args) {|a| a.ty }; + let tys = vec::map(variant.node.args, |a| a.ty); - let arg_exprs = vec::from_fn(vec::len(tys)) {|i| + let arg_exprs = do vec::from_fn(vec::len(tys)) |i| { let idx = cx.lit_uint(v_span, i); let body = deser_lambda(cx, tps, tys[i], cx.clone(d)); #ast{ $(d).read_enum_variant_arg($(idx), $(body)) } @@ -853,9 +855,9 @@ fn deser_enum(cx: ext_ctxt, tps: deser_tps_map, e_name: ast::ident, } }; - {pats: [@{id: cx.next_id(), + {pats: ~[@{id: cx.next_id(), node: ast::pat_lit(cx.lit_uint(v_span, vidx)), - span: v_span}]/~, + span: v_span}], guard: none, body: cx.expr_blk(body)} }; @@ -864,19 +866,19 @@ fn deser_enum(cx: ext_ctxt, tps: deser_tps_map, e_name: ast::ident, let e_name = cx.lit_str(e_span, e_name); let alt_expr = cx.expr(e_span, ast::expr_alt(#ast{__i}, arms, ast::alt_check)); - let var_lambda = #ast{ {|__i| $(alt_expr)} }; + let var_lambda = #ast{ |__i| $(alt_expr) }; let read_var = #ast{ $(cx.clone(d)).read_enum_variant($(var_lambda)) }; let read_lambda = cx.lambda(cx.expr_blk(read_var)); #ast{ $(d).read_enum($(e_name), $(read_lambda)) } } fn enum_fns(cx: ext_ctxt, e_name: ast::ident, e_span: span, - variants: [ast::variant]/~, tps: [ast::ty_param]/~) - -> [@ast::item]/~ { - [ + variants: ~[ast::variant], tps: ~[ast::ty_param]) + -> ~[@ast::item] { + ~[ mk_ser_fn(cx, e_span, e_name, tps, - {|a,b,c,d|ser_enum(a, b, e_name, e_span, variants, c, d)}), + |a,b,c,d| ser_enum(a, b, e_name, e_span, variants, c, d)), mk_deser_fn(cx, e_span, e_name, tps, - {|a,b,c|deser_enum(a, b, e_name, e_span, variants, c)}) - ]/~ + |a,b,c| deser_enum(a, b, e_name, e_span, variants, c)) + ] } diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index 96a1efdfe7a..28e5c2c5f17 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -12,7 +12,7 @@ type macro_def = {ident: ast::ident, ext: syntax_extension}; type macro_definer = fn@(ext_ctxt, span, ast::mac_arg, ast::mac_body) -> macro_def; type item_decorator = - fn@(ext_ctxt, span, ast::meta_item, [@ast::item]/~) -> [@ast::item]/~; + fn@(ext_ctxt, span, ast::meta_item, ~[@ast::item]) -> ~[@ast::item]; type syntax_expander_tt = {expander: syntax_expander_tt_, span: option<span>}; type syntax_expander_tt_ = fn@(ext_ctxt, span, ast::token_tree) -> @ast::expr; @@ -72,7 +72,7 @@ iface ext_ctxt { fn backtrace() -> expn_info; fn mod_push(mod_name: ast::ident); fn mod_pop(); - fn mod_path() -> [ast::ident]/~; + fn mod_path() -> ~[ast::ident]; fn bt_push(ei: codemap::expn_info_); fn bt_pop(); fn span_fatal(sp: span, msg: str) -> !; @@ -88,7 +88,7 @@ fn mk_ctxt(parse_sess: parse::parse_sess, type ctxt_repr = {parse_sess: parse::parse_sess, cfg: ast::crate_cfg, mut backtrace: expn_info, - mut mod_path: [ast::ident]/~}; + mut mod_path: ~[ast::ident]}; impl of ext_ctxt for ctxt_repr { fn codemap() -> codemap { self.parse_sess.cm } fn parse_sess() -> parse::parse_sess { self.parse_sess } @@ -97,7 +97,7 @@ fn mk_ctxt(parse_sess: parse::parse_sess, fn backtrace() -> expn_info { self.backtrace } fn mod_push(i: ast::ident) { vec::push(self.mod_path, i); } fn mod_pop() { vec::pop(self.mod_path); } - fn mod_path() -> [ast::ident]/~ { ret self.mod_path; } + fn mod_path() -> ~[ast::ident] { ret self.mod_path; } fn bt_push(ei: codemap::expn_info_) { alt ei { expanded_from({call_site: cs, callie: callie}) { @@ -145,7 +145,7 @@ fn mk_ctxt(parse_sess: parse::parse_sess, parse_sess: parse_sess, cfg: cfg, mut backtrace: none, - mut mod_path: []/~ + mut mod_path: ~[] }; ret imp as ext_ctxt } @@ -173,24 +173,13 @@ fn expr_to_ident(cx: ext_ctxt, expr: @ast::expr, error: str) -> ast::ident { } } -fn make_new_lit(cx: ext_ctxt, sp: codemap::span, lit: ast::lit_) -> - @ast::expr { - let sp_lit = @{node: lit, span: sp}; - ret @{id: cx.next_id(), node: ast::expr_lit(sp_lit), span: sp}; -} - -fn make_new_expr(cx: ext_ctxt, sp: codemap::span, expr: ast::expr_) -> - @ast::expr { - ret @{id: cx.next_id(), node: expr, span: sp}; -} - fn get_mac_args_no_max(cx: ext_ctxt, sp: span, arg: ast::mac_arg, - min: uint, name: str) -> [@ast::expr]/~ { + min: uint, name: str) -> ~[@ast::expr] { ret get_mac_args(cx, sp, arg, min, none, name); } fn get_mac_args(cx: ext_ctxt, sp: span, arg: ast::mac_arg, - min: uint, max: option<uint>, name: str) -> [@ast::expr]/~ { + min: uint, max: option<uint>, name: str) -> ~[@ast::expr] { alt arg { some(expr) { alt expr.node { diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index 63a4d3d4043..872d1f5eff6 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -1,6 +1,11 @@ import codemap::span; import base::ext_ctxt; +fn mk_expr(cx: ext_ctxt, sp: codemap::span, expr: ast::expr_) -> + @ast::expr { + ret @{id: cx.next_id(), node: expr, span: sp}; +} + fn mk_lit(cx: ext_ctxt, sp: span, lit: ast::lit_) -> @ast::expr { let sp_lit = @{node: lit, span: sp}; ret @{id: cx.next_id(), node: ast::expr_lit(sp_lit), span: sp}; @@ -21,67 +26,62 @@ fn mk_binary(cx: ext_ctxt, sp: span, op: ast::binop, lhs: @ast::expr, rhs: @ast::expr) -> @ast::expr { cx.next_id(); // see ast_util::op_expr_callee_id - let binexpr = ast::expr_binary(op, lhs, rhs); - ret @{id: cx.next_id(), node: binexpr, span: sp}; + mk_expr(cx, sp, ast::expr_binary(op, lhs, rhs)) } fn mk_unary(cx: ext_ctxt, sp: span, op: ast::unop, e: @ast::expr) -> @ast::expr { cx.next_id(); // see ast_util::op_expr_callee_id - let expr = ast::expr_unary(op, e); - ret @{id: cx.next_id(), node: expr, span: sp}; + mk_expr(cx, sp, ast::expr_unary(op, e)) } -fn mk_path(cx: ext_ctxt, sp: span, idents: [ast::ident]/~) -> +fn mk_path(cx: ext_ctxt, sp: span, idents: ~[ast::ident]) -> @ast::expr { let path = @{span: sp, global: false, idents: idents, - rp: none, types: []/~}; + rp: none, types: ~[]}; let pathexpr = ast::expr_path(path); - ret @{id: cx.next_id(), node: pathexpr, span: sp}; + mk_expr(cx, sp, pathexpr) } fn mk_access_(cx: ext_ctxt, sp: span, p: @ast::expr, m: ast::ident) -> @ast::expr { - let expr = ast::expr_field(p, m, []/~); - ret @{id: cx.next_id(), node: expr, span: sp}; + mk_expr(cx, sp, ast::expr_field(p, m, ~[])) } -fn mk_access(cx: ext_ctxt, sp: span, p: [ast::ident]/~, m: ast::ident) +fn mk_access(cx: ext_ctxt, sp: span, p: ~[ast::ident], m: ast::ident) -> @ast::expr { let pathexpr = mk_path(cx, sp, p); ret mk_access_(cx, sp, pathexpr, m); } fn mk_call_(cx: ext_ctxt, sp: span, fn_expr: @ast::expr, - args: [@ast::expr]/~) -> @ast::expr { - let callexpr = ast::expr_call(fn_expr, args, false); - ret @{id: cx.next_id(), node: callexpr, span: sp}; + args: ~[@ast::expr]) -> @ast::expr { + mk_expr(cx, sp, ast::expr_call(fn_expr, args, false)) } -fn mk_call(cx: ext_ctxt, sp: span, fn_path: [ast::ident]/~, - args: [@ast::expr]/~) -> @ast::expr { +fn mk_call(cx: ext_ctxt, sp: span, fn_path: ~[ast::ident], + args: ~[@ast::expr]) -> @ast::expr { let pathexpr = mk_path(cx, sp, fn_path); ret mk_call_(cx, sp, pathexpr, args); } // e = expr, t = type -fn mk_vec_e(cx: ext_ctxt, sp: span, exprs: [@ast::expr]/~) -> +fn mk_base_vec_e(cx: ext_ctxt, sp: span, exprs: ~[@ast::expr]) -> @ast::expr { let vecexpr = ast::expr_vec(exprs, ast::m_imm); ret @{id: cx.next_id(), node: vecexpr, span: sp}; } fn mk_vstore_e(cx: ext_ctxt, sp: span, expr: @ast::expr, vst: ast::vstore) -> @ast::expr { - let vstoreexpr = ast::expr_vstore(expr, vst); - ret @{id: cx.next_id(), node: vstoreexpr, span: sp}; + mk_expr(cx, sp, ast::expr_vstore(expr, vst)) } -fn mk_uniq_vec_e(cx: ext_ctxt, sp: span, exprs: [@ast::expr]/~) -> +fn mk_uniq_vec_e(cx: ext_ctxt, sp: span, exprs: ~[@ast::expr]) -> @ast::expr { - mk_vstore_e(cx, sp, mk_vec_e(cx, sp, exprs), ast::vstore_uniq) + mk_vstore_e(cx, sp, mk_base_vec_e(cx, sp, exprs), ast::vstore_uniq) } -fn mk_fixed_vec_e(cx: ext_ctxt, sp: span, exprs: [@ast::expr]/~) -> +fn mk_fixed_vec_e(cx: ext_ctxt, sp: span, exprs: ~[@ast::expr]) -> @ast::expr { - mk_vstore_e(cx, sp, mk_vec_e(cx, sp, exprs), ast::vstore_fixed(none)) + mk_vstore_e(cx, sp, mk_base_vec_e(cx, sp, exprs), ast::vstore_fixed(none)) } fn mk_rec_e(cx: ext_ctxt, sp: span, - fields: [{ident: ast::ident, ex: @ast::expr}]/~) -> + fields: ~[{ident: ast::ident, ex: @ast::expr}]) -> @ast::expr { - let mut astfields: [ast::field]/~ = []/~; - for fields.each {|field| + let mut astfields: ~[ast::field] = ~[]; + for fields.each |field| { let ident = field.ident; let val = field.ex; let astfield = @@ -89,6 +89,6 @@ fn mk_rec_e(cx: ext_ctxt, sp: span, vec::push(astfields, astfield); } let recexpr = ast::expr_rec(astfields, option::none::<@ast::expr>); - ret @{id: cx.next_id(), node: recexpr, span: sp}; + mk_expr(cx, sp, recexpr) } diff --git a/src/libsyntax/ext/concat_idents.rs b/src/libsyntax/ext/concat_idents.rs index 9f445218007..a678304725d 100644 --- a/src/libsyntax/ext/concat_idents.rs +++ b/src/libsyntax/ext/concat_idents.rs @@ -4,12 +4,12 @@ fn expand_syntax_ext(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { let args = get_mac_args_no_max(cx,sp,arg,1u,"concat_idents"); let mut res = ""; - for args.each {|e| + for args.each |e| { res += *expr_to_ident(cx, e, "expected an ident"); } ret @{id: cx.next_id(), - node: ast::expr_path(@{span: sp, global: false, idents: [@res]/~, - rp: none, types: []/~}), + node: ast::expr_path(@{span: sp, global: false, idents: ~[@res], + rp: none, types: ~[]}), span: sp}; } diff --git a/src/libsyntax/ext/earley_parser.rs b/src/libsyntax/ext/earley_parser.rs index 223cca25694..a6e47e0941c 100644 --- a/src/libsyntax/ext/earley_parser.rs +++ b/src/libsyntax/ext/earley_parser.rs @@ -32,11 +32,11 @@ fn is_some(&&mpu: matcher_pos_up) -> bool { } type matcher_pos = ~{ - elts: [ast::matcher]/~, // maybe should be /&? Need to understand regions. + elts: ~[ast::matcher], // maybe should be /&? Need to understand regions. sep: option<token>, mut idx: uint, mut up: matcher_pos_up, // mutable for swapping only - matches: [dvec<@arb_depth>]/~ + matches: ~[dvec<@arb_depth>] }; fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos { @@ -46,8 +46,8 @@ fn copy_up(&& mpu: matcher_pos_up) -> matcher_pos { } } -fn count_names(ms: [matcher]/&) -> uint { - vec::foldl(0u, ms, {|ct, m| +fn count_names(ms: &[matcher]) -> uint { + vec::foldl(0u, ms, |ct, m| { ct + alt m.node { mtc_tok(_) { 0u } mtc_rep(more_ms, _, _) { count_names(more_ms) } @@ -55,26 +55,26 @@ fn count_names(ms: [matcher]/&) -> uint { }}) } -fn new_matcher_pos(ms: [matcher]/~, sep: option<token>) -> matcher_pos { +fn new_matcher_pos(ms: ~[matcher], sep: option<token>) -> matcher_pos { ~{elts: ms, sep: sep, mut idx: 0u, mut up: matcher_pos_up(none), - matches: copy vec::from_fn(count_names(ms), {|_i| dvec::dvec()}) } + matches: copy vec::from_fn(count_names(ms), |_i| dvec::dvec()) } } /* logically, an arb_depth should contain only one kind of nonterminal */ -enum arb_depth { leaf(whole_nt), seq([@arb_depth]/~) } +enum arb_depth { leaf(whole_nt), seq(~[@arb_depth]) } type earley_item = matcher_pos; -fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) - -> [@arb_depth]/~ { - let mut cur_eis = []/~; +fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher]) + -> ~[@arb_depth] { + let mut cur_eis = ~[]; vec::push(cur_eis, new_matcher_pos(ms, none)); loop { - let mut bb_eis = []/~; // black-box parsed by parser.rs - let mut next_eis = []/~; // or proceed normally - let mut eof_eis = []/~; + let mut bb_eis = ~[]; // black-box parsed by parser.rs + let mut next_eis = ~[]; // or proceed normally + let mut eof_eis = ~[]; let {tok: tok, sp: _} = rdr.peek(); @@ -106,7 +106,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) // I bet this is a perf problem: we're preemptively // doing a lot of array work that will get thrown away // most of the time. - for ei.matches.eachi() { |idx, elt| + for ei.matches.eachi() |idx, elt| { new_pos.matches[idx].push(@seq(elt.get())); } @@ -145,7 +145,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) } let matches = vec::map(ei.matches, // fresh, same size: - {|_m| dvec::<@arb_depth>()}); + |_m| dvec::<@arb_depth>()); let ei_t <- ei; vec::push(cur_eis, ~{ elts: matchers, sep: sep, mut idx: 0u, @@ -165,7 +165,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) /* error messages here could be improved with links to orig. rules */ if tok == EOF { if eof_eis.len() == 1u { - let ret_val = vec::map(eof_eis[0u].matches, {|dv| dv.pop()}); + let ret_val = vec::map(eof_eis[0u].matches, |dv| dv.pop()); ret ret_val; /* success */ } else if eof_eis.len() > 1u { rdr.fatal("Ambiguity: multiple successful parses"); @@ -175,7 +175,7 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) } else { if (bb_eis.len() > 0u && next_eis.len() > 0u) || bb_eis.len() > 1u { - let nts = str::connect(vec::map(bb_eis, {|ei| + let nts = str::connect(vec::map(bb_eis, |ei| { alt ei.elts[ei.idx].node { mtc_bb(_,name,_) { *name } _ { fail; } } }), " or "); @@ -218,12 +218,12 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: [matcher]/~) fn parse_nt(p: parser, name: str) -> whole_nt { alt name { - "item" { alt p.parse_item([]/~, ast::public) { + "item" { alt p.parse_item(~[], ast::public) { some(i) { token::w_item(i) } none { p.fatal("expected an item keyword") } }} "block" { token::w_block(p.parse_block()) } - "stmt" { token::w_stmt(p.parse_stmt([]/~)) } + "stmt" { token::w_stmt(p.parse_stmt(~[])) } "pat" { token::w_pat(p.parse_pat()) } "expr" { token::w_expr(p.parse_expr()) } "ty" { token::w_ty(p.parse_ty(false /* no need to disambiguate*/)) } diff --git a/src/libsyntax/ext/env.rs b/src/libsyntax/ext/env.rs index 26a906f8cf0..b8aa69b7735 100644 --- a/src/libsyntax/ext/env.rs +++ b/src/libsyntax/ext/env.rs @@ -5,6 +5,7 @@ * interface. */ import base::*; +import build::mk_lit; export expand_syntax_ext; fn expand_syntax_ext(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, @@ -22,7 +23,7 @@ fn expand_syntax_ext(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, } fn make_new_str(cx: ext_ctxt, sp: codemap::span, +s: str) -> @ast::expr { - ret make_new_lit(cx, sp, ast::lit_str(@s)); + ret mk_lit(cx, sp, ast::lit_str(@s)); } // // Local Variables: diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index e9c6e6e122c..a037d87166a 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -45,7 +45,7 @@ fn expand_expr(exts: hashmap<str, syntax_extension>, cx: ext_ctxt, some(macro_defining(ext)) { let named_extension = ext(cx, pth.span, args, body); exts.insert(*named_extension.ident, named_extension.ext); - (ast::expr_rec([]/~, none), s) + (ast::expr_rec(~[], none), s) } some(normal_tt(_)) { cx.span_fatal(pth.span, @@ -100,8 +100,8 @@ fn expand_mod_items(exts: hashmap<str, syntax_extension>, cx: ext_ctxt, // For each item, look through the attributes. If any of them are // decorated with "item decorators", then use that function to transform // the item into a new set of items. - let new_items = vec::flat_map(module.items) {|item| - vec::foldr(item.attrs, [item]/~) {|attr, items| + let new_items = do vec::flat_map(module.items) |item| { + do vec::foldr(item.attrs, ~[item]) |attr, items| { let mname = alt attr.node.value.node { ast::meta_word(n) { n } ast::meta_name_value(n, _) { n } @@ -164,10 +164,10 @@ fn expand_crate(parse_sess: parse::parse_sess, let afp = default_ast_fold(); let cx: ext_ctxt = mk_ctxt(parse_sess, cfg); let f_pre = - @{fold_expr: {|a,b,c|expand_expr(exts, cx, a, b, c, afp.fold_expr)}, - fold_mod: {|a,b|expand_mod_items(exts, cx, a, b, afp.fold_mod)}, - fold_item: {|a,b|expand_item(cx, a, b, afp.fold_item)}, - new_span: {|a|new_span(cx, a)} + @{fold_expr: |a,b,c| expand_expr(exts, cx, a, b, c, afp.fold_expr), + fold_mod: |a,b| expand_mod_items(exts, cx, a, b, afp.fold_mod), + fold_item: |a,b| expand_item(cx, a, b, afp.fold_item), + new_span: |a|new_span(cx, a) with *afp}; let f = make_fold(f_pre); let cm = parse_expr_from_source_str("<core-macros>", diff --git a/src/libsyntax/ext/fmt.rs b/src/libsyntax/ext/fmt.rs index 09ebc0b79cd..acf055ccabd 100644 --- a/src/libsyntax/ext/fmt.rs +++ b/src/libsyntax/ext/fmt.rs @@ -35,10 +35,10 @@ fn expand_syntax_ext(cx: ext_ctxt, sp: span, arg: ast::mac_arg, // expressions. Also: Cleanup the naming of these functions. // NOTE: Moved many of the common ones to build.rs --kevina fn pieces_to_expr(cx: ext_ctxt, sp: span, - pieces: [piece]/~, args: [@ast::expr]/~) + pieces: ~[piece], args: ~[@ast::expr]) -> @ast::expr { - fn make_path_vec(_cx: ext_ctxt, ident: ast::ident) -> [ast::ident]/~ { - ret [@"extfmt", @"rt", ident]/~; + fn make_path_vec(_cx: ext_ctxt, ident: ast::ident) -> ~[ast::ident] { + ret ~[@"extfmt", @"rt", ident]; } fn make_rt_path_expr(cx: ext_ctxt, sp: span, ident: ast::ident) -> @ast::expr { @@ -49,20 +49,20 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, // which tells the RT::conv* functions how to perform the conversion fn make_rt_conv_expr(cx: ext_ctxt, sp: span, cnv: conv) -> @ast::expr { - fn make_flags(cx: ext_ctxt, sp: span, flags: [flag]/~) -> @ast::expr { - let mut flagexprs: [@ast::expr]/~ = []/~; - for flags.each {|f| - let mut fstr; - alt f { - flag_left_justify { fstr = "flag_left_justify"; } - flag_left_zero_pad { fstr = "flag_left_zero_pad"; } - flag_space_for_sign { fstr = "flag_space_for_sign"; } - flag_sign_always { fstr = "flag_sign_always"; } - flag_alternate { fstr = "flag_alternate"; } - } - vec::push(flagexprs, make_rt_path_expr(cx, sp, @fstr)); + fn make_flags(cx: ext_ctxt, sp: span, flags: ~[flag]) -> @ast::expr { + let mut tmp_expr = make_rt_path_expr(cx, sp, @"flag_none"); + for flags.each |f| { + let fstr = alt f { + flag_left_justify { "flag_left_justify" } + flag_left_zero_pad { "flag_left_zero_pad" } + flag_space_for_sign { "flag_space_for_sign" } + flag_sign_always { "flag_sign_always" } + flag_alternate { "flag_alternate" } + }; + tmp_expr = mk_binary(cx, sp, ast::bitor, tmp_expr, + make_rt_path_expr(cx, sp, @fstr)); } - ret mk_uniq_vec_e(cx, sp, flagexprs); + ret tmp_expr; } fn make_count(cx: ext_ctxt, sp: span, cnt: count) -> @ast::expr { alt cnt { @@ -72,7 +72,7 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, count_is(c) { let count_lit = mk_int(cx, sp, c); let count_is_path = make_path_vec(cx, @"count_is"); - let count_is_args = [count_lit]/~; + let count_is_args = ~[count_lit]; ret mk_call(cx, sp, count_is_path, count_is_args); } _ { cx.span_unimpl(sp, "unimplemented #fmt conversion"); } @@ -97,10 +97,10 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, width_expr: @ast::expr, precision_expr: @ast::expr, ty_expr: @ast::expr) -> @ast::expr { ret mk_rec_e(cx, sp, - [{ident: @"flags", ex: flags_expr}, + ~[{ident: @"flags", ex: flags_expr}, {ident: @"width", ex: width_expr}, {ident: @"precision", ex: precision_expr}, - {ident: @"ty", ex: ty_expr}]/~); + {ident: @"ty", ex: ty_expr}]); } let rt_conv_flags = make_flags(cx, sp, cnv.flags); let rt_conv_width = make_count(cx, sp, cnv.width); @@ -114,7 +114,7 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, let fname = "conv_" + conv_type; let path = make_path_vec(cx, @fname); let cnv_expr = make_rt_conv_expr(cx, sp, cnv); - let args = [cnv_expr, arg]/~; + let args = ~[cnv_expr, arg]; ret mk_call(cx, arg.span, path, args); } @@ -136,7 +136,7 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, option::none { } _ { cx.span_unimpl(sp, unsupported); } } - for cnv.flags.each {|f| + for cnv.flags.each |f| { alt f { flag_left_justify { } flag_sign_always { @@ -191,7 +191,7 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, some(p) { log(debug, "param: " + int::to_str(p, 10u)); } _ { #debug("param: none"); } } - for c.flags.each {|f| + for c.flags.each |f| { alt f { flag_left_justify { #debug("flag: left justify"); } flag_left_zero_pad { #debug("flag: left zero pad"); } @@ -244,9 +244,9 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, } let fmt_sp = args[0].span; let mut n = 0u; - let mut piece_exprs = []/~; + let mut piece_exprs = ~[]; let nargs = args.len(); - for pieces.each {|pc| + for pieces.each |pc| { alt pc { piece_string(s) { vec::push(piece_exprs, mk_str(cx, fmt_sp, s)); @@ -275,7 +275,7 @@ fn pieces_to_expr(cx: ext_ctxt, sp: span, } let arg_vec = mk_fixed_vec_e(cx, fmt_sp, piece_exprs); - ret mk_call(cx, fmt_sp, [@"str", @"concat"]/~, [arg_vec]/~); + ret mk_call(cx, fmt_sp, ~[@"str", @"concat"], ~[arg_vec]); } // // Local Variables: diff --git a/src/libsyntax/ext/ident_to_str.rs b/src/libsyntax/ext/ident_to_str.rs index 7dfb70f1520..2cff86d98a7 100644 --- a/src/libsyntax/ext/ident_to_str.rs +++ b/src/libsyntax/ext/ident_to_str.rs @@ -1,11 +1,12 @@ import base::*; +import build::mk_lit; import option; fn expand_syntax_ext(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { let args = get_mac_args(cx,sp,arg,1u,option::some(1u),"ident_to_str"); - ret make_new_lit(cx, sp, - ast::lit_str(expr_to_ident(cx, args[0u], - "expected an ident"))); + ret mk_lit(cx, sp, + ast::lit_str(expr_to_ident(cx, args[0u], + "expected an ident"))); } diff --git a/src/libsyntax/ext/log_syntax.rs b/src/libsyntax/ext/log_syntax.rs index 50fac765483..d237cd33839 100644 --- a/src/libsyntax/ext/log_syntax.rs +++ b/src/libsyntax/ext/log_syntax.rs @@ -7,10 +7,10 @@ fn expand_syntax_ext(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, cx.print_backtrace(); io::stdout().write_line( str::connect(vec::map(args, - {|&&ex| print::pprust::expr_to_str(ex)}), ", ") + |&&ex| print::pprust::expr_to_str(ex)), ", ") ); //trivial expression - ret @{id: cx.next_id(), node: ast::expr_rec([]/~, option::none), + ret @{id: cx.next_id(), node: ast::expr_rec(~[], option::none), span: sp}; } diff --git a/src/libsyntax/ext/qquote.rs b/src/libsyntax/ext/qquote.rs index 648532d3024..2bb8c27828c 100644 --- a/src/libsyntax/ext/qquote.rs +++ b/src/libsyntax/ext/qquote.rs @@ -35,7 +35,7 @@ impl of qq_helper for @ast::crate { fn visit(cx: aq_ctxt, v: vt<aq_ctxt>) {visit_crate(*self, cx, v);} fn extract_mac() -> option<ast::mac_> {fail} fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_crate"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_crate"]) } fn get_fold_fn() -> str {"fold_crate"} } @@ -49,7 +49,7 @@ impl of qq_helper for @ast::expr { } } fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_expr"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_expr"]) } fn get_fold_fn() -> str {"fold_expr"} } @@ -63,7 +63,7 @@ impl of qq_helper for @ast::ty { } } fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_ty"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_ty"]) } fn get_fold_fn() -> str {"fold_ty"} } @@ -72,7 +72,7 @@ impl of qq_helper for @ast::item { fn visit(cx: aq_ctxt, v: vt<aq_ctxt>) {visit_item(self, cx, v);} fn extract_mac() -> option<ast::mac_> {fail} fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_item"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_item"]) } fn get_fold_fn() -> str {"fold_item"} } @@ -81,7 +81,7 @@ impl of qq_helper for @ast::stmt { fn visit(cx: aq_ctxt, v: vt<aq_ctxt>) {visit_stmt(self, cx, v);} fn extract_mac() -> option<ast::mac_> {fail} fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_stmt"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_stmt"]) } fn get_fold_fn() -> str {"fold_stmt"} } @@ -90,24 +90,22 @@ impl of qq_helper for @ast::pat { fn visit(cx: aq_ctxt, v: vt<aq_ctxt>) {visit_pat(self, cx, v);} fn extract_mac() -> option<ast::mac_> {fail} fn mk_parse_fn(cx: ext_ctxt, sp: span) -> @ast::expr { - mk_path(cx, sp, [@"syntax", @"ext", @"qquote", @"parse_pat"]/~) + mk_path(cx, sp, ~[@"syntax", @"ext", @"qquote", @"parse_pat"]) } fn get_fold_fn() -> str {"fold_pat"} } fn gather_anti_quotes<N: qq_helper>(lo: uint, node: N) -> aq_ctxt { - let v = @{visit_expr: {|node, &&cx, v| - visit_aq(node, "from_expr", cx, v)}, - visit_ty: {|node, &&cx, v| - visit_aq(node, "from_ty", cx, v)} + let v = @{visit_expr: |node, &&cx, v| visit_aq(node, "from_expr", cx, v), + visit_ty: |node, &&cx, v| visit_aq(node, "from_ty", cx, v) with *default_visitor()}; let cx = @{lo:lo, gather: dvec()}; node.visit(cx, mk_vt(v)); // FIXME (#2250): Maybe this is an overkill (merge_sort), it might // be better to just keep the gather array in sorted order. - cx.gather.swap { |v| - vec::to_mut(std::sort::merge_sort({|a,b| a.lo < b.lo}, v)) + do cx.gather.swap |v| { + vec::to_mut(std::sort::merge_sort(|a,b| a.lo < b.lo, v)) }; ret cx; } @@ -132,13 +130,13 @@ fn expand_ast(ecx: ext_ctxt, _sp: span, -> @ast::expr { let mut what = "expr"; - option::iter(arg) {|arg| - let args: [@ast::expr]/~ = + do option::iter(arg) |arg| { + let args: ~[@ast::expr] = alt arg.node { ast::expr_vec(elts, _) { elts } _ { ecx.span_fatal - (_sp, "#ast requires arguments of the form `[...]/~`.") + (_sp, "#ast requires arguments of the form `~[...]`.") } }; if vec::len::<@ast::expr>(args) != 1u { @@ -163,14 +161,14 @@ fn expand_ast(ecx: ext_ctxt, _sp: span, }; } -fn parse_crate(p: parser) -> @ast::crate { p.parse_crate_mod([]/~) } +fn parse_crate(p: parser) -> @ast::crate { p.parse_crate_mod(~[]) } fn parse_ty(p: parser) -> @ast::ty { p.parse_ty(false) } -fn parse_stmt(p: parser) -> @ast::stmt { p.parse_stmt([]/~) } +fn parse_stmt(p: parser) -> @ast::stmt { p.parse_stmt(~[]) } fn parse_expr(p: parser) -> @ast::expr { p.parse_expr() } fn parse_pat(p: parser) -> @ast::pat { p.parse_pat() } fn parse_item(p: parser) -> @ast::item { - alt p.parse_item([]/~, ast::public) { + alt p.parse_item(~[], ast::public) { some(item) { item } none { fail "parse_item: parsing an item failed"; } } @@ -193,7 +191,7 @@ fn finish<T: qq_helper> let qcx = gather_anti_quotes(sp.lo, node); let cx = qcx; - for uint::range(1u, cx.gather.len()) {|i| + for uint::range(1u, cx.gather.len()) |i| { assert cx.gather[i-1u].lo < cx.gather[i].lo; // ^^ check that the vector is sorted assert cx.gather[i-1u].hi <= cx.gather[i].lo; @@ -205,7 +203,7 @@ fn finish<T: qq_helper> let mut state = active; let mut i = 0u, j = 0u; let g_len = cx.gather.len(); - str::chars_iter(*str) {|ch| + do str::chars_iter(*str) |ch| { if (j < g_len && i == cx.gather[j].lo) { assert ch == '$'; let repl = #fmt("$%u ", j); @@ -229,56 +227,53 @@ fn finish<T: qq_helper> let cx = ecx; - let cfg_call = {|| - mk_call_(cx, sp, mk_access(cx, sp, [@"ext_cx"]/~, @"cfg"), []/~) - }; + let cfg_call = || mk_call_( + cx, sp, mk_access(cx, sp, ~[@"ext_cx"], @"cfg"), ~[]); - let parse_sess_call = {|| - mk_call_(cx, sp, - mk_access(cx, sp, [@"ext_cx"]/~, @"parse_sess"), []/~) - }; + let parse_sess_call = || mk_call_( + cx, sp, mk_access(cx, sp, ~[@"ext_cx"], @"parse_sess"), ~[]); let pcall = mk_call(cx,sp, - [@"syntax", @"parse", @"parser", - @"parse_from_source_str"]/~, - [node.mk_parse_fn(cx,sp), + ~[@"syntax", @"parse", @"parser", + @"parse_from_source_str"], + ~[node.mk_parse_fn(cx,sp), mk_str(cx,sp, fname), mk_call(cx,sp, - [@"syntax",@"ext", - @"qquote", @"mk_file_substr"]/~, - [mk_str(cx,sp, loc.file.name), + ~[@"syntax",@"ext", + @"qquote", @"mk_file_substr"], + ~[mk_str(cx,sp, loc.file.name), mk_uint(cx,sp, loc.line), - mk_uint(cx,sp, loc.col)]/~), + mk_uint(cx,sp, loc.col)]), mk_unary(cx,sp, ast::box(ast::m_imm), mk_str(cx,sp, str2)), cfg_call(), - parse_sess_call()]/~ + parse_sess_call()] ); let mut rcall = pcall; if (g_len > 0u) { rcall = mk_call(cx,sp, - [@"syntax", @"ext", @"qquote", @"replace"]/~, - [pcall, - mk_uniq_vec_e(cx,sp, qcx.gather.map_to_vec {|g| + ~[@"syntax", @"ext", @"qquote", @"replace"], + ~[pcall, + mk_uniq_vec_e(cx,sp, qcx.gather.map_to_vec(|g| { mk_call(cx,sp, - [@"syntax", @"ext", - @"qquote", @g.constr]/~, - [g.e]/~)}), + ~[@"syntax", @"ext", + @"qquote", @g.constr], + ~[g.e])})), mk_path(cx,sp, - [@"syntax", @"ext", @"qquote", - @node.get_fold_fn()]/~)]/~); + ~[@"syntax", @"ext", @"qquote", + @node.get_fold_fn()])]); } ret rcall; } -fn replace<T>(node: T, repls: [fragment]/~, ff: fn (ast_fold, T) -> T) +fn replace<T>(node: T, repls: ~[fragment], ff: fn (ast_fold, T) -> T) -> T { let aft = default_ast_fold(); - let f_pre = @{fold_expr: {|a,b,c|replace_expr(repls, a, b, c, - aft.fold_expr)}, - fold_ty: {|a,b,c|replace_ty(repls, a, b, c, - aft.fold_ty)} + let f_pre = @{fold_expr: |a,b,c|replace_expr(repls, a, b, c, + aft.fold_expr), + fold_ty: |a,b,c|replace_ty(repls, a, b, c, + aft.fold_ty) with *aft}; ret ff(make_fold(f_pre), node); } @@ -291,7 +286,7 @@ fn fold_item(f: ast_fold, &&n: @ast::item) -> @ast::item {f.fold_item(n)} fn fold_stmt(f: ast_fold, &&n: @ast::stmt) -> @ast::stmt {f.fold_stmt(n)} fn fold_pat(f: ast_fold, &&n: @ast::pat) -> @ast::pat {f.fold_pat(n)} -fn replace_expr(repls: [fragment]/~, +fn replace_expr(repls: ~[fragment], e: ast::expr_, s: span, fld: ast_fold, orig: fn@(ast::expr_, span, ast_fold)->(ast::expr_, span)) -> (ast::expr_, span) @@ -305,7 +300,7 @@ fn replace_expr(repls: [fragment]/~, } } -fn replace_ty(repls: [fragment]/~, +fn replace_ty(repls: ~[fragment], e: ast::ty_, s: span, fld: ast_fold, orig: fn@(ast::ty_, span, ast_fold)->(ast::ty_, span)) -> (ast::ty_, span) diff --git a/src/libsyntax/ext/simplext.rs b/src/libsyntax/ext/simplext.rs index dedd95551f2..2f811891711 100644 --- a/src/libsyntax/ext/simplext.rs +++ b/src/libsyntax/ext/simplext.rs @@ -22,7 +22,7 @@ fn path_to_ident(pth: @path) -> option<ident> { type clause = {params: binders, body: @expr}; /* logically, an arb_depth should contain only one kind of matchable */ -enum arb_depth<T> { leaf(T), seq(@[arb_depth<T>]/~, span), } +enum arb_depth<T> { leaf(T), seq(@~[arb_depth<T>], span), } enum matchable { @@ -70,11 +70,11 @@ fn match_error(cx: ext_ctxt, m: matchable, expected: str) -> ! { type match_result = option<arb_depth<matchable>>; type selector = fn@(matchable) -> match_result; -fn elts_to_ell(cx: ext_ctxt, elts: [@expr]/~) -> - {pre: [@expr]/~, rep: option<@expr>, post: [@expr]/~} { +fn elts_to_ell(cx: ext_ctxt, elts: ~[@expr]) -> + {pre: ~[@expr], rep: option<@expr>, post: ~[@expr]} { let mut idx: uint = 0u; let mut res = none; - for elts.each {|elt| + for elts.each |elt| { alt elt.node { expr_mac(m) { alt m.node { @@ -96,14 +96,14 @@ fn elts_to_ell(cx: ext_ctxt, elts: [@expr]/~) -> } ret alt res { some(val) { val } - none { {pre: elts, rep: none, post: []/~} } + none { {pre: elts, rep: none, post: ~[]} } } } -fn option_flatten_map<T: copy, U: copy>(f: fn@(T) -> option<U>, v: [T]/~) -> - option<[U]/~> { - let mut res = []/~; - for v.each {|elem| +fn option_flatten_map<T: copy, U: copy>(f: fn@(T) -> option<U>, v: ~[T]) -> + option<~[U]> { + let mut res = ~[]; + for v.each |elem| { alt f(elem) { none { ret none; } some(fv) { vec::push(res, fv); } } } ret some(res); @@ -113,7 +113,7 @@ fn a_d_map(ad: arb_depth<matchable>, f: selector) -> match_result { alt ad { leaf(x) { ret f(x); } seq(ads, span) { - alt option_flatten_map({|x| a_d_map(x, f)}, *ads) { + alt option_flatten_map(|x| a_d_map(x, f), *ads) { none { ret none; } some(ts) { ret some(seq(@ts, span)); } } @@ -128,7 +128,7 @@ fn compose_sels(s1: selector, s2: selector) -> selector { some(matches) { a_d_map(matches, s2) } } } - ret {|x|scomp(s1, s2, x)}; + ret { |x| scomp(s1, s2, x) }; } @@ -164,11 +164,11 @@ selectors. */ fn use_selectors_to_bind(b: binders, e: @expr) -> option<bindings> { let res = box_str_hash::<arb_depth<matchable>>(); //need to do this first, to check vec lengths. - for b.literal_ast_matchers.each {|sel| + for b.literal_ast_matchers.each |sel| { alt sel(match_expr(e)) { none { ret none; } _ { } } } let mut never_mind: bool = false; - for b.real_binders.each {|key, val| + for b.real_binders.each |key, val| { alt val(match_expr(e)) { none { never_mind = true; } some(mtc) { res.insert(key, mtc); } @@ -182,7 +182,7 @@ fn use_selectors_to_bind(b: binders, e: @expr) -> option<bindings> { /* use the bindings on the body to generate the expanded code */ fn transcribe(cx: ext_ctxt, b: bindings, body: @expr) -> @expr { - let idx_path: @mut [uint]/~ = @mut []/~; + let idx_path: @mut ~[uint] = @mut ~[]; fn new_id(_old: node_id, cx: ext_ctxt) -> node_id { ret cx.next_id(); } fn new_span(cx: ext_ctxt, sp: span) -> span { /* this discards information in the case of macro-defining macros */ @@ -190,22 +190,22 @@ fn transcribe(cx: ext_ctxt, b: bindings, body: @expr) -> @expr { } let afp = default_ast_fold(); let f_pre = - @{fold_ident: {|x,y|transcribe_ident(cx, b, idx_path, x, y)}, - fold_path: {|x,y|transcribe_path(cx, b, idx_path, x, y)}, - fold_expr: {|x,y,z| + @{fold_ident: |x,y|transcribe_ident(cx, b, idx_path, x, y), + fold_path: |x,y|transcribe_path(cx, b, idx_path, x, y), + fold_expr: |x,y,z| transcribe_expr(cx, b, idx_path, x, y, z, afp.fold_expr) - }, - fold_ty: {|x,y,z| + , + fold_ty: |x,y,z| transcribe_type(cx, b, idx_path, x, y, z, afp.fold_ty) - }, - fold_block: {|x,y,z| + , + fold_block: |x,y,z| transcribe_block(cx, b, idx_path, x, y, z, afp.fold_block) - }, - map_exprs: {|x,y| + , + map_exprs: |x,y| transcribe_exprs(cx, b, idx_path, x, y) - }, - new_id: {|x|new_id(x, cx)} + , + new_id: |x|new_id(x, cx) with *afp}; let f = make_fold(f_pre); let result = f.fold_expr(body); @@ -214,10 +214,10 @@ fn transcribe(cx: ext_ctxt, b: bindings, body: @expr) -> @expr { /* helper: descend into a matcher */ -fn follow(m: arb_depth<matchable>, idx_path: @mut [uint]/~) -> +fn follow(m: arb_depth<matchable>, idx_path: @mut ~[uint]) -> arb_depth<matchable> { let mut res: arb_depth<matchable> = m; - for vec::each(*idx_path) {|idx| + for vec::each(*idx_path) |idx| { res = alt res { leaf(_) { ret res;/* end of the line */ } seq(new_ms, _) { new_ms[idx] } @@ -227,7 +227,7 @@ fn follow(m: arb_depth<matchable>, idx_path: @mut [uint]/~) -> } fn follow_for_trans(cx: ext_ctxt, mmaybe: option<arb_depth<matchable>>, - idx_path: @mut [uint]/~) -> option<matchable> { + idx_path: @mut ~[uint]) -> option<matchable> { alt mmaybe { none { ret none } some(m) { @@ -255,18 +255,18 @@ fn free_vars(b: bindings, e: @expr, it: fn(ident)) { // using fold is a hack: we want visit, but it doesn't hit idents ) : // solve this with macros let f_pre = - @{fold_ident: {|x,y|mark_ident(x, y, b, idents)} + @{fold_ident: |x,y|mark_ident(x, y, b, idents) with *default_ast_fold()}; let f = make_fold(f_pre); f.fold_expr(e); // ignore result - for idents.each_key {|x| it(x); }; + for idents.each_key |x| { it(x); }; } /* handle sequences (anywhere in the AST) of exprs, either real or ...ed */ -fn transcribe_exprs(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_exprs(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], recur: fn@(&&@expr) -> @expr, - exprs: [@expr]/~) -> [@expr]/~ { + exprs: ~[@expr]) -> ~[@expr] { alt elts_to_ell(cx, exprs) { {pre: pre, rep: repeat_me_maybe, post: post} { let mut res = vec::map(pre, recur); @@ -276,7 +276,7 @@ fn transcribe_exprs(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, let mut repeat: option<{rep_count: uint, name: ident}> = none; /* we need to walk over all the free vars in lockstep, except for the leaves, which are just duplicated */ - free_vars(b, repeat_me) {|fv| + do free_vars(b, repeat_me) |fv| { let cur_pos = follow(b.get(fv), idx_path); alt cur_pos { leaf(_) { } @@ -327,7 +327,7 @@ fn transcribe_exprs(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, // substitute, in a position that's required to be an ident -fn transcribe_ident(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_ident(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], &&i: ident, _fld: ast_fold) -> ident { ret alt follow_for_trans(cx, b.find(i), idx_path) { some(match_ident(a_id)) { a_id.node } @@ -337,14 +337,14 @@ fn transcribe_ident(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, } -fn transcribe_path(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_path(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], p: path, _fld: ast_fold) -> path { // Don't substitute into qualified names. if vec::len(p.types) > 0u || vec::len(p.idents) != 1u { ret p; } alt follow_for_trans(cx, b.find(p.idents[0]), idx_path) { some(match_ident(id)) { - {span: id.span, global: false, idents: [id.node]/~, - rp: none, types: []/~} + {span: id.span, global: false, idents: ~[id.node], + rp: none, types: ~[]} } some(match_path(a_pth)) { *a_pth } some(m) { match_error(cx, m, "a path") } @@ -353,7 +353,7 @@ fn transcribe_path(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, } -fn transcribe_expr(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_expr(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], e: ast::expr_, s: span, fld: ast_fold, orig: fn@(ast::expr_, span, ast_fold)->(ast::expr_, span)) -> (ast::expr_, span) @@ -368,9 +368,9 @@ fn transcribe_expr(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, some(match_ident(id)) { (expr_path(@{span: id.span, global: false, - idents: [id.node]/~, + idents: ~[id.node], rp: none, - types: []/~}), id.span) + types: ~[]}), id.span) } some(match_path(a_pth)) { (expr_path(a_pth), s) } some(match_expr(a_exp)) { (a_exp.node, a_exp.span) } @@ -382,7 +382,7 @@ fn transcribe_expr(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, } } -fn transcribe_type(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_type(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], t: ast::ty_, s: span, fld: ast_fold, orig: fn@(ast::ty_, span, ast_fold) -> (ast::ty_, span)) -> (ast::ty_, span) @@ -408,7 +408,7 @@ fn transcribe_type(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, /* for parsing reasons, syntax variables bound to blocks must be used like `{v}` */ -fn transcribe_block(cx: ext_ctxt, b: bindings, idx_path: @mut [uint]/~, +fn transcribe_block(cx: ext_ctxt, b: bindings, idx_path: @mut ~[uint], blk: blk_, s: span, fld: ast_fold, orig: fn@(blk_, span, ast_fold) -> (blk_, span)) -> (blk_, span) @@ -459,7 +459,7 @@ fn p_t_s_rec(cx: ext_ctxt, m: matchable, s: selector, b: binders) { } } {pre: pre, rep: none, post: post} { - if post != []/~ { + if post != ~[] { cx.bug("elts_to_ell provided an invalid result"); } p_t_s_r_length(cx, vec::len(pre), false, s, b); @@ -481,7 +481,7 @@ fn p_t_s_rec(cx: ext_ctxt, m: matchable, s: selector, b: binders) { _ { cx.bug("broken traversal in p_t_s_r") } } } - b.literal_ast_matchers.push({|x|select(cx, x, e)}); + b.literal_ast_matchers.push(|x| select(cx, x, e)); } } } @@ -523,7 +523,7 @@ fn p_t_s_r_path(cx: ext_ctxt, p: @path, s: selector, b: binders) { if b.real_binders.contains_key(p_id) { cx.span_fatal(p.span, "duplicate binding identifier"); } - b.real_binders.insert(p_id, compose_sels(s, {|x|select(cx, x)})); + b.real_binders.insert(p_id, compose_sels(s, |x| select(cx, x))); } none { } } @@ -568,7 +568,7 @@ fn p_t_s_r_mac(cx: ext_ctxt, mac: ast::mac, s: selector, b: binders) { _ { none } } } - let final_step = {|x|select_pt_1(cx, x, select_pt_2)}; + let final_step = |x| select_pt_1(cx, x, select_pt_2); b.real_binders.insert(id, compose_sels(s, final_step)); } none { no_des(cx, pth.span, "under `#<>`"); } @@ -588,7 +588,7 @@ fn p_t_s_r_mac(cx: ext_ctxt, mac: ast::mac, s: selector, b: binders) { _ { none } } } - let final_step = {|x|select_pt_1(cx, x, select_pt_2)}; + let final_step = |x| select_pt_1(cx, x, select_pt_2); b.real_binders.insert(id, compose_sels(s, final_step)); } none { no_des(cx, blk.span, "under `#{}`"); } @@ -607,7 +607,7 @@ fn p_t_s_r_ellipses(cx: ext_ctxt, repeat_me: @expr, offset: uint, s: selector, match_expr(e) { alt e.node { expr_vec(arg_elts, _) { - let mut elts = []/~; + let mut elts = ~[]; let mut idx = offset; while idx < vec::len(arg_elts) { vec::push(elts, leaf(match_expr(arg_elts[idx]))); @@ -625,7 +625,7 @@ fn p_t_s_r_ellipses(cx: ext_ctxt, repeat_me: @expr, offset: uint, s: selector, } } p_t_s_rec(cx, match_expr(repeat_me), - compose_sels(s, {|x|select(cx, repeat_me, offset, x)}), b); + compose_sels(s, |x| select(cx, repeat_me, offset, x)), b); } @@ -649,10 +649,10 @@ fn p_t_s_r_length(cx: ext_ctxt, len: uint, at_least: bool, s: selector, } } b.literal_ast_matchers.push( - compose_sels(s, {|x|len_select(cx, x, at_least, len)})); + compose_sels(s, |x| len_select(cx, x, at_least, len))); } -fn p_t_s_r_actual_vector(cx: ext_ctxt, elts: [@expr]/~, _repeat_after: bool, +fn p_t_s_r_actual_vector(cx: ext_ctxt, elts: ~[@expr], _repeat_after: bool, s: selector, b: binders) { let mut idx: uint = 0u; while idx < vec::len(elts) { @@ -670,7 +670,7 @@ fn p_t_s_r_actual_vector(cx: ext_ctxt, elts: [@expr]/~, _repeat_after: bool, } } p_t_s_rec(cx, match_expr(elts[idx]), - compose_sels(s, {|x, copy idx|select(cx, x, idx)}), b); + compose_sels(s, |x, copy idx| select(cx, x, idx)), b); idx += 1u; } } @@ -680,14 +680,14 @@ fn add_new_extension(cx: ext_ctxt, sp: span, arg: ast::mac_arg, let args = get_mac_args_no_max(cx, sp, arg, 0u, "macro"); let mut macro_name: option<@str> = none; - let mut clauses: [@clause]/~ = []/~; - for args.each {|arg| + let mut clauses: ~[@clause] = ~[]; + for args.each |arg| { alt arg.node { expr_vec(elts, mutbl) { if vec::len(elts) != 2u { cx.span_fatal((*arg).span, - "extension clause must consist of [" + - "macro invocation, expansion body]/~"); + "extension clause must consist of ~[" + + "macro invocation, expansion body]"); } @@ -740,14 +740,12 @@ fn add_new_extension(cx: ext_ctxt, sp: span, arg: ast::mac_arg, } _ { cx.span_fatal((*arg).span, - "extension must be [clause, " + " ...]/~"); + "extension must be ~[clause, " + " ...]"); } } } - let ext = {|a,b,c,d, move clauses| - generic_extension(a,b,c,d,clauses) - }; + let ext = |a,b,c,d, move clauses| generic_extension(a,b,c,d,clauses); ret {ident: alt macro_name { @@ -761,12 +759,12 @@ fn add_new_extension(cx: ext_ctxt, sp: span, arg: ast::mac_arg, fn generic_extension(cx: ext_ctxt, sp: span, arg: ast::mac_arg, _body: ast::mac_body, - clauses: [@clause]/~) -> @expr { + clauses: ~[@clause]) -> @expr { let arg = alt arg { some(arg) { arg } none { cx.span_fatal(sp, "macro must have arguments")} }; - for clauses.each {|c| + for clauses.each |c| { alt use_selectors_to_bind(c.params, arg) { some(bindings) { ret transcribe(cx, bindings, c.body); } none { cont; } diff --git a/src/libsyntax/ext/source_util.rs b/src/libsyntax/ext/source_util.rs index ae76cbafef7..ee5e96cc0e4 100644 --- a/src/libsyntax/ext/source_util.rs +++ b/src/libsyntax/ext/source_util.rs @@ -2,6 +2,7 @@ import base::*; import ast; import codemap::span; import print::pprust; +import build::{mk_lit,mk_uniq_vec_e}; export expand_line; export expand_col; @@ -17,7 +18,7 @@ fn expand_line(cx: ext_ctxt, sp: span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { get_mac_args(cx, sp, arg, 0u, option::some(0u), "line"); let loc = codemap::lookup_char_pos(cx.codemap(), sp.lo); - ret make_new_lit(cx, sp, ast::lit_uint(loc.line as u64, ast::ty_u)); + ret mk_lit(cx, sp, ast::lit_uint(loc.line as u64, ast::ty_u)); } /* #col(): expands to the current column number */ @@ -25,7 +26,7 @@ fn expand_col(cx: ext_ctxt, sp: span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { get_mac_args(cx, sp, arg, 0u, option::some(0u), "col"); let loc = codemap::lookup_char_pos(cx.codemap(), sp.lo); - ret make_new_lit(cx, sp, ast::lit_uint(loc.col as u64, ast::ty_u)); + ret mk_lit(cx, sp, ast::lit_uint(loc.col as u64, ast::ty_u)); } /* #file(): expands to the current filename */ @@ -36,20 +37,20 @@ fn expand_file(cx: ext_ctxt, sp: span, arg: ast::mac_arg, get_mac_args(cx, sp, arg, 0u, option::some(0u), "file"); let { file: @{ name: filename, _ }, _ } = codemap::lookup_char_pos(cx.codemap(), sp.lo); - ret make_new_lit(cx, sp, ast::lit_str(@filename)); + ret mk_lit(cx, sp, ast::lit_str(@filename)); } fn expand_stringify(cx: ext_ctxt, sp: span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { let args = get_mac_args(cx, sp, arg, 1u, option::some(1u), "stringify"); - ret make_new_lit(cx, sp, ast::lit_str(@pprust::expr_to_str(args[0]))); + ret mk_lit(cx, sp, ast::lit_str(@pprust::expr_to_str(args[0]))); } fn expand_mod(cx: ext_ctxt, sp: span, arg: ast::mac_arg, _body: ast::mac_body) -> @ast::expr { get_mac_args(cx, sp, arg, 0u, option::some(0u), "file"); - ret make_new_lit(cx, sp, ast::lit_str( - @str::connect(cx.mod_path().map({|x|*x}), "::"))); + ret mk_lit(cx, sp, ast::lit_str( + @str::connect(cx.mod_path().map(|x|*x), "::"))); } fn expand_include(cx: ext_ctxt, sp: span, arg: ast::mac_arg, @@ -76,7 +77,7 @@ fn expand_include_str(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, } } - ret make_new_lit(cx, sp, ast::lit_str(@result::unwrap(res))); + ret mk_lit(cx, sp, ast::lit_str(@result::unwrap(res))); } fn expand_include_bin(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, @@ -87,10 +88,10 @@ fn expand_include_bin(cx: ext_ctxt, sp: codemap::span, arg: ast::mac_arg, alt io::read_whole_file(res_rel_file(cx, sp, file)) { result::ok(src) { - let u8_exprs = vec::map(src) { |char: u8| - make_new_lit(cx, sp, ast::lit_uint(char as u64, ast::ty_u8)) - }; - ret make_new_expr(cx, sp, ast::expr_vec(u8_exprs, ast::m_imm)); + let u8_exprs = vec::map(src, |char: u8| { + mk_lit(cx, sp, ast::lit_uint(char as u64, ast::ty_u8)) + }); + ret mk_uniq_vec_e(cx, sp, u8_exprs); } result::err(e) { cx.parse_sess().span_diagnostic.handler().fatal(e) diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 1a2b19509b7..c949c2e17aa 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -41,7 +41,7 @@ iface ast_fold { fn fold_ident(&&ident) -> ident; fn fold_path(&&@path) -> @path; fn fold_local(&&@local) -> @local; - fn map_exprs(fn@(&&@expr) -> @expr, [@expr]/~) -> [@expr]/~; + fn map_exprs(fn@(&&@expr) -> @expr, ~[@expr]) -> ~[@expr]; fn new_id(node_id) -> node_id; fn new_span(span) -> span; } @@ -75,7 +75,7 @@ type ast_fold_precursor = @{ fold_ident: fn@(&&ident, ast_fold) -> ident, fold_path: fn@(path, ast_fold) -> path, fold_local: fn@(local_, span, ast_fold) -> (local_, span), - map_exprs: fn@(fn@(&&@expr) -> @expr, [@expr]/~) -> [@expr]/~, + map_exprs: fn@(fn@(&&@expr) -> @expr, ~[@expr]) -> ~[@expr], new_id: fn@(node_id) -> node_id, new_span: fn@(span) -> span}; @@ -87,7 +87,7 @@ fn fold_meta_item_(&&mi: @meta_item, fld: ast_fold) -> @meta_item { alt mi.node { meta_word(id) { meta_word(fld.fold_ident(id)) } meta_list(id, mis) { - let fold_meta_item = {|x|fold_meta_item_(x, fld)}; + let fold_meta_item = |x|fold_meta_item_(x, fld); meta_list(/* FIXME: (#2543) */ copy id, vec::map(mis, fold_meta_item)) } @@ -132,7 +132,7 @@ fn fold_mac_(m: mac, fld: ast_fold) -> mac { } fn fold_fn_decl(decl: ast::fn_decl, fld: ast_fold) -> ast::fn_decl { - ret {inputs: vec::map(decl.inputs, {|x| fold_arg_(x, fld)}), + ret {inputs: vec::map(decl.inputs, |x| fold_arg_(x, fld) ), output: fld.fold_ty(decl.output), purity: decl.purity, cf: decl.cf, @@ -149,16 +149,16 @@ fn fold_ty_param_bound(tpb: ty_param_bound, fld: ast_fold) -> ty_param_bound { fn fold_ty_param(tp: ty_param, fld: ast_fold) -> ty_param { {ident: /* FIXME (#2543) */ copy tp.ident, id: fld.new_id(tp.id), - bounds: @vec::map(*tp.bounds, {|x|fold_ty_param_bound(x, fld)})} + bounds: @vec::map(*tp.bounds, |x| fold_ty_param_bound(x, fld) )} } -fn fold_ty_params(tps: [ty_param]/~, fld: ast_fold) -> [ty_param]/~ { - vec::map(tps, {|x|fold_ty_param(x, fld)}) +fn fold_ty_params(tps: ~[ty_param], fld: ast_fold) -> ~[ty_param] { + vec::map(tps, |x| fold_ty_param(x, fld) ) } fn noop_fold_crate(c: crate_, fld: ast_fold) -> crate_ { - let fold_meta_item = {|x|fold_meta_item_(x, fld)}; - let fold_attribute = {|x|fold_attribute_(x, fld)}; + let fold_meta_item = |x| fold_meta_item_(x, fld); + let fold_attribute = |x| fold_attribute_(x, fld); ret {directives: vec::map(c.directives, fld.fold_crate_directive), module: fld.fold_mod(c.module), @@ -189,8 +189,8 @@ fn noop_fold_view_item(vi: view_item_, _fld: ast_fold) -> view_item_ { fn noop_fold_foreign_item(&&ni: @foreign_item, fld: ast_fold) -> @foreign_item { - let fold_arg = {|x|fold_arg_(x, fld)}; - let fold_attribute = {|x|fold_attribute_(x, fld)}; + let fold_arg = |x| fold_arg_(x, fld); + let fold_attribute = |x| fold_attribute_(x, fld); ret @{ident: fld.fold_ident(ni.ident), attrs: vec::map(ni.attrs, fold_attribute), @@ -212,7 +212,7 @@ fn noop_fold_foreign_item(&&ni: @foreign_item, fld: ast_fold) } fn noop_fold_item(&&i: @item, fld: ast_fold) -> @item { - let fold_attribute = {|x|fold_attribute_(x, fld)}; + let fold_attribute = |x| fold_attribute_(x, fld); ret @{ident: fld.fold_ident(i.ident), attrs: vec::map(i.attrs, fold_attribute), @@ -256,7 +256,7 @@ fn noop_fold_item_underscore(i: item_, fld: ast_fold) -> item_ { let ctor_body = fld.fold_block(ctor.node.body); let ctor_decl = fold_fn_decl(ctor.node.dec, fld); let ctor_id = fld.new_id(ctor.node.id); - let dtor = option::map(m_dtor) {|dtor| + let dtor = do option::map(m_dtor) |dtor| { let dtor_body = fld.fold_block(dtor.node.body); let dtor_id = fld.new_id(dtor.node.id); {node: {body: dtor_body, @@ -264,7 +264,7 @@ fn noop_fold_item_underscore(i: item_, fld: ast_fold) -> item_ { with dtor}}; item_class( /* FIXME (#2543) */ copy typms, - vec::map(ifaces, {|p| fold_iface_ref(p, fld) }), + vec::map(ifaces, |p| fold_iface_ref(p, fld)), vec::map(items, fld.fold_class_item), {node: {body: ctor_body, dec: ctor_decl, @@ -274,7 +274,7 @@ fn noop_fold_item_underscore(i: item_, fld: ast_fold) -> item_ { item_impl(tps, rp, ifce, ty, methods) { item_impl(fold_ty_params(tps, fld), rp, - ifce.map { |p| fold_iface_ref(p, fld) }, + ifce.map(|p| fold_iface_ref(p, fld)), fld.fold_ty(ty), vec::map(methods, fld.fold_method)) } @@ -333,12 +333,12 @@ fn noop_fold_pat(p: pat_, fld: ast_fold) -> pat_ { } pat_lit(e) { pat_lit(fld.fold_expr(e)) } pat_enum(pth, pats) { - pat_enum(fld.fold_path(pth), option::map(pats) - {|pats| vec::map(pats, fld.fold_pat)}) + pat_enum(fld.fold_path(pth), option::map(pats, + |pats| vec::map(pats, fld.fold_pat))) } pat_rec(fields, etc) { - let mut fs = []/~; - for fields.each {|f| + let mut fs = ~[]; + for fields.each |f| { vec::push(fs, {ident: /* FIXME (#2543) */ copy f.ident, pat: fld.fold_pat(f.pat)}); @@ -377,9 +377,9 @@ fn noop_fold_expr(e: expr_, fld: ast_fold) -> expr_ { expr: fld.fold_expr(field.node.expr)}, span: fld.new_span(field.span)}; } - let fold_field = {|x|fold_field_(x, fld)}; + let fold_field = |x| fold_field_(x, fld); - let fold_mac = {|x|fold_mac_(x, fld)}; + let fold_mac = |x| fold_mac_(x, fld); ret alt e { expr_new(p, i, v) { @@ -427,13 +427,13 @@ fn noop_fold_expr(e: expr_, fld: ast_fold) -> expr_ { expr_fn(proto, decl, body, captures) { expr_fn(proto, fold_fn_decl(decl, fld), fld.fold_block(body), - @((*captures).map({|cap_item| + @((*captures).map(|cap_item| { @({id: fld.new_id((*cap_item).id) with *cap_item})}))) } expr_fn_block(decl, body, captures) { expr_fn_block(fold_fn_decl(decl, fld), fld.fold_block(body), - @((*captures).map({|cap_item| + @((*captures).map(|cap_item| { @({id: fld.new_id((*cap_item).id) with *cap_item})}))) } @@ -475,7 +475,7 @@ fn noop_fold_expr(e: expr_, fld: ast_fold) -> expr_ { } fn noop_fold_ty(t: ty_, fld: ast_fold) -> ty_ { - let fold_mac = {|x|fold_mac_(x, fld)}; + let fold_mac = |x| fold_mac_(x, fld); fn fold_mt(mt: mt, fld: ast_fold) -> mt { {ty: fld.fold_ty(mt.ty), mutbl: mt.mutbl} } @@ -491,9 +491,9 @@ fn noop_fold_ty(t: ty_, fld: ast_fold) -> ty_ { ty_vec(mt) {ty_vec(fold_mt(mt, fld))} ty_ptr(mt) {ty_ptr(fold_mt(mt, fld))} ty_rptr(region, mt) {ty_rptr(region, fold_mt(mt, fld))} - ty_rec(fields) {ty_rec(vec::map(fields) {|f| fold_field(f, fld)})} + ty_rec(fields) {ty_rec(vec::map(fields, |f| fold_field(f, fld)))} ty_fn(proto, decl) {ty_fn(proto, fold_fn_decl(decl, fld))} - ty_tup(tys) {ty_tup(vec::map(tys) {|ty| fld.fold_ty(ty)})} + ty_tup(tys) {ty_tup(vec::map(tys, |ty| fld.fold_ty(ty)))} ty_path(path, id) {ty_path(fld.fold_path(path), fld.new_id(id))} ty_constr(ty, constrs) {ty_constr(fld.fold_ty(ty), vec::map(constrs, fld.fold_ty_constr))} @@ -528,10 +528,10 @@ fn noop_fold_variant(v: variant_, fld: ast_fold) -> variant_ { fn fold_variant_arg_(va: variant_arg, fld: ast_fold) -> variant_arg { ret {ty: fld.fold_ty(va.ty), id: fld.new_id(va.id)}; } - let fold_variant_arg = {|x|fold_variant_arg_(x, fld)}; + let fold_variant_arg = |x| fold_variant_arg_(x, fld); let args = vec::map(v.args, fold_variant_arg); - let fold_attribute = {|x|fold_attribute_(x, fld)}; + let fold_attribute = |x| fold_attribute_(x, fld); let attrs = vec::map(v.attrs, fold_attribute); let de = alt v.disr_expr { @@ -573,7 +573,7 @@ fn noop_fold_local(l: local_, fld: ast_fold) -> local_ { /* temporarily eta-expand because of a compiler bug with using `fn<T>` as a value */ -fn noop_map_exprs(f: fn@(&&@expr) -> @expr, es: [@expr]/~) -> [@expr]/~ { +fn noop_map_exprs(f: fn@(&&@expr) -> @expr, es: ~[@expr]) -> ~[@expr] { ret vec::map(es, f); } @@ -625,8 +625,8 @@ impl of ast_fold for ast_fold_precursor { fn fold_view_item(&&x: @view_item) -> @view_item { ret @{node: self.fold_view_item(x.node, self as ast_fold), - attrs: vec::map(x.attrs, {|a| - fold_attribute_(a, self as ast_fold)}), + attrs: vec::map(x.attrs, |a| + fold_attribute_(a, self as ast_fold)), vis: x.vis, span: self.new_span(x.span)}; } @@ -720,7 +720,7 @@ impl of ast_fold for ast_fold_precursor { let (n, s) = self.fold_local(x.node, x.span, self as ast_fold); ret @{node: n, span: self.new_span(s)}; } - fn map_exprs(f: fn@(&&@expr) -> @expr, e: [@expr]/~) -> [@expr]/~ { + fn map_exprs(f: fn@(&&@expr) -> @expr, e: ~[@expr]) -> ~[@expr] { self.map_exprs(f, e) } fn new_id(node_id: ast::node_id) -> node_id { diff --git a/src/libsyntax/parse.rs b/src/libsyntax/parse.rs index b63335b00c9..bf9a7dd2ace 100644 --- a/src/libsyntax/parse.rs +++ b/src/libsyntax/parse.rs @@ -37,8 +37,8 @@ fn new_parse_sess(demitter: option<emitter>) -> parse_sess { ret @{cm: cm, mut next_id: 1, span_diagnostic: mk_span_handler(mk_handler(demitter), cm), - interner: @interner::mk::<@str>({|x|str::hash(*x)}, - {|x,y|str::eq(*x, *y)}), + interner: @interner::mk::<@str>(|x| str::hash(*x), + |x,y| str::eq(*x, *y)), mut chpos: 0u, mut byte_pos: 0u}; } @@ -47,8 +47,8 @@ fn new_parse_sess_special_handler(sh: span_handler, cm: codemap::codemap) ret @{cm: cm, mut next_id: 1, span_diagnostic: sh, - interner: @interner::mk::<@str>({|x|str::hash(*x)}, - {|x,y|str::eq(*x, *y)}), + interner: @interner::mk::<@str>(|x| str::hash(*x), + |x,y| str::eq(*x, *y)), mut chpos: 0u, mut byte_pos: 0u}; } @@ -119,7 +119,7 @@ fn parse_expr_from_source_str(name: str, source: @str, cfg: ast::crate_cfg, } fn parse_item_from_source_str(name: str, source: @str, cfg: ast::crate_cfg, - +attrs: [ast::attribute]/~, + +attrs: ~[ast::attribute], vis: ast::visibility, sess: parse_sess) -> option<@ast::item> { let (p, rdr) = new_parser_etc_from_source_str(sess, cfg, name, @@ -198,7 +198,7 @@ fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, +path: str, } fn new_parser_from_tt(sess: parse_sess, cfg: ast::crate_cfg, - tt: [ast::token_tree]/~) -> parser { + tt: ~[ast::token_tree]) -> parser { let trdr = lexer::new_tt_reader(sess.span_diagnostic, sess.interner, tt); ret parser(sess, cfg, trdr as reader, parser::SOURCE_FILE) } diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index 3b7f31fb79f..d804a927edb 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -7,11 +7,11 @@ export parser_attr; // A type to distingush between the parsing of item attributes or syntax // extensions, which both begin with token.POUND -type attr_or_ext = option<either<[ast::attribute]/~, @ast::expr>>; +type attr_or_ext = option<either<~[ast::attribute], @ast::expr>>; impl parser_attr for parser { - fn parse_outer_attrs_or_ext(first_item_attrs: [ast::attribute]/~) + fn parse_outer_attrs_or_ext(first_item_attrs: ~[ast::attribute]) -> attr_or_ext { let expect_item_next = vec::is_not_empty(first_item_attrs); @@ -22,7 +22,7 @@ impl parser_attr for parser { self.bump(); let first_attr = self.parse_attribute_naked(ast::attr_outer, lo); - ret some(left(vec::append([first_attr]/~, + ret some(left(vec::append(~[first_attr], self.parse_outer_attributes()))); } else if !(self.look_ahead(1u) == token::LT || self.look_ahead(1u) == token::LBRACKET @@ -42,8 +42,8 @@ impl parser_attr for parser { } // Parse attributes that appear before an item - fn parse_outer_attributes() -> [ast::attribute]/~ { - let mut attrs: [ast::attribute]/~ = []/~; + fn parse_outer_attributes() -> ~[ast::attribute] { + let mut attrs: ~[ast::attribute] = ~[]; loop { alt copy self.token { token::POUND { @@ -92,9 +92,9 @@ impl parser_attr for parser { // is an inner attribute of the containing item or an outer attribute of // the first contained item until we see the semi). fn parse_inner_attrs_and_next() -> - {inner: [ast::attribute]/~, next: [ast::attribute]/~} { - let mut inner_attrs: [ast::attribute]/~ = []/~; - let mut next_outer_attrs: [ast::attribute]/~ = []/~; + {inner: ~[ast::attribute], next: ~[ast::attribute]} { + let mut inner_attrs: ~[ast::attribute] = ~[]; + let mut next_outer_attrs: ~[ast::attribute] = ~[]; loop { alt copy self.token { token::POUND { @@ -157,15 +157,15 @@ impl parser_attr for parser { } } - fn parse_meta_seq() -> [@ast::meta_item]/~ { + fn parse_meta_seq() -> ~[@ast::meta_item] { ret self.parse_seq(token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_meta_item()}).node; + |p| p.parse_meta_item()).node; } - fn parse_optional_meta() -> [@ast::meta_item]/~ { + fn parse_optional_meta() -> ~[@ast::meta_item] { alt self.token { token::LPAREN { ret self.parse_meta_seq(); } - _ { ret []/~; } } + _ { ret ~[]; } } } } diff --git a/src/libsyntax/parse/comments.rs b/src/libsyntax/parse/comments.rs index 7a6a9f0f3d7..e188331dd24 100644 --- a/src/libsyntax/parse/comments.rs +++ b/src/libsyntax/parse/comments.rs @@ -17,7 +17,7 @@ enum cmnt_style { blank_line, // Just a manual blank line "\n\n", for layout } -type cmnt = {style: cmnt_style, lines: [str]/~, pos: uint}; +type cmnt = {style: cmnt_style, lines: ~[str], pos: uint}; fn is_doc_comment(s: str) -> bool { s.starts_with("///") || @@ -53,11 +53,11 @@ fn strip_doc_comment_decoration(comment: str) -> str { fn block_trim(lines: [str]/~, chars: str, max: option<uint>) -> [str]/~ { let mut i = max.get_default(uint::max_value); - for lines.each {|line| + for lines.each |line| { if line.trim().is_empty() { cont; } - for line.each_chari {|j, c| + for line.each_chari |j, c| { if j >= i { break; } @@ -68,7 +68,7 @@ fn strip_doc_comment_decoration(comment: str) -> str { } } - ret lines.map {|line| + ret do lines.map |line| { let chars = str::chars(line); if i > chars.len() { "" @@ -117,14 +117,14 @@ fn consume_non_eol_whitespace(rdr: string_reader) { } } -fn push_blank_line_comment(rdr: string_reader, &comments: [cmnt]/~) { +fn push_blank_line_comment(rdr: string_reader, &comments: ~[cmnt]) { #debug(">>> blank-line comment"); - let v: [str]/~ = []/~; + let v: ~[str] = ~[]; vec::push(comments, {style: blank_line, lines: v, pos: rdr.chpos}); } fn consume_whitespace_counting_blank_lines(rdr: string_reader, - &comments: [cmnt]/~) { + &comments: ~[cmnt]) { while is_whitespace(rdr.curr) && !is_eof(rdr) { if rdr.col == 0u && rdr.curr == '\n' { push_blank_line_comment(rdr, comments); @@ -141,7 +141,7 @@ fn read_shebang_comment(rdr: string_reader, code_to_the_left: bool, #debug("<<< shebang comment"); vec::push(comments, { style: if code_to_the_left { trailing } else { isolated }, - lines: [read_one_line_comment(rdr)]/~, + lines: ~[read_one_line_comment(rdr)], pos: p }); } @@ -150,7 +150,7 @@ fn read_line_comments(rdr: string_reader, code_to_the_left: bool, &comments: [cmnt]/~) { #debug(">>> line comments"); let p = rdr.chpos; - let mut lines: [str]/~ = []/~; + let mut lines: ~[str] = ~[]; while rdr.curr == '/' && nextch(rdr) == '/' { let line = read_one_line_comment(rdr); log(debug, line); @@ -176,7 +176,7 @@ fn all_whitespace(s: str, begin: uint, end: uint) -> bool { ret true; } -fn trim_whitespace_prefix_and_push_line(&lines: [str]/~, +fn trim_whitespace_prefix_and_push_line(&lines: ~[str], s: str, col: uint) unsafe { let mut s1; let len = str::len(s); @@ -193,7 +193,7 @@ fn read_block_comment(rdr: string_reader, code_to_the_left: bool, &comments: [cmnt]/~) { #debug(">>> block comment"); let p = rdr.chpos; - let mut lines: [str]/~ = []/~; + let mut lines: ~[str] = ~[]; let mut col: uint = rdr.col; bump(rdr); bump(rdr); @@ -255,7 +255,7 @@ fn peeking_at_comment(rdr: string_reader) -> bool { } fn consume_comment(rdr: string_reader, code_to_the_left: bool, - &comments: [cmnt]/~) { + &comments: ~[cmnt]) { #debug(">>> consume comment"); if rdr.curr == '/' && nextch(rdr) == '/' { read_line_comments(rdr, code_to_the_left, comments); @@ -272,17 +272,17 @@ type lit = {lit: str, pos: uint}; fn gather_comments_and_literals(span_diagnostic: diagnostic::span_handler, path: str, srdr: io::reader) -> - {cmnts: [cmnt]/~, lits: [lit]/~} { + {cmnts: ~[cmnt], lits: ~[lit]} { let src = @str::from_bytes(srdr.read_whole_stream()); let itr = @interner::mk::<@str>( - {|x|str::hash(*x)}, - {|x,y|str::eq(*x, *y)} + |x| str::hash(*x), + |x,y| str::eq(*x, *y) ); let rdr = lexer::new_low_level_string_reader (span_diagnostic, codemap::new_filemap(path, src, 0u, 0u), itr); - let mut comments: [cmnt]/~ = []/~; - let mut literals: [lit]/~ = []/~; + let mut comments: ~[cmnt] = ~[]; + let mut literals: ~[lit] = ~[]; let mut first_read: bool = true; while !is_eof(rdr) { loop { diff --git a/src/libsyntax/parse/common.rs b/src/libsyntax/parse/common.rs index 8cc6f3d6484..c77d0ba67ee 100644 --- a/src/libsyntax/parse/common.rs +++ b/src/libsyntax/parse/common.rs @@ -149,9 +149,9 @@ impl parser_common for parser { } fn parse_seq_to_before_gt<T: copy>(sep: option<token::token>, - f: fn(parser) -> T) -> [T]/~ { + f: fn(parser) -> T) -> ~[T] { let mut first = true; - let mut v = []/~; + let mut v = ~[]; while self.token != token::GT && self.token != token::BINOP(token::SHR) { alt sep { @@ -166,7 +166,7 @@ impl parser_common for parser { } fn parse_seq_to_gt<T: copy>(sep: option<token::token>, - f: fn(parser) -> T) -> [T]/~ { + f: fn(parser) -> T) -> ~[T] { let v = self.parse_seq_to_before_gt(sep, f); self.expect_gt(); @@ -174,7 +174,7 @@ impl parser_common for parser { } fn parse_seq_lt_gt<T: copy>(sep: option<token::token>, - f: fn(parser) -> T) -> spanned<[T]/~> { + f: fn(parser) -> T) -> spanned<~[T]> { let lo = self.span.lo; self.expect(token::LT); let result = self.parse_seq_to_before_gt::<T>(sep, f); @@ -184,7 +184,7 @@ impl parser_common for parser { } fn parse_seq_to_end<T: copy>(ket: token::token, sep: seq_sep, - f: fn(parser) -> T) -> [T]/~ { + f: fn(parser) -> T) -> ~[T] { let val = self.parse_seq_to_before_end(ket, sep, f); self.bump(); ret val; @@ -192,9 +192,9 @@ impl parser_common for parser { fn parse_seq_to_before_end<T: copy>(ket: token::token, sep: seq_sep, - f: fn(parser) -> T) -> [T]/~ { + f: fn(parser) -> T) -> ~[T] { let mut first: bool = true; - let mut v: [T]/~ = []/~; + let mut v: ~[T] = ~[]; while self.token != ket { alt sep.sep { some(t) { if first { first = false; } @@ -210,7 +210,7 @@ impl parser_common for parser { fn parse_unspanned_seq<T: copy>(bra: token::token, ket: token::token, sep: seq_sep, - f: fn(parser) -> T) -> [T]/~ { + f: fn(parser) -> T) -> ~[T] { self.expect(bra); let result = self.parse_seq_to_before_end::<T>(ket, sep, f); self.bump(); @@ -220,7 +220,7 @@ impl parser_common for parser { // NB: Do not use this function unless you actually plan to place the // spanned list in the AST. fn parse_seq<T: copy>(bra: token::token, ket: token::token, sep: seq_sep, - f: fn(parser) -> T) -> spanned<[T]/~> { + f: fn(parser) -> T) -> spanned<~[T]> { let lo = self.span.lo; self.expect(bra); let result = self.parse_seq_to_before_end::<T>(ket, sep, f); diff --git a/src/libsyntax/parse/eval.rs b/src/libsyntax/parse/eval.rs index efae13342c0..883aedb75a6 100644 --- a/src/libsyntax/parse/eval.rs +++ b/src/libsyntax/parse/eval.rs @@ -8,25 +8,25 @@ type ctx = cfg: ast::crate_cfg}; fn eval_crate_directives(cx: ctx, - cdirs: [@ast::crate_directive]/~, + cdirs: ~[@ast::crate_directive], prefix: str, - &view_items: [@ast::view_item]/~, - &items: [@ast::item]/~) { - for cdirs.each {|sub_cdir| + &view_items: ~[@ast::view_item], + &items: ~[@ast::item]) { + for cdirs.each |sub_cdir| { eval_crate_directive(cx, sub_cdir, prefix, view_items, items); } } -fn eval_crate_directives_to_mod(cx: ctx, cdirs: [@ast::crate_directive]/~, +fn eval_crate_directives_to_mod(cx: ctx, cdirs: ~[@ast::crate_directive], prefix: str, suffix: option<str>) - -> (ast::_mod, [ast::attribute]/~) { + -> (ast::_mod, ~[ast::attribute]) { #debug("eval crate prefix: %s", prefix); #debug("eval crate suffix: %s", option::get_default(suffix, "none")); let (cview_items, citems, cattrs) = parse_companion_mod(cx, prefix, suffix); - let mut view_items: [@ast::view_item]/~ = []/~; - let mut items: [@ast::item]/~ = []/~; + let mut view_items: ~[@ast::view_item] = ~[]; + let mut items: ~[@ast::item] = ~[]; eval_crate_directives(cx, cdirs, prefix, view_items, items); ret ({view_items: vec::append(view_items, cview_items), items: vec::append(items, citems)}, @@ -44,7 +44,7 @@ We build the path to the companion mod by combining the prefix and the optional suffix then adding the .rs extension. */ fn parse_companion_mod(cx: ctx, prefix: str, suffix: option<str>) - -> ([@ast::view_item]/~, [@ast::item]/~, [ast::attribute]/~) { + -> (~[@ast::view_item], ~[@ast::item], ~[ast::attribute]) { fn companion_file(+prefix: str, suffix: option<str>) -> str { ret alt suffix { @@ -74,11 +74,11 @@ fn parse_companion_mod(cx: ctx, prefix: str, suffix: option<str>) cx.sess.byte_pos = cx.sess.byte_pos + r0.pos; ret (m0.view_items, m0.items, inner_attrs.inner); } else { - ret ([]/~, []/~, []/~); + ret (~[], ~[], ~[]); } } -fn cdir_path_opt(id: ast::ident, attrs: [ast::attribute]/~) -> @str { +fn cdir_path_opt(id: ast::ident, attrs: ~[ast::attribute]) -> @str { alt ::attr::first_attr_value_str_by_name(attrs, "path") { some(d) { ret d; @@ -88,8 +88,8 @@ fn cdir_path_opt(id: ast::ident, attrs: [ast::attribute]/~) -> @str { } fn eval_crate_directive(cx: ctx, cdir: @ast::crate_directive, prefix: str, - &view_items: [@ast::view_item]/~, - &items: [@ast::item]/~) { + &view_items: ~[@ast::view_item], + &items: ~[@ast::item]) { alt cdir.node { ast::cdir_src_mod(id, attrs) { let file_path = cdir_path_opt(@(*id + ".rs"), attrs); diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs index fec6d23a03b..8a32ecdac64 100644 --- a/src/libsyntax/parse/lexer.rs +++ b/src/libsyntax/parse/lexer.rs @@ -26,7 +26,7 @@ enum tt_frame_up { /* to break a circularity */ /* TODO: figure out how to have a uniquely linked stack, and change to `~` */ #[doc = "an unzipping of `token_tree`s"] type tt_frame = @{ - readme: [ast::token_tree]/~, + readme: ~[ast::token_tree], mut idx: uint, up: tt_frame_up }; @@ -41,7 +41,7 @@ type tt_reader = @{ }; fn new_tt_reader(span_diagnostic: diagnostic::span_handler, - itr: @interner::interner<@str>, src: [ast::token_tree]/~) + itr: @interner::interner<@str>, src: ~[ast::token_tree]) -> tt_reader { let r = @{span_diagnostic: span_diagnostic, interner: itr, mut cur: @{readme: src, mut idx: 0u, @@ -161,7 +161,7 @@ impl tt_reader_as_reader of reader for tt_reader { } fn string_advance_token(&&r: string_reader) { - for consume_whitespace_and_comments(r).each {|comment| + for consume_whitespace_and_comments(r).each |comment| { r.peek_tok = comment.tok; r.peek_span = comment.sp; ret; diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 1543e1a1ba1..0492ab27346 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -30,6 +30,7 @@ enum restriction { RESTRICT_STMT_EXPR, RESTRICT_NO_CALL_EXPRS, RESTRICT_NO_BAR_OP, + RESTRICT_NO_BAR_OR_DOUBLEBAR_OP, } enum file_type { CRATE_FILE, SOURCE_FILE, } @@ -52,10 +53,10 @@ enum pexpr { */ enum class_contents { ctor_decl(fn_decl, blk, codemap::span), dtor_decl(blk, codemap::span), - members([@class_member]/~) } + members(~[@class_member]) } type arg_or_capture_item = either<arg, capture_item>; -type item_info = (ident, item_, option<[attribute]/~>); +type item_info = (ident, item_, option<~[attribute]>); class parser { let sess: parse_sess; @@ -159,9 +160,9 @@ class parser { } fn parse_ty_fn_decl(purity: ast::purity) -> fn_decl { - let inputs = self.parse_unspanned_seq( + let inputs = do self.parse_unspanned_seq( token::LPAREN, token::RPAREN, - seq_sep_trailing_disallowed(token::COMMA)) { |p| + seq_sep_trailing_disallowed(token::COMMA)) |p| { let mode = p.parse_arg_mode(); let name = if is_plain_ident(p.token) && p.look_ahead(1u) == token::COLON { @@ -178,16 +179,16 @@ class parser { // functions can't have constrained types. Not sure whether // that would be desirable anyway. See bug for the story on // constrained types. - let constrs: [@constr]/~ = []/~; + let constrs: ~[@constr] = ~[]; let (ret_style, ret_ty) = self.parse_ret_ty(); ret {inputs: inputs, output: ret_ty, purity: purity, cf: ret_style, constraints: constrs}; } - fn parse_ty_methods() -> [ty_method]/~ { - self.parse_unspanned_seq(token::LBRACE, token::RBRACE, - seq_sep_none()) { |p| + fn parse_ty_methods() -> ~[ty_method] { + do self.parse_unspanned_seq(token::LBRACE, token::RBRACE, + seq_sep_none()) |p| { let attrs = p.parse_outer_attributes(); let flo = p.span.lo; let pur = p.parse_fn_purity(); @@ -217,9 +218,9 @@ class parser { // if i is the jth ident in args, return j // otherwise, fail - fn ident_index(args: [arg]/~, i: ident) -> uint { + fn ident_index(args: ~[arg], i: ident) -> uint { let mut j = 0u; - for args.each {|a| if a.ident == i { ret j; } j += 1u; } + for args.each |a| { if a.ident == i { ret j; } j += 1u; } self.fatal("unbound variable `" + *i + "` in constraint arg"); } @@ -237,7 +238,7 @@ class parser { ret @{node: carg, span: sp}; } - fn parse_constr_arg(args: [arg]/~) -> @constr_arg { + fn parse_constr_arg(args: ~[arg]) -> @constr_arg { let sp = self.span; let mut carg = carg_base; if self.token == token::BINOP(token::STAR) { @@ -249,13 +250,13 @@ class parser { ret @{node: carg, span: sp}; } - fn parse_ty_constr(fn_args: [arg]/~) -> @constr { + fn parse_ty_constr(fn_args: ~[arg]) -> @constr { let lo = self.span.lo; let path = self.parse_path_without_tps(); let args = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_constr_arg(fn_args)}); + |p| p.parse_constr_arg(fn_args)); ret @spanned(lo, self.span.hi, {path: path, args: args, id: self.get_id()}); } @@ -263,10 +264,10 @@ class parser { fn parse_constr_in_type() -> @ty_constr { let lo = self.span.lo; let path = self.parse_path_without_tps(); - let args: [@ty_constr_arg]/~ = self.parse_unspanned_seq( + let args: ~[@ty_constr_arg] = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_type_constr_arg()}); + |p| p.parse_type_constr_arg()); let hi = self.span.lo; let tc: ty_constr_ = {path: path, args: args, id: self.get_id()}; ret @spanned(lo, hi, tc); @@ -274,8 +275,8 @@ class parser { fn parse_constrs<T: copy>(pser: fn(parser) -> @constr_general<T>) -> - [@constr_general<T>]/~ { - let mut constrs: [@constr_general<T>]/~ = []/~; + ~[@constr_general<T>] { + let mut constrs: ~[@constr_general<T>] = ~[]; loop { let constr = pser(self); vec::push(constrs, constr); @@ -284,8 +285,8 @@ class parser { }; } - fn parse_type_constraints() -> [@ty_constr]/~ { - ret self.parse_constrs({|p| p.parse_constr_in_type()}); + fn parse_type_constraints() -> ~[@ty_constr] { + ret self.parse_constrs(|p| p.parse_constr_in_type()); } fn parse_ret_ty() -> (ret_style, @ty) { @@ -361,7 +362,7 @@ class parser { self.bump(); ty_nil } else { - let mut ts = [self.parse_ty(false)]/~; + let mut ts = ~[self.parse_ty(false)]; while self.token == token::COMMA { self.bump(); vec::push(ts, self.parse_ty(false)); @@ -373,7 +374,7 @@ class parser { } } else if self.token == token::AT { self.bump(); - // HACK: turn @[...] into a []/@ + // HACK: turn @[...] into a @-evec alt self.parse_mt() { {ty: t @ @{node: ty_vec(_), _}, mutbl: m_imm} { ty_vstore(t, vstore_box) @@ -382,7 +383,7 @@ class parser { } } else if self.token == token::TILDE { self.bump(); - // HACK: turn ~[...] into a []/~ + // HACK: turn ~[...] into a ~-evec alt self.parse_mt() { {ty: t @ @{node: ty_vec(_), _}, mutbl: m_imm} { ty_vstore(t, vstore_uniq) @@ -396,7 +397,7 @@ class parser { let elems = self.parse_unspanned_seq( token::LBRACE, token::RBRACE, seq_sep_trailing_allowed(token::COMMA), - {|p| p.parse_ty_field()}); + |p| p.parse_ty_field()); if vec::len(elems) == 0u { self.unexpected_last(token::RBRACE); } @@ -418,7 +419,7 @@ class parser { } else if self.token == token::BINOP(token::AND) { self.bump(); let region = self.parse_region_dot(); - // HACK: turn &a.[...] into a []/&a + // HACK: turn &a.[...] into a &a-evec alt self.parse_mt() { {ty: t @ @{node: ty_vec(_), _}, mutbl: m_imm} { ty_vstore(t, vstore_slice(region)) @@ -494,11 +495,11 @@ class parser { } fn parse_arg_or_capture_item() -> arg_or_capture_item { - self.parse_capture_item_or() {|p| p.parse_arg() } + self.parse_capture_item_or(|p| p.parse_arg()) } fn parse_fn_block_arg() -> arg_or_capture_item { - self.parse_capture_item_or() {|p| + do self.parse_capture_item_or |p| { let m = p.parse_arg_mode(); let i = p.parse_value_ident(); let t = if p.eat(token::COLON) { @@ -593,8 +594,8 @@ class parser { } fn parse_path_without_tps() -> @path { - self.parse_path_without_tps_({|p| p.parse_ident()}, - {|p| p.parse_ident()}) + self.parse_path_without_tps_(|p| p.parse_ident(), + |p| p.parse_ident()) } fn parse_path_without_tps_( @@ -603,7 +604,7 @@ class parser { let lo = self.span.lo; let global = self.eat(token::MOD_SEP); - let mut ids = []/~; + let mut ids = ~[]; loop { let is_not_last = self.look_ahead(2u) != token::LT @@ -618,12 +619,12 @@ class parser { } } @{span: mk_sp(lo, self.last_span.hi), global: global, - idents: ids, rp: none, types: []/~} + idents: ids, rp: none, types: ~[]} } fn parse_value_path() -> @path { - self.parse_path_without_tps_({|p| p.parse_ident()}, - {|p| p.parse_value_ident()}) + self.parse_path_without_tps_(|p| p.parse_ident(), + |p| p.parse_value_ident()) } fn parse_path_with_tps(colons: bool) -> @path { @@ -657,9 +658,9 @@ class parser { let tps = { if self.token == token::LT { self.parse_seq_lt_gt(some(token::COMMA), - {|p| p.parse_ty(false)}) + |p| p.parse_ty(false)) } else { - {node: []/~, span: path.span} + {node: ~[], span: path.span} } }; @@ -735,7 +736,7 @@ class parser { let lit = @spanned(lo, hi, lit_nil); ret self.mk_pexpr(lo, hi, expr_lit(lit)); } - let mut es = [self.parse_expr()]/~; + let mut es = ~[self.parse_expr()]; while self.token == token::COMMA { self.bump(); vec::push(es, self.parse_expr()); } @@ -753,7 +754,7 @@ class parser { if self.is_keyword("mut") || is_plain_ident(self.token) && self.look_ahead(1u) == token::COLON { - let mut fields = [self.parse_field(token::COLON)]/~; + let mut fields = ~[self.parse_field(token::COLON)]; let mut base = none; while self.token != token::RBRACE { // optional comma before "with" @@ -775,12 +776,12 @@ class parser { hi = self.span.hi; self.expect(token::RBRACE); ex = expr_rec(fields, base); - } else if token::is_bar(self.token) { - ret pexpr(self.parse_fn_block_expr()); } else { let blk = self.parse_block_tail(lo, default_blk); ret self.mk_pexpr(blk.span.lo, blk.span.hi, expr_block(blk)); } + } else if token::is_bar(self.token) { + ret pexpr(self.parse_lambda_expr()); } else if self.eat_keyword("new") { self.expect(token::LPAREN); let r = self.parse_expr(); @@ -817,7 +818,7 @@ class parser { let mutbl = self.parse_mutability(); let es = self.parse_seq_to_end( token::RBRACKET, seq_sep_trailing_allowed(token::COMMA), - {|p| p.parse_expr()}); + |p| p.parse_expr()); hi = self.span.hi; ex = expr_vec(es, mutbl); } else if self.token == token::POUND @@ -965,10 +966,10 @@ class parser { let es = if self.token == token::LPAREN { self.parse_unspanned_seq(token::LPAREN, token::RPAREN, - sep, {|p| p.parse_expr()}) + sep, |p| p.parse_expr()) } else { self.parse_unspanned_seq(token::LBRACKET, token::RBRACKET, - sep, {|p| p.parse_expr()}) + sep, |p| p.parse_expr()) }; let hi = self.span.hi; e = some(self.mk_expr(lo, hi, expr_vec(es, m_imm))); @@ -1016,8 +1017,8 @@ class parser { let tys = if self.eat(token::MOD_SEP) { self.expect(token::LT); self.parse_seq_to_gt(some(token::COMMA), - {|p| p.parse_ty(false)}) - } else { []/~ }; + |p| p.parse_ty(false)) + } else { ~[] }; e = self.mk_pexpr(lo, hi, expr_field(self.to_expr(e), self.get_str(i), tys)); @@ -1033,33 +1034,13 @@ class parser { let es = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_expr()}); + |p| p.parse_expr()); hi = self.span.hi; let nd = expr_call(self.to_expr(e), es, false); e = self.mk_pexpr(lo, hi, nd); } - // expr {|| ... } - token::LBRACE if (token::is_bar(self.look_ahead(1u)) - && self.permits_call()) { - self.bump(); - let blk = self.parse_fn_block_expr(); - alt e.node { - expr_call(f, args, false) { - e = pexpr(@{node: expr_call(f, - vec::append(args, [blk]/~), - true) - with *self.to_expr(e)}); - } - _ { - e = self.mk_pexpr(lo, self.last_span.hi, - expr_call(self.to_expr(e), - [blk]/~, true)); - } - } - } - // expr[...] token::LBRACKET { self.bump(); @@ -1108,12 +1089,12 @@ class parser { token::LPAREN | token::LBRACE | token::LBRACKET { let ket = flip(self.token); tt_delim(vec::append( - [parse_tt_flat(self, true)]/~, + ~[parse_tt_flat(self, true)], vec::append( self.parse_seq_to_before_end( ket, seq_sep_none(), - {|p| p.parse_token_tree()}), - [parse_tt_flat(self, true)]/~))) + |p| p.parse_token_tree()), + ~[parse_tt_flat(self, true)]))) } _ { parse_tt_flat(self, false) } }; @@ -1123,7 +1104,7 @@ class parser { fn parse_tt_mac_demo() -> @expr { let ms = self.parse_seq(token::LBRACE, token::RBRACE, common::seq_sep_none(), - {|p| p.parse_matcher(@mut 0u)}).node; + |p| p.parse_matcher(@mut 0u)).node; let tt = self.parse_token_tree(); alt tt { tt_delim(tts) { @@ -1148,7 +1129,7 @@ class parser { self.bump(); let ms = (self.parse_seq(token::LPAREN, token::RPAREN, common::seq_sep_none(), - {|p| p.parse_matcher(name_idx)}).node); + |p| p.parse_matcher(name_idx)).node); if ms.len() == 0u { self.fatal("repetition body must be nonempty"); } @@ -1205,7 +1186,7 @@ class parser { let m = self.parse_mutability(); let e = self.to_expr(self.parse_prefix_expr()); hi = e.span.hi; - // HACK: turn &[...] into [...]/& + // HACK: turn &[...] into a &-evec ex = alt e.node { expr_vec(*) if m == m_imm { expr_vstore(e, vstore_slice(self.region_from_name(none))) @@ -1221,7 +1202,7 @@ class parser { let m = self.parse_mutability(); let e = self.to_expr(self.parse_prefix_expr()); hi = e.span.hi; - // HACK: turn @[...] into [...]/@ + // HACK: turn @[...] into a @-evec ex = alt e.node { expr_vec(*) if m == m_imm { expr_vstore(e, vstore_box) } _ { expr_unary(box(m), e) } @@ -1232,7 +1213,7 @@ class parser { let m = self.parse_mutability(); let e = self.to_expr(self.parse_prefix_expr()); hi = e.span.hi; - // HACK: turn ~[...] into [...]/~ + // HACK: turn ~[...] into a ~-evec ex = alt e.node { expr_vec(*) if m == m_imm { expr_vstore(e, vstore_uniq) } _ { expr_unary(uniq(m), e) } @@ -1254,7 +1235,14 @@ class parser { if self.expr_is_complete(plhs) { ret lhs; } let peeked = self.token; if peeked == token::BINOP(token::OR) && - self.restriction == RESTRICT_NO_BAR_OP { ret lhs; } + (self.restriction == RESTRICT_NO_BAR_OP || + self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) { + ret lhs; + } + if peeked == token::OROR && + self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP { + ret lhs; + } let cur_opt = token_to_binop(peeked); alt cur_opt { some(cur_op) { @@ -1360,19 +1348,63 @@ class parser { // the future, just have to change parse_arg to parse_fn_block_arg. let (decl, capture_clause) = self.parse_fn_decl(impure_fn, - {|p| p.parse_arg_or_capture_item()}); + |p| p.parse_arg_or_capture_item()); let body = self.parse_block(); ret self.mk_expr(lo, body.span.hi, expr_fn(proto, decl, body, capture_clause)); } - fn parse_fn_block_expr() -> @expr { + // `|args| { ... }` like in `do` expressions + fn parse_lambda_block_expr() -> @expr { + self.parse_lambda_expr_( + || { + alt self.token { + token::BINOP(token::OR) | token::OROR { + self.parse_fn_block_decl() + } + _ { + // No argument list - `do foo {` + ({ + { + inputs: ~[], + output: @{ + id: self.get_id(), + node: ty_infer, + span: self.span + }, + purity: impure_fn, + cf: return_val, + constraints: ~[] + } + }, + @~[]) + } + } + }, + || { + let blk = self.parse_block(); + self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk)) + }) + } + + // `|args| expr` + fn parse_lambda_expr() -> @expr { + self.parse_lambda_expr_(|| self.parse_fn_block_decl(), + || self.parse_expr()) + } + + fn parse_lambda_expr_(parse_decl: fn&() -> (fn_decl, capture_clause), + parse_body: fn&() -> @expr) -> @expr { let lo = self.last_span.lo; - let (decl, captures) = self.parse_fn_block_decl(); - let body = self.parse_block_tail(lo, default_blk); + let (decl, captures) = parse_decl(); + let body = parse_body(); + let fakeblock = {view_items: ~[], stmts: ~[], expr: some(body), + id: self.get_id(), rules: default_blk}; + let fakeblock = spanned(body.span.lo, body.span.hi, + fakeblock); ret self.mk_expr(lo, body.span.hi, - expr_fn_block(decl, body, captures)); + expr_fn_block(decl, fakeblock, captures)); } fn parse_else_expr() -> @expr { @@ -1387,16 +1419,37 @@ class parser { fn parse_sugary_call_expr(keyword: str, ctor: fn(+@expr) -> expr_) -> @expr { let lo = self.last_span; - let call = self.parse_expr_res(RESTRICT_STMT_EXPR); - alt call.node { - expr_call(f, args, true) { - let b_arg = vec::last(args); - let last = self.mk_expr(b_arg.span.lo, b_arg.span.hi, - ctor(b_arg)); - @{node: expr_call(f, vec::append(vec::init(args), [last]/~), true) - with *call} + // Parse the callee `foo` in + // for foo || { + // for foo.bar || { + // etc, or the portion of the call expression before the lambda in + // for foo() || { + // or + // for foo.bar(a) || { + // Turn on the restriction to stop at | or || so we can parse + // them as the lambda arguments + let e = self.parse_expr_res(RESTRICT_NO_BAR_OR_DOUBLEBAR_OP); + alt e.node { + expr_call(f, args, false) { + let block = self.parse_lambda_block_expr(); + let last_arg = self.mk_expr(block.span.lo, block.span.hi, + ctor(block)); + let args = vec::append(args, ~[last_arg]); + @{node: expr_call(f, args, true) + with *e} + } + expr_path(*) | expr_field(*) | expr_call(*) { + let block = self.parse_lambda_block_expr(); + let last_arg = self.mk_expr(block.span.lo, block.span.hi, + ctor(block)); + self.mk_expr(lo.lo, last_arg.span.hi, + expr_call(e, ~[last_arg], true)) } _ { + // There may be other types of expressions that can + // represent the callee in `for` and `do` expressions + // but they aren't represented by tests + #debug("sugary call on %?", e.node); self.span_fatal( lo, #fmt("`%s` must be followed by a block call", keyword)); } @@ -1424,7 +1477,7 @@ class parser { else { alt_exhaustive }; let discriminant = self.parse_expr(); self.expect(token::LBRACE); - let mut arms: [arm]/~ = []/~; + let mut arms: ~[arm] = ~[]; while self.token != token::RBRACE { let pats = self.parse_pats(); let mut guard = none; @@ -1473,8 +1526,8 @@ class parser { } } - fn parse_pats() -> [@pat]/~ { - let mut pats = []/~; + fn parse_pats() -> ~[@pat] { + let mut pats = ~[]; loop { vec::push(pats, self.parse_pat()); if self.token == token::BINOP(token::OR) { self.bump(); } @@ -1502,7 +1555,7 @@ class parser { } token::LBRACE { self.bump(); - let mut fields = []/~; + let mut fields = ~[]; let mut etc = false; let mut first = true; while self.token != token::RBRACE { @@ -1552,7 +1605,7 @@ class parser { let expr = self.mk_expr(lo, hi, expr_lit(lit)); pat = pat_lit(expr); } else { - let mut fields = [self.parse_pat()]/~; + let mut fields = ~[self.parse_pat()]; while self.token == token::COMMA { self.bump(); vec::push(fields, self.parse_pat()); @@ -1587,7 +1640,7 @@ class parser { } else { let enum_path = self.parse_path_with_tps(true); hi = enum_path.span.hi; - let mut args: [@pat]/~ = []/~; + let mut args: ~[@pat] = ~[]; let mut star_pat = false; alt self.token { token::LPAREN { @@ -1602,7 +1655,7 @@ class parser { args = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_pat()}); + |p| p.parse_pat()); hi = self.span.hi; } } @@ -1643,7 +1696,7 @@ class parser { fn parse_let() -> @decl { let is_mutbl = self.eat_keyword("mut"); let lo = self.span.lo; - let mut locals = [self.parse_local(is_mutbl, true)]/~; + let mut locals = ~[self.parse_local(is_mutbl, true)]; while self.eat(token::COMMA) { vec::push(locals, self.parse_local(is_mutbl, true)); } @@ -1667,8 +1720,8 @@ class parser { span: mk_sp(lo, self.last_span.hi)}; } - fn parse_stmt(+first_item_attrs: [attribute]/~) -> @stmt { - fn check_expected_item(p: parser, current_attrs: [attribute]/~) { + fn parse_stmt(+first_item_attrs: ~[attribute]) -> @stmt { + fn check_expected_item(p: parser, current_attrs: ~[attribute]) { // If we have attributes then we should have an item if vec::is_not_empty(current_attrs) { p.fatal("expected item"); @@ -1684,7 +1737,7 @@ class parser { } else { let mut item_attrs; alt self.parse_outer_attrs_or_ext(first_item_attrs) { - none { item_attrs = []/~; } + none { item_attrs = ~[]; } some(left(attrs)) { item_attrs = attrs; } some(right(ext)) { ret @spanned(lo, ext.span.hi, stmt_expr(ext, self.get_id())); @@ -1725,14 +1778,14 @@ class parser { } fn parse_inner_attrs_and_block(parse_attrs: bool) - -> ([attribute]/~, blk) { + -> (~[attribute], blk) { fn maybe_parse_inner_attrs_and_next(p: parser, parse_attrs: bool) -> - {inner: [attribute]/~, next: [attribute]/~} { + {inner: ~[attribute], next: ~[attribute]} { if parse_attrs { p.parse_inner_attrs_and_next() } else { - {inner: []/~, next: []/~} + {inner: ~[], next: ~[]} } } @@ -1767,12 +1820,12 @@ class parser { // necessary, and this should take a qualifier. // some blocks start with "#{"... fn parse_block_tail(lo: uint, s: blk_check_mode) -> blk { - self.parse_block_tail_(lo, s, []/~) + self.parse_block_tail_(lo, s, ~[]) } fn parse_block_tail_(lo: uint, s: blk_check_mode, - +first_item_attrs: [attribute]/~) -> blk { - let mut stmts = []/~; + +first_item_attrs: ~[attribute]) -> blk { + let mut stmts = ~[]; let mut expr = none; let {attrs_remaining, view_items} = self.parse_view(first_item_attrs, true); @@ -1789,7 +1842,7 @@ class parser { } _ { let stmt = self.parse_stmt(initial_attrs); - initial_attrs = []/~; + initial_attrs = ~[]; alt stmt.node { stmt_expr(e, stmt_id) { // Expression without semicolon: alt self.token { @@ -1831,7 +1884,7 @@ class parser { } fn parse_ty_param() -> ty_param { - let mut bounds = []/~; + let mut bounds = ~[]; let ident = self.parse_ident(); if self.eat(token::COLON) { while self.token != token::COMMA && self.token != token::GT { @@ -1846,17 +1899,17 @@ class parser { ret {ident: ident, id: self.get_id(), bounds: @bounds}; } - fn parse_ty_params() -> [ty_param]/~ { + fn parse_ty_params() -> ~[ty_param] { if self.eat(token::LT) { - self.parse_seq_to_gt(some(token::COMMA), {|p| p.parse_ty_param()}) - } else { []/~ } + self.parse_seq_to_gt(some(token::COMMA), |p| p.parse_ty_param()) + } else { ~[] } } fn parse_fn_decl(purity: purity, parse_arg_fn: fn(parser) -> arg_or_capture_item) -> (fn_decl, capture_clause) { - let args_or_capture_items: [arg_or_capture_item]/~ = + let args_or_capture_items: ~[arg_or_capture_item] = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), parse_arg_fn); @@ -1867,10 +1920,10 @@ class parser { // Use the args list to translate each bound variable // mentioned in a constraint to an arg index. // Seems weird to do this in the parser, but I'm not sure how else to. - let mut constrs = []/~; + let mut constrs = ~[]; if self.token == token::COLON { self.bump(); - constrs = self.parse_constrs({|p| p.parse_ty_constr(inputs) }); + constrs = self.parse_constrs(|p| p.parse_ty_constr(inputs)); } let (ret_style, ret_ty) = self.parse_ret_ty(); ret ({inputs: inputs, @@ -1883,12 +1936,12 @@ class parser { fn parse_fn_block_decl() -> (fn_decl, capture_clause) { let inputs_captures = { if self.eat(token::OROR) { - []/~ + ~[] } else { self.parse_unspanned_seq( token::BINOP(token::OR), token::BINOP(token::OR), seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_fn_block_arg()}) + |p| p.parse_fn_block_arg()) } }; let output = if self.eat(token::RARROW) { @@ -1900,11 +1953,11 @@ class parser { output: output, purity: impure_fn, cf: return_val, - constraints: []/~}, + constraints: ~[]}, @either::rights(inputs_captures)); } - fn parse_fn_header() -> {ident: ident, tps: [ty_param]/~} { + fn parse_fn_header() -> {ident: ident, tps: ~[ty_param]} { let id = self.parse_value_ident(); let ty_params = self.parse_ty_params(); ret {ident: id, tps: ty_params}; @@ -1912,7 +1965,7 @@ class parser { fn mk_item(lo: uint, hi: uint, +ident: ident, +node: item_, vis: visibility, - +attrs: [attribute]/~) -> @item { + +attrs: ~[attribute]) -> @item { ret @{ident: ident, attrs: attrs, id: self.get_id(), @@ -1923,7 +1976,7 @@ class parser { fn parse_item_fn(purity: purity) -> item_info { let t = self.parse_fn_header(); - let (decl, _) = self.parse_fn_decl(purity, {|p| p.parse_arg()}); + let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg()); let (inner_attrs, body) = self.parse_inner_attrs_and_block(true); (t.ident, item_fn(decl, t.tps, body), some(inner_attrs)) } @@ -1948,7 +2001,7 @@ class parser { let lo = self.span.lo, pur = self.parse_fn_purity(); let ident = self.parse_method_name(); let tps = self.parse_ty_params(); - let (decl, _) = self.parse_fn_decl(pur, {|p| p.parse_arg()}); + let (decl, _) = self.parse_fn_decl(pur, |p| p.parse_arg()); let (inner_attrs, body) = self.parse_inner_attrs_and_block(true); let attrs = vec::append(attrs, inner_attrs); @{ident: ident, attrs: attrs, tps: tps, decl: decl, body: body, @@ -1965,9 +2018,9 @@ class parser { } // Parses three variants (with the region/type params always optional): - // impl /&<T: copy> of to_str for [T]/~ { ... } - // impl name/&<T> of to_str for [T]/~ { ... } - // impl name/&<T> for [T]/~ { ... } + // impl /&<T: copy> of to_str for ~[T] { ... } + // impl name/&<T> of to_str for ~[T] { ... } + // impl name/&<T> for ~[T] { ... } fn parse_item_impl() -> item_info { fn wrap_path(p: parser, pt: @path) -> @ty { @{id: p.get_id(), node: ty_path(pt, p.get_id()), span: pt.span} @@ -1979,7 +2032,7 @@ class parser { (none, self.parse_region_param(), self.parse_ty_params()) } else if self.is_keyword("of") { - (none, rp_none, []/~) + (none, rp_none, ~[]) } else { let id = self.parse_ident(); let rp = self.parse_region_param(); @@ -1999,7 +2052,7 @@ class parser { }; self.expect_keyword("for"); let ty = self.parse_ty(false); - let mut meths = []/~; + let mut meths = ~[]; self.expect(token::LBRACE); while !self.eat(token::RBRACE) { vec::push(meths, self.parse_method(public)); @@ -2012,7 +2065,7 @@ class parser { // the return type of the ctor function. fn ident_to_path_tys(i: ident, rp: region_param, - typarams: [ty_param]/~) -> @path { + typarams: ~[ty_param]) -> @path { let s = self.last_span; // Hack. But then, this whole function is in service of a hack. @@ -2021,9 +2074,9 @@ class parser { rp_self { some(self.region_from_name(some(@"self"))) } }; - @{span: s, global: false, idents: [i]/~, + @{span: s, global: false, idents: ~[i], rp: a_r, - types: vec::map(typarams, {|tp| + types: vec::map(typarams, |tp| { @{id: self.get_id(), node: ty_path(ident_to_path(s, tp.ident), self.get_id()), span: s}}) @@ -2035,10 +2088,10 @@ class parser { id: self.get_id()} } - fn parse_iface_ref_list() -> [@iface_ref]/~ { + fn parse_iface_ref_list() -> ~[@iface_ref] { self.parse_seq_to_before_end( token::LBRACE, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_iface_ref()}) + |p| p.parse_iface_ref()) } fn parse_item_class() -> item_info { @@ -2046,11 +2099,11 @@ class parser { let rp = self.parse_region_param(); let ty_params = self.parse_ty_params(); let class_path = self.ident_to_path_tys(class_name, rp, ty_params); - let ifaces : [@iface_ref]/~ = if self.eat(token::COLON) + let ifaces : ~[@iface_ref] = if self.eat(token::COLON) { self.parse_iface_ref_list() } - else { []/~ }; + else { ~[] }; self.expect(token::LBRACE); - let mut ms: [@class_member]/~ = []/~; + let mut ms: ~[@class_member] = ~[]; let ctor_id = self.get_id(); let mut the_ctor : option<(fn_decl, blk, codemap::span)> = none; let mut the_dtor : option<(blk, codemap::span)> = none; @@ -2065,7 +2118,7 @@ class parser { members(mms) { ms = vec::append(ms, mms); } } } - let actual_dtor = option::map(the_dtor) {|dtor| + let actual_dtor = do option::map(the_dtor) |dtor| { let (d_body, d_s) = dtor; {node: {id: self.get_id(), self_id: self.get_id(), @@ -2108,7 +2161,7 @@ class parser { fn parse_ctor(result_ty: ast::ty_) -> class_contents { // FIXME (#2660): Can ctors/dtors have attrs? let lo = self.last_span.lo; - let (decl_, _) = self.parse_fn_decl(impure_fn, {|p| p.parse_arg()}); + let (decl_, _) = self.parse_fn_decl(impure_fn, |p| p.parse_arg()); let decl = {output: @{id: self.get_id(), node: result_ty, span: decl_.output.span} with decl_}; @@ -2135,7 +2188,7 @@ class parser { } else if self.eat_keyword("priv") { self.expect(token::LBRACE); - let mut results = []/~; + let mut results = ~[]; while self.token != token::RBRACE { vec::push(results, self.parse_single_class_item(private)); } @@ -2144,7 +2197,7 @@ class parser { } else { // Probably need to parse attrs - ret members([self.parse_single_class_item(public)]/~); + ret members(~[self.parse_single_class_item(public)]); } } @@ -2155,11 +2208,11 @@ class parser { } fn parse_mod_items(term: token::token, - +first_item_attrs: [attribute]/~) -> _mod { + +first_item_attrs: ~[attribute]) -> _mod { // Shouldn't be any view items since we've already parsed an item attr let {attrs_remaining, view_items} = self.parse_view(first_item_attrs, false); - let mut items: [@item]/~ = []/~; + let mut items: ~[@item] = ~[]; let mut first = true; while self.token != term { let mut attrs = self.parse_outer_attributes(); @@ -2206,11 +2259,11 @@ class parser { (id, item_mod(m), some(inner_attrs.inner)) } - fn parse_item_foreign_fn(+attrs: [attribute]/~, + fn parse_item_foreign_fn(+attrs: ~[attribute], purity: purity) -> @foreign_item { let lo = self.last_span.lo; let t = self.parse_fn_header(); - let (decl, _) = self.parse_fn_decl(purity, {|p| p.parse_arg()}); + let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg()); let mut hi = self.span.hi; self.expect(token::SEMI); ret @{ident: t.ident, @@ -2232,22 +2285,22 @@ class parser { else { self.unexpected(); } } - fn parse_foreign_item(+attrs: [attribute]/~) -> + fn parse_foreign_item(+attrs: ~[attribute]) -> @foreign_item { self.parse_item_foreign_fn(attrs, self.parse_fn_purity()) } - fn parse_foreign_mod_items(+first_item_attrs: [attribute]/~) -> + fn parse_foreign_mod_items(+first_item_attrs: ~[attribute]) -> foreign_mod { // Shouldn't be any view items since we've already parsed an item attr let {attrs_remaining, view_items} = self.parse_view(first_item_attrs, false); - let mut items: [@foreign_item]/~ = []/~; + let mut items: ~[@foreign_item] = ~[]; let mut initial_attrs = attrs_remaining; while self.token != token::RBRACE { let attrs = vec::append(initial_attrs, self.parse_outer_attributes()); - initial_attrs = []/~; + initial_attrs = ~[]; vec::push(items, self.parse_foreign_item(attrs)); } ret {view_items: view_items, @@ -2293,7 +2346,7 @@ class parser { let id = self.parse_ident(); let rp = self.parse_region_param(); let ty_params = self.parse_ty_params(); - let mut variants: [variant]/~ = []/~; + let mut variants: ~[variant] = ~[]; // Newtype syntax if self.token == token::EQ { self.check_restricted_keywords_(*id); @@ -2303,12 +2356,12 @@ class parser { let variant = spanned(ty.span.lo, ty.span.hi, {name: id, - attrs: []/~, - args: [{ty: ty, id: self.get_id()}]/~, + attrs: ~[], + args: ~[{ty: ty, id: self.get_id()}], id: self.get_id(), disr_expr: none, vis: public}); - ret (id, item_enum([variant]/~, ty_params, rp), none); + ret (id, item_enum(~[variant], ty_params, rp), none); } self.expect(token::LBRACE); @@ -2319,14 +2372,14 @@ class parser { let vlo = self.span.lo; let vis = self.parse_visibility(default_vis); let ident = self.parse_value_ident(); - let mut args = []/~, disr_expr = none; + let mut args = ~[], disr_expr = none; if self.token == token::LPAREN { all_nullary = false; let arg_tys = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), - {|p| p.parse_ty(false)}); - for arg_tys.each {|ty| + |p| p.parse_ty(false)); + for arg_tys.each |ty| { vec::push(args, {ty: ty, id: self.get_id()}); } } else if self.eat(token::EQ) { @@ -2380,7 +2433,7 @@ class parser { } } - fn parse_item(+attrs: [attribute]/~, vis: visibility) + fn parse_item(+attrs: ~[attribute], vis: visibility) -> option<@item> { let lo = self.span.lo; let (ident, item_, extra_attrs) = if self.eat_keyword("const") { @@ -2437,20 +2490,20 @@ class parser { fn parse_view_path() -> @view_path { let lo = self.span.lo; let first_ident = self.parse_ident(); - let mut path = [first_ident]/~; + let mut path = ~[first_ident]; #debug("parsed view_path: %s", *first_ident); alt self.token { token::EQ { // x = foo::bar self.bump(); - path = [self.parse_ident()]/~; + path = ~[self.parse_ident()]; while self.token == token::MOD_SEP { self.bump(); let id = self.parse_ident(); vec::push(path, id); } let path = @{span: mk_sp(lo, self.span.hi), global: false, - idents: path, rp: none, types: []/~}; + idents: path, rp: none, types: ~[]}; ret @spanned(lo, self.span.hi, view_path_simple(first_ident, path, self.get_id())); } @@ -2472,10 +2525,10 @@ class parser { let idents = self.parse_unspanned_seq( token::LBRACE, token::RBRACE, seq_sep_trailing_allowed(token::COMMA), - {|p| p.parse_path_list_ident()}); + |p| p.parse_path_list_ident()); let path = @{span: mk_sp(lo, self.span.hi), global: false, idents: path, - rp: none, types: []/~}; + rp: none, types: ~[]}; ret @spanned(lo, self.span.hi, view_path_list(path, idents, self.get_id())); } @@ -2485,7 +2538,7 @@ class parser { self.bump(); let path = @{span: mk_sp(lo, self.span.hi), global: false, idents: path, - rp: none, types: []/~}; + rp: none, types: ~[]}; ret @spanned(lo, self.span.hi, view_path_glob(path, self.get_id())); } @@ -2498,13 +2551,13 @@ class parser { } let last = path[vec::len(path) - 1u]; let path = @{span: mk_sp(lo, self.span.hi), global: false, - idents: path, rp: none, types: []/~}; + idents: path, rp: none, types: ~[]}; ret @spanned(lo, self.span.hi, view_path_simple(last, path, self.get_id())); } - fn parse_view_paths() -> [@view_path]/~ { - let mut vp = [self.parse_view_path()]/~; + fn parse_view_paths() -> ~[@view_path] { + let mut vp = ~[self.parse_view_path()]; while self.token == token::COMMA { self.bump(); vec::push(vp, self.parse_view_path()); @@ -2521,7 +2574,7 @@ class parser { || self.token_is_keyword("export", tok) } - fn parse_view_item(+attrs: [attribute]/~) -> @view_item { + fn parse_view_item(+attrs: ~[attribute]) -> @view_item { let lo = self.span.lo, vis = self.parse_visibility(private); let node = if self.eat_keyword("use") { self.parse_use() @@ -2535,12 +2588,12 @@ class parser { vis: vis, span: mk_sp(lo, self.last_span.hi)} } - fn parse_view(+first_item_attrs: [attribute]/~, - only_imports: bool) -> {attrs_remaining: [attribute]/~, - view_items: [@view_item]/~} { + fn parse_view(+first_item_attrs: ~[attribute], + only_imports: bool) -> {attrs_remaining: ~[attribute], + view_items: ~[@view_item]} { let mut attrs = vec::append(first_item_attrs, self.parse_outer_attributes()); - let mut items = []/~; + let mut items = ~[]; while if only_imports { self.is_keyword("import") } else { self.is_view_item() } { vec::push(items, self.parse_view_item(attrs)); @@ -2556,7 +2609,7 @@ class parser { let first_item_outer_attrs = crate_attrs.next; let m = self.parse_mod_items(token::EOF, first_item_outer_attrs); ret @spanned(lo, self.span.lo, - {directives: []/~, + {directives: ~[], module: m, attrs: crate_attrs.inner, config: self.cfg}); @@ -2577,7 +2630,7 @@ class parser { // // Each directive imperatively extends its environment with 0 or more // items. - fn parse_crate_directive(first_outer_attr: [attribute]/~) -> + fn parse_crate_directive(first_outer_attr: ~[attribute]) -> crate_directive { // Collect the next attributes @@ -2619,8 +2672,8 @@ class parser { } fn parse_crate_directives(term: token::token, - first_outer_attr: [attribute]/~) -> - [@crate_directive]/~ { + first_outer_attr: ~[attribute]) -> + ~[@crate_directive] { // This is pretty ugly. If we have an outer attribute then we can't // accept seeing the terminator next, so if we do see it then fail the @@ -2629,12 +2682,12 @@ class parser { self.expect_keyword("mod"); } - let mut cdirs: [@crate_directive]/~ = []/~; + let mut cdirs: ~[@crate_directive] = ~[]; let mut first_outer_attr = first_outer_attr; while self.token != term { let cdir = @self.parse_crate_directive(first_outer_attr); vec::push(cdirs, cdir); - first_outer_attr = []/~; + first_outer_attr = ~[]; } ret cdirs; } diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index b3db69b5be6..a2bd503ffca 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -203,6 +203,8 @@ pure fn can_begin_expr(t: token) -> bool { BINOP(MINUS) { true } BINOP(STAR) { true } BINOP(AND) { true } + BINOP(OR) { true } // in lambda syntax + OROR { true } // in lambda syntax MOD_SEP { true } _ { false } } @@ -241,10 +243,10 @@ in positions that might otherwise contain _value identifiers_. "] fn keyword_table() -> hashmap<str, ()> { let keywords = str_hash(); - for contextual_keyword_table().each_key {|word| + for contextual_keyword_table().each_key |word| { keywords.insert(word, ()); } - for restricted_keyword_table().each_key {|word| + for restricted_keyword_table().each_key |word| { keywords.insert(word, ()); } keywords @@ -253,7 +255,7 @@ fn keyword_table() -> hashmap<str, ()> { #[doc = "Keywords that may be used as identifiers"] fn contextual_keyword_table() -> hashmap<str, ()> { let words = str_hash(); - let keys = [ + let keys = ~[ "as", "else", "move", @@ -265,8 +267,8 @@ fn contextual_keyword_table() -> hashmap<str, ()> { "with", /* temp */ "sep", "many", "at_least_one", "parse" - ]/~; - for keys.each {|word| + ]; + for keys.each |word| { words.insert(word, ()); } words @@ -287,7 +289,7 @@ Reasons: "] fn restricted_keyword_table() -> hashmap<str, ()> { let words = str_hash(); - let keys = [ + let keys = ~[ "alt", "assert", "break", @@ -303,8 +305,8 @@ fn restricted_keyword_table() -> hashmap<str, ()> { "true", "trait", "type", "unchecked", "unsafe", "while" - ]/~; - for keys.each {|word| + ]; + for keys.each |word| { words.insert(word, ()); } words diff --git a/src/libsyntax/print/pp.rs b/src/libsyntax/print/pp.rs index 5f10fe0eb47..58f5ac85d48 100644 --- a/src/libsyntax/print/pp.rs +++ b/src/libsyntax/print/pp.rs @@ -71,7 +71,7 @@ fn tok_str(++t: token) -> str { } } -fn buf_str(toks: [mut token]/~, szs: [mut int]/~, left: uint, right: uint, +fn buf_str(toks: ~[mut token], szs: ~[mut int], left: uint, right: uint, lim: uint) -> str { let n = vec::len(toks); assert (n == vec::len(szs)); @@ -100,9 +100,9 @@ fn mk_printer(out: io::writer, linewidth: uint) -> printer { // fall behind. let n: uint = 3u * linewidth; #debug("mk_printer %u", linewidth); - let token: [mut token]/~ = vec::to_mut(vec::from_elem(n, EOF)); - let size: [mut int]/~ = vec::to_mut(vec::from_elem(n, 0)); - let scan_stack: [mut uint]/~ = vec::to_mut(vec::from_elem(n, 0u)); + let token: ~[mut token] = vec::to_mut(vec::from_elem(n, EOF)); + let size: ~[mut int] = vec::to_mut(vec::from_elem(n, 0)); + let scan_stack: ~[mut uint] = vec::to_mut(vec::from_elem(n, 0u)); @{out: out, buf_len: n, mut margin: linewidth as int, @@ -206,8 +206,8 @@ type printer = @{ mut space: int, // number of spaces left on line mut left: uint, // index of left side of input stream mut right: uint, // index of right side of input stream - token: [mut token]/~, // ring-buffr stream goes through - size: [mut int]/~, // ring-buffer of calculated sizes + token: ~[mut token], // ring-buffr stream goes through + size: ~[mut int], // ring-buffer of calculated sizes mut left_total: int, // running size of stream "...left" mut right_total: int, // running size of stream "...right" // pseudo-stack, really a ring too. Holds the @@ -216,7 +216,7 @@ type printer = @{ // BEGIN (if there is any) on top of it. Stuff is flushed off the // bottom as it becomes irrelevant due to the primary ring-buffer // advancing. - mut scan_stack: [mut uint]/~, + mut scan_stack: ~[mut uint], mut scan_stack_empty: bool, // top==bottom disambiguator mut top: uint, // index of top of scan_stack mut bottom: uint, // index of bottom of scan_stack @@ -231,7 +231,7 @@ impl printer for printer { // be very careful with this! fn replace_last_token(t: token) { self.token[self.right] = t; } fn pretty_print(t: token) { - #debug("pp [%u,%u]/~", self.left, self.right); + #debug("pp ~[%u,%u]", self.left, self.right); alt t { EOF { if !self.scan_stack_empty { @@ -248,17 +248,17 @@ impl printer for printer { self.left = 0u; self.right = 0u; } else { self.advance_right(); } - #debug("pp BEGIN/buffer [%u,%u]/~", self.left, self.right); + #debug("pp BEGIN/buffer ~[%u,%u]", self.left, self.right); self.token[self.right] = t; self.size[self.right] = -self.right_total; self.scan_push(self.right); } END { if self.scan_stack_empty { - #debug("pp END/print [%u,%u]/~", self.left, self.right); + #debug("pp END/print ~[%u,%u]", self.left, self.right); self.print(t, 0); } else { - #debug("pp END/buffer [%u,%u]/~", self.left, self.right); + #debug("pp END/buffer ~[%u,%u]", self.left, self.right); self.advance_right(); self.token[self.right] = t; self.size[self.right] = -1; @@ -272,7 +272,7 @@ impl printer for printer { self.left = 0u; self.right = 0u; } else { self.advance_right(); } - #debug("pp BREAK/buffer [%u,%u]/~", self.left, self.right); + #debug("pp BREAK/buffer ~[%u,%u]", self.left, self.right); self.check_stack(0); self.scan_push(self.right); self.token[self.right] = t; @@ -281,10 +281,10 @@ impl printer for printer { } STRING(s, len) { if self.scan_stack_empty { - #debug("pp STRING/print [%u,%u]/~", self.left, self.right); + #debug("pp STRING/print ~[%u,%u]", self.left, self.right); self.print(t, len); } else { - #debug("pp STRING/buffer [%u,%u]/~", self.left, self.right); + #debug("pp STRING/buffer ~[%u,%u]", self.left, self.right); self.advance_right(); self.token[self.right] = t; self.size[self.right] = len; @@ -295,7 +295,7 @@ impl printer for printer { } } fn check_stream() { - #debug("check_stream [%u, %u]/~ with left_total=%d, right_total=%d", + #debug("check_stream ~[%u, %u] with left_total=%d, right_total=%d", self.left, self.right, self.left_total, self.right_total); if self.right_total - self.left_total > self.space { #debug("scan window is %d, longer than space on line (%d)", @@ -347,7 +347,7 @@ impl printer for printer { assert (self.right != self.left); } fn advance_left(++x: token, L: int) { - #debug("advnce_left [%u,%u]/~, sizeof(%u)=%d", self.left, self.right, + #debug("advnce_left ~[%u,%u], sizeof(%u)=%d", self.left, self.right, self.left, L); if L >= 0 { self.print(x, L); diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 02e9b8931f2..2680fa1a981 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -26,8 +26,8 @@ fn no_ann() -> pp_ann { type ps = @{s: pp::printer, cm: option<codemap>, - comments: option<[comments::cmnt]/~>, - literals: option<[comments::lit]/~>, + comments: option<~[comments::cmnt]>, + literals: option<~[comments::lit]>, mut cur_cmnt: uint, mut cur_lit: uint, boxes: dvec<pp::breaks>, @@ -46,8 +46,8 @@ fn end(s: ps) { fn rust_printer(writer: io::writer) -> ps { ret @{s: pp::mk_printer(writer, default_columns), cm: none::<codemap>, - comments: none::<[comments::cmnt]/~>, - literals: none::<[comments::lit]/~>, + comments: none::<~[comments::cmnt]>, + literals: none::<~[comments::lit]>, mut cur_cmnt: 0u, mut cur_lit: 0u, boxes: dvec(), @@ -100,16 +100,16 @@ fn item_to_str(i: @ast::item) -> str { ret to_str(i, print_item); } fn attr_to_str(i: ast::attribute) -> str { ret to_str(i, print_attribute); } -fn typarams_to_str(tps: [ast::ty_param]/~) -> str { +fn typarams_to_str(tps: ~[ast::ty_param]) -> str { ret to_str(tps, print_type_params) } fn path_to_str(&&p: @ast::path) -> str { - ret to_str(p, {|a,b|print_path(a, b, false)}); + ret to_str(p, |a,b| print_path(a, b, false)); } fn fun_to_str(decl: ast::fn_decl, name: ast::ident, - params: [ast::ty_param]/~) -> str { + params: ~[ast::ty_param]) -> str { let buffer = io::mem_buffer(); let s = rust_printer(io::mem_buffer_writer(buffer)); print_fn(s, decl, name, params); @@ -122,15 +122,15 @@ fn fun_to_str(decl: ast::fn_decl, name: ast::ident, #[test] fn test_fun_to_str() { let decl: ast::fn_decl = { - inputs: []/~, + inputs: ~[], output: @{id: 0, node: ast::ty_nil, span: ast_util::dummy_sp()}, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~ + constraints: ~[] }; - assert fun_to_str(decl, "a", []/~) == "fn a()"; + assert fun_to_str(decl, "a", ~[]) == "fn a()"; } fn block_to_str(blk: ast::blk) -> str { @@ -161,8 +161,8 @@ fn variant_to_str(var: ast::variant) -> str { fn test_variant_to_str() { let var = ast_util::respan(ast_util::dummy_sp(), { name: "principle_skinner", - attrs: []/~, - args: []/~, + attrs: ~[], + args: ~[], id: 0, disr_expr: none }); @@ -257,10 +257,10 @@ fn synth_comment(s: ps, text: str) { word(s.s, "*/"); } -fn commasep<IN>(s: ps, b: breaks, elts: [IN]/~, op: fn(ps, IN)) { +fn commasep<IN>(s: ps, b: breaks, elts: ~[IN], op: fn(ps, IN)) { box(s, 0u, b); let mut first = true; - for elts.each {|elt| + for elts.each |elt| { if first { first = false; } else { word_space(s, ","); } op(s, elt); } @@ -268,12 +268,12 @@ fn commasep<IN>(s: ps, b: breaks, elts: [IN]/~, op: fn(ps, IN)) { } -fn commasep_cmnt<IN>(s: ps, b: breaks, elts: [IN]/~, op: fn(ps, IN), +fn commasep_cmnt<IN>(s: ps, b: breaks, elts: ~[IN], op: fn(ps, IN), get_span: fn(IN) -> codemap::span) { box(s, 0u, b); let len = vec::len::<IN>(elts); let mut i = 0u; - for elts.each {|elt| + for elts.each |elt| { maybe_print_comment(s, get_span(elt).hi); op(s, elt); i += 1u; @@ -287,26 +287,26 @@ fn commasep_cmnt<IN>(s: ps, b: breaks, elts: [IN]/~, op: fn(ps, IN), end(s); } -fn commasep_exprs(s: ps, b: breaks, exprs: [@ast::expr]/~) { +fn commasep_exprs(s: ps, b: breaks, exprs: ~[@ast::expr]) { fn expr_span(&&expr: @ast::expr) -> codemap::span { ret expr.span; } commasep_cmnt(s, b, exprs, print_expr, expr_span); } -fn print_mod(s: ps, _mod: ast::_mod, attrs: [ast::attribute]/~) { +fn print_mod(s: ps, _mod: ast::_mod, attrs: ~[ast::attribute]) { print_inner_attributes(s, attrs); - for _mod.view_items.each {|vitem| + for _mod.view_items.each |vitem| { print_view_item(s, vitem); } - for _mod.items.each {|item| print_item(s, item); } + for _mod.items.each |item| { print_item(s, item); } } fn print_foreign_mod(s: ps, nmod: ast::foreign_mod, - attrs: [ast::attribute]/~) { + attrs: ~[ast::attribute]) { print_inner_attributes(s, attrs); - for nmod.view_items.each {|vitem| + for nmod.view_items.each |vitem| { print_view_item(s, vitem); } - for nmod.items.each {|item| print_foreign_item(s, item); } + for nmod.items.each |item| { print_foreign_item(s, item); } } fn print_region(s: ps, region: @ast::region) { @@ -481,7 +481,7 @@ fn print_item(s: ps, &&item: @ast::item) { end(s); } else { bopen(s); - for variants.each {|v| + for variants.each |v| { space_if_not_bol(s); maybe_print_comment(s, v.span.lo); print_outer_attributes(s, v.node.attrs); @@ -501,23 +501,23 @@ fn print_item(s: ps, &&item: @ast::item) { print_type_params(s, tps); if vec::len(ifaces) != 0u { word_space(s, ":"); - commasep(s, inconsistent, ifaces, {|s, p| - print_path(s, p.path, false)}); + commasep(s, inconsistent, ifaces, |s, p| + print_path(s, p.path, false)); } bopen(s); hardbreak_if_not_bol(s); maybe_print_comment(s, ctor.span.lo); head(s, "new"); - print_fn_args_and_ret(s, ctor.node.dec, []/~); + print_fn_args_and_ret(s, ctor.node.dec, ~[]); space(s.s); print_block(s, ctor.node.body); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { hardbreak_if_not_bol(s); maybe_print_comment(s, dtor.span.lo); head(s, "drop"); print_block(s, dtor.node.body); } - for items.each {|ci| + for items.each |ci| { /* FIXME (#1893): collect all private items and print them in a single "priv" section @@ -565,7 +565,7 @@ fn print_item(s: ps, &&item: @ast::item) { print_region_param(s, rp); print_type_params(s, tps); space(s.s); - option::iter(ifce, {|p| + option::iter(ifce, |p| { word_nbsp(s, "of"); print_path(s, p.path, false); space(s.s); @@ -574,7 +574,7 @@ fn print_item(s: ps, &&item: @ast::item) { print_type(s, ty); space(s.s); bopen(s); - for methods.each {|meth| + for methods.each |meth| { print_method(s, meth); } bclose(s, item.span); @@ -586,7 +586,7 @@ fn print_item(s: ps, &&item: @ast::item) { print_type_params(s, tps); word(s.s, " "); bopen(s); - for methods.each {|meth| print_ty_method(s, meth); } + for methods.each |meth| { print_ty_method(s, meth); } bclose(s, item.span); } } @@ -630,9 +630,9 @@ fn print_method(s: ps, meth: @ast::method) { print_block_with_attrs(s, meth.body, meth.attrs); } -fn print_outer_attributes(s: ps, attrs: [ast::attribute]/~) { +fn print_outer_attributes(s: ps, attrs: ~[ast::attribute]) { let mut count = 0; - for attrs.each {|attr| + for attrs.each |attr| { alt attr.node.style { ast::attr_outer { print_attribute(s, attr); count += 1; } _ {/* fallthrough */ } @@ -641,9 +641,9 @@ fn print_outer_attributes(s: ps, attrs: [ast::attribute]/~) { if count > 0 { hardbreak_if_not_bol(s); } } -fn print_inner_attributes(s: ps, attrs: [ast::attribute]/~) { +fn print_inner_attributes(s: ps, attrs: ~[ast::attribute]) { let mut count = 0; - for attrs.each {|attr| + for attrs.each |attr| { alt attr.node.style { ast::attr_inner { print_attribute(s, attr); @@ -697,7 +697,7 @@ fn print_block(s: ps, blk: ast::blk) { print_possibly_embedded_block(s, blk, block_normal, indent_unit); } -fn print_block_with_attrs(s: ps, blk: ast::blk, attrs: [ast::attribute]/~) { +fn print_block_with_attrs(s: ps, blk: ast::blk, attrs: ~[ast::attribute]) { print_possibly_embedded_block_(s, blk, block_normal, indent_unit, attrs); } @@ -706,11 +706,11 @@ enum embed_type { block_macro, block_block_fn, block_normal, } fn print_possibly_embedded_block(s: ps, blk: ast::blk, embedded: embed_type, indented: uint) { print_possibly_embedded_block_( - s, blk, embedded, indented, []/~); + s, blk, embedded, indented, ~[]); } fn print_possibly_embedded_block_(s: ps, blk: ast::blk, embedded: embed_type, - indented: uint, attrs: [ast::attribute]/~) { + indented: uint, attrs: ~[ast::attribute]) { alt blk.node.rules { ast::unchecked_blk { word(s.s, "unchecked"); } ast::unsafe_blk { word(s.s, "unsafe"); } @@ -727,8 +727,8 @@ fn print_possibly_embedded_block_(s: ps, blk: ast::blk, embedded: embed_type, print_inner_attributes(s, attrs); - for blk.node.view_items.each {|vi| print_view_item(s, vi); } - for blk.node.stmts.each {|st| + for blk.node.view_items.each |vi| { print_view_item(s, vi); } + for blk.node.stmts.each |st| { print_stmt(s, *st); } alt blk.node.expr { @@ -804,7 +804,7 @@ fn print_mac(s: ps, m: ast::mac) { some(@{node: ast::expr_vec(_, _), _}) { } _ { word(s.s, " "); } } - option::iter(arg, {|a|print_expr(s, a)}); + option::iter(arg, |a| print_expr(s, a)); // FIXME: extension 'body' (#2339) } ast::mac_embed_type(ty) { @@ -887,6 +887,7 @@ fn print_expr(s: ps, &&expr: @ast::expr) { let blk_arg = vec::pop(base_args); alt blk_arg.node { ast::expr_loop_body(_) { word_nbsp(s, "for"); } + ast::expr_do_body(_) { word_nbsp(s, "do"); } _ {} } some(blk_arg) @@ -950,12 +951,12 @@ fn print_expr(s: ps, &&expr: @ast::expr) { print_maybe_parens_discrim(s, expr); space(s.s); bopen(s); - for arms.each {|arm| + for arms.each |arm| { space(s.s); cbox(s, alt_indent_unit); ibox(s, 0u); let mut first = true; - for arm.pats.each {|p| + for arm.pats.each |p| { if first { first = false; } else { space(s.s); word_space(s, "|"); } @@ -983,13 +984,12 @@ fn print_expr(s: ps, &&expr: @ast::expr) { print_block(s, body); } ast::expr_fn_block(decl, body, cap_clause) { - // containing cbox, will be closed by print-block at } - cbox(s, indent_unit); - // head-box, will be closed by print-block at start - ibox(s, 0u); - word(s.s, "{"); print_fn_block_args(s, decl, *cap_clause); - print_possibly_embedded_block(s, body, block_block_fn, indent_unit); + // The parser always adds an extra implicit block around lambdas + assert body.node.stmts.is_empty(); + assert body.node.expr.is_some(); + space(s.s); + print_expr(s, body.node.expr.get()); } ast::expr_loop_body(body) { print_expr(s, body); @@ -1143,8 +1143,8 @@ fn print_decl(s: ps, decl: @ast::decl) { word_nbsp(s, "let"); // if any are mut, all are mut - if vec::any(locs) {|l| l.node.is_mutbl } { - assert vec::all(locs) {|l| l.node.is_mutbl }; + if vec::any(locs, |l| l.node.is_mutbl) { + assert vec::all(locs, |l| l.node.is_mutbl); word_nbsp(s, "mut"); } @@ -1184,7 +1184,7 @@ fn print_path(s: ps, &&path: @ast::path, colons_before_params: bool) { maybe_print_comment(s, path.span.lo); if path.global { word(s.s, "::"); } let mut first = true; - for path.idents.each {|id| + for path.idents.each |id| { if first { first = false; } else { word(s.s, "::"); } word(s.s, *id); } @@ -1271,22 +1271,22 @@ fn print_pat(s: ps, &&pat: @ast::pat) { } fn print_fn(s: ps, decl: ast::fn_decl, name: ast::ident, - typarams: [ast::ty_param]/~) { + typarams: ~[ast::ty_param]) { alt decl.purity { ast::impure_fn { head(s, "fn") } _ { head(s, purity_to_str(decl.purity) + " fn") } } word(s.s, *name); print_type_params(s, typarams); - print_fn_args_and_ret(s, decl, []/~); + print_fn_args_and_ret(s, decl, ~[]); } fn print_fn_args(s: ps, decl: ast::fn_decl, - cap_items: [ast::capture_item]/~) { + cap_items: ~[ast::capture_item]) { commasep(s, inconsistent, decl.inputs, print_arg); if cap_items.is_not_empty() { let mut first = decl.inputs.is_empty(); - for cap_items.each { |cap_item| + for cap_items.each |cap_item| { if first { first = false; } else { word_space(s, ","); } if cap_item.is_move { word_nbsp(s, "move") } else { word_nbsp(s, "copy") } @@ -1296,11 +1296,11 @@ fn print_fn_args(s: ps, decl: ast::fn_decl, } fn print_fn_args_and_ret(s: ps, decl: ast::fn_decl, - cap_items: [ast::capture_item]/~) { + cap_items: ~[ast::capture_item]) { popen(s); print_fn_args(s, decl, cap_items); pclose(s); - word(s.s, constrs_str(decl.constraints, {|c| + word(s.s, constrs_str(decl.constraints, |c| { ast_fn_constr_to_str(decl, c) })); @@ -1313,7 +1313,7 @@ fn print_fn_args_and_ret(s: ps, decl: ast::fn_decl, } fn print_fn_block_args(s: ps, decl: ast::fn_decl, - cap_items: [ast::capture_item]/~) { + cap_items: ~[ast::capture_item]) { word(s.s, "|"); print_fn_args(s, decl, cap_items); word(s.s, "|"); @@ -1341,10 +1341,10 @@ fn print_arg_mode(s: ps, m: ast::mode) { if ms != "" { word(s.s, ms); } } -fn print_bounds(s: ps, bounds: @[ast::ty_param_bound]/~) { +fn print_bounds(s: ps, bounds: @~[ast::ty_param_bound]) { if vec::len(*bounds) > 0u { word(s.s, ":"); - for vec::each(*bounds) {|bound| + for vec::each(*bounds) |bound| { nbsp(s); alt bound { ast::bound_copy { word(s.s, "copy"); } @@ -1363,7 +1363,7 @@ fn print_region_param(s: ps, rp: ast::region_param) { } } -fn print_type_params(s: ps, &¶ms: [ast::ty_param]/~) { +fn print_type_params(s: ps, &¶ms: ~[ast::ty_param]) { if vec::len(params) > 0u { word(s.s, "<"); fn printParam(s: ps, param: ast::ty_param) { @@ -1412,7 +1412,7 @@ fn print_view_path(s: ps, &&vp: @ast::view_path) { ast::view_path_list(path, idents, _) { print_path(s, path, false); word(s.s, "::{"); - commasep(s, inconsistent, idents) {|s, w| + do commasep(s, inconsistent, idents) |s, w| { word(s.s, *w.node.name) } word(s.s, "}"); @@ -1420,7 +1420,7 @@ fn print_view_path(s: ps, &&vp: @ast::view_path) { } } -fn print_view_paths(s: ps, vps: [@ast::view_path]/~) { +fn print_view_paths(s: ps, vps: ~[@ast::view_path]) { commasep(s, inconsistent, vps, print_view_path); } @@ -1492,7 +1492,7 @@ fn print_arg(s: ps, input: ast::arg) { fn print_ty_fn(s: ps, opt_proto: option<ast::proto>, decl: ast::fn_decl, id: option<ast::ident>, - tps: option<[ast::ty_param]/~>) { + tps: option<~[ast::ty_param]>) { ibox(s, indent_unit); word(s.s, opt_proto_to_str(opt_proto)); alt id { some(id) { word(s.s, " "); word(s.s, *id); } _ { } } @@ -1635,7 +1635,7 @@ fn print_comment(s: ps, cmnt: comments::cmnt) { } comments::isolated { pprust::hardbreak_if_not_bol(s); - for cmnt.lines.each {|line| + for cmnt.lines.each |line| { // Don't print empty lines because they will end up as trailing // whitespace if str::is_not_empty(line) { word(s.s, line); } @@ -1649,7 +1649,7 @@ fn print_comment(s: ps, cmnt: comments::cmnt) { hardbreak(s.s); } else { ibox(s, 0u); - for cmnt.lines.each {|line| + for cmnt.lines.each |line| { if str::is_not_empty(line) { word(s.s, line); } hardbreak(s.s); } @@ -1695,11 +1695,11 @@ fn next_comment(s: ps) -> option<comments::cmnt> { } fn constr_args_to_str<T>(f: fn@(T) -> str, - args: [@ast::sp_constr_arg<T>]/~) -> + args: ~[@ast::sp_constr_arg<T>]) -> str { let mut comma = false; let mut s = "("; - for args.each {|a| + for args.each |a| { if comma { s += ", "; } else { comma = true; } s += constr_arg_to_str::<T>(f, a.node); } @@ -1727,7 +1727,7 @@ fn ast_ty_fn_constr_to_str(&&c: @ast::constr) -> str { } fn ast_fn_constr_to_str(decl: ast::fn_decl, &&c: @ast::constr) -> str { - let arg_to_str = {|a|fn_arg_idx_to_str(decl, a)}; + let arg_to_str = |a| fn_arg_idx_to_str(decl, a); ret path_to_str(c.node.path) + constr_args_to_str(arg_to_str, c.node.args); } @@ -1740,9 +1740,9 @@ fn ty_constr_to_str(&&c: @ast::ty_constr) -> str { c.node.args); } -fn constrs_str<T>(constrs: [T]/~, elt: fn(T) -> str) -> str { +fn constrs_str<T>(constrs: ~[T], elt: fn(T) -> str) -> str { let mut s = "", colon = true; - for constrs.each {|c| + for constrs.each |c| { if colon { s += " : "; colon = false; } else { s += ", "; } s += elt(c); } diff --git a/src/libsyntax/visit.rs b/src/libsyntax/visit.rs index dad722d2182..48f2e57de1c 100644 --- a/src/libsyntax/visit.rs +++ b/src/libsyntax/visit.rs @@ -13,13 +13,13 @@ import codemap::span; enum vt<E> { mk_vt(visitor<E>), } enum fn_kind { - fk_item_fn(ident, [ty_param]/~), //< an item declared with fn() - fk_method(ident, [ty_param]/~, @method), + fk_item_fn(ident, ~[ty_param]), //< an item declared with fn() + fk_method(ident, ~[ty_param], @method), fk_anon(proto, capture_clause), //< an anonymous function like fn@(...) fk_fn_block(capture_clause), //< a block {||...} - fk_ctor(ident, [ty_param]/~, node_id /* self id */, + fk_ctor(ident, ~[ty_param], node_id /* self id */, def_id /* parent class id */), // class constructor - fk_dtor([ty_param]/~, node_id /* self id */, + fk_dtor(~[ty_param], node_id /* self id */, def_id /* parent class id */) // class destructor } @@ -33,13 +33,13 @@ fn name_of_fn(fk: fn_kind) -> ident { } } -fn tps_of_fn(fk: fn_kind) -> [ty_param]/~ { +fn tps_of_fn(fk: fn_kind) -> ~[ty_param] { alt fk { fk_item_fn(_, tps) | fk_method(_, tps, _) | fk_ctor(_, tps, _, _) | fk_dtor(tps, _, _) { /* FIXME (#2543) */ copy tps } - fk_anon(*) | fk_fn_block(*) { []/~ } + fk_anon(*) | fk_fn_block(*) { ~[] } } } @@ -58,28 +58,28 @@ type visitor<E> = visit_decl: fn@(@decl, E, vt<E>), visit_expr: fn@(@expr, E, vt<E>), visit_ty: fn@(@ty, E, vt<E>), - visit_ty_params: fn@([ty_param]/~, E, vt<E>), + visit_ty_params: fn@(~[ty_param], E, vt<E>), visit_constr: fn@(@path, span, node_id, E, vt<E>), visit_fn: fn@(fn_kind, fn_decl, blk, span, node_id, E, vt<E>), visit_class_item: fn@(@class_member, E, vt<E>)}; fn default_visitor<E>() -> visitor<E> { - ret @{visit_mod: {|a,b,c,d,e|visit_mod::<E>(a, b, c, d, e)}, - visit_view_item: {|a,b,c|visit_view_item::<E>(a, b, c)}, - visit_foreign_item: {|a,b,c|visit_foreign_item::<E>(a, b, c)}, - visit_item: {|a,b,c|visit_item::<E>(a, b, c)}, - visit_local: {|a,b,c|visit_local::<E>(a, b, c)}, - visit_block: {|a,b,c|visit_block::<E>(a, b, c)}, - visit_stmt: {|a,b,c|visit_stmt::<E>(a, b, c)}, - visit_arm: {|a,b,c|visit_arm::<E>(a, b, c)}, - visit_pat: {|a,b,c|visit_pat::<E>(a, b, c)}, - visit_decl: {|a,b,c|visit_decl::<E>(a, b, c)}, - visit_expr: {|a,b,c|visit_expr::<E>(a, b, c)}, - visit_ty: {|a,b,c|skip_ty::<E>(a, b, c)}, - visit_ty_params: {|a,b,c|visit_ty_params::<E>(a, b, c)}, - visit_constr: {|a,b,c,d,e|visit_constr::<E>(a, b, c, d, e)}, - visit_fn: {|a,b,c,d,e,f,g|visit_fn::<E>(a, b, c, d, e, f, g)}, - visit_class_item: {|a,b,c|visit_class_item::<E>(a, b, c)}}; + ret @{visit_mod: |a,b,c,d,e|visit_mod::<E>(a, b, c, d, e), + visit_view_item: |a,b,c|visit_view_item::<E>(a, b, c), + visit_foreign_item: |a,b,c|visit_foreign_item::<E>(a, b, c), + visit_item: |a,b,c|visit_item::<E>(a, b, c), + visit_local: |a,b,c|visit_local::<E>(a, b, c), + visit_block: |a,b,c|visit_block::<E>(a, b, c), + visit_stmt: |a,b,c|visit_stmt::<E>(a, b, c), + visit_arm: |a,b,c|visit_arm::<E>(a, b, c), + visit_pat: |a,b,c|visit_pat::<E>(a, b, c), + visit_decl: |a,b,c|visit_decl::<E>(a, b, c), + visit_expr: |a,b,c|visit_expr::<E>(a, b, c), + visit_ty: |a,b,c|skip_ty::<E>(a, b, c), + visit_ty_params: |a,b,c|visit_ty_params::<E>(a, b, c), + visit_constr: |a,b,c,d,e|visit_constr::<E>(a, b, c, d, e), + visit_fn: |a,b,c,d,e,f,g|visit_fn::<E>(a, b, c, d, e, f, g), + visit_class_item: |a,b,c|visit_class_item::<E>(a, b, c)}; } fn visit_crate<E>(c: crate, e: E, v: vt<E>) { @@ -90,7 +90,7 @@ fn visit_crate_directive<E>(cd: @crate_directive, e: E, v: vt<E>) { alt cd.node { cdir_src_mod(_, _) { } cdir_dir_mod(_, cdirs, _) { - for cdirs.each {|cdir| + for cdirs.each |cdir| { visit_crate_directive(cdir, e, v); } } @@ -100,8 +100,8 @@ fn visit_crate_directive<E>(cd: @crate_directive, e: E, v: vt<E>) { } fn visit_mod<E>(m: _mod, _sp: span, _id: node_id, e: E, v: vt<E>) { - for m.view_items.each {|vi| v.visit_view_item(vi, e, v); } - for m.items.each {|i| v.visit_item(i, e, v); } + for m.view_items.each |vi| { v.visit_view_item(vi, e, v); } + for m.items.each |i| { v.visit_item(i, e, v); } } fn visit_view_item<E>(_vi: @view_item, _e: E, _v: vt<E>) { } @@ -122,8 +122,8 @@ fn visit_item<E>(i: @item, e: E, v: vt<E>) { } item_mod(m) { v.visit_mod(m, i.span, i.id, e, v); } item_foreign_mod(nm) { - for nm.view_items.each {|vi| v.visit_view_item(vi, e, v); } - for nm.items.each {|ni| v.visit_foreign_item(ni, e, v); } + for nm.view_items.each |vi| { v.visit_view_item(vi, e, v); } + for nm.items.each |ni| { v.visit_foreign_item(ni, e, v); } } item_ty(t, tps, rp) { v.visit_ty(t, e, v); @@ -131,34 +131,34 @@ fn visit_item<E>(i: @item, e: E, v: vt<E>) { } item_enum(variants, tps, _) { v.visit_ty_params(tps, e, v); - for variants.each {|vr| - for vr.node.args.each {|va| v.visit_ty(va.ty, e, v); } + for variants.each |vr| { + for vr.node.args.each |va| { v.visit_ty(va.ty, e, v); } } } item_impl(tps, _rp, ifce, ty, methods) { v.visit_ty_params(tps, e, v); - option::iter(ifce, {|p| visit_path(p.path, e, v)}); + option::iter(ifce, |p| visit_path(p.path, e, v)); v.visit_ty(ty, e, v); - for methods.each {|m| + for methods.each |m| { visit_method_helper(m, e, v) } } item_class(tps, ifaces, members, ctor, m_dtor, _) { v.visit_ty_params(tps, e, v); - for members.each {|m| + for members.each |m| { v.visit_class_item(m, e, v); } - for ifaces.each {|p| visit_path(p.path, e, v); } + for ifaces.each |p| { visit_path(p.path, e, v); } visit_class_ctor_helper(ctor, i.ident, tps, ast_util::local_def(i.id), e, v); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { visit_class_dtor_helper(dtor, tps, ast_util::local_def(i.id), e, v)}; } item_iface(tps, _rp, methods) { v.visit_ty_params(tps, e, v); - for methods.each {|m| - for m.decl.inputs.each {|a| v.visit_ty(a.ty, e, v); } + for methods.each |m| { + for m.decl.inputs.each |a| { v.visit_ty(a.ty, e, v); } v.visit_ty_params(m.tps, e, v); v.visit_ty(m.decl.output, e, v); } @@ -186,12 +186,12 @@ fn visit_ty<E>(t: @ty, e: E, v: vt<E>) { v.visit_ty(mt.ty, e, v); } ty_rec(flds) { - for flds.each {|f| v.visit_ty(f.node.mt.ty, e, v); } + for flds.each |f| { v.visit_ty(f.node.mt.ty, e, v); } } - ty_tup(ts) { for ts.each {|tt| v.visit_ty(tt, e, v); } } + ty_tup(ts) { for ts.each |tt| { v.visit_ty(tt, e, v); } } ty_fn(_, decl) { - for decl.inputs.each {|a| v.visit_ty(a.ty, e, v); } - for decl.constraints.each {|c| + for decl.inputs.each |a| { v.visit_ty(a.ty, e, v); } + for decl.constraints.each |c| { v.visit_constr(c.node.path, c.span, c.node.id, e, v); } v.visit_ty(decl.output, e, v); @@ -202,7 +202,7 @@ fn visit_ty<E>(t: @ty, e: E, v: vt<E>) { } ty_constr(t, cs) { v.visit_ty(t, e, v); - for cs.each {|tc| + for cs.each |tc| { v.visit_constr(tc.node.path, tc.span, tc.node.id, e, v); } } @@ -220,26 +220,26 @@ fn visit_constr<E>(_operator: @path, _sp: span, _id: node_id, _e: E, } fn visit_path<E>(p: @path, e: E, v: vt<E>) { - for p.types.each {|tp| v.visit_ty(tp, e, v); } + for p.types.each |tp| { v.visit_ty(tp, e, v); } } fn visit_pat<E>(p: @pat, e: E, v: vt<E>) { alt p.node { pat_enum(path, children) { visit_path(path, e, v); - option::iter(children) {|children| - for children.each {|child| v.visit_pat(child, e, v); }} + do option::iter(children) |children| { + for children.each |child| { v.visit_pat(child, e, v); }} } pat_rec(fields, _) { - for fields.each {|f| v.visit_pat(f.pat, e, v); } + for fields.each |f| { v.visit_pat(f.pat, e, v); } } - pat_tup(elts) { for elts.each {|elt| v.visit_pat(elt, e, v); } } + pat_tup(elts) { for elts.each |elt| { v.visit_pat(elt, e, v); } } pat_box(inner) | pat_uniq(inner) { v.visit_pat(inner, e, v); } pat_ident(path, inner) { visit_path(path, e, v); - option::iter(inner) {|subpat| v.visit_pat(subpat, e, v)}; + do option::iter(inner) |subpat| { v.visit_pat(subpat, e, v)}; } pat_lit(ex) { v.visit_expr(ex, e, v); } pat_range(e1, e2) { v.visit_expr(e1, e, v); v.visit_expr(e2, e, v); } @@ -256,9 +256,9 @@ fn visit_foreign_item<E>(ni: @foreign_item, e: E, v: vt<E>) { } } -fn visit_ty_params<E>(tps: [ty_param]/~, e: E, v: vt<E>) { - for tps.each {|tp| - for vec::each(*tp.bounds) {|bound| +fn visit_ty_params<E>(tps: ~[ty_param], e: E, v: vt<E>) { + for tps.each |tp| { + for vec::each(*tp.bounds) |bound| { alt bound { bound_iface(t) { v.visit_ty(t, e, v); } bound_copy | bound_send | bound_const { } @@ -268,8 +268,8 @@ fn visit_ty_params<E>(tps: [ty_param]/~, e: E, v: vt<E>) { } fn visit_fn_decl<E>(fd: fn_decl, e: E, v: vt<E>) { - for fd.inputs.each {|a| v.visit_ty(a.ty, e, v); } - for fd.constraints.each {|c| + for fd.inputs.each |a| { v.visit_ty(a.ty, e, v); } + for fd.constraints.each |c| { v.visit_constr(c.node.path, c.span, c.node.id, e, v); } v.visit_ty(fd.output, e, v); @@ -286,7 +286,7 @@ fn visit_method_helper<E>(m: @method, e: E, v: vt<E>) { } // Similar logic to the comment on visit_method_helper - Tim -fn visit_class_ctor_helper<E>(ctor: class_ctor, nm: ident, tps: [ty_param]/~, +fn visit_class_ctor_helper<E>(ctor: class_ctor, nm: ident, tps: ~[ty_param], parent_id: def_id, e: E, v: vt<E>) { v.visit_fn(fk_ctor(/* FIXME (#2543) */ copy nm, /* FIXME (#2543) */ copy tps, @@ -295,7 +295,7 @@ fn visit_class_ctor_helper<E>(ctor: class_ctor, nm: ident, tps: [ty_param]/~, } -fn visit_class_dtor_helper<E>(dtor: class_dtor, tps: [ty_param]/~, +fn visit_class_dtor_helper<E>(dtor: class_dtor, tps: ~[ty_param], parent_id: def_id, e: E, v: vt<E>) { v.visit_fn(fk_dtor(/* FIXME (#2543) */ copy tps, dtor.node.self_id, parent_id), ast_util::dtor_dec(), @@ -311,8 +311,8 @@ fn visit_fn<E>(fk: fn_kind, decl: fn_decl, body: blk, _sp: span, } fn visit_block<E>(b: ast::blk, e: E, v: vt<E>) { - for b.node.view_items.each {|vi| v.visit_view_item(vi, e, v); } - for b.node.stmts.each {|s| v.visit_stmt(s, e, v); } + for b.node.view_items.each |vi| { v.visit_view_item(vi, e, v); } + for b.node.stmts.each |s| { v.visit_stmt(s, e, v); } visit_expr_opt(b.node.expr, e, v); } @@ -327,7 +327,7 @@ fn visit_stmt<E>(s: @stmt, e: E, v: vt<E>) { fn visit_decl<E>(d: @decl, e: E, v: vt<E>) { alt d.node { decl_local(locs) { - for locs.each {|loc| v.visit_local(loc, e, v); } + for locs.each |loc| { v.visit_local(loc, e, v); } } decl_item(it) { v.visit_item(it, e, v); } } @@ -337,14 +337,14 @@ fn visit_expr_opt<E>(eo: option<@expr>, e: E, v: vt<E>) { alt eo { none { } some(ex) { v.visit_expr(ex, e, v); } } } -fn visit_exprs<E>(exprs: [@expr]/~, e: E, v: vt<E>) { - for exprs.each {|ex| v.visit_expr(ex, e, v); } +fn visit_exprs<E>(exprs: ~[@expr], e: E, v: vt<E>) { + for exprs.each |ex| { v.visit_expr(ex, e, v); } } fn visit_mac<E>(m: mac, e: E, v: vt<E>) { alt m.node { ast::mac_invoc(pth, arg, body) { - option::map(arg) {|arg| v.visit_expr(arg, e, v)}; } + option::map(arg, |arg| v.visit_expr(arg, e, v)); } ast::mac_invoc_tt(pth, tt) { /* no user-serviceable parts inside */ } ast::mac_embed_type(ty) { v.visit_ty(ty, e, v); } ast::mac_embed_block(blk) { v.visit_block(blk, e, v); } @@ -363,10 +363,10 @@ fn visit_expr<E>(ex: @expr, e: E, v: vt<E>) { expr_vstore(x, _) { v.visit_expr(x, e, v); } expr_vec(es, _) { visit_exprs(es, e, v); } expr_rec(flds, base) { - for flds.each {|f| v.visit_expr(f.node.expr, e, v); } + for flds.each |f| { v.visit_expr(f.node.expr, e, v); } visit_expr_opt(base, e, v); } - expr_tup(elts) { for elts.each {|el| v.visit_expr(el, e, v); } } + expr_tup(elts) { for elts.each |el| { v.visit_expr(el, e, v); } } expr_call(callee, args, _) { visit_exprs(args, e, v); v.visit_expr(callee, e, v); @@ -393,7 +393,7 @@ fn visit_expr<E>(ex: @expr, e: E, v: vt<E>) { expr_loop(b) { v.visit_block(b, e, v); } expr_alt(x, arms, _) { v.visit_expr(x, e, v); - for arms.each {|a| v.visit_arm(a, e, v); } + for arms.each |a| { v.visit_arm(a, e, v); } } expr_fn(proto, decl, body, cap_clause) { v.visit_fn(fk_anon(proto, cap_clause), decl, body, @@ -414,7 +414,7 @@ fn visit_expr<E>(ex: @expr, e: E, v: vt<E>) { } expr_field(x, _, tys) { v.visit_expr(x, e, v); - for tys.each {|tp| v.visit_ty(tp, e, v); } + for tys.each |tp| { v.visit_ty(tp, e, v); } } expr_index(a, b) { v.visit_expr(a, e, v); v.visit_expr(b, e, v); } expr_path(p) { visit_path(p, e, v); } @@ -431,7 +431,7 @@ fn visit_expr<E>(ex: @expr, e: E, v: vt<E>) { } fn visit_arm<E>(a: arm, e: E, v: vt<E>) { - for a.pats.each {|p| v.visit_pat(p, e, v); } + for a.pats.each |p| { v.visit_pat(p, e, v); } visit_expr_opt(a.guard, e, v); v.visit_block(a.body, e, v); } @@ -454,7 +454,7 @@ type simple_visitor = visit_decl: fn@(@decl), visit_expr: fn@(@expr), visit_ty: fn@(@ty), - visit_ty_params: fn@([ty_param]/~), + visit_ty_params: fn@(~[ty_param]), visit_constr: fn@(@path, span, node_id), visit_fn: fn@(fn_kind, fn_decl, blk, span, node_id), visit_class_item: fn@(@class_member)}; @@ -474,7 +474,7 @@ fn default_simple_visitor() -> simple_visitor { visit_decl: fn@(_d: @decl) { }, visit_expr: fn@(_e: @expr) { }, visit_ty: simple_ignore_ty, - visit_ty_params: fn@(_ps: [ty_param]/~) {}, + visit_ty_params: fn@(_ps: ~[ty_param]) {}, visit_constr: fn@(_p: @path, _sp: span, _id: node_id) { }, visit_fn: fn@(_fk: fn_kind, _d: fn_decl, _b: blk, _sp: span, _id: node_id) { }, @@ -533,8 +533,8 @@ fn mk_simple_visitor(v: simple_visitor) -> vt<()> { f(ty); visit_ty(ty, e, v); } - fn v_ty_params(f: fn@([ty_param]/~), - ps: [ty_param]/~, + fn v_ty_params(f: fn@(~[ty_param]), + ps: ~[ty_param], &&e: (), v: vt<()>) { f(ps); visit_ty_params(ps, e, v); @@ -551,9 +551,9 @@ fn mk_simple_visitor(v: simple_visitor) -> vt<()> { visit_fn(fk, decl, body, sp, id, e, v); } let visit_ty = if v.visit_ty == simple_ignore_ty { - {|a,b,c| skip_ty(a, b, c)} + |a,b,c| skip_ty(a, b, c) } else { - {|a,b,c| v_ty(v.visit_ty, a, b, c)} + |a,b,c| v_ty(v.visit_ty, a, b, c) }; fn v_class_item(f: fn@(@class_member), cm: @class_member, &&e: (), @@ -561,33 +561,28 @@ fn mk_simple_visitor(v: simple_visitor) -> vt<()> { f(cm); visit_class_item(cm, e, v); } - ret mk_vt(@{visit_mod: {|a,b,c,d,e|v_mod(v.visit_mod, a, b, c, d, e)}, - visit_view_item: {|a,b,c| - v_view_item(v.visit_view_item, a, b, c) - }, + ret mk_vt(@{visit_mod: |a,b,c,d,e|v_mod(v.visit_mod, a, b, c, d, e), + visit_view_item: |a,b,c| + v_view_item(v.visit_view_item, a, b, c), visit_foreign_item: - {|a,b,c|v_foreign_item(v.visit_foreign_item, a, b, c)}, - visit_item: {|a,b,c|v_item(v.visit_item, a, b, c)}, - visit_local: {|a,b,c|v_local(v.visit_local, a, b, c)}, - visit_block: {|a,b,c|v_block(v.visit_block, a, b, c)}, - visit_stmt: {|a,b,c|v_stmt(v.visit_stmt, a, b, c)}, - visit_arm: {|a,b,c|v_arm(v.visit_arm, a, b, c)}, - visit_pat: {|a,b,c|v_pat(v.visit_pat, a, b, c)}, - visit_decl: {|a,b,c|v_decl(v.visit_decl, a, b, c)}, - visit_expr: {|a,b,c|v_expr(v.visit_expr, a, b, c)}, + |a,b,c|v_foreign_item(v.visit_foreign_item, a, b, c), + visit_item: |a,b,c|v_item(v.visit_item, a, b, c), + visit_local: |a,b,c|v_local(v.visit_local, a, b, c), + visit_block: |a,b,c|v_block(v.visit_block, a, b, c), + visit_stmt: |a,b,c|v_stmt(v.visit_stmt, a, b, c), + visit_arm: |a,b,c|v_arm(v.visit_arm, a, b, c), + visit_pat: |a,b,c|v_pat(v.visit_pat, a, b, c), + visit_decl: |a,b,c|v_decl(v.visit_decl, a, b, c), + visit_expr: |a,b,c|v_expr(v.visit_expr, a, b, c), visit_ty: visit_ty, - visit_ty_params: {|a,b,c| - v_ty_params(v.visit_ty_params, a, b, c) - }, - visit_constr: {|a,b,c,d,e| - v_constr(v.visit_constr, a, b, c, d, e) - }, - visit_fn: {|a,b,c,d,e,f,g| - v_fn(v.visit_fn, a, b, c, d, e, f, g) - }, - visit_class_item: {|a,b,c| + visit_ty_params: |a,b,c| + v_ty_params(v.visit_ty_params, a, b, c), + visit_constr: |a,b,c,d,e| + v_constr(v.visit_constr, a, b, c, d, e), + visit_fn: |a,b,c,d,e,f,g| + v_fn(v.visit_fn, a, b, c, d, e, f, g), + visit_class_item: |a,b,c| v_class_item(v.visit_class_item, a, b, c) - } }); } diff --git a/src/rustc/back/link.rs b/src/rustc/back/link.rs index ac68287f8b7..5603300e040 100644 --- a/src/rustc/back/link.rs +++ b/src/rustc/back/link.rs @@ -69,18 +69,16 @@ mod write { output_type_bitcode { if opts.optimize != 0u { let filename = mk_intermediate_name(output, "no-opt.bc"); - str::as_c_str(filename, - {|buf| - llvm::LLVMWriteBitcodeToFile(llmod, buf) - }); + str::as_c_str(filename, |buf| { + llvm::LLVMWriteBitcodeToFile(llmod, buf) + }); } } _ { let filename = mk_intermediate_name(output, "bc"); - str::as_c_str(filename, - {|buf| - llvm::LLVMWriteBitcodeToFile(llmod, buf) - }); + str::as_c_str(filename, |buf| { + llvm::LLVMWriteBitcodeToFile(llmod, buf) + }); } } } @@ -151,18 +149,17 @@ mod write { let filename = mk_intermediate_name(output, "opt.bc"); llvm::LLVMRunPassManager(pm.llpm, llmod); - str::as_c_str(filename, - {|buf| - llvm::LLVMWriteBitcodeToFile(llmod, buf) - }); + str::as_c_str(filename, |buf| { + llvm::LLVMWriteBitcodeToFile(llmod, buf) + }); pm = mk_pass_manager(); // Save the assembly file if -S is used if opts.output_type == output_type_assembly { let _: () = str::as_c_str( sess.targ_cfg.target_strs.target_triple, - {|buf_t| - str::as_c_str(output, {|buf_o| + |buf_t| { + str::as_c_str(output, |buf_o| { llvm::LLVMRustWriteOutputFile( pm.llpm, llmod, @@ -170,7 +167,9 @@ mod write { buf_o, lib::llvm::AssemblyFile as c_uint, CodeGenOptLevel, - true)})}); + true) + }) + }); } @@ -178,37 +177,39 @@ mod write { // This .o is needed when an exe is built if opts.output_type == output_type_object || opts.output_type == output_type_exe { - let _: () = - str::as_c_str( - sess.targ_cfg.target_strs.target_triple, - {|buf_t| - str::as_c_str(output, {|buf_o| - llvm::LLVMRustWriteOutputFile( - pm.llpm, - llmod, - buf_t, - buf_o, - lib::llvm::ObjectFile as c_uint, - CodeGenOptLevel, - true)})}); - } - } else { - // If we aren't saving temps then just output the file - // type corresponding to the '-c' or '-S' flag used - - let _: () = - str::as_c_str( + let _: () = str::as_c_str( sess.targ_cfg.target_strs.target_triple, - {|buf_t| - str::as_c_str(output, {|buf_o| + |buf_t| { + str::as_c_str(output, |buf_o| { llvm::LLVMRustWriteOutputFile( pm.llpm, llmod, buf_t, buf_o, - FileType as c_uint, + lib::llvm::ObjectFile as c_uint, CodeGenOptLevel, - true)})}); + true) + }) + }); + } + } else { + // If we aren't saving temps then just output the file + // type corresponding to the '-c' or '-S' flag used + + let _: () = str::as_c_str( + sess.targ_cfg.target_strs.target_triple, + |buf_t| { + str::as_c_str(output, |buf_o| { + llvm::LLVMRustWriteOutputFile( + pm.llpm, + llmod, + buf_t, + buf_o, + FileType as c_uint, + CodeGenOptLevel, + true) + }) + }); } // Clean up and return @@ -219,14 +220,14 @@ mod write { if opts.output_type == output_type_llvm_assembly { // Given options "-S --emit-llvm": output LLVM assembly - str::as_c_str(output, {|buf_o| + str::as_c_str(output, |buf_o| { llvm::LLVMRustAddPrintModulePass(pm.llpm, llmod, buf_o)}); } else { // If only a bitcode file is asked for by using the '--emit-llvm' // flag, then output it here llvm::LLVMRunPassManager(pm.llpm, llmod); str::as_c_str(output, - {|buf| llvm::LLVMWriteBitcodeToFile(llmod, buf) }); + |buf| llvm::LLVMWriteBitcodeToFile(llmod, buf) ); } llvm::LLVMDisposeModule(llmod); @@ -292,16 +293,16 @@ fn build_link_meta(sess: session, c: ast::crate, output: str, type provided_metas = {name: option<@str>, vers: option<@str>, - cmh_items: [@ast::meta_item]/~}; + cmh_items: ~[@ast::meta_item]}; fn provided_link_metas(sess: session, c: ast::crate) -> provided_metas { let mut name: option<@str> = none; let mut vers: option<@str> = none; - let mut cmh_items: [@ast::meta_item]/~ = []/~; + let mut cmh_items: ~[@ast::meta_item] = ~[]; let linkage_metas = attr::find_linkage_metas(c.node.attrs); attr::require_unique_names(sess.diagnostic(), linkage_metas); - for linkage_metas.each {|meta| + for linkage_metas.each |meta| { if *attr::get_meta_item_name(meta) == "name" { alt attr::get_meta_item_value_str(meta) { some(v) { name = some(v); } @@ -320,7 +321,7 @@ fn build_link_meta(sess: session, c: ast::crate, output: str, // This calculates CMH as defined above fn crate_meta_extras_hash(sha: sha1, _crate: ast::crate, metas: provided_metas, - dep_hashes: [@str]/~) -> str { + dep_hashes: ~[@str]) -> str { fn len_and_str(s: str) -> str { ret #fmt["%u_%s", str::len(s), s]; } @@ -332,7 +333,7 @@ fn build_link_meta(sess: session, c: ast::crate, output: str, let cmh_items = attr::sort_meta_items(metas.cmh_items); sha.reset(); - for cmh_items.each {|m_| + for cmh_items.each |m_| { let m = m_; alt m.node { ast::meta_name_value(key, value) { @@ -347,7 +348,7 @@ fn build_link_meta(sess: session, c: ast::crate, output: str, } } - for dep_hashes.each {|dh| + for dep_hashes.each |dh| { sha.input_str(len_and_str(*dh)); } @@ -443,7 +444,7 @@ fn get_symbol_hash(ccx: @crate_ctxt, t: ty::t) -> str { // gas doesn't! fn sanitize(s: str) -> str { let mut result = ""; - str::chars_iter(s) {|c| + do str::chars_iter(s) |c| { alt c { '@' { result += "_sbox_"; } '~' { result += "_ubox_"; } @@ -479,7 +480,7 @@ fn mangle(ss: path) -> str { let mut n = "_ZN"; // Begin name-sequence. - for ss.each {|s| + for ss.each |s| { alt s { path_name(s) | path_mod(s) { let sani = sanitize(*s); n += #fmt["%u%s", str::len(sani), sani]; @@ -505,7 +506,7 @@ fn mangle_internal_name_by_type_only(ccx: @crate_ctxt, str { let s = @util::ppaux::ty_to_short_str(ccx.tcx, t); let hash = get_symbol_hash(ccx, t); - ret mangle([path_name(name), path_name(s), path_name(@hash)]/~); + ret mangle(~[path_name(name), path_name(s), path_name(@hash)]); } fn mangle_internal_name_by_path_and_seq(ccx: @crate_ctxt, path: path, @@ -579,7 +580,7 @@ fn link_binary(sess: session, // The invocations of cc share some flags across platforms let mut cc_args = - vec::append([stage]/~, sess.targ_cfg.target_strs.cc_args); + vec::append(~[stage], sess.targ_cfg.target_strs.cc_args); vec::push(cc_args, "-o"); vec::push(cc_args, output); vec::push(cc_args, obj_filename); @@ -593,7 +594,7 @@ fn link_binary(sess: session, // # Crate linking let cstore = sess.cstore; - for cstore::get_used_crate_files(cstore).each {|cratepath| + for cstore::get_used_crate_files(cstore).each |cratepath| { if str::ends_with(cratepath, ".rlib") { vec::push(cc_args, cratepath); cont; @@ -606,7 +607,7 @@ fn link_binary(sess: session, } let ula = cstore::get_used_link_args(cstore); - for ula.each {|arg| vec::push(cc_args, arg); } + for ula.each |arg| { vec::push(cc_args, arg); } // # Native library linking @@ -617,11 +618,11 @@ fn link_binary(sess: session, // forces to make sure that library can be found at runtime. let addl_paths = sess.opts.addl_lib_search_paths; - for addl_paths.each {|path| vec::push(cc_args, "-L" + path); } + for addl_paths.each |path| { vec::push(cc_args, "-L" + path); } // The names of the native libraries let used_libs = cstore::get_used_libraries(cstore); - for used_libs.each {|l| vec::push(cc_args, "-l" + l); } + for used_libs.each |l| { vec::push(cc_args, "-l" + l); } if sess.building_library { vec::push(cc_args, lib_cmd); @@ -642,7 +643,7 @@ fn link_binary(sess: session, // On linux librt and libdl are an indirect dependencies via rustrt, // and binutils 2.22+ won't add them automatically if sess.targ_cfg.os == session::os_linux { - vec::push_all(cc_args, ["-lrt", "-ldl"]/~); + vec::push_all(cc_args, ~["-lrt", "-ldl"]); // LLVM implements the `frem` instruction as a call to `fmod`, // which lives in libm. Similar to above, on some linuxes we @@ -651,13 +652,13 @@ fn link_binary(sess: session, } if sess.targ_cfg.os == session::os_freebsd { - vec::push_all(cc_args, ["-pthread", "-lrt", + vec::push_all(cc_args, ~["-pthread", "-lrt", "-L/usr/local/lib", "-lexecinfo", "-L/usr/local/lib/gcc46", "-L/usr/local/lib/gcc44", "-lstdc++", "-Wl,-z,origin", "-Wl,-rpath,/usr/local/lib/gcc46", - "-Wl,-rpath,/usr/local/lib/gcc44"]/~); + "-Wl,-rpath,/usr/local/lib/gcc44"]); } // OS X 10.6 introduced 'compact unwind info', which is produced by the @@ -689,7 +690,7 @@ fn link_binary(sess: session, // Clean up on Darwin if sess.targ_cfg.os == session::os_macos { - run::run_program("dsymutil", [output]/~); + run::run_program("dsymutil", ~[output]); } // Remove the temporary object file if we aren't saving temps diff --git a/src/rustc/back/rpath.rs b/src/rustc/back/rpath.rs index d0a36d3ad5a..55e4ba8d082 100644 --- a/src/rustc/back/rpath.rs +++ b/src/rustc/back/rpath.rs @@ -13,12 +13,12 @@ pure fn not_win32(os: session::os) -> bool { } } -fn get_rpath_flags(sess: session::session, out_filename: str) -> [str]/~ { +fn get_rpath_flags(sess: session::session, out_filename: str) -> ~[str] { let os = sess.targ_cfg.os; // No rpath on windows if os == session::os_win32 { - ret []/~; + ret ~[]; } #debug("preparing the RPATH!"); @@ -37,25 +37,25 @@ fn get_rpath_flags(sess: session::session, out_filename: str) -> [str]/~ { } fn get_sysroot_absolute_rt_lib(sess: session::session) -> path::path { - let mut path = vec::append([sess.filesearch.sysroot()]/~, + let mut path = vec::append(~[sess.filesearch.sysroot()], filesearch::relative_target_lib_path( sess.opts.target_triple)); vec::push(path, os::dll_filename("rustrt")); path::connect_many(path) } -fn rpaths_to_flags(rpaths: [str]/~) -> [str]/~ { - vec::map(rpaths, { |rpath| #fmt("-Wl,-rpath,%s",rpath)}) +fn rpaths_to_flags(rpaths: ~[str]) -> ~[str] { + vec::map(rpaths, |rpath| #fmt("-Wl,-rpath,%s",rpath) ) } fn get_rpaths(os: session::os, cwd: path::path, sysroot: path::path, - output: path::path, libs: [path::path]/~, - target_triple: str) -> [str]/~ { + output: path::path, libs: ~[path::path], + target_triple: str) -> ~[str] { #debug("cwd: %s", cwd); #debug("sysroot: %s", sysroot); #debug("output: %s", output); #debug("libs:"); - for libs.each {|libpath| + for libs.each |libpath| { #debug(" %s", libpath); } #debug("target_triple: %s", target_triple); @@ -70,11 +70,11 @@ fn get_rpaths(os: session::os, cwd: path::path, sysroot: path::path, let abs_rpaths = get_absolute_rpaths(cwd, libs); // And a final backup rpath to the global library location. - let fallback_rpaths = [get_install_prefix_rpath(cwd, target_triple)]/~; + let fallback_rpaths = ~[get_install_prefix_rpath(cwd, target_triple)]; - fn log_rpaths(desc: str, rpaths: [str]/~) { + fn log_rpaths(desc: str, rpaths: ~[str]) { #debug("%s rpaths:", desc); - for rpaths.each {|rpath| + for rpaths.each |rpath| { #debug(" %s", rpath); } } @@ -95,8 +95,8 @@ fn get_rpaths(os: session::os, cwd: path::path, sysroot: path::path, fn get_rpaths_relative_to_output(os: session::os, cwd: path::path, output: path::path, - libs: [path::path]/~) -> [str]/~ { - vec::map(libs, {|a| + libs: ~[path::path]) -> ~[str] { + vec::map(libs, |a| { check not_win32(os); get_rpath_relative_to_output(os, cwd, output, a) }) @@ -141,8 +141,8 @@ fn get_relative_to(abs1: path::path, abs2: path::path) -> path::path { start_idx += 1u; } - let mut path = []/~; - for uint::range(start_idx, len1 - 1u) {|_i| vec::push(path, ".."); }; + let mut path = ~[]; + for uint::range(start_idx, len1 - 1u) |_i| { vec::push(path, ".."); }; vec::push_all(path, vec::view(split2, start_idx, len2 - 1u)); @@ -153,8 +153,8 @@ fn get_relative_to(abs1: path::path, abs2: path::path) -> path::path { } } -fn get_absolute_rpaths(cwd: path::path, libs: [path::path]/~) -> [str]/~ { - vec::map(libs, {|a|get_absolute_rpath(cwd, a)}) +fn get_absolute_rpaths(cwd: path::path, libs: ~[path::path]) -> ~[str] { + vec::map(libs, |a| get_absolute_rpath(cwd, a) ) } fn get_absolute_rpath(cwd: path::path, &&lib: path::path) -> str { @@ -177,15 +177,15 @@ fn get_install_prefix_rpath(cwd: path::path, target_triple: str) -> str { } let path = vec::append( - [install_prefix]/~, + ~[install_prefix], filesearch::relative_target_lib_path(target_triple)); get_absolute(cwd, path::connect_many(path)) } -fn minimize_rpaths(rpaths: [str]/~) -> [str]/~ { +fn minimize_rpaths(rpaths: ~[str]) -> ~[str] { let set = map::str_hash::<()>(); - let mut minimized = []/~; - for rpaths.each {|rpath| + let mut minimized = ~[]; + for rpaths.each |rpath| { if !set.contains_key(rpath) { vec::push(minimized, rpath); set.insert(rpath, ()); @@ -198,8 +198,8 @@ fn minimize_rpaths(rpaths: [str]/~) -> [str]/~ { mod test { #[test] fn test_rpaths_to_flags() { - let flags = rpaths_to_flags(["path1", "path2"]/~); - assert flags == ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]/~; + let flags = rpaths_to_flags(~["path1", "path2"]); + assert flags == ~["-Wl,-rpath,path1", "-Wl,-rpath,path2"]; } #[test] @@ -233,15 +233,15 @@ mod test { #[test] fn test_minimize1() { - let res = minimize_rpaths(["rpath1", "rpath2", "rpath1"]/~); - assert res == ["rpath1", "rpath2"]/~; + let res = minimize_rpaths(~["rpath1", "rpath2", "rpath1"]); + assert res == ~["rpath1", "rpath2"]; } #[test] fn test_minimize2() { - let res = minimize_rpaths(["1a", "2", "2", "1a", "4a", - "1a", "2", "3", "4a", "3"]/~); - assert res == ["1a", "2", "4a", "3"]/~; + let res = minimize_rpaths(~["1a", "2", "2", "1a", "4a", + "1a", "2", "3", "4a", "3"]); + assert res == ~["1a", "2", "4a", "3"]; } #[test] diff --git a/src/rustc/back/target_strs.rs b/src/rustc/back/target_strs.rs index 24665c2ab36..ca11e4c6d69 100644 --- a/src/rustc/back/target_strs.rs +++ b/src/rustc/back/target_strs.rs @@ -3,5 +3,5 @@ type t = { meta_sect_name: str, data_layout: str, target_triple: str, - cc_args: [str]/~ + cc_args: ~[str] }; diff --git a/src/rustc/back/upcall.rs b/src/rustc/back/upcall.rs index eb2fe10a821..b7ee7008d2f 100644 --- a/src/rustc/back/upcall.rs +++ b/src/rustc/back/upcall.rs @@ -33,79 +33,79 @@ fn declare_upcalls(targ_cfg: @session::config, tydesc_type: TypeRef, llmod: ModuleRef) -> @upcalls { fn decl(llmod: ModuleRef, prefix: str, name: str, - tys: [TypeRef]/~, rv: TypeRef) -> + tys: ~[TypeRef], rv: TypeRef) -> ValueRef { - let mut arg_tys: [TypeRef]/~ = []/~; - for tys.each {|t| vec::push(arg_tys, t); } + let mut arg_tys: ~[TypeRef] = ~[]; + for tys.each |t| { vec::push(arg_tys, t); } let fn_ty = T_fn(arg_tys, rv); ret base::decl_cdecl_fn(llmod, prefix + name, fn_ty); } fn nothrow(f: ValueRef) -> ValueRef { base::set_no_unwind(f); f } - let d = {|a,b,c|decl(llmod, "upcall_", a, b, c)}; - let dv = {|a,b|decl(llmod, "upcall_", a, b, T_void())}; + let d = |a,b,c| decl(llmod, "upcall_", a, b, c); + let dv = |a,b| decl(llmod, "upcall_", a, b, T_void()); let int_t = T_int(targ_cfg); let size_t = T_size_t(targ_cfg); - ret @{_fail: dv("fail", [T_ptr(T_i8()), + ret @{_fail: dv("fail", ~[T_ptr(T_i8()), T_ptr(T_i8()), - size_t]/~), - trace: dv("trace", [T_ptr(T_i8()), + size_t]), + trace: dv("trace", ~[T_ptr(T_i8()), T_ptr(T_i8()), - int_t]/~), + int_t]), malloc: nothrow(d("malloc", - [T_ptr(tydesc_type), int_t]/~, + ~[T_ptr(tydesc_type), int_t], T_ptr(T_i8()))), free: - nothrow(dv("free", [T_ptr(T_i8())]/~)), + nothrow(dv("free", ~[T_ptr(T_i8())])), exchange_malloc: nothrow(d("exchange_malloc", - [T_ptr(tydesc_type), int_t]/~, + ~[T_ptr(tydesc_type), int_t], T_ptr(T_i8()))), exchange_free: - nothrow(dv("exchange_free", [T_ptr(T_i8())]/~)), + nothrow(dv("exchange_free", ~[T_ptr(T_i8())])), validate_box: - nothrow(dv("validate_box", [T_ptr(T_i8())]/~)), + nothrow(dv("validate_box", ~[T_ptr(T_i8())])), mark: - d("mark", [T_ptr(T_i8())]/~, int_t), + d("mark", ~[T_ptr(T_i8())], int_t), vec_grow: - nothrow(dv("vec_grow", [T_ptr(T_ptr(T_i8())), int_t]/~)), + nothrow(dv("vec_grow", ~[T_ptr(T_ptr(T_i8())), int_t])), str_new_uniq: - nothrow(d("str_new_uniq", [T_ptr(T_i8()), int_t]/~, + nothrow(d("str_new_uniq", ~[T_ptr(T_i8()), int_t], T_ptr(T_i8()))), str_new_shared: - nothrow(d("str_new_shared", [T_ptr(T_i8()), int_t]/~, + nothrow(d("str_new_shared", ~[T_ptr(T_i8()), int_t], T_ptr(T_i8()))), str_concat: - nothrow(d("str_concat", [T_ptr(T_i8()), - T_ptr(T_i8())]/~, + nothrow(d("str_concat", ~[T_ptr(T_i8()), + T_ptr(T_i8())], T_ptr(T_i8()))), cmp_type: dv("cmp_type", - [T_ptr(T_i1()), T_ptr(tydesc_type), + ~[T_ptr(T_i1()), T_ptr(tydesc_type), T_ptr(T_ptr(tydesc_type)), T_ptr(T_i8()), T_ptr(T_i8()), - T_i8()]/~), + T_i8()]), log_type: - dv("log_type", [T_ptr(tydesc_type), - T_ptr(T_i8()), T_i32()]/~), + dv("log_type", ~[T_ptr(tydesc_type), + T_ptr(T_i8()), T_i32()]), alloc_c_stack: - d("alloc_c_stack", [size_t]/~, T_ptr(T_i8())), + d("alloc_c_stack", ~[size_t], T_ptr(T_i8())), call_shim_on_c_stack: d("call_shim_on_c_stack", // arguments: void *args, void *fn_ptr - [T_ptr(T_i8()), T_ptr(T_i8())]/~, + ~[T_ptr(T_i8()), T_ptr(T_i8())], int_t), call_shim_on_rust_stack: d("call_shim_on_rust_stack", - [T_ptr(T_i8()), T_ptr(T_i8())]/~, int_t), + ~[T_ptr(T_i8()), T_ptr(T_i8())], int_t), rust_personality: - nothrow(d("rust_personality", []/~, T_i32())), + nothrow(d("rust_personality", ~[], T_i32())), reset_stack_limit: - nothrow(dv("reset_stack_limit", []/~)) + nothrow(dv("reset_stack_limit", ~[])) }; } // diff --git a/src/rustc/back/x86.rs b/src/rustc/back/x86.rs index df68aee37e5..63ca5b60e65 100644 --- a/src/rustc/back/x86.rs +++ b/src/rustc/back/x86.rs @@ -35,7 +35,7 @@ fn get_target_strs(target_os: session::os) -> target_strs::t { session::os_freebsd { "i686-unknown-freebsd" } }, - cc_args: ["-m32"]/~ + cc_args: ~["-m32"] }; } diff --git a/src/rustc/back/x86_64.rs b/src/rustc/back/x86_64.rs index 6936b3cec7d..b3e6c518dd4 100644 --- a/src/rustc/back/x86_64.rs +++ b/src/rustc/back/x86_64.rs @@ -42,7 +42,7 @@ fn get_target_strs(target_os: session::os) -> target_strs::t { session::os_freebsd { "x86_64-unknown-freebsd" } }, - cc_args: ["-m64"]/~ + cc_args: ~["-m64"] }; } diff --git a/src/rustc/driver/driver.rs b/src/rustc/driver/driver.rs index 52c731f93ae..a90d960717c 100644 --- a/src/rustc/driver/driver.rs +++ b/src/rustc/driver/driver.rs @@ -48,7 +48,7 @@ fn default_configuration(sess: session, argv0: str, input: input) -> session::arch_arm { "arm" } }; - ret [ // Target bindings. + ret ~[ // Target bindings. attr::mk_word_item(@os::family()), mk(@"target_os", os::sysname()), mk(@"target_family", os::family()), @@ -56,7 +56,7 @@ fn default_configuration(sess: session, argv0: str, input: input) -> mk(@"target_libc", libc), // Build bindings. mk(@"build_compiler", argv0), - mk(@"build_input", source_name(input))]/~; + mk(@"build_input", source_name(input))]; } fn build_configuration(sess: session, argv0: str, input: input) -> @@ -70,19 +70,19 @@ fn build_configuration(sess: session, argv0: str, input: input) -> { if sess.opts.test && !attr::contains_name(user_cfg, "test") { - [attr::mk_word_item(@"test")]/~ - } else { []/~ } + ~[attr::mk_word_item(@"test")] + } else { ~[] } }; ret vec::append(vec::append(user_cfg, gen_cfg), default_cfg); } // Convert strings provided as --cfg [cfgspec] into a crate_cfg -fn parse_cfgspecs(cfgspecs: [str]/~) -> ast::crate_cfg { +fn parse_cfgspecs(cfgspecs: ~[str]) -> ast::crate_cfg { // FIXME (#2399): It would be nice to use the parser to parse all // varieties of meta_item here. At the moment we just support the // meta_word variant. - let mut words = []/~; - for cfgspecs.each {|s| vec::push(words, attr::mk_word_item(@s)); } + let mut words = ~[]; + for cfgspecs.each |s| { vec::push(words, attr::mk_word_item(@s)); } ret words; } @@ -131,95 +131,111 @@ fn compile_upto(sess: session, cfg: ast::crate_cfg, -> {crate: @ast::crate, tcx: option<ty::ctxt>} { let time_passes = sess.time_passes(); let mut crate = time(time_passes, "parsing", - {||parse_input(sess, cfg, input)}); + ||parse_input(sess, cfg, input) ); if upto == cu_parse { ret {crate: crate, tcx: none}; } sess.building_library = session::building_library( sess.opts.crate_type, crate, sess.opts.test); - crate = - time(time_passes, "configuration", - {|copy crate|front::config::strip_unconfigured_items(crate)}); - crate = - time(time_passes, "maybe building test harness", - {|copy crate|front::test::modify_for_testing(sess, crate)}); - crate = - time(time_passes, "expansion", - {|copy crate|syntax::ext::expand::expand_crate( - sess.parse_sess, sess.opts.cfg, crate)}); + crate = time(time_passes, "configuration", |copy crate| { + front::config::strip_unconfigured_items(crate) + }); + + crate = time(time_passes, "maybe building test harness", |copy crate| { + front::test::modify_for_testing(sess, crate) + }); + + crate = time(time_passes, "expansion", |copy crate| { + syntax::ext::expand::expand_crate( + sess.parse_sess, sess.opts.cfg, crate) + }); if upto == cu_expand { ret {crate: crate, tcx: none}; } - crate = - time(time_passes, "intrinsic injection", {|copy crate| - front::intrinsic_inject::inject_intrinsic(sess, crate) - }); + crate = time(time_passes, "intrinsic injection", |copy crate| { + front::intrinsic_inject::inject_intrinsic(sess, crate) + }); - crate = - time(time_passes, "core injection", {|copy crate| - front::core_inject::maybe_inject_libcore_ref(sess, crate) - }); + crate = time(time_passes, "core injection", |copy crate| { + front::core_inject::maybe_inject_libcore_ref(sess, crate) + }); - time(time_passes, "building warning settings table", {|copy crate| + time(time_passes, "building warning settings table", |copy crate| { lint::build_settings_crate(sess, crate) }); - let ast_map = - time(time_passes, "ast indexing", {|copy crate| + let ast_map = time(time_passes, "ast indexing", |copy crate| { syntax::ast_map::map_crate(sess.diagnostic(), *crate) }); - time(time_passes, "external crate/lib resolution", {|copy crate| + + time(time_passes, "external crate/lib resolution", |copy crate| { creader::read_crates( sess.diagnostic(), *crate, sess.cstore, sess.filesearch, session::sess_os_to_meta_os(sess.targ_cfg.os), sess.opts.static) }); - let {def_map, exp_map, impl_map} = - time(time_passes, "resolution", {|copy crate| - resolve::resolve_crate(sess, ast_map, crate) - }); - let freevars = - time(time_passes, "freevar finding", {|copy crate| - freevars::annotate_freevars(def_map, crate) - }); - let region_map = - time(time_passes, "region resolution", {|copy crate| - middle::region::resolve_crate(sess, def_map, crate) - }); + + let { def_map, exp_map, impl_map + } = time(time_passes, "resolution", |copy crate| { + resolve::resolve_crate(sess, ast_map, crate) + }); + + let freevars = time(time_passes, "freevar finding", |copy crate| { + freevars::annotate_freevars(def_map, crate) + }); + + let region_map = time(time_passes, "region resolution", |copy crate| { + middle::region::resolve_crate(sess, def_map, crate) + }); + let ty_cx = ty::mk_ctxt(sess, def_map, ast_map, freevars, region_map); - let (method_map, vtable_map) = - time(time_passes, "typechecking", {|copy crate| - typeck::check_crate(ty_cx, impl_map, crate) - }); - time(time_passes, "const checking", {|copy crate| + + let ( method_map, vtable_map + ) = time(time_passes, "typechecking", |copy crate| { + typeck::check_crate(ty_cx, impl_map, crate) + }); + + time(time_passes, "const checking", |copy crate| { middle::check_const::check_crate( sess, crate, ast_map, def_map, method_map, ty_cx) }); if upto == cu_typeck { ret {crate: crate, tcx: some(ty_cx)}; } - time(time_passes, "block-use checking", - {|copy crate|middle::block_use::check_crate(ty_cx, crate)}); - time(time_passes, "loop checking", - {|copy crate|middle::check_loop::check_crate(ty_cx, crate)}); - time(time_passes, "alt checking", - {|copy crate|middle::check_alt::check_crate(ty_cx, crate)}); - let last_use_map = - time(time_passes, "liveness checking", {|copy crate| - middle::liveness::check_crate(ty_cx, method_map, crate) - }); - time(time_passes, "typestate checking", - {|copy crate|middle::tstate::ck::check_crate(ty_cx, crate)}); - let (root_map, mutbl_map) = time( - time_passes, "borrow checking", - {|copy crate|middle::borrowck::check_crate(ty_cx, method_map, - last_use_map, crate)}); - time(time_passes, "kind checking", {|copy crate| + time(time_passes, "block-use checking", |copy crate| { + middle::block_use::check_crate(ty_cx, crate) + }); + + time(time_passes, "loop checking", |copy crate| { + middle::check_loop::check_crate(ty_cx, crate) + }); + + time(time_passes, "alt checking", |copy crate| { + middle::check_alt::check_crate(ty_cx, crate) + }); + + let last_use_map = time(time_passes, "liveness checking", |copy crate| { + middle::liveness::check_crate(ty_cx, method_map, crate) + }); + + time(time_passes, "typestate checking", |copy crate| { + middle::tstate::ck::check_crate(ty_cx, crate) + }); + + let ( root_map, mutbl_map + ) = time(time_passes, "borrow checking", |copy crate| { + middle::borrowck::check_crate(ty_cx, method_map, + last_use_map, crate) + }); + + time(time_passes, "kind checking", |copy crate| { kind::check_crate(ty_cx, method_map, last_use_map, crate) }); - time(time_passes, "lint checking", - {|copy crate|lint::check_crate(ty_cx, crate)}); + + time(time_passes, "lint checking", |copy crate| { + lint::check_crate(ty_cx, crate) + }); if upto == cu_no_trans { ret {crate: crate, tcx: some(ty_cx)}; } let outputs = option::get(outputs); @@ -229,13 +245,14 @@ fn compile_upto(sess: session, cfg: ast::crate_cfg, impl_map: impl_map, method_map: method_map, vtable_map: vtable_map}; - let (llmod, link_meta) = - time(time_passes, "translation", - {|copy crate|trans::base::trans_crate( - sess, crate, ty_cx, outputs.obj_filename, - exp_map, maps)}); - time(time_passes, "LLVM passes", - {||link::write::run_passes(sess, llmod, outputs.obj_filename)}); + let (llmod, link_meta) = time(time_passes, "translation", |copy crate| { + trans::base::trans_crate(sess, crate, ty_cx, outputs.obj_filename, + exp_map, maps) + }); + + time(time_passes, "LLVM passes", || { + link::write::run_passes(sess, llmod, outputs.obj_filename) + }); let stop_after_codegen = sess.opts.output_type != link::output_type_exe || @@ -243,9 +260,11 @@ fn compile_upto(sess: session, cfg: ast::crate_cfg, if stop_after_codegen { ret {crate: crate, tcx: some(ty_cx)}; } - time(time_passes, "linking", - {||link::link_binary(sess, outputs.obj_filename, - outputs.out_filename, link_meta)}); + time(time_passes, "linking", || { + link::link_binary(sess, outputs.obj_filename, + outputs.out_filename, link_meta) + }); + ret {crate: crate, tcx: some(ty_cx)}; } @@ -313,7 +332,7 @@ fn pretty_print_input(sess: session, cfg: ast::crate_cfg, input: input, let ann = alt ppm { ppm_typed { {pre: ann_paren_for_expr, - post: {|a|ann_typed_post(option::get(tcx), a)}} + post: |a| ann_typed_post(option::get(tcx), a) } } ppm_identified | ppm_expanded_identified { {pre: ann_paren_for_expr, post: ann_identified_post} @@ -322,7 +341,7 @@ fn pretty_print_input(sess: session, cfg: ast::crate_cfg, input: input, }; let is_expanded = upto != cu_parse; let src = codemap::get_filemap(sess.codemap, source_name(input)).src; - io::with_str_reader(*src) { |rdr| + do io::with_str_reader(*src) |rdr| { pprust::print_crate(sess.codemap, sess.span_diagnostic, crate, source_name(input), rdr, io::stdout(), ann, is_expanded); @@ -417,7 +436,7 @@ fn build_session_options(match: getopts::match, let lint_flags = vec::append(getopts::opt_strs(match, "W"), getopts::opt_strs(match, "warn")); let lint_dict = lint::get_lint_dict(); - let lint_opts = vec::map(lint_flags) {|flag| + let lint_opts = do vec::map(lint_flags) |flag| { alt lint::lookup_lint(lint_dict, flag) { (flag, none) { early_error(demitter, #fmt("unknown warning: %s", flag)) @@ -429,9 +448,9 @@ fn build_session_options(match: getopts::match, let mut debugging_opts = 0u; let debug_flags = getopts::opt_strs(match, "Z"); let debug_map = session::debugging_opts_map(); - for debug_flags.each { |debug_flag| + for debug_flags.each |debug_flag| { let mut this_bit = 0u; - for debug_map.each { |pair| + for debug_map.each |pair| { let (name, _, bit) = pair; if name == debug_flag { this_bit = bit; break; } } @@ -563,8 +582,8 @@ fn parse_pretty(sess: session, &&name: str) -> pp_mode { "`identified`"); } -fn opts() -> [getopts::opt]/~ { - ret [optflag("h"), optflag("help"), optflag("v"), optflag("version"), +fn opts() -> ~[getopts::opt] { + ret ~[optflag("h"), optflag("help"), optflag("v"), optflag("version"), optflag("emit-llvm"), optflagopt("pretty"), optflag("ls"), optflag("parse-only"), optflag("no-trans"), optflag("O"), optopt("opt-level"), optmulti("L"), optflag("S"), @@ -577,7 +596,7 @@ fn opts() -> [getopts::opt]/~ { optmulti("Z"), optmulti("cfg"), optflag("test"), - optflag("lib"), optflag("bin"), optflag("static"), optflag("gc")]/~; + optflag("lib"), optflag("bin"), optflag("static"), optflag("gc")]; } type output_filenames = @{out_filename: str, obj_filename:str}; @@ -692,7 +711,7 @@ mod test { #[test] fn test_switch_implies_cfg_test() { let match = - alt getopts::getopts(["--test"]/~, opts()) { + alt getopts::getopts(~["--test"], opts()) { ok(m) { m } err(f) { fail "test_switch_implies_cfg_test: " + getopts::fail_str(f); } @@ -708,7 +727,7 @@ mod test { #[test] fn test_switch_implies_cfg_test_unless_cfg_test() { let match = - alt getopts::getopts(["--test", "--cfg=test"]/~, opts()) { + alt getopts::getopts(~["--test", "--cfg=test"], opts()) { ok(m) { m } err(f) { fail "test_switch_implies_cfg_test_unless_cfg_test: " + getopts::fail_str(f); } diff --git a/src/rustc/driver/rustc.rs b/src/rustc/driver/rustc.rs index 35e633573ce..e7661f40b26 100644 --- a/src/rustc/driver/rustc.rs +++ b/src/rustc/driver/rustc.rs @@ -80,7 +80,7 @@ Options: fn describe_warnings() { let lint_dict = lint::get_lint_dict(); let mut max_key = 0u; - for lint_dict.each_key {|k| max_key = uint::max(k.len(), max_key); } + for lint_dict.each_key |k| { max_key = uint::max(k.len(), max_key); } fn padded(max: uint, s: str) -> str { str::from_bytes(vec::from_elem(max - s.len(), ' ' as u8)) + s } @@ -89,7 +89,7 @@ fn describe_warnings() { padded(max_key, "name"), "default", "meaning")); io::println(#fmt(" %s %7.7s %s\n", padded(max_key, "----"), "-------", "-------")); - for lint_dict.each {|k, v| + for lint_dict.each |k, v| { let k = str::replace(k, "_", "-"); io::println(#fmt(" %s %7.7s %s", padded(max_key, k), @@ -103,13 +103,13 @@ fn describe_warnings() { fn describe_debug_flags() { io::println(#fmt("\nAvailable debug options:\n")); - for session::debugging_opts_map().each { |pair| + for session::debugging_opts_map().each |pair| { let (name, desc, _) = pair; io::println(#fmt(" -Z%-20s -- %s", name, desc)); } } -fn run_compiler(args: [str]/~, demitter: diagnostic::emitter) { +fn run_compiler(args: ~[str], demitter: diagnostic::emitter) { // Don't display log spew by default. Can override with RUST_LOG. logging::console_off(); @@ -169,7 +169,7 @@ fn run_compiler(args: [str]/~, demitter: diagnostic::emitter) { let pretty = option::map(getopts::opt_default(match, "pretty", "normal"), - {|a|parse_pretty(sess, a)}); + |a| parse_pretty(sess, a) ); alt pretty { some::<pp_mode>(ppm) { pretty_print_input(sess, cfg, input, ppm); ret; } none::<pp_mode> {/* continue */ } @@ -211,7 +211,7 @@ fn monitor(+f: fn~(diagnostic::emitter)) { let p = comm::port(); let ch = comm::chan(p); - alt task::try {|| + alt do task::try || { // The 'diagnostics emitter'. Every error, warning, etc. should // go through this function. @@ -243,14 +243,12 @@ fn monitor(+f: fn~(diagnostic::emitter)) { diagnostic::error); for [ - "the compiler hit an unexpected failure path. \ this is a bug", "try running with RUST_LOG=rustc=0,::rt::backtrace \ to get further details and report the results \ to github.com/mozilla/rust/issues" - - ]/~.each {|note| + ]/_.each |note| { diagnostic::emit(none, note, diagnostic::note) } } @@ -260,8 +258,8 @@ fn monitor(+f: fn~(diagnostic::emitter)) { } } -fn main(args: [str]/~) { - monitor {|demitter| +fn main(args: ~[str]) { + do monitor |demitter| { run_compiler(args, demitter); } } diff --git a/src/rustc/driver/session.rs b/src/rustc/driver/session.rs index 4308a986efd..5c0dc72f33a 100644 --- a/src/rustc/driver/session.rs +++ b/src/rustc/driver/session.rs @@ -36,8 +36,8 @@ const trace: uint = 128u; // It should be removed const no_rt: uint = 256u; -fn debugging_opts_map() -> [(str, str, uint)]/~ { - [("ppregions", "prettyprint regions with \ +fn debugging_opts_map() -> ~[(str, str, uint)] { + ~[("ppregions", "prettyprint regions with \ internal repr details", ppregions), ("time-passes", "measure time of each rustc pass", time_passes), ("count-llvm-insns", "count where LLVM \ @@ -48,7 +48,7 @@ fn debugging_opts_map() -> [(str, str, uint)]/~ { ("no-verify", "skip LLVM verification", no_verify), ("trace", "emit trace logs", trace), ("no-rt", "do not link to the runtime", no_rt) - ]/~ + ] } type options = @@ -59,10 +59,10 @@ type options = optimize: uint, debuginfo: bool, extra_debuginfo: bool, - lint_opts: [(lint::lint, lint::level)]/~, + lint_opts: ~[(lint::lint, lint::level)], save_temps: bool, output_type: back::link::output_type, - addl_lib_search_paths: [str]/~, + addl_lib_search_paths: ~[str], maybe_sysroot: option<str>, target_triple: str, cfg: ast::crate_cfg, @@ -72,7 +72,7 @@ type options = debugging_opts: uint, }; -type crate_metadata = {name: str, data: [u8]/~}; +type crate_metadata = {name: str, data: ~[u8]}; type session = @{targ_cfg: @config, opts: @options, @@ -172,13 +172,13 @@ fn basic_options() -> @options { optimize: 0u, debuginfo: false, extra_debuginfo: false, - lint_opts: []/~, + lint_opts: ~[], save_temps: false, output_type: link::output_type_exe, - addl_lib_search_paths: []/~, + addl_lib_search_paths: ~[], maybe_sysroot: none, target_triple: driver::host_triple(), - cfg: []/~, + cfg: ~[], test: false, parse_only: false, no_trans: false, @@ -239,14 +239,14 @@ mod test { } fn make_crate(with_bin: bool, with_lib: bool) -> @ast::crate { - let mut attrs = []/~; - if with_bin { attrs += [make_crate_type_attr("bin")]/~; } - if with_lib { attrs += [make_crate_type_attr("lib")]/~; } + let mut attrs = ~[]; + if with_bin { attrs += ~[make_crate_type_attr("bin")]; } + if with_lib { attrs += ~[make_crate_type_attr("lib")]; } @ast_util::respan(ast_util::dummy_sp(), { - directives: []/~, - module: {view_items: []/~, items: []/~}, + directives: ~[], + module: {view_items: ~[], items: ~[]}, attrs: attrs, - config: []/~ + config: ~[] }) } diff --git a/src/rustc/front/config.rs b/src/rustc/front/config.rs index d78a673624f..9db9147ecaf 100644 --- a/src/rustc/front/config.rs +++ b/src/rustc/front/config.rs @@ -4,7 +4,7 @@ export strip_unconfigured_items; export metas_in_cfg; export strip_items; -type in_cfg_pred = fn@([ast::attribute]/~) -> bool; +type in_cfg_pred = fn@(~[ast::attribute]) -> bool; type ctxt = @{ in_cfg: in_cfg_pred @@ -13,7 +13,7 @@ type ctxt = @{ // Support conditional compilation by transforming the AST, stripping out // any items that do not belong in the current configuration fn strip_unconfigured_items(crate: @ast::crate) -> @ast::crate { - strip_items(crate) {|attrs| + do strip_items(crate) |attrs| { in_cfg(crate.node.config, attrs) } } @@ -24,9 +24,9 @@ fn strip_items(crate: @ast::crate, in_cfg: in_cfg_pred) let ctxt = @{in_cfg: in_cfg}; let precursor = - @{fold_mod: {|a,b|fold_mod(ctxt, a, b)}, - fold_block: fold::wrap({|a,b|fold_block(ctxt, a, b)}), - fold_foreign_mod: {|a,b|fold_foreign_mod(ctxt, a, b)} + @{fold_mod: |a,b| fold_mod(ctxt, a, b), + fold_block: fold::wrap(|a,b| fold_block(ctxt, a, b) ), + fold_foreign_mod: |a,b| fold_foreign_mod(ctxt, a, b) with *fold::default_ast_fold()}; let fold = fold::make_fold(precursor); @@ -41,7 +41,7 @@ fn filter_item(cx: ctxt, &&item: @ast::item) -> fn fold_mod(cx: ctxt, m: ast::_mod, fld: fold::ast_fold) -> ast::_mod { - let filter = {|a|filter_item(cx, a)}; + let filter = |a| filter_item(cx, a); let filtered_items = vec::filter_map(m.items, filter); ret {view_items: vec::map(m.view_items, fld.fold_view_item), items: vec::map(filtered_items, fld.fold_item)}; @@ -56,7 +56,7 @@ fn filter_foreign_item(cx: ctxt, &&item: @ast::foreign_item) -> fn fold_foreign_mod(cx: ctxt, nm: ast::foreign_mod, fld: fold::ast_fold) -> ast::foreign_mod { - let filter = {|a|filter_foreign_item(cx, a)}; + let filter = |a| filter_foreign_item(cx, a); let filtered_items = vec::filter_map(nm.items, filter); ret {view_items: vec::map(nm.view_items, fld.fold_view_item), items: filtered_items}; @@ -81,7 +81,7 @@ fn filter_stmt(cx: ctxt, &&stmt: @ast::stmt) -> fn fold_block(cx: ctxt, b: ast::blk_, fld: fold::ast_fold) -> ast::blk_ { - let filter = {|a|filter_stmt(cx, a)}; + let filter = |a| filter_stmt(cx, a); let filtered_stmts = vec::filter_map(b.stmts, filter); ret {view_items: b.view_items, stmts: vec::map(filtered_stmts, fld.fold_stmt), @@ -100,11 +100,11 @@ fn foreign_item_in_cfg(cx: ctxt, item: @ast::foreign_item) -> bool { // Determine if an item should be translated in the current crate // configuration based on the item's attributes -fn in_cfg(cfg: ast::crate_cfg, attrs: [ast::attribute]/~) -> bool { +fn in_cfg(cfg: ast::crate_cfg, attrs: ~[ast::attribute]) -> bool { metas_in_cfg(cfg, attr::attr_metas(attrs)) } -fn metas_in_cfg(cfg: ast::crate_cfg, metas: [@ast::meta_item]/~) -> bool { +fn metas_in_cfg(cfg: ast::crate_cfg, metas: ~[@ast::meta_item]) -> bool { // The "cfg" attributes on the item let cfg_metas = attr::find_meta_items_by_name(metas, "cfg"); @@ -113,12 +113,12 @@ fn metas_in_cfg(cfg: ast::crate_cfg, metas: [@ast::meta_item]/~) -> bool { // so we can match against them. This is the list of configurations for // which the item is valid let cfg_metas = vec::concat(vec::filter_map(cfg_metas, - {|&&i| attr::get_meta_item_list(i)})); + |&&i| attr::get_meta_item_list(i) )); let has_cfg_metas = vec::len(cfg_metas) > 0u; if !has_cfg_metas { ret true; } - for cfg_metas.each {|cfg_mi| + for cfg_metas.each |cfg_mi| { if attr::contains(cfg, cfg_mi) { ret true; } } diff --git a/src/rustc/front/core_inject.rs b/src/rustc/front/core_inject.rs index f042c6c9e1b..446e0f25fae 100644 --- a/src/rustc/front/core_inject.rs +++ b/src/rustc/front/core_inject.rs @@ -30,18 +30,18 @@ fn inject_libcore_ref(sess: session, let n1 = sess.next_node_id(); let n2 = sess.next_node_id(); - let vi1 = @{node: ast::view_item_use(@"core", []/~, n1), - attrs: []/~, + let vi1 = @{node: ast::view_item_use(@"core", ~[], n1), + attrs: ~[], vis: ast::public, span: dummy_sp()}; let vp = spanned(ast::view_path_glob(ident_to_path(dummy_sp(), @"core"), n2)); - let vi2 = @{node: ast::view_item_import([vp]/~), - attrs: []/~, + let vi2 = @{node: ast::view_item_import(~[vp]), + attrs: ~[], vis: ast::public, span: dummy_sp()}; - let vis = vec::append([vi1, vi2]/~, crate.node.module.view_items); + let vis = vec::append(~[vi1, vi2], crate.node.module.view_items); ret @{node: {module: { view_items: vis with crate.node.module } with crate.node} with *crate } diff --git a/src/rustc/front/intrinsic_inject.rs b/src/rustc/front/intrinsic_inject.rs index f215d95e2de..9f2741804bf 100644 --- a/src/rustc/front/intrinsic_inject.rs +++ b/src/rustc/front/intrinsic_inject.rs @@ -12,7 +12,7 @@ fn inject_intrinsic(sess: session, let item = parse::parse_item_from_source_str("<intrinsic>", intrinsic_module, sess.opts.cfg, - []/~, ast::public, + ~[], ast::public, sess.parse_sess); let item = alt item { @@ -22,7 +22,7 @@ fn inject_intrinsic(sess: session, } }; - let items = vec::append([item]/~, crate.node.module.items); + let items = vec::append(~[item], crate.node.module.items); ret @{node: {module: { items: items with crate.node.module } with crate.node} with *crate } diff --git a/src/rustc/front/test.rs b/src/rustc/front/test.rs index 2e80d248de9..ce101a5ce4e 100644 --- a/src/rustc/front/test.rs +++ b/src/rustc/front/test.rs @@ -15,13 +15,13 @@ export modify_for_testing; type node_id_gen = fn@() -> ast::node_id; -type test = {span: span, path: [ast::ident]/~, +type test = {span: span, path: ~[ast::ident], ignore: bool, should_fail: bool}; type test_ctxt = @{sess: session::session, crate: @ast::crate, - mut path: [ast::ident]/~, + mut path: ~[ast::ident], testfns: dvec<test>}; // Traverse the crate, collecting all the test functions, eliding any @@ -41,13 +41,13 @@ fn generate_test_harness(sess: session::session, let cx: test_ctxt = @{sess: sess, crate: crate, - mut path: []/~, + mut path: ~[], testfns: dvec()}; let precursor = - @{fold_crate: fold::wrap({|a,b|fold_crate(cx, a, b)}), - fold_item: {|a,b|fold_item(cx, a, b)}, - fold_mod: {|a,b|fold_mod(cx, a, b)} with *fold::default_ast_fold()}; + @{fold_crate: fold::wrap(|a,b| fold_crate(cx, a, b) ), + fold_item: |a,b| fold_item(cx, a, b), + fold_mod: |a,b| fold_mod(cx, a, b) with *fold::default_ast_fold()}; let fold = fold::make_fold(precursor); let res = @fold.fold_crate(*crate); @@ -57,7 +57,7 @@ fn generate_test_harness(sess: session::session, fn strip_test_functions(crate: @ast::crate) -> @ast::crate { // When not compiling with --test we should not compile the // #[test] functions - config::strip_items(crate) {|attrs| + do config::strip_items(crate) |attrs| { !attr::contains_name(attr::attr_metas(attrs), "test") } } @@ -147,7 +147,7 @@ fn is_ignored(cx: test_ctxt, i: @ast::item) -> bool { let ignoreattrs = attr::find_attrs_by_name(i.attrs, "ignore"); let ignoreitems = attr::attr_metas(ignoreattrs); let cfg_metas = vec::concat(vec::filter_map(ignoreitems, - {|&&i| attr::get_meta_item_list(i)})); + |&&i| attr::get_meta_item_list(i) )); ret if vec::is_not_empty(ignoreitems) { config::metas_in_cfg(cx.crate.node.config, cfg_metas) } else { @@ -170,11 +170,11 @@ We're going to be building a module that looks more or less like: mod __test { - fn main(args: [str]/~) -> int { + fn main(args: ~[str]) -> int { std::test::test_main(args, tests()) } - fn tests() -> [std::test::test_desc]/~ { + fn tests() -> ~[std::test::test_desc] { ... the list of tests in the crate ... } } @@ -188,14 +188,14 @@ fn mk_test_module(cx: test_ctxt) -> @ast::item { // The synthesized main function which will call the console test runner // with our list of tests let mainfn = mk_main(cx); - let testmod: ast::_mod = {view_items: []/~, items: [mainfn, testsfn]/~}; + let testmod: ast::_mod = {view_items: ~[], items: ~[mainfn, testsfn]}; let item_ = ast::item_mod(testmod); // This attribute tells resolve to let us call unexported functions let resolve_unexported_attr = attr::mk_attr(attr::mk_word_item(@"!resolve_unexported")); let item: ast::item = {ident: @"__test", - attrs: [resolve_unexported_attr]/~, + attrs: ~[resolve_unexported_attr], id: cx.sess.next_node_id(), node: item_, vis: ast::public, @@ -210,31 +210,31 @@ fn nospan<T: copy>(t: T) -> ast::spanned<T> { ret {node: t, span: dummy_sp()}; } -fn path_node(ids: [ast::ident]/~) -> @ast::path { - @{span: dummy_sp(), global: false, idents: ids, rp: none, types: []/~} +fn path_node(ids: ~[ast::ident]) -> @ast::path { + @{span: dummy_sp(), global: false, idents: ids, rp: none, types: ~[]} } fn mk_tests(cx: test_ctxt) -> @ast::item { let ret_ty = mk_test_desc_vec_ty(cx); let decl: ast::fn_decl = - {inputs: []/~, + {inputs: ~[], output: ret_ty, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~}; + constraints: ~[]}; // The vector of test_descs for this crate let test_descs = mk_test_desc_vec(cx); let body_: ast::blk_ = - default_block([]/~, option::some(test_descs), cx.sess.next_node_id()); + default_block(~[], option::some(test_descs), cx.sess.next_node_id()); let body = nospan(body_); - let item_ = ast::item_fn(decl, []/~, body); + let item_ = ast::item_fn(decl, ~[], body); let item: ast::item = {ident: @"tests", - attrs: []/~, + attrs: ~[], id: cx.sess.next_node_id(), node: item_, vis: ast::public, @@ -242,7 +242,7 @@ fn mk_tests(cx: test_ctxt) -> @ast::item { ret @item; } -fn mk_path(cx: test_ctxt, path: [ast::ident]/~) -> [ast::ident]/~ { +fn mk_path(cx: test_ctxt, path: ~[ast::ident]) -> ~[ast::ident] { // For tests that are inside of std we don't want to prefix // the paths with std:: let is_std = { @@ -253,12 +253,12 @@ fn mk_path(cx: test_ctxt, path: [ast::ident]/~) -> [ast::ident]/~ { } }; if is_std { path } - else { vec::append([@"std"]/~, path) } + else { vec::append(~[@"std"], path) } } -// The ast::ty of [std::test::test_desc]/~ +// The ast::ty of ~[std::test::test_desc] fn mk_test_desc_vec_ty(cx: test_ctxt) -> @ast::ty { - let test_desc_ty_path = path_node(mk_path(cx, [@"test", @"test_desc"]/~)); + let test_desc_ty_path = path_node(mk_path(cx, ~[@"test", @"test_desc"])); let test_desc_ty: ast::ty = {id: cx.sess.next_node_id(), @@ -277,8 +277,8 @@ fn mk_test_desc_vec_ty(cx: test_ctxt) -> @ast::ty { fn mk_test_desc_vec(cx: test_ctxt) -> @ast::expr { #debug("building test vector from %u tests", cx.testfns.len()); - let mut descs = []/~; - for cx.testfns.each {|test| + let mut descs = ~[]; + for cx.testfns.each |test| { vec::push(descs, mk_test_desc_rec(cx, test)); } @@ -339,7 +339,7 @@ fn mk_test_desc_rec(cx: test_ctxt, test: test) -> @ast::expr { nospan({mutbl: ast::m_imm, ident: @"should_fail", expr: @fail_expr}); let desc_rec_: ast::expr_ = - ast::expr_rec([name_field, fn_field, ignore_field, fail_field]/~, + ast::expr_rec(~[name_field, fn_field, ignore_field, fail_field], option::none); let desc_rec: ast::expr = {id: cx.sess.next_node_id(), node: desc_rec_, span: span}; @@ -354,7 +354,7 @@ fn mk_test_wrapper(cx: test_ctxt, span: span) -> @ast::expr { let call_expr: ast::expr = { id: cx.sess.next_node_id(), - node: ast::expr_call(@fn_path_expr, []/~, false), + node: ast::expr_call(@fn_path_expr, ~[], false), span: span }; @@ -362,16 +362,16 @@ fn mk_test_wrapper(cx: test_ctxt, ast::stmt_semi(@call_expr, cx.sess.next_node_id())); let wrapper_decl: ast::fn_decl = { - inputs: []/~, + inputs: ~[], output: @{id: cx.sess.next_node_id(), node: ast::ty_nil, span: span}, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~ + constraints: ~[] }; let wrapper_body: ast::blk = nospan({ - view_items: []/~, - stmts: [@call_stmt]/~, + view_items: ~[], + stmts: ~[@call_stmt], expr: option::none, id: cx.sess.next_node_id(), rules: ast::default_blk @@ -380,7 +380,7 @@ fn mk_test_wrapper(cx: test_ctxt, let wrapper_expr: ast::expr = { id: cx.sess.next_node_id(), node: ast::expr_fn(ast::proto_bare, wrapper_decl, - wrapper_body, @[]/~), + wrapper_body, @~[]), span: span }; @@ -388,7 +388,7 @@ fn mk_test_wrapper(cx: test_ctxt, } fn mk_main(cx: test_ctxt) -> @ast::item { - let str_pt = path_node([@"str"]/~); + let str_pt = path_node(~[@"str"]); let str_ty = @{id: cx.sess.next_node_id(), node: ast::ty_path(str_pt, cx.sess.next_node_id()), span: dummy_sp()}; @@ -412,23 +412,23 @@ fn mk_main(cx: test_ctxt) -> @ast::item { span: dummy_sp()}; let decl: ast::fn_decl = - {inputs: [args_arg]/~, + {inputs: ~[args_arg], output: @ret_ty, purity: ast::impure_fn, cf: ast::return_val, - constraints: []/~}; + constraints: ~[]}; let test_main_call_expr = mk_test_main_call(cx); let body_: ast::blk_ = - default_block([]/~, option::some(test_main_call_expr), + default_block(~[], option::some(test_main_call_expr), cx.sess.next_node_id()); let body = {node: body_, span: dummy_sp()}; - let item_ = ast::item_fn(decl, []/~, body); + let item_ = ast::item_fn(decl, ~[], body); let item: ast::item = {ident: @"main", - attrs: []/~, + attrs: ~[], id: cx.sess.next_node_id(), node: item_, vis: ast::public, @@ -439,7 +439,7 @@ fn mk_main(cx: test_ctxt) -> @ast::item { fn mk_test_main_call(cx: test_ctxt) -> @ast::expr { // Get the args passed to main so we can pass the to test_main - let args_path = path_node([@"args"]/~); + let args_path = path_node(~[@"args"]); let args_path_expr_: ast::expr_ = ast::expr_path(args_path); @@ -447,20 +447,20 @@ fn mk_test_main_call(cx: test_ctxt) -> @ast::expr { {id: cx.sess.next_node_id(), node: args_path_expr_, span: dummy_sp()}; // Call __test::test to generate the vector of test_descs - let test_path = path_node([@"tests"]/~); + let test_path = path_node(~[@"tests"]); let test_path_expr_: ast::expr_ = ast::expr_path(test_path); let test_path_expr: ast::expr = {id: cx.sess.next_node_id(), node: test_path_expr_, span: dummy_sp()}; - let test_call_expr_ = ast::expr_call(@test_path_expr, []/~, false); + let test_call_expr_ = ast::expr_call(@test_path_expr, ~[], false); let test_call_expr: ast::expr = {id: cx.sess.next_node_id(), node: test_call_expr_, span: dummy_sp()}; // Call std::test::test_main - let test_main_path = path_node(mk_path(cx, [@"test", @"test_main"]/~)); + let test_main_path = path_node(mk_path(cx, ~[@"test", @"test_main"])); let test_main_path_expr_: ast::expr_ = ast::expr_path(test_main_path); @@ -470,7 +470,7 @@ fn mk_test_main_call(cx: test_ctxt) -> @ast::expr { let test_main_call_expr_: ast::expr_ = ast::expr_call(@test_main_path_expr, - [@args_path_expr, @test_call_expr]/~, false); + ~[@args_path_expr, @test_call_expr], false); let test_main_call_expr: ast::expr = {id: cx.sess.next_node_id(), node: test_main_call_expr_, diff --git a/src/rustc/lib/llvm.rs b/src/rustc/lib/llvm.rs index dec7d363155..60bb9c57f1c 100644 --- a/src/rustc/lib/llvm.rs +++ b/src/rustc/lib/llvm.rs @@ -128,6 +128,31 @@ enum TypeKind { X86_MMX = 15 } +enum AtomicBinOp { + Xchg = 0, + Add = 1, + Sub = 2, + And = 3, + Nand = 4, + Or = 5, + Xor = 6, + Max = 7, + Min = 8, + UMax = 9, + UMin = 10, +} + +enum AtomicOrdering { + NotAtomic = 0, + Unordered = 1, + Monotonic = 2, + // Consume = 3, // Not specified yet. + Acquire = 4, + Release = 5, + AcquireRelease = 6, + SequentiallyConsistent = 7 +} + // FIXME: Not used right now, but will be once #2334 is fixed // Consts for the LLVMCodeGenFileType type (in include/llvm/c/TargetMachine.h) enum FileType { @@ -772,6 +797,11 @@ native mod llvm { fn LLVMBuildPtrDiff(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *c_char) -> ValueRef; + /* Atomic Operations */ + fn LLVMBuildAtomicRMW(B: BuilderRef, ++Op: AtomicBinOp, + LHS: ValueRef, RHS: ValueRef, + ++Order: AtomicOrdering) -> ValueRef; + /* Selected entries from the downcasts. */ fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef; @@ -981,10 +1011,10 @@ fn mk_type_names() -> type_names { } fn type_to_str(names: type_names, ty: TypeRef) -> str { - ret type_to_str_inner(names, []/~, ty); + ret type_to_str_inner(names, ~[], ty); } -fn type_to_str_inner(names: type_names, outer0: [TypeRef]/~, ty: TypeRef) -> +fn type_to_str_inner(names: type_names, outer0: ~[TypeRef], ty: TypeRef) -> str { alt type_has_name(names, ty) { option::some(n) { ret n; } @@ -995,11 +1025,11 @@ fn type_to_str_inner(names: type_names, outer0: [TypeRef]/~, ty: TypeRef) -> let kind = llvm::LLVMGetTypeKind(ty); - fn tys_str(names: type_names, outer: [TypeRef]/~, - tys: [TypeRef]/~) -> str { + fn tys_str(names: type_names, outer: ~[TypeRef], + tys: ~[TypeRef]) -> str { let mut s: str = ""; let mut first: bool = true; - for tys.each {|t| + for tys.each |t| { if first { first = false; } else { s += ", "; } s += type_to_str_inner(names, outer, t); } @@ -1049,7 +1079,7 @@ fn type_to_str_inner(names: type_names, outer0: [TypeRef]/~, ty: TypeRef) -> } Pointer { let mut i: uint = 0u; - for outer0.each {|tout| + for outer0.each |tout| { i += 1u; if tout as int == ty as int { let n: uint = vec::len::<TypeRef>(outer0) - i; @@ -1083,7 +1113,7 @@ fn float_width(llt: TypeRef) -> uint { }; } -fn fn_ty_param_tys(fn_ty: TypeRef) -> [TypeRef]/~ unsafe { +fn fn_ty_param_tys(fn_ty: TypeRef) -> ~[TypeRef] unsafe { let args = vec::from_elem(llvm::LLVMCountParamTypes(fn_ty) as uint, 0 as TypeRef); llvm::LLVMGetParamTypes(fn_ty, vec::unsafe::to_ptr(args)); @@ -1103,7 +1133,7 @@ type target_data = {lltd: TargetDataRef, dtor: @target_data_res}; fn mk_target_data(string_rep: str) -> target_data { let lltd = - str::as_c_str(string_rep, {|buf| llvm::LLVMCreateTargetData(buf) }); + str::as_c_str(string_rep, |buf| llvm::LLVMCreateTargetData(buf) ); ret {lltd: lltd, dtor: @target_data_res(lltd)}; } diff --git a/src/rustc/metadata/astencode.rs b/src/rustc/metadata/astencode.rs index 9ba1f222a7e..79807a3a0ba 100644 --- a/src/rustc/metadata/astencode.rs +++ b/src/rustc/metadata/astencode.rs @@ -66,9 +66,9 @@ fn encode_inlined_item(ecx: @e::encode_ctxt, ebml_w: ebml::writer, path: ast_map::path, ii: ast::inlined_item) { - #debug["> Encoding inlined item: %s::%s (%u)", + #debug~["> Encoding inlined item: %s::%s (%u)", ast_map::path_to_str(path), ii.ident(), - ebml_w.writer.tell()]/~; + ebml_w.writer.tell()]; let id_range = compute_id_range_for_inlined_item(ii); ebml_w.wr_tag(c::tag_ast as uint) {|| @@ -77,9 +77,9 @@ fn encode_inlined_item(ecx: @e::encode_ctxt, encode_side_tables_for_ii(ecx, ebml_w, ii); } - #debug["< Encoded inlined fn: %s::%s (%u)", + #debug~["< Encoded inlined fn: %s::%s (%u)", ast_map::path_to_str(path), ii.ident(), - ebml_w.writer.tell()]/~; + ebml_w.writer.tell()]; } fn decode_inlined_item(cdata: cstore::crate_metadata, @@ -102,8 +102,8 @@ fn decode_inlined_item(cdata: cstore::crate_metadata, ast_map::map_decoded_item(tcx.sess, dcx.tcx.items, path, ii); #debug["Fn named: %s", ii.ident()]; decode_side_tables(xcx, ast_doc); - #debug["< Decoded inlined fn: %s::%s", - ast_map::path_to_str(path), ii.ident()]/~; + #debug~["< Decoded inlined fn: %s::%s", + ast_map::path_to_str(path), ii.ident()]; alt ii { ast::ii_item(i) { #debug(">>> DECODED ITEM >>>\n%s\n<<< DECODED ITEM <<<", @@ -522,7 +522,7 @@ impl helpers for ebml::writer { e::write_type(ecx, self, ty) } - fn emit_tys(ecx: @e::encode_ctxt, tys: [ty::t]/~) { + fn emit_tys(ecx: @e::encode_ctxt, tys: ~[ty::t]) { self.emit_from_vec(tys) {|ty| e::write_type(ecx, self, ty) } @@ -707,7 +707,7 @@ fn encode_side_tables_for_id(ecx: @e::encode_ctxt, impl decoder for ebml::doc { fn as_int() -> int { ebml::doc_as_u64(self) as int } - fn []/~(tag: c::astencode_tag) -> ebml::doc { + fn ~[](tag: c::astencode_tag) -> ebml::doc { ebml::get_doc(self, tag as uint) } fn opt_child(tag: c::astencode_tag) -> option<ebml::doc> { @@ -727,11 +727,11 @@ impl decoder for ebml::ebml_deserializer { xcx.tr_def_id(_)) } - fn read_tys(xcx: extended_decode_ctxt) -> [ty::t]/~ { + fn read_tys(xcx: extended_decode_ctxt) -> ~[ty::t] { self.read_to_vec {|| self.read_ty(xcx) } } - fn read_bounds(xcx: extended_decode_ctxt) -> @[ty::param_bound]/~ { + fn read_bounds(xcx: extended_decode_ctxt) -> @~[ty::param_bound] { tydecode::parse_bounds_data( self.parent.data, self.pos, xcx.dcx.cdata.cnum, xcx.dcx.tcx, xcx.tr_def_id(_)) @@ -763,9 +763,9 @@ fn decode_side_tables(xcx: extended_decode_ctxt, let id0 = entry_doc[c::tag_table_id].as_int(); let id = xcx.tr_id(id0); - #debug[">> Side table document with tag 0x%x \ + #debug~[">> Side table document with tag 0x%x \ found for id %d (orig %d)", - tag, id, id0]/~; + tag, id, id0]; if tag == (c::tag_table_mutbl as uint) { dcx.maps.mutbl_map.insert(id, ()); @@ -859,7 +859,7 @@ type fake_session = (); #[cfg(test)] impl of fake_ext_ctxt for fake_session { - fn cfg() -> ast::crate_cfg { []/~ } + fn cfg() -> ast::crate_cfg { ~[] } fn parse_sess() -> parse::parse_sess { new_parse_sess() } } @@ -922,13 +922,13 @@ fn test_simplification() { let item_in = ast::ii_item(#ast(item) { fn new_int_alist<B: copy>() -> alist<int, B> { fn eq_int(&&a: int, &&b: int) -> bool { a == b } - ret {eq_fn: eq_int, mut data: []/~}; + ret {eq_fn: eq_int, mut data: ~[]}; } }); let item_out = simplify_ast(item_in); let item_exp = ast::ii_item(#ast(item) { fn new_int_alist<B: copy>() -> alist<int, B> { - ret {eq_fn: eq_int, mut data: []/~}; + ret {eq_fn: eq_int, mut data: ~[]}; } }); alt (item_out, item_exp) { diff --git a/src/rustc/metadata/common.rs b/src/rustc/metadata/common.rs index 9aa7748c5a4..99df3c79f74 100644 --- a/src/rustc/metadata/common.rs +++ b/src/rustc/metadata/common.rs @@ -128,7 +128,7 @@ fn hash_node_id(&&node_id: int) -> uint { ret 177573u ^ (node_id as uint); } fn hash_path(&&s: str) -> uint { let mut h = 5381u; - for str::each(s) {|ch| h = (h << 5u) + h ^ (ch as uint); } + for str::each(s) |ch| { h = (h << 5u) + h ^ (ch as uint); } ret h; } diff --git a/src/rustc/metadata/creader.rs b/src/rustc/metadata/creader.rs index aa02bb84c8c..8f920e6bbbe 100644 --- a/src/rustc/metadata/creader.rs +++ b/src/rustc/metadata/creader.rs @@ -31,8 +31,8 @@ fn read_crates(diag: span_handler, crate: ast::crate, mut next_crate_num: 1}; let v = visit::mk_simple_visitor(@{visit_view_item: - {|a|visit_view_item(e, a)}, - visit_item: {|a|visit_item(e, a)} + |a| visit_view_item(e, a), + visit_item: |a| visit_item(e, a) with *visit::default_simple_visitor()}); visit::visit_crate(crate, (), v); dump_crates(e.crate_cache); @@ -43,50 +43,50 @@ type cache_entry = { cnum: int, span: span, hash: @str, - metas: @[@ast::meta_item]/~ + metas: @~[@ast::meta_item] }; fn dump_crates(crate_cache: dvec<cache_entry>) { #debug("resolved crates:"); - for crate_cache.each {|entry| + for crate_cache.each |entry| { #debug("cnum: %?", entry.cnum); #debug("span: %?", entry.span); #debug("hash: %?", entry.hash); - let attrs = [ + let attrs = ~[ attr::mk_attr(attr::mk_list_item(@"link", *entry.metas)) - ]/~; - for attr::find_linkage_attrs(attrs).each {|attr| + ]; + for attr::find_linkage_attrs(attrs).each |attr| { #debug("meta: %s", pprust::attr_to_str(attr)); } } } fn warn_if_multiple_versions(diag: span_handler, - crate_cache: [cache_entry]/~) { + crate_cache: ~[cache_entry]) { import either::*; if crate_cache.len() != 0u { let name = loader::crate_name_from_metas(*crate_cache.last().metas); let {lefts: matches, rights: non_matches} = - partition(crate_cache.map_to_vec {|entry| + partition(crate_cache.map_to_vec(|entry| { let othername = loader::crate_name_from_metas(*entry.metas); if name == othername { left(entry) } else { right(entry) } - }); + })); assert matches.is_not_empty(); if matches.len() != 1u { diag.handler().warn( #fmt("using multiple versions of crate `%s`", *name)); - for matches.each {|match| + for matches.each |match| { diag.span_note(match.span, "used here"); - let attrs = [ + let attrs = ~[ attr::mk_attr(attr::mk_list_item(@"link", *match.metas)) - ]/~; + ]; loader::note_linkage_attrs(diag, attrs); } } @@ -147,7 +147,7 @@ fn visit_item(e: env, i: @ast::item) { e.diag.span_fatal(i.span, "library '" + *foreign_name + "' already added: can't specify link_args."); } - for link_args.each {|a| + for link_args.each |a| { alt attr::get_meta_item_value_str(attr::attr_meta(a)) { some(linkarg) { cstore::add_used_link_args(cstore, *linkarg); @@ -161,7 +161,7 @@ fn visit_item(e: env, i: @ast::item) { } fn metas_with(ident: ast::ident, key: ast::ident, - metas: [@ast::meta_item]/~) -> [@ast::meta_item]/~ { + metas: ~[@ast::meta_item]) -> ~[@ast::meta_item] { let name_items = attr::find_meta_items_by_name(metas, *key); if name_items.is_empty() { vec::append_one(metas, attr::mk_name_value_item_str(key, *ident)) @@ -171,14 +171,14 @@ fn metas_with(ident: ast::ident, key: ast::ident, } fn metas_with_ident(ident: ast::ident, - metas: [@ast::meta_item]/~) -> [@ast::meta_item]/~ { + metas: ~[@ast::meta_item]) -> ~[@ast::meta_item] { metas_with(ident, @"name", metas) } -fn existing_match(e: env, metas: [@ast::meta_item]/~, hash: str) -> +fn existing_match(e: env, metas: ~[@ast::meta_item], hash: str) -> option<int> { - for e.crate_cache.each {|c| + for e.crate_cache.each |c| { if loader::metadata_matches(*c.metas, metas) && (hash.is_empty() || *c.hash == hash) { ret some(c.cnum); @@ -187,7 +187,7 @@ fn existing_match(e: env, metas: [@ast::meta_item]/~, hash: str) -> ret none; } -fn resolve_crate(e: env, ident: ast::ident, metas: [@ast::meta_item]/~, +fn resolve_crate(e: env, ident: ast::ident, metas: ~[@ast::meta_item], hash: str, span: span) -> ast::crate_num { let metas = metas_with_ident(ident, metas); @@ -241,15 +241,15 @@ fn resolve_crate(e: env, ident: ast::ident, metas: [@ast::meta_item]/~, } // Go through the crate metadata and load any crates that it references -fn resolve_crate_deps(e: env, cdata: @[u8]/~) -> cstore::cnum_map { +fn resolve_crate_deps(e: env, cdata: @~[u8]) -> cstore::cnum_map { #debug("resolving deps of external crate"); // The map from crate numbers in the crate we're resolving to local crate // numbers let cnum_map = int_hash::<ast::crate_num>(); - for decoder::get_crate_deps(cdata).each {|dep| + for decoder::get_crate_deps(cdata).each |dep| { let extrn_cnum = dep.cnum; let cname = dep.name; - let cmetas = metas_with(dep.vers, @"vers", []/~); + let cmetas = metas_with(dep.vers, @"vers", ~[]); #debug("resolving dep crate %s ver: %s hash: %s", *dep.name, *dep.vers, *dep.hash); alt existing_match(e, metas_with_ident(cname, cmetas), *dep.hash) { diff --git a/src/rustc/metadata/csearch.rs b/src/rustc/metadata/csearch.rs index 6b30bf3090b..700913498e9 100644 --- a/src/rustc/metadata/csearch.rs +++ b/src/rustc/metadata/csearch.rs @@ -39,10 +39,10 @@ fn get_type_param_count(cstore: cstore::cstore, def: ast::def_id) -> uint { } fn lookup_defs(cstore: cstore::cstore, cnum: ast::crate_num, - path: [ast::ident]/~) -> [ast::def]/~ { - let mut result = []/~; + path: ~[ast::ident]) -> ~[ast::def] { + let mut result = ~[]; #debug("lookup_defs: path = %? cnum = %?", path, cnum); - for resolve_path(cstore, cnum, path).each {|elt| + for resolve_path(cstore, cnum, path).each |elt| { let (c, data, def) = elt; vec::push(result, decoder::lookup_def(c, data, def)); } @@ -60,13 +60,13 @@ fn lookup_method_purity(cstore: cstore::cstore, did: ast::def_id) /* Returns a vector of possible def IDs for a given path, in a given crate */ fn resolve_path(cstore: cstore::cstore, cnum: ast::crate_num, - path: [ast::ident]/~) -> - [(ast::crate_num, @[u8]/~, ast::def_id)]/~ { + path: ~[ast::ident]) -> + ~[(ast::crate_num, @~[u8], ast::def_id)] { let cm = cstore::get_crate_data(cstore, cnum); #debug("resolve_path %s in crates[%d]:%s", ast_util::path_name_i(path), cnum, cm.name); - let mut result = []/~; - for decoder::resolve_path(path, cm.data).each {|def| + let mut result = ~[]; + for decoder::resolve_path(path, cm.data).each |def| { if def.crate == ast::local_crate { vec::push(result, (cnum, cm.data, def)); } else { @@ -88,7 +88,7 @@ fn get_item_path(tcx: ty::ctxt, def: ast::def_id) -> ast_map::path { // FIXME #1920: This path is not always correct if the crate is not linked // into the root namespace. - vec::append([ast_map::path_mod(@cdata.name)]/~, path) + vec::append(~[ast_map::path_mod(@cdata.name)], path) } enum found_ast { @@ -110,7 +110,7 @@ fn maybe_get_item_ast(tcx: ty::ctxt, def: ast::def_id, } fn get_enum_variants(tcx: ty::ctxt, def: ast::def_id) - -> [ty::variant_info]/~ { + -> ~[ty::variant_info] { let cstore = tcx.cstore; let cdata = cstore::get_crate_data(cstore, def.crate); ret decoder::get_enum_variants(cdata, def.node, tcx) @@ -118,20 +118,20 @@ fn get_enum_variants(tcx: ty::ctxt, def: ast::def_id) fn get_impls_for_mod(cstore: cstore::cstore, def: ast::def_id, name: option<ast::ident>) - -> @[@decoder::_impl]/~ { + -> @~[@decoder::_impl] { let cdata = cstore::get_crate_data(cstore, def.crate); - decoder::get_impls_for_mod(cdata, def.node, name) {|cnum| + do decoder::get_impls_for_mod(cdata, def.node, name) |cnum| { cstore::get_crate_data(cstore, cnum) } } -fn get_iface_methods(tcx: ty::ctxt, def: ast::def_id) -> @[ty::method]/~ { +fn get_iface_methods(tcx: ty::ctxt, def: ast::def_id) -> @~[ty::method] { let cstore = tcx.cstore; let cdata = cstore::get_crate_data(cstore, def.crate); decoder::get_iface_methods(cdata, def.node, tcx) } -fn get_class_fields(tcx: ty::ctxt, def: ast::def_id) -> [ty::field_ty]/~ { +fn get_class_fields(tcx: ty::ctxt, def: ast::def_id) -> ~[ty::field_ty] { let cstore = tcx.cstore; let cdata = cstore::get_crate_data(cstore, def.crate); decoder::get_class_fields(cdata, def.node) @@ -151,16 +151,16 @@ fn get_field_type(tcx: ty::ctxt, class_id: ast::def_id, #debug("Looking up %?", class_id); let class_doc = expect(tcx.diag, decoder::maybe_find_item(class_id.node, all_items), - {|| #fmt("get_field_type: class ID %? not found", - class_id)}); + || #fmt("get_field_type: class ID %? not found", + class_id) ); #debug("looking up %? : %?", def, class_doc); let the_field = expect(tcx.diag, decoder::maybe_find_item(def.node, class_doc), - {|| #fmt("get_field_type: in class %?, field ID %? not found", - class_id, def)}); + || #fmt("get_field_type: in class %?, field ID %? not found", + class_id, def) ); #debug("got field data %?", the_field); let ty = decoder::item_type(def, the_field, tcx, cdata); - ret {bounds: @[]/~, rp: ast::rp_none, ty: ty}; + ret {bounds: @~[], rp: ast::rp_none, ty: ty}; } // Given a def_id for an impl or class, return the iface it implements, diff --git a/src/rustc/metadata/cstore.rs b/src/rustc/metadata/cstore.rs index 8d828aed69d..f912dd92a4a 100644 --- a/src/rustc/metadata/cstore.rs +++ b/src/rustc/metadata/cstore.rs @@ -40,7 +40,7 @@ type cnum_map = map::hashmap<ast::crate_num, ast::crate_num>; type mod_path_map = map::hashmap<ast::def_id, @str>; type crate_metadata = @{name: str, - data: @[u8]/~, + data: @~[u8], cnum_map: cnum_map, cnum: ast::crate_num}; @@ -55,9 +55,9 @@ type cstore_private = @{metas: map::hashmap<ast::crate_num, crate_metadata>, use_crate_map: use_crate_map, mod_path_map: mod_path_map, - mut used_crate_files: [str]/~, - mut used_libraries: [str]/~, - mut used_link_args: [str]/~}; + mut used_crate_files: ~[str], + mut used_libraries: ~[str], + mut used_link_args: ~[str]}; // Map from node_id's of local use statements to crate numbers type use_crate_map = map::hashmap<ast::node_id, ast::crate_num>; @@ -74,9 +74,9 @@ fn mk_cstore() -> cstore { ret private(@{metas: meta_cache, use_crate_map: crate_map, mod_path_map: mod_path_map, - mut used_crate_files: []/~, - mut used_libraries: []/~, - mut used_link_args: []/~}); + mut used_crate_files: ~[], + mut used_libraries: ~[], + mut used_link_args: ~[]}); } fn get_crate_data(cstore: cstore, cnum: ast::crate_num) -> crate_metadata { @@ -96,7 +96,7 @@ fn get_crate_vers(cstore: cstore, cnum: ast::crate_num) -> @str { fn set_crate_data(cstore: cstore, cnum: ast::crate_num, data: crate_metadata) { p(cstore).metas.insert(cnum, data); - vec::iter(decoder::get_crate_module_paths(data.data)) {|dp| + do vec::iter(decoder::get_crate_module_paths(data.data)) |dp| { let (did, path) = dp; let d = {crate: cnum, node: did.node}; p(cstore).mod_path_map.insert(d, @path); @@ -108,7 +108,7 @@ fn have_crate_data(cstore: cstore, cnum: ast::crate_num) -> bool { } fn iter_crate_data(cstore: cstore, i: fn(ast::crate_num, crate_metadata)) { - for p(cstore).metas.each {|k,v| i(k, v);}; + for p(cstore).metas.each |k,v| { i(k, v);}; } fn add_used_crate_file(cstore: cstore, lib: str) { @@ -117,7 +117,7 @@ fn add_used_crate_file(cstore: cstore, lib: str) { } } -fn get_used_crate_files(cstore: cstore) -> [str]/~ { +fn get_used_crate_files(cstore: cstore) -> ~[str] { ret p(cstore).used_crate_files; } @@ -129,7 +129,7 @@ fn add_used_library(cstore: cstore, lib: str) -> bool { ret true; } -fn get_used_libraries(cstore: cstore) -> [str]/~ { +fn get_used_libraries(cstore: cstore) -> ~[str] { ret p(cstore).used_libraries; } @@ -137,7 +137,7 @@ fn add_used_link_args(cstore: cstore, args: str) { vec::push_all(p(cstore).used_link_args, str::split_char(args, ' ')); } -fn get_used_link_args(cstore: cstore) -> [str]/~ { +fn get_used_link_args(cstore: cstore) -> ~[str] { ret p(cstore).used_link_args; } @@ -153,11 +153,11 @@ fn find_use_stmt_cnum(cstore: cstore, // returns hashes of crates directly used by this crate. Hashes are // sorted by crate name. -fn get_dep_hashes(cstore: cstore) -> [@str]/~ { +fn get_dep_hashes(cstore: cstore) -> ~[@str] { type crate_hash = {name: @str, hash: @str}; - let mut result = []/~; + let mut result = ~[]; - for p(cstore).use_crate_map.each_value {|cnum| + for p(cstore).use_crate_map.each_value |cnum| { let cdata = cstore::get_crate_data(cstore, cnum); let hash = decoder::get_crate_hash(cdata.data); #debug("Add hash[%s]: %s", cdata.name, *hash); @@ -168,17 +168,17 @@ fn get_dep_hashes(cstore: cstore) -> [@str]/~ { } let sorted = std::sort::merge_sort(lteq, result); #debug("sorted:"); - for sorted.each {|x| + for sorted.each |x| { #debug(" hash[%s]: %s", *x.name, *x.hash); } fn mapper(ch: crate_hash) -> @str { ret ch.hash; } ret vec::map(sorted, mapper); } -fn get_path(cstore: cstore, d: ast::def_id) -> [ast::ident]/~ { +fn get_path(cstore: cstore, d: ast::def_id) -> ~[ast::ident] { // let f = bind str::split_str(_, "::"); - option::map_default(p(cstore).mod_path_map.find(d), []/~, - {|ds| str::split_str(*ds, "::").map({|x|@x})}) + option::map_default(p(cstore).mod_path_map.find(d), ~[], + |ds| str::split_str(*ds, "::").map(|x| @x ) ) } // Local Variables: // mode: rust diff --git a/src/rustc/metadata/decoder.rs b/src/rustc/metadata/decoder.rs index 300b8be8e28..ccb3472325b 100644 --- a/src/rustc/metadata/decoder.rs +++ b/src/rustc/metadata/decoder.rs @@ -53,8 +53,8 @@ export translate_def_id; // what crate that's in and give us a def_id that makes sense for the current // build. -fn lookup_hash(d: ebml::doc, eq_fn: fn@([u8]/~) -> bool, hash: uint) -> - [ebml::doc]/~ { +fn lookup_hash(d: ebml::doc, eq_fn: fn@(~[u8]) -> bool, hash: uint) -> + ~[ebml::doc] { let index = ebml::get_doc(d, tag_index); let table = ebml::get_doc(index, tag_index_table); let hash_pos = table.start + hash % 256u * 4u; @@ -62,9 +62,9 @@ fn lookup_hash(d: ebml::doc, eq_fn: fn@([u8]/~) -> bool, hash: uint) -> let {tag:_, doc:bucket} = ebml::doc_at(d.data, pos); // Awkward logic because we can't ret from foreach yet - let mut result: [ebml::doc]/~ = []/~; + let mut result: ~[ebml::doc] = ~[]; let belt = tag_index_buckets_bucket_elt; - ebml::tagged_docs(bucket, belt) {|elt| + do ebml::tagged_docs(bucket, belt) |elt| { let pos = io::u64_from_be_bytes(*elt.data, elt.start, 4u) as uint; if eq_fn(vec::slice::<u8>(*elt.data, elt.start + 4u, elt.end)) { vec::push(result, ebml::doc_at(d.data, pos).doc); @@ -74,10 +74,10 @@ fn lookup_hash(d: ebml::doc, eq_fn: fn@([u8]/~) -> bool, hash: uint) -> } fn maybe_find_item(item_id: int, items: ebml::doc) -> option<ebml::doc> { - fn eq_item(bytes: [u8]/~, item_id: int) -> bool { + fn eq_item(bytes: ~[u8], item_id: int) -> bool { ret io::u64_from_be_bytes(bytes, 0u, 4u) as int == item_id; } - let eqer = {|a|eq_item(a, item_id)}; + let eqer = |a| eq_item(a, item_id); let found = lookup_hash(items, eqer, hash_node_id(item_id)); if vec::len(found) == 0u { ret option::none::<ebml::doc>; @@ -90,7 +90,7 @@ fn find_item(item_id: int, items: ebml::doc) -> ebml::doc { // Looks up an item in the given metadata and returns an ebml doc pointing // to the item data. -fn lookup_item(item_id: int, data: @[u8]/~) -> ebml::doc { +fn lookup_item(item_id: int, data: @~[u8]) -> ebml::doc { let items = ebml::get_doc(ebml::doc(data), tag_items); alt maybe_find_item(item_id, items) { none { fail(#fmt("lookup_item: id not found: %d", item_id)); } @@ -110,7 +110,7 @@ fn item_symbol(item: ebml::doc) -> str { fn item_parent_item(d: ebml::doc) -> option<ast::def_id> { let mut found = none; - ebml::tagged_docs(d, tag_items_data_parent_item) {|did| + do ebml::tagged_docs(d, tag_items_data_parent_item) |did| { found = some(parse_def_id(ebml::doc_data(did))); } found @@ -123,25 +123,26 @@ fn class_member_id(d: ebml::doc, cdata: cmd) -> ast::def_id { fn field_mutability(d: ebml::doc) -> ast::class_mutability { // Use maybe_get_doc in case it's a method - option::map_default(ebml::maybe_get_doc(d, tag_class_mut), - ast::class_immutable, - {|d| - alt ebml::doc_as_u8(d) as char { - 'm' { ast::class_mutable } - _ { ast::class_immutable } - } - }) + option::map_default( + ebml::maybe_get_doc(d, tag_class_mut), + ast::class_immutable, + |d| { + alt ebml::doc_as_u8(d) as char { + 'm' { ast::class_mutable } + _ { ast::class_immutable } + } + }) } fn variant_disr_val(d: ebml::doc) -> option<int> { - option::chain(ebml::maybe_get_doc(d, tag_disr_val)) {|val_doc| + do option::chain(ebml::maybe_get_doc(d, tag_disr_val)) |val_doc| { int::parse_buf(ebml::doc_data(val_doc), 10u) } } fn doc_type(doc: ebml::doc, tcx: ty::ctxt, cdata: cmd) -> ty::t { let tp = ebml::get_doc(doc, tag_items_data_item_type); - parse_ty_data(tp.data, cdata.cnum, tp.start, tcx, {|did| + parse_ty_data(tp.data, cdata.cnum, tp.start, tcx, |did| { translate_def_id(cdata, did) }) } @@ -157,17 +158,17 @@ fn item_type(item_id: ast::def_id, item: ebml::doc, fn item_impl_iface(item: ebml::doc, tcx: ty::ctxt, cdata: cmd) -> option<ty::t> { let mut result = none; - ebml::tagged_docs(item, tag_impl_iface) {|ity| + do ebml::tagged_docs(item, tag_impl_iface) |ity| { result = some(doc_type(ity, tcx, cdata)); }; result } fn item_ty_param_bounds(item: ebml::doc, tcx: ty::ctxt, cdata: cmd) - -> @[ty::param_bounds]/~ { - let mut bounds = []/~; - ebml::tagged_docs(item, tag_items_data_item_ty_param_bounds) {|p| - let bd = parse_bounds_data(p.data, p.start, cdata.cnum, tcx, {|did| + -> @~[ty::param_bounds] { + let mut bounds = ~[]; + do ebml::tagged_docs(item, tag_items_data_item_ty_param_bounds) |p| { + let bd = parse_bounds_data(p.data, p.start, cdata.cnum, tcx, |did| { translate_def_id(cdata, did) }); vec::push(bounds, bd); @@ -190,14 +191,14 @@ fn item_ty_region_param(item: ebml::doc) -> ast::region_param { fn item_ty_param_count(item: ebml::doc) -> uint { let mut n = 0u; ebml::tagged_docs(item, tag_items_data_item_ty_param_bounds, - {|_p| n += 1u; }); + |_p| n += 1u ); n } -fn enum_variant_ids(item: ebml::doc, cdata: cmd) -> [ast::def_id]/~ { - let mut ids: [ast::def_id]/~ = []/~; +fn enum_variant_ids(item: ebml::doc, cdata: cmd) -> ~[ast::def_id] { + let mut ids: ~[ast::def_id] = ~[]; let v = tag_items_data_item_variant; - ebml::tagged_docs(item, v) {|p| + do ebml::tagged_docs(item, v) |p| { let ext = parse_def_id(ebml::doc_data(p)); vec::push(ids, {crate: cdata.cnum, node: ext.node}); }; @@ -206,17 +207,17 @@ fn enum_variant_ids(item: ebml::doc, cdata: cmd) -> [ast::def_id]/~ { // Given a path and serialized crate metadata, returns the IDs of the // definitions the path may refer to. -fn resolve_path(path: [ast::ident]/~, data: @[u8]/~) -> [ast::def_id]/~ { - fn eq_item(data: [u8]/~, s: str) -> bool { +fn resolve_path(path: ~[ast::ident], data: @~[u8]) -> ~[ast::def_id] { + fn eq_item(data: ~[u8], s: str) -> bool { ret str::eq(str::from_bytes(data), s); } let s = ast_util::path_name_i(path); let md = ebml::doc(data); let paths = ebml::get_doc(md, tag_paths); - let eqer = {|a|eq_item(a, s)}; - let mut result: [ast::def_id]/~ = []/~; + let eqer = |a| eq_item(a, s); + let mut result: ~[ast::def_id] = ~[]; #debug("resolve_path: looking up %s", s); - for lookup_hash(paths, eqer, hash_path(s)).each {|doc| + for lookup_hash(paths, eqer, hash_path(s)).each |doc| { let did_doc = ebml::get_doc(doc, tag_def_id); vec::push(result, parse_def_id(ebml::doc_data(did_doc))); } @@ -229,10 +230,10 @@ fn item_path(item_doc: ebml::doc) -> ast_map::path { let len_doc = ebml::get_doc(path_doc, tag_path_len); let len = ebml::doc_as_u32(len_doc) as uint; - let mut result = []/~; + let mut result = ~[]; vec::reserve(result, len); - ebml::docs(path_doc) {|tag, elt_doc| + do ebml::docs(path_doc) |tag, elt_doc| { if tag == tag_path_elt_mod { let str = ebml::doc_as_str(elt_doc); vec::push(result, ast_map::path_mod(@str)); @@ -252,11 +253,11 @@ fn item_name(item: ebml::doc) -> ast::ident { @str::from_bytes(ebml::doc_data(name)) } -fn lookup_item_name(data: @[u8]/~, id: ast::node_id) -> ast::ident { +fn lookup_item_name(data: @~[u8], id: ast::node_id) -> ast::ident { item_name(lookup_item(id, data)) } -fn lookup_def(cnum: ast::crate_num, data: @[u8]/~, did_: ast::def_id) -> +fn lookup_def(cnum: ast::crate_num, data: @~[u8], did_: ast::def_id) -> ast::def { let item = lookup_item(did_.node, data); let fam_ch = item_family(item); @@ -288,12 +289,12 @@ fn get_type(cdata: cmd, id: ast::node_id, tcx: ty::ctxt) let t = item_type({crate: cdata.cnum, node: id}, item, tcx, cdata); let tp_bounds = if family_has_type_params(item_family(item)) { item_ty_param_bounds(item, tcx, cdata) - } else { @[]/~ }; + } else { @~[] }; let rp = item_ty_region_param(item); ret {bounds: tp_bounds, rp: rp, ty: t}; } -fn get_type_param_count(data: @[u8]/~, id: ast::node_id) -> uint { +fn get_type_param_count(data: @~[u8], id: ast::node_id) -> uint { item_ty_param_count(lookup_item(id, data)) } @@ -306,7 +307,7 @@ fn get_impl_method(cdata: cmd, id: ast::node_id, name: ast::ident) -> ast::def_id { let items = ebml::get_doc(ebml::doc(cdata.data), tag_items); let mut found = none; - ebml::tagged_docs(find_item(id, items), tag_item_impl_method) {|mid| + do ebml::tagged_docs(find_item(id, items), tag_item_impl_method) |mid| { let m_did = parse_def_id(ebml::doc_data(mid)); if item_name(find_item(m_did.node, items)) == name { found = some(translate_def_id(cdata, m_did)); @@ -323,7 +324,7 @@ fn get_class_method(cdata: cmd, id: ast::node_id, some(it) { it } none { fail (#fmt("get_class_method: class id not found \ when looking up method %s", *name)) }}; - ebml::tagged_docs(cls_items, tag_item_iface_method) {|mid| + do ebml::tagged_docs(cls_items, tag_item_iface_method) |mid| { let m_did = class_member_id(mid, cdata); if item_name(mid) == name { found = some(m_did); @@ -343,7 +344,7 @@ fn class_dtor(cdata: cmd, id: ast::node_id) -> option<ast::def_id> { none { fail (#fmt("class_dtor: class id not found \ when looking up dtor for %d", id)); } }; - ebml::tagged_docs(cls_items, tag_item_dtor) {|doc| + do ebml::tagged_docs(cls_items, tag_item_dtor) |doc| { let doc1 = ebml::get_doc(doc, tag_def_id); let did = parse_def_id(ebml::doc_data(doc1)); found = some(translate_def_id(cdata, did)); @@ -351,7 +352,7 @@ fn class_dtor(cdata: cmd, id: ast::node_id) -> option<ast::def_id> { found } -fn get_symbol(data: @[u8]/~, id: ast::node_id) -> str { +fn get_symbol(data: @~[u8], id: ast::node_id) -> str { ret item_symbol(lookup_item(id, data)); } @@ -392,22 +393,22 @@ fn maybe_get_item_ast(cdata: cmd, tcx: ty::ctxt, } fn get_enum_variants(cdata: cmd, id: ast::node_id, tcx: ty::ctxt) - -> [ty::variant_info]/~ { + -> ~[ty::variant_info] { let data = cdata.data; let items = ebml::get_doc(ebml::doc(data), tag_items); let item = find_item(id, items); - let mut infos: [ty::variant_info]/~ = []/~; + let mut infos: ~[ty::variant_info] = ~[]; let variant_ids = enum_variant_ids(item, cdata); let mut disr_val = 0; - for variant_ids.each {|did| + for variant_ids.each |did| { let item = find_item(did.node, items); let ctor_ty = item_type({crate: cdata.cnum, node: id}, item, tcx, cdata); let name = item_name(item); - let mut arg_tys: [ty::t]/~ = []/~; + let mut arg_tys: ~[ty::t] = ~[]; alt ty::get(ctor_ty).struct { ty::ty_fn(f) { - for f.inputs.each {|a| vec::push(arg_tys, a.ty); } + for f.inputs.each |a| { vec::push(arg_tys, a.ty); } } _ { /* Nullary enum variant. */ } } @@ -424,12 +425,12 @@ fn get_enum_variants(cdata: cmd, id: ast::node_id, tcx: ty::ctxt) // NB: These types are duplicated in resolve.rs type method_info = {did: ast::def_id, n_tps: uint, ident: ast::ident}; -type _impl = {did: ast::def_id, ident: ast::ident, methods: [@method_info]/~}; +type _impl = {did: ast::def_id, ident: ast::ident, methods: ~[@method_info]}; fn item_impl_methods(cdata: cmd, item: ebml::doc, base_tps: uint) - -> [@method_info]/~ { - let mut rslt = []/~; - ebml::tagged_docs(item, tag_item_impl_method) {|doc| + -> ~[@method_info] { + let mut rslt = ~[]; + do ebml::tagged_docs(item, tag_item_impl_method) |doc| { let m_did = parse_def_id(ebml::doc_data(doc)); let mth_item = lookup_item(m_did.node, cdata.data); vec::push(rslt, @{did: translate_def_id(cdata, m_did), @@ -443,11 +444,11 @@ fn item_impl_methods(cdata: cmd, item: ebml::doc, base_tps: uint) fn get_impls_for_mod(cdata: cmd, m_id: ast::node_id, name: option<ast::ident>, get_cdata: fn(ast::crate_num) -> cmd) - -> @[@_impl]/~ { + -> @~[@_impl] { let data = cdata.data; let mod_item = lookup_item(m_id, data); - let mut result = []/~; - ebml::tagged_docs(mod_item, tag_mod_impl) {|doc| + let mut result = ~[]; + do ebml::tagged_docs(mod_item, tag_mod_impl) |doc| { let did = parse_def_id(ebml::doc_data(doc)); let local_did = translate_def_id(cdata, did); // The impl may be defined in a different crate. Ask the caller @@ -469,11 +470,11 @@ fn get_impls_for_mod(cdata: cmd, m_id: ast::node_id, /* Works for both classes and ifaces */ fn get_iface_methods(cdata: cmd, id: ast::node_id, tcx: ty::ctxt) - -> @[ty::method]/~ { + -> @~[ty::method] { let data = cdata.data; let item = lookup_item(id, data); - let mut result = []/~; - ebml::tagged_docs(item, tag_item_iface_method) {|mth| + let mut result = ~[]; + do ebml::tagged_docs(item, tag_item_iface_method) |mth| { let bounds = item_ty_param_bounds(mth, tcx, cdata); let name = item_name(mth); let ty = doc_type(mth, tcx, cdata); @@ -494,11 +495,11 @@ fn get_iface_methods(cdata: cmd, id: ast::node_id, tcx: ty::ctxt) // Helper function that gets either fields or methods fn get_class_members(cdata: cmd, id: ast::node_id, - p: fn(char) -> bool) -> [ty::field_ty]/~ { + p: fn(char) -> bool) -> ~[ty::field_ty] { let data = cdata.data; let item = lookup_item(id, data); - let mut result = []/~; - ebml::tagged_docs(item, tag_item_field) {|an_item| + let mut result = ~[]; + do ebml::tagged_docs(item, tag_item_field) |an_item| { let f = item_family(an_item); if p(f) { let name = item_name(an_item); @@ -519,8 +520,8 @@ pure fn family_to_visibility(family: char) -> ast::visibility { } /* 'g' for public field, 'j' for private field */ -fn get_class_fields(cdata: cmd, id: ast::node_id) -> [ty::field_ty]/~ { - get_class_members(cdata, id, {|f| f == 'g' || f == 'j'}) +fn get_class_fields(cdata: cmd, id: ast::node_id) -> ~[ty::field_ty] { + get_class_members(cdata, id, |f| f == 'g' || f == 'j') } fn family_has_type_params(fam_ch: char) -> bool { @@ -576,14 +577,14 @@ fn item_family_to_str(fam: char) -> str { } } -fn get_meta_items(md: ebml::doc) -> [@ast::meta_item]/~ { - let mut items: [@ast::meta_item]/~ = []/~; - ebml::tagged_docs(md, tag_meta_item_word) {|meta_item_doc| +fn get_meta_items(md: ebml::doc) -> ~[@ast::meta_item] { + let mut items: ~[@ast::meta_item] = ~[]; + do ebml::tagged_docs(md, tag_meta_item_word) |meta_item_doc| { let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name); let n = str::from_bytes(ebml::doc_data(nd)); vec::push(items, attr::mk_word_item(@n)); }; - ebml::tagged_docs(md, tag_meta_item_name_value) {|meta_item_doc| + do ebml::tagged_docs(md, tag_meta_item_name_value) |meta_item_doc| { let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name); let vd = ebml::get_doc(meta_item_doc, tag_meta_item_value); let n = str::from_bytes(ebml::doc_data(nd)); @@ -592,7 +593,7 @@ fn get_meta_items(md: ebml::doc) -> [@ast::meta_item]/~ { // but currently the encoder just drops them vec::push(items, attr::mk_name_value_item_str(@n, v)); }; - ebml::tagged_docs(md, tag_meta_item_list) {|meta_item_doc| + do ebml::tagged_docs(md, tag_meta_item_list) |meta_item_doc| { let nd = ebml::get_doc(meta_item_doc, tag_meta_item_name); let n = str::from_bytes(ebml::doc_data(nd)); let subitems = get_meta_items(meta_item_doc); @@ -601,11 +602,11 @@ fn get_meta_items(md: ebml::doc) -> [@ast::meta_item]/~ { ret items; } -fn get_attributes(md: ebml::doc) -> [ast::attribute]/~ { - let mut attrs: [ast::attribute]/~ = []/~; +fn get_attributes(md: ebml::doc) -> ~[ast::attribute] { + let mut attrs: ~[ast::attribute] = ~[]; alt ebml::maybe_get_doc(md, tag_attributes) { option::some(attrs_d) { - ebml::tagged_docs(attrs_d, tag_attribute) {|attr_doc| + do ebml::tagged_docs(attrs_d, tag_attribute) |attr_doc| { let meta_items = get_meta_items(attr_doc); // Currently it's only possible to have a single meta item on // an attribute @@ -623,7 +624,7 @@ fn get_attributes(md: ebml::doc) -> [ast::attribute]/~ { } fn list_meta_items(meta_items: ebml::doc, out: io::writer) { - for get_meta_items(meta_items).each {|mi| + for get_meta_items(meta_items).each |mi| { out.write_str(#fmt["%s\n", pprust::meta_item_to_str(*mi)]); } } @@ -631,29 +632,29 @@ fn list_meta_items(meta_items: ebml::doc, out: io::writer) { fn list_crate_attributes(md: ebml::doc, hash: @str, out: io::writer) { out.write_str(#fmt("=Crate Attributes (%s)=\n", *hash)); - for get_attributes(md).each {|attr| + for get_attributes(md).each |attr| { out.write_str(#fmt["%s\n", pprust::attribute_to_str(attr)]); } out.write_str("\n\n"); } -fn get_crate_attributes(data: @[u8]/~) -> [ast::attribute]/~ { +fn get_crate_attributes(data: @~[u8]) -> ~[ast::attribute] { ret get_attributes(ebml::doc(data)); } type crate_dep = {cnum: ast::crate_num, name: ast::ident, vers: @str, hash: @str}; -fn get_crate_deps(data: @[u8]/~) -> [crate_dep]/~ { - let mut deps: [crate_dep]/~ = []/~; +fn get_crate_deps(data: @~[u8]) -> ~[crate_dep] { + let mut deps: ~[crate_dep] = ~[]; let cratedoc = ebml::doc(data); let depsdoc = ebml::get_doc(cratedoc, tag_crate_deps); let mut crate_num = 1; fn docstr(doc: ebml::doc, tag_: uint) -> str { str::from_bytes(ebml::doc_data(ebml::get_doc(doc, tag_))) } - ebml::tagged_docs(depsdoc, tag_crate_dep) {|depdoc| + do ebml::tagged_docs(depsdoc, tag_crate_dep) |depdoc| { vec::push(deps, {cnum: crate_num, name: @docstr(depdoc, tag_crate_dep_name), vers: @docstr(depdoc, tag_crate_dep_vers), @@ -663,10 +664,10 @@ fn get_crate_deps(data: @[u8]/~) -> [crate_dep]/~ { ret deps; } -fn list_crate_deps(data: @[u8]/~, out: io::writer) { +fn list_crate_deps(data: @~[u8], out: io::writer) { out.write_str("=External Dependencies=\n"); - for get_crate_deps(data).each {|dep| + for get_crate_deps(data).each |dep| { out.write_str(#fmt["%d %s-%s-%s\n", dep.cnum, *dep.name, *dep.hash, *dep.vers]); } @@ -674,13 +675,13 @@ fn list_crate_deps(data: @[u8]/~, out: io::writer) { out.write_str("\n"); } -fn get_crate_hash(data: @[u8]/~) -> @str { +fn get_crate_hash(data: @~[u8]) -> @str { let cratedoc = ebml::doc(data); let hashdoc = ebml::get_doc(cratedoc, tag_crate_hash); ret @str::from_bytes(ebml::doc_data(hashdoc)); } -fn get_crate_vers(data: @[u8]/~) -> @str { +fn get_crate_vers(data: @~[u8]) -> @str { let attrs = decoder::get_crate_attributes(data); ret alt attr::last_meta_item_value_str_by_name( attr::find_linkage_metas(attrs), "vers") { @@ -689,23 +690,23 @@ fn get_crate_vers(data: @[u8]/~) -> @str { }; } -fn list_crate_items(bytes: @[u8]/~, md: ebml::doc, out: io::writer) { +fn list_crate_items(bytes: @~[u8], md: ebml::doc, out: io::writer) { out.write_str("=Items=\n"); let items = ebml::get_doc(md, tag_items); - iter_crate_items(bytes) {|path, did| + do iter_crate_items(bytes) |path, did| { out.write_str(#fmt["%s (%s)\n", path, describe_def(items, did)]); } out.write_str("\n"); } -fn iter_crate_items(bytes: @[u8]/~, proc: fn(str, ast::def_id)) { +fn iter_crate_items(bytes: @~[u8], proc: fn(str, ast::def_id)) { let md = ebml::doc(bytes); let paths = ebml::get_doc(md, tag_paths); let index = ebml::get_doc(paths, tag_index); let bs = ebml::get_doc(index, tag_index_buckets); - ebml::tagged_docs(bs, tag_index_buckets_bucket) {|bucket| + do ebml::tagged_docs(bs, tag_index_buckets_bucket) |bucket| { let et = tag_index_buckets_bucket_elt; - ebml::tagged_docs(bucket, et) {|elt| + do ebml::tagged_docs(bucket, et) |elt| { let data = read_path(elt); let {tag:_, doc:def} = ebml::doc_at(bytes, data.pos); let did_doc = ebml::get_doc(def, tag_def_id); @@ -715,16 +716,16 @@ fn iter_crate_items(bytes: @[u8]/~, proc: fn(str, ast::def_id)) { }; } -fn get_crate_module_paths(bytes: @[u8]/~) -> [(ast::def_id, str)]/~ { +fn get_crate_module_paths(bytes: @~[u8]) -> ~[(ast::def_id, str)] { fn mod_of_path(p: str) -> str { str::connect(vec::init(str::split_str(p, "::")), "::") } // find all module (path, def_ids), which are not // fowarded path due to renamed import or reexport - let mut res = []/~; + let mut res = ~[]; let mods = map::str_hash(); - iter_crate_items(bytes) {|path, did| + do iter_crate_items(bytes) |path, did| { let m = mod_of_path(path); if str::is_not_empty(m) { // if m has a sub-item, it must be a module @@ -735,13 +736,13 @@ fn get_crate_module_paths(bytes: @[u8]/~) -> [(ast::def_id, str)]/~ { // unified later by using the mods map vec::push(res, (did, path)); } - ret vec::filter(res) {|x| + ret do vec::filter(res) |x| { let (_, xp) = x; mods.contains_key(xp) } } -fn list_crate_metadata(bytes: @[u8]/~, out: io::writer) { +fn list_crate_metadata(bytes: @~[u8], out: io::writer) { let hash = get_crate_hash(bytes); let md = ebml::doc(bytes); list_crate_attributes(md, hash, out); diff --git a/src/rustc/metadata/encoder.rs b/src/rustc/metadata/encoder.rs index 1e4521276d2..0c01414847d 100644 --- a/src/rustc/metadata/encoder.rs +++ b/src/rustc/metadata/encoder.rs @@ -43,8 +43,8 @@ type encode_parms = { diag: span_handler, tcx: ty::ctxt, reachable: hashmap<ast::node_id, ()>, - reexports: [(str, def_id)]/~, - impl_map: fn@(ast::node_id) -> [(ident, def_id)]/~, + reexports: ~[(str, def_id)], + impl_map: fn@(ast::node_id) -> ~[(ident, def_id)], item_symbols: hashmap<ast::node_id, str>, discrim_symbols: hashmap<ast::node_id, str>, link_meta: link_meta, @@ -56,8 +56,8 @@ enum encode_ctxt = { diag: span_handler, tcx: ty::ctxt, reachable: hashmap<ast::node_id, ()>, - reexports: [(str, def_id)]/~, - impl_map: fn@(ast::node_id) -> [(ident, def_id)]/~, + reexports: ~[(str, def_id)], + impl_map: fn@(ast::node_id) -> ~[(ident, def_id)], item_symbols: hashmap<ast::node_id, str>, discrim_symbols: hashmap<ast::node_id, str>, link_meta: link_meta, @@ -87,41 +87,41 @@ fn encode_name_and_def_id(ebml_w: ebml::writer, nm: ident, } fn encode_region_param(ebml_w: ebml::writer, rp: region_param) { - ebml_w.wr_tag(tag_region_param) {|| + do ebml_w.wr_tag(tag_region_param) || { serialize_region_param(ebml_w, rp) } } fn encode_named_def_id(ebml_w: ebml::writer, name: ident, id: def_id) { - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name(ebml_w, name); encode_def_id(ebml_w, id); } } fn encode_mutability(ebml_w: ebml::writer, mt: class_mutability) { - ebml_w.wr_tag(tag_class_mut) {|| - ebml_w.writer.write([alt mt { class_immutable { 'i' } - class_mutable { 'm' } } as u8]/&); + do ebml_w.wr_tag(tag_class_mut) || { + ebml_w.writer.write(&[alt mt { class_immutable { 'i' } + class_mutable { 'm' } } as u8]); } } type entry<T> = {val: T, pos: uint}; -fn encode_enum_variant_paths(ebml_w: ebml::writer, variants: [variant]/~, - path: [ident]/~, &index: [entry<str>]/~) { - for variants.each {|variant| +fn encode_enum_variant_paths(ebml_w: ebml::writer, variants: ~[variant], + path: ~[ident], &index: ~[entry<str>]) { + for variants.each |variant| { add_to_index(ebml_w, path, index, variant.node.name); - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name(ebml_w, variant.node.name); encode_def_id(ebml_w, local_def(variant.node.id)); } } } -fn add_to_index(ebml_w: ebml::writer, path: [ident]/&, &index: [entry<str>]/~, +fn add_to_index(ebml_w: ebml::writer, path: &[ident], &index: ~[entry<str>], name: ident) { - let mut full_path = []/~; + let mut full_path = ~[]; vec::push_all(full_path, path); vec::push(full_path, name); vec::push(index, {val: ast_util::path_name_i(full_path), @@ -129,16 +129,16 @@ fn add_to_index(ebml_w: ebml::writer, path: [ident]/&, &index: [entry<str>]/~, } fn encode_foreign_module_item_paths(ebml_w: ebml::writer, nmod: foreign_mod, - path: [ident]/~, &index: [entry<str>]/~) { - for nmod.items.each {|nitem| + path: ~[ident], &index: ~[entry<str>]) { + for nmod.items.each |nitem| { add_to_index(ebml_w, path, index, nitem.ident); encode_named_def_id(ebml_w, nitem.ident, local_def(nitem.id)); } } fn encode_class_item_paths(ebml_w: ebml::writer, - items: [@class_member]/~, path: [ident]/~, &index: [entry<str>]/~) { - for items.each {|it| + items: ~[@class_member], path: ~[ident], &index: ~[entry<str>]) { + for items.each |it| { alt ast_util::class_member_visibility(it) { private { cont; } public { @@ -154,9 +154,9 @@ fn encode_class_item_paths(ebml_w: ebml::writer, } fn encode_module_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, - module: _mod, path: [ident]/~, - &index: [entry<str>]/~) { - for module.items.each {|it| + module: _mod, path: ~[ident], + &index: ~[entry<str>]) { + for module.items.each |it| { if !reachable(ecx, it.id) || !ast_util::is_exported(it.ident, module) { cont; } if !ast_util::is_item_impl(it) { @@ -170,7 +170,7 @@ fn encode_module_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, encode_named_def_id(ebml_w, it.ident, local_def(it.id)); } item_mod(_mod) { - ebml_w.wr_tag(tag_paths_data_mod) {|| + do ebml_w.wr_tag(tag_paths_data_mod) || { encode_name_and_def_id(ebml_w, it.ident, it.id); encode_module_item_paths(ebml_w, ecx, _mod, vec::append_one(path, it.ident), @@ -178,7 +178,7 @@ fn encode_module_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, } } item_foreign_mod(nmod) { - ebml_w.wr_tag(tag_paths_data_mod) {|| + do ebml_w.wr_tag(tag_paths_data_mod) || { encode_name_and_def_id(ebml_w, it.ident, it.id); encode_foreign_module_item_paths( ebml_w, nmod, @@ -186,15 +186,15 @@ fn encode_module_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, } } item_ty(_, tps, _) { - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name_and_def_id(ebml_w, it.ident, it.id); } } item_class(_, _, items, ctor, m_dtor, _) { - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name_and_def_id(ebml_w, it.ident, it.id); } - ebml_w.wr_tag(tag_paths) {|| + do ebml_w.wr_tag(tag_paths) || { // We add the same ident twice: for the // class and for its ctor add_to_index(ebml_w, path, index, it.ident); @@ -206,13 +206,13 @@ fn encode_module_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, } } item_enum(variants, _, _) { - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name_and_def_id(ebml_w, it.ident, it.id); } encode_enum_variant_paths(ebml_w, variants, path, index); } item_iface(*) { - ebml_w.wr_tag(tag_paths_data_item) {|| + do ebml_w.wr_tag(tag_paths_data_item) || { encode_name_and_def_id(ebml_w, it.ident, it.id); } } @@ -228,9 +228,9 @@ fn encode_iface_ref(ebml_w: ebml::writer, ecx: @encode_ctxt, t: @iface_ref) { } fn encode_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, crate: @crate) - -> [entry<str>]/~ { - let mut index: [entry<str>]/~ = []/~; - let mut path: [ident]/~ = []/~; + -> ~[entry<str>] { + let mut index: ~[entry<str>] = ~[]; + let mut path: ~[ident] = ~[]; ebml_w.start_tag(tag_paths); encode_module_item_paths(ebml_w, ecx, crate.node.module, path, index); encode_reexport_paths(ebml_w, ecx, index); @@ -239,8 +239,8 @@ fn encode_item_paths(ebml_w: ebml::writer, ecx: @encode_ctxt, crate: @crate) } fn encode_reexport_paths(ebml_w: ebml::writer, - ecx: @encode_ctxt, &index: [entry<str>]/~) { - for ecx.reexports.each {|reexport| + ecx: @encode_ctxt, &index: ~[entry<str>]) { + for ecx.reexports.each |reexport| { let (path, def_id) = reexport; vec::push(index, {val: path, pos: ebml_w.writer.tell()}); ebml_w.start_tag(tag_paths_data_item); @@ -254,20 +254,20 @@ fn encode_reexport_paths(ebml_w: ebml::writer, // Item info table encoding fn encode_family(ebml_w: ebml::writer, c: char) { ebml_w.start_tag(tag_items_data_item_family); - ebml_w.writer.write([c as u8]/&); + ebml_w.writer.write(&[c as u8]); ebml_w.end_tag(); } fn def_to_str(did: def_id) -> str { ret #fmt["%d:%d", did.crate, did.node]; } fn encode_type_param_bounds(ebml_w: ebml::writer, ecx: @encode_ctxt, - params: [ty_param]/~) { + params: ~[ty_param]) { let ty_str_ctxt = @{diag: ecx.diag, ds: def_to_str, tcx: ecx.tcx, - reachable: {|a|reachable(ecx, a)}, + reachable: |a| reachable(ecx, a), abbrevs: tyencode::ac_use_abbrevs(ecx.type_abbrevs)}; - for params.each {|param| + for params.each |param| { ebml_w.start_tag(tag_items_data_item_ty_param_bounds); let bs = ecx.tcx.ty_param_bounds.get(param.id); tyencode::enc_bounds(ebml_w.writer, ty_str_ctxt, bs); @@ -286,7 +286,7 @@ fn write_type(ecx: @encode_ctxt, ebml_w: ebml::writer, typ: ty::t) { @{diag: ecx.diag, ds: def_to_str, tcx: ecx.tcx, - reachable: {|a|reachable(ecx, a)}, + reachable: |a| reachable(ecx, a), abbrevs: tyencode::ac_use_abbrevs(ecx.type_abbrevs)}; tyencode::enc_ty(ebml_w.writer, ty_str_ctxt, typ); } @@ -329,13 +329,13 @@ fn encode_parent_item(ebml_w: ebml::writer, id: def_id) { } fn encode_enum_variant_info(ecx: @encode_ctxt, ebml_w: ebml::writer, - id: node_id, variants: [variant]/~, - path: ast_map::path, index: @mut [entry<int>]/~, - ty_params: [ty_param]/~) { + id: node_id, variants: ~[variant], + path: ast_map::path, index: @mut ~[entry<int>], + ty_params: ~[ty_param]) { let mut disr_val = 0; let mut i = 0; let vi = ty::enum_variants(ecx.tcx, {crate: local_crate, node: id}); - for variants.each {|variant| + for variants.each |variant| { vec::push(*index, {val: variant.node.id, pos: ebml_w.writer.tell()}); ebml_w.start_tag(tag_items_data_item); encode_def_id(ebml_w, local_def(variant.node.id)); @@ -372,9 +372,9 @@ fn encode_path(ebml_w: ebml::writer, ebml_w.wr_tagged_str(tag, *name); } - ebml_w.wr_tag(tag_path) {|| + do ebml_w.wr_tag(tag_path) || { ebml_w.wr_tagged_u32(tag_path_len, (vec::len(path) + 1u) as u32); - vec::iter(path) {|pe| encode_path_elt(ebml_w, pe); } + do vec::iter(path) |pe| { encode_path_elt(ebml_w, pe); } encode_path_elt(ebml_w, name); } } @@ -386,7 +386,7 @@ fn encode_info_for_mod(ecx: @encode_ctxt, ebml_w: ebml::writer, md: _mod, encode_family(ebml_w, 'm'); encode_name(ebml_w, name); let impls = ecx.impl_map(id); - for impls.each {|i| + for impls.each |i| { let (ident, did) = i; if ast_util::is_exported(ident, md) { ebml_w.start_tag(tag_mod_impl); @@ -425,14 +425,14 @@ fn encode_visibility(ebml_w: ebml::writer, visibility: visibility) { /* Returns an index of items in this class */ fn encode_info_for_class(ecx: @encode_ctxt, ebml_w: ebml::writer, id: node_id, path: ast_map::path, - class_tps: [ty_param]/~, - items: [@class_member]/~, - global_index: @mut[entry<int>]/~) -> [entry<int>]/~ { + class_tps: ~[ty_param], + items: ~[@class_member], + global_index: @mut~[entry<int>]) -> ~[entry<int>] { /* Each class has its own index, since different classes may have fields with the same name */ - let index = @mut []/~; + let index = @mut ~[]; let tcx = ecx.tcx; - for items.each {|ci| + for items.each |ci| { /* We encode both private and public fields -- need to include private fields to get the offsets right */ alt ci.node { @@ -472,7 +472,7 @@ fn encode_info_for_class(ecx: @encode_ctxt, ebml_w: ebml::writer, fn encode_info_for_fn(ecx: @encode_ctxt, ebml_w: ebml::writer, id: node_id, ident: ident, path: ast_map::path, - item: option<inlined_item>, tps: [ty_param]/~, + item: option<inlined_item>, tps: ~[ty_param], decl: fn_decl) { ebml_w.start_tag(tag_items_data_item); encode_name(ebml_w, ident); @@ -498,7 +498,7 @@ fn encode_info_for_fn(ecx: @encode_ctxt, ebml_w: ebml::writer, fn encode_info_for_method(ecx: @encode_ctxt, ebml_w: ebml::writer, impl_path: ast_map::path, should_inline: bool, parent_id: node_id, - m: @method, all_tps: [ty_param]/~) { + m: @method, all_tps: ~[ty_param]) { #debug("encode_info_for_method: %d %s %u", m.id, *m.ident, all_tps.len()); ebml_w.start_tag(tag_items_data_item); encode_def_id(ebml_w, local_def(m.id)); @@ -527,7 +527,7 @@ fn purity_fn_family(p: purity) -> char { } -fn should_inline(attrs: [attribute]/~) -> bool { +fn should_inline(attrs: ~[attribute]) -> bool { alt attr::find_inline_attr(attrs) { attr::ia_none { false } attr::ia_hint | attr::ia_always { true } @@ -536,7 +536,7 @@ fn should_inline(attrs: [attribute]/~) -> bool { fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, - index: @mut [entry<int>]/~, path: ast_map::path) { + index: @mut ~[entry<int>], path: ast_map::path) { let tcx = ecx.tcx; let must_write = @@ -544,10 +544,10 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, if !must_write && !reachable(ecx, item.id) { ret; } fn add_to_index_(item: @item, ebml_w: ebml::writer, - index: @mut [entry<int>]/~) { + index: @mut ~[entry<int>]) { vec::push(*index, {val: item.id, pos: ebml_w.writer.tell()}); } - let add_to_index = {|copy ebml_w|add_to_index_(item, ebml_w, index)}; + let add_to_index = |copy ebml_w| add_to_index_(item, ebml_w, index); alt item.node { item_const(_, _) { @@ -602,13 +602,13 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, } item_enum(variants, tps, rp) { add_to_index(); - ebml_w.wr_tag(tag_items_data_item) {|| + do ebml_w.wr_tag(tag_items_data_item) || { encode_def_id(ebml_w, local_def(item.id)); encode_family(ebml_w, 't'); encode_type_param_bounds(ebml_w, ecx, tps); encode_type(ecx, ebml_w, node_id_to_type(tcx, item.id)); encode_name(ebml_w, item.ident); - for variants.each {|v| + for variants.each |v| { encode_variant_id(ebml_w, local_def(v.node.id)); } ecx.encode_inlined_item(ecx, ebml_w, path, ii_item(item)); @@ -626,7 +626,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, let idx = encode_info_for_class(ecx, ebml_w, item.id, path, tps, items, index); /* Encode the dtor */ - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { vec::push(*index, {val: dtor.node.id, pos: ebml_w.writer.tell()}); encode_info_for_fn(ecx, ebml_w, dtor.node.id, @(*item.ident + "_dtor"), path, if tps.len() > 0u { @@ -646,13 +646,13 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, encode_name(ebml_w, item.ident); encode_path(ebml_w, path, ast_map::path_name(item.ident)); encode_region_param(ebml_w, rp); - for ifaces.each {|t| + for ifaces.each |t| { encode_iface_ref(ebml_w, ecx, t); } /* Encode the dtor */ /* Encode id for dtor */ - option::iter(m_dtor) {|dtor| - ebml_w.wr_tag(tag_item_dtor) {|| + do option::iter(m_dtor) |dtor| { + do ebml_w.wr_tag(tag_item_dtor) || { encode_def_id(ebml_w, local_def(dtor.node.id)); } }; @@ -661,14 +661,14 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, for methods, write all the stuff get_iface_method needs to know*/ let (fs,ms) = ast_util::split_class_items(items); - for fs.each {|f| + for fs.each |f| { ebml_w.start_tag(tag_item_field); encode_visibility(ebml_w, f.vis); encode_name(ebml_w, f.ident); encode_def_id(ebml_w, local_def(f.id)); ebml_w.end_tag(); } - for ms.each {|m| + for ms.each |m| { alt m.vis { private { /* do nothing */ } public { @@ -703,12 +703,12 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, encode_type_param_bounds(ebml_w, ecx, tps); encode_type(ecx, ebml_w, node_id_to_type(tcx, item.id)); encode_name(ebml_w, item.ident); - for methods.each {|m| + for methods.each |m| { ebml_w.start_tag(tag_item_impl_method); ebml_w.writer.write(str::bytes(def_to_str(local_def(m.id)))); ebml_w.end_tag(); } - option::iter(ifce) {|t| + do option::iter(ifce) |t| { encode_iface_ref(ebml_w, ecx, t) }; encode_path(ebml_w, path, ast_map::path_name(item.ident)); @@ -716,7 +716,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, let impl_path = vec::append_one(path, ast_map::path_name(item.ident)); - for methods.each {|m| + for methods.each |m| { vec::push(*index, {val: m.id, pos: ebml_w.writer.tell()}); encode_info_for_method(ecx, ebml_w, impl_path, should_inline(m.attrs), item.id, m, @@ -733,7 +733,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, encode_type(ecx, ebml_w, node_id_to_type(tcx, item.id)); encode_name(ebml_w, item.ident); let mut i = 0u; - for vec::each(*ty::iface_methods(tcx, local_def(item.id))) {|mty| + for vec::each(*ty::iface_methods(tcx, local_def(item.id))) |mty| { ebml_w.start_tag(tag_item_iface_method); encode_name(ebml_w, mty.ident); encode_type_param_bounds(ebml_w, ecx, ms[i].tps); @@ -750,7 +750,7 @@ fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::writer, item: @item, fn encode_info_for_foreign_item(ecx: @encode_ctxt, ebml_w: ebml::writer, nitem: @foreign_item, - index: @mut [entry<int>]/~, + index: @mut ~[entry<int>], path: ast_map::path, abi: foreign_abi) { if !reachable(ecx, nitem.id) { ret; } vec::push(*index, {val: nitem.id, pos: ebml_w.writer.tell()}); @@ -775,15 +775,15 @@ fn encode_info_for_foreign_item(ecx: @encode_ctxt, ebml_w: ebml::writer, } fn encode_info_for_items(ecx: @encode_ctxt, ebml_w: ebml::writer, - crate: @crate) -> [entry<int>]/~ { - let index = @mut []/~; + crate: @crate) -> ~[entry<int>] { + let index = @mut ~[]; ebml_w.start_tag(tag_items_data); vec::push(*index, {val: crate_node_id, pos: ebml_w.writer.tell()}); encode_info_for_mod(ecx, ebml_w, crate.node.module, - crate_node_id, []/~, @""); + crate_node_id, ~[], @""); visit::visit_crate(*crate, (), visit::mk_vt(@{ - visit_expr: {|_e, _cx, _v|}, - visit_item: {|i, cx, v, copy ebml_w| + visit_expr: |_e, _cx, _v| { }, + visit_item: |i, cx, v, copy ebml_w| { visit::visit_item(i, cx, v); alt check ecx.tcx.items.get(i.id) { ast_map::node_item(_, pt) { @@ -806,7 +806,7 @@ fn encode_info_for_items(ecx: @encode_ctxt, ebml_w: ebml::writer, } } }, - visit_foreign_item: {|ni, cx, v, copy ebml_w| + visit_foreign_item: |ni, cx, v, copy ebml_w| { visit::visit_foreign_item(ni, cx, v); alt check ecx.tcx.items.get(ni.id) { ast_map::node_foreign_item(_, abi, pt) { @@ -824,32 +824,32 @@ fn encode_info_for_items(ecx: @encode_ctxt, ebml_w: ebml::writer, // Path and definition ID indexing -fn create_index<T: copy>(index: [entry<T>]/~, hash_fn: fn@(T) -> uint) -> - [@[entry<T>]/~]/~ { - let mut buckets: [@mut [entry<T>]/~]/~ = []/~; - for uint::range(0u, 256u) {|_i| vec::push(buckets, @mut []/~); }; - for index.each {|elt| +fn create_index<T: copy>(index: ~[entry<T>], hash_fn: fn@(T) -> uint) -> + ~[@~[entry<T>]] { + let mut buckets: ~[@mut ~[entry<T>]] = ~[]; + for uint::range(0u, 256u) |_i| { vec::push(buckets, @mut ~[]); }; + for index.each |elt| { let h = hash_fn(elt.val); vec::push(*buckets[h % 256u], elt); } - let mut buckets_frozen = []/~; - for buckets.each {|bucket| + let mut buckets_frozen = ~[]; + for buckets.each |bucket| { vec::push(buckets_frozen, @*bucket); } ret buckets_frozen; } -fn encode_index<T>(ebml_w: ebml::writer, buckets: [@[entry<T>]/~]/~, +fn encode_index<T>(ebml_w: ebml::writer, buckets: ~[@~[entry<T>]], write_fn: fn(io::writer, T)) { let writer = ebml_w.writer; ebml_w.start_tag(tag_index); - let mut bucket_locs: [uint]/~ = []/~; + let mut bucket_locs: ~[uint] = ~[]; ebml_w.start_tag(tag_index_buckets); - for buckets.each {|bucket| + for buckets.each |bucket| { vec::push(bucket_locs, ebml_w.writer.tell()); ebml_w.start_tag(tag_index_buckets_bucket); - for vec::each(*bucket) {|elt| + for vec::each(*bucket) |elt| { ebml_w.start_tag(tag_index_buckets_bucket_elt); writer.write_be_uint(elt.pos, 4u); write_fn(writer, elt.val); @@ -859,7 +859,7 @@ fn encode_index<T>(ebml_w: ebml::writer, buckets: [@[entry<T>]/~]/~, } ebml_w.end_tag(); ebml_w.start_tag(tag_index_table); - for bucket_locs.each {|pos| writer.write_be_uint(pos, 4u); } + for bucket_locs.each |pos| { writer.write_be_uint(pos, 4u); } ebml_w.end_tag(); ebml_w.end_tag(); } @@ -899,7 +899,7 @@ fn encode_meta_item(ebml_w: ebml::writer, mi: meta_item) { ebml_w.start_tag(tag_meta_item_name); ebml_w.writer.write(str::bytes(*name)); ebml_w.end_tag(); - for items.each {|inner_item| + for items.each |inner_item| { encode_meta_item(ebml_w, *inner_item); } ebml_w.end_tag(); @@ -907,9 +907,9 @@ fn encode_meta_item(ebml_w: ebml::writer, mi: meta_item) { } } -fn encode_attributes(ebml_w: ebml::writer, attrs: [attribute]/~) { +fn encode_attributes(ebml_w: ebml::writer, attrs: ~[attribute]) { ebml_w.start_tag(tag_attributes); - for attrs.each {|attr| + for attrs.each |attr| { ebml_w.start_tag(tag_attribute); encode_meta_item(ebml_w, attr.node.value); ebml_w.end_tag(); @@ -921,9 +921,9 @@ fn encode_attributes(ebml_w: ebml::writer, attrs: [attribute]/~) { // metadata that Rust cares about for linking crates. This attribute requires // 'name' and 'vers' items, so if the user didn't provide them we will throw // them in anyway with default values. -fn synthesize_crate_attrs(ecx: @encode_ctxt, crate: @crate) -> [attribute]/~ { +fn synthesize_crate_attrs(ecx: @encode_ctxt, crate: @crate) -> ~[attribute] { - fn synthesize_link_attr(ecx: @encode_ctxt, items: [@meta_item]/~) -> + fn synthesize_link_attr(ecx: @encode_ctxt, items: ~[@meta_item]) -> attribute { assert (*ecx.link_meta.name != ""); @@ -940,15 +940,15 @@ fn synthesize_crate_attrs(ecx: @encode_ctxt, crate: @crate) -> [attribute]/~ { attr::remove_meta_items_by_name(tmp, @"vers") }; - let meta_items = vec::append([name_item, vers_item]/~, other_items); + let meta_items = vec::append(~[name_item, vers_item], other_items); let link_item = attr::mk_list_item(@"link", meta_items); ret attr::mk_attr(link_item); } - let mut attrs: [attribute]/~ = []/~; + let mut attrs: ~[attribute] = ~[]; let mut found_link_attr = false; - for crate.node.attrs.each {|attr| + for crate.node.attrs.each |attr| { vec::push( attrs, if *attr::get_attr_name(attr) != "link" { @@ -964,20 +964,20 @@ fn synthesize_crate_attrs(ecx: @encode_ctxt, crate: @crate) -> [attribute]/~ { }); } - if !found_link_attr { vec::push(attrs, synthesize_link_attr(ecx, []/~)); } + if !found_link_attr { vec::push(attrs, synthesize_link_attr(ecx, ~[])); } ret attrs; } fn encode_crate_deps(ebml_w: ebml::writer, cstore: cstore::cstore) { - fn get_ordered_deps(cstore: cstore::cstore) -> [decoder::crate_dep]/~ { + fn get_ordered_deps(cstore: cstore::cstore) -> ~[decoder::crate_dep] { type hashkv = @{key: crate_num, val: cstore::crate_metadata}; type numdep = decoder::crate_dep; // Pull the cnums and name,vers,hash out of cstore - let mut deps: [mut numdep]/~ = [mut]/~; - cstore::iter_crate_data(cstore) {|key, val| + let mut deps: ~[mut numdep] = ~[mut]; + do cstore::iter_crate_data(cstore) |key, val| { let dep = {cnum: key, name: @val.name, vers: decoder::get_crate_vers(val.data), hash: decoder::get_crate_hash(val.data)}; @@ -990,7 +990,7 @@ fn encode_crate_deps(ebml_w: ebml::writer, cstore: cstore::cstore) { // Sanity-check the crate numbers let mut expected_cnum = 1; - for deps.each {|n| + for deps.each |n| { assert (n.cnum == expected_cnum); expected_cnum += 1; } @@ -1004,7 +1004,7 @@ fn encode_crate_deps(ebml_w: ebml::writer, cstore: cstore::cstore) { // FIXME (#2166): This is not nearly enough to support correct versioning // but is enough to get transitive crate dependencies working. ebml_w.start_tag(tag_crate_deps); - for get_ordered_deps(cstore).each {|dep| + for get_ordered_deps(cstore).each |dep| { encode_crate_dep(ebml_w, dep); } ebml_w.end_tag(); @@ -1030,7 +1030,7 @@ fn encode_hash(ebml_w: ebml::writer, hash: str) { ebml_w.end_tag(); } -fn encode_metadata(parms: encode_parms, crate: @crate) -> [u8]/~ { +fn encode_metadata(parms: encode_parms, crate: @crate) -> ~[u8] { let ecx: @encode_ctxt = @encode_ctxt({ diag: parms.diag, tcx: parms.tcx, @@ -1072,7 +1072,7 @@ fn encode_metadata(parms: encode_parms, crate: @crate) -> [u8]/~ { // Pad this, since something (LLVM, presumably) is cutting off the // remaining % 4 bytes. - buf_w.write([0u8, 0u8, 0u8, 0u8]/&); + buf_w.write(&[0u8, 0u8, 0u8, 0u8]); io::mem_buffer_buf(buf) } @@ -1081,7 +1081,7 @@ fn encoded_ty(tcx: ty::ctxt, t: ty::t) -> str { let cx = @{diag: tcx.diag, ds: def_to_str, tcx: tcx, - reachable: {|_id| false}, + reachable: |_id| false, abbrevs: tyencode::ac_no_abbrevs}; let buf = io::mem_buffer(); tyencode::enc_ty(io::mem_buffer_writer(buf), cx, t); diff --git a/src/rustc/metadata/filesearch.rs b/src/rustc/metadata/filesearch.rs index 9d382467214..ab42afb1edd 100644 --- a/src/rustc/metadata/filesearch.rs +++ b/src/rustc/metadata/filesearch.rs @@ -25,20 +25,20 @@ fn pick_file(file: path, path: path) -> option<path> { iface filesearch { fn sysroot() -> path; - fn lib_search_paths() -> [path]/~; + fn lib_search_paths() -> ~[path]; fn get_target_lib_path() -> path; fn get_target_lib_file_path(file: path) -> path; } fn mk_filesearch(maybe_sysroot: option<path>, target_triple: str, - addl_lib_search_paths: [path]/~) -> filesearch { + addl_lib_search_paths: ~[path]) -> filesearch { type filesearch_impl = {sysroot: path, - addl_lib_search_paths: [path]/~, + addl_lib_search_paths: ~[path], target_triple: str}; impl of filesearch for filesearch_impl { fn sysroot() -> path { self.sysroot } - fn lib_search_paths() -> [path]/~ { + fn lib_search_paths() -> ~[path] { let mut paths = self.addl_lib_search_paths; vec::push(paths, @@ -70,9 +70,9 @@ fn mk_filesearch(maybe_sysroot: option<path>, fn search<T: copy>(filesearch: filesearch, pick: pick<T>) -> option<T> { let mut rslt = none; - for filesearch.lib_search_paths().each {|lib_search_path| + for filesearch.lib_search_paths().each |lib_search_path| { #debug("searching %s", lib_search_path); - for os::list_dir_path(lib_search_path).each {|path| + for os::list_dir_path(lib_search_path).each |path| { #debug("testing %s", path); let maybe_picked = pick(path); if option::is_some(maybe_picked) { @@ -88,13 +88,13 @@ fn search<T: copy>(filesearch: filesearch, pick: pick<T>) -> option<T> { ret rslt; } -fn relative_target_lib_path(target_triple: str) -> [path]/~ { - [libdir(), "rustc", target_triple, libdir()]/~ +fn relative_target_lib_path(target_triple: str) -> ~[path] { + ~[libdir(), "rustc", target_triple, libdir()] } fn make_target_lib_path(sysroot: path, target_triple: str) -> path { - let path = vec::append([sysroot]/~, + let path = vec::append(~[sysroot], relative_target_lib_path(target_triple)); let path = path::connect_many(path); ret path; @@ -117,7 +117,7 @@ fn get_sysroot(maybe_sysroot: option<path>) -> path { } fn get_cargo_sysroot() -> result<path, str> { - let path = [get_default_sysroot(), libdir(), "cargo"]/~; + let path = ~[get_default_sysroot(), libdir(), "cargo"]; result::ok(path::connect_many(path)) } @@ -134,7 +134,7 @@ fn get_cargo_root() -> result<path, str> { } fn get_cargo_root_nearest() -> result<path, str> { - result::chain(get_cargo_root()) { |p| + do result::chain(get_cargo_root()) |p| { let cwd = os::getcwd(); let mut dirname = path::dirname(cwd); let mut dirpath = path::split(dirname); @@ -158,13 +158,13 @@ fn get_cargo_root_nearest() -> result<path, str> { } fn get_cargo_lib_path() -> result<path, str> { - result::chain(get_cargo_root()) { |p| + do result::chain(get_cargo_root()) |p| { result::ok(path::connect(p, libdir())) } } fn get_cargo_lib_path_nearest() -> result<path, str> { - result::chain(get_cargo_root_nearest()) { |p| + do result::chain(get_cargo_root_nearest()) |p| { result::ok(path::connect(p, libdir())) } } diff --git a/src/rustc/metadata/loader.rs b/src/rustc/metadata/loader.rs index ffa3716f4fb..9b9571de413 100644 --- a/src/rustc/metadata/loader.rs +++ b/src/rustc/metadata/loader.rs @@ -33,13 +33,13 @@ type ctxt = { filesearch: filesearch, span: span, ident: ast::ident, - metas: [@ast::meta_item]/~, + metas: ~[@ast::meta_item], hash: str, os: os, static: bool }; -fn load_library_crate(cx: ctxt) -> {ident: str, data: @[u8]/~} { +fn load_library_crate(cx: ctxt) -> {ident: str, data: @~[u8]} { alt find_library_crate(cx) { some(t) { ret t; } none { @@ -49,7 +49,7 @@ fn load_library_crate(cx: ctxt) -> {ident: str, data: @[u8]/~} { } } -fn find_library_crate(cx: ctxt) -> option<{ident: str, data: @[u8]/~}> { +fn find_library_crate(cx: ctxt) -> option<{ident: str, data: @~[u8]}> { attr::require_unique_names(cx.diag, cx.metas); find_library_crate_aux(cx, libname(cx), cx.filesearch) } @@ -67,13 +67,13 @@ fn libname(cx: ctxt) -> {prefix: str, suffix: str} { fn find_library_crate_aux(cx: ctxt, nn: {prefix: str, suffix: str}, filesearch: filesearch::filesearch) -> - option<{ident: str, data: @[u8]/~}> { + option<{ident: str, data: @~[u8]}> { let crate_name = crate_name_from_metas(cx.metas); let prefix: str = nn.prefix + *crate_name + "-"; let suffix: str = nn.suffix; - let mut matches = []/~; - filesearch::search(filesearch, { |path| + let mut matches = ~[]; + filesearch::search(filesearch, |path| { #debug("inspecting file %s", path); let f: str = path::basename(path); if !(str::starts_with(f, prefix) && str::ends_with(f, suffix)) { @@ -109,7 +109,7 @@ fn find_library_crate_aux(cx: ctxt, cx.diag.span_err( cx.span, #fmt("multiple matching crates for `%s`", *crate_name)); cx.diag.handler().note("candidates:"); - for matches.each {|match| + for matches.each |match| { cx.diag.handler().note(#fmt("path: %s", match.ident)); let attrs = decoder::get_crate_attributes(match.data); note_linkage_attrs(cx.diag, attrs); @@ -119,7 +119,7 @@ fn find_library_crate_aux(cx: ctxt, } } -fn crate_name_from_metas(metas: [@ast::meta_item]/~) -> @str { +fn crate_name_from_metas(metas: ~[@ast::meta_item]) -> @str { let name_items = attr::find_meta_items_by_name(metas, "name"); alt vec::last_opt(name_items) { some(i) { @@ -134,13 +134,13 @@ fn crate_name_from_metas(metas: [@ast::meta_item]/~) -> @str { } } -fn note_linkage_attrs(diag: span_handler, attrs: [ast::attribute]/~) { - for attr::find_linkage_attrs(attrs).each {|attr| +fn note_linkage_attrs(diag: span_handler, attrs: ~[ast::attribute]) { + for attr::find_linkage_attrs(attrs).each |attr| { diag.handler().note(#fmt("meta: %s", pprust::attr_to_str(attr))); } } -fn crate_matches(crate_data: @[u8]/~, metas: [@ast::meta_item]/~, +fn crate_matches(crate_data: @~[u8], metas: ~[@ast::meta_item], hash: str) -> bool { let attrs = decoder::get_crate_attributes(crate_data); let linkage_metas = attr::find_linkage_metas(attrs); @@ -151,18 +151,18 @@ fn crate_matches(crate_data: @[u8]/~, metas: [@ast::meta_item]/~, metadata_matches(linkage_metas, metas) } -fn metadata_matches(extern_metas: [@ast::meta_item]/~, - local_metas: [@ast::meta_item]/~) -> bool { +fn metadata_matches(extern_metas: ~[@ast::meta_item], + local_metas: ~[@ast::meta_item]) -> bool { #debug("matching %u metadata requirements against %u items", vec::len(local_metas), vec::len(extern_metas)); #debug("crate metadata:"); - for extern_metas.each {|have| + for extern_metas.each |have| { #debug(" %s", pprust::meta_item_to_str(*have)); } - for local_metas.each {|needed| + for local_metas.each |needed| { #debug("looking for %s", pprust::meta_item_to_str(*needed)); if !attr::contains(extern_metas, needed) { #debug("missing %s", pprust::meta_item_to_str(*needed)); @@ -173,14 +173,14 @@ fn metadata_matches(extern_metas: [@ast::meta_item]/~, } fn get_metadata_section(os: os, - filename: str) -> option<@[u8]/~> unsafe { - let mb = str::as_c_str(filename, {|buf| + filename: str) -> option<@~[u8]> unsafe { + let mb = str::as_c_str(filename, |buf| { llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf) }); - if mb as int == 0 { ret option::none::<@[u8]/~>; } + if mb as int == 0 { ret option::none::<@~[u8]>; } let of = alt mk_object_file(mb) { option::some(of) { of } - _ { ret option::none::<@[u8]/~>; } + _ { ret option::none::<@~[u8]>; } }; let si = mk_section_iter(of.llof); while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { @@ -196,7 +196,7 @@ fn get_metadata_section(os: os, } llvm::LLVMMoveToNextSection(si.llsi); } - ret option::none::<@[u8]/~>; + ret option::none::<@~[u8]>; } fn meta_section_name(os: os) -> str { diff --git a/src/rustc/metadata/tydecode.rs b/src/rustc/metadata/tydecode.rs index 57c340ad755..ed510dad3ce 100644 --- a/src/rustc/metadata/tydecode.rs +++ b/src/rustc/metadata/tydecode.rs @@ -17,7 +17,7 @@ export parse_bounds_data; // Callback to translate defs to strs or back: type conv_did = fn(ast::def_id) -> ast::def_id; -type pstate = {data: @[u8]/~, crate: int, mut pos: uint, tcx: ty::ctxt}; +type pstate = {data: @~[u8], crate: int, mut pos: uint, tcx: ty::ctxt}; fn peek(st: @pstate) -> char { st.data[st.pos] as char @@ -37,7 +37,7 @@ fn next_byte(st: @pstate) -> u8 { fn parse_ident(st: @pstate, last: char) -> ast::ident { fn is_last(b: char, c: char) -> bool { ret c == b; } - ret parse_ident_(st, {|a|is_last(last, a)}); + ret parse_ident_(st, |a| is_last(last, a) ); } fn parse_ident_(st: @pstate, is_last: fn@(char) -> bool) -> @@ -50,7 +50,7 @@ fn parse_ident_(st: @pstate, is_last: fn@(char) -> bool) -> } -fn parse_ty_data(data: @[u8]/~, crate_num: int, pos: uint, tcx: ty::ctxt, +fn parse_ty_data(data: @~[u8], crate_num: int, pos: uint, tcx: ty::ctxt, conv: conv_did) -> ty::t { let st = @{data: data, crate: crate_num, mut pos: pos, tcx: tcx}; parse_ty(st, conv) @@ -65,8 +65,8 @@ fn parse_ret_ty(st: @pstate, conv: conv_did) -> (ast::ret_style, ty::t) { fn parse_constrs_gen<T: copy>(st: @pstate, conv: conv_did, pser: fn(@pstate) - -> ast::constr_arg_general_<T>) -> [@ty::constr_general<T>]/~ { - let mut rslt: [@ty::constr_general<T>]/~ = []/~; + -> ast::constr_arg_general_<T>) -> ~[@ty::constr_general<T>] { + let mut rslt: ~[@ty::constr_general<T>] = ~[]; alt peek(st) { ':' { loop { @@ -80,16 +80,16 @@ fn parse_constrs_gen<T: copy>(st: @pstate, conv: conv_did, rslt } -fn parse_constrs(st: @pstate, conv: conv_did) -> [@ty::constr]/~ { +fn parse_constrs(st: @pstate, conv: conv_did) -> ~[@ty::constr] { parse_constrs_gen(st, conv, parse_constr_arg) } -fn parse_ty_constrs(st: @pstate, conv: conv_did) -> [@ty::type_constr]/~ { +fn parse_ty_constrs(st: @pstate, conv: conv_did) -> ~[@ty::type_constr] { parse_constrs_gen(st, conv, parse_ty_constr_arg) } fn parse_path(st: @pstate) -> @ast::path { - let mut idents: [ast::ident]/~ = []/~; + let mut idents: ~[ast::ident] = ~[]; fn is_last(c: char) -> bool { ret c == '(' || c == ':'; } vec::push(idents, parse_ident_(st, is_last)); loop { @@ -99,7 +99,7 @@ fn parse_path(st: @pstate) -> @ast::path { if c == '(' { ret @{span: ast_util::dummy_sp(), global: false, idents: idents, - rp: none, types: []/~}; + rp: none, types: ~[]}; } else { vec::push(idents, parse_ident_(st, is_last)); } } } @@ -143,7 +143,7 @@ fn parse_constr<T: copy>(st: @pstate, conv: conv_did, -> @ty::constr_general<T> { // FIXME: use real spans and not a bogus one (#2407) let sp = ast_util::dummy_sp(); - let mut args: [@sp_constr_arg<T>]/~ = []/~; + let mut args: ~[@sp_constr_arg<T>] = ~[]; let pth = parse_path(st); let mut ignore: char = next(st); assert (ignore == '('); @@ -192,9 +192,9 @@ fn parse_vstore(st: @pstate) -> ty::vstore { } fn parse_substs(st: @pstate, conv: conv_did) -> ty::substs { - let self_r = parse_opt(st) {|| parse_region(st) }; + let self_r = parse_opt(st, || parse_region(st) ); - let self_ty = parse_opt(st) {|| parse_ty(st, conv) }; + let self_ty = parse_opt(st, || parse_ty(st, conv) ); assert next(st) == '['; let mut params: [ty::t]/~ = []/~; @@ -467,7 +467,7 @@ fn parse_ty_fn(st: @pstate, conv: conv_did) -> ty::fn_ty { // Rust metadata parsing -fn parse_def_id(buf: [u8]/~) -> ast::def_id { +fn parse_def_id(buf: ~[u8]) -> ast::def_id { let mut colon_idx = 0u; let len = vec::len(buf); while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1u; } @@ -491,15 +491,15 @@ fn parse_def_id(buf: [u8]/~) -> ast::def_id { ret {crate: crate_num, node: def_num}; } -fn parse_bounds_data(data: @[u8]/~, start: uint, +fn parse_bounds_data(data: @~[u8], start: uint, crate_num: int, tcx: ty::ctxt, conv: conv_did) - -> @[ty::param_bound]/~ { + -> @~[ty::param_bound] { let st = @{data: data, crate: crate_num, mut pos: start, tcx: tcx}; parse_bounds(st, conv) } -fn parse_bounds(st: @pstate, conv: conv_did) -> @[ty::param_bound]/~ { - let mut bounds = []/~; +fn parse_bounds(st: @pstate, conv: conv_did) -> @~[ty::param_bound] { + let mut bounds = ~[]; loop { vec::push(bounds, alt check next(st) { 'S' { ty::bound_send } diff --git a/src/rustc/metadata/tyencode.rs b/src/rustc/metadata/tyencode.rs index 1b97603cd7c..4190b0440af 100644 --- a/src/rustc/metadata/tyencode.rs +++ b/src/rustc/metadata/tyencode.rs @@ -115,10 +115,10 @@ fn enc_opt<T>(w: io::writer, t: option<T>, enc_f: fn(T)) { } fn enc_substs(w: io::writer, cx: @ctxt, substs: ty::substs) { - enc_opt(w, substs.self_r) { |r| enc_region(w, cx, r) } - enc_opt(w, substs.self_ty) { |t| enc_ty(w, cx, t) } + do enc_opt(w, substs.self_r) |r| { enc_region(w, cx, r) } + do enc_opt(w, substs.self_ty) |t| { enc_ty(w, cx, t) } w.write_char('['); - for substs.tps.each { |t| enc_ty(w, cx, t); } + for substs.tps.each |t| { enc_ty(w, cx, t); } w.write_char(']'); } @@ -231,7 +231,7 @@ fn enc_sty(w: io::writer, cx: @ctxt, st: ty::sty) { } ty::ty_tup(ts) { w.write_str("T["/&); - for ts.each {|t| enc_ty(w, cx, t); } + for ts.each |t| { enc_ty(w, cx, t); } w.write_char(']'); } ty::ty_box(mt) { w.write_char('@'); enc_mt(w, cx, mt); } @@ -255,7 +255,7 @@ fn enc_sty(w: io::writer, cx: @ctxt, st: ty::sty) { ty::ty_unboxed_vec(mt) { w.write_char('U'); enc_mt(w, cx, mt); } ty::ty_rec(fields) { w.write_str("R["/&); - for fields.each {|field| + for fields.each |field| { w.write_str(*field.ident); w.write_char('='); enc_mt(w, cx, field.mt); @@ -290,7 +290,7 @@ fn enc_sty(w: io::writer, cx: @ctxt, st: ty::sty) { ty::ty_constr(ty, cs) { w.write_str("A["/&); enc_ty(w, cx, ty); - for cs.each {|tc| enc_ty_constr(w, cx, tc); } + for cs.each |tc| { enc_ty_constr(w, cx, tc); } w.write_char(']'); } ty::ty_opaque_box { w.write_char('B'); } @@ -341,13 +341,13 @@ fn enc_ty_fn(w: io::writer, cx: @ctxt, ft: ty::fn_ty) { enc_proto(w, ft.proto); enc_purity(w, ft.purity); w.write_char('['); - for ft.inputs.each {|arg| + for ft.inputs.each |arg| { enc_mode(w, cx, arg.mode); enc_ty(w, cx, arg.ty); } w.write_char(']'); let mut colon = true; - for ft.constraints.each {|c| + for ft.constraints.each |c| { if colon { w.write_char(':'); colon = false; @@ -368,7 +368,7 @@ fn enc_constr_gen<T>(w: io::writer, cx: @ctxt, w.write_str(cx.ds(c.node.id)); w.write_char('|'); let mut semi = false; - for c.node.args.each {|a| + for c.node.args.each |a| { if semi { w.write_char(';'); } else { semi = true; } write_arg(a); } @@ -376,7 +376,7 @@ fn enc_constr_gen<T>(w: io::writer, cx: @ctxt, } fn enc_constr(w: io::writer, cx: @ctxt, c: @ty::constr) { - enc_constr_gen(w, cx, c, {|a| + enc_constr_gen(w, cx, c, |a| { alt a.node { carg_base { w.write_char('*'); } carg_ident(i) { w.write_uint(i); } @@ -386,7 +386,7 @@ fn enc_constr(w: io::writer, cx: @ctxt, c: @ty::constr) { } fn enc_ty_constr(w: io::writer, cx: @ctxt, c: @ty::type_constr) { - enc_constr_gen(w, cx, c, {|a| + enc_constr_gen(w, cx, c, |a| { alt a.node { carg_base { w.write_char('*'); } carg_ident(p) { w.write_str(path_to_str(p)); } @@ -395,8 +395,8 @@ fn enc_ty_constr(w: io::writer, cx: @ctxt, c: @ty::type_constr) { }); } -fn enc_bounds(w: io::writer, cx: @ctxt, bs: @[ty::param_bound]/~) { - for vec::each(*bs) {|bound| +fn enc_bounds(w: io::writer, cx: @ctxt, bs: @~[ty::param_bound]) { + for vec::each(*bs) |bound| { alt bound { ty::bound_send { w.write_char('S'); } ty::bound_copy { w.write_char('C'); } diff --git a/src/rustc/middle/astencode.rs b/src/rustc/middle/astencode.rs index 484a87ab32e..3b9b4549817 100644 --- a/src/rustc/middle/astencode.rs +++ b/src/rustc/middle/astencode.rs @@ -87,7 +87,7 @@ fn encode_inlined_item(ecx: @e::encode_ctxt, ebml_w.writer.tell()]; let id_range = ast_util::compute_id_range_for_inlined_item(ii); - ebml_w.wr_tag(c::tag_ast as uint) {|| + do ebml_w.wr_tag(c::tag_ast as uint) || { ast_util::serialize_id_range(ebml_w, id_range); encode_ast(ebml_w, simplify_ast(ii)); encode_side_tables_for_ii(ecx, maps, ebml_w, ii); @@ -210,7 +210,7 @@ impl deserializer_helpers<D: deserializer> for D { // but eventually we should add entries to the local codemap as required. fn encode_ast(ebml_w: ebml::writer, item: ast::inlined_item) { - ebml_w.wr_tag(c::tag_tree as uint) {|| + do ebml_w.wr_tag(c::tag_tree as uint) || { ast::serialize_inlined_item(ebml_w, item) } } @@ -227,7 +227,7 @@ fn encode_ast(ebml_w: ebml::writer, item: ast::inlined_item) { // inlined items. fn simplify_ast(ii: ast::inlined_item) -> ast::inlined_item { fn drop_nested_items(blk: ast::blk_, fld: fold::ast_fold) -> ast::blk_ { - let stmts_sans_items = vec::filter(blk.stmts) {|stmt| + let stmts_sans_items = do vec::filter(blk.stmts) |stmt| { alt stmt.node { ast::stmt_expr(_, _) | ast::stmt_semi(_, _) | ast::stmt_decl(@{node: ast::decl_local(_), span: _}, _) { true } @@ -278,8 +278,8 @@ fn decode_ast(par_doc: ebml::doc) -> ast::inlined_item { fn renumber_ast(xcx: extended_decode_ctxt, ii: ast::inlined_item) -> ast::inlined_item { let fld = fold::make_fold(@{ - new_id: {|a|xcx.tr_id(a)}, - new_span: {|a|xcx.tr_span(a)} + new_id: |a| xcx.tr_id(a), + new_span: |a| xcx.tr_span(a) with *fold::default_ast_fold() }); @@ -425,7 +425,7 @@ fn encode_vtable_res(ecx: @e::encode_ctxt, // ty::t doesn't work, and there is no way (atm) to have // hand-written serialization routines combine with auto-generated // ones. perhaps we should fix this. - ebml_w.emit_from_vec(*dr) {|vtable_origin| + do ebml_w.emit_from_vec(*dr) |vtable_origin| { encode_vtable_origin(ecx, ebml_w, vtable_origin) } } @@ -433,37 +433,37 @@ fn encode_vtable_res(ecx: @e::encode_ctxt, fn encode_vtable_origin(ecx: @e::encode_ctxt, ebml_w: ebml::writer, vtable_origin: typeck::vtable_origin) { - ebml_w.emit_enum("vtable_origin") {|| + do ebml_w.emit_enum("vtable_origin") || { alt vtable_origin { typeck::vtable_static(def_id, tys, vtable_res) { - ebml_w.emit_enum_variant("vtable_static", 0u, 3u) {|| - ebml_w.emit_enum_variant_arg(0u) {|| + do ebml_w.emit_enum_variant("vtable_static", 0u, 3u) || { + do ebml_w.emit_enum_variant_arg(0u) || { ebml_w.emit_def_id(def_id) } - ebml_w.emit_enum_variant_arg(1u) {|| + do ebml_w.emit_enum_variant_arg(1u) || { ebml_w.emit_tys(ecx, tys); } - ebml_w.emit_enum_variant_arg(2u) {|| + do ebml_w.emit_enum_variant_arg(2u) || { encode_vtable_res(ecx, ebml_w, vtable_res); } } } typeck::vtable_param(pn, bn) { - ebml_w.emit_enum_variant("vtable_param", 1u, 2u) {|| - ebml_w.emit_enum_variant_arg(0u) {|| + do ebml_w.emit_enum_variant("vtable_param", 1u, 2u) || { + do ebml_w.emit_enum_variant_arg(0u) || { ebml_w.emit_uint(pn); } - ebml_w.emit_enum_variant_arg(1u) {|| + do ebml_w.emit_enum_variant_arg(1u) || { ebml_w.emit_uint(bn); } } } typeck::vtable_iface(def_id, tys) { - ebml_w.emit_enum_variant("vtable_iface", 1u, 3u) {|| - ebml_w.emit_enum_variant_arg(0u) {|| + do ebml_w.emit_enum_variant("vtable_iface", 1u, 3u) || { + do ebml_w.emit_enum_variant_arg(0u) || { ebml_w.emit_def_id(def_id) } - ebml_w.emit_enum_variant_arg(1u) {|| + do ebml_w.emit_enum_variant_arg(1u) || { ebml_w.emit_tys(ecx, tys); } } @@ -475,43 +475,43 @@ fn encode_vtable_origin(ecx: @e::encode_ctxt, impl helpers for ebml::ebml_deserializer { fn read_vtable_res(xcx: extended_decode_ctxt) -> typeck::vtable_res { - @self.read_to_vec {|| self.read_vtable_origin(xcx) } + @self.read_to_vec(|| self.read_vtable_origin(xcx) ) } fn read_vtable_origin(xcx: extended_decode_ctxt) -> typeck::vtable_origin { - self.read_enum("vtable_origin") {|| - self.read_enum_variant {|i| + do self.read_enum("vtable_origin") || { + do self.read_enum_variant |i| { alt check i { 0u { typeck::vtable_static( - self.read_enum_variant_arg(0u) {|| + do self.read_enum_variant_arg(0u) || { self.read_def_id(xcx) }, - self.read_enum_variant_arg(1u) {|| + do self.read_enum_variant_arg(1u) || { self.read_tys(xcx) }, - self.read_enum_variant_arg(2u) {|| + do self.read_enum_variant_arg(2u) || { self.read_vtable_res(xcx) } ) } 1u { typeck::vtable_param( - self.read_enum_variant_arg(0u) {|| + do self.read_enum_variant_arg(0u) || { self.read_uint() }, - self.read_enum_variant_arg(1u) {|| + do self.read_enum_variant_arg(1u) || { self.read_uint() } ) } 2u { typeck::vtable_iface( - self.read_enum_variant_arg(0u) {|| + do self.read_enum_variant_arg(0u) || { self.read_def_id(xcx) }, - self.read_enum_variant_arg(1u) {|| + do self.read_enum_variant_arg(1u) || { self.read_tys(xcx) } ) @@ -530,7 +530,7 @@ impl helpers for @e::encode_ctxt { @{diag: self.tcx.sess.diagnostic(), ds: e::def_to_str, tcx: self.tcx, - reachable: {|a|encoder::reachable(self, a)}, + reachable: |a| encoder::reachable(self, a), abbrevs: tyencode::ac_use_abbrevs(self.type_abbrevs)} } } @@ -540,8 +540,8 @@ impl helpers for ebml::writer { e::write_type(ecx, self, ty) } - fn emit_tys(ecx: @e::encode_ctxt, tys: [ty::t]/~) { - self.emit_from_vec(tys) {|ty| + fn emit_tys(ecx: @e::encode_ctxt, tys: ~[ty::t]) { + do self.emit_from_vec(tys) |ty| { e::write_type(ecx, self, ty) } } @@ -551,16 +551,16 @@ impl helpers for ebml::writer { } fn emit_tpbt(ecx: @e::encode_ctxt, tpbt: ty::ty_param_bounds_and_ty) { - self.emit_rec {|| - self.emit_rec_field("bounds", 0u) {|| - self.emit_from_vec(*tpbt.bounds) {|bs| + do self.emit_rec || { + do self.emit_rec_field("bounds", 0u) || { + do self.emit_from_vec(*tpbt.bounds) |bs| { self.emit_bounds(ecx, bs) } } - self.emit_rec_field("rp", 1u) {|| + do self.emit_rec_field("rp", 1u) || { ast::serialize_region_param(self, tpbt.rp) } - self.emit_rec_field("ty", 2u) {|| + do self.emit_rec_field("ty", 2u) || { self.emit_ty(ecx, tpbt.ty); } } @@ -569,7 +569,7 @@ impl helpers for ebml::writer { impl writer for ebml::writer { fn tag(tag_id: c::astencode_tag, f: fn()) { - self.wr_tag(tag_id as uint) {|| f() } + do self.wr_tag(tag_id as uint) || { f() } } fn id(id: ast::node_id) { @@ -581,7 +581,7 @@ fn encode_side_tables_for_ii(ecx: @e::encode_ctxt, maps: maps, ebml_w: ebml::writer, ii: ast::inlined_item) { - ebml_w.wr_tag(c::tag_table as uint) {|| + do ebml_w.wr_tag(c::tag_table as uint) || { ast_util::visit_ids_for_inlined_item( ii, fn@(id: ast::node_id, copy ebml_w) { @@ -601,37 +601,37 @@ fn encode_side_tables_for_id(ecx: @e::encode_ctxt, #debug["Encoding side tables for id %d", id]; - option::iter(tcx.def_map.find(id)) {|def| - ebml_w.tag(c::tag_table_def) {|| + do option::iter(tcx.def_map.find(id)) |def| { + do ebml_w.tag(c::tag_table_def) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { ast::serialize_def(ebml_w, def) } } } - option::iter((*tcx.node_types).find(id as uint)) {|ty| - ebml_w.tag(c::tag_table_node_type) {|| + do option::iter((*tcx.node_types).find(id as uint)) |ty| { + do ebml_w.tag(c::tag_table_node_type) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { e::write_type(ecx, ebml_w, ty) } } } - option::iter(tcx.node_type_substs.find(id)) {|tys| - ebml_w.tag(c::tag_table_node_type_subst) {|| + do option::iter(tcx.node_type_substs.find(id)) |tys| { + do ebml_w.tag(c::tag_table_node_type_subst) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { ebml_w.emit_tys(ecx, tys) } } } - option::iter(tcx.freevars.find(id)) {|fv| - ebml_w.tag(c::tag_table_freevars) {|| + do option::iter(tcx.freevars.find(id)) |fv| { + do ebml_w.tag(c::tag_table_freevars) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| - ebml_w.emit_from_vec(*fv) {|fv_entry| + do ebml_w.tag(c::tag_table_val) || { + do ebml_w.emit_from_vec(*fv) |fv_entry| { encode_freevar_entry(ebml_w, *fv_entry) } } @@ -639,19 +639,19 @@ fn encode_side_tables_for_id(ecx: @e::encode_ctxt, } let lid = {crate: ast::local_crate, node: id}; - option::iter(tcx.tcache.find(lid)) {|tpbt| - ebml_w.tag(c::tag_table_tcache) {|| + do option::iter(tcx.tcache.find(lid)) |tpbt| { + do ebml_w.tag(c::tag_table_tcache) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { ebml_w.emit_tpbt(ecx, tpbt); } } } - option::iter(tcx.ty_param_bounds.find(id)) {|pbs| - ebml_w.tag(c::tag_table_param_bounds) {|| + do option::iter(tcx.ty_param_bounds.find(id)) |pbs| { + do ebml_w.tag(c::tag_table_param_bounds) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { ebml_w.emit_bounds(ecx, pbs) } } @@ -671,17 +671,17 @@ fn encode_side_tables_for_id(ecx: @e::encode_ctxt, // } //} - option::iter(maps.mutbl_map.find(id)) {|_m| - ebml_w.tag(c::tag_table_mutbl) {|| + do option::iter(maps.mutbl_map.find(id)) |_m| { + do ebml_w.tag(c::tag_table_mutbl) || { ebml_w.id(id); } } - option::iter(maps.last_use_map.find(id)) {|m| - ebml_w.tag(c::tag_table_last_use) {|| + do option::iter(maps.last_use_map.find(id)) |m| { + do ebml_w.tag(c::tag_table_last_use) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| - ebml_w.emit_from_vec((*m).get()) {|id| + do ebml_w.tag(c::tag_table_val) || { + do ebml_w.emit_from_vec((*m).get()) |id| { ebml_w.emit_int(id); } } @@ -691,28 +691,28 @@ fn encode_side_tables_for_id(ecx: @e::encode_ctxt, // impl_map is not used except when emitting metadata, // don't need to keep it. - option::iter(maps.method_map.find(id)) {|mme| - ebml_w.tag(c::tag_table_method_map) {|| + do option::iter(maps.method_map.find(id)) |mme| { + do ebml_w.tag(c::tag_table_method_map) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { serialize_method_map_entry(ebml_w, mme) } } } - option::iter(maps.vtable_map.find(id)) {|dr| - ebml_w.tag(c::tag_table_vtable_map) {|| + do option::iter(maps.vtable_map.find(id)) |dr| { + do ebml_w.tag(c::tag_table_vtable_map) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { encode_vtable_res(ecx, ebml_w, dr); } } } - option::iter(tcx.borrowings.find(id)) {|borrow| - ebml_w.tag(c::tag_table_borrowings) {|| + do option::iter(tcx.borrowings.find(id)) |borrow| { + do ebml_w.tag(c::tag_table_borrowings) || { ebml_w.id(id); - ebml_w.tag(c::tag_table_val) {|| + do ebml_w.tag(c::tag_table_val) || { ty::serialize_borrow(ebml_w, borrow) } } @@ -738,32 +738,32 @@ impl decoder for ebml::ebml_deserializer { tydecode::parse_ty_data( self.parent.data, xcx.dcx.cdata.cnum, self.pos, xcx.dcx.tcx, - {|a|xcx.tr_def_id(a)}) + |a| xcx.tr_def_id(a) ) } - fn read_tys(xcx: extended_decode_ctxt) -> [ty::t]/~ { - self.read_to_vec {|| self.read_ty(xcx) } + fn read_tys(xcx: extended_decode_ctxt) -> ~[ty::t] { + self.read_to_vec(|| self.read_ty(xcx) ) } - fn read_bounds(xcx: extended_decode_ctxt) -> @[ty::param_bound]/~ { + fn read_bounds(xcx: extended_decode_ctxt) -> @~[ty::param_bound] { tydecode::parse_bounds_data( self.parent.data, self.pos, xcx.dcx.cdata.cnum, xcx.dcx.tcx, - {|a|xcx.tr_def_id(a)}) + |a| xcx.tr_def_id(a) ) } fn read_ty_param_bounds_and_ty(xcx: extended_decode_ctxt) -> ty::ty_param_bounds_and_ty { - self.read_rec {|| + do self.read_rec || { { - bounds: self.read_rec_field("bounds", 0u) {|| - @self.read_to_vec {|| self.read_bounds(xcx) } - }, - rp: self.read_rec_field("rp", 1u) {|| + bounds: self.read_rec_field("bounds", 0u, || { + @self.read_to_vec(|| self.read_bounds(xcx) ) + }), + rp: self.read_rec_field("rp", 1u, || { ast::deserialize_region_param(self) - }, - ty: self.read_rec_field("ty", 2u) {|| + }), + ty: self.read_rec_field("ty", 2u, || { self.read_ty(xcx) - } + }) } } } @@ -773,7 +773,7 @@ fn decode_side_tables(xcx: extended_decode_ctxt, ast_doc: ebml::doc) { let dcx = xcx.dcx; let tbl_doc = ast_doc[c::tag_table]; - ebml::docs(tbl_doc) {|tag, entry_doc| + do ebml::docs(tbl_doc) |tag, entry_doc| { let id0 = entry_doc[c::tag_table_id].as_int(); let id = xcx.tr_id(id0); @@ -796,9 +796,9 @@ fn decode_side_tables(xcx: extended_decode_ctxt, let tys = val_dsr.read_tys(xcx); dcx.tcx.node_type_substs.insert(id, tys); } else if tag == (c::tag_table_freevars as uint) { - let fv_info = @val_dsr.read_to_vec {|| + let fv_info = @val_dsr.read_to_vec(|| { @val_dsr.read_freevar_entry(xcx) - }; + }); dcx.tcx.freevars.insert(id, fv_info); } else if tag == (c::tag_table_tcache as uint) { let tpbt = val_dsr.read_ty_param_bounds_and_ty(xcx); @@ -808,9 +808,9 @@ fn decode_side_tables(xcx: extended_decode_ctxt, let bounds = val_dsr.read_bounds(xcx); dcx.tcx.ty_param_bounds.insert(id, bounds); } else if tag == (c::tag_table_last_use as uint) { - let ids = val_dsr.read_to_vec {|| + let ids = val_dsr.read_to_vec(|| { xcx.tr_id(val_dsr.read_int()) - }; + }); let dvec = @dvec::from_vec(vec::to_mut(ids)); dcx.maps.last_use_map.insert(id, dvec); } else if tag == (c::tag_table_method_map as uint) { @@ -838,7 +838,7 @@ fn decode_side_tables(xcx: extended_decode_ctxt, #[cfg(test)] fn encode_item_ast(ebml_w: ebml::writer, item: @ast::item) { - ebml_w.wr_tag(c::tag_tree as uint) {|| + do ebml_w.wr_tag(c::tag_tree as uint) || { ast::serialize_item(ebml_w, *item); } } @@ -861,7 +861,7 @@ type fake_session = (); #[cfg(test)] impl of fake_ext_ctxt for fake_session { - fn cfg() -> ast::crate_cfg { []/~ } + fn cfg() -> ast::crate_cfg { ~[] } fn parse_sess() -> parse::parse_sess { parse::new_parse_sess(none) } } @@ -881,9 +881,9 @@ fn roundtrip(in_item: @ast::item) { #debug["out_item = %s", pprust::item_to_str(out_item)]; let exp_str = - io::with_str_writer {|w| ast::serialize_item(w, *in_item) }; + io::with_str_writer(|w| ast::serialize_item(w, *in_item) ); let out_str = - io::with_str_writer {|w| ast::serialize_item(w, *out_item) }; + io::with_str_writer(|w| ast::serialize_item(w, *out_item) ); #debug["expected string: %s", exp_str]; #debug["actual string : %s", out_str]; @@ -924,13 +924,13 @@ fn test_simplification() { let item_in = ast::ii_item(#ast(item) { fn new_int_alist<B: copy>() -> alist<int, B> { fn eq_int(&&a: int, &&b: int) -> bool { a == b } - ret {eq_fn: eq_int, mut data: []/~}; + ret {eq_fn: eq_int, mut data: ~[]}; } }); let item_out = simplify_ast(item_in); let item_exp = ast::ii_item(#ast(item) { fn new_int_alist<B: copy>() -> alist<int, B> { - ret {eq_fn: eq_int, mut data: []/~}; + ret {eq_fn: eq_int, mut data: ~[]}; } }); alt (item_out, item_exp) { diff --git a/src/rustc/middle/block_use.rs b/src/rustc/middle/block_use.rs index 9fcc3d286fc..e6bafb4c48a 100644 --- a/src/rustc/middle/block_use.rs +++ b/src/rustc/middle/block_use.rs @@ -28,7 +28,7 @@ fn visit_expr(ex: @expr, cx: ctx, v: visit::vt<ctx>) { cx.allow_block = true; v.visit_expr(f, cx, v); let mut i = 0u; - for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).each {|arg_t| + for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).each |arg_t| { cx.allow_block = (ty::arg_mode(cx.tcx, arg_t) == by_ref); v.visit_expr(args[i], cx, v); i += 1u; diff --git a/src/rustc/middle/borrowck.rs b/src/rustc/middle/borrowck.rs index 7ce20dbd82f..69b38cc1b2c 100644 --- a/src/rustc/middle/borrowck.rs +++ b/src/rustc/middle/borrowck.rs @@ -194,7 +194,7 @@ type borrowck_ctxt = @{tcx: ty::ctxt, root_map: root_map, mutbl_map: mutbl_map}; -// a map mapping id's of expressions of gc'd type (@T, []/@, etc) where +// a map mapping id's of expressions of gc'd type (@T, @[], etc) where // the box needs to be kept live to the id of the scope for which they // must stay live. type root_map = hashmap<root_map_key, ast::node_id>; @@ -450,7 +450,7 @@ impl to_str_methods for borrowck_ctxt { self.cat_to_repr(cmt.cat), cmt.id, self.mut_to_str(cmt.mutbl), - cmt.lp.map_default("none", { |p| self.lp_to_str(p) }), + cmt.lp.map_default("none", |p| self.lp_to_str(p) ), ty_to_str(self.tcx, cmt.ty)] } diff --git a/src/rustc/middle/borrowck/categorization.rs b/src/rustc/middle/borrowck/categorization.rs index f594f335de2..7df58bf43d0 100644 --- a/src/rustc/middle/borrowck/categorization.rs +++ b/src/rustc/middle/borrowck/categorization.rs @@ -280,7 +280,7 @@ impl public_methods for borrowck_ctxt { cmt: cmt) -> cmt { @{id: arg.id(), span: arg.span(), cat: cat_comp(cmt, comp_variant(enum_did)), - lp: cmt.lp.map { |l| @lp_comp(l, comp_variant(enum_did)) }, + lp: cmt.lp.map(|l| @lp_comp(l, comp_variant(enum_did)) ), mutbl: cmt.mutbl, // imm iff in an immutable context ty: self.tcx.ty(arg)} } @@ -311,9 +311,7 @@ impl public_methods for borrowck_ctxt { m_mutbl | m_const { f_mutbl } }; let f_comp = comp_field(f_name, f_mutbl); - let lp = base_cmt.lp.map { |lp| - @lp_comp(lp, f_comp) - }; + let lp = base_cmt.lp.map(|lp| @lp_comp(lp, f_comp) ); @{id: node.id(), span: node.span(), cat: cat_comp(base_cmt, f_comp), lp:lp, mutbl: m, ty: self.tcx.ty(node)} @@ -321,10 +319,10 @@ impl public_methods for borrowck_ctxt { fn cat_deref<N:ast_node>(node: N, base_cmt: cmt, derefs: uint, expl: bool) -> option<cmt> { - ty::deref(self.tcx, base_cmt.ty, expl).map { |mt| + do ty::deref(self.tcx, base_cmt.ty, expl).map |mt| { alt deref_kind(self.tcx, base_cmt.ty) { deref_ptr(ptr) { - let lp = base_cmt.lp.chain { |l| + let lp = do base_cmt.lp.chain |l| { // Given that the ptr itself is loanable, we can // loan out deref'd uniq ptrs as the data they are // the only way to reach the data they point at. @@ -341,7 +339,7 @@ impl public_methods for borrowck_ctxt { } deref_comp(comp) { - let lp = base_cmt.lp.map { |l| @lp_comp(l, comp) }; + let lp = base_cmt.lp.map(|l| @lp_comp(l, comp) ); @{id:node.id(), span:node.span(), cat:cat_comp(base_cmt, comp), lp:lp, mutbl:mt.mutbl, ty:mt.ty} @@ -367,7 +365,7 @@ impl public_methods for borrowck_ctxt { deref_ptr(ptr) { // make deref of vectors explicit, as explained in the comment at // the head of this section - let deref_lp = base_cmt.lp.map { |lp| @lp_deref(lp, ptr) }; + let deref_lp = base_cmt.lp.map(|lp| @lp_deref(lp, ptr) ); let deref_cmt = @{id:expr.id, span:expr.span, cat:cat_deref(base_cmt, 0u, ptr), lp:deref_lp, mutbl:m_imm, ty:mt.ty}; @@ -383,7 +381,7 @@ impl public_methods for borrowck_ctxt { fn comp(expr: @ast::expr, of_cmt: cmt, vect: ty::t, mt: ty::mt) -> cmt { let comp = comp_index(vect, mt.mutbl); - let index_lp = of_cmt.lp.map { |lp| @lp_comp(lp, comp) }; + let index_lp = of_cmt.lp.map(|lp| @lp_comp(lp, comp) ); @{id:expr.id, span:expr.span, cat:cat_comp(of_cmt, comp), lp:index_lp, mutbl:mt.mutbl, ty:mt.ty} @@ -393,7 +391,7 @@ impl public_methods for borrowck_ctxt { fn cat_tuple_elt<N: ast_node>(elt: N, cmt: cmt) -> cmt { @{id: elt.id(), span: elt.span(), cat: cat_comp(cmt, comp_tuple), - lp: cmt.lp.map { |l| @lp_comp(l, comp_tuple) }, + lp: cmt.lp.map(|l| @lp_comp(l, comp_tuple) ), mutbl: cmt.mutbl, // imm iff in an immutable context ty: self.tcx.ty(elt)} } @@ -432,14 +430,14 @@ fn field_mutbl(tcx: ty::ctxt, // Need to refactor so that records/class fields can be treated uniformly. alt ty::get(base_ty).struct { ty::ty_rec(fields) { - for fields.each { |f| + for fields.each |f| { if f.ident == f_name { ret some(f.mt.mutbl); } } } ty::ty_class(did, substs) { - for ty::lookup_class_fields(tcx, did).each { |fld| + for ty::lookup_class_fields(tcx, did).each |fld| { if fld.ident == f_name { let m = alt fld.mutability { ast::class_mutable { ast::m_mutbl } diff --git a/src/rustc/middle/borrowck/check_loans.rs b/src/rustc/middle/borrowck/check_loans.rs index c9ac6bf3a63..617c1f8df8a 100644 --- a/src/rustc/middle/borrowck/check_loans.rs +++ b/src/rustc/middle/borrowck/check_loans.rs @@ -23,7 +23,7 @@ enum check_loan_ctxt = @{ // we are in a ctor, we track the self id mut in_ctor: bool, mut declared_purity: ast::purity, - mut fn_args: @[ast::node_id]/~ + mut fn_args: @~[ast::node_id] }; // if we are enforcing purity, why are we doing so? @@ -45,7 +45,7 @@ fn check_loans(bccx: borrowck_ctxt, reported: int_hash(), mut in_ctor: false, mut declared_purity: ast::impure_fn, - mut fn_args: @[]/~}); + mut fn_args: @~[]}); let vt = visit::mk_vt(@{visit_expr: check_loans_in_expr, visit_local: check_loans_in_local, visit_block: check_loans_in_block, @@ -120,9 +120,9 @@ impl methods for check_loan_ctxt { let req_loan_map = self.req_maps.req_loan_map; loop { - for req_loan_map.find(scope_id).each { |loanss| - for (*loanss).each { |loans| - for (*loans).each { |loan| + for req_loan_map.find(scope_id).each |loanss| { + for (*loanss).each |loans| { + for (*loans).each |loan| { if !f(loan) { ret; } } } @@ -138,7 +138,7 @@ impl methods for check_loan_ctxt { fn walk_loans_of(scope_id: ast::node_id, lp: @loan_path, f: fn(loan) -> bool) { - for self.walk_loans(scope_id) { |loan| + for self.walk_loans(scope_id) |loan| { if loan.lp == lp { if !f(loan) { ret; } } @@ -160,7 +160,7 @@ impl methods for check_loan_ctxt { #debug["check_pure_callee_or_arg(pc=%?, expr=%?, \ callee_id=%d, ty=%s)", pc, - opt_expr.map({|e| pprust::expr_to_str(e)}), + opt_expr.map(|e| pprust::expr_to_str(e) ), callee_id, ty_to_str(self.tcx(), ty::node_id_to_type(tcx, callee_id))]; @@ -244,9 +244,9 @@ impl methods for check_loan_ctxt { }; let par_scope_id = self.tcx().region_map.get(scope_id); - for self.walk_loans(par_scope_id) { |old_loan| - for (*new_loanss).each { |new_loans| - for (*new_loans).each { |new_loan| + for self.walk_loans(par_scope_id) |old_loan| { + for (*new_loanss).each |new_loans| { + for (*new_loans).each |new_loan| { if old_loan.lp != new_loan.lp { cont; } alt (old_loan.mutbl, new_loan.mutbl) { (m_const, _) | (_, m_const) | @@ -333,7 +333,7 @@ impl methods for check_loan_ctxt { // which will be checked for compat separately in // check_for_conflicting_loans() if at != at_mutbl_ref { - for cmt.lp.each { |lp| + for cmt.lp.each |lp| { self.check_for_loan_conflicting_with_assignment( at, ex, cmt, lp); } @@ -348,7 +348,7 @@ impl methods for check_loan_ctxt { cmt: cmt, lp: @loan_path) { - for self.walk_loans_of(ex.id, lp) { |loan| + for self.walk_loans_of(ex.id, lp) |loan| { alt loan.mutbl { m_mutbl | m_const { /*ok*/ } m_imm { @@ -439,7 +439,7 @@ impl methods for check_loan_ctxt { none { ret; } some(lp) { lp } }; - for self.walk_loans_of(cmt.id, lp) { |loan| + for self.walk_loans_of(cmt.id, lp) |loan| { self.bccx.span_err( cmt.span, #fmt["moving out of %s prohibited due to outstanding loan", @@ -461,7 +461,7 @@ impl methods for check_loan_ctxt { none { ret; } some(lp) { lp } }; - for self.walk_loans_of(cmt.id, lp) { |_loan| + for self.walk_loans_of(cmt.id, lp) |_loan| { #debug["Removing last use entry %? due to outstanding loan", expr.id]; self.bccx.last_use_map.remove(expr.id); @@ -473,13 +473,13 @@ impl methods for check_loan_ctxt { callee: option<@ast::expr>, callee_id: ast::node_id, callee_span: span, - args: [@ast::expr]/~) { + args: ~[@ast::expr]) { alt self.purity(expr.id) { none {} some(pc) { self.check_pure_callee_or_arg( pc, callee, callee_id, callee_span); - for args.each { |arg| + for args.each |arg| { self.check_pure_callee_or_arg( pc, some(arg), arg.id, arg.span); } @@ -488,7 +488,7 @@ impl methods for check_loan_ctxt { let arg_tys = ty::ty_fn_args( ty::node_id_to_type(self.tcx(), callee_id)); - vec::iter2(args, arg_tys) { |arg, arg_ty| + do vec::iter2(args, arg_tys) |arg, arg_ty| { alt ty::resolved_mode(self.tcx(), arg_ty.mode) { ast::by_move { self.check_move_out(arg); @@ -508,9 +508,9 @@ fn check_loans_in_fn(fk: visit::fn_kind, decl: ast::fn_decl, body: ast::blk, visitor: visit::vt<check_loan_ctxt>) { #debug["purity on entry=%?", copy self.declared_purity]; - save_and_restore(self.in_ctor) {|| - save_and_restore(self.declared_purity) {|| - save_and_restore(self.fn_args) {|| + do save_and_restore(self.in_ctor) || { + do save_and_restore(self.declared_purity) || { + do save_and_restore(self.fn_args) || { let is_stack_closure = self.is_stack_closure(id); // In principle, we could consider fk_anon(*) or @@ -523,7 +523,7 @@ fn check_loans_in_fn(fk: visit::fn_kind, decl: ast::fn_decl, body: ast::blk, visit::fk_ctor(*) { self.in_ctor = true; self.declared_purity = decl.purity; - self.fn_args = @decl.inputs.map({|i| i.id}); + self.fn_args = @decl.inputs.map(|i| i.id ); } visit::fk_anon(*) | visit::fk_fn_block(*) if is_stack_closure { @@ -535,7 +535,7 @@ fn check_loans_in_fn(fk: visit::fn_kind, decl: ast::fn_decl, body: ast::blk, visit::fk_dtor(*) { self.in_ctor = false; self.declared_purity = decl.purity; - self.fn_args = @decl.inputs.map({|i| i.id}); + self.fn_args = @decl.inputs.map(|i| i.id ); } } @@ -582,7 +582,7 @@ fn check_loans_in_expr(expr: @ast::expr, } ast::expr_fn(_, _, _, cap_clause) | ast::expr_fn_block(_, _, cap_clause) { - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { if cap_item.is_move { let def = self.tcx().def_map.get(cap_item.id); @@ -618,7 +618,7 @@ fn check_loans_in_expr(expr: @ast::expr, none, ast_util::op_expr_callee_id(expr), expr.span, - [rval]/~); + ~[rval]); } ast::expr_unary(*) | ast::expr_index(*) if self.bccx.method_map.contains_key(expr.id) { @@ -626,7 +626,7 @@ fn check_loans_in_expr(expr: @ast::expr, none, ast_util::op_expr_callee_id(expr), expr.span, - []/~); + ~[]); } _ { } } @@ -637,7 +637,7 @@ fn check_loans_in_expr(expr: @ast::expr, fn check_loans_in_block(blk: ast::blk, &&self: check_loan_ctxt, vt: visit::vt<check_loan_ctxt>) { - save_and_restore(self.declared_purity) {|| + do save_and_restore(self.declared_purity) || { self.check_for_conflicting_loans(blk.node.id); alt blk.node.rules { diff --git a/src/rustc/middle/borrowck/gather_loans.rs b/src/rustc/middle/borrowck/gather_loans.rs index 5c4b804164c..443f135f46d 100644 --- a/src/rustc/middle/borrowck/gather_loans.rs +++ b/src/rustc/middle/borrowck/gather_loans.rs @@ -33,7 +33,7 @@ fn req_loans_in_expr(ex: @ast::expr, #debug["req_loans_in_expr(ex=%s)", pprust::expr_to_str(ex)]; // If this expression is borrowed, have to ensure it remains valid: - for tcx.borrowings.find(ex.id).each { |borrow| + for tcx.borrowings.find(ex.id).each |borrow| { let cmt = self.bccx.cat_borrow_of_expr(ex); let scope_r = ty::re_scope(borrow.scope_id); self.guarantee_valid(cmt, borrow.mutbl, scope_r); @@ -56,7 +56,7 @@ fn req_loans_in_expr(ex: @ast::expr, ast::expr_call(f, args, _) { let arg_tys = ty::ty_fn_args(ty::expr_ty(self.tcx(), f)); let scope_r = ty::re_scope(ex.id); - vec::iter2(args, arg_tys) { |arg, arg_ty| + do vec::iter2(args, arg_tys) |arg, arg_ty| { alt ty::resolved_mode(self.tcx(), arg_ty.mode) { ast::by_mutbl_ref { let arg_cmt = self.bccx.cat_expr(arg); @@ -86,7 +86,7 @@ fn req_loans_in_expr(ex: @ast::expr, // passing the buck onto us to enforce this) // // FIXME (#2493): this handling is not really adequate. - // For example, if there is a type like, {f: [int]/~}, we + // For example, if there is a type like, {f: ~[int]}, we // will ignore it, but we ought to be requiring it to be // immutable (whereas something like {f:int} would be // fine). @@ -114,8 +114,8 @@ fn req_loans_in_expr(ex: @ast::expr, ast::expr_alt(ex_v, arms, _) { let cmt = self.bccx.cat_expr(ex_v); - for arms.each { |arm| - for arm.pats.each { |pat| + for arms.each |arm| { + for arm.pats.each |pat| { self.gather_pat(cmt, pat, arm.body.node.id, ex.id); } } @@ -215,7 +215,7 @@ impl methods for gather_loan_ctxt { }; let result = { - self.check_mutbl(req_mutbl, cmt).chain { |_ok| + do self.check_mutbl(req_mutbl, cmt).chain |_ok| { self.bccx.preserve(cmt, opt_scope_id) } }; @@ -280,7 +280,7 @@ impl methods for gather_loan_ctxt { } none { self.req_maps.req_loan_map.insert( - scope_id, @dvec::from_vec([mut loans]/~)); + scope_id, @dvec::from_vec(~[mut loans])); } } } @@ -345,7 +345,7 @@ impl methods for gather_loan_ctxt { not variant", e])} }; - for subpats.each { |subpat| + for subpats.each |subpat| { let subcmt = self.bccx.cat_variant(subpat, enum_did, cmt); self.gather_pat(subcmt, subpat, arm_id, alt_id); } @@ -375,14 +375,14 @@ impl methods for gather_loan_ctxt { self.guarantee_valid(cmt1, m_const, arm_scope); - for o_pat.each { |p| + for o_pat.each |p| { self.gather_pat(cmt, p, arm_id, alt_id); } } ast::pat_rec(field_pats, _) { // {f1: p1, ..., fN: pN} - for field_pats.each { |fp| + for field_pats.each |fp| { let cmt_field = self.bccx.cat_field(fp.pat, cmt, fp.ident); self.gather_pat(cmt_field, fp.pat, arm_id, alt_id); } @@ -390,7 +390,7 @@ impl methods for gather_loan_ctxt { ast::pat_tup(subpats) { // (p1, ..., pN) - for subpats.each { |subpat| + for subpats.each |subpat| { let subcmt = self.bccx.cat_tuple_elt(subpat, cmt); self.gather_pat(subcmt, subpat, arm_id, alt_id); } diff --git a/src/rustc/middle/capture.rs b/src/rustc/middle/capture.rs index 0ddaf7da659..bdb4adacf7e 100644 --- a/src/rustc/middle/capture.rs +++ b/src/rustc/middle/capture.rs @@ -38,9 +38,9 @@ fn check_capture_clause(tcx: ty::ctxt, let freevars = freevars::get_freevars(tcx, fn_expr_id); let seen_defs = map::int_hash(); - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { let cap_def = tcx.def_map.get(cap_item.id); - if !vec::any(*freevars, {|fv| fv.def == cap_def}) { + if !vec::any(*freevars, |fv| fv.def == cap_def ) { tcx.sess.span_warn( cap_item.span, #fmt("captured variable '%s' not used in closure", @@ -60,13 +60,13 @@ fn check_capture_clause(tcx: ty::ctxt, fn compute_capture_vars(tcx: ty::ctxt, fn_expr_id: ast::node_id, fn_proto: ast::proto, - cap_clause: ast::capture_clause) -> [capture_var]/~ { + cap_clause: ast::capture_clause) -> ~[capture_var] { let freevars = freevars::get_freevars(tcx, fn_expr_id); let cap_map = map::int_hash(); // first add entries for anything explicitly named in the cap clause - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { #debug("Doing capture var: %s (%?)", *cap_item.name, cap_item.id); @@ -75,7 +75,7 @@ fn compute_capture_vars(tcx: ty::ctxt, if cap_item.is_move { // if we are moving the value in, but it's not actually used, // must drop it. - if vec::any(*freevars, {|fv| fv.def == cap_def}) { + if vec::any(*freevars, |fv| fv.def == cap_def ) { cap_map.insert(cap_def_id, {def:cap_def, span: cap_item.span, cap_item: some(cap_item), @@ -89,7 +89,7 @@ fn compute_capture_vars(tcx: ty::ctxt, } else { // if we are copying the value in, but it's not actually used, // just ignore it. - if vec::any(*freevars, {|fv| fv.def == cap_def}) { + if vec::any(*freevars, |fv| fv.def == cap_def ) { cap_map.insert(cap_def_id, {def:cap_def, span: cap_item.span, cap_item: some(cap_item), @@ -106,7 +106,7 @@ fn compute_capture_vars(tcx: ty::ctxt, ast::proto_bare | ast::proto_box | ast::proto_uniq { cap_copy } }; - vec::iter(*freevars) { |fvar| + do vec::iter(*freevars) |fvar| { let fvar_def_id = ast_util::def_id_of_def(fvar.def).node; alt cap_map.find(fvar_def_id) { option::some(_) { /* was explicitly named, do nothing */ } @@ -119,7 +119,7 @@ fn compute_capture_vars(tcx: ty::ctxt, } } - let mut result = []/~; - for cap_map.each_value { |cap_var| vec::push(result, cap_var); } + let mut result = ~[]; + for cap_map.each_value |cap_var| { vec::push(result, cap_var); } ret result; } diff --git a/src/rustc/middle/check_alt.rs b/src/rustc/middle/check_alt.rs index 76084576ee8..1d72e3e95d5 100644 --- a/src/rustc/middle/check_alt.rs +++ b/src/rustc/middle/check_alt.rs @@ -13,8 +13,8 @@ import std::map::hashmap; fn check_crate(tcx: ty::ctxt, crate: @crate) { visit::visit_crate(*crate, (), visit::mk_vt(@{ - visit_expr: {|a,b,c|check_expr(tcx, a, b, c)}, - visit_local: {|a,b,c|check_local(tcx, a, b, c)} + visit_expr: |a,b,c| check_expr(tcx, a, b, c), + visit_local: |a,b,c| check_local(tcx, a, b, c) with *visit::default_visitor::<()>() })); tcx.sess.abort_if_errors(); @@ -36,11 +36,11 @@ fn check_expr(tcx: ty::ctxt, ex: @expr, &&s: (), v: visit::vt<()>) { } // Check for unreachable patterns -fn check_arms(tcx: ty::ctxt, arms: [arm]/~) { - let mut seen = []/~; - for arms.each {|arm| - for arm.pats.each {|pat| - let v = [pat]/~; +fn check_arms(tcx: ty::ctxt, arms: ~[arm]) { + let mut seen = ~[]; + for arms.each |arm| { + for arm.pats.each |pat| { + let v = ~[pat]; alt is_useful(tcx, seen, v) { not_useful { tcx.sess.span_err(pat.span, "unreachable pattern"); @@ -59,8 +59,8 @@ fn raw_pat(p: @pat) -> @pat { } } -fn check_exhaustive(tcx: ty::ctxt, sp: span, pats: [@pat]/~) { - let ext = alt is_useful(tcx, vec::map(pats, {|p| [p]/~}), [wild()]/~) { +fn check_exhaustive(tcx: ty::ctxt, sp: span, pats: ~[@pat]) { + let ext = alt is_useful(tcx, vec::map(pats, |p| ~[p]), ~[wild()]) { not_useful { ret; } // This is good, wildcard pattern isn't reachable useful_ { none } useful(ty, ctor) { @@ -74,7 +74,7 @@ fn check_exhaustive(tcx: ty::ctxt, sp: span, pats: [@pat]/~) { ty::ty_enum(id, _) { let vid = alt check ctor { variant(id) { id } }; alt check vec::find(*ty::enum_variants(tcx, id), - {|v| v.id == vid}) { + |v| v.id == vid) { some(v) { some(v.name) } } } @@ -89,7 +89,7 @@ fn check_exhaustive(tcx: ty::ctxt, sp: span, pats: [@pat]/~) { tcx.sess.span_err(sp, msg); } -type matrix = [[@pat]/~]/~; +type matrix = ~[~[@pat]]; enum useful { useful(ty::t, ctor), useful_, not_useful } @@ -111,10 +111,10 @@ enum ctor { // checking (if a wildcard pattern is useful in relation to a matrix, the // matrix isn't exhaustive). -fn is_useful(tcx: ty::ctxt, m: matrix, v: [@pat]/~) -> useful { +fn is_useful(tcx: ty::ctxt, m: matrix, v: ~[@pat]) -> useful { if m.len() == 0u { ret useful_; } if m[0].len() == 0u { ret not_useful; } - let real_pat = alt vec::find(m, {|r| r[0].id != 0}) { + let real_pat = alt vec::find(m, |r| r[0].id != 0) { some(r) { r[0] } none { v[0] } }; let left_ty = if real_pat.id == 0 { ty::mk_nil(tcx) } @@ -136,7 +136,7 @@ fn is_useful(tcx: ty::ctxt, m: matrix, v: [@pat]/~) -> useful { } } ty::ty_enum(eid, _) { - for (*ty::enum_variants(tcx, eid)).each {|va| + for (*ty::enum_variants(tcx, eid)).each |va| { alt is_useful_specialized(tcx, m, v, variant(va.id), va.args.len(), left_ty) { not_useful {} @@ -152,7 +152,7 @@ fn is_useful(tcx: ty::ctxt, m: matrix, v: [@pat]/~) -> useful { } } some(ctor) { - alt is_useful(tcx, vec::filter_map(m, {|r| default(tcx, r)}), + alt is_useful(tcx, vec::filter_map(m, |r| default(tcx, r) ), vec::tail(v)) { useful_ { useful(left_ty, ctor) } u { u } @@ -167,9 +167,9 @@ fn is_useful(tcx: ty::ctxt, m: matrix, v: [@pat]/~) -> useful { } } -fn is_useful_specialized(tcx: ty::ctxt, m: matrix, v: [@pat]/~, ctor: ctor, +fn is_useful_specialized(tcx: ty::ctxt, m: matrix, v: ~[@pat], ctor: ctor, arity: uint, lty: ty::t) -> useful { - let ms = vec::filter_map(m, {|r| specialize(tcx, r, ctor, arity, lty)}); + let ms = vec::filter_map(m, |r| specialize(tcx, r, ctor, arity, lty) ); alt is_useful(tcx, ms, option::get(specialize(tcx, v, ctor, arity, lty))){ useful_ { useful(lty, ctor) } u { u } @@ -211,21 +211,21 @@ fn is_wild(tcx: ty::ctxt, p: @pat) -> bool { fn missing_ctor(tcx: ty::ctxt, m: matrix, left_ty: ty::t) -> option<ctor> { alt ty::get(left_ty).struct { ty::ty_box(_) | ty::ty_uniq(_) | ty::ty_tup(_) | ty::ty_rec(_) { - for m.each {|r| + for m.each |r| { if !is_wild(tcx, r[0]) { ret none; } } ret some(single); } ty::ty_enum(eid, _) { - let mut found = []/~; - for m.each {|r| - option::iter(pat_ctor_id(tcx, r[0])) {|id| + let mut found = ~[]; + for m.each |r| { + do option::iter(pat_ctor_id(tcx, r[0])) |id| { if !vec::contains(found, id) { vec::push(found, id); } } } let variants = ty::enum_variants(tcx, eid); if found.len() != (*variants).len() { - for vec::each(*variants) {|v| + for vec::each(*variants) |v| { if !found.contains(variant(v.id)) { ret some(variant(v.id)); } @@ -236,7 +236,7 @@ fn missing_ctor(tcx: ty::ctxt, m: matrix, left_ty: ty::t) -> option<ctor> { ty::ty_nil { none } ty::ty_bool { let mut true_found = false, false_found = false; - for m.each {|r| + for m.each |r| { alt check pat_ctor_id(tcx, r[0]) { none {} some(val(const_int(1i64))) { true_found = true; } @@ -258,7 +258,7 @@ fn ctor_arity(tcx: ty::ctxt, ctor: ctor, ty: ty::t) -> uint { ty::ty_box(_) | ty::ty_uniq(_) { 1u } ty::ty_enum(eid, _) { let id = alt check ctor { variant(id) { id } }; - alt check vec::find(*ty::enum_variants(tcx, eid), {|v| v.id == id}) { + alt check vec::find(*ty::enum_variants(tcx, eid), |v| v.id == id ) { some(v) { v.args.len() } } } @@ -270,8 +270,8 @@ fn wild() -> @pat { @{id: 0, node: pat_wild, span: syntax::ast_util::dummy_sp()} } -fn specialize(tcx: ty::ctxt, r: [@pat]/~, ctor_id: ctor, arity: uint, - left_ty: ty::t) -> option<[@pat]/~> { +fn specialize(tcx: ty::ctxt, r: ~[@pat], ctor_id: ctor, arity: uint, + left_ty: ty::t) -> option<~[@pat]> { let r0 = raw_pat(r[0]); alt r0.node { pat_wild { some(vec::append(vec::from_elem(arity, wild()), @@ -301,15 +301,15 @@ fn specialize(tcx: ty::ctxt, r: [@pat]/~, ctor_id: ctor, arity: uint, let ty_flds = alt check ty::get(left_ty).struct { ty::ty_rec(flds) { flds } }; - let args = vec::map(ty_flds, {|ty_f| - alt vec::find(flds, {|f| f.ident == ty_f.ident}) { + let args = vec::map(ty_flds, |ty_f| { + alt vec::find(flds, |f| f.ident == ty_f.ident ) { some(f) { f.pat } _ { wild() } } }); some(vec::append(args, vec::tail(r))) } pat_tup(args) { some(vec::append(args, vec::tail(r))) } - pat_box(a) | pat_uniq(a) { some(vec::append([a]/~, vec::tail(r))) } + pat_box(a) | pat_uniq(a) { some(vec::append(~[a], vec::tail(r))) } pat_lit(expr) { let e_v = eval_const_expr(tcx, expr); let match = alt check ctor_id { @@ -335,7 +335,7 @@ fn specialize(tcx: ty::ctxt, r: [@pat]/~, ctor_id: ctor, arity: uint, } } -fn default(tcx: ty::ctxt, r: [@pat]/~) -> option<[@pat]/~> { +fn default(tcx: ty::ctxt, r: ~[@pat]) -> option<~[@pat]> { if is_wild(tcx, r[0]) { some(vec::tail(r)) } else { none } } @@ -363,17 +363,17 @@ fn is_refutable(tcx: ty::ctxt, pat: @pat) -> bool { pat_wild | pat_ident(_, none) { false } pat_lit(_) | pat_range(_, _) { true } pat_rec(fields, _) { - for fields.each {|it| + for fields.each |it| { if is_refutable(tcx, it.pat) { ret true; } } false } pat_tup(elts) { - for elts.each {|elt| if is_refutable(tcx, elt) { ret true; } } + for elts.each |elt| { if is_refutable(tcx, elt) { ret true; } } false } pat_enum(_, some(args)) { - for args.each {|p| if is_refutable(tcx, p) { ret true; } }; + for args.each |p| { if is_refutable(tcx, p) { ret true; } }; false } pat_enum(_,_) { false } diff --git a/src/rustc/middle/check_const.rs b/src/rustc/middle/check_const.rs index 9abeda8dded..16c714ac057 100644 --- a/src/rustc/middle/check_const.rs +++ b/src/rustc/middle/check_const.rs @@ -8,11 +8,10 @@ fn check_crate(sess: session, crate: @crate, ast_map: ast_map::map, def_map: resolve::def_map, method_map: typeck::method_map, tcx: ty::ctxt) { visit::visit_crate(*crate, false, visit::mk_vt(@{ - visit_item: {|a,b,c|check_item(sess, ast_map, def_map, a, b, c)}, + visit_item: |a,b,c| check_item(sess, ast_map, def_map, a, b, c), visit_pat: check_pat, - visit_expr: {|a,b,c| + visit_expr: |a,b,c| check_expr(sess, def_map, method_map, tcx, a, b, c) - } with *visit::default_visitor() })); sess.abort_if_errors(); @@ -26,8 +25,8 @@ fn check_item(sess: session, ast_map: ast_map::map, def_map: resolve::def_map, check_item_recursion(sess, ast_map, def_map, it); } item_enum(vs, _, _) { - for vs.each {|var| - option::iter(var.node.disr_expr) {|ex| + for vs.each |var| { + do option::iter(var.node.disr_expr) |ex| { v.visit_expr(ex, true, v); } } diff --git a/src/rustc/middle/check_loop.rs b/src/rustc/middle/check_loop.rs index 098ec82ac0a..44fbdaef7ce 100644 --- a/src/rustc/middle/check_loop.rs +++ b/src/rustc/middle/check_loop.rs @@ -6,10 +6,10 @@ type ctx = {in_loop: bool, can_ret: bool}; fn check_crate(tcx: ty::ctxt, crate: @crate) { visit::visit_crate(*crate, {in_loop: false,can_ret: true}, visit::mk_vt(@{ - visit_item: {|i, _cx, v| + visit_item: |i, _cx, v| { visit::visit_item(i, {in_loop: false, can_ret: true}, v); }, - visit_expr: {|e: @expr, cx: ctx, v: visit::vt<ctx>| + visit_expr: |e: @expr, cx: ctx, v: visit::vt<ctx>| { alt e.node { expr_while(e, b) { v.visit_expr(e, cx, v); diff --git a/src/rustc/middle/freevars.rs b/src/rustc/middle/freevars.rs index 9ebf4d13970..ae581290ab4 100644 --- a/src/rustc/middle/freevars.rs +++ b/src/rustc/middle/freevars.rs @@ -23,7 +23,7 @@ type freevar_entry = { def: ast::def, //< The variable being accessed free. span: span //< First span where it is accessed (there can be multiple) }; -type freevar_info = @[@freevar_entry]/~; +type freevar_info = @~[@freevar_entry]; type freevar_map = hashmap<ast::node_id, freevar_info>; // Searches through part of the AST for all references to locals or @@ -34,7 +34,7 @@ type freevar_map = hashmap<ast::node_id, freevar_info>; fn collect_freevars(def_map: resolve::def_map, blk: ast::blk) -> freevar_info { let seen = int_hash(); - let refs = @mut []/~; + let refs = @mut ~[]; fn ignore_item(_i: @ast::item, &&_depth: int, _v: visit::vt<int>) { } diff --git a/src/rustc/middle/kind.rs b/src/rustc/middle/kind.rs index d3bd8e75316..1f73a8fc999 100644 --- a/src/rustc/middle/kind.rs +++ b/src/rustc/middle/kind.rs @@ -40,7 +40,7 @@ import lint::{non_implicitly_copyable_typarams,implicit_copies}; // types. fn kind_to_str(k: kind) -> str { - let mut kinds = []/~; + let mut kinds = ~[]; if ty::kind_lteq(kind_const(), k) { vec::push(kinds, "const"); } @@ -101,7 +101,7 @@ fn with_appropriate_checker(cx: ctx, id: node_id, b: fn(check_fn)) { if !is_move { check_copy(cx, id, var_t, sp, is_implicit); } // check that only immutable variables are implicitly copied in - for fv.each { |fv| + for fv.each |fv| { check_imm_free_var(cx, fv.def, fv.span); } } @@ -113,7 +113,7 @@ fn with_appropriate_checker(cx: ctx, id: node_id, b: fn(check_fn)) { if !is_move { check_copy(cx, id, var_t, sp, is_implicit); } // check that only immutable variables are implicitly copied in - for fv.each { |fv| + for fv.each |fv| { check_imm_free_var(cx, fv.def, fv.span); } } @@ -150,7 +150,7 @@ fn check_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, sp: span, // Find the check function that enforces the appropriate bounds for this // kind of function: - with_appropriate_checker(cx, fn_id) { |chk| + do with_appropriate_checker(cx, fn_id) |chk| { // Begin by checking the variables in the capture clause, if any. // Here we slightly abuse the map function to both check and report @@ -160,9 +160,9 @@ fn check_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, sp: span, let cap_clause = alt fk { visit::fk_anon(_, cc) | visit::fk_fn_block(cc) { cc } visit::fk_item_fn(*) | visit::fk_method(*) | - visit::fk_ctor(*) | visit::fk_dtor(*) { @[]/~ } + visit::fk_ctor(*) | visit::fk_dtor(*) { @~[] } }; - let captured_vars = (*cap_clause).map { |cap_item| + let captured_vars = do (*cap_clause).map |cap_item| { let cap_def = cx.tcx.def_map.get(cap_item.id); let cap_def_id = ast_util::def_id_of_def(cap_def).node; let ty = ty::node_id_to_type(cx.tcx, cap_def_id); @@ -172,7 +172,7 @@ fn check_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, sp: span, // Iterate over any free variables that may not have appeared in the // capture list. Ensure that they too are of the appropriate kind. - for vec::each(*freevars::get_freevars(cx.tcx, fn_id)) {|fv| + for vec::each(*freevars::get_freevars(cx.tcx, fn_id)) |fv| { let id = ast_util::def_id_of_def(fv.def).node; // skip over free variables that appear in the cap clause @@ -217,7 +217,7 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { check_copy_ex(cx, rs, false); } expr_rec(fields, def) { - for fields.each {|field| maybe_copy(cx, field.node.expr); } + for fields.each |field| { maybe_copy(cx, field.node.expr); } alt def { some(ex) { // All noncopyable fields must be overridden @@ -226,8 +226,8 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { ty::ty_rec(f) { f } _ { cx.tcx.sess.span_bug(ex.span, "bad expr type in record"); } }; - for ty_fields.each {|tf| - if !vec::any(fields, {|f| f.node.ident == tf.ident}) && + for ty_fields.each |tf| { + if !vec::any(fields, |f| f.node.ident == tf.ident ) && !ty::kind_can_be_copied(ty::type_kind(cx.tcx, tf.mt.ty)) { cx.tcx.sess.span_err(ex.span, "copying a noncopyable value"); @@ -238,11 +238,11 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { } } expr_tup(exprs) | expr_vec(exprs, _) { - for exprs.each {|expr| maybe_copy(cx, expr); } + for exprs.each |expr| { maybe_copy(cx, expr); } } expr_call(f, args, _) { let mut i = 0u; - for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).each {|arg_t| + for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).each |arg_t| { alt ty::arg_mode(cx.tcx, arg_t) { by_copy { maybe_copy(cx, args[i]); } by_ref | by_val | by_mutbl_ref | by_move { } @@ -251,7 +251,7 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { } } expr_path(_) | expr_field(_, _, _) { - option::iter(cx.tcx.node_type_substs.find(e.id)) {|ts| + do option::iter(cx.tcx.node_type_substs.find(e.id)) |ts| { let bounds = alt check e.node { expr_path(_) { let did = ast_util::def_id_of_def(cx.tcx.def_map.get(e.id)); @@ -286,7 +286,7 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { %s (%u tys), declared = %? (%u tys)", tys_to_str(cx.tcx, ts), ts.len(), *bounds, (*bounds).len()); } - vec::iter2(ts, *bounds) {|ty, bound| + do vec::iter2(ts, *bounds) |ty, bound| { check_bounds(cx, e.id, e.span, ty, bound) } } @@ -299,7 +299,7 @@ fn check_expr(e: @expr, cx: ctx, v: visit::vt<ctx>) { fn check_stmt(stmt: @stmt, cx: ctx, v: visit::vt<ctx>) { alt stmt.node { stmt_decl(@{node: decl_local(locals), _}, _) { - for locals.each {|local| + for locals.each |local| { alt local.node.init { some({op: init_assign, expr}) { maybe_copy(cx, expr); } _ {} @@ -314,10 +314,10 @@ fn check_stmt(stmt: @stmt, cx: ctx, v: visit::vt<ctx>) { fn check_ty(aty: @ty, cx: ctx, v: visit::vt<ctx>) { alt aty.node { ty_path(_, id) { - option::iter(cx.tcx.node_type_substs.find(id)) {|ts| + do option::iter(cx.tcx.node_type_substs.find(id)) |ts| { let did = ast_util::def_id_of_def(cx.tcx.def_map.get(id)); let bounds = ty::lookup_item_type(cx.tcx, did).bounds; - vec::iter2(ts, *bounds) {|ty, bound| + do vec::iter2(ts, *bounds) |ty, bound| { check_bounds(cx, aty.id, aty.span, ty, bound) } } diff --git a/src/rustc/middle/lint.rs b/src/rustc/middle/lint.rs index 54467207c51..ec35f7046aa 100644 --- a/src/rustc/middle/lint.rs +++ b/src/rustc/middle/lint.rs @@ -82,7 +82,7 @@ type lint_dict = hashmap<str,lint_spec>; '-' to '_' in command-line flags */ fn get_lint_dict() -> lint_dict { - let v = [ + let v = ~[ ("ctypes", @{lint: ctypes, desc: "proper use of core::libc types in native modules", @@ -134,7 +134,7 @@ fn get_lint_dict() -> lint_dict { desc: "implicit copies of non implicitly copyable data", default: warn}) - ]/~; + ]; hash_from_strs(v) } @@ -206,15 +206,15 @@ impl methods for ctxt { current lint context, call the provided function, then reset the warnings in effect to their previous state. "] - fn with_warn_attrs(attrs: [ast::attribute]/~, f: fn(ctxt)) { + fn with_warn_attrs(attrs: ~[ast::attribute], f: fn(ctxt)) { let mut new_ctxt = self; let metas = attr::attr_metas(attr::find_attrs_by_name(attrs, "warn")); - for metas.each {|meta| + for metas.each |meta| { alt meta.node { ast::meta_list(_, metas) { - for metas.each {|meta| + for metas.each |meta| { alt meta.node { ast::meta_word(lintname) { alt lookup_lint(self.dict, *lintname) { @@ -273,7 +273,7 @@ fn lookup_lint(dict: lint_dict, s: str) } fn build_settings_item(i: @ast::item, &&cx: ctxt, v: visit::vt<ctxt>) { - cx.with_warn_attrs(i.attrs) {|cx| + do cx.with_warn_attrs(i.attrs) |cx| { if !cx.is_default { cx.sess.warning_settings.settings_map.insert(i.id, cx.curr); } @@ -289,17 +289,17 @@ fn build_settings_crate(sess: session::session, crate: @ast::crate) { sess: sess}; // Install defaults. - for cx.dict.each {|_k, spec| cx.set_level(spec.lint, spec.default); } + for cx.dict.each |_k, spec| { cx.set_level(spec.lint, spec.default); } // Install command-line options, overriding defaults. - for sess.opts.lint_opts.each {|pair| + for sess.opts.lint_opts.each |pair| { let (lint,level) = pair; cx.set_level(lint, level); } - cx.with_warn_attrs(crate.node.attrs) {|cx| + do cx.with_warn_attrs(crate.node.attrs) |cx| { // Copy out the default settings - for cx.curr.each {|k, v| + for cx.curr.each |k, v| { sess.warning_settings.default_settings.insert(k, v); } @@ -327,7 +327,7 @@ fn check_item(i: @ast::item, cx: ty::ctxt) { // not traverse into subitems, since that is handled by the outer // lint visitor. fn item_stopping_visitor<E>(v: visit::vt<E>) -> visit::vt<E> { - visit::mk_vt(@{visit_item: {|_i, _e, _v| } with **v}) + visit::mk_vt(@{visit_item: |_i, _e, _v| { } with **v}) } fn check_item_while_true(cx: ty::ctxt, it: @ast::item) { @@ -357,8 +357,8 @@ fn check_item_ctypes(cx: ty::ctxt, it: @ast::item) { fn check_foreign_fn(cx: ty::ctxt, fn_id: ast::node_id, decl: ast::fn_decl) { - let tys = vec::map(decl.inputs) {|a| a.ty }; - for vec::each(vec::append_one(tys, decl.output)) {|ty| + let tys = vec::map(decl.inputs, |a| a.ty ); + for vec::each(vec::append_one(tys, decl.output)) |ty| { alt ty.node { ast::ty_path(_, id) { alt cx.def_map.get(id) { @@ -387,7 +387,7 @@ fn check_item_ctypes(cx: ty::ctxt, it: @ast::item) { alt it.node { ast::item_foreign_mod(nmod) if attr::foreign_abi(it.attrs) != either::right(ast::foreign_abi_rust_intrinsic) { - for nmod.items.each {|ni| + for nmod.items.each |ni| { alt ni.node { ast::foreign_item_fn(decl, tps) { check_foreign_fn(cx, it.id, decl); @@ -456,7 +456,7 @@ fn check_item_old_vecs(cx: ty::ctxt, it: @ast::item) { } ast::ty_path(@{span: _, global: _, idents: ids, rp: none, types: _}, _) - if ids == [@"str"]/~ && (! uses_vstore.contains_key(t.id)) { + if ids == ~[@"str"] && (! uses_vstore.contains_key(t.id)) { cx.sess.span_lint( old_strs, t.id, it.id, t.span, "deprecated str type"); diff --git a/src/rustc/middle/liveness.rs b/src/rustc/middle/liveness.rs index 436443d1538..fe05e817a20 100644 --- a/src/rustc/middle/liveness.rs +++ b/src/rustc/middle/liveness.rs @@ -220,9 +220,9 @@ class ir_maps { let live_node_map: hashmap<node_id, live_node>; let variable_map: hashmap<node_id, variable>; let field_map: hashmap<ident, variable>; - let capture_map: hashmap<node_id, @[capture_info]/~>; - let mut var_kinds: [var_kind]/~; - let mut lnks: [live_node_kind]/~; + let capture_map: hashmap<node_id, @~[capture_info]>; + let mut var_kinds: ~[var_kind]; + let mut lnks: ~[live_node_kind]; new(tcx: ty::ctxt, method_map: typeck::method_map, last_use_map: last_use_map) { @@ -236,8 +236,8 @@ class ir_maps { self.variable_map = int_hash(); self.capture_map = int_hash(); self.field_map = box_str_hash(); - self.var_kinds = []/~; - self.lnks = []/~; + self.var_kinds = ~[]; + self.lnks = ~[]; } fn add_live_node(lnk: live_node_kind) -> live_node { @@ -297,11 +297,11 @@ class ir_maps { } } - fn set_captures(node_id: node_id, +cs: [capture_info]/~) { + fn set_captures(node_id: node_id, +cs: ~[capture_info]) { self.capture_map.insert(node_id, @cs); } - fn captures(expr: @expr) -> @[capture_info]/~ { + fn captures(expr: @expr) -> @~[capture_info] { alt self.capture_map.find(expr.id) { some(caps) {caps} none { @@ -351,7 +351,7 @@ fn visit_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, #debug["creating fn_maps: %x", ptr::addr_of(*fn_maps) as uint]; - for decl.inputs.each { |arg| + for decl.inputs.each |arg| { #debug["adding argument %d", arg.id]; let mode = ty::resolved_mode(self.tcx, arg.mode); (*fn_maps).add_variable(vk_arg(arg.id, arg.ident, mode)); @@ -397,7 +397,7 @@ fn visit_fn(fk: visit::fn_kind, decl: fn_decl, body: blk, } fn add_class_fields(self: @ir_maps, did: def_id) { - for ty::lookup_class_fields(self.tcx, did).each { |field_ty| + for ty::lookup_class_fields(self.tcx, did).each |field_ty| { assert field_ty.id.crate == local_crate; let var = (*self).add_variable(vk_field(field_ty.ident)); self.field_map.insert(field_ty.ident, var); @@ -406,7 +406,7 @@ fn add_class_fields(self: @ir_maps, did: def_id) { fn visit_local(local: @local, &&self: @ir_maps, vt: vt<@ir_maps>) { let def_map = self.tcx.def_map; - pat_util::pat_bindings(def_map, local.node.pat) { |p_id, sp, path| + do pat_util::pat_bindings(def_map, local.node.pat) |p_id, sp, path| { #debug["adding local variable %d", p_id]; let name = ast_util::path_to_ident(path); (*self).add_live_node_for_node(p_id, lnk_vdef(sp)); @@ -435,8 +435,8 @@ fn visit_expr(expr: @expr, &&self: @ir_maps, vt: vt<@ir_maps>) { let proto = ty::ty_fn_proto(ty::expr_ty(self.tcx, expr)); let cvs = capture::compute_capture_vars(self.tcx, expr.id, proto, cap_clause); - let mut call_caps = []/~; - for cvs.each { |cv| + let mut call_caps = ~[]; + for cvs.each |cv| { alt relevant_def(cv.def) { some(rv) { let cv_ln = (*self).add_live_node(lnk_freevar(cv.span)); @@ -511,8 +511,8 @@ class liveness { let tcx: ty::ctxt; let ir: @ir_maps; let s: specials; - let successors: [mut live_node]/~; - let users: [mut users]/~; + let successors: ~[mut live_node]; + let users: ~[mut users]; let mut break_ln: live_node; let mut cont_ln: live_node; @@ -560,9 +560,9 @@ class liveness { alt expr.node { expr_path(_) { let def = self.tcx.def_map.get(expr.id); - relevant_def(def).map { |rdef| - self.variable_from_rdef(rdef, expr.span) - } + relevant_def(def).map( + |rdef| self.variable_from_rdef(rdef, expr.span) + ) } _ {none} } @@ -576,9 +576,9 @@ class liveness { span: span) -> option<variable> { alt self.tcx.def_map.find(node_id) { some(def) { - relevant_def(def).map { |rdef| - self.variable_from_rdef(rdef, span) - } + relevant_def(def).map( + |rdef| self.variable_from_rdef(rdef, span) + ) } none { self.tcx.sess.span_bug( @@ -589,7 +589,7 @@ class liveness { fn pat_bindings(pat: @pat, f: fn(live_node, variable, span)) { let def_map = self.tcx.def_map; - pat_util::pat_bindings(def_map, pat) {|p_id, sp, _n| + do pat_util::pat_bindings(def_map, pat) |p_id, sp, _n| { let ln = self.live_node(p_id, sp); let var = self.variable(p_id, sp); f(ln, var, sp); @@ -635,7 +635,7 @@ class liveness { fn indices(ln: live_node, op: fn(uint)) { let node_base_idx = self.idx(ln, variable(0u)); - for uint::range(0u, self.ir.num_vars) { |var_idx| + for uint::range(0u, self.ir.num_vars) |var_idx| { op(node_base_idx + var_idx) } } @@ -644,7 +644,7 @@ class liveness { op: fn(uint, uint)) { let node_base_idx = self.idx(ln, variable(0u)); let succ_base_idx = self.idx(succ_ln, variable(0u)); - for uint::range(0u, self.ir.num_vars) { |var_idx| + for uint::range(0u, self.ir.num_vars) |var_idx| { op(node_base_idx + var_idx, succ_base_idx + var_idx); } } @@ -653,7 +653,7 @@ class liveness { ln: live_node, test: fn(uint) -> live_node) { let node_base_idx = self.idx(ln, variable(0u)); - for uint::range(0u, self.ir.num_vars) { |var_idx| + for uint::range(0u, self.ir.num_vars) |var_idx| { let idx = node_base_idx + var_idx; if test(idx).is_valid() { wr.write_str(" "); @@ -663,15 +663,15 @@ class liveness { } fn ln_str(ln: live_node) -> str { - io::with_str_writer { |wr| + do io::with_str_writer |wr| { wr.write_str("[ln("); wr.write_uint(*ln); wr.write_str(") of kind "); wr.write_str(#fmt["%?", copy self.ir.lnks[*ln]]); wr.write_str(" reads"); - self.write_vars(wr, ln, {|idx| self.users[idx].reader}); + self.write_vars(wr, ln, |idx| self.users[idx].reader ); wr.write_str(" writes"); - self.write_vars(wr, ln, {|idx| self.users[idx].writer}); + self.write_vars(wr, ln, |idx| self.users[idx].writer ); wr.write_str(" "); wr.write_str(" precedes "); wr.write_str((copy self.successors[*ln]).to_str()); @@ -695,9 +695,9 @@ class liveness { fn init_from_succ(ln: live_node, succ_ln: live_node) { // more efficient version of init_empty() / merge_from_succ() self.successors[*ln] = succ_ln; - self.indices2(ln, succ_ln) { |idx, succ_idx| - self.users[idx] = self.users[succ_idx]; - } + self.indices2(ln, succ_ln, |idx, succ_idx| { + self.users[idx] = self.users[succ_idx] + }); #debug["init_from_succ(ln=%s, succ=%s)", self.ln_str(ln), self.ln_str(succ_ln)]; } @@ -707,7 +707,7 @@ class liveness { if ln == succ_ln { ret false; } let mut changed = false; - self.indices2(ln, succ_ln) { |idx, succ_idx| + do self.indices2(ln, succ_ln) |idx, succ_idx| { changed |= copy_if_invalid(copy self.users[succ_idx].reader, self.users[idx].reader); changed |= copy_if_invalid(copy self.users[succ_idx].writer, @@ -776,14 +776,14 @@ class liveness { // effectively a return---this only occurs in `for` loops, // where the body is really a closure. let entry_ln: live_node = - self.with_loop_nodes(self.s.exit_ln, self.s.exit_ln) {|| + self.with_loop_nodes(self.s.exit_ln, self.s.exit_ln, || { self.propagate_through_fn_block(decl, body) - }; + }); // hack to skip the loop unless #debug is enabled: #debug["^^ liveness computation results for body %d (entry=%s)", { - for uint::range(0u, self.ir.num_live_nodes) { |ln_idx| + for uint::range(0u, self.ir.num_live_nodes) |ln_idx| { #debug["%s", self.ln_str(live_node(ln_idx))]; } body.node.id @@ -795,7 +795,7 @@ class liveness { fn propagate_through_fn_block(decl: fn_decl, blk: blk) -> live_node { // inputs passed by & mode should be considered live on exit: - for decl.inputs.each { |arg| + for decl.inputs.each |arg| { alt ty::resolved_mode(self.tcx, arg.mode) { by_mutbl_ref | by_ref | by_val { // These are "non-owned" modes, so register a read at @@ -816,7 +816,7 @@ class liveness { self.acc(self.s.exit_ln, self.s.self_var, ACC_READ); // in a ctor, there is an implicit use of self.f for all fields f: - for self.ir.field_map.each_value { |var| + for self.ir.field_map.each_value |var| { self.acc(self.s.exit_ln, var, ACC_READ|ACC_USE); } @@ -832,7 +832,7 @@ class liveness { fn propagate_through_block(blk: blk, succ: live_node) -> live_node { let succ = self.propagate_through_opt_expr(blk.node.expr, succ); - blk.node.stmts.foldr(succ) { |stmt, succ| + do blk.node.stmts.foldr(succ) |stmt, succ| { self.propagate_through_stmt(stmt, succ) } } @@ -852,7 +852,7 @@ class liveness { fn propagate_through_decl(decl: @decl, succ: live_node) -> live_node { alt decl.node { decl_local(locals) { - locals.foldr(succ) { |local, succ| + do locals.foldr(succ) |local, succ| { self.propagate_through_local(local, succ) } } @@ -877,9 +877,9 @@ class liveness { // initialization, which is mildly more complex than checking // once at the func header but otherwise equivalent. - let opt_init = local.node.init.map { |i| i.expr }; + let opt_init = local.node.init.map(|i| i.expr ); let mut succ = self.propagate_through_opt_expr(opt_init, succ); - self.pat_bindings(local.node.pat) { |ln, var, _sp| + do self.pat_bindings(local.node.pat) |ln, var, _sp| { self.init_from_succ(ln, succ); self.define(ln, var); succ = ln; @@ -887,16 +887,16 @@ class liveness { succ } - fn propagate_through_exprs(exprs: [@expr]/~, + fn propagate_through_exprs(exprs: ~[@expr], succ: live_node) -> live_node { - exprs.foldr(succ) { |expr, succ| + do exprs.foldr(succ) |expr, succ| { self.propagate_through_expr(expr, succ) } } fn propagate_through_opt_expr(opt_expr: option<@expr>, succ: live_node) -> live_node { - opt_expr.foldl(succ) { |succ, expr| + do opt_expr.foldl(succ) |succ, expr| { self.propagate_through_expr(expr, succ) } } @@ -930,7 +930,7 @@ class liveness { // the construction of a closure itself is not important, // but we have to consider the closed over variables. let caps = (*self.ir).captures(expr); - (*caps).foldr(succ) { |cap, succ| + do (*caps).foldr(succ) |cap, succ| { self.init_from_succ(cap.ln, succ); let var = self.variable_from_rdef(cap.rv, expr.span); self.acc(cap.ln, var, ACC_READ | ACC_USE); @@ -987,7 +987,7 @@ class liveness { let ln = self.live_node(expr.id, expr.span); self.init_empty(ln, succ); let mut first_merge = true; - for arms.each { |arm| + for arms.each |arm| { let arm_succ = self.propagate_through_opt_expr( arm.guard, @@ -1063,7 +1063,7 @@ class liveness { expr_rec(fields, with_expr) { let succ = self.propagate_through_opt_expr(with_expr, succ); - fields.foldr(succ) { |field, succ| + do fields.foldr(succ) |field, succ| { self.propagate_through_expr(field.node.expr, succ) } } @@ -1096,7 +1096,7 @@ class liveness { expr_log(_, l, r) | expr_index(l, r) | expr_binary(_, l, r) { - self.propagate_through_exprs([l, r]/~, succ) + self.propagate_through_exprs(~[l, r], succ) } expr_assert(e) | @@ -1241,7 +1241,7 @@ class liveness { let ln = self.live_node(expr.id, expr.span); if acc != 0u { self.init_from_succ(ln, succ); - for self.ir.field_map.each_value { |var| + for self.ir.field_map.each_value |var| { self.acc(ln, var, acc); } } @@ -1273,10 +1273,10 @@ class liveness { alt def { def_self(_) { // Note: the field_map is empty unless we are in a ctor - ret self.ir.field_map.find(fld).map { |var| + ret self.ir.field_map.find(fld).map(|var| { let ln = self.live_node(expr.id, expr.span); (ln, var) - }; + }); } _ { ret none; } } @@ -1320,17 +1320,17 @@ class liveness { first_merge = false; } let cond_ln = self.propagate_through_opt_expr(cond, ln); - let body_ln = self.with_loop_nodes(succ, ln) {|| + let body_ln = self.with_loop_nodes(succ, ln, || { self.propagate_through_block(body, cond_ln) - }; + }); // repeat until fixed point is reached: while self.merge_from_succ(ln, body_ln, first_merge) { first_merge = false; assert cond_ln == self.propagate_through_opt_expr(cond, ln); - assert body_ln == self.with_loop_nodes(succ, ln) {|| + assert body_ln == self.with_loop_nodes(succ, ln, || { self.propagate_through_block(body, cond_ln) - }; + }); } cond_ln @@ -1373,7 +1373,7 @@ fn check_local(local: @local, &&self: @liveness, vt: vt<@liveness>) { // should not be live at this point. #debug["check_local() with no initializer"]; - (*self).pat_bindings(local.node.pat) { |ln, var, sp| + do (*self).pat_bindings(local.node.pat) |ln, var, sp| { if !self.warn_about_unused(sp, ln, var) { alt (*self).live_on_exit(ln, var) { none { /* not live: good */ } @@ -1394,7 +1394,7 @@ fn check_local(local: @local, &&self: @liveness, vt: vt<@liveness>) { fn check_expr(expr: @expr, &&self: @liveness, vt: vt<@liveness>) { alt expr.node { expr_path(_) { - for (*self).variable_from_def_map(expr.id, expr.span).each { |var| + for (*self).variable_from_def_map(expr.id, expr.span).each |var| { let ln = (*self).live_node(expr.id, expr.span); self.consider_last_use(expr, ln, var); } @@ -1404,7 +1404,7 @@ fn check_expr(expr: @expr, &&self: @liveness, vt: vt<@liveness>) { expr_fn(_, _, _, cap_clause) | expr_fn_block(_, _, cap_clause) { let caps = (*self.ir).captures(expr); - for (*caps).each { |cap| + for (*caps).each |cap| { let var = (*self).variable_from_rdef(cap.rv, expr.span); self.consider_last_use(expr, cap.ln, var); if cap.is_move { @@ -1438,7 +1438,7 @@ fn check_expr(expr: @expr, &&self: @liveness, vt: vt<@liveness>) { expr_call(f, args, _) { let targs = ty::ty_fn_args(ty::expr_ty(self.tcx, f)); vt.visit_expr(f, self, vt); - vec::iter2(args, targs) { |arg_expr, arg_ty| + do vec::iter2(args, targs) |arg_expr, arg_ty| { alt ty::resolved_mode(self.tcx, arg_ty.mode) { by_val | by_copy | by_ref | by_mutbl_ref{ vt.visit_expr(arg_expr, self, vt); @@ -1480,7 +1480,7 @@ enum read_kind { impl check_methods for @liveness { fn check_fields(sp: span, entry_ln: live_node) { - for self.ir.field_map.each { |nm, var| + for self.ir.field_map.each |nm, var| { alt (*self).live_on_entry(entry_ln, var) { none { /* ok */ } some(lnk_exit) { @@ -1621,7 +1621,7 @@ impl check_methods for @liveness { } fn check_for_reassignments_in_pat(pat: @pat) { - (*self).pat_bindings(pat) { |ln, var, sp| + do (*self).pat_bindings(pat) |ln, var, sp| { self.check_for_reassignment(ln, var, sp); } } @@ -1728,7 +1728,7 @@ impl check_methods for @liveness { } fn warn_about_unused_args(sp: span, decl: fn_decl, entry_ln: live_node) { - for decl.inputs.each { |arg| + for decl.inputs.each |arg| { let var = (*self).variable(arg.id, arg.ty.span); alt ty::resolved_mode(self.tcx, arg.mode) { by_mutbl_ref { @@ -1752,7 +1752,7 @@ impl check_methods for @liveness { } fn warn_about_unused_or_dead_vars_in_pat(pat: @pat) { - (*self).pat_bindings(pat) { |ln, var, sp| + do (*self).pat_bindings(pat) |ln, var, sp| { if !self.warn_about_unused(sp, ln, var) { self.warn_about_dead_assign(sp, ln, var); } @@ -1761,7 +1761,7 @@ impl check_methods for @liveness { fn warn_about_unused(sp: span, ln: live_node, var: variable) -> bool { if !(*self).used_on_entry(ln, var) { - for self.should_warn(var).each { |name| + for self.should_warn(var).each |name| { // annoying: for parameters in funcs like `fn(x: int) // {ret}`, there is only one node, so asking about @@ -1788,7 +1788,7 @@ impl check_methods for @liveness { fn warn_about_dead_assign(sp: span, ln: live_node, var: variable) { if (*self).live_on_exit(ln, var).is_none() { - for self.should_warn(var).each { |name| + for self.should_warn(var).each |name| { self.tcx.sess.span_warn( sp, #fmt["value assigned to `%s` is never read", *name]); diff --git a/src/rustc/middle/pat_util.rs b/src/rustc/middle/pat_util.rs index ba463ace91d..16be926d7be 100644 --- a/src/rustc/middle/pat_util.rs +++ b/src/rustc/middle/pat_util.rs @@ -15,7 +15,7 @@ type pat_id_map = std::map::hashmap<ident, node_id>; // use the node_id of their namesake in the first pattern. fn pat_id_map(dm: resolve::def_map, pat: @pat) -> pat_id_map { let map = std::map::box_str_hash(); - pat_bindings(dm, pat) {|p_id, _s, n| + do pat_bindings(dm, pat) |p_id, _s, n| { map.insert(path_to_ident(n), p_id); }; ret map; @@ -39,7 +39,7 @@ fn pat_is_variant(dm: resolve::def_map, pat: @pat) -> bool { // Could return a constrained type in order to express that (future work) fn pat_bindings(dm: resolve::def_map, pat: @pat, it: fn(node_id, span, @path)) { - walk_pat(pat) {|p| + do walk_pat(pat) |p| { alt p.node { pat_ident(pth, _) if !pat_is_variant(dm, p) { it(p.id, p.span, pth); @@ -49,8 +49,8 @@ fn pat_bindings(dm: resolve::def_map, pat: @pat, } } -fn pat_binding_ids(dm: resolve::def_map, pat: @pat) -> [node_id]/~ { - let mut found = []/~; - pat_bindings(dm, pat) {|b_id, _sp, _pt| vec::push(found, b_id); }; +fn pat_binding_ids(dm: resolve::def_map, pat: @pat) -> ~[node_id] { + let mut found = ~[]; + pat_bindings(dm, pat, |b_id, _sp, _pt| vec::push(found, b_id) ); ret found; } diff --git a/src/rustc/middle/region.rs b/src/rustc/middle/region.rs index 37959b51fd5..15faf3d2b7c 100644 --- a/src/rustc/middle/region.rs +++ b/src/rustc/middle/region.rs @@ -214,8 +214,8 @@ fn nearest_common_ancestor(region_map: region_map, scope_a: ast::node_id, scope_b: ast::node_id) -> option<ast::node_id> { fn ancestors_of(region_map: region_map, scope: ast::node_id) - -> [ast::node_id]/~ { - let mut result = [scope]/~; + -> ~[ast::node_id] { + let mut result = ~[scope]; let mut scope = scope; loop { alt region_map.find(scope) { @@ -235,7 +235,7 @@ fn nearest_common_ancestor(region_map: region_map, scope_a: ast::node_id, let mut a_index = vec::len(a_ancestors) - 1u; let mut b_index = vec::len(b_ancestors) - 1u; - // Here, [ab]/~_ancestors is a vector going from narrow to broad. + // Here, ~[ab]_ancestors is a vector going from narrow to broad. // The end of each vector will be the item where the scope is // defined; if there are any common ancestors, then the tails of // the vector will be the same. So basically we want to walk @@ -332,7 +332,7 @@ fn resolve_expr(expr: @ast::expr, cx: ctxt, visitor: visit::vt<ctxt>) { // although the capture items are not expressions per se, they // do get "evaluated" in some sense as copies or moves of the // relevant variables so we parent them like an expression - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { record_parent(cx, cap_item.id); } visit::visit_expr(expr, cx, visitor); @@ -375,7 +375,7 @@ fn resolve_fn(fk: visit::fn_kind, decl: ast::fn_decl, body: ast::blk, fn_cx.parent: %?", body.node.id, cx.parent, fn_cx.parent]; - for decl.inputs.each { |input| + for decl.inputs.each |input| { cx.region_map.insert(input.id, body.node.id); } diff --git a/src/rustc/middle/resolve.rs b/src/rustc/middle/resolve.rs index a2ea60379b9..143901a9ead 100644 --- a/src/rustc/middle/resolve.rs +++ b/src/rustc/middle/resolve.rs @@ -34,13 +34,13 @@ enum scope { scope_toplevel, scope_crate, scope_item(@ast::item), - scope_bare_fn(ast::fn_decl, node_id, [ast::ty_param]/~), - scope_fn_expr(ast::fn_decl, node_id, [ast::ty_param]/~), + scope_bare_fn(ast::fn_decl, node_id, ~[ast::ty_param]), + scope_fn_expr(ast::fn_decl, node_id, ~[ast::ty_param]), scope_foreign_item(@ast::foreign_item), scope_loop(@ast::local), // there's only 1 decl per loop. scope_block(ast::blk, @mut uint, @mut uint), scope_arm(ast::arm), - scope_method(node_id, [ast::ty_param]/~), + scope_method(node_id, ~[ast::ty_param]), } type scopes = @list<scope>; @@ -50,13 +50,13 @@ fn top_scope() -> scopes { } enum import_state { - todo(ast::ident, @[ast::ident]/~, span, scopes), - is_glob(@[ast::ident]/~, scopes, span), + todo(ast::ident, @~[ast::ident], span, scopes), + is_glob(@~[ast::ident], scopes, span), resolving(span), resolved(option<def>, /* value */ option<def>, /* type */ option<def>, /* module */ - @[@_impl]/~, /* impls */ + @~[@_impl], /* impls */ /* used for reporting unused import warning */ ast::ident, span), } @@ -79,7 +79,7 @@ fn new_ext_hash() -> ext_hash { ret ast_util::def_eq(v1.did, v2.did) && str::eq(*v1.ident, *v2.ident) && v1.ns == v2.ns; } - std::map::hashmap(hash, {|a, b| a == b}) + std::map::hashmap(hash, |a, b| a == b) } enum mod_index_entry { @@ -88,7 +88,7 @@ enum mod_index_entry { mie_item(@ast::item), mie_foreign_item(@ast::foreign_item), mie_enum_variant(/* variant index */uint, - /*parts of enum item*/ [variant]/~, + /*parts of enum item*/ ~[variant], node_id, span), } @@ -101,7 +101,7 @@ type indexed_mod = { m: option<ast::_mod>, index: mod_index, glob_imports: dvec<glob_imp_def>, - mut globbed_exports: [ident]/~, + mut globbed_exports: ~[ident], glob_imported_names: hashmap<ident, glob_import_state>, path: str }; @@ -111,12 +111,12 @@ type indexed_mod = { control.*/ type def_map = hashmap<node_id, def>; -type ext_map = hashmap<def_id, [ident]/~>; +type ext_map = hashmap<def_id, ~[ident]>; type impl_map = hashmap<node_id, iscopes>; -type impl_cache = hashmap<def_id, option<@[@_impl]/~>>; +type impl_cache = hashmap<def_id, option<@~[@_impl]>>; type exp = {reexp: bool, id: def_id}; -type exp_map = hashmap<node_id, [exp]/~>; +type exp_map = hashmap<node_id, ~[exp]>; type env = {cstore: cstore::cstore, @@ -125,15 +125,15 @@ type env = imports: hashmap<node_id, import_state>, exp_map: exp_map, mod_map: hashmap<node_id, @indexed_mod>, - block_map: hashmap<node_id, [glob_imp_def]/~>, + block_map: hashmap<node_id, ~[glob_imp_def]>, ext_map: ext_map, impl_map: impl_map, impl_cache: impl_cache, ext_cache: ext_hash, used_imports: {mut track: bool, - mut data: [node_id]/~}, + mut data: ~[node_id]}, reported: dvec<{ident: ast::ident, sc: scope}>, - mut ignored_imports: [node_id]/~, + mut ignored_imports: ~[node_id], mut current_tp: option<uint>, mut resolve_unexported: bool, sess: session}; @@ -158,7 +158,7 @@ fn resolve_crate(sess: session, amap: ast_map::map, crate: @ast::crate) -> check_for_collisions(e, *crate); // FIXME: move this to the lint pass when rewriting resolve. (#1634) - for sess.opts.lint_opts.each {|pair| + for sess.opts.lint_opts.each |pair| { let (lint,level) = pair; if lint == lint::unused_imports && level != lint::ignore { check_unused_imports(e, level); @@ -181,9 +181,9 @@ fn create_env(sess: session, amap: ast_map::map) -> @env { impl_map: int_hash(), impl_cache: new_def_hash(), ext_cache: new_ext_hash(), - used_imports: {mut track: false, mut data: []/~}, + used_imports: {mut track: false, mut data: ~[]}, reported: dvec(), - mut ignored_imports: []/~, + mut ignored_imports: ~[], mut current_tp: none, mut resolve_unexported: false, sess: sess} @@ -192,7 +192,7 @@ fn create_env(sess: session, amap: ast_map::map) -> @env { fn iter_export_paths(vi: ast::view_item, f: fn(vp: @ast::view_path)) { alt vi.node { ast::view_item_export(vps) { - for vps.each {|vp| + for vps.each |vp| { f(vp); } } @@ -203,7 +203,7 @@ fn iter_export_paths(vi: ast::view_item, f: fn(vp: @ast::view_path)) { fn iter_import_paths(vi: ast::view_item, f: fn(vp: @ast::view_path)) { alt vi.node { ast::view_item_import(vps) { - for vps.each {|vp| f(vp);} + for vps.each |vp| { f(vp);} } _ {} } @@ -212,7 +212,7 @@ fn iter_import_paths(vi: ast::view_item, f: fn(vp: @ast::view_path)) { fn iter_effective_import_paths(vi: ast::view_item, f: fn(vp: @ast::view_path)) { iter_import_paths(vi, f); - iter_export_paths(vi) {|vp| + do iter_export_paths(vi) |vp| { alt vp.node { ast::view_path_simple(_, _, _) { } // FIXME (but also see #1893): support uniform ident-list exports @@ -231,7 +231,7 @@ fn iter_effective_import_paths(vi: ast::view_item, fn map_crate(e: @env, c: @ast::crate) { fn index_vi(e: @env, i: @ast::view_item, &&sc: scopes, _v: vt<scopes>) { - iter_effective_import_paths(*i) { |vp| + do iter_effective_import_paths(*i) |vp| { alt vp.node { ast::view_path_simple(name, path, id) { e.imports.insert(id, todo(name, @path.idents, vp.span, @@ -241,7 +241,7 @@ fn map_crate(e: @env, c: @ast::crate) { e.imports.insert(id, is_glob(@path.idents, sc, vp.span)); } ast::view_path_list(mod_path, idents, _) { - for idents.each {|ident| + for idents.each |ident| { let t = todo(ident.node.name, @(vec::append_one(mod_path.idents, ident.node.name)), @@ -255,7 +255,7 @@ fn map_crate(e: @env, c: @ast::crate) { fn path_from_scope(sc: scopes, n: str) -> str { let mut path = n + "::"; - list::iter(sc) {|s| + do list::iter(sc) |s| { alt s { scope_item(i) { path = *i.ident + "::" + path; } _ {} @@ -272,7 +272,7 @@ fn map_crate(e: @env, c: @ast::crate) { @{m: some(md), index: index_mod(md), glob_imports: dvec(), - mut globbed_exports: []/~, + mut globbed_exports: ~[], glob_imported_names: box_str_hash(), path: path_from_scope(sc, *i.ident)}); } @@ -281,7 +281,7 @@ fn map_crate(e: @env, c: @ast::crate) { @{m: none::<ast::_mod>, index: index_nmod(nmd), glob_imports: dvec(), - mut globbed_exports: []/~, + mut globbed_exports: ~[], glob_imported_names: box_str_hash(), path: path_from_scope(sc, *i.ident)}); } @@ -294,7 +294,7 @@ fn map_crate(e: @env, c: @ast::crate) { // So we wind up reusing the glob-import machinery when looking at // glob exports. They just do re-exporting in a later step. fn link_glob(e: @env, vi: @ast::view_item, &&sc: scopes, _v: vt<scopes>) { - iter_effective_import_paths(*vi) { |vp| + do iter_effective_import_paths(*vi) |vp| { alt vp.node { ast::view_path_glob(path, _) { alt follow_import(*e, sc, path.idents, vp.span) { @@ -307,7 +307,7 @@ fn map_crate(e: @env, c: @ast::crate) { scope_block(b, _, _) { let globs = alt e.block_map.find(b.node.id) { some(globs) { vec::append_one(globs, glob) } - none { [glob]/~ } + none { ~[glob] } }; e.block_map.insert(b.node.id, globs); } @@ -329,8 +329,8 @@ fn map_crate(e: @env, c: @ast::crate) { // First, find all the modules, and index the names that they contain let v_map_mod = - @{visit_view_item: {|a,b,c|index_vi(e, a, b, c)}, - visit_item: {|a,b,c|index_i(e, a, b, c)}, + @{visit_view_item: |a,b,c| index_vi(e, a, b, c), + visit_item: |a,b,c| index_i(e, a, b, c), visit_block: visit_block_with_scope with *visit::default_visitor::<scopes>()}; visit::visit_crate(*c, top_scope(), visit::mk_vt(v_map_mod)); @@ -340,15 +340,15 @@ fn map_crate(e: @env, c: @ast::crate) { @{m: some(c.node.module), index: index_mod(c.node.module), glob_imports: dvec(), - mut globbed_exports: []/~, + mut globbed_exports: ~[], glob_imported_names: box_str_hash(), path: ""}); // Next, assemble the links for globbed imports and exports. let v_link_glob = - @{visit_view_item: {|a,b,c|link_glob(e, a, b, c)}, + @{visit_view_item: |a,b,c| link_glob(e, a, b, c), visit_block: visit_block_with_scope, - visit_item: {|a,b,c|visit_item_with_scope(e, a, b, c)} + visit_item: |a,b,c| visit_item_with_scope(e, a, b, c) with *visit::default_visitor::<scopes>()}; visit::visit_crate(*c, top_scope(), visit::mk_vt(v_link_glob)); @@ -356,7 +356,7 @@ fn map_crate(e: @env, c: @ast::crate) { fn resolve_imports(e: env) { e.used_imports.track = true; - for e.imports.each {|id, v| + for e.imports.each |id, v| { alt check v { todo(name, path, span, scopes) { resolve_import(e, id, name, *path, span, scopes); @@ -372,7 +372,7 @@ fn resolve_imports(e: env) { // using lint-specific control flags presently but resolve-specific data // structures. Should use the general lint framework (with scopes, attrs). fn check_unused_imports(e: @env, level: lint::level) { - for e.imports.each {|k, v| + for e.imports.each |k, v| { alt v { resolved(_, _, _, _, name, sp) { if !vec::contains(e.used_imports.data, k) { @@ -415,17 +415,17 @@ fn resolve_names(e: @env, c: @ast::crate) { e.used_imports.track = true; let v = @{visit_foreign_item: visit_foreign_item_with_scope, - visit_item: {|a,b,c|walk_item(e, a, b, c)}, + visit_item: |a,b,c| walk_item(e, a, b, c), visit_block: visit_block_with_scope, visit_decl: visit_decl_with_scope, visit_arm: visit_arm_with_scope, - visit_local: {|a,b,c|visit_local_with_scope(e, a, b, c)}, - visit_pat: {|a,b,c|walk_pat(e, a, b, c)}, - visit_expr: {|a,b,c|walk_expr(e, a, b ,c)}, - visit_ty: {|a,b,c|walk_ty(e, a, b, c)}, - visit_ty_params: {|a,b,c|walk_tps(e, a, b, c)}, - visit_constr: {|a,b,c,d,f|walk_constr(e, a, b, c, d, f)}, - visit_fn: {|a,b,c,d,f,g,h| + visit_local: |a,b,c| visit_local_with_scope(e, a, b, c), + visit_pat: |a,b,c| walk_pat(e, a, b, c), + visit_expr: |a,b,c| walk_expr(e, a, b ,c), + visit_ty: |a,b,c| walk_ty(e, a, b, c), + visit_ty_params: |a,b,c| walk_tps(e, a, b, c), + visit_constr: |a,b,c,d,f| walk_constr(e, a, b, c, d, f), + visit_fn: |a,b,c,d,f,g,h| { visit_fn_with_scope(e, a, b, c, d, f, g, h) } with *visit::default_visitor()}; @@ -440,10 +440,10 @@ fn resolve_names(e: @env, c: @ast::crate) { refer to, so it's possible to resolve them. */ ast::item_impl(_, _, ifce, _, _) { - ifce.iter {|p| resolve_iface_ref(p, sc, e);} + ifce.iter(|p| resolve_iface_ref(p, sc, e)) } ast::item_class(_, ifaces, _, _, _, _) { - for ifaces.each {|p| + for ifaces.each |p| { resolve_iface_ref(p, sc, e); } } @@ -460,7 +460,7 @@ fn resolve_names(e: @env, c: @ast::crate) { } ast::expr_fn(_, _, _, cap_clause) | ast::expr_fn_block(_, _, cap_clause) { - for (*cap_clause).each { |ci| + for (*cap_clause).each |ci| { resolve_capture_item(e, sc, ci); } } @@ -477,13 +477,13 @@ fn resolve_names(e: @env, c: @ast::crate) { _ { } } } - fn walk_tps(e: @env, tps: [ast::ty_param]/~, + fn walk_tps(e: @env, tps: ~[ast::ty_param], &&sc: scopes, v: vt<scopes>) { let outer_current_tp = e.current_tp; let mut current = 0u; - for tps.each {|tp| + for tps.each |tp| { e.current_tp = some(current); - for vec::each(*tp.bounds) {|bound| + for vec::each(*tp.bounds) |bound| { alt bound { bound_iface(t) { v.visit_ty(t, sc, v); } _ {} @@ -554,23 +554,23 @@ fn visit_item_with_scope(e: @env, i: @ast::item, alt i.node { ast::item_impl(tps, _, ifce, sty, methods) { v.visit_ty_params(tps, sc, v); - option::iter(ifce) {|p| visit::visit_path(p.path, sc, v)}; + option::iter(ifce, |p| visit::visit_path(p.path, sc, v)); v.visit_ty(sty, sc, v); - for methods.each {|m| + for methods.each |m| { v.visit_ty_params(m.tps, sc, v); let msc = @cons(scope_method(m.self_id, vec::append(tps, m.tps)), sc); - v.visit_fn(visit::fk_method(m.ident, []/~, m), + v.visit_fn(visit::fk_method(m.ident, ~[], m), m.decl, m.body, m.span, m.id, msc, v); } } ast::item_iface(tps, _, methods) { v.visit_ty_params(tps, sc, v); let isc = @cons(scope_method(i.id, tps), sc); - for methods.each {|m| + for methods.each |m| { v.visit_ty_params(m.tps, isc, v); let msc = @cons(scope_method(i.id, vec::append(tps, m.tps)), sc); - for m.decl.inputs.each {|a| v.visit_ty(a.ty, msc, v); } + for m.decl.inputs.each |a| { v.visit_ty(a.ty, msc, v); } v.visit_ty(m.decl.output, msc, v); } } @@ -581,14 +581,14 @@ fn visit_item_with_scope(e: @env, i: @ast::item, let ctor_scope = @cons(scope_method(ctor.node.self_id, tps), class_scope); /* visit the iface refs in the class scope */ - for ifaces.each {|p| + for ifaces.each |p| { visit::visit_path(p.path, class_scope, v); } visit_fn_with_scope(e, visit::fk_ctor(i.ident, tps, ctor.node.self_id, local_def(i.id)), ctor.node.dec, ctor.node.body, ctor.span, ctor.node.id, ctor_scope, v); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { let dtor_scope = @cons(scope_method(dtor.node.self_id, tps), class_scope); @@ -599,7 +599,7 @@ fn visit_item_with_scope(e: @env, i: @ast::item, dtor_scope, v); }; /* visit the items */ - for members.each {|cm| + for members.each |cm| { alt cm.node { class_method(m) { let msc = @cons(scope_method(m.self_id, @@ -629,7 +629,7 @@ fn visit_fn_with_scope(e: @env, fk: visit::fn_kind, decl: ast::fn_decl, // is this a main fn declaration? alt fk { visit::fk_item_fn(nm, _) { - if is_main_name([ast_map::path_name(nm)]/~) && + if is_main_name(~[ast_map::path_name(nm)]) && !e.sess.building_library { // This is a main function -- set it in the session // as the main ID @@ -641,15 +641,15 @@ fn visit_fn_with_scope(e: @env, fk: visit::fn_kind, decl: ast::fn_decl, // here's where we need to set up the mapping // for f's constrs in the table. - for decl.constraints.each {|c| resolve_constr(e, c, sc, v); } + for decl.constraints.each |c| { resolve_constr(e, c, sc, v); } let scope = alt fk { visit::fk_item_fn(_, tps) | visit::fk_method(_, tps, _) | visit::fk_ctor(_, tps, _, _) | visit::fk_dtor(tps, _, _) { scope_bare_fn(decl, id, tps) } visit::fk_anon(ast::proto_bare, _) { - scope_bare_fn(decl, id, []/~) } + scope_bare_fn(decl, id, ~[]) } visit::fk_anon(_, _) | visit::fk_fn_block(_) { - scope_fn_expr(decl, id, []/~) } + scope_fn_expr(decl, id, ~[]) } }; visit::visit_fn(fk, decl, body, sp, id, @cons(scope, sc), v); @@ -658,8 +658,8 @@ fn visit_fn_with_scope(e: @env, fk: visit::fn_kind, decl: ast::fn_decl, fn visit_block_with_scope(b: ast::blk, &&sc: scopes, v: vt<scopes>) { let pos = @mut 0u, loc = @mut 0u; let block_sc = @cons(scope_block(b, pos, loc), sc); - for b.node.view_items.each {|vi| v.visit_view_item(vi, block_sc, v); } - for b.node.stmts.each {|stmt| + for b.node.view_items.each |vi| { v.visit_view_item(vi, block_sc, v); } + for b.node.stmts.each |stmt| { v.visit_stmt(stmt, block_sc, v);; *pos += 1u;; *loc = 0u; @@ -674,14 +674,14 @@ fn visit_decl_with_scope(d: @decl, &&sc: scopes, v: vt<scopes>) { }; alt d.node { decl_local(locs) { - for locs.each {|loc| v.visit_local(loc, sc, v);; *loc_pos += 1u; } + for locs.each |loc| { v.visit_local(loc, sc, v);; *loc_pos += 1u; } } decl_item(it) { v.visit_item(it, sc, v); } } } fn visit_arm_with_scope(a: ast::arm, &&sc: scopes, v: vt<scopes>) { - for a.pats.each {|p| v.visit_pat(p, sc, v); } + for a.pats.each |p| { v.visit_pat(p, sc, v); } let sc_inner = @cons(scope_arm(a), sc); visit::visit_expr_opt(a.guard, sc_inner, v); v.visit_block(a.body, sc_inner, v); @@ -694,7 +694,7 @@ fn visit_local_with_scope(e: @env, loc: @local, &&sc: scopes, v:vt<scopes>) { // scope. We disallow this, in order to make alt patterns consisting of a // single identifier unambiguous (does the pattern "foo" refer to enum // foo, or is it binding a new name foo?) - ast_util::walk_pat(loc.node.pat) { |p| + do ast_util::walk_pat(loc.node.pat) |p| { alt p.node { pat_ident(path, _) { alt lookup_in_scope(*e, sc, loc.span, path_to_ident(path), @@ -717,7 +717,7 @@ fn visit_local_with_scope(e: @env, loc: @local, &&sc: scopes, v:vt<scopes>) { } -fn follow_import(e: env, &&sc: scopes, path: [ident]/~, sp: span) -> +fn follow_import(e: env, &&sc: scopes, path: ~[ident], sp: span) -> option<def> { let path_len = vec::len(path); let mut dcur = lookup_in_scope_strict(e, sc, sp, path[0], ns_module); @@ -738,7 +738,7 @@ fn follow_import(e: env, &&sc: scopes, path: [ident]/~, sp: span) -> alt dcur { some(ast::def_mod(_)) | some(ast::def_foreign_mod(_)) { ret dcur; } _ { - e.sess.span_err(sp, str::connect(path.map({|x|*x}), "::") + + e.sess.span_err(sp, str::connect(path.map(|x|*x), "::") + " does not name a module."); ret none; } @@ -761,10 +761,10 @@ fn resolve_constr(e: @env, c: @ast::constr, &&sc: scopes, _v: vt<scopes>) { // Import resolution fn resolve_import(e: env, n_id: node_id, name: ast::ident, - ids: [ast::ident]/~, sp: codemap::span, &&sc: scopes) { + ids: ~[ast::ident], sp: codemap::span, &&sc: scopes) { fn register(e: env, id: node_id, cx: ctxt, sp: codemap::span, name: ast::ident, lookup: fn(namespace) -> option<def>, - impls: [@_impl]/~) { + impls: ~[@_impl]) { let val = lookup(ns_val), typ = lookup(ns_type), md = lookup(ns_module); if is_none(val) && is_none(typ) && is_none(md) && @@ -776,11 +776,11 @@ fn resolve_import(e: env, n_id: node_id, name: ast::ident, } // Temporarily disable this import and the imports coming after during // resolution of this import. - fn find_imports_after(e: env, id: node_id, &&sc: scopes) -> [node_id]/~ { - fn lst(my_id: node_id, vis: [@view_item]/~) -> [node_id]/~ { - let mut imports = []/~, found = false; - for vis.each {|vi| - iter_effective_import_paths(*vi) {|vp| + fn find_imports_after(e: env, id: node_id, &&sc: scopes) -> ~[node_id] { + fn lst(my_id: node_id, vis: ~[@view_item]) -> ~[node_id] { + let mut imports = ~[], found = false; + for vis.each |vi| { + do iter_effective_import_paths(*vi) |vp| { alt vp.node { view_path_simple(_, _, id) | view_path_glob(_, id) { @@ -788,7 +788,7 @@ fn resolve_import(e: env, n_id: node_id, name: ast::ident, if found { vec::push(imports, id); } } view_path_list(_, ids, _) { - for ids.each {|id| + for ids.each |id| { if id.node.id == my_id { found = true; } if found { vec::push(imports, id.node.id); } } @@ -826,7 +826,7 @@ fn resolve_import(e: env, n_id: node_id, name: ast::ident, let end_id = ids[n_idents - 1u]; if n_idents == 1u { register(e, n_id, in_scope(sc), sp, name, - {|ns| lookup_in_scope(e, sc, sp, end_id, ns, true) }, []/~); + |ns| lookup_in_scope(e, sc, sp, end_id, ns, true), ~[]); } else { alt lookup_in_scope(e, sc, sp, ids[0], ns_module, true) { none { @@ -836,9 +836,9 @@ fn resolve_import(e: env, n_id: node_id, name: ast::ident, let mut dcur = dcur_, i = 1u; loop { if i == n_idents - 1u { - let mut impls = []/~; + let mut impls = ~[]; find_impls_in_mod(e, dcur, impls, some(end_id)); - register(e, n_id, in_mod(dcur), sp, name, {|ns| + register(e, n_id, in_mod(dcur), sp, name, |ns| { lookup_in_mod(e, dcur, sp, end_id, ns, outside) }, impls); break; @@ -864,7 +864,7 @@ fn resolve_import(e: env, n_id: node_id, name: ast::ident, // import alt e.imports.find(n_id) { some(resolving(sp)) { - e.imports.insert(n_id, resolved(none, none, none, @[]/~, @"", sp)); + e.imports.insert(n_id, resolved(none, none, none, @~[], @"", sp)); } _ { } } @@ -884,7 +884,7 @@ enum ctxt { in_mod(def), in_scope(scopes), } fn unresolved_err(e: env, cx: ctxt, sp: span, name: ident, kind: str) { fn find_fn_or_mod_scope(sc: scopes) -> option<scope> { - for list::each(sc) {|cur| + for list::each(sc) |cur| { alt cur { scope_crate | scope_bare_fn(_, _, _) | scope_fn_expr(_, _, _) | scope_item(@{node: ast::item_mod(_), _}) { @@ -900,7 +900,7 @@ fn unresolved_err(e: env, cx: ctxt, sp: span, name: ident, kind: str) { in_scope(sc) { alt find_fn_or_mod_scope(sc) { some(err_scope) { - for e.reported.each {|rs| + for e.reported.each |rs| { if str::eq(*rs.ident, *name) && err_scope == rs.sc { ret; } } e.reported.push({ident: name, sc: err_scope}); @@ -914,7 +914,7 @@ fn unresolved_err(e: env, cx: ctxt, sp: span, name: ident, kind: str) { path = @(e.mod_map.get(did.node).path + *path); } else if did.node != ast::crate_node_id { let paths = e.ext_map.get(did); - path = @str::connect(vec::append_one(paths, path).map({|x|*x}), + path = @str::connect(vec::append_one(paths, path).map(|x|*x), "::"); } } @@ -1115,7 +1115,7 @@ fn lookup_in_scope(e: env, &&sc: scopes, sp: span, name: ident, ns: namespace, ret none; } let mut left_fn = false; - let mut closing = []/~; + let mut closing = ~[]; // Used to determine whether self is in scope let mut left_fn_level2 = false; let mut sc = sc; @@ -1165,10 +1165,10 @@ fn lookup_in_scope(e: env, &&sc: scopes, sp: span, name: ident, ns: namespace, }; } -fn lookup_in_ty_params(e: env, name: ident, ty_params: [ast::ty_param]/~) +fn lookup_in_ty_params(e: env, name: ident, ty_params: ~[ast::ty_param]) -> option<def> { let mut n = 0u; - for ty_params.each {|tp| + for ty_params.each |tp| { if str::eq(*tp.ident, *name) && alt e.current_tp { some(cur) { n < cur } none { true } } { ret some(ast::def_ty_param(local_def(tp.id), n)); } @@ -1180,7 +1180,7 @@ fn lookup_in_ty_params(e: env, name: ident, ty_params: [ast::ty_param]/~) fn lookup_in_pat(e: env, name: ident, pat: @ast::pat) -> option<node_id> { let mut found = none; - pat_util::pat_bindings(e.def_map, pat) {|p_id, _sp, n| + do pat_util::pat_bindings(e.def_map, pat) |p_id, _sp, n| { if str::eq(*path_to_ident(n), *name) { found = some(p_id); } }; @@ -1188,11 +1188,11 @@ fn lookup_in_pat(e: env, name: ident, pat: @ast::pat) -> option<node_id> { } fn lookup_in_fn(e: env, name: ident, decl: ast::fn_decl, - ty_params: [ast::ty_param]/~, + ty_params: ~[ast::ty_param], ns: namespace) -> option<def> { alt ns { ns_val { - for decl.inputs.each {|a| + for decl.inputs.each |a| { if str::eq(*a.ident, *name) { ret some(ast::def_arg(a.id, a.mode)); } @@ -1242,7 +1242,7 @@ fn lookup_in_block(e: env, name: ident, sp: span, b: ast::blk_, pos: uint, } else { alt ns { ns_val { - for variants.each {|v| + for variants.each |v| { if str::eq(*v.node.name, *name) { let i = v.node.id; ret some(ast::def_variant @@ -1269,7 +1269,7 @@ fn lookup_in_block(e: env, name: ident, sp: span, b: ast::blk_, pos: uint, _ { } } } - for b.view_items.each {|vi| + for b.view_items.each |vi| { let mut is_import = false; alt vi.node { ast::view_item_import(_) { is_import = true; } @@ -1279,7 +1279,7 @@ fn lookup_in_block(e: env, name: ident, sp: span, b: ast::blk_, pos: uint, alt vi.node { ast::view_item_import(vps) | ast::view_item_export(vps) { - for vps.each {|vp| + for vps.each |vp| { alt vp.node { ast::view_path_simple(ident, _, id) { if is_import && name == ident { @@ -1288,7 +1288,7 @@ fn lookup_in_block(e: env, name: ident, sp: span, b: ast::blk_, pos: uint, } ast::view_path_list(path, idents, _) { - for idents.each {|ident| + for idents.each |ident| { if name == ident.node.name { ret lookup_import(e, ident.node.id, ns); } @@ -1374,7 +1374,7 @@ fn lookup_in_mod(e: env, m: def, sp: span, name: ident, ns: namespace, // examining a module in an external crate let cached = e.ext_cache.find({did: defid, ident: name, ns: ns}); if !is_none(cached) { ret cached; } - let mut path = [name]/~; + let mut path = ~[name]; if defid.node != ast::crate_node_id { path = vec::append(cstore::get_path(e.cstore, defid), path); } @@ -1487,7 +1487,7 @@ fn lookup_in_local_mod(e: env, node_id: node_id, sp: span, id: ident, alt inf.index.find(id) { none { } some(lst) { - let found = list_search(lst, {|x| lookup_in_mie(e, x, ns)}); + let found = list_search(lst, |x| lookup_in_mie(e, x, ns)); if !is_none(found) { ret found; } @@ -1497,7 +1497,7 @@ fn lookup_in_local_mod(e: env, node_id: node_id, sp: span, id: ident, ret lookup_glob_in_mod(e, inf, sp, id, ns, outside); } -fn lookup_in_globs(e: env, globs: [glob_imp_def]/~, sp: span, id: ident, +fn lookup_in_globs(e: env, globs: ~[glob_imp_def], sp: span, id: ident, ns: namespace, dr: dir) -> option<def> { fn lookup_in_mod_(e: env, def: glob_imp_def, sp: span, name: ident, ns: namespace, dr: dir) -> option<glob_imp_def> { @@ -1518,14 +1518,14 @@ fn lookup_in_globs(e: env, globs: [glob_imp_def]/~, sp: span, id: ident, } let g = copy globs; // FIXME #2405 let matches = vec::filter_map(g, - {|x| lookup_in_mod_(e, x, sp, id, ns, dr)}); + |x| lookup_in_mod_(e, x, sp, id, ns, dr)); if vec::len(matches) == 0u { ret none; } else if vec::len(matches) == 1u { ret some(matches[0].def); } else { - for matches.each {|match| + for matches.each |match| { let sp = match.path.span; e.sess.span_note(sp, #fmt["'%s' is imported here", *id]); } @@ -1602,9 +1602,9 @@ fn add_to_index(index: hashmap<ident, @list<mod_index_entry>>, id: ident, } } -fn index_view_items(view_items: [@ast::view_item]/~, +fn index_view_items(view_items: ~[@ast::view_item], index: hashmap<ident, @list<mod_index_entry>>) { - for view_items.each {|vi| + for view_items.each |vi| { alt vi.node { ast::view_item_use(ident, _, id) { add_to_index(index, ident, mie_view_item(ident, id, vi.span)); @@ -1612,13 +1612,13 @@ fn index_view_items(view_items: [@ast::view_item]/~, _ {} } - iter_effective_import_paths(*vi) {|vp| + do iter_effective_import_paths(*vi) |vp| { alt vp.node { ast::view_path_simple(ident, _, id) { add_to_index(index, ident, mie_import_ident(id, vp.span)); } ast::view_path_list(_, idents, _) { - for idents.each {|ident| + for idents.each |ident| { add_to_index(index, ident.node.name, mie_import_ident(ident.node.id, ident.span)); @@ -1637,7 +1637,7 @@ fn index_mod(md: ast::_mod) -> mod_index { index_view_items(md.view_items, index); - for md.items.each {|it| + for md.items.each |it| { alt it.node { ast::item_const(_, _) | ast::item_fn(_, _, _) | ast::item_mod(_) | ast::item_foreign_mod(_) | ast::item_ty(_, _, _) | @@ -1647,7 +1647,7 @@ fn index_mod(md: ast::_mod) -> mod_index { ast::item_enum(variants, _, _) { add_to_index(index, it.ident, mie_item(it)); let mut variant_idx: uint = 0u; - for variants.each {|v| + for variants.each |v| { add_to_index(index, v.node.name, mie_enum_variant(variant_idx, variants, it.id, it.span)); @@ -1669,7 +1669,7 @@ fn index_nmod(md: ast::foreign_mod) -> mod_index { index_view_items(md.view_items, index); - for md.items.each {|it| + for md.items.each |it| { add_to_index(index, it.ident, mie_foreign_item(it)); } ret index; @@ -1691,10 +1691,10 @@ fn ns_for_def(d: def) -> namespace { } } -fn lookup_external(e: env, cnum: int, ids: [ident]/~, ns: namespace) -> +fn lookup_external(e: env, cnum: int, ids: ~[ident], ns: namespace) -> option<def> { let mut result = none; - for csearch::lookup_defs(e.sess.cstore, cnum, ids).each {|d| + for csearch::lookup_defs(e.sess.cstore, cnum, ids).each |d| { e.ext_map.insert(def_id_of_def(d), ids); if ns == ns_for_def(d) { result = some(d); } } @@ -1706,16 +1706,16 @@ fn lookup_external(e: env, cnum: int, ids: [ident]/~, ns: namespace) -> fn check_for_collisions(e: @env, c: ast::crate) { // Module indices make checking those relatively simple -- just check each // name for multiple entities in the same namespace. - for e.mod_map.each_value {|val| - for val.index.each {|k, v| check_mod_name(*e, k, v); }; + for e.mod_map.each_value |val| { + for val.index.each |k, v| { check_mod_name(*e, k, v); }; }; // Other scopes have to be checked the hard way. let v = - @{visit_item: {|a,b,c|check_item(e, a, b, c)}, - visit_block: {|a,b,c|check_block(e, a, b, c)}, - visit_arm: {|a,b,c|check_arm(e, a, b, c)}, - visit_expr: {|a,b,c|check_expr(e, a, b, c)}, - visit_ty: {|a,b,c|check_ty(e, a, b, c)} + @{visit_item: |a,b,c| check_item(e, a, b, c), + visit_block: |a,b,c| check_block(e, a, b, c), + visit_arm: |a,b,c| check_arm(e, a, b, c), + visit_expr: |a,b,c| check_expr(e, a, b, c), + visit_ty: |a,b,c| check_ty(e, a, b, c) with *visit::default_visitor()}; visit::visit_crate(c, (), visit::mk_vt(v)); } @@ -1764,28 +1764,28 @@ fn mie_span(mie: mod_index_entry) -> span { } fn check_item(e: @env, i: @ast::item, &&x: (), v: vt<()>) { - fn typaram_names(tps: [ast::ty_param]/~) -> [ident]/~ { - let mut x: [ast::ident]/~ = []/~; - for tps.each {|tp| vec::push(x, tp.ident); } + fn typaram_names(tps: ~[ast::ty_param]) -> ~[ident] { + let mut x: ~[ast::ident] = ~[]; + for tps.each |tp| { vec::push(x, tp.ident); } ret x; } visit::visit_item(i, x, v); alt i.node { ast::item_fn(decl, ty_params, _) { check_fn(*e, i.span, decl); - ensure_unique(*e, i.span, ty_params, {|tp| tp.ident}, + ensure_unique(*e, i.span, ty_params, |tp| tp.ident, "type parameter"); } ast::item_enum(_, ty_params, _) { - ensure_unique(*e, i.span, ty_params, {|tp| tp.ident}, + ensure_unique(*e, i.span, ty_params, |tp| tp.ident, "type parameter"); } ast::item_iface(_, _, methods) { - ensure_unique(*e, i.span, methods, {|m| m.ident}, + ensure_unique(*e, i.span, methods, |m| m.ident, "method"); } ast::item_impl(_, _, _, _, methods) { - ensure_unique(*e, i.span, methods, {|m| m.ident}, + ensure_unique(*e, i.span, methods, |m| m.ident, "method"); } _ { } @@ -1793,7 +1793,7 @@ fn check_item(e: @env, i: @ast::item, &&x: (), v: vt<()>) { } fn check_pat(e: @env, ch: checker, p: @ast::pat) { - pat_util::pat_bindings(e.def_map, p) {|_i, p_sp, n| + do pat_util::pat_bindings(e.def_map, p) |_i, p_sp, n| { add_name(ch, p_sp, path_to_ident(n)); }; } @@ -1815,8 +1815,8 @@ fn check_arm(e: @env, a: ast::arm, &&x: (), v: vt<()>) { e.sess.span_err(a.pats[i].span, "inconsistent number of bindings"); } else { - for ch.seen.each {|name| - if is_none(vec::find(seen0, {|x|str::eq(*name, *x)})) { + for ch.seen.each |name| { + if is_none(vec::find(seen0, |x| str::eq(*name, *x))) { // Fight the alias checker let name_ = name; e.sess.span_err(a.pats[i].span, @@ -1833,15 +1833,15 @@ fn check_block(e: @env, b: ast::blk, &&x: (), v: vt<()>) { let values = checker(*e, "value"); let types = checker(*e, "type"); let mods = checker(*e, "module"); - for b.node.stmts.each {|st| + for b.node.stmts.each |st| { alt st.node { ast::stmt_decl(d, _) { alt d.node { ast::decl_local(locs) { let local_values = checker(*e, "value"); - for locs.each {|loc| - pat_util::pat_bindings(e.def_map, loc.node.pat) - {|_i, p_sp, n| + for locs.each |loc| { + do pat_util::pat_bindings(e.def_map, loc.node.pat) + |_i, p_sp, n| { let ident = path_to_ident(n); add_name(local_values, p_sp, ident); check_name(values, p_sp, ident); @@ -1852,7 +1852,7 @@ fn check_block(e: @env, b: ast::blk, &&x: (), v: vt<()>) { alt it.node { ast::item_enum(variants, _, _) { add_name(types, it.span, it.ident); - for variants.each {|v| + for variants.each |v| { add_name(values, v.span, v.node.name); } } @@ -1909,7 +1909,7 @@ fn checker(e: env, kind: str) -> checker { } fn check_name(ch: checker, sp: span, name: ident) { - for ch.seen.each {|s| + for ch.seen.each |s| { if str::eq(*s, *name) { ch.sess.span_fatal( sp, "duplicate " + ch.kind + " name: " + *name); @@ -1921,10 +1921,10 @@ fn add_name(ch: checker, sp: span, name: ident) { ch.seen.push(name); } -fn ensure_unique<T>(e: env, sp: span, elts: [T]/~, id: fn(T) -> ident, +fn ensure_unique<T>(e: env, sp: span, elts: ~[T], id: fn(T) -> ident, kind: str) { let ch = checker(e, kind); - for elts.each {|elt| add_name(ch, sp, id(elt)); } + for elts.each |elt| { add_name(ch, sp, id(elt)); } } fn check_exports(e: @env) { @@ -1942,15 +1942,15 @@ fn check_exports(e: @env) { assert mid.crate == ast::local_crate; let ixm = e.mod_map.get(mid.node); - for ixm.index.each {|ident, mies| - list::iter(mies) {|mie| + for ixm.index.each |ident, mies| { + do list::iter(mies) |mie| { alt mie { mie_item(item) { let defs = - [ found_def_item(item, ns_val), + ~[ found_def_item(item, ns_val), found_def_item(item, ns_type), - found_def_item(item, ns_module) ]/~; - for defs.each {|d| + found_def_item(item, ns_module) ]; + for defs.each |d| { alt d { some(def) { f(ident, def); @@ -1984,14 +1984,14 @@ fn check_exports(e: @env) { fn maybe_add_reexport(e: @env, export_id: node_id, def: option<def>) { - option::iter(def) {|def| + do option::iter(def) |def| { add_export(e, export_id, def_id_of_def(def), true); } } fn add_export(e: @env, export_id: node_id, target_id: def_id, reexp: bool) { let found = alt e.exp_map.find(export_id) { - some(f) { f } none { []/~ } + some(f) { f } none { ~[] } }; e.exp_map.insert(export_id, vec::append_one(found, @@ -2004,7 +2004,7 @@ fn check_exports(e: @env) { if _mod.index.contains_key(ident) { found_something = true; let xs = _mod.index.get(ident); - list::iter(xs) {|x| + do list::iter(xs) |x| { alt x { mie_import_ident(id, _) { alt check e.imports.get(id) { @@ -2045,7 +2045,7 @@ fn check_exports(e: @env) { e.sess.span_fatal(sp, #fmt("undefined id %s in an export", *id)); } some(ms) { - let maybe_id = list_search(ms) {|m| + let maybe_id = do list_search(ms) |m| { alt m { mie_item(@{node: item_enum(_, _, _), id, _}) { some(id) } _ { none } @@ -2062,14 +2062,14 @@ fn check_exports(e: @env) { fn check_export_enum_list(e: @env, export_id: node_id, _mod: @indexed_mod, span: codemap::span, id: ast::ident, - ids: [ast::path_list_ident]/~) { + ids: ~[ast::path_list_ident]) { let parent_id = check_enum_ok(e, span, id, _mod); add_export(e, export_id, local_def(parent_id), false); - for ids.each {|variant_id| + for ids.each |variant_id| { let mut found = false; alt _mod.index.find(variant_id.node.name) { some(ms) { - list::iter(ms) {|m| + do list::iter(ms) |m| { alt m { mie_enum_variant(_, _, actual_parent_id, _) { found = true; @@ -2093,13 +2093,13 @@ fn check_exports(e: @env) { } } - for e.mod_map.each_value {|_mod| + for e.mod_map.each_value |_mod| { alt _mod.m { some(m) { let glob_is_re_exported = int_hash(); - for m.view_items.each {|vi| - iter_export_paths(*vi) { |vp| + for m.view_items.each |vi| { + do iter_export_paths(*vi) |vp| { alt vp.node { ast::view_path_simple(ident, _, id) { check_export(e, ident, _mod, id, vi); @@ -2121,13 +2121,13 @@ fn check_exports(e: @env) { } // Now follow the export-glob links and fill in the // globbed_exports and exp_map lists. - for _mod.glob_imports.each {|glob| + for _mod.glob_imports.each |glob| { let id = alt check glob.path.node { ast::view_path_glob(_, node_id) { node_id } }; if ! glob_is_re_exported.contains_key(id) { cont; } - iter_mod(*e, glob.def, - glob.path.span, outside) {|ident, def| + do iter_mod(*e, glob.def, + glob.path.span, outside) |ident, def| { vec::push(_mod.globbed_exports, ident); maybe_add_reexport(e, id, some(def)); } @@ -2149,22 +2149,22 @@ type method_info = {did: def_id, n_tps: uint, ident: ast::ident}; is the ident of the iface that's being implemented * methods: the item's methods */ -type _impl = {did: def_id, ident: ast::ident, methods: [@method_info]/~}; -type iscopes = @list<@[@_impl]/~>; +type _impl = {did: def_id, ident: ast::ident, methods: ~[@method_info]}; +type iscopes = @list<@~[@_impl]>; fn resolve_impls(e: @env, c: @ast::crate) { visit::visit_crate(*c, @nil, visit::mk_vt(@{ - visit_block: {|a,b,c|visit_block_with_impl_scope(e, a, b, c)}, - visit_mod: {|a,b,c,d,f|visit_mod_with_impl_scope(e, a, b, c, d, f)}, - visit_expr: {|a,b,c|resolve_impl_in_expr(e, a, b, c)} + visit_block: |a,b,c| visit_block_with_impl_scope(e, a, b, c), + visit_mod: |a,b,c,d,f| visit_mod_with_impl_scope(e, a, b, c, d, f), + visit_expr: |a,b,c| resolve_impl_in_expr(e, a, b, c) with *visit::default_visitor() })); } fn find_impls_in_view_item(e: env, vi: @ast::view_item, - &impls: [@_impl]/~, sc: option<iscopes>) { + &impls: ~[@_impl], sc: option<iscopes>) { fn lookup_imported_impls(e: env, id: node_id, - act: fn(@[@_impl]/~)) { + act: fn(@~[@_impl])) { alt e.imports.get(id) { resolved(_, _, _, is, _, _) { act(is); } todo(name, path, span, scopes) { @@ -2177,15 +2177,15 @@ fn find_impls_in_view_item(e: env, vi: @ast::view_item, } } - iter_effective_import_paths(*vi) { |vp| + do iter_effective_import_paths(*vi) |vp| { alt vp.node { ast::view_path_simple(name, pt, id) { - let mut found = []/~; + let mut found = ~[]; if vec::len(pt.idents) == 1u { - option::iter(sc) {|sc| - list::iter(sc) {|level| + do option::iter(sc) |sc| { + do list::iter(sc) |level| { if vec::len(found) == 0u { - for vec::each(*level) {|imp| + for vec::each(*level) |imp| { if imp.ident == pt.idents[0] { vec::push(found, @{ident: name with *imp}); @@ -2198,8 +2198,8 @@ fn find_impls_in_view_item(e: env, vi: @ast::view_item, } } } else { - lookup_imported_impls(e, id) {|is| - for vec::each(*is) {|i| + do lookup_imported_impls(e, id) |is| { + for vec::each(*is) |i| { vec::push(impls, @{ident: name with *i}); } } @@ -2207,10 +2207,10 @@ fn find_impls_in_view_item(e: env, vi: @ast::view_item, } ast::view_path_list(base, names, _) { - for names.each {|nm| - lookup_imported_impls(e, nm.node.id) {|is| + for names.each |nm| { + lookup_imported_impls(e, nm.node.id, |is| { vec::push_all(impls, *is); - } + }) } } @@ -2234,7 +2234,7 @@ fn find_impls_in_view_item(e: env, vi: @ast::view_item, item is a class; and none otherwise. Each record describes one interface implemented by i. */ -fn find_impls_in_item(e: env, i: @ast::item, &impls: [@_impl]/~, +fn find_impls_in_item(e: env, i: @ast::item, &impls: ~[@_impl], name: option<ident>, ck_exports: option<@indexed_mod>) { alt i.node { @@ -2246,7 +2246,7 @@ fn find_impls_in_item(e: env, i: @ast::item, &impls: [@_impl]/~, } { vec::push(impls, @{did: local_def(i.id), ident: i.ident, - methods: vec::map(mthds, {|m| + methods: vec::map(mthds, |m| { @{did: local_def(m.id), n_tps: vec::len(m.tps), ident: m.ident} @@ -2256,22 +2256,22 @@ fn find_impls_in_item(e: env, i: @ast::item, &impls: [@_impl]/~, ast::item_class(tps, ifces, items, _, _, _) { let (_, mthds) = ast_util::split_class_items(items); let n_tps = tps.len(); - vec::iter(ifces) {|p| - // The def_id, in this case, identifies the combination of - // class and iface - vec::push(impls, @{did: local_def(p.id), - ident: i.ident, - methods: vec::map(mthds, {|m| - @{did: local_def(m.id), - n_tps: n_tps + m.tps.len(), - ident: m.ident}})}); + do vec::iter(ifces) |p| { + // The def_id, in this case, identifies the combination of + // class and iface + vec::push(impls, @{did: local_def(p.id), + ident: i.ident, + methods: vec::map(mthds, |m| { + @{did: local_def(m.id), + n_tps: n_tps + m.tps.len(), + ident: m.ident}})}); } } _ {} } } -fn find_impls_in_mod_by_id(e: env, defid: def_id, &impls: [@_impl]/~, +fn find_impls_in_mod_by_id(e: env, defid: def_id, &impls: ~[@_impl], name: option<ident>) { let mut cached; alt e.impl_cache.find(defid) { @@ -2280,16 +2280,16 @@ fn find_impls_in_mod_by_id(e: env, defid: def_id, &impls: [@_impl]/~, none { e.impl_cache.insert(defid, none); cached = if defid.crate == ast::local_crate { - let mut tmp = []/~; + let mut tmp = ~[]; let mi = e.mod_map.get(defid.node); let md = option::get(mi.m); - for md.view_items.each {|vi| + for md.view_items.each |vi| { find_impls_in_view_item(e, vi, tmp, none); } - for md.items.each {|i| + for md.items.each |i| { find_impls_in_item(e, i, tmp, none, none); } - @vec::filter(tmp) {|i| is_exported(e, i.ident, mi)} + @vec::filter(tmp, |i| is_exported(e, i.ident, mi)) } else { csearch::get_impls_for_mod(e.sess.cstore, defid, none) }; @@ -2298,7 +2298,7 @@ fn find_impls_in_mod_by_id(e: env, defid: def_id, &impls: [@_impl]/~, } alt name { some(n) { - for vec::each(*cached) {|im| + for vec::each(*cached) |im| { if n == im.ident { vec::push(impls, im); } } } @@ -2306,7 +2306,7 @@ fn find_impls_in_mod_by_id(e: env, defid: def_id, &impls: [@_impl]/~, } } -fn find_impls_in_mod(e: env, m: def, &impls: [@_impl]/~, +fn find_impls_in_mod(e: env, m: def, &impls: ~[@_impl], name: option<ident>) { alt m { ast::def_mod(defid) { @@ -2318,11 +2318,11 @@ fn find_impls_in_mod(e: env, m: def, &impls: [@_impl]/~, fn visit_block_with_impl_scope(e: @env, b: ast::blk, &&sc: iscopes, v: vt<iscopes>) { - let mut impls = []/~; - for b.node.view_items.each {|vi| + let mut impls = ~[]; + for b.node.view_items.each |vi| { find_impls_in_view_item(*e, vi, impls, some(sc)); } - for b.node.stmts.each {|st| + for b.node.stmts.each |st| { alt st.node { ast::stmt_decl(@{node: ast::decl_item(i), _}, _) { find_impls_in_item(*e, i, impls, none, none); @@ -2336,11 +2336,11 @@ fn visit_block_with_impl_scope(e: @env, b: ast::blk, &&sc: iscopes, fn visit_mod_with_impl_scope(e: @env, m: ast::_mod, s: span, id: node_id, &&sc: iscopes, v: vt<iscopes>) { - let mut impls = []/~; - for m.view_items.each {|vi| + let mut impls = ~[]; + for m.view_items.each |vi| { find_impls_in_view_item(*e, vi, impls, some(sc)); } - for m.items.each {|i| find_impls_in_item(*e, i, impls, none, none); } + for m.items.each |i| { find_impls_in_item(*e, i, impls, none, none); } let impls = @impls; visit::visit_mod(m, s, id, if vec::len(*impls) > 0u { @cons(impls, sc) diff --git a/src/rustc/middle/trans/alt.rs b/src/rustc/middle/trans/alt.rs index 5833ba0170f..f40a94e113b 100644 --- a/src/rustc/middle/trans/alt.rs +++ b/src/rustc/middle/trans/alt.rs @@ -71,30 +71,30 @@ fn trans_opt(bcx: block, o: opt) -> opt_result { fn variant_opt(tcx: ty::ctxt, pat_id: ast::node_id) -> opt { let vdef = ast_util::variant_def_ids(tcx.def_map.get(pat_id)); let variants = ty::enum_variants(tcx, vdef.enm); - for vec::each(*variants) {|v| + for vec::each(*variants) |v| { if vdef.var == v.id { ret var(v.disr_val, vdef); } } core::unreachable(); } -type bind_map = [{ident: ast::ident, val: ValueRef}]/~; +type bind_map = ~[{ident: ast::ident, val: ValueRef}]; fn assoc(key: ast::ident, list: bind_map) -> option<ValueRef> { - for vec::each(list) {|elt| + for vec::each(list) |elt| { if str::eq(*elt.ident, *key) { ret some(elt.val); } } ret none; } type match_branch = - @{pats: [@ast::pat]/~, + @{pats: ~[@ast::pat], bound: bind_map, data: @{bodycx: block, guard: option<@ast::expr>, id_map: pat_id_map}}; -type match = [match_branch]/~; +type match = ~[match_branch]; fn has_nested_bindings(m: match, col: uint) -> bool { - for vec::each(m) {|br| + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_ident(_, some(_)) { ret true; } _ {} @@ -104,19 +104,19 @@ fn has_nested_bindings(m: match, col: uint) -> bool { } fn expand_nested_bindings(m: match, col: uint, val: ValueRef) -> match { - let mut result = []/~; - for vec::each(m) {|br| + let mut result = ~[]; + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_ident(name, some(inner)) { let pats = vec::append( vec::slice(br.pats, 0u, col), - vec::append([inner]/~, + vec::append(~[inner], vec::view(br.pats, col + 1u, br.pats.len()))); vec::push(result, @{pats: pats, bound: vec::append( - br.bound, [{ident: path_to_ident(name), - val: val}]/~) + br.bound, ~[{ident: path_to_ident(name), + val: val}]) with *br}); } _ { vec::push(result, br); } @@ -125,12 +125,12 @@ fn expand_nested_bindings(m: match, col: uint, val: ValueRef) -> match { result } -type enter_pat = fn(@ast::pat) -> option<[@ast::pat]/~>; +type enter_pat = fn(@ast::pat) -> option<~[@ast::pat]>; fn enter_match(dm: def_map, m: match, col: uint, val: ValueRef, e: enter_pat) -> match { - let mut result = []/~; - for vec::each(m) {|br| + let mut result = ~[]; + for vec::each(m) |br| { alt e(br.pats[col]) { some(sub) { let pats = vec::append( @@ -140,7 +140,7 @@ fn enter_match(dm: def_map, m: match, col: uint, val: ValueRef, let bound = alt self.node { ast::pat_ident(name, none) if !pat_is_variant(dm, self) { vec::append(br.bound, - [{ident: path_to_ident(name), val: val}]/~) + ~[{ident: path_to_ident(name), val: val}]) } _ { br.bound } }; @@ -153,11 +153,11 @@ fn enter_match(dm: def_map, m: match, col: uint, val: ValueRef, } fn enter_default(dm: def_map, m: match, col: uint, val: ValueRef) -> match { - enter_match(dm, m, col, val) {|p| + do enter_match(dm, m, col, val) |p| { alt p.node { - ast::pat_wild | ast::pat_rec(_, _) | ast::pat_tup(_) { some([]/~) } + ast::pat_wild | ast::pat_rec(_, _) | ast::pat_tup(_) { some(~[]) } ast::pat_ident(_, none) if !pat_is_variant(dm, p) { - some([]/~) + some(~[]) } _ { none } } @@ -167,7 +167,7 @@ fn enter_default(dm: def_map, m: match, col: uint, val: ValueRef) -> match { fn enter_opt(tcx: ty::ctxt, m: match, opt: opt, col: uint, variant_size: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; - enter_match(tcx.def_map, m, col, val) {|p| + do enter_match(tcx.def_map, m, col, val) |p| { alt p.node { ast::pat_enum(_, subpats) { if opt_eq(tcx, variant_opt(tcx, p.id), opt) { @@ -176,30 +176,30 @@ fn enter_opt(tcx: ty::ctxt, m: match, opt: opt, col: uint, else { none } } ast::pat_ident(_, none) if pat_is_variant(tcx.def_map, p) { - if opt_eq(tcx, variant_opt(tcx, p.id), opt) { some([]/~) } + if opt_eq(tcx, variant_opt(tcx, p.id), opt) { some(~[]) } else { none } } ast::pat_lit(l) { - if opt_eq(tcx, lit(l), opt) { some([]/~) } else { none } + if opt_eq(tcx, lit(l), opt) { some(~[]) } else { none } } ast::pat_range(l1, l2) { - if opt_eq(tcx, range(l1, l2), opt) { some([]/~) } else { none } + if opt_eq(tcx, range(l1, l2), opt) { some(~[]) } else { none } } _ { some(vec::from_elem(variant_size, dummy)) } } } } -fn enter_rec(dm: def_map, m: match, col: uint, fields: [ast::ident]/~, +fn enter_rec(dm: def_map, m: match, col: uint, fields: ~[ast::ident], val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; - enter_match(dm, m, col, val) {|p| + do enter_match(dm, m, col, val) |p| { alt p.node { ast::pat_rec(fpats, _) { - let mut pats = []/~; - for vec::each(fields) {|fname| + let mut pats = ~[]; + for vec::each(fields) |fname| { let mut pat = dummy; - for vec::each(fpats) {|fpat| + for vec::each(fpats) |fpat| { if str::eq(*fpat.ident, *fname) { pat = fpat.pat; break; } } vec::push(pats, pat); @@ -214,7 +214,7 @@ fn enter_rec(dm: def_map, m: match, col: uint, fields: [ast::ident]/~, fn enter_tup(dm: def_map, m: match, col: uint, val: ValueRef, n_elts: uint) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; - enter_match(dm, m, col, val) {|p| + do enter_match(dm, m, col, val) |p| { alt p.node { ast::pat_tup(elts) { some(elts) } _ { some(vec::from_elem(n_elts, dummy)) } @@ -224,32 +224,32 @@ fn enter_tup(dm: def_map, m: match, col: uint, val: ValueRef, fn enter_box(dm: def_map, m: match, col: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; - enter_match(dm, m, col, val) {|p| + do enter_match(dm, m, col, val) |p| { alt p.node { - ast::pat_box(sub) { some([sub]/~) } - _ { some([dummy]/~) } + ast::pat_box(sub) { some(~[sub]) } + _ { some(~[dummy]) } } } } fn enter_uniq(dm: def_map, m: match, col: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; - enter_match(dm, m, col, val) {|p| + do enter_match(dm, m, col, val) |p| { alt p.node { - ast::pat_uniq(sub) { some([sub]/~) } - _ { some([dummy]/~) } + ast::pat_uniq(sub) { some(~[sub]) } + _ { some(~[dummy]) } } } } -fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> [opt]/~ { +fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> ~[opt] { fn add_to_set(tcx: ty::ctxt, &&set: dvec<opt>, val: opt) { - if set.any({|l| opt_eq(tcx, l, val)}) {ret;} + if set.any(|l| opt_eq(tcx, l, val)) {ret;} set.push(val); } let found = dvec(); - for vec::each(m) {|br| + for vec::each(m) |br| { let cur = br.pats[col]; if pat_is_variant(ccx.tcx.def_map, cur) { add_to_set(ccx.tcx, found, variant_opt(ccx.tcx, br.pats[col].id)); @@ -268,7 +268,7 @@ fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> [opt]/~ { fn extract_variant_args(bcx: block, pat_id: ast::node_id, vdefs: {enm: def_id, var: def_id}, val: ValueRef) -> - {vals: [ValueRef]/~, bcx: block} { + {vals: ~[ValueRef], bcx: block} { let _icx = bcx.insn_ctxt("alt::extract_variant_args"); let ccx = bcx.fcx.ccx; let enum_ty_substs = alt check ty::get(node_id_type(bcx, pat_id)).struct { @@ -281,24 +281,24 @@ fn extract_variant_args(bcx: block, pat_id: ast::node_id, if size > 0u && (*variants).len() != 1u { let enumptr = PointerCast(bcx, val, T_opaque_enum_ptr(ccx)); - blobptr = GEPi(bcx, enumptr, [0u, 1u]/~); + blobptr = GEPi(bcx, enumptr, ~[0u, 1u]); } let vdefs_tg = vdefs.enm; let vdefs_var = vdefs.var; - let args = vec::from_fn(size) { |i| + let args = do vec::from_fn(size) |i| { GEP_enum(bcx, blobptr, vdefs_tg, vdefs_var, enum_ty_substs, i) }; ret {vals: args, bcx: bcx}; } -fn collect_record_fields(m: match, col: uint) -> [ast::ident]/~ { - let mut fields: [ast::ident]/~ = []/~; - for vec::each(m) {|br| +fn collect_record_fields(m: match, col: uint) -> ~[ast::ident] { + let mut fields: ~[ast::ident] = ~[]; + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_rec(fs, _) { - for vec::each(fs) {|f| - if !vec::any(fields, {|x| str::eq(*f.ident, *x)}) { + for vec::each(fs) |f| { + if !vec::any(fields, |x| str::eq(*f.ident, *x)) { vec::push(fields, f.ident); } } @@ -310,7 +310,7 @@ fn collect_record_fields(m: match, col: uint) -> [ast::ident]/~ { } fn root_pats_as_necessary(bcx: block, m: match, col: uint, val: ValueRef) { - for vec::each(m) {|br| + for vec::each(m) |br| { let pat_id = br.pats[col].id; alt bcx.ccx().maps.root_map.find({id:pat_id, derefs:0u}) { @@ -330,21 +330,21 @@ fn root_pats_as_necessary(bcx: block, m: match, col: uint, val: ValueRef) { } fn any_box_pat(m: match, col: uint) -> bool { - for vec::each(m) {|br| + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_box(_) { ret true; } _ { } } } ret false; } fn any_uniq_pat(m: match, col: uint) -> bool { - for vec::each(m) {|br| + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_uniq(_) { ret true; } _ { } } } ret false; } fn any_tup_pat(m: match, col: uint) -> bool { - for vec::each(m) {|br| + for vec::each(m) |br| { alt br.pats[col].node { ast::pat_tup(_) { ret true; } _ { } } } ret false; @@ -362,14 +362,14 @@ fn pick_col(m: match) -> uint { } } let scores = vec::to_mut(vec::from_elem(m[0].pats.len(), 0u)); - for vec::each(m) {|br| + for vec::each(m) |br| { let mut i = 0u; - for vec::each(br.pats) {|p| scores[i] += score(p); i += 1u; } + for vec::each(br.pats) |p| { scores[i] += score(p); i += 1u; } } let mut max_score = 0u; let mut best_col = 0u; let mut i = 0u; - for vec::each(scores) {|score| + for vec::each(scores) |score| { // Irrefutable columns always go first, they'd only be duplicated in // the branches. if score == 0u { ret i; } @@ -381,8 +381,8 @@ fn pick_col(m: match) -> uint { ret best_col; } -fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, - chk: option<mk_fail>, &exits: [exit_node]/~) { +fn compile_submatch(bcx: block, m: match, vals: ~[ValueRef], + chk: option<mk_fail>, &exits: ~[exit_node]) { let _icx = bcx.insn_ctxt("alt::compile_submatch"); let mut bcx = bcx; let tcx = bcx.tcx(), dm = tcx.def_map; @@ -393,16 +393,16 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, some(e) { // Temporarily set bindings. They'll be rewritten to PHI nodes // for the actual arm block. - for data.id_map.each {|key, val| + for data.id_map.each |key, val| { let loc = local_mem(option::get(assoc(key, m[0].bound))); bcx.fcx.lllocals.insert(val, loc); }; let {bcx: guard_cx, val} = { - with_scope_result(bcx, e.info(), "guard") {|bcx| + do with_scope_result(bcx, e.info(), "guard") |bcx| { trans_temp_expr(bcx, e) } }; - bcx = with_cond(guard_cx, Not(guard_cx, val)) {|bcx| + bcx = do with_cond(guard_cx, Not(guard_cx, val)) |bcx| { compile_submatch(bcx, vec::tail(m), vals, chk, exits); bcx }; @@ -427,7 +427,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, vec::view(vals, col + 1u, vals.len())); let ccx = bcx.fcx.ccx; let mut pat_id = 0; - for vec::each(m) {|br| + for vec::each(m) |br| { // Find a real id (we're adding placeholder wildcard patterns, but // each column is guaranteed to have at least one real pattern) if pat_id == 0 { pat_id = br.pats[col].id; } @@ -439,10 +439,10 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, // Separate path for extracting and binding record fields if rec_fields.len() > 0u { let fields = ty::get_fields(node_id_type(bcx, pat_id)); - let mut rec_vals = []/~; - for vec::each(rec_fields) {|field_name| + let mut rec_vals = ~[]; + for vec::each(rec_fields) |field_name| { let ix = option::get(ty::field_idx(field_name, fields)); - vec::push(rec_vals, GEPi(bcx, val, [0u, ix]/~)); + vec::push(rec_vals, GEPi(bcx, val, ~[0u, ix])); } compile_submatch(bcx, enter_rec(dm, m, col, rec_fields, val), vec::append(rec_vals, vals_left), chk, exits); @@ -455,9 +455,9 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, ty::ty_tup(elts) { elts.len() } _ { ccx.sess.bug("non-tuple type in tuple pattern"); } }; - let mut tup_vals = []/~, i = 0u; + let mut tup_vals = ~[], i = 0u; while i < n_tup_elts { - vec::push(tup_vals, GEPi(bcx, val, [0u, i]/~)); + vec::push(tup_vals, GEPi(bcx, val, ~[0u, i])); i += 1u; } compile_submatch(bcx, enter_tup(dm, m, col, val, n_tup_elts), @@ -470,9 +470,9 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, let llbox = Load(bcx, val); let box_no_addrspace = non_gc_box_cast(bcx, llbox); let unboxed = - GEPi(bcx, box_no_addrspace, [0u, abi::box_field_body]/~); + GEPi(bcx, box_no_addrspace, ~[0u, abi::box_field_body]); compile_submatch(bcx, enter_box(dm, m, col, val), - vec::append([unboxed]/~, vals_left), chk, exits); + vec::append(~[unboxed], vals_left), chk, exits); ret; } @@ -480,9 +480,9 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, let llbox = Load(bcx, val); let box_no_addrspace = non_gc_box_cast(bcx, llbox); let unboxed = - GEPi(bcx, box_no_addrspace, [0u, abi::box_field_body]/~); + GEPi(bcx, box_no_addrspace, ~[0u, abi::box_field_body]); compile_submatch(bcx, enter_uniq(dm, m, col, val), - vec::append([unboxed]/~, vals_left), chk, exits); + vec::append(~[unboxed], vals_left), chk, exits); ret; } @@ -499,7 +499,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, } else { let enumptr = PointerCast(bcx, val, T_opaque_enum_ptr(ccx)); - let discrimptr = GEPi(bcx, enumptr, [0u, 0u]/~); + let discrimptr = GEPi(bcx, enumptr, ~[0u, 0u]); test_val = Load(bcx, discrimptr); kind = switch; } @@ -516,7 +516,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, } } } - for vec::each(opts) {|o| + for vec::each(opts) |o| { alt o { range(_, _) { kind = compare; break; } _ { } @@ -535,7 +535,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, let len = opts.len(); let mut i = 0u; // Compile subtrees for each option - for vec::each(opts) {|opt| + for vec::each(opts) |opt| { i += 1u; let mut opt_cx = else_cx; if !exhaustive || i < len { @@ -553,7 +553,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, compare { let t = node_id_type(bcx, pat_id); let {bcx: after_cx, val: matches} = { - with_scope_result(bcx, none, "compare_scope") {|bcx| + do with_scope_result(bcx, none, "compare_scope") |bcx| { alt trans_opt(bcx, opt) { single_result({bcx, val}) { trans_compare(bcx, ast::eq, test_val, t, val, t) @@ -575,7 +575,7 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, } } else if kind == compare { Br(bcx, else_cx.llbb); } let mut size = 0u; - let mut unpacked = []/~; + let mut unpacked = ~[]; alt opt { var(_, vdef) { let args = extract_variant_args(opt_cx, pat_id, vdef, val); @@ -599,15 +599,15 @@ fn compile_submatch(bcx: block, m: match, vals: [ValueRef]/~, } // Returns false for unreachable blocks -fn make_phi_bindings(bcx: block, map: [exit_node]/~, +fn make_phi_bindings(bcx: block, map: ~[exit_node], ids: pat_util::pat_id_map) -> bool { let _icx = bcx.insn_ctxt("alt::make_phi_bindings"); let our_block = bcx.llbb as uint; let mut success = true, bcx = bcx; - for ids.each {|name, node_id| - let mut llbbs = []/~; - let mut vals = []/~; - for vec::each(map) {|ex| + for ids.each |name, node_id| { + let mut llbbs = ~[]; + let mut vals = ~[]; + for vec::each(map) |ex| { if ex.to as uint == our_block { alt assoc(name, ex.bound) { some(val) { @@ -632,31 +632,31 @@ fn make_phi_bindings(bcx: block, map: [exit_node]/~, fn trans_alt(bcx: block, alt_expr: @ast::expr, expr: @ast::expr, - arms: [ast::arm]/~, + arms: ~[ast::arm], mode: ast::alt_mode, dest: dest) -> block { let _icx = bcx.insn_ctxt("alt::trans_alt"); - with_scope(bcx, alt_expr.info(), "alt") {|bcx| + do with_scope(bcx, alt_expr.info(), "alt") |bcx| { trans_alt_inner(bcx, expr, arms, mode, dest) } } -fn trans_alt_inner(scope_cx: block, expr: @ast::expr, arms: [ast::arm]/~, +fn trans_alt_inner(scope_cx: block, expr: @ast::expr, arms: ~[ast::arm], mode: ast::alt_mode, dest: dest) -> block { let _icx = scope_cx.insn_ctxt("alt::trans_alt_inner"); let bcx = scope_cx, tcx = bcx.tcx(); - let mut bodies = []/~, match = []/~; + let mut bodies = ~[], match = ~[]; let {bcx, val, _} = trans_temp_expr(bcx, expr); if bcx.unreachable { ret bcx; } - for vec::each(arms) {|a| + for vec::each(arms) |a| { let body = scope_block(bcx, a.body.info(), "case_body"); let id_map = pat_util::pat_id_map(tcx.def_map, a.pats[0]); vec::push(bodies, body); - for vec::each(a.pats) {|p| - vec::push(match, @{pats: [p]/~, - bound: []/~, + for vec::each(a.pats) |p| { + vec::push(match, @{pats: ~[p], + bound: ~[], data: @{bodycx: body, guard: a.guard, id_map: id_map}}); } @@ -674,17 +674,17 @@ fn trans_alt_inner(scope_cx: block, expr: @ast::expr, arms: [ast::arm]/~, *done = some(fail_cx.llbb); ret fail_cx.llbb; } - some({||mk_fail(scope_cx, expr.span, fail_cx)}) + some(|| mk_fail(scope_cx, expr.span, fail_cx)) } ast::alt_exhaustive { none } }; - let mut exit_map = []/~; + let mut exit_map = ~[]; let t = node_id_type(bcx, expr.id); let spilled = spill_if_immediate(bcx, val, t); - compile_submatch(bcx, match, [spilled]/~, mk_fail, exit_map); + compile_submatch(bcx, match, ~[spilled], mk_fail, exit_map); - let mut arm_cxs = []/~, arm_dests = []/~, i = 0u; - for vec::each(arms) {|a| + let mut arm_cxs = ~[], arm_dests = ~[], i = 0u; + for vec::each(arms) |a| { let body_cx = bodies[i]; let id_map = pat_util::pat_id_map(tcx.def_map, a.pats[0]); if make_phi_bindings(body_cx, exit_map, id_map) { @@ -728,23 +728,23 @@ fn bind_irrefutable_pat(bcx: block, pat: @ast::pat, val: ValueRef, let vdefs = ast_util::variant_def_ids(ccx.tcx.def_map.get(pat.id)); let args = extract_variant_args(bcx, pat.id, vdefs, val); let mut i = 0; - option::iter(sub) {|sub| for vec::each(args.vals) {|argval| + do option::iter(sub) |sub| { for vec::each(args.vals) |argval| { bcx = bind_irrefutable_pat(bcx, sub[i], argval, make_copy); i += 1; }} } ast::pat_rec(fields, _) { let rec_fields = ty::get_fields(node_id_type(bcx, pat.id)); - for vec::each(fields) {|f| + for vec::each(fields) |f| { let ix = option::get(ty::field_idx(f.ident, rec_fields)); - let fldptr = GEPi(bcx, val, [0u, ix]/~); + let fldptr = GEPi(bcx, val, ~[0u, ix]); bcx = bind_irrefutable_pat(bcx, f.pat, fldptr, make_copy); } } ast::pat_tup(elems) { let mut i = 0u; - for vec::each(elems) {|elem| - let fldptr = GEPi(bcx, val, [0u, i]/~); + for vec::each(elems) |elem| { + let fldptr = GEPi(bcx, val, ~[0u, i]); bcx = bind_irrefutable_pat(bcx, elem, fldptr, make_copy); i += 1u; } @@ -752,13 +752,13 @@ fn bind_irrefutable_pat(bcx: block, pat: @ast::pat, val: ValueRef, ast::pat_box(inner) { let llbox = Load(bcx, val); let unboxed = - GEPi(bcx, llbox, [0u, abi::box_field_body]/~); + GEPi(bcx, llbox, ~[0u, abi::box_field_body]); bcx = bind_irrefutable_pat(bcx, inner, unboxed, true); } ast::pat_uniq(inner) { let llbox = Load(bcx, val); let unboxed = - GEPi(bcx, llbox, [0u, abi::box_field_body]/~); + GEPi(bcx, llbox, ~[0u, abi::box_field_body]); bcx = bind_irrefutable_pat(bcx, inner, unboxed, true); } ast::pat_wild | ast::pat_lit(_) | ast::pat_range(_, _) { } diff --git a/src/rustc/middle/trans/base.rs b/src/rustc/middle/trans/base.rs index 5c2cbc09b09..8cef9f12f66 100644 --- a/src/rustc/middle/trans/base.rs +++ b/src/rustc/middle/trans/base.rs @@ -114,11 +114,11 @@ impl fcx_icx for fn_ctxt { } } -fn join_returns(parent_cx: block, in_cxs: [block]/~, - in_ds: [dest]/~, out_dest: dest) -> block { +fn join_returns(parent_cx: block, in_cxs: ~[block], + in_ds: ~[dest], out_dest: dest) -> block { let out = sub_block(parent_cx, "join"); let mut reachable = false, i = 0u, phi = none; - for vec::each(in_cxs) {|cx| + for vec::each(in_cxs) |cx| { if !cx.unreachable { Br(cx, out.llbb); reachable = true; @@ -172,7 +172,7 @@ fn log_fn_time(ccx: @crate_ctxt, name: str, start: time::timespec, fn decl_fn(llmod: ModuleRef, name: str, cc: lib::llvm::CallConv, llty: TypeRef) -> ValueRef { - let llfn: ValueRef = str::as_c_str(name, {|buf| + let llfn: ValueRef = str::as_c_str(name, |buf| { llvm::LLVMGetOrInsertFunction(llmod, buf, llty) }); lib::llvm::SetFunctionCallConv(llfn, cc); @@ -204,7 +204,7 @@ fn get_extern_fn(externs: hashmap<str, ValueRef>, llmod: ModuleRef, name: str, fn get_extern_const(externs: hashmap<str, ValueRef>, llmod: ModuleRef, name: str, ty: TypeRef) -> ValueRef { if externs.contains_key(name) { ret externs.get(name); } - let c = str::as_c_str(name, {|buf| llvm::LLVMAddGlobal(llmod, ty, buf) }); + let c = str::as_c_str(name, |buf| llvm::LLVMAddGlobal(llmod, ty, buf)); externs.insert(name, c); ret c; } @@ -222,14 +222,14 @@ fn get_simple_extern_fn(cx: block, } fn trans_foreign_call(cx: block, externs: hashmap<str, ValueRef>, - llmod: ModuleRef, name: str, args: [ValueRef]/~) -> + llmod: ModuleRef, name: str, args: ~[ValueRef]) -> ValueRef { let _icx = cx.insn_ctxt("trans_foreign_call"); let n = args.len() as int; let llforeign: ValueRef = get_simple_extern_fn(cx, externs, llmod, name, n); - let mut call_args: [ValueRef]/~ = []/~; - for vec::each(args) {|a| + let mut call_args: ~[ValueRef] = ~[]; + for vec::each(args) |a| { vec::push(call_args, a); } ret Call(cx, llforeign, call_args); @@ -237,14 +237,14 @@ fn trans_foreign_call(cx: block, externs: hashmap<str, ValueRef>, fn trans_free(cx: block, v: ValueRef) -> block { let _icx = cx.insn_ctxt("trans_free"); - Call(cx, cx.ccx().upcalls.free, [PointerCast(cx, v, T_ptr(T_i8()))]/~); + Call(cx, cx.ccx().upcalls.free, ~[PointerCast(cx, v, T_ptr(T_i8()))]); cx } fn trans_unique_free(cx: block, v: ValueRef) -> block { - let _icx = cx.insn_ctxt("trans_shared_free"); + let _icx = cx.insn_ctxt("trans_unique_free"); Call(cx, cx.ccx().upcalls.exchange_free, - [PointerCast(cx, v, T_ptr(T_i8()))]/~); + ~[PointerCast(cx, v, T_ptr(T_i8()))]); ret cx; } @@ -298,7 +298,7 @@ fn arrayalloca(cx: block, t: TypeRef, v: ValueRef) -> ValueRef { fn ptr_offs(bcx: block, base: ValueRef, sz: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("ptr_offs"); let raw = PointerCast(bcx, base, T_ptr(T_i8())); - InBoundsGEP(bcx, raw, [sz]/~) + InBoundsGEP(bcx, raw, ~[sz]) } // Increment a pointer by a given amount and then cast it to be a pointer @@ -316,19 +316,19 @@ fn bump_ptr(bcx: block, t: ty::t, base: ValueRef, sz: ValueRef) -> // @llblobptr is the data part of a enum value; its actual type // is meaningless, as it will be cast away. fn GEP_enum(bcx: block, llblobptr: ValueRef, enum_id: ast::def_id, - variant_id: ast::def_id, ty_substs: [ty::t]/~, + variant_id: ast::def_id, ty_substs: ~[ty::t], ix: uint) -> ValueRef { let _icx = bcx.insn_ctxt("GEP_enum"); let ccx = bcx.ccx(); let variant = ty::enum_variant_with_id(ccx.tcx, enum_id, variant_id); assert ix < variant.args.len(); - let arg_lltys = vec::map(variant.args, {|aty| + let arg_lltys = vec::map(variant.args, |aty| { type_of(ccx, ty::subst_tps(ccx.tcx, ty_substs, aty)) }); let typed_blobptr = PointerCast(bcx, llblobptr, T_ptr(T_struct(arg_lltys))); - GEPi(bcx, typed_blobptr, [0u, ix]/~) + GEPi(bcx, typed_blobptr, ~[0u, ix]) } // Returns a pointer to the body for the box. The box may be an opaque @@ -342,7 +342,7 @@ fn opaque_box_body(bcx: block, let _icx = bcx.insn_ctxt("opaque_box_body"); let ccx = bcx.ccx(); let boxptr = PointerCast(bcx, boxptr, T_ptr(T_box_header(ccx))); - let bodyptr = GEPi(bcx, boxptr, [1u]/~); + let bodyptr = GEPi(bcx, boxptr, ~[1u]); PointerCast(bcx, bodyptr, T_ptr(type_of(ccx, body_t))) } @@ -370,7 +370,7 @@ fn malloc_raw_dyn(bcx: block, t: ty::t, heap: heap, lazily_emit_all_tydesc_glue(ccx, copy static_ti); // Allocate space: - let rval = Call(bcx, upcall, [lltydesc, size]/~); + let rval = Call(bcx, upcall, ~[lltydesc, size]); ret PointerCast(bcx, rval, llty); } @@ -388,7 +388,7 @@ fn malloc_general_dyn(bcx: block, t: ty::t, heap: heap, size: ValueRef) -> let _icx = bcx.insn_ctxt("malloc_general"); let llbox = malloc_raw_dyn(bcx, t, heap, size); let non_gc_box = non_gc_box_cast(bcx, llbox); - let body = GEPi(bcx, non_gc_box, [0u, abi::box_field_body]/~); + let body = GEPi(bcx, non_gc_box, ~[0u, abi::box_field_body]); ret {box: llbox, body: body}; } @@ -422,12 +422,12 @@ fn get_tydesc(ccx: @crate_ctxt, t: ty::t, fn get_static_tydesc(ccx: @crate_ctxt, t: ty::t) -> @tydesc_info { alt ccx.tydescs.find(t) { - some(inf) { ret inf; } - none { + some(inf) { inf } + _ { ccx.stats.n_static_tydescs += 1u; let inf = declare_tydesc(ccx, t); ccx.tydescs.insert(t, inf); - ret inf; + inf } } } @@ -454,7 +454,7 @@ fn set_inline_hint(f: ValueRef) { as c_ulonglong, 0u as c_ulonglong); } -fn set_inline_hint_if_appr(attrs: [ast::attribute]/~, +fn set_inline_hint_if_appr(attrs: ~[ast::attribute], llfn: ValueRef) { alt attr::find_inline_attr(attrs) { attr::ia_hint { set_inline_hint(llfn); } @@ -490,17 +490,16 @@ fn note_unique_llvm_symbol(ccx: @crate_ctxt, sym: str) { // Generates the declaration for (but doesn't emit) a type descriptor. fn declare_tydesc(ccx: @crate_ctxt, t: ty::t) -> @tydesc_info { let _icx = ccx.insn_ctxt("declare_tydesc"); - log(debug, "+++ declare_tydesc " + ty_to_str(ccx.tcx, t)); let llty = type_of(ccx, t); let llsize = llsize_of(ccx, llty); let llalign = llalign_of(ccx, llty); - let mut name; //XXX this triggers duplicate LLVM symbols - if false /*ccx.sess.opts.debuginfo*/ { - name = mangle_internal_name_by_type_only(ccx, t, @"tydesc"); - } else { name = mangle_internal_name_by_seq(ccx, @"tydesc"); } + let name = if false /*ccx.sess.opts.debuginfo*/ { + mangle_internal_name_by_type_only(ccx, t, @"tydesc") + } else { mangle_internal_name_by_seq(ccx, @"tydesc") }; note_unique_llvm_symbol(ccx, name); - let gvar = str::as_c_str(name, {|buf| + log(debug, #fmt("+++ declare_tydesc %s %s", ty_to_str(ccx.tcx, t), name)); + let gvar = str::as_c_str(name, |buf| { llvm::LLVMAddGlobal(ccx.llmod, ccx.tydesc_type, buf) }); let inf = @@ -538,7 +537,7 @@ fn declare_generic_glue(ccx: @crate_ctxt, t: ty::t, llfnty: TypeRef, fn make_generic_glue_inner(ccx: @crate_ctxt, t: ty::t, llfn: ValueRef, helper: glue_helper) -> ValueRef { let _icx = ccx.insn_ctxt("make_generic_glue_inner"); - let fcx = new_fn_ctxt(ccx, []/~, llfn, none); + let fcx = new_fn_ctxt(ccx, ~[], llfn, none); lib::llvm::SetLinkage(llfn, lib::llvm::InternalLinkage); ccx.stats.n_glues_created += 1u; // Any nontrivial glue is with values passed *by alias*; this is a @@ -575,7 +574,7 @@ fn make_generic_glue(ccx: @crate_ctxt, t: ty::t, llfn: ValueRef, fn emit_tydescs(ccx: @crate_ctxt) { let _icx = ccx.insn_ctxt("emit_tydescs"); - for ccx.tydescs.each {|key, val| + for ccx.tydescs.each |key, val| { let glue_fn_ty = T_ptr(T_glue_fn(ccx)); let ti = val; let take_glue = @@ -606,21 +605,21 @@ fn emit_tydescs(ccx: @crate_ctxt) { let tydesc = C_named_struct(ccx.tydesc_type, - [C_null(T_ptr(T_ptr(ccx.tydesc_type))), + ~[C_null(T_ptr(T_ptr(ccx.tydesc_type))), ti.size, // size ti.align, // align take_glue, // take_glue drop_glue, // drop_glue free_glue, // free_glue visit_glue, // visit_glue - C_int(ccx, 0), // ununsed - C_int(ccx, 0), // ununsed - C_int(ccx, 0), // ununsed - C_int(ccx, 0), // ununsed + C_int(ccx, 0), // unused + C_int(ccx, 0), // unused + C_int(ccx, 0), // unused + C_int(ccx, 0), // unused C_shape(ccx, shape), // shape shape_tables, // shape_tables - C_int(ccx, 0), // ununsed - C_int(ccx, 0)]/~); // unused + C_int(ccx, 0), // unused + C_int(ccx, 0)]); // unused let gvar = ti.tydesc; llvm::LLVMSetInitializer(gvar, tydesc); @@ -652,7 +651,7 @@ fn make_take_glue(bcx: block, v: ValueRef, t: ty::t) { closure::make_fn_glue(bcx, v, t, take_ty) } ty::ty_iface(_, _) { - let llbox = Load(bcx, GEPi(bcx, v, [0u, 1u]/~)); + let llbox = Load(bcx, GEPi(bcx, v, ~[0u, 1u])); incr_refcnt_of_boxed(bcx, llbox); bcx } @@ -672,7 +671,7 @@ fn incr_refcnt_of_boxed(cx: block, box_ptr: ValueRef) { let _icx = cx.insn_ctxt("incr_refcnt_of_boxed"); let ccx = cx.ccx(); maybe_validate_box(cx, box_ptr); - let rc_ptr = GEPi(cx, box_ptr, [0u, abi::box_field_refcnt]/~); + let rc_ptr = GEPi(cx, box_ptr, ~[0u, abi::box_field_refcnt]); let rc = Load(cx, rc_ptr); let rc = Add(cx, rc, C_int(ccx, 1)); Store(cx, rc, rc_ptr); @@ -698,14 +697,16 @@ fn make_free_glue(bcx: block, v: ValueRef, t: ty::t) { let bcx = alt ty::get(t).struct { ty::ty_box(body_mt) { let v = PointerCast(bcx, v, type_of(ccx, t)); - let body = GEPi(bcx, v, [0u, abi::box_field_body]/~); + let body = GEPi(bcx, v, ~[0u, abi::box_field_body]); let bcx = drop_ty(bcx, body, body_mt.ty); trans_free(bcx, v) } ty::ty_opaque_box { let v = PointerCast(bcx, v, type_of(ccx, t)); - let td = Load(bcx, GEPi(bcx, v, [0u, abi::box_field_tydesc]/~)); - let valptr = GEPi(bcx, v, [0u, abi::box_field_body]/~); + let td = Load(bcx, GEPi(bcx, v, ~[0u, abi::box_field_tydesc])); + let valptr = GEPi(bcx, v, ~[0u, abi::box_field_body]); + // Generate code that, dynamically, indexes into the + // tydesc and calls the drop glue that got set dynamically call_tydesc_glue_full(bcx, valptr, td, abi::tydesc_field_drop_glue, none); trans_free(bcx, v) @@ -732,7 +733,7 @@ fn make_free_glue(bcx: block, v: ValueRef, t: ty::t) { } ty::ty_class(did,substs) { // Call the dtor if there is one - option::map_default(ty::ty_dtor(bcx.tcx(), did), bcx) {|dt_id| + do option::map_default(ty::ty_dtor(bcx.tcx(), did), bcx) |dt_id| { trans_class_drop(bcx, v, dt_id, did, substs) } } @@ -744,11 +745,11 @@ fn make_free_glue(bcx: block, v: ValueRef, t: ty::t) { fn trans_class_drop(bcx: block, v0: ValueRef, dtor_did: ast::def_id, class_did: ast::def_id, substs: ty::substs) -> block { - let drop_flag = GEPi(bcx, v0, [0u, 0u]/~); - with_cond(bcx, IsNotNull(bcx, Load(bcx, drop_flag))) {|cx| + let drop_flag = GEPi(bcx, v0, ~[0u, 0u]); + do with_cond(bcx, IsNotNull(bcx, Load(bcx, drop_flag))) |cx| { let mut bcx = cx; // We have to cast v0 - let classptr = GEPi(bcx, v0, [0u, 1u]/~); + let classptr = GEPi(bcx, v0, ~[0u, 1u]); // Find and call the actual destructor let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs.tps); // The second argument is the "self" argument for drop @@ -759,13 +760,13 @@ fn trans_class_drop(bcx: block, v0: ValueRef, dtor_did: ast::def_id, // of the output pointer and the environment (self) assert(params.len() == 2u); let self_arg = PointerCast(bcx, v0, params[1u]); - let args = [bcx.fcx.llretptr, self_arg]/~; + let args = ~[bcx.fcx.llretptr, self_arg]; Call(bcx, dtor_addr, args); // Drop the fields for vec::eachi(ty::class_items_as_mutable_fields(bcx.tcx(), class_did, substs)) - {|i, fld| - let llfld_a = GEPi(bcx, classptr, [0u, i]/~); + |i, fld| { + let llfld_a = GEPi(bcx, classptr, ~[0u, i]); bcx = drop_ty(bcx, llfld_a, fld.mt.ty); } Store(bcx, C_u8(0u), drop_flag); @@ -806,7 +807,7 @@ fn make_drop_glue(bcx: block, v0: ValueRef, t: ty::t) { closure::make_fn_glue(bcx, v0, t, drop_ty) } ty::ty_iface(_, _) { - let llbox = Load(bcx, GEPi(bcx, v0, [0u, 1u]/~)); + let llbox = Load(bcx, GEPi(bcx, v0, ~[0u, 1u])); decr_refcnt_maybe_free(bcx, llbox, ty::mk_opaque_box(ccx.tcx)) } ty::ty_opaque_closure_ptr(ck) { @@ -823,7 +824,7 @@ fn make_drop_glue(bcx: block, v0: ValueRef, t: ty::t) { } fn get_res_dtor(ccx: @crate_ctxt, did: ast::def_id, - parent_id: ast::def_id, substs: [ty::t]/~) + parent_id: ast::def_id, substs: ~[ty::t]) -> ValueRef { let _icx = ccx.insn_ctxt("trans_res_dtor"); if (substs.len() > 0u) { @@ -853,7 +854,7 @@ fn maybe_validate_box(_cx: block, _box_ptr: ValueRef) { // let ccx = cx.ccx(); // warn_not_to_commit(ccx, "validate_box() is uncommented"); // let raw_box_ptr = PointerCast(cx, box_ptr, T_ptr(T_i8())); - // Call(cx, ccx.upcalls.validate_box, [raw_box_ptr]/~); + // Call(cx, ccx.upcalls.validate_box, ~[raw_box_ptr]); } fn decr_refcnt_maybe_free(bcx: block, box_ptr: ValueRef, t: ty::t) -> block { @@ -863,19 +864,19 @@ fn decr_refcnt_maybe_free(bcx: block, box_ptr: ValueRef, t: ty::t) -> block { let llbox_ty = T_opaque_box_ptr(ccx); let box_ptr = PointerCast(bcx, box_ptr, llbox_ty); - with_cond(bcx, IsNotNull(bcx, box_ptr)) {|bcx| - let rc_ptr = GEPi(bcx, box_ptr, [0u, abi::box_field_refcnt]/~); + do with_cond(bcx, IsNotNull(bcx, box_ptr)) |bcx| { + let rc_ptr = GEPi(bcx, box_ptr, ~[0u, abi::box_field_refcnt]); let rc = Sub(bcx, Load(bcx, rc_ptr), C_int(ccx, 1)); Store(bcx, rc, rc_ptr); let zero_test = ICmp(bcx, lib::llvm::IntEQ, C_int(ccx, 0), rc); - with_cond(bcx, zero_test) {|bcx| free_ty(bcx, box_ptr, t)} + with_cond(bcx, zero_test, |bcx| free_ty(bcx, box_ptr, t)) } } // Structural comparison: a rather involved form of glue. fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: str) { if cx.sess.opts.save_temps { - let _: () = str::as_c_str(s, {|buf| llvm::LLVMSetValueName(v, buf) }); + let _: () = str::as_c_str(s, |buf| llvm::LLVMSetValueName(v, buf)); } } @@ -886,7 +887,7 @@ enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, } fn compare_scalar_types(cx: block, lhs: ValueRef, rhs: ValueRef, t: ty::t, op: ast::binop) -> result { - let f = {|a|compare_scalar_values(cx, lhs, rhs, a, op)}; + let f = |a| compare_scalar_values(cx, lhs, rhs, a, op); alt ty::get(t).struct { ty::ty_nil { ret rslt(cx, f(nil_type)); } @@ -970,12 +971,12 @@ fn compare_scalar_values(cx: block, lhs: ValueRef, rhs: ValueRef, type val_pair_fn = fn@(block, ValueRef, ValueRef) -> block; type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> block; -fn load_inbounds(cx: block, p: ValueRef, idxs: [uint]/~) -> ValueRef { +fn load_inbounds(cx: block, p: ValueRef, idxs: ~[uint]) -> ValueRef { ret Load(cx, GEPi(cx, p, idxs)); } fn store_inbounds(cx: block, v: ValueRef, p: ValueRef, - idxs: [uint]/~) { + idxs: ~[uint]) { Store(cx, v, GEPi(cx, p, idxs)); } @@ -986,7 +987,7 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, fn iter_variant(cx: block, a_tup: ValueRef, variant: ty::variant_info, - tps: [ty::t]/~, tid: ast::def_id, + tps: ~[ty::t], tid: ast::def_id, f: val_and_ty_fn) -> block { let _icx = cx.insn_ctxt("iter_variant"); if variant.args.len() == 0u { ret cx; } @@ -997,7 +998,7 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, ty::ty_fn({inputs: args, _}) { let mut j = 0u; let v_id = variant.id; - for vec::each(args) {|a| + for vec::each(args) |a| { let llfldp_a = GEP_enum(cx, a_tup, tid, v_id, tps, j); let ty_subst = ty::subst_tps(ccx.tcx, tps, a.ty); cx = f(cx, llfldp_a, ty_subst); @@ -1015,8 +1016,8 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, let mut cx = cx; alt ty::get(t).struct { ty::ty_rec(fields) { - for vec::eachi(fields) {|i, fld| - let llfld_a = GEPi(cx, av, [0u, i]/~); + for vec::eachi(fields) |i, fld| { + let llfld_a = GEPi(cx, av, ~[0u, i]); cx = f(cx, llfld_a, fld.mt.ty); } } @@ -1026,8 +1027,8 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, cx = tvec::iter_vec_raw(cx, base, t, len, f); } ty::ty_tup(args) { - for vec::eachi(args) {|i, arg| - let llfld_a = GEPi(cx, av, [0u, i]/~); + for vec::eachi(args) |i, arg| { + let llfld_a = GEPi(cx, av, ~[0u, i]); cx = f(cx, llfld_a, arg); } } @@ -1044,8 +1045,8 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, let ccx = cx.ccx(); let llenumty = T_opaque_enum_ptr(ccx); let av_enum = PointerCast(cx, av, llenumty); - let lldiscrim_a_ptr = GEPi(cx, av_enum, [0u, 0u]/~); - let llunion_a_ptr = GEPi(cx, av_enum, [0u, 1u]/~); + let lldiscrim_a_ptr = GEPi(cx, av_enum, ~[0u, 0u]); + let llunion_a_ptr = GEPi(cx, av_enum, ~[0u, 1u]); let lldiscrim_a = Load(cx, lldiscrim_a_ptr); // NB: we must hit the discriminant first so that structural @@ -1055,7 +1056,7 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, Unreachable(unr_cx); let llswitch = Switch(cx, lldiscrim_a, unr_cx.llbb, n_variants); let next_cx = sub_block(cx, "enum-iter-next"); - for vec::each(*variants) {|variant| + for vec::each(*variants) |variant| { let variant_cx = sub_block(cx, "enum-iter-variant-" + @@ -1071,13 +1072,13 @@ fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t, ty::ty_class(did, substs) { // Take the drop bit into account let classptr = if is_some(ty::ty_dtor(cx.tcx(), did)) { - GEPi(cx, av, [0u, 1u]/~) + GEPi(cx, av, ~[0u, 1u]) } else { av }; for vec::eachi(ty::class_items_as_mutable_fields(cx.tcx(), did, substs)) - {|i, fld| - let llfld_a = GEPi(cx, classptr, [0u, i]/~); + |i, fld| { + let llfld_a = GEPi(cx, classptr, ~[0u, i]); cx = f(cx, llfld_a, fld.mt.ty); } } @@ -1194,15 +1195,16 @@ fn call_tydesc_glue_full(++cx: block, v: ValueRef, tydesc: ValueRef, let llfn = { alt static_glue_fn { none { - let llfnptr = GEPi(cx, tydesc, [0u, field]/~); + // Select out the glue function to call from the tydesc + let llfnptr = GEPi(cx, tydesc, ~[0u, field]); Load(cx, llfnptr) } some(sgf) { sgf } } }; - Call(cx, llfn, [C_null(T_ptr(T_nil())), C_null(T_ptr(T_nil())), - C_null(T_ptr(T_ptr(cx.ccx().tydesc_type))), llrawptr]/~); + Call(cx, llfn, ~[C_null(T_ptr(T_nil())), C_null(T_ptr(T_nil())), + C_null(T_ptr(T_ptr(cx.ccx().tydesc_type))), llrawptr]); } // See [Note-arg-mode] @@ -1228,13 +1230,13 @@ fn call_cmp_glue(bcx: block, lhs: ValueRef, rhs: ValueRef, t: ty::t, let llrawrhsptr = BitCast(bcx, llrhs, T_ptr(T_i8())); let lltydesc = get_tydesc_simple(bcx.ccx(), t); let lltydescs = - Load(bcx, GEPi(bcx, lltydesc, [0u, abi::tydesc_field_first_param]/~)); + Load(bcx, GEPi(bcx, lltydesc, ~[0u, abi::tydesc_field_first_param])); let llfn = bcx.ccx().upcalls.cmp_type; let llcmpresultptr = alloca(bcx, T_i1()); - Call(bcx, llfn, [llcmpresultptr, lltydesc, lltydescs, - llrawlhsptr, llrawrhsptr, llop]/~); + Call(bcx, llfn, ~[llcmpresultptr, lltydesc, lltydescs, + llrawlhsptr, llrawrhsptr, llop]); ret Load(bcx, llcmpresultptr); } @@ -1318,7 +1320,7 @@ fn call_memmove(cx: block, dst: ValueRef, src: ValueRef, let size = IntCast(cx, n_bytes, ccx.int_type); let align = C_i32(1i32); let volatile = C_bool(false); - Call(cx, memmove, [dst_ptr, src_ptr, size, align, volatile]/~); + Call(cx, memmove, ~[dst_ptr, src_ptr, size, align, volatile]); } fn memmove_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) { @@ -1352,7 +1354,7 @@ fn copy_val(cx: block, action: copy_action, dst: ValueRef, let dstcmp = load_if_immediate(cx, dst, t); let cast = PointerCast(cx, dstcmp, val_ty(src)); // Self-copy check - with_cond(cx, ICmp(cx, lib::llvm::IntNE, cast, src)) {|bcx| + do with_cond(cx, ICmp(cx, lib::llvm::IntNE, cast, src)) |bcx| { copy_val_no_check(bcx, action, dst, src, t) } } else { @@ -1503,8 +1505,8 @@ fn trans_unary(bcx: block, op: ast::unop, e: @ast::expr, ret trans_call_inner( bcx, un_expr.info(), fty, expr_ty(bcx, un_expr), - {|bcx| impl::trans_method_callee(bcx, callee_id, e, mentry) }, - arg_exprs([]/~), dest); + |bcx| impl::trans_method_callee(bcx, callee_id, e, mentry), + arg_exprs(~[]), dest); } _ {} } @@ -1579,8 +1581,8 @@ fn trans_compare(cx: block, op: ast::binop, lhs: ValueRef, fn cast_shift_expr_rhs(cx: block, op: ast::binop, lhs: ValueRef, rhs: ValueRef) -> ValueRef { cast_shift_rhs(op, lhs, rhs, - {|a,b|Trunc(cx, a, b)}, - {|a,b|ZExt(cx, a, b)}) + |a,b| Trunc(cx, a, b), + |a,b| ZExt(cx, a, b)) } fn cast_shift_const_rhs(op: ast::binop, @@ -1635,7 +1637,7 @@ fn fail_if_zero(cx: block, span: span, divmod: ast::binop, ty_to_str(cx.ccx().tcx, rhs_t)); } }; - with_cond(cx, is_zero) {|bcx| + do with_cond(cx, is_zero) |bcx| { trans_fail(bcx, some(span), text) } } @@ -1740,12 +1742,12 @@ fn trans_assign_op(bcx: block, ex: @ast::expr, op: ast::binop, let bcx = trans_call_inner( bcx, ex.info(), fty, expr_ty(bcx, ex), - {|bcx| + |bcx| { // FIXME (#2528): provide the already-computed address, not // the expr. impl::trans_method_callee(bcx, callee_id, dst, origin) }, - arg_exprs([src]/~), save_in(target)); + arg_exprs(~[src]), save_in(target)); ret move_val(bcx, DROP_EXISTING, lhs_res.val, {bcx: bcx, val: target, kind: owned}, @@ -1754,7 +1756,7 @@ fn trans_assign_op(bcx: block, ex: @ast::expr, op: ast::binop, _ {} } - // Special case for `+= [x]/~` + // Special case for `+= ~[x]` alt ty::get(t).struct { ty::ty_vec(_) { alt src.node { @@ -1822,7 +1824,7 @@ fn autoderef(cx: block, e_id: ast::node_id, alt ty::get(t1).struct { ty::ty_box(mt) { - let body = GEPi(cx, v1, [0u, abi::box_field_body]/~); + let body = GEPi(cx, v1, ~[0u, abi::box_field_body]); t1 = mt.ty; // Since we're changing levels of box indirection, we may have @@ -1868,7 +1870,7 @@ fn trans_lazy_binop(bcx: block, op: lazy_binop_ty, a: @ast::expr, b: @ast::expr, dest: dest) -> block { let _icx = bcx.insn_ctxt("trans_lazy_binop"); let {bcx: past_lhs, val: lhs} = { - with_scope_result(bcx, a.info(), "lhs") { |bcx| + do with_scope_result(bcx, a.info(), "lhs") |bcx| { trans_temp_expr(bcx, a) } }; @@ -1880,7 +1882,7 @@ fn trans_lazy_binop(bcx: block, op: lazy_binop_ty, a: @ast::expr, lazy_or { CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb); } } let {bcx: past_rhs, val: rhs} = { - with_scope_result(before_rhs, b.info(), "rhs") { |bcx| + do with_scope_result(before_rhs, b.info(), "rhs") |bcx| { trans_temp_expr(bcx, b) } }; @@ -1888,7 +1890,7 @@ fn trans_lazy_binop(bcx: block, op: lazy_binop_ty, a: @ast::expr, if past_rhs.unreachable { ret store_in_dest(join, lhs, dest); } Br(past_rhs, join.llbb); let phi = - Phi(join, T_bool(), [lhs, rhs]/~, [past_lhs.llbb, past_rhs.llbb]/~); + Phi(join, T_bool(), ~[lhs, rhs], ~[past_lhs.llbb, past_rhs.llbb]); ret store_in_dest(join, phi, dest); } @@ -1903,10 +1905,10 @@ fn trans_binary(bcx: block, op: ast::binop, lhs: @ast::expr, ret trans_call_inner( bcx, ex.info(), fty, expr_ty(bcx, ex), - {|bcx| + |bcx| { impl::trans_method_callee(bcx, callee_id, lhs, origin) }, - arg_exprs([rhs]/~), dest); + arg_exprs(~[rhs]), dest); } _ {} } @@ -1966,7 +1968,7 @@ fn trans_if(cx: block, cond: @ast::expr, thn: ast::blk, }; let else_bcx = trans_block_cleanups(else_bcx, else_cx); ret join_returns(cx, - [then_bcx, else_bcx]/~, [then_dest, else_dest]/~, dest); + ~[then_bcx, else_bcx], ~[then_dest, else_dest], dest); } fn trans_while(cx: block, cond: @ast::expr, body: ast::blk) @@ -2055,31 +2057,31 @@ fn normalize_for_monomorphization(tcx: ty::ctxt, ty: ty::t) -> option<ty::t> { ty::ty_box(mt) { some(ty::mk_opaque_box(tcx)) } ty::ty_fn(fty) { some(ty::mk_fn(tcx, {purity: ast::impure_fn, proto: fty.proto, - inputs: []/~, + inputs: ~[], output: ty::mk_nil(tcx), ret_style: ast::return_val, - constraints: []/~})) } + constraints: ~[]})) } ty::ty_iface(_, _) { some(ty::mk_fn(tcx, {purity: ast::impure_fn, proto: ast::proto_box, - inputs: []/~, + inputs: ~[], output: ty::mk_nil(tcx), ret_style: ast::return_val, - constraints: []/~})) } + constraints: ~[]})) } ty::ty_ptr(_) { some(ty::mk_uint(tcx)) } _ { none } } } -fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: [ty::t]/~, +fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: ~[ty::t], vtables: option<typeck::vtable_res>, - param_uses: option<[type_use::type_uses]/~>) -> mono_id { + param_uses: option<~[type_use::type_uses]>) -> mono_id { let precise_param_ids = alt vtables { some(vts) { let bounds = ty::lookup_item_type(ccx.tcx, item).bounds; let mut i = 0u; - vec::map2(*bounds, substs, {|bounds, subst| - let mut v = []/~; - for vec::each(*bounds) {|bound| + vec::map2(*bounds, substs, |bounds, subst| { + let mut v = ~[]; + for vec::each(*bounds) |bound| { alt bound { ty::bound_iface(_) { vec::push(v, impl::vtable_id(ccx, vts[i])); @@ -2092,12 +2094,12 @@ fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: [ty::t]/~, }) } none { - vec::map(substs, {|subst| mono_precise(subst, none)}) + vec::map(substs, |subst| mono_precise(subst, none)) } }; let param_ids = alt param_uses { some(uses) { - vec::map2(precise_param_ids, uses, {|id, uses| + vec::map2(precise_param_ids, uses, |id, uses| { alt check id { mono_precise(_, some(_)) { id } mono_precise(subst, none) { @@ -2123,33 +2125,35 @@ fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: [ty::t]/~, } fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, - real_substs: [ty::t]/~, + real_substs: ~[ty::t], vtables: option<typeck::vtable_res>, ref_id: option<ast::node_id>) -> {val: ValueRef, must_cast: bool} { let _icx = ccx.insn_ctxt("monomorphic_fn"); let mut must_cast = false; - let substs = vec::map(real_substs, {|t| + let substs = vec::map(real_substs, |t| { alt normalize_for_monomorphization(ccx.tcx, t) { some(t) { must_cast = true; t } none { t } } }); - #debug["monomorphic_fn(fn_id=%? (%s), real_substs=%?, substs=%?", - fn_id, ty::item_path_str(ccx.tcx, fn_id), - real_substs.map({|s| ty_to_str(ccx.tcx, s)}), - substs.map({|s| ty_to_str(ccx.tcx, s)})]; - - for real_substs.each() {|s| assert !ty::type_has_params(s); }; - for substs.each() {|s| assert !ty::type_has_params(s); }; + for real_substs.each() |s| { assert !ty::type_has_params(s); } + for substs.each() |s| { assert !ty::type_has_params(s); } let param_uses = type_use::type_uses_for(ccx, fn_id, substs.len()); let hash_id = make_mono_id(ccx, fn_id, substs, vtables, some(param_uses)); if vec::any(hash_id.params, - {|p| alt p { mono_precise(_, _) { false } _ { true } } }) { + |p| alt p { mono_precise(_, _) { false } _ { true } }) { must_cast = true; } + + #debug["monomorphic_fn(fn_id=%? (%s), real_substs=%?, substs=%?, \ + hash_id = %?", + fn_id, ty::item_path_str(ccx.tcx, fn_id), + real_substs.map(|s| ty_to_str(ccx.tcx, s)), + substs.map(|s| ty_to_str(ccx.tcx, s)), hash_id]; + alt ccx.monomorphized.find(hash_id) { some(val) { ret {val: val, must_cast: must_cast}; @@ -2161,9 +2165,9 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, let mut llitem_ty = tpt.ty; let map_node = session::expect(ccx.sess, ccx.tcx.items.find(fn_id.node), - {|| #fmt("While monomorphizing %?, couldn't find it in the item map \ + || #fmt("While monomorphizing %?, couldn't find it in the item map \ (may have attempted to monomorphize an item defined in a different \ - crate?)", fn_id)}); + crate?)", fn_id)); // Get the path so that we can create a symbol let (pt, name, span) = alt map_node { ast_map::node_item(i, pt) { (pt, i.ident, i.span) } @@ -2203,10 +2207,10 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, } ccx.monomorphizing.insert(fn_id, depth + 1u); - let pt = vec::append(*pt, [path_name(@ccx.names(*name))]/~); + let pt = vec::append(*pt, ~[path_name(@ccx.names(*name))]); let s = mangle_exported_name(ccx, pt, mono_ty); - let mk_lldecl = {|| + let mk_lldecl = || { let lldecl = decl_internal_cdecl_fn(ccx.llmod, s, llfty); ccx.monomorphized.insert(hash_id, lldecl); lldecl @@ -2231,7 +2235,7 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, } ast_map::node_variant(v, enum_item, _) { let tvs = ty::enum_variants(ccx.tcx, local_def(enum_item.id)); - let this_tv = option::get(vec::find(*tvs, {|tv| + let this_tv = option::get(vec::find(*tvs, |tv| { tv.id.node == fn_id.node})); let d = mk_lldecl(); set_inline_hint(d); @@ -2254,7 +2258,7 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, let tp_tys = ty::ty_params_to_tys(ccx.tcx, tps); trans_class_ctor(ccx, pt, ctor.node.dec, ctor.node.body, d, option::get_default(psubsts, - {tys:tp_tys, vtables: none, bounds: @[]/~}), + {tys:tp_tys, vtables: none, bounds: @~[]}), fn_id.node, parent_id, ctor.span); d } @@ -2299,7 +2303,7 @@ fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id) none { // Not seen yet alt csearch::maybe_get_item_ast( ccx.tcx, fn_id, - {|a,b,c,d| + |a,b,c,d| { astencode::decode_inlined_item(a, b, ccx.maps, c, d) }) { @@ -2327,7 +2331,7 @@ fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id) ast::item_enum(_, _, _) { let vs_here = ty::enum_variants(ccx.tcx, local_def(item.id)); let vs_there = ty::enum_variants(ccx.tcx, parent_id); - vec::iter2(*vs_here, *vs_there) {|here, there| + do vec::iter2(*vs_here, *vs_there) |here, there| { if there.id == fn_id { my_id = here.id.node; } ccx.external.insert(there.id, some(here.id.node)); } @@ -2348,7 +2352,7 @@ fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id) let llfn = get_item_val(ccx, mth.id); let path = vec::append( ty::item_path(ccx.tcx, impl_did), - [path_name(mth.ident)]/~); + ~[path_name(mth.ident)]); trans_fn(ccx, path, mth.decl, mth.body, llfn, impl_self(impl_ty), none, mth.id); } @@ -2366,14 +2370,14 @@ fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id) fn lval_static_fn(bcx: block, fn_id: ast::def_id, id: ast::node_id) -> lval_maybe_callee { let _icx = bcx.insn_ctxt("lval_static_fn"); - let vts = option::map(bcx.ccx().maps.vtable_map.find(id), {|vts| + let vts = option::map(bcx.ccx().maps.vtable_map.find(id), |vts| { impl::resolve_vtables_in_fn_ctxt(bcx.fcx, vts) }); lval_static_fn_inner(bcx, fn_id, id, node_id_type_params(bcx, id), vts) } fn lval_static_fn_inner(bcx: block, fn_id: ast::def_id, id: ast::node_id, - tys: [ty::t]/~, vtables: option<typeck::vtable_res>) + tys: ~[ty::t], vtables: option<typeck::vtable_res>) -> lval_maybe_callee { let _icx = bcx.insn_ctxt("lval_static_fn_inner"); let ccx = bcx.ccx(), tcx = ccx.tcx; @@ -2429,7 +2433,7 @@ fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef { // It's an external discriminant that we haven't seen yet. assert (vid.crate != ast::local_crate); let sym = csearch::get_symbol(ccx.sess.cstore, vid); - let gvar = str::as_c_str(sym, {|buf| + let gvar = str::as_c_str(sym, |buf| { llvm::LLVMAddGlobal(ccx.llmod, ccx.int_type, buf) }); lib::llvm::SetLinkage(gvar, lib::llvm::ExternalLinkage); @@ -2509,7 +2513,7 @@ fn trans_var(cx: block, def: ast::def, id: ast::node_id)-> lval_maybe_callee { // Nullary variant. let enum_ty = node_id_type(cx, id); let llenumptr = alloc_ty(cx, enum_ty); - let lldiscrimptr = GEPi(cx, llenumptr, [0u, 0u]/~); + let lldiscrimptr = GEPi(cx, llenumptr, ~[0u, 0u]); let lldiscrim_gv = lookup_discriminant(ccx, vid); let lldiscrim = Load(cx, lldiscrim_gv); Store(cx, lldiscrim, lldiscrimptr); @@ -2569,9 +2573,9 @@ fn trans_rec_field_inner(bcx: block, val: ValueRef, ty: ty::t, (If any other code does the same thing, that's a bug */ let val = if llderef { - GEPi(bcx, GEPi(bcx, val, [0u, 1u]/~), [0u, ix]/~) + GEPi(bcx, GEPi(bcx, val, ~[0u, 1u]), ~[0u, ix]) } - else { GEPi(bcx, val, [0u, ix]/~) }; + else { GEPi(bcx, val, ~[0u, ix]) }; ret {bcx: bcx, val: val, kind: owned}; } @@ -2618,11 +2622,11 @@ fn trans_index(cx: block, ex: @ast::expr, base: @ast::expr, #debug("trans_index: len %s", val_str(bcx.ccx().tn, len)); let bounds_check = ICmp(bcx, lib::llvm::IntUGE, scaled_ix, len); - let bcx = with_cond(bcx, bounds_check) {|bcx| + let bcx = do with_cond(bcx, bounds_check) |bcx| { // fail: bad bounds check. trans_fail(bcx, some(ex.span), "bounds check") }; - let elt = InBoundsGEP(bcx, base, [ix_val]/~); + let elt = InBoundsGEP(bcx, base, ~[ix_val]); ret lval_owned(bcx, PointerCast(bcx, elt, T_ptr(llunitty))); } @@ -2706,11 +2710,11 @@ fn trans_lval(cx: block, e: @ast::expr) -> lval_result { let val = alt check ty::get(t).struct { ty::ty_box(_) { let non_gc_val = non_gc_box_cast(sub.bcx, sub.val); - GEPi(sub.bcx, non_gc_val, [0u, abi::box_field_body]/~) + GEPi(sub.bcx, non_gc_val, ~[0u, abi::box_field_body]) } ty::ty_uniq(_) { let non_gc_val = non_gc_box_cast(sub.bcx, sub.val); - GEPi(sub.bcx, non_gc_val, [0u, abi::box_field_body]/~) + GEPi(sub.bcx, non_gc_val, ~[0u, abi::box_field_body]) } ty::ty_enum(_, _) { let ety = expr_ty(cx, e); @@ -2857,7 +2861,7 @@ fn trans_cast(cx: block, e: @ast::expr, id: ast::node_id, let cx = e_res.bcx; let llenumty = T_opaque_enum_ptr(ccx); let av_enum = PointerCast(cx, e_res.val, llenumty); - let lldiscrim_a_ptr = GEPi(cx, av_enum, [0u, 0u]/~); + let lldiscrim_a_ptr = GEPi(cx, av_enum, ~[0u, 0u]); let lldiscrim_a = Load(cx, lldiscrim_a_ptr); alt k_out { cast_integral {int_cast(e_res.bcx, ll_t_out, @@ -2889,7 +2893,7 @@ fn trans_loop_body(bcx: block, e: @ast::expr, ret_flag: option<ValueRef>, // temp_cleanups: cleanups that should run only if failure occurs before the // call takes place: fn trans_arg_expr(cx: block, arg: ty::arg, lldestty: TypeRef, e: @ast::expr, - &temp_cleanups: [ValueRef]/~, ret_flag: option<ValueRef>, + &temp_cleanups: ~[ValueRef], ret_flag: option<ValueRef>, derefs: uint) -> result { #debug("+++ trans_arg_expr on %s", expr_to_str(e)); @@ -2994,8 +2998,8 @@ fn trans_arg_expr(cx: block, arg: ty::arg, lldestty: TypeRef, e: @ast::expr, } // when invoking a method, an argument of type @T or ~T can be implicltly -// converted to an argument of type &T. Similarly, [T]/~ can be converted to -// [T]/& and so on. If such a conversion (called borrowing) is necessary, +// converted to an argument of type &T. Similarly, ~[T] can be converted to +// &[T] and so on. If such a conversion (called borrowing) is necessary, // then the borrowings table will have an appropriate entry inserted. This // routine consults this table and performs these adaptations. It returns a // new location for the borrowed result as well as a new type for the argument @@ -3012,7 +3016,7 @@ fn adapt_borrowed_value(lv: lval_result, alt ty::get(e_ty).struct { ty::ty_uniq(mt) | ty::ty_box(mt) { let box_ptr = load_value_from_lval_result(lv, e_ty); - let body_ptr = GEPi(bcx, box_ptr, [0u, abi::box_field_body]/~); + let body_ptr = GEPi(bcx, box_ptr, ~[0u, abi::box_field_body]); let rptr_ty = ty::mk_rptr(bcx.tcx(), ty::re_static, mt); ret {lv: lval_temp(bcx, body_ptr), ty: rptr_ty}; } @@ -3030,14 +3034,14 @@ fn adapt_borrowed_value(lv: lval_result, let unit_ty = ty::sequence_element_type(ccx.tcx, e_ty); let llunit_ty = type_of(ccx, unit_ty); let (base, len) = tvec::get_base_and_len(bcx, val, e_ty); - let p = alloca(bcx, T_struct([T_ptr(llunit_ty), ccx.int_type]/~)); + let p = alloca(bcx, T_struct(~[T_ptr(llunit_ty), ccx.int_type])); #debug("adapt_borrowed_value: adapting %s to %s", val_str(bcx.ccx().tn, val), val_str(bcx.ccx().tn, p)); - Store(bcx, base, GEPi(bcx, p, [0u, abi::slice_elt_base]/~)); - Store(bcx, len, GEPi(bcx, p, [0u, abi::slice_elt_len]/~)); + Store(bcx, base, GEPi(bcx, p, ~[0u, abi::slice_elt_base])); + Store(bcx, len, GEPi(bcx, p, ~[0u, abi::slice_elt_len])); // this isn't necessarily the type that rust would assign but it's // close enough for trans purposes, as it will have the same runtime @@ -3058,8 +3062,8 @@ fn adapt_borrowed_value(lv: lval_result, } enum call_args { - arg_exprs([@ast::expr]/~), - arg_vals([ValueRef]/~) + arg_exprs(~[@ast::expr]), + arg_vals(~[ValueRef]) } // NB: must keep 4 fns in sync: @@ -3070,11 +3074,11 @@ enum call_args { // - trans_args fn trans_args(cx: block, llenv: ValueRef, args: call_args, fn_ty: ty::t, dest: dest, ret_flag: option<ValueRef>) - -> {bcx: block, args: [ValueRef]/~, retslot: ValueRef} { + -> {bcx: block, args: ~[ValueRef], retslot: ValueRef} { let _icx = cx.insn_ctxt("trans_args"); - let mut temp_cleanups = []/~; + let mut temp_cleanups = ~[]; let arg_tys = ty::ty_fn_args(fn_ty); - let mut llargs: [ValueRef]/~ = []/~; + let mut llargs: ~[ValueRef] = ~[]; let ccx = cx.ccx(); let mut bcx = cx; @@ -3105,7 +3109,7 @@ fn trans_args(cx: block, llenv: ValueRef, args: call_args, fn_ty: ty::t, arg_exprs(es) { let llarg_tys = type_of_explicit_args(ccx, arg_tys); let last = es.len() - 1u; - vec::iteri(es) {|i, e| + do vec::iteri(es) |i, e| { let r = trans_arg_expr(bcx, arg_tys[i], llarg_tys[i], e, temp_cleanups, if i == last { ret_flag } else { none }, 0u); @@ -3121,7 +3125,7 @@ fn trans_args(cx: block, llenv: ValueRef, args: call_args, fn_ty: ty::t, // now that all arguments have been successfully built, we can revoke any // temporary cleanups, as they are only needed if argument construction // should fail (for example, cleanup of copy mode args). - vec::iter(temp_cleanups) {|c| + do vec::iter(temp_cleanups) |c| { revoke_clean(bcx, c) } @@ -3136,14 +3140,14 @@ fn trans_call(in_cx: block, call_ex: @ast::expr, f: @ast::expr, let _icx = in_cx.insn_ctxt("trans_call"); trans_call_inner( in_cx, call_ex.info(), expr_ty(in_cx, f), node_id_type(in_cx, id), - {|cx| trans_callee(cx, f)}, args, dest) + |cx| trans_callee(cx, f), args, dest) } fn body_contains_ret(body: ast::blk) -> bool { let cx = {mut found: false}; visit::visit_block(body, cx, visit::mk_vt(@{ - visit_item: {|_i, _cx, _v|}, - visit_expr: {|e: @ast::expr, cx: {mut found: bool}, v| + visit_item: |_i, _cx, _v| { }, + visit_expr: |e: @ast::expr, cx: {mut found: bool}, v| { if !cx.found { alt e.node { ast::expr_ret(_) { cx.found = true; } @@ -3165,7 +3169,7 @@ fn trans_call_inner( args: call_args, dest: dest) -> block { - with_scope(in_cx, call_info, "call") {|cx| + do with_scope(in_cx, call_info, "call") |cx| { let ret_in_loop = alt args { arg_exprs(args) { args.len() > 0u && alt vec::last(args).node { ast::expr_loop_body(@{node: ast::expr_fn_block(_, body, _), _}) { @@ -3199,9 +3203,9 @@ fn trans_call_inner( faddr = load_if_immediate(bcx, faddr, fn_expr_ty); } let pair = faddr; - faddr = GEPi(bcx, pair, [0u, abi::fn_field_code]/~); + faddr = GEPi(bcx, pair, ~[0u, abi::fn_field_code]); faddr = Load(bcx, faddr); - let llclosure = GEPi(bcx, pair, [0u, abi::fn_field_box]/~); + let llclosure = GEPi(bcx, pair, ~[0u, abi::fn_field_box]); Load(bcx, llclosure) } }; @@ -3233,8 +3237,8 @@ fn trans_call_inner( if ty::type_is_bot(ret_ty) { Unreachable(bcx); } else if ret_in_loop { - bcx = with_cond(bcx, Load(bcx, option::get(ret_flag))) {|bcx| - option::iter(copy bcx.fcx.loop_ret) {|lret| + bcx = do with_cond(bcx, Load(bcx, option::get(ret_flag))) |bcx| { + do option::iter(copy bcx.fcx.loop_ret) |lret| { Store(bcx, C_bool(true), lret.flagptr); Store(bcx, C_bool(false), bcx.fcx.llretptr); } @@ -3247,7 +3251,7 @@ fn trans_call_inner( } } -fn invoke(bcx: block, llfn: ValueRef, llargs: [ValueRef]/~) -> block { +fn invoke(bcx: block, llfn: ValueRef, llargs: ~[ValueRef]) -> block { let _icx = bcx.insn_ctxt("invoke_"); if bcx.unreachable { ret bcx; } if need_invoke(bcx) { @@ -3272,7 +3276,7 @@ fn need_invoke(bcx: block) -> bool { loop { alt cur.kind { block_scope(inf) { - for vec::each(inf.cleanups) {|cleanup| + for vec::each(inf.cleanups) |cleanup| { alt cleanup { clean(_, cleanup_type) | clean_temp(_, _, cleanup_type) { if cleanup_type == normal_exit_and_unwind { @@ -3293,7 +3297,7 @@ fn need_invoke(bcx: block) -> bool { fn have_cached_lpad(bcx: block) -> bool { let mut res = false; - in_lpad_scope_cx(bcx) {|inf| + do in_lpad_scope_cx(bcx) |inf| { alt inf.landing_pad { some(_) { res = true; } none { res = false; } @@ -3321,7 +3325,7 @@ fn get_landing_pad(bcx: block) -> BasicBlockRef { let _icx = bcx.insn_ctxt("get_landing_pad"); let mut cached = none, pad_bcx = bcx; // Guaranteed to be set below - in_lpad_scope_cx(bcx) {|inf| + do in_lpad_scope_cx(bcx) |inf| { // If there is a valid landing pad still around, use it alt copy inf.landing_pad { some(target) { cached = some(target); } @@ -3335,7 +3339,7 @@ fn get_landing_pad(bcx: block) -> BasicBlockRef { // The landing pad return type (the type being propagated). Not sure what // this represents but it's determined by the personality function and // this is what the EH proposal example uses. - let llretty = T_struct([T_ptr(T_i8()), T_i32()]/~); + let llretty = T_struct(~[T_ptr(T_i8()), T_i32()]); // The exception handling personality function. This is the C++ // personality function __gxx_personality_v0, wrapped in our naming // convention. @@ -3348,7 +3352,7 @@ fn get_landing_pad(bcx: block) -> BasicBlockRef { // Because we may have unwound across a stack boundary, we must call into // the runtime to figure out which stack segment we are on and place the // stack limit back into the TLS. - Call(pad_bcx, bcx.ccx().upcalls.reset_stack_limit, []/~); + Call(pad_bcx, bcx.ccx().upcalls.reset_stack_limit, ~[]); // We store the retval in a function-central alloca, so that calls to // Resume can find it. @@ -3366,30 +3370,30 @@ fn get_landing_pad(bcx: block) -> BasicBlockRef { ret pad_bcx.llbb; } -fn trans_tup(bcx: block, elts: [@ast::expr]/~, dest: dest) -> block { +fn trans_tup(bcx: block, elts: ~[@ast::expr], dest: dest) -> block { let _icx = bcx.insn_ctxt("trans_tup"); let mut bcx = bcx; let addr = alt dest { ignore { - for vec::each(elts) {|ex| bcx = trans_expr(bcx, ex, ignore); } + for vec::each(elts) |ex| { bcx = trans_expr(bcx, ex, ignore); } ret bcx; } save_in(pos) { pos } _ { bcx.tcx().sess.bug("trans_tup: weird dest"); } }; - let mut temp_cleanups = []/~; - for vec::eachi(elts) {|i, e| - let dst = GEPi(bcx, addr, [0u, i]/~); + let mut temp_cleanups = ~[]; + for vec::eachi(elts) |i, e| { + let dst = GEPi(bcx, addr, ~[0u, i]); let e_ty = expr_ty(bcx, e); bcx = trans_expr_save_in(bcx, e, dst); add_clean_temp_mem(bcx, dst, e_ty); vec::push(temp_cleanups, dst); } - for vec::each(temp_cleanups) {|cleanup| revoke_clean(bcx, cleanup); } + for vec::each(temp_cleanups) |cleanup| { revoke_clean(bcx, cleanup); } ret bcx; } -fn trans_rec(bcx: block, fields: [ast::field]/~, +fn trans_rec(bcx: block, fields: ~[ast::field], base: option<@ast::expr>, id: ast::node_id, dest: dest) -> block { let _icx = bcx.insn_ctxt("trans_rec"); @@ -3397,7 +3401,7 @@ fn trans_rec(bcx: block, fields: [ast::field]/~, let mut bcx = bcx; let addr = alt check dest { ignore { - for vec::each(fields) {|fld| + for vec::each(fields) |fld| { bcx = trans_expr(bcx, fld.node.expr, ignore); } ret bcx; @@ -3407,12 +3411,12 @@ fn trans_rec(bcx: block, fields: [ast::field]/~, let ty_fields = alt check ty::get(t).struct { ty::ty_rec(f) { f } }; - let mut temp_cleanups = []/~; - for fields.each {|fld| - let ix = option::get(vec::position(ty_fields, {|ft| + let mut temp_cleanups = ~[]; + for fields.each |fld| { + let ix = option::get(vec::position(ty_fields, |ft| { str::eq(*fld.node.ident, *ft.ident) })); - let dst = GEPi(bcx, addr, [0u, ix]/~); + let dst = GEPi(bcx, addr, ~[0u, ix]); bcx = trans_expr_save_in(bcx, fld.node.expr, dst); add_clean_temp_mem(bcx, dst, ty_fields[ix].mt.ty); vec::push(temp_cleanups, dst); @@ -3422,10 +3426,10 @@ fn trans_rec(bcx: block, fields: [ast::field]/~, let {bcx: cx, val: base_val} = trans_temp_expr(bcx, bexp); bcx = cx; // Copy over inherited fields - for ty_fields.eachi {|i, tf| - if !vec::any(fields, {|f| str::eq(*f.node.ident, *tf.ident)}) { - let dst = GEPi(bcx, addr, [0u, i]/~); - let base = GEPi(bcx, base_val, [0u, i]/~); + for ty_fields.eachi |i, tf| { + if !vec::any(fields, |f| str::eq(*f.node.ident, *tf.ident)) { + let dst = GEPi(bcx, addr, ~[0u, i]); + let base = GEPi(bcx, base_val, ~[0u, i]); let val = load_if_immediate(bcx, base, tf.mt.ty); bcx = copy_val(bcx, INIT, dst, val, tf.mt.ty); } @@ -3436,7 +3440,7 @@ fn trans_rec(bcx: block, fields: [ast::field]/~, // Now revoke the cleanups as we pass responsibility for the data // structure on to the caller - for temp_cleanups.each {|cleanup| revoke_clean(bcx, cleanup); } + for temp_cleanups.each |cleanup| { revoke_clean(bcx, cleanup); } ret bcx; } @@ -3581,7 +3585,7 @@ fn trans_expr(bcx: block, e: @ast::expr, dest: dest) -> block { ret alt::trans_alt(bcx, e, expr, arms, mode, dest); } ast::expr_block(blk) { - ret with_scope(bcx, blk.info(), "block-expr body") {|bcx| + ret do with_scope(bcx, blk.info(), "block-expr body") |bcx| { trans_block(bcx, blk, dest) }; } @@ -3649,10 +3653,8 @@ fn trans_expr(bcx: block, e: @ast::expr, dest: dest) -> block { ret trans_call_inner( bcx, e.info(), fty, expr_ty(bcx, e), - { |bcx| - impl::trans_method_callee(bcx, callee_id, base, origin) - }, - arg_exprs([idx]/~), dest); + |bcx| impl::trans_method_callee(bcx, callee_id, base, origin), + arg_exprs(~[idx]), dest); } // These return nothing @@ -3693,7 +3695,7 @@ fn trans_expr(bcx: block, e: @ast::expr, dest: dest) -> block { otherwise. */ let c = get_extern_const(bcx.ccx().externs, bcx.ccx().llmod, "check_claims", T_bool()); - ret with_cond(bcx, Load(bcx, c)) {|bcx| + ret do with_cond(bcx, Load(bcx, c)) |bcx| { trans_check_expr(bcx, e, a, "Claim") }; } @@ -3766,12 +3768,12 @@ fn trans_expr(bcx: block, e: @ast::expr, dest: dest) -> block { let llval_ty = type_of(ccx, expr_ty(bcx, val)); let args = - [llsize_of(ccx, llval_ty), llalign_of(ccx, llval_ty)]/~; + ~[llsize_of(ccx, llval_ty), llalign_of(ccx, llval_ty)]; let origin = bcx.ccx().maps.method_map.get(alloc_id); let bcx = trans_call_inner( bcx, e.info(), node_id_type(bcx, alloc_id), void_ty, - {|bcx| impl::trans_method_callee(bcx, alloc_id, - pool, origin) }, + |bcx| impl::trans_method_callee(bcx, alloc_id, + pool, origin), arg_vals(args), save_in(voidval)); @@ -3869,10 +3871,10 @@ fn trans_log(log_ex: @ast::expr, lvl: @ast::expr, } let modpath = vec::append( - [path_mod(ccx.link_meta.name)]/~, - vec::filter(bcx.fcx.path, {|e| + ~[path_mod(ccx.link_meta.name)], + vec::filter(bcx.fcx.path, |e| alt e { path_mod(_) { true } _ { false } } - })); + )); let modname = path_str(modpath); let global = if ccx.module_data.contains_key(modname) { @@ -3880,7 +3882,7 @@ fn trans_log(log_ex: @ast::expr, lvl: @ast::expr, } else { let s = link::mangle_internal_name_by_path_and_seq( ccx, modpath, @"loglevel"); - let global = str::as_c_str(s, {|buf| + let global = str::as_c_str(s, |buf| { llvm::LLVMAddGlobal(ccx.llmod, T_i32(), buf) }); llvm::LLVMSetGlobalConstant(global, False); @@ -3891,20 +3893,21 @@ fn trans_log(log_ex: @ast::expr, lvl: @ast::expr, }; let current_level = Load(bcx, global); let {bcx, val: level} = { - with_scope_result(bcx, lvl.info(), "level") {|bcx| + do with_scope_result(bcx, lvl.info(), "level") |bcx| { trans_temp_expr(bcx, lvl) } }; - with_cond(bcx, ICmp(bcx, lib::llvm::IntUGE, current_level, level)) {|bcx| - with_scope(bcx, log_ex.info(), "log") {|bcx| + do with_cond(bcx, ICmp(bcx, lib::llvm::IntUGE, current_level, level)) + |bcx| { + do with_scope(bcx, log_ex.info(), "log") |bcx| { let {bcx, val, _} = trans_temp_expr(bcx, e); let e_ty = expr_ty(bcx, e); let tydesc = get_tydesc_simple(ccx, e_ty); // Call the polymorphic log function. let val = spill_if_immediate(bcx, val, e_ty); let val = PointerCast(bcx, val, T_ptr(T_i8())); - Call(bcx, ccx.upcalls.log_type, [tydesc, val, level]/~); + Call(bcx, ccx.upcalls.log_type, ~[tydesc, val, level]); bcx } } @@ -3915,11 +3918,11 @@ fn trans_check_expr(bcx: block, chk_expr: @ast::expr, let _icx = bcx.insn_ctxt("trans_check_expr"); let expr_str = s + " " + expr_to_str(pred_expr) + " failed"; let {bcx, val} = { - with_scope_result(bcx, chk_expr.info(), "check") {|bcx| + do with_scope_result(bcx, chk_expr.info(), "check") |bcx| { trans_temp_expr(bcx, pred_expr) } }; - with_cond(bcx, Not(bcx, val)) {|bcx| + do with_cond(bcx, Not(bcx, val)) |bcx| { trans_fail(bcx, some(pred_expr.span), expr_str) } } @@ -3971,7 +3974,7 @@ fn trans_trace(bcx: block, sp_opt: option<span>, trace_str: str) { let ccx = bcx.ccx(); let V_trace_str = PointerCast(bcx, V_trace_str, T_ptr(T_i8())); let V_filename = PointerCast(bcx, V_filename, T_ptr(T_i8())); - let args = [V_trace_str, V_filename, C_int(ccx, V_line)]/~; + let args = ~[V_trace_str, V_filename, C_int(ccx, V_line)]; Call(bcx, ccx.upcalls.trace, args); } @@ -4000,7 +4003,7 @@ fn trans_fail_value(bcx: block, sp_opt: option<span>, }; let V_str = PointerCast(bcx, V_fail_str, T_ptr(T_i8())); let V_filename = PointerCast(bcx, V_filename, T_ptr(T_i8())); - let args = [V_str, V_filename, C_int(ccx, V_line)]/~; + let args = ~[V_str, V_filename, C_int(ccx, V_line)]; let bcx = invoke(bcx, bcx.ccx().upcalls._fail, args); Unreachable(bcx); ret bcx; @@ -4128,7 +4131,7 @@ fn trans_stmt(cx: block, s: ast::stmt) -> block { ast::stmt_decl(d, _) { alt d.node { ast::decl_local(locals) { - for vec::each(locals) {|local| + for vec::each(locals) |local| { bcx = init_local(bcx, local); if cx.sess().opts.extra_debuginfo { debuginfo::create_local_var(bcx, local); @@ -4151,19 +4154,19 @@ fn new_block(cx: fn_ctxt, parent: option<block>, +kind: block_kind, let s = if cx.ccx.sess.opts.save_temps || cx.ccx.sess.opts.debuginfo { cx.ccx.names(name) } else { "" }; - let llbb: BasicBlockRef = str::as_c_str(s, {|buf| + let llbb: BasicBlockRef = str::as_c_str(s, |buf| { llvm::LLVMAppendBasicBlock(cx.llfn, buf) }); let bcx = mk_block(llbb, parent, kind, opt_node_info, cx); - option::iter(parent) {|cx| + do option::iter(parent) |cx| { if cx.unreachable { Unreachable(bcx); } }; ret bcx; } fn simple_block_scope() -> block_kind { - block_scope({loop_break: none, mut cleanups: []/~, - mut cleanup_paths: []/~, mut landing_pad: none}) + block_scope({loop_break: none, mut cleanups: ~[], + mut cleanup_paths: ~[], mut landing_pad: none}) } // Use this when you're at the top block of a function or the like. @@ -4183,8 +4186,8 @@ fn loop_scope_block(bcx: block, loop_break: block, n: str, opt_node_info: option<node_info>) -> block { ret new_block(bcx.fcx, some(bcx), block_scope({ loop_break: some(loop_break), - mut cleanups: []/~, - mut cleanup_paths: []/~, + mut cleanups: ~[], + mut cleanup_paths: ~[], mut landing_pad: none }), n, opt_node_info); } @@ -4219,7 +4222,7 @@ fn trans_block_cleanups_(bcx: block, cleanup_cx: block, is_lpad: bool) -> alt check cleanup_cx.kind { block_scope({cleanups, _}) { let cleanups = copy cleanups; - vec::riter(cleanups) {|cu| + do vec::riter(cleanups) |cu| { alt cu { clean(cfn, cleanup_type) | clean_temp(_, cfn, cleanup_type) { // Some types don't need to be cleaned up during @@ -4255,7 +4258,7 @@ fn cleanup_and_leave(bcx: block, upto: option<BasicBlockRef>, alt cur.kind { block_scope(inf) if inf.cleanups.len() > 0u { for vec::find(inf.cleanup_paths, - {|cp| cp.target == leave}).each {|cp| + |cp| cp.target == leave).each |cp| { Br(bcx, cp.dest); ret; } @@ -4323,12 +4326,12 @@ fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block { } fn block_locals(b: ast::blk, it: fn(@ast::local)) { - for vec::each(b.node.stmts) {|s| + for vec::each(b.node.stmts) |s| { alt s.node { ast::stmt_decl(d, _) { alt d.node { ast::decl_local(locals) { - for vec::each(locals) {|local| it(local); } + for vec::each(locals) |local| { it(local); } } _ {/* fall through */ } } @@ -4357,8 +4360,8 @@ fn alloc_local(cx: block, local: @ast::local) -> block { }; let val = alloc_ty(cx, t); if cx.sess().opts.debuginfo { - option::iter(simple_name) {|name| - str::as_c_str(*name, {|buf| + do option::iter(simple_name) |name| { + str::as_c_str(*name, |buf| { llvm::LLVMSetValueName(val, buf) }); } @@ -4371,8 +4374,8 @@ fn trans_block(bcx: block, b: ast::blk, dest: dest) -> block { let _icx = bcx.insn_ctxt("trans_block"); let mut bcx = bcx; - block_locals(b) {|local| bcx = alloc_local(bcx, local); }; - for vec::each(b.node.stmts) {|s| + do block_locals(b) |local| { bcx = alloc_local(bcx, local); }; + for vec::each(b.node.stmts) |s| { debuginfo::update_source_pos(bcx, b.span); bcx = trans_stmt(bcx, *s); } @@ -4390,12 +4393,12 @@ fn trans_block(bcx: block, b: ast::blk, dest: dest) // Creates the standard set of basic blocks for a function fn mk_standard_basic_blocks(llfn: ValueRef) -> {sa: BasicBlockRef, ca: BasicBlockRef, rt: BasicBlockRef} { - {sa: str::as_c_str("static_allocas", {|buf| - llvm::LLVMAppendBasicBlock(llfn, buf) }), - ca: str::as_c_str("load_env", {|buf| - llvm::LLVMAppendBasicBlock(llfn, buf) }), - rt: str::as_c_str("return", {|buf| - llvm::LLVMAppendBasicBlock(llfn, buf) })} + {sa: str::as_c_str("static_allocas", + |buf| llvm::LLVMAppendBasicBlock(llfn, buf)), + ca: str::as_c_str("load_env", + |buf| llvm::LLVMAppendBasicBlock(llfn, buf)), + rt: str::as_c_str("return", + |buf| llvm::LLVMAppendBasicBlock(llfn, buf))} } @@ -4450,7 +4453,7 @@ fn new_fn_ctxt(ccx: @crate_ctxt, path: path, llfndecl: ValueRef, // field of the fn_ctxt with fn create_llargs_for_fn_args(cx: fn_ctxt, ty_self: self_arg, - args: [ast::arg]/~) { + args: ~[ast::arg]) { let _icx = cx.insn_ctxt("create_llargs_for_fn_args"); // Skip the implicit arguments 0, and 1. let mut arg_n = first_real_arg; @@ -4463,7 +4466,7 @@ fn create_llargs_for_fn_args(cx: fn_ctxt, // Populate the llargs field of the function context with the ValueRefs // that we get from llvm::LLVMGetParam for each argument. - for vec::each(args) {|arg| + for vec::each(args) |arg| { let llarg = llvm::LLVMGetParam(cx.llfn, arg_n as c_uint); assert (llarg as int != 0); // Note that this uses local_mem even for things passed by value. @@ -4474,8 +4477,8 @@ fn create_llargs_for_fn_args(cx: fn_ctxt, } } -fn copy_args_to_allocas(fcx: fn_ctxt, bcx: block, args: [ast::arg]/~, - arg_tys: [ty::arg]/~) -> block { +fn copy_args_to_allocas(fcx: fn_ctxt, bcx: block, args: ~[ast::arg], + arg_tys: ~[ty::arg]) -> block { let _icx = fcx.insn_ctxt("copy_args_to_allocas"); let tcx = bcx.tcx(); let mut arg_n: uint = 0u, bcx = bcx; @@ -4483,7 +4486,7 @@ fn copy_args_to_allocas(fcx: fn_ctxt, bcx: block, args: [ast::arg]/~, tcx.sess.bug("someone forgot\ to document an invariant in copy_args_to_allocas!"); }; - for vec::each(arg_tys) {|arg| + for vec::each(arg_tys) |arg| { let id = args[arg_n].id; let argval = alt fcx.llargs.get(id) { local_mem(v) { v } _ { epic_fail() } }; @@ -4594,11 +4597,13 @@ fn trans_fn(ccx: @crate_ctxt, else { {sec: 0i64, nsec: 0i32} }; let _icx = ccx.insn_ctxt("trans_fn"); trans_closure(ccx, path, decl, body, llfndecl, ty_self, - param_substs, id, {|fcx| - if ccx.sess.opts.extra_debuginfo { - debuginfo::create_function(fcx); - } - }, {|_bcx|}); + param_substs, id, + |fcx| { + if ccx.sess.opts.extra_debuginfo { + debuginfo::create_function(fcx); + } + }, + |_bcx| { }); if do_time { let end = time::get_time(); log_fn_time(ccx, path_str(path), start, end); @@ -4611,18 +4616,17 @@ fn trans_enum_variant(ccx: @crate_ctxt, enum_id: ast::node_id, llfndecl: ValueRef) { let _icx = ccx.insn_ctxt("trans_enum_variant"); // Translate variant arguments to function arguments. - let fn_args = vec::map(variant.node.args, {|varg| + let fn_args = vec::map(variant.node.args, |varg| {mode: ast::expl(ast::by_copy), ty: varg.ty, ident: @"arg", - id: varg.id} - }); - let fcx = new_fn_ctxt_w_id(ccx, []/~, llfndecl, variant.node.id, + id: varg.id}); + let fcx = new_fn_ctxt_w_id(ccx, ~[], llfndecl, variant.node.id, param_substs, none); create_llargs_for_fn_args(fcx, no_self, fn_args); let ty_param_substs = alt param_substs { some(substs) { substs.tys } - none { []/~ } + none { ~[] } }; let bcx = top_scope_block(fcx, none), lltop = bcx.llbb; let arg_tys = ty::ty_fn_args(node_id_type(bcx, variant.node.id)); @@ -4634,13 +4638,13 @@ fn trans_enum_variant(ccx: @crate_ctxt, enum_id: ast::node_id, } else { let llenumptr = PointerCast(bcx, fcx.llretptr, T_opaque_enum_ptr(ccx)); - let lldiscrimptr = GEPi(bcx, llenumptr, [0u, 0u]/~); + let lldiscrimptr = GEPi(bcx, llenumptr, ~[0u, 0u]); Store(bcx, C_int(ccx, disr), lldiscrimptr); - GEPi(bcx, llenumptr, [0u, 1u]/~) + GEPi(bcx, llenumptr, ~[0u, 1u]) }; let t_id = local_def(enum_id); let v_id = local_def(variant.node.id); - for vec::eachi(variant.node.args) {|i, va| + for vec::eachi(variant.node.args) |i, va| { let lldestptr = GEP_enum(bcx, llblobptr, t_id, v_id, ty_param_substs, i); // If this argument to this function is a enum, it'll have come in to @@ -4826,10 +4830,10 @@ fn trans_class_ctor(ccx: @crate_ctxt, path: path, decl: ast::fn_decl, parent_id)) { // Initialize the drop flag let one = C_u8(1u); - let flag = GEPi(bcx_top, selfptr, [0u, 0u]/~); + let flag = GEPi(bcx_top, selfptr, ~[0u, 0u]); Store(bcx_top, one, flag); // Select the pointer to the class itself - GEPi(bcx_top, selfptr, [0u, 1u]/~) + GEPi(bcx_top, selfptr, ~[0u, 1u]) } else { selfptr }; @@ -4839,9 +4843,9 @@ fn trans_class_ctor(ccx: @crate_ctxt, path: path, decl: ast::fn_decl, let mut bcx = bcx_top; // Initialize fields to zero so init assignments can validly // drop their LHS - for fields.each {|field| + for fields.each |field| { let ix = field_idx_strict(bcx.tcx(), sp, field.ident, fields); - bcx = zero_mem(bcx, GEPi(bcx, valptr, [0u, ix]/~), field.mt.ty); + bcx = zero_mem(bcx, GEPi(bcx, valptr, ~[0u, ix]), field.mt.ty); } // note we don't want to take *or* drop self. @@ -4867,14 +4871,14 @@ fn trans_class_dtor(ccx: @crate_ctxt, path: path, /* Look up the parent class's def_id */ let mut class_ty = ty::lookup_item_type(tcx, parent_id).ty; /* Substitute in the class type if necessary */ - option::iter(psubsts) {|ss| + do option::iter(psubsts) |ss| { class_ty = ty::subst_tps(tcx, ss.tys, class_ty); } /* The dtor takes a (null) output pointer, and a self argument, and returns () */ - let lldty = T_fn([T_ptr(type_of(ccx, ty::mk_nil(tcx))), - T_ptr(type_of(ccx, class_ty))]/~, + let lldty = T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(tcx))), + T_ptr(type_of(ccx, class_ty))], llvm::LLVMVoidType()); let s = get_dtor_symbol(ccx, path, dtor_id, psubsts); @@ -4885,7 +4889,7 @@ fn trans_class_dtor(ccx: @crate_ctxt, path: path, /* If we're monomorphizing, register the monomorphized decl for the dtor */ - option::iter(hash_id) {|h_id| + do option::iter(hash_id) |h_id| { ccx.monomorphized.insert(h_id, lldecl); } /* Translate the dtor body */ @@ -4906,15 +4910,15 @@ fn trans_item(ccx: @crate_ctxt, item: ast::item) { foreign::trans_extern_fn(ccx, vec::append( *path, - [path_name(item.ident)]/~), + ~[path_name(item.ident)]), decl, body, llfndecl, item.id); } else if tps.len() == 0u { let llfndecl = get_item_val(ccx, item.id); trans_fn(ccx, - vec::append(*path, [path_name(item.ident)]/~), + vec::append(*path, ~[path_name(item.ident)]), decl, body, llfndecl, no_self, none, item.id); } else { - for vec::each(body.node.stmts) {|stmt| + for vec::each(body.node.stmts) |stmt| { alt stmt.node { ast::stmt_decl(@{node: ast::decl_item(i), _}, _) { trans_item(ccx, *i); @@ -4935,7 +4939,7 @@ fn trans_item(ccx: @crate_ctxt, item: ast::item) { let degen = variants.len() == 1u; let vi = ty::enum_variants(ccx.tcx, local_def(item.id)); let mut i = 0; - for vec::each(variants) {|variant| + for vec::each(variants) |variant| { if variant.node.args.len() > 0u { let llfn = get_item_val(ccx, variant.node.id); trans_enum_variant(ccx, item.id, variant, @@ -4958,11 +4962,11 @@ fn trans_item(ccx: @crate_ctxt, item: ast::item) { if tps.len() == 0u { let psubsts = {tys: ty::ty_params_to_tys(ccx.tcx, tps), vtables: none, - bounds: @[]/~}; + bounds: @~[]}; trans_class_ctor(ccx, *path, ctor.node.dec, ctor.node.body, get_item_val(ccx, ctor.node.id), psubsts, ctor.node.id, local_def(item.id), ctor.span); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { trans_class_dtor(ccx, *path, dtor.node.body, dtor.node.id, none, none, local_def(item.id)); }; @@ -4984,7 +4988,7 @@ fn trans_item(ccx: @crate_ctxt, item: ast::item) { // and control visibility. fn trans_mod(ccx: @crate_ctxt, m: ast::_mod) { let _icx = ccx.insn_ctxt("trans_mod"); - for vec::each(m.items) {|item| trans_item(ccx, *item); } + for vec::each(m.items) |item| { trans_item(ccx, *item); } } fn get_pair_fn_ty(llpairty: TypeRef) -> TypeRef { @@ -5020,7 +5024,7 @@ fn register_fn_fuller(ccx: @crate_ctxt, sp: span, path: path, llfn } -// Create a _rust_main(args: [str]/~) function which will be called from the +// Create a _rust_main(args: ~[str]) function which will be called from the // runtime rust_start function fn create_main_wrapper(ccx: @crate_ctxt, sp: span, main_llfn: ValueRef, main_node_type: ty::t) { @@ -5047,18 +5051,18 @@ fn create_main_wrapper(ccx: @crate_ctxt, sp: span, main_llfn: ValueRef, {mode: ast::expl(ast::by_val), ty: ty::mk_vec(ccx.tcx, {ty: unit_ty, mutbl: ast::m_imm})}; let nt = ty::mk_nil(ccx.tcx); - let llfty = type_of_fn(ccx, [vecarg_ty]/~, nt); + let llfty = type_of_fn(ccx, ~[vecarg_ty], nt); let llfdecl = decl_fn(ccx.llmod, "_rust_main", lib::llvm::CCallConv, llfty); - let fcx = new_fn_ctxt(ccx, []/~, llfdecl, none); + let fcx = new_fn_ctxt(ccx, ~[], llfdecl, none); let bcx = top_scope_block(fcx, none); let lltop = bcx.llbb; let lloutputarg = llvm::LLVMGetParam(llfdecl, 0 as c_uint); let llenvarg = llvm::LLVMGetParam(llfdecl, 1 as c_uint); - let mut args = [lloutputarg, llenvarg]/~; + let mut args = ~[lloutputarg, llenvarg]; if takes_argv { vec::push(args, llvm::LLVMGetParam(llfdecl, 2 as c_uint)); } @@ -5075,20 +5079,20 @@ fn create_main_wrapper(ccx: @crate_ctxt, sp: span, main_llfn: ValueRef, fn main_name() -> str { ret "WinMain@16"; } #[cfg(unix)] fn main_name() -> str { ret "main"; } - let llfty = T_fn([ccx.int_type, ccx.int_type]/~, ccx.int_type); + let llfty = T_fn(~[ccx.int_type, ccx.int_type], ccx.int_type); let llfn = decl_cdecl_fn(ccx.llmod, main_name(), llfty); - let llbb = str::as_c_str("top", {|buf| + let llbb = str::as_c_str("top", |buf| { llvm::LLVMAppendBasicBlock(llfn, buf) }); let bld = ccx.builder.B; llvm::LLVMPositionBuilderAtEnd(bld, llbb); let crate_map = ccx.crate_map; - let start_ty = T_fn([val_ty(rust_main), ccx.int_type, ccx.int_type, - val_ty(crate_map)]/~, ccx.int_type); + let start_ty = T_fn(~[val_ty(rust_main), ccx.int_type, ccx.int_type, + val_ty(crate_map)], ccx.int_type); let start = decl_cdecl_fn(ccx.llmod, "rust_start", start_ty); - let args = [rust_main, llvm::LLVMGetParam(llfn, 0 as c_uint), - llvm::LLVMGetParam(llfn, 1 as c_uint), crate_map]/~; + let args = ~[rust_main, llvm::LLVMGetParam(llfn, 0 as c_uint), + llvm::LLVMGetParam(llfn, 1 as c_uint), crate_map]; let result = unsafe { llvm::LLVMBuildCall(bld, start, vec::unsafe::to_ptr(args), args.len() as c_uint, noname()) @@ -5110,9 +5114,9 @@ fn create_real_fn_pair(cx: block, llfnty: TypeRef, llfn: ValueRef, fn fill_fn_pair(bcx: block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) { let ccx = bcx.ccx(); - let code_cell = GEPi(bcx, pair, [0u, abi::fn_field_code]/~); + let code_cell = GEPi(bcx, pair, ~[0u, abi::fn_field_code]); Store(bcx, llfn, code_cell); - let env_cell = GEPi(bcx, pair, [0u, abi::fn_field_box]/~); + let env_cell = GEPi(bcx, pair, ~[0u, abi::fn_field_box]); let llenvblobptr = PointerCast(bcx, llenvptr, T_opaque_box_ptr(ccx)); Store(bcx, llenvblobptr, env_cell); } @@ -5122,7 +5126,7 @@ fn item_path(ccx: @crate_ctxt, i: @ast::item) -> path { *alt check ccx.tcx.items.get(i.id) { ast_map::node_item(_, p) { p } }, - [path_name(i.ident)]/~) + ~[path_name(i.ident)]) } /* If there's already a symbol for the dtor with <id> and substs <substs>, @@ -5135,7 +5139,7 @@ fn get_dtor_symbol(ccx: @crate_ctxt, path: path, id: ast::node_id, none if is_none(substs) { let s = mangle_exported_name( ccx, - vec::append(path, [path_name(@ccx.names("dtor"))]/~), + vec::append(path, ~[path_name(@ccx.names("dtor"))]), t); ccx.item_symbols.insert(id, s); s @@ -5149,7 +5153,7 @@ fn get_dtor_symbol(ccx: @crate_ctxt, path: path, id: ast::node_id, mangle_exported_name( ccx, vec::append(path, - [path_name(@ccx.names("dtor"))]/~), + ~[path_name(@ccx.names("dtor"))]), mono_ty) } none { @@ -5169,12 +5173,12 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { let mut exprt = false; let val = alt check ccx.tcx.items.get(id) { ast_map::node_item(i, pth) { - let my_path = vec::append(*pth, [path_name(i.ident)]/~); + let my_path = vec::append(*pth, ~[path_name(i.ident)]); alt check i.node { ast::item_const(_, _) { let typ = ty::node_id_to_type(ccx.tcx, i.id); let s = mangle_exported_name(ccx, my_path, typ); - let g = str::as_c_str(s, {|buf| + let g = str::as_c_str(s, |buf| { llvm::LLVMAddGlobal(ccx.llmod, type_of(ccx, typ), buf) }); ccx.item_symbols.insert(i.id, s); @@ -5194,8 +5198,8 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { ast_map::node_method(m, impl_id, pth) { exprt = true; let mty = ty::node_id_to_type(ccx.tcx, id); - let pth = vec::append(*pth, [path_name(@ccx.names("meth")), - path_name(m.ident)]/~); + let pth = vec::append(*pth, ~[path_name(@ccx.names("meth")), + path_name(m.ident)]); let llfn = register_fn_full(ccx, m.span, pth, id, mty); set_inline_hint_if_appr(m.attrs, llfn); llfn @@ -5203,11 +5207,11 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { ast_map::node_foreign_item(ni, _, pth) { exprt = true; register_fn(ccx, ni.span, - vec::append(*pth, [path_name(ni.ident)]/~), + vec::append(*pth, ~[path_name(ni.ident)]), ni.id) } ast_map::node_ctor(nm, tps, ctor, _, pt) { - let my_path = vec::append(*pt, [path_name(nm)]/~); + let my_path = vec::append(*pt, ~[path_name(nm)]); register_fn(ccx, ctor.span, my_path, ctor.node.id) } ast_map::node_dtor(tps, dt, parent_id, pt) { @@ -5220,8 +5224,8 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { let class_ty = ty::lookup_item_type(tcx, parent_id).ty; // This code shouldn't be reached if the class is generic assert !ty::type_has_params(class_ty); - let lldty = T_fn([T_ptr(type_of(ccx, ty::mk_nil(tcx))), - T_ptr(type_of(ccx, class_ty))]/~, + let lldty = T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(tcx))), + T_ptr(type_of(ccx, class_ty))], llvm::LLVMVoidType()); let s = get_dtor_symbol(ccx, *pt, dt.node.id, none); @@ -5234,8 +5238,8 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef { ast_map::node_variant(v, enm, pth) { assert v.node.args.len() != 0u; let pth = vec::append(*pth, - [path_name(enm.ident), - path_name(v.node.name)]/~); + ~[path_name(enm.ident), + path_name(v.node.name)]); let llfn = alt check enm.node { ast::item_enum(_, _, _) { register_fn(ccx, v.span, pth, id) @@ -5263,13 +5267,13 @@ fn trans_constant(ccx: @crate_ctxt, it: @ast::item) { node: it.id}); let mut i = 0; let path = item_path(ccx, it); - for vec::each(variants) {|variant| - let p = vec::append(path, [path_name(variant.node.name), - path_name(@"discrim")]/~); + for vec::each(variants) |variant| { + let p = vec::append(path, ~[path_name(variant.node.name), + path_name(@"discrim")]); let s = mangle_exported_name(ccx, p, ty::mk_int(ccx.tcx)); let disr_val = vi[i].disr_val; note_unique_llvm_symbol(ccx, s); - let discrim_gvar = str::as_c_str(s, {|buf| + let discrim_gvar = str::as_c_str(s, |buf| { llvm::LLVMAddGlobal(ccx.llmod, ccx.int_type, buf) }); llvm::LLVMSetInitializer(discrim_gvar, C_int(ccx, disr_val)); @@ -5286,7 +5290,7 @@ fn trans_constant(ccx: @crate_ctxt, it: @ast::item) { fn trans_constants(ccx: @crate_ctxt, crate: @ast::crate) { visit::visit_crate(*crate, (), visit::mk_simple_visitor(@{ - visit_item: {|a|trans_constant(ccx, a)} + visit_item: |a| trans_constant(ccx, a) with *visit::default_simple_visitor() })); } @@ -5301,23 +5305,23 @@ fn p2i(ccx: @crate_ctxt, v: ValueRef) -> ValueRef { } fn declare_intrinsics(llmod: ModuleRef) -> hashmap<str, ValueRef> { - let T_memmove32_args: [TypeRef]/~ = - [T_ptr(T_i8()), T_ptr(T_i8()), T_i32(), T_i32(), T_i1()]/~; - let T_memmove64_args: [TypeRef]/~ = - [T_ptr(T_i8()), T_ptr(T_i8()), T_i64(), T_i32(), T_i1()]/~; - let T_memset32_args: [TypeRef]/~ = - [T_ptr(T_i8()), T_i8(), T_i32(), T_i32(), T_i1()]/~; - let T_memset64_args: [TypeRef]/~ = - [T_ptr(T_i8()), T_i8(), T_i64(), T_i32(), T_i1()]/~; - let T_trap_args: [TypeRef]/~ = []/~; - let T_frameaddress_args: [TypeRef]/~ = [T_i32()]/~; + let T_memmove32_args: ~[TypeRef] = + ~[T_ptr(T_i8()), T_ptr(T_i8()), T_i32(), T_i32(), T_i1()]; + let T_memmove64_args: ~[TypeRef] = + ~[T_ptr(T_i8()), T_ptr(T_i8()), T_i64(), T_i32(), T_i1()]; + let T_memset32_args: ~[TypeRef] = + ~[T_ptr(T_i8()), T_i8(), T_i32(), T_i32(), T_i1()]; + let T_memset64_args: ~[TypeRef] = + ~[T_ptr(T_i8()), T_i8(), T_i64(), T_i32(), T_i1()]; + let T_trap_args: ~[TypeRef] = ~[]; + let T_frameaddress_args: ~[TypeRef] = ~[T_i32()]; let gcroot = decl_cdecl_fn(llmod, "llvm.gcroot", - T_fn([T_ptr(T_ptr(T_i8())), T_ptr(T_i8())]/~, + T_fn(~[T_ptr(T_ptr(T_i8())), T_ptr(T_i8())], T_void())); let gcread = decl_cdecl_fn(llmod, "llvm.gcread", - T_fn([T_ptr(T_i8()), T_ptr(T_ptr(T_i8()))]/~, + T_fn(~[T_ptr(T_i8()), T_ptr(T_ptr(T_i8()))], T_void())); let memmove32 = decl_cdecl_fn(llmod, "llvm.memmove.p0i8.p0i8.i32", @@ -5352,17 +5356,17 @@ fn declare_dbg_intrinsics(llmod: ModuleRef, intrinsics: hashmap<str, ValueRef>) { let declare = decl_cdecl_fn(llmod, "llvm.dbg.declare", - T_fn([T_metadata(), T_metadata()]/~, T_void())); + T_fn(~[T_metadata(), T_metadata()], T_void())); let value = decl_cdecl_fn(llmod, "llvm.dbg.value", - T_fn([T_metadata(), T_i64(), T_metadata()]/~, + T_fn(~[T_metadata(), T_i64(), T_metadata()], T_void())); intrinsics.insert("llvm.dbg.declare", declare); intrinsics.insert("llvm.dbg.value", value); } fn trap(bcx: block) { - let v: [ValueRef]/~ = []/~; + let v: ~[ValueRef] = ~[]; alt bcx.ccx().intrinsics.find("llvm.trap") { some(x) { Call(bcx, x, v); } _ { bcx.sess().bug("unbound llvm.trap in trap"); } @@ -5370,19 +5374,19 @@ fn trap(bcx: block) { } fn create_module_map(ccx: @crate_ctxt) -> ValueRef { - let elttype = T_struct([ccx.int_type, ccx.int_type]/~); + let elttype = T_struct(~[ccx.int_type, ccx.int_type]); let maptype = T_array(elttype, ccx.module_data.size() + 1u); - let map = str::as_c_str("_rust_mod_map", {|buf| + let map = str::as_c_str("_rust_mod_map", |buf| { llvm::LLVMAddGlobal(ccx.llmod, maptype, buf) }); lib::llvm::SetLinkage(map, lib::llvm::InternalLinkage); - let mut elts: [ValueRef]/~ = []/~; - for ccx.module_data.each {|key, val| - let elt = C_struct([p2i(ccx, C_cstr(ccx, key)), - p2i(ccx, val)]/~); + let mut elts: ~[ValueRef] = ~[]; + for ccx.module_data.each |key, val| { + let elt = C_struct(~[p2i(ccx, C_cstr(ccx, key)), + p2i(ccx, val)]); vec::push(elts, elt); - }; - let term = C_struct([C_int(ccx, 0), C_int(ccx, 0)]/~); + } + let term = C_struct(~[C_int(ccx, 0), C_int(ccx, 0)]); vec::push(elts, term); llvm::LLVMSetInitializer(map, C_array(elttype, elts)); ret map; @@ -5401,8 +5405,8 @@ fn decl_crate_map(sess: session::session, mapmeta: link_meta, } else { "toplevel" }; let sym_name = "_rust_crate_map_" + mapname; let arrtype = T_array(int_type, n_subcrates as uint); - let maptype = T_struct([int_type, arrtype]/~); - let map = str::as_c_str(sym_name, {|buf| + let maptype = T_struct(~[int_type, arrtype]); + let map = str::as_c_str(sym_name, |buf| { llvm::LLVMAddGlobal(llmod, maptype, buf) }); lib::llvm::SetLinkage(map, lib::llvm::ExternalLinkage); @@ -5410,7 +5414,7 @@ fn decl_crate_map(sess: session::session, mapmeta: link_meta, } fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { - let mut subcrates: [ValueRef]/~ = []/~; + let mut subcrates: ~[ValueRef] = ~[]; let mut i = 1; let cstore = ccx.sess.cstore; while cstore::have_crate_data(cstore, i) { @@ -5418,7 +5422,7 @@ fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { let nm = "_rust_crate_map_" + cdata.name + "_" + *cstore::get_crate_vers(cstore, i) + "_" + *cstore::get_crate_hash(cstore, i); - let cr = str::as_c_str(nm, {|buf| + let cr = str::as_c_str(nm, |buf| { llvm::LLVMAddGlobal(ccx.llmod, ccx.int_type, buf) }); vec::push(subcrates, p2i(ccx, cr)); @@ -5426,22 +5430,22 @@ fn fill_crate_map(ccx: @crate_ctxt, map: ValueRef) { } vec::push(subcrates, C_int(ccx, 0)); llvm::LLVMSetInitializer(map, C_struct( - [p2i(ccx, create_module_map(ccx)), - C_array(ccx.int_type, subcrates)]/~)); + ~[p2i(ccx, create_module_map(ccx)), + C_array(ccx.int_type, subcrates)])); } fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) -> encoder::encode_parms { let encode_inlined_item = - {|a,b,c,d|astencode::encode_inlined_item(a, b, c, d, cx.maps)}; + |a,b,c,d| astencode::encode_inlined_item(a, b, c, d, cx.maps); ret { diag: cx.sess.diagnostic(), tcx: cx.tcx, reachable: cx.reachable, reexports: reexports(cx), - impl_map: {|a|impl_map(cx, a)}, + impl_map: |a| impl_map(cx, a), item_symbols: cx.item_symbols, discrim_symbols: cx.discrim_symbols, link_meta: cx.link_meta, @@ -5449,10 +5453,10 @@ fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) encode_inlined_item: encode_inlined_item }; - fn reexports(cx: @crate_ctxt) -> [(str, ast::def_id)]/~ { - let mut reexports = []/~; - for cx.exp_map.each {|exp_id, defs| - for defs.each {|def| + fn reexports(cx: @crate_ctxt) -> ~[(str, ast::def_id)] { + let mut reexports = ~[]; + for cx.exp_map.each |exp_id, defs| { + for defs.each |def| { if !def.reexp { cont; } let path = alt check cx.tcx.items.get(exp_id) { ast_map::node_export(_, path) { @@ -5466,11 +5470,10 @@ fn crate_ctxt_to_encode_parms(cx: @crate_ctxt) } fn impl_map(cx: @crate_ctxt, - id: ast::node_id) -> [(ast::ident, ast::def_id)]/~ { - let mut result = []/~; - for list::each(cx.maps.impl_map.get(id)) { - |impls| - vec::push_all(result, (*impls).map({|i| (i.ident, i.did) })); + id: ast::node_id) -> ~[(ast::ident, ast::def_id)] { + let mut result = ~[]; + for list::each(cx.maps.impl_map.get(id)) |impls| { + vec::push_all(result, (*impls).map(|i| (i.ident, i.did))); } ret result; } @@ -5480,23 +5483,23 @@ fn write_metadata(cx: @crate_ctxt, crate: @ast::crate) { if !cx.sess.building_library { ret; } let encode_parms = crate_ctxt_to_encode_parms(cx); let llmeta = C_bytes(encoder::encode_metadata(encode_parms, crate)); - let llconst = C_struct([llmeta]/~); - let mut llglobal = str::as_c_str("rust_metadata", {|buf| + let llconst = C_struct(~[llmeta]); + let mut llglobal = str::as_c_str("rust_metadata", |buf| { llvm::LLVMAddGlobal(cx.llmod, val_ty(llconst), buf) }); llvm::LLVMSetInitializer(llglobal, llconst); - str::as_c_str(cx.sess.targ_cfg.target_strs.meta_sect_name, {|buf| + str::as_c_str(cx.sess.targ_cfg.target_strs.meta_sect_name, |buf| { llvm::LLVMSetSection(llglobal, buf) }); lib::llvm::SetLinkage(llglobal, lib::llvm::InternalLinkage); let t_ptr_i8 = T_ptr(T_i8()); llglobal = llvm::LLVMConstBitCast(llglobal, t_ptr_i8); - let llvm_used = str::as_c_str("llvm.used", {|buf| + let llvm_used = str::as_c_str("llvm.used", |buf| { llvm::LLVMAddGlobal(cx.llmod, T_array(t_ptr_i8, 1u), buf) }); lib::llvm::SetLinkage(llvm_used, lib::llvm::AppendingLinkage); - llvm::LLVMSetInitializer(llvm_used, C_array(t_ptr_i8, [llglobal]/~)); + llvm::LLVMSetInitializer(llvm_used, C_array(t_ptr_i8, ~[llglobal])); } // Writes the current ABI version into the crate. @@ -5524,7 +5527,7 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt, // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 let llmod_id = *link_meta.name + ".rc"; - let llmod = str::as_c_str(llmod_id, {|buf| + let llmod = str::as_c_str(llmod_id, |buf| { llvm::LLVMModuleCreateWithNameInContext (buf, llvm::LLVMGetGlobalContext()) }); @@ -5532,10 +5535,10 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt, let targ_triple = sess.targ_cfg.target_strs.target_triple; let _: () = str::as_c_str(data_layout, - {|buf| llvm::LLVMSetDataLayout(llmod, buf) }); + |buf| llvm::LLVMSetDataLayout(llmod, buf)); let _: () = str::as_c_str(targ_triple, - {|buf| llvm::LLVMSetTarget(llmod, buf) }); + |buf| llvm::LLVMSetTarget(llmod, buf)); let targ_cfg = sess.targ_cfg; let td = mk_target_data(sess.targ_cfg.target_strs.data_layout); let tn = mk_type_names(); @@ -5575,10 +5578,10 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt, discrim_symbols: int_hash::<str>(), tydescs: ty::new_ty_hash(), external: ast_util::new_def_hash(), - monomorphized: map::hashmap(hash_mono_id, {|a, b| a == b}), + monomorphized: map::hashmap(hash_mono_id, |a, b| a == b), monomorphizing: ast_util::new_def_hash(), type_use_cache: ast_util::new_def_hash(), - vtables: map::hashmap(hash_mono_id, {|a, b| a == b}), + vtables: map::hashmap(hash_mono_id, |a, b| a == b), const_cstr_cache: map::str_hash(), module_data: str_hash::<ValueRef>(), lltypes: ty::new_ty_hash(), @@ -5594,9 +5597,9 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt, mut n_glues_created: 0u, mut n_null_glues: 0u, mut n_real_glues: 0u, - llvm_insn_ctxt: @mut []/~, + llvm_insn_ctxt: @mut ~[], llvm_insns: str_hash(), - fn_times: @mut []/~}, + fn_times: @mut ~[]}, upcalls: upcall::declare_upcalls(targ_cfg, tn, tydesc_type, llmod), @@ -5642,14 +5645,14 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt, // FIXME (#2280): this temporary shouldn't be // necessary, but seems to be, for borrowing. let times = copy *ccx.stats.fn_times; - for vec::each(times) {|timing| + for vec::each(times) |timing| { io::println(#fmt("time: %s took %d ms", timing.ident, timing.time)); } } if ccx.sess.count_llvm_insns() { - for ccx.stats.llvm_insns.each { |k, v| + for ccx.stats.llvm_insns.each |k, v| { io::println(#fmt("%-7u %s", v, k)); } } diff --git a/src/rustc/middle/trans/build.rs b/src/rustc/middle/trans/build.rs index b5eae6caa84..8c261a402b4 100644 --- a/src/rustc/middle/trans/build.rs +++ b/src/rustc/middle/trans/build.rs @@ -5,7 +5,7 @@ import syntax::codemap; import codemap::span; import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef}; import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False, - CallConv, TypeKind}; + CallConv, TypeKind, AtomicBinOp, AtomicOrdering}; import common::*; import driver::session::session; @@ -79,7 +79,7 @@ fn Ret(cx: block, V: ValueRef) { llvm::LLVMBuildRet(B(cx), V); } -fn AggregateRet(cx: block, RetVals: [ValueRef]/~) { +fn AggregateRet(cx: block, RetVals: ~[ValueRef]) { if cx.unreachable { ret; } assert (!cx.terminated); cx.terminated = true; @@ -134,14 +134,14 @@ fn noname() -> *libc::c_char unsafe { ret unsafe::reinterpret_cast(ptr::addr_of(cnull)); } -fn Invoke(cx: block, Fn: ValueRef, Args: [ValueRef]/~, +fn Invoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], Then: BasicBlockRef, Catch: BasicBlockRef) { if cx.unreachable { ret; } assert (!cx.terminated); cx.terminated = true; #debug["Invoke(%s with arguments (%s))", val_str(cx.ccx().tn, Fn), - str::connect(vec::map(Args, {|a|val_str(cx.ccx().tn, a)}), + str::connect(vec::map(Args, |a| val_str(cx.ccx().tn, a)), ", ")]; unsafe { count_insn(cx, "invoke"); @@ -151,7 +151,7 @@ fn Invoke(cx: block, Fn: ValueRef, Args: [ValueRef]/~, } } -fn FastInvoke(cx: block, Fn: ValueRef, Args: [ValueRef]/~, +fn FastInvoke(cx: block, Fn: ValueRef, Args: ~[ValueRef], Then: BasicBlockRef, Catch: BasicBlockRef) { if cx.unreachable { ret; } assert (!cx.terminated); @@ -417,7 +417,7 @@ fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) { llvm::LLVMBuildStore(B(cx), Val, Ptr); } -fn GEP(cx: block, Pointer: ValueRef, Indices: [ValueRef]/~) -> ValueRef { +fn GEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef { if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); } unsafe { count_insn(cx, "gep"); @@ -428,14 +428,14 @@ fn GEP(cx: block, Pointer: ValueRef, Indices: [ValueRef]/~) -> ValueRef { // Simple wrapper around GEP that takes an array of ints and wraps them // in C_i32() -fn GEPi(cx: block, base: ValueRef, ixs: [uint]/~) -> ValueRef { - let mut v: [ValueRef]/~ = []/~; - for vec::each(ixs) {|i| vec::push(v, C_i32(i as i32)); } +fn GEPi(cx: block, base: ValueRef, ixs: ~[uint]) -> ValueRef { + let mut v: ~[ValueRef] = ~[]; + for vec::each(ixs) |i| { vec::push(v, C_i32(i as i32)); } count_insn(cx, "gepi"); ret InBoundsGEP(cx, base, v); } -fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: [ValueRef]/~) -> +fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: ~[ValueRef]) -> ValueRef { if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); } unsafe { @@ -607,7 +607,7 @@ fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef { ret llvm::LLVMBuildPhi(B(cx), Ty, noname()); } -fn Phi(cx: block, Ty: TypeRef, vals: [ValueRef]/~, bbs: [BasicBlockRef]/~) +fn Phi(cx: block, Ty: TypeRef, vals: ~[ValueRef], bbs: ~[BasicBlockRef]) -> ValueRef { if cx.unreachable { ret llvm::LLVMGetUndef(Ty); } assert vals.len() == bbs.len(); @@ -654,32 +654,32 @@ fn add_comment(bcx: block, text: str) { if !ccx.sess.no_asm_comments() { let sanitized = str::replace(text, "$", ""); let comment_text = "# " + sanitized; - let asm = str::as_c_str(comment_text, {|c| - str::as_c_str("", {|e| + let asm = str::as_c_str(comment_text, |c| { + str::as_c_str("", |e| { count_insn(bcx, "inlineasm"); - llvm::LLVMConstInlineAsm(T_fn([]/~, T_void()), c, e, + llvm::LLVMConstInlineAsm(T_fn(~[], T_void()), c, e, False, False) }) }); - Call(bcx, asm, []/~); + Call(bcx, asm, ~[]); } } -fn Call(cx: block, Fn: ValueRef, Args: [ValueRef]/~) -> ValueRef { +fn Call(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef { if cx.unreachable { ret _UndefReturn(cx, Fn); } unsafe { count_insn(cx, "call"); #debug["Call(Fn=%s, Args=%?)", val_str(cx.ccx().tn, Fn), - Args.map { |arg| val_str(cx.ccx().tn, arg) }]; + Args.map(|arg| val_str(cx.ccx().tn, arg))]; ret llvm::LLVMBuildCall(B(cx), Fn, vec::unsafe::to_ptr(Args), Args.len() as c_uint, noname()); } } -fn FastCall(cx: block, Fn: ValueRef, Args: [ValueRef]/~) -> ValueRef { +fn FastCall(cx: block, Fn: ValueRef, Args: ~[ValueRef]) -> ValueRef { if cx.unreachable { ret _UndefReturn(cx, Fn); } unsafe { count_insn(cx, "fastcall"); @@ -690,7 +690,7 @@ fn FastCall(cx: block, Fn: ValueRef, Args: [ValueRef]/~) -> ValueRef { } } -fn CallWithConv(cx: block, Fn: ValueRef, Args: [ValueRef]/~, +fn CallWithConv(cx: block, Fn: ValueRef, Args: ~[ValueRef], Conv: CallConv) -> ValueRef { if cx.unreachable { ret _UndefReturn(cx, Fn); } unsafe { @@ -775,11 +775,11 @@ fn Trap(cx: block) { let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(b); let FN: ValueRef = llvm::LLVMGetBasicBlockParent(BB); let M: ModuleRef = llvm::LLVMGetGlobalParent(FN); - let T: ValueRef = str::as_c_str("llvm.trap", {|buf| + let T: ValueRef = str::as_c_str("llvm.trap", |buf| { llvm::LLVMGetNamedFunction(M, buf) }); assert (T as int != 0); - let Args: [ValueRef]/~ = []/~; + let Args: ~[ValueRef] = ~[]; unsafe { count_insn(cx, "trap"); llvm::LLVMBuildCall(b, T, vec::unsafe::to_ptr(Args), @@ -807,6 +807,13 @@ fn Resume(cx: block, Exn: ValueRef) -> ValueRef { ret llvm::LLVMBuildResume(B(cx), Exn); } +// Atomic Operations +fn AtomicRMW(cx: block, op: AtomicBinOp, + dst: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { + llvm::LLVMBuildAtomicRMW(B(cx), op, dst, src, order) +} + // // Local Variables: // mode: rust diff --git a/src/rustc/middle/trans/closure.rs b/src/rustc/middle/trans/closure.rs index 360e1169d51..8bd2a61836c 100644 --- a/src/rustc/middle/trans/closure.rs +++ b/src/rustc/middle/trans/closure.rs @@ -122,12 +122,12 @@ fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t { // Given a closure ty, emits a corresponding tuple ty fn mk_closure_tys(tcx: ty::ctxt, - bound_values: [environment_value]/~) - -> (ty::t, [ty::t]/~) { - let mut bound_tys = []/~; + bound_values: ~[environment_value]) + -> (ty::t, ~[ty::t]) { + let mut bound_tys = ~[]; // Compute the closed over data - for vec::each(bound_values) {|bv| + for vec::each(bound_values) |bv| { vec::push(bound_tys, alt bv { env_copy(_, t, _) { t } env_move(_, t, _) { t } @@ -137,8 +137,8 @@ fn mk_closure_tys(tcx: ty::ctxt, } let bound_data_ty = ty::mk_tup(tcx, bound_tys); // FIXME[mono] remove tuple of tydescs from closure types (#2531) - let cdata_ty = ty::mk_tup(tcx, [ty::mk_tup(tcx, []/~), - bound_data_ty]/~); + let cdata_ty = ty::mk_tup(tcx, ~[ty::mk_tup(tcx, ~[]), + bound_data_ty]); #debug["cdata_ty=%s", ty_to_str(tcx, cdata_ty)]; ret (cdata_ty, bound_tys); } @@ -146,7 +146,7 @@ fn mk_closure_tys(tcx: ty::ctxt, fn allocate_cbox(bcx: block, ck: ty::closure_kind, cdata_ty: ty::t) - -> (block, ValueRef, [ValueRef]/~) { + -> (block, ValueRef, ~[ValueRef]) { let _icx = bcx.insn_ctxt("closure::allocate_cbox"); let ccx = bcx.ccx(), tcx = ccx.tcx; @@ -155,7 +155,7 @@ fn allocate_cbox(bcx: block, // Initialize ref count to arbitrary value for debugging: let ccx = bcx.ccx(); let llbox = PointerCast(bcx, llbox, T_opaque_box_ptr(ccx)); - let ref_cnt = GEPi(bcx, llbox, [0u, abi::box_field_refcnt]/~); + let ref_cnt = GEPi(bcx, llbox, ~[0u, abi::box_field_refcnt]); let rc = C_int(ccx, 0x12345678); Store(bcx, rc, ref_cnt); } @@ -164,7 +164,7 @@ fn allocate_cbox(bcx: block, cdata_ty: ty::t, llbox: ValueRef, &ti: option<@tydesc_info>) -> block { - let bound_tydesc = GEPi(bcx, llbox, [0u, abi::box_field_tydesc]/~); + let bound_tydesc = GEPi(bcx, llbox, ~[0u, abi::box_field_tydesc]); let td = base::get_tydesc(bcx.ccx(), cdata_ty, ti); Store(bcx, td, bound_tydesc); bcx @@ -172,7 +172,7 @@ fn allocate_cbox(bcx: block, // Allocate and initialize the box: let mut ti = none; - let mut temp_cleanups = []/~; + let mut temp_cleanups = ~[]; let (bcx, llbox) = alt ck { ty::ck_box { get_tydesc(ccx, cdata_ty, ti); @@ -209,7 +209,7 @@ type closure_result = { // heap allocated closure that copies the upvars into environment. // Otherwise, it is stack allocated and copies pointers to the upvars. fn store_environment(bcx: block, - bound_values: [environment_value]/~, + bound_values: ~[environment_value], ck: ty::closure_kind) -> closure_result { let _icx = bcx.insn_ctxt("closure::store_environment"); let ccx = bcx.ccx(), tcx = ccx.tcx; @@ -233,7 +233,7 @@ fn store_environment(bcx: block, // Copy expr values into boxed bindings. let mut bcx = bcx; - vec::iteri(bound_values) { |i, bv| + do vec::iteri(bound_values) |i, bv| { #debug["Copy %s into closure", ev_to_str(ccx, bv)]; if !ccx.sess.no_asm_comments() { @@ -242,7 +242,7 @@ fn store_environment(bcx: block, } let bound_data = GEPi(bcx, llbox, - [0u, abi::box_field_body, abi::closure_body_bindings, i]/~); + ~[0u, abi::box_field_body, abi::closure_body_bindings, i]); alt bv { env_expr(e, _) { bcx = base::trans_expr_save_in(bcx, e, bound_data); @@ -275,7 +275,7 @@ fn store_environment(bcx: block, } } } - for vec::each(temp_cleanups) {|cleanup| revoke_clean(bcx, cleanup); } + for vec::each(temp_cleanups) |cleanup| { revoke_clean(bcx, cleanup); } ret {llbox: llbox, cdata_ty: cdata_ty, bcx: bcx}; } @@ -283,18 +283,18 @@ fn store_environment(bcx: block, // Given a context and a list of upvars, build a closure. This just // collects the upvars and packages them up for store_environment. fn build_closure(bcx0: block, - cap_vars: [capture::capture_var]/~, + cap_vars: ~[capture::capture_var], ck: ty::closure_kind, id: ast::node_id, include_ret_handle: option<ValueRef>) -> closure_result { let _icx = bcx0.insn_ctxt("closure::build_closure"); // If we need to, package up the iterator body to call - let mut env_vals = []/~; + let mut env_vals = ~[]; let mut bcx = bcx0; let ccx = bcx.ccx(), tcx = ccx.tcx; // Package up the captured upvars - vec::iter(cap_vars) { |cap_var| + do vec::iter(cap_vars) |cap_var| { #debug["Building closure: captured variable %?", cap_var]; let lv = trans_local_var(bcx, cap_var.def); let nid = ast_util::def_id_of_def(cap_var.def).node; @@ -323,7 +323,7 @@ fn build_closure(bcx0: block, } } } - option::iter(include_ret_handle) {|flagptr| + do option::iter(include_ret_handle) |flagptr| { let our_ret = alt bcx.fcx.loop_ret { some({retptr, _}) { retptr } none { bcx.fcx.llretptr } @@ -343,7 +343,7 @@ fn build_closure(bcx0: block, // with the upvars and type descriptors. fn load_environment(fcx: fn_ctxt, cdata_ty: ty::t, - cap_vars: [capture::capture_var]/~, + cap_vars: ~[capture::capture_var], load_ret_handle: bool, ck: ty::closure_kind) { let _icx = fcx.insn_ctxt("closure::load_environment"); @@ -354,12 +354,12 @@ fn load_environment(fcx: fn_ctxt, // Populate the upvars from the environment. let mut i = 0u; - vec::iter(cap_vars) { |cap_var| + do vec::iter(cap_vars) |cap_var| { alt cap_var.mode { capture::cap_drop { /* ignore */ } _ { let mut upvarptr = - GEPi(bcx, llcdata, [0u, abi::closure_body_bindings, i]/~); + GEPi(bcx, llcdata, ~[0u, abi::closure_body_bindings, i]); alt ck { ty::ck_block { upvarptr = Load(bcx, upvarptr); } ty::ck_uniq | ty::ck_box { } @@ -372,10 +372,10 @@ fn load_environment(fcx: fn_ctxt, } if load_ret_handle { let flagptr = Load(bcx, GEPi(bcx, llcdata, - [0u, abi::closure_body_bindings, i]/~)); + ~[0u, abi::closure_body_bindings, i])); let retptr = Load(bcx, GEPi(bcx, llcdata, - [0u, abi::closure_body_bindings, i+1u]/~)); + ~[0u, abi::closure_body_bindings, i+1u])); fcx.loop_ret = some({flagptr: flagptr, retptr: retptr}); } } @@ -404,10 +404,10 @@ fn trans_expr_fn(bcx: block, let {llbox, cdata_ty, bcx} = build_closure(bcx, cap_vars, ck, id, ret_handle); trans_closure(ccx, sub_path, decl, body, llfn, no_self, - bcx.fcx.param_substs, id, {|fcx| + bcx.fcx.param_substs, id, |fcx| { load_environment(fcx, cdata_ty, cap_vars, option::is_some(ret_handle), ck); - }, {|bcx| + }, |bcx| { if option::is_some(is_loop_body) { Store(bcx, C_bool(true), bcx.fcx.llretptr); } @@ -421,7 +421,7 @@ fn trans_expr_fn(bcx: block, ast::proto_uniq { trans_closure_env(ty::ck_uniq) } ast::proto_bare { trans_closure(ccx, sub_path, decl, body, llfn, no_self, none, - id, {|_fcx|}, {|_bcx|}); + id, |_fcx| { }, |_bcx| { }); C_null(T_opaque_box_ptr(ccx)) } }; @@ -431,17 +431,17 @@ fn trans_expr_fn(bcx: block, fn trans_bind_1(cx: block, outgoing_fty: ty::t, f_res: lval_maybe_callee, - args: [option<@ast::expr>]/~, pair_ty: ty::t, + args: ~[option<@ast::expr>], pair_ty: ty::t, dest: dest) -> block { let _icx = cx.insn_ctxt("closure::trans_bind1"); let ccx = cx.ccx(); - let mut bound: [@ast::expr]/~ = []/~; - for vec::each(args) {|argopt| + let mut bound: ~[@ast::expr] = ~[]; + for vec::each(args) |argopt| { alt argopt { none { } some(e) { vec::push(bound, e); } } } let mut bcx = f_res.bcx; if dest == ignore { - for vec::each(bound) {|ex| bcx = trans_expr(bcx, ex, ignore); } + for vec::each(bound) |ex| { bcx = trans_expr(bcx, ex, ignore); } ret bcx; } @@ -456,29 +456,31 @@ fn trans_bind_1(cx: block, outgoing_fty: ty::t, // Arrange for the bound function to live in the first binding spot // if the function is not statically known. let (env_vals, target_info) = alt f_res.env { - null_env { ([]/~, target_static(f_res.val)) } + null_env { (~[], target_static(f_res.val)) } is_closure { // Cast the function we are binding to be the type that the // closure will expect it to have. The type the closure knows // about has the type parameters substituted with the real types. let llclosurety = T_ptr(type_of(ccx, outgoing_fty)); let src_loc = PointerCast(bcx, f_res.val, llclosurety); - ([env_copy(src_loc, pair_ty, owned)]/~, target_closure) + (~[env_copy(src_loc, pair_ty, owned)], target_closure) } self_env(slf, slf_t, none) { - ([env_copy(slf, slf_t, owned)]/~, target_static_self(f_res.val)) + (~[env_copy(slf, slf_t, owned)], target_static_self(f_res.val)) } self_env(_, slf_t, some(slf)) { let cast = PointerCast(bcx, f_res.val, T_ptr(T_nil())); - ([env_copy(cast, ty::mk_nil_ptr(ccx.tcx), owned_imm), - env_copy(slf, slf_t, owned_imm)]/~, target_self) + (~[env_copy(cast, ty::mk_nil_ptr(ccx.tcx), owned_imm), + env_copy(slf, slf_t, owned_imm)], target_self) } }; // Actually construct the closure let {llbox, cdata_ty, bcx} = store_environment( bcx, vec::append(env_vals, - vec::map(bound, {|x| env_expr(x, expr_ty(bcx, x))})), + vec::map(bound, |x| { + env_expr(x, expr_ty(bcx, x)) + })), ty::ck_box); // Make thunk @@ -502,9 +504,9 @@ fn make_fn_glue( let tcx = cx.tcx(); let fn_env = fn@(ck: ty::closure_kind) -> block { - let box_cell_v = GEPi(cx, v, [0u, abi::fn_field_box]/~); + let box_cell_v = GEPi(cx, v, ~[0u, abi::fn_field_box]); let box_ptr_v = Load(cx, box_cell_v); - with_cond(cx, IsNotNull(cx, box_ptr_v)) {|bcx| + do with_cond(cx, IsNotNull(cx, box_ptr_v)) |bcx| { let closure_ty = ty::mk_opaque_closure_ptr(tcx, ck); glue_fn(bcx, box_cell_v, closure_ty) } @@ -537,31 +539,31 @@ fn make_opaque_cbox_take_glue( let ccx = bcx.ccx(), tcx = ccx.tcx; let llopaquecboxty = T_opaque_box_ptr(ccx); let cbox_in = Load(bcx, cboxptr); - with_cond(bcx, IsNotNull(bcx, cbox_in)) {|bcx| + do with_cond(bcx, IsNotNull(bcx, cbox_in)) |bcx| { // Load the size from the type descr found in the cbox let cbox_in = PointerCast(bcx, cbox_in, llopaquecboxty); - let tydescptr = GEPi(bcx, cbox_in, [0u, abi::box_field_tydesc]/~); + let tydescptr = GEPi(bcx, cbox_in, ~[0u, abi::box_field_tydesc]); let tydesc = Load(bcx, tydescptr); let tydesc = PointerCast(bcx, tydesc, T_ptr(ccx.tydesc_type)); - let sz = Load(bcx, GEPi(bcx, tydesc, [0u, abi::tydesc_field_size]/~)); + let sz = Load(bcx, GEPi(bcx, tydesc, ~[0u, abi::tydesc_field_size])); // Adjust sz to account for the rust_opaque_box header fields let sz = Add(bcx, sz, shape::llsize_of(ccx, T_box_header(ccx))); // Allocate memory, update original ptr, and copy existing data let malloc = ccx.upcalls.exchange_malloc; - let cbox_out = Call(bcx, malloc, [tydesc, sz]/~); + let cbox_out = Call(bcx, malloc, ~[tydesc, sz]); let cbox_out = PointerCast(bcx, cbox_out, llopaquecboxty); call_memmove(bcx, cbox_out, cbox_in, sz); Store(bcx, cbox_out, cboxptr); // Take the (deeply cloned) type descriptor - let tydesc_out = GEPi(bcx, cbox_out, [0u, abi::box_field_tydesc]/~); + let tydesc_out = GEPi(bcx, cbox_out, ~[0u, abi::box_field_tydesc]); let bcx = take_ty(bcx, tydesc_out, ty::mk_type(tcx)); // Take the data in the tuple let ti = none; - let cdata_out = GEPi(bcx, cbox_out, [0u, abi::box_field_body]/~); + let cdata_out = GEPi(bcx, cbox_out, ~[0u, abi::box_field_body]); call_tydesc_glue_full(bcx, cdata_out, tydesc, abi::tydesc_field_take_glue, ti); bcx @@ -599,17 +601,17 @@ fn make_opaque_cbox_free_glue( } let ccx = bcx.ccx(); - with_cond(bcx, IsNotNull(bcx, cbox)) {|bcx| + do with_cond(bcx, IsNotNull(bcx, cbox)) |bcx| { // Load the type descr found in the cbox let lltydescty = T_ptr(ccx.tydesc_type); let cbox = PointerCast(bcx, cbox, T_opaque_cbox_ptr(ccx)); - let tydescptr = GEPi(bcx, cbox, [0u, abi::box_field_tydesc]/~); + let tydescptr = GEPi(bcx, cbox, ~[0u, abi::box_field_tydesc]); let tydesc = Load(bcx, tydescptr); let tydesc = PointerCast(bcx, tydesc, lltydescty); // Drop the tuple data then free the descriptor let ti = none; - let cdata = GEPi(bcx, cbox, [0u, abi::box_field_body]/~); + let cdata = GEPi(bcx, cbox, ~[0u, abi::box_field_body]); call_tydesc_glue_full(bcx, cdata, tydesc, abi::tydesc_field_drop_glue, ti); @@ -638,7 +640,7 @@ fn trans_bind_thunk(ccx: @crate_ctxt, path: path, incoming_fty: ty::t, outgoing_fty: ty::t, - args: [option<@ast::expr>]/~, + args: ~[option<@ast::expr>], cdata_ty: ty::t, target_info: target_info) -> {val: ValueRef, ty: TypeRef} { @@ -705,25 +707,25 @@ fn trans_bind_thunk(ccx: @crate_ctxt, (fptr, llvm::LLVMGetUndef(T_opaque_cbox_ptr(ccx)), 0u) } target_closure { - let pair = GEPi(bcx, llcdata, [0u, abi::closure_body_bindings, 0u]/~); + let pair = GEPi(bcx, llcdata, ~[0u, abi::closure_body_bindings, 0u]); let lltargetenv = - Load(bcx, GEPi(bcx, pair, [0u, abi::fn_field_box]/~)); + Load(bcx, GEPi(bcx, pair, ~[0u, abi::fn_field_box])); let lltargetfn = Load - (bcx, GEPi(bcx, pair, [0u, abi::fn_field_code]/~)); + (bcx, GEPi(bcx, pair, ~[0u, abi::fn_field_code])); (lltargetfn, lltargetenv, 1u) } target_self { let fptr = Load(bcx, GEPi(bcx, llcdata, - [0u, abi::closure_body_bindings, 0u]/~)); + ~[0u, abi::closure_body_bindings, 0u])); let slfbox = - GEPi(bcx, llcdata, [0u, abi::closure_body_bindings, 1u]/~); + GEPi(bcx, llcdata, ~[0u, abi::closure_body_bindings, 1u]); let selfptr = - GEPi(bcx, Load(bcx, slfbox), [0u, abi::box_field_body]/~); + GEPi(bcx, Load(bcx, slfbox), ~[0u, abi::box_field_body]); (fptr, PointerCast(bcx, selfptr, T_opaque_cbox_ptr(ccx)), 2u) } target_static_self(fptr) { let slfptr = - GEPi(bcx, llcdata, [0u, abi::closure_body_bindings, 0u]/~); + GEPi(bcx, llcdata, ~[0u, abi::closure_body_bindings, 0u]); (fptr, PointerCast(bcx, slfptr, T_opaque_cbox_ptr(ccx)), 1u) } }; @@ -735,19 +737,19 @@ fn trans_bind_thunk(ccx: @crate_ctxt, let outgoing_args = ty::ty_fn_args(outgoing_fty); // Set up the three implicit arguments to the thunk. - let mut llargs: [ValueRef]/~ = [fcx.llretptr, lltargetenv]/~; + let mut llargs: ~[ValueRef] = ~[fcx.llretptr, lltargetenv]; let mut a: uint = first_real_arg; // retptr, env come first let mut b: uint = starting_idx; let mut outgoing_arg_index: uint = 0u; - for vec::each(args) {|arg| + for vec::each(args) |arg| { let out_arg = outgoing_args[outgoing_arg_index]; alt arg { // Arg provided at binding time; thunk copies it from // closure. some(e) { let mut val = - GEPi(bcx, llcdata, [0u, abi::closure_body_bindings, b]/~); + GEPi(bcx, llcdata, ~[0u, abi::closure_body_bindings, b]); alt ty::resolved_mode(tcx, out_arg.mode) { ast::by_val { diff --git a/src/rustc/middle/trans/common.rs b/src/rustc/middle/trans/common.rs index 0f7b82de314..3125f3d84b8 100644 --- a/src/rustc/middle/trans/common.rs +++ b/src/rustc/middle/trans/common.rs @@ -68,9 +68,9 @@ type stats = mut n_glues_created: uint, mut n_null_glues: uint, mut n_real_glues: uint, - llvm_insn_ctxt: @mut [str]/~, + llvm_insn_ctxt: @mut ~[str], llvm_insns: hashmap<str, uint>, - fn_times: @mut [{ident: str, time: int}]/~}; + fn_times: @mut ~[{ident: str, time: int}]}; class BuilderRef_res { let B: BuilderRef; @@ -102,7 +102,7 @@ type crate_ctxt = { monomorphized: hashmap<mono_id, ValueRef>, monomorphizing: hashmap<ast::def_id, uint>, // Cache computed type parameter uses (see type_use.rs) - type_use_cache: hashmap<ast::def_id, [type_use::type_uses]/~>, + type_use_cache: hashmap<ast::def_id, ~[type_use::type_uses]>, // Cache generated vtables vtables: hashmap<mono_id, ValueRef>, // Cache of constant strings, @@ -139,9 +139,9 @@ type val_self_pair = {v: ValueRef, t: ty::t}; enum local_val { local_mem(ValueRef), local_imm(ValueRef), } -type param_substs = {tys: [ty::t]/~, +type param_substs = {tys: ~[ty::t], vtables: option<typeck::vtable_res>, - bounds: @[ty::param_bounds]/~}; + bounds: @~[ty::param_bounds]}; // Function context. Every LLVM function we create will have one of // these. @@ -233,7 +233,7 @@ type cleanup_path = {target: option<BasicBlockRef>, dest: BasicBlockRef}; fn scope_clean_changed(info: scope_info) { - if info.cleanup_paths.len() > 0u { info.cleanup_paths = []/~; } + if info.cleanup_paths.len() > 0u { info.cleanup_paths = ~[]; } info.landing_pad = none; } @@ -251,8 +251,8 @@ fn add_clean(cx: block, val: ValueRef, ty: ty::t) { cx.to_str(), val_str(cx.ccx().tn, val), ty_to_str(cx.ccx().tcx, ty)]; let cleanup_type = cleanup_type(cx.tcx(), ty); - in_scope_cx(cx) {|info| - vec::push(info.cleanups, clean({|a|base::drop_ty(a, val, ty)}, + do in_scope_cx(cx) |info| { + vec::push(info.cleanups, clean(|a| base::drop_ty(a, val, ty), cleanup_type)); scope_clean_changed(info); } @@ -271,8 +271,8 @@ fn add_clean_temp(cx: block, val: ValueRef, ty: ty::t) { ret base::drop_ty(bcx, val, ty); } } - in_scope_cx(cx) {|info| - vec::push(info.cleanups, clean_temp(val, {|a|do_drop(a, val, ty)}, + do in_scope_cx(cx) |info| { + vec::push(info.cleanups, clean_temp(val, |a| do_drop(a, val, ty), cleanup_type)); scope_clean_changed(info); } @@ -283,19 +283,19 @@ fn add_clean_temp_mem(cx: block, val: ValueRef, ty: ty::t) { cx.to_str(), val_str(cx.ccx().tn, val), ty_to_str(cx.ccx().tcx, ty)]; let cleanup_type = cleanup_type(cx.tcx(), ty); - in_scope_cx(cx) {|info| + do in_scope_cx(cx) |info| { vec::push(info.cleanups, - clean_temp(val, {|a|base::drop_ty(a, val, ty)}, + clean_temp(val, |a| base::drop_ty(a, val, ty), cleanup_type)); scope_clean_changed(info); } } fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) { let free_fn = alt heap { - heap_shared { {|a|base::trans_free(a, ptr)} } - heap_exchange { {|a|base::trans_unique_free(a, ptr)} } + heap_shared { |a| base::trans_free(a, ptr) } + heap_exchange { |a| base::trans_unique_free(a, ptr) } }; - in_scope_cx(cx) {|info| + do in_scope_cx(cx) |info| { vec::push(info.cleanups, clean_temp(ptr, free_fn, normal_exit_and_unwind)); scope_clean_changed(info); @@ -307,10 +307,10 @@ fn add_clean_free(cx: block, ptr: ValueRef, heap: heap) { // this will be more involved. For now, we simply zero out the local, and the // drop glue checks whether it is zero. fn revoke_clean(cx: block, val: ValueRef) { - in_scope_cx(cx) {|info| - option::iter(vec::position(info.cleanups, {|cu| + do in_scope_cx(cx) |info| { + do option::iter(vec::position(info.cleanups, |cu| { alt cu { clean_temp(v, _, _) if v == val { true } _ { false } } - })) {|i| + })) |i| { info.cleanups = vec::append(vec::slice(info.cleanups, 0u, i), vec::view(info.cleanups, @@ -339,10 +339,10 @@ type scope_info = { // A list of functions that must be run at when leaving this // block, cleaning up any variables that were introduced in the // block. - mut cleanups: [cleanup]/~, + mut cleanups: ~[cleanup], // Existing cleanup paths that may be reused, indexed by destination and // cleared when the set of cleanups changes. - mut cleanup_paths: [cleanup_path]/~, + mut cleanup_paths: ~[cleanup_path], // Unwinding landing pad. Also cleared when cleanups change. mut landing_pad: option<BasicBlockRef>, }; @@ -361,7 +361,7 @@ impl node_info for ast::blk { impl node_info for option<@ast::expr> { fn info() -> option<node_info> { - self.chain { |s| s.info() } + self.chain(|s| s.info()) } } @@ -572,35 +572,35 @@ fn T_size_t(targ_cfg: @session::config) -> TypeRef { ret T_int(targ_cfg); } -fn T_fn(inputs: [TypeRef]/~, output: TypeRef) -> TypeRef unsafe { +fn T_fn(inputs: ~[TypeRef], output: TypeRef) -> TypeRef unsafe { ret llvm::LLVMFunctionType(output, to_ptr(inputs), inputs.len() as c_uint, False); } fn T_fn_pair(cx: @crate_ctxt, tfn: TypeRef) -> TypeRef { - ret T_struct([T_ptr(tfn), T_opaque_cbox_ptr(cx)]/~); + ret T_struct(~[T_ptr(tfn), T_opaque_cbox_ptr(cx)]); } fn T_ptr(t: TypeRef) -> TypeRef { ret llvm::LLVMPointerType(t, 0u as c_uint); } -fn T_struct(elts: [TypeRef]/~) -> TypeRef unsafe { +fn T_struct(elts: ~[TypeRef]) -> TypeRef unsafe { ret llvm::LLVMStructType(to_ptr(elts), elts.len() as c_uint, False); } fn T_named_struct(name: str) -> TypeRef { let c = llvm::LLVMGetGlobalContext(); - ret str::as_c_str(name, {|buf| llvm::LLVMStructCreateNamed(c, buf) }); + ret str::as_c_str(name, |buf| llvm::LLVMStructCreateNamed(c, buf)); } -fn set_struct_body(t: TypeRef, elts: [TypeRef]/~) unsafe { +fn set_struct_body(t: TypeRef, elts: ~[TypeRef]) unsafe { llvm::LLVMStructSetBody(t, to_ptr(elts), elts.len() as c_uint, False); } -fn T_empty_struct() -> TypeRef { ret T_struct([]/~); } +fn T_empty_struct() -> TypeRef { ret T_struct(~[]); } // A vtable is, in reality, a vtable pointer followed by zero or more pointers // to tydescs and other vtables that it closes over. But the types and number @@ -624,8 +624,8 @@ fn T_task(targ_cfg: @session::config) -> TypeRef { let t_int = T_int(targ_cfg); let elems = - [t_int, t_int, t_int, t_int, - t_int, t_int, t_int, t_int]/~; + ~[t_int, t_int, t_int, t_int, + t_int, t_int, t_int, t_int]; set_struct_body(t, elems); ret t; } @@ -633,7 +633,7 @@ fn T_task(targ_cfg: @session::config) -> TypeRef { fn T_tydesc_field(cx: @crate_ctxt, field: uint) -> TypeRef unsafe { // Bit of a kludge: pick the fn typeref out of the tydesc.. - let tydesc_elts: [TypeRef]/~ = + let tydesc_elts: ~[TypeRef] = vec::from_elem::<TypeRef>(abi::n_tydesc_fields, T_nil()); llvm::LLVMGetStructElementTypes(cx.tydesc_type, @@ -655,15 +655,15 @@ fn T_tydesc(targ_cfg: @session::config) -> TypeRef { let tydescpp = T_ptr(T_ptr(tydesc)); let pvoid = T_ptr(T_i8()); let glue_fn_ty = - T_ptr(T_fn([T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, - pvoid]/~, T_void())); + T_ptr(T_fn(~[T_ptr(T_nil()), T_ptr(T_nil()), tydescpp, + pvoid], T_void())); let int_type = T_int(targ_cfg); let elems = - [tydescpp, int_type, int_type, + ~[tydescpp, int_type, int_type, glue_fn_ty, glue_fn_ty, glue_fn_ty, glue_fn_ty, int_type, int_type, int_type, int_type, - T_ptr(T_i8()), T_ptr(T_i8()), int_type, int_type]/~; + T_ptr(T_i8()), T_ptr(T_i8()), int_type, int_type]; set_struct_body(tydesc, elems); ret tydesc; } @@ -674,9 +674,9 @@ fn T_array(t: TypeRef, n: uint) -> TypeRef { // Interior vector. fn T_vec2(targ_cfg: @session::config, t: TypeRef) -> TypeRef { - ret T_struct([T_int(targ_cfg), // fill + ret T_struct(~[T_int(targ_cfg), // fill T_int(targ_cfg), // alloc - T_array(t, 0u)]/~); // elements + T_array(t, 0u)]); // elements } fn T_vec(ccx: @crate_ctxt, t: TypeRef) -> TypeRef { @@ -693,14 +693,14 @@ fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef { // returns). fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { let ptr = ty::mk_ptr(tcx, {ty: ty::mk_nil(tcx), mutbl: ast::m_imm}); - ret ty::mk_tup(tcx, [ty::mk_uint(tcx), ty::mk_type(tcx), + ret ty::mk_tup(tcx, ~[ty::mk_uint(tcx), ty::mk_type(tcx), ptr, ptr, - t]/~); + t]); } -fn T_box_header_fields(cx: @crate_ctxt) -> [TypeRef]/~ { +fn T_box_header_fields(cx: @crate_ctxt) -> ~[TypeRef] { let ptr = T_ptr(T_i8()); - ret [cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr]/~; + ret ~[cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr]; } fn T_box_header(cx: @crate_ctxt) -> TypeRef { @@ -708,7 +708,7 @@ fn T_box_header(cx: @crate_ctxt) -> TypeRef { } fn T_box(cx: @crate_ctxt, t: TypeRef) -> TypeRef { - ret T_struct(vec::append(T_box_header_fields(cx), [t]/~)); + ret T_struct(vec::append(T_box_header_fields(cx), ~[t])); } fn T_box_ptr(t: TypeRef) -> TypeRef { @@ -725,7 +725,7 @@ fn T_opaque_box_ptr(cx: @crate_ctxt) -> TypeRef { } fn T_unique(cx: @crate_ctxt, t: TypeRef) -> TypeRef { - ret T_struct(vec::append(T_box_header_fields(cx), [t]/~)); + ret T_struct(vec::append(T_box_header_fields(cx), ~[t])); } fn T_unique_ptr(t: TypeRef) -> TypeRef { @@ -734,12 +734,12 @@ fn T_unique_ptr(t: TypeRef) -> TypeRef { } fn T_port(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { - ret T_struct([cx.int_type]/~); // Refcount + ret T_struct(~[cx.int_type]); // Refcount } fn T_chan(cx: @crate_ctxt, _t: TypeRef) -> TypeRef { - ret T_struct([cx.int_type]/~); // Refcount + ret T_struct(~[cx.int_type]); // Refcount } @@ -770,7 +770,7 @@ fn T_enum_discrim(cx: @crate_ctxt) -> TypeRef { fn T_opaque_enum(cx: @crate_ctxt) -> TypeRef { let s = "opaque_enum"; alt name_has_type(cx.tn, s) { some(t) { ret t; } _ {} } - let t = T_struct([T_enum_discrim(cx), T_i8()]/~); + let t = T_struct(~[T_enum_discrim(cx), T_i8()]); associate_type(cx.tn, s, t); ret t; } @@ -784,7 +784,7 @@ fn T_captured_tydescs(cx: @crate_ctxt, n: uint) -> TypeRef { } fn T_opaque_iface(cx: @crate_ctxt) -> TypeRef { - T_struct([T_ptr(cx.tydesc_type), T_opaque_box_ptr(cx)]/~) + T_struct(~[T_ptr(cx.tydesc_type), T_opaque_box_ptr(cx)]) } fn T_opaque_port_ptr() -> TypeRef { ret T_ptr(T_i8()); } @@ -800,7 +800,7 @@ fn C_integral(t: TypeRef, u: u64, sign_extend: Bool) -> ValueRef { } fn C_floating(s: str, t: TypeRef) -> ValueRef { - ret str::as_c_str(s, {|buf| llvm::LLVMConstRealOfString(t, buf) }); + ret str::as_c_str(s, |buf| llvm::LLVMConstRealOfString(t, buf)); } fn C_nil() -> ValueRef { @@ -840,12 +840,12 @@ fn C_cstr(cx: @crate_ctxt, s: str) -> ValueRef { none { } } - let sc = str::as_c_str(s) {|buf| + let sc = do str::as_c_str(s) |buf| { llvm::LLVMConstString(buf, str::len(s) as c_uint, False) }; let g = str::as_c_str(cx.names("str"), - {|buf| llvm::LLVMAddGlobal(cx.llmod, val_ty(sc), buf) }); + |buf| llvm::LLVMAddGlobal(cx.llmod, val_ty(sc), buf)); llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); lib::llvm::SetLinkage(g, lib::llvm::InternalLinkage); @@ -857,48 +857,48 @@ fn C_cstr(cx: @crate_ctxt, s: str) -> ValueRef { fn C_estr_slice(cx: @crate_ctxt, s: str) -> ValueRef { let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s), T_ptr(T_i8())); - C_struct([cs, C_uint(cx, str::len(s) + 1u /* +1 for null */)]/~) + C_struct(~[cs, C_uint(cx, str::len(s) + 1u /* +1 for null */)]) } // Returns a Plain Old LLVM String: fn C_postr(s: str) -> ValueRef { - ret str::as_c_str(s) {|buf| + ret do str::as_c_str(s) |buf| { llvm::LLVMConstString(buf, str::len(s) as c_uint, False) }; } fn C_zero_byte_arr(size: uint) -> ValueRef unsafe { let mut i = 0u; - let mut elts: [ValueRef]/~ = []/~; + let mut elts: ~[ValueRef] = ~[]; while i < size { vec::push(elts, C_u8(0u)); i += 1u; } ret llvm::LLVMConstArray(T_i8(), vec::unsafe::to_ptr(elts), elts.len() as c_uint); } -fn C_struct(elts: [ValueRef]/~) -> ValueRef unsafe { +fn C_struct(elts: ~[ValueRef]) -> ValueRef unsafe { ret llvm::LLVMConstStruct(vec::unsafe::to_ptr(elts), elts.len() as c_uint, False); } -fn C_named_struct(T: TypeRef, elts: [ValueRef]/~) -> ValueRef unsafe { +fn C_named_struct(T: TypeRef, elts: ~[ValueRef]) -> ValueRef unsafe { ret llvm::LLVMConstNamedStruct(T, vec::unsafe::to_ptr(elts), elts.len() as c_uint); } -fn C_array(ty: TypeRef, elts: [ValueRef]/~) -> ValueRef unsafe { +fn C_array(ty: TypeRef, elts: ~[ValueRef]) -> ValueRef unsafe { ret llvm::LLVMConstArray(ty, vec::unsafe::to_ptr(elts), elts.len() as c_uint); } -fn C_bytes(bytes: [u8]/~) -> ValueRef unsafe { +fn C_bytes(bytes: ~[u8]) -> ValueRef unsafe { ret llvm::LLVMConstString( unsafe::reinterpret_cast(vec::unsafe::to_ptr(bytes)), bytes.len() as c_uint, False); } -fn C_shape(ccx: @crate_ctxt, bytes: [u8]/~) -> ValueRef { +fn C_shape(ccx: @crate_ctxt, bytes: ~[u8]) -> ValueRef { let llshape = C_bytes(bytes); - let llglobal = str::as_c_str(ccx.names("shape"), {|buf| + let llglobal = str::as_c_str(ccx.names("shape"), |buf| { llvm::LLVMAddGlobal(ccx.llmod, val_ty(llshape), buf) }); llvm::LLVMSetInitializer(llglobal, llshape); @@ -913,19 +913,19 @@ fn get_param(fndecl: ValueRef, param: uint) -> ValueRef { // Used to identify cached monomorphized functions and vtables enum mono_param_id { - mono_precise(ty::t, option<[mono_id]/~>), + mono_precise(ty::t, option<~[mono_id]>), mono_any, mono_repr(uint /* size */, uint /* align */), } -type mono_id = @{def: ast::def_id, params: [mono_param_id]/~}; +type mono_id = @{def: ast::def_id, params: ~[mono_param_id]}; fn hash_mono_id(&&mi: mono_id) -> uint { let mut h = syntax::ast_util::hash_def(mi.def); - for vec::each(mi.params) {|param| + for vec::each(mi.params) |param| { h = h * alt param { mono_precise(ty, vts) { let mut h = ty::type_id(ty); - option::iter(vts) {|vts| - for vec::each(vts) {|vt| h += hash_mono_id(vt); } + do option::iter(vts) |vts| { + for vec::each(vts) |vt| { h += hash_mono_id(vt); } } h } @@ -954,7 +954,7 @@ fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef { fn path_str(p: path) -> str { let mut r = "", first = true; - for vec::each(p) {|e| + for vec::each(p) |e| { alt e { ast_map::path_name(s) | ast_map::path_mod(s) { if first { first = false; } else { r += "::"; } @@ -975,19 +975,19 @@ fn node_id_type(bcx: block, id: ast::node_id) -> ty::t { fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t { node_id_type(bcx, ex.id) } -fn node_id_type_params(bcx: block, id: ast::node_id) -> [ty::t]/~ { +fn node_id_type_params(bcx: block, id: ast::node_id) -> ~[ty::t] { let tcx = bcx.tcx(); let params = ty::node_id_to_type_params(tcx, id); alt bcx.fcx.param_substs { some(substs) { - vec::map(params) {|t| ty::subst_tps(tcx, substs.tys, t) } + vec::map(params, |t| ty::subst_tps(tcx, substs.tys, t)) } _ { params } } } fn field_idx_strict(cx: ty::ctxt, sp: span, ident: ast::ident, - fields: [ty::field]/~) + fields: ~[ty::field]) -> uint { alt ty::field_idx(ident, fields) { none { cx.sess.span_bug(sp, #fmt("base expr doesn't appear to \ @@ -996,7 +996,7 @@ fn field_idx_strict(cx: ty::ctxt, sp: span, ident: ast::ident, } } -fn dummy_substs(tps: [ty::t]/~) -> ty::substs { +fn dummy_substs(tps: ~[ty::t]) -> ty::substs { {self_r: some(ty::re_bound(ty::br_self)), self_ty: none, tps: tps} diff --git a/src/rustc/middle/trans/debuginfo.rs b/src/rustc/middle/trans/debuginfo.rs index 7c75b9b3ae4..bbef68aaad6 100644 --- a/src/rustc/middle/trans/debuginfo.rs +++ b/src/rustc/middle/trans/debuginfo.rs @@ -48,7 +48,7 @@ const DW_ATE_unsigned: int = 0x07; const DW_ATE_unsigned_char: int = 0x08; fn llstr(s: str) -> ValueRef { - str::as_c_str(s, {|sbuf| + str::as_c_str(s, |sbuf| { llvm::LLVMMDString(sbuf, str::len(s) as libc::c_uint) }) } @@ -64,7 +64,7 @@ fn lli64(val: int) -> ValueRef { fn lli1(bval: bool) -> ValueRef { C_bool(bval) } -fn llmdnode(elems: [ValueRef]/~) -> ValueRef unsafe { +fn llmdnode(elems: ~[ValueRef]) -> ValueRef unsafe { llvm::LLVMMDNode(vec::unsafe::to_ptr(elems), vec::len(elems) as libc::c_uint) } @@ -76,7 +76,7 @@ fn llnull() -> ValueRef unsafe { } fn add_named_metadata(cx: @crate_ctxt, name: str, val: ValueRef) { - str::as_c_str(name, {|sbuf| + str::as_c_str(name, |sbuf| { llvm::LLVMAddNamedMetadataOperand(cx.llmod, sbuf, val) }) } @@ -99,7 +99,7 @@ fn update_cache(cache: metadata_cache, mdtag: int, val: debug_metadata) { let existing = if cache.contains_key(mdtag) { cache.get(mdtag) } else { - []/~ + ~[] }; cache.insert(mdtag, vec::append_one(existing, val)); } @@ -115,7 +115,7 @@ type block_md = {start: codemap::loc, end: codemap::loc}; type argument_md = {id: ast::node_id}; type retval_md = {id: ast::node_id}; -type metadata_cache = hashmap<int, [debug_metadata]/~>; +type metadata_cache = hashmap<int, ~[debug_metadata]>; enum debug_metadata { file_metadata(@metadata<file_md>), @@ -150,7 +150,7 @@ fn cached_metadata<T: copy>(cache: metadata_cache, mdtag: int, eq: fn(md: T) -> bool) -> option<T> unsafe { if cache.contains_key(mdtag) { let items = cache.get(mdtag); - for items.each {|item| + for items.each |item| { let md: T = md_from_metadata::<T>(item); if eq(md) { ret option::some(md); @@ -166,14 +166,14 @@ fn create_compile_unit(cx: @crate_ctxt) let crate_name = option::get(cx.dbg_cx).crate_file; let tg = CompileUnitTag; alt cached_metadata::<@metadata<compile_unit_md>>(cache, tg, - {|md| md.data.name == crate_name}) { + |md| md.data.name == crate_name) { option::some(md) { ret md; } option::none {} } let (_, work_dir) = get_file_path_and_dir(cx.sess.working_dir, crate_name); - let unit_metadata = [lltag(tg), + let unit_metadata = ~[lltag(tg), llunused(), lli32(DW_LANG_RUST), llstr(crate_name), @@ -183,7 +183,7 @@ fn create_compile_unit(cx: @crate_ctxt) lli1(cx.sess.opts.optimize != 0u), llstr(""), // flags (???) lli32(0) // runtime version (???) - ]/~; + ]; let unit_node = llmdnode(unit_metadata); add_named_metadata(cx, "llvm.dbg.cu", unit_node); let mdval = @{node: unit_node, data: {name: crate_name}}; @@ -209,7 +209,7 @@ fn create_file(cx: @crate_ctxt, full_path: str) -> @metadata<file_md> { let cache = get_cache(cx);; let tg = FileDescriptorTag; alt cached_metadata::<@metadata<file_md>>( - cache, tg, {|md| md.data.path == full_path}) { + cache, tg, |md| md.data.path == full_path) { option::some(md) { ret md; } option::none {} } @@ -217,10 +217,10 @@ fn create_file(cx: @crate_ctxt, full_path: str) -> @metadata<file_md> { let (file_path, work_dir) = get_file_path_and_dir(cx.sess.working_dir, full_path); let unit_node = create_compile_unit(cx).node; - let file_md = [lltag(tg), + let file_md = ~[lltag(tg), llstr(file_path), llstr(work_dir), - unit_node]/~; + unit_node]; let val = llmdnode(file_md); let mdval = @{node: val, data: {path: full_path}}; update_cache(cache, tg, file_metadata(mdval)); @@ -262,13 +262,13 @@ fn create_block(cx: block) -> @metadata<block_md> { option::some(v) { vec::len(v) as int } option::none { 0 } }; - let lldata = [lltag(tg), + let lldata = ~[lltag(tg), parent, lli32(start.line as int), lli32(start.col as int), file_node.node, lli32(unique_id) - ]/~; + ]; let val = llmdnode(lldata); let mdval = @{node: val, data: {start: start, end: end}}; //update_cache(cache, tg, block_metadata(mdval)); @@ -286,7 +286,7 @@ fn create_basic_type(cx: @crate_ctxt, t: ty::t, ty: ast::prim_ty, span: span) let cache = get_cache(cx); let tg = BasicTypeDescriptorTag; alt cached_metadata::<@metadata<tydesc_md>>( - cache, tg, {|md| ty::type_id(t) == md.data.hash}) { + cache, tg, |md| ty::type_id(t) == md.data.hash) { option::some(md) { ret md; } option::none {} } @@ -319,7 +319,7 @@ fn create_basic_type(cx: @crate_ctxt, t: ty::t, ty: ast::prim_ty, span: span) let file_node = create_file(cx, fname); let cu_node = create_compile_unit(cx); let (size, align) = size_and_align_of(cx, t); - let lldata = [lltag(tg), + let lldata = ~[lltag(tg), cu_node.node, llstr(name), file_node.node, @@ -328,7 +328,7 @@ fn create_basic_type(cx: @crate_ctxt, t: ty::t, ty: ast::prim_ty, span: span) lli64(align * 8), // alignment in bits lli64(0), //XXX offset? lli32(0), //XXX flags? - lli32(encoding)]/~; + lli32(encoding)]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: ty::type_id(t)}}; update_cache(cache, tg, tydesc_metadata(mdval)); @@ -362,7 +362,7 @@ type struct_ctxt = { file: ValueRef, name: str, line: int, - mut members: [ValueRef]/~, + mut members: ~[ValueRef], mut total_size: int, align: int }; @@ -378,7 +378,7 @@ fn create_structure(file: @metadata<file_md>, name: str, line: int) let cx = @{file: file.node, name: name, line: line, - mut members: []/~, + mut members: ~[], mut total_size: 0, align: 64 //XXX different alignment per arch? }; @@ -388,7 +388,7 @@ fn create_structure(file: @metadata<file_md>, name: str, line: int) fn create_derived_type(type_tag: int, file: ValueRef, name: str, line: int, size: int, align: int, offset: int, ty: ValueRef) -> ValueRef { - let lldata = [lltag(type_tag), + let lldata = ~[lltag(type_tag), file, llstr(name), file, @@ -397,7 +397,7 @@ fn create_derived_type(type_tag: int, file: ValueRef, name: str, line: int, lli64(align), lli64(offset), lli32(0), - ty]/~; + ty]; ret llmdnode(lldata); } @@ -409,7 +409,7 @@ fn add_member(cx: @struct_ctxt, name: str, line: int, size: int, align: int, cx.total_size += size * 8; } -fn create_record(cx: @crate_ctxt, t: ty::t, fields: [ast::ty_field]/~, +fn create_record(cx: @crate_ctxt, t: ty::t, fields: ~[ast::ty_field], span: span) -> @metadata<tydesc_md> { let fname = filename_from_span(cx, span); let file_node = create_file(cx, fname); @@ -417,7 +417,7 @@ fn create_record(cx: @crate_ctxt, t: ty::t, fields: [ast::ty_field]/~, option::get(cx.dbg_cx).names("rec"), line_from_span(cx.sess.codemap, span) as int); - for fields.each {|field| + for fields.each |field| { let field_t = ty::get_field(t, field.node.ident).mt.ty; let ty_md = create_ty(cx, field_t, field.node.mt.ty); let (size, align) = size_and_align_of(cx, field_t); @@ -461,9 +461,9 @@ fn create_boxed_type(cx: @crate_ctxt, outer: ty::t, _inner: ty::t, fn create_composite_type(type_tag: int, name: str, file: ValueRef, line: int, size: int, align: int, offset: int, derived: option<ValueRef>, - members: option<[ValueRef]/~>) + members: option<~[ValueRef]>) -> ValueRef { - let lldata = [lltag(type_tag), + let lldata = ~[lltag(type_tag), file, llstr(name), // type name file, // source file definition @@ -484,7 +484,7 @@ fn create_composite_type(type_tag: int, name: str, file: ValueRef, line: int, }, lli32(0), // runtime language llnull() - ]/~; + ]; ret llmdnode(lldata); } @@ -501,12 +501,12 @@ fn create_vec(cx: @crate_ctxt, vec_t: ty::t, elem_t: ty::t, sys::min_align_of::<libc::size_t>() as int, size_t_type.node); add_member(scx, "alloc", 0, sys::size_of::<libc::size_t>() as int, sys::min_align_of::<libc::size_t>() as int, size_t_type.node); - let subrange = llmdnode([lltag(SubrangeTag), lli64(0), lli64(0)]/~); + let subrange = llmdnode(~[lltag(SubrangeTag), lli64(0), lli64(0)]); let (arr_size, arr_align) = size_and_align_of(cx, elem_t); let data_ptr = create_composite_type(ArrayTypeTag, "", file_node.node, 0, arr_size, arr_align, 0, option::some(elem_ty_md.node), - option::some([subrange]/~)); + option::some(~[subrange])); add_member(scx, "data", 0, 0, // clang says the size should be 0 sys::min_align_of::<u8>() as int, data_ptr); let llnode = finish_structure(scx); @@ -548,7 +548,7 @@ fn create_ty(_cx: @crate_ctxt, _t: ty::t, _ty: @ast::ty) ty::ty_uniq(mt) { ast::ty_uniq({ty: t_to_ty(cx, mt.ty, span), mutbl: mt.mutbl}) } ty::ty_rec(fields) { - let fs = []/~; + let fs = ~[]; for field in fields { vec::push(fs, {node: {ident: field.ident, mt: {ty: t_to_ty(cx, field.mt.ty, span), @@ -623,14 +623,14 @@ fn filename_from_span(cx: @crate_ctxt, sp: codemap::span) -> str { fn create_var(type_tag: int, context: ValueRef, name: str, file: ValueRef, line: int, ret_ty: ValueRef) -> ValueRef { - let lldata = [lltag(type_tag), + let lldata = ~[lltag(type_tag), context, llstr(name), file, lli32(line), ret_ty, lli32(0) - ]/~; + ]; ret llmdnode(lldata); } @@ -640,7 +640,7 @@ fn create_local_var(bcx: block, local: @ast::local) let cache = get_cache(cx); let tg = AutoVariableTag; alt cached_metadata::<@metadata<local_var_md>>( - cache, tg, {|md| md.data.id == local.node.id}) { + cache, tg, |md| md.data.id == local.node.id) { option::some(md) { ret md; } option::none {} } @@ -678,7 +678,7 @@ fn create_local_var(bcx: block, local: @ast::local) } } }; - let declargs = [llmdnode([llptr]/~), mdnode]/~; + let declargs = ~[llmdnode(~[llptr]), mdnode]; trans::build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; @@ -690,7 +690,7 @@ fn create_arg(bcx: block, arg: ast::arg, sp: span) let cache = get_cache(cx); let tg = ArgVariableTag; alt cached_metadata::<@metadata<argument_md>>( - cache, ArgVariableTag, {|md| md.data.id == arg.id}) { + cache, ArgVariableTag, |md| md.data.id == arg.id) { option::some(md) { ret md; } option::none {} } @@ -709,7 +709,7 @@ fn create_arg(bcx: block, arg: ast::arg, sp: span) let llptr = alt fcx.llargs.get(arg.id) { local_mem(v) | local_imm(v) { v } }; - let declargs = [llmdnode([llptr]/~), mdnode]/~; + let declargs = ~[llmdnode(~[llptr]), mdnode]; trans::build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; @@ -722,10 +722,10 @@ fn update_source_pos(cx: block, s: span) { let cm = cx.sess().codemap; let blockmd = create_block(cx); let loc = codemap::lookup_char_pos(cm, s.lo); - let scopedata = [lli32(loc.line as int), + let scopedata = ~[lli32(loc.line as int), lli32(loc.col as int), blockmd.node, - llnull()]/~; + llnull()]; let dbgscope = llmdnode(scopedata); llvm::LLVMSetCurrentDebugLocation(trans::build::B(cx), dbgscope); } @@ -778,7 +778,7 @@ fn create_function(fcx: fn_ctxt) -> @metadata<subprogram_md> { let cache = get_cache(cx); alt cached_metadata::<@metadata<subprogram_md>>( - cache, SubprogramTag, {|md| md.data.id == id}) { + cache, SubprogramTag, |md| md.data.id == id) { option::some(md) { ret md; } option::none {} } @@ -796,9 +796,9 @@ fn create_function(fcx: fn_ctxt) -> @metadata<subprogram_md> { }; let sub_node = create_composite_type(SubroutineTag, "", file_node, 0, 0, 0, 0, option::none, - option::some([ty_node]/~)); + option::some(~[ty_node])); - let fn_metadata = [lltag(SubprogramTag), + let fn_metadata = ~[lltag(SubprogramTag), llunused(), file_node, llstr(*ident), @@ -818,7 +818,7 @@ fn create_function(fcx: fn_ctxt) -> @metadata<subprogram_md> { //list of template params //func decl descriptor //list of func vars - ]/~; + ]; let val = llmdnode(fn_metadata); add_named_metadata(cx, "llvm.dbg.sp", val); let mdval = @{node: val, data: {id: id}}; diff --git a/src/rustc/middle/trans/foreign.rs b/src/rustc/middle/trans/foreign.rs index c7f5d1932b7..58c7fd1349e 100644 --- a/src/rustc/middle/trans/foreign.rs +++ b/src/rustc/middle/trans/foreign.rs @@ -7,7 +7,9 @@ import libc::c_uint; import syntax::{attr, ast_map}; import lib::llvm::{ llvm, TypeRef, ValueRef, ModuleRef, CallConv, Attribute, - StructRetAttribute, ByValAttribute + StructRetAttribute, ByValAttribute, + SequentiallyConsistent, Acquire, Release, + Xchg, Add, Sub }; import syntax::{ast, ast_util}; import back::{link, abi}; @@ -44,7 +46,7 @@ fn is_sse(++c: x86_64_reg_class) -> bool { }; } -fn is_ymm(cls: [x86_64_reg_class]/~) -> bool { +fn is_ymm(cls: ~[x86_64_reg_class]) -> bool { let len = vec::len(cls); ret (len > 2u && is_sse(cls[0]) && @@ -56,16 +58,16 @@ fn is_ymm(cls: [x86_64_reg_class]/~) -> bool { cls[3] == sseup_class); } -fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { +fn classify_ty(ty: TypeRef) -> ~[x86_64_reg_class] { fn align(off: uint, ty: TypeRef) -> uint { let a = ty_align(ty); ret (off + a - 1u) / a * a; } - fn struct_tys(ty: TypeRef) -> [TypeRef]/~ { + fn struct_tys(ty: TypeRef) -> ~[TypeRef] { let n = llvm::LLVMCountStructElementTypes(ty); let elts = vec::from_elem(n as uint, ptr::null()); - vec::as_buf(elts) {|buf| + do vec::as_buf(elts) |buf| { llvm::LLVMGetStructElementTypes(ty, buf); } ret elts; @@ -80,7 +82,7 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { 2 /* float */ { 4u } 3 /* double */ { 8u } 10 /* struct */ { - vec::foldl(0u, struct_tys(ty)) {|a, t| + do vec::foldl(0u, struct_tys(ty)) |a, t| { uint::max(a, ty_align(t)) } } @@ -103,7 +105,7 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { 2 /* float */ { 4u } 3 /* double */ { 8u } 10 /* struct */ { - vec::foldl(0u, struct_tys(ty)) {|s, t| + do vec::foldl(0u, struct_tys(ty)) |s, t| { s + ty_size(t) } } @@ -119,13 +121,13 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { }; } - fn all_mem(cls: [mut x86_64_reg_class]/~) { - for uint::range(0u, cls.len()) { |i| + fn all_mem(cls: ~[mut x86_64_reg_class]) { + for uint::range(0u, cls.len()) |i| { cls[i] = memory_class; } } - fn unify(cls: [mut x86_64_reg_class]/~, + fn unify(cls: ~[mut x86_64_reg_class], i: uint, newv: x86_64_reg_class) { if cls[i] == newv { @@ -150,14 +152,14 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { } } - fn classify_struct(tys: [TypeRef]/~, - cls: [mut x86_64_reg_class]/~, i: uint, + fn classify_struct(tys: ~[TypeRef], + cls: ~[mut x86_64_reg_class], i: uint, off: uint) { if vec::is_empty(tys) { classify(T_i64(), cls, i, off); } else { let mut field_off = off; - for vec::each(tys) {|ty| + for vec::each(tys) |ty| { field_off = align(field_off, ty); classify(ty, cls, i, field_off); field_off += ty_size(ty); @@ -166,7 +168,7 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { } fn classify(ty: TypeRef, - cls: [mut x86_64_reg_class]/~, ix: uint, + cls: ~[mut x86_64_reg_class], ix: uint, off: uint) { let t_align = ty_align(ty); let t_size = ty_size(ty); @@ -216,7 +218,7 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { } } - fn fixup(ty: TypeRef, cls: [mut x86_64_reg_class]/~) { + fn fixup(ty: TypeRef, cls: ~[mut x86_64_reg_class]) { let mut i = 0u; let llty = llvm::LLVMGetTypeKind(ty) as int; let e = vec::len(cls); @@ -274,10 +276,10 @@ fn classify_ty(ty: TypeRef) -> [x86_64_reg_class]/~ { ret vec::from_mut(cls); } -fn llreg_ty(cls: [x86_64_reg_class]/~) -> TypeRef { - fn llvec_len(cls: [x86_64_reg_class]/~) -> uint { +fn llreg_ty(cls: ~[x86_64_reg_class]) -> TypeRef { + fn llvec_len(cls: ~[x86_64_reg_class]) -> uint { let mut len = 1u; - for vec::each(cls) {|c| + for vec::each(cls) |c| { if c != sseup_class { break; } @@ -286,7 +288,7 @@ fn llreg_ty(cls: [x86_64_reg_class]/~) -> TypeRef { ret len; } - let mut tys = []/~; + let mut tys = ~[]; let mut i = 0u; let e = vec::len(cls); while i < e { @@ -323,13 +325,13 @@ type x86_64_llty = { }; type x86_64_tys = { - arg_tys: [x86_64_llty]/~, + arg_tys: ~[x86_64_llty], ret_ty: x86_64_llty, - attrs: [option<Attribute>]/~, + attrs: ~[option<Attribute>], sret: bool }; -fn x86_64_tys(atys: [TypeRef]/~, +fn x86_64_tys(atys: ~[TypeRef], rty: TypeRef, ret_def: bool) -> x86_64_tys { fn is_reg_ty(ty: TypeRef) -> bool { @@ -342,18 +344,18 @@ fn x86_64_tys(atys: [TypeRef]/~, }; } - fn is_pass_byval(cls: [x86_64_reg_class]/~) -> bool { + fn is_pass_byval(cls: ~[x86_64_reg_class]) -> bool { ret cls[0] == memory_class || cls[0] == x87_class || cls[0] == complex_x87_class; } - fn is_ret_bysret(cls: [x86_64_reg_class]/~) -> bool { + fn is_ret_bysret(cls: ~[x86_64_reg_class]) -> bool { ret cls[0] == memory_class; } fn x86_64_ty(ty: TypeRef, - is_mem_cls: fn(cls: [x86_64_reg_class]/~) -> bool, + is_mem_cls: fn(cls: ~[x86_64_reg_class]) -> bool, attr: Attribute) -> (x86_64_llty, option<Attribute>) { let mut cast = false; let mut ty_attr = option::none; @@ -371,9 +373,9 @@ fn x86_64_tys(atys: [TypeRef]/~, ret ({ cast: cast, ty: llty }, ty_attr); } - let mut arg_tys = []/~; - let mut attrs = []/~; - for vec::each(atys) {|t| + let mut arg_tys = ~[]; + let mut attrs = ~[]; + for vec::each(atys) |t| { let (ty, attr) = x86_64_ty(t, is_pass_byval, ByValAttribute); vec::push(arg_tys, ty); vec::push(attrs, attr); @@ -382,11 +384,11 @@ fn x86_64_tys(atys: [TypeRef]/~, StructRetAttribute); let sret = option::is_some(ret_attr); if sret { - arg_tys = vec::append([ret_ty]/~, arg_tys); + arg_tys = vec::append(~[ret_ty], arg_tys); ret_ty = { cast: false, ty: T_void() }; - attrs = vec::append([ret_attr]/~, attrs); + attrs = vec::append(~[ret_attr], attrs); } else if !ret_def { ret_ty = { cast: false, ty: T_void() @@ -402,12 +404,12 @@ fn x86_64_tys(atys: [TypeRef]/~, fn decl_x86_64_fn(tys: x86_64_tys, decl: fn(fnty: TypeRef) -> ValueRef) -> ValueRef { - let atys = vec::map(tys.arg_tys) {|t| t.ty }; + let atys = vec::map(tys.arg_tys, |t| t.ty); let rty = tys.ret_ty.ty; let fnty = T_fn(atys, rty); let llfn = decl(fnty); - vec::iteri(tys.attrs) {|i, a| + do vec::iteri(tys.attrs) |i, a| { alt a { option::some(attr) { let llarg = get_param(llfn, i); @@ -427,7 +429,7 @@ fn link_name(i: @ast::foreign_item) -> str { } type c_stack_tys = { - arg_tys: [TypeRef]/~, + arg_tys: ~[TypeRef], ret_ty: TypeRef, ret_def: bool, bundle_ty: TypeRef, @@ -436,7 +438,7 @@ type c_stack_tys = { }; fn c_arg_and_ret_lltys(ccx: @crate_ctxt, - id: ast::node_id) -> ([TypeRef]/~, TypeRef, ty::t) { + id: ast::node_id) -> (~[TypeRef], TypeRef, ty::t) { alt ty::get(ty::node_id_to_type(ccx.tcx, id)).struct { ty::ty_fn({inputs: arg_tys, output: ret_ty, _}) { let llargtys = type_of_explicit_args(ccx, arg_tys); @@ -462,13 +464,13 @@ fn c_stack_tys(ccx: @crate_ctxt, ret_ty: llretty, ret_def: ret_def, bundle_ty: bundle_ty, - shim_fn_ty: T_fn([T_ptr(bundle_ty)]/~, T_void()), + shim_fn_ty: T_fn(~[T_ptr(bundle_ty)], T_void()), x86_64_tys: x86_64 }; } type shim_arg_builder = fn(bcx: block, tys: @c_stack_tys, - llargbundle: ValueRef) -> [ValueRef]/~; + llargbundle: ValueRef) -> ~[ValueRef]; type shim_ret_builder = fn(bcx: block, tys: @c_stack_tys, llargbundle: ValueRef, llretval: ValueRef); @@ -485,7 +487,7 @@ fn build_shim_fn_(ccx: @crate_ctxt, ccx.llmod, shim_name, tys.shim_fn_ty); // Declare the body of the shim function: - let fcx = new_fn_ctxt(ccx, []/~, llshimfn, none); + let fcx = new_fn_ctxt(ccx, ~[], llshimfn, none); let bcx = top_scope_block(fcx, none); let lltop = bcx.llbb; let llargbundle = get_param(llshimfn, 0u); @@ -519,7 +521,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt, ret_builder: wrap_ret_builder) { let _icx = ccx.insn_ctxt("foreign::build_wrap_fn_"); - let fcx = new_fn_ctxt(ccx, []/~, llwrapfn, none); + let fcx = new_fn_ctxt(ccx, ~[], llwrapfn, none); let bcx = top_scope_block(fcx, none); let lltop = bcx.llbb; @@ -530,7 +532,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt, // Create call itself. let llshimfnptr = PointerCast(bcx, llshimfn, T_ptr(T_i8())); let llrawargbundle = PointerCast(bcx, llargbundle, T_ptr(T_i8())); - Call(bcx, shim_upcall, [llrawargbundle, llshimfnptr]/~); + Call(bcx, shim_upcall, ~[llrawargbundle, llshimfnptr]); ret_builder(bcx, tys, llargbundle); tie_up_header_blocks(fcx, lltop); @@ -588,9 +590,9 @@ fn trans_foreign_mod(ccx: @crate_ctxt, let _icx = ccx.insn_ctxt("foreign::build_shim_fn"); fn build_args(bcx: block, tys: @c_stack_tys, - llargbundle: ValueRef) -> [ValueRef]/~ { + llargbundle: ValueRef) -> ~[ValueRef] { let _icx = bcx.insn_ctxt("foreign::shim::build_args"); - let mut llargvals = []/~; + let mut llargvals = ~[]; let mut i = 0u; let n = vec::len(tys.arg_tys); @@ -599,23 +601,23 @@ fn trans_foreign_mod(ccx: @crate_ctxt, let mut atys = x86_64.arg_tys; let mut attrs = x86_64.attrs; if x86_64.sret { - let llretptr = GEPi(bcx, llargbundle, [0u, n]/~); + let llretptr = GEPi(bcx, llargbundle, ~[0u, n]); let llretloc = Load(bcx, llretptr); - llargvals = [llretloc]/~; + llargvals = ~[llretloc]; atys = vec::tail(atys); attrs = vec::tail(attrs); } while i < n { let llargval = if atys[i].cast { let arg_ptr = GEPi(bcx, llargbundle, - [0u, i]/~); + ~[0u, i]); let arg_ptr = BitCast(bcx, arg_ptr, T_ptr(atys[i].ty)); Load(bcx, arg_ptr) } else if option::is_some(attrs[i]) { - GEPi(bcx, llargbundle, [0u, i]/~) + GEPi(bcx, llargbundle, ~[0u, i]) } else { - load_inbounds(bcx, llargbundle, [0u, i]/~) + load_inbounds(bcx, llargbundle, ~[0u, i]) }; vec::push(llargvals, llargval); i += 1u; @@ -624,7 +626,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, _ { while i < n { let llargval = load_inbounds(bcx, llargbundle, - [0u, i]/~); + ~[0u, i]); vec::push(llargvals, llargval); i += 1u; } @@ -638,7 +640,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, let _icx = bcx.insn_ctxt("foreign::shim::build_ret"); alt tys.x86_64_tys { some(x86_64) { - vec::iteri(x86_64.attrs) {|i, a| + do vec::iteri(x86_64.attrs) |i, a| { alt a { some(attr) { llvm::LLVMAddInstrAttribute( @@ -652,7 +654,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, ret; } let n = vec::len(tys.arg_tys); - let llretptr = GEPi(bcx, llargbundle, [0u, n]/~); + let llretptr = GEPi(bcx, llargbundle, ~[0u, n]); let llretloc = Load(bcx, llretptr); if x86_64.ret_ty.cast { let tmp_ptr = BitCast(bcx, llretloc, @@ -666,7 +668,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, if tys.ret_def { let n = vec::len(tys.arg_tys); // R** llretptr = &args->r; - let llretptr = GEPi(bcx, llargbundle, [0u, n]/~); + let llretptr = GEPi(bcx, llargbundle, ~[0u, n]); // R* llretloc = *llretptr; /* (args->r) */ let llretloc = Load(bcx, llretptr); // *args->r = r; @@ -689,7 +691,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, // Declare the "prototype" for the base function F: alt tys.x86_64_tys { some(x86_64) { - decl_x86_64_fn(x86_64) {|fnty| + do decl_x86_64_fn(x86_64) |fnty| { decl_fn(ccx.llmod, lname, cc, fnty) } } @@ -705,12 +707,12 @@ fn trans_foreign_mod(ccx: @crate_ctxt, fn build_direct_fn(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, tys: @c_stack_tys, cc: lib::llvm::CallConv) { - let fcx = new_fn_ctxt(ccx, []/~, decl, none); + let fcx = new_fn_ctxt(ccx, ~[], decl, none); let bcx = top_scope_block(fcx, none), lltop = bcx.llbb; let llbasefn = base_fn(ccx, link_name(item), tys, cc); let ty = ty::lookup_item_type(ccx.tcx, ast_util::local_def(item.id)).ty; - let args = vec::from_fn(ty::ty_fn_args(ty).len(), {|i| + let args = vec::from_fn(ty::ty_fn_args(ty).len(), |i| { get_param(decl, i + first_real_arg) }); let retval = Call(bcx, llbasefn, args); @@ -736,11 +738,11 @@ fn trans_foreign_mod(ccx: @crate_ctxt, let implicit_args = first_real_arg; // ret + env while i < n { let llargval = get_param(llwrapfn, i + implicit_args); - store_inbounds(bcx, llargval, llargbundle, [0u, i]/~); + store_inbounds(bcx, llargval, llargbundle, ~[0u, i]); i += 1u; } let llretptr = get_param(llwrapfn, 0u); - store_inbounds(bcx, llretptr, llargbundle, [0u, n]/~); + store_inbounds(bcx, llretptr, llargbundle, ~[0u, n]); } fn build_ret(bcx: block, _tys: @c_stack_tys, @@ -760,7 +762,7 @@ fn trans_foreign_mod(ccx: @crate_ctxt, ast::foreign_abi_stdcall { lib::llvm::X86StdcallCallConv } }; - for vec::each(foreign_mod.items) {|foreign_item| + for vec::each(foreign_mod.items) |foreign_item| { alt foreign_item.node { ast::foreign_item_fn(fn_decl, typarams) { let id = foreign_item.id; @@ -786,9 +788,9 @@ fn trans_foreign_mod(ccx: @crate_ctxt, } }; let psubsts = { - tys: []/~, + tys: ~[], vtables: none, - bounds: @[]/~ + bounds: @~[] }; trans_intrinsic(ccx, llwrapfn, foreign_item, *path, psubsts, none); @@ -806,6 +808,69 @@ fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, some(substs), some(item.span)); let mut bcx = top_scope_block(fcx, none), lltop = bcx.llbb; alt check *item.ident { + "atomic_xchng" { + let old = AtomicRMW(bcx, Xchg, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + SequentiallyConsistent); + Store(bcx, old, fcx.llretptr); + } + "atomic_xchng_acq" { + let old = AtomicRMW(bcx, Xchg, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Acquire); + Store(bcx, old, fcx.llretptr); + } + "atomic_xchng_rel" { + let old = AtomicRMW(bcx, Xchg, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Release); + Store(bcx, old, fcx.llretptr); + } + "atomic_add" { + let old = AtomicRMW(bcx, Add, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + SequentiallyConsistent); + Store(bcx, old, fcx.llretptr); + } + "atomic_add_acq" { + let old = AtomicRMW(bcx, Add, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Acquire); + Store(bcx, old, fcx.llretptr); + } + "atomic_add_rel" { + let old = AtomicRMW(bcx, Add, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Release); + Store(bcx, old, fcx.llretptr); + } + "atomic_sub" { + let old = AtomicRMW(bcx, Sub, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + SequentiallyConsistent); + Store(bcx, old, fcx.llretptr); + } + "atomic_sub_acq" { + let old = AtomicRMW(bcx, Sub, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Acquire); + Store(bcx, old, fcx.llretptr); + } + "atomic_sub_rel" { + let old = AtomicRMW(bcx, Sub, + get_param(decl, first_real_arg), + get_param(decl, first_real_arg + 1u), + Release); + Store(bcx, old, fcx.llretptr); + } "size_of" { let tp_ty = substs.tys[0]; let lltp_ty = type_of::type_of(ccx, tp_ty); @@ -894,28 +959,26 @@ fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::foreign_item, } "frame_address" { let frameaddress = ccx.intrinsics.get("llvm.frameaddress"); - let frameaddress_val = Call(bcx, frameaddress, [C_i32(0i32)]/~); + let frameaddress_val = Call(bcx, frameaddress, ~[C_i32(0i32)]); let fty = ty::mk_fn(bcx.tcx(), { purity: ast::impure_fn, proto: ast::proto_any, - inputs: [{ + inputs: ~[{ mode: ast::expl(ast::by_val), ty: ty::mk_imm_ptr( bcx.tcx(), ty::mk_mach_uint(bcx.tcx(), ast::ty_u8)) - }]/~, + }], output: ty::mk_nil(bcx.tcx()), ret_style: ast::return_val, - constraints: []/~ + constraints: ~[] }); bcx = trans_call_inner(bcx, none, fty, ty::mk_nil(bcx.tcx()), - { |bcx| - lval_no_env( - bcx, - get_param(decl, first_real_arg), - temporary) - }, - arg_vals([frameaddress_val]/~), ignore); + |bcx| lval_no_env( + bcx, + get_param(decl, first_real_arg), + temporary), + arg_vals(~[frameaddress_val]), ignore); } } build_return(bcx); @@ -946,17 +1009,17 @@ fn trans_extern_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl, let _icx = ccx.insn_ctxt("foreign::extern::build_shim_fn"); fn build_args(bcx: block, tys: @c_stack_tys, - llargbundle: ValueRef) -> [ValueRef]/~ { + llargbundle: ValueRef) -> ~[ValueRef] { let _icx = bcx.insn_ctxt("foreign::crust::shim::build_args"); - let mut llargvals = []/~; + let mut llargvals = ~[]; let mut i = 0u; let n = vec::len(tys.arg_tys); - let llretptr = load_inbounds(bcx, llargbundle, [0u, n]/~); + let llretptr = load_inbounds(bcx, llargbundle, ~[0u, n]); vec::push(llargvals, llretptr); let llenvptr = C_null(T_opaque_box_ptr(bcx.ccx())); vec::push(llargvals, llenvptr); while i < n { - let llargval = load_inbounds(bcx, llargbundle, [0u, i]/~); + let llargval = load_inbounds(bcx, llargbundle, ~[0u, i]); vec::push(llargvals, llargval); i += 1u; } @@ -1009,29 +1072,29 @@ fn trans_extern_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl, if option::is_some(attrs[i]) { argval = Load(bcx, argval); store_inbounds(bcx, argval, llargbundle, - [0u, i]/~); + ~[0u, i]); } else if atys[i].cast { - let argptr = GEPi(bcx, llargbundle, [0u, i]/~); + let argptr = GEPi(bcx, llargbundle, ~[0u, i]); let argptr = BitCast(bcx, argptr, T_ptr(atys[i].ty)); Store(bcx, argval, argptr); } else { store_inbounds(bcx, argval, llargbundle, - [0u, i]/~); + ~[0u, i]); } i += 1u; } - store_inbounds(bcx, llretptr, llargbundle, [0u, n]/~); + store_inbounds(bcx, llretptr, llargbundle, ~[0u, n]); } _ { let llretptr = alloca(bcx, tys.ret_ty); let n = vec::len(tys.arg_tys); - for uint::range(0u, n) {|i| + for uint::range(0u, n) |i| { let llargval = get_param(llwrapfn, i); store_inbounds(bcx, llargval, llargbundle, - [0u, i]/~); + ~[0u, i]); }; - store_inbounds(bcx, llretptr, llargbundle, [0u, n]/~); + store_inbounds(bcx, llretptr, llargbundle, ~[0u, n]); } } } @@ -1046,7 +1109,7 @@ fn trans_extern_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl, ret; } let n = vec::len(tys.arg_tys); - let llretval = load_inbounds(bcx, llargbundle, [0u, n]/~); + let llretval = load_inbounds(bcx, llargbundle, ~[0u, n]); let llretval = if x86_64.ret_ty.cast { let retptr = BitCast(bcx, llretval, T_ptr(x86_64.ret_ty.ty)); @@ -1058,7 +1121,7 @@ fn trans_extern_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl, } _ { let n = vec::len(tys.arg_tys); - let llretval = load_inbounds(bcx, llargbundle, [0u, n]/~); + let llretval = load_inbounds(bcx, llargbundle, ~[0u, n]); let llretval = Load(bcx, llretval); Ret(bcx, llretval); } @@ -1088,7 +1151,7 @@ fn register_extern_fn(ccx: @crate_ctxt, sp: span, ret if ccx.sess.targ_cfg.arch == arch_x86_64 { let ret_def = !ty::type_is_bot(ret_ty) && !ty::type_is_nil(ret_ty); let x86_64 = x86_64_tys(llargtys, llretty, ret_def); - decl_x86_64_fn(x86_64) {|fnty| + do decl_x86_64_fn(x86_64) |fnty| { register_fn_fuller(ccx, sp, path, node_id, t, lib::llvm::CCallConv, fnty) } diff --git a/src/rustc/middle/trans/impl.rs b/src/rustc/middle/trans/impl.rs index 53f553e4fde..d9ea8316054 100644 --- a/src/rustc/middle/trans/impl.rs +++ b/src/rustc/middle/trans/impl.rs @@ -16,11 +16,11 @@ import lib::llvm::llvm::LLVMGetParam; import std::map::hashmap; fn trans_impl(ccx: @crate_ctxt, path: path, name: ast::ident, - methods: [@ast::method]/~, tps: [ast::ty_param]/~) { + methods: ~[@ast::method], tps: ~[ast::ty_param]) { let _icx = ccx.insn_ctxt("impl::trans_impl"); if tps.len() > 0u { ret; } let sub_path = vec::append_one(path, path_name(name)); - for vec::each(methods) {|m| + for vec::each(methods) |m| { if m.tps.len() == 0u { let llfn = get_item_val(ccx, m.id); trans_fn(ccx, @@ -36,14 +36,14 @@ fn trans_self_arg(bcx: block, base: @ast::expr, derefs: uint) -> result { let _icx = bcx.insn_ctxt("impl::trans_self_arg"); let basety = expr_ty(bcx, base); let m_by_ref = ast::expl(ast::by_ref); - let mut temp_cleanups = []/~; + let mut temp_cleanups = ~[]; let result = trans_arg_expr(bcx, {mode: m_by_ref, ty: basety}, T_ptr(type_of::type_of(bcx.ccx(), basety)), base, temp_cleanups, none, derefs); // by-ref self argument should not require cleanup in the case of // other arguments failing: - assert temp_cleanups == []/~; + assert temp_cleanups == ~[]; ret result; } @@ -75,9 +75,9 @@ fn trans_method_callee(bcx: block, callee_id: ast::node_id, } } -fn method_from_methods(ms: [@ast::method]/~, name: ast::ident) +fn method_from_methods(ms: ~[@ast::method], name: ast::ident) -> ast::def_id { - local_def(option::get(vec::find(ms, {|m| m.ident == name})).id) + local_def(option::get(vec::find(ms, |m| m.ident == name)).id) } fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id, @@ -152,16 +152,16 @@ fn trans_iface_callee(bcx: block, val: ValueRef, -> lval_maybe_callee { let _icx = bcx.insn_ctxt("impl::trans_iface_callee"); let ccx = bcx.ccx(); - let vtable = Load(bcx, PointerCast(bcx, GEPi(bcx, val, [0u, 0u]/~), + let vtable = Load(bcx, PointerCast(bcx, GEPi(bcx, val, ~[0u, 0u]), T_ptr(T_ptr(T_vtable())))); - let llbox = Load(bcx, GEPi(bcx, val, [0u, 1u]/~)); + let llbox = Load(bcx, GEPi(bcx, val, ~[0u, 1u])); // FIXME[impl] I doubt this is alignment-safe (#2534) - let self = GEPi(bcx, llbox, [0u, abi::box_field_body]/~); + let self = GEPi(bcx, llbox, ~[0u, abi::box_field_body]); let env = self_env(self, ty::mk_opaque_box(bcx.tcx()), some(llbox)); let llfty = type_of::type_of_fn_from_ty(ccx, callee_ty); let vtable = PointerCast(bcx, vtable, T_ptr(T_array(T_ptr(llfty), n_method + 1u))); - let mptr = Load(bcx, GEPi(bcx, vtable, [0u, n_method]/~)); + let mptr = Load(bcx, GEPi(bcx, vtable, ~[0u, n_method])); {bcx: bcx, val: mptr, kind: owned, env: env} } @@ -170,9 +170,9 @@ fn find_vtable_in_fn_ctxt(ps: param_substs, n_param: uint, n_bound: uint) let mut vtable_off = n_bound, i = 0u; // Vtables are stored in a flat array, finding the right one is // somewhat awkward - for vec::each(*ps.bounds) {|bounds| + for vec::each(*ps.bounds) |bounds| { if i >= n_param { break; } - for vec::each(*bounds) {|bound| + for vec::each(*bounds) |bound| { alt bound { ty::bound_iface(_) { vtable_off += 1u; } _ {} } } i += 1u; @@ -182,7 +182,7 @@ fn find_vtable_in_fn_ctxt(ps: param_substs, n_param: uint, n_bound: uint) fn resolve_vtables_in_fn_ctxt(fcx: fn_ctxt, vts: typeck::vtable_res) -> typeck::vtable_res { - @vec::map(*vts, {|d| resolve_vtable_in_fn_ctxt(fcx, d)}) + @vec::map(*vts, |d| resolve_vtable_in_fn_ctxt(fcx, d)) } fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, vt: typeck::vtable_origin) @@ -191,9 +191,7 @@ fn resolve_vtable_in_fn_ctxt(fcx: fn_ctxt, vt: typeck::vtable_origin) typeck::vtable_static(iid, tys, sub) { let tys = alt fcx.param_substs { some(substs) { - vec::map(tys, {|t| - ty::subst_tps(fcx.ccx.tcx, substs.tys, t) - }) + vec::map(tys, |t| ty::subst_tps(fcx.ccx.tcx, substs.tys, t)) } _ { tys } }; @@ -219,7 +217,7 @@ fn vtable_id(ccx: @crate_ctxt, origin: typeck::vtable_origin) -> mono_id { } typeck::vtable_iface(iface_id, substs) { @{def: iface_id, - params: vec::map(substs, {|t| mono_precise(t, none)})} + params: vec::map(substs, |t| mono_precise(t, none))} } } } @@ -239,10 +237,10 @@ fn get_vtable(ccx: @crate_ctxt, origin: typeck::vtable_origin) } } -fn make_vtable(ccx: @crate_ctxt, ptrs: [ValueRef]/~) -> ValueRef { +fn make_vtable(ccx: @crate_ctxt, ptrs: ~[ValueRef]) -> ValueRef { let _icx = ccx.insn_ctxt("impl::make_vtable"); let tbl = C_struct(ptrs); - let vt_gvar = str::as_c_str(ccx.names("vtable"), {|buf| + let vt_gvar = str::as_c_str(ccx.names("vtable"), |buf| { llvm::LLVMAddGlobal(ccx.llmod, val_ty(tbl), buf) }); llvm::LLVMSetInitializer(vt_gvar, tbl); @@ -251,16 +249,16 @@ fn make_vtable(ccx: @crate_ctxt, ptrs: [ValueRef]/~) -> ValueRef { vt_gvar } -fn make_impl_vtable(ccx: @crate_ctxt, impl_id: ast::def_id, substs: [ty::t]/~, +fn make_impl_vtable(ccx: @crate_ctxt, impl_id: ast::def_id, substs: ~[ty::t], vtables: typeck::vtable_res) -> ValueRef { let _icx = ccx.insn_ctxt("impl::make_impl_vtable"); let tcx = ccx.tcx; let ifce_id = expect(ccx.sess, ty::ty_to_def_id(option::get(ty::impl_iface(tcx, impl_id))), - {|| "make_impl_vtable: non-iface-type implemented"}); + || "make_impl_vtable: non-iface-type implemented"); let has_tps = (*ty::lookup_item_type(ccx.tcx, impl_id).bounds).len() > 0u; - make_vtable(ccx, vec::map(*ty::iface_methods(tcx, ifce_id)) {|im| + make_vtable(ccx, vec::map(*ty::iface_methods(tcx, ifce_id), |im| { let fty = ty::subst_tps(tcx, substs, ty::mk_fn(tcx, im.fty)); if (*im.tps).len() > 0u || ty::type_has_self(fty) { C_null(T_ptr(T_nil())) @@ -279,7 +277,7 @@ fn make_impl_vtable(ccx: @crate_ctxt, impl_id: ast::def_id, substs: [ty::t]/~, trans_external_path(ccx, m_id, fty) } } - }) + })) } fn trans_cast(bcx: block, val: @ast::expr, id: ast::node_id, dest: dest) @@ -293,12 +291,12 @@ fn trans_cast(bcx: block, val: @ast::expr, id: ast::node_id, dest: dest) let bcx = trans_expr_save_in(bcx, val, body); revoke_clean(bcx, llbox); let result = get_dest_addr(dest); - Store(bcx, llbox, PointerCast(bcx, GEPi(bcx, result, [0u, 1u]/~), + Store(bcx, llbox, PointerCast(bcx, GEPi(bcx, result, ~[0u, 1u]), T_ptr(val_ty(llbox)))); let orig = ccx.maps.vtable_map.get(id)[0]; let orig = resolve_vtable_in_fn_ctxt(bcx.fcx, orig); let vtable = get_vtable(bcx.ccx(), orig); - Store(bcx, vtable, PointerCast(bcx, GEPi(bcx, result, [0u, 0u]/~), + Store(bcx, vtable, PointerCast(bcx, GEPi(bcx, result, ~[0u, 0u]), T_ptr(val_ty(vtable)))); bcx } diff --git a/src/rustc/middle/trans/reachable.rs b/src/rustc/middle/trans/reachable.rs index 344e68bbc19..5c60585ca7c 100644 --- a/src/rustc/middle/trans/reachable.rs +++ b/src/rustc/middle/trans/reachable.rs @@ -31,13 +31,13 @@ fn find_reachable(crate_mod: _mod, exp_map: resolve::exp_map, rmap } -fn traverse_exports(cx: ctx, vis: [@view_item]/~) -> bool { +fn traverse_exports(cx: ctx, vis: ~[@view_item]) -> bool { let mut found_export = false; - for vec::each(vis) {|vi| + for vec::each(vis) |vi| { alt vi.node { view_item_export(vps) { found_export = true; - for vec::each(vps) {|vp| + for vec::each(vps) |vp| { alt vp.node { view_path_simple(_, _, id) | view_path_glob(_, id) | view_path_list(_, _, id) { @@ -53,8 +53,8 @@ fn traverse_exports(cx: ctx, vis: [@view_item]/~) -> bool { } fn traverse_export(cx: ctx, exp_id: node_id) { - option::iter(cx.exp_map.find(exp_id)) {|defs| - for vec::each(defs) {|def| traverse_def_id(cx, def.id); } + do option::iter(cx.exp_map.find(exp_id)) |defs| { + for vec::each(defs) |def| { traverse_def_id(cx, def.id); } } } @@ -80,7 +80,7 @@ fn traverse_def_id(cx: ctx, did: def_id) { fn traverse_public_mod(cx: ctx, m: _mod) { if !traverse_exports(cx, m.view_items) { // No exports, so every local item is exported - for vec::each(m.items) {|item| traverse_public_item(cx, item); } + for vec::each(m.items) |item| { traverse_public_item(cx, item); } } } @@ -91,7 +91,7 @@ fn traverse_public_item(cx: ctx, item: @item) { item_mod(m) { traverse_public_mod(cx, m); } item_foreign_mod(nm) { if !traverse_exports(cx, nm.view_items) { - for vec::each(nm.items) {|item| cx.rmap.insert(item.id, ()); } + for vec::each(nm.items) |item| { cx.rmap.insert(item.id, ()); } } } item_fn(_, tps, blk) { @@ -101,7 +101,7 @@ fn traverse_public_item(cx: ctx, item: @item) { } } item_impl(tps, _, _, _, ms) { - for vec::each(ms) {|m| + for vec::each(ms) |m| { if tps.len() > 0u || m.tps.len() > 0u || attr::find_inline_attr(m.attrs) != attr::ia_none { cx.rmap.insert(m.id, ()); @@ -111,14 +111,14 @@ fn traverse_public_item(cx: ctx, item: @item) { } item_class(tps, _ifaces, items, ctor, m_dtor, _) { cx.rmap.insert(ctor.node.id, ()); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { cx.rmap.insert(dtor.node.id, ()); // dtors don't have attrs if tps.len() > 0u { traverse_inline_body(cx, dtor.node.body); } } - for vec::each(items) {|item| + for vec::each(items) |item| { alt item.node { class_method(m) { cx.rmap.insert(m.id, ()); @@ -156,7 +156,7 @@ fn traverse_ty(ty: @ty, cx: ctx, v: visit::vt<ctx>) { some(d) { traverse_def_id(cx, def_id_of_def(d)); } none { /* do nothing -- but should we fail here? */ } } - for p.types.each {|t| v.visit_ty(t, cx, v); }; + for p.types.each |t| { v.visit_ty(t, cx, v); }; } _ { visit::visit_ty(ty, cx, v); } } @@ -201,8 +201,8 @@ fn traverse_inline_body(cx: ctx, body: blk) { fn traverse_all_resources(cx: ctx, crate_mod: _mod) { visit::visit_mod(crate_mod, ast_util::dummy_sp(), 0, cx, visit::mk_vt(@{ - visit_expr: {|_e, _cx, _v|}, - visit_item: {|i, cx, v| + visit_expr: |_e, _cx, _v| { }, + visit_item: |i, cx, v| { visit::visit_item(i, cx, v); alt i.node { item_class(_, _, _, _, some(_), _) { diff --git a/src/rustc/middle/trans/reflect.rs b/src/rustc/middle/trans/reflect.rs index b00eb7b5dc5..730686b9df7 100644 --- a/src/rustc/middle/trans/reflect.rs +++ b/src/rustc/middle/trans/reflect.rs @@ -12,7 +12,7 @@ import util::ppaux::ty_to_str; enum reflector = { visitor_val: ValueRef, - visitor_methods: @[ty::method]/~, + visitor_methods: @~[ty::method], mut bcx: block }; @@ -31,21 +31,21 @@ impl methods for reflector { do_spill_noroot(self.bcx, ss) } - fn c_size_and_align(t: ty::t) -> [ValueRef]/~ { + fn c_size_and_align(t: ty::t) -> ~[ValueRef] { let tr = type_of::type_of(self.bcx.ccx(), t); let s = shape::llsize_of_real(self.bcx.ccx(), tr); let a = shape::llalign_of_min(self.bcx.ccx(), tr); - ret [self.c_uint(s), - self.c_uint(a)]/~; + ret ~[self.c_uint(s), + self.c_uint(a)]; } - fn visit(ty_name: str, args: [ValueRef]/~) { + fn visit(ty_name: str, args: ~[ValueRef]) { let tcx = self.bcx.tcx(); let mth_idx = option::get(ty::method_idx(@("visit_" + ty_name), *self.visitor_methods)); let mth_ty = ty::mk_fn(tcx, self.visitor_methods[mth_idx].fty); let v = self.visitor_val; - let get_lval = {|bcx| + let get_lval = |bcx| { let callee = impl::trans_iface_callee(bcx, v, mth_ty, mth_idx); #debug("calling mth ty %s, lltype %s", @@ -55,7 +55,7 @@ impl methods for reflector { }; #debug("passing %u args:", vec::len(args)); let bcx = self.bcx; - for args.eachi {|i, a| + for args.eachi |i, a| { #debug("arg %u: %s", i, val_str(bcx.ccx().tn, a)); } self.bcx = @@ -69,35 +69,35 @@ impl methods for reflector { abi::tydesc_field_visit_glue); } - fn bracketed_t(bracket_name: str, t: ty::t, extra: [ValueRef]/~) { + fn bracketed_t(bracket_name: str, t: ty::t, extra: ~[ValueRef]) { self.visit("enter_" + bracket_name, extra); self.visit_tydesc(t); self.visit("leave_" + bracket_name, extra); } - fn bracketed_mt(bracket_name: str, mt: ty::mt, extra: [ValueRef]/~) { + fn bracketed_mt(bracket_name: str, mt: ty::mt, extra: ~[ValueRef]) { self.bracketed_t(bracket_name, mt.ty, - vec::append([self.c_uint(mt.mutbl as uint)]/~, + vec::append(~[self.c_uint(mt.mutbl as uint)], extra)); } fn vstore_name_and_extra(t: ty::t, vstore: ty::vstore, - f: fn(str,[ValueRef]/~)) { + f: fn(str,~[ValueRef])) { alt vstore { ty::vstore_fixed(n) { - let extra = vec::append([self.c_uint(n)]/~, + let extra = vec::append(~[self.c_uint(n)], self.c_size_and_align(t)); f("fixed", extra) } - ty::vstore_slice(_) { f("slice", []/~) } - ty::vstore_uniq { f("uniq", []/~);} - ty::vstore_box { f("box", []/~); } + ty::vstore_slice(_) { f("slice", ~[]) } + ty::vstore_uniq { f("uniq", ~[]);} + ty::vstore_box { f("box", ~[]); } } } fn leaf(name: str) { - self.visit(name, []/~); + self.visit(name, ~[]); } // Entrypoint @@ -127,40 +127,40 @@ impl methods for reflector { ty::ty_float(ast::ty_f64) { self.leaf("f64") } ty::ty_str { self.leaf("str") } - ty::ty_vec(mt) { self.bracketed_mt("vec", mt, []/~) } + ty::ty_vec(mt) { self.bracketed_mt("vec", mt, ~[]) } ty::ty_estr(vst) { - self.vstore_name_and_extra(t, vst) {|name, extra| + do self.vstore_name_and_extra(t, vst) |name, extra| { self.visit("estr_" + name, extra) } } ty::ty_evec(mt, vst) { - self.vstore_name_and_extra(t, vst) {|name, extra| + do self.vstore_name_and_extra(t, vst) |name, extra| { self.bracketed_mt("evec_" + name, mt, extra) } } - ty::ty_box(mt) { self.bracketed_mt("box", mt, []/~) } - ty::ty_uniq(mt) { self.bracketed_mt("uniq", mt, []/~) } - ty::ty_ptr(mt) { self.bracketed_mt("ptr", mt, []/~) } - ty::ty_rptr(_, mt) { self.bracketed_mt("rptr", mt, []/~) } + ty::ty_box(mt) { self.bracketed_mt("box", mt, ~[]) } + ty::ty_uniq(mt) { self.bracketed_mt("uniq", mt, ~[]) } + ty::ty_ptr(mt) { self.bracketed_mt("ptr", mt, ~[]) } + ty::ty_rptr(_, mt) { self.bracketed_mt("rptr", mt, ~[]) } ty::ty_rec(fields) { - let extra = (vec::append([self.c_uint(vec::len(fields))]/~, + let extra = (vec::append(~[self.c_uint(vec::len(fields))], self.c_size_and_align(t))); self.visit("enter_rec", extra); - for fields.eachi {|i, field| + for fields.eachi |i, field| { self.bracketed_mt("rec_field", field.mt, - [self.c_uint(i), - self.c_slice(*field.ident)]/~); + ~[self.c_uint(i), + self.c_slice(*field.ident)]); } self.visit("leave_rec", extra); } ty::ty_tup(tys) { - let extra = (vec::append([self.c_uint(vec::len(tys))]/~, + let extra = (vec::append(~[self.c_uint(vec::len(tys))], self.c_size_and_align(t))); self.visit("enter_tup", extra); - for tys.eachi {|i, t| - self.bracketed_t("tup_field", t, [self.c_uint(i)]/~); + for tys.eachi |i, t| { + self.bracketed_t("tup_field", t, ~[self.c_uint(i)]); } self.visit("leave_tup", extra); } @@ -185,12 +185,12 @@ impl methods for reflector { ast::noreturn { 0u } ast::return_val { 1u } }; - let extra = [self.c_uint(pureval), + let extra = ~[self.c_uint(pureval), self.c_uint(protoval), self.c_uint(vec::len(fty.inputs)), - self.c_uint(retval)]/~; + self.c_uint(retval)]; self.visit("enter_fn", extra); - for fty.inputs.eachi {|i, arg| + for fty.inputs.eachi |i, arg| { let modeval = alt arg.mode { ast::infer(_) { 0u } ast::expl(e) { @@ -204,11 +204,11 @@ impl methods for reflector { } }; self.bracketed_t("fn_input", arg.ty, - [self.c_uint(i), - self.c_uint(modeval)]/~); + ~[self.c_uint(i), + self.c_uint(modeval)]); } self.bracketed_t("fn_output", fty.output, - [self.c_uint(retval)]/~); + ~[self.c_uint(retval)]); self.visit("leave_fn", extra); } @@ -216,14 +216,14 @@ impl methods for reflector { let bcx = self.bcx; let tcx = bcx.ccx().tcx; let fields = ty::class_items_as_fields(tcx, did, substs); - let extra = vec::append([self.c_uint(vec::len(fields))]/~, + let extra = vec::append(~[self.c_uint(vec::len(fields))], self.c_size_and_align(t)); self.visit("enter_class", extra); - for fields.eachi {|i, field| + for fields.eachi |i, field| { self.bracketed_mt("class_field", field.mt, - [self.c_uint(i), - self.c_slice(*field.ident)]/~); + ~[self.c_uint(i), + self.c_slice(*field.ident)]); } self.visit("leave_class", extra); } @@ -236,19 +236,19 @@ impl methods for reflector { let bcx = self.bcx; let tcx = bcx.ccx().tcx; let variants = ty::substd_enum_variants(tcx, did, substs); - let extra = vec::append([self.c_uint(vec::len(variants))]/~, + let extra = vec::append(~[self.c_uint(vec::len(variants))], self.c_size_and_align(t)); self.visit("enter_enum", extra); - for variants.eachi {|i, v| - let extra = [self.c_uint(i), + for variants.eachi |i, v| { + let extra = ~[self.c_uint(i), self.c_int(v.disr_val), self.c_uint(vec::len(v.args)), - self.c_slice(*v.name)]/~; + self.c_slice(*v.name)]; self.visit("enter_enum_variant", extra); - for v.args.eachi {|j, a| + for v.args.eachi |j, a| { self.bracketed_t("enum_variant_field", a, - [self.c_uint(j)]/~); + ~[self.c_uint(j)]); } self.visit("leave_enum_variant", extra); } @@ -259,20 +259,20 @@ impl methods for reflector { ty::ty_iface(_, _) { self.leaf("iface") } ty::ty_var(_) { self.leaf("var") } ty::ty_var_integral(_) { self.leaf("var_integral") } - ty::ty_param(n, _) { self.visit("param", [self.c_uint(n)]/~) } + ty::ty_param(n, _) { self.visit("param", ~[self.c_uint(n)]) } ty::ty_self { self.leaf("self") } ty::ty_type { self.leaf("type") } ty::ty_opaque_box { self.leaf("opaque_box") } - ty::ty_constr(t, _) { self.bracketed_t("constr", t, []/~) } + ty::ty_constr(t, _) { self.bracketed_t("constr", t, ~[]) } ty::ty_opaque_closure_ptr(ck) { let ckval = alt ck { ty::ck_block { 0u } ty::ck_box { 1u } ty::ck_uniq { 2u } }; - self.visit("closure_ptr", [self.c_uint(ckval)]/~) + self.visit("closure_ptr", ~[self.c_uint(ckval)]) } - ty::ty_unboxed_vec(mt) { self.bracketed_mt("vec", mt, []/~) } + ty::ty_unboxed_vec(mt) { self.bracketed_mt("vec", mt, ~[]) } } } } diff --git a/src/rustc/middle/trans/shape.rs b/src/rustc/middle/trans/shape.rs index 33e6051d70b..2e2403090f5 100644 --- a/src/rustc/middle/trans/shape.rs +++ b/src/rustc/middle/trans/shape.rs @@ -23,12 +23,12 @@ import option::is_some; import ty_ctxt = middle::ty::ctxt; type nominal_id = @{did: ast::def_id, parent_id: option<ast::def_id>, - tps: [ty::t]/~}; + tps: ~[ty::t]}; fn mk_nominal_id(tcx: ty::ctxt, did: ast::def_id, parent_id: option<ast::def_id>, - tps: [ty::t]/~) -> nominal_id { - let tps_norm = tps.map { |t| ty::normalize_ty(tcx, t) }; + tps: ~[ty::t]) -> nominal_id { + let tps_norm = tps.map(|t| ty::normalize_ty(tcx, t)); @{did: did, parent_id: parent_id, tps: tps_norm} } @@ -38,7 +38,7 @@ fn hash_nominal_id(&&ri: nominal_id) -> uint { h += ri.did.crate as uint; h *= 33u; h += ri.did.node as uint; - for vec::each(ri.tps) {|t| + for vec::each(ri.tps) |t| { h *= 33u; h += ty::type_id(t); } @@ -49,7 +49,7 @@ fn eq_nominal_id(&&mi: nominal_id, &&ni: nominal_id) -> bool { if mi.did != ni.did { false } else { - vec::all2(mi.tps, ni.tps) { |m_tp, n_tp| + do vec::all2(mi.tps, ni.tps) |m_tp, n_tp| { ty::type_id(m_tp) == ty::type_id(n_tp) } } @@ -101,7 +101,7 @@ fn mk_global(ccx: @crate_ctxt, name: str, llval: ValueRef, internal: bool) -> ValueRef { let llglobal = str::as_c_str(name, - {|buf| + |buf| { lib::llvm::llvm::LLVMAddGlobal(ccx.llmod, val_ty(llval), buf) }); @@ -137,7 +137,7 @@ enum enum_kind { fn enum_kind(ccx: @crate_ctxt, did: ast::def_id) -> enum_kind { let variants = ty::enum_variants(ccx.tcx, did); - if vec::any(*variants) {|v| vec::len(v.args) > 0u} { + if vec::any(*variants, |v| vec::len(v.args) > 0u) { if vec::len(*variants) == 1u { tk_newtype } else { tk_complex } } else { @@ -185,7 +185,7 @@ fn s_send_tydesc(_tcx: ty_ctxt) -> u8 { fn mk_ctxt(llmod: ModuleRef) -> ctxt { let llshapetablesty = trans::common::T_named_struct("shapes"); - let llshapetables = str::as_c_str("shapes", {|buf| + let llshapetables = str::as_c_str("shapes", |buf| { lib::llvm::llvm::LLVMAddGlobal(llmod, llshapetablesty, buf) }); @@ -198,46 +198,46 @@ fn mk_ctxt(llmod: ModuleRef) -> ctxt { llshapetables: llshapetables}; } -fn add_bool(&dest: [u8]/~, val: bool) { - dest += [if val { 1u8 } else { 0u8 }]/~; +fn add_bool(&dest: ~[u8], val: bool) { + dest += ~[if val { 1u8 } else { 0u8 }]; } -fn add_u16(&dest: [u8]/~, val: u16) { - dest += [(val & 0xffu16) as u8, (val >> 8u16) as u8]/~; +fn add_u16(&dest: ~[u8], val: u16) { + dest += ~[(val & 0xffu16) as u8, (val >> 8u16) as u8]; } -fn add_substr(&dest: [u8]/~, src: [u8]/~) { +fn add_substr(&dest: ~[u8], src: ~[u8]) { add_u16(dest, vec::len(src) as u16); dest += src; } -fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { +fn shape_of(ccx: @crate_ctxt, t: ty::t) -> ~[u8] { alt ty::get(t).struct { ty::ty_nil | ty::ty_bool | ty::ty_uint(ast::ty_u8) | - ty::ty_bot { [shape_u8]/~ } - ty::ty_int(ast::ty_i) { [s_int(ccx.tcx)]/~ } - ty::ty_float(ast::ty_f) { [s_float(ccx.tcx)]/~ } - ty::ty_uint(ast::ty_u) | ty::ty_ptr(_) { [s_uint(ccx.tcx)]/~ } - ty::ty_type { [s_tydesc(ccx.tcx)]/~ } - ty::ty_int(ast::ty_i8) { [shape_i8]/~ } - ty::ty_uint(ast::ty_u16) { [shape_u16]/~ } - ty::ty_int(ast::ty_i16) { [shape_i16]/~ } - ty::ty_uint(ast::ty_u32) { [shape_u32]/~ } - ty::ty_int(ast::ty_i32) | ty::ty_int(ast::ty_char) { [shape_i32]/~ } - ty::ty_uint(ast::ty_u64) { [shape_u64]/~ } - ty::ty_int(ast::ty_i64) { [shape_i64]/~ } - ty::ty_float(ast::ty_f32) { [shape_f32]/~ } - ty::ty_float(ast::ty_f64) { [shape_f64]/~ } + ty::ty_bot { ~[shape_u8] } + ty::ty_int(ast::ty_i) { ~[s_int(ccx.tcx)] } + ty::ty_float(ast::ty_f) { ~[s_float(ccx.tcx)] } + ty::ty_uint(ast::ty_u) | ty::ty_ptr(_) { ~[s_uint(ccx.tcx)] } + ty::ty_type { ~[s_tydesc(ccx.tcx)] } + ty::ty_int(ast::ty_i8) { ~[shape_i8] } + ty::ty_uint(ast::ty_u16) { ~[shape_u16] } + ty::ty_int(ast::ty_i16) { ~[shape_i16] } + ty::ty_uint(ast::ty_u32) { ~[shape_u32] } + ty::ty_int(ast::ty_i32) | ty::ty_int(ast::ty_char) { ~[shape_i32] } + ty::ty_uint(ast::ty_u64) { ~[shape_u64] } + ty::ty_int(ast::ty_i64) { ~[shape_i64] } + ty::ty_float(ast::ty_f32) { ~[shape_f32] } + ty::ty_float(ast::ty_f64) { ~[shape_f64] } ty::ty_estr(ty::vstore_uniq) | ty::ty_str { shape_of(ccx, tvec::expand_boxed_vec_ty(ccx.tcx, t)) } ty::ty_enum(did, substs) { alt enum_kind(ccx, did) { - tk_unit { [s_variant_enum_t(ccx.tcx)]/~ } - tk_enum { [s_variant_enum_t(ccx.tcx)]/~ } + tk_unit { ~[s_variant_enum_t(ccx.tcx)] } + tk_enum { ~[s_variant_enum_t(ccx.tcx)] } tk_newtype | tk_complex { - let mut s = [shape_enum]/~, id; + let mut s = ~[shape_enum], id; let nom_id = mk_nominal_id(ccx.tcx, did, none, substs.tps); alt ccx.shape_cx.tag_id_to_index.find(nom_id) { none { @@ -260,14 +260,14 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_box) | - ty::ty_box(_) | ty::ty_opaque_box { [shape_box]/~ } + ty::ty_box(_) | ty::ty_opaque_box { ~[shape_box] } ty::ty_uniq(mt) { - let mut s = [shape_uniq]/~; + let mut s = ~[shape_uniq]; add_substr(s, shape_of(ccx, mt.ty)); s } ty::ty_unboxed_vec(mt) { - let mut s = [shape_unboxed_vec]/~; + let mut s = ~[shape_unboxed_vec]; add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty)); add_substr(s, shape_of(ccx, mt.ty)); s @@ -278,7 +278,7 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_estr(ty::vstore_fixed(n)) { - let mut s = [shape_fixedvec]/~; + let mut s = ~[shape_fixedvec]; let u8_t = ty::mk_mach_uint(ccx.tcx, ast::ty_u8); assert (n + 1u) <= 0xffffu; add_u16(s, (n + 1u) as u16); @@ -288,7 +288,7 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_evec(mt, ty::vstore_fixed(n)) { - let mut s = [shape_fixedvec]/~; + let mut s = ~[shape_fixedvec]; assert n <= 0xffffu; add_u16(s, n as u16); add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty)); @@ -297,7 +297,7 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_estr(ty::vstore_slice(r)) { - let mut s = [shape_slice]/~; + let mut s = ~[shape_slice]; let u8_t = ty::mk_mach_uint(ccx.tcx, ast::ty_u8); add_bool(s, true); // is_pod add_bool(s, true); // is_str @@ -306,7 +306,7 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_evec(mt, ty::vstore_slice(r)) { - let mut s = [shape_slice]/~; + let mut s = ~[shape_slice]; add_bool(s, ty::type_is_pod(ccx.tcx, mt.ty)); add_bool(s, false); // is_str add_substr(s, shape_of(ccx, mt.ty)); @@ -314,31 +314,31 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } ty::ty_rec(fields) { - let mut s = [shape_struct]/~, sub = []/~; - for vec::each(fields) {|f| + let mut s = ~[shape_struct], sub = ~[]; + for vec::each(fields) |f| { sub += shape_of(ccx, f.mt.ty); } add_substr(s, sub); s } ty::ty_tup(elts) { - let mut s = [shape_struct]/~, sub = []/~; - for vec::each(elts) {|elt| + let mut s = ~[shape_struct], sub = ~[]; + for vec::each(elts) |elt| { sub += shape_of(ccx, elt); } add_substr(s, sub); s } - ty::ty_iface(_, _) { [shape_box_fn]/~ } + ty::ty_iface(_, _) { ~[shape_box_fn] } ty::ty_class(did, substs) { // same as records, unless there's a dtor let tps = substs.tps; let m_dtor_did = ty::ty_dtor(ccx.tcx, did); let mut s = if option::is_some(m_dtor_did) { - [shape_res]/~ + ~[shape_res] } - else { [shape_struct]/~ }, sub = []/~; - option::iter(m_dtor_did) {|dtor_did| + else { ~[shape_struct] }, sub = ~[]; + do option::iter(m_dtor_did) |dtor_did| { let ri = @{did: dtor_did, parent_id: some(did), tps: tps}; let id = interner::intern(ccx.shape_cx.resources, ri); add_u16(s, id as u16); @@ -347,26 +347,26 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { // hasn't changed since we started monomorphizing. add_u16(s, 0_u16); }; - for ty::class_items_as_mutable_fields(ccx.tcx, did, substs).each {|f| + for ty::class_items_as_mutable_fields(ccx.tcx, did, substs).each |f| { sub += shape_of(ccx, f.mt.ty); } add_substr(s, sub); s } ty::ty_rptr(_, mt) { - let mut s = [shape_rptr]/~; + let mut s = ~[shape_rptr]; add_substr(s, shape_of(ccx, mt.ty)); s } ty::ty_param(*) { ccx.tcx.sess.bug("non-monomorphized type parameter"); } - ty::ty_fn({proto: ast::proto_box, _}) { [shape_box_fn]/~ } - ty::ty_fn({proto: ast::proto_uniq, _}) { [shape_uniq_fn]/~ } + ty::ty_fn({proto: ast::proto_box, _}) { ~[shape_box_fn] } + ty::ty_fn({proto: ast::proto_uniq, _}) { ~[shape_uniq_fn] } ty::ty_fn({proto: ast::proto_block, _}) | - ty::ty_fn({proto: ast::proto_any, _}) { [shape_stack_fn]/~ } - ty::ty_fn({proto: ast::proto_bare, _}) { [shape_bare_fn]/~ } - ty::ty_opaque_closure_ptr(_) { [shape_opaque_closure_ptr]/~ } + ty::ty_fn({proto: ast::proto_any, _}) { ~[shape_stack_fn] } + ty::ty_fn({proto: ast::proto_bare, _}) { ~[shape_bare_fn] } + ty::ty_opaque_closure_ptr(_) { ~[shape_opaque_closure_ptr] } ty::ty_constr(inner_t, _) { shape_of(ccx, inner_t) } ty::ty_var(_) | ty::ty_var_integral(_) | ty::ty_self { ccx.sess.bug("shape_of: unexpected type struct found"); @@ -374,9 +374,9 @@ fn shape_of(ccx: @crate_ctxt, t: ty::t) -> [u8]/~ { } } -fn shape_of_variant(ccx: @crate_ctxt, v: ty::variant_info) -> [u8]/~ { - let mut s = []/~; - for vec::each(v.args) {|t| s += shape_of(ccx, t); } +fn shape_of_variant(ccx: @crate_ctxt, v: ty::variant_info) -> ~[u8] { + let mut s = ~[]; + for vec::each(v.args) |t| { s += shape_of(ccx, t); } ret s; } @@ -384,23 +384,23 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { // Loop over all the enum variants and write their shapes into a // data buffer. As we do this, it's possible for us to discover // new enums, so we must do this first. - let mut data = []/~; - let mut offsets = []/~; + let mut data = ~[]; + let mut offsets = ~[]; let mut i = 0u; - let mut enum_variants = []/~; + let mut enum_variants = ~[]; while i < ccx.shape_cx.tag_order.len() { let {did, substs} = ccx.shape_cx.tag_order[i]; let variants = @ty::substd_enum_variants(ccx.tcx, did, substs); - vec::iter(*variants) {|v| - offsets += [vec::len(data) as u16]/~; + do vec::iter(*variants) |v| { + offsets += ~[vec::len(data) as u16]; let variant_shape = shape_of_variant(ccx, v); add_substr(data, variant_shape); - let zname = str::bytes(*v.name) + [0u8]/~; + let zname = str::bytes(*v.name) + ~[0u8]; add_substr(data, zname); } - enum_variants += [variants]/~; + enum_variants += ~[variants]; i += 1u; } @@ -408,13 +408,13 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { // info records for each enum) and the info space (which contains offsets // to each variant shape). As we do so, build up the header. - let mut header = []/~; - let mut inf = []/~; + let mut header = ~[]; + let mut inf = ~[]; let header_sz = 2u16 * ccx.shape_cx.next_tag_id; let data_sz = vec::len(data) as u16; let mut inf_sz = 0u16; - for enum_variants.each { |variants| + for enum_variants.each |variants| { let num_variants = vec::len(*variants) as u16; add_u16(header, header_sz + inf_sz); inf_sz += 2u16 * (num_variants + 2u16) + 3u16; @@ -424,9 +424,9 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { // variant. Also construct the largest-variant table for each enum, which // contains the variants that the size-of operation needs to look at. - let mut lv_table = []/~; + let mut lv_table = ~[]; let mut i = 0u; - for enum_variants.each { |variants| + for enum_variants.each |variants| { add_u16(inf, vec::len(*variants) as u16); // Construct the largest-variants table. @@ -435,11 +435,11 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { let lv = largest_variants(ccx, variants); add_u16(lv_table, vec::len(lv) as u16); - for vec::each(lv) {|v| add_u16(lv_table, v as u16); } + for vec::each(lv) |v| { add_u16(lv_table, v as u16); } // Determine whether the enum has dynamic size. - assert !vec::any(*variants, {|v| - vec::any(v.args, {|t| ty::type_has_params(t)}) + assert !vec::any(*variants, |v| { + vec::any(v.args, |t| ty::type_has_params(t)) }); // If we can, write in the static size and alignment of the enum. @@ -448,10 +448,10 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { // Write in the static size and alignment of the enum. add_u16(inf, size_align.size); - inf += [size_align.align]/~; + inf += ~[size_align.align]; // Now write in the offset of each variant. - for vec::each(*variants) {|_v| + for vec::each(*variants) |_v| { add_u16(inf, header_sz + inf_sz + offsets[i]); i += 1u; } @@ -470,18 +470,18 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { /* tjc: Not annotating FIXMEs in this module because of #1498 */ fn largest_variants(ccx: @crate_ctxt, - variants: @[ty::variant_info]/~) -> [uint]/~ { + variants: @~[ty::variant_info]) -> ~[uint] { // Compute the minimum and maximum size and alignment for each // variant. // // NB: We could do better here; e.g. we know that any // variant that contains (T,T) must be as least as large as // any variant that contains just T. - let mut ranges = []/~; - for vec::each(*variants) {|variant| + let mut ranges = ~[]; + for vec::each(*variants) |variant| { let mut bounded = true; let mut min_size = 0u, min_align = 0u; - for vec::each(variant.args) {|elem_t| + for vec::each(variant.args) |elem_t| { if ty::type_has_params(elem_t) { // NB: We could do better here; this causes us to // conservatively assume that (int, T) has minimum size 0, @@ -495,13 +495,13 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { } ranges += - [{size: {min: min_size, bounded: bounded}, - align: {min: min_align, bounded: bounded}}]/~; + ~[{size: {min: min_size, bounded: bounded}, + align: {min: min_align, bounded: bounded}}]; } // Initialize the candidate set to contain all variants. - let mut candidates = [mut]/~; - for vec::each(*variants) {|_v| candidates += [mut true]/~; } + let mut candidates = ~[mut]; + for vec::each(*variants) |_v| { candidates += ~[mut true]; } // Do a pairwise comparison among all variants still in the // candidate set. Throw out any variant that we know has size @@ -534,7 +534,7 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { } // Return the resulting set. - let mut result = []/~; + let mut result = ~[]; let mut i = 0u; while i < vec::len(candidates) { if candidates[i] { vec::push(result, i); } @@ -543,16 +543,16 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { ret result; } - fn compute_static_enum_size(ccx: @crate_ctxt, largest_variants: [uint]/~, - variants: @[ty::variant_info]/~) + fn compute_static_enum_size(ccx: @crate_ctxt, largest_variants: ~[uint], + variants: @~[ty::variant_info]) -> size_align { let mut max_size = 0u16; let mut max_align = 1u8; - for vec::each(largest_variants) {|vid| + for vec::each(largest_variants) |vid| { // We increment a "virtual data pointer" to compute the size. - let mut lltys = []/~; - for vec::each(variants[vid].args) {|typ| - lltys += [type_of::type_of(ccx, typ)]/~; + let mut lltys = ~[]; + for vec::each(variants[vid].args) |typ| { + lltys += ~[type_of::type_of(ccx, typ)]; } let llty = trans::common::T_struct(lltys); @@ -578,13 +578,13 @@ fn gen_enum_shapes(ccx: @crate_ctxt) -> ValueRef { } fn gen_resource_shapes(ccx: @crate_ctxt) -> ValueRef { - let mut dtors = []/~; + let mut dtors = ~[]; let len = interner::len(ccx.shape_cx.resources); - for uint::range(0u, len) {|i| + for uint::range(0u, len) |i| { let ri = interner::get(ccx.shape_cx.resources, i); - for ri.tps.each() {|s| assert !ty::type_has_params(s); } - option::iter(ri.parent_id) {|id| - dtors += [trans::base::get_res_dtor(ccx, ri.did, id, ri.tps)]/~; + for ri.tps.each() |s| { assert !ty::type_has_params(s); } + do option::iter(ri.parent_id) |id| { + dtors += ~[trans::base::get_res_dtor(ccx, ri.did, id, ri.tps)]; } } ret mk_global(ccx, "resource_shapes", C_struct(dtors), true); @@ -594,12 +594,12 @@ fn gen_shape_tables(ccx: @crate_ctxt) { let lltagstable = gen_enum_shapes(ccx); let llresourcestable = gen_resource_shapes(ccx); trans::common::set_struct_body(ccx.shape_cx.llshapetablesty, - [val_ty(lltagstable), - val_ty(llresourcestable)]/~); + ~[val_ty(lltagstable), + val_ty(llresourcestable)]); let lltables = C_named_struct(ccx.shape_cx.llshapetablesty, - [lltagstable, llresourcestable]/~); + ~[lltagstable, llresourcestable]); lib::llvm::llvm::LLVMSetInitializer(ccx.shape_cx.llshapetables, lltables); lib::llvm::llvm::LLVMSetGlobalConstant(ccx.shape_cx.llshapetables, True); lib::llvm::SetLinkage(ccx.shape_cx.llshapetables, @@ -694,7 +694,7 @@ fn static_size_of_enum(cx: @crate_ctxt, t: ty::t) -> uint { // Compute max(variant sizes). let mut max_size = 0u; let variants = ty::enum_variants(cx.tcx, tid); - for vec::each(*variants) {|variant| + for vec::each(*variants) |variant| { let tup_ty = simplify_type(cx.tcx, ty::mk_tup(cx.tcx, variant.args)); // Perform any type parameter substitutions. @@ -728,21 +728,21 @@ fn simplify_type(tcx: ty::ctxt, typ: ty::t) -> ty::t { ty::ty_evec(_, ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_box) | ty::ty_estr(ty::vstore_uniq) | ty::ty_estr(ty::vstore_box) | ty::ty_ptr(_) | ty::ty_rptr(_,_) { nilptr(tcx) } - ty::ty_fn(_) { ty::mk_tup(tcx, [nilptr(tcx), nilptr(tcx)]/~) } + ty::ty_fn(_) { ty::mk_tup(tcx, ~[nilptr(tcx), nilptr(tcx)]) } ty::ty_evec(_, ty::vstore_slice(_)) | ty::ty_estr(ty::vstore_slice(_)) { - ty::mk_tup(tcx, [nilptr(tcx), ty::mk_int(tcx)]/~) + ty::mk_tup(tcx, ~[nilptr(tcx), ty::mk_int(tcx)]) } // Reduce a class type to a record type in which all the fields are // simplified ty::ty_class(did, substs) { let simpl_fields = (if is_some(ty::ty_dtor(tcx, did)) { // remember the drop flag - [{ident: @"drop", mt: {ty: + ~[{ident: @"drop", mt: {ty: ty::mk_u8(tcx), - mutbl: ast::m_mutbl}}]/~ } - else { []/~ }) + - ty::lookup_class_fields(tcx, did).map {|f| + mutbl: ast::m_mutbl}}] } + else { ~[] }) + + do ty::lookup_class_fields(tcx, did).map |f| { let t = ty::lookup_field_type(tcx, did, f.id, substs); {ident: f.ident, mt: {ty: simplify_type(tcx, t), mutbl: ast::m_const}} @@ -752,5 +752,5 @@ fn simplify_type(tcx: ty::ctxt, typ: ty::t) -> ty::t { _ { typ } } } - ty::fold_ty(tcx, typ) {|t| simplifier(tcx, t) } + ty::fold_ty(tcx, typ, |t| simplifier(tcx, t)) } diff --git a/src/rustc/middle/trans/tvec.rs b/src/rustc/middle/trans/tvec.rs index 58d415ad266..a15bb293813 100644 --- a/src/rustc/middle/trans/tvec.rs +++ b/src/rustc/middle/trans/tvec.rs @@ -35,30 +35,30 @@ fn expand_boxed_vec_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("tvec::get_fill"); - Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_fill]/~)) + Load(bcx, GEPi(bcx, vptr, ~[0u, abi::vec_elt_fill])) } fn set_fill(bcx: block, vptr: ValueRef, fill: ValueRef) { - Store(bcx, fill, GEPi(bcx, vptr, [0u, abi::vec_elt_fill]/~)); + Store(bcx, fill, GEPi(bcx, vptr, ~[0u, abi::vec_elt_fill])); } fn get_alloc(bcx: block, vptr: ValueRef) -> ValueRef { - Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_alloc]/~)) + Load(bcx, GEPi(bcx, vptr, ~[0u, abi::vec_elt_alloc])) } fn get_bodyptr(bcx: block, vptr: ValueRef) -> ValueRef { - non_gc_box_cast(bcx, GEPi(bcx, vptr, [0u, abi::box_field_body]/~)) + non_gc_box_cast(bcx, GEPi(bcx, vptr, ~[0u, abi::box_field_body])) } fn get_dataptr(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("tvec::get_dataptr"); - GEPi(bcx, vptr, [0u, abi::vec_elt_elems, 0u]/~) + GEPi(bcx, vptr, ~[0u, abi::vec_elt_elems, 0u]) } fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef { let _icx = bcx.insn_ctxt("tvec::pointer_add"); let old_ty = val_ty(ptr); let bptr = PointerCast(bcx, ptr, T_ptr(T_i8())); - ret PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]/~), old_ty); + ret PointerCast(bcx, InBoundsGEP(bcx, bptr, ~[bytes]), old_ty); } fn alloc_raw(bcx: block, unit_ty: ty::t, @@ -70,8 +70,8 @@ fn alloc_raw(bcx: block, unit_ty: ty::t, let vecsize = Add(bcx, alloc, llsize_of(ccx, ccx.opaque_vec_type)); let {box, body} = base::malloc_general_dyn(bcx, vecbodyty, heap, vecsize); - Store(bcx, fill, GEPi(bcx, body, [0u, abi::vec_elt_fill]/~)); - Store(bcx, alloc, GEPi(bcx, body, [0u, abi::vec_elt_alloc]/~)); + Store(bcx, fill, GEPi(bcx, body, ~[0u, abi::vec_elt_fill])); + Store(bcx, alloc, GEPi(bcx, body, ~[0u, abi::vec_elt_alloc])); ret {bcx: bcx, val: box}; } fn alloc_uniq_raw(bcx: block, unit_ty: ty::t, @@ -118,13 +118,13 @@ fn make_drop_glue_unboxed(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> } else { bcx } } -fn trans_evec(bcx: block, args: [@ast::expr]/~, +fn trans_evec(bcx: block, args: ~[@ast::expr], vst: ast::vstore, id: ast::node_id, dest: dest) -> block { let _icx = bcx.insn_ctxt("tvec::trans_evec"); let ccx = bcx.ccx(); let mut bcx = bcx; if dest == base::ignore { - for vec::each(args) {|arg| + for vec::each(args) |arg| { bcx = base::trans_expr(bcx, arg, base::ignore); } ret bcx; @@ -163,10 +163,10 @@ fn trans_evec(bcx: block, args: [@ast::expr]/~, let len = Mul(bcx, n, unit_sz); - let p = base::alloca(bcx, T_struct([T_ptr(llunitty), - ccx.int_type]/~)); - Store(bcx, vp, GEPi(bcx, p, [0u, abi::slice_elt_base]/~)); - Store(bcx, len, GEPi(bcx, p, [0u, abi::slice_elt_len]/~)); + let p = base::alloca(bcx, T_struct(~[T_ptr(llunitty), + ccx.int_type])); + Store(bcx, vp, GEPi(bcx, p, ~[0u, abi::slice_elt_base])); + Store(bcx, len, GEPi(bcx, p, ~[0u, abi::slice_elt_len])); {bcx: bcx, val: p, dataptr: vp} } @@ -188,19 +188,19 @@ fn trans_evec(bcx: block, args: [@ast::expr]/~, // Store the individual elements. - let mut i = 0u, temp_cleanups = [val]/~; + let mut i = 0u, temp_cleanups = ~[val]; #debug("trans_evec: v: %s, dataptr: %s", val_str(ccx.tn, val), val_str(ccx.tn, dataptr)); - for vec::each(args) {|e| - let lleltptr = InBoundsGEP(bcx, dataptr, [C_uint(ccx, i)]/~); + for vec::each(args) |e| { + let lleltptr = InBoundsGEP(bcx, dataptr, ~[C_uint(ccx, i)]); bcx = base::trans_expr_save_in(bcx, e, lleltptr); add_clean_temp_mem(bcx, lleltptr, unit_ty); vec::push(temp_cleanups, lleltptr); i += 1u; } - for vec::each(temp_cleanups) {|cln| revoke_clean(bcx, cln); } + for vec::each(temp_cleanups) |cln| { revoke_clean(bcx, cln); } alt vst { ast::vstore_fixed(_) { @@ -248,14 +248,14 @@ fn get_base_and_len(cx: block, v: ValueRef, e_ty: ty::t) alt vstore { ty::vstore_fixed(n) { - let base = GEPi(cx, v, [0u, 0u]/~); + let base = GEPi(cx, v, ~[0u, 0u]); let n = if ty::type_is_str(e_ty) { n + 1u } else { n }; let len = Mul(cx, C_uint(ccx, n), unit_sz); (base, len) } ty::vstore_slice(_) { - let base = Load(cx, GEPi(cx, v, [0u, abi::slice_elt_base]/~)); - let len = Load(cx, GEPi(cx, v, [0u, abi::slice_elt_len]/~)); + let base = Load(cx, GEPi(cx, v, ~[0u, abi::slice_elt_base])); + let len = Load(cx, GEPi(cx, v, ~[0u, abi::slice_elt_len])); (base, len) } ty::vstore_uniq | ty::vstore_box { @@ -274,7 +274,7 @@ fn trans_estr(bcx: block, s: @str, vstore: ast::vstore, let c = alt vstore { ast::vstore_fixed(_) { - // "hello"/_ => "hello"/5 => [i8 x 6]/~ in llvm + // "hello"/_ => "hello"/5 => ~[i8 x 6] in llvm #debug("trans_estr: fixed: %s", *s); C_postr(*s) } @@ -288,7 +288,7 @@ fn trans_estr(bcx: block, s: @str, vstore: ast::vstore, ast::vstore_uniq { let cs = PointerCast(bcx, C_cstr(ccx, *s), T_ptr(T_i8())); let len = C_uint(ccx, str::len(*s)); - let c = Call(bcx, ccx.upcalls.str_new_uniq, [cs, len]/~); + let c = Call(bcx, ccx.upcalls.str_new_uniq, ~[cs, len]); PointerCast(bcx, c, T_unique_ptr(T_unique(ccx, T_vec(ccx, T_i8())))) } @@ -296,7 +296,7 @@ fn trans_estr(bcx: block, s: @str, vstore: ast::vstore, ast::vstore_box { let cs = PointerCast(bcx, C_cstr(ccx, *s), T_ptr(T_i8())); let len = C_uint(ccx, str::len(*s)); - let c = Call(bcx, ccx.upcalls.str_new_shared, [cs, len]/~); + let c = Call(bcx, ccx.upcalls.str_new_shared, ~[cs, len]); PointerCast(bcx, c, T_box_ptr(T_box(ccx, T_vec(ccx, T_i8())))) } @@ -323,7 +323,7 @@ fn trans_append(bcx: block, vec_ty: ty::t, lhsptr: ValueRef, let opaque_lhs = PointerCast(bcx, lhsptr, T_ptr(T_ptr(T_i8()))); Call(bcx, ccx.upcalls.vec_grow, - [opaque_lhs, new_fill]/~); + ~[opaque_lhs, new_fill]); // Was overwritten if we resized let lhs = Load(bcx, lhsptr); let rhs = Select(bcx, self_append, lhs, rhs); @@ -335,35 +335,35 @@ fn trans_append(bcx: block, vec_ty: ty::t, lhsptr: ValueRef, if strings { lhs_off = Sub(bcx, lhs_off, C_int(ccx, 1)); } let write_ptr = pointer_add(bcx, lhs_data, lhs_off); let write_ptr_ptr = do_spill_noroot(bcx, write_ptr); - iter_vec_uniq(bcx, rhs, vec_ty, rfill, {|bcx, addr, _ty| + iter_vec_uniq(bcx, rhs, vec_ty, rfill, |bcx, addr, _ty| { let write_ptr = Load(bcx, write_ptr_ptr); let bcx = copy_val(bcx, INIT, write_ptr, load_if_immediate(bcx, addr, unit_ty), unit_ty); - Store(bcx, InBoundsGEP(bcx, write_ptr, [C_int(ccx, 1)]/~), + Store(bcx, InBoundsGEP(bcx, write_ptr, ~[C_int(ccx, 1)]), write_ptr_ptr); bcx }) } fn trans_append_literal(bcx: block, vptrptr: ValueRef, vec_ty: ty::t, - vals: [@ast::expr]/~) -> block { + vals: ~[@ast::expr]) -> block { let _icx = bcx.insn_ctxt("tvec::trans_append_literal"); let mut bcx = bcx, ccx = bcx.ccx(); let elt_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); let elt_llty = type_of::type_of(ccx, elt_ty); let elt_sz = shape::llsize_of(ccx, elt_llty); let scratch = base::alloca(bcx, elt_llty); - for vec::each(vals) {|val| + for vec::each(vals) |val| { bcx = base::trans_expr_save_in(bcx, val, scratch); let vptr = get_bodyptr(bcx, Load(bcx, vptrptr)); let old_fill = get_fill(bcx, vptr); let new_fill = Add(bcx, old_fill, elt_sz); let do_grow = ICmp(bcx, lib::llvm::IntUGT, new_fill, get_alloc(bcx, vptr)); - bcx = base::with_cond(bcx, do_grow) {|bcx| + bcx = do base::with_cond(bcx, do_grow) |bcx| { let pt = PointerCast(bcx, vptrptr, T_ptr(T_ptr(T_i8()))); - Call(bcx, ccx.upcalls.vec_grow, [pt, new_fill]/~); + Call(bcx, ccx.upcalls.vec_grow, ~[pt, new_fill]); bcx }; let vptr = get_bodyptr(bcx, Load(bcx, vptrptr)); @@ -386,7 +386,7 @@ fn trans_add(bcx: block, vec_ty: ty::t, lhs: ValueRef, if ty::get(vec_ty).struct == ty::ty_str { let lhs = PointerCast(bcx, lhs, T_ptr(T_i8())); let rhs = PointerCast(bcx, rhs, T_ptr(T_i8())); - let n = Call(bcx, ccx.upcalls.str_concat, [lhs, rhs]/~); + let n = Call(bcx, ccx.upcalls.str_concat, ~[lhs, rhs]); let n = PointerCast( bcx, n, T_unique_ptr(T_unique(ccx, T_vec(ccx, llunitty)))); ret base::store_in_dest(bcx, n, dest); @@ -407,7 +407,7 @@ fn trans_add(bcx: block, vec_ty: ty::t, lhs: ValueRef, let write_ptr = Load(bcx, write_ptr_ptr); let bcx = copy_val(bcx, INIT, write_ptr, load_if_immediate(bcx, addr, unit_ty), unit_ty); - Store(bcx, InBoundsGEP(bcx, write_ptr, [C_int(ccx, 1)]/~), + Store(bcx, InBoundsGEP(bcx, write_ptr, ~[C_int(ccx, 1)]), write_ptr_ptr); ret bcx; }; @@ -437,7 +437,7 @@ fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, let header_cx = sub_block(bcx, "iter_vec_loop_header"); Br(bcx, header_cx.llbb); let data_ptr = - Phi(header_cx, val_ty(data_ptr), [data_ptr]/~, [bcx.llbb]/~); + Phi(header_cx, val_ty(data_ptr), ~[data_ptr], ~[bcx.llbb]); let not_yet_at_end = ICmp(header_cx, lib::llvm::IntULT, data_ptr, data_end_ptr); let body_cx = sub_block(header_cx, "iter_vec_loop_body"); @@ -445,7 +445,7 @@ fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, CondBr(header_cx, not_yet_at_end, body_cx.llbb, next_cx.llbb); let body_cx = f(body_cx, data_ptr, unit_ty); AddIncomingToPhi(data_ptr, InBoundsGEP(body_cx, data_ptr, - [C_int(bcx.ccx(), 1)]/~), + ~[C_int(bcx.ccx(), 1)]), body_cx.llbb); Br(body_cx, header_cx.llbb); ret next_cx; diff --git a/src/rustc/middle/trans/type_of.rs b/src/rustc/middle/trans/type_of.rs index 82dfc71630f..3a34e67f010 100644 --- a/src/rustc/middle/trans/type_of.rs +++ b/src/rustc/middle/trans/type_of.rs @@ -15,8 +15,8 @@ export type_of_fn; export type_of_non_gc_box; fn type_of_explicit_args(cx: @crate_ctxt, - inputs: [ty::arg]/~) -> [TypeRef]/~ { - vec::map(inputs) {|arg| + inputs: ~[ty::arg]) -> ~[TypeRef] { + do vec::map(inputs) |arg| { let arg_ty = arg.ty; let llty = type_of(cx, arg_ty); alt ty::resolved_mode(cx.tcx, arg.mode) { @@ -26,9 +26,9 @@ fn type_of_explicit_args(cx: @crate_ctxt, } } -fn type_of_fn(cx: @crate_ctxt, inputs: [ty::arg]/~, +fn type_of_fn(cx: @crate_ctxt, inputs: ~[ty::arg], output: ty::t) -> TypeRef { - let mut atys: [TypeRef]/~ = []/~; + let mut atys: ~[TypeRef] = ~[]; // Arg 0: Output pointer. vec::push(atys, T_ptr(type_of(cx, output))); @@ -116,13 +116,13 @@ fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { ty::ty_rptr(_, mt) { T_ptr(type_of(cx, mt.ty)) } ty::ty_evec(mt, ty::vstore_slice(_)) { - T_struct([T_ptr(type_of(cx, mt.ty)), - T_uint_ty(cx, ast::ty_u)]/~) + T_struct(~[T_ptr(type_of(cx, mt.ty)), + T_uint_ty(cx, ast::ty_u)]) } ty::ty_estr(ty::vstore_slice(_)) { - T_struct([T_ptr(T_i8()), - T_uint_ty(cx, ast::ty_u)]/~) + T_struct(~[T_ptr(T_i8()), + T_uint_ty(cx, ast::ty_u)]) } ty::ty_estr(ty::vstore_fixed(n)) { @@ -134,8 +134,8 @@ fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { } ty::ty_rec(fields) { - let mut tys: [TypeRef]/~ = []/~; - for vec::each(fields) {|f| + let mut tys: ~[TypeRef] = ~[]; + for vec::each(fields) |f| { let mt_ty = f.mt.ty; vec::push(tys, type_of(cx, mt_ty)); } @@ -145,8 +145,8 @@ fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { ty::ty_iface(_, _) { T_opaque_iface(cx) } ty::ty_type { T_ptr(cx.tydesc_type) } ty::ty_tup(elts) { - let mut tys = []/~; - for vec::each(elts) {|elt| + let mut tys = ~[]; + for vec::each(elts) |elt| { vec::push(tys, type_of(cx, elt)); } T_struct(tys) @@ -175,14 +175,14 @@ fn type_of(cx: @crate_ctxt, t: ty::t) -> TypeRef { ty::ty_class(did, ts) { // Only instance vars are record fields at runtime. let fields = lookup_class_fields(cx.tcx, did); - let mut tys = vec::map(fields) {|f| + let mut tys = do vec::map(fields) |f| { let t = ty::lookup_field_type(cx.tcx, did, f.id, ts); type_of(cx, t) }; if ty::ty_dtor(cx.tcx, did) != none { // resource type - tys = [T_i8(), T_struct(tys)]/~; + tys = ~[T_i8(), T_struct(tys)]; } common::set_struct_body(llty, tys); @@ -215,13 +215,13 @@ fn type_of_enum(cx: @crate_ctxt, did: ast::def_id, t: ty::t) let degen = (*ty::enum_variants(cx.tcx, did)).len() == 1u; let size = shape::static_size_of_enum(cx, t); if !degen { - [T_enum_discrim(cx), T_array(T_i8(), size)]/~ + ~[T_enum_discrim(cx), T_array(T_i8(), size)] } else if size == 0u { - [T_enum_discrim(cx)]/~ + ~[T_enum_discrim(cx)] } else { - [T_array(T_i8(), size)]/~ + ~[T_array(T_i8(), size)] } }; @@ -251,8 +251,8 @@ fn llvm_type_name(cx: @crate_ctxt, t: ty::t) -> str { } fn type_of_dtor(ccx: @crate_ctxt, self_ty: ty::t) -> TypeRef { - T_fn([T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), - T_ptr(type_of(ccx, self_ty))]/~, + T_fn(~[T_ptr(type_of(ccx, ty::mk_nil(ccx.tcx))), + T_ptr(type_of(ccx, self_ty))], llvm::LLVMVoidType()) } diff --git a/src/rustc/middle/trans/type_use.rs b/src/rustc/middle/trans/type_use.rs index 2be90746b49..15d69df36b1 100644 --- a/src/rustc/middle/trans/type_use.rs +++ b/src/rustc/middle/trans/type_use.rs @@ -31,10 +31,10 @@ const use_repr: uint = 1u; // Dependency on size/alignment and take/drop glue const use_tydesc: uint = 2u; // Takes the tydesc, or compares type ctx = {ccx: @crate_ctxt, - uses: [mut type_uses]/~}; + uses: ~[mut type_uses]}; fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) - -> [type_uses]/~ { + -> ~[type_uses] { alt ccx.type_use_cache.find(fn_id) { some(uses) { ret uses; } none {} @@ -47,7 +47,7 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) let cx = {ccx: ccx, uses: vec::to_mut(vec::from_elem(n_tps, 0u))}; alt ty::get(ty::lookup_item_type(cx.ccx.tcx, fn_id).ty).struct { ty::ty_fn({inputs, _}) { - for vec::each(inputs) {|arg| + for vec::each(inputs) |arg| { if arg.mode == expl(by_val) { type_needs(cx, use_repr, arg.ty); } } } @@ -70,21 +70,26 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) handle_body(cx, body); } ast_map::node_variant(_, _, _) { - for uint::range(0u, n_tps) {|n| cx.uses[n] |= use_repr;} + for uint::range(0u, n_tps) |n| { cx.uses[n] |= use_repr;} } ast_map::node_foreign_item(i@@{node: foreign_item_fn(_, _), _}, abi, _) { if abi == foreign_abi_rust_intrinsic { let flags = alt check *i.ident { - "visit_ty" { 3u } + "visit_ty" { use_repr | use_tydesc } "size_of" | "pref_align_of" | "min_align_of" | "init" | "reinterpret_cast" | "move_val" | "move_val_init" { use_repr } "get_tydesc" | "needs_drop" { use_tydesc } + "atomic_xchng" | "atomic_add" | "atomic_sub" | + "atomic_xchng_acq" | "atomic_add_acq" | "atomic_sub_acq" | + "atomic_xchng_rel" | "atomic_add_rel" | "atomic_sub_rel" { + 0u + } "forget" | "addr_of" { 0u } }; - for uint::range(0u, n_tps) {|n| cx.uses[n] |= flags;} + for uint::range(0u, n_tps) |n| { cx.uses[n] |= flags;} } } ast_map::node_ctor(_, _, ctor, _, _){ @@ -103,22 +108,28 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint) fn type_needs(cx: ctx, use: uint, ty: ty::t) { let mut done = true; // Optimization -- don't descend type if all params already have this use - for vec::each(cx.uses) {|u| if u & use != use { done = false } } + for vec::each(cx.uses) |u| { if u & use != use { done = false } } if !done { type_needs_inner(cx, use, ty, @nil); } } fn type_needs_inner(cx: ctx, use: uint, ty: ty::t, enums_seen: @list<def_id>) { - ty::maybe_walk_ty(ty) {|ty| + do ty::maybe_walk_ty(ty) |ty| { if ty::type_has_params(ty) { alt ty::get(ty).struct { - ty::ty_fn(_) | ty::ty_ptr(_) | ty::ty_rptr(_, _) | - ty::ty_box(_) | ty::ty_iface(_, _) { false } + /* + This previously included ty_box -- that was wrong + because if we cast an @T to an iface (for example) and return + it, we depend on the drop glue for T (we have to write the + right tydesc into the result) + */ + ty::ty_fn(_) | ty::ty_ptr(_) | ty::ty_rptr(_, _) + | ty::ty_iface(_, _) { false } ty::ty_enum(did, substs) { - if option::is_none(list::find(enums_seen, {|id| id == did})) { + if option::is_none(list::find(enums_seen, |id| id == did)) { let seen = @cons(did, enums_seen); - for vec::each(*ty::enum_variants(cx.ccx.tcx, did)) {|v| - for vec::each(v.args) {|aty| + for vec::each(*ty::enum_variants(cx.ccx.tcx, did)) |v| { + for vec::each(v.args) |aty| { let t = ty::subst(cx.ccx.tcx, substs, aty); type_needs_inner(cx, use, t, seen); } @@ -146,10 +157,21 @@ fn mark_for_expr(cx: ctx, e: @expr) { expr_vec(_, _) | expr_rec(_, _) | expr_tup(_) | expr_unary(box(_), _) | expr_unary(uniq(_), _) | - expr_cast(_, _) | expr_binary(add, _, _) | + expr_binary(add, _, _) | expr_copy(_) | expr_move(_, _) { node_type_needs(cx, use_repr, e.id); } + expr_cast(base, _) { + let result_t = ty::node_id_to_type(cx.ccx.tcx, e.id); + alt ty::get(result_t).struct { + ty::ty_iface(*) { + // When we're casting to an iface, we need the + // tydesc for the expr that's being cast. + node_type_needs(cx, use_tydesc, base.id); + } + _ {} + } + } expr_binary(op, lhs, _) { alt op { eq | lt | le | ne | ge | gt { @@ -159,18 +181,19 @@ fn mark_for_expr(cx: ctx, e: @expr) { } } expr_path(_) { - cx.ccx.tcx.node_type_substs.find(e.id).iter {|ts| + do cx.ccx.tcx.node_type_substs.find(e.id).iter |ts| { let id = ast_util::def_id_of_def(cx.ccx.tcx.def_map.get(e.id)); - vec::iter2(type_uses_for(cx.ccx, id, ts.len()), ts) {|uses, subst| - type_needs(cx, uses, subst) - } + vec::iter2(type_uses_for(cx.ccx, id, ts.len()), ts, + |uses, subst| { + type_needs(cx, uses, subst) + }) } } expr_fn(*) | expr_fn_block(*) { alt ty::ty_fn_proto(ty::expr_ty(cx.ccx.tcx, e)) { proto_bare | proto_any | proto_uniq {} proto_box | proto_block { - for vec::each(*freevars::get_freevars(cx.ccx.tcx, e.id)) {|fv| + for vec::each(*freevars::get_freevars(cx.ccx.tcx, e.id)) |fv| { let node_id = ast_util::def_id_of_def(fv.def).node; node_type_needs(cx, use_repr, node_id); } @@ -187,12 +210,12 @@ fn mark_for_expr(cx: ctx, e: @expr) { let base_ty = ty::node_id_to_type(cx.ccx.tcx, base.id); type_needs(cx, use_repr, ty::type_autoderef(cx.ccx.tcx, base_ty)); - option::iter(cx.ccx.maps.method_map.find(e.id)) {|mth| + do option::iter(cx.ccx.maps.method_map.find(e.id)) |mth| { alt mth.origin { typeck::method_static(did) { - option::iter(cx.ccx.tcx.node_type_substs.find(e.id)) {|ts| - vec::iter2(type_uses_for(cx.ccx, did, ts.len()), ts) - {|uses, subst| type_needs(cx, uses, subst)} + do option::iter(cx.ccx.tcx.node_type_substs.find(e.id)) |ts| { + do vec::iter2(type_uses_for(cx.ccx, did, ts.len()), ts) + |uses, subst| { type_needs(cx, uses, subst)} } } typeck::method_param({param_num: param, _}) { @@ -209,14 +232,14 @@ fn mark_for_expr(cx: ctx, e: @expr) { node_type_needs(cx, use_repr, v.id); } expr_call(f, _, _) { - vec::iter(ty::ty_fn_args(ty::node_id_to_type(cx.ccx.tcx, f.id))) {|a| + vec::iter(ty::ty_fn_args(ty::node_id_to_type(cx.ccx.tcx, f.id)), |a| { alt a.mode { expl(by_move) | expl(by_copy) | expl(by_val) { type_needs(cx, use_repr, a.ty); } _ {} } - } + }) } expr_alt(_, _, _) | expr_block(_) | expr_if(_, _, _) | expr_while(_, _) | expr_fail(_) | expr_break | expr_cont | @@ -229,25 +252,25 @@ fn mark_for_expr(cx: ctx, e: @expr) { fn handle_body(cx: ctx, body: blk) { let v = visit::mk_vt(@{ - visit_expr: {|e, cx, v| + visit_expr: |e, cx, v| { visit::visit_expr(e, cx, v); mark_for_expr(cx, e); }, - visit_local: {|l, cx, v| + visit_local: |l, cx, v| { visit::visit_local(l, cx, v); node_type_needs(cx, use_repr, l.node.id); }, - visit_pat: {|p, cx, v| + visit_pat: |p, cx, v| { visit::visit_pat(p, cx, v); node_type_needs(cx, use_repr, p.id); }, - visit_block: {|b, cx, v| + visit_block: |b, cx, v| { visit::visit_block(b, cx, v); - option::iter(b.node.expr) {|e| + do option::iter(b.node.expr) |e| { node_type_needs(cx, use_repr, e.id); } }, - visit_item: {|_i, _cx, _v|} + visit_item: |_i, _cx, _v| { } with *visit::default_visitor() }); v.visit_block(body, cx, v); diff --git a/src/rustc/middle/trans/uniq.rs b/src/rustc/middle/trans/uniq.rs index e0440147f16..992ed26ee73 100644 --- a/src/rustc/middle/trans/uniq.rs +++ b/src/rustc/middle/trans/uniq.rs @@ -10,7 +10,7 @@ export make_free_glue, autoderef, duplicate; fn make_free_glue(bcx: block, vptr: ValueRef, t: ty::t) -> block { let _icx = bcx.insn_ctxt("uniq::make_free_glue"); - with_cond(bcx, IsNotNull(bcx, vptr)) {|bcx| + do with_cond(bcx, IsNotNull(bcx, vptr)) |bcx| { let content_ty = content_ty(t); let body_ptr = opaque_box_body(bcx, content_ty, vptr); let bcx = drop_ty(bcx, body_ptr, content_ty); @@ -44,9 +44,9 @@ fn duplicate(bcx: block, v: ValueRef, t: ty::t) -> result { let bcx = copy_val(bcx, INIT, dst_body, src_body, content_ty); let src_tydesc_ptr = GEPi(bcx, src_box, - [0u, back::abi::box_field_tydesc]/~); + ~[0u, back::abi::box_field_tydesc]); let dst_tydesc_ptr = GEPi(bcx, dst_box, - [0u, back::abi::box_field_tydesc]/~); + ~[0u, back::abi::box_field_tydesc]); let td = Load(bcx, src_tydesc_ptr); Store(bcx, td, dst_tydesc_ptr); diff --git a/src/rustc/middle/tstate/annotate.rs b/src/rustc/middle/tstate/annotate.rs index 42b6e90b4d8..e6beae9e014 100644 --- a/src/rustc/middle/tstate/annotate.rs +++ b/src/rustc/middle/tstate/annotate.rs @@ -7,13 +7,13 @@ import aux::{num_constraints, get_fn_info, crate_ctxt, add_node}; import ann::empty_ann; import pat_util::pat_binding_ids; -fn collect_ids_expr(e: @expr, rs: @mut [node_id]/~) { vec::push(*rs, e.id); } +fn collect_ids_expr(e: @expr, rs: @mut ~[node_id]) { vec::push(*rs, e.id); } -fn collect_ids_block(b: blk, rs: @mut [node_id]/~) { +fn collect_ids_block(b: blk, rs: @mut ~[node_id]) { vec::push(*rs, b.node.id); } -fn collect_ids_stmt(s: @stmt, rs: @mut [node_id]/~) { +fn collect_ids_stmt(s: @stmt, rs: @mut ~[node_id]) { alt s.node { stmt_decl(_, id) | stmt_expr(_, id) | stmt_semi(_, id) { #debug["node_id %s", int::str(id)]; @@ -23,30 +23,30 @@ fn collect_ids_stmt(s: @stmt, rs: @mut [node_id]/~) { } } -fn collect_ids_local(tcx: ty::ctxt, l: @local, rs: @mut [node_id]/~) { +fn collect_ids_local(tcx: ty::ctxt, l: @local, rs: @mut ~[node_id]) { vec::push_all(*rs, pat_binding_ids(tcx.def_map, l.node.pat)); } -fn node_ids_in_fn(tcx: ty::ctxt, body: blk, rs: @mut [node_id]/~) { +fn node_ids_in_fn(tcx: ty::ctxt, body: blk, rs: @mut ~[node_id]) { let collect_ids = - visit::mk_simple_visitor(@{visit_expr: {|a|collect_ids_expr(a, rs)}, - visit_block: {|a|collect_ids_block(a, rs)}, - visit_stmt: {|a|collect_ids_stmt(a, rs)}, - visit_local: {|a| - collect_ids_local(tcx, a, rs)} + visit::mk_simple_visitor(@{visit_expr: |a| collect_ids_expr(a, rs), + visit_block: |a| collect_ids_block(a, rs), + visit_stmt: |a| collect_ids_stmt(a, rs), + visit_local: |a| + collect_ids_local(tcx, a, rs) with *visit::default_simple_visitor()}); collect_ids.visit_block(body, (), collect_ids); } -fn init_vecs(ccx: crate_ctxt, node_ids: [node_id]/~, len: uint) { - for node_ids.each {|i| +fn init_vecs(ccx: crate_ctxt, node_ids: ~[node_id], len: uint) { + for node_ids.each |i| { log(debug, int::str(i) + " |-> " + uint::str(len)); add_node(ccx, i, empty_ann(len)); } } fn visit_fn(ccx: crate_ctxt, num_constraints: uint, body: blk) { - let node_ids: @mut [node_id]/~ = @mut []/~; + let node_ids: @mut ~[node_id] = @mut ~[]; node_ids_in_fn(ccx.tcx, body, node_ids); let node_id_vec = *node_ids; init_vecs(ccx, node_id_vec, num_constraints); @@ -61,7 +61,7 @@ fn annotate_in_fn(ccx: crate_ctxt, _fk: visit::fn_kind, _decl: fn_decl, fn annotate_crate(ccx: crate_ctxt, crate: crate) { let do_ann = visit::mk_simple_visitor( - @{visit_fn: {|a,b,c,d,e|annotate_in_fn(ccx, a, b, c, d, e)} + @{visit_fn: |a,b,c,d,e| annotate_in_fn(ccx, a, b, c, d, e) with *visit::default_simple_visitor()}); visit::visit_crate(crate, (), do_ann); } diff --git a/src/rustc/middle/tstate/auxiliary.rs b/src/rustc/middle/tstate/auxiliary.rs index 23ea039d2c0..d50e445482a 100644 --- a/src/rustc/middle/tstate/auxiliary.rs +++ b/src/rustc/middle/tstate/auxiliary.rs @@ -34,10 +34,10 @@ fn def_id_to_str(d: def_id) -> str { ret int::str(d.crate) + "," + int::str(d.node); } -fn comma_str(args: [@constr_arg_use]/~) -> str { +fn comma_str(args: ~[@constr_arg_use]) -> str { let mut rslt = ""; let mut comma = false; - for args.each {|a| + for args.each |a| { if comma { rslt += ", "; } else { comma = true; } alt a.node { carg_base { rslt += "*"; } @@ -58,7 +58,7 @@ fn constraint_to_str(tcx: ty::ctxt, c: sp_constr) -> str { fn tritv_to_str(fcx: fn_ctxt, v: tritv::t) -> str { let mut s = ""; let mut comma = false; - for constraints(fcx).each {|p| + for constraints(fcx).each |p| { alt tritv_get(v, p.bit_num) { dont_care { } tt { @@ -79,7 +79,7 @@ fn log_tritv(fcx: fn_ctxt, v: tritv::t) { fn first_difference_string(fcx: fn_ctxt, expected: tritv::t, actual: tritv::t) -> str { let mut s = ""; - for constraints(fcx).each {|c| + for constraints(fcx).each |c| { if tritv_get(expected, c.bit_num) == ttrue && tritv_get(actual, c.bit_num) != ttrue { s = constraint_to_str(fcx.ccx.tcx, c.c); @@ -93,9 +93,9 @@ fn log_tritv_err(fcx: fn_ctxt, v: tritv::t) { log(error, tritv_to_str(fcx, v)); } -fn tos(v: [uint]/~) -> str { +fn tos(v: ~[uint]) -> str { let mut rslt = ""; - for v.each {|i| + for v.each |i| { if i == 0u { rslt += "0"; } else if i == 1u { rslt += "1"; } else { rslt += "?"; } @@ -103,9 +103,9 @@ fn tos(v: [uint]/~) -> str { ret rslt; } -fn log_cond(v: [uint]/~) { log(debug, tos(v)); } +fn log_cond(v: ~[uint]) { log(debug, tos(v)); } -fn log_cond_err(v: [uint]/~) { log(error, tos(v)); } +fn log_cond_err(v: ~[uint]) { log(error, tos(v)); } fn log_pp(pp: pre_and_post) { let p1 = tritv::to_vec(pp.precondition); @@ -145,7 +145,7 @@ fn log_states_err(pp: pre_and_post_state) { fn print_ident(i: ident) { log(debug, " " + *i + " "); } -fn print_idents(&idents: [ident]/~) { +fn print_idents(&idents: ~[ident]) { if vec::len::<ident>(idents) == 0u { ret; } log(debug, "an ident: " + *vec::pop::<ident>(idents)); print_idents(idents); @@ -180,7 +180,7 @@ to represent predicate *arguments* however. This type Both types store an ident and span, for error-logging purposes. */ -type pred_args_ = {args: [@constr_arg_use]/~, bit_num: uint}; +type pred_args_ = {args: ~[@constr_arg_use], bit_num: uint}; type pred_args = spanned<pred_args_>; @@ -203,7 +203,7 @@ type constraint = { type tsconstr = { path: @path, def_id: def_id, - args: [@constr_arg_use]/~ + args: ~[@constr_arg_use] }; type sp_constr = spanned<tsconstr>; @@ -224,11 +224,11 @@ type fn_info = {constrs: constr_map, num_constraints: uint, cf: ret_style, - used_vars: @mut [node_id]/~, + used_vars: @mut ~[node_id], ignore: bool}; /* mapping from node ID to typestate annotation */ -type node_ann_table = @mut [mut ts_ann]/~; +type node_ann_table = @mut ~[mut ts_ann]; /* mapping from function name to fn_info map */ @@ -436,7 +436,7 @@ fn pure_exp(ccx: crate_ctxt, id: node_id, p: prestate) -> bool { fn num_constraints(m: fn_info) -> uint { ret m.num_constraints; } fn new_crate_ctxt(cx: ty::ctxt) -> crate_ctxt { - let na: [mut ts_ann]/~ = [mut]/~; + let na: ~[mut ts_ann] = ~[mut]; ret {tcx: cx, node_anns: @mut na, fm: int_hash::<fn_info>()}; } @@ -450,10 +450,10 @@ fn controlflow_expr(ccx: crate_ctxt, e: @expr) -> ret_style { } } -fn constraints_expr(cx: ty::ctxt, e: @expr) -> [@ty::constr]/~ { +fn constraints_expr(cx: ty::ctxt, e: @expr) -> ~[@ty::constr] { alt ty::get(ty::node_id_to_type(cx, e.id)).struct { ty::ty_fn(f) { ret f.constraints; } - _ { ret []/~; } + _ { ret ~[]; } } } @@ -471,9 +471,9 @@ fn node_id_to_def(ccx: crate_ctxt, id: node_id) -> option<def> { ret ccx.tcx.def_map.find(id); } -fn norm_a_constraint(id: def_id, c: constraint) -> [norm_constraint]/~ { - let mut rslt: [norm_constraint]/~ = []/~; - for (*c.descs).each {|pd| +fn norm_a_constraint(id: def_id, c: constraint) -> ~[norm_constraint] { + let mut rslt: ~[norm_constraint] = ~[]; + for (*c.descs).each |pd| { vec::push(rslt, {bit_num: pd.node.bit_num, c: respan(pd.span, {path: c.path, @@ -486,9 +486,9 @@ fn norm_a_constraint(id: def_id, c: constraint) -> [norm_constraint]/~ { // Tried to write this as an iterator, but I got a // non-exhaustive match in trans. -fn constraints(fcx: fn_ctxt) -> [norm_constraint]/~ { - let mut rslt: [norm_constraint]/~ = []/~; - for fcx.enclosing.constrs.each {|key, val| +fn constraints(fcx: fn_ctxt) -> ~[norm_constraint] { + let mut rslt: ~[norm_constraint] = ~[]; + for fcx.enclosing.constrs.each |key, val| { vec::push_all(rslt, norm_a_constraint(key, val)); }; ret rslt; @@ -497,10 +497,10 @@ fn constraints(fcx: fn_ctxt) -> [norm_constraint]/~ { // FIXME (#2539): Would rather take an immutable vec as an argument, // should freeze it at some earlier point. fn match_args(fcx: fn_ctxt, occs: @dvec<pred_args>, - occ: [@constr_arg_use]/~) -> uint { + occ: ~[@constr_arg_use]) -> uint { #debug("match_args: looking at %s", constr_args_to_str(fn@(i: inst) -> str { ret *i.ident; }, occ)); - for (*occs).each {|pd| + for (*occs).each |pd| { log(debug, "match_args: candidate " + pred_args_to_str(pd)); fn eq(p: inst, q: inst) -> bool { ret p.node == q.node; } @@ -550,10 +550,10 @@ fn expr_to_constr_arg(tcx: ty::ctxt, e: @expr) -> @constr_arg_use { } fn exprs_to_constr_args(tcx: ty::ctxt, - args: [@expr]/~) -> [@constr_arg_use]/~ { - let f = {|a|expr_to_constr_arg(tcx, a)}; - let mut rslt: [@constr_arg_use]/~ = []/~; - for args.each {|e| vec::push(rslt, f(e)); } + args: ~[@expr]) -> ~[@constr_arg_use] { + let f = |a| expr_to_constr_arg(tcx, a); + let mut rslt: ~[@constr_arg_use] = ~[]; + for args.each |e| { vec::push(rslt, f(e)); } rslt } @@ -585,10 +585,10 @@ fn pred_args_to_str(p: pred_args) -> str { + ">" } -fn substitute_constr_args(cx: ty::ctxt, actuals: [@expr]/~, c: @ty::constr) -> +fn substitute_constr_args(cx: ty::ctxt, actuals: ~[@expr], c: @ty::constr) -> tsconstr { - let mut rslt: [@constr_arg_use]/~ = []/~; - for c.node.args.each {|a| + let mut rslt: ~[@constr_arg_use] = ~[]; + for c.node.args.each |a| { vec::push(rslt, substitute_arg(cx, actuals, a)); } ret {path: c.node.path, @@ -596,7 +596,7 @@ fn substitute_constr_args(cx: ty::ctxt, actuals: [@expr]/~, c: @ty::constr) -> args: rslt}; } -fn substitute_arg(cx: ty::ctxt, actuals: [@expr]/~, a: @constr_arg) -> +fn substitute_arg(cx: ty::ctxt, actuals: ~[@expr], a: @constr_arg) -> @constr_arg_use { let num_actuals = vec::len(actuals); alt a.node { @@ -612,11 +612,11 @@ fn substitute_arg(cx: ty::ctxt, actuals: [@expr]/~, a: @constr_arg) -> } } -fn pred_args_matches(pattern: [constr_arg_general_<inst>]/~, +fn pred_args_matches(pattern: ~[constr_arg_general_<inst>], desc: pred_args) -> bool { let mut i = 0u; - for desc.node.args.each {|c| + for desc.node.args.each |c| { let n = pattern[i]; alt c.node { carg_ident(p) { @@ -638,10 +638,10 @@ fn pred_args_matches(pattern: [constr_arg_general_<inst>]/~, ret true; } -fn find_instance_(pattern: [constr_arg_general_<inst>]/~, - descs: [pred_args]/~) -> +fn find_instance_(pattern: ~[constr_arg_general_<inst>], + descs: ~[pred_args]) -> option<uint> { - for descs.each {|d| + for descs.each |d| { if pred_args_matches(pattern, d) { ret some(d.node.bit_num); } } ret none; @@ -654,16 +654,16 @@ enum dest { call // RHS is passed to a function } -type subst = [{from: inst, to: inst}]/~; +type subst = ~[{from: inst, to: inst}]; fn find_instances(_fcx: fn_ctxt, subst: subst, - c: constraint) -> [{from: uint, to: uint}]/~ { + c: constraint) -> ~[{from: uint, to: uint}] { - if vec::len(subst) == 0u { ret []/~; } - let mut res = []/~; - (*c.descs).swap { |v| + if vec::len(subst) == 0u { ret ~[]; } + let mut res = ~[]; + do (*c.descs).swap |v| { let v <- vec::from_mut(v); - for v.each { |d| + for v.each |d| { if args_mention(d.node.args, find_in_subst_bool, subst) { let old_bit_num = d.node.bit_num; let newv = replace(subst, d); @@ -679,7 +679,7 @@ fn find_instances(_fcx: fn_ctxt, subst: subst, } fn find_in_subst(id: node_id, s: subst) -> option<inst> { - for s.each {|p| + for s.each |p| { if id == p.from.node { ret some(p.to); } } ret none; @@ -689,24 +689,24 @@ fn find_in_subst_bool(s: subst, id: node_id) -> bool { is_some(find_in_subst(id, s)) } -fn insts_to_str(stuff: [constr_arg_general_<inst>]/~) -> str { +fn insts_to_str(stuff: ~[constr_arg_general_<inst>]) -> str { let mut rslt = "<"; - for stuff.each {|i| + for stuff.each |i| { rslt += " " + alt i { carg_ident(p) { *p.ident } carg_base { "*" } - carg_lit(_) { "[lit]/~" } + carg_lit(_) { "~[lit]" } } + " "; } rslt += ">"; rslt } -fn replace(subst: subst, d: pred_args) -> [constr_arg_general_<inst>]/~ { - let mut rslt: [constr_arg_general_<inst>]/~ = []/~; - for d.node.args.each {|c| +fn replace(subst: subst, d: pred_args) -> ~[constr_arg_general_<inst>] { + let mut rslt: ~[constr_arg_general_<inst>] = ~[]; + for d.node.args.each |c| { alt c.node { carg_ident(p) { alt find_in_subst(p.node, subst) { @@ -727,7 +727,7 @@ enum if_ty { if_check, plain_if, } fn for_constraints_mentioning(fcx: fn_ctxt, id: node_id, f: fn(norm_constraint)) { - for constraints(fcx).each {|c| + for constraints(fcx).each |c| { if constraint_mentions(fcx, c, id) { f(c); } }; } @@ -797,19 +797,19 @@ fn copy_in_poststate_two(fcx: fn_ctxt, src_post: poststate, ty: oper_type) { let mut subst; alt ty { - oper_swap { subst = [{from: dest, to: src}, {from: src, to: dest}]/~; } + oper_swap { subst = ~[{from: dest, to: src}, {from: src, to: dest}]; } oper_assign_op { ret; // Don't do any propagation } - _ { subst = [{from: src, to: dest}]/~; } + _ { subst = ~[{from: src, to: dest}]; } } - for fcx.enclosing.constrs.each_value {|val| + for fcx.enclosing.constrs.each_value |val| { // replace any occurrences of the src def_id with the // dest def_id let insts = find_instances(fcx, subst, val); - for insts.each {|p| + for insts.each |p| { if bitvectors::promises_(p.from, src_post) { set_in_poststate_(p.to, target_post); } @@ -821,8 +821,8 @@ fn forget_in_postcond(fcx: fn_ctxt, parent_exp: node_id, dead_v: node_id) { // In the postcondition given by parent_exp, clear the bits // for any constraints mentioning dead_v let d = local_node_id_to_local_def_id(fcx, dead_v); - option::iter(d) {|d_id| - for_constraints_mentioning(fcx, d_id) {|c| + do option::iter(d) |d_id| { + do for_constraints_mentioning(fcx, d_id) |c| { #debug("clearing constraint %u %s", c.bit_num, constraint_to_str(fcx.ccx.tcx, c.c)); @@ -838,29 +838,29 @@ fn forget_in_poststate(fcx: fn_ctxt, p: poststate, dead_v: node_id) -> bool { // for any constraints mentioning dead_v let d = local_node_id_to_local_def_id(fcx, dead_v); let mut changed = false; - option::iter(d) {|d_id| - for_constraints_mentioning(fcx, d_id) {|c| + do option::iter(d) |d_id| { + do for_constraints_mentioning(fcx, d_id) |c| { changed |= clear_in_poststate_(c.bit_num, p); } } ret changed; } -fn any_eq(v: [node_id]/~, d: node_id) -> bool { - for v.each {|i| if i == d { ret true; } } +fn any_eq(v: ~[node_id], d: node_id) -> bool { + for v.each |i| { if i == d { ret true; } } false } fn constraint_mentions(_fcx: fn_ctxt, c: norm_constraint, v: node_id) -> bool { - ret args_mention(c.c.node.args, any_eq, [v]/~); + ret args_mention(c.c.node.args, any_eq, ~[v]); } -fn args_mention<T>(args: [@constr_arg_use]/~, - q: fn([T]/~, node_id) -> bool, - s: [T]/~) -> bool { +fn args_mention<T>(args: ~[@constr_arg_use], + q: fn(~[T], node_id) -> bool, + s: ~[T]) -> bool { - for args.each {|a| + for args.each |a| { alt a.node { carg_ident(p1) { if q(s, p1.node) { ret true; } } _ { } } } ret false; @@ -881,12 +881,12 @@ fn do_nothing<T>(_fk: visit::fn_kind, _decl: fn_decl, _body: blk, } -fn args_to_constr_args(tcx: ty::ctxt, args: [arg]/~, - indices: [@sp_constr_arg<uint>]/~) - -> [@constr_arg_use]/~ { - let mut actuals: [@constr_arg_use]/~ = []/~; +fn args_to_constr_args(tcx: ty::ctxt, args: ~[arg], + indices: ~[@sp_constr_arg<uint>]) + -> ~[@constr_arg_use] { + let mut actuals: ~[@constr_arg_use] = ~[]; let num_args = vec::len(args); - for indices.each {|a| + for indices.each |a| { vec::push( actuals, @respan(a.span, @@ -908,7 +908,7 @@ fn args_to_constr_args(tcx: ty::ctxt, args: [arg]/~, ret actuals; } -fn ast_constr_to_ts_constr(tcx: ty::ctxt, args: [arg]/~, c: @constr) -> +fn ast_constr_to_ts_constr(tcx: ty::ctxt, args: ~[arg], c: @constr) -> tsconstr { let tconstr = ty::ast_constr_to_constr(tcx, c); ret {path: tconstr.node.path, @@ -916,35 +916,35 @@ fn ast_constr_to_ts_constr(tcx: ty::ctxt, args: [arg]/~, c: @constr) -> args: args_to_constr_args(tcx, args, tconstr.node.args)}; } -fn ast_constr_to_sp_constr(tcx: ty::ctxt, args: [arg]/~, c: @constr) -> +fn ast_constr_to_sp_constr(tcx: ty::ctxt, args: ~[arg], c: @constr) -> sp_constr { let tconstr = ast_constr_to_ts_constr(tcx, args, c); ret respan(c.span, tconstr); } -type binding = {lhs: [dest]/~, rhs: option<initializer>}; +type binding = {lhs: ~[dest], rhs: option<initializer>}; fn local_to_bindings(tcx: ty::ctxt, loc: @local) -> binding { - let mut lhs = []/~; - pat_bindings(tcx.def_map, loc.node.pat) {|p_id, _s, name| + let mut lhs = ~[]; + do pat_bindings(tcx.def_map, loc.node.pat) |p_id, _s, name| { vec::push(lhs, local_dest({ident: path_to_ident(name), node: p_id})); }; {lhs: lhs, rhs: loc.node.init} } -fn locals_to_bindings(tcx: ty::ctxt, locals: [@local]/~) -> [binding]/~ { - let mut rslt = []/~; - for locals.each {|loc| vec::push(rslt, local_to_bindings(tcx, loc)); } +fn locals_to_bindings(tcx: ty::ctxt, locals: ~[@local]) -> ~[binding] { + let mut rslt = ~[]; + for locals.each |loc| { vec::push(rslt, local_to_bindings(tcx, loc)); } ret rslt; } -fn callee_modes(fcx: fn_ctxt, callee: node_id) -> [mode]/~ { +fn callee_modes(fcx: fn_ctxt, callee: node_id) -> ~[mode] { let ty = ty::type_autoderef(fcx.ccx.tcx, ty::node_id_to_type(fcx.ccx.tcx, callee)); alt ty::get(ty).struct { ty::ty_fn({inputs: args, _}) { - let mut modes = []/~; - for args.each {|arg| vec::push(modes, arg.mode); } + let mut modes = ~[]; + for args.each |arg| { vec::push(modes, arg.mode); } ret modes; } _ { @@ -955,8 +955,8 @@ fn callee_modes(fcx: fn_ctxt, callee: node_id) -> [mode]/~ { } } -fn callee_arg_init_ops(fcx: fn_ctxt, callee: node_id) -> [init_op]/~ { - vec::map(callee_modes(fcx, callee)) {|m| +fn callee_arg_init_ops(fcx: fn_ctxt, callee: node_id) -> ~[init_op] { + do vec::map(callee_modes(fcx, callee)) |m| { alt ty::resolved_mode(fcx.ccx.tcx, m) { by_move { init_move } by_copy | by_ref | by_val | by_mutbl_ref { init_assign } @@ -964,12 +964,12 @@ fn callee_arg_init_ops(fcx: fn_ctxt, callee: node_id) -> [init_op]/~ { } } -fn arg_bindings(ops: [init_op]/~, es: [@expr]/~) -> [binding]/~ { - let mut bindings: [binding]/~ = []/~; +fn arg_bindings(ops: ~[init_op], es: ~[@expr]) -> ~[binding] { + let mut bindings: ~[binding] = ~[]; let mut i = 0u; - for ops.each {|op| + for ops.each |op| { vec::push(bindings, - {lhs: [call]/~, rhs: some({op: op, expr: es[i]})}); + {lhs: ~[call], rhs: some({op: op, expr: es[i]})}); i += 1u; } ret bindings; diff --git a/src/rustc/middle/tstate/bitvectors.rs b/src/rustc/middle/tstate/bitvectors.rs index c1ca98d5f90..83151d8627a 100644 --- a/src/rustc/middle/tstate/bitvectors.rs +++ b/src/rustc/middle/tstate/bitvectors.rs @@ -45,11 +45,11 @@ fn seq_tritv(p: postcond, q: postcond) { } } -fn seq_postconds(fcx: fn_ctxt, ps: [postcond]/~) -> postcond { +fn seq_postconds(fcx: fn_ctxt, ps: ~[postcond]) -> postcond { let sz = vec::len(ps); if sz >= 1u { let prev = tritv_clone(ps[0]); - vec::iter_between(ps, 1u, sz) {|p| seq_tritv(prev, p); } + vec::iter_between(ps, 1u, sz, |p| seq_tritv(prev, p) ); ret prev; } else { ret ann::empty_poststate(num_constraints(fcx.enclosing)); } } @@ -58,11 +58,11 @@ fn seq_postconds(fcx: fn_ctxt, ps: [postcond]/~) -> postcond { // return the precondition for evaluating each expr in order. // So, if e0's post is {x} and e1's pre is {x, y, z}, the entire // precondition shouldn't include x. -fn seq_preconds(fcx: fn_ctxt, pps: [pre_and_post]/~) -> precond { +fn seq_preconds(fcx: fn_ctxt, pps: ~[pre_and_post]) -> precond { let sz: uint = vec::len(pps); let num_vars: uint = num_constraints(fcx.enclosing); - fn seq_preconds_go(fcx: fn_ctxt, pps: [pre_and_post]/~, + fn seq_preconds_go(fcx: fn_ctxt, pps: ~[pre_and_post], idx: uint, first: pre_and_post) -> precond { let mut idx = idx; diff --git a/src/rustc/middle/tstate/ck.rs b/src/rustc/middle/tstate/ck.rs index 5283c3b6f05..b5e9c5d32c8 100644 --- a/src/rustc/middle/tstate/ck.rs +++ b/src/rustc/middle/tstate/ck.rs @@ -80,7 +80,7 @@ fn check_states_against_conditions(fcx: fn_ctxt, let visitor = visit::mk_vt( @{visit_stmt: check_states_stmt, visit_expr: check_states_expr, - visit_fn: {|a,b,c,d,e,f,g| + visit_fn: |a,b,c,d,e,f,g| { do_nothing::<fn_ctxt>(a, b, c, d, e, f, g) } with *visit::default_visitor::<fn_ctxt>()}); diff --git a/src/rustc/middle/tstate/collect_locals.rs b/src/rustc/middle/tstate/collect_locals.rs index 5264ee66b06..6c5eebbcb0b 100644 --- a/src/rustc/middle/tstate/collect_locals.rs +++ b/src/rustc/middle/tstate/collect_locals.rs @@ -10,7 +10,7 @@ import aux::*; import std::map::hashmap; import dvec::{dvec, extensions}; -type ctxt = {cs: @mut [sp_constr]/~, tcx: ty::ctxt}; +type ctxt = {cs: @mut ~[sp_constr], tcx: ty::ctxt}; fn collect_pred(e: @expr, cx: ctxt, v: visit::vt<ctxt>) { alt e.node { @@ -22,7 +22,7 @@ fn collect_pred(e: @expr, cx: ctxt, v: visit::vt<ctxt>) { // If it's a call, generate appropriate instances of the // call's constraints. expr_call(operator, operands, _) { - for constraints_expr(cx.tcx, operator).each {|c| + for constraints_expr(cx.tcx, operator).each |c| { let ct: sp_constr = respan(c.span, aux::substitute_constr_args(cx.tcx, operands, c)); @@ -41,7 +41,7 @@ fn find_locals(tcx: ty::ctxt, f_body: blk, sp: span, id: node_id) -> ctxt { - let cx: ctxt = {cs: @mut []/~, tcx: tcx}; + let cx: ctxt = {cs: @mut ~[], tcx: tcx}; let visitor = visit::default_visitor::<ctxt>(); let visitor = @{visit_expr: collect_pred, @@ -126,13 +126,13 @@ fn mk_fn_info(ccx: crate_ctxt, } /* if this function has any constraints, instantiate them to the argument names and add them */ - for f_decl.constraints.each {|c| + for f_decl.constraints.each |c| { let sc = ast_constr_to_sp_constr(cx.tcx, f_decl.inputs, c); next = add_constraint(cx.tcx, sc, next, res_map); } } - let v: @mut [node_id]/~ = @mut []/~; + let v: @mut ~[node_id] = @mut ~[]; let rslt = {constrs: res_map, num_constraints: next, @@ -150,9 +150,7 @@ fn mk_fn_info(ccx: crate_ctxt, fn mk_f_to_fn_info(ccx: crate_ctxt, c: @crate) { let visitor = visit::mk_simple_visitor(@{ - visit_fn: {|a,b,c,d,e| - mk_fn_info(ccx, a, b, c, d, e) - } + visit_fn: |a,b,c,d,e| mk_fn_info(ccx, a, b, c, d, e) with *visit::default_simple_visitor()}); visit::visit_crate(*c, (), visitor); } diff --git a/src/rustc/middle/tstate/pre_post_conditions.rs b/src/rustc/middle/tstate/pre_post_conditions.rs index 63a03197a7a..197ed4b3108 100644 --- a/src/rustc/middle/tstate/pre_post_conditions.rs +++ b/src/rustc/middle/tstate/pre_post_conditions.rs @@ -53,7 +53,7 @@ fn find_pre_post_item(ccx: crate_ctxt, i: item) { fail "find_pre_post_item: shouldn't be called on item_class"; } item_impl(_, _, _, _, ms) { - for ms.each {|m| find_pre_post_method(ccx, m); } + for ms.each |m| { find_pre_post_method(ccx, m); } } } } @@ -63,17 +63,17 @@ fn find_pre_post_item(ccx: crate_ctxt, i: item) { sets the precondition in a to be the result of combining the preconditions for <args>, and the postcondition in a to be the union of all postconditions for <args> */ -fn find_pre_post_exprs(fcx: fn_ctxt, args: [@expr]/~, id: node_id) { +fn find_pre_post_exprs(fcx: fn_ctxt, args: ~[@expr], id: node_id) { if vec::len::<@expr>(args) > 0u { #debug["find_pre_post_exprs: oper = %s", expr_to_str(args[0])]; } fn do_one(fcx: fn_ctxt, e: @expr) { find_pre_post_expr(fcx, e); } - for args.each {|e| do_one(fcx, e); } + for args.each |e| { do_one(fcx, e); } fn get_pp(ccx: crate_ctxt, &&e: @expr) -> pre_and_post { ret expr_pp(ccx, e); } - let pps = vec::map(args, {|a|get_pp(fcx.ccx, a)}); + let pps = vec::map(args, |a| get_pp(fcx.ccx, a) ); set_pre_and_post(fcx.ccx, id, seq_preconds(fcx, pps), seq_postconds(fcx, vec::map(pps, get_post))); @@ -84,8 +84,8 @@ fn find_pre_post_loop(fcx: fn_ctxt, index: @expr, body: blk, id: node_id) { find_pre_post_block(fcx, body); let loop_precond = - seq_preconds(fcx, [expr_pp(fcx.ccx, index), - block_pp(fcx.ccx, body)]/~); + seq_preconds(fcx, ~[expr_pp(fcx.ccx, index), + block_pp(fcx.ccx, body)]); let loop_postcond = intersect_states(expr_postcond(fcx.ccx, index), block_postcond(fcx.ccx, body)); @@ -111,8 +111,8 @@ fn join_then_else(fcx: fn_ctxt, antec: @expr, conseq: blk, let precond_res = seq_preconds(fcx, - [expr_pp(fcx.ccx, antec), - block_pp(fcx.ccx, conseq)]/~); + ~[expr_pp(fcx.ccx, antec), + block_pp(fcx.ccx, conseq)]); set_pre_and_post(fcx.ccx, id, precond_res, expr_poststate(fcx.ccx, antec)); } @@ -125,12 +125,12 @@ fn join_then_else(fcx: fn_ctxt, antec: @expr, conseq: blk, find_pre_post_expr(fcx, altern); let precond_false_case = seq_preconds(fcx, - [expr_pp(fcx.ccx, antec), - expr_pp(fcx.ccx, altern)]/~); + ~[expr_pp(fcx.ccx, antec), + expr_pp(fcx.ccx, altern)]); let postcond_false_case = seq_postconds(fcx, - [expr_postcond(fcx.ccx, antec), - expr_postcond(fcx.ccx, altern)]/~); + ~[expr_postcond(fcx.ccx, antec), + expr_postcond(fcx.ccx, altern)]); /* Be sure to set the bit for the check condition here, so that it's *not* set in the alternative. */ @@ -143,15 +143,15 @@ fn join_then_else(fcx: fn_ctxt, antec: @expr, conseq: blk, } let precond_true_case = seq_preconds(fcx, - [expr_pp(fcx.ccx, antec), - block_pp(fcx.ccx, conseq)]/~); + ~[expr_pp(fcx.ccx, antec), + block_pp(fcx.ccx, conseq)]); let postcond_true_case = seq_postconds(fcx, - [expr_postcond(fcx.ccx, antec), - block_postcond(fcx.ccx, conseq)]/~); + ~[expr_postcond(fcx.ccx, antec), + block_postcond(fcx.ccx, conseq)]); let precond_res = - seq_postconds(fcx, [precond_true_case, precond_false_case]/~); + seq_postconds(fcx, ~[precond_true_case, precond_false_case]); let postcond_res = intersect_states(postcond_true_case, postcond_false_case); set_pre_and_post(fcx.ccx, id, precond_res, postcond_res); @@ -170,10 +170,10 @@ fn gen_if_local(fcx: fn_ctxt, lhs: @expr, rhs: @expr, larger_id: node_id, set_pre_and_post(fcx.ccx, larger_id, p.precondition, p.postcondition); } - _ { find_pre_post_exprs(fcx, [lhs, rhs]/~, larger_id); } + _ { find_pre_post_exprs(fcx, ~[lhs, rhs], larger_id); } } } - _ { find_pre_post_exprs(fcx, [lhs, rhs]/~, larger_id); } + _ { find_pre_post_exprs(fcx, ~[lhs, rhs], larger_id); } } } @@ -228,9 +228,9 @@ fn handle_update(fcx: fn_ctxt, parent: @expr, lhs: @expr, rhs: @expr, } } -fn forget_args_moved_in(fcx: fn_ctxt, parent: @expr, modes: [mode]/~, - operands: [@expr]/~) { - vec::iteri(modes) {|i,mode| +fn forget_args_moved_in(fcx: fn_ctxt, parent: @expr, modes: ~[mode], + operands: ~[@expr]) { + do vec::iteri(modes) |i,mode| { alt ty::resolved_mode(fcx.ccx.tcx, mode) { by_move { forget_in_postcond(fcx, parent.id, operands[i].id); } by_ref | by_val | by_mutbl_ref | by_copy { } @@ -259,7 +259,7 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { find_pre_post_exprs(fcx, args, e.id); /* see if the call has any constraints on its type */ - for constraints_expr(fcx.ccx.tcx, operator).each {|c| + for constraints_expr(fcx.ccx.tcx, operator).each |c| { let i = bit_num(fcx, substitute_constr_args(fcx.ccx.tcx, args, c)); require(i, expr_pp(fcx.ccx, e)); @@ -287,20 +287,20 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { clear_pp(rslt); } expr_new(p, _, v) { - find_pre_post_exprs(fcx, [p, v]/~, e.id); + find_pre_post_exprs(fcx, ~[p, v], e.id); } expr_log(_, lvl, arg) { - find_pre_post_exprs(fcx, [lvl, arg]/~, e.id); + find_pre_post_exprs(fcx, ~[lvl, arg], e.id); } expr_fn(_, _, _, cap_clause) | expr_fn_block(_, _, cap_clause) { find_pre_post_expr_fn_upvars(fcx, e); - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { let d = local_node_id_to_local_def_id(fcx, cap_item.id); - option::iter(d, { |id| use_var(fcx, id) }); + option::iter(d, |id| use_var(fcx, id) ); } - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { if cap_item.is_move { log(debug, ("forget_in_postcond: ", cap_item)); forget_in_postcond(fcx, e.id, cap_item.id); @@ -325,7 +325,7 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { /* Different from expr_assign in that the lhs *must* already be initialized */ - find_pre_post_exprs(fcx, [lhs, rhs]/~, e.id); + find_pre_post_exprs(fcx, ~[lhs, rhs], e.id); forget_in_postcond(fcx, e.id, lhs.id); } expr_lit(_) { clear_pp(expr_pp(fcx.ccx, e)); } @@ -352,11 +352,11 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { find_pre_post_expr(fcx, r); let overall_pre = seq_preconds(fcx, - [expr_pp(fcx.ccx, l), expr_pp(fcx.ccx, r)]/~); + ~[expr_pp(fcx.ccx, l), expr_pp(fcx.ccx, r)]); set_precondition(node_id_to_ts_ann(fcx.ccx, e.id), overall_pre); set_postcondition(node_id_to_ts_ann(fcx.ccx, e.id), expr_postcond(fcx.ccx, l)); - } else { find_pre_post_exprs(fcx, [l, r]/~, e.id); } + } else { find_pre_post_exprs(fcx, ~[l, r], e.id); } } expr_addr_of(_, x) | expr_cast(x, _) | expr_unary(_, x) | expr_loop_body(x) | expr_do_body(x) | expr_assert(x) | expr_copy(x) { @@ -368,8 +368,8 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { find_pre_post_block(fcx, body); set_pre_and_post(fcx.ccx, e.id, seq_preconds(fcx, - [expr_pp(fcx.ccx, test), - block_pp(fcx.ccx, body)]/~), + ~[expr_pp(fcx.ccx, test), + block_pp(fcx.ccx, body)]), intersect_states(expr_postcond(fcx.ccx, test), block_postcond(fcx.ccx, body))); } @@ -386,7 +386,7 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { set_pre_and_post(fcx.ccx, e.id, block_precond(fcx.ccx, body), loop_postcond); } - expr_index(val, sub) { find_pre_post_exprs(fcx, [val, sub]/~, e.id); } + expr_index(val, sub) { find_pre_post_exprs(fcx, ~[val, sub], e.id); } expr_alt(ex, alts, _) { find_pre_post_expr(fcx, ex); fn do_an_alt(fcx: fn_ctxt, an_alt: arm) -> pre_and_post { @@ -397,11 +397,11 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { find_pre_post_block(fcx, an_alt.body); ret block_pp(fcx.ccx, an_alt.body); } - let mut alt_pps = []/~; - for alts.each {|a| vec::push(alt_pps, do_an_alt(fcx, a)); } + let mut alt_pps = ~[]; + for alts.each |a| { vec::push(alt_pps, do_an_alt(fcx, a)); } fn combine_pp(antec: pre_and_post, fcx: fn_ctxt, &&pp: pre_and_post, &&next: pre_and_post) -> pre_and_post { - union(pp.precondition, seq_preconds(fcx, [antec, next]/~)); + union(pp.precondition, seq_preconds(fcx, ~[antec, next])); intersect(pp.postcondition, next.postcondition); ret pp; } @@ -409,7 +409,7 @@ fn find_pre_post_expr(fcx: fn_ctxt, e: @expr) { let e_pp = {precondition: empty_prestate(num_local_vars), postcondition: false_postcond(num_local_vars)}; - let g = {|a,b|combine_pp(antec_pp, fcx, a, b)}; + let g = |a,b| combine_pp(antec_pp, fcx, a, b); let alts_overall_pp = vec::foldl(e_pp, alt_pps, g); set_pre_and_post(fcx.ccx, e.id, alts_overall_pp.precondition, @@ -457,14 +457,14 @@ fn find_pre_post_stmt(fcx: fn_ctxt, s: stmt) { alt adecl.node { decl_local(alocals) { let prev_pp = empty_pre_post(num_constraints(fcx.enclosing)); - for alocals.each {|alocal| + for alocals.each |alocal| { alt alocal.node.init { some(an_init) { /* LHS always becomes initialized, whether or not this is a move */ find_pre_post_expr(fcx, an_init.expr); - pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) - {|p_id, _s, _n| + do pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) + |p_id, _s, _n| { copy_pre_post(fcx.ccx, p_id, an_init.expr); }; /* Inherit ann from initializer, and add var being @@ -477,8 +477,8 @@ fn find_pre_post_stmt(fcx: fn_ctxt, s: stmt) { _ { } } - pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) - {|p_id, _s, n| + do pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) + |p_id, _s, n| { let ident = path_to_ident(n); alt p { some(p) { @@ -497,7 +497,7 @@ fn find_pre_post_stmt(fcx: fn_ctxt, s: stmt) { guaranteed */ let e_pp = expr_pp(fcx.ccx, an_init.expr); tritv_copy(prev_pp.precondition, - seq_preconds(fcx, [prev_pp, e_pp]/~)); + seq_preconds(fcx, ~[prev_pp, e_pp])); /* Include the LHSs too, since those aren't in the postconds of the RHSs themselves */ @@ -505,8 +505,8 @@ fn find_pre_post_stmt(fcx: fn_ctxt, s: stmt) { prev_pp.postcondition); } none { - pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) - {|p_id, _s, _n| + do pat_bindings(fcx.ccx.tcx.def_map, alocal.node.pat) + |p_id, _s, _n| { clear_pp(node_id_to_ts_ann(fcx.ccx, p_id).conditions); }; clear_pp(node_id_to_ts_ann(fcx.ccx, id).conditions); @@ -549,13 +549,13 @@ fn find_pre_post_block(fcx: fn_ctxt, b: blk) { fn do_one_(fcx: fn_ctxt, s: @stmt) { find_pre_post_stmt(fcx, *s); } - for b.node.stmts.each {|s| do_one_(fcx, s); } + for b.node.stmts.each |s| { do_one_(fcx, s); } fn do_inner_(fcx: fn_ctxt, &&e: @expr) { find_pre_post_expr(fcx, e); } - let do_inner = {|a|do_inner_(fcx, a)}; + let do_inner = |a| do_inner_(fcx, a); option::map::<@expr, ()>(b.node.expr, do_inner); - let mut pps: [pre_and_post]/~ = []/~; - for b.node.stmts.each {|s| vec::push(pps, stmt_pp(fcx.ccx, *s)); } + let mut pps: ~[pre_and_post] = ~[]; + for b.node.stmts.each |s| { vec::push(pps, stmt_pp(fcx.ccx, *s)); } alt b.node.expr { none {/* no-op */ } some(e) { vec::push(pps, expr_pp(fcx.ccx, e)); } @@ -563,8 +563,8 @@ fn find_pre_post_block(fcx: fn_ctxt, b: blk) { let block_precond = seq_preconds(fcx, pps); - let mut postconds = []/~; - for pps.each {|pp| vec::push(postconds, get_post(pp)); } + let mut postconds = ~[]; + for pps.each |pp| { vec::push(postconds, get_post(pp)); } /* A block may be empty, so this next line ensures that the postconds vector is non-empty. */ diff --git a/src/rustc/middle/tstate/states.rs b/src/rustc/middle/tstate/states.rs index 64748a43b99..adf1efc8562 100644 --- a/src/rustc/middle/tstate/states.rs +++ b/src/rustc/middle/tstate/states.rs @@ -54,18 +54,18 @@ fn handle_move_or_copy(fcx: fn_ctxt, post: poststate, rhs_path: @path, } } -fn seq_states(fcx: fn_ctxt, pres: prestate, bindings: [binding]/~) -> +fn seq_states(fcx: fn_ctxt, pres: prestate, bindings: ~[binding]) -> {changed: bool, post: poststate} { let mut changed = false; let mut post = tritv_clone(pres); - for bindings.each {|b| + for bindings.each |b| { alt b.rhs { some(an_init) { // an expression, with or without a destination changed |= find_pre_post_state_expr(fcx, post, an_init.expr) || changed; post = tritv_clone(expr_poststate(fcx.ccx, an_init.expr)); - for b.lhs.each {|d| + for b.lhs.each |d| { alt an_init.expr.node { expr_path(p) { handle_move_or_copy(fcx, post, p, an_init.expr.id, d, @@ -166,7 +166,7 @@ fn find_pre_post_state_two(fcx: fn_ctxt, pres: prestate, lhs: @expr, } fn find_pre_post_state_call(fcx: fn_ctxt, pres: prestate, a: @expr, - id: node_id, ops: [init_op]/~, bs: [@expr]/~, + id: node_id, ops: ~[init_op], bs: ~[@expr], cf: ret_style) -> bool { let mut changed = find_pre_post_state_expr(fcx, pres, a); // FIXME (#2178): This could be a typestate constraint (except we're @@ -183,7 +183,7 @@ fn find_pre_post_state_call(fcx: fn_ctxt, pres: prestate, a: @expr, } fn find_pre_post_state_exprs(fcx: fn_ctxt, pres: prestate, id: node_id, - ops: [init_op]/~, es: [@expr]/~, + ops: ~[init_op], es: ~[@expr], cf: ret_style) -> bool { let rs = seq_states(fcx, pres, arg_bindings(ops, es)); let mut changed = rs.changed | set_prestate_ann(fcx.ccx, id, pres); @@ -271,7 +271,7 @@ fn find_pre_post_state_cap_clause(fcx: fn_ctxt, e_id: node_id, let ccx = fcx.ccx; let pres_changed = set_prestate_ann(ccx, e_id, pres); let post = tritv_clone(pres); - for (*cap_clause).each { |cap_item| + for (*cap_clause).each |cap_item| { if cap_item.is_move { forget_in_poststate(fcx, post, cap_item.id); } @@ -332,10 +332,11 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool { let base_pres = alt vec::last_opt(exs) { none { pres } some(f) { expr_poststate(fcx.ccx, f) }}; - option::iter(maybe_base, {|base| + option::iter(maybe_base, |base| { changed |= find_pre_post_state_expr(fcx, base_pres, base) | - set_poststate_ann(fcx.ccx, e.id, - expr_poststate(fcx.ccx, base))}); + set_poststate_ann(fcx.ccx, e.id, + expr_poststate(fcx.ccx, base)) + }); ret changed; } expr_tup(elts) { @@ -404,7 +405,7 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool { /* conservative approximation: if a loop contains a break or cont, we assume nothing about the poststate */ - /* which is still unsound -- see [Break-unsound]/~ */ + /* which is still unsound -- see ~[Break-unsound] */ if has_nonlocal_exits(body) { ret changed | set_poststate_ann(fcx.ccx, e.id, pres); } else { @@ -423,7 +424,7 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool { /* conservative approximation: if a loop contains a break or cont, we assume nothing about the poststate (so, we set all predicates to "don't know" */ - /* which is still unsound -- see [Break-unsound]/~ */ + /* which is still unsound -- see ~[Break-unsound] */ if may_break(body) { /* Only do this if there are *breaks* not conts. An infinite loop with conts is still an infinite loop. @@ -449,7 +450,7 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool { let mut a_post; if vec::len(alts) > 0u { a_post = false_postcond(num_constrs); - for alts.each {|an_alt| + for alts.each |an_alt| { alt an_alt.guard { some(e) { changed |= find_pre_post_state_expr(fcx, e_post, e); @@ -483,8 +484,10 @@ fn find_pre_post_state_expr(fcx: fn_ctxt, pres: prestate, e: @expr) -> bool { let post = false_postcond(num_constrs); ret set_prestate_ann(fcx.ccx, e.id, pres) | set_poststate_ann(fcx.ccx, e.id, post) | - option::map_default(maybe_fail_val, false, {|fail_val| - find_pre_post_state_expr(fcx, pres, fail_val)}); + option::map_default( + maybe_fail_val, false, + |fail_val| + find_pre_post_state_expr(fcx, pres, fail_val) ); } expr_check(_, p) { /* predicate p holds after this expression executes */ @@ -563,7 +566,7 @@ fn find_pre_post_state_block(fcx: fn_ctxt, pres0: prestate, b: blk) -> bool { initializes. Then <pres> becomes the new poststate. */ let mut changed = false; - for b.node.stmts.each {|s| + for b.node.stmts.each |s| { changed |= find_pre_post_state_stmt(fcx, pres, s); pres = stmt_poststate(fcx.ccx, *s); } @@ -591,7 +594,7 @@ fn find_pre_post_state_fn(fcx: fn_ctxt, // Instantiate any constraints on the arguments so we can use them let block_pre = block_prestate(fcx.ccx, f_body); - for f_decl.constraints.each {|c| + for f_decl.constraints.each |c| { let tsc = ast_constr_to_ts_constr(fcx.ccx.tcx, f_decl.inputs, c); set_in_prestate_constr(fcx, tsc, block_pre); } diff --git a/src/rustc/middle/tstate/tritv.rs b/src/rustc/middle/tstate/tritv.rs index 7f1ba5f0db8..c3e221ef32c 100644 --- a/src/rustc/middle/tstate/tritv.rs +++ b/src/rustc/middle/tstate/tritv.rs @@ -249,9 +249,9 @@ fn tritv_doesntcare(v: t) -> bool { ret true; } -fn to_vec(v: t) -> [uint]/~ { +fn to_vec(v: t) -> ~[uint] { let mut i: uint = 0u; - let mut rslt: [uint]/~ = []/~; + let mut rslt: ~[uint] = ~[]; while i < v.nbits { vec::push(rslt, alt tritv_get(v, i) { diff --git a/src/rustc/middle/ty.rs b/src/rustc/middle/ty.rs index be490f98fe4..8e47c3abaf2 100644 --- a/src/rustc/middle/ty.rs +++ b/src/rustc/middle/ty.rs @@ -172,15 +172,15 @@ type arg = {mode: ast::mode, ty: t}; type field = {ident: ast::ident, mt: mt}; -type param_bounds = @[param_bound]/~; +type param_bounds = @~[param_bound]; type method = {ident: ast::ident, - tps: @[param_bounds]/~, + tps: @~[param_bounds], fty: fn_ty, purity: ast::purity, vis: ast::visibility}; -type constr_table = hashmap<ast::node_id, [constr]/~>; +type constr_table = hashmap<ast::node_id, ~[constr]>; type mt = {ty: t, mutbl: ast::mutability}; @@ -234,7 +234,7 @@ type ctxt = // of this node. This only applies to nodes that refer to entities // parameterized by type parameters, such as generic fns, types, or // other items. - node_type_substs: hashmap<node_id, [t]/~>, + node_type_substs: hashmap<node_id, ~[t]>, items: ast_map::map, intrinsic_ifaces: hashmap<ast::ident, (ast::def_id, t)>, @@ -246,8 +246,8 @@ type ctxt = needs_unwind_cleanup_cache: hashmap<t, bool>, kind_cache: hashmap<t, kind>, ast_ty_to_ty_cache: hashmap<@ast::ty, ast_ty_to_ty_cache_entry>, - enum_var_cache: hashmap<def_id, @[variant_info]/~>, - iface_method_cache: hashmap<def_id, @[method]/~>, + enum_var_cache: hashmap<def_id, @~[variant_info]>, + iface_method_cache: hashmap<def_id, @~[method]>, ty_param_bounds: hashmap<ast::node_id, param_bounds>, inferred_modes: hashmap<ast::node_id, ast::mode>, // maps the id of borrowed expr to scope of borrowed ptr @@ -305,10 +305,10 @@ enum closure_kind { type fn_ty = {purity: ast::purity, proto: ast::proto, - inputs: [arg]/~, + inputs: ~[arg], output: t, ret_style: ret_style, - constraints: [@constr]/~}; + constraints: ~[@constr]}; // See discussion at head of region.rs enum region { @@ -340,7 +340,7 @@ type opt_region = option<region>; type substs = { self_r: opt_region, self_ty: option<ty::t>, - tps: [t]/~ + tps: ~[t] }; // NB: If you change this, you'll probably want to change the corresponding @@ -361,18 +361,18 @@ enum sty { ty_evec(mt, vstore), ty_ptr(mt), ty_rptr(region, mt), - ty_rec([field]/~), + ty_rec(~[field]), ty_fn(fn_ty), ty_iface(def_id, substs), ty_class(def_id, substs), - ty_tup([t]/~), + ty_tup(~[t]), ty_var(tv_vid), // type variable during typechecking ty_var_integral(tvi_vid), // type variable during typechecking, for // integral types only ty_param(uint, def_id), // type parameter ty_self, // special, implicit `self` type parameter - ty_constr(t, [@type_constr]/~), + ty_constr(t, ~[@type_constr]), // "Fake" types, used for trans purposes ty_type, // type_desc* @@ -458,7 +458,7 @@ impl of to_str::to_str for purity { fn param_bounds_to_kind(bounds: param_bounds) -> kind { let mut kind = kind_noncopyable(); - for vec::each(*bounds) {|bound| + for vec::each(*bounds) |bound| { alt bound { bound_copy { kind = raise_kind(kind, kind_implicitly_copyable()); @@ -471,7 +471,7 @@ fn param_bounds_to_kind(bounds: param_bounds) -> kind { kind } -type ty_param_bounds_and_ty = {bounds: @[param_bounds]/~, +type ty_param_bounds_and_ty = {bounds: @~[param_bounds], rp: ast::region_param, ty: t}; @@ -491,17 +491,17 @@ fn mk_rcache() -> creader_cache { } fn new_ty_hash<V: copy>() -> map::hashmap<t, V> { - map::hashmap({|&&t: t| type_id(t)}, - {|&&a: t, &&b: t| type_id(a) == type_id(b)}) + map::hashmap(|&&t: t| type_id(t), + |&&a: t, &&b: t| type_id(a) == type_id(b)) } fn mk_ctxt(s: session::session, dm: resolve::def_map, amap: ast_map::map, freevars: freevars::freevar_map, region_map: middle::region::region_map) -> ctxt { - let interner = map::hashmap({|&&k: intern_key| + let interner = map::hashmap(|&&k: intern_key| { hash_type_structure(k.struct) + option::map_default(k.o_def_id, 0u, ast_util::hash_def) - }, {|&&a, &&b| a == b}); + }, |&&a, &&b| a == b); let vecs_implicitly_copyable = get_warning_level(s.warning_settings.default_settings, vecs_not_implicitly_copyable) == ignore; @@ -557,8 +557,8 @@ fn mk_t_with_id(cx: ctxt, st: sty, o_def_id: option<ast::def_id>) -> t { } fn sflags(substs: substs) -> uint { let mut f = 0u; - for substs.tps.each {|tt| f |= get(tt).flags; } - substs.self_r.iter { |r| f |= rflags(r) } + for substs.tps.each |tt| { f |= get(tt).flags; } + substs.self_r.iter(|r| f |= rflags(r)); ret f; } alt st { @@ -587,13 +587,13 @@ fn mk_t_with_id(cx: ctxt, st: sty, o_def_id: option<ast::def_id>) -> t { flags |= get(m.ty).flags; } ty_rec(flds) { - for flds.each {|f| flags |= get(f.mt.ty).flags; } + for flds.each |f| { flags |= get(f.mt.ty).flags; } } ty_tup(ts) { - for ts.each {|tt| flags |= get(tt).flags; } + for ts.each |tt| { flags |= get(tt).flags; } } ty_fn(f) { - for f.inputs.each {|a| flags |= get(a.ty).flags; } + for f.inputs.each |a| { flags |= get(a.ty).flags; } flags |= get(f.output).flags; } ty_constr(tt, _) { @@ -691,13 +691,13 @@ fn mk_mut_unboxed_vec(cx: ctxt, ty: t) -> t { } -fn mk_rec(cx: ctxt, fs: [field]/~) -> t { mk_t(cx, ty_rec(fs)) } +fn mk_rec(cx: ctxt, fs: ~[field]) -> t { mk_t(cx, ty_rec(fs)) } -fn mk_constr(cx: ctxt, t: t, cs: [@type_constr]/~) -> t { +fn mk_constr(cx: ctxt, t: t, cs: ~[@type_constr]) -> t { mk_t(cx, ty_constr(t, cs)) } -fn mk_tup(cx: ctxt, ts: [t]/~) -> t { mk_t(cx, ty_tup(ts)) } +fn mk_tup(cx: ctxt, ts: ~[t]) -> t { mk_t(cx, ty_tup(ts)) } fn mk_fn(cx: ctxt, fty: fn_ty) -> t { mk_t(cx, ty_fn(fty)) } @@ -756,7 +756,7 @@ fn encl_region(cx: ctxt, id: ast::node_id) -> ty::region { } fn walk_ty(ty: t, f: fn(t)) { - maybe_walk_ty(ty, {|t| f(t); true}); + maybe_walk_ty(ty, |t| { f(t); true }); } fn maybe_walk_ty(ty: t, f: fn(t) -> bool) { @@ -773,14 +773,14 @@ fn maybe_walk_ty(ty: t, f: fn(t) -> bool) { } ty_enum(_, substs) | ty_class(_, substs) | ty_iface(_, substs) { - for substs.tps.each {|subty| maybe_walk_ty(subty, f); } + for substs.tps.each |subty| { maybe_walk_ty(subty, f); } } ty_rec(fields) { - for fields.each {|fl| maybe_walk_ty(fl.mt.ty, f); } + for fields.each |fl| { maybe_walk_ty(fl.mt.ty, f); } } - ty_tup(ts) { for ts.each {|tt| maybe_walk_ty(tt, f); } } + ty_tup(ts) { for ts.each |tt| { maybe_walk_ty(tt, f); } } ty_fn(ft) { - for ft.inputs.each {|a| maybe_walk_ty(a.ty, f); } + for ft.inputs.each |a| { maybe_walk_ty(a.ty, f); } maybe_walk_ty(ft.output, f); } ty_constr(sub, _) { maybe_walk_ty(sub, f); } @@ -795,8 +795,8 @@ fn fold_sty_to_ty(tcx: ty::ctxt, sty: sty, foldop: fn(t) -> t) -> t { fn fold_sty(sty: sty, fldop: fn(t) -> t) -> sty { fn fold_substs(substs: substs, fldop: fn(t) -> t) -> substs { {self_r: substs.self_r, - self_ty: substs.self_ty.map { |t| fldop(t) }, - tps: substs.tps.map { |t| fldop(t) }} + self_ty: substs.self_ty.map(|t| fldop(t)), + tps: substs.tps.map(|t| fldop(t))} } alt sty { @@ -825,7 +825,7 @@ fn fold_sty(sty: sty, fldop: fn(t) -> t) -> sty { ty_iface(did, fold_substs(substs, fldop)) } ty_rec(fields) { - let new_fields = vec::map(fields) {|fl| + let new_fields = do vec::map(fields) |fl| { let new_ty = fldop(fl.mt.ty); let new_mt = {ty: new_ty, mutbl: fl.mt.mutbl}; {ident: fl.ident, mt: new_mt} @@ -833,14 +833,14 @@ fn fold_sty(sty: sty, fldop: fn(t) -> t) -> sty { ty_rec(new_fields) } ty_tup(ts) { - let new_ts = vec::map(ts) {|tt| fldop(tt) }; + let new_ts = vec::map(ts, |tt| fldop(tt)); ty_tup(new_ts) } ty_fn(f) { - let new_args = vec::map(f.inputs) {|a| + let new_args = vec::map(f.inputs, |a| { let new_ty = fldop(a.ty); {mode: a.mode, ty: new_ty} - }; + }); let new_output = fldop(f.output); ty_fn({inputs: new_args, output: new_output with f}) } @@ -863,7 +863,7 @@ fn fold_sty(sty: sty, fldop: fn(t) -> t) -> sty { // Folds types from the bottom up. fn fold_ty(cx: ctxt, t0: t, fldop: fn(t) -> t) -> t { - let sty = fold_sty(get(t0).struct) {|t| fold_ty(cx, fldop(t), fldop) }; + let sty = fold_sty(get(t0).struct, |t| fold_ty(cx, fldop(t), fldop)); fldop(mk_t(cx, sty)) } @@ -876,9 +876,9 @@ fn walk_regions_and_ty( if (walkt(ty)) { fold_regions_and_ty( cx, ty, - { |r| walkr(r); r }, - { |t| walkt(t); walk_regions_and_ty(cx, t, walkr, walkt); t }, - { |t| walkt(t); walk_regions_and_ty(cx, t, walkr, walkt); t }); + |r| { walkr(r); r }, + |t| { walkt(t); walk_regions_and_ty(cx, t, walkr, walkt); t }, + |t| { walkt(t); walk_regions_and_ty(cx, t, walkr, walkt); t }); } } @@ -894,9 +894,9 @@ fn fold_regions_and_ty( fldr: fn(r: region) -> region, fldt: fn(t: t) -> t) -> substs { - {self_r: substs.self_r.map { |r| fldr(r) }, - self_ty: substs.self_ty.map { |t| fldt(t) }, - tps: substs.tps.map { |t| fldt(t) }} + {self_r: substs.self_r.map(|r| fldr(r)), + self_ty: substs.self_ty.map(|t| fldt(t)), + tps: substs.tps.map(|t| fldt(t))} } let tb = ty::get(ty); @@ -925,14 +925,10 @@ fn fold_regions_and_ty( ty::mk_iface(cx, def_id, fold_substs(substs, fldr, fldt)) } sty @ ty_fn(_) { - fold_sty_to_ty(cx, sty) {|t| - fldfnt(t) - } + fold_sty_to_ty(cx, sty, |t| fldfnt(t)) } sty { - fold_sty_to_ty(cx, sty) {|t| - fldt(t) - } + fold_sty_to_ty(cx, sty, |t| fldt(t)) } } } @@ -949,9 +945,9 @@ fn fold_regions( if !type_has_regions(ty) { ret ty; } fold_regions_and_ty( cx, ty, - { |r| fldr(r, in_fn) }, - { |t| do_fold(cx, t, true, fldr) }, - { |t| do_fold(cx, t, in_fn, fldr) }) + |r| fldr(r, in_fn), + |t| do_fold(cx, t, true, fldr), + |t| do_fold(cx, t, in_fn, fldr)) } do_fold(cx, ty, false, fldr) } @@ -981,7 +977,7 @@ fn fold_region(cx: ctxt, t0: t, fldop: fn(region, bool) -> region) -> t { t0 } sty { - fold_sty_to_ty(cx, sty) {|t| + do fold_sty_to_ty(cx, sty) |t| { do_fold(cx, t, under_r, fldop) } } @@ -992,13 +988,13 @@ fn fold_region(cx: ctxt, t0: t, fldop: fn(region, bool) -> region) -> t { } // Substitute *only* type parameters. Used in trans where regions are erased. -fn subst_tps(cx: ctxt, tps: [t]/~, typ: t) -> t { +fn subst_tps(cx: ctxt, tps: ~[t], typ: t) -> t { if tps.len() == 0u { ret typ; } let tb = ty::get(typ); if !tbox_has_flag(tb, has_params) { ret typ; } alt tb.struct { ty_param(idx, _) { tps[idx] } - sty { fold_sty_to_ty(cx, sty) {|t| subst_tps(cx, tps, t) } } + sty { fold_sty_to_ty(cx, sty, |t| subst_tps(cx, tps, t)) } } } @@ -1010,9 +1006,9 @@ fn substs_is_noop(substs: substs) -> bool { fn substs_to_str(cx: ctxt, substs: substs) -> str { #fmt["substs(self_r=%s, self_ty=%s, tps=%?)", - substs.self_r.map_default("none", { |r| region_to_str(cx, r) }), - substs.self_ty.map_default("none", { |t| ty_to_str(cx, t) }), - substs.tps.map { |t| ty_to_str(cx, t) }] + substs.self_r.map_default("none", |r| region_to_str(cx, r)), + substs.self_ty.map_default("none", |t| ty_to_str(cx, t)), + substs.tps.map(|t| ty_to_str(cx, t))] } fn subst(cx: ctxt, @@ -1039,14 +1035,12 @@ fn subst(cx: ctxt, _ { fold_regions_and_ty( cx, typ, - { |r| - alt r { - re_bound(br_self) {substs.self_r.get()} - _ {r} - } + |r| alt r { + re_bound(br_self) {substs.self_r.get()} + _ {r} }, - { |t| do_subst(cx, substs, t) }, - { |t| do_subst(cx, substs, t) }) + |t| do_subst(cx, substs, t), + |t| do_subst(cx, substs, t)) } } } @@ -1206,26 +1200,28 @@ fn type_needs_drop(cx: ctxt, ty: t) -> bool { ty_evec(mt, vstore_fixed(_)) { type_needs_drop(cx, mt.ty) } ty_unboxed_vec(mt) { type_needs_drop(cx, mt.ty) } ty_rec(flds) { - for flds.each {|f| if type_needs_drop(cx, f.mt.ty) { accum = true; } } + for flds.each |f| { + if type_needs_drop(cx, f.mt.ty) { accum = true; } + } accum } ty_class(did, substs) { // Any class with a dtor needs a drop option::is_some(ty_dtor(cx, did)) || { - for vec::each(ty::class_items_as_fields(cx, did, substs)) {|f| + for vec::each(ty::class_items_as_fields(cx, did, substs)) |f| { if type_needs_drop(cx, f.mt.ty) { accum = true; } } accum } } ty_tup(elts) { - for elts.each {|m| if type_needs_drop(cx, m) { accum = true; } } + for elts.each |m| { if type_needs_drop(cx, m) { accum = true; } } accum } ty_enum(did, substs) { let variants = enum_variants(cx, did); - for vec::each(*variants) {|variant| - for variant.args.each {|aty| + for vec::each(*variants) |variant| { + for variant.args.each |aty| { // Perform any type parameter substitutions. let arg_ty = subst(cx, substs, aty); if type_needs_drop(cx, arg_ty) { accum = true; } @@ -1276,7 +1272,7 @@ fn type_needs_unwind_cleanup_(cx: ctxt, ty: t, let mut encountered_box = encountered_box; let mut needs_unwind_cleanup = false; - maybe_walk_ty(ty) {|ty| + do maybe_walk_ty(ty) |ty| { let old_encountered_box = encountered_box; let result = alt get(ty).struct { ty_box(_) | ty_opaque_box { @@ -1289,8 +1285,8 @@ fn type_needs_unwind_cleanup_(cx: ctxt, ty: t, true } ty_enum(did, substs) { - for vec::each(*enum_variants(cx, did)) {|v| - for v.args.each {|aty| + for vec::each(*enum_variants(cx, did)) |v| { + for v.args.each |aty| { let t = subst(cx, substs, aty); needs_unwind_cleanup |= type_needs_unwind_cleanup_(cx, t, tycache, @@ -1537,7 +1533,7 @@ fn type_kind(cx: ctxt, ty: t) -> kind { // Records lower to the lowest of their members. ty_rec(flds) { let mut lowest = kind_top(); - for flds.each {|f| + for flds.each |f| { lowest = lower_kind(lowest, mutable_type_kind(cx, f.mt)); } lowest @@ -1548,7 +1544,7 @@ fn type_kind(cx: ctxt, ty: t) -> kind { // also factor out this code, copied from the records case let mut lowest = kind_top(); let flds = class_items_as_fields(cx, did, substs); - for flds.each {|f| + for flds.each |f| { lowest = lower_kind(lowest, mutable_type_kind(cx, f.mt)); } // ...but classes with dtors are never copyable (they can be @@ -1561,7 +1557,7 @@ fn type_kind(cx: ctxt, ty: t) -> kind { // Tuples lower to the lowest of their members. ty_tup(tys) { let mut lowest = kind_top(); - for tys.each {|ty| lowest = lower_kind(lowest, type_kind(cx, ty)); } + for tys.each |ty| { lowest = lower_kind(lowest, type_kind(cx, ty)); } lowest } // Enums lower to the lowest of their variants. @@ -1569,10 +1565,10 @@ fn type_kind(cx: ctxt, ty: t) -> kind { let mut lowest = kind_top(); let variants = enum_variants(cx, did); if vec::len(*variants) == 0u { - lowest = kind_noncopyable(); + lowest = kind_send_only(); } else { - for vec::each(*variants) {|variant| - for variant.args.each {|aty| + for vec::each(*variants) |variant| { + for variant.args.each |aty| { // Perform any type parameter substitutions. let arg_ty = subst(cx, substs, aty); lowest = lower_kind(lowest, type_kind(cx, arg_ty)); @@ -1603,7 +1599,7 @@ fn type_kind(cx: ctxt, ty: t) -> kind { // True if instantiating an instance of `ty` requires an instance of `r_ty`. fn is_instantiable(cx: ctxt, r_ty: t) -> bool { - fn type_requires(cx: ctxt, seen: @mut [def_id]/~, + fn type_requires(cx: ctxt, seen: @mut ~[def_id], r_ty: t, ty: t) -> bool { #debug["type_requires(%s, %s)?", ty_to_str(cx, r_ty), @@ -1621,7 +1617,7 @@ fn is_instantiable(cx: ctxt, r_ty: t) -> bool { ret r; } - fn subtypes_require(cx: ctxt, seen: @mut [def_id]/~, + fn subtypes_require(cx: ctxt, seen: @mut ~[def_id], r_ty: t, ty: t) -> bool { #debug["subtypes_require(%s, %s)?", ty_to_str(cx, r_ty), @@ -1665,7 +1661,7 @@ fn is_instantiable(cx: ctxt, r_ty: t) -> bool { } ty_rec(fields) { - vec::any(fields) {|field| + do vec::any(fields) |field| { type_requires(cx, seen, r_ty, field.mt.ty) } } @@ -1680,16 +1676,14 @@ fn is_instantiable(cx: ctxt, r_ty: t) -> bool { ty_class(did, substs) { vec::push(*seen, did); - let r = vec::any(class_items_as_fields(cx, did, substs)) {|f| - type_requires(cx, seen, r_ty, f.mt.ty)}; + let r = vec::any(class_items_as_fields(cx, did, substs), + |f| type_requires(cx, seen, r_ty, f.mt.ty)); vec::pop(*seen); r } ty_tup(ts) { - vec::any(ts) {|t| - type_requires(cx, seen, r_ty, t) - } + vec::any(ts, |t| type_requires(cx, seen, r_ty, t)) } ty_enum(did, _) if vec::contains(*seen, did) { @@ -1699,12 +1693,12 @@ fn is_instantiable(cx: ctxt, r_ty: t) -> bool { ty_enum(did, substs) { vec::push(*seen, did); let vs = enum_variants(cx, did); - let r = vec::len(*vs) > 0u && vec::all(*vs) {|variant| - vec::any(variant.args) {|aty| + let r = vec::len(*vs) > 0u && vec::all(*vs, |variant| { + vec::any(variant.args, |aty| { let sty = subst(cx, substs, aty); type_requires(cx, seen, r_ty, sty) - } - }; + }) + }); vec::pop(*seen); r } @@ -1718,7 +1712,7 @@ fn is_instantiable(cx: ctxt, r_ty: t) -> bool { ret r; } - let seen = @mut []/~; + let seen = @mut ~[]; !subtypes_require(cx, seen, r_ty, r_ty) } @@ -1729,8 +1723,8 @@ fn type_structurally_contains(cx: ctxt, ty: t, test: fn(sty) -> bool) -> if test(sty) { ret true; } alt sty { ty_enum(did, substs) { - for vec::each(*enum_variants(cx, did)) {|variant| - for variant.args.each {|aty| + for vec::each(*enum_variants(cx, did)) |variant| { + for variant.args.each |aty| { let sty = subst(cx, substs, aty); if type_structurally_contains(cx, sty, test) { ret true; } } @@ -1738,13 +1732,13 @@ fn type_structurally_contains(cx: ctxt, ty: t, test: fn(sty) -> bool) -> ret false; } ty_rec(fields) { - for fields.each {|field| + for fields.each |field| { if type_structurally_contains(cx, field.mt.ty, test) { ret true; } } ret false; } ty_class(did, substs) { - for lookup_class_fields(cx, did).each {|field| + for lookup_class_fields(cx, did).each |field| { let ft = lookup_field_type(cx, did, field.id, substs); if type_structurally_contains(cx, ft, test) { ret true; } } @@ -1752,7 +1746,7 @@ fn type_structurally_contains(cx: ctxt, ty: t, test: fn(sty) -> bool) -> } ty_tup(ts) { - for ts.each {|tt| + for ts.each |tt| { if type_structurally_contains(cx, tt, test) { ret true; } } ret false; @@ -1768,7 +1762,7 @@ fn type_structurally_contains(cx: ctxt, ty: t, test: fn(sty) -> bool) -> // distinguished from the value itself. I.e. types with mut content that's // not shared through a pointer. fn type_allows_implicit_copy(cx: ctxt, ty: t) -> bool { - ret !type_structurally_contains(cx, ty, {|sty| + ret !type_structurally_contains(cx, ty, |sty| { alt sty { ty_param(_, _) { true } @@ -1780,7 +1774,7 @@ fn type_allows_implicit_copy(cx: ctxt, ty: t) -> bool { mt.mutbl != ast::m_imm } ty_rec(fields) { - vec::any(fields, {|f| f.mt.mutbl != ast::m_imm}) + vec::any(fields, |f| f.mt.mutbl != ast::m_imm) } _ { false } } @@ -1788,7 +1782,7 @@ fn type_allows_implicit_copy(cx: ctxt, ty: t) -> bool { } fn type_structurally_contains_uniques(cx: ctxt, ty: t) -> bool { - ret type_structurally_contains(cx, ty, {|sty| + ret type_structurally_contains(cx, ty, |sty| { alt sty { ty_uniq(_) | ty_vec(_) | @@ -1841,7 +1835,7 @@ fn type_is_pod(cx: ctxt, ty: t) -> bool { // Structural types ty_enum(did, substs) { let variants = enum_variants(cx, did); - for vec::each(*variants) {|variant| + for vec::each(*variants) |variant| { let tup_ty = mk_tup(cx, variant.args); // Perform any type parameter substitutions. @@ -1850,12 +1844,12 @@ fn type_is_pod(cx: ctxt, ty: t) -> bool { } } ty_rec(flds) { - for flds.each {|f| + for flds.each |f| { if !type_is_pod(cx, f.mt.ty) { result = false; } } } ty_tup(elts) { - for elts.each {|elt| if !type_is_pod(cx, elt) { result = false; } } + for elts.each |elt| { if !type_is_pod(cx, elt) { result = false; } } } ty_estr(vstore_fixed(_)) { result = true; } ty_evec(mt, vstore_fixed(_)) | ty_unboxed_vec(mt) { @@ -1865,11 +1859,11 @@ fn type_is_pod(cx: ctxt, ty: t) -> bool { ty_param(_, _) { result = false; } ty_opaque_closure_ptr(_) { result = true; } ty_class(did, substs) { - result = vec::any(lookup_class_fields(cx, did)) { |f| + result = vec::any(lookup_class_fields(cx, did), |f| { let fty = ty::lookup_item_type(cx, f.id); let sty = subst(cx, substs, fty.ty); type_is_pod(cx, sty) - }; + }); } ty_estr(vstore_slice(*)) | ty_evec(_, vstore_slice(*)) { @@ -1897,7 +1891,7 @@ fn type_is_c_like_enum(cx: ctxt, ty: t) -> bool { alt get(ty).struct { ty_enum(did, substs) { let variants = enum_variants(cx, did); - let some_n_ary = vec::any(*variants, {|v| vec::len(v.args) > 0u}); + let some_n_ary = vec::any(*variants, |v| vec::len(v.args) > 0u); ret !some_n_ary; } _ { ret false;} @@ -1976,7 +1970,7 @@ fn hash_bound_region(br: bound_region) -> uint { fn br_hashmap<V:copy>() -> hashmap<bound_region, V> { map::hashmap(hash_bound_region, - {|&&a: bound_region, &&b: bound_region| a == b }) + |&&a: bound_region, &&b: bound_region| a == b) } // Type hashing. @@ -1987,15 +1981,15 @@ fn hash_type_structure(st: sty) -> uint { (h << 2u) + (did.node as uint) } fn hash_subty(id: uint, subty: t) -> uint { (id << 2u) + type_id(subty) } - fn hash_subtys(id: uint, subtys: [t]/~) -> uint { + fn hash_subtys(id: uint, subtys: ~[t]) -> uint { let mut h = id; - for subtys.each {|s| h = (h << 2u) + type_id(s) } + for subtys.each |s| { h = (h << 2u) + type_id(s) } h } fn hash_type_constr(id: uint, c: @type_constr) -> uint { let mut h = id; h = (h << 2u) + hash_def(h, c.node.id); - for c.node.args.each {|a| + for c.node.args.each |a| { alt a.node { carg_base { h += h << 2u; } carg_lit(_) { fail "lit args not implemented yet"; } @@ -2048,12 +2042,12 @@ fn hash_type_structure(st: sty) -> uint { ty_tup(ts) { hash_subtys(25u, ts) } ty_rec(fields) { let mut h = 26u; - for fields.each {|f| h = hash_subty(h, f.mt.ty); } + for fields.each |f| { h = hash_subty(h, f.mt.ty); } h } ty_fn(f) { let mut h = 27u; - for f.inputs.each {|a| h = hash_subty(h, a.ty); } + for f.inputs.each |a| { h = hash_subty(h, a.ty); } hash_subty(h, f.output) } ty_self { 28u } @@ -2065,7 +2059,7 @@ fn hash_type_structure(st: sty) -> uint { ty_ptr(mt) { hash_subty(35u, mt.ty) } ty_constr(t, cs) { let mut h = hash_subty(36u, t); - for cs.each {|c| h = (h << 2u) + hash_type_constr(h, c); } + for cs.each |c| { h = (h << 2u) + hash_type_constr(h, c); } h } ty_uniq(mt) { hash_subty(37u, mt.ty) } @@ -2108,10 +2102,10 @@ fn arg_eq<T>(eq: fn(T, T) -> bool, } fn args_eq<T>(eq: fn(T, T) -> bool, - a: [@sp_constr_arg<T>]/~, - b: [@sp_constr_arg<T>]/~) -> bool { + a: ~[@sp_constr_arg<T>], + b: ~[@sp_constr_arg<T>]) -> bool { let mut i: uint = 0u; - for a.each {|arg| + for a.each |arg| { if !arg_eq(eq, arg, b[i]) { ret false; } i += 1u; } @@ -2124,10 +2118,10 @@ fn constr_eq(c: @constr, d: @constr) -> bool { args_eq(eq_int, c.node.args, d.node.args); } -fn constrs_eq(cs: [@constr]/~, ds: [@constr]/~) -> bool { +fn constrs_eq(cs: ~[@constr], ds: ~[@constr]) -> bool { if vec::len(cs) != vec::len(ds) { ret false; } let mut i = 0u; - for cs.each {|c| if !constr_eq(c, ds[i]) { ret false; } i += 1u; } + for cs.each |c| { if !constr_eq(c, ds[i]) { ret false; } i += 1u; } ret true; } @@ -2139,9 +2133,9 @@ fn node_id_to_type(cx: ctxt, id: ast::node_id) -> t { } } -fn node_id_to_type_params(cx: ctxt, id: ast::node_id) -> [t]/~ { +fn node_id_to_type_params(cx: ctxt, id: ast::node_id) -> ~[t] { alt cx.node_type_substs.find(id) { - none { ret []/~; } + none { ret ~[]; } some(ts) { ret ts; } } } @@ -2151,7 +2145,7 @@ fn node_id_has_type_params(cx: ctxt, id: ast::node_id) -> bool { } // Type accessors for substructures of types -fn ty_fn_args(fty: t) -> [arg]/~ { +fn ty_fn_args(fty: t) -> ~[arg] { alt get(fty).struct { ty_fn(f) { f.inputs } _ { fail "ty_fn_args() called on non-fn type"; } @@ -2187,8 +2181,8 @@ fn is_fn_ty(fty: t) -> bool { } // Returns a vec of all the input and output types of fty. -fn tys_in_fn_ty(fty: fn_ty) -> [t]/~ { - vec::append_one(fty.inputs.map({|a| a.ty}), fty.output) +fn tys_in_fn_ty(fty: fn_ty) -> ~[t] { + vec::append_one(fty.inputs.map(|a| a.ty), fty.output) } // Just checks whether it's a fn that returns bool, @@ -2237,7 +2231,7 @@ fn expr_ty(cx: ctxt, expr: @ast::expr) -> t { } fn expr_ty_params_and_ty(cx: ctxt, - expr: @ast::expr) -> {params: [t]/~, ty: t} { + expr: @ast::expr) -> {params: ~[t], ty: t} { ret {params: node_id_to_type_params(cx, expr.id), ty: node_id_to_type(cx, expr.id)}; } @@ -2264,27 +2258,27 @@ fn stmt_node_id(s: @ast::stmt) -> ast::node_id { } } -fn field_idx(id: ast::ident, fields: [field]/~) -> option<uint> { +fn field_idx(id: ast::ident, fields: ~[field]) -> option<uint> { let mut i = 0u; - for fields.each {|f| if f.ident == id { ret some(i); } i += 1u; } + for fields.each |f| { if f.ident == id { ret some(i); } i += 1u; } ret none; } fn get_field(rec_ty: t, id: ast::ident) -> field { - alt check vec::find(get_fields(rec_ty), {|f| str::eq(*f.ident, *id) }) { + alt check vec::find(get_fields(rec_ty), |f| str::eq(*f.ident, *id)) { some(f) { f } } } -fn get_fields(rec_ty:t) -> [field]/~ { +fn get_fields(rec_ty:t) -> ~[field] { alt check get(rec_ty).struct { ty_rec(fields) { fields } } } -fn method_idx(id: ast::ident, meths: [method]/~) -> option<uint> { +fn method_idx(id: ast::ident, meths: ~[method]) -> option<uint> { let mut i = 0u; - for meths.each {|m| if m.ident == id { ret some(i); } i += 1u; } + for meths.each |m| { if m.ident == id { ret some(i); } i += 1u; } ret none; } @@ -2292,9 +2286,9 @@ fn occurs_check(tcx: ctxt, sp: span, vid: tv_vid, rt: t) { // Returns a vec of all the type variables occurring in `ty`. It may // contain duplicates. (Integral type vars aren't counted.) - fn vars_in_type(ty: t) -> [tv_vid]/~ { - let mut rslt = []/~; - walk_ty(ty) {|ty| + fn vars_in_type(ty: t) -> ~[tv_vid] { + let mut rslt = ~[]; + do walk_ty(ty) |ty| { alt get(ty).struct { ty_var(v) { vec::push(rslt, v); } _ { } } } rslt @@ -2516,11 +2510,11 @@ fn def_has_ty_params(def: ast::def) -> bool { } } -fn store_iface_methods(cx: ctxt, id: ast::node_id, ms: @[method]/~) { +fn store_iface_methods(cx: ctxt, id: ast::node_id, ms: @~[method]) { cx.iface_method_cache.insert(ast_util::local_def(id), ms); } -fn iface_methods(cx: ctxt, id: ast::def_id) -> @[method]/~ { +fn iface_methods(cx: ctxt, id: ast::def_id) -> @~[method] { alt cx.iface_method_cache.find(id) { some(ms) { ret ms; } _ {} @@ -2568,16 +2562,15 @@ fn ty_to_def_id(ty: t) -> option<ast::def_id> { } // Enum information -type variant_info = @{args: [t]/~, ctor_ty: t, name: ast::ident, +type variant_info = @{args: ~[t], ctor_ty: t, name: ast::ident, id: ast::def_id, disr_val: int}; fn substd_enum_variants(cx: ctxt, id: ast::def_id, - substs: substs) -> [variant_info]/~ { - vec::map(*enum_variants(cx, id)) { |variant_info| - let substd_args = vec::map(variant_info.args) {|aty| - subst(cx, substs, aty) - }; + substs: substs) -> ~[variant_info] { + do vec::map(*enum_variants(cx, id)) |variant_info| { + let substd_args = vec::map(variant_info.args, + |aty| subst(cx, substs, aty)); let substd_ctor_ty = subst(cx, substs, variant_info.ctor_ty); @@ -2661,7 +2654,7 @@ fn enum_is_univariant(cx: ctxt, id: ast::def_id) -> bool { vec::len(*enum_variants(cx, id)) == 1u } -fn enum_variants(cx: ctxt, id: ast::def_id) -> @[variant_info]/~ { +fn enum_variants(cx: ctxt, id: ast::def_id) -> @~[variant_info] { alt cx.enum_var_cache.find(id) { some(variants) { ret variants; } _ { /* fallthrough */ } @@ -2678,12 +2671,12 @@ fn enum_variants(cx: ctxt, id: ast::def_id) -> @[variant_info]/~ { alt cx.items.get(id.node) { ast_map::node_item(@{node: ast::item_enum(variants, _, _), _}, _) { let mut disr_val = -1; - @vec::map(variants, {|variant| + @vec::map(variants, |variant| { let ctor_ty = node_id_to_type(cx, variant.node.id); let arg_tys = { if vec::len(variant.node.args) > 0u { - ty_fn_args(ctor_ty).map { |a| a.ty } - } else { []/~ } + ty_fn_args(ctor_ty).map(|a| a.ty) + } else { ~[] } }; alt variant.node.disr_expr { some (ex) { @@ -2767,7 +2760,7 @@ fn lookup_field_type(tcx: ctxt, class_id: def_id, id: def_id, // Look up the list of field names and IDs for a given class // Fails if the id is not bound to a class. -fn lookup_class_fields(cx: ctxt, did: ast::def_id) -> [field_ty]/~ { +fn lookup_class_fields(cx: ctxt, did: ast::def_id) -> ~[field_ty] { if did.crate == ast::local_crate { alt cx.items.find(did.node) { some(ast_map::node_item(i,_)) { @@ -2791,14 +2784,14 @@ fn lookup_class_fields(cx: ctxt, did: ast::def_id) -> [field_ty]/~ { fn lookup_class_field(cx: ctxt, parent: ast::def_id, field_id: ast::def_id) -> field_ty { - alt vec::find(lookup_class_fields(cx, parent)) - {|f| f.id.node == field_id.node} { + alt vec::find(lookup_class_fields(cx, parent), + |f| f.id.node == field_id.node) { some(t) { t } none { cx.sess.bug("class ID not found in parent's fields"); } } } -fn lookup_public_fields(cx: ctxt, did: ast::def_id) -> [field_ty]/~ { +fn lookup_public_fields(cx: ctxt, did: ast::def_id) -> ~[field_ty] { vec::filter(lookup_class_fields(cx, did), is_public) } @@ -2812,12 +2805,12 @@ pure fn is_public(f: field_ty) -> bool { // Look up the list of method names and IDs for a given class // Fails if the id is not bound to a class. fn lookup_class_method_ids(cx: ctxt, did: ast::def_id) - : is_local(did) -> [{name: ident, id: node_id, vis: visibility}]/~ { + : is_local(did) -> ~[{name: ident, id: node_id, vis: visibility}] { alt cx.items.find(did.node) { some(ast_map::node_item(@{node: item_class(_,_,items,_,_,_), _}, _)) { let (_,ms) = split_class_items(items); - vec::map(ms, {|m| {name: m.ident, id: m.id, - vis: m.vis}}) + vec::map(ms, |m| {name: m.ident, id: m.id, + vis: m.vis}) } _ { cx.sess.bug("lookup_class_method_ids: id not bound to a class"); @@ -2834,7 +2827,7 @@ fn lookup_class_method_by_name(cx:ctxt, did: ast::def_id, name: ident, sp: span) -> def_id { if check is_local(did) { let ms = lookup_class_method_ids(cx, did); - for ms.each {|m| + for ms.each |m| { if m.name == name { ret ast_util::local_def(m.id); } @@ -2847,9 +2840,9 @@ fn lookup_class_method_by_name(cx:ctxt, did: ast::def_id, name: ident, } } -fn class_field_tys(items: [@class_member]/~) -> [field_ty]/~ { - let mut rslt = []/~; - for items.each {|it| +fn class_field_tys(items: ~[@class_member]) -> ~[field_ty] { + let mut rslt = ~[]; + for items.each |it| { alt it.node { instance_var(nm, _, cm, id, vis) { vec::push(rslt, {ident: nm, id: ast_util::local_def(id), @@ -2868,25 +2861,25 @@ fn class_field_tys(items: [@class_member]/~) -> [field_ty]/~ { // mutable, regardless of how they were declared. It's meant to // be used in trans. fn class_items_as_mutable_fields(cx:ctxt, did: ast::def_id, - substs: substs) -> [field]/~ { - class_item_fields(cx, did, substs, {|_mt| m_mutbl}) + substs: substs) -> ~[field] { + class_item_fields(cx, did, substs, |_mt| m_mutbl) } // Same as class_items_as_mutable_fields, but doesn't change // mutability. fn class_items_as_fields(cx:ctxt, did: ast::def_id, - substs: substs) -> [field]/~ { - class_item_fields(cx, did, substs, {|mt| alt mt { + substs: substs) -> ~[field] { + class_item_fields(cx, did, substs, |mt| alt mt { class_mutable { m_mutbl } - class_immutable { m_imm }}}) + class_immutable { m_imm }}) } fn class_item_fields(cx:ctxt, did: ast::def_id, substs: substs, frob_mutability: fn(class_mutability) -> mutability) - -> [field]/~ { - let mut rslt = []/~; - for lookup_class_fields(cx, did).each {|f| + -> ~[field] { + let mut rslt = ~[]; + for lookup_class_fields(cx, did).each |f| { // consider all instance vars mut, because the // constructor may mutate all vars vec::push(rslt, {ident: f.ident, mt: @@ -2954,18 +2947,18 @@ fn is_binopable(_cx: ctxt, ty: t, op: ast::binop) -> bool { const t: bool = true; const f: bool = false; - let tbl = [ + let tbl = ~[ /*. add, shift, bit . sub, rel, logic . mult, eq, */ - /*other*/ [f, f, f, f, t, t, f, f]/~, - /*bool*/ [f, f, f, f, t, t, t, t]/~, - /*int*/ [t, t, t, t, t, t, t, f]/~, - /*float*/ [t, t, t, f, t, t, f, f]/~, - /*str*/ [t, f, f, f, t, t, f, f]/~, - /*vec*/ [t, f, f, f, t, t, f, f]/~, - /*bot*/ [f, f, f, f, t, t, f, f]/~, - /*struct*/ [t, t, t, t, t, t, t, t]/~]/~; + /*other*/ ~[f, f, f, f, t, t, f, f], + /*bool*/ ~[f, f, f, f, t, t, t, t], + /*int*/ ~[t, t, t, t, t, t, t, f], + /*float*/ ~[t, t, t, f, t, t, f, f], + /*str*/ ~[t, f, f, f, t, t, f, f], + /*vec*/ ~[t, f, f, f, t, t, f, f], + /*bot*/ ~[f, f, f, f, t, t, f, f], + /*struct*/ ~[t, t, t, t, t, t, t, t]]; ret tbl[tycat(ty)][opcat(op)]; } @@ -2988,8 +2981,8 @@ fn ast_constr_to_constr<T>(tcx: ctxt, c: @ast::constr_general<T>) -> } } -fn ty_params_to_tys(tcx: ty::ctxt, tps: [ast::ty_param]/~) -> [t]/~ { - vec::from_fn(tps.len(), {|i| +fn ty_params_to_tys(tcx: ty::ctxt, tps: ~[ast::ty_param]) -> ~[t] { + vec::from_fn(tps.len(), |i| { ty::mk_param(tcx, i, ast_util::local_def(tps[i].id)) }) } @@ -3029,7 +3022,7 @@ fn normalize_ty(cx: ctxt, t: t) -> t { // types, which isn't necessary after #2187 let t = mk_t(cx, mach_sty(cx.sess.targ_cfg, t)); - let sty = fold_sty(get(t).struct) {|t| normalize_ty(cx, t) }; + let sty = fold_sty(get(t).struct, |t| { normalize_ty(cx, t) }); let t_norm = mk_t(cx, sty); cx.normalized_cache.insert(t, t_norm); ret t_norm; diff --git a/src/rustc/middle/typeck.rs b/src/rustc/middle/typeck.rs index 95f4e6c0c95..6dbce3b62bb 100644 --- a/src/rustc/middle/typeck.rs +++ b/src/rustc/middle/typeck.rs @@ -120,7 +120,7 @@ type method_map_entry = { type method_map = hashmap<ast::node_id, method_map_entry>; // Resolutions for bounds of all parameters, left to right, for a given path. -type vtable_res = @[vtable_origin]/~; +type vtable_res = @~[vtable_origin]; enum vtable_origin { /* @@ -128,7 +128,7 @@ enum vtable_origin { from whence comes the vtable, and tys are the type substs. vtable_res is the vtable itself */ - vtable_static(ast::def_id, [ty::t]/~, vtable_res), + vtable_static(ast::def_id, ~[ty::t], vtable_res), /* Dynamic vtable, comes from a parameter that has a bound on it: fn foo<T: quux, baz, bar>(a: T) -- a's vtable would have a @@ -142,7 +142,7 @@ enum vtable_origin { Dynamic vtable, comes from something known to have an interface type. def_id refers to the iface item, tys are the substs */ - vtable_iface(ast::def_id, [ty::t]/~), + vtable_iface(ast::def_id, ~[ty::t]), } type vtable_map = hashmap<ast::node_id, vtable_res>; @@ -163,7 +163,7 @@ fn write_ty_to_tcx(tcx: ty::ctxt, node_id: ast::node_id, ty: ty::t) { } fn write_substs_to_tcx(tcx: ty::ctxt, node_id: ast::node_id, - +substs: [ty::t]/~) { + +substs: ~[ty::t]) { if substs.len() > 0u { tcx.node_type_substs.insert(node_id, substs); } @@ -183,7 +183,7 @@ fn lookup_def_ccx(ccx: @crate_ctxt, sp: span, id: ast::node_id) -> ast::def { } fn no_params(t: ty::t) -> ty::ty_param_bounds_and_ty { - {bounds: @[]/~, rp: ast::rp_none, ty: t} + {bounds: @~[], rp: ast::rp_none, ty: t} } fn require_same_types( @@ -259,7 +259,7 @@ fn check_main_fn_ty(ccx: @crate_ctxt, if !ok { tcx.sess.span_err(main_span, #fmt("Wrong type in main function: found `%s`, \ - expecting `native fn([str]/~) -> ()` \ + expecting `native fn(~[str]) -> ()` \ or `native fn() -> ()`", ty_to_str(tcx, main_t))); } diff --git a/src/rustc/middle/typeck/astconv.rs b/src/rustc/middle/typeck/astconv.rs index eeb4d035cab..a6f1501efe7 100644 --- a/src/rustc/middle/typeck/astconv.rs +++ b/src/rustc/middle/typeck/astconv.rs @@ -121,7 +121,7 @@ fn ast_path_to_substs_and_ty<AC: ast_conv, RS: region_scope copy>( #fmt["wrong number of type arguments, expected %u but found %u", (*decl_bounds).len(), path.types.len()]); } - let tps = path.types.map { |a_t| ast_ty_to_ty(self, rscope, a_t) }; + let tps = path.types.map(|a_t| ast_ty_to_ty(self, rscope, a_t)); let substs = {self_r:self_r, self_ty:none, tps:tps}; {substs: substs, ty: ty::subst(tcx, substs, decl_ty)} @@ -170,7 +170,7 @@ fn ast_ty_to_ty<AC: ast_conv, RS: region_scope copy>( ret ty::mk_evec(tcx, mt, vst); } - // HACK: if we get a []/~, we assume that it was actually a + // HACK: if we get a ~[], we assume that it was actually a // [] that got written down, and we throw away the /~... ty::ty_evec(mt, vstore_uniq) { ret ty::mk_evec(tcx, mt, vst); @@ -244,11 +244,11 @@ fn ast_ty_to_ty<AC: ast_conv, RS: region_scope copy>( ty::mk_rptr(tcx, r, mt) } ast::ty_tup(fields) { - let flds = vec::map(fields) { |t| ast_ty_to_ty(self, rscope, t) }; + let flds = vec::map(fields, |t| ast_ty_to_ty(self, rscope, t)); ty::mk_tup(tcx, flds) } ast::ty_rec(fields) { - let flds = fields.map {|f| + let flds = do fields.map |f| { let tm = ast_mt_to_mt(self, rscope, f.node.mt); {ident: f.node.ident, mt: tm} }; @@ -337,8 +337,8 @@ fn ast_ty_to_ty<AC: ast_conv, RS: region_scope copy>( "implied fixed length for bound"); } ast::ty_constr(t, cs) { - let mut out_cs = []/~; - for cs.each {|constr| + let mut out_cs = ~[]; + for cs.each |constr| { vec::push(out_cs, ty::ast_constr_to_constr(tcx, constr)); } ty::mk_constr(tcx, ast_ty_to_ty(self, rscope, t), out_cs) @@ -402,7 +402,7 @@ fn ty_of_arg<AC: ast_conv, RS: region_scope copy>( {mode: mode, ty: ty} } -type expected_tys = option<{inputs: [ty::arg]/~, +type expected_tys = option<{inputs: ~[ty::arg], output: ty::t}>; fn ty_of_fn_decl<AC: ast_conv, RS: region_scope copy>( @@ -412,13 +412,13 @@ fn ty_of_fn_decl<AC: ast_conv, RS: region_scope copy>( expected_tys: expected_tys) -> ty::fn_ty { #debug["ty_of_fn_decl"]; - indent {|| + do indent || { // new region names that appear inside of the fn decl are bound to // that function type let rb = in_binding_rscope(rscope); - let input_tys = decl.inputs.mapi { |i, a| - let expected_arg_ty = expected_tys.chain { |e| + let input_tys = do decl.inputs.mapi |i, a| { + let expected_arg_ty = do expected_tys.chain |e| { // no guarantee that the correct number of expected args // were supplied if i < e.inputs.len() {some(e.inputs[i])} else {none} @@ -426,16 +426,16 @@ fn ty_of_fn_decl<AC: ast_conv, RS: region_scope copy>( ty_of_arg(self, rb, a, expected_arg_ty) }; - let expected_ret_ty = expected_tys.map { |e| e.output }; + let expected_ret_ty = expected_tys.map(|e| e.output); let output_ty = alt decl.output.node { ast::ty_infer if expected_ret_ty.is_some() {expected_ret_ty.get()} ast::ty_infer {self.ty_infer(decl.output.span)} _ {ast_ty_to_ty(self, rb, decl.output)} }; - let out_constrs = vec::map(decl.constraints) {|constr| + let out_constrs = vec::map(decl.constraints, |constr| { ty::ast_constr_to_constr(self.tcx(), constr) - }; + }); {purity: decl.purity, proto: proto, inputs: input_tys, output: output_ty, ret_style: decl.cf, constraints: out_constrs} diff --git a/src/rustc/middle/typeck/check.rs b/src/rustc/middle/typeck/check.rs index bb19f7df7ca..aac959b4cb4 100644 --- a/src/rustc/middle/typeck/check.rs +++ b/src/rustc/middle/typeck/check.rs @@ -89,7 +89,7 @@ type fn_ctxt = infcx: infer::infer_ctxt, locals: hashmap<ast::node_id, tv_vid>, - mut blocks: [ast::node_id]/~, // stack of blocks in scope, may be empty + mut blocks: ~[ast::node_id], // stack of blocks in scope, may be empty in_scope_regions: isr_alist, node_types: smallintmap::smallintmap<ty::t>, @@ -107,7 +107,7 @@ fn blank_fn_ctxt(ccx: @crate_ctxt, rty: ty::t) -> @fn_ctxt { purity: ast::pure_fn, infcx: infer::new_infer_ctxt(ccx.tcx), locals: int_hash(), - mut blocks: []/~, + mut blocks: ~[], in_scope_regions: @nil, node_types: smallintmap::mk(), node_type_substs: map::int_hash(), @@ -124,7 +124,7 @@ impl methods for isr_alist { } fn find(br: ty::bound_region) -> option<ty::region> { - for list::each(self) { |isr| + for list::each(self) |isr| { let (isr_br, isr_r) = isr; if isr_br == br { ret some(isr_r); } } @@ -134,7 +134,7 @@ impl methods for isr_alist { fn check_item_types(ccx: @crate_ctxt, crate: @ast::crate) { let visit = visit::mk_simple_visitor(@{ - visit_item: {|a|check_item(ccx, a)} + visit_item: |a| check_item(ccx, a) with *visit::default_simple_visitor() }); visit::visit_crate(*crate, (), visit); @@ -167,18 +167,18 @@ fn check_fn(ccx: @crate_ctxt, let {isr, self_ty, fn_ty} = { let old_isr = option::map_default(old_fcx, @nil, - { |fcx| fcx.in_scope_regions }); + |fcx| fcx.in_scope_regions); replace_bound_regions_in_fn_ty(tcx, old_isr, self_ty, fn_ty, - { |br| ty::re_free(body.node.id, br) }) + |br| ty::re_free(body.node.id, br)) }; - let arg_tys = fn_ty.inputs.map { |a| a.ty }; + let arg_tys = fn_ty.inputs.map(|a| a.ty); let ret_ty = fn_ty.output; #debug["check_fn(arg_tys=%?, ret_ty=%?, self_ty=%?)", - arg_tys.map {|a| ty_to_str(tcx, a) }, + arg_tys.map(|a| ty_to_str(tcx, a)), ty_to_str(tcx, ret_ty), - option::map(self_ty) {|st| ty_to_str(tcx, st) }]; + option::map(self_ty, |st| ty_to_str(tcx, st))]; // ______________________________________________________________________ // Create the function context. This is either derived from scratch or, @@ -217,7 +217,7 @@ fn check_fn(ccx: @crate_ctxt, purity: purity, infcx: infcx, locals: locals, - mut blocks: []/~, + mut blocks: ~[], in_scope_regions: isr, node_types: node_types, node_type_substs: node_type_substs, @@ -239,7 +239,7 @@ fn check_fn(ccx: @crate_ctxt, } let mut i = 0u; - vec::iter(arg_tys) {|arg| + do vec::iter(arg_tys) |arg| { fcx.write_ty(decl.inputs[i].id, arg); i += 1u; } @@ -257,7 +257,7 @@ fn check_fn(ccx: @crate_ctxt, fn gather_locals(fcx: @fn_ctxt, decl: ast::fn_decl, body: ast::blk, - arg_tys: [ty::t]/~) { + arg_tys: ~[ty::t]) { let tcx = fcx.ccx.tcx; let assign = fn@(nid: ast::node_id, ty_opt: option<ty::t>) { @@ -272,7 +272,7 @@ fn check_fn(ccx: @crate_ctxt, }; // Add formal parameters. - vec::iter2(arg_tys, decl.inputs) {|arg_ty, input| + do vec::iter2(arg_tys, decl.inputs) |arg_ty, input| { assign(input.id, some(arg_ty)); #debug["Argument %s is assigned to %s", *input.ident, fcx.locals.get(input.id).to_str()]; @@ -356,7 +356,7 @@ fn check_item(ccx: @crate_ctxt, it: @ast::item) { } ast::item_impl(tps, rp, _, ty, ms) { let self_ty = ccx.to_ty(rscope::type_rscope(rp), ty); - for ms.each {|m| check_method(ccx, m, self_ty);} + for ms.each |m| { check_method(ccx, m, self_ty);} } ast::item_class(tps, ifaces, members, ctor, m_dtor, rp) { let tcx = ccx.tcx; @@ -368,7 +368,7 @@ fn check_item(ccx: @crate_ctxt, it: @ast::item) { // Write the ctor's self's type write_ty_to_tcx(tcx, ctor.node.self_id, class_t); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { // typecheck the dtor check_bare_fn(ccx, ast_util::dtor_dec(), dtor.node.body, dtor.node.id, @@ -377,7 +377,7 @@ fn check_item(ccx: @crate_ctxt, it: @ast::item) { write_ty_to_tcx(tcx, dtor.node.self_id, class_t); }; // typecheck the members - for members.each {|m| check_class_member(ccx, class_t, m); } + for members.each |m| { check_class_member(ccx, class_t, m); } // Check that there's at least one field let (fields,_) = split_class_items(members); if fields.len() < 1u { @@ -394,11 +394,11 @@ fn check_item(ccx: @crate_ctxt, it: @ast::item) { ast::item_foreign_mod(m) { if syntax::attr::foreign_abi(it.attrs) == either::right(ast::foreign_abi_rust_intrinsic) { - for m.items.each { |item| + for m.items.each |item| { check_intrinsic_type(ccx, item); } } else { - for m.items.each { |item| + for m.items.each |item| { let tpt = ty::lookup_item_type(ccx.tcx, local_def(item.id)); if (*tpt.bounds).is_not_empty() { ccx.tcx.sess.span_err( @@ -430,7 +430,7 @@ impl of region_scope for @fn_ctxt { result::ok(self.infcx.next_region_var()) } fn named_region(id: ast::ident) -> result<ty::region, str> { - empty_rscope.named_region(id).chain_err { |_e| + do empty_rscope.named_region(id).chain_err |_e| { alt self.in_scope_regions.find(ty::br_named(id)) { some(r) { result::ok(r) } none if *id == "blk" { self.block_region() } @@ -565,7 +565,7 @@ impl methods for @fn_ctxt { fn do_autoderef(fcx: @fn_ctxt, sp: span, t: ty::t) -> ty::t { let mut t1 = t; - let mut enum_dids = []/~; + let mut enum_dids = ~[]; loop { let sty = structure_of(fcx, sp, t1); @@ -632,15 +632,15 @@ fn check_expr_with(fcx: @fn_ctxt, expr: @ast::expr, expected: ty::t) -> bool { fn check_expr(fcx: @fn_ctxt, expr: @ast::expr, expected: option<ty::t>) -> bool { - ret check_expr_with_unifier(fcx, expr, expected) {|| - for expected.each {|t| + ret do check_expr_with_unifier(fcx, expr, expected) || { + for expected.each |t| { demand::suptype(fcx, expr.span, t, fcx.expr_ty(expr)); } }; } // determine the `self` type, using fresh variables for all variables -// declared on the impl declaration e.g., `impl<A,B> for [(A,B)]/~` +// declared on the impl declaration e.g., `impl<A,B> for ~[(A,B)]` // would return ($0, $1) where $0 and $1 are freshly instantiated type // variables. fn impl_self_ty(fcx: @fn_ctxt, did: ast::def_id) -> ty_param_substs_and_ty { @@ -693,11 +693,11 @@ fn impl_self_ty(fcx: @fn_ctxt, did: ast::def_id) -> ty_param_substs_and_ty { // Only for fields! Returns <none> for methods> // Indifferent to privacy flags fn lookup_field_ty(tcx: ty::ctxt, class_id: ast::def_id, - items:[ty::field_ty]/~, fieldname: ast::ident, + items:~[ty::field_ty], fieldname: ast::ident, substs: ty::substs) -> option<ty::t> { - let o_field = vec::find(items, {|f| f.ident == fieldname}); - option::map(o_field) {|f| + let o_field = vec::find(items, |f| f.ident == fieldname); + do option::map(o_field) |f| { ty::lookup_field_type(tcx, class_id, f.id, substs) } } @@ -714,7 +714,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, // expressions. fn check_call_or_bind( fcx: @fn_ctxt, sp: span, call_expr_id: ast::node_id, in_fty: ty::t, - args: [option<@ast::expr>]/~) -> {fty: ty::t, bot: bool} { + args: ~[option<@ast::expr>]) -> {fty: ty::t, bot: bool} { let mut bot = false; @@ -733,7 +733,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, sty @ ty::ty_fn(fn_ty) { replace_bound_regions_in_fn_ty( fcx.ccx.tcx, @nil, none, fn_ty, - { |_br| fcx.infcx.next_region_var() }).fn_ty + |_br| fcx.infcx.next_region_var()).fn_ty } sty { // I would like to make this span_err, but it's @@ -756,7 +756,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, // if the wrong number of arguments were supplied let expected_arg_count = vec::len(fn_ty.inputs); let arg_tys = if expected_arg_count == supplied_arg_count { - fn_ty.inputs.map { |a| a.ty } + fn_ty.inputs.map(|a| a.ty) } else { fcx.ccx.tcx.sess.span_err( sp, #fmt["this function takes %u parameter%s but %u \ @@ -781,8 +781,8 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, // functions. This is so that we have more information about the types // of arguments when we typecheck the functions. This isn't really the // right way to do this. - for [false, true]/~.each { |check_blocks| - for args.eachi {|i, a_opt| + for [false, true]/_.each |check_blocks| { + for args.eachi |i, a_opt| { alt a_opt { some(a) { let is_block = alt a.node { @@ -792,10 +792,10 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, if is_block == check_blocks { let arg_ty = arg_tys[i]; bot |= check_expr_with_unifier( - fcx, a, some(arg_ty)) {|| - demand::assign(fcx, a.span, call_expr_id, - arg_ty, a); - }; + fcx, a, some(arg_ty), + || demand::assign(fcx, a.span, call_expr_id, + arg_ty, a) + ); } } none { } @@ -817,14 +817,14 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, // A generic function for doing all of the checking for call expressions fn check_call(fcx: @fn_ctxt, sp: span, call_expr_id: ast::node_id, - f: @ast::expr, args: [@ast::expr]/~) -> bool { + f: @ast::expr, args: ~[@ast::expr]) -> bool { let mut bot = check_expr(fcx, f, none); let fn_ty = fcx.expr_ty(f); // Call the generic checker. let fty = { - let args_opt = args.map { |arg| some(arg) }; + let args_opt = args.map(|arg| some(arg)); let r = check_call_or_bind(fcx, sp, call_expr_id, fn_ty, args_opt); bot |= r.bot; @@ -890,11 +890,11 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, } fn lookup_op_method(fcx: @fn_ctxt, op_ex: @ast::expr, self_ex: @ast::expr, self_t: ty::t, - opname: str, args: [option<@ast::expr>]/~) + opname: str, args: ~[option<@ast::expr>]) -> option<(ty::t, bool)> { let callee_id = ast_util::op_expr_callee_id(op_ex); let lkup = method::lookup(fcx, op_ex, self_ex, op_ex.id, - callee_id, @opname, self_t, []/~, false); + callee_id, @opname, self_t, ~[], false); alt lkup.method() { some(origin) { let {fty: method_ty, bot: bot} = { @@ -965,7 +965,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, some(name) { alt lookup_op_method(fcx, ex, lhs_expr, lhs_resolved_t, - name, [some(rhs)]/~) { + name, ~[some(rhs)]) { some(pair) { ret pair; } _ {} } @@ -983,7 +983,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, fn check_user_unop(fcx: @fn_ctxt, op_str: str, mname: str, ex: @ast::expr, rhs_expr: @ast::expr, rhs_t: ty::t) -> ty::t { - alt lookup_op_method(fcx, ex, rhs_expr, rhs_t, mname, []/~) { + alt lookup_op_method(fcx, ex, rhs_expr, rhs_t, mname, ~[]) { some((ret_ty, _)) { ret_ty } _ { fcx.ccx.tcx.sess.span_err( @@ -1021,7 +1021,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, expected: option<ty::t>) { let tcx = fcx.ccx.tcx; - let expected_tys = unpack_expected(fcx, expected) { |sty| + let expected_tys = do unpack_expected(fcx, expected) |sty| { alt sty { ty::ty_fn(fn_ty) {some({inputs:fn_ty.inputs, output:fn_ty.output})} @@ -1057,7 +1057,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, ast::expr_vec(args, mutbl) { let tt = ast_expr_vstore_to_vstore(fcx, ev, vec::len(args), vst); let t: ty::t = fcx.infcx.next_ty_var(); - for args.each {|e| bot |= check_expr_with(fcx, e, t); } + for args.each |e| { bot |= check_expr_with(fcx, e, t); } ty::mk_evec(tcx, {ty: t, mutbl: mutbl}, tt) } _ { @@ -1106,7 +1106,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, fcx.write_nil(expr.id); } ast::expr_unary(unop, oprnd) { - let exp_inner = unpack_expected(fcx, expected) {|sty| + let exp_inner = do unpack_expected(fcx, expected) |sty| { alt unop { ast::box(_) | ast::uniq(_) { alt sty { @@ -1182,9 +1182,9 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, fcx.write_ty(id, oprnd_t); } ast::expr_addr_of(mutbl, oprnd) { - bot = check_expr(fcx, oprnd, unpack_expected(fcx, expected) {|ty| + bot = check_expr(fcx, oprnd, unpack_expected(fcx, expected, |ty| alt ty { ty::ty_rptr(_, mt) { some(mt.ty) } _ { none } } - }); + )); let region = region_of(fcx, oprnd); let tm = { ty: fcx.expr_ty(oprnd), mutbl: mutbl }; let oprnd_t = ty::mk_rptr(tcx, region, tm); @@ -1280,9 +1280,9 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, } ast::expr_fn_block(decl, body, cap_clause) { // Take the prototype from the expected type, but default to block: - let proto = unpack_expected(fcx, expected, {|sty| + let proto = unpack_expected(fcx, expected, |sty| alt sty { ty::ty_fn({proto, _}) { some(proto) } _ { none } } - }).get_default(ast::proto_box); + ).get_default(ast::proto_box); check_expr_fn(fcx, expr, proto, decl, body, false, expected); capture::check_capture_clause(tcx, expr.id, cap_clause); } @@ -1293,7 +1293,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, // parameter. The catch here is that we need to validate two things: // 1. a closure that returns a bool is expected // 2. the cloure that was given returns unit - let expected_sty = unpack_expected(fcx, expected, {|x|some(x)}).get(); + let expected_sty = unpack_expected(fcx, expected, |x| some(x)).get(); let (inner_ty, proto) = alt expected_sty { ty::ty_fn(fty) { alt infer::mk_subty(fcx.infcx, fty.output, ty::mk_bool(tcx)) { @@ -1330,7 +1330,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, } } ast::expr_do_body(b) { - let expected_sty = unpack_expected(fcx, expected, {|x|some(x)}).get(); + let expected_sty = unpack_expected(fcx, expected, |x| some(x)).get(); let (inner_ty, proto) = alt expected_sty { ty::ty_fn(fty) { (ty::mk_fn(tcx, fty), fty.proto) @@ -1411,18 +1411,18 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, } ast::expr_vec(args, mutbl) { let t: ty::t = fcx.infcx.next_ty_var(); - for args.each {|e| bot |= check_expr_with(fcx, e, t); } + for args.each |e| { bot |= check_expr_with(fcx, e, t); } let typ = ty::mk_vec(tcx, {ty: t, mutbl: mutbl}); fcx.write_ty(id, typ); } ast::expr_tup(elts) { - let mut elt_ts = []/~; + let mut elt_ts = ~[]; vec::reserve(elt_ts, vec::len(elts)); - let flds = unpack_expected(fcx, expected) {|sty| + let flds = unpack_expected(fcx, expected, |sty| { alt sty { ty::ty_tup(flds) { some(flds) } _ { none } } - }; - for elts.eachi {|i, e| - check_expr(fcx, e, flds.map {|fs| fs[i]}); + }); + for elts.eachi |i, e| { + check_expr(fcx, e, flds.map(|fs| fs[i])); let ety = fcx.expr_ty(e); vec::push(elt_ts, ety); } @@ -1430,17 +1430,17 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, fcx.write_ty(id, typ); } ast::expr_rec(fields, base) { - option::iter(base) {|b| check_expr(fcx, b, expected); } + option::iter(base, |b| { check_expr(fcx, b, expected); }); let expected = if expected == none && base != none { some(fcx.expr_ty(base.get())) } else { expected }; - let flds = unpack_expected(fcx, expected) {|sty| + let flds = unpack_expected(fcx, expected, |sty| alt sty { ty::ty_rec(flds) { some(flds) } _ { none } } - }; - let fields_t = vec::map(fields, {|f| - bot |= check_expr(fcx, f.node.expr, flds.chain {|flds| - vec::find(flds) {|tf| tf.ident == f.node.ident} - }.map {|tf| tf.mt.ty}); + ); + let fields_t = vec::map(fields, |f| { + bot |= check_expr(fcx, f.node.expr, flds.chain(|flds| + vec::find(flds, |tf| tf.ident == f.node.ident) + ).map(|tf| tf.mt.ty)); let expr_t = fcx.expr_ty(f.node.expr); let expr_mt = {ty: expr_t, mutbl: f.node.mutbl}; // for the most precise error message, @@ -1463,9 +1463,9 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, } }; fcx.write_ty(id, bexpr_t); - for fields_t.each {|f| + for fields_t.each |f| { let mut found = false; - for base_fields.each {|bf| + for base_fields.each |bf| { if str::eq(*f.node.ident, *bf.ident) { demand::suptype(fcx, f.span, bf.mt.ty, f.node.mt.ty); found = true; @@ -1533,7 +1533,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, _ {} } if !handled { - let tps = vec::map(tys) { |ty| fcx.to_ty(ty) }; + let tps = vec::map(tys, |ty| fcx.to_ty(ty)); let is_self_ref = self_ref(fcx, base.id); // this will be the call or block that immediately @@ -1575,7 +1575,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, let resolved = structurally_resolved_type(fcx, expr.span, raw_base_t); alt lookup_op_method(fcx, expr, base, resolved, "[]", - [some(idx)]/~) { + ~[some(idx)]) { some((ret_ty, _)) { fcx.write_ty(id, ret_ty); } _ { tcx.sess.span_fatal( @@ -1593,7 +1593,7 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, let p_ty = fcx.expr_ty(p); let lkup = method::lookup(fcx, p, p, expr.id, alloc_id, - @"alloc", p_ty, []/~, false); + @"alloc", p_ty, ~[], false); alt lkup.method() { some(entry) { fcx.ccx.method_map.insert(alloc_id, entry); @@ -1607,11 +1607,11 @@ fn check_expr_with_unifier(fcx: @fn_ctxt, let m = ast::expl(ty::default_arg_mode_for_ty(ty_uint)); ty::mk_fn(tcx, {purity: ast::impure_fn, proto: ast::proto_any, - inputs: [{mode: m, ty: ty_uint}, - {mode: m, ty: ty_uint}]/~, + inputs: ~[{mode: m, ty: ty_uint}, + {mode: m, ty: ty_uint}], output: ty_nilp, ret_style: ast::return_val, - constraints: []/~}) + constraints: ~[]}) }; demand::suptype(fcx, expr.span, @@ -1698,7 +1698,7 @@ fn check_stmt(fcx: @fn_ctxt, stmt: @ast::stmt) -> bool { node_id = id; alt decl.node { ast::decl_local(ls) { - for ls.each {|l| bot |= check_decl_local(fcx, l); } + for ls.each |l| { bot |= check_decl_local(fcx, l); } } ast::decl_item(_) {/* ignore for now */ } } @@ -1735,7 +1735,7 @@ fn check_block(fcx0: @fn_ctxt, blk: ast::blk) -> bool { vec::push(fcx.blocks, blk.node.id); let mut bot = false; let mut warned = false; - for blk.node.stmts.each {|s| + for blk.node.stmts.each |s| { if bot && !warned && alt s.node { ast::stmt_decl(@{node: ast::decl_local(_), _}, _) | @@ -1792,14 +1792,14 @@ fn check_instantiable(tcx: ty::ctxt, fn check_enum_variants(ccx: @crate_ctxt, sp: span, - vs: [ast::variant]/~, + vs: ~[ast::variant], id: ast::node_id) { let rty = ty::node_id_to_type(ccx.tcx, id); let fcx = blank_fn_ctxt(ccx, rty); - let mut disr_vals: [int]/~ = []/~; + let mut disr_vals: ~[int] = ~[]; let mut disr_val = 0; - let mut variants = []/~; - for vs.each {|v| + let mut variants = ~[]; + for vs.each |v| { alt v.node.disr_expr { some(e) { check_expr(fcx, e, none); @@ -1829,8 +1829,8 @@ fn check_enum_variants(ccx: @crate_ctxt, vec::push(disr_vals, disr_val); let ctor_ty = ty::node_id_to_type(ccx.tcx, v.node.id); let arg_tys = if v.node.args.len() > 0u { - ty::ty_fn_args(ctor_ty).map {|a| a.ty } - } else { []/~ }; + ty::ty_fn_args(ctor_ty).map(|a| a.ty) + } else { ~[] }; vec::push(variants, @{args: arg_tys, ctor_ty: ctor_ty, name: v.node.name, id: local_def(v.node.id), disr_val: disr_val}); @@ -1842,7 +1842,7 @@ fn check_enum_variants(ccx: @crate_ctxt, // Check that it is possible to represent this enum: let mut outer = true, did = local_def(id); - if ty::type_structurally_contains(ccx.tcx, rty, {|sty| + if ty::type_structurally_contains(ccx.tcx, rty, |sty| { alt sty { ty::ty_enum(id, _) if id == did { if outer { outer = false; false } @@ -1887,7 +1887,7 @@ fn check_pred_expr(fcx: @fn_ctxt, e: @ast::expr) -> bool { in constraint"); } } - for operands.each {|operand| + for operands.each |operand| { if !ast_util::is_constraint_arg(operand) { let s = "constraint args must be slot variables or literals"; @@ -1907,12 +1907,12 @@ fn check_pred_expr(fcx: @fn_ctxt, e: @ast::expr) -> bool { ret bot; } -fn check_constraints(fcx: @fn_ctxt, cs: [@ast::constr]/~, - args: [ast::arg]/~) { +fn check_constraints(fcx: @fn_ctxt, cs: ~[@ast::constr], + args: ~[ast::arg]) { let num_args = vec::len(args); - for cs.each {|c| - let mut c_args = []/~; - for c.node.args.each {|a| + for cs.each |c| { + let mut c_args = ~[]; + for c.node.args.each |a| { vec::push(c_args, // "base" should not occur in a fn type thing, as of // yet, b/c we don't allow constraints on the return type @@ -1937,8 +1937,8 @@ fn check_constraints(fcx: @fn_ctxt, cs: [@ast::constr]/~, ast::carg_ident(i) { if i < num_args { let p = @{span: a.span, global: false, - idents: [args[i].ident]/~, - rp: none, types: []/~}; + idents: ~[args[i].ident], + rp: none, types: ~[]}; let arg_occ_node_id = fcx.ccx.tcx.sess.next_node_id(); fcx.ccx.tcx.def_map.insert @@ -2019,7 +2019,7 @@ fn ty_param_bounds_and_ty_for_def(fcx: @fn_ctxt, sp: span, defn: ast::def) -> ast::def_fn(id, ast::extern_fn) { // Crust functions are just u8 pointers ret { - bounds: @[]/~, + bounds: @~[], rp: ast::rp_none, ty: ty::mk_ptr( fcx.ccx.tcx, @@ -2098,7 +2098,7 @@ fn instantiate_path(fcx: @fn_ctxt, (sp, "not enough type parameters provided for this item"); fcx.infcx.next_ty_vars(ty_param_count) } else { - pth.types.map { |aty| fcx.to_ty(aty) } + pth.types.map(|aty| fcx.to_ty(aty)) }; let substs = {self_r: self_r, self_ty: none, tps: tps}; @@ -2169,7 +2169,7 @@ fn ast_expr_vstore_to_vstore(fcx: @fn_ctxt, e: @ast::expr, n: uint, fn check_bounds_are_used(ccx: @crate_ctxt, span: span, - tps: [ast::ty_param]/~, + tps: ~[ast::ty_param], rp: ast::region_param, ty: ty::t) { let mut r_used = alt rp { @@ -2182,13 +2182,13 @@ fn check_bounds_are_used(ccx: @crate_ctxt, ty::walk_regions_and_ty( ccx.tcx, ty, - { |r| + |r| { alt r { ty::re_bound(_) { r_used = true; } _ { } } }, - { |t| + |t| { alt ty::get(t).struct { ty::ty_param(idx, _) { tps_used[idx] = true; } _ { } @@ -2202,7 +2202,7 @@ fn check_bounds_are_used(ccx: @crate_ctxt, reference-parameterized type"); } - for tps_used.eachi { |i, b| + for tps_used.eachi |i, b| { if !b { ccx.tcx.sess.span_err( span, #fmt["type parameter `%s` is unused", *tps[i].ident]); @@ -2220,42 +2220,50 @@ fn check_intrinsic_type(ccx: @crate_ctxt, it: @ast::foreign_item) { let tcx = ccx.tcx; let (n_tps, inputs, output) = alt *it.ident { "size_of" | - "pref_align_of" | "min_align_of" { (1u, []/~, ty::mk_uint(ccx.tcx)) } - "get_tydesc" { (1u, []/~, ty::mk_nil_ptr(tcx)) } - "init" { (1u, []/~, param(ccx, 0u)) } - "forget" { (1u, [arg(ast::by_move, param(ccx, 0u))]/~, + "pref_align_of" | "min_align_of" { (1u, ~[], ty::mk_uint(ccx.tcx)) } + "get_tydesc" { (1u, ~[], ty::mk_nil_ptr(tcx)) } + "init" { (1u, ~[], param(ccx, 0u)) } + "forget" { (1u, ~[arg(ast::by_move, param(ccx, 0u))], ty::mk_nil(tcx)) } - "reinterpret_cast" { (2u, [arg(ast::by_ref, param(ccx, 0u))]/~, + "reinterpret_cast" { (2u, ~[arg(ast::by_ref, param(ccx, 0u))], param(ccx, 1u)) } - "addr_of" { (1u, [arg(ast::by_ref, param(ccx, 0u))]/~, + "addr_of" { (1u, ~[arg(ast::by_ref, param(ccx, 0u))], ty::mk_imm_ptr(tcx, param(ccx, 0u))) } "move_val" | "move_val_init" { - (1u, [arg(ast::by_mutbl_ref, param(ccx, 0u)), - arg(ast::by_move, param(ccx, 0u))]/~, + (1u, ~[arg(ast::by_mutbl_ref, param(ccx, 0u)), + arg(ast::by_move, param(ccx, 0u))], ty::mk_nil(tcx)) } - "needs_drop" { (1u, []/~, ty::mk_bool(tcx)) } + "needs_drop" { (1u, ~[], ty::mk_bool(tcx)) } + + "atomic_xchng" | "atomic_add" | "atomic_sub" | + "atomic_xchng_acq" | "atomic_add_acq" | "atomic_sub_acq" | + "atomic_xchng_rel" | "atomic_add_rel" | "atomic_sub_rel" { + (0u, ~[arg(ast::by_mutbl_ref, ty::mk_int(tcx)), + arg(ast::by_val, ty::mk_int(tcx))], + ty::mk_int(tcx)) + } "visit_ty" { assert ccx.tcx.intrinsic_ifaces.contains_key(@"ty_visitor"); let (_, visitor_iface) = ccx.tcx.intrinsic_ifaces.get(@"ty_visitor"); - (1u, [arg(ast::by_ref, visitor_iface)]/~, ty::mk_nil(tcx)) + (1u, ~[arg(ast::by_ref, visitor_iface)], ty::mk_nil(tcx)) } "frame_address" { let fty = ty::mk_fn(ccx.tcx, { purity: ast::impure_fn, proto: ast::proto_any, - inputs: [{ + inputs: ~[{ mode: ast::expl(ast::by_val), ty: ty::mk_imm_ptr( ccx.tcx, ty::mk_mach_uint(ccx.tcx, ast::ty_u8)) - }]/~, + }], output: ty::mk_nil(ccx.tcx), ret_style: ast::return_val, - constraints: []/~ + constraints: ~[] }); - (0u, [arg(ast::by_ref, fty)]/~, ty::mk_nil(tcx)) + (0u, ~[arg(ast::by_ref, fty)], ty::mk_nil(tcx)) } other { tcx.sess.span_err(it.span, "unrecognized intrinsic function: `" + @@ -2267,7 +2275,7 @@ fn check_intrinsic_type(ccx: @crate_ctxt, it: @ast::foreign_item) { proto: ast::proto_bare, inputs: inputs, output: output, ret_style: ast::return_val, - constraints: []/~}); + constraints: ~[]}); let i_ty = ty::lookup_item_type(ccx.tcx, local_def(it.id)); let i_n_tps = (*i_ty.bounds).len(); if i_n_tps != n_tps { @@ -2277,8 +2285,8 @@ fn check_intrinsic_type(ccx: @crate_ctxt, it: @ast::foreign_item) { } else { require_same_types( tcx, none, it.span, i_ty.ty, fty, - {|| #fmt["intrinsic has wrong type: \ + || #fmt["intrinsic has wrong type: \ expected `%s`", - ty_to_str(ccx.tcx, fty)]}); + ty_to_str(ccx.tcx, fty)]); } } diff --git a/src/rustc/middle/typeck/check/alt.rs b/src/rustc/middle/typeck/check/alt.rs index 9aa4d37fd77..e45c5f308ea 100644 --- a/src/rustc/middle/typeck/check/alt.rs +++ b/src/rustc/middle/typeck/check/alt.rs @@ -4,7 +4,7 @@ import middle::typeck::infer::methods; // next_ty_var, fn check_alt(fcx: @fn_ctxt, expr: @ast::expr, discrim: @ast::expr, - arms: [ast::arm]/~) -> bool { + arms: ~[ast::arm]) -> bool { let tcx = fcx.ccx.tcx; let mut bot; @@ -13,7 +13,7 @@ fn check_alt(fcx: @fn_ctxt, // Typecheck the patterns first, so that we get types for all the // bindings. - for arms.each {|arm| + for arms.each |arm| { let pcx = { fcx: fcx, map: pat_id_map(tcx.def_map, arm.pats[0]), @@ -22,12 +22,12 @@ fn check_alt(fcx: @fn_ctxt, pat_region: ty::re_scope(expr.id) }; - for arm.pats.each {|p| check_pat(pcx, p, pattern_ty);} + for arm.pats.each |p| { check_pat(pcx, p, pattern_ty);} } // Now typecheck the blocks. let mut result_ty = fcx.infcx.next_ty_var(); let mut arm_non_bot = false; - for arms.each {|arm| + for arms.each |arm| { alt arm.guard { some(e) { check_expr_with(fcx, e, ty::mk_bool(tcx)); } none { } @@ -52,7 +52,7 @@ type pat_ctxt = { }; fn check_pat_variant(pcx: pat_ctxt, pat: @ast::pat, path: @ast::path, - subpats: option<[@ast::pat]/~>, expected: ty::t) { + subpats: option<~[@ast::pat]>, expected: ty::t) { // Typecheck the path. let fcx = pcx.fcx; @@ -79,7 +79,7 @@ fn check_pat_variant(pcx: pat_ctxt, pat: @ast::pat, path: @ast::path, let vinfo = ty::enum_variant_with_id( tcx, v_def_ids.enm, v_def_ids.var); - vinfo.args.map { |t| ty::subst(tcx, expected_substs, t) } + vinfo.args.map(|t| { ty::subst(tcx, expected_substs, t) }) }; let arg_len = arg_types.len(), subpats_len = alt subpats { none { arg_len } @@ -96,8 +96,8 @@ fn check_pat_variant(pcx: pat_ctxt, pat: @ast::pat, path: @ast::path, tcx.sess.span_fatal(pat.span, s); } - option::iter(subpats) {|pats| - vec::iter2(pats, arg_types) {|subpat, arg_ty| + do option::iter(subpats) |pats| { + do vec::iter2(pats, arg_types) |subpat, arg_ty| { check_pat(pcx, subpat, arg_ty); } }; @@ -143,7 +143,7 @@ fn check_pat(pcx: pat_ctxt, pat: @ast::pat, expected: ty::t) { #debug["pat_range ending type: %?", e_ty]; if !require_same_types( tcx, some(fcx.infcx), pat.span, b_ty, e_ty, - {|| "mismatched types in range" }) { + || "mismatched types in range") { // no-op } else if !ty::type_is_numeric(b_ty) { tcx.sess.span_err(pat.span, "non-numeric type used in range"); @@ -170,7 +170,7 @@ fn check_pat(pcx: pat_ctxt, pat: @ast::pat, expected: ty::t) { } } ast::pat_ident(path, c) { - check_pat_variant(pcx, pat, path, some([]/~), expected); + check_pat_variant(pcx, pat, path, some(~[]), expected); } ast::pat_enum(path, subpats) { check_pat_variant(pcx, pat, path, subpats, expected); @@ -197,8 +197,8 @@ fn check_pat(pcx: pat_ctxt, pat: @ast::pat, expected: ty::t) { fn matches(name: ast::ident, f: ty::field) -> bool { ret str::eq(*name, *f.ident); } - for fields.each {|f| - alt vec::find(ex_fields, {|a|matches(f.ident, a)}) { + for fields.each |f| { + alt vec::find(ex_fields, |a| matches(f.ident, a)) { some(field) { check_pat(pcx, f.pat, field.mt.ty); } @@ -230,7 +230,7 @@ fn check_pat(pcx: pat_ctxt, pat: @ast::pat, expected: ty::t) { fields", vec::len(ex_elts), e_count]); } let mut i = 0u; - for elts.each {|elt| + for elts.each |elt| { check_pat(pcx, elt, ex_elts[i]); i += 1u; } diff --git a/src/rustc/middle/typeck/check/method.rs b/src/rustc/middle/typeck/check/method.rs index bf3f4b1f3b6..3cbdfe98198 100644 --- a/src/rustc/middle/typeck/check/method.rs +++ b/src/rustc/middle/typeck/check/method.rs @@ -26,7 +26,7 @@ class lookup { let mut derefs: uint; let candidates: dvec<candidate>; let candidate_impls: hashmap<def_id, ()>; - let supplied_tps: [ty::t]/~; + let supplied_tps: ~[ty::t]; let include_private: bool; new(fcx: @fn_ctxt, @@ -36,7 +36,7 @@ class lookup { node_id: ast::node_id, //node id where to store type of fn m_name: ast::ident, //b in a.b(...) self_ty: ty::t, //type of a in a.b(...) - supplied_tps: [ty::t]/~, //Xs in a.b::<Xs>(...) + supplied_tps: ~[ty::t], //Xs in a.b::<Xs>(...) include_private: bool) { self.fcx = fcx; @@ -105,7 +105,7 @@ class lookup { self.expr.span, "multiple applicable methods in scope"); - for self.candidates.eachi { |i, candidate| + for self.candidates.eachi |i, candidate| { alt candidate.entry.origin { method_static(did) { self.report_static_candidate(i, did); @@ -163,7 +163,7 @@ class lookup { let tcx = self.tcx(); let mut iface_bnd_idx = 0u; // count only iface bounds let bounds = tcx.ty_param_bounds.get(did.node); - for vec::each(*bounds) {|bound| + for vec::each(*bounds) |bound| { let (iid, bound_substs) = alt bound { ty::bound_copy | ty::bound_send | ty::bound_const { cont; /* ok */ @@ -176,7 +176,7 @@ class lookup { }; let ifce_methods = ty::iface_methods(tcx, iid); - alt vec::position(*ifce_methods, {|m| m.ident == self.m_name}) { + alt vec::position(*ifce_methods, |m| m.ident == self.m_name) { none { /* check next bound */ iface_bnd_idx += 1u; @@ -210,7 +210,7 @@ class lookup { #debug["method_from_iface"]; let ms = *ty::iface_methods(self.tcx(), did); - for ms.eachi {|i, m| + for ms.eachi |i, m| { if m.ident != self.m_name { cont; } let m_fty = ty::mk_fn(self.tcx(), m.fty); @@ -246,7 +246,7 @@ class lookup { let ms = *ty::iface_methods(self.tcx(), did); - for ms.each {|m| + for ms.each |m| { if m.ident != self.m_name { cont; } if m.vis == ast::private && !self.include_private { @@ -296,10 +296,10 @@ class lookup { #debug["method_from_scope"]; - for list::each(impls_vecs) {|impls| - for vec::each(*impls) {|im| + for list::each(impls_vecs) |impls| { + for vec::each(*impls) |im| { // Check whether this impl has a method with the right name. - for im.methods.find({|m| m.ident == self.m_name}).each {|m| + for im.methods.find(|m| m.ident == self.m_name).each |m| { // determine the `self` of the impl with fresh // variables for each parameter: diff --git a/src/rustc/middle/typeck/check/regionck.rs b/src/rustc/middle/typeck/check/regionck.rs index 48166f09963..9350f577804 100644 --- a/src/rustc/middle/typeck/check/regionck.rs +++ b/src/rustc/middle/typeck/check/regionck.rs @@ -55,7 +55,7 @@ fn visit_local(l: @ast::local, &&rcx: rcx, v: rvt) { } v.visit_ty(l.node.ty, rcx, v); - for l.node.init.each { |i| + for l.node.init.each |i| { v.visit_expr(i.expr, rcx, v); } } @@ -134,8 +134,8 @@ fn visit_node(id: ast::node_id, span: span, rcx: rcx) -> bool { let e = rcx.errors_reported; ty::walk_regions_and_ty( tcx, ty, - { |r| constrain_region(rcx, encl_region, span, r); }, - { |t| ty::type_has_regions(t) }); + |r| constrain_region(rcx, encl_region, span, r), + |t| ty::type_has_regions(t)); ret (e == rcx.errors_reported); fn constrain_region(rcx: rcx, diff --git a/src/rustc/middle/typeck/check/regionmanip.rs b/src/rustc/middle/typeck/check/regionmanip.rs index a6c1c0e6d32..2231d9b320a 100644 --- a/src/rustc/middle/typeck/check/regionmanip.rs +++ b/src/rustc/middle/typeck/check/regionmanip.rs @@ -12,25 +12,25 @@ fn replace_bound_regions_in_fn_ty( fn_ty: ty::fn_ty} { let mut all_tys = ty::tys_in_fn_ty(fn_ty); - for self_ty.each { |t| vec::push(all_tys, t) } + for self_ty.each |t| { vec::push(all_tys, t) } #debug["replace_bound_regions_in_fn_ty(self_ty=%?, fn_ty=%s, all_tys=%?)", - self_ty.map { |t| ty_to_str(tcx, t) }, + self_ty.map(|t| ty_to_str(tcx, t)), ty_to_str(tcx, ty::mk_fn(tcx, fn_ty)), - all_tys.map { |t| ty_to_str(tcx, t) }]; + all_tys.map(|t| ty_to_str(tcx, t))]; let _i = indenter(); - let isr = create_bound_region_mapping(tcx, isr, all_tys) { |br| + let isr = do create_bound_region_mapping(tcx, isr, all_tys) |br| { #debug["br=%?", br]; mapf(br) }; - let t_fn = ty::fold_sty_to_ty(tcx, ty::ty_fn(fn_ty)) { |t| + let t_fn = ty::fold_sty_to_ty(tcx, ty::ty_fn(fn_ty), |t| { replace_bound_regions(tcx, isr, t) - }; - let t_self = self_ty.map { |t| replace_bound_regions(tcx, isr, t) }; + }); + let t_self = self_ty.map(|t| replace_bound_regions(tcx, isr, t)); #debug["result of replace_bound_regions_in_fn_ty: self_ty=%?, fn_ty=%s", - t_self.map { |t| ty_to_str(tcx, t) }, + t_self.map(|t| ty_to_str(tcx, t)), ty_to_str(tcx, t_fn)]; ret {isr: isr, @@ -50,7 +50,7 @@ fn replace_bound_regions_in_fn_ty( fn create_bound_region_mapping( tcx: ty::ctxt, isr: isr_alist, - tys: [ty::t]/~, + tys: ~[ty::t], to_r: fn(ty::bound_region) -> ty::region) -> isr_alist { // Takes `isr` (described above), `to_r` (described above), @@ -78,7 +78,7 @@ fn replace_bound_regions_in_fn_ty( } // For each type `ty` in `tys`... - tys.foldl(isr) { |isr, ty| + do tys.foldl(isr) |isr, ty| { let mut isr = isr; // Using fold_regions is inefficient, because it @@ -86,7 +86,7 @@ fn replace_bound_regions_in_fn_ty( // terms of locating all the regions within the various // kinds of types. This had already caused me several // bugs so I decided to switch over. - ty::fold_regions(tcx, ty) { |r, in_fn| + do ty::fold_regions(tcx, ty) |r, in_fn| { if !in_fn { isr = append_isr(isr, to_r, r); } r }; @@ -104,7 +104,7 @@ fn replace_bound_regions_in_fn_ty( isr: isr_alist, ty: ty::t) -> ty::t { - ty::fold_regions(tcx, ty) { |r, in_fn| + do ty::fold_regions(tcx, ty) |r, in_fn| { alt r { // As long as we are not within a fn() type, `&T` is // mapped to the free region anon_r. But within a fn diff --git a/src/rustc/middle/typeck/check/vtable.rs b/src/rustc/middle/typeck/check/vtable.rs index f89492e11b7..b5391d96a8c 100644 --- a/src/rustc/middle/typeck/check/vtable.rs +++ b/src/rustc/middle/typeck/check/vtable.rs @@ -1,20 +1,20 @@ import check::{fn_ctxt, impl_self_ty, methods}; -fn has_iface_bounds(tps: [ty::param_bounds]/~) -> bool { - vec::any(tps, {|bs| - vec::any(*bs, {|b| +fn has_iface_bounds(tps: ~[ty::param_bounds]) -> bool { + vec::any(tps, |bs| { + vec::any(*bs, |b| { alt b { ty::bound_iface(_) { true } _ { false } } }) }) } fn lookup_vtables(fcx: @fn_ctxt, isc: resolve::iscopes, sp: span, - bounds: @[ty::param_bounds]/~, substs: ty::substs, + bounds: @~[ty::param_bounds], substs: ty::substs, allow_unsafe: bool) -> vtable_res { let tcx = fcx.ccx.tcx; - let mut result = []/~, i = 0u; - for substs.tps.each {|ty| - for vec::each(*bounds[i]) {|bound| + let mut result = ~[], i = 0u; + for substs.tps.each |ty| { + for vec::each(*bounds[i]) |bound| { alt bound { ty::bound_iface(i_ty) { let i_ty = ty::subst(tcx, substs, i_ty); @@ -65,7 +65,7 @@ fn lookup_vtable(fcx: @fn_ctxt, isc: resolve::iscopes, sp: span, alt ty::get(ty).struct { ty::ty_param(n, did) { let mut n_bound = 0u; - for vec::each(*tcx.ty_param_bounds.get(did.node)) { |bound| + for vec::each(*tcx.ty_param_bounds.get(did.node)) |bound| { alt bound { ty::bound_send | ty::bound_copy | ty::bound_const { /* ignore */ @@ -88,7 +88,7 @@ fn lookup_vtable(fcx: @fn_ctxt, isc: resolve::iscopes, sp: span, ty::ty_iface(did, substs) if iface_id == did { relate_iface_tys(fcx, sp, iface_ty, ty); if !allow_unsafe { - for vec::each(*ty::iface_methods(tcx, did)) {|m| + for vec::each(*ty::iface_methods(tcx, did)) |m| { if ty::type_has_self(ty::mk_fn(tcx, m.fty)) { tcx.sess.span_err( sp, "a boxed iface with self types may not be \ @@ -105,11 +105,11 @@ fn lookup_vtable(fcx: @fn_ctxt, isc: resolve::iscopes, sp: span, } _ { - let mut found = []/~; + let mut found = ~[]; - for list::each(isc) {|impls| + for list::each(isc) |impls| { /* For each impl in scope... */ - for vec::each(*impls) {|im| + for vec::each(*impls) |im| { // im = one specific impl // find the iface that im implements (if any) let of_ty = alt ty::impl_iface(tcx, im.did) { @@ -181,15 +181,15 @@ fn fixup_ty(fcx: @fn_ctxt, sp: span, ty: ty::t) -> ty::t { } } -fn connect_iface_tps(fcx: @fn_ctxt, sp: span, impl_tys: [ty::t]/~, - iface_tys: [ty::t]/~, impl_did: ast::def_id) { +fn connect_iface_tps(fcx: @fn_ctxt, sp: span, impl_tys: ~[ty::t], + iface_tys: ~[ty::t], impl_did: ast::def_id) { let tcx = fcx.ccx.tcx; let ity = option::get(ty::impl_iface(tcx, impl_did)); let iface_ty = ty::subst_tps(tcx, impl_tys, ity); alt check ty::get(iface_ty).struct { ty::ty_iface(_, substs) { vec::iter2(substs.tps, iface_tys, - {|a, b| demand::suptype(fcx, sp, a, b);}); + |a, b| demand::suptype(fcx, sp, a, b)); } } } @@ -252,7 +252,7 @@ fn resolve_expr(ex: @ast::expr, &&fcx: @fn_ctxt, v: visit::vt<@fn_ctxt>) { Map this expression to that vtable (that is: "ex has vtable <vtable>") */ - cx.vtable_map.insert(ex.id, @[vtable]/~); + cx.vtable_map.insert(ex.id, @~[vtable]); } _ {} } diff --git a/src/rustc/middle/typeck/check/writeback.rs b/src/rustc/middle/typeck/check/writeback.rs index 205a07f9206..9b51a3cb98e 100644 --- a/src/rustc/middle/typeck/check/writeback.rs +++ b/src/rustc/middle/typeck/check/writeback.rs @@ -40,8 +40,8 @@ fn resolve_type_vars_for_node(wbcx: wb_ctxt, sp: span, id: ast::node_id) write_ty_to_tcx(tcx, id, t); alt fcx.opt_node_ty_substs(id) { some(substs) { - let mut new_tps = []/~; - for substs.tps.each {|subst| + let mut new_tps = ~[]; + for substs.tps.each |subst| { alt resolve_type_vars_in_type(fcx, sp, subst) { some(t) { vec::push(new_tps, t); } none { wbcx.success = false; ret none; } @@ -83,7 +83,7 @@ fn visit_expr(e: @ast::expr, wbcx: wb_ctxt, v: wb_vt) { alt e.node { ast::expr_fn(_, decl, _, _) | ast::expr_fn_block(decl, _, _) { - vec::iter(decl.inputs) {|input| + do vec::iter(decl.inputs) |input| { let r_ty = resolve_type_vars_for_node(wbcx, e.span, input.id); // Just in case we never constrained the mode to anything, @@ -176,7 +176,7 @@ fn resolve_type_vars_in_fn(fcx: @fn_ctxt, let wbcx = {fcx: fcx, mut success: true}; let visit = mk_visitor(); visit.visit_block(blk, wbcx, visit); - for decl.inputs.each {|arg| + for decl.inputs.each |arg| { resolve_type_vars_for_node(wbcx, arg.ty.span, arg.id); } ret wbcx.success; diff --git a/src/rustc/middle/typeck/collect.rs b/src/rustc/middle/typeck/collect.rs index 6e4b7a370ab..a713ae67548 100644 --- a/src/rustc/middle/typeck/collect.rs +++ b/src/rustc/middle/typeck/collect.rs @@ -28,16 +28,16 @@ fn collect_item_types(ccx: @crate_ctxt, crate: @ast::crate) { // FIXME (#2592): hooking into the "intrinsic" root module is crude. // There ought to be a better approach. Attributes? - for crate.node.module.items.each {|crate_item| + for crate.node.module.items.each |crate_item| { if *crate_item.ident == "intrinsic" { alt crate_item.node { ast::item_mod(m) { - for m.items.each {|intrinsic_item| + for m.items.each |intrinsic_item| { alt intrinsic_item.node { ast::item_iface(_, _, _) { let def_id = { crate: ast::local_crate, node: intrinsic_item.id }; - let substs = {self_r: none, self_ty: none, tps: []/~}; + let substs = {self_r: none, self_ty: none, tps: ~[]}; let ty = ty::mk_iface(ccx.tcx, def_id, substs); ccx.tcx.intrinsic_ifaces.insert (intrinsic_item.ident, (def_id, ty)); @@ -53,8 +53,8 @@ fn collect_item_types(ccx: @crate_ctxt, crate: @ast::crate) { } visit::visit_crate(*crate, (), visit::mk_simple_visitor(@{ - visit_item: {|a|convert(ccx, a)}, - visit_foreign_item: {|a|convert_foreign(ccx, a)} + visit_item: |a|convert(ccx, a), + visit_foreign_item: |a|convert_foreign(ccx, a) with *visit::default_simple_visitor() })); } @@ -96,29 +96,29 @@ impl of ast_conv for @crate_ctxt { fn get_enum_variant_types(ccx: @crate_ctxt, enum_ty: ty::t, - variants: [ast::variant]/~, - ty_params: [ast::ty_param]/~, + variants: ~[ast::variant], + ty_params: ~[ast::ty_param], rp: ast::region_param) { let tcx = ccx.tcx; // Create a set of parameter types shared among all the variants. - for variants.each {|variant| + for variants.each |variant| { // Nullary enum constructors get turned into constants; n-ary enum // constructors get turned into functions. let result_ty = if vec::len(variant.node.args) == 0u { enum_ty } else { let rs = type_rscope(rp); - let args = variant.node.args.map { |va| + let args = variant.node.args.map(|va| { let arg_ty = ccx.to_ty(rs, va.ty); {mode: ast::expl(ast::by_copy), ty: arg_ty} - }; + }); ty::mk_fn(tcx, {purity: ast::pure_fn, proto: ast::proto_box, inputs: args, output: enum_ty, ret_style: ast::return_val, - constraints: []/~}) + constraints: ~[]}) }; let tpt = {bounds: ty_param_bounds(ccx, ty_params), rp: rp, @@ -130,24 +130,24 @@ fn get_enum_variant_types(ccx: @crate_ctxt, fn ensure_iface_methods(ccx: @crate_ctxt, id: ast::node_id) { fn store_methods<T>(ccx: @crate_ctxt, id: ast::node_id, - stuff: [T]/~, f: fn@(T) -> ty::method) { + stuff: ~[T], f: fn@(T) -> ty::method) { ty::store_iface_methods(ccx.tcx, id, @vec::map(stuff, f)); } let tcx = ccx.tcx; alt check tcx.items.get(id) { ast_map::node_item(@{node: ast::item_iface(_, rp, ms), _}, _) { - store_methods::<ast::ty_method>(ccx, id, ms) {|m| + store_methods::<ast::ty_method>(ccx, id, ms, |m| { ty_of_ty_method(ccx, m, rp) - }; + }); } ast_map::node_item(@{node: ast::item_class(_,_,its,_,_,rp), _}, _) { let (_,ms) = split_class_items(its); // All methods need to be stored, since lookup_method // relies on the same method cache for self-calls - store_methods::<@ast::method>(ccx, id, ms) {|m| + store_methods::<@ast::method>(ccx, id, ms, |m| { ty_of_method(ccx, m, rp) - }; + }); } } } @@ -196,7 +196,7 @@ fn compare_impl_method(tcx: ty::ctxt, sp: span, replace_bound_self(tcx, impl_fty, dummy_self_r) }; let if_fty = { - let dummy_tps = vec::from_fn((*if_m.tps).len()) { |i| + let dummy_tps = do vec::from_fn((*if_m.tps).len()) |i| { // hack: we don't know the def id of the impl tp, but it // is not important for unification ty::mk_param(tcx, i + impl_tps, {crate: 0, node: 0}) @@ -211,32 +211,32 @@ fn compare_impl_method(tcx: ty::ctxt, sp: span, }; require_same_types( tcx, none, sp, impl_fty, if_fty, - {|| "method `" + *if_m.ident + "` has an incompatible type"}); + || "method `" + *if_m.ident + "` has an incompatible type"); ret; // Replaces bound references to the self region with `with_r`. fn replace_bound_self(tcx: ty::ctxt, ty: ty::t, with_r: ty::region) -> ty::t { - ty::fold_regions(tcx, ty) { |r, _in_fn| + do ty::fold_regions(tcx, ty) |r, _in_fn| { if r == ty::re_bound(ty::br_self) {with_r} else {r} } } } fn check_methods_against_iface(ccx: @crate_ctxt, - tps: [ast::ty_param]/~, + tps: ~[ast::ty_param], rp: ast::region_param, selfty: ty::t, a_ifacety: @ast::iface_ref, - ms: [converted_method]/~) { + ms: ~[converted_method]) { let tcx = ccx.tcx; let (did, tpt) = instantiate_iface_ref(ccx, a_ifacety, rp); if did.crate == ast::local_crate { ensure_iface_methods(ccx, did.node); } - for vec::each(*ty::iface_methods(tcx, did)) {|if_m| - alt vec::find(ms, {|m| if_m.ident == m.mty.ident}) { + for vec::each(*ty::iface_methods(tcx, did)) |if_m| { + alt vec::find(ms, |m| if_m.ident == m.mty.ident) { some({mty: m, id, span}) { if m.purity != if_m.purity { ccx.tcx.sess.span_err( @@ -259,7 +259,7 @@ fn check_methods_against_iface(ccx: @crate_ctxt, fn convert_class_item(ccx: @crate_ctxt, rp: ast::region_param, - bounds: @[ty::param_bounds]/~, + bounds: @~[ty::param_bounds], v: ast_util::ivar) { let tt = ccx.to_ty(type_rscope(rp), v.ty); write_ty_to_tcx(ccx.tcx, v.id, tt); @@ -270,13 +270,13 @@ fn convert_class_item(ccx: @crate_ctxt, type converted_method = {mty: ty::method, id: ast::node_id, span: span}; fn convert_methods(ccx: @crate_ctxt, - ms: [@ast::method]/~, + ms: ~[@ast::method], rp: ast::region_param, - rcvr_bounds: @[ty::param_bounds]/~, - self_ty: ty::t) -> [converted_method]/~ { + rcvr_bounds: @~[ty::param_bounds], + self_ty: ty::t) -> ~[converted_method] { let tcx = ccx.tcx; - vec::map(ms) { |m| + do vec::map(ms) |m| { write_ty_to_tcx(tcx, m.self_id, self_ty); let bounds = ty_param_bounds(ccx, m.tps); let mty = ty_of_method(ccx, m, rp); @@ -313,7 +313,7 @@ fn convert(ccx: @crate_ctxt, it: @ast::item) { ty: selfty}); let cms = convert_methods(ccx, ms, rp, i_bounds, selfty); - for ifce.each { |t| + for ifce.each |t| { check_methods_against_iface(ccx, tps, rp, selfty, t, cms); } } @@ -330,8 +330,8 @@ fn convert(ccx: @crate_ctxt, it: @ast::item) { write_ty_to_tcx(tcx, it.id, tpt.ty); tcx.tcache.insert(local_def(it.id), tpt); // Write the ctor type - let t_args = ctor.node.dec.inputs.map {|a| - ty_of_arg(ccx, type_rscope(rp), a, none)}; + let t_args = ctor.node.dec.inputs.map( + |a| ty_of_arg(ccx, type_rscope(rp), a, none) ); let t_res = ty::mk_class(tcx, local_def(it.id), {self_r: alt rp { ast::rp_none { none } @@ -344,13 +344,13 @@ fn convert(ccx: @crate_ctxt, it: @ast::item) { inputs: t_args, output: t_res, ret_style: ast::return_val, - constraints: []/~}); // tjc TODO + constraints: ~[]}); // tjc TODO write_ty_to_tcx(tcx, ctor.node.id, t_ctor); tcx.tcache.insert(local_def(ctor.node.id), {bounds: tpt.bounds, rp: rp, ty: t_ctor}); - option::iter(m_dtor) {|dtor| + do option::iter(m_dtor) |dtor| { // Write the dtor type let t_dtor = ty::mk_fn( tcx, @@ -366,13 +366,13 @@ fn convert(ccx: @crate_ctxt, it: @ast::item) { // Write the type of each of the members let (fields, methods) = split_class_items(members); - for fields.each {|f| + for fields.each |f| { convert_class_item(ccx, rp, tpt.bounds, f); } let {bounds, substs} = mk_substs(ccx, tps, rp); let selfty = ty::mk_class(tcx, local_def(it.id), substs); let cms = convert_methods(ccx, methods, rp, bounds, selfty); - for ifaces.each { |ifce| + for ifaces.each |ifce| { check_methods_against_iface(ccx, tps, rp, selfty, ifce, cms); // FIXME #2434---this is somewhat bogus, but it seems that @@ -549,26 +549,26 @@ fn ty_of_foreign_item(ccx: @crate_ctxt, it: @ast::foreign_item) } } fn ty_param_bounds(ccx: @crate_ctxt, - params: [ast::ty_param]/~) -> @[ty::param_bounds]/~ { + params: ~[ast::ty_param]) -> @~[ty::param_bounds] { fn compute_bounds(ccx: @crate_ctxt, param: ast::ty_param) -> ty::param_bounds { - @vec::flat_map(*param.bounds) { |b| + @do vec::flat_map(*param.bounds) |b| { alt b { - ast::bound_send { [ty::bound_send]/~ } - ast::bound_copy { [ty::bound_copy]/~ } - ast::bound_const { [ty::bound_const]/~ } + ast::bound_send { ~[ty::bound_send] } + ast::bound_copy { ~[ty::bound_copy] } + ast::bound_const { ~[ty::bound_const] } ast::bound_iface(t) { let ity = ast_ty_to_ty(ccx, empty_rscope, t); alt ty::get(ity).struct { ty::ty_iface(*) { - [ty::bound_iface(ity)]/~ + ~[ty::bound_iface(ity)] } _ { ccx.tcx.sess.span_err( t.span, "type parameter bounds must be \ interface types"); - []/~ + ~[] } } } @@ -576,7 +576,7 @@ fn ty_param_bounds(ccx: @crate_ctxt, } } - @params.map { |param| + @do params.map |param| { alt ccx.tcx.ty_param_bounds.find(param.id) { some(bs) { bs } none { @@ -590,12 +590,12 @@ fn ty_param_bounds(ccx: @crate_ctxt, fn ty_of_foreign_fn_decl(ccx: @crate_ctxt, decl: ast::fn_decl, - ty_params: [ast::ty_param]/~, + ty_params: ~[ast::ty_param], def_id: ast::def_id) -> ty::ty_param_bounds_and_ty { let bounds = ty_param_bounds(ccx, ty_params); let rb = in_binding_rscope(empty_rscope); - let input_tys = decl.inputs.map { |a| ty_of_arg(ccx, rb, a, none) }; + let input_tys = decl.inputs.map(|a| ty_of_arg(ccx, rb, a, none) ); let output_ty = ast_ty_to_ty(ccx, rb, decl.output); let t_fn = ty::mk_fn(ccx.tcx, {purity: decl.purity, @@ -603,27 +603,27 @@ fn ty_of_foreign_fn_decl(ccx: @crate_ctxt, inputs: input_tys, output: output_ty, ret_style: ast::return_val, - constraints: []/~}); + constraints: ~[]}); let tpt = {bounds: bounds, rp: ast::rp_none, ty: t_fn}; ccx.tcx.tcache.insert(def_id, tpt); ret tpt; } -fn mk_ty_params(ccx: @crate_ctxt, atps: [ast::ty_param]/~) - -> {bounds: @[ty::param_bounds]/~, params: [ty::t]/~} { +fn mk_ty_params(ccx: @crate_ctxt, atps: ~[ast::ty_param]) + -> {bounds: @~[ty::param_bounds], params: ~[ty::t]} { let mut i = 0u; let bounds = ty_param_bounds(ccx, atps); {bounds: bounds, - params: vec::map(atps, {|atp| + params: vec::map(atps, |atp| { let t = ty::mk_param(ccx.tcx, i, local_def(atp.id)); i += 1u; t })} } -fn mk_substs(ccx: @crate_ctxt, atps: [ast::ty_param]/~, rp: ast::region_param) - -> {bounds: @[ty::param_bounds]/~, substs: ty::substs} { +fn mk_substs(ccx: @crate_ctxt, atps: ~[ast::ty_param], rp: ast::region_param) + -> {bounds: @~[ty::param_bounds], substs: ty::substs} { let {bounds, params} = mk_ty_params(ccx, atps); let self_r = alt rp { diff --git a/src/rustc/middle/typeck/infer.rs b/src/rustc/middle/typeck/infer.rs index 9f941871692..ba4d9152228 100644 --- a/src/rustc/middle/typeck/infer.rs +++ b/src/rustc/middle/typeck/infer.rs @@ -293,7 +293,7 @@ enum var_value<V:copy, T:copy> { type vals_and_bindings<V:copy, T:copy> = { vals: smallintmap<var_value<V, T>>, - mut bindings: [(V, var_value<V, T>)]/~ + mut bindings: ~[(V, var_value<V, T>)] }; enum node<V:copy, T:copy> = { @@ -346,40 +346,40 @@ type fres<T> = result::result<T, fixup_err>; fn new_infer_ctxt(tcx: ty::ctxt) -> infer_ctxt { infer_ctxt(@{tcx: tcx, - tvb: {vals: smallintmap::mk(), mut bindings: []/~}, - tvib: {vals: smallintmap::mk(), mut bindings: []/~}, - rb: {vals: smallintmap::mk(), mut bindings: []/~}, + tvb: {vals: smallintmap::mk(), mut bindings: ~[]}, + tvib: {vals: smallintmap::mk(), mut bindings: ~[]}, + rb: {vals: smallintmap::mk(), mut bindings: ~[]}, ty_var_counter: @mut 0u, ty_var_integral_counter: @mut 0u, region_var_counter: @mut 0u})} fn mk_subty(cx: infer_ctxt, a: ty::t, b: ty::t) -> ures { #debug["mk_subty(%s <: %s)", a.to_str(cx), b.to_str(cx)]; - indent {|| cx.commit {|| sub(cx).tys(a, b) } }.to_ures() + indent(|| cx.commit(|| sub(cx).tys(a, b) ) ).to_ures() } fn can_mk_subty(cx: infer_ctxt, a: ty::t, b: ty::t) -> ures { #debug["can_mk_subty(%s <: %s)", a.to_str(cx), b.to_str(cx)]; - indent {|| cx.probe {|| sub(cx).tys(a, b) } }.to_ures() + indent(|| cx.probe(|| sub(cx).tys(a, b) ) ).to_ures() } fn mk_subr(cx: infer_ctxt, a: ty::region, b: ty::region) -> ures { #debug["mk_subr(%s <: %s)", a.to_str(cx), b.to_str(cx)]; - indent {|| cx.commit {|| sub(cx).regions(a, b) } }.to_ures() + indent(|| cx.commit(|| sub(cx).regions(a, b) ) ).to_ures() } fn mk_eqty(cx: infer_ctxt, a: ty::t, b: ty::t) -> ures { #debug["mk_eqty(%s <: %s)", a.to_str(cx), b.to_str(cx)]; - indent {|| cx.commit {|| cx.eq_tys(a, b) } }.to_ures() + indent(|| cx.commit(|| cx.eq_tys(a, b) ) ).to_ures() } fn mk_assignty(cx: infer_ctxt, anmnt: assignment, a: ty::t, b: ty::t) -> ures { #debug["mk_assignty(%? / %s <: %s)", anmnt, a.to_str(cx), b.to_str(cx)]; - indent {|| cx.commit {|| + indent(|| cx.commit(|| cx.assign_tys(anmnt, a, b) - } }.to_ures() + ) ).to_ures() } fn can_mk_assignty(cx: infer_ctxt, anmnt: assignment, @@ -392,9 +392,9 @@ fn can_mk_assignty(cx: infer_ctxt, anmnt: assignment, // used in method lookup, and there must be exactly one match or an // error is reported. Still, it should be fixed. - indent {|| cx.probe {|| + indent(|| cx.probe(|| cx.assign_tys(anmnt, a, b) - } }.to_ures() + ) ).to_ures() } // See comment on the type `resolve_state` below @@ -418,7 +418,7 @@ fn resolve_deep(cx: infer_ctxt, a: ty::t, force_vars: force_level) impl methods for ures { fn then<T:copy>(f: fn() -> result<T,ty::type_err>) -> result<T,ty::type_err> { - self.chain() {|_i| f() } + self.chain(|_i| f()) } } @@ -431,7 +431,7 @@ impl methods<T:copy> for cres<T> { } fn compare(t: T, f: fn() -> ty::type_err) -> cres<T> { - self.chain {|s| + do self.chain |s| { if s == t { self } else { @@ -520,7 +520,7 @@ impl of st for ty::t { impl of st for ty::region { fn sub(infcx: infer_ctxt, &&b: ty::region) -> ures { - sub(infcx).regions(self, b).chain {|_r| ok(()) } + sub(infcx).regions(self, b).chain(|_r| ok(())) } fn lub(infcx: infer_ctxt, &&b: ty::region) -> cres<ty::region> { @@ -556,8 +556,8 @@ impl transaction_methods for infer_ctxt { // TODO---could use a vec::clear() that ran destructors but kept // the vec at its currently allocated length - self.tvb.bindings = []/~; - self.rb.bindings = []/~; + self.tvb.bindings = ~[]; + self.rb.bindings = ~[]; ret r; } @@ -604,8 +604,8 @@ impl methods for infer_ctxt { ty::mk_var(self.tcx, self.next_ty_var_id()) } - fn next_ty_vars(n: uint) -> [ty::t]/~ { - vec::from_fn(n) {|_i| self.next_ty_var() } + fn next_ty_vars(n: uint) -> ~[ty::t] { + vec::from_fn(n, |_i| self.next_ty_var()) } fn next_ty_var_integral_id() -> tvi_vid { @@ -706,7 +706,7 @@ impl unify_methods for infer_ctxt { ok(b) } (some(v_a), some(v_b)) { - merge_op(v_a, v_b).chain {|v| + do merge_op(v_a, v_b).chain |v| { ok(some(v)) } } @@ -719,11 +719,11 @@ impl unify_methods for infer_ctxt { glb: fn(V,V) -> cres<V>) -> cres<bounds<V>> { let _r = indenter(); - self.merge_bnd(a.ub, b.ub, glb).chain {|ub| + do self.merge_bnd(a.ub, b.ub, glb).chain |ub| { #debug["glb of ubs %s and %s is %s", a.ub.to_str(self), b.ub.to_str(self), ub.to_str(self)]; - self.merge_bnd(a.lb, b.lb, lub).chain {|lb| + do self.merge_bnd(a.lb, b.lb, lub).chain |lb| { #debug["lub of lbs %s and %s is %s", a.lb.to_str(self), b.lb.to_str(self), lb.to_str(self)]; @@ -771,11 +771,11 @@ impl unify_methods for infer_ctxt { // them explicitly gives the type inferencer more // information and helps to produce tighter bounds // when necessary. - indent {|| - self.bnds(a.lb, b.ub).then {|| - self.bnds(b.lb, a.ub).then {|| - self.merge_bnd(a.ub, b.ub, {|x, y| x.glb(self, y)}).chain {|ub| - self.merge_bnd(a.lb, b.lb, {|x, y| x.lub(self, y)}).chain {|lb| + do indent || { + do self.bnds(a.lb, b.ub).then || { + do self.bnds(b.lb, a.ub).then || { + do self.merge_bnd(a.ub, b.ub, |x, y| x.glb(self, y) ).chain |ub| { + do self.merge_bnd(a.lb, b.lb, |x, y| x.lub(self, y) ).chain |lb| { let bnds = {lb: lb, ub: ub}; #debug["merge(%s): bnds=%s", v_id.to_str(), @@ -783,10 +783,10 @@ impl unify_methods for infer_ctxt { // the new bounds must themselves // be relatable: - self.bnds(bnds.lb, bnds.ub).then {|| + do self.bnds(bnds.lb, bnds.ub).then || { self.set(vb, v_id, root(bnds, rank)); uok() - } + } }}}}} } @@ -812,7 +812,7 @@ impl unify_methods for infer_ctxt { // see if we can make those types subtypes. alt (a_bounds.ub, b_bounds.lb) { (some(a_ub), some(b_lb)) { - let r = self.try {|| a_ub.sub(self, b_lb) }; + let r = self.try(|| a_ub.sub(self, b_lb)); alt r { ok(()) { ret result::ok(()); } err(_) { /*fallthrough */ } @@ -835,17 +835,13 @@ impl unify_methods for infer_ctxt { // i.e., b should redirect to a. self.set(vb, b_id, redirect(a_id)); self.set_var_to_merged_bounds( - vb, a_id, a_bounds, b_bounds, nde_a.rank).then {|| - uok() - } + vb, a_id, a_bounds, b_bounds, nde_a.rank).then(|| uok() ) } else if nde_a.rank < nde_b.rank { #debug["vars(): b has smaller rank"]; // b has greater rank, so a should redirect to b. self.set(vb, a_id, redirect(b_id)); self.set_var_to_merged_bounds( - vb, b_id, a_bounds, b_bounds, nde_b.rank).then {|| - uok() - } + vb, b_id, a_bounds, b_bounds, nde_b.rank).then(|| uok() ) } else { #debug["vars(): a and b have equal rank"]; assert nde_a.rank == nde_b.rank; @@ -854,9 +850,8 @@ impl unify_methods for infer_ctxt { // to a and increment a's rank. self.set(vb, b_id, redirect(a_id)); self.set_var_to_merged_bounds( - vb, a_id, a_bounds, b_bounds, nde_a.rank + 1u).then {|| - uok() - } + vb, a_id, a_bounds, b_bounds, nde_a.rank + 1u + ).then(|| uok() ) } } @@ -991,7 +986,7 @@ impl unify_methods for infer_ctxt { let actual_arg_len = vec::len(actual_constr.node.args); if expected_arg_len != actual_arg_len { ret err_res; } let mut i = 0u; - for expected.node.args.each {|a| + for expected.node.args.each |a| { let actual = actual_constr.node.args[i]; alt a.node { ast::carg_base { @@ -1026,7 +1021,7 @@ impl unify_methods for infer_ctxt { a: bound<T>, b: bound<T>) -> ures { #debug("bnds(%s <: %s)", a.to_str(self), b.to_str(self)); - indent {|| + do indent || { alt (a, b) { (none, none) | (some(_), none) | @@ -1041,10 +1036,10 @@ impl unify_methods for infer_ctxt { } fn constrvecs( - as: [@ty::type_constr]/~, bs: [@ty::type_constr]/~) -> ures { + as: ~[@ty::type_constr], bs: ~[@ty::type_constr]) -> ures { if check vec::same_length(as, bs) { - iter_vec2(as, bs) {|a,b| + do iter_vec2(as, bs) |a,b| { self.constrs(a, b) } } else { @@ -1053,24 +1048,24 @@ impl unify_methods for infer_ctxt { } fn sub_tys(a: ty::t, b: ty::t) -> ures { - sub(self).tys(a, b).chain {|_t| ok(()) } + sub(self).tys(a, b).chain(|_t| ok(()) ) } fn sub_regions(a: ty::region, b: ty::region) -> ures { - sub(self).regions(a, b).chain {|_t| ok(()) } + sub(self).regions(a, b).chain(|_t| ok(()) ) } fn eq_tys(a: ty::t, b: ty::t) -> ures { - self.sub_tys(a, b).then {|| + self.sub_tys(a, b).then(|| { self.sub_tys(b, a) - } + }) } fn eq_regions(a: ty::region, b: ty::region) -> ures { #debug["eq_regions(%s, %s)", a.to_str(self), b.to_str(self)]; - indent {|| - self.sub_regions(a, b).then {|| + do indent || { + do self.sub_regions(a, b).then || { self.sub_regions(b, a) } } @@ -1082,8 +1077,8 @@ impl unify_methods for infer_ctxt { // resolution. The first is a shallow resolution: this only resolves // one layer, but does not resolve any nested variables. So, for // example, if we have two variables A and B, and the constraint that -// A <: [B]/~ and B <: int, then shallow resolution on A would yield -// [B]/~. Deep resolution, on the other hand, would yield [int]/~. +// A <: ~[B] and B <: int, then shallow resolution on A would yield +// ~[B]. Deep resolution, on the other hand, would yield ~[int]. // // But there is one more knob: the `force_level` variable controls // the behavior in the face of unconstrained type and region @@ -1107,8 +1102,8 @@ type resolve_state = @{ deep: bool, force_vars: force_level, mut err: option<fixup_err>, - mut r_seen: [region_vid]/~, - mut v_seen: [tv_vid]/~ + mut r_seen: ~[region_vid], + mut v_seen: ~[tv_vid] }; fn resolver(infcx: infer_ctxt, deep: bool, fvars: force_level) @@ -1117,8 +1112,8 @@ fn resolver(infcx: infer_ctxt, deep: bool, fvars: force_level) deep: deep, force_vars: fvars, mut err: none, - mut r_seen: []/~, - mut v_seen: []/~} + mut r_seen: ~[], + mut v_seen: ~[]} } impl methods for resolve_state { @@ -1134,7 +1129,7 @@ impl methods for resolve_state { // allow us to pass back errors in any useful way. assert vec::is_empty(self.v_seen) && vec::is_empty(self.r_seen); - let rty = indent {|| self.resolve1(typ) }; + let rty = indent(|| self.resolve1(typ) ); assert vec::is_empty(self.v_seen) && vec::is_empty(self.r_seen); alt self.err { none { @@ -1166,9 +1161,9 @@ impl methods for resolve_state { _ { ty::fold_regions_and_ty( self.infcx.tcx, typ, - { |r| self.resolve_region(r) }, - { |t| self.resolve_if_deep(t) }, - { |t| self.resolve_if_deep(t) }) + |r| self.resolve_region(r), + |t| self.resolve_if_deep(t), + |t| self.resolve_if_deep(t)) } } }) @@ -1308,14 +1303,14 @@ impl methods for resolve_state { // // Assuming we have a bound from both sides, we will then examine // these bounds and see if they have the form (@M_a T_a, &rb.M_b T_b) -// (resp. ~M_a T_a, [M_a T_a]/~, etc). If they do not, we fall back to +// (resp. ~M_a T_a, ~[M_a T_a], etc). If they do not, we fall back to // subtyping. // // If they *do*, then we know that the two types could never be // subtypes of one another. We will then construct a type @const T_b // and ensure that type a is a subtype of that. This allows for the -// possibility of assigning from a type like (say) @[mut T1]/~ to a type -// &[T2]/~ where T1 <: T2. This might seem surprising, since the `@` +// possibility of assigning from a type like (say) @~[mut T1] to a type +// &~[T2] where T1 <: T2. This might seem surprising, since the `@` // points at mutable memory but the `&` points at immutable memory. // This would in fact be unsound, except for the borrowck, which comes // later and guarantees that such mutability conversions are safe. @@ -1462,11 +1457,11 @@ impl assignment for infer_ctxt { anmnt, a.to_str(self), nr_b.to_str(self), r_b.to_str(self)]; - indent {|| - self.sub_tys(a, nr_b).then {|| + do indent || { + do self.sub_tys(a, nr_b).then || { let r_a = ty::re_scope(anmnt.borrow_scope); #debug["anmnt=%?", anmnt]; - sub(self).contraregions(r_a, r_b).chain {|_r| + do sub(self).contraregions(r_a, r_b).chain |_r| { // if successful, add an entry indicating that // borrowing occurred #debug["borrowing expression #%?", anmnt]; @@ -1533,7 +1528,7 @@ iface combine { fn mts(a: ty::mt, b: ty::mt) -> cres<ty::mt>; fn contratys(a: ty::t, b: ty::t) -> cres<ty::t>; fn tys(a: ty::t, b: ty::t) -> cres<ty::t>; - fn tps(as: [ty::t]/~, bs: [ty::t]/~) -> cres<[ty::t]/~>; + fn tps(as: ~[ty::t], bs: ~[ty::t]) -> cres<~[ty::t]>; fn self_tys(a: option<ty::t>, b: option<ty::t>) -> cres<option<ty::t>>; fn substs(as: ty::substs, bs: ty::substs) -> cres<ty::substs>; fn fns(a: ty::fn_ty, b: ty::fn_ty) -> cres<ty::fn_ty>; @@ -1564,7 +1559,7 @@ fn super_substs<C:combine>( ok(none) } (some(a), some(b)) { - infcx.eq_regions(a, b).then {|| + do infcx.eq_regions(a, b).then || { ok(some(a)) } } @@ -1582,9 +1577,10 @@ fn super_substs<C:combine>( } } - self.tps(a.tps, b.tps).chain { |tps| - self.self_tys(a.self_ty, b.self_ty).chain { |self_ty| - eq_opt_regions(self.infcx(), a.self_r, b.self_r).chain { |self_r| + do self.tps(a.tps, b.tps).chain |tps| { + do self.self_tys(a.self_ty, b.self_ty).chain |self_ty| { + do eq_opt_regions(self.infcx(), a.self_r, b.self_r).chain + |self_r| { ok({self_r: self_r, self_ty: self_ty, tps: tps}) } } @@ -1592,7 +1588,7 @@ fn super_substs<C:combine>( } fn super_tps<C:combine>( - self: C, as: [ty::t]/~, bs: [ty::t]/~) -> cres<[ty::t]/~> { + self: C, as: ~[ty::t], bs: ~[ty::t]) -> cres<~[ty::t]> { // Note: type parameters are always treated as *invariant* // (otherwise the type system would be unsound). In the @@ -1600,11 +1596,9 @@ fn super_tps<C:combine>( // variance. if check vec::same_length(as, bs) { - iter_vec2(as, bs) {|a, b| + iter_vec2(as, bs, |a, b| { self.infcx().eq_tys(a, b) - }.then {|| - ok(as) - } + }).then(|| ok(as) ) } else { err(ty::terr_ty_param_size(bs.len(), as.len())) } @@ -1621,9 +1615,7 @@ fn super_self_tys<C:combine>( ok(none) } (some(a), some(b)) { - self.infcx().eq_tys(a, b).then {|| - ok(some(a)) - } + self.infcx().eq_tys(a, b).then(|| ok(some(a)) ) } (none, some(_)) | (some(_), none) { @@ -1639,11 +1631,9 @@ fn super_flds<C:combine>( self: C, a: ty::field, b: ty::field) -> cres<ty::field> { if a.ident == b.ident { - self.mts(a.mt, b.mt).chain {|mt| - ok({ident: a.ident, mt: mt}) - }.chain_err {|e| - err(ty::terr_in_field(@e, a.ident)) - } + self.mts(a.mt, b.mt) + .chain(|mt| ok({ident: a.ident, mt: mt}) ) + .chain_err(|e| err(ty::terr_in_field(@e, a.ident)) ) } else { err(ty::terr_record_fields(b.ident, a.ident)) } @@ -1661,8 +1651,8 @@ fn super_args<C:combine>( self: C, a: ty::arg, b: ty::arg) -> cres<ty::arg> { - self.modes(a.mode, b.mode).chain {|m| - self.contratys(a.ty, b.ty).chain {|t| + do self.modes(a.mode, b.mode).chain |m| { + do self.contratys(a.ty, b.ty).chain |t| { ok({mode: m, ty: t}) } } @@ -1674,7 +1664,7 @@ fn super_vstores<C:combine>( alt (a, b) { (ty::vstore_slice(a_r), ty::vstore_slice(b_r)) { - self.contraregions(a_r, b_r).chain {|r| + do self.contraregions(a_r, b_r).chain |r| { ok(ty::vstore_slice(r)) } } @@ -1692,21 +1682,21 @@ fn super_vstores<C:combine>( fn super_fns<C:combine>( self: C, a_f: ty::fn_ty, b_f: ty::fn_ty) -> cres<ty::fn_ty> { - fn argvecs<C:combine>(self: C, a_args: [ty::arg]/~, - b_args: [ty::arg]/~) -> cres<[ty::arg]/~> { + fn argvecs<C:combine>(self: C, a_args: ~[ty::arg], + b_args: ~[ty::arg]) -> cres<~[ty::arg]> { if check vec::same_length(a_args, b_args) { - map_vec2(a_args, b_args) {|a, b| self.args(a, b) } + map_vec2(a_args, b_args, |a, b| self.args(a, b) ) } else { err(ty::terr_arg_count) } } - self.protos(a_f.proto, b_f.proto).chain {|p| - self.ret_styles(a_f.ret_style, b_f.ret_style).chain {|rs| - argvecs(self, a_f.inputs, b_f.inputs).chain {|inputs| - self.tys(a_f.output, b_f.output).chain {|output| - self.purities(a_f.purity, b_f.purity).chain {|purity| + do self.protos(a_f.proto, b_f.proto).chain |p| { + do self.ret_styles(a_f.ret_style, b_f.ret_style).chain |rs| { + do argvecs(self, a_f.inputs, b_f.inputs).chain |inputs| { + do self.tys(a_f.output, b_f.output).chain |output| { + do self.purities(a_f.purity, b_f.purity).chain |purity| { // FIXME: uncomment if #2588 doesn't get accepted: // self.infcx().constrvecs(a_f.constraints, // b_f.constraints).then {|| @@ -1743,18 +1733,18 @@ fn super_tys<C:combine>( // Have to handle these first (ty::ty_var_integral(a_id), ty::ty_var_integral(b_id)) { - self.infcx().vars_integral(self.infcx().tvib, a_id, b_id).then {|| - ok(a) } + self.infcx().vars_integral(self.infcx().tvib, a_id, b_id) + .then(|| ok(a) ) } (ty::ty_var_integral(a_id), ty::ty_int(_)) | (ty::ty_var_integral(a_id), ty::ty_uint(_)) { - self.infcx().vart_integral(self.infcx().tvib, a_id, b).then {|| - ok(a) } + self.infcx().vart_integral(self.infcx().tvib, a_id, b) + .then(|| ok(a) ) } (ty::ty_int(_), ty::ty_var_integral(b_id)) | (ty::ty_uint(_), ty::ty_var_integral(b_id)) { - self.infcx().tvar_integral(self.infcx().tvib, a, b_id).then {|| - ok(a) } + self.infcx().tvar_integral(self.infcx().tvib, a, b_id) + .then(|| ok(a) ) } (ty::ty_int(_), _) | @@ -1786,78 +1776,76 @@ fn super_tys<C:combine>( (ty::ty_enum(a_id, a_substs), ty::ty_enum(b_id, b_substs)) if a_id == b_id { - self.substs(a_substs, b_substs).chain {|tps| + do self.substs(a_substs, b_substs).chain |tps| { ok(ty::mk_enum(tcx, a_id, tps)) } } (ty::ty_iface(a_id, a_substs), ty::ty_iface(b_id, b_substs)) if a_id == b_id { - self.substs(a_substs, b_substs).chain {|substs| + do self.substs(a_substs, b_substs).chain |substs| { ok(ty::mk_iface(tcx, a_id, substs)) } } (ty::ty_class(a_id, a_substs), ty::ty_class(b_id, b_substs)) if a_id == b_id { - self.substs(a_substs, b_substs).chain {|substs| + do self.substs(a_substs, b_substs).chain |substs| { ok(ty::mk_class(tcx, a_id, substs)) } } (ty::ty_box(a_mt), ty::ty_box(b_mt)) { - self.mts(a_mt, b_mt).chain {|mt| + do self.mts(a_mt, b_mt).chain |mt| { ok(ty::mk_box(tcx, mt)) } } (ty::ty_uniq(a_mt), ty::ty_uniq(b_mt)) { - self.mts(a_mt, b_mt).chain {|mt| + do self.mts(a_mt, b_mt).chain |mt| { ok(ty::mk_uniq(tcx, mt)) } } (ty::ty_vec(a_mt), ty::ty_vec(b_mt)) { - self.mts(a_mt, b_mt).chain {|mt| + do self.mts(a_mt, b_mt).chain |mt| { ok(ty::mk_vec(tcx, mt)) } } (ty::ty_ptr(a_mt), ty::ty_ptr(b_mt)) { - self.mts(a_mt, b_mt).chain {|mt| + do self.mts(a_mt, b_mt).chain |mt| { ok(ty::mk_ptr(tcx, mt)) } } (ty::ty_rptr(a_r, a_mt), ty::ty_rptr(b_r, b_mt)) { - self.contraregions(a_r, b_r).chain {|r| - self.mts(a_mt, b_mt).chain {|mt| + do self.contraregions(a_r, b_r).chain |r| { + do self.mts(a_mt, b_mt).chain |mt| { ok(ty::mk_rptr(tcx, r, mt)) } } } (ty::ty_evec(a_mt, vs_a), ty::ty_evec(b_mt, vs_b)) { - self.mts(a_mt, b_mt).chain {|mt| - self.vstores(ty::terr_vec, vs_a, vs_b).chain {|vs| + do self.mts(a_mt, b_mt).chain |mt| { + do self.vstores(ty::terr_vec, vs_a, vs_b).chain |vs| { ok(ty::mk_evec(tcx, mt, vs)) } } } (ty::ty_estr(vs_a), ty::ty_estr(vs_b)) { - self.vstores(ty::terr_str, vs_a, vs_b).chain {|vs| + do self.vstores(ty::terr_str, vs_a, vs_b).chain |vs| { ok(ty::mk_estr(tcx,vs)) } } (ty::ty_rec(as), ty::ty_rec(bs)) { if check vec::same_length(as, bs) { - map_vec2(as, bs) {|a,b| + map_vec2(as, bs, |a,b| { self.flds(a, b) - }.chain {|flds| - ok(ty::mk_rec(tcx, flds)) - } + }).chain(|flds| ok(ty::mk_rec(tcx, flds)) ) } else { err(ty::terr_record_size(bs.len(), as.len())) } @@ -1865,23 +1853,22 @@ fn super_tys<C:combine>( (ty::ty_tup(as), ty::ty_tup(bs)) { if check vec::same_length(as, bs) { - map_vec2(as, bs) {|a, b| self.tys(a, b) }.chain {|ts| - ok(ty::mk_tup(tcx, ts)) - } + map_vec2(as, bs, |a, b| self.tys(a, b) ) + .chain(|ts| ok(ty::mk_tup(tcx, ts)) ) } else { err(ty::terr_tuple_size(bs.len(), as.len())) } } (ty::ty_fn(a_fty), ty::ty_fn(b_fty)) { - self.fns(a_fty, b_fty).chain {|fty| + do self.fns(a_fty, b_fty).chain |fty| { ok(ty::mk_fn(tcx, fty)) } } (ty::ty_constr(a_t, a_constrs), ty::ty_constr(b_t, b_constrs)) { - self.tys(a_t, b_t).chain {|t| - self.infcx().constrvecs(a_constrs, b_constrs).then {|| + do self.tys(a_t, b_t).chain |t| { + do self.infcx().constrvecs(a_constrs, b_constrs).then || { ok(ty::mk_constr(tcx, t, a_constrs)) } } @@ -1910,25 +1897,25 @@ impl of combine for sub { self.tag(), a.to_str(self.infcx()), b.to_str(self.infcx())]; - indent {|| + do indent || { alt (a, b) { (ty::re_var(a_id), ty::re_var(b_id)) { - self.infcx().vars(self.rb, a_id, b_id).then {|| + do self.infcx().vars(self.rb, a_id, b_id).then || { ok(a) } } (ty::re_var(a_id), _) { - self.infcx().vart(self.rb, a_id, b).then {|| + do self.infcx().vart(self.rb, a_id, b).then || { ok(a) } } (_, ty::re_var(b_id)) { - self.infcx().tvar(self.rb, a, b_id).then {|| + do self.infcx().tvar(self.rb, a, b_id).then || { ok(a) } } _ { - self.lub().regions(a, b).compare(b) {|| + do self.lub().regions(a, b).compare(b) || { ty::terr_regions_differ(b, a) } } @@ -1947,50 +1934,50 @@ impl of combine for sub { m_mutbl { // If supertype is mut, subtype must match exactly // (i.e., invariant if mut): - self.infcx().eq_tys(a.ty, b.ty).then {|| ok(a) } + self.infcx().eq_tys(a.ty, b.ty).then(|| ok(a) ) } m_imm | m_const { // Otherwise we can be covariant: - self.tys(a.ty, b.ty).chain {|_t| ok(a) } + self.tys(a.ty, b.ty).chain(|_t| ok(a) ) } } } fn protos(a: ast::proto, b: ast::proto) -> cres<ast::proto> { - self.lub().protos(a, b).compare(b) {|| + self.lub().protos(a, b).compare(b, || { ty::terr_proto_mismatch(b, a) - } + }) } fn purities(f1: purity, f2: purity) -> cres<purity> { - self.lub().purities(f1, f2).compare(f2) {|| + self.lub().purities(f1, f2).compare(f2, || { ty::terr_purity_mismatch(f2, f1) - } + }) } fn ret_styles(a: ret_style, b: ret_style) -> cres<ret_style> { - self.lub().ret_styles(a, b).compare(b) {|| + self.lub().ret_styles(a, b).compare(b, || { ty::terr_ret_style_mismatch(b, a) - } + }) } fn tys(a: ty::t, b: ty::t) -> cres<ty::t> { #debug("%s.tys(%s, %s)", self.tag(), a.to_str(*self), b.to_str(*self)); if a == b { ret ok(a); } - indent {|| + do indent || { alt (ty::get(a).struct, ty::get(b).struct) { (ty::ty_bot, _) { ok(a) } (ty::ty_var(a_id), ty::ty_var(b_id)) { - self.infcx().vars(self.tvb, a_id, b_id).then {|| ok(a) } + self.infcx().vars(self.tvb, a_id, b_id).then(|| ok(a) ) } (ty::ty_var(a_id), _) { - self.infcx().vart(self.tvb, a_id, b).then {|| ok(a) } + self.infcx().vart(self.tvb, a_id, b).then(|| ok(a) ) } (_, ty::ty_var(b_id)) { - self.infcx().tvar(self.tvb, a, b_id).then {|| ok(a) } + self.infcx().tvar(self.tvb, a, b_id).then(|| ok(a) ) } (_, ty::ty_bot) { err(ty::terr_sorts(b, a)) @@ -2011,7 +1998,7 @@ impl of combine for sub { // First, we instantiate each bound region in the subtype with a fresh // region variable. let {fn_ty: a_fn_ty, _} = { - replace_bound_regions_in_fn_ty(self.tcx, @nil, none, a) { |br| + do replace_bound_regions_in_fn_ty(self.tcx, @nil, none, a) |br| { // N.B.: The name of the bound region doesn't have // anything to do with the region variable that's created // for it. The only thing we're doing with `br` here is @@ -2027,7 +2014,7 @@ impl of combine for sub { // Second, we instantiate each bound region in the supertype with a // fresh concrete region. let {fn_ty: b_fn_ty, _} = { - replace_bound_regions_in_fn_ty(self.tcx, @nil, none, b) { |br| + do replace_bound_regions_in_fn_ty(self.tcx, @nil, none, b) |br| { // FIXME: eventually re_skolemized (issue #2263) ty::re_bound(br) } @@ -2061,7 +2048,7 @@ impl of combine for sub { super_substs(self, as, bs) } - fn tps(as: [ty::t]/~, bs: [ty::t]/~) -> cres<[ty::t]/~> { + fn tps(as: ~[ty::t], bs: ~[ty::t]) -> cres<~[ty::t]> { super_tps(self, as, bs) } @@ -2093,21 +2080,19 @@ impl of combine for lub { alt m { m_imm | m_const { - self.tys(a.ty, b.ty).chain {|t| - ok({ty: t, mutbl: m}) - } + self.tys(a.ty, b.ty).chain(|t| ok({ty: t, mutbl: m}) ) } m_mutbl { - self.infcx().try {|| - self.infcx().eq_tys(a.ty, b.ty).then {|| + self.infcx().try(|| { + self.infcx().eq_tys(a.ty, b.ty).then(|| { ok({ty: a.ty, mutbl: m}) - } - }.chain_err {|_e| - self.tys(a.ty, b.ty).chain {|t| + }) + }).chain_err(|_e| { + self.tys(a.ty, b.ty).chain(|t| { ok({ty: t, mutbl: m_const}) - } - } + }) + }) } } } @@ -2159,7 +2144,7 @@ impl of combine for lub { a.to_str(self.infcx()), b.to_str(self.infcx())]; - indent {|| + do indent || { alt (a, b) { (ty::re_static, _) | (_, ty::re_static) { ok(ty::re_static) // nothing lives longer than static @@ -2247,7 +2232,7 @@ impl of combine for lub { super_substs(self, as, bs) } - fn tps(as: [ty::t]/~, bs: [ty::t]/~) -> cres<[ty::t]/~> { + fn tps(as: ~[ty::t], bs: ~[ty::t]) -> cres<~[ty::t]> { super_tps(self, as, bs) } @@ -2272,19 +2257,19 @@ impl of combine for glb { // If one side or both is mut, then the GLB must use // the precise type from the mut side. (m_mutbl, m_const) { - sub(*self).tys(a.ty, b.ty).chain {|_t| + sub(*self).tys(a.ty, b.ty).chain(|_t| { ok({ty: a.ty, mutbl: m_mutbl}) - } + }) } (m_const, m_mutbl) { - sub(*self).tys(b.ty, a.ty).chain {|_t| + sub(*self).tys(b.ty, a.ty).chain(|_t| { ok({ty: b.ty, mutbl: m_mutbl}) - } + }) } (m_mutbl, m_mutbl) { - self.infcx().eq_tys(a.ty, b.ty).then {|| + self.infcx().eq_tys(a.ty, b.ty).then(|| { ok({ty: a.ty, mutbl: m_mutbl}) - } + }) } // If one side or both is immutable, we can use the GLB of @@ -2292,17 +2277,17 @@ impl of combine for glb { (m_imm, m_const) | (m_const, m_imm) | (m_imm, m_imm) { - self.tys(a.ty, b.ty).chain {|t| + self.tys(a.ty, b.ty).chain(|t| { ok({ty: t, mutbl: m_imm}) - } + }) } // If both sides are const, then we can use GLB of both // sides and mutbl of only `m_const`. (m_const, m_const) { - self.tys(a.ty, b.ty).chain {|t| + self.tys(a.ty, b.ty).chain(|t| { ok({ty: t, mutbl: m_const}) - } + }) } // There is no mutual subtype of these combinations. @@ -2356,7 +2341,7 @@ impl of combine for glb { a.to_str(self.infcx()), b.to_str(self.infcx())]; - indent {|| + do indent || { alt (a, b) { (ty::re_static, r) | (r, ty::re_static) { // static lives longer than everything else @@ -2446,7 +2431,7 @@ impl of combine for glb { super_substs(self, as, bs) } - fn tps(as: [ty::t]/~, bs: [ty::t]/~) -> cres<[ty::t]/~> { + fn tps(as: ~[ty::t], bs: ~[ty::t]) -> cres<~[ty::t]> { super_tps(self, as, bs) } @@ -2494,7 +2479,7 @@ fn lattice_tys<L:lattice_ops combine>( a.to_str(self.infcx()), b.to_str(self.infcx())); if a == b { ret ok(a); } - indent {|| + do indent || { alt (ty::get(a).struct, ty::get(b).struct) { (ty::ty_bot, _) { self.ty_bot(b) } (_, ty::ty_bot) { self.ty_bot(a) } @@ -2502,17 +2487,17 @@ fn lattice_tys<L:lattice_ops combine>( (ty::ty_var(a_id), ty::ty_var(b_id)) { lattice_vars(self, self.infcx().tvb, a, a_id, b_id, - {|x, y| self.tys(x, y) }) + |x, y| self.tys(x, y) ) } (ty::ty_var(a_id), _) { lattice_var_t(self, self.infcx().tvb, a_id, b, - {|x, y| self.tys(x, y) }) + |x, y| self.tys(x, y) ) } (_, ty::ty_var(b_id)) { lattice_var_t(self, self.infcx().tvb, b_id, a, - {|x, y| self.tys(x, y) }) + |x, y| self.tys(x, y) ) } _ { super_tys(self, a, b) @@ -2529,13 +2514,13 @@ fn lattice_rvars<L:lattice_ops combine>( (ty::re_var(a_id), ty::re_var(b_id)) { lattice_vars(self, self.infcx().rb, a, a_id, b_id, - {|x, y| self.regions(x, y) }) + |x, y| self.regions(x, y) ) } (ty::re_var(v_id), r) | (r, ty::re_var(v_id)) { lattice_var_t(self, self.infcx().rb, v_id, r, - {|x, y| self.regions(x, y) }) + |x, y| self.regions(x, y) ) } _ { @@ -2580,7 +2565,7 @@ fn lattice_vars<V:copy vid, T:copy to_str st, L:lattice_ops combine>( let a_bnd = self.bnd(a_bounds), b_bnd = self.bnd(b_bounds); alt (a_bnd, b_bnd) { (some(a_ty), some(b_ty)) { - alt self.infcx().try {|| c_ts(a_ty, b_ty) } { + alt self.infcx().try(|| c_ts(a_ty, b_ty) ) { ok(t) { ret ok(t); } err(_) { /*fallthrough */ } } @@ -2590,9 +2575,7 @@ fn lattice_vars<V:copy vid, T:copy to_str st, L:lattice_ops combine>( // Otherwise, we need to merge A and B into one variable. We can // then use either variable as an upper bound: - self.infcx().vars(vb, a_vid, b_vid).then {|| - ok(a_t) - } + self.infcx().vars(vb, a_vid, b_vid).then(|| ok(a_t) ) } fn lattice_var_t<V:copy vid, T:copy to_str st, L:lattice_ops combine>( @@ -2623,7 +2606,7 @@ fn lattice_var_t<V:copy vid, T:copy to_str st, L:lattice_ops combine>( // and then return b. #debug["bnd=none"]; let a_bounds = self.with_bnd(a_bounds, b); - self.infcx().bnds(a_bounds.lb, a_bounds.ub).then {|| + do self.infcx().bnds(a_bounds.lb, a_bounds.ub).then || { self.infcx().set(vb, a_id, root(a_bounds, nde_a.rank)); ok(b) diff --git a/src/rustc/middle/typeck/rscope.rs b/src/rustc/middle/typeck/rscope.rs index 2420250aa1a..f9f467a90fa 100644 --- a/src/rustc/middle/typeck/rscope.rs +++ b/src/rustc/middle/typeck/rscope.rs @@ -28,7 +28,7 @@ impl of region_scope for type_rscope { } } fn named_region(id: ast::ident) -> result<ty::region, str> { - empty_rscope.named_region(id).chain_err { |_e| + do empty_rscope.named_region(id).chain_err |_e| { if *id == "self" { self.anon_region() } else { result::err("named regions other than `self` are not \ @@ -62,7 +62,7 @@ impl of region_scope for @binding_rscope { result::ok(ty::re_bound(ty::br_anon)) } fn named_region(id: ast::ident) -> result<ty::region, str> { - self.base.named_region(id).chain_err {|_e| + do self.base.named_region(id).chain_err |_e| { result::ok(ty::re_bound(ty::br_named(id))) } } diff --git a/src/rustc/util/common.rs b/src/rustc/util/common.rs index 2b1c88f1ea1..6a594879d1d 100644 --- a/src/rustc/util/common.rs +++ b/src/rustc/util/common.rs @@ -29,9 +29,9 @@ type flag = hashmap<str, ()>; fn field_expr(f: ast::field) -> @ast::expr { ret f.node.expr; } -fn field_exprs(fields: [ast::field]/~) -> [@ast::expr]/~ { - let mut es = []/~; - for fields.each {|f| vec::push(es, f.node.expr); } +fn field_exprs(fields: ~[ast::field]) -> ~[@ast::expr] { + let mut es = ~[]; + for fields.each |f| { vec::push(es, f.node.expr); } ret es; } @@ -39,8 +39,8 @@ fn field_exprs(fields: [ast::field]/~) -> [@ast::expr]/~ { // of b -- skipping any inner loops (loop, while, loop_body) fn loop_query(b: ast::blk, p: fn@(ast::expr_) -> bool) -> bool { let rs = @mut false; - let visit_expr = {|e: @ast::expr, &&flag: @mut bool, - v: visit::vt<@mut bool>| + let visit_expr = + |e: @ast::expr, &&flag: @mut bool, v: visit::vt<@mut bool>| { *flag |= p(e.node); alt e.node { // Skip inner loops, since a break in the inner loop isn't a @@ -56,13 +56,13 @@ fn loop_query(b: ast::blk, p: fn@(ast::expr_) -> bool) -> bool { } fn has_nonlocal_exits(b: ast::blk) -> bool { - loop_query(b) {|e| alt e { + do loop_query(b) |e| { alt e { ast::expr_break | ast::expr_cont { true } _ { false }}} } fn may_break(b: ast::blk) -> bool { - loop_query(b) {|e| alt e { + do loop_query(b) |e| { alt e { ast::expr_break { true } _ { false }}} } diff --git a/src/rustc/util/ppaux.rs b/src/rustc/util/ppaux.rs index d7a25fabfa6..674f79ece79 100644 --- a/src/rustc/util/ppaux.rs +++ b/src/rustc/util/ppaux.rs @@ -96,9 +96,9 @@ fn vstore_to_str(cx: ctxt, vs: ty::vstore) -> str { } } -fn tys_to_str(cx: ctxt, ts: [t]/~) -> str { +fn tys_to_str(cx: ctxt, ts: ~[t]) -> str { let mut rs = ""; - for ts.each {|t| rs += ty_to_str(cx, t); } + for ts.each |t| { rs += ty_to_str(cx, t); } rs } @@ -121,8 +121,8 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { } fn fn_to_str(cx: ctxt, purity: ast::purity, proto: ast::proto, ident: option<ast::ident>, - inputs: [arg]/~, output: t, cf: ast::ret_style, - constrs: [@constr]/~) -> str { + inputs: ~[arg], output: t, cf: ast::ret_style, + constrs: ~[@constr]) -> str { let mut s; s = alt purity { @@ -132,8 +132,8 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { s += proto_to_str(proto); alt ident { some(i) { s += " "; s += *i; } _ { } } s += "("; - let mut strs = []/~; - for inputs.each {|a| vec::push(strs, fn_input_to_str(cx, a)); } + let mut strs = ~[]; + for inputs.each |a| { vec::push(strs, fn_input_to_str(cx, a)); } s += str::connect(strs, ", "); s += ")"; if ty::get(output).struct != ty_nil { @@ -156,7 +156,7 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { } // if there is an id, print that instead of the structural type: - for ty::type_def_id(typ).each { |def_id| + for ty::type_def_id(typ).each |def_id| { // note that this typedef cannot have type parameters ret ast_map::path_to_str(ty::item_path(cx, def_id)); } @@ -189,13 +189,13 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { ty_unboxed_vec(tm) { "unboxed_vec<" + mt_to_str(cx, tm) + ">" } ty_type { "type" } ty_rec(elems) { - let mut strs: [str]/~ = []/~; - for elems.each {|fld| vec::push(strs, field_to_str(cx, fld)); } + let mut strs: ~[str] = ~[]; + for elems.each |fld| { vec::push(strs, field_to_str(cx, fld)); } "{" + str::connect(strs, ",") + "}" } ty_tup(elems) { - let mut strs = []/~; - for elems.each {|elem| vec::push(strs, ty_to_str(cx, elem)); } + let mut strs = ~[]; + for elems.each |elem| { vec::push(strs, ty_to_str(cx, elem)); } "(" + str::connect(strs, ",") + ")" } ty_fn(f) { @@ -205,7 +205,7 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { ty_var(v) { v.to_str() } ty_var_integral(v) { v.to_str() } ty_param(id, _) { - "'" + str::from_bytes([('a' as u8) + (id as u8)]/~) + "'" + str::from_bytes(~[('a' as u8) + (id as u8)]) } ty_self { "self" } ty_enum(did, substs) | ty_class(did, substs) { @@ -234,7 +234,7 @@ fn ty_to_str(cx: ctxt, typ: t) -> str { fn parameterized(cx: ctxt, base: str, self_r: option<ty::region>, - tps: [ty::t]/~) -> str { + tps: ~[ty::t]) -> str { let r_str = alt self_r { none { "" } @@ -244,7 +244,7 @@ fn parameterized(cx: ctxt, }; if vec::len(tps) > 0u { - let strs = vec::map(tps, {|t| ty_to_str(cx, t)}); + let strs = vec::map(tps, |t| ty_to_str(cx, t) ); #fmt["%s%s<%s>", base, r_str, str::connect(strs, ",")] } else { #fmt["%s%s", base, r_str] @@ -262,10 +262,10 @@ fn constr_to_str(c: @constr) -> str { pprust::constr_args_to_str(pprust::uint_to_str, c.node.args); } -fn constrs_str(constrs: [@constr]/~) -> str { +fn constrs_str(constrs: ~[@constr]) -> str { let mut s = ""; let mut colon = true; - for constrs.each {|c| + for constrs.each |c| { if colon { s += " : "; colon = false; } else { s += ", "; } s += constr_to_str(c); } diff --git a/src/rustdoc/astsrv.rs b/src/rustdoc/astsrv.rs index f1a702c9df8..255d49b6f74 100644 --- a/src/rustdoc/astsrv.rs +++ b/src/rustdoc/astsrv.rs @@ -60,7 +60,7 @@ fn from_file<T>(file: str, owner: srv_owner<T>) -> T { fn run<T>(owner: srv_owner<T>, source: str, +parse: parser) -> T { let srv_ = srv({ - ch: task::spawn_listener {|po| + ch: do task::spawn_listener |po| { act(po, source, parse); } }); @@ -199,8 +199,8 @@ fn build_error_handlers( #[test] fn should_prune_unconfigured_items() { let source = "#[cfg(shut_up_and_leave_me_alone)]fn a() { }"; - from_str(source) {|srv| - exec(srv) {|ctxt| + do from_str(source) |srv| { + do exec(srv) |ctxt| { assert vec::is_empty(ctxt.ast.node.module.items); } } @@ -209,8 +209,8 @@ fn should_prune_unconfigured_items() { #[test] fn srv_should_build_ast_map() { let source = "fn a() { }"; - from_str(source) {|srv| - exec(srv) {|ctxt| + do from_str(source) |srv| { + do exec(srv) |ctxt| { assert ctxt.ast_map.size() != 0u }; } @@ -219,8 +219,8 @@ fn srv_should_build_ast_map() { #[test] fn srv_should_build_reexport_map() { let source = "import a::b; export b; mod a { mod b { } }"; - from_str(source) {|srv| - exec(srv) {|ctxt| + do from_str(source) |srv| { + do exec(srv) |ctxt| { assert ctxt.exp_map.size() != 0u }; } @@ -232,14 +232,14 @@ fn srv_should_resolve_external_crates() { fn f() -> std::sha1::sha1 {\ std::sha1::mk_sha1() }"; // Just testing that resolve doesn't crash - from_str(source) {|_srv| } + from_str(source, |_srv| { } ) } #[test] fn srv_should_resolve_core_crate() { let source = "fn a() -> option { fail }"; // Just testing that resolve doesn't crash - from_str(source) {|_srv| } + from_str(source, |_srv| { } ) } #[test] @@ -247,26 +247,26 @@ fn srv_should_resolve_non_existant_imports() { // We want to ignore things we can't resolve. Shouldn't // need to be able to find external crates to create docs. let source = "import wooboo; fn a() { }"; - from_str(source) {|_srv| } + from_str(source, |_srv| { } ) } #[test] fn srv_should_resolve_non_existant_uses() { let source = "use forble; fn a() { }"; - from_str(source) {|_srv| } + from_str(source, |_srv| { } ) } #[test] fn should_ignore_external_import_paths_that_dont_exist() { let source = "use forble; import forble::bippy;"; - from_str(source) {|_srv| } + from_str(source, |_srv| { } ) } #[test] fn srv_should_return_request_result() { let source = "fn a() { }"; - from_str(source) {|srv| - let result = exec(srv) {|_ctxt| 1000}; + do from_str(source) |srv| { + let result = exec(srv, |_ctxt| 1000 ); assert result == 1000; } } diff --git a/src/rustdoc/attr_parser.rs b/src/rustdoc/attr_parser.rs index e0a3b12c07c..56c42902ac2 100644 --- a/src/rustdoc/attr_parser.rs +++ b/src/rustdoc/attr_parser.rs @@ -20,7 +20,7 @@ type crate_attrs = { #[cfg(test)] mod test { - fn parse_attributes(source: str) -> [ast::attribute]/~ { + fn parse_attributes(source: str) -> ~[ast::attribute] { import syntax::parse; import parse::parser; import parse::attr::parser_attr; @@ -29,14 +29,14 @@ mod test { let parse_sess = syntax::parse::new_parse_sess(none); let parser = parse::new_parser_from_source_str( - parse_sess, []/~, "-", codemap::fss_none, @source); + parse_sess, ~[], "-", codemap::fss_none, @source); parser.parse_outer_attributes() } } fn doc_meta( - attrs: [ast::attribute]/~ + attrs: ~[ast::attribute] ) -> option<@ast::meta_item> { #[doc = @@ -44,7 +44,7 @@ fn doc_meta( doc attribute"]; let doc_attrs = attr::find_attrs_by_name(attrs, "doc"); - let doc_metas = doc_attrs.map {|attr| + let doc_metas = do doc_attrs.map |attr| { attr::attr_meta(attr::desugar_doc_attr(attr)) }; @@ -58,12 +58,12 @@ fn doc_meta( } } -fn parse_crate(attrs: [ast::attribute]/~) -> crate_attrs { +fn parse_crate(attrs: ~[ast::attribute]) -> crate_attrs { let link_metas = attr::find_linkage_metas(attrs); { name: attr::last_meta_item_value_str_by_name( - link_metas, "name").map({|x|*x}) + link_metas, "name").map(|x| *x ) } } @@ -91,10 +91,10 @@ fn should_not_extract_crate_name_if_no_name_value_in_link_attribute() { assert attrs.name == none; } -fn parse_desc(attrs: [ast::attribute]/~) -> option<str> { +fn parse_desc(attrs: ~[ast::attribute]) -> option<str> { alt doc_meta(attrs) { some(meta) { - attr::get_meta_item_value_str(meta).map({|x|*x}) + attr::get_meta_item_value_str(meta).map(|x| *x ) } none { none } } @@ -116,7 +116,7 @@ fn parse_desc_should_parse_simple_doc_attributes() { assert attrs == some("basic"); } -fn parse_hidden(attrs: [ast::attribute]/~) -> bool { +fn parse_hidden(attrs: ~[ast::attribute]) -> bool { alt doc_meta(attrs) { some(meta) { alt attr::get_meta_item_list(meta) { diff --git a/src/rustdoc/attr_pass.rs b/src/rustdoc/attr_pass.rs index 3a2c7eb2f25..0a00a5ef58c 100644 --- a/src/rustdoc/attr_pass.rs +++ b/src/rustdoc/attr_pass.rs @@ -42,7 +42,7 @@ fn fold_crate( let srv = fold.ctxt; let doc = fold::default_seq_fold_crate(fold, doc); - let attrs = astsrv::exec(srv) {|ctxt| + let attrs = do astsrv::exec(srv) |ctxt| { let attrs = ctxt.ast.node.attrs; attr_parser::parse_crate(attrs) }; @@ -74,7 +74,7 @@ fn fold_item( let desc = if doc.id == ast::crate_node_id { // This is the top-level mod, use the crate attributes - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { attr_parser::parse_desc(ctxt.ast.node.attrs) } } else { @@ -90,8 +90,8 @@ fn fold_item( fn parse_item_attrs<T:send>( srv: astsrv::srv, id: doc::ast_id, - +parse_attrs: fn~([ast::attribute]/~) -> T) -> T { - astsrv::exec(srv) {|ctxt| + +parse_attrs: fn~(~[ast::attribute]) -> T) -> T { + do astsrv::exec(srv) |ctxt| { let attrs = alt ctxt.ast_map.get(id) { ast_map::node_item(item, _) { item.attrs } ast_map::node_foreign_item(item, _, _) { item.attrs } @@ -143,16 +143,16 @@ fn fold_enum( let doc = fold::default_seq_fold_enum(fold, doc); { - variants: par::anymap(doc.variants) {|variant| - let desc = astsrv::exec(srv) {|ctxt| + variants: do par::anymap(doc.variants) |variant| { + let desc = do astsrv::exec(srv) |ctxt| { alt check ctxt.ast_map.get(doc_id) { ast_map::node_item(@{ node: ast::item_enum(ast_variants, _, _), _ }, _) { let ast_variant = option::get( - vec::find(ast_variants) {|v| + vec::find(ast_variants, |v| { *v.node.name == variant.name - }); + })); attr_parser::parse_desc(ast_variant.node.attrs) } @@ -197,31 +197,31 @@ fn fold_iface( fn merge_method_attrs( srv: astsrv::srv, item_id: doc::ast_id, - docs: [doc::methoddoc]/~ -) -> [doc::methoddoc]/~ { + docs: ~[doc::methoddoc] +) -> ~[doc::methoddoc] { // Create an assoc list from method name to attributes - let attrs: [(str, option<str>)]/~ = astsrv::exec(srv) {|ctxt| + let attrs: ~[(str, option<str>)] = do astsrv::exec(srv) |ctxt| { alt ctxt.ast_map.get(item_id) { ast_map::node_item(@{ node: ast::item_iface(_, _, methods), _ }, _) { - par::seqmap(methods) {|method| + par::seqmap(methods, |method| { (*method.ident, attr_parser::parse_desc(method.attrs)) - } + }) } ast_map::node_item(@{ node: ast::item_impl(_, _, _, _, methods), _ }, _) { - par::seqmap(methods) {|method| + par::seqmap(methods, |method| { (*method.ident, attr_parser::parse_desc(method.attrs)) - } + }) } _ { fail "unexpected item" } } }; - vec::map2(docs, attrs) {|doc, attrs| + do vec::map2(docs, attrs) |doc, attrs| { assert doc.name == tuple::first(attrs); let desc = tuple::second(attrs); @@ -282,7 +282,7 @@ fn should_extract_impl_method_docs() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); run(srv, doc) } diff --git a/src/rustdoc/config.rs b/src/rustdoc/config.rs index 7a5c3cf67ca..c316b175e66 100644 --- a/src/rustdoc/config.rs +++ b/src/rustdoc/config.rs @@ -39,8 +39,8 @@ fn opt_output_style() -> str { "output-style" } fn opt_pandoc_cmd() -> str { "pandoc-cmd" } fn opt_help() -> str { "h" } -fn opts() -> [(getopts::opt, str)]/~ { - [ +fn opts() -> ~[(getopts::opt, str)] { + ~[ (getopts::optopt(opt_output_dir()), "--output-dir <val> put documents here"), (getopts::optopt(opt_output_format()), @@ -51,15 +51,15 @@ fn opts() -> [(getopts::opt, str)]/~ { "--pandoc-cmd <val> the command for running pandoc"), (getopts::optflag(opt_help()), "-h print help") - ]/~ + ] } fn usage() { import io::println; - println("Usage: rustdoc [options]/~ <cratefile>\n"); + println("Usage: rustdoc ~[options] <cratefile>\n"); println("Options:\n"); - for opts().each {|opt| + for opts().each |opt| { println(#fmt(" %s", tuple::second(opt))); } println(""); @@ -75,9 +75,9 @@ fn default_config(input_crate: str) -> config { } } -type program_output = fn~(str, [str]/~) -> {status: int, out: str, err: str}; +type program_output = fn~(str, ~[str]) -> {status: int, out: str, err: str}; -fn mock_program_output(_prog: str, _args: [str]/~) -> { +fn mock_program_output(_prog: str, _args: ~[str]) -> { status: int, out: str, err: str } { { @@ -87,12 +87,12 @@ fn mock_program_output(_prog: str, _args: [str]/~) -> { } } -fn parse_config(args: [str]/~) -> result<config, str> { +fn parse_config(args: ~[str]) -> result<config, str> { parse_config_(args, run::program_output) } fn parse_config_( - args: [str]/~, + args: ~[str], program_output: program_output ) -> result<config, str> { let args = vec::tail(args); @@ -122,19 +122,21 @@ fn config_from_opts( let config = default_config(input_crate); let result = result::ok(config); - let result = result::chain(result) {|config| + let result = do result::chain(result) |config| { let output_dir = getopts::opt_maybe_str(match, opt_output_dir()); result::ok({ output_dir: option::get_default(output_dir, config.output_dir) with config }) }; - let result = result::chain(result) {|config| + let result = do result::chain(result) |config| { let output_format = getopts::opt_maybe_str( match, opt_output_format()); - option::map_default(output_format, result::ok(config)) - {|output_format| - result::chain(parse_output_format(output_format)) {|output_format| + do option::map_default(output_format, result::ok(config)) + |output_format| { + do result::chain(parse_output_format(output_format)) + |output_format| { + result::ok({ output_format: output_format with config @@ -142,11 +144,12 @@ fn config_from_opts( } } }; - let result = result::chain(result) {|config| + let result = do result::chain(result) |config| { let output_style = getopts::opt_maybe_str(match, opt_output_style()); - option::map_default(output_style, result::ok(config)) - {|output_style| - result::chain(parse_output_style(output_style)) {|output_style| + do option::map_default(output_style, result::ok(config)) + |output_style| { + do result::chain(parse_output_style(output_style)) + |output_style| { result::ok({ output_style: output_style with config @@ -154,11 +157,11 @@ fn config_from_opts( } } }; - let result = result::chain(result) {|config| + let result = do result::chain(result) |config| { let pandoc_cmd = getopts::opt_maybe_str(match, opt_pandoc_cmd()); let pandoc_cmd = maybe_find_pandoc( config, pandoc_cmd, program_output); - result::chain(pandoc_cmd) {|pandoc_cmd| + do result::chain(pandoc_cmd) |pandoc_cmd| { result::ok({ pandoc_cmd: pandoc_cmd with config @@ -194,19 +197,19 @@ fn maybe_find_pandoc( } let possible_pandocs = alt maybe_pandoc_cmd { - some(pandoc_cmd) { [pandoc_cmd]/~ } + some(pandoc_cmd) { ~[pandoc_cmd] } none { - ["pandoc"]/~ + alt os::homedir() { + ~["pandoc"] + alt os::homedir() { some(dir) { - [path::connect(dir, ".cabal/bin/pandoc")]/~ + ~[path::connect(dir, ".cabal/bin/pandoc")] } - none { []/~ } + none { ~[] } } } }; - let pandoc = vec::find(possible_pandocs) {|pandoc| - let output = program_output(pandoc, ["--version"]/~); + let pandoc = do vec::find(possible_pandocs) |pandoc| { + let output = program_output(pandoc, ~["--version"]); #debug("testing pandoc cmd %s: %?", pandoc, output); output.status == 0 }; @@ -224,7 +227,7 @@ fn should_find_pandoc() { output_format: pandoc_html with default_config("test") }; - let mock_program_output = fn~(_prog: str, _args: [str]/~) -> { + let mock_program_output = fn~(_prog: str, _args: ~[str]) -> { status: int, out: str, err: str } { { @@ -241,7 +244,7 @@ fn should_error_with_no_pandoc() { output_format: pandoc_html with default_config("test") }; - let mock_program_output = fn~(_prog: str, _args: [str]/~) -> { + let mock_program_output = fn~(_prog: str, _args: ~[str]) -> { status: int, out: str, err: str } { { @@ -254,108 +257,108 @@ fn should_error_with_no_pandoc() { #[cfg(test)] mod test { - fn parse_config(args: [str]/~) -> result<config, str> { + fn parse_config(args: ~[str]) -> result<config, str> { parse_config_(args, mock_program_output) } } #[test] fn should_error_with_no_crates() { - let config = test::parse_config(["rustdoc"]/~); + let config = test::parse_config(~["rustdoc"]); assert result::get_err(config) == "no crates specified"; } #[test] fn should_error_with_multiple_crates() { let config = - test::parse_config(["rustdoc", "crate1.rc", "crate2.rc"]/~); + test::parse_config(~["rustdoc", "crate1.rc", "crate2.rc"]); assert result::get_err(config) == "multiple crates specified"; } #[test] fn should_set_output_dir_to_cwd_if_not_provided() { - let config = test::parse_config(["rustdoc", "crate.rc"]/~); + let config = test::parse_config(~["rustdoc", "crate.rc"]); assert result::get(config).output_dir == "."; } #[test] fn should_set_output_dir_if_provided() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-dir", "snuggles" - ]/~); + ]); assert result::get(config).output_dir == "snuggles"; } #[test] fn should_set_output_format_to_pandoc_html_if_not_provided() { - let config = test::parse_config(["rustdoc", "crate.rc"]/~); + let config = test::parse_config(~["rustdoc", "crate.rc"]); assert result::get(config).output_format == pandoc_html; } #[test] fn should_set_output_format_to_markdown_if_requested() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-format", "markdown" - ]/~); + ]); assert result::get(config).output_format == markdown; } #[test] fn should_set_output_format_to_pandoc_html_if_requested() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-format", "html" - ]/~); + ]); assert result::get(config).output_format == pandoc_html; } #[test] fn should_error_on_bogus_format() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-format", "bogus" - ]/~); + ]); assert result::get_err(config) == "unknown output format 'bogus'"; } #[test] fn should_set_output_style_to_doc_per_mod_by_default() { - let config = test::parse_config(["rustdoc", "crate.rc"]/~); + let config = test::parse_config(~["rustdoc", "crate.rc"]); assert result::get(config).output_style == doc_per_mod; } #[test] fn should_set_output_style_to_one_doc_if_requested() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-style", "doc-per-crate" - ]/~); + ]); assert result::get(config).output_style == doc_per_crate; } #[test] fn should_set_output_style_to_doc_per_mod_if_requested() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-style", "doc-per-mod" - ]/~); + ]); assert result::get(config).output_style == doc_per_mod; } #[test] fn should_error_on_bogus_output_style() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--output-style", "bogus" - ]/~); + ]); assert result::get_err(config) == "unknown output style 'bogus'"; } #[test] fn should_set_pandoc_command_if_requested() { - let config = test::parse_config([ + let config = test::parse_config(~[ "rustdoc", "crate.rc", "--pandoc-cmd", "panda-bear-doc" - ]/~); + ]); assert result::get(config).pandoc_cmd == some("panda-bear-doc"); } #[test] fn should_set_pandoc_command_when_using_pandoc() { - let config = test::parse_config(["rustdoc", "crate.rc"]/~); + let config = test::parse_config(~["rustdoc", "crate.rc"]); assert result::get(config).pandoc_cmd == some("pandoc"); } diff --git a/src/rustdoc/demo.rs b/src/rustdoc/demo.rs index b40995a0c82..e05f7080f08 100644 --- a/src/rustdoc/demo.rs +++ b/src/rustdoc/demo.rs @@ -25,12 +25,12 @@ enum omnomnomy { #[doc = "Delicious sugar cookies"] cookie, #[doc = "It's pizza"] - pizza_pie([uint]/~) + pizza_pie(~[uint]) } fn take_my_order_please( _waitress: waitress, - _order: [omnomnomy]/~ + _order: ~[omnomnomy] ) -> uint { #[doc = " diff --git a/src/rustdoc/desc_to_brief_pass.rs b/src/rustdoc/desc_to_brief_pass.rs index 83fda9f7b5c..f4b0a52a268 100644 --- a/src/rustdoc/desc_to_brief_pass.rs +++ b/src/rustdoc/desc_to_brief_pass.rs @@ -42,12 +42,10 @@ fn fold_iface(fold: fold::fold<()>, doc: doc::ifacedoc) -> doc::ifacedoc { let doc =fold::default_seq_fold_iface(fold, doc); { - methods: par::anymap(doc.methods) {|doc| - { - brief: extract(doc.desc) - with doc - } - } + methods: par::anymap(doc.methods, |doc| { + brief: extract(doc.desc) + with doc + }) with doc } } @@ -56,12 +54,10 @@ fn fold_impl(fold: fold::fold<()>, doc: doc::impldoc) -> doc::impldoc { let doc =fold::default_seq_fold_impl(fold, doc); { - methods: par::anymap(doc.methods) {|doc| - { - brief: extract(doc.desc) - with doc - } - } + methods: par::anymap(doc.methods, |doc| { + brief: extract(doc.desc) + with doc + }) with doc } } @@ -88,7 +84,7 @@ fn should_promote_impl_method_desc() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = attr_pass::mk_pass().f(srv, doc); run(srv, doc) @@ -134,7 +130,7 @@ fn first_sentence_(s: str) -> str { let mut dotcount = 0; // The index of the character following a single dot. This allows // Things like [0..1) to appear in the brief description - let idx = str::find(s) {|ch| + let idx = do str::find(s) |ch| { if ch == '.' { dotcount += 1; false @@ -161,11 +157,11 @@ fn first_sentence_(s: str) -> str { } } -fn paragraphs(s: str) -> [str]/~ { +fn paragraphs(s: str) -> ~[str] { let lines = str::lines_any(s); let mut whitespace_lines = 0; let mut accum = ""; - let paras = vec::foldl([]/~, lines) {|paras, line| + let paras = do vec::foldl(~[], lines) |paras, line| { let mut res = paras; if str::is_whitespace(line) { @@ -173,7 +169,7 @@ fn paragraphs(s: str) -> [str]/~ { } else { if whitespace_lines > 0 { if str::is_not_empty(accum) { - res += [accum]/~; + res += ~[accum]; accum = ""; } } @@ -191,7 +187,7 @@ fn paragraphs(s: str) -> [str]/~ { }; if str::is_not_empty(accum) { - paras + [accum]/~ + paras + ~[accum] } else { paras } @@ -200,13 +196,13 @@ fn paragraphs(s: str) -> [str]/~ { #[test] fn test_paragraphs_1() { let paras = paragraphs("1\n\n2"); - assert paras == ["1", "2"]/~; + assert paras == ~["1", "2"]; } #[test] fn test_paragraphs_2() { let paras = paragraphs("\n\n1\n1\n\n2\n\n"); - assert paras == ["1\n1", "2"]/~; + assert paras == ~["1\n1", "2"]; } #[test] diff --git a/src/rustdoc/doc.rs b/src/rustdoc/doc.rs index 3665e9cc331..cd83b70bbea 100644 --- a/src/rustdoc/doc.rs +++ b/src/rustdoc/doc.rs @@ -3,7 +3,7 @@ type ast_id = int; type doc = { - pages: [page]/~ + pages: ~[page] }; enum page { @@ -41,10 +41,10 @@ enum itemtag { type itemdoc = { id: ast_id, name: str, - path: [str]/~, + path: ~[str], brief: option<str>, desc: option<str>, - sections: [section]/~, + sections: ~[section], // Indicates that this node is a reexport of a different item reexport: bool }; @@ -56,13 +56,13 @@ type simpleitemdoc = { type moddoc = { item: itemdoc, - items: [itemtag]/~, + items: ~[itemtag], index: option<index> }; type nmoddoc = { item: itemdoc, - fns: [fndoc]/~, + fns: ~[fndoc], index: option<index> }; @@ -72,7 +72,7 @@ type fndoc = simpleitemdoc; type enumdoc = { item: itemdoc, - variants: [variantdoc]/~ + variants: ~[variantdoc] }; type variantdoc = { @@ -83,14 +83,14 @@ type variantdoc = { type ifacedoc = { item: itemdoc, - methods: [methoddoc]/~ + methods: ~[methoddoc] }; type methoddoc = { name: str, brief: option<str>, desc: option<str>, - sections: [section]/~, + sections: ~[section], sig: option<str> }; @@ -98,13 +98,13 @@ type impldoc = { item: itemdoc, iface_ty: option<str>, self_ty: option<str>, - methods: [methoddoc]/~ + methods: ~[methoddoc] }; type tydoc = simpleitemdoc; type index = { - entries: [index_entry]/~ + entries: ~[index_entry] }; #[doc = " @@ -128,12 +128,12 @@ type index_entry = { impl util for doc { fn cratedoc() -> cratedoc { - option::get(vec::foldl(none, self.pages) {|_m, page| + option::get(vec::foldl(none, self.pages, |_m, page| { alt page { doc::cratepage(doc) { some(doc) } _ { none } } - }) + })) } fn cratemod() -> moddoc { @@ -144,8 +144,8 @@ impl util for doc { #[doc = "Some helper methods on moddoc, mostly for testing"] impl util for moddoc { - fn mods() -> [moddoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn mods() -> ~[moddoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { modtag(moddoc) { some(moddoc) } _ { none } @@ -153,8 +153,8 @@ impl util for moddoc { } } - fn nmods() -> [nmoddoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn nmods() -> ~[nmoddoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { nmodtag(nmoddoc) { some(nmoddoc) } _ { none } @@ -162,8 +162,8 @@ impl util for moddoc { } } - fn fns() -> [fndoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn fns() -> ~[fndoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { fntag(fndoc) { some(fndoc) } _ { none } @@ -171,8 +171,8 @@ impl util for moddoc { } } - fn consts() -> [constdoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn consts() -> ~[constdoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { consttag(constdoc) { some(constdoc) } _ { none } @@ -180,8 +180,8 @@ impl util for moddoc { } } - fn enums() -> [enumdoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn enums() -> ~[enumdoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { enumtag(enumdoc) { some(enumdoc) } _ { none } @@ -189,8 +189,8 @@ impl util for moddoc { } } - fn ifaces() -> [ifacedoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn ifaces() -> ~[ifacedoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { ifacetag(ifacedoc) { some(ifacedoc) } _ { none } @@ -198,8 +198,8 @@ impl util for moddoc { } } - fn impls() -> [impldoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn impls() -> ~[impldoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { impltag(impldoc) { some(impldoc) } _ { none } @@ -207,8 +207,8 @@ impl util for moddoc { } } - fn types() -> [tydoc]/~ { - vec::filter_map(self.items) {|itemtag| + fn types() -> ~[tydoc] { + do vec::filter_map(self.items) |itemtag| { alt itemtag { tytag(tydoc) { some(tydoc) } _ { none } @@ -217,10 +217,10 @@ impl util for moddoc { } } -impl util for [page]/~ { +impl util for ~[page] { - fn mods() -> [moddoc]/~ { - vec::filter_map(self) {|page| + fn mods() -> ~[moddoc] { + do vec::filter_map(self) |page| { alt page { itempage(modtag(moddoc)) { some(moddoc) } _ { none } @@ -228,8 +228,8 @@ impl util for [page]/~ { } } - fn nmods() -> [nmoddoc]/~ { - vec::filter_map(self) {|page| + fn nmods() -> ~[nmoddoc] { + do vec::filter_map(self) |page| { alt page { itempage(nmodtag(nmoddoc)) { some(nmoddoc) } _ { none } @@ -237,8 +237,8 @@ impl util for [page]/~ { } } - fn fns() -> [fndoc]/~ { - vec::filter_map(self) {|page| + fn fns() -> ~[fndoc] { + do vec::filter_map(self) |page| { alt page { itempage(fntag(fndoc)) { some(fndoc) } _ { none } @@ -246,8 +246,8 @@ impl util for [page]/~ { } } - fn consts() -> [constdoc]/~ { - vec::filter_map(self) {|page| + fn consts() -> ~[constdoc] { + do vec::filter_map(self) |page| { alt page { itempage(consttag(constdoc)) { some(constdoc) } _ { none } @@ -255,8 +255,8 @@ impl util for [page]/~ { } } - fn enums() -> [enumdoc]/~ { - vec::filter_map(self) {|page| + fn enums() -> ~[enumdoc] { + do vec::filter_map(self) |page| { alt page { itempage(enumtag(enumdoc)) { some(enumdoc) } _ { none } @@ -264,8 +264,8 @@ impl util for [page]/~ { } } - fn ifaces() -> [ifacedoc]/~ { - vec::filter_map(self) {|page| + fn ifaces() -> ~[ifacedoc] { + do vec::filter_map(self) |page| { alt page { itempage(ifacetag(ifacedoc)) { some(ifacedoc) } _ { none } @@ -273,8 +273,8 @@ impl util for [page]/~ { } } - fn impls() -> [impldoc]/~ { - vec::filter_map(self) {|page| + fn impls() -> ~[impldoc] { + do vec::filter_map(self) |page| { alt page { itempage(impltag(impldoc)) { some(impldoc) } _ { none } @@ -282,8 +282,8 @@ impl util for [page]/~ { } } - fn types() -> [tydoc]/~ { - vec::filter_map(self) {|page| + fn types() -> ~[tydoc] { + do vec::filter_map(self) |page| { alt page { itempage(tytag(tydoc)) { some(tydoc) } _ { none } @@ -344,7 +344,7 @@ impl util<A:item> for A { self.item().name } - fn path() -> [str]/~ { + fn path() -> ~[str] { self.item().path } @@ -356,7 +356,7 @@ impl util<A:item> for A { self.item().desc } - fn sections() -> [section]/~ { + fn sections() -> ~[section] { self.item().sections } } diff --git a/src/rustdoc/extract.rs b/src/rustdoc/extract.rs index 343d32019de..66585d5683e 100644 --- a/src/rustdoc/extract.rs +++ b/src/rustdoc/extract.rs @@ -11,7 +11,7 @@ fn from_srv( #[doc = "Use the AST service to create a document tree"]; - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { extract(ctxt.ast, default_name) } } @@ -21,11 +21,11 @@ fn extract( default_name: str ) -> doc::doc { { - pages: [ + pages: ~[ doc::cratepage({ topmod: top_moddoc_from_crate(crate, default_name), }) - ]/~ + ] } } @@ -41,10 +41,10 @@ fn mk_itemdoc(id: ast::node_id, name: ast::ident) -> doc::itemdoc { { id: id, name: *name, - path: []/~, + path: ~[], brief: none, desc: none, - sections: []/~, + sections: ~[], reexport: false } } @@ -55,7 +55,7 @@ fn moddoc_from_mod( ) -> doc::moddoc { { item: itemdoc, - items: vec::filter_map(module.items) {|item| + items: do vec::filter_map(module.items) |item| { let itemdoc = mk_itemdoc(item.id, item.ident); alt item.node { ast::item_mod(m) { @@ -113,7 +113,7 @@ fn nmoddoc_from_mod( ) -> doc::nmoddoc { { item: itemdoc, - fns: par::seqmap(module.items) {|item| + fns: do par::seqmap(module.items) |item| { let itemdoc = mk_itemdoc(item.id, item.ident); alt item.node { ast::foreign_item_fn(_, _) { @@ -148,7 +148,7 @@ fn should_extract_const_name_and_id() { fn enumdoc_from_enum( itemdoc: doc::itemdoc, - variants: [ast::variant]/~ + variants: ~[ast::variant] ) -> doc::enumdoc { { item: itemdoc, @@ -157,8 +157,8 @@ fn enumdoc_from_enum( } fn variantdocs_from_variants( - variants: [ast::variant]/~ -) -> [doc::variantdoc]/~ { + variants: ~[ast::variant] +) -> ~[doc::variantdoc] { par::seqmap(variants, variantdoc_from_variant) } @@ -185,16 +185,16 @@ fn should_extract_enum_variants() { fn ifacedoc_from_iface( itemdoc: doc::itemdoc, - methods: [ast::ty_method]/~ + methods: ~[ast::ty_method] ) -> doc::ifacedoc { { item: itemdoc, - methods: par::seqmap(methods) {|method| + methods: do par::seqmap(methods) |method| { { name: *method.ident, brief: none, desc: none, - sections: []/~, + sections: ~[], sig: none } } @@ -215,18 +215,18 @@ fn should_extract_iface_methods() { fn impldoc_from_impl( itemdoc: doc::itemdoc, - methods: [@ast::method]/~ + methods: ~[@ast::method] ) -> doc::impldoc { { item: itemdoc, iface_ty: none, self_ty: none, - methods: par::seqmap(methods) {|method| + methods: do par::seqmap(methods) |method| { { name: *method.ident, brief: none, desc: none, - sections: []/~, + sections: ~[], sig: none } } @@ -339,7 +339,7 @@ mod test { #[test] fn extract_from_seq_srv() { let source = ""; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = from_srv(srv, "name"); assert doc.cratemod().name() == "name"; } diff --git a/src/rustdoc/fold.rs b/src/rustdoc/fold.rs index e502bb7ce41..9d560c7486d 100644 --- a/src/rustdoc/fold.rs +++ b/src/rustdoc/fold.rs @@ -83,57 +83,57 @@ fn mk_fold<T:copy>( fn default_any_fold<T:send copy>(ctxt: T) -> fold<T> { mk_fold( ctxt, - {|f, d| default_seq_fold_doc(f, d)}, - {|f, d| default_seq_fold_crate(f, d)}, - {|f, d| default_seq_fold_item(f, d)}, - {|f, d| default_any_fold_mod(f, d)}, - {|f, d| default_any_fold_nmod(f, d)}, - {|f, d| default_seq_fold_fn(f, d)}, - {|f, d| default_seq_fold_const(f, d)}, - {|f, d| default_seq_fold_enum(f, d)}, - {|f, d| default_seq_fold_iface(f, d)}, - {|f, d| default_seq_fold_impl(f, d)}, - {|f, d| default_seq_fold_type(f, d)} + |f, d| default_seq_fold_doc(f, d), + |f, d| default_seq_fold_crate(f, d), + |f, d| default_seq_fold_item(f, d), + |f, d| default_any_fold_mod(f, d), + |f, d| default_any_fold_nmod(f, d), + |f, d| default_seq_fold_fn(f, d), + |f, d| default_seq_fold_const(f, d), + |f, d| default_seq_fold_enum(f, d), + |f, d| default_seq_fold_iface(f, d), + |f, d| default_seq_fold_impl(f, d), + |f, d| default_seq_fold_type(f, d) ) } fn default_seq_fold<T:copy>(ctxt: T) -> fold<T> { mk_fold( ctxt, - {|f, d| default_seq_fold_doc(f, d)}, - {|f, d| default_seq_fold_crate(f, d)}, - {|f, d| default_seq_fold_item(f, d)}, - {|f, d| default_seq_fold_mod(f, d)}, - {|f, d| default_seq_fold_nmod(f, d)}, - {|f, d| default_seq_fold_fn(f, d)}, - {|f, d| default_seq_fold_const(f, d)}, - {|f, d| default_seq_fold_enum(f, d)}, - {|f, d| default_seq_fold_iface(f, d)}, - {|f, d| default_seq_fold_impl(f, d)}, - {|f, d| default_seq_fold_type(f, d)} + |f, d| default_seq_fold_doc(f, d), + |f, d| default_seq_fold_crate(f, d), + |f, d| default_seq_fold_item(f, d), + |f, d| default_seq_fold_mod(f, d), + |f, d| default_seq_fold_nmod(f, d), + |f, d| default_seq_fold_fn(f, d), + |f, d| default_seq_fold_const(f, d), + |f, d| default_seq_fold_enum(f, d), + |f, d| default_seq_fold_iface(f, d), + |f, d| default_seq_fold_impl(f, d), + |f, d| default_seq_fold_type(f, d) ) } fn default_par_fold<T:send copy>(ctxt: T) -> fold<T> { mk_fold( ctxt, - {|f, d| default_seq_fold_doc(f, d)}, - {|f, d| default_seq_fold_crate(f, d)}, - {|f, d| default_seq_fold_item(f, d)}, - {|f, d| default_par_fold_mod(f, d)}, - {|f, d| default_par_fold_nmod(f, d)}, - {|f, d| default_seq_fold_fn(f, d)}, - {|f, d| default_seq_fold_const(f, d)}, - {|f, d| default_seq_fold_enum(f, d)}, - {|f, d| default_seq_fold_iface(f, d)}, - {|f, d| default_seq_fold_impl(f, d)}, - {|f, d| default_seq_fold_type(f, d)} + |f, d| default_seq_fold_doc(f, d), + |f, d| default_seq_fold_crate(f, d), + |f, d| default_seq_fold_item(f, d), + |f, d| default_par_fold_mod(f, d), + |f, d| default_par_fold_nmod(f, d), + |f, d| default_seq_fold_fn(f, d), + |f, d| default_seq_fold_const(f, d), + |f, d| default_seq_fold_enum(f, d), + |f, d| default_seq_fold_iface(f, d), + |f, d| default_seq_fold_impl(f, d), + |f, d| default_seq_fold_type(f, d) ) } fn default_seq_fold_doc<T>(fold: fold<T>, doc: doc::doc) -> doc::doc { { - pages: par::seqmap(doc.pages) {|page| + pages: do par::seqmap(doc.pages) |page| { alt page { doc::cratepage(doc) { doc::cratepage(fold.fold_crate(fold, doc)) @@ -169,9 +169,9 @@ fn default_any_fold_mod<T:send copy>( ) -> doc::moddoc { { item: fold.fold_item(fold, doc.item), - items: par::anymap(doc.items) {|itemtag, copy fold| + items: par::anymap(doc.items, |itemtag, copy fold| { fold_itemtag(fold, itemtag) - } + }) with doc } } @@ -182,9 +182,9 @@ fn default_seq_fold_mod<T>( ) -> doc::moddoc { { item: fold.fold_item(fold, doc.item), - items: par::seqmap(doc.items) {|itemtag| + items: par::seqmap(doc.items, |itemtag| { fold_itemtag(fold, itemtag) - } + }) with doc } } @@ -195,9 +195,9 @@ fn default_par_fold_mod<T:send copy>( ) -> doc::moddoc { { item: fold.fold_item(fold, doc.item), - items: par::parmap(doc.items) {|itemtag, copy fold| + items: par::parmap(doc.items, |itemtag, copy fold| { fold_itemtag(fold, itemtag) - } + }) with doc } } @@ -208,9 +208,9 @@ fn default_any_fold_nmod<T:send copy>( ) -> doc::nmoddoc { { item: fold.fold_item(fold, doc.item), - fns: par::anymap(doc.fns) {|fndoc, copy fold| + fns: par::anymap(doc.fns, |fndoc, copy fold| { fold.fold_fn(fold, fndoc) - } + }) with doc } } @@ -221,9 +221,9 @@ fn default_seq_fold_nmod<T>( ) -> doc::nmoddoc { { item: fold.fold_item(fold, doc.item), - fns: par::seqmap(doc.fns) {|fndoc| + fns: par::seqmap(doc.fns, |fndoc| { fold.fold_fn(fold, fndoc) - } + }) with doc } } @@ -234,9 +234,9 @@ fn default_par_fold_nmod<T:send copy>( ) -> doc::nmoddoc { { item: fold.fold_item(fold, doc.item), - fns: par::parmap(doc.fns) {|fndoc, copy fold| + fns: par::parmap(doc.fns, |fndoc, copy fold| { fold.fold_fn(fold, fndoc) - } + }) with doc } } diff --git a/src/rustdoc/markdown_index_pass.rs b/src/rustdoc/markdown_index_pass.rs index 837390cb055..25691cb79d9 100644 --- a/src/rustdoc/markdown_index_pass.rs +++ b/src/rustdoc/markdown_index_pass.rs @@ -55,9 +55,9 @@ fn build_mod_index( config: config::config ) -> doc::index { { - entries: par::anymap(doc.items) {|doc| + entries: par::anymap(doc.items, |doc| { item_to_entry(doc, config) - } + }) } } @@ -66,9 +66,9 @@ fn build_nmod_index( config: config::config ) -> doc::index { { - entries: par::anymap(doc.fns) {|doc| + entries: par::anymap(doc.fns, |doc| { item_to_entry(doc::fntag(doc), config) - } + }) } } @@ -132,7 +132,7 @@ fn pandoc_header_id(header: str) -> str { #[test] fn should_remove_punctuation_from_headers() { assert pandoc_header_id("impl foo of bar<A>") == "impl-foo-of-bara"; - assert pandoc_header_id("fn@([~A]/~)") == "fna"; + assert pandoc_header_id("fn@(~[~A])") == "fna"; } #[test] @@ -215,7 +215,7 @@ fn should_index_foreign_mod_contents() { #[cfg(test)] mod test { fn mk_doc(output_style: config::output_style, source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let config = { output_style: output_style with config::default_config("whatever") diff --git a/src/rustdoc/markdown_pass.rs b/src/rustdoc/markdown_pass.rs index 05da858ee3a..a616897f3b2 100644 --- a/src/rustdoc/markdown_pass.rs +++ b/src/rustdoc/markdown_pass.rs @@ -82,7 +82,7 @@ fn write_markdown( doc: doc::doc, +writer_factory: writer_factory ) { - par::anymap(doc.pages) {|page| + do par::anymap(doc.pages) |page| { let ctxt = { w: writer_factory(page) }; @@ -115,7 +115,7 @@ fn should_request_new_writer_for_each_page() { let doc = page_pass::mk_pass(config::doc_per_mod).f(srv, doc); write_markdown(doc, writer_factory); // We expect two pages to have been written - iter::repeat(2u) {|| + do iter::repeat(2u) || { comm::recv(po); } } @@ -146,7 +146,7 @@ fn should_write_title_for_each_page() { "#[link(name = \"core\")]; mod a { }"); let doc = page_pass::mk_pass(config::doc_per_mod).f(srv, doc); write_markdown(doc, writer_factory); - iter::repeat(2u) {|| + do iter::repeat(2u) || { let (page, markdown) = comm::recv(po); alt page { doc::cratepage(_) { @@ -211,7 +211,7 @@ fn header_kind(doc: doc::itemtag) -> str { } fn header_name(doc: doc::itemtag) -> str { - let fullpath = str::connect(doc.path() + [doc.name()]/~, "::"); + let fullpath = str::connect(doc.path() + ~[doc.name()], "::"); alt doc { doc::modtag(_) if doc.id() != syntax::ast::crate_node_id { fullpath @@ -275,7 +275,7 @@ fn should_write_full_path_to_mod() { fn write_common( ctxt: ctxt, desc: option<str>, - sections: [doc::section]/~ + sections: ~[doc::section] ) { write_desc(ctxt, desc); write_sections(ctxt, sections); @@ -294,8 +294,8 @@ fn write_desc( } } -fn write_sections(ctxt: ctxt, sections: [doc::section]/~) { - vec::iter(sections) {|section| +fn write_sections(ctxt: ctxt, sections: ~[doc::section]) { + do vec::iter(sections) |section| { write_section(ctxt, section); } } @@ -325,7 +325,7 @@ fn write_mod_contents( write_index(ctxt, option::get(doc.index)); } - for doc.items.each {|itemtag| + for doc.items.each |itemtag| { write_item(ctxt, itemtag); } } @@ -377,7 +377,7 @@ fn write_index(ctxt: ctxt, index: doc::index) { ret; } - for index.entries.each {|entry| + for index.entries.each |entry| { let header = header_text_(entry.kind, entry.name); let id = entry.link; if option::is_some(entry.brief) { @@ -427,7 +427,7 @@ fn write_nmod(ctxt: ctxt, doc: doc::nmoddoc) { write_index(ctxt, option::get(doc.index)); } - for doc.fns.each {|fndoc| + for doc.fns.each |fndoc| { write_item_header(ctxt, doc::fntag(fndoc)); write_fn(ctxt, fndoc); } @@ -468,7 +468,7 @@ fn write_fnlike( ctxt: ctxt, sig: option<str>, desc: option<str>, - sections: [doc::section]/~ + sections: ~[doc::section] ) { write_sig(ctxt, sig); write_common(ctxt, desc, sections); @@ -486,7 +486,7 @@ fn write_sig(ctxt: ctxt, sig: option<str>) { fn code_block_indent(s: str) -> str { let lines = str::lines_any(s); - let indented = par::seqmap(lines, { |line| #fmt(" %s", line) }); + let indented = par::seqmap(lines, |line| #fmt(" %s", line) ); str::connect(indented, "\n") } @@ -512,18 +512,18 @@ fn should_insert_blank_line_after_fn_signature() { fn should_correctly_indent_fn_signature() { let doc = test::create_doc("fn a() { }"); let doc = { - pages: [ + pages: ~[ doc::cratepage({ topmod: { - items: [doc::fntag({ + items: ~[doc::fntag({ sig: some("line 1\nline 2") with doc.cratemod().fns()[0] - })]/~ + })] with doc.cratemod() } with doc.cratedoc() }) - ]/~ + ] }; let markdown = test::write_markdown_str(doc); assert str::contains(markdown, " line 1\n line 2"); @@ -580,7 +580,7 @@ fn should_write_enum_description() { fn write_variants( ctxt: ctxt, - docs: [doc::variantdoc]/~ + docs: ~[doc::variantdoc] ) { if vec::is_empty(docs) { ret; @@ -588,7 +588,7 @@ fn write_variants( write_header_(ctxt, h4, "Variants"); - vec::iter(docs, {|variant| write_variant(ctxt, variant) }); + vec::iter(docs, |variant| write_variant(ctxt, variant) ); ctxt.w.write_line(""); } @@ -644,8 +644,8 @@ fn write_iface(ctxt: ctxt, doc: doc::ifacedoc) { write_methods(ctxt, doc.methods); } -fn write_methods(ctxt: ctxt, docs: [doc::methoddoc]/~) { - vec::iter(docs) {|doc| write_method(ctxt, doc) } +fn write_methods(ctxt: ctxt, docs: ~[doc::methoddoc]) { + do vec::iter(docs) |doc| { write_method(ctxt, doc) } } fn write_method(ctxt: ctxt, doc: doc::methoddoc) { @@ -760,7 +760,7 @@ mod test { } fn create_doc_srv(source: str) -> (astsrv::srv, doc::doc) { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let config = { output_style: config::doc_per_crate diff --git a/src/rustdoc/markdown_writer.rs b/src/rustdoc/markdown_writer.rs index b1f7ef3a970..ee968174aa2 100644 --- a/src/rustdoc/markdown_writer.rs +++ b/src/rustdoc/markdown_writer.rs @@ -56,7 +56,7 @@ fn markdown_writer( page: doc::page ) -> writer { let filename = make_local_filename(config, page); - generic_writer {|markdown| + do generic_writer |markdown| { write_file(filename, markdown); } } @@ -69,16 +69,16 @@ fn pandoc_writer( let pandoc_cmd = option::get(config.pandoc_cmd); let filename = make_local_filename(config, page); - let pandoc_args = [ + let pandoc_args = ~[ "--standalone", "--section-divs", "--from=markdown", "--to=html", "--css=rust.css", "--output=" + filename - ]/~; + ]; - generic_writer {|markdown| + do generic_writer |markdown| { import io::writer_util; #debug("pandoc cmd: %s", pandoc_cmd); @@ -101,14 +101,14 @@ fn pandoc_writer( let stdout_po = comm::port(); let stdout_ch = comm::chan(stdout_po); - task::spawn_sched(task::single_threaded) {|| + do task::spawn_sched(task::single_threaded) || { comm::send(stdout_ch, readclose(pipe_out.in)); } let stdout = comm::recv(stdout_po); let stderr_po = comm::port(); let stderr_ch = comm::chan(stderr_po); - task::spawn_sched(task::single_threaded) {|| + do task::spawn_sched(task::single_threaded) || { comm::send(stderr_ch, readclose(pipe_err.in)); } let stderr = comm::recv(stderr_po); @@ -137,7 +137,7 @@ fn readclose(fd: libc::c_int) -> str { } fn generic_writer(+process: fn~(markdown: str)) -> writer { - let ch = task::spawn_listener {|po: comm::port<writeinstr>| + let ch = do task::spawn_listener |po: comm::port<writeinstr>| { let mut markdown = ""; let mut keep_going = true; while keep_going { @@ -178,7 +178,7 @@ fn make_filename( } } doc::itempage(doc) { - str::connect(doc.path() + [doc.name()]/~, "_") + str::connect(doc.path() + ~[doc.name()], "_") } } }; @@ -236,7 +236,7 @@ fn should_name_mod_file_names_by_path() { #[cfg(test)] mod test { fn mk_doc(name: str, source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, name); let doc = path_pass::mk_pass().f(srv, doc); doc @@ -247,7 +247,7 @@ mod test { fn write_file(path: str, s: str) { import io::writer_util; - alt io::file_writer(path, [io::create, io::truncate]/~) { + alt io::file_writer(path, ~[io::create, io::truncate]) { result::ok(writer) { writer.write_str(s); } @@ -262,7 +262,7 @@ fn future_writer_factory( let writer_factory = fn~(page: doc::page) -> writer { let writer_po = comm::port(); let writer_ch = comm::chan(writer_po); - task::spawn {|| + do task::spawn || { let (writer, future) = future_writer(); comm::send(writer_ch, writer); let s = future::get(future); @@ -280,7 +280,7 @@ fn future_writer() -> (writer, future::future<str>) { let writer = fn~(+instr: writeinstr) { comm::send(chan, copy instr); }; - let future = future::from_fn {|| + let future = do future::from_fn || { let mut res = ""; loop { alt comm::recv(port) { diff --git a/src/rustdoc/page_pass.rs b/src/rustdoc/page_pass.rs index 50be2a5415d..80af8f2553f 100644 --- a/src/rustdoc/page_pass.rs +++ b/src/rustdoc/page_pass.rs @@ -32,7 +32,7 @@ fn run( let result_port = comm::port(); let result_chan = comm::chan(result_port); - let page_chan = task::spawn_listener {|page_port| + let page_chan = do task::spawn_listener |page_port| { comm::send(result_chan, make_doc_from_pages(page_port)); }; @@ -44,11 +44,11 @@ type page_port = comm::port<option<doc::page>>; type page_chan = comm::chan<option<doc::page>>; fn make_doc_from_pages(page_port: page_port) -> doc::doc { - let mut pages = []/~; + let mut pages = ~[]; loop { let val = comm::recv(page_port); if option::is_some(val) { - pages += [option::unwrap(val)]/~; + pages += ~[option::unwrap(val)]; } else { break; } @@ -106,7 +106,7 @@ fn fold_mod( fn strip_mod(doc: doc::moddoc) -> doc::moddoc { { - items: vec::filter(doc.items) {|item| + items: do vec::filter(doc.items) |item| { alt item { doc::modtag(_) { false } doc::nmodtag(_) { false } @@ -166,7 +166,7 @@ mod test { output_style: config::output_style, source: str ) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); run(srv, doc, output_style) } diff --git a/src/rustdoc/par.rs b/src/rustdoc/par.rs index c07c3d50c7c..d64a89714ce 100644 --- a/src/rustdoc/par.rs +++ b/src/rustdoc/par.rs @@ -1,31 +1,31 @@ export anymap, seqmap, parmap; -fn anymap<T:send, U:send>(v: [T]/~, f: fn~(T) -> U) -> [U]/~ { +fn anymap<T:send, U:send>(v: ~[T], f: fn~(T) -> U) -> ~[U] { parmap(v, f) } -fn seqmap<T, U>(v: [T]/~, f: fn(T) -> U) -> [U]/~ { +fn seqmap<T, U>(v: ~[T], f: fn(T) -> U) -> ~[U] { vec::map(v, f) } -fn parmap<T:send, U:send>(v: [T]/~, f: fn~(T) -> U) -> [U]/~ unsafe { - let futures = vec::map(v) {|elt| +fn parmap<T:send, U:send>(v: ~[T], f: fn~(T) -> U) -> ~[U] unsafe { + let futures = do vec::map(v) |elt| { let po = comm::port(); let ch = comm::chan(po); let addr = ptr::addr_of(elt); - task::spawn {|copy f| + do task::spawn |copy f| { comm::send(ch, f(*addr)); } po }; - vec::map(futures) {|future| + do vec::map(futures) |future| { comm::recv(future) } } #[test] fn test_parallel_map() { - let i = [1, 2, 3, 4]/~; - let j = parmap(i) {|e| e + 1 }; - assert j == [2, 3, 4, 5]/~; + let i = ~[1, 2, 3, 4]; + let j = parmap(i, |e| e + 1 ); + assert j == ~[2, 3, 4, 5]; } diff --git a/src/rustdoc/parse.rs b/src/rustdoc/parse.rs index 433cc825a31..eca2273c5b2 100644 --- a/src/rustdoc/parse.rs +++ b/src/rustdoc/parse.rs @@ -12,12 +12,12 @@ export from_file, from_str, from_file_sess, from_str_sess; fn from_file(file: str) -> @ast::crate { parse::parse_crate_from_file( - file, []/~, parse::new_parse_sess(none)) + file, ~[], parse::new_parse_sess(none)) } fn from_str(source: str) -> @ast::crate { parse::parse_crate_from_source_str( - "-", @source, []/~, parse::new_parse_sess(none)) + "-", @source, ~[], parse::new_parse_sess(none)) } fn from_file_sess(sess: session::session, file: str) -> @ast::crate { diff --git a/src/rustdoc/path_pass.rs b/src/rustdoc/path_pass.rs index 377551ed983..99be48c0362 100644 --- a/src/rustdoc/path_pass.rs +++ b/src/rustdoc/path_pass.rs @@ -13,14 +13,14 @@ fn mk_pass() -> pass { type ctxt = { srv: astsrv::srv, - mut path: [str]/~ + mut path: ~[str] }; #[warn(no_non_implicitly_copyable_typarams)] fn run(srv: astsrv::srv, doc: doc::doc) -> doc::doc { let ctxt = { srv: srv, - mut path: []/~ + mut path: ~[] }; let fold = fold::fold({ fold_item: fold_item, @@ -66,42 +66,42 @@ fn fold_nmod(fold: fold::fold<ctxt>, doc: doc::nmoddoc) -> doc::nmoddoc { #[test] fn should_record_mod_paths() { let source = "mod a { mod b { mod c { } } mod d { mod e { } } }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = run(srv, doc); assert doc.cratemod().mods()[0].mods()[0].mods()[0].path() - == ["a", "b"]/~; + == ~["a", "b"]; assert doc.cratemod().mods()[0].mods()[1].mods()[0].path() - == ["a", "d"]/~; + == ~["a", "d"]; } } #[test] fn should_record_fn_paths() { let source = "mod a { fn b() { } }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = run(srv, doc); - assert doc.cratemod().mods()[0].fns()[0].path() == ["a"]/~; + assert doc.cratemod().mods()[0].fns()[0].path() == ~["a"]; } } #[test] fn should_record_foreign_mod_paths() { let source = "mod a { native mod b { } }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = run(srv, doc); - assert doc.cratemod().mods()[0].nmods()[0].path() == ["a"]/~; + assert doc.cratemod().mods()[0].nmods()[0].path() == ~["a"]; } } #[test] fn should_record_foreign_fn_paths() { let source = "native mod a { fn b(); }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = run(srv, doc); - assert doc.cratemod().nmods()[0].fns[0].path() == ["a"]/~; + assert doc.cratemod().nmods()[0].fns[0].path() == ~["a"]; } } diff --git a/src/rustdoc/prune_hidden_pass.rs b/src/rustdoc/prune_hidden_pass.rs index e2c09a33a7e..783ca6d3f14 100644 --- a/src/rustdoc/prune_hidden_pass.rs +++ b/src/rustdoc/prune_hidden_pass.rs @@ -25,9 +25,9 @@ fn fold_mod( let doc = fold::default_any_fold_mod(fold, doc); { - items: vec::filter(doc.items) {|itemtag| + items: vec::filter(doc.items, |itemtag| { !is_hidden(fold.ctxt, itemtag.item()) - } + }) with doc } } @@ -36,10 +36,10 @@ fn is_hidden(srv: astsrv::srv, doc: doc::itemdoc) -> bool { import syntax::ast_map; let id = doc.id; - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { let attrs = alt ctxt.ast_map.get(id) { ast_map::node_item(item, _) { item.attrs } - _ { []/~ } + _ { ~[] } }; attr_parser::parse_hidden(attrs) } @@ -54,7 +54,7 @@ fn should_prune_hidden_items() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); run(srv, doc) } diff --git a/src/rustdoc/prune_unexported_pass.rs b/src/rustdoc/prune_unexported_pass.rs index b564af58794..753100c1f09 100644 --- a/src/rustdoc/prune_unexported_pass.rs +++ b/src/rustdoc/prune_unexported_pass.rs @@ -30,7 +30,7 @@ fn fold_mod(fold: fold::fold<astsrv::srv>, doc: doc::moddoc) -> doc::moddoc { } } -fn exported_items(srv: astsrv::srv, doc: doc::moddoc) -> [doc::itemtag]/~ { +fn exported_items(srv: astsrv::srv, doc: doc::moddoc) -> ~[doc::itemtag] { exported_things( srv, doc, exported_items_from_crate, @@ -41,9 +41,9 @@ fn exported_items(srv: astsrv::srv, doc: doc::moddoc) -> [doc::itemtag]/~ { fn exported_things<T>( srv: astsrv::srv, doc: doc::moddoc, - from_crate: fn(astsrv::srv, doc::moddoc) -> [T]/~, - from_mod: fn(astsrv::srv, doc::moddoc) -> [T]/~ -) -> [T]/~ { + from_crate: fn(astsrv::srv, doc::moddoc) -> ~[T], + from_mod: fn(astsrv::srv, doc::moddoc) -> ~[T] +) -> ~[T] { if doc.id() == ast::crate_node_id { from_crate(srv, doc) } else { @@ -54,15 +54,15 @@ fn exported_things<T>( fn exported_items_from_crate( srv: astsrv::srv, doc: doc::moddoc -) -> [doc::itemtag]/~ { +) -> ~[doc::itemtag] { exported_items_from(srv, doc, is_exported_from_crate) } fn exported_items_from_mod( srv: astsrv::srv, doc: doc::moddoc -) -> [doc::itemtag]/~ { - exported_items_from(srv, doc, {|a,b| +) -> ~[doc::itemtag] { + exported_items_from(srv, doc, |a,b| { is_exported_from_mod(a, doc.id(), b) }) } @@ -71,8 +71,8 @@ fn exported_items_from( srv: astsrv::srv, doc: doc::moddoc, is_exported: fn(astsrv::srv, str) -> bool -) -> [doc::itemtag]/~ { - vec::filter_map(doc.items) { |itemtag| +) -> ~[doc::itemtag] { + do vec::filter_map(doc.items) |itemtag| { let itemtag = alt itemtag { doc::enumtag(enumdoc) { // Also need to check variant exportedness @@ -96,8 +96,8 @@ fn exported_variants_from( srv: astsrv::srv, doc: doc::enumdoc, is_exported: fn(astsrv::srv, str) -> bool -) -> [doc::variantdoc]/~ { - vec::filter_map(doc.variants) { |doc| +) -> ~[doc::variantdoc] { + do vec::filter_map(doc.variants) |doc| { if is_exported(srv, doc.name) { some(doc) } else { @@ -111,7 +111,7 @@ fn is_exported_from_mod( mod_id: doc::ast_id, item_name: str ) -> bool { - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { alt ctxt.ast_map.get(mod_id) { ast_map::node_item(item, _) { alt item.node { @@ -132,7 +132,7 @@ fn is_exported_from_crate( srv: astsrv::srv, item_name: str ) -> bool { - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { ast_util::is_exported(@item_name, ctxt.ast.node.module) } } @@ -223,7 +223,7 @@ fn should_prune_unexported_types() { #[test] fn should_not_prune_reexports() { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = reexport_pass::mk_pass().f(srv, doc); run(srv, doc) @@ -238,7 +238,7 @@ fn should_not_prune_reexports() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); run(srv, doc) } diff --git a/src/rustdoc/reexport_pass.rs b/src/rustdoc/reexport_pass.rs index b0b471397ed..a998faeac31 100644 --- a/src/rustdoc/reexport_pass.rs +++ b/src/rustdoc/reexport_pass.rs @@ -21,7 +21,7 @@ fn mk_pass() -> pass { type def_set = map::set<ast::def_id>; type def_map = map::hashmap<ast::def_id, doc::itemtag>; -type path_map = map::hashmap<str, [(str, doc::itemtag)]/~>; +type path_map = map::hashmap<str, ~[(str, doc::itemtag)]>; fn run(srv: astsrv::srv, doc: doc::doc) -> doc::doc { @@ -43,22 +43,22 @@ fn run(srv: astsrv::srv, doc: doc::doc) -> doc::doc { // to association lists. Yuck. fn to_assoc_list<K:copy, V:copy>( map: map::hashmap<K, V> -) -> [(K, V)]/~ { +) -> ~[(K, V)] { - let mut vec = []/~; - for map.each {|k, v| - vec += [(k, v)]/~; + let mut vec = ~[]; + for map.each |k, v| { + vec += ~[(k, v)]; } ret vec; } fn from_assoc_list<K:copy, V:copy>( - list: [(K, V)]/~, + list: ~[(K, V)], new_hash: fn() -> map::hashmap<K, V> ) -> map::hashmap<K, V> { let map = new_hash(); - vec::iter(list) {|elt| + do vec::iter(list) |elt| { let (k, v) = elt; map.insert(k, v); } @@ -66,28 +66,28 @@ fn from_assoc_list<K:copy, V:copy>( } fn from_def_assoc_list<V:copy>( - list: [(ast::def_id, V)]/~ + list: ~[(ast::def_id, V)] ) -> map::hashmap<ast::def_id, V> { from_assoc_list(list, ast_util::new_def_hash) } fn from_str_assoc_list<V:copy>( - list: [(str, V)]/~ + list: ~[(str, V)] ) -> map::hashmap<str, V> { from_assoc_list(list, map::str_hash) } fn build_reexport_def_set(srv: astsrv::srv) -> def_set { - let assoc_list = astsrv::exec(srv) {|ctxt| + let assoc_list = do astsrv::exec(srv) |ctxt| { let def_set = ast_util::new_def_hash(); - for ctxt.exp_map.each {|_id, defs| - for defs.each {|def| + for ctxt.exp_map.each |_id, defs| { + for defs.each |def| { if def.reexp { def_set.insert(def.id, ()); } } } - for find_reexport_impls(ctxt).each {|def| + for find_reexport_impls(ctxt).each |def| { def_set.insert(def, ()); } to_assoc_list(def_set) @@ -96,10 +96,10 @@ fn build_reexport_def_set(srv: astsrv::srv) -> def_set { from_def_assoc_list(assoc_list) } -fn find_reexport_impls(ctxt: astsrv::ctxt) -> [ast::def_id]/~ { - let defs = @mut []/~; - for_each_reexported_impl(ctxt) {|_mod_id, i| - *defs += [i.did]/~ +fn find_reexport_impls(ctxt: astsrv::ctxt) -> ~[ast::def_id] { + let defs = @mut ~[]; + do for_each_reexported_impl(ctxt) |_mod_id, i| { + *defs += ~[i.did] } ret *defs; } @@ -136,7 +136,7 @@ fn build_reexport_def_map( fn fold_mod(fold: fold::fold<ctxt>, doc: doc::moddoc) -> doc::moddoc { let doc = fold::default_seq_fold_mod(fold, doc); - for doc.items.each {|item| + for doc.items.each |item| { let def_id = ast_util::local_def(item.id()); if fold.ctxt.def_set.contains_key(def_id) { fold.ctxt.def_map.insert(def_id, item); @@ -149,7 +149,7 @@ fn build_reexport_def_map( fn fold_nmod(fold: fold::fold<ctxt>, doc: doc::nmoddoc) -> doc::nmoddoc { let doc = fold::default_seq_fold_nmod(fold, doc); - for doc.fns.each {|fndoc| + for doc.fns.each |fndoc| { let def_id = ast_util::local_def(fndoc.id()); if fold.ctxt.def_set.contains_key(def_id) { fold.ctxt.def_map.insert(def_id, doc::fntag(fndoc)); @@ -166,12 +166,12 @@ fn build_reexport_path_map(srv: astsrv::srv, -def_map: def_map) -> path_map { let def_assoc_list = to_assoc_list(def_map); #debug("def_map: %?", def_assoc_list); - let assoc_list = astsrv::exec(srv) {|ctxt| + let assoc_list = do astsrv::exec(srv) |ctxt| { let def_map = from_def_assoc_list(def_assoc_list); - let path_map = map::str_hash::<[(str,doc::itemtag)]/~>(); + let path_map = map::str_hash::<~[(str,doc::itemtag)]>(); - for ctxt.exp_map.each {|exp_id, defs| + for ctxt.exp_map.each |exp_id, defs| { let path = alt check ctxt.ast_map.get(exp_id) { ast_map::node_export(_, path) { path } }; @@ -182,20 +182,20 @@ fn build_reexport_path_map(srv: astsrv::srv, -def_map: def_map) -> path_map { }; let modpath = ast_map::path_to_str(vec::init(*path)); - let mut reexportdocs = []/~; - for defs.each {|def| + let mut reexportdocs = ~[]; + for defs.each |def| { if !def.reexp { cont; } alt def_map.find(def.id) { some(itemtag) { - reexportdocs += [(*name, itemtag)]/~; + reexportdocs += ~[(*name, itemtag)]; } _ {} } } if reexportdocs.len() > 0u { - option::iter(path_map.find(modpath)) {|docs| - reexportdocs = docs + vec::filter(reexportdocs, {|x| + do option::iter(path_map.find(modpath)) |docs| { + reexportdocs = docs + vec::filter(reexportdocs, |x| { !vec::contains(docs, x) }); } @@ -205,11 +205,11 @@ fn build_reexport_path_map(srv: astsrv::srv, -def_map: def_map) -> path_map { } } - for find_reexport_impl_docs(ctxt, def_map).each {|elt| + for find_reexport_impl_docs(ctxt, def_map).each |elt| { let (path, doc) = elt; let docs = alt path_map.find(path) { - some(docs) { docs + [(doc)]/~ } - none { [doc]/~ } + some(docs) { docs + ~[(doc)] } + none { ~[doc] } }; path_map.insert(path, docs); } @@ -223,10 +223,10 @@ fn build_reexport_path_map(srv: astsrv::srv, -def_map: def_map) -> path_map { fn find_reexport_impl_docs( ctxt: astsrv::ctxt, def_map: def_map -) -> [(str, (str, doc::itemtag))]/~ { - let docs = @mut []/~; +) -> ~[(str, (str, doc::itemtag))] { + let docs = @mut ~[]; - for_each_reexported_impl(ctxt) {|mod_id, i| + do for_each_reexported_impl(ctxt) |mod_id, i| { let path = alt ctxt.ast_map.find(mod_id) { some(ast_map::node_item(item, path)) { let path = ast_map::path_to_str(*path); @@ -245,7 +245,7 @@ fn find_reexport_impl_docs( let doc = alt check def_map.find(i.did) { some(doc) { doc } }; - *docs += [(path, (ident, doc))]/~; + *docs += ~[(path, (ident, doc))]; } ret *docs; @@ -256,7 +256,7 @@ fn for_each_reexported_impl( f: fn@(ast::node_id, resolve::_impl) ) { let visitor = @{ - visit_mod: {|a,b,c|visit_mod(ctxt, f, a, b, c)} + visit_mod: |a,b,c| visit_mod(ctxt, f, a, b, c) with *visit::default_simple_visitor() }; let visitor = visit::mk_simple_visitor(visitor); @@ -272,7 +272,7 @@ fn for_each_reexported_impl( let all_impls = all_impls(m); alt check *ctxt.impl_map.get(mod_id) { list::cons(impls, @list::nil) { - for vec::each(*impls) {|i| + for vec::each(*impls) |i| { // This impl is not an item in the current mod if !all_impls.contains_key(i.did) { // Ignore external impls because I don't @@ -289,7 +289,7 @@ fn for_each_reexported_impl( fn all_impls(m: ast::_mod) -> map::set<ast::def_id> { let all_impls = ast_util::new_def_hash(); - for m.items.each {|item| + for m.items.each |item| { alt item.node { ast::item_impl(_, _, _, _, _) { all_impls.insert(ast_util::local_def(item.id), ()); @@ -322,7 +322,7 @@ fn merge_reexports( let path = if is_topmod { doc.path() } else { - doc.path() + [doc.name()]/~ + doc.path() + ~[doc.name()] }; let new_items = get_new_items(path, fold.ctxt); @@ -334,16 +334,16 @@ fn merge_reexports( } } - fn get_new_items(path: [str]/~, path_map: path_map) -> [doc::itemtag]/~ { + fn get_new_items(path: ~[str], path_map: path_map) -> ~[doc::itemtag] { #debug("looking for reexports in path %?", path); alt path_map.find(str::connect(path, "::")) { some(name_docs) { - vec::foldl([]/~, name_docs) {|v, name_doc| + do vec::foldl(~[], name_docs) |v, name_doc| { let (name, doc) = name_doc; - v + [reexport_doc(doc, name)]/~ + v + ~[reexport_doc(doc, name)] } } - none { []/~ } + none { ~[] } } } @@ -462,7 +462,7 @@ fn should_duplicate_multiple_reexported_items() { import a::b; import a::c; \ export b; export c; \ }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = path_pass::mk_pass().f(srv, doc); let doc = run(srv, doc); @@ -484,7 +484,7 @@ fn should_rename_items_reexported_with_different_names() { #[test] fn should_reexport_in_topmod() { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, "core"); let doc = path_pass::mk_pass().f(srv, doc); run(srv, doc) @@ -515,7 +515,7 @@ fn should_not_reexport_multiple_times() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = path_pass::mk_pass().f(srv, doc); run(srv, doc) diff --git a/src/rustdoc/rustdoc.rs b/src/rustdoc/rustdoc.rs index 07749fcb41c..0d7f59e6b6d 100755 --- a/src/rustdoc/rustdoc.rs +++ b/src/rustdoc/rustdoc.rs @@ -11,7 +11,7 @@ type pass = { fn run_passes( srv: astsrv::srv, doc: doc::doc, - passes: [pass]/~ + passes: ~[pass] ) -> doc::doc { #[doc( @@ -31,11 +31,11 @@ fn run_passes( )]; let mut passno = 0; - vec::foldl(doc, passes) {|doc, pass| + do vec::foldl(doc, passes) |doc, pass| { log(debug, #fmt("pass #%d", passno)); passno += 1; log(debug, doc); - time(pass.name) {|| + do time(pass.name) || { pass.f(srv, doc) } } @@ -48,18 +48,18 @@ fn test_run_passes() { doc: doc::doc ) -> doc::doc { { - pages: [ + pages: ~[ doc::cratepage({ topmod: { item: { name: doc.cratemod().name() + "two" with doc.cratemod().item }, - items: []/~, + items: ~[], index: none } }) - ]/~ + ] } } fn pass2( @@ -67,23 +67,23 @@ fn test_run_passes() { doc: doc::doc ) -> doc::doc { { - pages: [ + pages: ~[ doc::cratepage({ topmod: { item: { name: doc.cratemod().name() + "three" with doc.cratemod().item }, - items: []/~, + items: ~[], index: none } }) - ]/~ + ] } } let source = ""; - astsrv::from_str(source) {|srv| - let passes = [ + do astsrv::from_str(source) |srv| { + let passes = ~[ { name: "", f: pass1 @@ -92,14 +92,14 @@ fn test_run_passes() { name: "", f: pass2 } - ]/~; + ]; let doc = extract::from_srv(srv, "one"); let doc = run_passes(srv, doc, passes); assert doc.cratemod().name() == "onetwothree"; } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { if vec::contains(args, "-h") { config::usage(); @@ -129,15 +129,15 @@ fn time<T>(what: str, f: fn() -> T) -> T { fn run(config: config::config) { let source_file = config.input_crate; - astsrv::from_file(source_file) {|srv| - time("wait_ast") {|| - astsrv::exec(srv) {|_ctxt| () } + do astsrv::from_file(source_file) |srv| { + do time("wait_ast") || { + do astsrv::exec(srv) |_ctxt| { } }; - let doc = time("extract") {|| + let doc = time("extract", || { let default_name = source_file; extract::from_srv(srv, default_name) - }; - run_passes(srv, doc, [ + }); + run_passes(srv, doc, ~[ reexport_pass::mk_pass(), prune_unexported_pass::mk_pass(), tystr_pass::mk_pass(), @@ -156,6 +156,6 @@ fn run(config: config::config) { markdown_pass::mk_pass( markdown_writer::make_writer_factory(config) ) - ]/~); + ]); } } diff --git a/src/rustdoc/sectionalize_pass.rs b/src/rustdoc/sectionalize_pass.rs index 9a92d527115..85879c94ba9 100644 --- a/src/rustdoc/sectionalize_pass.rs +++ b/src/rustdoc/sectionalize_pass.rs @@ -34,7 +34,7 @@ fn fold_iface(fold: fold::fold<()>, doc: doc::ifacedoc) -> doc::ifacedoc { let doc = fold::default_seq_fold_iface(fold, doc); { - methods: par::anymap(doc.methods) {|method| + methods: do par::anymap(doc.methods) |method| { let (desc, sections) = sectionalize(method.desc); { @@ -51,7 +51,7 @@ fn fold_impl(fold: fold::fold<()>, doc: doc::impldoc) -> doc::impldoc { let doc = fold::default_seq_fold_impl(fold, doc); { - methods: par::anymap(doc.methods) {|method| + methods: do par::anymap(doc.methods) |method| { let (desc, sections) = sectionalize(method.desc); { @@ -64,7 +64,7 @@ fn fold_impl(fold: fold::fold<()>, doc: doc::impldoc) -> doc::impldoc { } } -fn sectionalize(desc: option<str>) -> (option<str>, [doc::section]/~) { +fn sectionalize(desc: option<str>) -> (option<str>, ~[doc::section]) { #[doc = " @@ -85,20 +85,20 @@ fn sectionalize(desc: option<str>) -> (option<str>, [doc::section]/~) { "]; if option::is_none(desc) { - ret (none, []/~); + ret (none, ~[]); } let lines = str::lines(option::get(desc)); let mut new_desc = none::<str>; let mut current_section = none; - let mut sections = []/~; + let mut sections = ~[]; - for lines.each {|line| + for lines.each |line| { alt parse_header(line) { some(header) { if option::is_some(current_section) { - sections += [option::get(current_section)]/~; + sections += ~[option::get(current_section)]; } current_section = some({ header: header, @@ -129,7 +129,7 @@ fn sectionalize(desc: option<str>) -> (option<str>, [doc::section]/~) { } if option::is_some(current_section) { - sections += [option::get(current_section)]/~; + sections += ~[option::get(current_section)]; } (new_desc, sections) @@ -228,7 +228,7 @@ fn should_sectionalize_impl_methods() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = attr_pass::mk_pass().f(srv, doc); run(srv, doc) diff --git a/src/rustdoc/sort_item_name_pass.rs b/src/rustdoc/sort_item_name_pass.rs index 8ed03f56d23..c791c099b43 100644 --- a/src/rustdoc/sort_item_name_pass.rs +++ b/src/rustdoc/sort_item_name_pass.rs @@ -3,15 +3,15 @@ export mk_pass; fn mk_pass() -> pass { - sort_pass::mk_pass("sort_item_name") { |item1, item2| + sort_pass::mk_pass("sort_item_name", |item1, item2| { str::le(item1.name(), item2.name()) - } + }) } #[test] fn test() { let source = "mod z { } fn y() { }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = mk_pass().f(srv, doc); assert doc.cratemod().items[0].name() == "y"; diff --git a/src/rustdoc/sort_item_type_pass.rs b/src/rustdoc/sort_item_type_pass.rs index 736d7c01bb3..7536da57150 100644 --- a/src/rustdoc/sort_item_type_pass.rs +++ b/src/rustdoc/sort_item_type_pass.rs @@ -3,7 +3,7 @@ export mk_pass; fn mk_pass() -> pass { - sort_pass::mk_pass("sort_item_type") { |item1, item2| + do sort_pass::mk_pass("sort_item_type") |item1, item2| { fn score(item: doc::itemtag) -> int { alt item { doc::consttag(_) { 0 } @@ -32,7 +32,7 @@ fn test() { iface iiface { fn a(); } \ impl iimpl for int { fn a() { } } \ type itype = int;"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = mk_pass().f(srv, doc); assert doc.cratemod().items[0].name() == "iconst"; diff --git a/src/rustdoc/sort_pass.rs b/src/rustdoc/sort_pass.rs index 794e190fa5b..7cc537851dc 100644 --- a/src/rustdoc/sort_pass.rs +++ b/src/rustdoc/sort_pass.rs @@ -47,7 +47,7 @@ fn test() { } let source = "mod z { mod y { } fn x() { } } mod w { }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = mk_pass("", name_lteq).f(srv, doc); assert doc.cratemod().mods()[0].name() == "w"; @@ -64,7 +64,7 @@ fn should_be_stable() { } let source = "mod a { mod b { } } mod c { mod d { } }"; - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = mk_pass("", always_eq).f(srv, doc); assert doc.cratemod().mods()[0].items[0].name() == "b"; diff --git a/src/rustdoc/text_pass.rs b/src/rustdoc/text_pass.rs index fb34cb8ae84..6d3ef691f64 100644 --- a/src/rustdoc/text_pass.rs +++ b/src/rustdoc/text_pass.rs @@ -30,7 +30,7 @@ fn run( } fn maybe_apply_op(op: op, s: option<str>) -> option<str> { - option::map(s) {|s| op(s) } + option::map(s, |s| op(s) ) } fn fold_item(fold: fold::fold<op>, doc: doc::itemdoc) -> doc::itemdoc { @@ -44,20 +44,18 @@ fn fold_item(fold: fold::fold<op>, doc: doc::itemdoc) -> doc::itemdoc { } } -fn apply_to_sections(op: op, sections: [doc::section]/~) -> [doc::section]/~ { - par::anymap(sections) {|section, copy op| - { - header: op(section.header), - body: op(section.body) - } - } +fn apply_to_sections(op: op, sections: ~[doc::section]) -> ~[doc::section] { + par::anymap(sections, |section, copy op| { + header: op(section.header), + body: op(section.body) + }) } fn fold_enum(fold: fold::fold<op>, doc: doc::enumdoc) -> doc::enumdoc { let doc = fold::default_seq_fold_enum(fold, doc); { - variants: par::anymap(doc.variants) {|variant, copy fold| + variants: do par::anymap(doc.variants) |variant, copy fold| { { desc: maybe_apply_op(fold.ctxt, variant.desc) with variant @@ -76,8 +74,8 @@ fn fold_iface(fold: fold::fold<op>, doc: doc::ifacedoc) -> doc::ifacedoc { } } -fn apply_to_methods(op: op, docs: [doc::methoddoc]/~) -> [doc::methoddoc]/~ { - par::anymap(docs) {|doc, copy op| +fn apply_to_methods(op: op, docs: ~[doc::methoddoc]) -> ~[doc::methoddoc] { + do par::anymap(docs) |doc, copy op| { { brief: maybe_apply_op(op, doc.brief), desc: maybe_apply_op(op, doc.desc), @@ -253,12 +251,12 @@ fn should_execute_on_impl_method_section_bodies() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = attr_pass::mk_pass().f(srv, doc); let doc = desc_to_brief_pass::mk_pass().f(srv, doc); let doc = sectionalize_pass::mk_pass().f(srv, doc); - mk_pass("", {|s| str::trim(s)}).f(srv, doc) + mk_pass("", |s| str::trim(s) ).f(srv, doc) } } } diff --git a/src/rustdoc/trim_pass.rs b/src/rustdoc/trim_pass.rs index e3c79de22bb..4f7a90743ac 100644 --- a/src/rustdoc/trim_pass.rs +++ b/src/rustdoc/trim_pass.rs @@ -10,7 +10,7 @@ is interpreted as the brief description. export mk_pass; fn mk_pass() -> pass { - text_pass::mk_pass("trim", {|s| str::trim(s)}) + text_pass::mk_pass("trim", |s| str::trim(s) ) } #[test] @@ -23,7 +23,7 @@ fn should_trim_text() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); let doc = attr_pass::mk_pass().f(srv, doc); mk_pass().f(srv, doc) diff --git a/src/rustdoc/tystr_pass.rs b/src/rustdoc/tystr_pass.rs index 553e9d32e53..69b86bff19b 100644 --- a/src/rustdoc/tystr_pass.rs +++ b/src/rustdoc/tystr_pass.rs @@ -45,7 +45,7 @@ fn fold_fn( } fn get_fn_sig(srv: astsrv::srv, fn_id: doc::ast_id) -> option<str> { - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { alt check ctxt.ast_map.get(fn_id) { ast_map::node_item(@{ ident: ident, @@ -80,7 +80,7 @@ fn fold_const( let srv = fold.ctxt; { - sig: some(astsrv::exec(srv) {|ctxt| + sig: some(do astsrv::exec(srv) |ctxt| { alt check ctxt.ast_map.get(doc.id()) { ast_map::node_item(@{ node: ast::item_const(ty, _), _ @@ -107,14 +107,14 @@ fn fold_enum( let srv = fold.ctxt; { - variants: par::anymap(doc.variants) {|variant| - let sig = astsrv::exec(srv) {|ctxt| + variants: do par::anymap(doc.variants) |variant| { + let sig = do astsrv::exec(srv) |ctxt| { alt check ctxt.ast_map.get(doc_id) { ast_map::node_item(@{ node: ast::item_enum(ast_variants, _, _), _ }, _) { let ast_variant = option::get( - vec::find(ast_variants) {|v| + do vec::find(ast_variants) |v| { *v.node.name == variant.name }); @@ -151,9 +151,9 @@ fn fold_iface( fn merge_methods( srv: astsrv::srv, item_id: doc::ast_id, - docs: [doc::methoddoc]/~ -) -> [doc::methoddoc]/~ { - par::anymap(docs) {|doc| + docs: ~[doc::methoddoc] +) -> ~[doc::methoddoc] { + do par::anymap(docs) |doc| { { sig: get_method_sig(srv, item_id, doc.name) with doc @@ -166,14 +166,14 @@ fn get_method_sig( item_id: doc::ast_id, method_name: str ) -> option<str> { - astsrv::exec(srv) {|ctxt| + do astsrv::exec(srv) |ctxt| { alt check ctxt.ast_map.get(item_id) { ast_map::node_item(@{ node: ast::item_iface(_, _, methods), _ }, _) { - alt check vec::find(methods) {|method| + alt check vec::find(methods, |method| { *method.ident == method_name - } { + }) { some(method) { some(pprust::fun_to_str( method.decl, @@ -186,9 +186,9 @@ fn get_method_sig( ast_map::node_item(@{ node: ast::item_impl(_, _, _, _, methods), _ }, _) { - alt check vec::find(methods) {|method| + alt check vec::find(methods, |method| { *method.ident == method_name - } { + }) { some(method) { some(pprust::fun_to_str( method.decl, @@ -216,14 +216,14 @@ fn fold_impl( let srv = fold.ctxt; - let (iface_ty, self_ty) = astsrv::exec(srv) {|ctxt| + let (iface_ty, self_ty) = do astsrv::exec(srv) |ctxt| { alt ctxt.ast_map.get(doc.id()) { ast_map::node_item(@{ node: ast::item_impl(_, _, iface_ty, self_ty, _), _ }, _) { - let iface_ty = option::map(iface_ty) {|p| + let iface_ty = option::map(iface_ty, |p| { pprust::path_to_str(p.path) - }; + }); (iface_ty, some(pprust::ty_to_str(self_ty))) } _ { fail "expected impl" } @@ -271,7 +271,7 @@ fn fold_type( let srv = fold.ctxt; { - sig: astsrv::exec(srv) {|ctxt| + sig: do astsrv::exec(srv) |ctxt| { alt ctxt.ast_map.get(doc.id()) { ast_map::node_item(@{ ident: ident, @@ -300,7 +300,7 @@ fn should_add_type_signatures() { #[cfg(test)] mod test { fn mk_doc(source: str) -> doc::doc { - astsrv::from_str(source) {|srv| + do astsrv::from_str(source) |srv| { let doc = extract::from_srv(srv, ""); run(srv, doc) } diff --git a/src/rustdoc/unindent_pass.rs b/src/rustdoc/unindent_pass.rs index c7063f4ca05..b5e11b0dfd9 100644 --- a/src/rustdoc/unindent_pass.rs +++ b/src/rustdoc/unindent_pass.rs @@ -21,7 +21,8 @@ fn unindent(s: str) -> str { let lines = str::lines_any(s); let mut saw_first_line = false; let mut saw_second_line = false; - let min_indent = vec::foldl(uint::max_value, lines) {|min_indent, line| + let min_indent = do vec::foldl(uint::max_value, lines) + |min_indent, line| { // After we see the first non-whitespace line, look at // the line we have. If it is not whitespace, and therefore @@ -47,7 +48,7 @@ fn unindent(s: str) -> str { } else { saw_first_line = true; let mut spaces = 0u; - str::all(line) {|char| + do str::all(line) |char| { // Only comparing against space because I wouldn't // know what to do with mixed whitespace chars if char == ' ' { @@ -62,8 +63,8 @@ fn unindent(s: str) -> str { }; if check vec::is_not_empty(lines) { - let unindented = [str::trim(vec::head(lines))]/~ - + par::anymap(vec::tail(lines)) {|line| + let unindented = ~[str::trim(vec::head(lines))] + + do par::anymap(vec::tail(lines)) |line| { if str::is_whitespace(line) { line } else { diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index bdf16266d2e..6d99b0f4cb1 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -172,3 +172,13 @@ extern "C" LLVMTypeRef LLVMMetadataTypeInContext(LLVMContextRef C) { extern "C" LLVMTypeRef LLVMMetadataType(void) { return LLVMMetadataTypeInContext(LLVMGetGlobalContext()); } + +extern "C" LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B, + AtomicRMWInst::BinOp op, + LLVMValueRef target, + LLVMValueRef source, + AtomicOrdering order) { + return wrap(unwrap(B)->CreateAtomicRMW(op, + unwrap(target), unwrap(source), + order)); +} diff --git a/src/rustllvm/rustllvm.def.in b/src/rustllvm/rustllvm.def.in index 5f38087c18a..8d82d930fd6 100644 --- a/src/rustllvm/rustllvm.def.in +++ b/src/rustllvm/rustllvm.def.in @@ -82,6 +82,7 @@ LLVMArrayType LLVMBasicBlockAsValue LLVMBlockAddress LLVMBuildAShr +LLVMBuildAtomicRMW LLVMBuildAdd LLVMBuildAggregateRet LLVMBuildAlloca diff --git a/src/snapshots.txt b/src/snapshots.txt index fe90dae15cf..5715a5477d3 100644 --- a/src/snapshots.txt +++ b/src/snapshots.txt @@ -1,3 +1,27 @@ +S 2012-06-30 ed834f0 + macos-i386 8fc728a70801311e8de88b4f1bcfb3cdbfe1ca44 + macos-x86_64 2102405e1aa524431f4c833c038f4e8c07f6be92 + linux-i386 18d79b62a4e5472ef1065ab7c11f451d33af4481 + linux-x86_64 3e08d33682d7dfcb2783521ab9d9f165adb579ae + winnt-i386 b019cfaf07c8b29f259475ea843a583a15da76b7 + +S 2012-06-30 1c13507 + freebsd-x86_64 87c699b23a10443a7a44ba1a0975ac7cde6355d2 + +S 2012-06-28 7aa43b2 + macos-i386 9c1373ffdfb409d7ade9ed560bcbdf0c26eede51 + macos-x86_64 1ce9c1cd10d0eb06daca0c796bab8efa2af8c59e + linux-i386 0a240d29360f4fd72732d2e7877a03d63dab4e7c + linux-x86_64 1137ccd9de01b69dfce96647d44d4686f9915090 + winnt-i386 a8b610307de821f5a38532d3de2d46c7ac0e1e25 + +S 2012-06-28 810677e + macos-x86_64 ee659583a09bb8466985428e4baa16498eedf4fb + macos-i386 8e97646a4a87c239ce5075c24b8bfb490dd90cf9 + linux-x86_64 0520e6f907981b6900a1b98eee43d5416c47c801 + linux-i386 fa960de5a5e21a822aca6c2924426bef2cc74367 + winnt-i386 d331a09b93fa6081908a8f1c06e4d67565256074 + S 2012-06-26 b9d3ad0 macos-x86_64 48206274146453f19f35be553469ac40d6319884 macos-i386 042bc8d4275947e74f65e52eda30eb0780e8f385 diff --git a/src/test/auxiliary/cci_capture_clause.rs b/src/test/auxiliary/cci_capture_clause.rs index 07223c10049..d14ed0e99f5 100644 --- a/src/test/auxiliary/cci_capture_clause.rs +++ b/src/test/auxiliary/cci_capture_clause.rs @@ -5,7 +5,7 @@ import comm::*; fn foo<T: send copy>(x: T) -> port<T> { let p = port(); let c = chan(p); - task::spawn() {|copy c, copy x| + do task::spawn() |copy c, copy x| { c.send(x); } p diff --git a/src/test/auxiliary/cci_class_5.rs b/src/test/auxiliary/cci_class_5.rs index 3fdadcda68d..9a3f0a0ad36 100644 --- a/src/test/auxiliary/cci_class_5.rs +++ b/src/test/auxiliary/cci_class_5.rs @@ -3,7 +3,7 @@ mod kitties { class cat { priv { let mut meows : uint; - fn nap() { for uint::range(1u, 10000u) {|_i|}} + fn nap() { for uint::range(1u, 10000u) |_i|{}} } let how_hungry : int; diff --git a/src/test/auxiliary/cci_class_6.rs b/src/test/auxiliary/cci_class_6.rs index 8460b670753..ed52788440f 100644 --- a/src/test/auxiliary/cci_class_6.rs +++ b/src/test/auxiliary/cci_class_6.rs @@ -2,17 +2,17 @@ mod kitties { class cat<U> { priv { - let mut info : [U]/~; + let mut info : ~[U]; let mut meows : uint; } let how_hungry : int; - new(in_x : uint, in_y : int, -in_info: [U]/~) + new(in_x : uint, in_y : int, -in_info: ~[U]) { self.meows = in_x; self.how_hungry = in_y; self.info <- in_info; } - fn speak<T>(stuff: [T]/~) { + fn speak<T>(stuff: ~[T]) { self.meows += stuff.len(); } fn meow_count() -> uint { self.meows } diff --git a/src/test/auxiliary/cci_iter_lib.rs b/src/test/auxiliary/cci_iter_lib.rs index af575dc00f6..ca7140f4504 100644 --- a/src/test/auxiliary/cci_iter_lib.rs +++ b/src/test/auxiliary/cci_iter_lib.rs @@ -1,7 +1,7 @@ #[link(name="cci_iter_lib", vers="0.0")]; #[inline] -fn iter<T>(v: [T]/~, f: fn(T)) { +fn iter<T>(v: ~[T], f: fn(T)) { let mut i = 0u; let n = vec::len(v); while i < n { diff --git a/src/test/auxiliary/cci_nested_lib.rs b/src/test/auxiliary/cci_nested_lib.rs index 87ab9bbcfea..cb3d9de652f 100644 --- a/src/test/auxiliary/cci_nested_lib.rs +++ b/src/test/auxiliary/cci_nested_lib.rs @@ -9,7 +9,7 @@ fn alist_add<A: copy, B: copy>(lst: alist<A,B>, k: A, v: B) { fn alist_get<A: copy, B: copy>(lst: alist<A,B>, k: A) -> B { let eq_fn = lst.eq_fn; - for lst.data.each {|entry| + for lst.data.each |entry| { if eq_fn(entry.key, k) { ret entry.value; } } fail; diff --git a/src/test/auxiliary/cci_no_inline_lib.rs b/src/test/auxiliary/cci_no_inline_lib.rs index 468ebeb198c..2adad73dd72 100644 --- a/src/test/auxiliary/cci_no_inline_lib.rs +++ b/src/test/auxiliary/cci_no_inline_lib.rs @@ -1,7 +1,7 @@ #[link(name="cci_no_inline_lib", vers="0.0")]; // same as cci_iter_lib, more-or-less, but not marked inline -fn iter(v: [uint]/~, f: fn(uint)) { +fn iter(v: ~[uint], f: fn(uint)) { let mut i = 0u; let n = vec::len(v); while i < n { diff --git a/src/test/auxiliary/issue2378a.rs b/src/test/auxiliary/issue2378a.rs index dfe0f582435..3a681ef7a3b 100644 --- a/src/test/auxiliary/issue2378a.rs +++ b/src/test/auxiliary/issue2378a.rs @@ -1,7 +1,7 @@ enum maybe<T> { just(T), nothing } impl methods<T:copy> for maybe<T> { - fn []/~(idx: uint) -> T { + fn ~[](idx: uint) -> T { alt self { just(t) { t } nothing { fail; } diff --git a/src/test/auxiliary/issue2378b.rs b/src/test/auxiliary/issue2378b.rs index e26be8c6973..322ee2ed444 100644 --- a/src/test/auxiliary/issue2378b.rs +++ b/src/test/auxiliary/issue2378b.rs @@ -6,7 +6,7 @@ import issue2378a::methods; type two_maybes<T> = {a: maybe<T>, b: maybe<T>}; impl methods<T:copy> for two_maybes<T> { - fn []/~(idx: uint) -> (T, T) { + fn ~[](idx: uint) -> (T, T) { (self.a[idx], self.b[idx]) } } \ No newline at end of file diff --git a/src/test/auxiliary/test_comm.rs b/src/test/auxiliary/test_comm.rs index 789836c9d63..50553fbdacf 100644 --- a/src/test/auxiliary/test_comm.rs +++ b/src/test/auxiliary/test_comm.rs @@ -34,7 +34,7 @@ class port_ptr<T:send> { self.po = po; } drop unsafe { #debug("in the port_ptr destructor"); - task::unkillable {|| + do task::unkillable || { let yield = 0u; let yieldp = ptr::addr_of(yield); rustrt::rust_port_begin_detach(self.po, yieldp); diff --git a/src/test/bench/core-std.rs b/src/test/bench/core-std.rs index e2cacc2e6d6..52c279716ea 100644 --- a/src/test/bench/core-std.rs +++ b/src/test/bench/core-std.rs @@ -8,7 +8,7 @@ import std::map::{map, hashmap}; import io::{reader, reader_util}; -fn main(argv: [str]/~) { +fn main(argv: ~[str]) { #macro[ [#bench[id], maybe_run_test(argv, #stringify(id), id) @@ -25,7 +25,7 @@ fn main(argv: [str]/~) { #bench[vec_push_all]; } -fn maybe_run_test(argv: [str]/&, name: str, test: fn()) { +fn maybe_run_test(argv: &[str], name: str, test: fn()) { let mut run_test = false; if os::getenv("RUST_BENCH").is_some() { run_test = true } @@ -44,7 +44,7 @@ fn maybe_run_test(argv: [str]/&, name: str, test: fn()) { fn shift_push() { let mut v1 = vec::from_elem(30000, 1); - let mut v2 = []/~; + let mut v2 = ~[]; while v1.len() > 0 { vec::push(v2, vec::shift(v1)); @@ -57,7 +57,7 @@ fn read_line() { "src/test/bench/shootout-k-nucleotide.data" ); - for int::range(0, 3) {|_i| + for int::range(0, 3) |_i| { let reader = result::get(io::file_reader(path)); while !reader.eof() { reader.read_line(); @@ -70,12 +70,12 @@ fn str_set() { let s = map::hashmap(str::hash, str::eq); - for int::range(0, 1000) {|_i| + for int::range(0, 1000) |_i| { map::set_add(s, r.gen_str(10)); } let mut found = 0; - for int::range(0, 1000) {|_i| + for int::range(0, 1000) |_i| { alt s.find(r.gen_str(10)) { some(_) { found += 1; } none { } @@ -86,7 +86,7 @@ fn str_set() { fn vec_plus() { let r = rand::rng(); - let mut v = []/~; + let mut v = ~[]; let mut i = 0; while i < 1500 { let rv = vec::from_elem(r.gen_uint_range(0, i + 1), i); @@ -103,7 +103,7 @@ fn vec_plus() { fn vec_append() { let r = rand::rng(); - let mut v = []/~; + let mut v = ~[]; let mut i = 0; while i < 1500 { let rv = vec::from_elem(r.gen_uint_range(0, i + 1), i); @@ -120,8 +120,8 @@ fn vec_append() { fn vec_push_all() { let r = rand::rng(); - let mut v = []/~; - for uint::range(0, 1500) {|i| + let mut v = ~[]; + for uint::range(0, 1500) |i| { let mut rv = vec::from_elem(r.gen_uint_range(0, i + 1), i); if r.gen_bool() { vec::push_all(v, rv); diff --git a/src/test/bench/core-uint-to-str.rs b/src/test/bench/core-uint-to-str.rs index 44211aad3f3..5dac712414d 100644 --- a/src/test/bench/core-uint-to-str.rs +++ b/src/test/bench/core-uint-to-str.rs @@ -1,15 +1,15 @@ -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "10000000"]/~ + ~["", "10000000"] } else if args.len() <= 1u { - ["", "100000"]/~ + ~["", "100000"] } else { args }; let n = uint::from_str(args[1]).get(); - for uint::range(0u, n) {|i| + for uint::range(0u, n) |i| { let x = uint::to_str(i, 10u); log(debug, x); } diff --git a/src/test/bench/core-vec-append.rs b/src/test/bench/core-vec-append.rs index 34d756bb1e4..7e4a327ea12 100644 --- a/src/test/bench/core-vec-append.rs +++ b/src/test/bench/core-vec-append.rs @@ -4,27 +4,27 @@ use std; import dvec::{dvec, extensions}; import io::writer_util; -fn collect_raw(num: uint) -> [uint]/~ { - let mut result = []/~; - for uint::range(0u, num) { |i| +fn collect_raw(num: uint) -> ~[uint] { + let mut result = ~[]; + for uint::range(0u, num) |i| { vec::push(result, i); } ret result; } -fn collect_dvec(num: uint) -> [mut uint]/~ { +fn collect_dvec(num: uint) -> ~[mut uint] { let result = dvec(); - for uint::range(0u, num) { |i| + for uint::range(0u, num) |i| { result.push(i); } ret dvec::unwrap(result); } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "50000000"]/~ + ~["", "50000000"] } else if args.len() <= 1u { - ["", "100000"]/~ + ~["", "100000"] } else { args }; @@ -37,9 +37,9 @@ fn main(args: [str]/~) { // check each vector assert raw_v.len() == max; - for raw_v.eachi { |i, v| assert i == v; } + for raw_v.eachi |i, v| { assert i == v; } assert dvec_v.len() == max; - for dvec_v.eachi { |i, v| assert i == v; } + for dvec_v.eachi |i, v| { assert i == v; } let raw = mid - start; let dvec = end - mid; diff --git a/src/test/bench/graph500-bfs.rs b/src/test/bench/graph500-bfs.rs index 0c768d05a0e..9e24e9defcc 100644 --- a/src/test/bench/graph500-bfs.rs +++ b/src/test/bench/graph500-bfs.rs @@ -16,10 +16,10 @@ import comm::*; import int::abs; type node_id = i64; -type graph = [[node_id]/~]/~; -type bfs_result = [node_id]/~; +type graph = ~[~[node_id]]; +type bfs_result = ~[node_id]; -fn make_edges(scale: uint, edgefactor: uint) -> [(node_id, node_id)]/~ { +fn make_edges(scale: uint, edgefactor: uint) -> ~[(node_id, node_id)] { let r = rand::xorshift(); fn choose_edge(i: node_id, j: node_id, scale: uint, r: rand::rng) @@ -60,38 +60,38 @@ fn make_edges(scale: uint, edgefactor: uint) -> [(node_id, node_id)]/~ { } } - vec::from_fn((1u << scale) * edgefactor) {|_i| + do vec::from_fn((1u << scale) * edgefactor) |_i| { choose_edge(0i64, 0i64, scale, r) } } -fn make_graph(N: uint, edges: [(node_id, node_id)]/~) -> graph { - let graph = vec::from_fn(N) {|_i| - map::hashmap::<node_id, ()>({|x| x as uint }, {|x, y| x == y }) +fn make_graph(N: uint, edges: ~[(node_id, node_id)]) -> graph { + let graph = do vec::from_fn(N) |_i| { + map::hashmap::<node_id, ()>(|x| x as uint , |x, y| x == y ) }; - vec::each(edges) {|e| + do vec::each(edges) |e| { let (i, j) = e; map::set_add(graph[i], j); map::set_add(graph[j], i); true } - graph.map() {|v| + do graph.map() |v| { map::vec_from_set(v) } } -fn gen_search_keys(graph: graph, n: uint) -> [node_id]/~ { - let keys = map::hashmap::<node_id, ()>({|x| x as uint }, {|x, y| x == y }); +fn gen_search_keys(graph: graph, n: uint) -> ~[node_id] { + let keys = map::hashmap::<node_id, ()>(|x| x as uint , |x, y| x == y ); let r = rand::rng(); while keys.size() < n { let k = r.gen_uint_range(0u, graph.len()); - if graph[k].len() > 0u && vec::any(graph[k]) {|i| + if graph[k].len() > 0u && vec::any(graph[k], |i| { i != k as node_id - } { + }) { map::set_add(keys, k as node_id); } } @@ -102,7 +102,7 @@ fn gen_search_keys(graph: graph, n: uint) -> [node_id]/~ { Nodes that are unreachable have a parent of -1."] fn bfs(graph: graph, key: node_id) -> bfs_result { - let marks : [mut node_id]/~ + let marks : ~[mut node_id] = vec::to_mut(vec::from_elem(vec::len(graph), -1i64)); let Q = deque::create(); @@ -113,7 +113,7 @@ fn bfs(graph: graph, key: node_id) -> bfs_result { while Q.size() > 0u { let t = Q.pop_front(); - graph[t].each() {|k| + do graph[t].each() |k| { if marks[k] == -1i64 { marks[k] = t; Q.add_back(k); @@ -140,7 +140,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result { black(node_id) }; - let mut colors = vec::from_fn(graph.len()) {|i| + let mut colors = do vec::from_fn(graph.len()) |i| { if i as node_id == key { gray(key) } @@ -161,7 +161,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result { // Do the BFS. log(info, #fmt("PBFS iteration %?", i)); i += 1u; - colors = colors.mapi() {|i, c| + colors = do colors.mapi() |i, c| { let c : color = c; alt c { white { @@ -171,7 +171,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result { let mut color = white; - neighbors.each() {|k| + do neighbors.each() |k| { if is_gray(colors[k]) { color = gray(k); false @@ -188,7 +188,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result { } // Convert the results. - vec::map(colors) {|c| + do vec::map(colors) |c| { alt c { white { -1i64 } black(parent) { parent } @@ -209,7 +209,7 @@ fn pbfs(&&graph: arc::arc<graph>, key: node_id) -> bfs_result { black(node_id) }; - let mut colors = vec::from_fn((*arc::get(&graph)).len()) {|i| + let mut colors = do vec::from_fn((*arc::get(&graph)).len()) |i| { if i as node_id == key { gray(key) } @@ -235,7 +235,7 @@ fn pbfs(&&graph: arc::arc<graph>, key: node_id) -> bfs_result { let color = arc::arc(colors); - colors = par::mapi_factory(*arc::get(&color)) {|| + colors = do par::mapi_factory(*arc::get(&color)) || { let colors = arc::clone(&color); let graph = arc::clone(&graph); fn~(i: uint, c: color) -> color { @@ -250,7 +250,7 @@ fn pbfs(&&graph: arc::arc<graph>, key: node_id) -> bfs_result { let mut color = white; - neighbors.each() {|k| + do neighbors.each() |k| { if is_gray(colors[k]) { color = gray(k); false @@ -268,7 +268,7 @@ fn pbfs(&&graph: arc::arc<graph>, key: node_id) -> bfs_result { } // Convert the results. - par::map(colors) {|c| + do par::map(colors) |c| { alt c { white { -1i64 } black(parent) { parent } @@ -278,7 +278,7 @@ fn pbfs(&&graph: arc::arc<graph>, key: node_id) -> bfs_result { } #[doc="Performs at least some of the validation in the Graph500 spec."] -fn validate(edges: [(node_id, node_id)]/~, +fn validate(edges: ~[(node_id, node_id)], root: node_id, tree: bfs_result) -> bool { // There are 5 things to test. Below is code for each of them. @@ -291,9 +291,9 @@ fn validate(edges: [(node_id, node_id)]/~, log(info, "Verifying tree structure..."); let mut status = true; - let level = tree.map() {|parent| + let level = do tree.map() |parent| { let mut parent = parent; - let mut path = []/~; + let mut path = ~[]; if parent == -1i64 { // This node was not in the tree. @@ -322,7 +322,7 @@ fn validate(edges: [(node_id, node_id)]/~, log(info, "Verifying tree edges..."); - let status = tree.alli() {|k, parent| + let status = do tree.alli() |k, parent| { if parent != root && parent != -1i64 { level[parent] == level[k] - 1 } @@ -338,7 +338,7 @@ fn validate(edges: [(node_id, node_id)]/~, log(info, "Verifying graph edges..."); - let status = edges.all() {|e| + let status = do edges.all() |e| { let (u, v) = e; abs(level[u] - level[v]) <= 1 @@ -355,7 +355,7 @@ fn validate(edges: [(node_id, node_id)]/~, log(info, "Verifying tree and graph edges..."); - let status = par::alli(tree) {|u, v| + let status = do par::alli(tree) |u, v| { let u = u as node_id; if v == -1i64 || u == root { true @@ -371,11 +371,11 @@ fn validate(edges: [(node_id, node_id)]/~, true } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "15", "48"]/~ + ~["", "15", "48"] } else if args.len() <= 1u { - ["", "10", "16"]/~ + ~["", "10", "16"] } else { args }; @@ -397,7 +397,7 @@ fn main(args: [str]/~) { let stop = time::precise_time_s(); let mut total_edges = 0u; - vec::each(graph) {|edges| total_edges += edges.len(); true }; + vec::each(graph, |edges| { total_edges += edges.len(); true }); io::stdout().write_line(#fmt("Generated graph with %? edges in %? seconds.", total_edges / 2u, @@ -408,7 +408,7 @@ fn main(args: [str]/~) { let graph_arc = arc::arc(copy graph); - gen_search_keys(graph, num_keys).map() {|root| + do gen_search_keys(graph, num_keys).map() |root| { io::stdout().write_line(""); io::stdout().write_line(#fmt("Search key: %?", root)); diff --git a/src/test/bench/msgsend-ring-new.rs b/src/test/bench/msgsend-ring-new.rs index b23bd25436b..3d72895038c 100644 --- a/src/test/bench/msgsend-ring-new.rs +++ b/src/test/bench/msgsend-ring-new.rs @@ -15,17 +15,17 @@ fn thread_ring(i: uint, num_chan: chan<uint>, num_port: port<uint>) { // Send/Receive lots of messages. - for uint::range(0u, count) {|j| + for uint::range(0u, count) |j| { num_chan.send(i * j); num_port.recv(); }; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "100", "10000"]/~ + ~["", "100", "10000"] } else if args.len() <= 1u { - ["", "100", "1000"]/~ + ~["", "100", "1000"] } else { args }; @@ -39,18 +39,18 @@ fn main(args: [str]/~) { let start = time::precise_time_s(); // create the ring - let mut futures = []/~; + let mut futures = ~[]; - for uint::range(1u, num_tasks) {|i| + for uint::range(1u, num_tasks) |i| { let get_chan = port(); let get_chan_chan = chan(get_chan); { let num_chan = num_chan.clone(); - futures += [future::spawn {|move num_chan, move get_chan_chan| + futures += ~[do future::spawn |move num_chan, move get_chan_chan| { let p = port(); get_chan_chan.send(chan(p)); thread_ring(i, msg_per_task, num_chan, p) - }]/~; + }]; } num_chan = get_chan.recv(); @@ -60,7 +60,7 @@ fn main(args: [str]/~) { thread_ring(0u, msg_per_task, num_chan, num_port); // synchronize - for futures.each {|f| f.get() }; + for futures.each |f| { f.get() }; let stop = time::precise_time_s(); diff --git a/src/test/bench/msgsend-ring.rs b/src/test/bench/msgsend-ring.rs index 754061a5955..59da4b6c9c1 100644 --- a/src/test/bench/msgsend-ring.rs +++ b/src/test/bench/msgsend-ring.rs @@ -15,17 +15,17 @@ fn thread_ring(i: uint, num_chan: comm::chan<uint>, num_port: comm::port<uint>) { // Send/Receive lots of messages. - for uint::range(0u, count) {|j| + for uint::range(0u, count) |j| { num_chan.send(i * j); num_port.recv(); }; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "100", "10000"]/~ + ~["", "100", "10000"] } else if args.len() <= 1u { - ["", "100", "1000"]/~ + ~["", "100", "1000"] } else { args }; @@ -39,17 +39,17 @@ fn main(args: [str]/~) { let start = time::precise_time_s(); // create the ring - let mut futures = []/~; + let mut futures = ~[]; - for uint::range(1u, num_tasks) {|i| + for uint::range(1u, num_tasks) |i| { let get_chan = port(); let get_chan_chan = chan(get_chan); - futures += [future::spawn {|copy num_chan, move get_chan_chan| + futures += ~[do future::spawn |copy num_chan, move get_chan_chan| { let p = port(); get_chan_chan.send(chan(p)); thread_ring(i, msg_per_task, num_chan, p) - }]/~; + }]; num_chan = get_chan.recv(); }; @@ -58,7 +58,7 @@ fn main(args: [str]/~) { thread_ring(0u, msg_per_task, num_chan, num_port); // synchronize - for futures.each {|f| f.get() }; + for futures.each |f| { f.get() }; let stop = time::precise_time_s(); diff --git a/src/test/bench/msgsend.rs b/src/test/bench/msgsend.rs index eef7b0d7257..2b47e78204f 100644 --- a/src/test/bench/msgsend.rs +++ b/src/test/bench/msgsend.rs @@ -27,27 +27,27 @@ fn server(requests: comm::port<request>, responses: comm::chan<uint>) { comm::send(responses, count); } -fn run(args: [str]/~) { +fn run(args: ~[str]) { let from_child = comm::port(); let to_parent = comm::chan(from_child); - let to_child = task::spawn_listener {|po| + let to_child = do task::spawn_listener |po| { server(po, to_parent); }; let size = option::get(uint::from_str(args[1])); let workers = option::get(uint::from_str(args[2])); let start = std::time::precise_time_s(); let to_child = to_child; - let mut worker_results = []/~; - for uint::range(0u, workers) {|_i| + let mut worker_results = ~[]; + for uint::range(0u, workers) |_i| { let builder = task::builder(); vec::push(worker_results, task::future_result(builder)); - task::run(builder) {|| - for uint::range(0u, size / workers) {|_i| + do task::run(builder) || { + for uint::range(0u, size / workers) |_i| { comm::send(to_child, bytes(100u)); } }; } - vec::iter(worker_results) {|r| future::get(r); } + vec::iter(worker_results, |r| { future::get(r); } ); comm::send(to_child, stop); let result = comm::recv(from_child); let end = std::time::precise_time_s(); @@ -58,11 +58,11 @@ fn run(args: [str]/~) { io::stdout().write_str(#fmt("Throughput=%f per sec\n", thruput)); } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "1000000", "10000"]/~ + ~["", "1000000", "10000"] } else if args.len() <= 1u { - ["", "10000", "4"]/~ + ~["", "10000", "4"] } else { args }; diff --git a/src/test/bench/shootout-ackermann.rs b/src/test/bench/shootout-ackermann.rs index 7d74e453523..e1fd385957b 100644 --- a/src/test/bench/shootout-ackermann.rs +++ b/src/test/bench/shootout-ackermann.rs @@ -12,11 +12,11 @@ fn ack(m: int, n: int) -> int { } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "12"]/~ + ~["", "12"] } else if args.len() <= 1u { - ["", "8"]/~ + ~["", "8"] } else { args }; diff --git a/src/test/bench/shootout-binarytrees.rs b/src/test/bench/shootout-binarytrees.rs index bcda26ce183..82d18f36279 100644 --- a/src/test/bench/shootout-binarytrees.rs +++ b/src/test/bench/shootout-binarytrees.rs @@ -22,11 +22,11 @@ fn bottom_up_tree(arena: &a.arena::arena, item: int, depth: int) -> &a.tree { ret new(*arena) nil; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "17"]/~ + ~["", "17"] } else if args.len() <= 1u { - ["", "8"]/~ + ~["", "8"] } else { args }; diff --git a/src/test/bench/shootout-fannkuchredux.rs b/src/test/bench/shootout-fannkuchredux.rs index b4bdc8d7ba9..a537a5adc1c 100644 --- a/src/test/bench/shootout-fannkuchredux.rs +++ b/src/test/bench/shootout-fannkuchredux.rs @@ -58,11 +58,11 @@ fn fannkuch(n: int) -> int { ret flips; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "10"]/~ + ~["", "10"] } else if args.len() <= 1u { - ["", "8"]/~ + ~["", "8"] } else { args }; diff --git a/src/test/bench/shootout-fasta.rs b/src/test/bench/shootout-fasta.rs index bd903b373b4..5f7dbdf71ff 100644 --- a/src/test/bench/shootout-fasta.rs +++ b/src/test/bench/shootout-fasta.rs @@ -23,16 +23,16 @@ fn myrandom_next(r: myrandom, mx: u32) -> u32 { type aminoacids = {ch: char, prob: u32}; -fn make_cumulative(aa: [aminoacids]/~) -> [aminoacids]/~ { +fn make_cumulative(aa: ~[aminoacids]) -> ~[aminoacids] { let mut cp: u32 = 0u32; - let mut ans: [aminoacids]/~ = []/~; - for aa.each {|a| cp += a.prob; ans += [{ch: a.ch, prob: cp}]/~; } + let mut ans: ~[aminoacids] = ~[]; + for aa.each |a| { cp += a.prob; ans += ~[{ch: a.ch, prob: cp}]; } ret ans; } -fn select_random(r: u32, genelist: [aminoacids]/~) -> char { +fn select_random(r: u32, genelist: ~[aminoacids]) -> char { if r < genelist[0].prob { ret genelist[0].ch; } - fn bisect(v: [aminoacids]/~, lo: uint, hi: uint, target: u32) -> char { + fn bisect(v: ~[aminoacids], lo: uint, hi: uint, target: u32) -> char { if hi > lo + 1u { let mid: uint = lo + (hi - lo) / 2u; if target < v[mid].prob { @@ -43,11 +43,11 @@ fn select_random(r: u32, genelist: [aminoacids]/~) -> char { ret bisect(genelist, 0u, vec::len::<aminoacids>(genelist) - 1u, r); } -fn make_random_fasta(wr: io::writer, id: str, desc: str, genelist: [aminoacids]/~, n: int) { +fn make_random_fasta(wr: io::writer, id: str, desc: str, genelist: ~[aminoacids], n: int) { wr.write_line(">" + id + " " + desc); let rng = @{mut last: std::rand::rng().next()}; let mut op: str = ""; - for uint::range(0u, n as uint) {|_i| + for uint::range(0u, n as uint) |_i| { str::push_char(op, select_random(myrandom_next(rng, 100u32), genelist)); if str::len(op) >= LINE_LENGTH() { @@ -62,7 +62,7 @@ fn make_repeat_fasta(wr: io::writer, id: str, desc: str, s: str, n: int) unsafe wr.write_line(">" + id + " " + desc); let mut op: str = ""; let sl: uint = str::len(s); - for uint::range(0u, n as uint) {|i| + for uint::range(0u, n as uint) |i| { str::unsafe::push_byte(op, s[i % sl]); if str::len(op) >= LINE_LENGTH() { wr.write_line(op); @@ -74,33 +74,33 @@ fn make_repeat_fasta(wr: io::writer, id: str, desc: str, s: str, n: int) unsafe fn acid(ch: char, prob: u32) -> aminoacids { ret {ch: ch, prob: prob}; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { // alioth tests k-nucleotide with this data at 25,000,000 - ["", "5000000"]/~ + ~["", "5000000"] } else if args.len() <= 1u { - ["", "1000"]/~ + ~["", "1000"] } else { args }; let writer = if os::getenv("RUST_BENCH").is_some() { - result::get(io::file_writer("./shootout-fasta.data", [io::truncate, io::create]/~)) + result::get(io::file_writer("./shootout-fasta.data", ~[io::truncate, io::create])) } else { io::stdout() }; let n = int::from_str(args[1]).get(); - let iub: [aminoacids]/~ = - make_cumulative([acid('a', 27u32), acid('c', 12u32), acid('g', 12u32), + let iub: ~[aminoacids] = + make_cumulative(~[acid('a', 27u32), acid('c', 12u32), acid('g', 12u32), acid('t', 27u32), acid('B', 2u32), acid('D', 2u32), acid('H', 2u32), acid('K', 2u32), acid('M', 2u32), acid('N', 2u32), acid('R', 2u32), acid('S', 2u32), - acid('V', 2u32), acid('W', 2u32), acid('Y', 2u32)]/~); - let homosapiens: [aminoacids]/~ = - make_cumulative([acid('a', 30u32), acid('c', 20u32), acid('g', 20u32), - acid('t', 30u32)]/~); + acid('V', 2u32), acid('W', 2u32), acid('Y', 2u32)]); + let homosapiens: ~[aminoacids] = + make_cumulative(~[acid('a', 30u32), acid('c', 20u32), acid('g', 20u32), + acid('t', 30u32)]); let alu: str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG" + "GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA" + diff --git a/src/test/bench/shootout-fibo.rs b/src/test/bench/shootout-fibo.rs index 79d5f6cc08a..dcdc92ad943 100644 --- a/src/test/bench/shootout-fibo.rs +++ b/src/test/bench/shootout-fibo.rs @@ -8,11 +8,11 @@ fn fib(n: int) -> int { } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "40"]/~ + ~["", "40"] } else if args.len() <= 1u { - ["", "30"]/~ + ~["", "30"] } else { args }; diff --git a/src/test/bench/shootout-k-nucleotide.rs b/src/test/bench/shootout-k-nucleotide.rs index fc9c999a533..0054aad92f6 100644 --- a/src/test/bench/shootout-k-nucleotide.rs +++ b/src/test/bench/shootout-k-nucleotide.rs @@ -10,7 +10,7 @@ import std::map::hashmap; import std::sort; // given a map, print a sorted version of it -fn sort_and_fmt(mm: hashmap<[u8]/~, uint>, total: uint) -> str { +fn sort_and_fmt(mm: hashmap<~[u8], uint>, total: uint) -> str { fn pct(xx: uint, yy: uint) -> float { ret (xx as float) * 100f / (yy as float); } @@ -28,14 +28,14 @@ fn sort_and_fmt(mm: hashmap<[u8]/~, uint>, total: uint) -> str { } // sort by key, then by value - fn sortKV<TT: copy, UU: copy>(orig: [(TT,UU)]/~) -> [(TT,UU)]/~ { + fn sortKV<TT: copy, UU: copy>(orig: ~[(TT,UU)]) -> ~[(TT,UU)] { ret sort::merge_sort(le_by_val, sort::merge_sort(le_by_key, orig)); } - let mut pairs = []/~; + let mut pairs = ~[]; // map -> [(k,%)] - mm.each(fn&(key: [u8]/~, val: uint) -> bool { + mm.each(fn&(key: ~[u8], val: uint) -> bool { vec::push(pairs, (key, pct(val, total))); ret true; }); @@ -44,7 +44,7 @@ fn sort_and_fmt(mm: hashmap<[u8]/~, uint>, total: uint) -> str { let mut buffer = ""; - pairs_sorted.each(fn&(kv: ([u8]/~, float)) -> bool unsafe { + pairs_sorted.each(fn&(kv: (~[u8], float)) -> bool unsafe { let (k,v) = kv; buffer += (#fmt["%s %0.3f\n", str::to_upper(str::unsafe::from_bytes(k)), v]); ret true; @@ -54,7 +54,7 @@ fn sort_and_fmt(mm: hashmap<[u8]/~, uint>, total: uint) -> str { } // given a map, search for the frequency of a pattern -fn find(mm: hashmap<[u8]/~, uint>, key: str) -> uint { +fn find(mm: hashmap<~[u8], uint>, key: str) -> uint { alt mm.find(str::bytes(str::to_lower(key))) { option::none { ret 0u; } option::some(num) { ret num; } @@ -62,7 +62,7 @@ fn find(mm: hashmap<[u8]/~, uint>, key: str) -> uint { } // given a map, increment the counter for a key -fn update_freq(mm: hashmap<[u8]/~, uint>, key: [u8]/&) { +fn update_freq(mm: hashmap<~[u8], uint>, key: &[u8]) { let key = vec::slice(key, 0, key.len()); alt mm.find(key) { option::none { mm.insert(key, 1u ); } @@ -70,11 +70,11 @@ fn update_freq(mm: hashmap<[u8]/~, uint>, key: [u8]/&) { } } -// given a [u8]/~, for each window call a function +// given a ~[u8], for each window call a function // i.e., for "hello" and windows of size four, // run it("hell") and it("ello"), then return "llo" -fn windows_with_carry(bb: [const u8]/~, nn: uint, - it: fn(window: [u8]/&)) -> [u8]/~ { +fn windows_with_carry(bb: ~[const u8], nn: uint, + it: fn(window: &[u8])) -> ~[u8] { let mut ii = 0u; let len = vec::len(bb); @@ -86,21 +86,21 @@ fn windows_with_carry(bb: [const u8]/~, nn: uint, ret vec::slice(bb, len - (nn - 1u), len); } -fn make_sequence_processor(sz: uint, from_parent: comm::port<[u8]/~>, +fn make_sequence_processor(sz: uint, from_parent: comm::port<~[u8]>, to_parent: comm::chan<str>) { - let freqs: hashmap<[u8]/~, uint> = map::bytes_hash(); - let mut carry: [u8]/~ = []/~; + let freqs: hashmap<~[u8], uint> = map::bytes_hash(); + let mut carry: ~[u8] = ~[]; let mut total: uint = 0u; - let mut line: [u8]/~; + let mut line: ~[u8]; loop { line = comm::recv(from_parent); - if line == []/~ { break; } + if line == ~[] { break; } - carry = windows_with_carry(carry + line, sz, { |window| + carry = windows_with_carry(carry + line, sz, |window| { update_freq(freqs, window); total += 1u; }); @@ -122,7 +122,7 @@ fn make_sequence_processor(sz: uint, from_parent: comm::port<[u8]/~>, } // given a FASTA file on stdin, process sequence THREE -fn main(args: [str]/~) { +fn main(args: ~[str]) { let rdr = if os::getenv("RUST_BENCH").is_some() { // FIXME: Using this compile-time env variable is a crummy way to // get to this massive data set, but #include_bin chokes on it (#2598) @@ -138,11 +138,11 @@ fn main(args: [str]/~) { // initialize each sequence sorter - let sizes = [1u,2u,3u,4u,6u,12u,18u]/~; - let from_child = vec::map (sizes, { |_sz| comm::port() }); - let to_parent = vec::mapi(sizes, { |ii, _sz| comm::chan(from_child[ii]) }); - let to_child = vec::mapi(sizes, fn@(ii: uint, sz: uint) -> comm::chan<[u8]/~> { - ret task::spawn_listener { |from_parent| + let sizes = ~[1u,2u,3u,4u,6u,12u,18u]; + let from_child = vec::map (sizes, |_sz| comm::port() ); + let to_parent = vec::mapi(sizes, |ii, _sz| comm::chan(from_child[ii]) ); + let to_child = vec::mapi(sizes, fn@(ii: uint, sz: uint) -> comm::chan<~[u8]> { + ret do task::spawn_listener |from_parent| { make_sequence_processor(sz, from_parent, to_parent[ii]); }; }); @@ -174,7 +174,7 @@ fn main(args: [str]/~) { (_, true) { let line_bytes = str::bytes(line); - for sizes.eachi { |ii, _sz| + for sizes.eachi |ii, _sz| { let mut lb = line_bytes; comm::send(to_child[ii], lb); } @@ -186,12 +186,12 @@ fn main(args: [str]/~) { } // finish... - for sizes.eachi { |ii, _sz| - comm::send(to_child[ii], []/~); + for sizes.eachi |ii, _sz| { + comm::send(to_child[ii], ~[]); } // now fetch and print result messages - for sizes.eachi { |ii, _sz| + for sizes.eachi |ii, _sz| { io::println(comm::recv(from_child[ii])); } } diff --git a/src/test/bench/shootout-mandelbrot.rs b/src/test/bench/shootout-mandelbrot.rs index 2d06ae1324c..d400990ff6d 100644 --- a/src/test/bench/shootout-mandelbrot.rs +++ b/src/test/bench/shootout-mandelbrot.rs @@ -17,7 +17,7 @@ import io::writer_util; import std::map::hashmap; type cmplx = {re: f64, im: f64}; -type line = {i: uint, b: [u8]/~}; +type line = {i: uint, b: ~[u8]}; impl arith for cmplx { fn *(x: cmplx) -> cmplx { @@ -65,12 +65,11 @@ fn fillbyte(x: cmplx, incr: f64) -> u8 { fn chanmb(i: uint, size: uint, ch: comm::chan<line>) -> () { - let mut crv = []/~; + let mut crv = ~[]; let incr = 2f64/(size as f64); let y = incr*(i as f64) - 1f64; let xincr = 8f64*incr; - for uint::range(0_u, size/8_u) { - |j| + for uint::range(0_u, size/8_u) |j| { let x = {re: xincr*(j as f64) - 1.5f64, im: y}; vec::push(crv, fillbyte(x, incr)); }; @@ -80,7 +79,7 @@ fn chanmb(i: uint, size: uint, ch: comm::chan<line>) -> () type devnull = {dn: int}; impl of io::writer for devnull { - fn write(_b: [const u8]/&) {} + fn write(_b: &[const u8]) {} fn seek(_i: int, _s: io::seek_style) {} fn tell() -> uint {0_u} fn flush() -> int {0} @@ -101,7 +100,7 @@ fn writer(path: str, writech: comm::chan<comm::chan<line>>, size: uint) _ { result::get( io::file_writer(path, - [io::create, io::truncate]/~)) + ~[io::create, io::truncate])) } }; cout.write_line("P4"); @@ -121,7 +120,7 @@ fn writer(path: str, writech: comm::chan<comm::chan<line>>, size: uint) #debug("WS %u", prev); // FIXME (#2280): this temporary shouldn't be // necessary, but seems to be, for borrowing. - let v : [u8]/~ = lines.get(prev); + let v : ~[u8] = lines.get(prev); cout.write(v); done += 1_u; lines.remove(prev); @@ -140,9 +139,9 @@ fn writer(path: str, writech: comm::chan<comm::chan<line>>, size: uint) } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "4000", "10"]/~ + ~["", "4000", "10"] } else { args }; @@ -158,12 +157,12 @@ fn main(args: [str]/~) { let writep = comm::port(); let writech = comm::chan(writep); - task::spawn {|| + do task::spawn || { writer(path, writech, size); }; let ch = comm::recv(writep); - for uint::range(0_u, size) {|j| - task::spawn {|| chanmb(j, size, ch);}; + for uint::range(0_u, size) |j| { + task::spawn(|| chanmb(j, size, ch) ); if j % yieldevery == 0_u { #debug("Y %u", j); task::yield(); diff --git a/src/test/bench/shootout-nbody.rs b/src/test/bench/shootout-nbody.rs index 6e5e6717861..775cbe1977f 100644 --- a/src/test/bench/shootout-nbody.rs +++ b/src/test/bench/shootout-nbody.rs @@ -13,16 +13,16 @@ native mod libc { fn sqrt(n: float) -> float; } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "4000000"]/~ + ~["", "4000000"] } else if args.len() <= 1u { - ["", "100000"]/~ + ~["", "100000"] } else { args }; let n = int::from_str(args[1]).get(); - let bodies: [Body::props]/~ = NBodySystem::MakeNBodySystem(); + let bodies: ~[Body::props] = NBodySystem::MakeNBodySystem(); io::println(#fmt("%f", NBodySystem::energy(bodies))); let mut i: int = 0; while i < n { NBodySystem::advance(bodies, 0.01); i += 1; } @@ -34,11 +34,11 @@ fn main(args: [str]/~) { mod NBodySystem { - fn MakeNBodySystem() -> [Body::props]/~ { + fn MakeNBodySystem() -> ~[Body::props] { // these each return a Body::props - let bodies: [Body::props]/~ = - [Body::sun(), Body::jupiter(), Body::saturn(), Body::uranus(), - Body::neptune()]/~; + let bodies: ~[Body::props] = + ~[Body::sun(), Body::jupiter(), Body::saturn(), Body::uranus(), + Body::neptune()]; let mut px: float = 0.0; let mut py: float = 0.0; @@ -59,7 +59,7 @@ mod NBodySystem { ret bodies; } - fn advance(bodies: [Body::props]/~, dt: float) { + fn advance(bodies: ~[Body::props], dt: float) { let mut i: int = 0; while i < 5 { @@ -98,7 +98,7 @@ mod NBodySystem { b.z += dt * b.vz; } - fn energy(bodies: [Body::props]/~) -> float unsafe { + fn energy(bodies: ~[Body::props]) -> float unsafe { let mut dx: float; let mut dy: float; let mut dz: float; diff --git a/src/test/bench/shootout-pfib.rs b/src/test/bench/shootout-pfib.rs index 63578897639..8ce91865b04 100644 --- a/src/test/bench/shootout-pfib.rs +++ b/src/test/bench/shootout-pfib.rs @@ -32,22 +32,22 @@ fn fib(n: int) -> int { } else { let p = port(); let ch = chan(p); - task::spawn {|| pfib(ch, n - 1); }; - task::spawn {|| pfib(ch, n - 2); }; + task::spawn(|| pfib(ch, n - 1) ); + task::spawn(|| pfib(ch, n - 2) ); send(c, recv(p) + recv(p)); } } let p = port(); let ch = chan(p); - let t = task::spawn {|| pfib(ch, n); }; + let t = task::spawn(|| pfib(ch, n) ); ret recv(p); } type config = {stress: bool}; -fn parse_opts(argv: [str]/~) -> config { - let opts = [getopts::optflag("stress")]/~; +fn parse_opts(argv: ~[str]) -> config { + let opts = ~[getopts::optflag("stress")]; let opt_args = vec::slice(argv, 1u, vec::len(argv)); @@ -69,20 +69,20 @@ fn stress_task(&&id: int) { } fn stress(num_tasks: int) { - let mut results = []/~; - for range(0, num_tasks) {|i| + let mut results = ~[]; + for range(0, num_tasks) |i| { let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {|| stress_task(i); } + results += ~[task::future_result(builder)]; + task::run(builder, || stress_task(i) ); } - for results.each {|r| future::get(r); } + for results.each |r| { future::get(r); } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "20"]/~ + ~["", "20"] } else if args.len() <= 1u { - ["", "8"]/~ + ~["", "8"] } else { args }; @@ -99,8 +99,8 @@ fn main(args: [str]/~) { let out = io::stdout(); - for range(1, max + 1) {|n| - for range(0, num_trials) {|i| + for range(1, max + 1) |n| { + for range(0, num_trials) |i| { let start = time::precise_time_ns(); let fibn = fib(n); let stop = time::precise_time_ns(); diff --git a/src/test/bench/shootout-spectralnorm.rs b/src/test/bench/shootout-spectralnorm.rs index 4678ce41ca8..c5a952aab8d 100644 --- a/src/test/bench/shootout-spectralnorm.rs +++ b/src/test/bench/shootout-spectralnorm.rs @@ -6,7 +6,7 @@ fn eval_A(i: uint, j: uint) -> float { 1.0/(((i+j)*(i+j+1u)/2u+i+1u) as float) } -fn eval_A_times_u(u: [const float]/~, Au: [mut float]/~) { +fn eval_A_times_u(u: ~[const float], Au: ~[mut float]) { let N = vec::len(u); let mut i = 0u; while i < N { @@ -20,7 +20,7 @@ fn eval_A_times_u(u: [const float]/~, Au: [mut float]/~) { } } -fn eval_At_times_u(u: [const float]/~, Au: [mut float]/~) { +fn eval_At_times_u(u: ~[const float], Au: ~[mut float]) { let N = vec::len(u); let mut i = 0u; while i < N { @@ -34,17 +34,17 @@ fn eval_At_times_u(u: [const float]/~, Au: [mut float]/~) { } } -fn eval_AtA_times_u(u: [const float]/~, AtAu: [mut float]/~) { +fn eval_AtA_times_u(u: ~[const float], AtAu: ~[mut float]) { let v = vec::to_mut(vec::from_elem(vec::len(u), 0.0)); eval_A_times_u(u, v); eval_At_times_u(v, AtAu); } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "2000"]/~ + ~["", "2000"] } else if args.len() <= 1u { - ["", "1000"]/~ + ~["", "1000"] } else { args }; diff --git a/src/test/bench/shootout-threadring.rs b/src/test/bench/shootout-threadring.rs index 7e16160f765..be2e1fc3ad8 100644 --- a/src/test/bench/shootout-threadring.rs +++ b/src/test/bench/shootout-threadring.rs @@ -8,9 +8,9 @@ fn start(+token: int) { let p = comm::port(); let mut ch = comm::chan(p); - for int::range(2, n_threads + 1) { |i| + for int::range(2, n_threads + 1) |i| { let id = n_threads + 2 - i; - let to_child = task::spawn_listener::<int> {|p, copy ch| + let to_child = do task::spawn_listener::<int> |p, copy ch| { roundtrip(id, p, ch) }; ch = to_child; @@ -37,11 +37,11 @@ fn roundtrip(id: int, p: comm::port<int>, ch: comm::chan<int>) { } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "2000000"]/~ + ~["", "2000000"] } else if args.len() <= 1u { - ["", "1000"]/~ + ~["", "1000"] } else { args }; diff --git a/src/test/bench/std-smallintmap.rs b/src/test/bench/std-smallintmap.rs index ac25e864278..4a712e2d995 100644 --- a/src/test/bench/std-smallintmap.rs +++ b/src/test/bench/std-smallintmap.rs @@ -6,22 +6,22 @@ import std::smallintmap::{smallintmap, map}; import io::writer_util; fn append_sequential(min: uint, max: uint, map: smallintmap<uint>) { - for uint::range(min, max) { |i| + for uint::range(min, max) |i| { map.insert(i, i + 22u); } } fn check_sequential(min: uint, max: uint, map: smallintmap<uint>) { - for uint::range(min, max) { |i| + for uint::range(min, max) |i| { assert map.get(i) == i + 22u; } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "100000", "100"]/~ + ~["", "100000", "100"] } else if args.len() <= 1u { - ["", "10000", "50"]/~ + ~["", "10000", "50"] } else { args }; @@ -31,7 +31,7 @@ fn main(args: [str]/~) { let mut checkf = 0.0; let mut appendf = 0.0; - for uint::range(0u, rep) {|_r| + for uint::range(0u, rep) |_r| { let map = smallintmap::mk(); let start = std::time::precise_time_s(); append_sequential(0u, max, map); diff --git a/src/test/bench/sudoku.rs b/src/test/bench/sudoku.rs index 5b3b104a342..61055b49c5d 100644 --- a/src/test/bench/sudoku.rs +++ b/src/test/bench/sudoku.rs @@ -22,7 +22,7 @@ import io::{writer_util, reader_util}; export grid_t, read_grid, solve_grid, write_grid; // internal type of sudoku grids -type grid = [[mut u8]/~]/~; +type grid = ~[~[mut u8]]; // exported type of sudoku grids enum grid_t { grid_ctor(grid), } @@ -51,7 +51,7 @@ fn solve_grid(g: grid_t) { if start_color < 10u8 { // colors not yet used let avail = bitv::bitv(10u, false); - for u8::range(start_color, 10u8) { |color| + for u8::range(start_color, 10u8) |color| { bitv::set(avail, color as uint, true); } @@ -59,7 +59,7 @@ fn solve_grid(g: grid_t) { drop_colors(g, avail, row, col); // find first remaining color that is available - for uint::range(1u, 10u) {|i| + for uint::range(1u, 10u) |i| { if bitv::get(avail, i) { g[row][col] = i as u8; ret true; @@ -77,9 +77,9 @@ fn solve_grid(g: grid_t) { if color != 0u8 { bitv::set(colors, color as uint, false); } } - let it = {|a,b|drop_color(g, avail, a, b)}; + let it = |a,b| drop_color(g, avail, a, b); - for u8::range(0u8, 9u8) { |idx| + for u8::range(0u8, 9u8) |idx| { it(idx, col); /* check same column fields */ it(row, idx); /* check same row fields */ } @@ -87,16 +87,16 @@ fn solve_grid(g: grid_t) { // check same block fields let row0 = (row / 3u8) * 3u8; let col0 = (col / 3u8) * 3u8; - for u8::range(row0, row0 + 3u8) { |alt_row| - for u8::range(col0, col0 + 3u8) { |alt_col| it(alt_row, alt_col); } + for u8::range(row0, row0 + 3u8) |alt_row| { + for u8::range(col0, col0 + 3u8) |alt_col| { it(alt_row, alt_col); } } } - let mut work: [(u8, u8)]/~ = []/~; /* queue of uncolored fields */ - for u8::range(0u8, 9u8) { |row| - for u8::range(0u8, 9u8) { |col| + let mut work: ~[(u8, u8)] = ~[]; /* queue of uncolored fields */ + for u8::range(0u8, 9u8) |row| { + for u8::range(0u8, 9u8) |col| { let color = (*g)[row][col]; - if color == 0u8 { work += [(row, col)]/~; } + if color == 0u8 { work += ~[(row, col)]; } } } @@ -117,20 +117,20 @@ fn solve_grid(g: grid_t) { } fn write_grid(f: io::writer, g: grid_t) { - for u8::range(0u8, 9u8) { |row| + for u8::range(0u8, 9u8) |row| { f.write_str(#fmt("%u", (*g)[row][0] as uint)); - for u8::range(1u8, 9u8) { |col| + for u8::range(1u8, 9u8) |col| { f.write_str(#fmt(" %u", (*g)[row][col] as uint)); } f.write_char('\n'); } } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let grid = if vec::len(args) == 1u { // FIXME create sudoku inline since nested vec consts dont work yet // (#571) - let g = vec::from_fn(10u, {|_i| + let g = vec::from_fn(10u, |_i| { vec::to_mut(vec::from_elem(10u, 0 as u8)) }); g[0][1] = 4u8; diff --git a/src/test/bench/task-perf-alloc-unwind.rs b/src/test/bench/task-perf-alloc-unwind.rs index f10e198e29f..d898acb28ab 100644 --- a/src/test/bench/task-perf-alloc-unwind.rs +++ b/src/test/bench/task-perf-alloc-unwind.rs @@ -17,9 +17,9 @@ fn main() { } fn run(repeat: int, depth: int) { - iter::repeat(repeat as uint) { || + do iter::repeat(repeat as uint) || { #debug("starting %.4f", precise_time_s()); - task::try { || + do task::try || { recurse_or_fail(depth, none) }; #debug("stopping %.4f", precise_time_s()); @@ -36,7 +36,7 @@ enum st { fn_box: fn@() -> @nillist, fn_unique: fn~() -> ~nillist, tuple: (@nillist, ~nillist), - vec: [@nillist]/~, + vec: ~[@nillist], res: r }) } @@ -62,7 +62,7 @@ fn recurse_or_fail(depth: int, st: option<st>) { fn_box: fn@() -> @nillist { @nil::<()> }, fn_unique: fn~() -> ~nillist { ~nil::<()> }, tuple: (@nil, ~nil), - vec: [@nil]/~, + vec: ~[@nil], res: r(@nil) }) } @@ -77,7 +77,7 @@ fn recurse_or_fail(depth: int, st: option<st>) { fn_unique: fn~() -> ~nillist { ~cons((), @*fn_unique()) }, tuple: (@cons((), first(st.tuple)), ~cons((), @*second(st.tuple))), - vec: st.vec + [@cons((), st.vec.last())]/~, + vec: st.vec + ~[@cons((), st.vec.last())], res: r(@cons((), st.res._l)) }) } diff --git a/src/test/bench/task-perf-one-million.rs b/src/test/bench/task-perf-one-million.rs index be33193f4f0..43c778d3737 100644 --- a/src/test/bench/task-perf-one-million.rs +++ b/src/test/bench/task-perf-one-million.rs @@ -9,16 +9,16 @@ enum msg { fn calc(children: uint, parent_ch: comm::chan<msg>) { let port = comm::port(); let chan = comm::chan(port); - let mut child_chs = []/~; + let mut child_chs = ~[]; let mut sum = 0; - iter::repeat (children) {|| - task::spawn {|| + do iter::repeat (children) || { + do task::spawn || { calc(0u, chan); }; } - iter::repeat (children) {|| + do iter::repeat (children) || { alt check comm::recv(port) { ready(child_ch) { vec::push(child_chs, child_ch); @@ -30,13 +30,13 @@ fn calc(children: uint, parent_ch: comm::chan<msg>) { alt check comm::recv(port) { start { - vec::iter (child_chs) { |child_ch| + do vec::iter (child_chs) |child_ch| { comm::send(child_ch, start); } } } - iter::repeat (children) {|| + do iter::repeat (children) || { alt check comm::recv(port) { done(child_sum) { sum += child_sum; } } @@ -45,11 +45,11 @@ fn calc(children: uint, parent_ch: comm::chan<msg>) { comm::send(parent_ch, done(sum + 1)); } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "100000"]/~ + ~["", "100000"] } else if args.len() <= 1u { - ["", "100"]/~ + ~["", "100"] } else { args }; @@ -57,7 +57,7 @@ fn main(args: [str]/~) { let children = uint::from_str(args[1]).get(); let port = comm::port(); let chan = comm::chan(port); - task::spawn {|| + do task::spawn || { calc(children, chan); }; alt check comm::recv(port) { diff --git a/src/test/bench/task-perf-spawnalot.rs b/src/test/bench/task-perf-spawnalot.rs index 05260e1fd3e..7cc9446250e 100644 --- a/src/test/bench/task-perf-spawnalot.rs +++ b/src/test/bench/task-perf-spawnalot.rs @@ -1,22 +1,22 @@ fn f(&&n: uint) { let mut i = 0u; while i < n { - task::try {|| g() }; + task::try(|| g() ); i += 1u; } } fn g() { } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let args = if os::getenv("RUST_BENCH").is_some() { - ["", "400"]/~ + ~["", "400"] } else if args.len() <= 1u { - ["", "10"]/~ + ~["", "10"] } else { args }; let n = uint::from_str(args[1]).get(); let mut i = 0u; - while i < n { task::spawn {|| f(n); }; i += 1u; } + while i < n { task::spawn(|| f(n) ); i += 1u; } } diff --git a/src/test/bench/task-perf-word-count-generic.rs b/src/test/bench/task-perf-word-count-generic.rs index e4528333373..fa22e77eac7 100644 --- a/src/test/bench/task-perf-word-count-generic.rs +++ b/src/test/bench/task-perf-word-count-generic.rs @@ -32,10 +32,10 @@ import comm::methods; // These used to be in task, but they disappeard. type joinable_task = port<()>; -fn spawn_joinable(f: fn~()) -> joinable_task { +fn spawn_joinable(+f: fn~()) -> joinable_task { let p = port(); let c = chan(p); - task::spawn() {|| + do task::spawn() |move f| { f(); c.send(()); } @@ -92,12 +92,12 @@ mod map_reduce { fn start_mappers<K1: copy send, K2: copy send, V: copy send>( map: mapper<K1, K2, V>, - ctrl: chan<ctrl_proto<K2, V>>, inputs: [K1]/~) - -> [joinable_task]/~ + ctrl: chan<ctrl_proto<K2, V>>, inputs: ~[K1]) + -> ~[joinable_task] { - let mut tasks = []/~; - for inputs.each {|i| - tasks += [spawn_joinable {|| map_task(map, ctrl, i)}]/~; + let mut tasks = ~[]; + for inputs.each |i| { + tasks += ~[spawn_joinable(|| map_task(map, ctrl, i) )]; } ret tasks; } @@ -170,13 +170,13 @@ mod map_reduce { ret none; } - reduce(key, {||get(p, ref_count, is_done)}); + reduce(key, || get(p, ref_count, is_done) ); } fn map_reduce<K1: copy send, K2: copy send, V: copy send>( map: mapper<K1, K2, V>, reduce: reducer<K2, V>, - inputs: [K1]/~) + inputs: ~[K1]) { let ctrl = port(); @@ -207,9 +207,9 @@ mod map_reduce { let p = port(); let ch = chan(p); let r = reduce, kk = k; - tasks += [ - spawn_joinable {|| reduce_task(r, kk, ch) } - ]/~; + tasks += ~[ + spawn_joinable(|| reduce_task(r, kk, ch) ) + ]; c = recv(p); treemap::insert(reducers, k, c); } @@ -225,11 +225,11 @@ mod map_reduce { } treemap::traverse(reducers, finish); - for tasks.each {|t| join(t); } + for tasks.each |t| { join(t); } } } -fn main(argv: [str]/~) { +fn main(argv: ~[str]) { if vec::len(argv) < 2u { let out = io::stdout(); diff --git a/src/test/bench/task-perf-word-count.rs b/src/test/bench/task-perf-word-count.rs index a1493badbf3..83c0eb53d99 100644 --- a/src/test/bench/task-perf-word-count.rs +++ b/src/test/bench/task-perf-word-count.rs @@ -58,13 +58,13 @@ mod map_reduce { enum reduce_proto { emit_val(int), done, ref, release, } - fn start_mappers(ctrl: chan<ctrl_proto>, -inputs: [str]/~) -> - [future::future<task::task_result>]/~ { - let mut results = []/~; - for inputs.each {|i| + fn start_mappers(ctrl: chan<ctrl_proto>, -inputs: ~[str]) -> + ~[future::future<task::task_result>] { + let mut results = ~[]; + for inputs.each |i| { let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {|| map_task(ctrl, i)} + results += ~[task::future_result(builder)]; + task::run(builder, || map_task(ctrl, i)); } ret results; } @@ -91,9 +91,9 @@ mod map_reduce { send(c, emit_val(val)); } - map(input, {|a,b|emit(intermediates, ctrl, a, b)}); + map(input, |a,b| emit(intermediates, ctrl, a, b) ); - for intermediates.each_value {|v| send(v, release); } + for intermediates.each_value |v| { send(v, release); } send(ctrl, mapper_done); } @@ -125,10 +125,10 @@ mod map_reduce { ret none; } - reduce(key, {||get(p, state)}); + reduce(key, || get(p, state) ); } - fn map_reduce(-inputs: [str]/~) { + fn map_reduce(-inputs: ~[str]) { let ctrl = port::<ctrl_proto>(); // This task becomes the master control task. It task::_spawns @@ -161,8 +161,8 @@ mod map_reduce { let p = port(); let ch = chan(p); let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {||reduce_task(k, ch)} + results += ~[task::future_result(builder)]; + task::run(builder, || reduce_task(k, ch) ); c = recv(p); reducers.insert(k, c); } @@ -172,15 +172,15 @@ mod map_reduce { } } - for reducers.each_value {|v| send(v, done); } + for reducers.each_value |v| { send(v, done); } - for results.each {|r| future::get(r); } + for results.each |r| { future::get(r); } } } -fn main(argv: [str]/~) { +fn main(argv: ~[str]) { let inputs = if vec::len(argv) < 2u { - [input1(), input2(), input3()]/~ + ~[input1(), input2(), input3()] } else { vec::map(vec::slice(argv, 1u, vec::len(argv)), {|f| result::get(io::read_whole_file_str(f)) }) diff --git a/src/test/compile-fail/ambig_impl_unify.rs b/src/test/compile-fail/ambig_impl_unify.rs index 4a0d4f259dd..bb2009fc931 100644 --- a/src/test/compile-fail/ambig_impl_unify.rs +++ b/src/test/compile-fail/ambig_impl_unify.rs @@ -1,12 +1,12 @@ -impl methods for [uint]/~ { +impl methods for ~[uint] { fn foo() -> int {1} //~ NOTE candidate #1 is `methods::foo` } -impl methods for [int]/~ { +impl methods for ~[int] { fn foo() -> int {2} //~ NOTE candidate #2 is `methods::foo` } fn main() { - let x = []/~; + let x = ~[]; x.foo(); //~ ERROR multiple applicable methods in scope } \ No newline at end of file diff --git a/src/test/compile-fail/assign-super.rs b/src/test/compile-fail/assign-super.rs index a7cb5b38970..fab93a259c2 100644 --- a/src/test/compile-fail/assign-super.rs +++ b/src/test/compile-fail/assign-super.rs @@ -1,5 +1,5 @@ fn main() { - let mut x: [mut int]/~ = [mut 3]/~; - let y: [int]/~ = [3]/~; + let mut x: ~[mut int] = ~[mut 3]; + let y: ~[int] = ~[3]; x = y; //~ ERROR values differ in mutability } \ No newline at end of file diff --git a/src/test/compile-fail/bad-expr-path.rs b/src/test/compile-fail/bad-expr-path.rs index 34690d29a3c..62fc81649ff 100644 --- a/src/test/compile-fail/bad-expr-path.rs +++ b/src/test/compile-fail/bad-expr-path.rs @@ -2,4 +2,4 @@ mod m1 { } -fn main(args: [str]/~) { log(debug, m1::a); } +fn main(args: ~[str]) { log(debug, m1::a); } diff --git a/src/test/compile-fail/bad-expr-path2.rs b/src/test/compile-fail/bad-expr-path2.rs index 14bcfdf8352..fe2501b29f7 100644 --- a/src/test/compile-fail/bad-expr-path2.rs +++ b/src/test/compile-fail/bad-expr-path2.rs @@ -4,4 +4,4 @@ mod m1 { mod a { } } -fn main(args: [str]/~) { log(debug, m1::a); } +fn main(args: ~[str]) { log(debug, m1::a); } diff --git a/src/test/compile-fail/bad-for-loop.rs b/src/test/compile-fail/bad-for-loop.rs index 5808fcbb161..65a7ee4211c 100644 --- a/src/test/compile-fail/bad-for-loop.rs +++ b/src/test/compile-fail/bad-for-loop.rs @@ -1,4 +1,4 @@ fn main() { fn baz(_x: fn() -> int) {} - for baz {|_e| } //~ ERROR should return `bool` + for baz |_e| { } //~ ERROR should return `bool` } diff --git a/src/test/compile-fail/bad-main.rs b/src/test/compile-fail/bad-main.rs index 775f1137e1b..e616a05eabe 100644 --- a/src/test/compile-fail/bad-main.rs +++ b/src/test/compile-fail/bad-main.rs @@ -1,3 +1,3 @@ -// error-pattern:expecting `native fn([str]/~) +// error-pattern:expecting `native fn(~[str]) fn main(x: int) { } diff --git a/src/test/compile-fail/bad-module.rs b/src/test/compile-fail/bad-module.rs index d96ea37cc78..cda91b7f904 100644 --- a/src/test/compile-fail/bad-module.rs +++ b/src/test/compile-fail/bad-module.rs @@ -1,4 +1,4 @@ // error-pattern: unresolved import import thing; -fn main() { let foo = thing::len([]/~); } +fn main() { let foo = thing::len(~[]); } diff --git a/src/test/compile-fail/bad-var-env-capture-in-block-arg.rs b/src/test/compile-fail/bad-var-env-capture-in-block-arg.rs index 21c9d7203ed..898c164e9dc 100644 --- a/src/test/compile-fail/bad-var-env-capture-in-block-arg.rs +++ b/src/test/compile-fail/bad-var-env-capture-in-block-arg.rs @@ -1,7 +1,7 @@ fn main() { let x = 3; fn blah(_a: native fn()) {} - blah({|| + blah(|| { log(debug, x); //~ ERROR attempted dynamic environment capture }); } \ No newline at end of file diff --git a/src/test/compile-fail/block-arg-as-stmt-with-value.rs b/src/test/compile-fail/block-arg-as-stmt-with-value.rs index e89a10a1246..b8e34aefd6f 100644 --- a/src/test/compile-fail/block-arg-as-stmt-with-value.rs +++ b/src/test/compile-fail/block-arg-as-stmt-with-value.rs @@ -1,8 +1,8 @@ fn compute1() -> float { - let v = [0f, 1f, 2f, 3f]/~; + let v = ~[0f, 1f, 2f, 3f]; - vec::foldl(0f, v) { |x, y| x + y } - 10f + do vec::foldl(0f, v) |x, y| { x + y } - 10f //~^ ERROR mismatched types: expected `()` } diff --git a/src/test/compile-fail/block-coerce-no.rs b/src/test/compile-fail/block-coerce-no.rs index 45298ccaf8e..e789775fe7d 100644 --- a/src/test/compile-fail/block-coerce-no.rs +++ b/src/test/compile-fail/block-coerce-no.rs @@ -11,6 +11,6 @@ fn coerce(b: fn()) -> native fn() { fn main() { let i = 8; - let f = coerce({|| log(error, i); }); + let f = coerce(|| log(error, i) ); f(); } diff --git a/src/test/compile-fail/block-deinitializes-upvar.rs b/src/test/compile-fail/block-deinitializes-upvar.rs index c4ea3483196..679e3c4797b 100644 --- a/src/test/compile-fail/block-deinitializes-upvar.rs +++ b/src/test/compile-fail/block-deinitializes-upvar.rs @@ -4,5 +4,5 @@ fn main() { let mut x = @{x: 17, y: 2}; let y = @{x: 5, y: 5}; - force({|| x <- y;}); + force(|| x <- y ); } diff --git a/src/test/compile-fail/block-must-not-have-result-for.rs b/src/test/compile-fail/block-must-not-have-result-for.rs index e069a356aae..41a2182ba2b 100644 --- a/src/test/compile-fail/block-must-not-have-result-for.rs +++ b/src/test/compile-fail/block-must-not-have-result-for.rs @@ -1,7 +1,7 @@ // error-pattern:mismatched types: expected `()` but found `bool` fn main() { - for vec::each([0]/~) {|_i| + for vec::each(~[0]) |_i| { true } } \ No newline at end of file diff --git a/src/test/compile-fail/borrowck-assign-comp-idx.rs b/src/test/compile-fail/borrowck-assign-comp-idx.rs index accd228dc03..1ec0b923431 100644 --- a/src/test/compile-fail/borrowck-assign-comp-idx.rs +++ b/src/test/compile-fail/borrowck-assign-comp-idx.rs @@ -1,7 +1,7 @@ type point = { x: int, y: int }; fn a() { - let mut p = [mut 1]/~; + let mut p = ~[mut 1]; // Create an immutable pointer into p's contents: let _q: &int = &p[0]; //~ NOTE loan of mutable vec content granted here @@ -9,15 +9,15 @@ fn a() { p[0] = 5; //~ ERROR assigning to mutable vec content prohibited due to outstanding loan } -fn borrow(_x: [int]/&, _f: fn()) {} +fn borrow(_x: &[int], _f: fn()) {} fn b() { // here we alias the mutable vector into an imm slice and try to // modify the original: - let mut p = [mut 1]/~; + let mut p = ~[mut 1]; - borrow(p) {|| //~ NOTE loan of mutable vec content granted here + do borrow(p) || { //~ NOTE loan of mutable vec content granted here p[0] = 5; //~ ERROR assigning to mutable vec content prohibited due to outstanding loan } } @@ -25,8 +25,8 @@ fn b() { fn c() { // Legal because the scope of the borrow does not include the // modification: - let mut p = [mut 1]/~; - borrow(p, {||}); + let mut p = ~[mut 1]; + borrow(p, ||{}); p[0] = 5; } diff --git a/src/test/compile-fail/borrowck-binding-mutbl.rs b/src/test/compile-fail/borrowck-binding-mutbl.rs index d3de94942ed..8a188fe9239 100644 --- a/src/test/compile-fail/borrowck-binding-mutbl.rs +++ b/src/test/compile-fail/borrowck-binding-mutbl.rs @@ -1,8 +1,8 @@ -fn impure(_v: [int]/~) { +fn impure(_v: ~[int]) { } fn main() { - let x = {mut f: [3]/~}; + let x = {mut f: ~[3]}; alt x { {f: v} => { diff --git a/src/test/compile-fail/borrowck-lend-flow.rs b/src/test/compile-fail/borrowck-lend-flow.rs index 26618d3436e..e867d525e1a 100644 --- a/src/test/compile-fail/borrowck-lend-flow.rs +++ b/src/test/compile-fail/borrowck-lend-flow.rs @@ -62,7 +62,7 @@ fn while_aliased_mut_cond(cond: bool, cond2: bool) { fn loop_in_block() { let mut v = ~3, w = ~4; let mut _x = &mut w; - for uint::range(0u, 10u) {|_i| + for uint::range(0u, 10u) |_i| { borrow(v); //~ ERROR loan of mutable variable declared in an outer block as immutable conflicts with prior loan _x = &mut v; //~ NOTE prior loan as mutable granted here } @@ -76,7 +76,7 @@ fn at_most_once_block() { let mut v = ~3, w = ~4; let mut _x = &mut w; - at_most_once {|| + do at_most_once || { borrow(v); //~ ERROR loan of mutable variable declared in an outer block as immutable conflicts with prior loan _x = &mut v; //~ NOTE prior loan as mutable granted here } diff --git a/src/test/compile-fail/borrowck-loan-blocks-move-cc.rs b/src/test/compile-fail/borrowck-loan-blocks-move-cc.rs index 78a4de4fc19..0f5d61b510c 100644 --- a/src/test/compile-fail/borrowck-loan-blocks-move-cc.rs +++ b/src/test/compile-fail/borrowck-loan-blocks-move-cc.rs @@ -5,7 +5,7 @@ fn borrow(v: &int, f: fn(x: &int)) { fn box_imm() { let mut v = ~3; let _w = &mut v; //~ NOTE loan of mutable local variable granted here - task::spawn { |move v| + do task::spawn |move v| { //~^ ERROR moving out of mutable local variable prohibited due to outstanding loan #debug["v=%d", *v]; } diff --git a/src/test/compile-fail/borrowck-loan-blocks-mut-uniq.rs b/src/test/compile-fail/borrowck-loan-blocks-mut-uniq.rs index ffed2371334..ad9eb9c4874 100644 --- a/src/test/compile-fail/borrowck-loan-blocks-mut-uniq.rs +++ b/src/test/compile-fail/borrowck-loan-blocks-mut-uniq.rs @@ -4,7 +4,7 @@ fn borrow(v: &int, f: fn(x: &int)) { fn box_imm() { let mut v = ~3; - borrow(v) { |w| //~ NOTE loan of mutable local variable granted here + do borrow(v) |w| { //~ NOTE loan of mutable local variable granted here v = ~4; //~ ERROR assigning to mutable variable declared in an outer block prohibited due to outstanding loan assert *v == 3; assert *w == 4; diff --git a/src/test/compile-fail/borrowck-loan-rcvr.rs b/src/test/compile-fail/borrowck-loan-rcvr.rs index b749996585b..df93fd44c6e 100644 --- a/src/test/compile-fail/borrowck-loan-rcvr.rs +++ b/src/test/compile-fail/borrowck-loan-rcvr.rs @@ -19,7 +19,7 @@ fn a() { p.impurem(); // But in this case we do not honor the loan: - p.blockm {|| //~ NOTE loan of mutable local variable granted here + do p.blockm || { //~ NOTE loan of mutable local variable granted here p.x = 10; //~ ERROR assigning to mutable field prohibited due to outstanding loan } } diff --git a/src/test/compile-fail/borrowck-loan-vec-content.rs b/src/test/compile-fail/borrowck-loan-vec-content.rs index 5542f0f4268..07f197afa74 100644 --- a/src/test/compile-fail/borrowck-loan-vec-content.rs +++ b/src/test/compile-fail/borrowck-loan-vec-content.rs @@ -7,14 +7,14 @@ fn takes_imm_elt(_v: &int, f: fn()) { } fn has_mut_vec_and_does_not_try_to_change_it() { - let v = [mut 1, 2, 3]/~; - takes_imm_elt(&v[0]) {|| + let v = ~[mut 1, 2, 3]; + do takes_imm_elt(&v[0]) || { } } fn has_mut_vec_but_tries_to_change_it() { - let v = [mut 1, 2, 3]/~; - takes_imm_elt(&v[0]) {|| //~ NOTE loan of mutable vec content granted here + let v = ~[mut 1, 2, 3]; + do takes_imm_elt(&v[0]) || { //~ NOTE loan of mutable vec content granted here v[1] = 4; //~ ERROR assigning to mutable vec content prohibited due to outstanding loan } } @@ -24,8 +24,8 @@ fn takes_const_elt(_v: &const int, f: fn()) { } fn has_mut_vec_and_tries_to_change_it() { - let v = [mut 1, 2, 3]/~; - takes_const_elt(&const v[0]) {|| + let v = ~[mut 1, 2, 3]; + do takes_const_elt(&const v[0]) || { v[1] = 4; } } diff --git a/src/test/compile-fail/borrowck-mut-vec-as-imm-slice-bad.rs b/src/test/compile-fail/borrowck-mut-vec-as-imm-slice-bad.rs index 4df662e90c5..ad9877c2674 100644 --- a/src/test/compile-fail/borrowck-mut-vec-as-imm-slice-bad.rs +++ b/src/test/compile-fail/borrowck-mut-vec-as-imm-slice-bad.rs @@ -1,14 +1,14 @@ -fn want_slice(v: [int]/&) -> int { +fn want_slice(v: &[int]) -> int { let mut sum = 0; - for vec::each(v) { |i| sum += i; } + for vec::each(v) |i| { sum += i; } ret sum; } -fn has_mut_vec(+v: @[mut int]/~) -> int { +fn has_mut_vec(+v: @~[mut int]) -> int { want_slice(*v) //~ ERROR illegal borrow unless pure: creating immutable alias to aliasable, mutable memory //~^ NOTE impure due to access to impure function } fn main() { - assert has_mut_vec(@[mut 1, 2, 3]/~) == 6; + assert has_mut_vec(@~[mut 1, 2, 3]) == 6; } \ No newline at end of file diff --git a/src/test/compile-fail/cap-clause-with-stack-closure.rs b/src/test/compile-fail/cap-clause-with-stack-closure.rs index c7192852c37..fbfbfb198cc 100644 --- a/src/test/compile-fail/cap-clause-with-stack-closure.rs +++ b/src/test/compile-fail/cap-clause-with-stack-closure.rs @@ -3,12 +3,12 @@ fn bar(_f: @int) {} fn main() { let x = @3; - foo {|| bar(x); } + foo(|| bar(x) ); let x = @3; - foo {|copy x| bar(x); } //~ ERROR cannot capture values explicitly with a block closure + foo(|copy x| bar(x) ); //~ ERROR cannot capture values explicitly with a block closure let x = @3; - foo {|move x| bar(x); } //~ ERROR cannot capture values explicitly with a block closure + foo(|move x| bar(x) ); //~ ERROR cannot capture values explicitly with a block closure } diff --git a/src/test/compile-fail/do-lambda-requires-braces.rs b/src/test/compile-fail/do-lambda-requires-braces.rs new file mode 100644 index 00000000000..c0fc3766006 --- /dev/null +++ b/src/test/compile-fail/do-lambda-requires-braces.rs @@ -0,0 +1,5 @@ +fn main() { + do something + |x| do somethingelse //~ ERROR: expecting '{' but found 'do' + |y| say(x, y) +} diff --git a/src/test/compile-fail/do1.rs b/src/test/compile-fail/do1.rs index 2034bfcbc58..a6a5bdd041f 100644 --- a/src/test/compile-fail/do1.rs +++ b/src/test/compile-fail/do1.rs @@ -1,3 +1,3 @@ fn main() { - let x = do y; //~ ERROR: `do` must be followed by a block call + let x = do y; //~ ERROR: expecting '{' but found } diff --git a/src/test/compile-fail/do2.rs b/src/test/compile-fail/do2.rs index 9108c03747f..919d186c40d 100644 --- a/src/test/compile-fail/do2.rs +++ b/src/test/compile-fail/do2.rs @@ -1,5 +1,5 @@ fn f(f: fn@(int) -> bool) -> bool { f(10i) } fn main() { - assert do f() { |i| i == 10i } == 10i; //~ ERROR: expected `bool` but found `int` + assert do f() |i| { i == 10i } == 10i; //~ ERROR: expected `bool` but found `int` } diff --git a/src/test/compile-fail/empty-vec-trailing-comma.rs b/src/test/compile-fail/empty-vec-trailing-comma.rs index 5cc08681e99..2abd554ed6c 100644 --- a/src/test/compile-fail/empty-vec-trailing-comma.rs +++ b/src/test/compile-fail/empty-vec-trailing-comma.rs @@ -1,3 +1,3 @@ fn main() { - let v = [,]/~; //~ ERROR unexpected token: ',' + let v = ~[,]; //~ ERROR unexpected token: ',' } diff --git a/src/test/compile-fail/evec-subtyping.rs b/src/test/compile-fail/evec-subtyping.rs index 2688213740e..2eabfa0c48c 100644 --- a/src/test/compile-fail/evec-subtyping.rs +++ b/src/test/compile-fail/evec-subtyping.rs @@ -1,14 +1,14 @@ -fn wants_box(x: [uint]/@) { } -fn wants_uniq(x: [uint]/~) { } +fn wants_box(x: @[uint]) { } +fn wants_uniq(x: ~[uint]) { } fn wants_three(x: [uint]/3) { } -fn has_box(x: [uint]/@) { +fn has_box(x: @[uint]) { wants_box(x); wants_uniq(x); //~ ERROR [] storage differs: expected ~ but found @ wants_three(x); //~ ERROR [] storage differs: expected 3 but found @ } -fn has_uniq(x: [uint]/~) { +fn has_uniq(x: ~[uint]) { wants_box(x); //~ ERROR [] storage differs: expected @ but found ~ wants_uniq(x); wants_three(x); //~ ERROR [] storage differs: expected 3 but found ~ diff --git a/src/test/compile-fail/fail-type-err.rs b/src/test/compile-fail/fail-type-err.rs index 2bf07c0bf91..f0582e4c9bb 100644 --- a/src/test/compile-fail/fail-type-err.rs +++ b/src/test/compile-fail/fail-type-err.rs @@ -1,2 +1,2 @@ // error-pattern:expected `str` but found `[int]/~` -fn main() { fail [0i]/~; } +fn main() { fail ~[0i]; } diff --git a/src/test/compile-fail/for-loop-decl.rs b/src/test/compile-fail/for-loop-decl.rs index e166a2e7f0f..00f9433eef0 100644 --- a/src/test/compile-fail/for-loop-decl.rs +++ b/src/test/compile-fail/for-loop-decl.rs @@ -10,7 +10,7 @@ fn bitv_to_str(enclosing: fn_info, v: bitv::bitv) -> str { let s = ""; // error is that the value type in the hash map is var_info, not a box - for enclosing.vars.each_value {|val| + for enclosing.vars.each_value |val| { if bitv::get(v, val) { s += "foo"; } } ret s; diff --git a/src/test/compile-fail/import.rs b/src/test/compile-fail/import.rs index 992f4e66eb8..eb47db0725a 100644 --- a/src/test/compile-fail/import.rs +++ b/src/test/compile-fail/import.rs @@ -4,4 +4,4 @@ import zed::baz; mod zed { fn bar() { #debug("bar"); } } -fn main(args: [str]/~) { bar(); } +fn main(args: ~[str]) { bar(); } diff --git a/src/test/compile-fail/import2.rs b/src/test/compile-fail/import2.rs index ea0c2dcd9b0..6d503a62a47 100644 --- a/src/test/compile-fail/import2.rs +++ b/src/test/compile-fail/import2.rs @@ -4,4 +4,4 @@ mod baz { } mod zed { fn bar() { #debug("bar3"); } } -fn main(args: [str]/~) { bar(); } +fn main(args: ~[str]) { bar(); } diff --git a/src/test/compile-fail/import3.rs b/src/test/compile-fail/import3.rs index 9bb941c6daf..939c38fed6a 100644 --- a/src/test/compile-fail/import3.rs +++ b/src/test/compile-fail/import3.rs @@ -1,4 +1,4 @@ // error-pattern: unresolved modulename import main::bar; -fn main(args: [str]/~) { #debug("foo"); } +fn main(args: ~[str]) { #debug("foo"); } diff --git a/src/test/compile-fail/import4.rs b/src/test/compile-fail/import4.rs index ab9a9e2e544..d4688a4fafd 100644 --- a/src/test/compile-fail/import4.rs +++ b/src/test/compile-fail/import4.rs @@ -3,4 +3,4 @@ mod a { import foo = b::foo; export foo; } mod b { import foo = a::foo; export foo; } -fn main(args: [str]/~) { #debug("loop"); } +fn main(args: ~[str]) { #debug("loop"); } diff --git a/src/test/compile-fail/infinite-vec-type-recursion.rs b/src/test/compile-fail/infinite-vec-type-recursion.rs index 35d01d0b624..3c4dfbecfb9 100644 --- a/src/test/compile-fail/infinite-vec-type-recursion.rs +++ b/src/test/compile-fail/infinite-vec-type-recursion.rs @@ -1,6 +1,6 @@ // -*- rust -*- // error-pattern: illegal recursive type -type x = [x]/~; +type x = ~[x]; -fn main() { let b: x = []/~; } +fn main() { let b: x = ~[]; } diff --git a/src/test/compile-fail/issue-1655.rs b/src/test/compile-fail/issue-1655.rs index 32785c23167..bfaca3155f8 100644 --- a/src/test/compile-fail/issue-1655.rs +++ b/src/test/compile-fail/issue-1655.rs @@ -1,11 +1,11 @@ // error-pattern:expected item mod blade_runner { - #[doc( + #~[doc( brief = "Blade Runner is probably the best movie ever", desc = "I like that in the world of Blade Runner it is always raining, and that it's always night time. And Aliens was also a really good movie. Alien 3 was crap though." - )]/~ + )] } diff --git a/src/test/compile-fail/issue-1896.rs b/src/test/compile-fail/issue-1896.rs index 765e014f7d8..f01d5b23f84 100644 --- a/src/test/compile-fail/issue-1896.rs +++ b/src/test/compile-fail/issue-1896.rs @@ -3,6 +3,6 @@ type t<T> = { f: fn() -> T }; fn f<T>(_x: t<T>) {} fn main() { - let x: t<()> = { f: { || () } }; //~ ERROR expressions with stack closure + let x: t<()> = { f: || () }; //~ ERROR expressions with stack closure f(x); } diff --git a/src/test/compile-fail/issue-1965.rs b/src/test/compile-fail/issue-1965.rs index 0774c0d21c2..b5e68b51ed4 100644 --- a/src/test/compile-fail/issue-1965.rs +++ b/src/test/compile-fail/issue-1965.rs @@ -3,5 +3,5 @@ fn test(-x: uint) {} fn main() { let i = 3u; - for uint::range(0u, 10u) {|_x| test(i)} + for uint::range(0u, 10u) |_x| {test(i)} } diff --git a/src/test/compile-fail/issue-2149.rs b/src/test/compile-fail/issue-2149.rs index c8b36e0da18..df2138fc15a 100644 --- a/src/test/compile-fail/issue-2149.rs +++ b/src/test/compile-fail/issue-2149.rs @@ -1,11 +1,11 @@ -impl monad<A> for [A]/~ { - fn bind<B>(f: fn(A) -> [B]/~) { +impl monad<A> for ~[A] { + fn bind<B>(f: fn(A) -> ~[B]) { let mut r = fail; - for self.each {|elt| r += f(elt); } + for self.each |elt| { r += f(elt); } //~^ WARNING unreachable expression //~^^ ERROR the type of this value must be known } } fn main() { - ["hi"].bind {|x| [x] }; + ["hi"].bind({|x| [x] }); } \ No newline at end of file diff --git a/src/test/compile-fail/issue-2150.rs b/src/test/compile-fail/issue-2150.rs index c523de89e84..79f0598baac 100644 --- a/src/test/compile-fail/issue-2150.rs +++ b/src/test/compile-fail/issue-2150.rs @@ -1,6 +1,6 @@ -fn fail_len(v: [const int]/~) -> uint { +fn fail_len(v: ~[const int]) -> uint { let mut i = fail; - for v.each {|x| i += 1u; } + for v.each |x| { i += 1u; } //~^ WARNING unreachable statement //~^^ ERROR the type of this value must be known ret i; diff --git a/src/test/compile-fail/issue-2151.rs b/src/test/compile-fail/issue-2151.rs index 06ee782e779..f97f57639b1 100644 --- a/src/test/compile-fail/issue-2151.rs +++ b/src/test/compile-fail/issue-2151.rs @@ -1,5 +1,5 @@ fn main() { - vec::iter(fail) {|i| + do vec::iter(fail) |i| { log (debug, i * 2); //~^ ERROR the type of this value must be known }; diff --git a/src/test/compile-fail/issue-2487-b.rs b/src/test/compile-fail/issue-2487-b.rs index 9868bef2ec3..f36c52ce354 100644 --- a/src/test/compile-fail/issue-2487-b.rs +++ b/src/test/compile-fail/issue-2487-b.rs @@ -6,7 +6,7 @@ class socket { drop { } fn set_identity() { - closure { || + do closure || { setsockopt_bytes(self.sock) //~ ERROR copying a noncopyable value } } diff --git a/src/test/compile-fail/issue-2509-a.rs b/src/test/compile-fail/issue-2509-a.rs index 7039bec64d1..e1c6002af12 100644 --- a/src/test/compile-fail/issue-2509-a.rs +++ b/src/test/compile-fail/issue-2509-a.rs @@ -4,6 +4,6 @@ class c { //~ ERROR a class must have at least one field fn main() { let a = c(); - let x = [a]/~; + let x = ~[a]; let _y = x[0]; } diff --git a/src/test/compile-fail/kindck-implicit-close-over-mut-var.rs b/src/test/compile-fail/kindck-implicit-close-over-mut-var.rs index f1da5ef1328..07e0a6aecdd 100644 --- a/src/test/compile-fail/kindck-implicit-close-over-mut-var.rs +++ b/src/test/compile-fail/kindck-implicit-close-over-mut-var.rs @@ -3,7 +3,7 @@ fn use(_i: int) {} fn foo() { // Here, i is *moved* into the closure: Not actually OK let mut i = 0; - task::spawn {|| + do task::spawn || { use(i); //~ ERROR mutable variables cannot be implicitly captured } } @@ -13,7 +13,7 @@ fn bar() { // is mutable: bad let mut i = 0; while i < 10 { - task::spawn {|| + do task::spawn || { use(i); //~ ERROR mutable variables cannot be implicitly captured } i += 1; @@ -24,7 +24,7 @@ fn car() { // Here, i is mutable, but *explicitly* copied: let mut i = 0; while i < 10 { - task::spawn {|copy i| + do task::spawn |copy i| { use(i); } i += 1; diff --git a/src/test/compile-fail/lambda-mutate-nested.rs b/src/test/compile-fail/lambda-mutate-nested.rs index a989c543b54..944282a4deb 100644 --- a/src/test/compile-fail/lambda-mutate-nested.rs +++ b/src/test/compile-fail/lambda-mutate-nested.rs @@ -5,7 +5,7 @@ fn f2(x: fn()) { x(); } fn main() { let i = 0; - let ctr = fn@ () -> int { f2({|| i = i + 1; }); ret i; }; + let ctr = fn@ () -> int { f2(|| i = i + 1 ); ret i; }; log(error, ctr()); log(error, ctr()); log(error, ctr()); diff --git a/src/test/compile-fail/liveness-closure-require-ret.rs b/src/test/compile-fail/liveness-closure-require-ret.rs index 6a05ccf0605..105ffa11703 100644 --- a/src/test/compile-fail/liveness-closure-require-ret.rs +++ b/src/test/compile-fail/liveness-closure-require-ret.rs @@ -1,3 +1,4 @@ +// xfail-test After the closure syntax change this started failing with the wrong error message // error-pattern: not all control paths return fn force(f: fn() -> int) -> int { f() } -fn main() { log(error, force({|| })); } +fn main() { log(error, force(|| {})); } diff --git a/src/test/compile-fail/liveness-issue-2163.rs b/src/test/compile-fail/liveness-issue-2163.rs index 0165cd8e86f..a0c52f1b4a9 100644 --- a/src/test/compile-fail/liveness-issue-2163.rs +++ b/src/test/compile-fail/liveness-issue-2163.rs @@ -1,5 +1,6 @@ -fn main(_s: [str]/~) { - let a: [int]/~ = []/~; - vec::each(a) { |_x| //~ ERROR not all control paths return a value +// xfail-test After the closure syntax change this started failing with the wrong error message +fn main(_s: ~[str]) { + let a: ~[int] = ~[]; + do vec::each(a) |_x| { //~ ERROR not all control paths return a value } } diff --git a/src/test/compile-fail/liveness-use-in-index-lvalue.rs b/src/test/compile-fail/liveness-use-in-index-lvalue.rs index de0706ecb6c..8aee7acf819 100644 --- a/src/test/compile-fail/liveness-use-in-index-lvalue.rs +++ b/src/test/compile-fail/liveness-use-in-index-lvalue.rs @@ -1,5 +1,5 @@ fn test() { - let w: [int]/~; + let w: ~[int]; w[5] = 0; //~ ERROR use of possibly uninitialized variable: `w` } diff --git a/src/test/compile-fail/lub-in-args.rs b/src/test/compile-fail/lub-in-args.rs index dd44a137161..c94739d6011 100644 --- a/src/test/compile-fail/lub-in-args.rs +++ b/src/test/compile-fail/lub-in-args.rs @@ -1,8 +1,8 @@ fn two_args<T>(x: T, y: T) { } fn main() { - let x: [mut int]/~ = [mut 3]/~; - let y: [int]/~ = [3]/~; + let x: ~[mut int] = ~[mut 3]; + let y: ~[int] = ~[3]; let a: @mut int = @mut 3; let b: @int = @3; diff --git a/src/test/compile-fail/mutable-huh-variance-box.rs b/src/test/compile-fail/mutable-huh-variance-box.rs index 7fbb8bca0f7..637f9226957 100644 --- a/src/test/compile-fail/mutable-huh-variance-box.rs +++ b/src/test/compile-fail/mutable-huh-variance-box.rs @@ -1,10 +1,10 @@ // error-pattern: mismatched types fn main() { - let v = @mut [0]/~; + let v = @mut ~[0]; - fn f(&&v: @mut [const int]/~) { - *v = [mut 3]/~ + fn f(&&v: @mut ~[const int]) { + *v = ~[mut 3] } f(v); diff --git a/src/test/compile-fail/mutable-huh-variance-deep.rs b/src/test/compile-fail/mutable-huh-variance-deep.rs index 1d7de6b078f..b42d6ec0c59 100644 --- a/src/test/compile-fail/mutable-huh-variance-deep.rs +++ b/src/test/compile-fail/mutable-huh-variance-deep.rs @@ -1,9 +1,9 @@ // error-pattern: mismatched types fn main() { - let v = [mut @mut ~mut [0]/~]/~; + let v = ~[mut @mut ~mut ~[0]]; - fn f(&&v: [mut @mut ~mut [const int]/~]/~) { + fn f(&&v: ~[mut @mut ~mut ~[const int]]) { } f(v); diff --git a/src/test/compile-fail/mutable-huh-variance-ptr.rs b/src/test/compile-fail/mutable-huh-variance-ptr.rs index 2b807f6cc24..f6350571289 100644 --- a/src/test/compile-fail/mutable-huh-variance-ptr.rs +++ b/src/test/compile-fail/mutable-huh-variance-ptr.rs @@ -3,12 +3,12 @@ use std; fn main() { - let a = [0]/~; - let v: *mut [int]/~ = ptr::mut_addr_of(a); + let a = ~[0]; + let v: *mut ~[int] = ptr::mut_addr_of(a); - fn f(&&v: *mut [const int]/~) { + fn f(&&v: *mut ~[const int]) { unsafe { - *v = [mut 3]/~ + *v = ~[mut 3] } } diff --git a/src/test/compile-fail/mutable-huh-variance-rec.rs b/src/test/compile-fail/mutable-huh-variance-rec.rs index 326dcd2c1ae..77e65af5f8c 100644 --- a/src/test/compile-fail/mutable-huh-variance-rec.rs +++ b/src/test/compile-fail/mutable-huh-variance-rec.rs @@ -1,10 +1,10 @@ // error-pattern: mismatched types fn main() { - let v = {mut g: [0]/~}; + let v = {mut g: ~[0]}; - fn f(&&v: {mut g: [const int]/~}) { - v.g = [mut 3]/~ + fn f(&&v: {mut g: ~[const int]}) { + v.g = ~[mut 3] } f(v); diff --git a/src/test/compile-fail/mutable-huh-variance-unique.rs b/src/test/compile-fail/mutable-huh-variance-unique.rs index 2cc0bcb6145..7218fb44362 100644 --- a/src/test/compile-fail/mutable-huh-variance-unique.rs +++ b/src/test/compile-fail/mutable-huh-variance-unique.rs @@ -1,10 +1,10 @@ // error-pattern: mismatched types fn main() { - let v = ~mut [0]/~; + let v = ~mut ~[0]; - fn f(&&v: ~mut [const int]/~) { - *v = [mut 3]/~ + fn f(&&v: ~mut ~[const int]) { + *v = ~[mut 3] } f(v); diff --git a/src/test/compile-fail/mutable-huh-variance-vec1.rs b/src/test/compile-fail/mutable-huh-variance-vec1.rs index 3a3575e001f..18ca917e8b6 100644 --- a/src/test/compile-fail/mutable-huh-variance-vec1.rs +++ b/src/test/compile-fail/mutable-huh-variance-vec1.rs @@ -1,10 +1,10 @@ fn main() { // Note: explicit type annot is required here // because otherwise the inference gets smart - // and assigns a type of [mut [const int]/~]/~. - let v: [mut [int]/~]/~ = [mut [0]/~]/~; + // and assigns a type of ~[mut ~[const int]]. + let v: ~[mut ~[int]] = ~[mut ~[0]]; - fn f(&&v: [mut [const int]/~]/~) { + fn f(&&v: ~[mut ~[const int]]) { v[0] = [mut 3] } diff --git a/src/test/compile-fail/mutable-huh-variance-vec2.rs b/src/test/compile-fail/mutable-huh-variance-vec2.rs index 69a69b447be..0135c08c128 100644 --- a/src/test/compile-fail/mutable-huh-variance-vec2.rs +++ b/src/test/compile-fail/mutable-huh-variance-vec2.rs @@ -1,10 +1,10 @@ fn main() { // Note: explicit type annot is required here // because otherwise the inference gets smart - // and assigns a type of [mut [const int]/~]/~. - let v: [mut [mut int]/~]/~ = [mut [mut 0]/~]/~; + // and assigns a type of ~[mut ~[const int]]. + let v: ~[mut ~[mut int]] = ~[mut ~[mut 0]]; - fn f(&&v: [mut [const int]/~]/~) { + fn f(&&v: ~[mut ~[const int]]) { v[0] = [3] } diff --git a/src/test/compile-fail/mutable-huh-variance-vec3.rs b/src/test/compile-fail/mutable-huh-variance-vec3.rs index 81844c69341..fde3d8a152a 100644 --- a/src/test/compile-fail/mutable-huh-variance-vec3.rs +++ b/src/test/compile-fail/mutable-huh-variance-vec3.rs @@ -1,10 +1,10 @@ fn main() { // Note: explicit type annot is required here // because otherwise the inference gets smart - // and assigns a type of [mut [const int]/~]/~. + // and assigns a type of ~[mut ~[const int]]. let v: [mut[mut[int]]] = [mut [mut [0]]]; - fn f(&&v: [mut [mut [const int]/~]/~]/~) { + fn f(&&v: ~[mut ~[mut ~[const int]]]) { v[0][1] = [mut 3] } diff --git a/src/test/compile-fail/mutable-huh-variance-vec4.rs b/src/test/compile-fail/mutable-huh-variance-vec4.rs index ebb535bcdd0..f234bc677c4 100644 --- a/src/test/compile-fail/mutable-huh-variance-vec4.rs +++ b/src/test/compile-fail/mutable-huh-variance-vec4.rs @@ -3,26 +3,26 @@ fn main() { // Note: here we do not have any type annotations // but we do express conflicting requirements: - let v = [mut [0]/~]/~; - let w = [mut [mut 0]/~]/~; - let x = [mut [mut 0]/~]/~; + let v = ~[mut ~[0]]; + let w = ~[mut ~[mut 0]]; + let x = ~[mut ~[mut 0]]; - fn f(&&v: [mut [int]/~]/~) { + fn f(&&v: ~[mut ~[int]]) { v[0] = [3] } - fn g(&&v: [const [const int]/~]/~) { + fn g(&&v: ~[const ~[const int]]) { } - fn h(&&v: [mut [mut int]/~]/~) { + fn h(&&v: ~[mut ~[mut int]]) { v[0] = [mut 3] } - fn i(&&v: [mut [const int]/~]/~) { + fn i(&&v: ~[mut ~[const int]]) { v[0] = [mut 3] } - fn j(&&v: [[const int]/~]/~) { + fn j(&&v: ~[~[const int]]) { } f(v); @@ -38,7 +38,7 @@ fn main() { j(w); //~ ERROR (values differ in mutability) // Note that without adding f() or h() to the mix, it is valid for - // x to have the type [mut [const int]/~]/~, and thus we can safely + // x to have the type ~[mut ~[const int]], and thus we can safely // call g() and i() but not j(): g(x); i(x); diff --git a/src/test/compile-fail/mutable-huh-vec-assign.rs b/src/test/compile-fail/mutable-huh-vec-assign.rs index 9bf6b457513..aad87ccbb6e 100644 --- a/src/test/compile-fail/mutable-huh-vec-assign.rs +++ b/src/test/compile-fail/mutable-huh-vec-assign.rs @@ -1,10 +1,10 @@ fn main() { - fn f(&&v: [const int]/~) { + fn f(&&v: ~[const int]) { // This shouldn't be possible v[0] = 1 //~ ERROR assigning to const vec content } - let v = [0]/~; + let v = ~[0]; f(v); } diff --git a/src/test/compile-fail/nested-ty-params.rs b/src/test/compile-fail/nested-ty-params.rs index 12e2205c3b2..de696f21b25 100644 --- a/src/test/compile-fail/nested-ty-params.rs +++ b/src/test/compile-fail/nested-ty-params.rs @@ -1,5 +1,5 @@ // error-pattern:attempt to use a type argument out of scope -fn hd<U>(v: [U]/~) -> U { +fn hd<U>(v: ~[U]) -> U { fn hd1(w: [U]) -> U { ret w[0]; } ret hd1(v); diff --git a/src/test/compile-fail/no-capture-arc.rs b/src/test/compile-fail/no-capture-arc.rs index ec489529739..299b491605e 100644 --- a/src/test/compile-fail/no-capture-arc.rs +++ b/src/test/compile-fail/no-capture-arc.rs @@ -3,10 +3,10 @@ import comm::*; fn main() { - let v = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]/~; + let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let arc_v = arc::arc(v); - task::spawn() {|| + do task::spawn() || { let v = *arc::get(&arc_v); assert v[3] == 4; }; diff --git a/src/test/compile-fail/no-reuse-move-arc.rs b/src/test/compile-fail/no-reuse-move-arc.rs index 87923c9cbeb..31fc99e276a 100644 --- a/src/test/compile-fail/no-reuse-move-arc.rs +++ b/src/test/compile-fail/no-reuse-move-arc.rs @@ -1,10 +1,10 @@ import comm::*; fn main() { - let v = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]/~; + let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let arc_v = arc::arc(v); - task::spawn() {|move arc_v| //~ NOTE move of variable occurred here + do task::spawn() |move arc_v| { //~ NOTE move of variable occurred here let v = *arc::get(&arc_v); assert v[3] == 4; }; diff --git a/src/test/compile-fail/no-send-res-ports.rs b/src/test/compile-fail/no-send-res-ports.rs index b6ff823615a..b6304693149 100644 --- a/src/test/compile-fail/no-send-res-ports.rs +++ b/src/test/compile-fail/no-send-res-ports.rs @@ -7,7 +7,7 @@ fn main() { let x = ~mut some(foo(comm::port())); - task::spawn {|move x| //~ ERROR not a sendable value + do task::spawn |move x| { //~ ERROR not a sendable value let mut y = none; *x <-> y; log(error, y); diff --git a/src/test/compile-fail/non-const.rs b/src/test/compile-fail/non-const.rs index 5319b28e910..885ac1a8f94 100644 --- a/src/test/compile-fail/non-const.rs +++ b/src/test/compile-fail/non-const.rs @@ -17,8 +17,8 @@ class r2 { fn main() { foo({f: 3}); foo({mut f: 3}); //~ ERROR missing `const` - foo([1]/~); - foo([mut 1]/~); //~ ERROR missing `const` + foo(~[1]); + foo(~[mut 1]); //~ ERROR missing `const` foo(~1); foo(~mut 1); //~ ERROR missing `const` foo(@1); diff --git a/src/test/compile-fail/non-copyable-void.rs b/src/test/compile-fail/non-copyable-void.rs index 6a77d0fb6e5..59136683e6f 100644 --- a/src/test/compile-fail/non-copyable-void.rs +++ b/src/test/compile-fail/non-copyable-void.rs @@ -1,5 +1,5 @@ fn main() { - let x : *[int]/~ = ptr::addr_of([1,2,3]/~); + let x : *~[int] = ptr::addr_of(~[1,2,3]); let y : *libc::c_void = x as *libc::c_void; unsafe { let _z = *y; diff --git a/src/test/compile-fail/nonsense-constraints.rs b/src/test/compile-fail/nonsense-constraints.rs index e05581aa10b..2ac5ac128fc 100644 --- a/src/test/compile-fail/nonsense-constraints.rs +++ b/src/test/compile-fail/nonsense-constraints.rs @@ -3,10 +3,10 @@ use std; import uint; -fn enum_chars(start: u8, end: u8) : uint::le(start, end) -> [char]/~ { +fn enum_chars(start: u8, end: u8) : uint::le(start, end) -> ~[char] { let i = start; - let r = []/~; - while i <= end { r += [i as char]/~; i += 1u as u8; } + let r = ~[]; + while i <= end { r += ~[i as char]; i += 1u as u8; } ret r; } diff --git a/src/test/compile-fail/pattern-tyvar-2.rs b/src/test/compile-fail/pattern-tyvar-2.rs index 162240ed8aa..4d60457d87f 100644 --- a/src/test/compile-fail/pattern-tyvar-2.rs +++ b/src/test/compile-fail/pattern-tyvar-2.rs @@ -6,7 +6,7 @@ import option::some; // error-pattern: mismatched types -enum bar { t1((), option<[int]/~>), t2, } +enum bar { t1((), option<~[int]>), t2, } fn foo(t: bar) -> int { alt t { t1(_, some(x)) { ret x * 3; } _ { fail; } } } diff --git a/src/test/compile-fail/pattern-tyvar.rs b/src/test/compile-fail/pattern-tyvar.rs index 60c555413ae..8be922cd93a 100644 --- a/src/test/compile-fail/pattern-tyvar.rs +++ b/src/test/compile-fail/pattern-tyvar.rs @@ -5,7 +5,7 @@ import option::some; // error-pattern: mismatched types -enum bar { t1((), option<[int]/~>), t2, } +enum bar { t1((), option<~[int]>), t2, } fn foo(t: bar) { alt t { diff --git a/src/test/compile-fail/private-method.rs b/src/test/compile-fail/private-method.rs index b6a16c33a2b..1a2fd1a6eb1 100644 --- a/src/test/compile-fail/private-method.rs +++ b/src/test/compile-fail/private-method.rs @@ -2,7 +2,7 @@ class cat { priv { let mut meows : uint; - fn nap() { uint::range(1u, 10000u) {|_i|}} + fn nap() { uint::range(1u, 10000u, |_i|{})} } let how_hungry : int; diff --git a/src/test/compile-fail/pure-higher-order.rs b/src/test/compile-fail/pure-higher-order.rs index 830f065b623..13455b1d74f 100644 --- a/src/test/compile-fail/pure-higher-order.rs +++ b/src/test/compile-fail/pure-higher-order.rs @@ -9,7 +9,7 @@ pure fn range(from: uint, to: uint, f: fn(uint)) { } pure fn range2(from: uint, to: uint, f: fn(uint)) { - range(from, to) { |i| + do range(from, to) |i| { f(i*2u); } } @@ -31,7 +31,7 @@ pure fn range6(from: uint, to: uint, x: @{f: fn(uint)}) { } pure fn range7(from: uint, to: uint) { - range(from, to) { |i| + do range(from, to) |i| { print(i); //~ ERROR access to impure function prohibited in pure context } } diff --git a/src/test/compile-fail/pure-loop-body.rs b/src/test/compile-fail/pure-loop-body.rs index 7a7f92e6a51..e418a2f201f 100644 --- a/src/test/compile-fail/pure-loop-body.rs +++ b/src/test/compile-fail/pure-loop-body.rs @@ -7,13 +7,13 @@ pure fn range(from: uint, to: uint, f: fn(uint) -> bool) { } pure fn range2(from: uint, to: uint, f: fn(uint)) { - for range(from, to) { |i| + for range(from, to) |i| { f(i*2u); } } pure fn range3(from: uint, to: uint, f: {x: fn(uint)}) { - for range(from, to) { |i| + for range(from, to) |i| { f.x(i*2u); //~ ERROR access to impure function prohibited } } diff --git a/src/test/compile-fail/qquote-1.rs b/src/test/compile-fail/qquote-1.rs index d460864ac83..43a3536ed16 100644 --- a/src/test/compile-fail/qquote-1.rs +++ b/src/test/compile-fail/qquote-1.rs @@ -29,7 +29,7 @@ impl of fake_ext_ctxt for fake_session { } fn mk_ctxt() -> fake_ext_ctxt { - let opts : fake_options = {cfg: []/~}; + let opts : fake_options = {cfg: ~[]}; {opts: @opts, parse_sess: new_parse_sess()} as fake_ext_ctxt } diff --git a/src/test/compile-fail/qquote-2.rs b/src/test/compile-fail/qquote-2.rs index 4cf49e183ba..c83d26c3a57 100644 --- a/src/test/compile-fail/qquote-2.rs +++ b/src/test/compile-fail/qquote-2.rs @@ -29,7 +29,7 @@ impl of fake_ext_ctxt for fake_session { } fn mk_ctxt() -> fake_ext_ctxt { - let opts : fake_options = {cfg: []/~}; + let opts : fake_options = {cfg: ~[]}; {opts: @opts, parse_sess: new_parse_sess()} as fake_ext_ctxt } diff --git a/src/test/compile-fail/regions-addr-of-upvar-self.rs b/src/test/compile-fail/regions-addr-of-upvar-self.rs index f7e39ee5243..7fa455487e1 100644 --- a/src/test/compile-fail/regions-addr-of-upvar-self.rs +++ b/src/test/compile-fail/regions-addr-of-upvar-self.rs @@ -6,7 +6,7 @@ class dog { } fn chase_cat() { - uint::range(0u, 10u) { |i| + for uint::range(0u, 10u) |i| { let p: &static.mut uint = &mut self.food; //~ ERROR mismatched types *p = 3u; } diff --git a/src/test/compile-fail/regions-escape-loop-via-vec.rs b/src/test/compile-fail/regions-escape-loop-via-vec.rs index c1b096c5f51..5f57a3e87b1 100644 --- a/src/test/compile-fail/regions-escape-loop-via-vec.rs +++ b/src/test/compile-fail/regions-escape-loop-via-vec.rs @@ -2,13 +2,13 @@ // This generates a ton of error msgs at the moment. fn broken() -> int { let mut x = 3; - let mut y = [&mut x]/~; //~ ERROR reference is not valid + let mut y = ~[&mut x]; //~ ERROR reference is not valid while x < 10 { let mut z = x; - y += [&mut z]/~; + y += ~[&mut z]; x += 1; } - vec::foldl(0, y) {|v, p| v + *p } + vec::foldl(0, y, |v, p| v + *p ) //~^ ERROR reference is not valid //~^^ ERROR reference is not valid //~^^^ ERROR reference is not valid diff --git a/src/test/compile-fail/regions-out-of-scope-slice.rs b/src/test/compile-fail/regions-out-of-scope-slice.rs index 02ec5b1b643..c6936cf8392 100644 --- a/src/test/compile-fail/regions-out-of-scope-slice.rs +++ b/src/test/compile-fail/regions-out-of-scope-slice.rs @@ -6,7 +6,7 @@ fn foo(cond: bool) { let mut x; //~ ERROR foo if cond { - x = [1,2,3]/&blk; + x = &[1,2,3]blk; } } diff --git a/src/test/compile-fail/regions-scoping.rs b/src/test/compile-fail/regions-scoping.rs index f694f6a10fa..6e2bd60c4cc 100644 --- a/src/test/compile-fail/regions-scoping.rs +++ b/src/test/compile-fail/regions-scoping.rs @@ -1,7 +1,7 @@ fn with<T>(t: T, f: fn(T)) { f(t) } fn nested(x: &x.int) { // (1) - with( + do with( fn&(x: &x.int, // Refers to the region `x` at (1) y: &y.int, // A fresh region `y` (2) z: fn(x: &x.int, // Refers to `x` at (1) @@ -24,19 +24,19 @@ fn nested(x: &x.int) { // (1) //~^ ERROR mismatched types: expected `&x.int` but found `&y.int` fail; } - ) {|foo| + ) |foo| { - let a: &x.int = foo(x, x) { |_x, _y, z| z }; - let b: &x.int = foo(x, a) { |_x, _y, z| z }; - let c: &x.int = foo(a, a) { |_x, _y, z| z }; + let a: &x.int = foo(x, x, |_x, _y, z| z ); + let b: &x.int = foo(x, a, |_x, _y, z| z ); + let c: &x.int = foo(a, a, |_x, _y, z| z ); let z = 3i; - let d: &x.int = foo(x, x) { |_x, _y, z| z }; - let e: &x.int = foo(x, &z) { |_x, _y, z| z }; - let f: &x.int = foo(&z, &z) { |_x, _y, z| z }; //~ ERROR mismatched types: expected `&x.int` but found + let d: &x.int = foo(x, x, |_x, _y, z| z ); + let e: &x.int = foo(x, &z, |_x, _y, z| z ); + let f: &x.int = foo(&z, &z, |_x, _y, z| z ); //~ ERROR mismatched types: expected `&x.int` but found - foo(x, &z) { |x, _y, _z| x }; //~ ERROR mismatched types: expected `&z.int` but found `&x.int` - foo(x, &z) { |_x, y, _z| y }; //~ ERROR mismatched types: expected `&z.int` but found `&<block at + foo(x, &z, |x, _y, _z| x ); //~ ERROR mismatched types: expected `&z.int` but found `&x.int` + foo(x, &z, |_x, y, _z| y ); //~ ERROR mismatched types: expected `&z.int` but found `&<block at } } diff --git a/src/test/compile-fail/seq-args.rs b/src/test/compile-fail/seq-args.rs index 8edc11e84f6..db2cb9b7a17 100644 --- a/src/test/compile-fail/seq-args.rs +++ b/src/test/compile-fail/seq-args.rs @@ -2,7 +2,7 @@ use std; fn main() { iface seq { } -impl <T> of seq<T> for [T]/~ { //~ ERROR wrong number of type arguments +impl <T> of seq<T> for ~[T] { //~ ERROR wrong number of type arguments /* ... */ } impl of seq<bool> for u32 { diff --git a/src/test/compile-fail/tag-that-dare-not-speak-its-name.rs b/src/test/compile-fail/tag-that-dare-not-speak-its-name.rs index a7b0bb7175d..d57a3e5d204 100644 --- a/src/test/compile-fail/tag-that-dare-not-speak-its-name.rs +++ b/src/test/compile-fail/tag-that-dare-not-speak-its-name.rs @@ -5,7 +5,7 @@ use core; -fn last<T: copy>(v: [const T]/~) -> core::option<T> { +fn last<T: copy>(v: ~[const T]) -> core::option<T> { fail; } diff --git a/src/test/compile-fail/tstate-unsat-in-called-fn-expr.rs b/src/test/compile-fail/tstate-unsat-in-called-fn-expr.rs index a02598a305a..e19393c40f5 100644 --- a/src/test/compile-fail/tstate-unsat-in-called-fn-expr.rs +++ b/src/test/compile-fail/tstate-unsat-in-called-fn-expr.rs @@ -2,7 +2,7 @@ fn foo(v: [int]) : vec::is_empty(v) { #debug("%d", v[0]); } fn main() { let f = fn@() { - let v = [1]/~; + let v = ~[1]; foo(v); //~ ERROR unsatisfied precondition constraint }(); log(error, f); diff --git a/src/test/compile-fail/tstate-unsat-in-fn-expr.rs b/src/test/compile-fail/tstate-unsat-in-fn-expr.rs index 18189629d86..c56662f8562 100644 --- a/src/test/compile-fail/tstate-unsat-in-fn-expr.rs +++ b/src/test/compile-fail/tstate-unsat-in-fn-expr.rs @@ -2,7 +2,7 @@ fn foo(v: [int]) : vec::is_empty(v) { #debug("%d", v[0]); } fn main() { let f = fn@() { - let v = [1]/~; + let v = ~[1]; foo(v); //~ ERROR unsatisfied precondition constraint }; log(error, f()); diff --git a/src/test/compile-fail/unique-vec-res.rs b/src/test/compile-fail/unique-vec-res.rs index 5159d0ddbfe..a58ae84459d 100644 --- a/src/test/compile-fail/unique-vec-res.rs +++ b/src/test/compile-fail/unique-vec-res.rs @@ -6,15 +6,15 @@ class r { drop { *(self.i) = *(self.i) + 1; } } -fn f<T>(+i: [T]/~, +j: [T]/~) { +fn f<T>(+i: ~[T], +j: ~[T]) { let k = i + j; } fn main() { let i1 = @mut 0; let i2 = @mut 1; - let r1 <- [~r(i1)]/~; - let r2 <- [~r(i2)]/~; + let r1 <- ~[~r(i1)]; + let r2 <- ~[~r(i2)]; f(r1, r2); log(debug, (r2, *i1)); log(debug, (r1, *i2)); diff --git a/src/test/compile-fail/vec-add.rs b/src/test/compile-fail/vec-add.rs index 6224fed7434..16af1cb7471 100644 --- a/src/test/compile-fail/vec-add.rs +++ b/src/test/compile-fail/vec-add.rs @@ -4,19 +4,19 @@ // the right hand side in all cases. We are getting compiler errors // about this now, so I'm xfailing the test for now. -eholk -fn add(i: [int]/~, m: [mut int]/~, c: [const int]/~) { +fn add(i: ~[int], m: ~[mut int], c: ~[const int]) { // Check that: // (1) vectors of any two mutabilities can be added // (2) result has mutability of lhs - add(i + [3]/~, - m + [3]/~, - [3]/~); + add(i + ~[3], + m + ~[3], + ~[3]); - add(i + [mut 3]/~, - m + [mut 3]/~, - [mut 3]/~); + add(i + ~[mut 3], + m + ~[mut 3], + ~[mut 3]); add(i + i, m + i, @@ -30,33 +30,33 @@ fn add(i: [int]/~, m: [mut int]/~, c: [const int]/~) { m + c, c); - add(m + [3]/~, //~ ERROR mismatched types - m + [3]/~, - m + [3]/~); + add(m + ~[3], //~ ERROR mismatched types + m + ~[3], + m + ~[3]); - add(i + [3]/~, - i + [3]/~, //~ ERROR mismatched types - i + [3]/~); + add(i + ~[3], + i + ~[3], //~ ERROR mismatched types + i + ~[3]); - add(c + [3]/~, //~ ERROR mismatched types + add(c + ~[3], //~ ERROR mismatched types //~^ ERROR binary operation + cannot be applied - c + [3]/~, //~ ERROR binary operation + cannot be applied + c + ~[3], //~ ERROR binary operation + cannot be applied //~^ mismatched types - [3]/~); + ~[3]); - add(m + [mut 3]/~, //~ ERROR mismatched types - m + [mut 3]/~, - m + [mut 3]/~); + add(m + ~[mut 3], //~ ERROR mismatched types + m + ~[mut 3], + m + ~[mut 3]); - add(i + [mut 3]/~, - i + [mut 3]/~, //~ ERROR mismatched types - i + [mut 3]/~); + add(i + ~[mut 3], + i + ~[mut 3], //~ ERROR mismatched types + i + ~[mut 3]); - add(c + [mut 3]/~, //~ ERROR binary operation + cannot be applied + add(c + ~[mut 3], //~ ERROR binary operation + cannot be applied //~^ mismatched types - c + [mut 3]/~, //~ ERROR binary operation + cannot be applied + c + ~[mut 3], //~ ERROR binary operation + cannot be applied //~^ mismatched types - [mut 3]/~); + ~[mut 3]); add(m + i, //~ ERROR mismatched types m + i, diff --git a/src/test/compile-fail/vec-concat-bug.rs b/src/test/compile-fail/vec-concat-bug.rs index 6545a56806e..33873c087a5 100644 --- a/src/test/compile-fail/vec-concat-bug.rs +++ b/src/test/compile-fail/vec-concat-bug.rs @@ -1,11 +1,11 @@ -fn concat<T: copy>(v: [const [const T]/~]/~) -> [T]/~ { - let mut r = []/~; +fn concat<T: copy>(v: ~[const ~[const T]]) -> ~[T] { + let mut r = ~[]; // Earlier versions of our type checker accepted this: - vec::iter(v) {|&&inner: [T]/~| + vec::iter(v, |&&inner: ~[T]| { //~^ ERROR values differ in mutability r += inner; - } + }); ret r; } diff --git a/src/test/compile-fail/vec-field.rs b/src/test/compile-fail/vec-field.rs index 936b14b6c7d..148ecf05d1a 100644 --- a/src/test/compile-fail/vec-field.rs +++ b/src/test/compile-fail/vec-field.rs @@ -2,7 +2,7 @@ // issue #367 fn f() { - let v = [1i]/~; + let v = ~[1i]; log(debug, v.some_field_name); //type error } diff --git a/src/test/compile-fail/vec-res-add.rs b/src/test/compile-fail/vec-res-add.rs index 4ce5ad3cbf0..f622665d082 100644 --- a/src/test/compile-fail/vec-res-add.rs +++ b/src/test/compile-fail/vec-res-add.rs @@ -8,8 +8,8 @@ class r { fn main() { // This can't make sense as it would copy the classes - let i <- [r(0)]/~; - let j <- [r(1)]/~; + let i <- ~[r(0)]; + let j <- ~[r(1)]; let k = i + j; log(debug, j); } diff --git a/src/test/compile-fail/vector-no-ann.rs b/src/test/compile-fail/vector-no-ann.rs index a8365b66870..6918a248742 100644 --- a/src/test/compile-fail/vector-no-ann.rs +++ b/src/test/compile-fail/vector-no-ann.rs @@ -1,3 +1,3 @@ fn main() { - let _foo = []/~; //~ ERROR unconstrained type + let _foo = ~[]; //~ ERROR unconstrained type } diff --git a/src/test/pretty/blank-lines.rs b/src/test/pretty/blank-lines.rs index 837b85280ec..2c22432401b 100644 --- a/src/test/pretty/blank-lines.rs +++ b/src/test/pretty/blank-lines.rs @@ -1,5 +1,5 @@ // pp-exact -fn f() -> [int]/~ { +fn f() -> [int]/3 { let picard = 0; let data = 1; @@ -7,7 +7,7 @@ fn f() -> [int]/~ { let worf = 2; - let enterprise = [picard, data, worf]/~; + let enterprise = [picard, data, worf]/_; diff --git a/src/test/pretty/block-arg-disambig.rs b/src/test/pretty/block-arg-disambig.rs index e57c7be48a9..4418d82a0e6 100644 --- a/src/test/pretty/block-arg-disambig.rs +++ b/src/test/pretty/block-arg-disambig.rs @@ -1,2 +1,5 @@ +// FIXME: The disambiguation the pretty printer does here +// is probably not necessary anymore + fn blk1(b: fn()) -> fn@() { ret fn@() { }; } -fn test1() { (blk1 {|| #debug["hi"]; })(); } +fn test1() { (do blk1 || { #debug["hi"]; })(); } diff --git a/src/test/pretty/block-disambig.rs b/src/test/pretty/block-disambig.rs index a6e0cc7f610..0f504bb8e89 100644 --- a/src/test/pretty/block-disambig.rs +++ b/src/test/pretty/block-disambig.rs @@ -39,9 +39,9 @@ fn test8() -> int { fn test9() { let regs = @mut 0; alt check true { true { } } *regs += 1; } fn test10() -> int { - let regs = @mut [0]/~; + let regs = @mut ~[0]; alt check true { true { } } (*regs)[0] } -fn test11() -> [int]/~ { if true { } [1, 2]/~ } +fn test11() -> ~[int] { if true { } ~[1, 2] } diff --git a/src/test/pretty/disamb-stmt-expr.rs b/src/test/pretty/disamb-stmt-expr.rs index b525d0a2da5..546020b6cd8 100644 --- a/src/test/pretty/disamb-stmt-expr.rs +++ b/src/test/pretty/disamb-stmt-expr.rs @@ -6,5 +6,5 @@ fn id(f: fn() -> int) -> int { f() } -fn wsucc(n: int) -> int { (id {|| 1 }) - 0 } +fn wsucc(n: int) -> int { (do id || { 1 }) - 0 } fn main() { } diff --git a/src/test/pretty/do1.rs b/src/test/pretty/do1.rs new file mode 100644 index 00000000000..1536a294d18 --- /dev/null +++ b/src/test/pretty/do1.rs @@ -0,0 +1,7 @@ +// pretty-exact + +fn f(f: fn@(int)) { f(10) } + +fn main() { + do f |i| { assert i == 10 } +} diff --git a/src/test/pretty/doc-comments.rs b/src/test/pretty/doc-comments.rs index 835c1d9640a..175940f008a 100644 --- a/src/test/pretty/doc-comments.rs +++ b/src/test/pretty/doc-comments.rs @@ -6,7 +6,7 @@ fn a() { } fn b() { - //! some single line inner-docs + //~ some single line inner-docs } /* diff --git a/src/test/pretty/vec-comments.rs b/src/test/pretty/vec-comments.rs index 5492484d0c8..6520cb5e5e5 100644 --- a/src/test/pretty/vec-comments.rs +++ b/src/test/pretty/vec-comments.rs @@ -3,27 +3,27 @@ // pp-exact:vec-comments.pp fn main() { let v1 = - [ + ~[ // Comment 0, // Comment 1, // Comment - 2]/~; + 2]; let v2 = - [0, // Comment + ~[0, // Comment 1, // Comment - 2]/~; // Comment + 2]; // Comment let v3 = - [ + ~[ /* Comment */ 0, /* Comment */ 1, /* Comment */ - 2]/~; + 2]; let v4 = - [0, /* Comment */ + ~[0, /* Comment */ 1, /* Comment */ - 2]/~; /* Comment */ + 2]; /* Comment */ } diff --git a/src/test/pretty/vec-type.rs b/src/test/pretty/vec-type.rs index af111a64c3f..f84e0f9d25b 100644 --- a/src/test/pretty/vec-type.rs +++ b/src/test/pretty/vec-type.rs @@ -1,5 +1,5 @@ // pp-exact:vec-type.pp -fn f1(x: [int]/~) { } +fn f1(x: ~[int]) { } -fn g1() { f1([1, 2, 3]/~); } +fn g1() { f1(~[1, 2, 3]); } diff --git a/src/test/run-fail/bug-2470-bounds-check-overflow-2.rs b/src/test/run-fail/bug-2470-bounds-check-overflow-2.rs index dcea4e893f6..111367c8020 100644 --- a/src/test/run-fail/bug-2470-bounds-check-overflow-2.rs +++ b/src/test/run-fail/bug-2470-bounds-check-overflow-2.rs @@ -2,7 +2,7 @@ // error-pattern:bounds check fn main() { - let x = [1u,2u,3u]/~; + let x = ~[1u,2u,3u]; // This should cause a bounds-check failure, but may not if we do our // bounds checking by comparing a scaled index value to the vector's diff --git a/src/test/run-fail/bug-2470-bounds-check-overflow-3.rs b/src/test/run-fail/bug-2470-bounds-check-overflow-3.rs index a547bc1a3f3..7e9853eaccb 100644 --- a/src/test/run-fail/bug-2470-bounds-check-overflow-3.rs +++ b/src/test/run-fail/bug-2470-bounds-check-overflow-3.rs @@ -3,7 +3,7 @@ #[cfg(target_arch="x86")] fn main() { - let x = [1u,2u,3u]/~; + let x = ~[1u,2u,3u]; // This should cause a bounds-check failure, but may not if we do our // bounds checking by truncating the index value to the size of the @@ -23,6 +23,6 @@ fn main() { #[cfg(target_arch="x86_64")] fn main() { // This version just fails anyways, for symmetry on 64-bit hosts. - let x = [1u,2u,3u]/~; + let x = ~[1u,2u,3u]; #error("ov3 0x%x", x[200]); } diff --git a/src/test/run-fail/bug-2470-bounds-check-overflow.rs b/src/test/run-fail/bug-2470-bounds-check-overflow.rs index 710cecd99e2..3fcc768c4d9 100644 --- a/src/test/run-fail/bug-2470-bounds-check-overflow.rs +++ b/src/test/run-fail/bug-2470-bounds-check-overflow.rs @@ -8,8 +8,8 @@ fn main() { // address of the 0th cell in the array (even though the index is // huge). - let x = [1u,2u,3u]/~; - vec::unpack_slice(x) {|p, _len| + let x = ~[1u,2u,3u]; + do vec::unpack_slice(x) |p, _len| { let base = p as uint; // base = 0x1230 say let idx = base / sys::size_of::<uint>(); // idx = 0x0246 say #error("ov1 base = 0x%x", base); diff --git a/src/test/run-fail/crust-fail.rs b/src/test/run-fail/crust-fail.rs index 7b844cc3b47..8dac4036c0a 100644 --- a/src/test/run-fail/crust-fail.rs +++ b/src/test/run-fail/crust-fail.rs @@ -21,8 +21,8 @@ fn count(n: uint) -> uint { } fn main() { - iter::repeat(10u) {|| - task::spawn {|| + do iter::repeat(10u) || { + do task::spawn || { let result = count(5u); #debug("result = %?", result); fail; diff --git a/src/test/run-fail/for-each-loop-fail.rs b/src/test/run-fail/for-each-loop-fail.rs index cdb054a4e8d..a52b75f6cad 100644 --- a/src/test/run-fail/for-each-loop-fail.rs +++ b/src/test/run-fail/for-each-loop-fail.rs @@ -1,4 +1,4 @@ // error-pattern:moop use std; import uint; -fn main() { uint::range(0u, 10u) {|_i| fail "moop"; } } +fn main() { for uint::range(0u, 10u) |_i| { fail "moop"; } } diff --git a/src/test/run-fail/issue-2144.rs b/src/test/run-fail/issue-2144.rs index 446c488ad1a..1c64b20c0b7 100644 --- a/src/test/run-fail/issue-2144.rs +++ b/src/test/run-fail/issue-2144.rs @@ -3,7 +3,7 @@ // Don't leak when the landing pads need to request more stack // than is allowed during normal execution -fn useBlock(f: fn~() -> uint) { useBlock({|| 22u }) } +fn useBlock(f: fn~() -> uint) { useBlock(|| 22u ) } fn main() { - useBlock({|| 22u }); + useBlock(|| 22u ); } diff --git a/src/test/run-fail/issue-2156.rs b/src/test/run-fail/issue-2156.rs index 90ed994ee8f..921842a32d2 100644 --- a/src/test/run-fail/issue-2156.rs +++ b/src/test/run-fail/issue-2156.rs @@ -4,7 +4,7 @@ use std; import io::{reader, reader_util}; fn main() { - io::with_str_reader("") { |rdr| + do io::with_str_reader("") |rdr| { alt rdr.read_char() { '=' { } _ { fail } } } } diff --git a/src/test/run-fail/linked-failure.rs b/src/test/run-fail/linked-failure.rs index 62ecac925bb..6189567d017 100644 --- a/src/test/run-fail/linked-failure.rs +++ b/src/test/run-fail/linked-failure.rs @@ -10,6 +10,6 @@ fn child() { assert (1 == 2); } fn main() { let p = port::<int>(); - task::spawn {|| child(); }; + task::spawn(|| child() ); let x = recv(p); } diff --git a/src/test/run-fail/linked-failure2.rs b/src/test/run-fail/linked-failure2.rs index 5350159475c..f2ad8fb39ea 100644 --- a/src/test/run-fail/linked-failure2.rs +++ b/src/test/run-fail/linked-failure2.rs @@ -11,6 +11,6 @@ fn child() { fail; } fn main() { let p = port::<int>(); - task::spawn {|| child(); }; + task::spawn(|| child() ); task::yield(); } diff --git a/src/test/run-fail/linked-failure3.rs b/src/test/run-fail/linked-failure3.rs index 4c807712c65..22f6661716c 100644 --- a/src/test/run-fail/linked-failure3.rs +++ b/src/test/run-fail/linked-failure3.rs @@ -10,12 +10,12 @@ fn grandchild() { fail "grandchild dies"; } fn child() { let p = port::<int>(); - task::spawn {|| grandchild(); }; + task::spawn(|| grandchild() ); let x = recv(p); } fn main() { let p = port::<int>(); - task::spawn {|| child(); }; + task::spawn(|| child() ); let x = recv(p); } diff --git a/src/test/run-fail/linked-failure4.rs b/src/test/run-fail/linked-failure4.rs index 629cc925749..fb7d596145d 100644 --- a/src/test/run-fail/linked-failure4.rs +++ b/src/test/run-fail/linked-failure4.rs @@ -10,7 +10,7 @@ fn child() { assert (1 == 2); } fn parent() { let p = port::<int>(); - task::spawn {|| child(); }; + task::spawn(|| child() ); let x = recv(p); } @@ -22,6 +22,6 @@ fn sleeper() { } fn main() { - task::spawn {|| sleeper(); }; - task::spawn {|| parent(); }; + task::spawn(|| sleeper() ); + task::spawn(|| parent() ); } \ No newline at end of file diff --git a/src/test/run-fail/morestack2.rs b/src/test/run-fail/morestack2.rs index d68e9c49789..9ecc3c6a07b 100644 --- a/src/test/run-fail/morestack2.rs +++ b/src/test/run-fail/morestack2.rs @@ -34,7 +34,7 @@ class and_then_get_big_again { } fn main() { - task::spawn {|| + do task::spawn || { let r = and_then_get_big_again(4); getbig_call_c_and_fail(10000); }; diff --git a/src/test/run-fail/morestack3.rs b/src/test/run-fail/morestack3.rs index 0f0aea5ccd1..281b36e27a3 100644 --- a/src/test/run-fail/morestack3.rs +++ b/src/test/run-fail/morestack3.rs @@ -27,7 +27,7 @@ class and_then_get_big_again { } fn main() { - task::spawn {|| + do task::spawn || { getbig_and_fail(400); }; } \ No newline at end of file diff --git a/src/test/run-fail/morestack4.rs b/src/test/run-fail/morestack4.rs index b1c86d5110a..a10f23c9117 100644 --- a/src/test/run-fail/morestack4.rs +++ b/src/test/run-fail/morestack4.rs @@ -20,7 +20,7 @@ class and_then_get_big_again { } fn main() { - task::spawn {|| + do task::spawn || { getbig_and_fail(1); }; } \ No newline at end of file diff --git a/src/test/run-fail/rt-set-exit-status-fail2.rs b/src/test/run-fail/rt-set-exit-status-fail2.rs index 2c2665446cd..a38b9ed1f70 100644 --- a/src/test/run-fail/rt-set-exit-status-fail2.rs +++ b/src/test/run-fail/rt-set-exit-status-fail2.rs @@ -13,7 +13,7 @@ class r { fn main() { log(error, "whatever"); - task::spawn {|| + do task::spawn || { let i = r(5); }; fail; diff --git a/src/test/run-fail/small-negative-indexing.rs b/src/test/run-fail/small-negative-indexing.rs index 0d73cc3e6c5..96f0c12c760 100644 --- a/src/test/run-fail/small-negative-indexing.rs +++ b/src/test/run-fail/small-negative-indexing.rs @@ -1,6 +1,6 @@ // error-pattern:bounds check fn main() { - let v = vec::from_fn(1024u) {|n| n}; + let v = vec::from_fn(1024u, {|n| n}); // this should trip a bounds check log(error, v[-1i8]); } diff --git a/src/test/run-fail/spawnfail.rs b/src/test/run-fail/spawnfail.rs index 8d342d7fe74..5d780f1f5f1 100644 --- a/src/test/run-fail/spawnfail.rs +++ b/src/test/run-fail/spawnfail.rs @@ -8,5 +8,5 @@ fn main() { fn f() { fail; } - task::spawn {|| f(); }; + task::spawn(|| f() ); } \ No newline at end of file diff --git a/src/test/run-fail/task-comm-recv-block.rs b/src/test/run-fail/task-comm-recv-block.rs index 50a32ed296c..1e703bc47f9 100644 --- a/src/test/run-fail/task-comm-recv-block.rs +++ b/src/test/run-fail/task-comm-recv-block.rs @@ -10,7 +10,7 @@ fn goodfail() { } fn main() { - task::spawn {|| goodfail(); }; + task::spawn(|| goodfail() ); let po = comm::port(); // We shouldn't be able to get past this recv since there's no // message available diff --git a/src/test/run-fail/unwind-box-vec.rs b/src/test/run-fail/unwind-box-vec.rs index 616a9e75cef..f47b98446a6 100644 --- a/src/test/run-fail/unwind-box-vec.rs +++ b/src/test/run-fail/unwind-box-vec.rs @@ -5,7 +5,7 @@ fn failfn() { } fn main() { - let x = @[0, 1, 2, 3, 4, 5]/~; + let x = @~[0, 1, 2, 3, 4, 5]; failfn(); log(error, x); } \ No newline at end of file diff --git a/src/test/run-fail/unwind-interleaved.rs b/src/test/run-fail/unwind-interleaved.rs index d914ff82219..60d75eefe0f 100644 --- a/src/test/run-fail/unwind-interleaved.rs +++ b/src/test/run-fail/unwind-interleaved.rs @@ -5,8 +5,8 @@ fn a() { } fn b() { fail; } fn main() { - let x = [0]/~; + let x = ~[0]; a(); - let y = [0]/~; + let y = ~[0]; b(); } \ No newline at end of file diff --git a/src/test/run-fail/unwind-iter.rs b/src/test/run-fail/unwind-iter.rs index bc272ab3b97..ef3d38ab86e 100644 --- a/src/test/run-fail/unwind-iter.rs +++ b/src/test/run-fail/unwind-iter.rs @@ -7,5 +7,5 @@ fn x(it: fn(int)) { fn main() { let a = @0; - x {|_i|}; + x(|_i| { } ); } \ No newline at end of file diff --git a/src/test/run-fail/unwind-iter2.rs b/src/test/run-fail/unwind-iter2.rs index e1e93ac94cd..babf617dd72 100644 --- a/src/test/run-fail/unwind-iter2.rs +++ b/src/test/run-fail/unwind-iter2.rs @@ -6,5 +6,5 @@ fn x(it: fn(int)) { } fn main() { - x {|_x| fail; }; + x(|_x| fail ); } \ No newline at end of file diff --git a/src/test/run-fail/unwind-lambda.rs b/src/test/run-fail/unwind-lambda.rs index 72fd69ec675..cc516c550a2 100644 --- a/src/test/run-fail/unwind-lambda.rs +++ b/src/test/run-fail/unwind-lambda.rs @@ -6,7 +6,7 @@ fn main() { fn@(tasties: @str, macerate: fn(str)) { macerate(*tasties); - } (carrots, { |food| + } (carrots, |food| { let mush = food + cheese; let f = fn@() { let chew = mush + cheese; diff --git a/src/test/run-fail/unwind-misc-1.rs b/src/test/run-fail/unwind-misc-1.rs index abd91751460..7dc9b112f23 100644 --- a/src/test/run-fail/unwind-misc-1.rs +++ b/src/test/run-fail/unwind-misc-1.rs @@ -7,19 +7,19 @@ import uint; fn main() { let count = @mut 0u; - fn hash(&&s: [@str]/~) -> uint { + fn hash(&&s: ~[@str]) -> uint { if (vec::len(s) > 0u && str::eq(*s[0], "boom")) { fail; } ret 10u; } - fn eq(&&s: [@str]/~, &&t: [@str]/~) -> bool { + fn eq(&&s: ~[@str], &&t: ~[@str]) -> bool { ret s == t; } let map = map::hashmap(hash, eq); - let mut arr = []/~; - for uint::range(0u, 10u) {|i| - arr += [@"key stuff"]/~; - map.insert(arr, arr + [@"value stuff"]/~); + let mut arr = ~[]; + for uint::range(0u, 10u) |i| { + arr += ~[@"key stuff"]; + map.insert(arr, arr + ~[@"value stuff"]); } - map.insert([@"boom"]/~, []/~); + map.insert(~[@"boom"], ~[]); } diff --git a/src/test/run-fail/unwind-partial-box.rs b/src/test/run-fail/unwind-partial-box.rs index 7ff5c6ec7f3..757029b5eeb 100644 --- a/src/test/run-fail/unwind-partial-box.rs +++ b/src/test/run-fail/unwind-partial-box.rs @@ -1,6 +1,6 @@ // error-pattern:fail -fn f() -> [int]/~ { fail; } +fn f() -> ~[int] { fail; } // Voodoo. In unwind-alt we had to do this to trigger the bug. Might // have been to do with memory allocation patterns. diff --git a/src/test/run-fail/unwind-partial-unique.rs b/src/test/run-fail/unwind-partial-unique.rs index 5f998e802dc..142d35044f4 100644 --- a/src/test/run-fail/unwind-partial-unique.rs +++ b/src/test/run-fail/unwind-partial-unique.rs @@ -1,6 +1,6 @@ // error-pattern:fail -fn f() -> [int]/~ { fail; } +fn f() -> ~[int] { fail; } // Voodoo. In unwind-alt we had to do this to trigger the bug. Might // have been to do with memory allocation patterns. diff --git a/src/test/run-fail/unwind-partial-vec.rs b/src/test/run-fail/unwind-partial-vec.rs index feb7449c532..5dc7c6a99b1 100644 --- a/src/test/run-fail/unwind-partial-vec.rs +++ b/src/test/run-fail/unwind-partial-vec.rs @@ -1,6 +1,6 @@ // error-pattern:fail -fn f() -> [int]/~ { fail; } +fn f() -> ~[int] { fail; } // Voodoo. In unwind-alt we had to do this to trigger the bug. Might // have been to do with memory allocation patterns. @@ -9,7 +9,7 @@ fn prime() { } fn partial() { - let x = [[0]/~, f(), [0]/~]/~; + let x = ~[~[0], f(), ~[0]]; } fn main() { diff --git a/src/test/run-fail/unwind-rec.rs b/src/test/run-fail/unwind-rec.rs index 58fb32cf3e5..bafc312e641 100644 --- a/src/test/run-fail/unwind-rec.rs +++ b/src/test/run-fail/unwind-rec.rs @@ -1,6 +1,6 @@ // error-pattern:fail -fn build() -> [int]/~ { +fn build() -> ~[int] { fail; } diff --git a/src/test/run-fail/unwind-rec2.rs b/src/test/run-fail/unwind-rec2.rs index 10e9cd70323..37c19281ea6 100644 --- a/src/test/run-fail/unwind-rec2.rs +++ b/src/test/run-fail/unwind-rec2.rs @@ -1,10 +1,10 @@ // error-pattern:fail -fn build1() -> [int]/~ { - [0,0,0,0,0,0,0]/~ +fn build1() -> ~[int] { + ~[0,0,0,0,0,0,0] } -fn build2() -> [int]/~ { +fn build2() -> ~[int] { fail; } diff --git a/src/test/run-fail/unwind-tup.rs b/src/test/run-fail/unwind-tup.rs index 219e4d2a311..7b44eae2604 100644 --- a/src/test/run-fail/unwind-tup.rs +++ b/src/test/run-fail/unwind-tup.rs @@ -1,6 +1,6 @@ // error-pattern:fail -fn fold_local() -> @[int]/~{ +fn fold_local() -> @~[int]{ fail; } diff --git a/src/test/run-fail/unwind-tup2.rs b/src/test/run-fail/unwind-tup2.rs index cba38527da9..757b9911868 100644 --- a/src/test/run-fail/unwind-tup2.rs +++ b/src/test/run-fail/unwind-tup2.rs @@ -1,10 +1,10 @@ // error-pattern:fail -fn fold_local() -> @[int]/~{ - @[0,0,0,0,0,0]/~ +fn fold_local() -> @~[int]{ + @~[0,0,0,0,0,0] } -fn fold_remote() -> @[int]/~{ +fn fold_remote() -> @~[int]{ fail; } diff --git a/src/test/run-fail/vec-overrun.rs b/src/test/run-fail/vec-overrun.rs index f0d21ec7a73..8301a05f76b 100644 --- a/src/test/run-fail/vec-overrun.rs +++ b/src/test/run-fail/vec-overrun.rs @@ -2,7 +2,7 @@ // error-pattern:bounds check fn main() { - let v: [int]/~ = [10]/~; + let v: ~[int] = ~[10]; let x: int = 0; assert (v[x] == 10); // Bounds-check failure. diff --git a/src/test/run-fail/vec-underrun.rs b/src/test/run-fail/vec-underrun.rs index faeeed44603..1228e95ac1e 100644 --- a/src/test/run-fail/vec-underrun.rs +++ b/src/test/run-fail/vec-underrun.rs @@ -2,7 +2,7 @@ // error-pattern:bounds check fn main() { - let v: [int]/~ = [10, 20]/~; + let v: ~[int] = ~[10, 20]; let x: int = 0; assert (v[x] == 10); // Bounds-check failure. diff --git a/src/test/run-fail/zip-different-lengths.rs b/src/test/run-fail/zip-different-lengths.rs index f41c03e7e76..7eaa5bb5ec3 100644 --- a/src/test/run-fail/zip-different-lengths.rs +++ b/src/test/run-fail/zip-different-lengths.rs @@ -6,18 +6,18 @@ import uint; import u8; import vec::*; -fn enum_chars(start: u8, end: u8) -> [char]/~ { +fn enum_chars(start: u8, end: u8) -> ~[char] { assert start < end; let mut i = start; - let mut r = []/~; + let mut r = ~[]; while i <= end { vec::push(r, i as char); i += 1u as u8; } ret r; } -fn enum_uints(start: uint, end: uint) -> [uint]/~ { +fn enum_uints(start: uint, end: uint) -> ~[uint] { assert start < end; let mut i = start; - let mut r = []/~; + let mut r = ~[]; while i <= end { vec::push(r, i); i += 1u; } ret r; } diff --git a/src/test/run-pass-fulldeps/issue-1926.rs b/src/test/run-pass-fulldeps/issue-1926.rs index 856e916ac0c..6be105e52ca 100644 --- a/src/test/run-pass-fulldeps/issue-1926.rs +++ b/src/test/run-pass-fulldeps/issue-1926.rs @@ -39,7 +39,7 @@ impl of fake_ext_ctxt for fake_session { } fn mk_ctxt() -> fake_ext_ctxt { - let opts : fake_options = {cfg: []/~}; + let opts : fake_options = {cfg: ~[]}; {opts: @opts, parse_sess: new_parse_sess()} as fake_ext_ctxt } diff --git a/src/test/run-pass-fulldeps/qquote.rs b/src/test/run-pass-fulldeps/qquote.rs index d6da96d0393..ecd9ee45034 100644 --- a/src/test/run-pass-fulldeps/qquote.rs +++ b/src/test/run-pass-fulldeps/qquote.rs @@ -19,7 +19,7 @@ iface fake_ext_ctxt { type fake_session = (); impl of fake_ext_ctxt for fake_session { - fn cfg() -> ast::crate_cfg { []/~ } + fn cfg() -> ast::crate_cfg { ~[] } fn parse_sess() -> parse::parse_sess { parse::new_parse_sess(none) } } @@ -79,8 +79,8 @@ fn main() { // issue #1926 let s = #ast(expr){__s}; let e = #ast(expr){__e}; - let call = #ast(expr){$(s).foo {|__e| $(e)}}; - check_pp(call, pprust::print_expr, "__s.foo {|__e| __e }") + let call = #ast(expr){$(s).foo(|__e| $(e) )}; + check_pp(call, pprust::print_expr, "__s.foo(|__e| __e)") } fn check_pp<T>(expr: T, f: fn(pprust::ps, T), expect: str) { diff --git a/src/test/run-pass/alloca-from-derived-tydesc.rs b/src/test/run-pass/alloca-from-derived-tydesc.rs index df4fac9945f..4f6f215226e 100644 --- a/src/test/run-pass/alloca-from-derived-tydesc.rs +++ b/src/test/run-pass/alloca-from-derived-tydesc.rs @@ -1,7 +1,7 @@ enum option<T> { some(T), none, } -type r<T> = {mut v: [option<T>]/~}; +type r<T> = {mut v: ~[option<T>]}; -fn f<T>() -> [T]/~ { ret []/~; } +fn f<T>() -> ~[T] { ret ~[]; } -fn main() { let r: r<int> = {mut v: []/~}; r.v = f(); } +fn main() { let r: r<int> = {mut v: ~[]}; r.v = f(); } diff --git a/src/test/run-pass/alt-join.rs b/src/test/run-pass/alt-join.rs index ecc24877d2f..a7b6bd98ecd 100644 --- a/src/test/run-pass/alt-join.rs +++ b/src/test/run-pass/alt-join.rs @@ -4,12 +4,12 @@ import option; fn foo<T>(y: option<T>) { let mut x: int; - let mut rs: [int]/~ = []/~; + let mut rs: ~[int] = ~[]; /* tests that x doesn't get put in the precondition for the entire if expression */ if true { - } else { alt y { none::<T> { x = 17; } _ { x = 42; } } rs += [x]/~; } + } else { alt y { none::<T> { x = 17; } _ { x = 42; } } rs += ~[x]; } ret; } diff --git a/src/test/run-pass/alt-phi.rs b/src/test/run-pass/alt-phi.rs index 58b19eeeed2..5a99563e2af 100644 --- a/src/test/run-pass/alt-phi.rs +++ b/src/test/run-pass/alt-phi.rs @@ -7,7 +7,7 @@ fn foo(it: fn(int)) { it(10); } fn main() { let mut x = true; alt a { - a { x = true; foo {|_i|} } + a { x = true; foo(|_i| { } ) } b { x = false; } c { x = false; } } diff --git a/src/test/run-pass/argument-passing.rs b/src/test/run-pass/argument-passing.rs index bd8f56eec0b..4a349a827a4 100644 --- a/src/test/run-pass/argument-passing.rs +++ b/src/test/run-pass/argument-passing.rs @@ -12,6 +12,6 @@ fn main() { assert (f1(a, b, c) == 6); assert (a.x == 0); assert (b == 10); - assert (f2(a.x, {|x| a.x = 50; }) == 0); + assert (f2(a.x, |x| a.x = 50 ) == 0); assert (a.x == 50); } diff --git a/src/test/run-pass/argv.rs b/src/test/run-pass/argv.rs index 42f724f1861..fa2a05365e9 100644 --- a/src/test/run-pass/argv.rs +++ b/src/test/run-pass/argv.rs @@ -1,5 +1,5 @@ -fn main(args: [str]/~) { - let vs: [str]/~ = ["hi", "there", "this", "is", "a", "vec"]/~; - let vvs: [[str]/~]/~ = [args, vs]/~; - for vvs.each {|vs| for vs.each {|s| log(debug, s); } } +fn main(args: ~[str]) { + let vs: ~[str] = ~["hi", "there", "this", "is", "a", "vec"]; + let vvs: ~[~[str]] = ~[args, vs]; + for vvs.each |vs| { for vs.each |s| { log(debug, s); } } } diff --git a/src/test/run-pass/auto-loop.rs b/src/test/run-pass/auto-loop.rs index 7a795f7cdfe..f33f0b3bc8f 100644 --- a/src/test/run-pass/auto-loop.rs +++ b/src/test/run-pass/auto-loop.rs @@ -1,5 +1,5 @@ fn main() { let mut sum = 0; - for vec::each([1, 2, 3, 4, 5]/~) {|x| sum += x; } + for vec::each(~[1, 2, 3, 4, 5]) |x| { sum += x; } assert (sum == 15); } diff --git a/src/test/run-pass/auto_serialize.rs b/src/test/run-pass/auto_serialize.rs index 6a3d54b5e35..5e9c22d0509 100644 --- a/src/test/run-pass/auto_serialize.rs +++ b/src/test/run-pass/auto_serialize.rs @@ -18,7 +18,7 @@ fn test_ser_and_deser<A>(a1: A, // check the pretty printer: io_ser_fn(io::stdout(), a1); - let s = io::with_str_writer {|w| io_ser_fn(w, a1) }; + let s = io::with_str_writer(|w| io_ser_fn(w, a1) ); #debug["s == %?", s]; assert s == expected; @@ -58,7 +58,7 @@ type some_rec = {v: uint_vec}; enum an_enum = some_rec; #[auto_serialize] -type uint_vec = [uint]/~; +type uint_vec = ~[uint]; #[auto_serialize] type point = {x: uint, y: uint}; @@ -91,7 +91,7 @@ fn main() { deserialize_spanned_uint, serialize_spanned_uint); - test_ser_and_deser(an_enum({v: [1u, 2u, 3u]/~}), + test_ser_and_deser(an_enum({v: ~[1u, 2u, 3u]}), "an_enum({v: [1u, 2u, 3u]})", serialize_an_enum, deserialize_an_enum, @@ -103,7 +103,7 @@ fn main() { deserialize_point, serialize_point); - test_ser_and_deser([1u, 2u, 3u]/~, + test_ser_and_deser(~[1u, 2u, 3u], "[1u, 2u, 3u]", serialize_uint_vec, deserialize_uint_vec, diff --git a/src/test/run-pass/autobind.rs b/src/test/run-pass/autobind.rs index 24d80c23150..1a50b3ba3e6 100644 --- a/src/test/run-pass/autobind.rs +++ b/src/test/run-pass/autobind.rs @@ -1,9 +1,9 @@ -fn f<T: copy>(x: [T]) -> T { ret x[0]; } +fn f<T: copy>(x: ~[T]) -> T { ret x[0]; } -fn g(act: fn([int]/~) -> int) -> int { ret act([1, 2, 3]/~); } +fn g(act: fn(~[int]) -> int) -> int { ret act(~[1, 2, 3]); } fn main() { assert (g(f) == 1); - let f1: fn([str]/~) -> str = f; - assert (f1(["x", "y", "z"]/~) == "x"); + let f1: fn(~[str]) -> str = f; + assert (f1(~["x", "y", "z"]) == "x"); } diff --git a/src/test/run-pass/basic-1.rs b/src/test/run-pass/basic-1.rs index 55a5ac7314c..4b912938e4a 100644 --- a/src/test/run-pass/basic-1.rs +++ b/src/test/run-pass/basic-1.rs @@ -12,8 +12,8 @@ fn a(c: chan<int>) { send(c, 10); } fn main() { let p = port(); let ch = chan(p); - task::spawn {|| a(ch); }; - task::spawn {|| a(ch); }; + task::spawn(|| a(ch) ); + task::spawn(|| a(ch) ); let mut n: int = 0; n = recv(p); n = recv(p); diff --git a/src/test/run-pass/basic-2.rs b/src/test/run-pass/basic-2.rs index e45ff2c4c71..039caef3d99 100644 --- a/src/test/run-pass/basic-2.rs +++ b/src/test/run-pass/basic-2.rs @@ -13,8 +13,8 @@ fn a(c: chan<int>) { #debug("task a0"); #debug("task a1"); send(c, 10); } fn main() { let p = port(); let ch = chan(p); - task::spawn {|| a(ch); }; - task::spawn {|| b(ch); }; + task::spawn(|| a(ch) ); + task::spawn(|| b(ch) ); let mut n: int = 0; n = recv(p); n = recv(p); diff --git a/src/test/run-pass/basic.rs b/src/test/run-pass/basic.rs index 9a53eb358e9..9c2b4311b90 100644 --- a/src/test/run-pass/basic.rs +++ b/src/test/run-pass/basic.rs @@ -32,8 +32,8 @@ fn main() { let s: str = "hello there"; let p = comm::port(); let ch = comm::chan(p); - task::spawn {|| a(ch); }; - task::spawn {|| b(ch); }; + task::spawn(|| a(ch) ); + task::spawn(|| b(ch) ); let mut x: int = 10; x = g(n, s); log(debug, x); diff --git a/src/test/run-pass/block-arg-call-as.rs b/src/test/run-pass/block-arg-call-as.rs index bf65441e7fa..94c55ba33a1 100644 --- a/src/test/run-pass/block-arg-call-as.rs +++ b/src/test/run-pass/block-arg-call-as.rs @@ -17,10 +17,10 @@ fn asAny( f : fn()->uint ) -> uint { } fn main() { - let x = asSendfn({|| 22u}); + let x = asSendfn(|| 22u); assert(x == 22u); - let x = asLambda({|| 22u}); + let x = asLambda(|| 22u); assert(x == 22u); - let x = asBlock({|| 22u}); + let x = asBlock(|| 22u); assert(x == 22u); } diff --git a/src/test/run-pass/block-arg-can-be-followed-by-binop.rs b/src/test/run-pass/block-arg-can-be-followed-by-binop.rs index 1b00df3b750..53f158471e8 100644 --- a/src/test/run-pass/block-arg-can-be-followed-by-binop.rs +++ b/src/test/run-pass/block-arg-can-be-followed-by-binop.rs @@ -1,8 +1,8 @@ fn main() { - let v = [-1f, 0f, 1f, 2f, 3f]/~; + let v = ~[-1f, 0f, 1f, 2f, 3f]; - // Trailing expressions require parentheses: - let y = vec::foldl(0f, v) { |x, y| x + y } + 10f; + // Trailing expressions don't require parentheses: + let y = do vec::foldl(0f, v) |x, y| { x + y } + 10f; assert y == 15f; } diff --git a/src/test/run-pass/block-arg-can-be-followed-by-block-arg.rs b/src/test/run-pass/block-arg-can-be-followed-by-block-arg.rs index 57ced911004..f2d7c5a7cce 100644 --- a/src/test/run-pass/block-arg-can-be-followed-by-block-arg.rs +++ b/src/test/run-pass/block-arg-can-be-followed-by-block-arg.rs @@ -1,6 +1,6 @@ fn main() { fn f(i: fn() -> uint) -> uint { i() } - let v = [-1f, 0f, 1f, 2f, 3f]/~; - let z = vec::foldl(f, v) { |x, _y| x } { || 22u }; + let v = ~[-1f, 0f, 1f, 2f, 3f]; + let z = do do vec::foldl(f, v) |x, _y| { x } || { 22u }; assert z == 22u; } diff --git a/src/test/run-pass/block-arg-can-be-followed-by-call.rs b/src/test/run-pass/block-arg-can-be-followed-by-call.rs index 31bb412de3b..c3bb0cb77a7 100644 --- a/src/test/run-pass/block-arg-can-be-followed-by-call.rs +++ b/src/test/run-pass/block-arg-can-be-followed-by-call.rs @@ -1,6 +1,6 @@ fn main() { fn f(i: uint) -> uint { i } - let v = [-1f, 0f, 1f, 2f, 3f]/~; - let z = vec::foldl(f, v) { |x, _y| x } (22u); + let v = ~[-1f, 0f, 1f, 2f, 3f]; + let z = do vec::foldl(f, v) |x, _y| { x } (22u); assert z == 22u; } diff --git a/src/test/run-pass/block-arg-in-parentheses.rs b/src/test/run-pass/block-arg-in-parentheses.rs index 9d51c993898..4d479418877 100644 --- a/src/test/run-pass/block-arg-in-parentheses.rs +++ b/src/test/run-pass/block-arg-in-parentheses.rs @@ -1,26 +1,26 @@ -fn w_semi(v: [int]/~) -> int { +fn w_semi(v: ~[int]) -> int { // the semicolon causes compiler not to // complain about the ignored return value: - vec::foldl(0, v) {|x,y| x+y}; + do vec::foldl(0, v) |x,y| { x+y }; -10 } -fn w_paren1(v: [int]/~) -> int { - (vec::foldl(0, v) {|x,y| x+y}) - 10 +fn w_paren1(v: ~[int]) -> int { + (do vec::foldl(0, v) |x,y| { x+y }) - 10 } -fn w_paren2(v: [int]/~) -> int { - (vec::foldl(0, v) {|x,y| x+y} - 10) +fn w_paren2(v: ~[int]) -> int { + (do vec::foldl(0, v) |x,y| { x+y} - 10) } -fn w_ret(v: [int]/~) -> int { - ret vec::foldl(0, v) {|x,y| x+y} - 10; +fn w_ret(v: ~[int]) -> int { + ret do vec::foldl(0, v) |x,y| { x+y } - 10; } fn main() { - assert w_semi([0, 1, 2, 3]/~) == -10; - assert w_paren1([0, 1, 2, 3]/~) == -4; - assert w_paren2([0, 1, 2, 3]/~) == -4; - assert w_ret([0, 1, 2, 3]/~) == -4; + assert w_semi(~[0, 1, 2, 3]) == -10; + assert w_paren1(~[0, 1, 2, 3]) == -4; + assert w_paren2(~[0, 1, 2, 3]) == -4; + assert w_ret(~[0, 1, 2, 3]) == -4; } diff --git a/src/test/run-pass/block-arg-used-as-any.rs b/src/test/run-pass/block-arg-used-as-any.rs index a13e03d94ce..8a2d77c4d96 100644 --- a/src/test/run-pass/block-arg-used-as-any.rs +++ b/src/test/run-pass/block-arg-used-as-any.rs @@ -3,6 +3,6 @@ fn call_any(f: fn() -> uint) -> uint { } fn main() { - let x_r = call_any {|| 22u }; + let x_r = do call_any || { 22u }; assert x_r == 22u; } diff --git a/src/test/run-pass/block-arg-used-as-lambda.rs b/src/test/run-pass/block-arg-used-as-lambda.rs index 18e8513b090..46f3276a4e5 100644 --- a/src/test/run-pass/block-arg-used-as-lambda.rs +++ b/src/test/run-pass/block-arg-used-as-lambda.rs @@ -3,7 +3,7 @@ fn to_lambda(f: fn@(uint) -> uint) -> fn@(uint) -> uint { } fn main() { - let x: fn@(uint) -> uint = to_lambda({ |x| x * 2u }); + let x: fn@(uint) -> uint = to_lambda(|x| x * 2u ); let y = to_lambda(x); let x_r = x(22u); diff --git a/src/test/run-pass/block-arg.rs b/src/test/run-pass/block-arg.rs index f33d12f4361..2bcdcfccdfd 100644 --- a/src/test/run-pass/block-arg.rs +++ b/src/test/run-pass/block-arg.rs @@ -1,35 +1,35 @@ // Check usage and precedence of block arguments in expressions: fn main() { - let v = [-1f, 0f, 1f, 2f, 3f]/~; + let v = ~[-1f, 0f, 1f, 2f, 3f]; // Statement form does not require parentheses: - vec::iter(v) { |i| + do vec::iter(v) |i| { log(info, i); } // Usable at all: - let mut any_negative = vec::any(v) { |e| float::is_negative(e) }; + let mut any_negative = do vec::any(v) |e| { float::is_negative(e) }; assert any_negative; // Higher precedence than assignments: - any_negative = vec::any(v) { |e| float::is_negative(e) }; + any_negative = do vec::any(v) |e| { float::is_negative(e) }; assert any_negative; // Higher precedence than unary operations: - let abs_v = vec::map(v) { |e| float::abs(e) }; - assert vec::all(abs_v) { |e| float::is_nonnegative(e) }; - assert !vec::any(abs_v) { |e| float::is_negative(e) }; + let abs_v = do vec::map(v) |e| { float::abs(e) }; + assert do vec::all(abs_v) |e| { float::is_nonnegative(e) }; + assert !do vec::any(abs_v) |e| { float::is_negative(e) }; // Usable in funny statement-like forms: - if !vec::any(v) { |e| float::is_positive(e) } { + if !do vec::any(v) |e| { float::is_positive(e) } { assert false; } - alt vec::all(v) { |e| float::is_negative(e) } { + alt do vec::all(v) |e| { float::is_negative(e) } { true { fail "incorrect answer."; } false { } } alt 3 { - _ if vec::any(v) { |e| float::is_negative(e) } { + _ if do vec::any(v) |e| { float::is_negative(e) } { } _ { fail "wrong answer."; @@ -38,15 +38,15 @@ fn main() { // Lower precedence than binary operations: - let w = vec::foldl(0f, v, { |x, y| x + y }) + 10f; - let y = vec::foldl(0f, v) { |x, y| x + y } + 10f; - let z = 10f + vec::foldl(0f, v) { |x, y| x + y }; + let w = do vec::foldl(0f, v) |x, y| { x + y } + 10f; + let y = do vec::foldl(0f, v) |x, y| { x + y } + 10f; + let z = 10f + do vec::foldl(0f, v) |x, y| { x + y }; assert w == y; assert y == z; - // They are not allowed as the tail of a block without parentheses: + // In the tail of a block let w = - if true { vec::any(abs_v, { |e| float::is_nonnegative(e) }) } + if true { do vec::any(abs_v) |e| { float::is_nonnegative(e) } } else { false }; assert w; } diff --git a/src/test/run-pass/block-explicit-types.rs b/src/test/run-pass/block-explicit-types.rs index 230df12e621..ca6fe9b659d 100644 --- a/src/test/run-pass/block-explicit-types.rs +++ b/src/test/run-pass/block-explicit-types.rs @@ -1,4 +1,4 @@ fn main() { fn as_buf<T>(s: str, f: fn(str) -> T) -> T { f(s) } - as_buf("foo", {|foo: str| -> () log(error, foo);}); + as_buf("foo", |foo: str| -> () log(error, foo) ); } diff --git a/src/test/run-pass/block-iter-1.rs b/src/test/run-pass/block-iter-1.rs index 12b7372227a..e9d548dd6ef 100644 --- a/src/test/run-pass/block-iter-1.rs +++ b/src/test/run-pass/block-iter-1.rs @@ -1,9 +1,9 @@ -fn iter_vec<T>(v: [T]/~, f: fn(T)) { for v.each {|x| f(x); } } +fn iter_vec<T>(v: ~[T], f: fn(T)) { for v.each |x| { f(x); } } fn main() { - let v = [1, 2, 3, 4, 5, 6, 7]/~; + let v = ~[1, 2, 3, 4, 5, 6, 7]; let mut odds = 0; - iter_vec(v, {|i| + iter_vec(v, |i| { log(error, i); if i % 2 == 1 { odds += 1; diff --git a/src/test/run-pass/block-iter-2.rs b/src/test/run-pass/block-iter-2.rs index ac96dbed60f..125e003a3f5 100644 --- a/src/test/run-pass/block-iter-2.rs +++ b/src/test/run-pass/block-iter-2.rs @@ -1,10 +1,10 @@ -fn iter_vec<T>(v: [T]/~, f: fn(T)) { for v.each {|x| f(x); } } +fn iter_vec<T>(v: ~[T], f: fn(T)) { for v.each |x| { f(x); } } fn main() { - let v = [1, 2, 3, 4, 5]/~; + let v = ~[1, 2, 3, 4, 5]; let mut sum = 0; - iter_vec(v, {|i| - iter_vec(v, {|j| + iter_vec(v, |i| { + iter_vec(v, |j| { log(error, i * j); sum += i * j; }); diff --git a/src/test/run-pass/block-vec-map2.rs b/src/test/run-pass/block-vec-map2.rs index 087268010c8..4c98bb840b4 100644 --- a/src/test/run-pass/block-vec-map2.rs +++ b/src/test/run-pass/block-vec-map2.rs @@ -3,9 +3,9 @@ import vec; fn main() { let v = - vec::map2([1, 2, 3, 4, 5]/~, - [true, false, false, true, true]/~, - {|i, b| if b { -i } else { i } }); + vec::map2(~[1, 2, 3, 4, 5], + ~[true, false, false, true, true], + |i, b| if b { -i } else { i } ); log(error, v); - assert (v == [-1, 2, 3, -4, -5]/~); + assert (v == ~[-1, 2, 3, -4, -5]); } diff --git a/src/test/run-pass/borrowck-borrow-from-expr-block.rs b/src/test/run-pass/borrowck-borrow-from-expr-block.rs index 08e102af4e8..723d0079617 100644 --- a/src/test/run-pass/borrowck-borrow-from-expr-block.rs +++ b/src/test/run-pass/borrowck-borrow-from-expr-block.rs @@ -4,7 +4,7 @@ fn borrow(x: &int, f: fn(x: &int)) { fn test1(x: @~int) { // Right now, at least, this induces a copy of the unique pointer: - borrow({*x}) { |p| + do borrow({*x}) |p| { let x_a = ptr::addr_of(**x); assert (x_a as uint) != (p as uint); assert unsafe{*x_a} == *p; diff --git a/src/test/run-pass/borrowck-mut-vec-as-imm-slice.rs b/src/test/run-pass/borrowck-mut-vec-as-imm-slice.rs index 1795d3b5aed..e5ea87b9996 100644 --- a/src/test/run-pass/borrowck-mut-vec-as-imm-slice.rs +++ b/src/test/run-pass/borrowck-mut-vec-as-imm-slice.rs @@ -1,13 +1,13 @@ -fn want_slice(v: [int]/&) -> int { +fn want_slice(v: &[int]) -> int { let mut sum = 0; - for vec::each(v) { |i| sum += i; } + for vec::each(v) |i| { sum += i; } ret sum; } -fn has_mut_vec(+v: [mut int]/~) -> int { +fn has_mut_vec(+v: ~[mut int]) -> int { want_slice(v) } fn main() { - assert has_mut_vec([mut 1, 2, 3]/~) == 6; + assert has_mut_vec(~[mut 1, 2, 3]) == 6; } \ No newline at end of file diff --git a/src/test/run-pass/borrowck-preserve-box-in-field.rs b/src/test/run-pass/borrowck-preserve-box-in-field.rs index 09353e3d39f..9320d91e4a0 100644 --- a/src/test/run-pass/borrowck-preserve-box-in-field.rs +++ b/src/test/run-pass/borrowck-preserve-box-in-field.rs @@ -9,7 +9,7 @@ fn borrow(x: &int, f: fn(x: &int)) { fn main() { let mut x = @{f: ~3}; - borrow(x.f) {|b_x| + do borrow(x.f) |b_x| { assert *b_x == 3; assert ptr::addr_of(*x.f) == ptr::addr_of(*b_x); x = @{f: ~4}; diff --git a/src/test/run-pass/borrowck-preserve-box-in-uniq.rs b/src/test/run-pass/borrowck-preserve-box-in-uniq.rs index 475e5eaea8c..abe1faeed59 100644 --- a/src/test/run-pass/borrowck-preserve-box-in-uniq.rs +++ b/src/test/run-pass/borrowck-preserve-box-in-uniq.rs @@ -9,7 +9,7 @@ fn borrow(x: &int, f: fn(x: &int)) { fn main() { let mut x = ~mut @{f: ~3}; - borrow(x.f) {|b_x| + do borrow(x.f) |b_x| { assert *b_x == 3; assert ptr::addr_of(*x.f) == ptr::addr_of(*b_x); *x = @{f: ~4}; diff --git a/src/test/run-pass/borrowck-preserve-box.rs b/src/test/run-pass/borrowck-preserve-box.rs index d4d2f5a2055..68c7c768618 100644 --- a/src/test/run-pass/borrowck-preserve-box.rs +++ b/src/test/run-pass/borrowck-preserve-box.rs @@ -9,7 +9,7 @@ fn borrow(x: &int, f: fn(x: &int)) { fn main() { let mut x = @3; - borrow(x) {|b_x| + do borrow(x) |b_x| { assert *b_x == 3; assert ptr::addr_of(*x) == ptr::addr_of(*b_x); x = @22; diff --git a/src/test/run-pass/borrowck-preserve-expl-deref.rs b/src/test/run-pass/borrowck-preserve-expl-deref.rs index 0778b895137..f04c81527c2 100644 --- a/src/test/run-pass/borrowck-preserve-expl-deref.rs +++ b/src/test/run-pass/borrowck-preserve-expl-deref.rs @@ -9,7 +9,7 @@ fn borrow(x: &int, f: fn(x: &int)) { fn main() { let mut x = @{f: ~3}; - borrow((*x).f) {|b_x| + do borrow((*x).f) |b_x| { assert *b_x == 3; assert ptr::addr_of(*x.f) == ptr::addr_of(*b_x); x = @{f: ~4}; diff --git a/src/test/run-pass/break.rs b/src/test/run-pass/break.rs index 008410c22e5..d8c6fb29c3b 100644 --- a/src/test/run-pass/break.rs +++ b/src/test/run-pass/break.rs @@ -6,7 +6,7 @@ fn main() { assert (i == 10); loop { i += 1; if i == 20 { break; } } assert (i == 20); - for vec::each([1, 2, 3, 4, 5, 6]/~) {|x| + for vec::each(~[1, 2, 3, 4, 5, 6]) |x| { if x == 3 { break; } assert (x <= 3); } i = 0; @@ -16,7 +16,7 @@ fn main() { i += 1; if i % 2 == 0 { cont; } assert (i % 2 != 0); if i >= 10 { break; } } - for vec::each([1, 2, 3, 4, 5, 6]/~) {|x| + for vec::each(~[1, 2, 3, 4, 5, 6]) |x| { if x % 2 == 0 { cont; } assert (x % 2 != 0); } diff --git a/src/test/run-pass/cap-clause-move.rs b/src/test/run-pass/cap-clause-move.rs index c2e0f4b58dd..260c1d45b76 100644 --- a/src/test/run-pass/cap-clause-move.rs +++ b/src/test/run-pass/cap-clause-move.rs @@ -8,8 +8,8 @@ fn main() { let x = ~2; let y = ptr::addr_of(*x) as uint; - let lam_copy: fn@() -> uint = { |copy x| ptr::addr_of(*x) as uint }; - let lam_move: fn@() -> uint = { |move x| ptr::addr_of(*x) as uint }; + let lam_copy: fn@() -> uint = |copy x| ptr::addr_of(*x) as uint; + let lam_move: fn@() -> uint = |move x| ptr::addr_of(*x) as uint; assert lam_copy() != y; assert lam_move() == y; @@ -22,8 +22,8 @@ fn main() { let x = ~4; let y = ptr::addr_of(*x) as uint; - let lam_copy: fn~() -> uint = { |copy x| ptr::addr_of(*x) as uint }; - let lam_move: fn~() -> uint = { |move x| ptr::addr_of(*x) as uint }; + let lam_copy: fn~() -> uint = |copy x| ptr::addr_of(*x) as uint; + let lam_move: fn~() -> uint = |move x| ptr::addr_of(*x) as uint; assert lam_copy() != y; assert lam_move() == y; } diff --git a/src/test/run-pass/cci_impl_exe.rs b/src/test/run-pass/cci_impl_exe.rs index c5811d775c0..d69982b58de 100644 --- a/src/test/run-pass/cci_impl_exe.rs +++ b/src/test/run-pass/cci_impl_exe.rs @@ -8,7 +8,7 @@ fn main() { //let bt0 = sys::frame_address(); //#debug["%?", bt0]; - 3u.to(10u) {|i| + do 3u.to(10u) |i| { io::print(#fmt["%u\n", i]); //let bt1 = sys::frame_address(); diff --git a/src/test/run-pass/cci_iter_exe.rs b/src/test/run-pass/cci_iter_exe.rs index 4535e0a8a78..9e8986bd5ba 100644 --- a/src/test/run-pass/cci_iter_exe.rs +++ b/src/test/run-pass/cci_iter_exe.rs @@ -6,7 +6,7 @@ use cci_iter_lib; fn main() { //let bt0 = sys::rusti::frame_address(1u32); //#debug["%?", bt0]; - cci_iter_lib::iter([1, 2, 3]/~) {|i| + do cci_iter_lib::iter(~[1, 2, 3]) |i| { io::print(#fmt["%d", i]); //assert bt0 == sys::rusti::frame_address(2u32); } diff --git a/src/test/run-pass/cci_no_inline_exe.rs b/src/test/run-pass/cci_no_inline_exe.rs index 1336cd77c6b..070cad2dfb4 100644 --- a/src/test/run-pass/cci_no_inline_exe.rs +++ b/src/test/run-pass/cci_no_inline_exe.rs @@ -12,7 +12,7 @@ fn main() { // actually working. //let bt0 = sys::frame_address(); //#debug["%?", bt0]; - iter([1u, 2u, 3u]/~) {|i| + do iter(~[1u, 2u, 3u]) |i| { io::print(#fmt["%u\n", i]); //let bt1 = sys::frame_address(); diff --git a/src/test/run-pass/chan-leak.rs b/src/test/run-pass/chan-leak.rs index 152f74d683d..ffd115c5b4e 100644 --- a/src/test/run-pass/chan-leak.rs +++ b/src/test/run-pass/chan-leak.rs @@ -24,7 +24,7 @@ fn request_task(c: chan<ctx>) { fn new_cx() -> ctx { let p = port(); let ch = chan(p); - let t = task::spawn {|| request_task(ch); }; + let t = task::spawn(|| request_task(ch) ); let mut cx: ctx; cx = recv(p); ret cx; diff --git a/src/test/run-pass/child-outlives-parent.rs b/src/test/run-pass/child-outlives-parent.rs index 4eabfa478a7..1c3ad872477 100644 --- a/src/test/run-pass/child-outlives-parent.rs +++ b/src/test/run-pass/child-outlives-parent.rs @@ -5,4 +5,4 @@ import task; fn child2(&&s: str) { } -fn main() { let x = task::spawn {|| child2("hi"); }; } +fn main() { let x = task::spawn(|| child2("hi") ); } diff --git a/src/test/run-pass/class-cast-to-iface-multiple-types.rs b/src/test/run-pass/class-cast-to-iface-multiple-types.rs index e2dc7b732a8..89f5e656a68 100644 --- a/src/test/run-pass/class-cast-to-iface-multiple-types.rs +++ b/src/test/run-pass/class-cast-to-iface-multiple-types.rs @@ -51,7 +51,7 @@ class cat : noisy { } fn annoy_neighbors<T: noisy>(critter: T) { - for uint::range(0u, 10u) {|i| critter.speak(); } + for uint::range(0u, 10u) |i| { critter.speak(); } } fn main() { diff --git a/src/test/run-pass/class-iface-bounded-param.rs b/src/test/run-pass/class-iface-bounded-param.rs index e6af2bd60bd..8f7c4113268 100644 --- a/src/test/run-pass/class-iface-bounded-param.rs +++ b/src/test/run-pass/class-iface-bounded-param.rs @@ -10,7 +10,7 @@ class keys<K: copy, V: copy, M: copy map<K,V>> self.map = map; } - fn each(blk: fn(K) -> bool) { self.map.each { |k, _v| blk(k)} } + fn each(blk: fn(K) -> bool) { self.map.each(|k, _v| blk(k) ) } fn size_hint() -> option<uint> { some(self.map.size()) } fn eachi(blk: fn(uint, K) -> bool) { iter::eachi(self, blk) } } @@ -19,5 +19,5 @@ fn main() { let m = int_hash(); m.insert(1, 2); m.insert(3, 4); - assert iter::to_vec(keys(m)) == [1, 3]/~; + assert iter::to_vec(keys(m)) == ~[1, 3]; } diff --git a/src/test/run-pass/class-impl-parameterized-iface.rs b/src/test/run-pass/class-impl-parameterized-iface.rs index 224907ff64d..127a743427c 100644 --- a/src/test/run-pass/class-impl-parameterized-iface.rs +++ b/src/test/run-pass/class-impl-parameterized-iface.rs @@ -53,16 +53,16 @@ class cat : map<int, bool> { } } fn each_key(&&f: fn(&&int) -> bool) { - for self.each {|k, _v| if !f(k) { break; } cont;}; + for self.each |k, _v| { if !f(k) { break; } cont;}; } fn each_value(&&f: fn(&&bool) -> bool) { - for self.each {|_k, v| if !f(v) { break; } cont;}; + for self.each |_k, v| { if !f(v) { break; } cont;}; } } fn main() { let nyan : cat = cat(0, 2, "nyan"); - for uint::range(1u, 5u) {|_i| nyan.speak(); } + for uint::range(1u, 5u) |_i| { nyan.speak(); } // cat returns true if uint input is greater than // the number of meows so far assert(nyan.get(1)); diff --git a/src/test/run-pass/class-impl-very-parameterized-iface.rs b/src/test/run-pass/class-impl-very-parameterized-iface.rs index b0cf6e64ded..3ebd9071993 100644 --- a/src/test/run-pass/class-impl-very-parameterized-iface.rs +++ b/src/test/run-pass/class-impl-very-parameterized-iface.rs @@ -77,21 +77,21 @@ class cat<T: copy> : map<int, T> { } fn each_key(&&f: fn(&&int) -> bool) { - for self.each {|k, _v| if !f(k) { break; } cont;}; + for self.each |k, _v| { if !f(k) { break; } cont;}; } fn each_value(&&f: fn(&&T) -> bool) { - for self.each {|_k, v| if !f(v) { break; } cont;}; + for self.each |_k, v| { if !f(v) { break; } cont;}; } } fn main() { let nyan : cat<str> = cat(0, 2, "nyan"); - for uint::range(1u, 5u) {|_i| nyan.speak(); } + for uint::range(1u, 5u) |_i| { nyan.speak(); } assert(nyan.find(1) == some("nyan")); assert(nyan.find(10) == none); let spotty : cat<cat_type> = cat(2, 57, tuxedo); - for uint::range(0u, 6u) {|_i| spotty.speak(); } + for uint::range(0u, 6u) |_i| { spotty.speak(); } assert(spotty.size() == 8u); assert(spotty.contains_key(2)); assert(spotty.get(3) == tuxedo); diff --git a/src/test/run-pass/class-implement-iface-cross-crate.rs b/src/test/run-pass/class-implement-iface-cross-crate.rs index 5add42fb263..5a778217b5f 100644 --- a/src/test/run-pass/class-implement-iface-cross-crate.rs +++ b/src/test/run-pass/class-implement-iface-cross-crate.rs @@ -40,6 +40,6 @@ fn main() { let nyan = cat(0u, 2, "nyan"); nyan.eat(); assert(!nyan.eat()); - for uint::range(1u, 10u) {|_i| nyan.speak(); }; + for uint::range(1u, 10u) |_i| { nyan.speak(); }; assert(nyan.eat()); } \ No newline at end of file diff --git a/src/test/run-pass/class-implement-ifaces.rs b/src/test/run-pass/class-implement-ifaces.rs index f772ef957d7..1f592323488 100644 --- a/src/test/run-pass/class-implement-ifaces.rs +++ b/src/test/run-pass/class-implement-ifaces.rs @@ -43,6 +43,6 @@ fn main() { let nyan = cat(0u, 2, "nyan"); nyan.eat(); assert(!nyan.eat()); - for uint::range(1u, 10u) {|_i| make_speak(nyan); }; + for uint::range(1u, 10u) |_i| { make_speak(nyan); }; assert(nyan.eat()); } \ No newline at end of file diff --git a/src/test/run-pass/class-implements-multiple-ifaces.rs b/src/test/run-pass/class-implements-multiple-ifaces.rs index 3e1a9895665..d7e6f4f28f0 100644 --- a/src/test/run-pass/class-implements-multiple-ifaces.rs +++ b/src/test/run-pass/class-implements-multiple-ifaces.rs @@ -18,8 +18,8 @@ iface bitey { fn bite() -> body_part; } -fn vec_includes<T>(xs: [T]/~, x: T) -> bool { - for each(xs) {|y| if y == x { ret true; }} +fn vec_includes<T>(xs: ~[T], x: T) -> bool { + for each(xs) |y| { if y == x { ret true; }} ret false; } @@ -53,7 +53,7 @@ class cat : noisy, scratchy, bitey { let t : hashmap<body_part, uint> = hashmap::<body_part, uint>(hsher, eqer); self.bite_counts = t; - iter([finger, toe, nose, ear]/~) {|p| + do iter(~[finger, toe, nose, ear]) |p| { self.bite_counts.insert(p, 0u); }; } @@ -61,19 +61,19 @@ class cat : noisy, scratchy, bitey { fn speak() -> int { self.meow() as int } fn meow_count() -> uint { *self.meows } fn scratch() -> option<furniture> { - let all = [chair, couch, bed]/~; + let all = ~[chair, couch, bed]; log(error, self.scratched); let mut rslt = none; - for each(all) {|thing| if !self.scratched.contains(thing) { + for each(all) |thing| { if !self.scratched.contains(thing) { self.scratched.push(thing); ret some(thing); }} rslt } fn bite() -> body_part { #error("In bite()"); - let all = [toe, nose, ear]/~; + let all = ~[toe, nose, ear]; let mut min = finger; - iter(all) {|next| + do iter(all) |next| { #debug("min = %?", min); if self.bite_counts.get(next) < self.bite_counts.get(min) { min = next; @@ -85,19 +85,19 @@ class cat : noisy, scratchy, bitey { } fn annoy_neighbors<T: noisy>(critter: T) { - for uint::range(0u, 10u) {|i| + for uint::range(0u, 10u) |i| { let what = critter.speak(); #debug("%u %d", i, what); } } fn bite_everything<T: bitey>(critter: T) -> bool { - let mut left : [body_part]/~ = [finger, toe, nose, ear]/~; + let mut left : ~[body_part] = ~[finger, toe, nose, ear]; while vec::len(left) > 0u { let part = critter.bite(); #debug("%? %?", left, part); if vec_includes(left, part) { - left = vec::filter(left, {|p| p != part}); + left = vec::filter(left, |p| p != part ); } else { ret false; diff --git a/src/test/run-pass/class-poly-methods-cross-crate.rs b/src/test/run-pass/class-poly-methods-cross-crate.rs index 9bdfb8af1d9..1703ad035c8 100644 --- a/src/test/run-pass/class-poly-methods-cross-crate.rs +++ b/src/test/run-pass/class-poly-methods-cross-crate.rs @@ -4,12 +4,12 @@ use cci_class_6; import cci_class_6::kitties::*; fn main() { - let nyan : cat<char> = cat::<char>(52u, 99, ['p']/~); - let kitty = cat(1000u, 2, ["tabby"]/~); + let nyan : cat<char> = cat::<char>(52u, 99, ~['p']); + let kitty = cat(1000u, 2, ~["tabby"]); assert(nyan.how_hungry == 99); assert(kitty.how_hungry == 2); - nyan.speak([1u,2u,3u]/~); + nyan.speak(~[1u,2u,3u]); assert(nyan.meow_count() == 55u); - kitty.speak(["meow", "mew", "purr", "chirp"]/~); + kitty.speak(~["meow", "mew", "purr", "chirp"]); assert(kitty.meow_count() == 1004u); } diff --git a/src/test/run-pass/class-poly-methods.rs b/src/test/run-pass/class-poly-methods.rs index 9cd9224be93..d40e2441b69 100644 --- a/src/test/run-pass/class-poly-methods.rs +++ b/src/test/run-pass/class-poly-methods.rs @@ -1,28 +1,28 @@ class cat<U> { priv { - let mut info : [U]/~; + let mut info : ~[U]; let mut meows : uint; } let how_hungry : int; - new(in_x : uint, in_y : int, -in_info: [U]/~) + new(in_x : uint, in_y : int, -in_info: ~[U]) { self.meows = in_x; self.how_hungry = in_y; self.info <- in_info; } - fn speak<T>(stuff: [T]/~) { + fn speak<T>(stuff: ~[T]) { self.meows += stuff.len(); } fn meow_count() -> uint { self.meows } } fn main() { - let nyan : cat<int> = cat::<int>(52u, 99, [9]/~); - let kitty = cat(1000u, 2, ["tabby"]/~); + let nyan : cat<int> = cat::<int>(52u, 99, ~[9]); + let kitty = cat(1000u, 2, ~["tabby"]); assert(nyan.how_hungry == 99); assert(kitty.how_hungry == 2); - nyan.speak([1,2,3]/~); + nyan.speak(~[1,2,3]); assert(nyan.meow_count() == 55u); - kitty.speak(["meow", "mew", "purr", "chirp"]/~); + kitty.speak(~["meow", "mew", "purr", "chirp"]); assert(kitty.meow_count() == 1004u); } diff --git a/src/test/run-pass/classes-cross-crate.rs b/src/test/run-pass/classes-cross-crate.rs index 90407e77678..5e454368e47 100644 --- a/src/test/run-pass/classes-cross-crate.rs +++ b/src/test/run-pass/classes-cross-crate.rs @@ -7,6 +7,6 @@ fn main() { let nyan = cat(0u, 2, "nyan"); nyan.eat(); assert(!nyan.eat()); - for uint::range(1u, 10u) {|_i| nyan.speak(); }; + for uint::range(1u, 10u) |_i| { nyan.speak(); }; assert(nyan.eat()); } \ No newline at end of file diff --git a/src/test/run-pass/classes.rs b/src/test/run-pass/classes.rs index a8eaca96ac5..d7aaf6e3b04 100644 --- a/src/test/run-pass/classes.rs +++ b/src/test/run-pass/classes.rs @@ -35,6 +35,6 @@ fn main() { let nyan = cat(0u, 2, "nyan"); nyan.eat(); assert(!nyan.eat()); - for uint::range(1u, 10u) {|_i| nyan.speak(); }; + for uint::range(1u, 10u) |_i| { nyan.speak(); }; assert(nyan.eat()); } \ No newline at end of file diff --git a/src/test/run-pass/cleanup-copy-mode.rs b/src/test/run-pass/cleanup-copy-mode.rs index 3f5e92cdb75..479a0e3d78d 100644 --- a/src/test/run-pass/cleanup-copy-mode.rs +++ b/src/test/run-pass/cleanup-copy-mode.rs @@ -2,8 +2,8 @@ fn adder(+x: @int, +y: @int) -> int { ret *x + *y; } fn failer() -> @int { fail; } fn main() { - assert(result::is_err(task::try {|| + assert(result::is_err(task::try(|| { adder(@2, failer()); () - })); + }))); } diff --git a/src/test/run-pass/comm.rs b/src/test/run-pass/comm.rs index d3b648e4475..bb6f6585850 100644 --- a/src/test/run-pass/comm.rs +++ b/src/test/run-pass/comm.rs @@ -10,7 +10,7 @@ import task; fn main() { let p = comm::port(); let ch = comm::chan(p); - let t = task::spawn {|| child(ch); }; + let t = task::spawn(|| child(ch) ); let y = recv(p); #error("received"); log(error, y); diff --git a/src/test/run-pass/command-line-args.rs b/src/test/run-pass/command-line-args.rs index bc74be14d8b..0bffb268762 100644 --- a/src/test/run-pass/command-line-args.rs +++ b/src/test/run-pass/command-line-args.rs @@ -1,3 +1,3 @@ -fn main(args: [str]/~) { log(debug, args[0]); } +fn main(args: ~[str]) { log(debug, args[0]); } diff --git a/src/test/run-pass/const-bound.rs b/src/test/run-pass/const-bound.rs index f2a436e0374..36ca25cd1bb 100644 --- a/src/test/run-pass/const-bound.rs +++ b/src/test/run-pass/const-bound.rs @@ -7,7 +7,7 @@ fn foo<T: copy const>(x: T) -> T { x } fn main() { foo(1); foo("hi"); - foo([1, 2, 3]/~); + foo(~[1, 2, 3]); foo({field: 42}); foo((1, 2u)); foo(@1); diff --git a/src/test/run-pass/crust-call-deep2.rs b/src/test/run-pass/crust-call-deep2.rs index a79a05ebde2..ff6ee8ec35a 100644 --- a/src/test/run-pass/crust-call-deep2.rs +++ b/src/test/run-pass/crust-call-deep2.rs @@ -19,7 +19,7 @@ fn count(n: uint) -> uint { fn main() { // Make sure we're on a task with small Rust stacks (main currently // has a large stack) - task::spawn {|| + do task::spawn || { let result = count(1000u); #debug("result = %?", result); assert result == 1000u; diff --git a/src/test/run-pass/crust-call-scrub.rs b/src/test/run-pass/crust-call-scrub.rs index ea8897b1519..3b2d0a4ba29 100644 --- a/src/test/run-pass/crust-call-scrub.rs +++ b/src/test/run-pass/crust-call-scrub.rs @@ -23,7 +23,7 @@ fn count(n: uint) -> uint { fn main() { // Make sure we're on a task with small Rust stacks (main currently // has a large stack) - task::spawn {|| + do task::spawn || { let result = count(12u); #debug("result = %?", result); assert result == 2048u; diff --git a/src/test/run-pass/crust-stress.rs b/src/test/run-pass/crust-stress.rs index e96c45a8cdf..170dffdeb5e 100644 --- a/src/test/run-pass/crust-stress.rs +++ b/src/test/run-pass/crust-stress.rs @@ -20,8 +20,8 @@ fn count(n: uint) -> uint { } fn main() { - iter::repeat(100u) {|| - task::spawn {|| + do iter::repeat(100u) || { + do task::spawn || { assert count(5u) == 16u; }; } diff --git a/src/test/run-pass/crust-yield.rs b/src/test/run-pass/crust-yield.rs index d7dc4c6cf51..d6d6e9b0bd2 100644 --- a/src/test/run-pass/crust-yield.rs +++ b/src/test/run-pass/crust-yield.rs @@ -17,8 +17,8 @@ fn count(n: uint) -> uint { } fn main() { - iter::repeat(10u) {|| - task::spawn {|| + do iter::repeat(10u) || { + do task::spawn || { let result = count(5u); #debug("result = %?", result); assert result == 16u; diff --git a/src/test/run-pass/cycle-collection4.rs b/src/test/run-pass/cycle-collection4.rs index fbe6389fc87..81f224598dc 100644 --- a/src/test/run-pass/cycle-collection4.rs +++ b/src/test/run-pass/cycle-collection4.rs @@ -1,10 +1,10 @@ type foo = { mut z : fn@() }; fn nop() { } -fn nop_foo(_y: [int]/~, _x : @foo) { } +fn nop_foo(_y: ~[int], _x : @foo) { } fn main() { let w = @{ mut z: {||nop()} }; - let x = {||nop_foo([]/~, w)}; + let x = {||nop_foo(~[], w)}; w.z = x; } \ No newline at end of file diff --git a/src/test/run-pass/deep-vector.rs b/src/test/run-pass/deep-vector.rs index c7bb5e3f48c..52dd6caaf0f 100644 --- a/src/test/run-pass/deep-vector.rs +++ b/src/test/run-pass/deep-vector.rs @@ -1,5 +1,5 @@ fn main() { - let _x = [ + let _x = ~[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1998,5 +1998,5 @@ fn main() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ]/~; + ]; } \ No newline at end of file diff --git a/src/test/run-pass/deep-vector2.rs b/src/test/run-pass/deep-vector2.rs index 7a2eaa37f24..a92c0a216e6 100644 --- a/src/test/run-pass/deep-vector2.rs +++ b/src/test/run-pass/deep-vector2.rs @@ -1,5 +1,5 @@ fn main() { - let x = [ + let x = ~[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -7998,5 +7998,5 @@ fn main() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ]/~; + ]; } \ No newline at end of file diff --git a/src/test/run-pass/do-for-no-args.rs b/src/test/run-pass/do-for-no-args.rs new file mode 100644 index 00000000000..d3f860e16ec --- /dev/null +++ b/src/test/run-pass/do-for-no-args.rs @@ -0,0 +1,10 @@ +// Testing that we can drop the || in for/do exprs + +fn f(f: fn@() -> bool) { } + +fn d(f: fn@()) { } + +fn main() { + for f { } + do d { } +} \ No newline at end of file diff --git a/src/test/run-pass/do-pure.rs b/src/test/run-pass/do-pure.rs index 35d43c8fa19..06b555c0eaa 100644 --- a/src/test/run-pass/do-pure.rs +++ b/src/test/run-pass/do-pure.rs @@ -2,8 +2,8 @@ pure fn f(f: fn()) { } pure fn g() { - // `f { || }` is considered pure, so `do f { || }` should be too - do f { || } + // `f || { }` is considered pure, so `do f || { }` should be too + do f || { } } fn main() { diff --git a/src/test/run-pass/do-stack.rs b/src/test/run-pass/do-stack.rs index c7fec67542d..57c5ee9bede 100644 --- a/src/test/run-pass/do-stack.rs +++ b/src/test/run-pass/do-stack.rs @@ -1,5 +1,5 @@ fn f(f: fn&(int)) { f(10) } fn main() { - do f() { |i| assert i == 10 } + do f() |i| { assert i == 10 } } diff --git a/src/test/run-pass/do1.rs b/src/test/run-pass/do1.rs index af173d62d6a..7a25966fedb 100644 --- a/src/test/run-pass/do1.rs +++ b/src/test/run-pass/do1.rs @@ -1,5 +1,5 @@ fn f(f: fn@(int)) { f(10) } fn main() { - do f() { |i| assert i == 10 } + do f() |i| { assert i == 10 } } diff --git a/src/test/run-pass/do2.rs b/src/test/run-pass/do2.rs index c8028f806da..96797dad8f8 100644 --- a/src/test/run-pass/do2.rs +++ b/src/test/run-pass/do2.rs @@ -1,5 +1,5 @@ fn f(f: fn@(int) -> int) -> int { f(10) } fn main() { - assert do f() { |i| i } == 10; + assert do f() |i| { i } == 10; } diff --git a/src/test/run-pass/do3.rs b/src/test/run-pass/do3.rs index c4796eb2070..30d5261fa75 100644 --- a/src/test/run-pass/do3.rs +++ b/src/test/run-pass/do3.rs @@ -1,5 +1,5 @@ fn f(f: fn@(int) -> int) -> int { f(10) } fn main() { - assert do f { |i| i } == 10; + assert do f |i| { i } == 10; } diff --git a/src/test/run-pass/dvec-test.rs b/src/test/run-pass/dvec-test.rs index d3e2038922f..998df76b93f 100644 --- a/src/test/run-pass/dvec-test.rs +++ b/src/test/run-pass/dvec-test.rs @@ -4,22 +4,22 @@ fn main() { let d = dvec(); d.push(3); d.push(4); - assert d.get() == [3, 4]/~; - d.set([mut 5]/~); + assert d.get() == ~[3, 4]; + d.set(~[mut 5]); d.push(6); d.push(7); d.push(8); d.push(9); d.push(10); - d.push_all([11, 12, 13]/~); - d.push_slice([11, 12, 13]/~, 1u, 2u); + d.push_all(~[11, 12, 13]); + d.push_slice(~[11, 12, 13], 1u, 2u); - let exp = [5, 6, 7, 8, 9, 10, 11, 12, 13, 12]/~; + let exp = ~[5, 6, 7, 8, 9, 10, 11, 12, 13, 12]; assert d.get() == exp; assert d.get() == exp; assert d.len() == exp.len(); - for d.eachi { |i, e| + for d.eachi |i, e| { assert e == exp[i]; } diff --git a/src/test/run-pass/empty-mutable-vec.rs b/src/test/run-pass/empty-mutable-vec.rs index cb2c13a8844..8a1662a90a5 100644 --- a/src/test/run-pass/empty-mutable-vec.rs +++ b/src/test/run-pass/empty-mutable-vec.rs @@ -1,3 +1,3 @@ -fn main() { let v: [mut int]/~ = [mut]/~; } +fn main() { let v: ~[mut int] = ~[mut]; } diff --git a/src/test/run-pass/evec-slice.rs b/src/test/run-pass/evec-slice.rs index 8073a9be95f..52988294585 100644 --- a/src/test/run-pass/evec-slice.rs +++ b/src/test/run-pass/evec-slice.rs @@ -1,14 +1,14 @@ fn main() { - let x : [int]/& = [1,2,3,4,5]/&; - let mut z = [1,2,3,4,5]/&; + let x : &[int] = &[1,2,3,4,5]; + let mut z = &[1,2,3,4,5]; z = x; assert z[0] == 1; assert z[4] == 5; - let a : [int]/& = [1,1,1,1,1]/&; - let b : [int]/& = [2,2,2,2,2]/&; - let c : [int]/& = [2,2,2,2,3]/&; - let cc : [int]/& = [2,2,2,2,2,2]/&; + let a : &[int] = &[1,1,1,1,1]; + let b : &[int] = &[2,2,2,2,2]; + let c : &[int] = &[2,2,2,2,3]; + let cc : &[int] = &[2,2,2,2,2,2]; log(debug, a); diff --git a/src/test/run-pass/expr-alt-fail.rs b/src/test/run-pass/expr-alt-fail.rs index 47c2c46a532..52ffd09fba3 100644 --- a/src/test/run-pass/expr-alt-fail.rs +++ b/src/test/run-pass/expr-alt-fail.rs @@ -4,7 +4,7 @@ fn test_simple() { } fn test_box() { - let r = alt true { true { [10]/~ } false { fail } }; + let r = alt true { true { ~[10] } false { fail } }; assert (r[0] == 10); } diff --git a/src/test/run-pass/expr-fn.rs b/src/test/run-pass/expr-fn.rs index a872884f95c..b197d3c7e28 100644 --- a/src/test/run-pass/expr-fn.rs +++ b/src/test/run-pass/expr-fn.rs @@ -4,7 +4,7 @@ fn test_int() { } fn test_vec() { - fn f() -> [int]/~ { [10, 11]/~ } + fn f() -> ~[int] { ~[10, 11] } assert (f()[1] == 11); } diff --git a/src/test/run-pass/for-destruct.rs b/src/test/run-pass/for-destruct.rs index ef28e7255c2..7b5ec2af8ea 100644 --- a/src/test/run-pass/for-destruct.rs +++ b/src/test/run-pass/for-destruct.rs @@ -1,5 +1,5 @@ fn main() { - for vec::each([{x: 10, y: 20}, {x: 30, y: 0}]/~) {|elt| + for vec::each(~[{x: 10, y: 20}, {x: 30, y: 0}]) |elt| { assert (elt.x + elt.y == 30); } } diff --git a/src/test/run-pass/for-loop-fail.rs b/src/test/run-pass/for-loop-fail.rs index 0c72a5d05f8..82551bbc6ff 100644 --- a/src/test/run-pass/for-loop-fail.rs +++ b/src/test/run-pass/for-loop-fail.rs @@ -1 +1 @@ -fn main() { let x: [int]/~ = []/~; for x.each {|_i| fail "moop"; } } +fn main() { let x: ~[int] = ~[]; for x.each |_i| { fail "moop"; } } diff --git a/src/test/run-pass/foreach-nested.rs b/src/test/run-pass/foreach-nested.rs index 007117b04cb..b6b06a9d4fc 100644 --- a/src/test/run-pass/foreach-nested.rs +++ b/src/test/run-pass/foreach-nested.rs @@ -5,11 +5,11 @@ fn two(it: fn(int)) { it(0); it(1); } fn main() { - let a: [mut int]/~ = [mut -1, -1, -1, -1]/~; + let a: ~[mut int] = ~[mut -1, -1, -1, -1]; let mut p: int = 0; - two {|i| - two {|j| a[p] = 10 * i + j; p += 1; }; - }; + do two |i| { + do two |j| { a[p] = 10 * i + j; p += 1; } + } assert (a[0] == 0); assert (a[1] == 1); assert (a[2] == 10); diff --git a/src/test/run-pass/foreach-put-structured.rs b/src/test/run-pass/foreach-put-structured.rs index 4bf37df776d..557e29be6fc 100644 --- a/src/test/run-pass/foreach-put-structured.rs +++ b/src/test/run-pass/foreach-put-structured.rs @@ -9,7 +9,7 @@ fn pairs(it: fn((int, int))) { fn main() { let mut i: int = 10; let mut j: int = 0; - pairs() {|p| + do pairs() |p| { let (_0, _1) = p; log(debug, _0); log(debug, _1); diff --git a/src/test/run-pass/foreach-simple-outer-slot.rs b/src/test/run-pass/foreach-simple-outer-slot.rs index c27b2606664..daa8106419c 100644 --- a/src/test/run-pass/foreach-simple-outer-slot.rs +++ b/src/test/run-pass/foreach-simple-outer-slot.rs @@ -4,7 +4,7 @@ // -*- rust -*- fn main() { let mut sum: int = 0; - first_ten {|i| #debug("main"); log(debug, i); sum = sum + i; }; + do first_ten |i| { #debug("main"); log(debug, i); sum = sum + i; } #debug("sum"); log(debug, sum); assert (sum == 45); diff --git a/src/test/run-pass/generic-ivec-leak.rs b/src/test/run-pass/generic-ivec-leak.rs index 22726635da5..1dd9552a3b5 100644 --- a/src/test/run-pass/generic-ivec-leak.rs +++ b/src/test/run-pass/generic-ivec-leak.rs @@ -1,4 +1,4 @@ enum wrapper<T> { wrapped(T), } -fn main() { let w = wrapped([1, 2, 3, 4, 5]/~); } +fn main() { let w = wrapped(~[1, 2, 3, 4, 5]); } diff --git a/src/test/run-pass/generic-ivec.rs b/src/test/run-pass/generic-ivec.rs index c7d5828fbcd..39ba083a5a3 100644 --- a/src/test/run-pass/generic-ivec.rs +++ b/src/test/run-pass/generic-ivec.rs @@ -1,3 +1,3 @@ fn f<T>(v: @T) { } -fn main() { f(@[1, 2, 3, 4, 5]/~); } +fn main() { f(@~[1, 2, 3, 4, 5]); } diff --git a/src/test/run-pass/hashmap-memory.rs b/src/test/run-pass/hashmap-memory.rs index b0abb2ed6db..0d59e7d1e34 100644 --- a/src/test/run-pass/hashmap-memory.rs +++ b/src/test/run-pass/hashmap-memory.rs @@ -31,11 +31,11 @@ mod map_reduce { type mapper = native fn(str, putter); - enum ctrl_proto { find_reducer([u8]/~, chan<int>), mapper_done, } + enum ctrl_proto { find_reducer(~[u8], chan<int>), mapper_done, } - fn start_mappers(ctrl: chan<ctrl_proto>, inputs: [str]/~) { - for inputs.each {|i| - task::spawn {|| map_task(ctrl, i); }; + fn start_mappers(ctrl: chan<ctrl_proto>, inputs: ~[str]) { + for inputs.each |i| { + task::spawn(|| map_task(ctrl, i) ); } } @@ -59,11 +59,11 @@ mod map_reduce { } } - map(input, {|a,b|emit(intermediates, ctrl, a, b)}); + map(input, |a,b| emit(intermediates, ctrl, a, b) ); send(ctrl, mapper_done); } - fn map_reduce(inputs: [str]/~) { + fn map_reduce(inputs: ~[str]) { let ctrl = port(); // This task becomes the master control task. It spawns others @@ -94,5 +94,5 @@ mod map_reduce { } fn main() { - map_reduce::map_reduce(["../src/test/run-pass/hashmap-memory.rs"]/~); + map_reduce::map_reduce(~["../src/test/run-pass/hashmap-memory.rs"]); } diff --git a/src/test/run-pass/iface-generic.rs b/src/test/run-pass/iface-generic.rs index f52498c1182..13be78d8616 100644 --- a/src/test/run-pass/iface-generic.rs +++ b/src/test/run-pass/iface-generic.rs @@ -12,26 +12,26 @@ impl of to_str for () { } iface map<T> { - fn map<U>(f: fn(T) -> U) -> [U]/~; + fn map<U>(f: fn(T) -> U) -> ~[U]; } -impl <T> of map<T> for [T]/~ { - fn map<U>(f: fn(T) -> U) -> [U]/~ { - let mut r = []/~; - for self.each {|x| r += [f(x)]/~; } +impl <T> of map<T> for ~[T] { + fn map<U>(f: fn(T) -> U) -> ~[U] { + let mut r = ~[]; + for self.each |x| { r += ~[f(x)]; } r } } -fn foo<U, T: map<U>>(x: T) -> [str]/~ { - x.map({|_e| "hi" }) +fn foo<U, T: map<U>>(x: T) -> ~[str] { + x.map(|_e| "hi" ) } -fn bar<U: to_str, T: map<U>>(x: T) -> [str]/~ { - x.map({|_e| _e.to_str() }) +fn bar<U: to_str, T: map<U>>(x: T) -> ~[str] { + x.map(|_e| _e.to_str() ) } fn main() { - assert foo([1]) == ["hi"]/~; - assert bar::<int, [int]>([4, 5]) == ["4", "5"]/~; - assert bar::<str, [str]>(["x", "y"]/~) == ["x", "y"]/~; - assert bar::<(), [()]>([()]) == ["()"]/~; + assert foo(~[1]) == ~["hi"]; + assert bar::<int, ~[int]>(~[4, 5]) == ~["4", "5"]; + assert bar::<str, ~[str]>(~["x", "y"]) == ~["x", "y"]; + assert bar::<(), ~[()]>(~[()]) == ~["()"]; } diff --git a/src/test/run-pass/iface-to-str.rs b/src/test/run-pass/iface-to-str.rs index 1490ec3d93f..a3d5b535371 100644 --- a/src/test/run-pass/iface-to-str.rs +++ b/src/test/run-pass/iface-to-str.rs @@ -6,23 +6,23 @@ impl of to_str for int { fn to_str() -> str { int::str(self) } } -impl <T: to_str> of to_str for [T]/~ { +impl <T: to_str> of to_str for ~[T] { fn to_str() -> str { - "[" + str::connect(vec::map(self, {|e| e.to_str()}), ", ") + "]" + "[" + str::connect(vec::map(self, |e| e.to_str() ), ", ") + "]" } } fn main() { assert 1.to_str() == "1"; - assert [2, 3, 4]/~.to_str() == "[2, 3, 4]"; + assert (~[2, 3, 4]).to_str() == "[2, 3, 4]"; fn indirect<T: to_str>(x: T) -> str { x.to_str() + "!" } - assert indirect([10, 20]/~) == "[10, 20]!"; + assert indirect(~[10, 20]) == "[10, 20]!"; fn indirect2<T: to_str>(x: T) -> str { indirect(x) } - assert indirect2([1]/~) == "[1]!"; + assert indirect2(~[1]) == "[1]!"; } diff --git a/src/test/run-pass/impl-variance.rs b/src/test/run-pass/impl-variance.rs index b78c8241efa..a5ea5a75de3 100644 --- a/src/test/run-pass/impl-variance.rs +++ b/src/test/run-pass/impl-variance.rs @@ -1,12 +1,12 @@ -impl extensions<T> for [const T]/~ { +impl extensions<T> for ~[const T] { fn foo() -> uint { vec::len(self) } } fn main() { - let v = [const 0]/~; + let v = ~[const 0]; assert v.foo() == 1u; - let v = [0]/~; + let v = ~[0]; assert v.foo() == 1u; - let v = [mut 0]/~; + let v = ~[mut 0]; assert v.foo() == 1u; } \ No newline at end of file diff --git a/src/test/run-pass/import-glob-crate.rs b/src/test/run-pass/import-glob-crate.rs index 92519f9945f..4f0afc50952 100644 --- a/src/test/run-pass/import-glob-crate.rs +++ b/src/test/run-pass/import-glob-crate.rs @@ -4,6 +4,6 @@ import vec::*; fn main() { let mut v = from_elem(0u, 0); - v = vec::append(v, [4, 2]/~); - assert (reversed(v) == [2, 4]/~); + v = vec::append(v, ~[4, 2]); + assert (reversed(v) == ~[2, 4]); } diff --git a/src/test/run-pass/import-in-block.rs b/src/test/run-pass/import-in-block.rs index 33876d9800e..02837529af4 100644 --- a/src/test/run-pass/import-in-block.rs +++ b/src/test/run-pass/import-in-block.rs @@ -3,9 +3,9 @@ use std; fn main() { import vec; import vec::to_mut; - log(debug, vec::len(to_mut([1, 2]/~))); + log(debug, vec::len(to_mut(~[1, 2]))); { import vec::*; - log(debug, len([2]/~)); + log(debug, len(~[2])); } } diff --git a/src/test/run-pass/import4.rs b/src/test/run-pass/import4.rs index a307e7ac190..9f3207f6b6f 100644 --- a/src/test/run-pass/import4.rs +++ b/src/test/run-pass/import4.rs @@ -5,4 +5,4 @@ mod zed { fn bar() { #debug("bar"); } } -fn main(args: [str]/~) { let zed = 42; bar(); } +fn main(args: ~[str]) { let zed = 42; bar(); } diff --git a/src/test/run-pass/import5.rs b/src/test/run-pass/import5.rs index 11c5a31fdbb..f389dc61e53 100644 --- a/src/test/run-pass/import5.rs +++ b/src/test/run-pass/import5.rs @@ -7,4 +7,4 @@ mod foo { } } -fn main(args: [str]/~) { bar(); } +fn main(args: ~[str]) { bar(); } diff --git a/src/test/run-pass/import7.rs b/src/test/run-pass/import7.rs index a63673214a2..b76ec2ccb4f 100644 --- a/src/test/run-pass/import7.rs +++ b/src/test/run-pass/import7.rs @@ -12,4 +12,4 @@ mod bar { mod zed { } } } -fn main(args: [str]/~) { baz(); } +fn main(args: ~[str]) { baz(); } diff --git a/src/test/run-pass/infer-fn-tail-expr.rs b/src/test/run-pass/infer-fn-tail-expr.rs index d9fb1216a4e..ea46376ab19 100644 --- a/src/test/run-pass/infer-fn-tail-expr.rs +++ b/src/test/run-pass/infer-fn-tail-expr.rs @@ -1,5 +1,5 @@ // issue #680 -fn f() -> [int]/~ { []/~ } +fn f() -> ~[int] { ~[] } fn main() { } diff --git a/src/test/run-pass/infer-with-expected.rs b/src/test/run-pass/infer-with-expected.rs index 20a224a7fa7..6583aa529f2 100644 --- a/src/test/run-pass/infer-with-expected.rs +++ b/src/test/run-pass/infer-with-expected.rs @@ -7,6 +7,6 @@ fn eat_tup(_r: ~@(int, fn@({x: int, y: int}) -> int)) {} fn eat_rec(_r: @~{a: int, b: fn@({x: int, y: int}) -> int}) {} fn main() { - eat_tup(~@(10, {|a| a.x})); - eat_rec(@~{a: 10, b: {|a| a.x}}); + eat_tup(~@(10, |a| a.x )); + eat_rec(@~{a: 10, b: |a| a.x }); } diff --git a/src/test/run-pass/integral-indexing.rs b/src/test/run-pass/integral-indexing.rs index c41ff28a2d8..222e63d4928 100644 --- a/src/test/run-pass/integral-indexing.rs +++ b/src/test/run-pass/integral-indexing.rs @@ -3,7 +3,7 @@ // This is a testcase for issue #94. fn main() { - let v: [int]/~ = [0, 1, 2, 3, 4, 5]/~; + let v: ~[int] = ~[0, 1, 2, 3, 4, 5]; let s: str = "abcdef"; assert (v[3u] == 3); assert (v[3u8] == 3); diff --git a/src/test/run-pass/intrinsic-atomics.rs b/src/test/run-pass/intrinsic-atomics.rs new file mode 100644 index 00000000000..d2418abcb77 --- /dev/null +++ b/src/test/run-pass/intrinsic-atomics.rs @@ -0,0 +1,37 @@ +#[abi = "rust-intrinsic"] +native mod rusti { + fn atomic_xchng(&dst: int, src: int) -> int; + fn atomic_xchng_acq(&dst: int, src: int) -> int; + fn atomic_xchng_rel(&dst: int, src: int) -> int; + + fn atomic_add(&dst: int, src: int) -> int; + fn atomic_add_acq(&dst: int, src: int) -> int; + fn atomic_add_rel(&dst: int, src: int) -> int; + + fn atomic_sub(&dst: int, src: int) -> int; + fn atomic_sub_acq(&dst: int, src: int) -> int; + fn atomic_sub_rel(&dst: int, src: int) -> int; +} + +fn main() { + let mut x = 1; + + assert rusti::atomic_xchng(x, 0) == 1; + assert x == 0; + + assert rusti::atomic_xchng_acq(x, 1) == 0; + assert x == 1; + + assert rusti::atomic_xchng_rel(x, 0) == 1; + assert x == 0; + + assert rusti::atomic_add(x, 1) == 0; + assert rusti::atomic_add_acq(x, 1) == 1; + assert rusti::atomic_add_rel(x, 1) == 2; + assert x == 3; + + assert rusti::atomic_sub(x, 1) == 3; + assert rusti::atomic_sub_acq(x, 1) == 2; + assert rusti::atomic_sub_rel(x, 1) == 1; + assert x == 0; +} diff --git a/src/test/run-pass/intrinsic-frame-address.rs b/src/test/run-pass/intrinsic-frame-address.rs index 84b2a1c995d..6af3c7555d0 100644 --- a/src/test/run-pass/intrinsic-frame-address.rs +++ b/src/test/run-pass/intrinsic-frame-address.rs @@ -4,7 +4,7 @@ native mod rusti { } fn main() { - rusti::frame_address {|addr| + do rusti::frame_address |addr| { assert addr.is_not_null(); } } diff --git a/src/test/run-pass/issue-1821.rs b/src/test/run-pass/issue-1821.rs index 07100a20860..0a442d2e658 100644 --- a/src/test/run-pass/issue-1821.rs +++ b/src/test/run-pass/issue-1821.rs @@ -1,5 +1,5 @@ // Issue #1821 - Don't recurse trying to typecheck this enum t { - foo([t]/~) + foo(~[t]) } fn main() {} \ No newline at end of file diff --git a/src/test/run-pass/issue-1989.rs b/src/test/run-pass/issue-1989.rs index 07d13b56edc..2ee30d48571 100644 --- a/src/test/run-pass/issue-1989.rs +++ b/src/test/run-pass/issue-1989.rs @@ -19,6 +19,6 @@ fn empty_pointy() -> @pointy { fn main() { - let v = [empty_pointy(), empty_pointy()]/~; + let v = ~[empty_pointy(), empty_pointy()]; v[0].a = p(v[0]); } diff --git a/src/test/run-pass/issue-2101.rs b/src/test/run-pass/issue-2101.rs index 4ae5a11c566..f05269e29e9 100644 --- a/src/test/run-pass/issue-2101.rs +++ b/src/test/run-pass/issue-2101.rs @@ -9,7 +9,7 @@ fn init(ar: &a.arena::arena, str: str) -> &a.hold { new(*ar) s(str) } -fn main(args: [str]/~) { +fn main(args: ~[str]) { let ar = arena::arena(); let leak = init(&ar, args[0]); alt *leak { diff --git a/src/test/run-pass/issue-2185.rs b/src/test/run-pass/issue-2185.rs index 151361660f5..b8531b68e2e 100644 --- a/src/test/run-pass/issue-2185.rs +++ b/src/test/run-pass/issue-2185.rs @@ -11,18 +11,18 @@ impl<A> of iterable<A> for fn@(fn(A)) { } impl of iterable<uint> for fn@(fn(uint)) { - fn iter(blk: fn(&&uint)) { self { |i| blk(i) } } + fn iter(blk: fn(&&uint)) { self( |i| blk(i) ) } } fn filter<A,IA:iterable<A>>(self: IA, prd: fn@(A) -> bool, blk: fn(A)) { - self.iter {|a| + do self.iter |a| { if prd(a) { blk(a) } } } fn foldl<A,B,IA:iterable<A>>(self: IA, +b0: B, blk: fn(B, A) -> B) -> B { let mut b <- b0; - self.iter { |a| + do self.iter |a| { b <- blk(b, a); } ret b; @@ -37,12 +37,12 @@ fn range(lo: uint, hi: uint, it: fn(uint)) { } fn main() { - let range = {|a|range(0u, 1000u, a)}; - let filt = {|a|filter( + let range = |a| range(0u, 1000u, a); + let filt = |a| filter( range, - {|&&n: uint| n % 3u != 0u && n % 5u != 0u }, - a)}; - let sum = foldl(filt, 0u) {|accum, &&n: uint| accum + n }; + |&&n: uint| n % 3u != 0u && n % 5u != 0u, + a); + let sum = foldl(filt, 0u, |accum, &&n: uint| accum + n ); io::println(#fmt("%u", sum)); } \ No newline at end of file diff --git a/src/test/run-pass/issue-2487-a.rs b/src/test/run-pass/issue-2487-a.rs index ab1d9ccb3be..3d93e64ef82 100644 --- a/src/test/run-pass/issue-2487-a.rs +++ b/src/test/run-pass/issue-2487-a.rs @@ -6,7 +6,7 @@ class socket { drop { } fn set_identity() { - closure { || + do closure || { setsockopt_bytes(copy self.sock) } } diff --git a/src/test/run-pass/issue-2502.rs b/src/test/run-pass/issue-2502.rs index add0c3e6dff..359467dbe62 100644 --- a/src/test/run-pass/issue-2502.rs +++ b/src/test/run-pass/issue-2502.rs @@ -1,11 +1,11 @@ class font/& { - let fontbuf: &self.[u8]/~; + let fontbuf: &self.~[u8]; - new(fontbuf: &self.[u8]/~) { + new(fontbuf: &self.~[u8]) { self.fontbuf = fontbuf; } - fn buf() -> &self.[u8]/~ { + fn buf() -> &self.~[u8] { self.fontbuf } } diff --git a/src/test/run-pass/issue-2611.rs b/src/test/run-pass/issue-2611.rs index dc2a2f1b005..a6603876131 100644 --- a/src/test/run-pass/issue-2611.rs +++ b/src/test/run-pass/issue-2611.rs @@ -3,7 +3,7 @@ import iter; import iter::base_iter; impl Q<A> for base_iter<A> { - fn flat_map_to_vec<B:copy, IB:base_iter<B>>(op: fn(B) -> IB) -> [B]/~ { + fn flat_map_to_vec<B:copy, IB:base_iter<B>>(op: fn(B) -> IB) -> ~[B] { iter::flat_map_to_vec(self, op) } } diff --git a/src/test/run-pass/issue-2631-b.rs b/src/test/run-pass/issue-2631-b.rs index 81c4f4bbedb..fbacb4fee72 100644 --- a/src/test/run-pass/issue-2631-b.rs +++ b/src/test/run-pass/issue-2631-b.rs @@ -10,7 +10,7 @@ import std::map::str_hash; import dvec; fn main() { - let v = [mut @"hi"]/~; + let v = ~[mut @"hi"]; let m: req::header_map = str_hash(); m.insert("METHOD", @dvec::from_vec(v)); request::<int>(m); diff --git a/src/test/run-pass/issue-507.rs b/src/test/run-pass/issue-507.rs index 7490f06d548..f1dab3d8a0a 100644 --- a/src/test/run-pass/issue-507.rs +++ b/src/test/run-pass/issue-507.rs @@ -17,14 +17,14 @@ import comm::recv; fn grandchild(c: chan<int>) { send(c, 42); } fn child(c: chan<int>) { - task::spawn {|| grandchild(c); } + task::spawn(|| grandchild(c) ) } fn main() { let p = comm::port(); let ch = chan(p); - task::spawn {|| child(ch); } + task::spawn(|| child(ch) ); let x: int = recv(p); diff --git a/src/test/run-pass/issue-687.rs b/src/test/run-pass/issue-687.rs index a721aa0ddea..33422314883 100644 --- a/src/test/run-pass/issue-687.rs +++ b/src/test/run-pass/issue-687.rs @@ -7,16 +7,16 @@ import comm::port; import comm::recv; import comm::send; -enum msg { closed, received([u8]/~), } +enum msg { closed, received(~[u8]), } -fn producer(c: chan<[u8]/~>) { - send(c, [1u8, 2u8, 3u8, 4u8]/~); - let empty: [u8]/~ = []/~; +fn producer(c: chan<~[u8]>) { + send(c, ~[1u8, 2u8, 3u8, 4u8]); + let empty: ~[u8] = ~[]; send(c, empty); } -fn packager(cb: chan<chan<[u8]/~>>, msg: chan<msg>) { - let p: port<[u8]/~> = port(); +fn packager(cb: chan<chan<~[u8]>>, msg: chan<msg>) { + let p: port<~[u8]> = port(); send(cb, chan(p)); loop { #debug("waiting for bytes"); @@ -39,12 +39,12 @@ fn packager(cb: chan<chan<[u8]/~>>, msg: chan<msg>) { fn main() { let p: port<msg> = port(); let ch = chan(p); - let recv_reader: port<chan<[u8]/~>> = port(); + let recv_reader: port<chan<~[u8]>> = port(); let recv_reader_chan = chan(recv_reader); - let pack = task::spawn {|| packager(recv_reader_chan, ch); }; + let pack = task::spawn(|| packager(recv_reader_chan, ch) ); - let source_chan: chan<[u8]/~> = recv(recv_reader); - let prod = task::spawn {|| producer(source_chan); }; + let source_chan: chan<~[u8]> = recv(recv_reader); + let prod = task::spawn(|| producer(source_chan) ); loop { let msg = recv(p); diff --git a/src/test/run-pass/issue-783.rs b/src/test/run-pass/issue-783.rs index 76d26e0b025..54e3e9e87e5 100644 --- a/src/test/run-pass/issue-783.rs +++ b/src/test/run-pass/issue-783.rs @@ -10,7 +10,7 @@ fn a() { } let p = port(); let ch = chan(p); - spawn {|| b(ch); }; + spawn(|| b(ch) ); recv(p); } let mut i = 0; @@ -21,7 +21,7 @@ fn a() { } fn main() { - iter::repeat(100u) {|| - spawn {|| a(); } + do iter::repeat(100u) || { + spawn(|| a() ); } } diff --git a/src/test/run-pass/iter-all.rs b/src/test/run-pass/iter-all.rs index d2625414399..f33b44cc53d 100644 --- a/src/test/run-pass/iter-all.rs +++ b/src/test/run-pass/iter-all.rs @@ -1,11 +1,11 @@ fn is_even(&&x: uint) -> bool { (x % 2u) == 0u } fn main() { - assert ![1u, 2u]/~.all(is_even); - assert [2u, 4u]/~.all(is_even); - assert []/~.all(is_even); + assert ![1u, 2u]/_.all(is_even); + assert [2u, 4u]/_.all(is_even); + assert []/_.all(is_even); assert !some(1u).all(is_even); assert some(2u).all(is_even); assert none.all(is_even); -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-any.rs b/src/test/run-pass/iter-any.rs index 8cba862d8fa..c43911a6c1f 100644 --- a/src/test/run-pass/iter-any.rs +++ b/src/test/run-pass/iter-any.rs @@ -1,11 +1,11 @@ fn is_even(&&x: uint) -> bool { (x % 2u) == 0u } fn main() { - assert ![1u, 3u]/~.any(is_even); - assert [1u, 2u]/~.any(is_even); - assert ![]/~.any(is_even); + assert ![1u, 3u]/_.any(is_even); + assert [1u, 2u]/_.any(is_even); + assert ![]/_.any(is_even); assert !some(1u).any(is_even); assert some(2u).any(is_even); assert !none.any(is_even); -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-contains.rs b/src/test/run-pass/iter-contains.rs index bd5e7a4bf52..961326f6d28 100644 --- a/src/test/run-pass/iter-contains.rs +++ b/src/test/run-pass/iter-contains.rs @@ -1,10 +1,10 @@ fn main() { - assert []/~.contains(22u) == false; - assert [1u, 3u]/~.contains(22u) == false; - assert [22u, 1u, 3u]/~.contains(22u) == true; - assert [1u, 22u, 3u]/~.contains(22u) == true; - assert [1u, 3u, 22u]/~.contains(22u) == true; + assert []/_.contains(22u) == false; + assert [1u, 3u]/_.contains(22u) == false; + assert [22u, 1u, 3u]/_.contains(22u) == true; + assert [1u, 22u, 3u]/_.contains(22u) == true; + assert [1u, 3u, 22u]/_.contains(22u) == true; assert none.contains(22u) == false; assert some(1u).contains(22u) == false; assert some(22u).contains(22u) == true; -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-count.rs b/src/test/run-pass/iter-count.rs index 3cca6d748c8..945e27ef173 100644 --- a/src/test/run-pass/iter-count.rs +++ b/src/test/run-pass/iter-count.rs @@ -1,9 +1,9 @@ fn main() { - assert []/~.count(22u) == 0u; - assert [1u, 3u]/~.count(22u) == 0u; - assert [22u, 1u, 3u]/~.count(22u) == 1u; - assert [22u, 1u, 22u]/~.count(22u) == 2u; + assert []/_.count(22u) == 0u; + assert [1u, 3u]/_.count(22u) == 0u; + assert [22u, 1u, 3u]/_.count(22u) == 1u; + assert [22u, 1u, 22u]/_.count(22u) == 2u; assert none.count(22u) == 0u; assert some(1u).count(22u) == 0u; assert some(22u).count(22u) == 1u; -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-eachi.rs b/src/test/run-pass/iter-eachi.rs index 432a0080143..6336ab79aa7 100644 --- a/src/test/run-pass/iter-eachi.rs +++ b/src/test/run-pass/iter-eachi.rs @@ -1,18 +1,18 @@ fn main() { let mut c = 0u; - for [1u, 2u, 3u, 4u, 5u]/~.eachi { |i, v| + for [1u, 2u, 3u, 4u, 5u]/_.eachi |i, v| { assert (i + 1u) == v; c += 1u; } assert c == 5u; - for none::<uint>.eachi { |i, v| fail; } + for none::<uint>.eachi |i, v| { fail; } let mut c = 0u; - for some(1u).eachi { |i, v| + for some(1u).eachi |i, v| { assert (i + 1u) == v; c += 1u; } assert c == 1u; -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-filter-to-vec.rs b/src/test/run-pass/iter-filter-to-vec.rs index c16cdc69ad1..96d2bf6d1ce 100644 --- a/src/test/run-pass/iter-filter-to-vec.rs +++ b/src/test/run-pass/iter-filter-to-vec.rs @@ -1,9 +1,9 @@ fn is_even(&&x: uint) -> bool { (x % 2u) == 0u } fn main() { - assert [1u, 3u]/~.filter_to_vec(is_even) == []/~; - assert [1u, 2u, 3u]/~.filter_to_vec(is_even) == [2u]/~; - assert none.filter_to_vec(is_even) == []/~; - assert some(1u).filter_to_vec(is_even) == []/~; - assert some(2u).filter_to_vec(is_even) == [2u]/~; -} \ No newline at end of file + assert [1u, 3u]/_.filter_to_vec(is_even) == ~[]; + assert [1u, 2u, 3u]/_.filter_to_vec(is_even) == ~[2u]; + assert none.filter_to_vec(is_even) == ~[]; + assert some(1u).filter_to_vec(is_even) == ~[]; + assert some(2u).filter_to_vec(is_even) == ~[2u]; +} diff --git a/src/test/run-pass/iter-flat-map-to-vec.rs b/src/test/run-pass/iter-flat-map-to-vec.rs index 92fdafc7f95..852a190158f 100644 --- a/src/test/run-pass/iter-flat-map-to-vec.rs +++ b/src/test/run-pass/iter-flat-map-to-vec.rs @@ -1,21 +1,21 @@ // xfail-test -- flat_map_to_vec currently disable -fn repeat(&&x: uint) -> [uint]/~ { [x, x]/~ } +fn repeat(&&x: uint) -> ~[uint] { ~[x, x] } fn incd_if_even(&&x: uint) -> option<uint> { if (x % 2u) == 0u {some(x + 1u)} else {none} } fn main() { - assert [1u, 3u]/~.flat_map_to_vec(repeat) == [1u, 1u, 3u, 3u]/~; - assert []/~.flat_map_to_vec(repeat) == []/~; - assert none.flat_map_to_vec(repeat) == []/~; - assert some(1u).flat_map_to_vec(repeat) == [1u, 1u]/~; - assert some(2u).flat_map_to_vec(repeat) == [2u, 2u]/~; + assert ~[1u, 3u].flat_map_to_vec(repeat) == ~[1u, 1u, 3u, 3u]; + assert ~[].flat_map_to_vec(repeat) == ~[]; + assert none.flat_map_to_vec(repeat) == ~[]; + assert some(1u).flat_map_to_vec(repeat) == ~[1u, 1u]; + assert some(2u).flat_map_to_vec(repeat) == ~[2u, 2u]; - assert [1u, 2u, 5u]/~.flat_map_to_vec(incd_if_even) == [3u]/~; - assert []/~.flat_map_to_vec(incd_if_even) == []/~; - assert none.flat_map_to_vec(incd_if_even) == []/~; - assert some(1u).flat_map_to_vec(incd_if_even) == []/~; - assert some(2u).flat_map_to_vec(incd_if_even) == [3u]/~; + assert ~[1u, 2u, 5u].flat_map_to_vec(incd_if_even) == ~[3u]; + assert ~[].flat_map_to_vec(incd_if_even) == ~[]; + assert none.flat_map_to_vec(incd_if_even) == ~[]; + assert some(1u).flat_map_to_vec(incd_if_even) == ~[]; + assert some(2u).flat_map_to_vec(incd_if_even) == ~[3u]; } \ No newline at end of file diff --git a/src/test/run-pass/iter-foldl.rs b/src/test/run-pass/iter-foldl.rs index d2678c5ba85..4c4e422c851 100644 --- a/src/test/run-pass/iter-foldl.rs +++ b/src/test/run-pass/iter-foldl.rs @@ -1,9 +1,9 @@ fn add(&&x: float, &&y: uint) -> float { x + (y as float) } fn main() { - assert [1u, 3u]/~.foldl(20f, add) == 24f; - assert []/~.foldl(20f, add) == 20f; + assert [1u, 3u]/_.foldl(20f, add) == 24f; + assert []/_.foldl(20f, add) == 20f; assert none.foldl(20f, add) == 20f; assert some(1u).foldl(20f, add) == 21f; assert some(2u).foldl(20f, add) == 22f; -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-map-to-vec.rs b/src/test/run-pass/iter-map-to-vec.rs index 68bf3248ecb..157caf78c08 100644 --- a/src/test/run-pass/iter-map-to-vec.rs +++ b/src/test/run-pass/iter-map-to-vec.rs @@ -1,9 +1,9 @@ fn inc(&&x: uint) -> uint { x + 1u } fn main() { - assert [1u, 3u]/~.map_to_vec(inc) == [2u, 4u]/~; - assert [1u, 2u, 3u]/~.map_to_vec(inc) == [2u, 3u, 4u]/~; - assert none.map_to_vec(inc) == []/~; - assert some(1u).map_to_vec(inc) == [2u]/~; - assert some(2u).map_to_vec(inc) == [3u]/~; -} \ No newline at end of file + assert [1u, 3u]/_.map_to_vec(inc) == ~[2u, 4u]; + assert [1u, 2u, 3u]/_.map_to_vec(inc) == ~[2u, 3u, 4u]; + assert none.map_to_vec(inc) == ~[]; + assert some(1u).map_to_vec(inc) == ~[2u]; + assert some(2u).map_to_vec(inc) == ~[3u]; +} diff --git a/src/test/run-pass/iter-min-max.rs b/src/test/run-pass/iter-min-max.rs index 4c67670367e..204ef3da535 100644 --- a/src/test/run-pass/iter-min-max.rs +++ b/src/test/run-pass/iter-min-max.rs @@ -1,11 +1,11 @@ fn is_even(&&x: uint) -> bool { (x % 2u) == 0u } fn main() { - assert [1u, 3u]/~.min() == 1u; - assert [3u, 1u]/~.min() == 1u; + assert [1u, 3u]/_.min() == 1u; + assert [3u, 1u]/_.min() == 1u; assert some(1u).min() == 1u; - assert [1u, 3u]/~.max() == 3u; - assert [3u, 1u]/~.max() == 3u; + assert [1u, 3u]/_.max() == 3u; + assert [3u, 1u]/_.max() == 3u; assert some(3u).max() == 3u; -} \ No newline at end of file +} diff --git a/src/test/run-pass/iter-range.rs b/src/test/run-pass/iter-range.rs index 797049b5d51..2f26fc22226 100644 --- a/src/test/run-pass/iter-range.rs +++ b/src/test/run-pass/iter-range.rs @@ -8,6 +8,6 @@ fn range(a: int, b: int, it: fn(int)) { fn main() { let mut sum: int = 0; - range(0, 100) {|x| sum += x; } + range(0, 100, |x| sum += x ); log(debug, sum); } diff --git a/src/test/run-pass/iter-to-vec.rs b/src/test/run-pass/iter-to-vec.rs index c8fcb881934..0197cb73ab1 100644 --- a/src/test/run-pass/iter-to-vec.rs +++ b/src/test/run-pass/iter-to-vec.rs @@ -1,8 +1,8 @@ fn main() { - assert [1u, 3u]/~.to_vec() == [1u, 3u]/~; - let e: [uint]/~ = []/~; - assert e.to_vec() == []/~; - assert none::<uint>.to_vec() == []/~; - assert some(1u).to_vec() == [1u]/~; - assert some(2u).to_vec() == [2u]/~; -} \ No newline at end of file + assert [1u, 3u]/_.to_vec() == ~[1u, 3u]; + let e: ~[uint] = ~[]; + assert e.to_vec() == ~[]; + assert none::<uint>.to_vec() == ~[]; + assert some(1u).to_vec() == ~[1u]; + assert some(2u).to_vec() == ~[2u]; +} diff --git a/src/test/run-pass/ivec-add.rs b/src/test/run-pass/ivec-add.rs index 6ca1b2fb5d4..216344942e7 100644 --- a/src/test/run-pass/ivec-add.rs +++ b/src/test/run-pass/ivec-add.rs @@ -1,6 +1,6 @@ -fn double<T: copy>(a: T) -> [T]/~ { ret [a]/~ + [a]/~; } +fn double<T: copy>(a: T) -> ~[T] { ret ~[a] + ~[a]; } -fn double_int(a: int) -> [int]/~ { ret [a]/~ + [a]/~; } +fn double_int(a: int) -> ~[int] { ret ~[a] + ~[a]; } fn main() { let mut d = double(1); diff --git a/src/test/run-pass/ivec-pass-by-value.rs b/src/test/run-pass/ivec-pass-by-value.rs index 69c19f5d9b0..9055ddfc9fe 100644 --- a/src/test/run-pass/ivec-pass-by-value.rs +++ b/src/test/run-pass/ivec-pass-by-value.rs @@ -1,3 +1,3 @@ -fn f(a: [int]/~) { } -fn main() { f([1, 2, 3, 4, 5]/~); } +fn f(a: ~[int]) { } +fn main() { f(~[1, 2, 3, 4, 5]); } diff --git a/src/test/run-pass/ivec-tag.rs b/src/test/run-pass/ivec-tag.rs index a1d0eccb50a..26cd2f6a1f5 100644 --- a/src/test/run-pass/ivec-tag.rs +++ b/src/test/run-pass/ivec-tag.rs @@ -7,16 +7,16 @@ import comm::port; import comm::send; import comm::recv; -fn producer(c: chan<[u8]/~>) { +fn producer(c: chan<~[u8]>) { send(c, - [1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8, 8u8, 9u8, 10u8, 11u8, 12u8, - 13u8]/~); + ~[1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8, 8u8, 9u8, 10u8, 11u8, 12u8, + 13u8]); } fn main() { - let p: port<[u8]/~> = port(); + let p: port<~[u8]> = port(); let ch = chan(p); - let prod = task::spawn {|| producer(ch); }; + let prod = task::spawn(|| producer(ch) ); - let data: [u8]/~ = recv(p); + let data: ~[u8] = recv(p); } diff --git a/src/test/run-pass/lambda-infer-unresolved.rs b/src/test/run-pass/lambda-infer-unresolved.rs index 3bf96f6dccb..ceb91791fa0 100644 --- a/src/test/run-pass/lambda-infer-unresolved.rs +++ b/src/test/run-pass/lambda-infer-unresolved.rs @@ -1,7 +1,7 @@ // This should typecheck even though the type of e is not fully // resolved when we finish typechecking the fn@. fn main() { - let e = @{mut refs: []/~, n: 0}; + let e = @{mut refs: ~[], n: 0}; let f = fn@ () { log(error, e.n); }; - e.refs += [1]/~; + e.refs += ~[1]; } diff --git a/src/test/run-pass/last-use-corner-cases.rs b/src/test/run-pass/last-use-corner-cases.rs index 2ae8986c7f1..6963cdf5d16 100644 --- a/src/test/run-pass/last-use-corner-cases.rs +++ b/src/test/run-pass/last-use-corner-cases.rs @@ -26,6 +26,6 @@ fn main() { // Verify that blocks can't interfere with each other. fn two_blocks(a: fn(), b: fn()) { a(); b(); a(); b(); } let q = ~50; - two_blocks({|| let a = q; assert *a == 50;}, - {|| let a = q; assert *a == 50;}); + two_blocks(|| { let a = q; assert *a == 50;}, + || { let a = q; assert *a == 50;}); } diff --git a/src/test/run-pass/last-use-in-block.rs b/src/test/run-pass/last-use-in-block.rs index 6915c35b2f6..a9e367a89d4 100644 --- a/src/test/run-pass/last-use-in-block.rs +++ b/src/test/run-pass/last-use-in-block.rs @@ -10,7 +10,7 @@ fn lp<T>(s: str, f: fn(str) -> T) -> T { fn apply<T>(s: str, f: fn(str) -> T) -> T { fn g<T>(s: str, f: fn(str) -> T) -> T {f(s)} - g(s) {|v| let r = f(v); r } + g(s, |v| { let r = f(v); r }) } fn main() {} diff --git a/src/test/run-pass/last-use-is-capture.rs b/src/test/run-pass/last-use-is-capture.rs index b238dab3026..8dcb2f2cf85 100644 --- a/src/test/run-pass/last-use-is-capture.rs +++ b/src/test/run-pass/last-use-is-capture.rs @@ -4,5 +4,5 @@ fn main() { fn invoke(f: fn@()) { f(); } let k = ~22; let _u = {a: k}; - invoke {||log(error, k);} + invoke(|| log(error, k) ) } diff --git a/src/test/run-pass/lazychan.rs b/src/test/run-pass/lazychan.rs index 8651c81a41f..cbdd1e9aabf 100644 --- a/src/test/run-pass/lazychan.rs +++ b/src/test/run-pass/lazychan.rs @@ -9,13 +9,13 @@ fn main() { let ch = chan(p); let mut y: int; - task::spawn {|| child(ch); }; + task::spawn(|| child(ch) ); y = recv(p); #debug("received 1"); log(debug, y); assert (y == 10); - task::spawn {|| child(ch); }; + task::spawn(|| child(ch) ); y = recv(p); #debug("received 2"); log(debug, y); diff --git a/src/test/run-pass/linear-for-loop.rs b/src/test/run-pass/linear-for-loop.rs index c963c781053..7bb9c3060cf 100644 --- a/src/test/run-pass/linear-for-loop.rs +++ b/src/test/run-pass/linear-for-loop.rs @@ -1,14 +1,14 @@ fn main() { - let x = [1, 2, 3]/~; + let x = ~[1, 2, 3]; let mut y = 0; - for x.each {|i| log(debug, i); y += i; } + for x.each |i| { log(debug, i); y += i; } log(debug, y); assert (y == 6); let s = "hello there"; let mut i: int = 0; - for str::each(s) {|c| + for str::each(s) |c| { if i == 0 { assert (c == 'h' as u8); } if i == 1 { assert (c == 'e' as u8); } if i == 2 { assert (c == 'l' as u8); } diff --git a/src/test/run-pass/liveness-move-in-loop.rs b/src/test/run-pass/liveness-move-in-loop.rs index edc3d97d794..e0a05c9b297 100644 --- a/src/test/run-pass/liveness-move-in-loop.rs +++ b/src/test/run-pass/liveness-move-in-loop.rs @@ -1,11 +1,11 @@ fn take(-x: int) -> int {x} fn the_loop() { - let mut list = []/~; + let mut list = ~[]; loop { let x = 5; if x > 3 { - list += [take(x)]/~; + list += ~[take(x)]; } else { break; } diff --git a/src/test/run-pass/log-knows-the-names-of-variants-in-std.rs b/src/test/run-pass/log-knows-the-names-of-variants-in-std.rs index 78d912ff4c8..c099375cb7c 100644 --- a/src/test/run-pass/log-knows-the-names-of-variants-in-std.rs +++ b/src/test/run-pass/log-knows-the-names-of-variants-in-std.rs @@ -11,7 +11,7 @@ fn check_log<T>(exp: str, v: T) { } fn main() { - let x = list::from_vec([a(22u), b("hi")]/~); + let x = list::from_vec(~[a(22u), b("hi")]); let exp = "@cons(a(22), @cons(b(~\"hi\"), @nil))"; assert #fmt["%?", x] == exp; check_log(exp, x); diff --git a/src/test/run-pass/log-linearized.rs b/src/test/run-pass/log-linearized.rs index 8bf9a94fa00..eb3bca93c07 100644 --- a/src/test/run-pass/log-linearized.rs +++ b/src/test/run-pass/log-linearized.rs @@ -5,10 +5,10 @@ enum option<T> { some(T), } -type smallintmap<T> = @{mut v: [mut option<T>]/~}; +type smallintmap<T> = @{mut v: ~[mut option<T>]}; fn mk<T>() -> smallintmap<T> { - let v: [mut option<T>]/~ = [mut]/~; + let v: ~[mut option<T>] = ~[mut]; ret @{mut v: v}; } diff --git a/src/test/run-pass/log-str.rs b/src/test/run-pass/log-str.rs index 5c086717748..7f424bf69f7 100644 --- a/src/test/run-pass/log-str.rs +++ b/src/test/run-pass/log-str.rs @@ -1,4 +1,4 @@ fn main() { - assert "~[1, 2, 3]" == sys::log_str([1, 2, 3]/~); + assert "~[1, 2, 3]" == sys::log_str(~[1, 2, 3]); assert #fmt["%?/%6?", [1, 2, 3]/~, "hi"] == "~[1, 2, 3]/ ~\"hi\""; } diff --git a/src/test/run-pass/loop-scope.rs b/src/test/run-pass/loop-scope.rs index f43f8673a50..9dc53e726d6 100644 --- a/src/test/run-pass/loop-scope.rs +++ b/src/test/run-pass/loop-scope.rs @@ -1,6 +1,6 @@ fn main() { - let x = [10, 20, 30]/~; + let x = ~[10, 20, 30]; let mut sum = 0; - for x.each {|x| sum += x; } + for x.each |x| { sum += x; } assert (sum == 60); } diff --git a/src/test/run-pass/lots-a-fail.rs b/src/test/run-pass/lots-a-fail.rs index 820fe715b39..da2253fb615 100644 --- a/src/test/run-pass/lots-a-fail.rs +++ b/src/test/run-pass/lots-a-fail.rs @@ -9,13 +9,13 @@ fn die() { } fn iloop() { - task::spawn {|| die(); }; + task::spawn(|| die() ); } fn main() { - for uint::range(0u, 100u) {|_i| + for uint::range(0u, 100u) |_i| { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| iloop(); }; + task::run(builder, || iloop() ); } } \ No newline at end of file diff --git a/src/test/run-pass/macro-by-example-2.rs b/src/test/run-pass/macro-by-example-2.rs index e1a7b97e888..668405c19f7 100644 --- a/src/test/run-pass/macro-by-example-2.rs +++ b/src/test/run-pass/macro-by-example-2.rs @@ -1,3 +1,9 @@ +// I can't for the life of me manage to untangle all of the brackets +// in this test. I am just suppessing the old_vec diagnostic. This +// doesn't actually care what sort of vector it uses, so if we change +// what vectors mean, it shouldn't mind... +#[warn(no_old_vecs)]; + fn main() { #macro[[#zip_or_unzip[[x, ...], [y, ...]], [[x, y], ...]], [#zip_or_unzip[[xx, yy], ...], [[xx, ...], [yy, ...]]]]; diff --git a/src/test/run-pass/main-ivec.rs b/src/test/run-pass/main-ivec.rs index b7c01eb9ad1..a62147f286f 100644 --- a/src/test/run-pass/main-ivec.rs +++ b/src/test/run-pass/main-ivec.rs @@ -1 +1 @@ -fn main(args: [str]/~) { for args.each {|s| log(debug, s); } } +fn main(args: ~[str]) { for args.each |s| { log(debug, s); } } diff --git a/src/test/run-pass/many.rs b/src/test/run-pass/many.rs index 0914e059b95..9083e1841c7 100644 --- a/src/test/run-pass/many.rs +++ b/src/test/run-pass/many.rs @@ -10,7 +10,7 @@ fn sub(parent: comm::chan<int>, id: int) { } else { let p = comm::port(); let ch = comm::chan(p); - let child = task::spawn {|| sub(ch, id - 1); }; + let child = task::spawn(|| sub(ch, id - 1) ); let y = comm::recv(p); comm::send(parent, y + 1); } @@ -19,7 +19,7 @@ fn sub(parent: comm::chan<int>, id: int) { fn main() { let p = comm::port(); let ch = comm::chan(p); - let child = task::spawn {|| sub(ch, 200); }; + let child = task::spawn(|| sub(ch, 200) ); let y = comm::recv(p); #debug("transmission complete"); log(debug, y); diff --git a/src/test/run-pass/maybe-mutable.rs b/src/test/run-pass/maybe-mutable.rs index 41a649052b7..abbf50ed67b 100644 --- a/src/test/run-pass/maybe-mutable.rs +++ b/src/test/run-pass/maybe-mutable.rs @@ -2,15 +2,15 @@ // -*- rust -*- -fn len(v: [const int]/~) -> uint { +fn len(v: ~[const int]) -> uint { let mut i = 0u; while i < vec::len(v) { i += 1u; } ret i; } fn main() { - let v0 = [1, 2, 3, 4, 5]/~; + let v0 = ~[1, 2, 3, 4, 5]; log(debug, len(v0)); - let v1 = [mut 1, 2, 3, 4, 5]/~; + let v1 = ~[mut 1, 2, 3, 4, 5]; log(debug, len(v1)); } diff --git a/src/test/run-pass/mod-view-items.rs b/src/test/run-pass/mod-view-items.rs index c92821da978..9104469d435 100644 --- a/src/test/run-pass/mod-view-items.rs +++ b/src/test/run-pass/mod-view-items.rs @@ -7,7 +7,7 @@ mod m { use std; import vec; - fn f() -> [int]/~ { vec::from_elem(1u, 0) } + fn f() -> ~[int] { vec::from_elem(1u, 0) } } fn main() { let x = m::f(); } diff --git a/src/test/run-pass/monad.rs b/src/test/run-pass/monad.rs index 671311018b5..674460a6a80 100644 --- a/src/test/run-pass/monad.rs +++ b/src/test/run-pass/monad.rs @@ -1,7 +1,7 @@ -impl monad<A> for [A]/~ { - fn bind<B>(f: fn(A) -> [B]/~) -> [B]/~ { - let mut r = []/~; - for self.each {|elt| r += f(elt); } +impl monad<A> for ~[A] { + fn bind<B>(f: fn(A) -> ~[B]) -> ~[B] { + let mut r = ~[]; + for self.each |elt| { r += f(elt); } r } } @@ -16,12 +16,12 @@ impl monad<A> for option<A> { } fn transform(x: option<int>) -> option<str> { - x.bind {|n| some(n + 1)}.bind {|n| some(int::str(n))} + x.bind(|n| some(n + 1) ).bind(|n| some(int::str(n)) ) } fn main() { assert transform(some(10)) == some("11"); assert transform(none) == none; - assert ["hi"]/~.bind {|x| [x, x + "!"]/~}.bind {|x| [x, x + "?"]/~} == - ["hi", "hi?", "hi!", "hi!?"]/~; + assert (~["hi"]).bind(|x| ~[x, x + "!"] ).bind(|x| ~[x, x + "?"] ) == + ~["hi", "hi?", "hi!", "hi!?"]; } diff --git a/src/test/run-pass/morestack5.rs b/src/test/run-pass/morestack5.rs index ea4f6466ae8..cafd2fbc491 100644 --- a/src/test/run-pass/morestack5.rs +++ b/src/test/run-pass/morestack5.rs @@ -12,7 +12,7 @@ fn getbig(&&i: int) { fn main() { let mut sz = 400u; while sz < 500u { - task::try {|| getbig(200) }; + task::try(|| getbig(200) ); sz += 1u; } } \ No newline at end of file diff --git a/src/test/run-pass/morestack6.rs b/src/test/run-pass/morestack6.rs index 3b0195511ef..eb0f71e67ed 100644 --- a/src/test/run-pass/morestack6.rs +++ b/src/test/run-pass/morestack6.rs @@ -46,18 +46,18 @@ fn runtest2(f: fn~(), frame_backoff: u32, last_stk: *u8) -> u32 { } fn main() { - let fns = [ + let fns = ~[ calllink01, calllink02, calllink03, calllink08, calllink09, calllink10 - ]/~; + ]; let rng = rand::rng(); - for fns.each {|f| + for fns.each |f| { let sz = rng.next() % 256u32 + 256u32; let frame_backoff = rng.next() % 10u32 + 1u32; - task::try {|| runtest(f, frame_backoff) }; + task::try(|| runtest(f, frame_backoff) ); } } diff --git a/src/test/run-pass/move-3-unique.rs b/src/test/run-pass/move-3-unique.rs index a642b194aec..ad757d5cc12 100644 --- a/src/test/run-pass/move-3-unique.rs +++ b/src/test/run-pass/move-3-unique.rs @@ -10,7 +10,7 @@ fn test(x: bool, foo: ~{x: int, y: int, z: int}) -> int { fn main() { let x = ~{x: 1, y: 2, z: 3}; - for uint::range(0u, 10000u) {|_i| + for uint::range(0u, 10000u) |_i| { assert (test(true, x) == 2); } assert (test(false, x) == 5); diff --git a/src/test/run-pass/move-3.rs b/src/test/run-pass/move-3.rs index 2e7c30dcf5d..1ba6ed29fbb 100644 --- a/src/test/run-pass/move-3.rs +++ b/src/test/run-pass/move-3.rs @@ -10,7 +10,7 @@ fn test(x: bool, foo: @{x: int, y: int, z: int}) -> int { fn main() { let x = @{x: 1, y: 2, z: 3}; - for uint::range(0u, 10000u) {|i| + for uint::range(0u, 10000u) |i| { assert (test(true, x) == 2); } assert (test(false, x) == 5); diff --git a/src/test/run-pass/move-arg-2.rs b/src/test/run-pass/move-arg-2.rs index c70f3733ab7..01e287c0db0 100644 --- a/src/test/run-pass/move-arg-2.rs +++ b/src/test/run-pass/move-arg-2.rs @@ -1,10 +1,10 @@ -fn test(-foo: @[int]/~) { assert (foo[0] == 10); } +fn test(-foo: @~[int]) { assert (foo[0] == 10); } fn main() { - let x = @[10]/~; + let x = @~[10]; // Test forgetting a local by move-in test(x); // Test forgetting a temporary by move-in. - test(@[10]/~); + test(@~[10]); } diff --git a/src/test/run-pass/mutable-alias-vec.rs b/src/test/run-pass/mutable-alias-vec.rs index edc682b892d..bb18a535739 100644 --- a/src/test/run-pass/mutable-alias-vec.rs +++ b/src/test/run-pass/mutable-alias-vec.rs @@ -3,10 +3,10 @@ // -*- rust -*- use std; -fn grow(&v: [int]/~) { v += [1]/~; } +fn grow(&v: ~[int]) { v += ~[1]; } fn main() { - let mut v: [int]/~ = []/~; + let mut v: ~[int] = ~[]; grow(v); grow(v); grow(v); diff --git a/src/test/run-pass/mutable-huh-variance-vec1.rs b/src/test/run-pass/mutable-huh-variance-vec1.rs index 403716321bc..23deca00fed 100644 --- a/src/test/run-pass/mutable-huh-variance-vec1.rs +++ b/src/test/run-pass/mutable-huh-variance-vec1.rs @@ -1,12 +1,12 @@ // error-pattern: mismatched types fn main() { - let v = [[0]/~]/~; + let v = ~[~[0]]; // This is ok because the outer vec is covariant with respect // to the inner vec. If the outer vec was mut then we // couldn't do this. - fn f(&&v: [[const int]/~]/~) { + fn f(&&v: ~[~[const int]]) { } f(v); diff --git a/src/test/run-pass/mutable-huh-variance-vec2.rs b/src/test/run-pass/mutable-huh-variance-vec2.rs index 56c8def9ac6..06e43df668e 100644 --- a/src/test/run-pass/mutable-huh-variance-vec2.rs +++ b/src/test/run-pass/mutable-huh-variance-vec2.rs @@ -1,12 +1,12 @@ // error-pattern: mismatched types fn main() { - let v = [[0]/~]/~; + let v = ~[~[0]]; // This is ok because the outer vec is covariant with respect // to the inner vec. If the outer vec was mut then we // couldn't do this. - fn f(&&v: [const [const int]/~]/~) { + fn f(&&v: ~[const ~[const int]]) { } f(v); diff --git a/src/test/run-pass/mutable-vec-drop.rs b/src/test/run-pass/mutable-vec-drop.rs index b7aa4278b49..985464ba2a1 100644 --- a/src/test/run-pass/mutable-vec-drop.rs +++ b/src/test/run-pass/mutable-vec-drop.rs @@ -1,6 +1,6 @@ fn main() { // This just tests whether the vec leaks its members. - let pvec: [mut @{a: int, b: int}]/~ = - [mut @{a: 1, b: 2}, @{a: 3, b: 4}, @{a: 5, b: 6}]/~; + let pvec: ~[mut @{a: int, b: int}] = + ~[mut @{a: 1, b: 2}, @{a: 3, b: 4}, @{a: 5, b: 6}]; } diff --git a/src/test/run-pass/native-fn-linkname.rs b/src/test/run-pass/native-fn-linkname.rs index eaa6522e2d1..e25b34e4fd3 100644 --- a/src/test/run-pass/native-fn-linkname.rs +++ b/src/test/run-pass/native-fn-linkname.rs @@ -12,7 +12,7 @@ native mod libc { fn strlen(str: str) -> uint unsafe { // C string is terminated with a zero - let bytes = str::bytes(str) + [0u8]/~; + let bytes = str::bytes(str) + ~[0u8]; ret libc::my_strlen(vec::unsafe::to_ptr(bytes)); } diff --git a/src/test/run-pass/native2.rs b/src/test/run-pass/native2.rs index 5fbef59101e..377d7310375 100644 --- a/src/test/run-pass/native2.rs +++ b/src/test/run-pass/native2.rs @@ -24,4 +24,4 @@ native mod libc { #[nolink] native mod baz { } -fn main(args: [str]/~) { } +fn main(args: ~[str]) { } diff --git a/src/test/run-pass/newlambdas-ret-infer.rs b/src/test/run-pass/newlambdas-ret-infer.rs new file mode 100644 index 00000000000..e550155cb60 --- /dev/null +++ b/src/test/run-pass/newlambdas-ret-infer.rs @@ -0,0 +1,9 @@ +// Test that the lambda kind is inferred correctly as a return +// expression + +fn shared() -> fn@() { ret || (); } + +fn unique() -> fn~() { ret || (); } + +fn main() { +} diff --git a/src/test/run-pass/newlambdas-ret-infer2.rs b/src/test/run-pass/newlambdas-ret-infer2.rs new file mode 100644 index 00000000000..8b9a0be8332 --- /dev/null +++ b/src/test/run-pass/newlambdas-ret-infer2.rs @@ -0,0 +1,10 @@ +// xfail-test fn~ is not inferred +// Test that the lambda kind is inferred correctly as a return +// expression + +fn shared() -> fn@() { || () } + +fn unique() -> fn~() { || () } + +fn main() { +} diff --git a/src/test/run-pass/newlambdas.rs b/src/test/run-pass/newlambdas.rs new file mode 100644 index 00000000000..f6c7d382585 --- /dev/null +++ b/src/test/run-pass/newlambdas.rs @@ -0,0 +1,19 @@ +// Tests for the new |args| expr lambda syntax + +fn f(i: int, f: fn(int) -> int) -> int { f(i) } + +fn g(g: fn()) { } + +fn ff() -> fn@(int) -> int { + ret |x| x + 1; +} + +fn main() { + assert f(10, |a| a) == 10; + g(||()); + assert do f(10) |a| { a } == 10; + do g() || { } + let _x: fn@() -> int = || 10; + let _y: fn@(int) -> int = |a| a; + assert ff()(10) == 11; +} diff --git a/src/test/run-pass/newtype-polymorphic.rs b/src/test/run-pass/newtype-polymorphic.rs index 4398f40a08a..d5f2d62bdfa 100644 --- a/src/test/run-pass/newtype-polymorphic.rs +++ b/src/test/run-pass/newtype-polymorphic.rs @@ -1,11 +1,11 @@ -enum myvec<X> = [X]/~; +enum myvec<X> = ~[X]; -fn myvec_deref<X: copy>(mv: myvec<X>) -> [X]/~ { ret *mv; } +fn myvec_deref<X: copy>(mv: myvec<X>) -> ~[X] { ret *mv; } fn myvec_elt<X: copy>(mv: myvec<X>) -> X { ret mv[0]; } fn main() { - let mv = myvec([1, 2, 3]/~); + let mv = myvec(~[1, 2, 3]); assert (myvec_deref(mv)[1] == 2); assert (myvec_elt(mv) == 1); assert (mv[2] == 3); diff --git a/src/test/run-pass/operator-overloading-leaks.rs b/src/test/run-pass/operator-overloading-leaks.rs index bcb344f5acd..20f6201bd6b 100644 --- a/src/test/run-pass/operator-overloading-leaks.rs +++ b/src/test/run-pass/operator-overloading-leaks.rs @@ -1,12 +1,12 @@ // The cases commented as "Leaks" need to not leak. Issue #2581 -impl methods<T: copy> for [T]/~ { - fn -(x: [T]/&) -> [T]/~ { - [x[0], x[0], x[0]]/~ +impl methods<T: copy> for ~[T] { + fn -(x: &[T]) -> ~[T] { + ~[x[0], x[0], x[0]] } - fn foo(x: [T]/&) -> [T]/~ { - [x[0], x[0], x[0]]/~ + fn foo(x: &[T]) -> ~[T] { + ~[x[0], x[0], x[0]] } } @@ -30,23 +30,23 @@ impl methods for @int { fn main() { // leaks - let mut bar = [1, 2, 3]/~; - bar -= [3, 2, 1]/~; - bar -= [4, 5, 6]/~; + let mut bar = ~[1, 2, 3]; + bar -= ~[3, 2, 1]; + bar -= ~[4, 5, 6]; io::println(#fmt("%?", bar)); // okay - let mut bar = [1, 2, 3]/~; - bar = bar.foo([3, 2, 1]/~); - bar = bar.foo([4, 5, 6]/~); + let mut bar = ~[1, 2, 3]; + bar = bar.foo(~[3, 2, 1]); + bar = bar.foo(~[4, 5, 6]); io::println(#fmt("%?", bar)); // okay - let mut bar = [1, 2, 3]/~; - bar = bar - [3, 2, 1]/~; - bar = bar - [4, 5, 6]/~; + let mut bar = ~[1, 2, 3]; + bar = bar - ~[3, 2, 1]; + bar = bar - ~[4, 5, 6]; io::println(#fmt("%?", bar)); diff --git a/src/test/run-pass/option-ext.rs b/src/test/run-pass/option-ext.rs index cde7d73e720..12c0feac891 100644 --- a/src/test/run-pass/option-ext.rs +++ b/src/test/run-pass/option-ext.rs @@ -1,4 +1,4 @@ -fn main(args: [str]/~) { +fn main(args: ~[str]) { let thing = "{{ f }}"; let f = str::find_str(thing, "{{"); diff --git a/src/test/run-pass/osmain.rs b/src/test/run-pass/osmain.rs index 981426ce62a..17aec9ecbbd 100644 --- a/src/test/run-pass/osmain.rs +++ b/src/test/run-pass/osmain.rs @@ -24,7 +24,7 @@ fn run(i: int) { }; task::set_opts(builder, opts); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { task::yield(); let builder = task::builder(); let opts = { @@ -36,7 +36,7 @@ fn run(i: int) { }; task::set_opts(builder, opts); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { task::yield(); run(i - 1); task::yield(); diff --git a/src/test/run-pass/path.rs b/src/test/run-pass/path.rs index 96a7a7229b6..6bccf726921 100644 --- a/src/test/run-pass/path.rs +++ b/src/test/run-pass/path.rs @@ -4,4 +4,4 @@ mod foo { fn bar(offset: uint) { } } -fn main(args: [str]/~) { foo::bar(0u); } +fn main(args: ~[str]) { foo::bar(0u); } diff --git a/src/test/run-pass/pattern-bound-var-in-for-each.rs b/src/test/run-pass/pattern-bound-var-in-for-each.rs index a2f008660fd..a2823561e62 100644 --- a/src/test/run-pass/pattern-bound-var-in-for-each.rs +++ b/src/test/run-pass/pattern-bound-var-in-for-each.rs @@ -6,7 +6,7 @@ fn foo(src: uint) { alt some(src) { some(src_id) { - for uint::range(0u, 10u) {|i| + for uint::range(0u, 10u) |i| { let yyy = src_id; assert (yyy == 0u); } diff --git a/src/test/run-pass/private-method.rs b/src/test/run-pass/private-method.rs index cb14fd537b8..a6c1929dc1f 100644 --- a/src/test/run-pass/private-method.rs +++ b/src/test/run-pass/private-method.rs @@ -1,7 +1,7 @@ class cat { priv { let mut meows : uint; - fn nap() { for uint::range(1u, 10u) {|_i|}} + fn nap() { for uint::range(1u, 10u) |_i| { }} } let how_hungry : int; diff --git a/src/test/run-pass/pure-sum.rs b/src/test/run-pass/pure-sum.rs index 823f6751c23..1b24c65e8fa 100644 --- a/src/test/run-pass/pure-sum.rs +++ b/src/test/run-pass/pure-sum.rs @@ -1,6 +1,6 @@ // Check that pure functions can modify local state. -pure fn sums_to(v: [int]/~, sum: int) -> bool { +pure fn sums_to(v: ~[int], sum: int) -> bool { let mut i = 0u, sum0 = 0; while i < v.len() { sum0 += v[i]; @@ -9,7 +9,7 @@ pure fn sums_to(v: [int]/~, sum: int) -> bool { ret sum0 == sum; } -pure fn sums_to_using_uniq(v: [int]/~, sum: int) -> bool { +pure fn sums_to_using_uniq(v: ~[int], sum: int) -> bool { let mut i = 0u, sum0 = ~mut 0; while i < v.len() { *sum0 += v[i]; @@ -18,7 +18,7 @@ pure fn sums_to_using_uniq(v: [int]/~, sum: int) -> bool { ret *sum0 == sum; } -pure fn sums_to_using_rec(v: [int]/~, sum: int) -> bool { +pure fn sums_to_using_rec(v: ~[int], sum: int) -> bool { let mut i = 0u, sum0 = {f: 0}; while i < v.len() { sum0.f += v[i]; @@ -27,7 +27,7 @@ pure fn sums_to_using_rec(v: [int]/~, sum: int) -> bool { ret sum0.f == sum; } -pure fn sums_to_using_uniq_rec(v: [int]/~, sum: int) -> bool { +pure fn sums_to_using_uniq_rec(v: ~[int], sum: int) -> bool { let mut i = 0u, sum0 = {f: ~mut 0}; while i < v.len() { *sum0.f += v[i]; diff --git a/src/test/run-pass/rcvr-borrowed-to-slice.rs b/src/test/run-pass/rcvr-borrowed-to-slice.rs index 6718de9d9d6..f03defbd6fc 100644 --- a/src/test/run-pass/rcvr-borrowed-to-slice.rs +++ b/src/test/run-pass/rcvr-borrowed-to-slice.rs @@ -1,26 +1,26 @@ // Note: impl on a slice -impl foo/& for [int]/& { +impl foo/& for &[int] { fn sum() -> int { let mut sum = 0; - for vec::each(self) { |e| sum += e; } + for vec::each(self) |e| { sum += e; } ret sum; } } -fn call_sum(x: [int]/&) -> int { x.sum() } +fn call_sum(x: &[int]) -> int { x.sum() } fn main() { - let x = [1, 2, 3]/~; + let x = ~[1, 2, 3]; let y = call_sum(x); #debug["y==%d", y]; assert y == 6; - let x = [mut 1, 2, 3]/~; + let x = ~[mut 1, 2, 3]; let y = x.sum(); #debug["y==%d", y]; assert y == 6; - let x = [1, 2, 3]/~; + let x = ~[1, 2, 3]; let y = x.sum(); #debug["y==%d", y]; assert y == 6; diff --git a/src/test/run-pass/reflect-visit-data.rs b/src/test/run-pass/reflect-visit-data.rs index d0818bda7c4..12cda37a1f2 100644 --- a/src/test/run-pass/reflect-visit-data.rs +++ b/src/test/run-pass/reflect-visit-data.rs @@ -250,50 +250,50 @@ impl ptr_visitor<V: ty_visitor movable_ptr> } fn visit_enter_vec(mtbl: uint) -> bool { - self.align_to::<[u8]/~>(); + self.align_to::<~[u8]>(); if ! self.inner.visit_enter_vec(mtbl) { ret false; } true } fn visit_leave_vec(mtbl: uint) -> bool { if ! self.inner.visit_leave_vec(mtbl) { ret false; } - self.bump_past::<[u8]/~>(); + self.bump_past::<~[u8]>(); true } fn visit_enter_evec_box(mtbl: uint) -> bool { - self.align_to::<[u8]/@>(); + self.align_to::<@[u8]>(); if ! self.inner.visit_enter_evec_box(mtbl) { ret false; } true } fn visit_leave_evec_box(mtbl: uint) -> bool { if ! self.inner.visit_leave_evec_box(mtbl) { ret false; } - self.bump_past::<[u8]/@>(); + self.bump_past::<@[u8]>(); true } fn visit_enter_evec_uniq(mtbl: uint) -> bool { - self.align_to::<[u8]/~>(); + self.align_to::<~[u8]>(); if ! self.inner.visit_enter_evec_uniq(mtbl) { ret false; } true } fn visit_leave_evec_uniq(mtbl: uint) -> bool { if ! self.inner.visit_leave_evec_uniq(mtbl) { ret false; } - self.bump_past::<[u8]/~>(); + self.bump_past::<~[u8]>(); true } fn visit_enter_evec_slice(mtbl: uint) -> bool { - self.align_to::<[u8]/&static>(); + self.align_to::<&[u8]static>(); if ! self.inner.visit_enter_evec_slice(mtbl) { ret false; } true } fn visit_leave_evec_slice(mtbl: uint) -> bool { if ! self.inner.visit_leave_evec_slice(mtbl) { ret false; } - self.bump_past::<[u8]/&static>(); + self.bump_past::<&[u8]static>(); true } @@ -547,7 +547,7 @@ impl ptr_visitor<V: ty_visitor movable_ptr> enum my_visitor = @{ mut ptr1: *c_void, mut ptr2: *c_void, - mut vals: [str]/~ + mut vals: ~[str] }; impl extra_methods for my_visitor { @@ -572,7 +572,7 @@ impl of ty_visitor for my_visitor { fn visit_bool() -> bool { /* self.get::<bool>() {|b| - self.vals += [bool::to_str(b)]/~; + self.vals += ~[bool::to_str(b)]; } */ true @@ -580,7 +580,7 @@ impl of ty_visitor for my_visitor { fn visit_int() -> bool { /* self.get::<int>() {|i| - self.vals += [int::to_str(i, 10u)]/~; + self.vals += ~[int::to_str(i, 10u)]; } */ true @@ -699,7 +699,7 @@ fn main() { let p = ptr::addr_of(r) as *c_void; let u = my_visitor(@{mut ptr1: p, mut ptr2: p, - mut vals: []/~}); + mut vals: ~[]}); let v = ptr_visit_adaptor({inner: u}); let vv = v as intrinsic::ty_visitor; intrinsic::visit_ty::<(int,int,int,bool,bool)>(vv); diff --git a/src/test/run-pass/reflect-visit-type.rs b/src/test/run-pass/reflect-visit-type.rs index 33f5115df5f..19bad13e066 100644 --- a/src/test/run-pass/reflect-visit-type.rs +++ b/src/test/run-pass/reflect-visit-type.rs @@ -1,7 +1,7 @@ // FIXME: un-xfail after snapshot // xfail-test -enum my_visitor = @{ mut types: [str]/~ }; +enum my_visitor = @{ mut types: ~[str] }; impl of intrinsic::ty_visitor for my_visitor { fn visit_bot() -> bool { @@ -139,14 +139,14 @@ impl of intrinsic::ty_visitor for my_visitor { } fn main() { - let v = my_visitor(@{mut types: []/~}); + let v = my_visitor(@{mut types: ~[]}); let vv = v as intrinsic::ty_visitor; intrinsic::visit_ty::<bool>(vv); intrinsic::visit_ty::<int>(vv); intrinsic::visit_ty::<i8>(vv); intrinsic::visit_ty::<i16>(vv); - intrinsic::visit_ty::<[int]/~>(vv); + intrinsic::visit_ty::<~[int]>(vv); for (copy v.types).each {|s| io::println(#fmt("type: %s", s)); diff --git a/src/test/run-pass/regions-borrow-evec-at.rs b/src/test/run-pass/regions-borrow-evec-at.rs index e061f566e12..df62a1e5763 100644 --- a/src/test/run-pass/regions-borrow-evec-at.rs +++ b/src/test/run-pass/regions-borrow-evec-at.rs @@ -1,11 +1,11 @@ // xfail-test -fn foo(x: [uint]/&) -> uint { +fn foo(x: &[uint]) -> uint { x[0] } fn main() { - let p = [22u]/@; + let p = @[22u]; let r = foo(p); assert r == 22u; } diff --git a/src/test/run-pass/regions-borrow-evec-fixed.rs b/src/test/run-pass/regions-borrow-evec-fixed.rs index 50306b57139..5a0875109f6 100644 --- a/src/test/run-pass/regions-borrow-evec-fixed.rs +++ b/src/test/run-pass/regions-borrow-evec-fixed.rs @@ -1,6 +1,6 @@ // xfail-test -fn foo(x: [int]/&) -> int { +fn foo(x: &[int]) -> int { x[0] } diff --git a/src/test/run-pass/regions-borrow-evec-uniq.rs b/src/test/run-pass/regions-borrow-evec-uniq.rs index 8ee9dc7f958..211c37b3e29 100644 --- a/src/test/run-pass/regions-borrow-evec-uniq.rs +++ b/src/test/run-pass/regions-borrow-evec-uniq.rs @@ -1,13 +1,13 @@ -fn foo(x: [int]/&) -> int { +fn foo(x: &[int]) -> int { x[0] } fn main() { - let p = [1,2,3,4,5]/~; + let p = ~[1,2,3,4,5]; let r = foo(p); assert r == 1; - let p = [5,4,3,2,1]/~; + let p = ~[5,4,3,2,1]; let r = foo(p); assert r == 5; } diff --git a/src/test/run-pass/regions-params.rs b/src/test/run-pass/regions-params.rs index 9e483f063a2..0a378235682 100644 --- a/src/test/run-pass/regions-params.rs +++ b/src/test/run-pass/regions-params.rs @@ -3,9 +3,9 @@ fn region_identity(x: &r.uint) -> &r.uint { x } fn apply<T>(t: T, f: fn(T) -> T) -> T { f(t) } fn parameterized(x: &uint) -> uint { - let z = apply(x) {|y| + let z = apply(x, ({|y| region_identity(y) - }; + })); *z } diff --git a/src/test/run-pass/ret-break-cont-in-block.rs b/src/test/run-pass/ret-break-cont-in-block.rs index ff2b8030326..9df02394887 100644 --- a/src/test/run-pass/ret-break-cont-in-block.rs +++ b/src/test/run-pass/ret-break-cont-in-block.rs @@ -1,4 +1,4 @@ -fn iter<T>(v: [T]/~, it: fn(T) -> bool) { +fn iter<T>(v: ~[T], it: fn(T) -> bool) { let mut i = 0u, l = v.len(); while i < l { if !it(v[i]) { break; } @@ -6,19 +6,19 @@ fn iter<T>(v: [T]/~, it: fn(T) -> bool) { } } -fn find_pos<T>(n: T, h: [T]/~) -> option<uint> { +fn find_pos<T>(n: T, h: ~[T]) -> option<uint> { let mut i = 0u; - for iter(h) {|e| + for iter(h) |e| { if e == n { ret some(i); } i += 1u; } none } -fn bail_deep(x: [[bool]/~]/~) { +fn bail_deep(x: ~[~[bool]]) { let mut seen = false; - for iter(x) {|x| - for iter(x) {|x| + for iter(x) |x| { + for iter(x) |x| { assert !seen; if x { seen = true; ret; } } @@ -27,8 +27,8 @@ fn bail_deep(x: [[bool]/~]/~) { } fn ret_deep() -> str { - for iter([1, 2]/~) {|e| - for iter([3, 4]/~) {|x| + for iter(~[1, 2]) |e| { + for iter(~[3, 4]) |x| { if e + x > 4 { ret "hi"; } } } @@ -37,7 +37,7 @@ fn ret_deep() -> str { fn main() { let mut last = 0; - for vec::all([1, 2, 3, 4, 5, 6, 7]/~) {|e| + for vec::all(~[1, 2, 3, 4, 5, 6, 7]) |e| { last = e; if e == 5 { break; } if e % 2 == 1 { cont; } @@ -45,13 +45,13 @@ fn main() { }; assert last == 5; - assert find_pos(1, [0, 1, 2, 3]/~) == some(1u); - assert find_pos(1, [0, 4, 2, 3]/~) == none; - assert find_pos("hi", ["foo", "bar", "baz", "hi"]/~) == some(3u); + assert find_pos(1, ~[0, 1, 2, 3]) == some(1u); + assert find_pos(1, ~[0, 4, 2, 3]) == none; + assert find_pos("hi", ~["foo", "bar", "baz", "hi"]) == some(3u); - bail_deep([[false, false]/~, [true, true]/~, [false, true]/~]/~); - bail_deep([[true]/~]/~); - bail_deep([[false, false, false]/~]/~); + bail_deep(~[~[false, false], ~[true, true], ~[false, true]]); + bail_deep(~[~[true]]); + bail_deep(~[~[false, false, false]]); assert ret_deep() == "hi"; } diff --git a/src/test/run-pass/rt-circular-buffer.rs b/src/test/run-pass/rt-circular-buffer.rs index b35927529fb..e7db39c2edf 100644 --- a/src/test/run-pass/rt-circular-buffer.rs +++ b/src/test/run-pass/rt-circular-buffer.rs @@ -32,7 +32,7 @@ fn test_init() { fn test_grow() { let myport = port(); let mychan = chan(myport); - for uint::range(0u, 100u) {|i| + for uint::range(0u, 100u) |i| { let val: record = {val1: 0u32, val2: 0u32, val3: 0u32}; comm::send(mychan, val); } @@ -50,11 +50,11 @@ fn test_shrink1() { fn test_shrink2() { let myport = port(); let mychan = chan(myport); - for uint::range(0u, 100u) {|_i| + for uint::range(0u, 100u) |_i| { let val: record = {val1: 0u32, val2: 0u32, val3: 0u32}; send(mychan, val); } - for uint::range(0u, 100u) {|_i| let x = recv(myport); } + for uint::range(0u, 100u) |_i| { let x = recv(myport); } } @@ -62,7 +62,7 @@ fn test_shrink2() { fn test_rotate() { let myport = port(); let mychan = chan(myport); - for uint::range(0u, 100u) {|i| + for uint::range(0u, 100u) |i| { let val = {val1: i as u32, val2: i as u32, val3: i as u32}; send(mychan, val); let x = recv(myport); @@ -78,13 +78,13 @@ fn test_rotate() { fn test_rotate_grow() { let myport = port::<record>(); let mychan = chan(myport); - for uint::range(0u, 10u) {|j| - for uint::range(0u, 10u) {|i| + for uint::range(0u, 10u) |j| { + for uint::range(0u, 10u) |i| { let val: record = {val1: i as u32, val2: i as u32, val3: i as u32}; send(mychan, val); } - for uint::range(0u, 10u) {|i| + for uint::range(0u, 10u) |i| { let x = recv(myport); assert (x.val1 == i as u32); assert (x.val2 == i as u32); diff --git a/src/test/run-pass/send-iloop.rs b/src/test/run-pass/send-iloop.rs index 904eef7a2e8..6f2560f086f 100644 --- a/src/test/run-pass/send-iloop.rs +++ b/src/test/run-pass/send-iloop.rs @@ -9,7 +9,7 @@ fn die() { } fn iloop() { - task::spawn {|| die(); }; + task::spawn(|| die() ); let p = comm::port::<()>(); let c = comm::chan(p); loop { @@ -21,9 +21,9 @@ fn iloop() { } fn main() { - for uint::range(0u, 16u) {|_i| + for uint::range(0u, 16u) |_i| { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| iloop(); } + task::run(builder, || iloop() ); } } \ No newline at end of file diff --git a/src/test/run-pass/send-resource.rs b/src/test/run-pass/send-resource.rs index a733f998160..22fe1df937b 100644 --- a/src/test/run-pass/send-resource.rs +++ b/src/test/run-pass/send-resource.rs @@ -11,7 +11,7 @@ fn main() { let p = port(); let c = chan(p); - spawn() {|| + do spawn() || { let p = port(); c.send(chan(p)); diff --git a/src/test/run-pass/sendfn-deep-copy.rs b/src/test/run-pass/sendfn-deep-copy.rs index a5fd483e3f8..666c6805593 100644 --- a/src/test/run-pass/sendfn-deep-copy.rs +++ b/src/test/run-pass/sendfn-deep-copy.rs @@ -8,7 +8,7 @@ fn main() { test05(); } fn mk_counter<A:copy>() -> fn~(A) -> (A,uint) { // The only reason that the counter is generic is so that it closes // over both a type descriptor and some data. - let v = [mut 0u]/~; + let v = ~[mut 0u]; ret fn~(a: A) -> (A,uint) { let n = v[0]; v[0] = n + 1u; diff --git a/src/test/run-pass/sendfn-generic-fn.rs b/src/test/run-pass/sendfn-generic-fn.rs index 9e1e5127a24..ba56b12d8f7 100644 --- a/src/test/run-pass/sendfn-generic-fn.rs +++ b/src/test/run-pass/sendfn-generic-fn.rs @@ -27,7 +27,7 @@ fn spawn<A: copy, B: copy>(f: native fn(fn~(A,B)->pair<A,B>)) { let arg = fn~(a: A, b: B) -> pair<A,B> { ret make_generic_record(a, b); }; - task::spawn {|| f(arg); }; + task::spawn(|| f(arg) ); } fn test05() { diff --git a/src/test/run-pass/seq-compare.rs b/src/test/run-pass/seq-compare.rs index 423d755076f..00ddcbd9a65 100644 --- a/src/test/run-pass/seq-compare.rs +++ b/src/test/run-pass/seq-compare.rs @@ -4,13 +4,13 @@ fn main() { assert ("hello" < "hellr"); assert ("hello " > "hello"); assert ("hello" != "there"); - assert ([1, 2, 3, 4]/~ > [1, 2, 3]/~); - assert ([1, 2, 3]/~ < [1, 2, 3, 4]/~); - assert ([1, 2, 4, 4]/~ > [1, 2, 3, 4]/~); - assert ([1, 2, 3, 4]/~ < [1, 2, 4, 4]/~); - assert ([1, 2, 3]/~ <= [1, 2, 3]/~); - assert ([1, 2, 3]/~ <= [1, 2, 3, 3]/~); - assert ([1, 2, 3, 4]/~ > [1, 2, 3]/~); - assert ([1, 2, 3]/~ == [1, 2, 3]/~); - assert ([1, 2, 3]/~ != [1, 1, 3]/~); + assert (~[1, 2, 3, 4] > ~[1, 2, 3]); + assert (~[1, 2, 3] < ~[1, 2, 3, 4]); + assert (~[1, 2, 4, 4] > ~[1, 2, 3, 4]); + assert (~[1, 2, 3, 4] < ~[1, 2, 4, 4]); + assert (~[1, 2, 3] <= ~[1, 2, 3]); + assert (~[1, 2, 3] <= ~[1, 2, 3, 3]); + assert (~[1, 2, 3, 4] > ~[1, 2, 3]); + assert (~[1, 2, 3] == ~[1, 2, 3]); + assert (~[1, 2, 3] != ~[1, 1, 3]); } diff --git a/src/test/run-pass/shadow.rs b/src/test/run-pass/shadow.rs index 11ee08d3c2e..0194f4558cb 100644 --- a/src/test/run-pass/shadow.rs +++ b/src/test/run-pass/shadow.rs @@ -1,15 +1,15 @@ // -*- rust -*- -fn foo(c: [int]/~) { +fn foo(c: ~[int]) { let a: int = 5; - let mut b: [int]/~ = []/~; + let mut b: ~[int] = ~[]; alt none::<int> { some::<int>(_) { - for c.each {|i| + for c.each |i| { log(debug, a); let a = 17; - b += [a]/~; + b += ~[a]; } } _ { } @@ -18,4 +18,4 @@ fn foo(c: [int]/~) { enum t<T> { none, some(T), } -fn main() { let x = 10; let x = x + 20; assert (x == 30); foo([]/~); } +fn main() { let x = 10; let x = x + 20; assert (x == 30); foo(~[]); } diff --git a/src/test/run-pass/shape_intrinsic_tag_then_rec.rs b/src/test/run-pass/shape_intrinsic_tag_then_rec.rs index d041040772b..151d6b123c7 100644 --- a/src/test/run-pass/shape_intrinsic_tag_then_rec.rs +++ b/src/test/run-pass/shape_intrinsic_tag_then_rec.rs @@ -16,14 +16,14 @@ enum opt_span { type span = {lo: uint, hi: uint, expanded_from: opt_span}; type spanned<T> = { data: T, span: span }; type ty_ = uint; -type path_ = { global: bool, idents: [str]/~, types: [@ty]/~ }; +type path_ = { global: bool, idents: ~[str], types: ~[@ty] }; type path = spanned<path_>; type ty = spanned<ty_>; fn main() { let sp: span = {lo: 57451u, hi: 57542u, expanded_from: os_none}; let t: @ty = @{ data: 3u, span: sp }; - let p_: path_ = { global: true, idents: ["hi"]/~, types: [t] }; + let p_: path_ = { global: true, idents: ~["hi"], types: ~[t] }; let p: path = { data: p_, span: sp }; let x = { sp: sp, path: p }; log(error, x.path); diff --git a/src/test/run-pass/size-and-align.rs b/src/test/run-pass/size-and-align.rs index 3617546d0fc..b52ecafdebe 100644 --- a/src/test/run-pass/size-and-align.rs +++ b/src/test/run-pass/size-and-align.rs @@ -4,7 +4,7 @@ // -*- rust -*- enum clam<T> { a(T, int), b, } -fn uhoh<T>(v: [clam<T>]/~) { +fn uhoh<T>(v: ~[clam<T>]) { alt v[1] { a::<T>(t, u) { #debug("incorrect"); log(debug, u); fail; } b::<T> { #debug("correct"); } @@ -12,6 +12,6 @@ fn uhoh<T>(v: [clam<T>]/~) { } fn main() { - let v: [clam<int>]/~ = [b::<int>, b::<int>, a::<int>(42, 17)]/~; + let v: ~[clam<int>] = ~[b::<int>, b::<int>, a::<int>(42, 17)]; uhoh::<int>(v); } diff --git a/src/test/run-pass/spawn-fn.rs b/src/test/run-pass/spawn-fn.rs index a7daf058abe..502b62e3a52 100644 --- a/src/test/run-pass/spawn-fn.rs +++ b/src/test/run-pass/spawn-fn.rs @@ -10,9 +10,9 @@ fn x(s: str, n: int) { } fn main() { - task::spawn {|| x("hello from first spawned fn", 65); }; - task::spawn {|| x("hello from second spawned fn", 66); }; - task::spawn {|| x("hello from third spawned fn", 67); }; + task::spawn(|| x("hello from first spawned fn", 65) ); + task::spawn(|| x("hello from second spawned fn", 66) ); + task::spawn(|| x("hello from third spawned fn", 67) ); let mut i: int = 30; while i > 0 { i = i - 1; #debug("parent sleeping"); yield(); } } diff --git a/src/test/run-pass/spawn-types.rs b/src/test/run-pass/spawn-types.rs index 727d8326b4e..420d6793a44 100644 --- a/src/test/run-pass/spawn-types.rs +++ b/src/test/run-pass/spawn-types.rs @@ -19,5 +19,5 @@ fn iotask(cx: ctx, ip: str) { fn main() { let p = comm::port::<int>(); let ch = comm::chan(p); - task::spawn {|| iotask(ch, "localhost"); }; + task::spawn(|| iotask(ch, "localhost") ); } diff --git a/src/test/run-pass/spawn.rs b/src/test/run-pass/spawn.rs index 600cbdb06b8..04404294d92 100644 --- a/src/test/run-pass/spawn.rs +++ b/src/test/run-pass/spawn.rs @@ -5,7 +5,7 @@ use std; import task; fn main() { - task::spawn {|| child(10); }; + task::spawn(|| child(10) ); } fn child(&&i: int) { log(error, i); assert (i == 10); } diff --git a/src/test/run-pass/spawn2.rs b/src/test/run-pass/spawn2.rs index 86e2c996517..13fb94b656c 100644 --- a/src/test/run-pass/spawn2.rs +++ b/src/test/run-pass/spawn2.rs @@ -3,7 +3,7 @@ use std; import task::spawn; -fn main() { spawn {|| child((10, 20, 30, 40, 50, 60, 70, 80, 90)); }; } +fn main() { spawn(|| child((10, 20, 30, 40, 50, 60, 70, 80, 90)) ); } fn child(&&args: (int, int, int, int, int, int, int, int, int)) { let (i1, i2, i3, i4, i5, i6, i7, i8, i9) = args; diff --git a/src/test/run-pass/static-impl.rs b/src/test/run-pass/static-impl.rs index edceedae71d..149efcf5da4 100644 --- a/src/test/run-pass/static-impl.rs +++ b/src/test/run-pass/static-impl.rs @@ -17,12 +17,12 @@ impl util for uint { } } -impl util<T> for [T]/~ { +impl util<T> for ~[T] { fn length_() -> uint { vec::len(self) } - fn iter_(f: fn(T)) { for self.each {|x| f(x); } } - fn map_<U>(f: fn(T) -> U) -> [U]/~ { - let mut r = []/~; - for self.each {|elt| r += [f(elt)]/~; } + fn iter_(f: fn(T)) { for self.each |x| { f(x); } } + fn map_<U>(f: fn(T) -> U) -> ~[U] { + let mut r = ~[]; + for self.each |elt| { r += ~[f(elt)]; } r } } @@ -33,10 +33,10 @@ fn main() { assert 10u.plus() == 30; assert "hi".plus() == 200; - assert [1]/~.length_().str() == "1"; - assert [3, 4]/~.map_({|a| a + 4})[0] == 7; - assert [3, 4]/~.map_::<uint>({|a| a as uint + 4u})[0] == 7u; + assert (~[1]).length_().str() == "1"; + assert (~[3, 4]).map_(|a| a + 4 )[0] == 7; + assert (~[3, 4]).map_::<uint>(|a| a as uint + 4u )[0] == 7u; let mut x = 0u; - 10u.times {|_n| x += 2u;} + 10u.times(|_n| x += 2u ); assert x == 20u; } diff --git a/src/test/run-pass/swap-2.rs b/src/test/run-pass/swap-2.rs index b0a7bd30d81..9cc81c52ef4 100644 --- a/src/test/run-pass/swap-2.rs +++ b/src/test/run-pass/swap-2.rs @@ -1,7 +1,7 @@ -fn swap<T>(v: [mut T]/~, i: int, j: int) { v[i] <-> v[j]; } +fn swap<T>(v: ~[mut T], i: int, j: int) { v[i] <-> v[j]; } fn main() { - let a: [mut int]/~ = [mut 0, 1, 2, 3, 4, 5, 6]/~; + let a: ~[mut int] = ~[mut 0, 1, 2, 3, 4, 5, 6]; swap(a, 2, 4); assert (a[2] == 4); assert (a[4] == 2); diff --git a/src/test/run-pass/tag-in-block.rs b/src/test/run-pass/tag-in-block.rs index 5d3088cc8ae..9107249b2b0 100644 --- a/src/test/run-pass/tag-in-block.rs +++ b/src/test/run-pass/tag-in-block.rs @@ -6,4 +6,4 @@ fn foo() { fn baz() { zed(nil); } } -fn main(args: [str]/~) { } +fn main(args: ~[str]) { } diff --git a/src/test/run-pass/task-comm-0.rs b/src/test/run-pass/task-comm-0.rs index 75c282e7a90..532daf3651c 100644 --- a/src/test/run-pass/task-comm-0.rs +++ b/src/test/run-pass/task-comm-0.rs @@ -20,7 +20,7 @@ fn test05_start(ch : chan<int>) { fn test05() { let po = comm::port(); let ch = comm::chan(po); - task::spawn {|| test05_start(ch); }; + task::spawn(|| test05_start(ch) ); let mut value = comm::recv(po); log(error, value); value = comm::recv(po); diff --git a/src/test/run-pass/task-comm-1.rs b/src/test/run-pass/task-comm-1.rs index 713d487f3d8..ae0a39862d5 100644 --- a/src/test/run-pass/task-comm-1.rs +++ b/src/test/run-pass/task-comm-1.rs @@ -3,6 +3,6 @@ fn main() { test00(); } fn start() { #debug("Started / Finished task."); } fn test00() { - task::try {|| start() }; + task::try(|| start() ); #debug("Completing."); } diff --git a/src/test/run-pass/task-comm-10.rs b/src/test/run-pass/task-comm-10.rs index 907b4fc769d..90ad3563152 100644 --- a/src/test/run-pass/task-comm-10.rs +++ b/src/test/run-pass/task-comm-10.rs @@ -19,7 +19,7 @@ fn start(c: comm::chan<comm::chan<str>>) { fn main() { let p = comm::port(); let ch = comm::chan(p); - let child = task::spawn {|| start(ch); }; + let child = task::spawn(|| start(ch) ); let c = comm::recv(p); comm::send(c, "A"); diff --git a/src/test/run-pass/task-comm-11.rs b/src/test/run-pass/task-comm-11.rs index b1e23b2443a..f96f9d148d0 100644 --- a/src/test/run-pass/task-comm-11.rs +++ b/src/test/run-pass/task-comm-11.rs @@ -10,6 +10,6 @@ fn start(c: comm::chan<comm::chan<int>>) { fn main() { let p = comm::port(); let ch = comm::chan(p); - let child = task::spawn {|| start(ch); }; + let child = task::spawn(|| start(ch) ); let c = comm::recv(p); } diff --git a/src/test/run-pass/task-comm-12.rs b/src/test/run-pass/task-comm-12.rs index 51e61298290..28cb65a9ca2 100644 --- a/src/test/run-pass/task-comm-12.rs +++ b/src/test/run-pass/task-comm-12.rs @@ -9,7 +9,7 @@ fn test00() { let i: int = 0; let builder = task::builder(); let r = task::future_result(builder); - task::run(builder) {|| start(i); }; + task::run(builder, || start(i) ); // Sleep long enough for the task to finish. let mut i = 0; diff --git a/src/test/run-pass/task-comm-13.rs b/src/test/run-pass/task-comm-13.rs index eb8ffc6b8a8..3ded4aac5ae 100644 --- a/src/test/run-pass/task-comm-13.rs +++ b/src/test/run-pass/task-comm-13.rs @@ -12,6 +12,6 @@ fn main() { #debug("Check that we don't deadlock."); let p = comm::port::<int>(); let ch = comm::chan(p); - task::try {|| start(ch, 0, 10) }; + task::try(|| start(ch, 0, 10) ); #debug("Joined task"); } diff --git a/src/test/run-pass/task-comm-14.rs b/src/test/run-pass/task-comm-14.rs index 6efcd8e6eed..ef9c4ae5010 100644 --- a/src/test/run-pass/task-comm-14.rs +++ b/src/test/run-pass/task-comm-14.rs @@ -10,7 +10,7 @@ fn main() { let mut i = 10; while (i > 0) { log(debug, i); - task::spawn {|copy i| child(i, ch); }; + task::spawn(|copy i| child(i, ch) ); i = i - 1; } diff --git a/src/test/run-pass/task-comm-15.rs b/src/test/run-pass/task-comm-15.rs index 952306bebe1..d9291fd6898 100644 --- a/src/test/run-pass/task-comm-15.rs +++ b/src/test/run-pass/task-comm-15.rs @@ -18,6 +18,6 @@ fn main() { // the child's point of view the receiver may die. We should // drop messages on the floor in this case, and not crash! let ch = comm::chan(p); - let child = task::spawn {|| start(ch, 10); }; + let child = task::spawn(|| start(ch, 10) ); let c = comm::recv(p); } diff --git a/src/test/run-pass/task-comm-16.rs b/src/test/run-pass/task-comm-16.rs index 5c7b8c7b282..009661acedb 100644 --- a/src/test/run-pass/task-comm-16.rs +++ b/src/test/run-pass/task-comm-16.rs @@ -25,7 +25,7 @@ fn test_rec() { fn test_vec() { let po = port(); let ch = chan(po); - let v0: [int]/~ = [0, 1, 2]/~; + let v0: ~[int] = ~[0, 1, 2]; send(ch, v0); let v1 = recv(po); assert (v1[0] == 0); diff --git a/src/test/run-pass/task-comm-17.rs b/src/test/run-pass/task-comm-17.rs index 0db319c4c35..307dde20825 100644 --- a/src/test/run-pass/task-comm-17.rs +++ b/src/test/run-pass/task-comm-17.rs @@ -9,5 +9,5 @@ fn f() { } fn main() { - task::spawn {|| f() }; + task::spawn(|| f() ); } \ No newline at end of file diff --git a/src/test/run-pass/task-comm-3.rs b/src/test/run-pass/task-comm-3.rs index 17cd1a1ce93..54a67413b50 100644 --- a/src/test/run-pass/task-comm-3.rs +++ b/src/test/run-pass/task-comm-3.rs @@ -30,11 +30,11 @@ fn test00() { let mut i: int = 0; // Create and spawn tasks... - let mut results = []/~; + let mut results = ~[]; while i < number_of_tasks { let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {|copy i| + results += ~[task::future_result(builder)]; + do task::run(builder) |copy i| { test00_start(ch, i, number_of_messages) } i = i + 1; @@ -42,7 +42,7 @@ fn test00() { // Read from spawned tasks... let mut sum = 0; - for results.each {|r| + for results.each |r| { i = 0; while i < number_of_messages { let value = recv(po); @@ -52,7 +52,7 @@ fn test00() { } // Join spawned tasks... - for results.each {|r| future::get(r); } + for results.each |r| { future::get(r); } #debug("Completed: Final number is: "); log(error, sum); diff --git a/src/test/run-pass/task-comm-7.rs b/src/test/run-pass/task-comm-7.rs index 61ce4009c21..2a983e9e1a1 100644 --- a/src/test/run-pass/task-comm-7.rs +++ b/src/test/run-pass/task-comm-7.rs @@ -16,16 +16,16 @@ fn test00() { let number_of_messages: int = 10; let c = comm::chan(p); - task::spawn {|| + do task::spawn || { test00_start(c, number_of_messages * 0, number_of_messages); } - task::spawn {|| + do task::spawn || { test00_start(c, number_of_messages * 1, number_of_messages); } - task::spawn {|| + do task::spawn || { test00_start(c, number_of_messages * 2, number_of_messages); } - task::spawn {|| + do task::spawn || { test00_start(c, number_of_messages * 3, number_of_messages); } diff --git a/src/test/run-pass/task-comm-9.rs b/src/test/run-pass/task-comm-9.rs index e5c24334734..1523c7cb3ea 100644 --- a/src/test/run-pass/task-comm-9.rs +++ b/src/test/run-pass/task-comm-9.rs @@ -18,7 +18,7 @@ fn test00() { let builder = task::builder(); let r = task::future_result(builder); - task::run(builder) {|| + do task::run(builder) || { test00_start(ch, number_of_messages); } diff --git a/src/test/run-pass/task-comm-chan-cleanup4.rs b/src/test/run-pass/task-comm-chan-cleanup4.rs index 7c32b978ae8..65c6ab79ce8 100644 --- a/src/test/run-pass/task-comm-chan-cleanup4.rs +++ b/src/test/run-pass/task-comm-chan-cleanup4.rs @@ -7,22 +7,22 @@ import task; // results in the string not being freed fn starship(&&ch: comm::chan<str>) { - for int::range(0, 10) { |_i| + for int::range(0, 10) |_i| { comm::send(ch, "pew pew"); } } fn starbase() { - for int::range(0, 10) { |_i| + for int::range(0, 10) |_i| { let p = comm::port(); let c = comm::chan(p); - task::spawn {|| starship(c);}; + task::spawn(|| starship(c) ); task::yield(); } } fn main() { - for int::range(0, 10) { |_i| - task::spawn {|| starbase();}; + for int::range(0, 10) |_i| { + task::spawn(|| starbase() ); } } \ No newline at end of file diff --git a/src/test/run-pass/task-comm.rs b/src/test/run-pass/task-comm.rs index 97c2a8643ad..546148ed386 100644 --- a/src/test/run-pass/task-comm.rs +++ b/src/test/run-pass/task-comm.rs @@ -38,22 +38,22 @@ fn test00() { let mut i: int = 0; - let mut results = []/~; + let mut results = ~[]; while i < number_of_tasks { i = i + 1; let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {|copy i| + results += ~[task::future_result(builder)]; + do task::run(builder) |copy i| { test00_start(ch, i, number_of_messages); } } let mut sum: int = 0; - for results.each {|r| + for results.each |r| { i = 0; while i < number_of_messages { sum += recv(po); i = i + 1; } } - for results.each {|r| future::get(r); } + for results.each |r| { future::get(r); } #debug("Completed: Final number is: "); assert (sum == @@ -89,7 +89,7 @@ fn test04_start() { fn test04() { #debug("Spawning lots of tasks."); let mut i: int = 4; - while i > 0 { i = i - 1; task::spawn {|| test04_start(); }; } + while i > 0 { i = i - 1; task::spawn(|| test04_start() ); } #debug("Finishing up."); } @@ -104,7 +104,7 @@ fn test05_start(ch: chan<int>) { fn test05() { let po = comm::port(); let ch = chan(po); - task::spawn {|| test05_start(ch); }; + task::spawn(|| test05_start(ch) ); let mut value: int; value = recv(po); value = recv(po); @@ -125,18 +125,18 @@ fn test06() { let mut i: int = 0; - let mut results = []/~; + let mut results = ~[]; while i < number_of_tasks { i = i + 1; let builder = task::builder(); - results += [task::future_result(builder)]/~; - task::run(builder) {|copy i| + results += ~[task::future_result(builder)]; + do task::run(builder) |copy i| { test06_start(i); }; } - for results.each {|r| future::get(r); } + for results.each |r| { future::get(r); } } diff --git a/src/test/run-pass/task-killjoin-rsrc.rs b/src/test/run-pass/task-killjoin-rsrc.rs index 6dd5827836e..2f7b275cd73 100644 --- a/src/test/run-pass/task-killjoin-rsrc.rs +++ b/src/test/run-pass/task-killjoin-rsrc.rs @@ -10,11 +10,11 @@ class notify { let ch: comm::chan<bool>; let v: @mut bool; new(ch: comm::chan<bool>, v: @mut bool) { self.ch = ch; self.v = v; } drop { - #error["notify: task=%? v=%x unwinding=%b b=%b", + #error~["notify: task=%? v=%x unwinding=%b b=%b", task::get_task(), ptr::addr_of(*(self.v)) as uint, task::failing(), - *(self.v)]/~; + *(self.v)]; let b = *(self.v); comm::send(self.ch, b); } @@ -24,9 +24,9 @@ fn joinable(f: fn~()) -> comm::port<bool> { fn wrapper(+pair: (comm::chan<bool>, fn())) { let (c, f) = pair; let b = @mut false; - #error["wrapper: task=%? allocated v=%x", + #error~["wrapper: task=%? allocated v=%x", task::get_task(), - ptr::addr_of(*b) as uint]/~; + ptr::addr_of(*b) as uint]; let _r = notify(c, b); f(); *b = true; diff --git a/src/test/run-pass/task-killjoin.rs b/src/test/run-pass/task-killjoin.rs index 77d6bc9e565..642e501de31 100644 --- a/src/test/run-pass/task-killjoin.rs +++ b/src/test/run-pass/task-killjoin.rs @@ -20,13 +20,13 @@ fn supervisor() { // Unsupervise this task so the process doesn't return a failure status as // a result of the main task being killed. let f = supervised; - task::try {|| supervised() }; + task::try(|| supervised() ); } fn main() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| supervisor(); } + task::run(builder, || supervisor() ) } // Local Variables: diff --git a/src/test/run-pass/task-life-0.rs b/src/test/run-pass/task-life-0.rs index 1aead283758..2232bff1bc7 100644 --- a/src/test/run-pass/task-life-0.rs +++ b/src/test/run-pass/task-life-0.rs @@ -1,7 +1,7 @@ use std; import task; fn main() { - task::spawn {|| child("Hello"); }; + task::spawn(|| child("Hello") ); } fn child(&&s: str) { diff --git a/src/test/run-pass/terminate-in-initializer.rs b/src/test/run-pass/terminate-in-initializer.rs index aee8b9ab641..f491ad97d68 100644 --- a/src/test/run-pass/terminate-in-initializer.rs +++ b/src/test/run-pass/terminate-in-initializer.rs @@ -12,13 +12,13 @@ fn test_ret() { let x: @int = ret; } fn test_fail() { fn f() { let x: @int = fail; } - task::try {|| f() }; + task::try(|| f() ); } fn test_fail_indirect() { fn f() -> ! { fail; } fn g() { let x: @int = f(); } - task::try {|| g() }; + task::try(|| g() ); } fn main() { diff --git a/src/test/run-pass/test-ignore-cfg.rs b/src/test/run-pass/test-ignore-cfg.rs index 1ad69e9a897..0aad5bd13d3 100644 --- a/src/test/run-pass/test-ignore-cfg.rs +++ b/src/test/run-pass/test-ignore-cfg.rs @@ -22,10 +22,10 @@ fn checktests() { let tests = __test::tests(); let shouldignore = option::get( - vec::find(tests, {|t| t.name == "shouldignore"})); + vec::find(tests, |t| t.name == "shouldignore" )); assert shouldignore.ignore == true; let shouldnotignore = option::get( - vec::find(tests, {|t| t.name == "shouldnotignore"})); + vec::find(tests, |t| t.name == "shouldnotignore" )); assert shouldnotignore.ignore == false; } \ No newline at end of file diff --git a/src/test/run-pass/threads.rs b/src/test/run-pass/threads.rs index 358074fecf8..d30420aa26e 100644 --- a/src/test/run-pass/threads.rs +++ b/src/test/run-pass/threads.rs @@ -5,7 +5,7 @@ import task; fn main() { let mut i = 10; - while i > 0 { task::spawn {|copy i| child(i); }; i = i - 1; } + while i > 0 { task::spawn(|copy i| child(i) ); i = i - 1; } #debug("main thread exiting"); } diff --git a/src/test/run-pass/too-much-recursion.rs b/src/test/run-pass/too-much-recursion.rs index 3dd2ebb3859..b09c3e11498 100644 --- a/src/test/run-pass/too-much-recursion.rs +++ b/src/test/run-pass/too-much-recursion.rs @@ -7,7 +7,7 @@ fn main() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| + do task::run(builder) || { fn f() { f() }; f(); }; diff --git a/src/test/run-pass/type-param.rs b/src/test/run-pass/type-param.rs index 68be7c56d4b..4d5f4a2fd17 100644 --- a/src/test/run-pass/type-param.rs +++ b/src/test/run-pass/type-param.rs @@ -2,4 +2,4 @@ type lteq<T> = native fn(T) -> bool; -fn main(args: [str]/~) { } +fn main(args: ~[str]) { } diff --git a/src/test/run-pass/type-params-in-for-each.rs b/src/test/run-pass/type-params-in-for-each.rs index 9173e258003..5400faa1df5 100644 --- a/src/test/run-pass/type-params-in-for-each.rs +++ b/src/test/run-pass/type-params-in-for-each.rs @@ -5,8 +5,8 @@ fn range(lo: uint, hi: uint, it: fn(uint)) { while lo_ < hi { it(lo_); lo_ += 1u; } } -fn create_index<T>(index: [{a: T, b: uint}]/~, hash_fn: native fn(T) -> uint) { - range(0u, 256u) {|_i| let bucket: [T]/~ = []/~; } +fn create_index<T>(index: ~[{a: T, b: uint}], hash_fn: native fn(T) -> uint) { + range(0u, 256u, |_i| { let bucket: ~[T] = ~[]; } ) } fn main() { } diff --git a/src/test/run-pass/type-ptr.rs b/src/test/run-pass/type-ptr.rs index 9569954ebe6..f469ebd3dd0 100644 --- a/src/test/run-pass/type-ptr.rs +++ b/src/test/run-pass/type-ptr.rs @@ -2,4 +2,4 @@ fn f(a: *int) -> *int { ret a; } fn g(a: *int) -> *int { let b = f(a); ret b; } -fn main(args: [str]/~) { ret; } +fn main(args: ~[str]) { ret; } diff --git a/src/test/run-pass/unique-assign-generic.rs b/src/test/run-pass/unique-assign-generic.rs index 230aa21cb90..8addb1acaf2 100644 --- a/src/test/run-pass/unique-assign-generic.rs +++ b/src/test/run-pass/unique-assign-generic.rs @@ -6,6 +6,6 @@ fn f<T: copy>(t: T) -> T { fn main() { let t = f(~100); assert t == ~100; - let t = f(~@[100]/~); - assert t == ~@[100]/~; + let t = f(~@~[100]); + assert t == ~@~[100]; } diff --git a/src/test/run-pass/unique-create.rs b/src/test/run-pass/unique-create.rs index 24714a94873..b48f39231a2 100644 --- a/src/test/run-pass/unique-create.rs +++ b/src/test/run-pass/unique-create.rs @@ -3,5 +3,5 @@ fn main() { } fn vec() { - [0]/~; + ~[0]; } \ No newline at end of file diff --git a/src/test/run-pass/unique-in-vec-copy.rs b/src/test/run-pass/unique-in-vec-copy.rs index ed9ffd26ae8..f16e4b5e412 100644 --- a/src/test/run-pass/unique-in-vec-copy.rs +++ b/src/test/run-pass/unique-in-vec-copy.rs @@ -1,5 +1,5 @@ fn main() { - let a = [~mut 10]/~; + let a = ~[~mut 10]; let b = a; assert *a[0] == 10; diff --git a/src/test/run-pass/unique-in-vec.rs b/src/test/run-pass/unique-in-vec.rs index 62391debea2..f2fdeb5a5fa 100644 --- a/src/test/run-pass/unique-in-vec.rs +++ b/src/test/run-pass/unique-in-vec.rs @@ -1,3 +1,3 @@ fn main() { - assert ([~100]/~)[0] == ~100; + assert (~[~100])[0] == ~100; } diff --git a/src/test/run-pass/unique-send-2.rs b/src/test/run-pass/unique-send-2.rs index 9fce7de19c4..7169993f664 100644 --- a/src/test/run-pass/unique-send-2.rs +++ b/src/test/run-pass/unique-send-2.rs @@ -12,13 +12,13 @@ fn main() { let ch = comm::chan(p); let n = 100u; let mut expected = 0u; - for uint::range(0u, n) {|i| - task::spawn {|| child(ch, i); }; + for uint::range(0u, n) |i| { + task::spawn(|| child(ch, i) ); expected += i; } let mut actual = 0u; - for uint::range(0u, n) {|_i| + for uint::range(0u, n) |_i| { let j = comm::recv(p); actual += *j; } diff --git a/src/test/run-pass/unwind-box.rs b/src/test/run-pass/unwind-box.rs index 5634b818794..6dc78f9314f 100644 --- a/src/test/run-pass/unwind-box.rs +++ b/src/test/run-pass/unwind-box.rs @@ -10,5 +10,5 @@ fn f() { fn main() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| f(); } + task::run(builder, || f() ); } \ No newline at end of file diff --git a/src/test/run-pass/unwind-resource.rs b/src/test/run-pass/unwind-resource.rs index 7e09b5cca99..6bbb38f5031 100644 --- a/src/test/run-pass/unwind-resource.rs +++ b/src/test/run-pass/unwind-resource.rs @@ -23,7 +23,7 @@ fn main() { let c = comm::chan(p); let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| f(c); } + task::run(builder, || f(c) ); #error("hiiiiiiiii"); assert comm::recv(p); } \ No newline at end of file diff --git a/src/test/run-pass/unwind-resource2.rs b/src/test/run-pass/unwind-resource2.rs index 20120cd6ddd..35255e94fb2 100644 --- a/src/test/run-pass/unwind-resource2.rs +++ b/src/test/run-pass/unwind-resource2.rs @@ -17,5 +17,5 @@ fn f() { fn main() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| f(); } + task::run(builder, || f() ); } \ No newline at end of file diff --git a/src/test/run-pass/unwind-unique.rs b/src/test/run-pass/unwind-unique.rs index 2b733dcf9c1..5f7fbf3d41a 100644 --- a/src/test/run-pass/unwind-unique.rs +++ b/src/test/run-pass/unwind-unique.rs @@ -10,5 +10,5 @@ fn f() { fn main() { let builder = task::builder(); task::unsupervise(builder); - task::run(builder) {|| f(); } + task::run(builder, || f() ); } \ No newline at end of file diff --git a/src/test/run-pass/utf8.rs b/src/test/run-pass/utf8.rs index 2ba0f47eee4..c18133f7051 100644 --- a/src/test/run-pass/utf8.rs +++ b/src/test/run-pass/utf8.rs @@ -31,7 +31,7 @@ fn main() { fn check_str_eq(a: str, b: str) { let mut i: int = 0; - for str::each(a) {|ab| + for str::each(a) |ab| { log(debug, i); log(debug, ab); let bb: u8 = b[i]; diff --git a/src/test/run-pass/utf8_chars.rs b/src/test/run-pass/utf8_chars.rs index 99c6a0cc510..ad7bc8fe2dc 100644 --- a/src/test/run-pass/utf8_chars.rs +++ b/src/test/run-pass/utf8_chars.rs @@ -4,7 +4,7 @@ import vec; fn main() { // Chars of 1, 2, 3, and 4 bytes - let chs: [char]/~ = ['e', 'é', '€', 0x10000 as char]/~; + let chs: ~[char] = ~['e', 'é', '€', 0x10000 as char]; let s: str = str::from_chars(chs); assert (str::len(s) == 10u); @@ -15,9 +15,9 @@ fn main() { assert (str::char_at(s, 1u) == 'é'); assert (str::is_utf8(str::bytes(s))); - assert (!str::is_utf8([0x80_u8]/~)); - assert (!str::is_utf8([0xc0_u8]/~)); - assert (!str::is_utf8([0xc0_u8, 0x10_u8]/~)); + assert (!str::is_utf8(~[0x80_u8])); + assert (!str::is_utf8(~[0xc0_u8])); + assert (!str::is_utf8(~[0xc0_u8, 0x10_u8])); let mut stack = "a×c€"; assert (str::pop_char(stack) == '€'); diff --git a/src/test/run-pass/vec-concat.rs b/src/test/run-pass/vec-concat.rs index 6aa5f5ef424..9c2209ad243 100644 --- a/src/test/run-pass/vec-concat.rs +++ b/src/test/run-pass/vec-concat.rs @@ -1,8 +1,8 @@ // -*- rust -*- fn main() { - let a: [int]/~ = [1, 2, 3, 4, 5]/~; - let b: [int]/~ = [6, 7, 8, 9, 0]/~; - let v: [int]/~ = a + b; + let a: ~[int] = ~[1, 2, 3, 4, 5]; + let b: ~[int] = ~[6, 7, 8, 9, 0]; + let v: ~[int] = a + b; log(debug, v[9]); assert (v[0] == 1); assert (v[7] == 8); diff --git a/src/test/run-pass/vec-drop.rs b/src/test/run-pass/vec-drop.rs index 9b75d0c2155..ce6070bbabd 100644 --- a/src/test/run-pass/vec-drop.rs +++ b/src/test/run-pass/vec-drop.rs @@ -3,6 +3,6 @@ fn main() { // This just tests whether the vec leaks its members. - let pvec: [@{x: int, y: int}]/~ = - [@{x: 1, y: 2}, @{x: 3, y: 4}, @{x: 5, y: 6}]/~; + let pvec: ~[@{x: int, y: int}] = + ~[@{x: 1, y: 2}, @{x: 3, y: 4}, @{x: 5, y: 6}]; } diff --git a/src/test/run-pass/vec-growth.rs b/src/test/run-pass/vec-growth.rs index b6c4aa3659f..baa7552f383 100644 --- a/src/test/run-pass/vec-growth.rs +++ b/src/test/run-pass/vec-growth.rs @@ -1,11 +1,11 @@ fn main() { - let mut v = [1]/~; - v += [2]/~; - v += [3]/~; - v += [4]/~; - v += [5]/~; + let mut v = ~[1]; + v += ~[2]; + v += ~[3]; + v += ~[4]; + v += ~[5]; assert (v[0] == 1); assert (v[1] == 2); assert (v[2] == 3); diff --git a/src/test/run-pass/vec-ivec-deadlock.rs b/src/test/run-pass/vec-ivec-deadlock.rs index 4fb7eb8f7f5..5609a76231b 100644 --- a/src/test/run-pass/vec-ivec-deadlock.rs +++ b/src/test/run-pass/vec-ivec-deadlock.rs @@ -1 +1 @@ -fn main() { let a = [1, 2, 3, 4, 5]/~; let mut b = [a, a]/~; b += b; } +fn main() { let a = ~[1, 2, 3, 4, 5]; let mut b = ~[a, a]; b += b; } diff --git a/src/test/run-pass/vec-late-init.rs b/src/test/run-pass/vec-late-init.rs index 6f5efa0bd01..cb0895e46e8 100644 --- a/src/test/run-pass/vec-late-init.rs +++ b/src/test/run-pass/vec-late-init.rs @@ -1,7 +1,7 @@ fn main() { - let mut later: [int]/~; - if true { later = [1]/~; } else { later = [2]/~; } + let mut later: ~[int]; + if true { later = ~[1]; } else { later = ~[2]; } log(debug, later[0]); } diff --git a/src/test/run-pass/vec-push.rs b/src/test/run-pass/vec-push.rs index 2505b7141b1..3e47f7e0a3b 100644 --- a/src/test/run-pass/vec-push.rs +++ b/src/test/run-pass/vec-push.rs @@ -1 +1 @@ -fn main() { let mut v = [1, 2, 3]/~; vec::push(v, 1); } +fn main() { let mut v = ~[1, 2, 3]; vec::push(v, 1); } diff --git a/src/test/run-pass/vec-self-append.rs b/src/test/run-pass/vec-self-append.rs index c897a327a5e..aecf51ec210 100644 --- a/src/test/run-pass/vec-self-append.rs +++ b/src/test/run-pass/vec-self-append.rs @@ -3,7 +3,7 @@ import vec; fn test_heap_to_heap() { // a spills onto the heap - let mut a = [0, 1, 2, 3, 4]/~; + let mut a = ~[0, 1, 2, 3, 4]; a += a; assert (vec::len(a) == 10u); assert (a[0] == 0); @@ -20,7 +20,7 @@ fn test_heap_to_heap() { fn test_stack_to_heap() { // a is entirely on the stack - let mut a = [0, 1, 2]/~; + let mut a = ~[0, 1, 2]; // a spills to the heap a += a; assert (vec::len(a) == 6u); @@ -34,7 +34,7 @@ fn test_stack_to_heap() { fn test_loop() { // Make sure we properly handle repeated self-appends. - let mut a: [int]/~ = [0]/~; + let mut a: ~[int] = ~[0]; let mut i = 20; let mut expected_len = 1u; while i > 0 { diff --git a/src/test/run-pass/vec-slice-drop.rs b/src/test/run-pass/vec-slice-drop.rs index e8709d8f108..2c5225fa0f9 100644 --- a/src/test/run-pass/vec-slice-drop.rs +++ b/src/test/run-pass/vec-slice-drop.rs @@ -8,7 +8,7 @@ class foo { fn main() { let x = @mut 0; { - let l = [foo(x)]/&; + let l = &[foo(x)]; assert *l[0].x == 0; } assert *x == 1; diff --git a/src/test/run-pass/vec-slice.rs b/src/test/run-pass/vec-slice.rs index e1961d59b39..11983fb5420 100644 --- a/src/test/run-pass/vec-slice.rs +++ b/src/test/run-pass/vec-slice.rs @@ -1,5 +1,5 @@ fn main() { - let v = [1,2,3,4,5]/~; + let v = ~[1,2,3,4,5]; let v2 = vec::slice(v, 1, 3); assert (v2[0] == 2); assert (v2[1] == 3); diff --git a/src/test/run-pass/vec-trailing-comma.rs b/src/test/run-pass/vec-trailing-comma.rs index b0dac0abd7e..344c5f7065a 100644 --- a/src/test/run-pass/vec-trailing-comma.rs +++ b/src/test/run-pass/vec-trailing-comma.rs @@ -1,10 +1,10 @@ // Issue #2482. fn main() { - let v1: [int]/~ = [10, 20, 30,]/~; - let v2: [int]/~ = [10, 20, 30]/~; + let v1: ~[int] = ~[10, 20, 30,]; + let v2: ~[int] = ~[10, 20, 30]; assert (v1[2] == v2[2]); - let v3: [int]/~ = [10,]/~; - let v4: [int]/~ = [10]/~; + let v3: ~[int] = ~[10,]; + let v4: ~[int] = ~[10]; assert (v3[0] == v4[0]); } diff --git a/src/test/run-pass/vec.rs b/src/test/run-pass/vec.rs index cc70cd8cf8a..e81176cf398 100644 --- a/src/test/run-pass/vec.rs +++ b/src/test/run-pass/vec.rs @@ -3,7 +3,7 @@ // -*- rust -*- fn main() { - let v: [int]/~ = [10, 20]/~; + let v: ~[int] = ~[10, 20]; assert (v[0] == 10); assert (v[1] == 20); let mut x: int = 0; diff --git a/src/test/run-pass/vector-no-ann-2.rs b/src/test/run-pass/vector-no-ann-2.rs index 7e0e614c0df..79f087dd0ec 100644 --- a/src/test/run-pass/vector-no-ann-2.rs +++ b/src/test/run-pass/vector-no-ann-2.rs @@ -1 +1 @@ -fn main() { let quux: @[uint]/~ = @[]/~; } +fn main() { let quux: @~[uint] = @~[]; } diff --git a/src/test/run-pass/while-with-break.rs b/src/test/run-pass/while-with-break.rs index 68c0779657e..8150dc7968f 100644 --- a/src/test/run-pass/while-with-break.rs +++ b/src/test/run-pass/while-with-break.rs @@ -8,8 +8,8 @@ fn main() { log(debug, i); i = i + 1; if i == 95 { - let v: [int]/~ = - [1, 2, 3, 4, 5]/~; // we check that it is freed by break + let v: ~[int] = + ~[1, 2, 3, 4, 5]; // we check that it is freed by break #debug("breaking"); break; diff --git a/src/test/run-pass/yield.rs b/src/test/run-pass/yield.rs index 041d8470a6f..5578ca251cc 100644 --- a/src/test/run-pass/yield.rs +++ b/src/test/run-pass/yield.rs @@ -6,7 +6,7 @@ import task::*; fn main() { let builder = task::builder(); let result = task::future_result(builder); - task::run(builder) {|| child(); } + task::run(builder, || child() ); #error("1"); yield(); #error("2"); diff --git a/src/test/run-pass/yield1.rs b/src/test/run-pass/yield1.rs index 6218baa29bc..b0a4458b792 100644 --- a/src/test/run-pass/yield1.rs +++ b/src/test/run-pass/yield1.rs @@ -6,7 +6,7 @@ import task::*; fn main() { let builder = task::builder(); let result = task::future_result(builder); - task::run(builder) {|| child(); } + task::run(builder, || child() ); #error("1"); yield(); future::get(result); diff --git a/src/test/run-pass/zip-same-length.rs b/src/test/run-pass/zip-same-length.rs index 4c5c3343512..13a50348469 100644 --- a/src/test/run-pass/zip-same-length.rs +++ b/src/test/run-pass/zip-same-length.rs @@ -5,18 +5,18 @@ import uint; import u8; import vec::{head, is_not_empty, last, same_length, zip}; -fn enum_chars(start: u8, end: u8) -> [char]/~ { +fn enum_chars(start: u8, end: u8) -> ~[char] { assert start < end; let mut i = start; - let mut r = []/~; + let mut r = ~[]; while i <= end { vec::push(r, i as char); i += 1u as u8; } ret r; } -fn enum_uints(start: uint, end: uint) -> [uint]/~ { +fn enum_uints(start: uint, end: uint) -> ~[uint] { assert start < end; let mut i = start; - let mut r = []/~; + let mut r = ~[]; while i <= end { vec::push(r, i); i += 1u; } ret r; } |
