author     Marijn Haverbeke <marijnh@gmail.com>   2012-02-17 13:17:40 +0100
committer  Marijn Haverbeke <marijnh@gmail.com>   2012-02-17 23:03:12 +0100
commit     ff429645461c83dca048d8a7088c04ee15cc96c8 (patch)
tree       b0e7f4ff8ff8becd67aa3f90fcdbf7b5cb029a18 /src/comp
parent     1c1261bcb86841cc5fdedd9db0f0ced8a178cb4d (diff)
Clean up some of trans using block combinators
`with_scope` and `with_cond` can be used to wrap a piece of code in a
scope block, or to make it conditional on a value, without writing all
the context creation and branching by hand.

Also renames `@block_ctxt` to `block` to reduce noise.
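
For orientation, a rough sketch of the shape these combinators take follows.
This is a hedged reconstruction, not the definitions actually added to
base.rs (those hunks are not part of this excerpt); the helpers scope_block,
sub_block, trans_block_cleanups, Br, CondBr, .llbb and the unreachable flag
are taken from the usages visible in the diff below.

    // Hedged sketch: run `f` inside a fresh scope block, then emit that
    // scope's cleanups and hand the resulting block back to the caller.
    fn with_scope(bcx: block, name: str, f: fn(block) -> block) -> block {
        let scope_cx = scope_block(bcx, name);
        Br(bcx, scope_cx.llbb);
        let last_cx = f(scope_cx);
        trans_block_cleanups(last_cx, scope_cx)
    }

    // Hedged sketch: branch on `val`, run `f` in a fresh conditional block,
    // and join back into a "next" block that is returned to the caller.
    fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block {
        let next_cx = sub_block(bcx, "next"), cond_cx = sub_block(bcx, "cond");
        CondBr(bcx, val, cond_cx.llbb, next_cx.llbb);
        let after_cx = f(cond_cx);
        if !after_cx.unreachable { Br(after_cx, next_cx.llbb); }
        next_cx
    }

The rewritten guard handling in alt.rs and decr_refcnt_maybe_free in base.rs
below show callers using these in place of the old hand-rolled
new_sub_block_ctxt/CondBr sequences.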
Diffstat (limited to 'src/comp')
-rw-r--r--  src/comp/middle/debuginfo.rs        8
-rw-r--r--  src/comp/middle/trans/alt.rs      206
-rw-r--r--  src/comp/middle/trans/base.rs     933
-rw-r--r--  src/comp/middle/trans/build.rs    198
-rw-r--r--  src/comp/middle/trans/closure.rs   80
-rw-r--r--  src/comp/middle/trans/common.rs    68
-rw-r--r--  src/comp/middle/trans/impl.rs      30
-rw-r--r--  src/comp/middle/trans/native.rs    30
-rw-r--r--  src/comp/middle/trans/shape.rs     20
-rw-r--r--  src/comp/middle/trans/tvec.rs      76
-rw-r--r--  src/comp/middle/trans/uniq.rs      46
11 files changed, 791 insertions(+), 904 deletions(-)
diff --git a/src/comp/middle/debuginfo.rs b/src/comp/middle/debuginfo.rs
index 1349a43e7e0..efd9203911c 100644
--- a/src/comp/middle/debuginfo.rs
+++ b/src/comp/middle/debuginfo.rs
@@ -224,7 +224,7 @@ fn line_from_span(cm: codemap::codemap, sp: span) -> uint {
     codemap::lookup_char_pos(cm, sp.lo).line
 }
 
-fn create_block(cx: @block_ctxt) -> @metadata<block_md> {
+fn create_block(cx: block) -> @metadata<block_md> {
     let cache = get_cache(bcx_ccx(cx));
     let cx = cx;
     while option::is_none(cx.block_span) {
@@ -677,7 +677,7 @@ fn create_var(type_tag: int, context: ValueRef, name: str, file: ValueRef,
     ret llmdnode(lldata);
 }
 
-fn create_local_var(bcx: @block_ctxt, local: @ast::local)
+fn create_local_var(bcx: block, local: @ast::local)
     -> @metadata<local_var_md> unsafe {
     let cx = bcx_ccx(bcx);
     let cache = get_cache(cx);
@@ -728,7 +728,7 @@ fn create_local_var(bcx: @block_ctxt, local: @ast::local)
     ret mdval;
 }
 
-fn create_arg(bcx: @block_ctxt, arg: ast::arg, sp: span)
+fn create_arg(bcx: block, arg: ast::arg, sp: span)
     -> @metadata<argument_md> unsafe {
     let fcx = bcx_fcx(bcx);
     let cx = fcx_ccx(fcx);
@@ -763,7 +763,7 @@ fn create_arg(bcx: @block_ctxt, arg: ast::arg, sp: span)
     ret mdval;
 }
 
-fn update_source_pos(cx: @block_ctxt, s: span) {
+fn update_source_pos(cx: block, s: span) {
     if !bcx_ccx(cx).sess.opts.debuginfo {
         ret;
     }
diff --git a/src/comp/middle/trans/alt.rs b/src/comp/middle/trans/alt.rs
index 158db860794..38cd5e83af0 100644
--- a/src/comp/middle/trans/alt.rs
+++ b/src/comp/middle/trans/alt.rs
@@ -3,8 +3,7 @@ import lib::llvm::llvm;
 import lib::llvm::{ValueRef, BasicBlockRef};
 import pat_util::*;
 import build::*;
-import base::{new_sub_block_ctxt, new_scope_block_ctxt,
-              new_real_block_ctxt, load_if_immediate};
+import base::*;
 import syntax::ast;
 import syntax::ast_util;
 import syntax::ast_util::{dummy_sp};
@@ -38,28 +37,28 @@ enum opt_result {
     single_result(result),
     range_result(result, result),
 }
-fn trans_opt(bcx: @block_ctxt, o: opt) -> opt_result {
+fn trans_opt(bcx: block, o: opt) -> opt_result {
     let ccx = bcx_ccx(bcx), bcx = bcx;
     alt o {
       lit(l) {
         alt l.node {
           ast::expr_lit(@{node: ast::lit_str(s), _}) {
             let strty = ty::mk_str(bcx_tcx(bcx));
-            let cell = base::empty_dest_cell();
-            bcx = tvec::trans_str(bcx, s, base::by_val(cell));
+            let cell = empty_dest_cell();
+            bcx = tvec::trans_str(bcx, s, by_val(cell));
             add_clean_temp(bcx, *cell, strty);
             ret single_result(rslt(bcx, *cell));
           }
           _ {
             ret single_result(
-                rslt(bcx, base::trans_const_expr(ccx, l)));
+                rslt(bcx, trans_const_expr(ccx, l)));
           }
         }
       }
       var(disr_val, _) { ret single_result(rslt(bcx, C_int(ccx, disr_val))); }
       range(l1, l2) {
-        ret range_result(rslt(bcx, base::trans_const_expr(ccx, l1)),
-                         rslt(bcx, base::trans_const_expr(ccx, l2)));
+        ret range_result(rslt(bcx, trans_const_expr(ccx, l1)),
+                         rslt(bcx, trans_const_expr(ccx, l2)));
       }
     }
 }
@@ -259,9 +258,9 @@ fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> [opt] {
     ret found;
 }
 
-fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
+fn extract_variant_args(bcx: block, pat_id: ast::node_id,
                         vdefs: {enm: def_id, var: def_id}, val: ValueRef) ->
-   {vals: [ValueRef], bcx: @block_ctxt} {
+   {vals: [ValueRef], bcx: block} {
     let ccx = bcx.fcx.ccx, bcx = bcx;
     // invariant:
     // pat_id must have the same length ty_param_substs as vdefs?
@@ -285,7 +284,7 @@ fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
             // invariant needed:
             // how do we know it even makes sense to pass in ty_param_substs
             // here? What if it's [] and the enum type has variables in it?
-            base::GEP_enum(bcx, blobptr, vdefs_tg, vdefs_var,
+            GEP_enum(bcx, blobptr, vdefs_tg, vdefs_var,
                             ty_param_substs, i);
         bcx = r.bcx;
         args += [r.val];
@@ -363,7 +362,7 @@ fn pick_col(m: match) -> uint {
     ret best_col;
 }
 
-fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
+fn compile_submatch(bcx: block, m: match, vals: [ValueRef], f: mk_fail,
                     &exits: [exit_node]) {
     let bcx = bcx;
     if m.len() == 0u { Br(bcx, f()); ret; }
@@ -371,23 +370,19 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
         let data = m[0].data;
         alt data.guard {
           some(e) {
-            let guard_cx = new_scope_block_ctxt(bcx, "submatch_guard");
-            Br(bcx, guard_cx.llbb);
-            // Temporarily set bindings. They'll be rewritten to PHI nodes for
-            // the actual arm block.
+            // Temporarily set bindings. They'll be rewritten to PHI nodes
+            // for the actual arm block.
             data.id_map.items {|key, val|
-                let local = local_mem(option::get(assoc(key, m[0].bound)));
-                bcx.fcx.lllocals.insert(val, local);
+                let loc = local_mem(option::get(assoc(key, m[0].bound)));
+                bcx.fcx.lllocals.insert(val, loc);
+            };
+            let {bcx: guard_cx, val} = with_scope_result(bcx, "guard") {|bcx|
+                trans_temp_expr(bcx, e)
+            };
+            bcx = with_cond(guard_cx, Not(guard_cx, val)) {|bcx|
+                compile_submatch(bcx, vec::tail(m), vals, f, exits);
+                bcx
             };
-            let {bcx: guard_bcx, val: guard_val} =
-                base::trans_temp_expr(guard_cx, e);
-            guard_bcx = base::trans_block_cleanups(guard_bcx, guard_cx);
-            let next_cx = new_sub_block_ctxt(guard_cx, "submatch_next");
-            let else_cx = new_sub_block_ctxt(guard_cx, "submatch_else");
-            CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb);
-            compile_submatch(else_cx, vec::slice(m, 1u, m.len()), vals, f,
-                             exits);
-            bcx = next_cx;
           }
           _ { }
         }
@@ -425,7 +420,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
         let rec_vals = [];
         for field_name: ast::ident in rec_fields {
             let ix = option::get(ty::field_idx(field_name, fields));
-            let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
+            let r = GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
             rec_vals += [r.val];
             bcx = r.bcx;
         }
@@ -442,7 +437,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
         };
         let tup_vals = [], i = 0u;
         while i < n_tup_elts {
-            let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
+            let r = GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
             tup_vals += [r.val];
             bcx = r.bcx;
             i += 1u;
@@ -507,7 +502,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
     let else_cx =
         alt kind {
           no_branch | single { bcx }
-          _ { new_sub_block_ctxt(bcx, "match_else") }
+          _ { sub_block(bcx, "match_else") }
         };
     let sw;
     if kind == switch {
@@ -521,7 +516,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
 
      // Compile subtrees for each option
     for opt: opt in opts {
-        let opt_cx = new_sub_block_ctxt(bcx, "match_case");
+        let opt_cx = sub_block(bcx, "match_case");
         alt kind {
           single { Br(bcx, opt_cx.llbb); }
           switch {
@@ -536,35 +531,24 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
             }
           }
           compare {
-            let compare_cx = new_scope_block_ctxt(bcx, "compare_scope");
-            Br(bcx, compare_cx.llbb);
-            bcx = compare_cx;
             let t = node_id_type(bcx, pat_id);
-            let res = trans_opt(bcx, opt);
-            alt res {
-              single_result(r) {
-                bcx = r.bcx;
-                let eq =
-                    base::trans_compare(bcx, ast::eq, test_val, t, r.val, t);
-                let cleanup_cx = base::trans_block_cleanups(
-                    eq.bcx, compare_cx);
-                bcx = new_sub_block_ctxt(bcx, "compare_next");
-                CondBr(cleanup_cx, eq.val, opt_cx.llbb, bcx.llbb);
-              }
-              range_result(rbegin, rend) {
-                bcx = rend.bcx;
-                let ge = base::trans_compare(bcx, ast::ge, test_val, t,
-                                              rbegin.val, t);
-                let le = base::trans_compare(ge.bcx, ast::le, test_val, t,
-                                              rend.val, t);
-                let in_range = rslt(le.bcx, And(le.bcx, ge.val, le.val));
-                bcx = in_range.bcx;
-                let cleanup_cx =
-                    base::trans_block_cleanups(bcx, compare_cx);
-                bcx = new_sub_block_ctxt(bcx, "compare_next");
-                CondBr(cleanup_cx, in_range.val, opt_cx.llbb, bcx.llbb);
-              }
-            }
+            let {bcx: after_cx, val: matches} =
+                with_scope_result(bcx, "compare_scope") {|bcx|
+                alt trans_opt(bcx, opt) {
+                  single_result({bcx, val}) {
+                    trans_compare(bcx, ast::eq, test_val, t, val, t)
+                  }
+                  range_result({val: vbegin, _}, {bcx, val: vend}) {
+                    let {bcx, val: ge} = trans_compare(bcx, ast::ge, test_val,
+                                                       t, vbegin, t);
+                    let {bcx, val: le} = trans_compare(bcx, ast::le, test_val,
+                                                       t, vend, t);
+                    {bcx: bcx, val: And(bcx, ge, le)}
+                  }
+                }
+            };
+            bcx = sub_block(after_cx, "compare_next");
+            CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb);
           }
           _ { }
         }
@@ -592,7 +576,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
 }
 
 // Returns false for unreachable blocks
-fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
+fn make_phi_bindings(bcx: block, map: [exit_node],
                      ids: pat_util::pat_id_map) -> bool {
     let our_block = bcx.llbb as uint;
     let success = true, bcx = bcx;
@@ -623,8 +607,8 @@ fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
                         make_phi_bindings"); }
                 };
                 let e_ty = node_id_type(bcx, node_id);
-                let {bcx: abcx, val: alloc} = base::alloc_ty(bcx, e_ty);
-                bcx = base::copy_val(abcx, base::INIT, alloc,
+                let {bcx: abcx, val: alloc} = alloc_ty(bcx, e_ty);
+                bcx = copy_val(abcx, INIT, alloc,
                                       load_if_immediate(abcx, local, e_ty),
                                       e_ty);
                 add_clean(bcx, alloc, e_ty);
@@ -637,76 +621,72 @@ fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node],
     ret success;
 }
 
-fn trans_alt(cx: @block_ctxt, expr: @ast::expr, arms_: [ast::arm],
-             dest: base::dest) -> @block_ctxt {
-    let bodies = [];
-    let match: match = [];
-    let alt_cx = new_scope_block_ctxt(cx, "alt");
-    Br(cx, alt_cx.llbb);
-
-    let er = base::trans_temp_expr(alt_cx, expr);
-    if er.bcx.unreachable { ret er.bcx; }
-
-    /*
-      n.b. nothing else in this module should need to normalize,
-      b/c of this call
-     */
-    let arms = normalize_arms(bcx_tcx(cx), arms_);
-
-    for a: ast::arm in arms {
-        let body = new_real_block_ctxt(er.bcx, "case_body",
-                                       a.body.span);
-        let id_map = pat_util::pat_id_map(bcx_tcx(cx), a.pats[0]);
+fn trans_alt(bcx: block, expr: @ast::expr, arms: [ast::arm],
+             dest: dest) -> block {
+    with_scope(bcx, "alt") {|bcx| trans_alt_inner(bcx, expr, arms, dest)}
+}
+
+fn trans_alt_inner(scope_cx: block, expr: @ast::expr, arms: [ast::arm],
+                   dest: dest) -> block {
+    let bcx = scope_cx, tcx = bcx_tcx(bcx);
+    let bodies = [], match = [];
+
+    let {bcx, val, _} = trans_temp_expr(bcx, expr);
+    if bcx.unreachable { ret bcx; }
+
+    // n.b. nothing else in this module should need to normalize,
+    // b/c of this call
+    let arms = normalize_arms(tcx, arms);
+
+    for a in arms {
+        let body = scope_block(bcx, "case_body");
+        body.block_span = some(a.body.span);
+        let id_map = pat_util::pat_id_map(tcx, a.pats[0]);
         bodies += [body];
-        for p: @ast::pat in a.pats {
-            match +=
-                [@{pats: [p],
-                   bound: [],
-                   data: @{body: body.llbb, guard: a.guard, id_map: id_map}}];
+        for p in a.pats {
+            match += [@{pats: [p],
+                        bound: [],
+                        data: @{body: body.llbb, guard: a.guard,
+                                id_map: id_map}}];
         }
     }
 
     // Cached fail-on-fallthrough block
     let fail_cx = @mutable none;
-    fn mk_fail(cx: @block_ctxt, sp: span,
+    fn mk_fail(bcx: block, sp: span,
                done: @mutable option<BasicBlockRef>) -> BasicBlockRef {
         alt *done { some(bb) { ret bb; } _ { } }
-        let fail_cx = new_sub_block_ctxt(cx, "case_fallthrough");
-        base::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");;
+        let fail_cx = sub_block(bcx, "case_fallthrough");
+        trans_fail(fail_cx, some(sp), "non-exhaustive match failure");;
         *done = some(fail_cx.llbb);
         ret fail_cx.llbb;
     }
 
     let exit_map = [];
-    let t = node_id_type(cx, expr.id);
-    let vr = base::spill_if_immediate(er.bcx, er.val, t);
-    compile_submatch(vr.bcx, match, [vr.val],
-                     bind mk_fail(alt_cx, expr.span, fail_cx), exit_map);
+    let t = node_id_type(bcx, expr.id);
+    let {bcx, val: spilled} = spill_if_immediate(bcx, val, t);
+    compile_submatch(bcx, match, [spilled],
+                     bind mk_fail(scope_cx, expr.span, fail_cx), exit_map);
 
     let arm_cxs = [], arm_dests = [], i = 0u;
-    for a: ast::arm in arms {
+    for a in arms {
         let body_cx = bodies[i];
         if make_phi_bindings(body_cx, exit_map,
-                             pat_util::pat_id_map(bcx_tcx(cx),
-                                                  a.pats[0])) {
-            let arm_dest = base::dup_for_join(dest);
+                             pat_util::pat_id_map(tcx, a.pats[0])) {
+            let arm_dest = dup_for_join(dest);
             arm_dests += [arm_dest];
-            let arm_cx = base::trans_block(body_cx, a.body, arm_dest);
-            arm_cx = base::trans_block_cleanups(arm_cx, body_cx);
+            let arm_cx = trans_block(body_cx, a.body, arm_dest);
+            arm_cx = trans_block_cleanups(arm_cx, body_cx);
             arm_cxs += [arm_cx];
         }
         i += 1u;
     }
-    let after_cx = base::join_returns(alt_cx, arm_cxs, arm_dests, dest);
-    let next_cx = new_sub_block_ctxt(cx, "next");
-    if after_cx.unreachable { Unreachable(next_cx); }
-    base::cleanup_and_Br(after_cx, alt_cx, next_cx.llbb);
-    ret next_cx;
+    join_returns(scope_cx, arm_cxs, arm_dests, dest)
 }
 
 // Not alt-related, but similar to the pattern-munging code above
-fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
-                        make_copy: bool) -> @block_ctxt {
+fn bind_irrefutable_pat(bcx: block, pat: @ast::pat, val: ValueRef,
+                        make_copy: bool) -> block {
     let ccx = bcx.fcx.ccx, bcx = bcx;
 
     // Necessary since bind_irrefutable_pat is called outside trans_alt
@@ -717,10 +697,10 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
             // FIXME: Could constrain pat_bind to make this
             // check unnecessary.
             check (type_has_static_size(ccx, ty));
-            let llty = base::type_of(ccx, ty);
-            let alloc = base::alloca(bcx, llty);
-            bcx = base::copy_val(bcx, base::INIT, alloc,
-                                  base::load_if_immediate(bcx, val, ty), ty);
+            let llty = type_of(ccx, ty);
+            let alloc = alloca(bcx, llty);
+            bcx = copy_val(bcx, INIT, alloc,
+                                  load_if_immediate(bcx, val, ty), ty);
             bcx.fcx.lllocals.insert(pat.id, local_mem(alloc));
             add_clean(bcx, alloc, ty);
         } else { bcx.fcx.lllocals.insert(pat.id, local_mem(val)); }
@@ -745,7 +725,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
         for f: ast::field_pat in fields {
             let ix = option::get(ty::field_idx(f.ident, rec_fields));
             // how to get rid of this check?
-            let r = base::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
+            let r = GEP_tup_like(bcx, rec_ty, val, [0, ix as int]);
             bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, make_copy);
         }
       }
@@ -753,7 +733,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
         let tup_ty = node_id_type(bcx, pat.id);
         let i = 0u;
         for elem in elems {
-            let r = base::GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
+            let r = GEP_tup_like(bcx, tup_ty, val, [0, i as int]);
             bcx = bind_irrefutable_pat(r.bcx, elem, r.val, make_copy);
             i += 1u;
         }
diff --git a/src/comp/middle/trans/base.rs b/src/comp/middle/trans/base.rs
index 62039d472a9..a99c70b1519 100644
--- a/src/comp/middle/trans/base.rs
+++ b/src/comp/middle/trans/base.rs
@@ -275,7 +275,7 @@ fn get_extern_const(externs: hashmap<str, ValueRef>, llmod: ModuleRef,
     ret c;
 }
 
-fn get_simple_extern_fn(cx: @block_ctxt,
+fn get_simple_extern_fn(cx: block,
                         externs: hashmap<str, ValueRef>,
                         llmod: ModuleRef,
                         name: str, n_args: int) -> ValueRef {
@@ -286,7 +286,7 @@ fn get_simple_extern_fn(cx: @block_ctxt,
     ret get_extern_fn(externs, llmod, name, lib::llvm::CCallConv, t);
 }
 
-fn trans_native_call(cx: @block_ctxt, externs: hashmap<str, ValueRef>,
+fn trans_native_call(cx: block, externs: hashmap<str, ValueRef>,
                      llmod: ModuleRef, name: str, args: [ValueRef]) ->
    ValueRef {
     let n = args.len() as int;
@@ -299,37 +299,37 @@ fn trans_native_call(cx: @block_ctxt, externs: hashmap<str, ValueRef>,
     ret Call(cx, llnative, call_args);
 }
 
-fn trans_free(cx: @block_ctxt, v: ValueRef) -> @block_ctxt {
+fn trans_free(cx: block, v: ValueRef) -> block {
     Call(cx, bcx_ccx(cx).upcalls.free, [PointerCast(cx, v, T_ptr(T_i8()))]);
     cx
 }
 
-fn trans_shared_free(cx: @block_ctxt, v: ValueRef) -> @block_ctxt {
+fn trans_shared_free(cx: block, v: ValueRef) -> block {
     Call(cx, bcx_ccx(cx).upcalls.shared_free,
          [PointerCast(cx, v, T_ptr(T_i8()))]);
     ret cx;
 }
 
-fn umax(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
+fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
     let cond = ICmp(cx, lib::llvm::IntULT, a, b);
     ret Select(cx, cond, b, a);
 }
 
-fn umin(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
+fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
     let cond = ICmp(cx, lib::llvm::IntULT, a, b);
     ret Select(cx, cond, a, b);
 }
 
-fn alloca(cx: @block_ctxt, t: TypeRef) -> ValueRef {
+fn alloca(cx: block, t: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(t); }
-    ret Alloca(new_raw_block_ctxt(cx.fcx, cx.fcx.llstaticallocas), t);
+    ret Alloca(raw_block(cx.fcx, cx.fcx.llstaticallocas), t);
 }
 
-fn dynastack_alloca(cx: @block_ctxt, t: TypeRef, n: ValueRef, ty: ty::t) ->
+fn dynastack_alloca(cx: block, t: TypeRef, n: ValueRef, ty: ty::t) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(t)); }
     let bcx = cx;
-    let dy_cx = new_raw_block_ctxt(cx.fcx, cx.fcx.lldynamicallocas);
+    let dy_cx = raw_block(cx.fcx, cx.fcx.lldynamicallocas);
     alt bcx_fcx(cx).llobstacktoken {
       none {
         bcx_fcx(cx).llobstacktoken =
@@ -351,21 +351,21 @@ fn dynastack_alloca(cx: @block_ctxt, t: TypeRef, n: ValueRef, ty: ty::t) ->
 
 fn mk_obstack_token(ccx: @crate_ctxt, fcx: @fn_ctxt) ->
    ValueRef {
-    let cx = new_raw_block_ctxt(fcx, fcx.lldynamicallocas);
+    let cx = raw_block(fcx, fcx.lldynamicallocas);
     ret Call(cx, ccx.upcalls.dynastack_mark, []);
 }
 
 // Given a pointer p, returns a pointer sz(p) (i.e., inc'd by sz bytes).
 // The type of the returned pointer is always i8*.  If you care about the
 // return type, use bump_ptr().
-fn ptr_offs(bcx: @block_ctxt, base: ValueRef, sz: ValueRef) -> ValueRef {
+fn ptr_offs(bcx: block, base: ValueRef, sz: ValueRef) -> ValueRef {
     let raw = PointerCast(bcx, base, T_ptr(T_i8()));
     GEP(bcx, raw, [sz])
 }
 
 // Increment a pointer by a given amount and then cast it to be a pointer
 // to a given type.
-fn bump_ptr(bcx: @block_ctxt, t: ty::t, base: ValueRef, sz: ValueRef) ->
+fn bump_ptr(bcx: block, t: ty::t, base: ValueRef, sz: ValueRef) ->
    ValueRef {
     let ccx = bcx_ccx(bcx);
     let bumped = ptr_offs(bcx, base, sz);
@@ -380,13 +380,13 @@ fn bump_ptr(bcx: @block_ctxt, t: ty::t, base: ValueRef, sz: ValueRef) ->
 // ty::struct and knows what to do when it runs into a ty_param stuck in the
 // middle of the thing it's GEP'ing into. Much like size_of and align_of,
 // above.
-fn GEP_tup_like(bcx: @block_ctxt, t: ty::t, base: ValueRef, ixs: [int])
+fn GEP_tup_like(bcx: block, t: ty::t, base: ValueRef, ixs: [int])
     -> result {
-    fn compute_off(bcx: @block_ctxt,
+    fn compute_off(bcx: block,
                    off: ValueRef,
                    t: ty::t,
                    ixs: [int],
-                   n: uint) -> (@block_ctxt, ValueRef, ty::t) {
+                   n: uint) -> (block, ValueRef, ty::t) {
         if n == ixs.len() {
             ret (bcx, off, t);
         }
@@ -436,7 +436,7 @@ fn GEP_tup_like(bcx: @block_ctxt, t: ty::t, base: ValueRef, ixs: [int])
 // This function uses GEP_tup_like() above and automatically performs casts as
 // appropriate. @llblobptr is the data part of a enum value; its actual type
 // is meaningless, as it will be cast away.
-fn GEP_enum(cx: @block_ctxt, llblobptr: ValueRef, enum_id: ast::def_id,
+fn GEP_enum(cx: block, llblobptr: ValueRef, enum_id: ast::def_id,
            variant_id: ast::def_id, ty_substs: [ty::t],
            ix: uint) : valid_variant_index(ix, cx, enum_id, variant_id) ->
    result {
@@ -487,7 +487,7 @@ fn GEP_enum(cx: @block_ctxt, llblobptr: ValueRef, enum_id: ast::def_id,
 
 // trans_shared_malloc: expects a type indicating which pointer type we want
 // and a size indicating how much space we want malloc'd.
-fn trans_shared_malloc(cx: @block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
+fn trans_shared_malloc(cx: block, llptr_ty: TypeRef, llsize: ValueRef)
    -> result {
     // FIXME: need a table to collect tydesc globals.
 
@@ -503,7 +503,7 @@ fn trans_shared_malloc(cx: @block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
 // known.
 //
 // The runtime equivalent is box_body() in "rust_internal.h".
-fn opaque_box_body(bcx: @block_ctxt,
+fn opaque_box_body(bcx: block,
                       body_t: ty::t,
                       boxptr: ValueRef) -> ValueRef {
     let ccx = bcx_ccx(bcx);
@@ -519,7 +519,7 @@ fn opaque_box_body(bcx: @block_ctxt,
 // trans_malloc_boxed_raw: expects an unboxed type and returns a pointer to
 // enough space for a box of that type.  This includes a rust_opaque_box
 // header.
-fn trans_malloc_boxed_raw(bcx: @block_ctxt, t: ty::t,
+fn trans_malloc_boxed_raw(bcx: block, t: ty::t,
                           &static_ti: option<@tydesc_info>) -> result {
     let bcx = bcx, ccx = bcx_ccx(bcx);
 
@@ -540,8 +540,8 @@ fn trans_malloc_boxed_raw(bcx: @block_ctxt, t: ty::t,
 
 // trans_malloc_boxed: usefully wraps trans_malloc_box_raw; allocates a box,
 // initializes the reference count to 1, and pulls out the body and rc
-fn trans_malloc_boxed(bcx: @block_ctxt, t: ty::t) ->
-   {bcx: @block_ctxt, box: ValueRef, body: ValueRef} {
+fn trans_malloc_boxed(bcx: block, t: ty::t) ->
+   {bcx: block, box: ValueRef, body: ValueRef} {
     let ti = none;
     let {bcx, val:box} = trans_malloc_boxed_raw(bcx, t, ti);
     let body = GEPi(bcx, box, [0, abi::box_field_body]);
@@ -553,7 +553,7 @@ fn trans_malloc_boxed(bcx: @block_ctxt, t: ty::t) ->
 // Given a type and a field index into its corresponding type descriptor,
 // returns an LLVM ValueRef of that field from the tydesc, generating the
 // tydesc if necessary.
-fn field_of_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool, field: int) ->
+fn field_of_tydesc(cx: block, t: ty::t, escapes: bool, field: int) ->
    result {
     let tydesc = get_tydesc_simple(cx, t, escapes);
     ret rslt(tydesc.bcx,
@@ -564,7 +564,7 @@ fn field_of_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool, field: int) ->
 // each of the ty params it uses (from the current frame) and a vector of the
 // indices of the ty params present in the type. This is used solely for
 // constructing derived tydescs.
-fn linearize_ty_params(cx: @block_ctxt, t: ty::t) ->
+fn linearize_ty_params(cx: block, t: ty::t) ->
    {params: [uint], descs: [ValueRef]} {
     let param_vals = [], param_defs = [];
     ty::walk_ty(bcx_tcx(cx), t) {|t|
@@ -581,7 +581,7 @@ fn linearize_ty_params(cx: @block_ctxt, t: ty::t) ->
     ret {params: param_defs, descs: param_vals};
 }
 
-fn trans_stack_local_derived_tydesc(cx: @block_ctxt, llsz: ValueRef,
+fn trans_stack_local_derived_tydesc(cx: block, llsz: ValueRef,
                                     llalign: ValueRef, llroottydesc: ValueRef,
                                     llfirstparam: ValueRef, n_params: uint)
     -> ValueRef {
@@ -607,7 +607,7 @@ fn trans_stack_local_derived_tydesc(cx: @block_ctxt, llsz: ValueRef,
     ret llmyroottydesc;
 }
 
-fn get_derived_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool,
+fn get_derived_tydesc(cx: block, t: ty::t, escapes: bool,
                       &static_ti: option<@tydesc_info>) -> result {
     alt cx.fcx.derived_tydescs.find(t) {
       some(info) {
@@ -621,7 +621,7 @@ fn get_derived_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool,
     }
 
     bcx_ccx(cx).stats.n_derived_tydescs += 1u;
-    let bcx = new_raw_block_ctxt(cx.fcx, cx.fcx.llderivedtydescs);
+    let bcx = raw_block(cx.fcx, cx.fcx.llderivedtydescs);
     let tys = linearize_ty_params(bcx, t);
     let root_ti = get_static_tydesc(bcx_ccx(bcx), t, tys.params);
     static_ti = some(root_ti);
@@ -681,7 +681,7 @@ fn get_tydesc_simple(bcx: block, t: ty::t, escapes: bool) -> result {
     get_tydesc(bcx, t, escapes, ti)
 }
 
-fn get_tydesc(cx: @block_ctxt, t: ty::t, escapes: bool,
+fn get_tydesc(cx: block, t: ty::t, escapes: bool,
               &static_ti: option<@tydesc_info>) -> result {
 
     // Is the supplied type a type param? If so, return the passed-in tydesc.
@@ -780,7 +780,7 @@ fn declare_tydesc(ccx: @crate_ctxt, t: ty::t, ty_params: [uint])
     ret info;
 }
 
-type glue_helper = fn@(@block_ctxt, ValueRef, ty::t);
+type glue_helper = fn@(block, ValueRef, ty::t);
 
 fn declare_generic_glue(ccx: @crate_ctxt, t: ty::t, llfnty: TypeRef,
                         name: str) -> ValueRef {
@@ -813,7 +813,7 @@ fn make_generic_glue_inner(ccx: @crate_ctxt, t: ty::t,
 
     let ty_param_count = ty_params.len();
     let lltyparams = llvm::LLVMGetParam(llfn, 2u as c_uint);
-    let load_env_bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
+    let load_env_bcx = raw_block(fcx, fcx.llloadenv);
     let lltydescs = [mutable];
     let p = 0u;
     while p < ty_param_count {
@@ -825,7 +825,7 @@ fn make_generic_glue_inner(ccx: @crate_ctxt, t: ty::t,
 
     fcx.lltyparams = vec::map_mut(lltydescs, {|d| {desc: d, dicts: none}});
 
-    let bcx = new_top_block_ctxt(fcx, none);
+    let bcx = top_scope_block(fcx, none);
     let lltop = bcx.llbb;
     let llrawptr0 = llvm::LLVMGetParam(llfn, 3u as c_uint);
     let llval0 = BitCast(bcx, llrawptr0, llty);
@@ -899,7 +899,7 @@ fn emit_tydescs(ccx: @crate_ctxt) {
     };
 }
 
-fn make_take_glue(cx: @block_ctxt, v: ValueRef, t: ty::t) {
+fn make_take_glue(cx: block, v: ValueRef, t: ty::t) {
     let bcx = cx;
     // NB: v is a *pointer* to type t here, not a direct value.
     bcx = alt ty::get(t).struct {
@@ -943,7 +943,7 @@ fn make_take_glue(cx: @block_ctxt, v: ValueRef, t: ty::t) {
     build_return(bcx);
 }
 
-fn incr_refcnt_of_boxed(cx: @block_ctxt, box_ptr: ValueRef) -> @block_ctxt {
+fn incr_refcnt_of_boxed(cx: block, box_ptr: ValueRef) -> block {
     let ccx = bcx_ccx(cx);
     maybe_validate_box(cx, box_ptr);
     let rc_ptr = GEPi(cx, box_ptr, [0, abi::box_field_refcnt]);
@@ -953,7 +953,7 @@ fn incr_refcnt_of_boxed(cx: @block_ctxt, box_ptr: ValueRef) -> @block_ctxt {
     ret cx;
 }
 
-fn make_free_glue(bcx: @block_ctxt, v: ValueRef, t: ty::t) {
+fn make_free_glue(bcx: block, v: ValueRef, t: ty::t) {
     // v is a pointer to the actual box component of the type here. The
     // ValueRef will have the wrong type here (make_generic_glue is casting
     // everything to a pointer to the type that the glue acts on).
@@ -999,7 +999,7 @@ fn make_free_glue(bcx: @block_ctxt, v: ValueRef, t: ty::t) {
     build_return(bcx);
 }
 
-fn make_drop_glue(bcx: @block_ctxt, v0: ValueRef, t: ty::t) {
+fn make_drop_glue(bcx: block, v0: ValueRef, t: ty::t) {
     // NB: v0 is an *alias* of type t here, not a direct value.
     let ccx = bcx_ccx(bcx);
     let bcx = alt ty::get(t).struct {
@@ -1032,47 +1032,40 @@ fn make_drop_glue(bcx: @block_ctxt, v0: ValueRef, t: ty::t) {
     build_return(bcx);
 }
 
-fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: ast::def_id,
-                  inner_t: ty::t, tps: [ty::t]) -> @block_ctxt {
-    let ccx = bcx_ccx(cx);
+fn trans_res_drop(bcx: block, rs: ValueRef, did: ast::def_id,
+                  inner_t: ty::t, tps: [ty::t]) -> block {
+    let ccx = bcx_ccx(bcx);
     let inner_t_s = ty::substitute_type_params(ccx.tcx, tps, inner_t);
     let tup_ty = ty::mk_tup(ccx.tcx, [ty::mk_int(ccx.tcx), inner_t_s]);
-    let drop_cx = new_sub_block_ctxt(cx, "drop res");
-    let next_cx = new_sub_block_ctxt(cx, "next");
-
-    let drop_flag = GEP_tup_like(cx, tup_ty, rs, [0, 0]);
-    let cx = drop_flag.bcx;
-    let null_test = IsNull(cx, Load(cx, drop_flag.val));
-    CondBr(cx, null_test, next_cx.llbb, drop_cx.llbb);
-    cx = drop_cx;
-
-    let val = GEP_tup_like(cx, tup_ty, rs, [0, 1]);
-    cx = val.bcx;
-    // Find and call the actual destructor.
-    let dtor_addr = common::get_res_dtor(ccx, did, inner_t);
-    let args = [cx.fcx.llretptr, null_env_ptr(cx)];
-    for tp: ty::t in tps {
-        let td = get_tydesc_simple(cx, tp, false);
-        args += [td.val];
-        cx = td.bcx;
-    }
-    // Kludge to work around the fact that we know the precise type of the
-    // value here, but the dtor expects a type that still has opaque pointers
-    // for type variables.
-    let val_llty = lib::llvm::fn_ty_param_tys
-        (llvm::LLVMGetElementType
-         (llvm::LLVMTypeOf(dtor_addr)))[args.len()];
-    let val_cast = BitCast(cx, val.val, val_llty);
-    Call(cx, dtor_addr, args + [val_cast]);
-
-    cx = drop_ty(cx, val.val, inner_t_s);
-    // FIXME #1184: Resource flag is larger than necessary
-    Store(cx, C_int(ccx, 0), drop_flag.val);
-    Br(cx, next_cx.llbb);
-    ret next_cx;
+
+    let {bcx, val: drop_flag} = GEP_tup_like(bcx, tup_ty, rs, [0, 0]);
+    with_cond(bcx, IsNotNull(bcx, Load(bcx, drop_flag))) {|bcx|
+        let {bcx, val: valptr} = GEP_tup_like(bcx, tup_ty, rs, [0, 1]);
+        // Find and call the actual destructor.
+        let dtor_addr = common::get_res_dtor(ccx, did, inner_t);
+        let args = [bcx.fcx.llretptr, null_env_ptr(bcx)];
+        for tp in tps {
+            let td = get_tydesc_simple(bcx, tp, false);
+            args += [td.val];
+            bcx = td.bcx;
+        }
+        // Kludge to work around the fact that we know the precise type of the
+        // value here, but the dtor expects a type that still has opaque
+        // pointers for type variables.
+        let val_llty = lib::llvm::fn_ty_param_tys
+            (llvm::LLVMGetElementType
+             (llvm::LLVMTypeOf(dtor_addr)))[args.len()];
+        let val_cast = BitCast(bcx, valptr, val_llty);
+        Call(bcx, dtor_addr, args + [val_cast]);
+
+        bcx = drop_ty(bcx, valptr, inner_t_s);
+        // FIXME #1184: Resource flag is larger than necessary
+        Store(bcx, C_int(ccx, 0), drop_flag);
+        bcx
+    }
 }
 
-fn maybe_validate_box(_cx: @block_ctxt, _box_ptr: ValueRef) {
+fn maybe_validate_box(_cx: block, _box_ptr: ValueRef) {
     // Uncomment this when debugging annoying use-after-free
     // bugs.  But do not commit with this uncommented!  Big performance hit.
 
@@ -1083,28 +1076,19 @@ fn maybe_validate_box(_cx: @block_ctxt, _box_ptr: ValueRef) {
     // Call(cx, ccx.upcalls.validate_box, [raw_box_ptr]);
 }
 
-fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
-    -> @block_ctxt {
-    let ccx = bcx_ccx(cx);
-
-    maybe_validate_box(cx, box_ptr);
+fn decr_refcnt_maybe_free(bcx: block, box_ptr: ValueRef, t: ty::t) -> block {
+    let ccx = bcx_ccx(bcx);
+    maybe_validate_box(bcx, box_ptr);
 
-    let rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
-    let free_cx = new_sub_block_ctxt(cx, "free");
-    let next_cx = new_sub_block_ctxt(cx, "next");
     let llbox_ty = T_opaque_box_ptr(ccx);
-    let box_ptr = PointerCast(cx, box_ptr, llbox_ty);
-    let null_test = IsNull(cx, box_ptr);
-    CondBr(cx, null_test, next_cx.llbb, rc_adj_cx.llbb);
-    let rc_ptr = GEPi(rc_adj_cx, box_ptr, [0, abi::box_field_refcnt]);
-    let rc = Load(rc_adj_cx, rc_ptr);
-    rc = Sub(rc_adj_cx, rc, C_int(ccx, 1));
-    Store(rc_adj_cx, rc, rc_ptr);
-    let zero_test = ICmp(rc_adj_cx, lib::llvm::IntEQ, C_int(ccx, 0), rc);
-    CondBr(rc_adj_cx, zero_test, free_cx.llbb, next_cx.llbb);
-    let free_cx = free_ty(free_cx, box_ptr, t);
-    Br(free_cx, next_cx.llbb);
-    ret next_cx;
+    let box_ptr = PointerCast(bcx, box_ptr, llbox_ty);
+    with_cond(bcx, IsNotNull(bcx, box_ptr)) {|bcx|
+        let rc_ptr = GEPi(bcx, box_ptr, [0, abi::box_field_refcnt]);
+        let rc = Sub(bcx, Load(bcx, rc_ptr), C_int(ccx, 1));
+        Store(bcx, rc, rc_ptr);
+        let zero_test = ICmp(bcx, lib::llvm::IntEQ, C_int(ccx, 0), rc);
+        with_cond(bcx, zero_test) {|bcx| free_ty(bcx, box_ptr, t)}
+    }
 }
 
 // Structural comparison: a rather involved form of glue.
@@ -1119,7 +1103,7 @@ fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: str) {
 enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }
 
 
-fn compare_scalar_types(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef,
+fn compare_scalar_types(cx: block, lhs: ValueRef, rhs: ValueRef,
                         t: ty::t, op: ast::binop) -> result {
     let f = bind compare_scalar_values(cx, lhs, rhs, _, op);
 
@@ -1144,9 +1128,9 @@ fn compare_scalar_types(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef,
 
 
 // A helper function to do the actual comparison of scalar values.
-fn compare_scalar_values(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef,
+fn compare_scalar_values(cx: block, lhs: ValueRef, rhs: ValueRef,
                          nt: scalar_type, op: ast::binop) -> ValueRef {
-    fn die_(cx: @block_ctxt) -> ! {
+    fn die_(cx: block) -> ! {
         bcx_tcx(cx).sess.bug("compare_scalar_values: must be a\
           comparison operator");
     }
@@ -1201,38 +1185,24 @@ fn compare_scalar_values(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef,
     }
 }
 
-type val_pair_fn = fn@(@block_ctxt, ValueRef, ValueRef) -> @block_ctxt;
-type val_and_ty_fn = fn@(@block_ctxt, ValueRef, ty::t) -> @block_ctxt;
+type val_pair_fn = fn@(block, ValueRef, ValueRef) -> block;
+type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> block;
 
-fn load_inbounds(cx: @block_ctxt, p: ValueRef, idxs: [int]) -> ValueRef {
+fn load_inbounds(cx: block, p: ValueRef, idxs: [int]) -> ValueRef {
     ret Load(cx, GEPi(cx, p, idxs));
 }
 
-fn store_inbounds(cx: @block_ctxt, v: ValueRef, p: ValueRef,
+fn store_inbounds(cx: block, v: ValueRef, p: ValueRef,
                   idxs: [int]) {
     Store(cx, v, GEPi(cx, p, idxs));
 }
 
 // Iterates through the elements of a structural type.
-fn iter_structural_ty(cx: @block_ctxt, av: ValueRef, t: ty::t,
-                      f: val_and_ty_fn) -> @block_ctxt {
-    fn iter_boxpp(cx: @block_ctxt, box_cell: ValueRef, f: val_and_ty_fn) ->
-       @block_ctxt {
-        let box_ptr = Load(cx, box_cell);
-        let tnil = ty::mk_nil(bcx_tcx(cx));
-        let tbox = ty::mk_imm_box(bcx_tcx(cx), tnil);
-        let inner_cx = new_sub_block_ctxt(cx, "iter box");
-        let next_cx = new_sub_block_ctxt(cx, "next");
-        let null_test = IsNull(cx, box_ptr);
-        CondBr(cx, null_test, next_cx.llbb, inner_cx.llbb);
-        let inner_cx = f(inner_cx, box_cell, tbox);
-        Br(inner_cx, next_cx.llbb);
-        ret next_cx;
-    }
-
-    fn iter_variant(cx: @block_ctxt, a_tup: ValueRef,
+fn iter_structural_ty(cx: block, av: ValueRef, t: ty::t,
+                      f: val_and_ty_fn) -> block {
+    fn iter_variant(cx: block, a_tup: ValueRef,
                     variant: ty::variant_info, tps: [ty::t], tid: ast::def_id,
-                    f: val_and_ty_fn) -> @block_ctxt {
+                    f: val_and_ty_fn) -> block {
         if variant.args.len() == 0u { ret cx; }
         let fn_ty = variant.ctor_ty;
         let ccx = bcx_ccx(cx);
@@ -1305,13 +1275,13 @@ fn iter_structural_ty(cx: @block_ctxt, av: ValueRef, t: ty::t,
         // NB: we must hit the discriminant first so that structural
         // comparison know not to proceed when the discriminants differ.
         cx = f(cx, lldiscrim_a_ptr, ty::mk_int(bcx_tcx(cx)));
-        let unr_cx = new_sub_block_ctxt(cx, "enum-iter-unr");
+        let unr_cx = sub_block(cx, "enum-iter-unr");
         Unreachable(unr_cx);
         let llswitch = Switch(cx, lldiscrim_a, unr_cx.llbb, n_variants);
-        let next_cx = new_sub_block_ctxt(cx, "enum-iter-next");
+        let next_cx = sub_block(cx, "enum-iter-next");
         for variant: ty::variant_info in *variants {
             let variant_cx =
-                new_sub_block_ctxt(cx,
+                sub_block(cx,
                                    "enum-iter-variant-" +
                                        int::to_str(variant.disr_val, 10u));
             AddCase(llswitch, C_int(ccx, variant.disr_val), variant_cx.llbb);
@@ -1398,7 +1368,7 @@ fn lazily_emit_tydesc_glue(ccx: @crate_ctxt, field: int,
     }
 }
 
-fn call_tydesc_glue_full(cx: @block_ctxt, v: ValueRef, tydesc: ValueRef,
+fn call_tydesc_glue_full(cx: block, v: ValueRef, tydesc: ValueRef,
                          field: int, static_ti: option<@tydesc_info>) {
     lazily_emit_tydesc_glue(bcx_ccx(cx), field, static_ti);
 
@@ -1434,15 +1404,15 @@ fn call_tydesc_glue_full(cx: @block_ctxt, v: ValueRef, tydesc: ValueRef,
                     lltydescs, llrawptr]);
 }
 
-fn call_tydesc_glue(cx: @block_ctxt, v: ValueRef, t: ty::t, field: int) ->
-   @block_ctxt {
+fn call_tydesc_glue(cx: block, v: ValueRef, t: ty::t, field: int) ->
+   block {
     let ti: option<@tydesc_info> = none::<@tydesc_info>;
     let {bcx: bcx, val: td} = get_tydesc(cx, t, false, ti);
     call_tydesc_glue_full(bcx, v, td, field, ti);
     ret bcx;
 }
 
-fn call_cmp_glue(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
+fn call_cmp_glue(cx: block, lhs: ValueRef, rhs: ValueRef, t: ty::t,
                  llop: ValueRef) -> result {
     // We can't use call_tydesc_glue_full() and friends here because compare
     // glue has a special signature.
@@ -1473,21 +1443,21 @@ fn call_cmp_glue(cx: @block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
     ret rslt(bcx, Load(bcx, llcmpresultptr));
 }
 
-fn take_ty(cx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
+fn take_ty(cx: block, v: ValueRef, t: ty::t) -> block {
     if ty::type_needs_drop(bcx_tcx(cx), t) {
         ret call_tydesc_glue(cx, v, t, abi::tydesc_field_take_glue);
     }
     ret cx;
 }
 
-fn drop_ty(cx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
+fn drop_ty(cx: block, v: ValueRef, t: ty::t) -> block {
     if ty::type_needs_drop(bcx_tcx(cx), t) {
         ret call_tydesc_glue(cx, v, t, abi::tydesc_field_drop_glue);
     }
     ret cx;
 }
 
-fn drop_ty_immediate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
+fn drop_ty_immediate(bcx: block, v: ValueRef, t: ty::t) -> block {
     alt ty::get(t).struct {
       ty::ty_uniq(_) | ty::ty_vec(_) | ty::ty_str { free_ty(bcx, v, t) }
       ty::ty_box(_) | ty::ty_opaque_box {
@@ -1498,7 +1468,7 @@ fn drop_ty_immediate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
     }
 }
 
-fn take_ty_immediate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
+fn take_ty_immediate(bcx: block, v: ValueRef, t: ty::t) -> result {
     alt ty::get(t).struct {
       ty::ty_box(_) | ty::ty_opaque_box {
         rslt(incr_refcnt_of_boxed(bcx, v), v)
@@ -1511,14 +1481,14 @@ fn take_ty_immediate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
     }
 }
 
-fn free_ty(cx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
+fn free_ty(cx: block, v: ValueRef, t: ty::t) -> block {
     if ty::type_needs_drop(bcx_tcx(cx), t) {
         ret call_tydesc_glue(cx, v, t, abi::tydesc_field_free_glue);
     }
     ret cx;
 }
 
-fn call_memmove(cx: @block_ctxt, dst: ValueRef, src: ValueRef,
+fn call_memmove(cx: block, dst: ValueRef, src: ValueRef,
                 n_bytes: ValueRef) -> result {
     // TODO: Provide LLVM with better alignment information when the alignment
     // is statically known (it must be nothing more than a constant int, or
@@ -1543,8 +1513,8 @@ fn call_memmove(cx: @block_ctxt, dst: ValueRef, src: ValueRef,
     ret rslt(cx, ret_val);
 }
 
-fn memmove_ty(bcx: @block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t) ->
-    @block_ctxt {
+fn memmove_ty(bcx: block, dst: ValueRef, src: ValueRef, t: ty::t) ->
+    block {
     let ccx = bcx_ccx(bcx);
     if check type_has_static_size(ccx, t) {
         if ty::type_is_structural(t) {
@@ -1570,27 +1540,24 @@ fn type_is_structural_or_param(t: ty::t) -> bool {
     }
 }
 
-fn copy_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
-            src: ValueRef, t: ty::t) -> @block_ctxt {
+fn copy_val(cx: block, action: copy_action, dst: ValueRef,
+            src: ValueRef, t: ty::t) -> block {
     if action == DROP_EXISTING &&
         (type_is_structural_or_param(t) ||
          ty::type_is_unique(t)) {
-        let do_copy_cx = new_sub_block_ctxt(cx, "do_copy");
-        let next_cx = new_sub_block_ctxt(cx, "next");
         let dstcmp = load_if_immediate(cx, dst, t);
-        let self_assigning =
-            ICmp(cx, lib::llvm::IntNE,
-                 PointerCast(cx, dstcmp, val_ty(src)), src);
-        CondBr(cx, self_assigning, do_copy_cx.llbb, next_cx.llbb);
-        do_copy_cx = copy_val_no_check(do_copy_cx, action, dst, src, t);
-        Br(do_copy_cx, next_cx.llbb);
-        ret next_cx;
+        let cast = PointerCast(cx, dstcmp, val_ty(src));
+        // Self-copy check
+        with_cond(cx, ICmp(cx, lib::llvm::IntNE, cast, src)) {|bcx|
+            copy_val_no_check(bcx, action, dst, src, t)
+        }
+    } else {
+        copy_val_no_check(cx, action, dst, src, t)
     }
-    ret copy_val_no_check(cx, action, dst, src, t);
 }
 
-fn copy_val_no_check(bcx: @block_ctxt, action: copy_action, dst: ValueRef,
-                     src: ValueRef, t: ty::t) -> @block_ctxt {
+fn copy_val_no_check(bcx: block, action: copy_action, dst: ValueRef,
+                     src: ValueRef, t: ty::t) -> block {
     let ccx = bcx_ccx(bcx), bcx = bcx;
     if ty::type_is_scalar(t) {
         Store(bcx, src, dst);
@@ -1618,8 +1585,8 @@ fn copy_val_no_check(bcx: @block_ctxt, action: copy_action, dst: ValueRef,
 // FIXME: We always zero out the source. Ideally we would detect the
 // case where a variable is always deinitialized by block exit and thus
 // doesn't need to be dropped.
-fn move_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
-            src: lval_result, t: ty::t) -> @block_ctxt {
+fn move_val(cx: block, action: copy_action, dst: ValueRef,
+            src: lval_result, t: ty::t) -> block {
     let src_val = src.val;
     let tcx = bcx_tcx(cx), cx = cx;
     if ty::type_is_scalar(t) {
@@ -1649,9 +1616,9 @@ fn move_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
                              ty_to_str(tcx, t));
 }
 
-fn store_temp_expr(cx: @block_ctxt, action: copy_action, dst: ValueRef,
+fn store_temp_expr(cx: block, action: copy_action, dst: ValueRef,
                    src: lval_result, t: ty::t, last_use: bool)
-    -> @block_ctxt {
+    -> block {
     // Lvals in memory are not temporaries. Copy them.
     if src.kind != temporary && !last_use {
         let v = if src.kind == owned {
@@ -1677,7 +1644,7 @@ fn trans_crate_lit(cx: @crate_ctxt, lit: ast::lit) -> ValueRef {
     }
 }
 
-fn trans_lit(cx: @block_ctxt, lit: ast::lit, dest: dest) -> @block_ctxt {
+fn trans_lit(cx: block, lit: ast::lit, dest: dest) -> block {
     if dest == ignore { ret cx; }
     alt lit.node {
       ast::lit_str(s) { ret tvec::trans_str(cx, s, dest); }
@@ -1687,8 +1654,8 @@ fn trans_lit(cx: @block_ctxt, lit: ast::lit, dest: dest) -> @block_ctxt {
     }
 }
 
-fn trans_unary(bcx: @block_ctxt, op: ast::unop, e: @ast::expr,
-               un_expr: @ast::expr, dest: dest) -> @block_ctxt {
+fn trans_unary(bcx: block, op: ast::unop, e: @ast::expr,
+               un_expr: @ast::expr, dest: dest) -> block {
     // Check for user-defined method call
     alt bcx_ccx(bcx).method_map.find(un_expr.id) {
       some(origin) {
@@ -1741,7 +1708,7 @@ fn trans_unary(bcx: @block_ctxt, op: ast::unop, e: @ast::expr,
     }
 }
 
-fn trans_compare(cx: @block_ctxt, op: ast::binop, lhs: ValueRef,
+fn trans_compare(cx: block, op: ast::binop, lhs: ValueRef,
                  _lhs_t: ty::t, rhs: ValueRef, rhs_t: ty::t) -> result {
     if ty::type_is_scalar(rhs_t) {
       let rs = compare_scalar_types(cx, lhs, rhs, rhs_t, op);
@@ -1774,9 +1741,9 @@ fn trans_compare(cx: @block_ctxt, op: ast::binop, lhs: ValueRef,
 
 // Important to get types for both lhs and rhs, because one might be _|_
 // and the other not.
-fn trans_eager_binop(cx: @block_ctxt, op: ast::binop, lhs: ValueRef,
+fn trans_eager_binop(cx: block, op: ast::binop, lhs: ValueRef,
                      lhs_t: ty::t, rhs: ValueRef, rhs_t: ty::t, dest: dest)
-    -> @block_ctxt {
+    -> block {
     if dest == ignore { ret cx; }
     let intype = lhs_t;
     if ty::type_is_bot(intype) { intype = rhs_t; }
@@ -1825,8 +1792,8 @@ fn trans_eager_binop(cx: @block_ctxt, op: ast::binop, lhs: ValueRef,
     ret store_in_dest(cx, val, dest);
 }
 
-fn trans_assign_op(bcx: @block_ctxt, ex: @ast::expr, op: ast::binop,
-                   dst: @ast::expr, src: @ast::expr) -> @block_ctxt {
+fn trans_assign_op(bcx: block, ex: @ast::expr, op: ast::binop,
+                   dst: @ast::expr, src: @ast::expr) -> block {
     let t = expr_ty(bcx, src);
     let lhs_res = trans_lval(bcx, dst);
     assert (lhs_res.kind == owned);
@@ -1870,7 +1837,7 @@ fn trans_assign_op(bcx: @block_ctxt, ex: @ast::expr, op: ast::binop,
                           save_in(lhs_res.val));
 }
 
-fn autoderef(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result_t {
+fn autoderef(cx: block, v: ValueRef, t: ty::t) -> result_t {
     let v1: ValueRef = v;
     let t1: ty::t = t;
     let ccx = bcx_ccx(cx);
@@ -1919,41 +1886,31 @@ fn autoderef(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result_t {
 // refinement types would obviate the need for this
 enum lazy_binop_ty { lazy_and, lazy_or }
 
-fn trans_lazy_binop(bcx: @block_ctxt, op: lazy_binop_ty, a: @ast::expr,
-                    b: @ast::expr, dest: dest) -> @block_ctxt {
-    let is_and = alt op { lazy_and { true } lazy_or { false } };
-    let lhs_res = trans_temp_expr(bcx, a);
-    if lhs_res.bcx.unreachable { ret lhs_res.bcx; }
-    let rhs_cx = new_scope_block_ctxt(lhs_res.bcx, "rhs");
-    let rhs_res = trans_temp_expr(rhs_cx, b);
-
-    let lhs_past_cx = new_scope_block_ctxt(lhs_res.bcx, "lhs");
-    // The following line ensures that any cleanups for rhs
-    // are done within the block for rhs. This is necessary
-    // because and/or are lazy. So the rhs may never execute,
-    // and the cleanups can't be pushed into later code.
-    let rhs_bcx = trans_block_cleanups(rhs_res.bcx, rhs_cx);
-    if is_and {
-        CondBr(lhs_res.bcx, lhs_res.val, rhs_cx.llbb, lhs_past_cx.llbb);
-    } else {
-        CondBr(lhs_res.bcx, lhs_res.val, lhs_past_cx.llbb, rhs_cx.llbb);
-    }
+fn trans_lazy_binop(bcx: block, op: lazy_binop_ty, a: @ast::expr,
+                    b: @ast::expr, dest: dest) -> block {
+
+    let {bcx: past_lhs, val: lhs} = with_scope_result(bcx, "lhs")
+        {|bcx| trans_temp_expr(bcx, a)};
+    if past_lhs.unreachable { ret past_lhs; }
+    let join = sub_block(bcx, "join"), before_rhs = sub_block(bcx, "rhs");
 
-    let join_cx = new_sub_block_ctxt(bcx, "join");
-    Br(lhs_past_cx, join_cx.llbb);
-    if rhs_bcx.unreachable {
-        ret store_in_dest(join_cx, C_bool(!is_and), dest);
+    alt op {
+      lazy_and { CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb); }
+      lazy_or { CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb); }
     }
-    Br(rhs_bcx, join_cx.llbb);
-    let phi = Phi(join_cx, T_bool(), [C_bool(!is_and), rhs_res.val],
-                  [lhs_past_cx.llbb, rhs_bcx.llbb]);
-    ret store_in_dest(join_cx, phi, dest);
+    let {bcx: past_rhs, val: rhs} = with_scope_result(before_rhs, "rhs")
+        {|bcx| trans_temp_expr(bcx, b)};
+
+    if past_rhs.unreachable { ret store_in_dest(join, lhs, dest); }
+    Br(past_rhs, join.llbb);
+    let phi = Phi(join, T_bool(), [lhs, rhs], [past_lhs.llbb, past_rhs.llbb]);
+    ret store_in_dest(join, phi, dest);
 }
 
 
 
-fn trans_binary(bcx: @block_ctxt, op: ast::binop, lhs: @ast::expr,
-                rhs: @ast::expr, dest: dest, ex: @ast::expr) -> @block_ctxt {
+fn trans_binary(bcx: block, op: ast::binop, lhs: @ast::expr,
+                rhs: @ast::expr, dest: dest, ex: @ast::expr) -> block {
     // User-defined operators
     alt bcx_ccx(bcx).method_map.find(ex.id) {
       some(origin) {
@@ -2002,9 +1959,9 @@ fn dup_for_join(dest: dest) -> dest {
     }
 }
 
-fn join_returns(parent_cx: @block_ctxt, in_cxs: [@block_ctxt],
-                in_ds: [dest], out_dest: dest) -> @block_ctxt {
-    let out = new_sub_block_ctxt(parent_cx, "join");
+fn join_returns(parent_cx: block, in_cxs: [block],
+                in_ds: [dest], out_dest: dest) -> block {
+    let out = sub_block(parent_cx, "join");
     let reachable = false, i = 0u, phi = none;
     for cx in in_cxs {
         if !cx.unreachable {
@@ -2034,7 +1991,7 @@ fn join_returns(parent_cx: @block_ctxt, in_cxs: [@block_ctxt],
 }
 
 // Used to put an immediate value in a dest.
-fn store_in_dest(bcx: @block_ctxt, val: ValueRef, dest: dest) -> @block_ctxt {
+fn store_in_dest(bcx: block, val: ValueRef, dest: dest) -> block {
     alt dest {
       ignore {}
       by_val(cell) { *cell = val; }
@@ -2051,18 +2008,17 @@ fn get_dest_addr(dest: dest) -> ValueRef {
     }
 }
 
-fn trans_if(cx: @block_ctxt, cond: @ast::expr, thn: ast::blk,
+fn trans_if(cx: block, cond: @ast::expr, thn: ast::blk,
             els: option<@ast::expr>, dest: dest)
-    -> @block_ctxt {
+    -> block {
     let {bcx, val: cond_val} = trans_temp_expr(cx, cond);
 
     let then_dest = dup_for_join(dest);
     let else_dest = dup_for_join(dest);
-    let then_cx = new_real_block_ctxt(bcx, "then", thn.span);
-    let else_cx = new_real_block_ctxt(bcx, "else", alt els {
-        some(e) { e.span }
-        _ { ast_util::dummy_sp() }
-    });
+    let then_cx = scope_block(bcx, "then");
+    then_cx.block_span = some(thn.span);
+    let else_cx = scope_block(bcx, "else");
+    option::may(els) {|e| else_cx.block_span = some(e.span); }
     CondBr(bcx, cond_val, then_cx.llbb, else_cx.llbb);
     let then_bcx = trans_block(then_cx, thn, then_dest);
     then_bcx = trans_block_cleanups(then_bcx, then_cx);
@@ -2090,15 +2046,14 @@ fn trans_if(cx: @block_ctxt, cond: @ast::expr, thn: ast::blk,
     ret join_returns(cx, [then_bcx, else_bcx], [then_dest, else_dest], dest);
 }
 
-fn trans_for(cx: @block_ctxt, local: @ast::local, seq: @ast::expr,
-             body: ast::blk) -> @block_ctxt {
-    fn inner(bcx: @block_ctxt, local: @ast::local, curr: ValueRef, t: ty::t,
-             body: ast::blk, outer_next_cx: @block_ctxt) -> @block_ctxt {
-        let next_cx = new_sub_block_ctxt(bcx, "next");
-        let scope_cx =
-            new_loop_scope_block_ctxt(bcx, cont_other(next_cx),
-                                      outer_next_cx, "for loop scope",
-                                      body.span);
+fn trans_for(cx: block, local: @ast::local, seq: @ast::expr,
+             body: ast::blk) -> block {
+    fn inner(bcx: block, local: @ast::local, curr: ValueRef, t: ty::t,
+             body: ast::blk, outer_next_cx: block) -> block {
+        let next_cx = sub_block(bcx, "next");
+        let scope_cx = loop_scope_block(bcx, cont_other(next_cx),
+                                        outer_next_cx, "for loop scope",
+                                        body.span);
         Br(bcx, scope_cx.llbb);
         let curr = PointerCast(bcx, curr,
                                T_ptr(type_of_or_i8(bcx_ccx(bcx), t)));
@@ -2109,7 +2064,7 @@ fn trans_for(cx: @block_ctxt, local: @ast::local, seq: @ast::expr,
         ret next_cx;
     }
     let ccx = bcx_ccx(cx);
-    let next_cx = new_sub_block_ctxt(cx, "next");
+    let next_cx = sub_block(cx, "next");
     let seq_ty = expr_ty(cx, seq);
     let {bcx: bcx, val: seq} = trans_temp_expr(cx, seq);
     let seq = PointerCast(bcx, seq, T_ptr(ccx.opaque_vec_type));
@@ -2123,12 +2078,12 @@ fn trans_for(cx: @block_ctxt, local: @ast::local, seq: @ast::expr,
     ret next_cx;
 }
 
-fn trans_while(cx: @block_ctxt, cond: @ast::expr, body: ast::blk)
-    -> @block_ctxt {
-    let next_cx = new_sub_block_ctxt(cx, "while next");
-    let cond_cx = new_loop_scope_block_ctxt(cx, cont_self, next_cx,
+fn trans_while(cx: block, cond: @ast::expr, body: ast::blk)
+    -> block {
+    let next_cx = sub_block(cx, "while next");
+    let cond_cx = loop_scope_block(cx, cont_self, next_cx,
                                             "while cond", body.span);
-    let body_cx = new_scope_block_ctxt(cond_cx, "while loop body");
+    let body_cx = scope_block(cond_cx, "while loop body");
     Br(cx, cond_cx.llbb);
     let cond_res = trans_temp_expr(cond_cx, cond);
     let cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
@@ -2138,14 +2093,14 @@ fn trans_while(cx: @block_ctxt, cond: @ast::expr, body: ast::blk)
     ret next_cx;
 }
 
-fn trans_do_while(cx: @block_ctxt, body: ast::blk, cond: @ast::expr) ->
-    @block_ctxt {
-    let next_cx = new_sub_block_ctxt(cx, "next");
+fn trans_do_while(cx: block, body: ast::blk, cond: @ast::expr) ->
+    block {
+    let next_cx = sub_block(cx, "next");
     let body_cx =
-        new_loop_scope_block_ctxt(cx, cont_self, next_cx,
+        loop_scope_block(cx, cont_self, next_cx,
                                   "do-while loop body", body.span);
     let body_end = trans_block(body_cx, body, ignore);
-    let cond_cx = new_scope_block_ctxt(body_cx, "do-while cond");
+    let cond_cx = scope_block(body_cx, "do-while cond");
     cleanup_and_Br(body_end, body_cx, cond_cx.llbb);
     let cond_res = trans_temp_expr(cond_cx, cond);
     let cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
@@ -2172,41 +2127,41 @@ enum lval_kind {
     owned_imm, //< Non-temporary value passed by value
 }
 type local_var_result = {val: ValueRef, kind: lval_kind};
-type lval_result = {bcx: @block_ctxt, val: ValueRef, kind: lval_kind};
+type lval_result = {bcx: block, val: ValueRef, kind: lval_kind};
 enum callee_env {
     null_env,
     is_closure,
     self_env(ValueRef, ty::t),
     dict_env(ValueRef, ValueRef),
 }
-type lval_maybe_callee = {bcx: @block_ctxt,
+type lval_maybe_callee = {bcx: block,
                           val: ValueRef,
                           kind: lval_kind,
                           env: callee_env,
                           generic: generic_callee};
 
-fn null_env_ptr(bcx: @block_ctxt) -> ValueRef {
+fn null_env_ptr(bcx: block) -> ValueRef {
     C_null(T_opaque_box_ptr(bcx_ccx(bcx)))
 }
 
-fn lval_from_local_var(bcx: @block_ctxt, r: local_var_result) -> lval_result {
+fn lval_from_local_var(bcx: block, r: local_var_result) -> lval_result {
     ret { bcx: bcx, val: r.val, kind: r.kind };
 }
 
-fn lval_owned(bcx: @block_ctxt, val: ValueRef) -> lval_result {
+fn lval_owned(bcx: block, val: ValueRef) -> lval_result {
     ret {bcx: bcx, val: val, kind: owned};
 }
-fn lval_temp(bcx: @block_ctxt, val: ValueRef) -> lval_result {
+fn lval_temp(bcx: block, val: ValueRef) -> lval_result {
     ret {bcx: bcx, val: val, kind: temporary};
 }
 
-fn lval_no_env(bcx: @block_ctxt, val: ValueRef, kind: lval_kind)
+fn lval_no_env(bcx: block, val: ValueRef, kind: lval_kind)
     -> lval_maybe_callee {
     ret {bcx: bcx, val: val, kind: kind, env: is_closure,
          generic: generic_none};
 }
 
-fn trans_external_path(cx: @block_ctxt, did: ast::def_id,
+fn trans_external_path(cx: block, did: ast::def_id,
                        tpt: ty::ty_param_bounds_and_ty) -> ValueRef {
     let ccx = cx.fcx.ccx;
     let name = csearch::get_symbol(ccx.sess.cstore, did);
@@ -2277,7 +2232,7 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, substs: [ty::t],
     some({llfn: lldecl, fty: mono_ty})
 }
 
-fn lval_static_fn(bcx: @block_ctxt, fn_id: ast::def_id, id: ast::node_id,
+fn lval_static_fn(bcx: block, fn_id: ast::def_id, id: ast::node_id,
                   substs: option<([ty::t], typeck::dict_res)>)
     -> lval_maybe_callee {
     let ccx = bcx_ccx(bcx);
@@ -2376,7 +2331,7 @@ fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef {
     }
 }
 
-fn trans_local_var(cx: @block_ctxt, def: ast::def) -> local_var_result {
+fn trans_local_var(cx: block, def: ast::def) -> local_var_result {
     fn take_local(table: hashmap<ast::node_id, local_val>,
                   id: ast::node_id) -> local_var_result {
         alt table.find(id) {
@@ -2410,12 +2365,12 @@ fn trans_local_var(cx: @block_ctxt, def: ast::def) -> local_var_result {
     }
 }
 
-fn trans_path(cx: @block_ctxt, id: ast::node_id)
+fn trans_path(cx: block, id: ast::node_id)
     -> lval_maybe_callee {
     ret trans_var(cx, bcx_tcx(cx).def_map.get(id), id);
 }
 
-fn trans_var(cx: @block_ctxt, def: ast::def, id: ast::node_id)
+fn trans_var(cx: block, def: ast::def, id: ast::node_id)
     -> lval_maybe_callee {
     let ccx = bcx_ccx(cx);
     alt def {
@@ -2458,7 +2413,7 @@ fn trans_var(cx: @block_ctxt, def: ast::def, id: ast::node_id)
     }
 }
 
-fn trans_rec_field(bcx: @block_ctxt, base: @ast::expr,
+fn trans_rec_field(bcx: block, base: @ast::expr,
                    field: ast::ident) -> lval_result {
     let {bcx, val} = trans_temp_expr(bcx, base);
     let {bcx, val, ty} = autoderef(bcx, val, expr_ty(bcx, base));
@@ -2473,7 +2428,7 @@ fn trans_rec_field(bcx: @block_ctxt, base: @ast::expr,
     ret {bcx: bcx, val: val, kind: owned};
 }
 
-fn trans_index(cx: @block_ctxt, ex: @ast::expr, base: @ast::expr,
+fn trans_index(cx: block, ex: @ast::expr, base: @ast::expr,
                idx: @ast::expr) -> lval_result {
     let base_ty = expr_ty(cx, base);
     let exp = trans_temp_expr(cx, base);
@@ -2501,33 +2456,28 @@ fn trans_index(cx: @block_ctxt, ex: @ast::expr, base: @ast::expr,
     maybe_name_value(bcx_ccx(cx), scaled_ix, "scaled_ix");
     let lim = tvec::get_fill(bcx, v);
     let body = tvec::get_dataptr(bcx, v, type_of_or_i8(ccx, unit_ty));
-    let bounds_check = ICmp(bcx, lib::llvm::IntULT, scaled_ix, lim);
-    let fail_cx = new_sub_block_ctxt(bcx, "fail");
-    let next_cx = new_sub_block_ctxt(bcx, "next");
-    let ncx = bcx_ccx(next_cx);
-    CondBr(bcx, bounds_check, next_cx.llbb, fail_cx.llbb);
-    // fail: bad bounds check.
-
-    trans_fail(fail_cx, some(ex.span), "bounds check");
-    let elt =
-        if check type_has_static_size(ncx, unit_ty) {
-            let elt_1 = GEP(next_cx, body, [ix_val]);
-            let llunitty = type_of(ncx, unit_ty);
-            PointerCast(next_cx, elt_1, T_ptr(llunitty))
-        } else {
-            body = PointerCast(next_cx, body, T_ptr(T_i8()));
-            GEP(next_cx, body, [scaled_ix])
-        };
-
-    ret lval_owned(next_cx, elt);
+    let bounds_check = ICmp(bcx, lib::llvm::IntUGE, scaled_ix, lim);
+    bcx = with_cond(bcx, bounds_check) {|bcx|
+        // fail: bad bounds check.
+        trans_fail(bcx, some(ex.span), "bounds check")
+    };
+    let elt = if check type_has_static_size(ccx, unit_ty) {
+        let elt_1 = GEP(bcx, body, [ix_val]);
+        let llunitty = type_of(ccx, unit_ty);
+        PointerCast(bcx, elt_1, T_ptr(llunitty))
+    } else {
+        body = PointerCast(bcx, body, T_ptr(T_i8()));
+        GEP(bcx, body, [scaled_ix])
+    };
+    ret lval_owned(bcx, elt);
 }
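The with_cond combinator used above is defined later in this diff; roughly, the
new bounds check expands to the control flow sketched below. Note the comparison
is inverted relative to the removed code (IntUGE rather than IntULT) because
with_cond runs its body when the condition holds, i.e. on the failure path.

    // illustrative expansion only, following with_cond's definition below
    let oob = ICmp(bcx, lib::llvm::IntUGE, scaled_ix, lim);
    let cond_cx = sub_block(bcx, "cond"), next_cx = sub_block(bcx, "next");
    CondBr(bcx, oob, cond_cx.llbb, next_cx.llbb);
    let after_cx = trans_fail(cond_cx, some(ex.span), "bounds check");
    if !after_cx.terminated { Br(after_cx, next_cx.llbb); }
    // translation continues in next_cx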
 
-fn expr_is_lval(bcx: @block_ctxt, e: @ast::expr) -> bool {
+fn expr_is_lval(bcx: block, e: @ast::expr) -> bool {
     let ccx = bcx_ccx(bcx);
     ty::expr_is_lval(ccx.method_map, e)
 }
 
-fn trans_callee(bcx: @block_ctxt, e: @ast::expr) -> lval_maybe_callee {
+fn trans_callee(bcx: block, e: @ast::expr) -> lval_maybe_callee {
     alt e.node {
       ast::expr_path(_) { ret trans_path(bcx, e.id); }
       ast::expr_field(base, ident, _) {
@@ -2553,7 +2503,7 @@ fn trans_callee(bcx: @block_ctxt, e: @ast::expr) -> lval_maybe_callee {
 // The additional bool returned indicates whether it's mem (that is
 // represented as an alloca or heap, hence needs a 'load' to be used as an
 // immediate).
-fn trans_lval(cx: @block_ctxt, e: @ast::expr) -> lval_result {
+fn trans_lval(cx: block, e: @ast::expr) -> lval_result {
     alt e.node {
       ast::expr_path(_) {
         let v = trans_path(cx, e.id);
@@ -2621,7 +2571,7 @@ fn lval_maybe_callee_to_lval(c: lval_maybe_callee, ty: ty::t) -> lval_result {
     }
 }
 
-fn int_cast(bcx: @block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
+fn int_cast(bcx: block, lldsttype: TypeRef, llsrctype: TypeRef,
             llsrc: ValueRef, signed: bool) -> ValueRef {
     let srcsz = llvm::LLVMGetIntTypeWidth(llsrctype);
     let dstsz = llvm::LLVMGetIntTypeWidth(lldsttype);
@@ -2634,7 +2584,7 @@ fn int_cast(bcx: @block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
         } else { ZExtOrBitCast(bcx, llsrc, lldsttype) };
 }
 
-fn float_cast(bcx: @block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
+fn float_cast(bcx: block, lldsttype: TypeRef, llsrctype: TypeRef,
               llsrc: ValueRef) -> ValueRef {
     let srcsz = lib::llvm::float_width(llsrctype);
     let dstsz = lib::llvm::float_width(lldsttype);
@@ -2645,8 +2595,8 @@ fn float_cast(bcx: @block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
         } else { llsrc };
 }
 
-fn trans_cast(cx: @block_ctxt, e: @ast::expr, id: ast::node_id,
-              dest: dest) -> @block_ctxt {
+fn trans_cast(cx: block, e: @ast::expr, id: ast::node_id,
+              dest: dest) -> block {
     let ccx = bcx_ccx(cx);
     let t_out = node_id_type(cx, id);
     alt ty::get(t_out).struct {
@@ -2718,7 +2668,7 @@ fn trans_cast(cx: @block_ctxt, e: @ast::expr, id: ast::node_id,
     ret store_in_dest(e_res.bcx, newval, dest);
 }
 
-fn trans_arg_expr(cx: @block_ctxt, arg: ty::arg, lldestty: TypeRef,
+fn trans_arg_expr(cx: block, arg: ty::arg, lldestty: TypeRef,
                   e: @ast::expr) -> result {
     let ccx = bcx_ccx(cx);
     let e_ty = expr_ty(cx, e);
@@ -2786,10 +2736,10 @@ fn trans_arg_expr(cx: @block_ctxt, arg: ty::arg, lldestty: TypeRef,
 //  - create_llargs_for_fn_args.
 //  - new_fn_ctxt
 //  - trans_args
-fn trans_args(cx: @block_ctxt, llenv: ValueRef,
+fn trans_args(cx: block, llenv: ValueRef,
               gen: generic_callee, es: [@ast::expr], fn_ty: ty::t,
               dest: dest)
-   -> {bcx: @block_ctxt,
+   -> {bcx: block,
        args: [ValueRef],
        retslot: ValueRef} {
 
@@ -2883,118 +2833,110 @@ fn trans_args(cx: @block_ctxt, llenv: ValueRef,
          retslot: llretslot};
 }
 
-fn trans_call(in_cx: @block_ctxt, f: @ast::expr,
+fn trans_call(in_cx: block, f: @ast::expr,
               args: [@ast::expr], id: ast::node_id, dest: dest)
-    -> @block_ctxt {
+    -> block {
     trans_call_inner(in_cx, expr_ty(in_cx, f),
                      {|cx| trans_callee(cx, f)}, args, id, dest)
 }
 
-fn trans_call_inner(in_cx: @block_ctxt, fn_expr_ty: ty::t,
-                    get_callee: fn(@block_ctxt) -> lval_maybe_callee,
+fn trans_call_inner(in_cx: block, fn_expr_ty: ty::t,
+                    get_callee: fn(block) -> lval_maybe_callee,
                     args: [@ast::expr], id: ast::node_id, dest: dest)
-    -> @block_ctxt {
-    // NB: 'f' isn't necessarily a function; it might be an entire self-call
-    // expression because of the hack that allows us to process self-calls
-    // with trans_call.
-    let cx = new_scope_block_ctxt(in_cx, "call");
-    Br(in_cx, cx.llbb);
-    let f_res = get_callee(cx);
-    let bcx = f_res.bcx, ccx = bcx_ccx(cx);
-
-    let faddr = f_res.val;
-    let llenv, dict_param = none;
-    alt f_res.env {
-      null_env {
-        llenv = llvm::LLVMGetUndef(T_opaque_box_ptr(ccx));
-      }
-      self_env(e, _) {
-        llenv = PointerCast(bcx, e, T_opaque_box_ptr(ccx));
-      }
-      dict_env(dict, e) {
-        llenv = PointerCast(bcx, e, T_opaque_box_ptr(ccx));
-        dict_param = some(dict);
-      }
-      is_closure {
-        // It's a closure. Have to fetch the elements
-        if f_res.kind == owned {
-            faddr = load_if_immediate(bcx, faddr, fn_expr_ty);
-        }
-        let pair = faddr;
-        faddr = GEPi(bcx, pair, [0, abi::fn_field_code]);
-        faddr = Load(bcx, faddr);
-        let llclosure = GEPi(bcx, pair, [0, abi::fn_field_box]);
-        llenv = Load(bcx, llclosure);
-      }
-    }
-
-    let ret_ty = node_id_type(bcx, id);
-    let args_res =
-        trans_args(bcx, llenv, f_res.generic, args, fn_expr_ty, dest);
-    bcx = args_res.bcx;
-    let llargs = args_res.args;
-    option::may(dict_param) {|dict| llargs = [dict] + llargs}
-    let llretslot = args_res.retslot;
-
-    /* If the block is terminated,
-       then one or more of the args has
-       type _|_. Since that means it diverges, the code
-       for the call itself is unreachable. */
-    bcx = invoke_full(bcx, faddr, llargs);
-    alt dest {
-      ignore {
-        if llvm::LLVMIsUndef(llretslot) != lib::llvm::True {
-            bcx = drop_ty(bcx, llretslot, ret_ty);
+    -> block {
+    with_scope(in_cx, "call") {|cx|
+        let f_res = get_callee(cx);
+        let bcx = f_res.bcx, ccx = bcx_ccx(cx);
+
+        let faddr = f_res.val;
+        let llenv, dict_param = none;
+        alt f_res.env {
+          null_env {
+            llenv = llvm::LLVMGetUndef(T_opaque_box_ptr(ccx));
+          }
+          self_env(e, _) {
+            llenv = PointerCast(bcx, e, T_opaque_box_ptr(ccx));
+          }
+          dict_env(dict, e) {
+            llenv = PointerCast(bcx, e, T_opaque_box_ptr(ccx));
+            dict_param = some(dict);
+          }
+          is_closure {
+            // It's a closure. Have to fetch the elements
+            if f_res.kind == owned {
+                faddr = load_if_immediate(bcx, faddr, fn_expr_ty);
+            }
+            let pair = faddr;
+            faddr = GEPi(bcx, pair, [0, abi::fn_field_code]);
+            faddr = Load(bcx, faddr);
+            let llclosure = GEPi(bcx, pair, [0, abi::fn_field_box]);
+            llenv = Load(bcx, llclosure);
+          }
         }
-      }
-      save_in(_) { } // Already saved by callee
-      by_val(cell) {
-        *cell = Load(bcx, llretslot);
-      }
-    }
 
-    let next_cx = new_sub_block_ctxt(in_cx, "next");
-    if bcx.unreachable || ty::type_is_bot(ret_ty) {
-        Unreachable(next_cx);
+        let ret_ty = node_id_type(bcx, id);
+        let args_res =
+            trans_args(bcx, llenv, f_res.generic, args, fn_expr_ty, dest);
+        bcx = args_res.bcx;
+        let llargs = args_res.args;
+        option::may(dict_param) {|dict| llargs = [dict] + llargs}
+        let llretslot = args_res.retslot;
+
+        /* If the block is terminated,
+        then one or more of the args has
+        type _|_. Since that means it diverges, the code
+        for the call itself is unreachable. */
+        bcx = invoke_full(bcx, faddr, llargs);
+        alt dest {
+          ignore {
+            if llvm::LLVMIsUndef(llretslot) != lib::llvm::True {
+                bcx = drop_ty(bcx, llretslot, ret_ty);
+            }
+          }
+          save_in(_) { } // Already saved by callee
+          by_val(cell) {
+            *cell = Load(bcx, llretslot);
+          }
+        }
+        if ty::type_is_bot(ret_ty) { Unreachable(bcx); }
+        bcx
     }
-    cleanup_and_Br(bcx, cx, next_cx.llbb);
-    ret next_cx;
 }
 
-fn invoke(bcx: @block_ctxt, llfn: ValueRef,
-          llargs: [ValueRef]) -> @block_ctxt {
+fn invoke(bcx: block, llfn: ValueRef,
+          llargs: [ValueRef]) -> block {
     ret invoke_(bcx, llfn, llargs, Invoke);
 }
 
-fn invoke_full(bcx: @block_ctxt, llfn: ValueRef, llargs: [ValueRef])
-    -> @block_ctxt {
+fn invoke_full(bcx: block, llfn: ValueRef, llargs: [ValueRef])
+    -> block {
     ret invoke_(bcx, llfn, llargs, Invoke);
 }
 
-fn invoke_(bcx: @block_ctxt, llfn: ValueRef, llargs: [ValueRef],
-           invoker: fn(@block_ctxt, ValueRef, [ValueRef],
-                       BasicBlockRef, BasicBlockRef)) -> @block_ctxt {
+fn invoke_(bcx: block, llfn: ValueRef, llargs: [ValueRef],
+           invoker: fn(block, ValueRef, [ValueRef],
+                       BasicBlockRef, BasicBlockRef)) -> block {
     // FIXME: May be worth turning this into a plain call when there are no
     // cleanups to run
     if bcx.unreachable { ret bcx; }
-    let normal_bcx = new_sub_block_ctxt(bcx, "normal return");
+    let normal_bcx = sub_block(bcx, "normal return");
     invoker(bcx, llfn, llargs, normal_bcx.llbb, get_landing_pad(bcx));
     ret normal_bcx;
 }
 
-fn get_landing_pad(bcx: @block_ctxt) -> BasicBlockRef {
-    fn in_lpad_scope_cx(bcx: @block_ctxt, f: fn(scope_info)) {
+fn get_landing_pad(bcx: block) -> BasicBlockRef {
+    fn in_lpad_scope_cx(bcx: block, f: fn(scope_info)) {
         let bcx = bcx;
         while true {
             alt bcx.kind {
-              scope_block(info) {
+              block_scope(info) {
                 if info.cleanups.len() > 0u || bcx.parent == parent_none {
                     f(info); ret;
                 }
               }
               _ {}
             }
-            bcx = alt check bcx.parent { parent_some(b) { b } };
+            bcx = block_parent(bcx);
         }
     }
 
@@ -3005,7 +2947,7 @@ fn get_landing_pad(bcx: @block_ctxt) -> BasicBlockRef {
           some(target) { cached = some(target); ret; }
           none {}
         }
-        pad_bcx = new_sub_block_ctxt(bcx, "unwind");
+        pad_bcx = sub_block(bcx, "unwind");
         info.landing_pad = some(pad_bcx.llbb);
     }
     alt cached { some(b) { ret b; } none {} } // Can't return from block above
@@ -3043,8 +2985,8 @@ fn get_landing_pad(bcx: @block_ctxt) -> BasicBlockRef {
     ret pad_bcx.llbb;
 }
 
-fn trans_tup(bcx: @block_ctxt, elts: [@ast::expr], id: ast::node_id,
-             dest: dest) -> @block_ctxt {
+fn trans_tup(bcx: block, elts: [@ast::expr], id: ast::node_id,
+             dest: dest) -> block {
     let t = node_id_type(bcx, id);
     let bcx = bcx;
     let addr = alt dest {
@@ -3068,9 +3010,9 @@ fn trans_tup(bcx: @block_ctxt, elts: [@ast::expr], id: ast::node_id,
     ret bcx;
 }
 
-fn trans_rec(bcx: @block_ctxt, fields: [ast::field],
+fn trans_rec(bcx: block, fields: [ast::field],
              base: option<@ast::expr>, id: ast::node_id,
-             dest: dest) -> @block_ctxt {
+             dest: dest) -> block {
     let t = node_id_type(bcx, id);
     let bcx = bcx;
     let addr = alt dest {
@@ -3124,8 +3066,8 @@ fn trans_rec(bcx: @block_ctxt, fields: [ast::field],
 
 // Store the result of an expression in the given memory location, ensuring
 // that nil or bot expressions get ignore rather than save_in as destination.
-fn trans_expr_save_in(bcx: @block_ctxt, e: @ast::expr, dest: ValueRef)
-    -> @block_ctxt {
+fn trans_expr_save_in(bcx: block, e: @ast::expr, dest: ValueRef)
+    -> block {
     let t = expr_ty(bcx, e);
     let do_ignore = ty::type_is_bot(t) || ty::type_is_nil(t);
     ret trans_expr(bcx, e, if do_ignore { ignore } else { save_in(dest) });
@@ -3136,7 +3078,7 @@ fn trans_expr_save_in(bcx: @block_ctxt, e: @ast::expr, dest: ValueRef)
 // field in the returned struct). For non-intermediates, use trans_expr or
 // trans_expr_save_in. For intermediates where you don't care about lval-ness,
 // use trans_temp_expr.
-fn trans_temp_lval(bcx: @block_ctxt, e: @ast::expr) -> lval_result {
+fn trans_temp_lval(bcx: block, e: @ast::expr) -> lval_result {
     let bcx = bcx;
     if expr_is_lval(bcx, e) {
         ret trans_lval(bcx, e);
@@ -3161,7 +3103,7 @@ fn trans_temp_lval(bcx: @block_ctxt, e: @ast::expr) -> lval_result {
 
 // Use only for intermediate values. See trans_expr and trans_expr_save_in for
 // expressions that must 'end up somewhere' (or get ignored).
-fn trans_temp_expr(bcx: @block_ctxt, e: @ast::expr) -> result {
+fn trans_temp_expr(bcx: block, e: @ast::expr) -> result {
     let {bcx, val, kind} = trans_temp_lval(bcx, e);
     if kind == owned {
         val = load_if_immediate(bcx, val, expr_ty(bcx, e));
@@ -3173,7 +3115,7 @@ fn trans_temp_expr(bcx: @block_ctxt, e: @ast::expr) -> result {
 // the result. Invariants:
 // - exprs returning nil or bot always get dest=ignore
 // - exprs with non-immediate type never get dest=by_val
-fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
+fn trans_expr(bcx: block, e: @ast::expr, dest: dest) -> block {
     let tcx = bcx_tcx(bcx);
     debuginfo::update_source_pos(bcx, e.span);
 
@@ -3191,13 +3133,10 @@ fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
         ret alt::trans_alt(bcx, expr, arms, dest);
       }
       ast::expr_block(blk) {
-        let sub_cx = new_real_block_ctxt(bcx, "block-expr body", blk.span);
-        Br(bcx, sub_cx.llbb);
-        let sub_bcx = trans_block(sub_cx, blk, dest);
-        let next_cx = new_sub_block_ctxt(bcx, "next");
-        if sub_bcx.unreachable { Unreachable(next_cx); }
-        cleanup_and_Br(sub_bcx, sub_cx, next_cx.llbb);
-        ret next_cx;
+        ret with_scope(bcx, "block-expr body") {|bcx|
+            bcx.block_span = some(blk.span);
+            trans_block(bcx, blk, dest)
+        };
       }
       ast::expr_rec(args, base) {
         ret trans_rec(bcx, args, base, e.id, dest);
@@ -3305,18 +3244,11 @@ fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
            check the value of that variable, doing nothing
            if it's set to false and acting like a check
            otherwise. */
-        let c =
-            get_extern_const(bcx_ccx(bcx).externs, bcx_ccx(bcx).llmod,
-                             "check_claims", T_bool());
-        let cond = Load(bcx, c);
-
-        let then_cx = new_scope_block_ctxt(bcx, "claim_then");
-        let check_cx = trans_check_expr(then_cx, a, "Claim");
-        let next_cx = new_sub_block_ctxt(bcx, "join");
-
-        CondBr(bcx, cond, then_cx.llbb, next_cx.llbb);
-        Br(check_cx, next_cx.llbb);
-        ret next_cx;
+        let c = get_extern_const(bcx_ccx(bcx).externs, bcx_ccx(bcx).llmod,
+                                 "check_claims", T_bool());
+        ret with_cond(bcx, Load(bcx, c)) {|bcx|
+            trans_check_expr(bcx, a, "Claim")
+        };
       }
       ast::expr_for(decl, seq, body) {
         assert dest == ignore;
@@ -3370,7 +3302,7 @@ fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
     }
 }
 
-fn lval_to_dps(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
+fn lval_to_dps(bcx: block, e: @ast::expr, dest: dest) -> block {
     let lv = trans_lval(bcx, e), ccx = bcx_ccx(bcx);
     let {bcx, val, kind} = lv;
     let last_use = kind == owned && ccx.last_uses.contains_key(e.id);
@@ -3400,7 +3332,7 @@ fn lval_to_dps(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
     ret bcx;
 }
 
-fn do_spill(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
+fn do_spill(cx: block, v: ValueRef, t: ty::t) -> result {
     // We have a value but we have to spill it, and root it, to pass by alias.
     let bcx = cx;
 
@@ -3419,30 +3351,30 @@ fn do_spill(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
 
 // Since this function does *not* root, it is the caller's responsibility to
 // ensure that the referent is pointed to by a root.
-fn do_spill_noroot(cx: @block_ctxt, v: ValueRef) -> ValueRef {
+fn do_spill_noroot(cx: block, v: ValueRef) -> ValueRef {
     let llptr = alloca(cx, val_ty(v));
     Store(cx, v, llptr);
     ret llptr;
 }
 
-fn spill_if_immediate(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
+fn spill_if_immediate(cx: block, v: ValueRef, t: ty::t) -> result {
     if ty::type_is_immediate(t) { ret do_spill(cx, v, t); }
     ret rslt(cx, v);
 }
 
-fn load_if_immediate(cx: @block_ctxt, v: ValueRef, t: ty::t) -> ValueRef {
+fn load_if_immediate(cx: block, v: ValueRef, t: ty::t) -> ValueRef {
     if ty::type_is_immediate(t) { ret Load(cx, v); }
     ret v;
 }
 
-fn trans_log(lvl: @ast::expr, cx: @block_ctxt, e: @ast::expr) -> @block_ctxt {
-    let ccx = bcx_ccx(cx);
-    if ty::type_is_bot(expr_ty(cx, lvl)) {
-       ret trans_expr(cx, lvl, ignore);
+fn trans_log(lvl: @ast::expr, bcx: block, e: @ast::expr) -> block {
+    let ccx = bcx_ccx(bcx);
+    if ty::type_is_bot(expr_ty(bcx, lvl)) {
+       ret trans_expr(bcx, lvl, ignore);
     }
 
     let modpath = [path_mod(ccx.link_meta.name)] +
-        vec::filter(cx.fcx.path, {|e|
+        vec::filter(bcx.fcx.path, {|e|
             alt e { path_mod(_) { true } _ { false } }
         });
     let modname = path_str(modpath);
@@ -3461,51 +3393,37 @@ fn trans_log(lvl: @ast::expr, cx: @block_ctxt, e: @ast::expr) -> @block_ctxt {
         ccx.module_data.insert(modname, global);
         global
     };
-    let level_cx = new_scope_block_ctxt(cx, "level");
-    let log_cx = new_scope_block_ctxt(cx, "log");
-    let after_cx = new_sub_block_ctxt(cx, "after");
-    let load = Load(cx, global);
-
-    Br(cx, level_cx.llbb);
-    let level_res = trans_temp_expr(level_cx, lvl);
-    let test = ICmp(level_res.bcx, lib::llvm::IntUGE,
-                    load, level_res.val);
-    let level_bcx = trans_block_cleanups(level_res.bcx, level_cx);
-
-    CondBr(level_bcx, test, log_cx.llbb, after_cx.llbb);
-    let sub = trans_temp_expr(log_cx, e);
-    let e_ty = expr_ty(cx, e);
-    let log_bcx = sub.bcx;
-
-    let r = get_tydesc_simple(log_bcx, e_ty, false);
-    log_bcx = r.bcx;
-    let lltydesc = r.val;
-
-    // Call the polymorphic log function.
-    r = spill_if_immediate(log_bcx, sub.val, e_ty);
-    log_bcx = r.bcx;
-    let llvalptr = r.val;
-    let llval_i8 = PointerCast(log_bcx, llvalptr, T_ptr(T_i8()));
-
-    Call(log_bcx, ccx.upcalls.log_type,
-         [lltydesc, llval_i8, level_res.val]);
+    let current_level = Load(bcx, global);
+    let {bcx, val: level} = with_scope_result(bcx, "level") {|bcx|
+        trans_temp_expr(bcx, lvl)
+    };
 
-    cleanup_and_Br(log_bcx, log_cx, after_cx.llbb);
-    ret after_cx;
+    with_cond(bcx, ICmp(bcx, lib::llvm::IntUGE, current_level, level)) {|bcx|
+        with_scope(bcx, "log") {|bcx|
+            let {bcx, val, _} = trans_temp_expr(bcx, e);
+            let e_ty = expr_ty(bcx, e);
+            let {bcx, val: tydesc} = get_tydesc_simple(bcx, e_ty, false);
+            // Call the polymorphic log function.
+            let {bcx, val} = spill_if_immediate(bcx, val, e_ty);
+            let val = PointerCast(bcx, val, T_ptr(T_i8()));
+            Call(bcx, ccx.upcalls.log_type, [tydesc, val, level]);
+            bcx
+        }
+    }
 }
 
-fn trans_check_expr(cx: @block_ctxt, e: @ast::expr, s: str) -> @block_ctxt {
-    let cond_res = trans_temp_expr(cx, e);
+fn trans_check_expr(bcx: block, e: @ast::expr, s: str) -> block {
     let expr_str = s + " " + expr_to_str(e) + " failed";
-    let fail_cx = new_sub_block_ctxt(cx, "fail");
-    trans_fail(fail_cx, some::<span>(e.span), expr_str);
-    let next_cx = new_sub_block_ctxt(cx, "next");
-    CondBr(cond_res.bcx, cond_res.val, next_cx.llbb, fail_cx.llbb);
-    ret next_cx;
+    let {bcx, val} = with_scope_result(bcx, "check") {|bcx|
+        trans_temp_expr(bcx, e)
+    };
+    with_cond(bcx, Not(bcx, val)) {|bcx|
+        trans_fail(bcx, some(e.span), expr_str)
+    }
 }
 
-fn trans_fail_expr(bcx: @block_ctxt, sp_opt: option<span>,
-                   fail_expr: option<@ast::expr>) -> @block_ctxt {
+fn trans_fail_expr(bcx: block, sp_opt: option<span>,
+                   fail_expr: option<@ast::expr>) -> block {
     let bcx = bcx;
     alt fail_expr {
       some(expr) {
@@ -3531,14 +3449,14 @@ fn trans_fail_expr(bcx: @block_ctxt, sp_opt: option<span>,
     }
 }
 
-fn trans_fail(bcx: @block_ctxt, sp_opt: option<span>, fail_str: str) ->
-    @block_ctxt {
+fn trans_fail(bcx: block, sp_opt: option<span>, fail_str: str) ->
+    block {
     let V_fail_str = C_cstr(bcx_ccx(bcx), fail_str);
     ret trans_fail_value(bcx, sp_opt, V_fail_str);
 }
 
-fn trans_fail_value(bcx: @block_ctxt, sp_opt: option<span>,
-                    V_fail_str: ValueRef) -> @block_ctxt {
+fn trans_fail_value(bcx: block, sp_opt: option<span>,
+                    V_fail_str: ValueRef) -> block {
     let ccx = bcx_ccx(bcx);
     let V_filename;
     let V_line;
@@ -3559,13 +3477,13 @@ fn trans_fail_value(bcx: @block_ctxt, sp_opt: option<span>,
     ret bcx;
 }
 
-fn trans_break_cont(bcx: @block_ctxt, to_end: bool)
-    -> @block_ctxt {
+fn trans_break_cont(bcx: block, to_end: bool)
+    -> block {
     // Locate closest loop block, outputting cleanup as we go.
     let unwind = bcx, target = bcx;
     while true {
         alt unwind.kind {
-          scope_block({is_loop: some({cnt, brk}), _}) {
+          block_scope({is_loop: some({cnt, brk}), _}) {
             target = if to_end {
                 brk
             } else {
@@ -3591,15 +3509,15 @@ fn trans_break_cont(bcx: @block_ctxt, to_end: bool)
     ret bcx;
 }
 
-fn trans_break(cx: @block_ctxt) -> @block_ctxt {
+fn trans_break(cx: block) -> block {
     ret trans_break_cont(cx, true);
 }
 
-fn trans_cont(cx: @block_ctxt) -> @block_ctxt {
+fn trans_cont(cx: block) -> block {
     ret trans_break_cont(cx, false);
 }
 
-fn trans_ret(bcx: @block_ctxt, e: option<@ast::expr>) -> @block_ctxt {
+fn trans_ret(bcx: block, e: option<@ast::expr>) -> block {
     let bcx = bcx;
     alt e {
       some(x) { bcx = trans_expr_save_in(bcx, x, bcx.fcx.llretptr); }
@@ -3610,17 +3528,17 @@ fn trans_ret(bcx: @block_ctxt, e: option<@ast::expr>) -> @block_ctxt {
     ret bcx;
 }
 
-fn build_return(bcx: @block_ctxt) { Br(bcx, bcx_fcx(bcx).llreturn); }
+fn build_return(bcx: block) { Br(bcx, bcx_fcx(bcx).llreturn); }
 
-// fn trans_be(cx: &@block_ctxt, e: &@ast::expr) -> result {
-fn trans_be(cx: @block_ctxt, e: @ast::expr) : ast_util::is_call_expr(e) ->
-   @block_ctxt {
+// fn trans_be(cx: &block, e: &@ast::expr) -> result {
+fn trans_be(cx: block, e: @ast::expr) : ast_util::is_call_expr(e) ->
+   block {
     // FIXME: Turn this into a real tail call once
     // calling convention issues are settled
     ret trans_ret(cx, some(e));
 }
 
-fn init_local(bcx: @block_ctxt, local: @ast::local) -> @block_ctxt {
+fn init_local(bcx: block, local: @ast::local) -> block {
     let ty = node_id_type(bcx, local.node.id);
     let llptr = alt bcx.fcx.lllocals.find(local.node.id) {
       some(local_mem(v)) { v }
@@ -3665,8 +3583,8 @@ fn init_local(bcx: @block_ctxt, local: @ast::local) -> @block_ctxt {
     ret alt::bind_irrefutable_pat(bcx, local.node.pat, llptr, false);
 }
 
-fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t)
-    -> @block_ctxt {
+fn zero_alloca(cx: block, llptr: ValueRef, t: ty::t)
+    -> block {
     let bcx = cx;
     let ccx = bcx_ccx(cx);
     if check type_has_static_size(ccx, t) {
@@ -3689,7 +3607,7 @@ fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t)
     ret bcx;
 }
 
-fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt {
+fn trans_stmt(cx: block, s: ast::stmt) -> block {
     #debug["trans_expr(%s)", stmt_to_str(s)];
 
     if (!bcx_ccx(cx).sess.opts.no_asm_comments) {
@@ -3724,8 +3642,8 @@ fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt {
 
 // You probably don't want to use this one. See the
 // next three functions instead.
-fn new_block_ctxt(cx: @fn_ctxt, parent: block_parent, kind: block_kind,
-                  name: str, block_span: option<span>) -> @block_ctxt {
+fn new_block(cx: @fn_ctxt, parent: block_parent, kind: block_kind,
+             name: str, block_span: option<span>) -> block {
     let s = "";
     if cx.ccx.sess.opts.save_temps || cx.ccx.sess.opts.debuginfo {
         s = cx.ccx.names(name);
@@ -3738,7 +3656,7 @@ fn new_block_ctxt(cx: @fn_ctxt, parent: block_parent, kind: block_kind,
                 mutable unreachable: false,
                 parent: parent,
                 kind: kind,
-                block_span: block_span,
+                mutable block_span: block_span,
                 fcx: cx};
     alt parent {
       parent_some(cx) {
@@ -3749,32 +3667,26 @@ fn new_block_ctxt(cx: @fn_ctxt, parent: block_parent, kind: block_kind,
     ret bcx;
 }
 
-fn simple_scope_block() -> block_kind {
-    scope_block({is_loop: none, mutable cleanups: [],
+fn simple_block_scope() -> block_kind {
+    block_scope({is_loop: none, mutable cleanups: [],
                  mutable cleanup_paths: [], mutable landing_pad: none})
 }
 
 // Use this when you're at the top block of a function or the like.
-fn new_top_block_ctxt(fcx: @fn_ctxt, sp: option<span>) -> @block_ctxt {
-    ret new_block_ctxt(fcx, parent_none, simple_scope_block(),
-                       "function top level", sp);
-}
-
-// Use this when you're at a curly-brace or similar lexical scope.
-fn new_scope_block_ctxt(bcx: @block_ctxt, n: str) -> @block_ctxt {
-    ret new_block_ctxt(bcx.fcx, parent_some(bcx), simple_scope_block(),
-                       n, none);
+fn top_scope_block(fcx: @fn_ctxt, sp: option<span>) -> block {
+    ret new_block(fcx, parent_none, simple_block_scope(),
+                  "function top level", sp);
 }
 
-fn new_real_block_ctxt(bcx: @block_ctxt, n: str, sp: span) -> @block_ctxt {
-    ret new_block_ctxt(bcx.fcx, parent_some(bcx), simple_scope_block(),
-                       n, some(sp));
+fn scope_block(bcx: block, n: str) -> block {
+    ret new_block(bcx.fcx, parent_some(bcx), simple_block_scope(),
+                  n, none);
 }
 
-fn new_loop_scope_block_ctxt(bcx: @block_ctxt, _cont: loop_cont,
-                             _break: @block_ctxt, n: str, sp: span)
-    -> @block_ctxt {
-    ret new_block_ctxt(bcx.fcx, parent_some(bcx), scope_block({
+fn loop_scope_block(bcx: block, _cont: loop_cont,
+                    _break: block, n: str, sp: span)
+    -> block {
+    ret new_block(bcx.fcx, parent_some(bcx), block_scope({
         is_loop: some({cnt: _cont, brk: _break}),
         mutable cleanups: [],
         mutable cleanup_paths: [],
@@ -3784,34 +3696,34 @@ fn new_loop_scope_block_ctxt(bcx: @block_ctxt, _cont: loop_cont,
 
 
 // Use this when you're making a general CFG BB within a scope.
-fn new_sub_block_ctxt(bcx: @block_ctxt, n: str) -> @block_ctxt {
-    ret new_block_ctxt(bcx.fcx, parent_some(bcx), non_scope_block, n, none);
+fn sub_block(bcx: block, n: str) -> block {
+    ret new_block(bcx.fcx, parent_some(bcx), block_non_scope, n, none);
 }
 
-fn new_raw_block_ctxt(fcx: @fn_ctxt, llbb: BasicBlockRef) -> @block_ctxt {
+fn raw_block(fcx: @fn_ctxt, llbb: BasicBlockRef) -> block {
     ret @{llbb: llbb,
           mutable terminated: false,
           mutable unreachable: false,
           parent: parent_none,
-          kind: non_scope_block,
-          block_span: none,
+          kind: block_non_scope,
+          mutable block_span: none,
           fcx: fcx};
 }
 
 
 // trans_block_cleanups: Go through all the cleanups attached to this
-// block_ctxt and execute them.
+// block and execute them.
 //
 // When translating a block that introduces new variables during its scope, we
 // need to make sure those variables go out of scope when the block ends.  We
 // do that by running a 'cleanup' function for each variable.
 // trans_block_cleanups runs all the cleanup functions for the block.
-fn trans_block_cleanups(bcx: @block_ctxt, cleanup_cx: @block_ctxt) ->
-   @block_ctxt {
+fn trans_block_cleanups(bcx: block, cleanup_cx: block) ->
+   block {
     if bcx.unreachable { ret bcx; }
     let bcx = bcx;
     alt check cleanup_cx.kind {
-      scope_block({cleanups, _}) {
+      block_scope({cleanups, _}) {
         vec::riter(cleanups) {|cu|
             alt cu { clean(cfn) | clean_temp(_, cfn) { bcx = cfn(bcx); } }
         }
@@ -3823,19 +3735,19 @@ fn trans_block_cleanups(bcx: @block_ctxt, cleanup_cx: @block_ctxt) ->
 // In the last argument, some(block) means jump to this block, and none means
 // this is a landing pad and leaving should be accomplished with a resume
 // instruction.
-fn cleanup_and_leave(bcx: @block_ctxt, upto: option<BasicBlockRef>,
+fn cleanup_and_leave(bcx: block, upto: option<BasicBlockRef>,
                      leave: option<BasicBlockRef>) {
     let cur = bcx, bcx = bcx;
     while true {
         alt cur.kind {
-          scope_block(info) if info.cleanups.len() > 0u {
+          block_scope(info) if info.cleanups.len() > 0u {
             for exists in info.cleanup_paths {
                 if exists.target == leave {
                     Br(bcx, exists.dest);
                     ret;
                 }
             }
-            let sub_cx = new_sub_block_ctxt(bcx, "cleanup");
+            let sub_cx = sub_block(bcx, "cleanup");
             Br(bcx, sub_cx.llbb);
             info.cleanup_paths += [{target: leave, dest: sub_cx.llbb}];
             bcx = trans_block_cleanups(sub_cx, cur);
@@ -3857,18 +3769,43 @@ fn cleanup_and_leave(bcx: @block_ctxt, upto: option<BasicBlockRef>,
     }
 }
 
-fn cleanup_and_Br(bcx: @block_ctxt, upto: @block_ctxt,
+fn cleanup_and_Br(bcx: block, upto: block,
                   target: BasicBlockRef) {
     cleanup_and_leave(bcx, some(upto.llbb), some(target));
 }
 
-fn trans_fn_cleanups(fcx: @fn_ctxt, cx: @block_ctxt) {
-    alt fcx.llobstacktoken {
-      some(lltoken_) {
-        let lltoken = lltoken_; // satisfy alias checker
+fn leave_block(bcx: block, out_of: block) -> block {
+    let next_cx = sub_block(block_parent(out_of), "next");
+    if bcx.unreachable { Unreachable(next_cx); }
+    cleanup_and_Br(bcx, out_of, next_cx.llbb);
+    next_cx
+}
+
+fn with_scope(bcx: block, name: str, f: fn(block) -> block) -> block {
+    let scope_cx = scope_block(bcx, name);
+    Br(bcx, scope_cx.llbb);
+    leave_block(f(scope_cx), scope_cx)
+}
+
+fn with_scope_result(bcx: block, name: str, f: fn(block) -> result)
+    -> result {
+    let scope_cx = scope_block(bcx, name);
+    Br(bcx, scope_cx.llbb);
+    let {bcx, val} = f(scope_cx);
+    {bcx: leave_block(bcx, scope_cx), val: val}
+}
+
+fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block {
+    let next_cx = sub_block(bcx, "next"), cond_cx = sub_block(bcx, "cond");
+    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb);
+    let after_cx = f(cond_cx);
+    if !after_cx.terminated { Br(after_cx, next_cx.llbb); }
+    next_cx
+}
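Together these cover the common shapes in this file: with_scope for a lexical
scope whose cleanups must run on exit, with_scope_result when the scoped
computation also yields a value, and with_cond for code guarded by a runtime
flag. A small usage sketch in the same style as trans_log and trans_check_expr
above (the function name and block labels are illustrative only):

    fn example(bcx: block, e: @ast::expr, body: ast::blk) -> block {
        // evaluate the condition expression in its own cleanup scope
        let {bcx, val} = with_scope_result(bcx, "cond scope") {|bcx|
            trans_temp_expr(bcx, e)
        };
        // translate the body only when the condition holds, in a fresh scope
        with_cond(bcx, val) {|bcx|
            with_scope(bcx, "body scope") {|bcx|
                trans_block(bcx, body, ignore)
            }
        }
    }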
+
+fn trans_fn_cleanups(fcx: @fn_ctxt, cx: block) {
+    option::may(fcx.llobstacktoken) {|lltoken|
         Call(cx, fcx_ccx(fcx).upcalls.dynastack_free, [lltoken]);
-      }
-      none {/* nothing to do */ }
     }
 }
 
@@ -3888,17 +3825,17 @@ fn block_locals(b: ast::blk, it: fn(@ast::local)) {
     }
 }
 
-fn alloc_ty(cx: @block_ctxt, t: ty::t) -> result {
+fn alloc_ty(cx: block, t: ty::t) -> result {
     let bcx = cx, ccx = bcx_ccx(cx);
     let llty = type_of(ccx, t);
     let val = if type_has_static_size(ccx, t) {
         alloca(bcx, llty)
     } else {
         // NB: we have to run this particular 'size_of' in a
-        // block_ctxt built on the llderivedtydescs block for the fn,
+        // block built on the llderivedtydescs block for the fn,
         // so that the size dominates the array_alloca that
         // comes next.
-        let n = size_of(new_raw_block_ctxt(cx.fcx, cx.fcx.llderivedtydescs),
+        let n = size_of(raw_block(cx.fcx, cx.fcx.llderivedtydescs),
                         t);
         bcx.fcx.llderivedtydescs = n.bcx.llbb;
         PointerCast(bcx, dynastack_alloca(bcx, T_i8(), n.val, t), T_ptr(llty))
@@ -3914,7 +3851,7 @@ fn alloc_ty(cx: @block_ctxt, t: ty::t) -> result {
     ret rslt(cx, val);
 }
 
-fn alloc_local(cx: @block_ctxt, local: @ast::local) -> @block_ctxt {
+fn alloc_local(cx: block, local: @ast::local) -> block {
     let t = node_id_type(cx, local.node.id);
     let p = normalize_pat(bcx_tcx(cx), local.node.pat);
     let is_simple = alt p.node {
@@ -3945,8 +3882,8 @@ fn alloc_local(cx: @block_ctxt, local: @ast::local) -> @block_ctxt {
     ret r.bcx;
 }
 
-fn trans_block(bcx: @block_ctxt, b: ast::blk, dest: dest)
-    -> @block_ctxt {
+fn trans_block(bcx: block, b: ast::blk, dest: dest)
+    -> block {
     let bcx = bcx;
     block_locals(b) {|local| bcx = alloc_local(bcx, local); };
     for s: @ast::stmt in b.node.stmts {
@@ -4084,8 +4021,8 @@ fn create_llargs_for_fn_args(cx: @fn_ctxt, ty_self: self_arg,
     }
 }
 
-fn copy_args_to_allocas(fcx: @fn_ctxt, bcx: @block_ctxt, args: [ast::arg],
-                        arg_tys: [ty::arg]) -> @block_ctxt {
+fn copy_args_to_allocas(fcx: @fn_ctxt, bcx: block, args: [ast::arg],
+                        arg_tys: [ty::arg]) -> block {
     let tcx = bcx_tcx(bcx);
     let arg_n: uint = 0u, bcx = bcx;
     let epic_fail = fn@() -> ! {
@@ -4123,16 +4060,16 @@ fn copy_args_to_allocas(fcx: @fn_ctxt, bcx: @block_ctxt, args: [ast::arg],
 // lldynamicallocas -> lltop edges, and builds the return block.
 fn finish_fn(fcx: @fn_ctxt, lltop: BasicBlockRef) {
     tie_up_header_blocks(fcx, lltop);
-    let ret_cx = new_raw_block_ctxt(fcx, fcx.llreturn);
+    let ret_cx = raw_block(fcx, fcx.llreturn);
     trans_fn_cleanups(fcx, ret_cx);
     RetVoid(ret_cx);
 }
 
 fn tie_up_header_blocks(fcx: @fn_ctxt, lltop: BasicBlockRef) {
-    Br(new_raw_block_ctxt(fcx, fcx.llstaticallocas), fcx.llloadenv);
-    Br(new_raw_block_ctxt(fcx, fcx.llloadenv), fcx.llderivedtydescs_first);
-    Br(new_raw_block_ctxt(fcx, fcx.llderivedtydescs), fcx.lldynamicallocas);
-    Br(new_raw_block_ctxt(fcx, fcx.lldynamicallocas), lltop);
+    Br(raw_block(fcx, fcx.llstaticallocas), fcx.llloadenv);
+    Br(raw_block(fcx, fcx.llloadenv), fcx.llderivedtydescs_first);
+    Br(raw_block(fcx, fcx.llderivedtydescs), fcx.lldynamicallocas);
+    Br(raw_block(fcx, fcx.lldynamicallocas), lltop);
 }
 
 enum self_arg { impl_self(ty::t), no_self, }
@@ -4154,7 +4091,7 @@ fn trans_closure(ccx: @crate_ctxt, path: path, decl: ast::fn_decl,
 
     // Create the first basic block in the function and keep a handle on it to
     //  pass to finish_fn later.
-    let bcx_top = new_top_block_ctxt(fcx, some(body.span)), bcx = bcx_top;
+    let bcx_top = top_scope_block(fcx, some(body.span)), bcx = bcx_top;
     let lltop = bcx.llbb;
     let block_ty = node_id_type(bcx, body.node.id);
 
@@ -4208,7 +4145,7 @@ fn trans_res_ctor(ccx: @crate_ctxt, path: path, dtor: ast::fn_decl,
     let fcx = new_fn_ctxt_w_id(ccx, path, llfndecl, ctor_id,
                                param_substs, none);
     create_llargs_for_fn_args(fcx, no_self, dtor.inputs, ty_params);
-    let bcx = new_top_block_ctxt(fcx, none), lltop = bcx.llbb;
+    let bcx = top_scope_block(fcx, none), lltop = bcx.llbb;
     let fty = node_id_type(bcx, ctor_id);
     let arg_t = ty::ty_fn_args(fty)[0].ty;
     let tup_t = ty::mk_tup(ccx.tcx, [ty::mk_int(ccx.tcx), arg_t]);
@@ -4261,7 +4198,7 @@ fn trans_enum_variant(ccx: @crate_ctxt, enum_id: ast::node_id,
         })
       }
     };
-    let bcx = new_top_block_ctxt(fcx, none), lltop = bcx.llbb;
+    let bcx = top_scope_block(fcx, none), lltop = bcx.llbb;
     let arg_tys = ty::ty_fn_args(node_id_type(bcx, variant.node.id));
     bcx = copy_args_to_allocas(fcx, bcx, fn_args, arg_tys);
 
@@ -4538,7 +4475,7 @@ fn create_main_wrapper(ccx: @crate_ctxt, sp: span, main_llfn: ValueRef,
 
         let fcx = new_fn_ctxt(ccx, [], llfdecl, none);
 
-        let bcx = new_top_block_ctxt(fcx, none);
+        let bcx = top_scope_block(fcx, none);
         let lltop = bcx.llbb;
 
         let lloutputarg = llvm::LLVMGetParam(llfdecl, 0 as c_uint);
@@ -4588,14 +4525,14 @@ fn create_main_wrapper(ccx: @crate_ctxt, sp: span, main_llfn: ValueRef,
 // Create a /real/ closure: this is like create_fn_pair, but creates a
 // a fn value on the stack with a specified environment (which need not be
 // on the stack).
-fn create_real_fn_pair(cx: @block_ctxt, llfnty: TypeRef, llfn: ValueRef,
+fn create_real_fn_pair(cx: block, llfnty: TypeRef, llfn: ValueRef,
                        llenvptr: ValueRef) -> ValueRef {
     let pair = alloca(cx, T_fn_pair(bcx_ccx(cx), llfnty));
     fill_fn_pair(cx, pair, llfn, llenvptr);
     ret pair;
 }
 
-fn fill_fn_pair(bcx: @block_ctxt, pair: ValueRef, llfn: ValueRef,
+fn fill_fn_pair(bcx: block, pair: ValueRef, llfn: ValueRef,
                 llenvptr: ValueRef) {
     let ccx = bcx_ccx(bcx);
     let code_cell = GEPi(bcx, pair, [0, abi::fn_field_code]);
@@ -4778,7 +4715,7 @@ fn trans_constants(ccx: @crate_ctxt, crate: @ast::crate) {
     }));
 }
 
-fn vp2i(cx: @block_ctxt, v: ValueRef) -> ValueRef {
+fn vp2i(cx: block, v: ValueRef) -> ValueRef {
     let ccx = bcx_ccx(cx);
     ret PtrToInt(cx, v, ccx.int_type);
 }
@@ -4839,7 +4776,7 @@ fn declare_dbg_intrinsics(llmod: ModuleRef,
     intrinsics.insert("llvm.dbg.value", value);
 }
 
-fn trap(bcx: @block_ctxt) {
+fn trap(bcx: block) {
     let v: [ValueRef] = [];
     alt bcx_ccx(bcx).intrinsics.find("llvm.trap") {
       some(x) { Call(bcx, x, v); }
diff --git a/src/comp/middle/trans/build.rs b/src/comp/middle/trans/build.rs
index a65636d0e62..95738143150 100644
--- a/src/comp/middle/trans/build.rs
+++ b/src/comp/middle/trans/build.rs
@@ -6,10 +6,10 @@ import codemap::span;
 import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
 import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
                    CallConv};
-import common::{block_ctxt, T_ptr, T_nil, T_i8, T_i1, T_void,
+import common::{block, T_ptr, T_nil, T_i8, T_i1, T_void,
                 T_fn, val_ty, bcx_ccx, C_i32, val_str};
 
-fn B(cx: @block_ctxt) -> BuilderRef {
+fn B(cx: block) -> BuilderRef {
     let b = *cx.fcx.ccx.builder;
     llvm::LLVMPositionBuilderAtEnd(b, cx.llbb);
     ret b;
@@ -23,21 +23,21 @@ fn B(cx: @block_ctxt) -> BuilderRef {
 // for (fail/break/ret statements, call to diverging functions, etc), and
 // further instructions to the block should simply be ignored.
 
-fn RetVoid(cx: @block_ctxt) {
+fn RetVoid(cx: block) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
     llvm::LLVMBuildRetVoid(B(cx));
 }
 
-fn Ret(cx: @block_ctxt, V: ValueRef) {
+fn Ret(cx: block, V: ValueRef) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
     llvm::LLVMBuildRet(B(cx), V);
 }
 
-fn AggregateRet(cx: @block_ctxt, RetVals: [ValueRef]) {
+fn AggregateRet(cx: block, RetVals: [ValueRef]) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
@@ -47,14 +47,14 @@ fn AggregateRet(cx: @block_ctxt, RetVals: [ValueRef]) {
     }
 }
 
-fn Br(cx: @block_ctxt, Dest: BasicBlockRef) {
+fn Br(cx: block, Dest: BasicBlockRef) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
     llvm::LLVMBuildBr(B(cx), Dest);
 }
 
-fn CondBr(cx: @block_ctxt, If: ValueRef, Then: BasicBlockRef,
+fn CondBr(cx: block, If: ValueRef, Then: BasicBlockRef,
           Else: BasicBlockRef) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
@@ -62,7 +62,7 @@ fn CondBr(cx: @block_ctxt, If: ValueRef, Then: BasicBlockRef,
     llvm::LLVMBuildCondBr(B(cx), If, Then, Else);
 }
 
-fn Switch(cx: @block_ctxt, V: ValueRef, Else: BasicBlockRef, NumCases: uint)
+fn Switch(cx: block, V: ValueRef, Else: BasicBlockRef, NumCases: uint)
     -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     assert !cx.terminated;
@@ -75,7 +75,7 @@ fn AddCase(S: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef) {
     llvm::LLVMAddCase(S, OnVal, Dest);
 }
 
-fn IndirectBr(cx: @block_ctxt, Addr: ValueRef, NumDests: uint) {
+fn IndirectBr(cx: block, Addr: ValueRef, NumDests: uint) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
@@ -89,7 +89,7 @@ fn noname() -> sbuf unsafe {
     ret unsafe::reinterpret_cast(ptr::addr_of(cnull));
 }
 
-fn Invoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
+fn Invoke(cx: block, Fn: ValueRef, Args: [ValueRef],
           Then: BasicBlockRef, Catch: BasicBlockRef) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
@@ -105,7 +105,7 @@ fn Invoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
     }
 }
 
-fn FastInvoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
+fn FastInvoke(cx: block, Fn: ValueRef, Args: [ValueRef],
               Then: BasicBlockRef, Catch: BasicBlockRef) {
     if cx.unreachable { ret; }
     assert (!cx.terminated);
@@ -118,7 +118,7 @@ fn FastInvoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
     }
 }
 
-fn Unreachable(cx: @block_ctxt) {
+fn Unreachable(cx: block) {
     if cx.unreachable { ret; }
     cx.unreachable = true;
     if !cx.terminated { llvm::LLVMBuildUnreachable(B(cx)); }
@@ -129,188 +129,188 @@ fn _Undef(val: ValueRef) -> ValueRef {
 }
 
 /* Arithmetic */
-fn Add(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Add(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildAdd(B(cx), LHS, RHS, noname());
 }
 
-fn NSWAdd(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NSWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNSWAdd(B(cx), LHS, RHS, noname());
 }
 
-fn NUWAdd(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NUWAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNUWAdd(B(cx), LHS, RHS, noname());
 }
 
-fn FAdd(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn FAdd(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildFAdd(B(cx), LHS, RHS, noname());
 }
 
-fn Sub(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Sub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildSub(B(cx), LHS, RHS, noname());
 }
 
-fn NSWSub(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NSWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNSWSub(B(cx), LHS, RHS, noname());
 }
 
-fn NUWSub(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NUWSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNUWSub(B(cx), LHS, RHS, noname());
 }
 
-fn FSub(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn FSub(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildFSub(B(cx), LHS, RHS, noname());
 }
 
-fn Mul(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Mul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildMul(B(cx), LHS, RHS, noname());
 }
 
-fn NSWMul(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NSWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNSWMul(B(cx), LHS, RHS, noname());
 }
 
-fn NUWMul(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn NUWMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildNUWMul(B(cx), LHS, RHS, noname());
 }
 
-fn FMul(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn FMul(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildFMul(B(cx), LHS, RHS, noname());
 }
 
-fn UDiv(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn UDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildUDiv(B(cx), LHS, RHS, noname());
 }
 
-fn SDiv(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn SDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildSDiv(B(cx), LHS, RHS, noname());
 }
 
-fn ExactSDiv(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn ExactSDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildExactSDiv(B(cx), LHS, RHS, noname());
 }
 
-fn FDiv(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn FDiv(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildFDiv(B(cx), LHS, RHS, noname());
 }
 
-fn URem(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn URem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildURem(B(cx), LHS, RHS, noname());
 }
 
-fn SRem(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn SRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildSRem(B(cx), LHS, RHS, noname());
 }
 
-fn FRem(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn FRem(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildFRem(B(cx), LHS, RHS, noname());
 }
 
-fn Shl(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Shl(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildShl(B(cx), LHS, RHS, noname());
 }
 
-fn LShr(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn LShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildLShr(B(cx), LHS, RHS, noname());
 }
 
-fn AShr(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn AShr(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildAShr(B(cx), LHS, RHS, noname());
 }
 
-fn And(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn And(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildAnd(B(cx), LHS, RHS, noname());
 }
 
-fn Or(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Or(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildOr(B(cx), LHS, RHS, noname());
 }
 
-fn Xor(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn Xor(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildXor(B(cx), LHS, RHS, noname());
 }
 
-fn BinOp(cx: @block_ctxt, Op: Opcode, LHS: ValueRef, RHS: ValueRef) ->
+fn BinOp(cx: block, Op: Opcode, LHS: ValueRef, RHS: ValueRef) ->
    ValueRef {
     if cx.unreachable { ret _Undef(LHS); }
     ret llvm::LLVMBuildBinOp(B(cx), Op, LHS, RHS, noname());
 }
 
-fn Neg(cx: @block_ctxt, V: ValueRef) -> ValueRef {
+fn Neg(cx: block, V: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     ret llvm::LLVMBuildNeg(B(cx), V, noname());
 }
 
-fn NSWNeg(cx: @block_ctxt, V: ValueRef) -> ValueRef {
+fn NSWNeg(cx: block, V: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     ret llvm::LLVMBuildNSWNeg(B(cx), V, noname());
 }
 
-fn NUWNeg(cx: @block_ctxt, V: ValueRef) -> ValueRef {
+fn NUWNeg(cx: block, V: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     ret llvm::LLVMBuildNUWNeg(B(cx), V, noname());
 }
-fn FNeg(cx: @block_ctxt, V: ValueRef) -> ValueRef {
+fn FNeg(cx: block, V: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     ret llvm::LLVMBuildFNeg(B(cx), V, noname());
 }
 
-fn Not(cx: @block_ctxt, V: ValueRef) -> ValueRef {
+fn Not(cx: block, V: ValueRef) -> ValueRef {
     if cx.unreachable { ret _Undef(V); }
     ret llvm::LLVMBuildNot(B(cx), V, noname());
 }
 
 /* Memory */
-fn Malloc(cx: @block_ctxt, Ty: TypeRef) -> ValueRef {
+fn Malloc(cx: block, Ty: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
     ret llvm::LLVMBuildMalloc(B(cx), Ty, noname());
 }
 
-fn ArrayMalloc(cx: @block_ctxt, Ty: TypeRef, Val: ValueRef) -> ValueRef {
+fn ArrayMalloc(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
     ret llvm::LLVMBuildArrayMalloc(B(cx), Ty, Val, noname());
 }
 
-fn Alloca(cx: @block_ctxt, Ty: TypeRef) -> ValueRef {
+fn Alloca(cx: block, Ty: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
     ret llvm::LLVMBuildAlloca(B(cx), Ty, noname());
 }
 
-fn ArrayAlloca(cx: @block_ctxt, Ty: TypeRef, Val: ValueRef) -> ValueRef {
+fn ArrayAlloca(cx: block, Ty: TypeRef, Val: ValueRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(Ty)); }
     ret llvm::LLVMBuildArrayAlloca(B(cx), Ty, Val, noname());
 }
 
-fn Free(cx: @block_ctxt, PointerVal: ValueRef) {
+fn Free(cx: block, PointerVal: ValueRef) {
     if cx.unreachable { ret; }
     llvm::LLVMBuildFree(B(cx), PointerVal);
 }
 
-fn Load(cx: @block_ctxt, PointerVal: ValueRef) -> ValueRef {
+fn Load(cx: block, PointerVal: ValueRef) -> ValueRef {
     let ccx = cx.fcx.ccx;
     if cx.unreachable {
         let ty = val_ty(PointerVal);
@@ -321,12 +321,12 @@ fn Load(cx: @block_ctxt, PointerVal: ValueRef) -> ValueRef {
     ret llvm::LLVMBuildLoad(B(cx), PointerVal, noname());
 }
 
-fn Store(cx: @block_ctxt, Val: ValueRef, Ptr: ValueRef) {
+fn Store(cx: block, Val: ValueRef, Ptr: ValueRef) {
     if cx.unreachable { ret; }
     llvm::LLVMBuildStore(B(cx), Val, Ptr);
 }
 
-fn GEP(cx: @block_ctxt, Pointer: ValueRef, Indices: [ValueRef]) -> ValueRef {
+fn GEP(cx: block, Pointer: ValueRef, Indices: [ValueRef]) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
     unsafe {
         ret llvm::LLVMBuildGEP(B(cx), Pointer, vec::to_ptr(Indices),
@@ -336,13 +336,13 @@ fn GEP(cx: @block_ctxt, Pointer: ValueRef, Indices: [ValueRef]) -> ValueRef {
 
 // Simple wrapper around GEP that takes an array of ints and wraps them
 // in C_i32()
-fn GEPi(cx: @block_ctxt, base: ValueRef, ixs: [int]) -> ValueRef {
+fn GEPi(cx: block, base: ValueRef, ixs: [int]) -> ValueRef {
     let v: [ValueRef] = [];
     for i: int in ixs { v += [C_i32(i as i32)]; }
     ret InBoundsGEP(cx, base, v);
 }
 
-fn InBoundsGEP(cx: @block_ctxt, Pointer: ValueRef, Indices: [ValueRef]) ->
+fn InBoundsGEP(cx: block, Pointer: ValueRef, Indices: [ValueRef]) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
     unsafe {
@@ -353,142 +353,142 @@ fn InBoundsGEP(cx: @block_ctxt, Pointer: ValueRef, Indices: [ValueRef]) ->
     }
 }
 
-fn StructGEP(cx: @block_ctxt, Pointer: ValueRef, Idx: uint) -> ValueRef {
+fn StructGEP(cx: block, Pointer: ValueRef, Idx: uint) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_nil())); }
     ret llvm::LLVMBuildStructGEP(B(cx), Pointer, Idx as c_uint, noname());
 }
 
-fn GlobalString(cx: @block_ctxt, _Str: sbuf) -> ValueRef {
+fn GlobalString(cx: block, _Str: sbuf) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
     ret llvm::LLVMBuildGlobalString(B(cx), _Str, noname());
 }
 
-fn GlobalStringPtr(cx: @block_ctxt, _Str: sbuf) -> ValueRef {
+fn GlobalStringPtr(cx: block, _Str: sbuf) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_ptr(T_i8())); }
     ret llvm::LLVMBuildGlobalStringPtr(B(cx), _Str, noname());
 }
 
 /* Casts */
-fn Trunc(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn Trunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildTrunc(B(cx), Val, DestTy, noname());
 }
 
-fn ZExt(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn ZExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildZExt(B(cx), Val, DestTy, noname());
 }
 
-fn SExt(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn SExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildSExt(B(cx), Val, DestTy, noname());
 }
 
-fn FPToUI(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn FPToUI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildFPToUI(B(cx), Val, DestTy, noname());
 }
 
-fn FPToSI(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn FPToSI(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildFPToSI(B(cx), Val, DestTy, noname());
 }
 
-fn UIToFP(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn UIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildUIToFP(B(cx), Val, DestTy, noname());
 }
 
-fn SIToFP(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn SIToFP(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildSIToFP(B(cx), Val, DestTy, noname());
 }
 
-fn FPTrunc(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn FPTrunc(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildFPTrunc(B(cx), Val, DestTy, noname());
 }
 
-fn FPExt(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn FPExt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildFPExt(B(cx), Val, DestTy, noname());
 }
 
-fn PtrToInt(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn PtrToInt(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildPtrToInt(B(cx), Val, DestTy, noname());
 }
 
-fn IntToPtr(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn IntToPtr(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildIntToPtr(B(cx), Val, DestTy, noname());
 }
 
-fn BitCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn BitCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildBitCast(B(cx), Val, DestTy, noname());
 }
 
-fn ZExtOrBitCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) ->
+fn ZExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildZExtOrBitCast(B(cx), Val, DestTy, noname());
 }
 
-fn SExtOrBitCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) ->
+fn SExtOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildSExtOrBitCast(B(cx), Val, DestTy, noname());
 }
 
-fn TruncOrBitCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) ->
+fn TruncOrBitCast(cx: block, Val: ValueRef, DestTy: TypeRef) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildTruncOrBitCast(B(cx), Val, DestTy, noname());
 }
 
-fn Cast(cx: @block_ctxt, Op: Opcode, Val: ValueRef, DestTy: TypeRef,
+fn Cast(cx: block, Op: Opcode, Val: ValueRef, DestTy: TypeRef,
         _Name: sbuf) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildCast(B(cx), Op, Val, DestTy, noname());
 }
 
-fn PointerCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn PointerCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildPointerCast(B(cx), Val, DestTy, noname());
 }
 
-fn IntCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn IntCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildIntCast(B(cx), Val, DestTy, noname());
 }
 
-fn FPCast(cx: @block_ctxt, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
+fn FPCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
     ret llvm::LLVMBuildFPCast(B(cx), Val, DestTy, noname());
 }
 
 
 /* Comparisons */
-fn ICmp(cx: @block_ctxt, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef)
+fn ICmp(cx: block, Op: IntPredicate, LHS: ValueRef, RHS: ValueRef)
     -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
     ret llvm::LLVMBuildICmp(B(cx), Op as c_uint, LHS, RHS, noname());
 }
 
-fn FCmp(cx: @block_ctxt, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef)
+fn FCmp(cx: block, Op: RealPredicate, LHS: ValueRef, RHS: ValueRef)
     -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
     ret llvm::LLVMBuildFCmp(B(cx), Op as c_uint, LHS, RHS, noname());
 }
 
 /* Miscellaneous instructions */
-fn EmptyPhi(cx: @block_ctxt, Ty: TypeRef) -> ValueRef {
+fn EmptyPhi(cx: block, Ty: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
     ret llvm::LLVMBuildPhi(B(cx), Ty, noname());
 }
 
-fn Phi(cx: @block_ctxt, Ty: TypeRef, vals: [ValueRef], bbs: [BasicBlockRef])
+fn Phi(cx: block, Ty: TypeRef, vals: [ValueRef], bbs: [BasicBlockRef])
    -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
     assert vals.len() == bbs.len();
@@ -509,7 +509,7 @@ fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
     }
 }
 
-fn _UndefReturn(cx: @block_ctxt, Fn: ValueRef) -> ValueRef {
+fn _UndefReturn(cx: block, Fn: ValueRef) -> ValueRef {
     let ccx = cx.fcx.ccx;
     let ty = val_ty(Fn);
     let retty = if llvm::LLVMGetTypeKind(ty) == 8 as c_int {
@@ -517,7 +517,7 @@ fn _UndefReturn(cx: @block_ctxt, Fn: ValueRef) -> ValueRef {
     ret llvm::LLVMGetUndef(retty);
 }
 
-fn add_span_comment(bcx: @block_ctxt, sp: span, text: str) {
+fn add_span_comment(bcx: block, sp: span, text: str) {
     let ccx = bcx_ccx(bcx);
     if (!ccx.sess.opts.no_asm_comments) {
         let s = text + " (" + codemap::span_to_str(sp, ccx.sess.codemap)
@@ -527,7 +527,7 @@ fn add_span_comment(bcx: @block_ctxt, sp: span, text: str) {
     }
 }
 
-fn add_comment(bcx: @block_ctxt, text: str) {
+fn add_comment(bcx: block, text: str) {
     let ccx = bcx_ccx(bcx);
     if (!ccx.sess.opts.no_asm_comments) {
         check str::is_not_empty("$");
@@ -543,7 +543,7 @@ fn add_comment(bcx: @block_ctxt, text: str) {
     }
 }
 
-fn Call(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
+fn Call(cx: block, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
     if cx.unreachable { ret _UndefReturn(cx, Fn); }
     unsafe {
         ret llvm::LLVMBuildCall(B(cx), Fn, vec::to_ptr(Args),
@@ -551,7 +551,7 @@ fn Call(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
     }
 }
 
-fn FastCall(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
+fn FastCall(cx: block, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
     if cx.unreachable { ret _UndefReturn(cx, Fn); }
     unsafe {
         let v = llvm::LLVMBuildCall(B(cx), Fn, vec::to_ptr(Args),
@@ -561,7 +561,7 @@ fn FastCall(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef]) -> ValueRef {
     }
 }
 
-fn CallWithConv(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
+fn CallWithConv(cx: block, Fn: ValueRef, Args: [ValueRef],
                 Conv: CallConv) -> ValueRef {
     if cx.unreachable { ret _UndefReturn(cx, Fn); }
     unsafe {
@@ -572,64 +572,64 @@ fn CallWithConv(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
     }
 }
 
-fn Select(cx: @block_ctxt, If: ValueRef, Then: ValueRef, Else: ValueRef) ->
+fn Select(cx: block, If: ValueRef, Then: ValueRef, Else: ValueRef) ->
    ValueRef {
     if cx.unreachable { ret _Undef(Then); }
     ret llvm::LLVMBuildSelect(B(cx), If, Then, Else, noname());
 }
 
-fn VAArg(cx: @block_ctxt, list: ValueRef, Ty: TypeRef) -> ValueRef {
+fn VAArg(cx: block, list: ValueRef, Ty: TypeRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(Ty); }
     ret llvm::LLVMBuildVAArg(B(cx), list, Ty, noname());
 }
 
-fn ExtractElement(cx: @block_ctxt, VecVal: ValueRef, Index: ValueRef) ->
+fn ExtractElement(cx: block, VecVal: ValueRef, Index: ValueRef) ->
    ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
     ret llvm::LLVMBuildExtractElement(B(cx), VecVal, Index, noname());
 }
 
-fn InsertElement(cx: @block_ctxt, VecVal: ValueRef, EltVal: ValueRef,
+fn InsertElement(cx: block, VecVal: ValueRef, EltVal: ValueRef,
                  Index: ValueRef) {
     if cx.unreachable { ret; }
     llvm::LLVMBuildInsertElement(B(cx), VecVal, EltVal, Index, noname());
 }
 
-fn ShuffleVector(cx: @block_ctxt, V1: ValueRef, V2: ValueRef,
+fn ShuffleVector(cx: block, V1: ValueRef, V2: ValueRef,
                  Mask: ValueRef) {
     if cx.unreachable { ret; }
     llvm::LLVMBuildShuffleVector(B(cx), V1, V2, Mask, noname());
 }
 
-fn ExtractValue(cx: @block_ctxt, AggVal: ValueRef, Index: uint) -> ValueRef {
+fn ExtractValue(cx: block, AggVal: ValueRef, Index: uint) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_nil()); }
     ret llvm::LLVMBuildExtractValue(B(cx), AggVal, Index as c_uint, noname());
 }
 
-fn InsertValue(cx: @block_ctxt, AggVal: ValueRef, EltVal: ValueRef,
+fn InsertValue(cx: block, AggVal: ValueRef, EltVal: ValueRef,
                Index: uint) {
     if cx.unreachable { ret; }
     llvm::LLVMBuildInsertValue(B(cx), AggVal, EltVal, Index as c_uint,
                                noname());
 }
 
-fn IsNull(cx: @block_ctxt, Val: ValueRef) -> ValueRef {
+fn IsNull(cx: block, Val: ValueRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
     ret llvm::LLVMBuildIsNull(B(cx), Val, noname());
 }
 
-fn IsNotNull(cx: @block_ctxt, Val: ValueRef) -> ValueRef {
+fn IsNotNull(cx: block, Val: ValueRef) -> ValueRef {
     if cx.unreachable { ret llvm::LLVMGetUndef(T_i1()); }
     ret llvm::LLVMBuildIsNotNull(B(cx), Val, noname());
 }
 
-fn PtrDiff(cx: @block_ctxt, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
+fn PtrDiff(cx: block, LHS: ValueRef, RHS: ValueRef) -> ValueRef {
     let ccx = cx.fcx.ccx;
     if cx.unreachable { ret llvm::LLVMGetUndef(ccx.int_type); }
     ret llvm::LLVMBuildPtrDiff(B(cx), LHS, RHS, noname());
 }
 
-fn Trap(cx: @block_ctxt) {
+fn Trap(cx: block) {
     if cx.unreachable { ret; }
     let b = B(cx);
     let BB: BasicBlockRef = llvm::LLVMGetInsertBlock(b);
@@ -646,18 +646,18 @@ fn Trap(cx: @block_ctxt) {
     }
 }
 
-fn LandingPad(cx: @block_ctxt, Ty: TypeRef, PersFn: ValueRef,
+fn LandingPad(cx: block, Ty: TypeRef, PersFn: ValueRef,
               NumClauses: uint) -> ValueRef {
     assert !cx.terminated && !cx.unreachable;
     ret llvm::LLVMBuildLandingPad(B(cx), Ty, PersFn,
                                   NumClauses as c_uint, noname());
 }
 
-fn SetCleanup(_cx: @block_ctxt, LandingPad: ValueRef) {
+fn SetCleanup(_cx: block, LandingPad: ValueRef) {
     llvm::LLVMSetCleanup(LandingPad, lib::llvm::True);
 }
 
-fn Resume(cx: @block_ctxt, Exn: ValueRef) -> ValueRef {
+fn Resume(cx: block, Exn: ValueRef) -> ValueRef {
     assert (!cx.terminated);
     cx.terminated = true;
     ret llvm::LLVMBuildResume(B(cx), Exn);
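
(Aside, not part of the patch.) Every builder wrapper in build.rs touched above follows the same guard idiom: when the current block has already been marked unreachable, the wrapper emits no LLVM instruction and instead returns an undef value of the appropriate type (or simply returns, for the void builders). A minimal sketch of that idiom, reusing only helpers that already appear in the hunks above (B, noname, llvm::LLVMGetUndef); the name ExampleCast is made up for illustration:

    fn ExampleCast(cx: block, Val: ValueRef, DestTy: TypeRef) -> ValueRef {
        // If control flow can never reach this block, emit nothing and
        // hand back an undef of the destination type instead.
        if cx.unreachable { ret llvm::LLVMGetUndef(DestTy); }
        ret llvm::LLVMBuildBitCast(B(cx), Val, DestTy, noname());
    }

The real wrappers (Trunc, BitCast, ICmp, Call, and so on) differ only in which LLVMBuild* call they forward to and in which undef stand-in they return.
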
diff --git a/src/comp/middle/trans/closure.rs b/src/comp/middle/trans/closure.rs
index 2f311137bf6..bb07da27ca8 100644
--- a/src/comp/middle/trans/closure.rs
+++ b/src/comp/middle/trans/closure.rs
@@ -162,15 +162,15 @@ fn mk_closure_tys(tcx: ty::ctxt,
     ret (cdata_ty, bound_tys);
 }
 
-fn allocate_cbox(bcx: @block_ctxt,
+fn allocate_cbox(bcx: block,
                  ck: ty::closure_kind,
                  cdata_ty: ty::t)
-    -> (@block_ctxt, ValueRef, [ValueRef]) {
+    -> (block, ValueRef, [ValueRef]) {
 
     // let ccx = bcx_ccx(bcx);
     let ccx = bcx_ccx(bcx), tcx = ccx.tcx;
 
-    fn nuke_ref_count(bcx: @block_ctxt, box: ValueRef) {
+    fn nuke_ref_count(bcx: block, box: ValueRef) {
         // Initialize ref count to arbitrary value for debugging:
         let ccx = bcx_ccx(bcx);
         let box = PointerCast(bcx, box, T_opaque_box_ptr(ccx));
@@ -179,10 +179,10 @@ fn allocate_cbox(bcx: @block_ctxt,
         Store(bcx, rc, ref_cnt);
     }
 
-    fn store_uniq_tydesc(bcx: @block_ctxt,
+    fn store_uniq_tydesc(bcx: block,
                          cdata_ty: ty::t,
                          box: ValueRef,
-                         &ti: option::t<@tydesc_info>) -> @block_ctxt {
+                         &ti: option::t<@tydesc_info>) -> block {
         let ccx = bcx_ccx(bcx);
         let bound_tydesc = GEPi(bcx, box, [0, abi::box_field_tydesc]);
         let {bcx, val: td} = base::get_tydesc(bcx, cdata_ty, true, ti);
@@ -224,10 +224,10 @@ fn allocate_cbox(bcx: @block_ctxt,
 type closure_result = {
     llbox: ValueRef,     // llvalue of ptr to closure
     cdata_ty: ty::t,      // type of the closure data
-    bcx: @block_ctxt     // final bcx
+    bcx: block     // final bcx
 };
 
-fn cast_if_we_can(bcx: @block_ctxt, llbox: ValueRef, t: ty::t) -> ValueRef {
+fn cast_if_we_can(bcx: block, llbox: ValueRef, t: ty::t) -> ValueRef {
     let ccx = bcx_ccx(bcx);
     if check type_has_static_size(ccx, t) {
         let llty = type_of(ccx, t);
@@ -242,12 +242,12 @@ fn cast_if_we_can(bcx: @block_ctxt, llbox: ValueRef, t: ty::t) -> ValueRef {
 // heap allocated closure that copies the upvars into environment.
 // Otherwise, it is stack allocated and copies pointers to the upvars.
 fn store_environment(
-    bcx: @block_ctxt, lltyparams: [fn_ty_param],
+    bcx: block, lltyparams: [fn_ty_param],
     bound_values: [environment_value],
     ck: ty::closure_kind)
     -> closure_result {
 
-    fn maybe_clone_tydesc(bcx: @block_ctxt,
+    fn maybe_clone_tydesc(bcx: block,
                           ck: ty::closure_kind,
                           td: ValueRef) -> ValueRef {
         ret alt ck {
@@ -349,7 +349,7 @@ fn store_environment(
 
 // Given a context and a list of upvars, build a closure. This just
 // collects the upvars and packages them up for store_environment.
-fn build_closure(bcx0: @block_ctxt,
+fn build_closure(bcx0: block,
                  cap_vars: [capture::capture_var],
                  ck: ty::closure_kind)
     -> closure_result {
@@ -386,12 +386,12 @@ fn build_closure(bcx0: @block_ctxt,
 // Given an enclosing block context, a new function context, a closure type,
 // and a list of upvars, generate code to load and populate the environment
 // with the upvars and type descriptors.
-fn load_environment(enclosing_cx: @block_ctxt,
+fn load_environment(enclosing_cx: block,
                     fcx: @fn_ctxt,
                     cdata_ty: ty::t,
                     cap_vars: [capture::capture_var],
                     ck: ty::closure_kind) {
-    let bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
+    let bcx = raw_block(fcx, fcx.llloadenv);
 
     // Load a pointer to the closure data, skipping over the box header:
     let llcdata = base::opaque_box_body(bcx, cdata_ty, fcx.llenv);
@@ -440,14 +440,14 @@ fn load_environment(enclosing_cx: @block_ctxt,
     }
 }
 
-fn trans_expr_fn(bcx: @block_ctxt,
+fn trans_expr_fn(bcx: block,
                  proto: ast::proto,
                  decl: ast::fn_decl,
                  body: ast::blk,
                  sp: span,
                  id: ast::node_id,
                  cap_clause: ast::capture_clause,
-                 dest: dest) -> @block_ctxt {
+                 dest: dest) -> block {
     if dest == ignore { ret bcx; }
     let ccx = bcx_ccx(bcx), bcx = bcx;
     let fty = node_id_type(bcx, id);
@@ -482,17 +482,17 @@ fn trans_expr_fn(bcx: @block_ctxt,
     ret bcx;
 }
 
-fn trans_bind(cx: @block_ctxt, f: @ast::expr, args: [option<@ast::expr>],
-              id: ast::node_id, dest: dest) -> @block_ctxt {
+fn trans_bind(cx: block, f: @ast::expr, args: [option<@ast::expr>],
+              id: ast::node_id, dest: dest) -> block {
     let f_res = trans_callee(cx, f);
     ret trans_bind_1(cx, expr_ty(cx, f), f_res, args,
                      node_id_type(cx, id), dest);
 }
 
-fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
+fn trans_bind_1(cx: block, outgoing_fty: ty::t,
                 f_res: lval_maybe_callee,
                 args: [option<@ast::expr>], pair_ty: ty::t,
-                dest: dest) -> @block_ctxt {
+                dest: dest) -> block {
     let ccx = bcx_ccx(cx);
     let bound: [@ast::expr] = [];
     for argopt: option<@ast::expr> in args {
@@ -572,33 +572,19 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
     ret bcx;
 }
 
-fn make_null_test(
-    in_bcx: @block_ctxt,
-    ptr: ValueRef,
-    blk: fn(@block_ctxt) -> @block_ctxt)
-    -> @block_ctxt {
-    let not_null_bcx = new_sub_block_ctxt(in_bcx, "not null");
-    let next_bcx = new_sub_block_ctxt(in_bcx, "next");
-    let null_test = IsNull(in_bcx, ptr);
-    CondBr(in_bcx, null_test, next_bcx.llbb, not_null_bcx.llbb);
-    let not_null_bcx = blk(not_null_bcx);
-    Br(not_null_bcx, next_bcx.llbb);
-    ret next_bcx;
-}
-
 fn make_fn_glue(
-    cx: @block_ctxt,
+    cx: block,
     v: ValueRef,
     t: ty::t,
-    glue_fn: fn@(@block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt)
-    -> @block_ctxt {
+    glue_fn: fn@(block, v: ValueRef, t: ty::t) -> block)
+    -> block {
     let bcx = cx;
     let tcx = bcx_tcx(cx);
 
-    let fn_env = fn@(ck: ty::closure_kind) -> @block_ctxt {
+    let fn_env = fn@(ck: ty::closure_kind) -> block {
         let box_cell_v = GEPi(cx, v, [0, abi::fn_field_box]);
         let box_ptr_v = Load(cx, box_cell_v);
-        make_null_test(cx, box_ptr_v) {|bcx|
+        with_cond(cx, IsNotNull(cx, box_ptr_v)) {|bcx|
             let closure_ty = ty::mk_opaque_closure_ptr(tcx, ck);
             glue_fn(bcx, box_cell_v, closure_ty)
         }
@@ -615,10 +601,10 @@ fn make_fn_glue(
 }
 
 fn make_opaque_cbox_take_glue(
-    bcx: @block_ctxt,
+    bcx: block,
     ck: ty::closure_kind,
     cboxptr: ValueRef)     // ptr to ptr to the opaque closure
-    -> @block_ctxt {
+    -> block {
     // Easy cases:
     alt ck {
       ty::ck_block { ret bcx; }
@@ -631,7 +617,7 @@ fn make_opaque_cbox_take_glue(
     let tcx = bcx_tcx(bcx);
     let llopaquecboxty = T_opaque_box_ptr(ccx);
     let cbox_in = Load(bcx, cboxptr);
-    make_null_test(bcx, cbox_in) {|bcx|
+    with_cond(bcx, IsNotNull(bcx, cbox_in)) {|bcx|
         // Load the size from the type descr found in the cbox
         let cbox_in = PointerCast(bcx, cbox_in, llopaquecboxty);
         let tydescptr = GEPi(bcx, cbox_in, [0, abi::box_field_tydesc]);
@@ -663,10 +649,10 @@ fn make_opaque_cbox_take_glue(
 }
 
 fn make_opaque_cbox_drop_glue(
-    bcx: @block_ctxt,
+    bcx: block,
     ck: ty::closure_kind,
     cboxptr: ValueRef)     // ptr to the opaque closure
-    -> @block_ctxt {
+    -> block {
     alt ck {
       ty::ck_block { bcx }
       ty::ck_box {
@@ -681,10 +667,10 @@ fn make_opaque_cbox_drop_glue(
 }
 
 fn make_opaque_cbox_free_glue(
-    bcx: @block_ctxt,
+    bcx: block,
     ck: ty::closure_kind,
     cbox: ValueRef)     // ptr to the opaque closure
-    -> @block_ctxt {
+    -> block {
     alt ck {
       ty::ck_block { ret bcx; }
       ty::ck_box | ty::ck_uniq { /* hard cases: */ }
@@ -692,7 +678,7 @@ fn make_opaque_cbox_free_glue(
 
     let ccx = bcx_ccx(bcx);
     let tcx = bcx_tcx(bcx);
-    make_null_test(bcx, cbox) {|bcx|
+    with_cond(bcx, IsNotNull(bcx, cbox)) {|bcx|
         // Load the type descr found in the cbox
         let lltydescty = T_ptr(ccx.tydesc_type);
         let cbox = PointerCast(bcx, cbox, T_opaque_cbox_ptr(ccx));
@@ -783,13 +769,13 @@ fn trans_bind_thunk(ccx: @crate_ctxt,
     // Create a new function context and block context for the thunk, and hold
     // onto a pointer to the first block in the function for later use.
     let fcx = new_fn_ctxt(ccx, path, llthunk, none);
-    let bcx = new_top_block_ctxt(fcx, none);
+    let bcx = top_scope_block(fcx, none);
     let lltop = bcx.llbb;
     // Since we might need to construct derived tydescs that depend on
     // our bound tydescs, we need to load tydescs out of the environment
     // before derived tydescs are constructed. To do this, we load them
     // in the load_env block.
-    let l_bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
+    let l_bcx = raw_block(fcx, fcx.llloadenv);
 
     // The 'llenv' that will arrive in the thunk we're creating is an
     // environment that will contain the values of its arguments and a
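
(Aside, not part of the patch.) The closure.rs hunks above replace the hand-rolled make_null_test with calls of the form with_cond(cx, IsNotNull(cx, ptr)) {|bcx| ... }, so the body runs only when the pointer is non-null. The definition of with_cond lives in base.rs and is not shown in this part of the diff; a plausible sketch, assuming it is assembled from the same pieces the removed make_null_test used (sub_block, CondBr, Br), might look like:

    // Hypothetical reconstruction for illustration only; the real definition
    // is in base.rs elsewhere in this patch.
    fn with_cond(bcx: block, val: ValueRef, f: fn(block) -> block) -> block {
        let next_cx = sub_block(bcx, "next");
        let cond_cx = sub_block(bcx, "cond true");
        // Run f only when val is true, then fall through to next_cx.
        CondBr(bcx, val, cond_cx.llbb, next_cx.llbb);
        let after_cx = f(cond_cx);
        Br(after_cx, next_cx.llbb);
        ret next_cx;
    }

Unlike make_null_test, the polarity of the test is supplied by the caller (here via IsNotNull) rather than hard-coded, which is what lets the same combinator serve the free-glue and take-glue cases in closure.rs, tvec.rs, and uniq.rs below.
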
diff --git a/src/comp/middle/trans/common.rs b/src/comp/middle/trans/common.rs
index 00587c03144..6de08e2cfef 100644
--- a/src/comp/middle/trans/common.rs
+++ b/src/comp/middle/trans/common.rs
@@ -218,8 +218,8 @@ fn warn_not_to_commit(ccx: @crate_ctxt, msg: str) {
 }
 
 enum cleanup {
-    clean(fn@(@block_ctxt) -> @block_ctxt),
-    clean_temp(ValueRef, fn@(@block_ctxt) -> @block_ctxt),
+    clean(fn@(block) -> block),
+    clean_temp(ValueRef, fn@(block) -> block),
 }
 
 // Used to remember and reuse existing cleanup paths
@@ -232,17 +232,17 @@ fn scope_clean_changed(info: scope_info) {
     info.landing_pad = none;
 }
 
-fn add_clean(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
+fn add_clean(cx: block, val: ValueRef, ty: ty::t) {
     if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
     in_scope_cx(cx) {|info|
         info.cleanups += [clean(bind drop_ty(_, val, ty))];
         scope_clean_changed(info);
     }
 }
-fn add_clean_temp(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
+fn add_clean_temp(cx: block, val: ValueRef, ty: ty::t) {
     if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
-    fn do_drop(bcx: @block_ctxt, val: ValueRef, ty: ty::t) ->
-       @block_ctxt {
+    fn do_drop(bcx: block, val: ValueRef, ty: ty::t) ->
+       block {
         if ty::type_is_immediate(ty) {
             ret base::drop_ty_immediate(bcx, val, ty);
         } else {
@@ -254,14 +254,14 @@ fn add_clean_temp(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
         scope_clean_changed(info);
     }
 }
-fn add_clean_temp_mem(cx: @block_ctxt, val: ValueRef, ty: ty::t) {
+fn add_clean_temp_mem(cx: block, val: ValueRef, ty: ty::t) {
     if !ty::type_needs_drop(bcx_tcx(cx), ty) { ret; }
     in_scope_cx(cx) {|info|
         info.cleanups += [clean_temp(val, bind drop_ty(_, val, ty))];
         scope_clean_changed(info);
     }
 }
-fn add_clean_free(cx: @block_ctxt, ptr: ValueRef, shared: bool) {
+fn add_clean_free(cx: block, ptr: ValueRef, shared: bool) {
     let free_fn = if shared { bind base::trans_shared_free(_, ptr) }
                   else { bind base::trans_free(_, ptr) };
     in_scope_cx(cx) {|info|
@@ -274,7 +274,7 @@ fn add_clean_free(cx: @block_ctxt, ptr: ValueRef, shared: bool) {
 // to a system where we can also cancel the cleanup on local variables, but
 // this will be more involved. For now, we simply zero out the local, and the
 // drop glue checks whether it is zero.
-fn revoke_clean(cx: @block_ctxt, val: ValueRef) {
+fn revoke_clean(cx: block, val: ValueRef) {
     in_scope_cx(cx) {|info|
         let i = 0u;
         for cu in info.cleanups {
@@ -317,18 +317,18 @@ enum block_kind {
     // cleaned up. May correspond to an actual block in the language, but also
     // to an implicit scope, for example, calls introduce an implicit scope in
     // which the arguments are evaluated and cleaned up.
-    scope_block(scope_info),
+    block_scope(scope_info),
     // A non-scope block is a basic block created as a translation artifact
     // from translating code that expresses conditional logic rather than by
     // explicit { ... } block structure in the source language.  It's called a
     // non-scope block because it doesn't introduce a new variable scope.
-    non_scope_block,
+    block_non_scope,
 }
 
-enum loop_cont { cont_self, cont_other(@block_ctxt), }
+enum loop_cont { cont_self, cont_other(block), }
 
 type scope_info = {
-    is_loop: option<{cnt: loop_cont, brk: @block_ctxt}>,
+    is_loop: option<{cnt: loop_cont, brk: block}>,
     // A list of functions that must be run at when leaving this
     // block, cleaning up any variables that were introduced in the
     // block.
@@ -345,7 +345,7 @@ type scope_info = {
 // code.  Each basic block we generate is attached to a function, typically
 // with many basic blocks per function.  All the basic blocks attached to a
 // function are organized as a directed graph.
-type block_ctxt = {
+type block = @{
     // The BasicBlockRef returned from a call to
     // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
     // block to the function pointed to by llfn.  We insert
@@ -359,7 +359,7 @@ type block_ctxt = {
     kind: block_kind,
     // The source span where the block came from, if it is a block that
     // actually appears in the source code.
-    block_span: option<span>,
+    mutable block_span: option<span>,
     // The function context for the function to which this block is
     // attached.
     fcx: @fn_ctxt
@@ -367,12 +367,12 @@ type block_ctxt = {
 
 // FIXME: we should be able to use option<@block_parent> here but
 // the infinite-enum check in rustboot gets upset.
-enum block_parent { parent_none, parent_some(@block_ctxt), }
+enum block_parent { parent_none, parent_some(block), }
 
-type result = {bcx: @block_ctxt, val: ValueRef};
-type result_t = {bcx: @block_ctxt, val: ValueRef, ty: ty::t};
+type result = {bcx: block, val: ValueRef};
+type result_t = {bcx: block, val: ValueRef, ty: ty::t};
 
-fn rslt(bcx: @block_ctxt, val: ValueRef) -> result {
+fn rslt(bcx: block, val: ValueRef) -> result {
     {bcx: bcx, val: val}
 }
 
@@ -393,23 +393,27 @@ fn struct_elt(llstructty: TypeRef, n: uint) -> TypeRef unsafe {
     ret llvm::LLVMGetElementType(elt_tys[n]);
 }
 
-fn in_scope_cx(cx: @block_ctxt, f: fn(scope_info)) {
+fn in_scope_cx(cx: block, f: fn(scope_info)) {
     let cur = cx;
     while true {
         alt cur.kind {
-          scope_block(info) { f(info); ret; }
+          block_scope(info) { f(info); ret; }
           _ {}
         }
-        cur = alt check cur.parent { parent_some(b) { b } };
+        cur = block_parent(cur);
     }
 }
 
+fn block_parent(cx: block) -> block {
+    alt check cx.parent { parent_some(b) { b } }
+}
+
 // Accessors
 // TODO: When we have overloading, simplify these names!
 
-pure fn bcx_tcx(bcx: @block_ctxt) -> ty::ctxt { ret bcx.fcx.ccx.tcx; }
-pure fn bcx_ccx(bcx: @block_ctxt) -> @crate_ctxt { ret bcx.fcx.ccx; }
-pure fn bcx_fcx(bcx: @block_ctxt) -> @fn_ctxt { ret bcx.fcx; }
+pure fn bcx_tcx(bcx: block) -> ty::ctxt { ret bcx.fcx.ccx.tcx; }
+pure fn bcx_ccx(bcx: block) -> @crate_ctxt { ret bcx.fcx.ccx; }
+pure fn bcx_fcx(bcx: block) -> @fn_ctxt { ret bcx.fcx; }
 pure fn fcx_ccx(fcx: @fn_ctxt) -> @crate_ctxt { ret fcx.ccx; }
 pure fn fcx_tcx(fcx: @fn_ctxt) -> ty::ctxt { ret fcx.ccx.tcx; }
 pure fn ccx_tcx(ccx: @crate_ctxt) -> ty::ctxt { ret ccx.tcx; }
@@ -838,7 +842,7 @@ fn C_shape(ccx: @crate_ctxt, bytes: [u8]) -> ValueRef {
 }
 
 
-pure fn valid_variant_index(ix: uint, cx: @block_ctxt, enum_id: ast::def_id,
+pure fn valid_variant_index(ix: uint, cx: block, enum_id: ast::def_id,
                             variant_id: ast::def_id) -> bool {
 
     // Handwaving: it's ok to pretend this code is referentially
@@ -882,17 +886,17 @@ fn hash_mono_id(&&mi: mono_id) -> uint {
     h
 }
 
-fn umax(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
+fn umax(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
     let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
     ret build::Select(cx, cond, b, a);
 }
 
-fn umin(cx: @block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
+fn umin(cx: block, a: ValueRef, b: ValueRef) -> ValueRef {
     let cond = build::ICmp(cx, lib::llvm::IntULT, a, b);
     ret build::Select(cx, cond, a, b);
 }
 
-fn align_to(cx: @block_ctxt, off: ValueRef, align: ValueRef) -> ValueRef {
+fn align_to(cx: block, off: ValueRef, align: ValueRef) -> ValueRef {
     let mask = build::Sub(cx, align, C_int(bcx_ccx(cx), 1));
     let bumped = build::Add(cx, off, mask);
     ret build::And(cx, bumped, build::Not(cx, mask));
@@ -910,7 +914,7 @@ fn path_str(p: path) -> str {
     r
 }
 
-fn node_id_type(bcx: @block_ctxt, id: ast::node_id) -> ty::t {
+fn node_id_type(bcx: block, id: ast::node_id) -> ty::t {
     let tcx = bcx_tcx(bcx);
     let t = ty::node_id_to_type(tcx, id);
     alt bcx.fcx.param_substs {
@@ -918,10 +922,10 @@ fn node_id_type(bcx: @block_ctxt, id: ast::node_id) -> ty::t {
       _ { t }
     }
 }
-fn expr_ty(bcx: @block_ctxt, ex: @ast::expr) -> ty::t {
+fn expr_ty(bcx: block, ex: @ast::expr) -> ty::t {
     node_id_type(bcx, ex.id)
 }
-fn node_id_type_params(bcx: @block_ctxt, id: ast::node_id) -> [ty::t] {
+fn node_id_type_params(bcx: block, id: ast::node_id) -> [ty::t] {
     let tcx = bcx_tcx(bcx);
     let params = ty::node_id_to_type_params(tcx, id);
     alt bcx.fcx.param_substs {
diff --git a/src/comp/middle/trans/impl.rs b/src/comp/middle/trans/impl.rs
index e3970a3297d..57721682982 100644
--- a/src/comp/middle/trans/impl.rs
+++ b/src/comp/middle/trans/impl.rs
@@ -59,14 +59,14 @@ fn trans_impl(ccx: @crate_ctxt, path: path, name: ast::ident,
     }
 }
 
-fn trans_self_arg(bcx: @block_ctxt, base: @ast::expr) -> result {
+fn trans_self_arg(bcx: block, base: @ast::expr) -> result {
     let basety = expr_ty(bcx, base);
     let m_by_ref = ast::expl(ast::by_ref);
     trans_arg_expr(bcx, {mode: m_by_ref, ty: basety},
                    T_ptr(type_of_or_i8(bcx_ccx(bcx), basety)), base)
 }
 
-fn trans_method_callee(bcx: @block_ctxt, callee_id: ast::node_id,
+fn trans_method_callee(bcx: block, callee_id: ast::node_id,
                        self: @ast::expr, origin: typeck::method_origin)
     -> lval_maybe_callee {
     alt origin {
@@ -91,7 +91,7 @@ fn trans_method_callee(bcx: @block_ctxt, callee_id: ast::node_id,
 }
 
 // Method callee where the method is statically known
-fn trans_static_callee(bcx: @block_ctxt, callee_id: ast::node_id,
+fn trans_static_callee(bcx: block, callee_id: ast::node_id,
                        base: @ast::expr, did: ast::def_id,
                        substs: option<([ty::t], typeck::dict_res)>)
     -> lval_maybe_callee {
@@ -107,7 +107,7 @@ fn wrapper_fn_ty(ccx: @crate_ctxt, dict_ty: TypeRef, fty: ty::t,
     {ty: fty, llty: T_fn([dict_ty] + inputs, output)}
 }
 
-fn trans_vtable_callee(bcx: @block_ctxt, env: callee_env, dict: ValueRef,
+fn trans_vtable_callee(bcx: block, env: callee_env, dict: ValueRef,
                        callee_id: ast::node_id, iface_id: ast::def_id,
                        n_method: uint) -> lval_maybe_callee {
     let bcx = bcx, ccx = bcx_ccx(bcx), tcx = ccx.tcx;
@@ -140,7 +140,7 @@ fn trans_vtable_callee(bcx: @block_ctxt, env: callee_env, dict: ValueRef,
      generic: generic}
 }
 
-fn trans_monomorphized_callee(bcx: @block_ctxt, callee_id: ast::node_id,
+fn trans_monomorphized_callee(bcx: block, callee_id: ast::node_id,
                               base: @ast::expr, iface_id: ast::def_id,
                               n_method: uint, n_param: uint, n_bound: uint,
                               substs: param_substs) -> lval_maybe_callee {
@@ -172,7 +172,7 @@ fn trans_monomorphized_callee(bcx: @block_ctxt, callee_id: ast::node_id,
 
 
 // Method callee where the dict comes from a type param
-fn trans_param_callee(bcx: @block_ctxt, callee_id: ast::node_id,
+fn trans_param_callee(bcx: block, callee_id: ast::node_id,
                       base: @ast::expr, iface_id: ast::def_id, n_method: uint,
                       n_param: uint, n_bound: uint) -> lval_maybe_callee {
     let {bcx, val} = trans_self_arg(bcx, base);
@@ -182,7 +182,7 @@ fn trans_param_callee(bcx: @block_ctxt, callee_id: ast::node_id,
 }
 
 // Method callee where the dict comes from a boxed iface
-fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
+fn trans_iface_callee(bcx: block, callee_id: ast::node_id,
                       base: @ast::expr, iface_id: ast::def_id, n_method: uint)
     -> lval_maybe_callee {
     let {bcx, val} = trans_temp_expr(bcx, base);
@@ -266,12 +266,12 @@ fn resolve_dicts_in_fn_ctxt(fcx: @fn_ctxt, dicts: typeck::dict_res)
 }
 
 fn trans_wrapper(ccx: @crate_ctxt, pt: path, llfty: TypeRef,
-                 fill: fn(ValueRef, @block_ctxt) -> @block_ctxt)
+                 fill: fn(ValueRef, block) -> block)
     -> ValueRef {
     let name = link::mangle_internal_name_by_path(ccx, pt);
     let llfn = decl_internal_cdecl_fn(ccx.llmod, name, llfty);
     let fcx = new_fn_ctxt(ccx, [], llfn, none);
-    let bcx = new_top_block_ctxt(fcx, none), lltop = bcx.llbb;
+    let bcx = top_scope_block(fcx, none), lltop = bcx.llbb;
     let bcx = fill(llfn, bcx);
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -396,7 +396,7 @@ fn dict_is_static(tcx: ty::ctxt, origin: typeck::dict_origin) -> bool {
     }
 }
 
-fn get_dict(bcx: @block_ctxt, origin: typeck::dict_origin) -> result {
+fn get_dict(bcx: block, origin: typeck::dict_origin) -> result {
     let ccx = bcx_ccx(bcx);
     alt origin {
       typeck::dict_static(impl_did, tys, sub_origins) {
@@ -453,7 +453,7 @@ fn dict_id(tcx: ty::ctxt, origin: typeck::dict_origin) -> dict_id {
     }
 }
 
-fn get_static_dict(bcx: @block_ctxt, origin: typeck::dict_origin)
+fn get_static_dict(bcx: block, origin: typeck::dict_origin)
     -> ValueRef {
     let ccx = bcx_ccx(bcx);
     let id = dict_id(ccx.tcx, origin);
@@ -474,8 +474,8 @@ fn get_static_dict(bcx: @block_ctxt, origin: typeck::dict_origin)
     cast
 }
 
-fn get_dict_ptrs(bcx: @block_ctxt, origin: typeck::dict_origin)
-    -> {bcx: @block_ctxt, ptrs: [ValueRef]} {
+fn get_dict_ptrs(bcx: block, origin: typeck::dict_origin)
+    -> {bcx: block, ptrs: [ValueRef]} {
     let ccx = bcx_ccx(bcx);
     fn get_vtable(ccx: @crate_ctxt, did: ast::def_id) -> ValueRef {
         if did.crate == ast::local_crate {
@@ -517,8 +517,8 @@ fn get_dict_ptrs(bcx: @block_ctxt, origin: typeck::dict_origin)
     }
 }
 
-fn trans_cast(bcx: @block_ctxt, val: @ast::expr, id: ast::node_id, dest: dest)
-    -> @block_ctxt {
+fn trans_cast(bcx: block, val: @ast::expr, id: ast::node_id, dest: dest)
+    -> block {
     if dest == ignore { ret trans_expr(bcx, val, ignore); }
     let ccx = bcx_ccx(bcx);
     let v_ty = expr_ty(bcx, val);
diff --git a/src/comp/middle/trans/native.rs b/src/comp/middle/trans/native.rs
index 3830c363879..83c7bafe194 100644
--- a/src/comp/middle/trans/native.rs
+++ b/src/comp/middle/trans/native.rs
@@ -54,10 +54,10 @@ fn c_stack_tys(ccx: @crate_ctxt,
     };
 }
 
-type shim_arg_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
+type shim_arg_builder = fn(bcx: block, tys: @c_stack_tys,
                            llargbundle: ValueRef) -> [ValueRef];
 
-type shim_ret_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
+type shim_ret_builder = fn(bcx: block, tys: @c_stack_tys,
                            llargbundle: ValueRef, llretval: ValueRef);
 
 fn build_shim_fn_(ccx: @crate_ctxt,
@@ -73,7 +73,7 @@ fn build_shim_fn_(ccx: @crate_ctxt,
 
     // Declare the body of the shim function:
     let fcx = new_fn_ctxt(ccx, [], llshimfn, none);
-    let bcx = new_top_block_ctxt(fcx, none);
+    let bcx = top_scope_block(fcx, none);
     let lltop = bcx.llbb;
     let llargbundle = llvm::LLVMGetParam(llshimfn, 0 as c_uint);
     let llargvals = arg_builder(bcx, tys, llargbundle);
@@ -90,11 +90,11 @@ fn build_shim_fn_(ccx: @crate_ctxt,
     ret llshimfn;
 }
 
-type wrap_arg_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
+type wrap_arg_builder = fn(bcx: block, tys: @c_stack_tys,
                            llwrapfn: ValueRef,
                            llargbundle: ValueRef);
 
-type wrap_ret_builder = fn(bcx: @block_ctxt, tys: @c_stack_tys,
+type wrap_ret_builder = fn(bcx: block, tys: @c_stack_tys,
                            llargbundle: ValueRef);
 
 fn build_wrap_fn_(ccx: @crate_ctxt,
@@ -106,7 +106,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt,
                   ret_builder: wrap_ret_builder) {
 
     let fcx = new_fn_ctxt(ccx, [], llwrapfn, none);
-    let bcx = new_top_block_ctxt(fcx, none);
+    let bcx = top_scope_block(fcx, none);
     let lltop = bcx.llbb;
 
     // Allocate the struct and write the arguments into it.
@@ -122,7 +122,7 @@ fn build_wrap_fn_(ccx: @crate_ctxt,
     tie_up_header_blocks(fcx, lltop);
 
     // Make sure our standard return block (that we didn't use) is terminated
-    let ret_cx = new_raw_block_ctxt(fcx, fcx.llreturn);
+    let ret_cx = raw_block(fcx, fcx.llreturn);
     Unreachable(ret_cx);
 }
 
@@ -168,7 +168,7 @@ fn trans_native_mod(ccx: @crate_ctxt,
                      tys: @c_stack_tys,
                      cc: lib::llvm::CallConv) -> ValueRef {
 
-        fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_args(bcx: block, tys: @c_stack_tys,
                       llargbundle: ValueRef) -> [ValueRef] {
             let llargvals = [];
             let i = 0u;
@@ -181,7 +181,7 @@ fn trans_native_mod(ccx: @crate_ctxt,
             ret llargvals;
         }
 
-        fn build_ret(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_ret(bcx: block, tys: @c_stack_tys,
                      llargbundle: ValueRef, llretval: ValueRef)  {
             if tys.ret_def {
                 let n = vec::len(tys.arg_tys);
@@ -210,7 +210,7 @@ fn trans_native_mod(ccx: @crate_ctxt,
                      llshimfn: ValueRef,
                      llwrapfn: ValueRef) {
 
-        fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_args(bcx: block, tys: @c_stack_tys,
                       llwrapfn: ValueRef, llargbundle: ValueRef,
                       num_tps: uint) {
             let i = 0u, n = vec::len(tys.arg_tys);
@@ -226,7 +226,7 @@ fn trans_native_mod(ccx: @crate_ctxt,
             store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
         }
 
-        fn build_ret(bcx: @block_ctxt, _tys: @c_stack_tys,
+        fn build_ret(bcx: block, _tys: @c_stack_tys,
                      _llargbundle: ValueRef) {
             RetVoid(bcx);
         }
@@ -283,7 +283,7 @@ fn trans_crust_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl,
     fn build_shim_fn(ccx: @crate_ctxt, path: ast_map::path,
                      llrustfn: ValueRef, tys: @c_stack_tys) -> ValueRef {
 
-        fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_args(bcx: block, tys: @c_stack_tys,
                       llargbundle: ValueRef) -> [ValueRef] {
             let llargvals = [];
             let i = 0u;
@@ -300,7 +300,7 @@ fn trans_crust_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl,
             ret llargvals;
         }
 
-        fn build_ret(_bcx: @block_ctxt, _tys: @c_stack_tys,
+        fn build_ret(_bcx: block, _tys: @c_stack_tys,
                      _llargbundle: ValueRef, _llretval: ValueRef)  {
             // Nop. The return pointer in the Rust ABI function
             // is wired directly into the return slot in the shim struct
@@ -316,7 +316,7 @@ fn trans_crust_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl,
     fn build_wrap_fn(ccx: @crate_ctxt, llshimfn: ValueRef,
                      llwrapfn: ValueRef, tys: @c_stack_tys) {
 
-        fn build_args(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_args(bcx: block, tys: @c_stack_tys,
                       llwrapfn: ValueRef, llargbundle: ValueRef) {
             let llretptr = alloca(bcx, tys.ret_ty);
             let i = 0u, n = vec::len(tys.arg_tys);
@@ -329,7 +329,7 @@ fn trans_crust_fn(ccx: @crate_ctxt, path: ast_map::path, decl: ast::fn_decl,
             store_inbounds(bcx, llretptr, llargbundle, [0, n as int]);
         }
 
-        fn build_ret(bcx: @block_ctxt, tys: @c_stack_tys,
+        fn build_ret(bcx: block, tys: @c_stack_tys,
                      llargbundle: ValueRef) {
             let n = vec::len(tys.arg_tys);
             let llretval = load_inbounds(bcx, llargbundle, [0, n as int]);
diff --git a/src/comp/middle/trans/shape.rs b/src/comp/middle/trans/shape.rs
index 3a4cb891a3e..d1ea547aa83 100644
--- a/src/comp/middle/trans/shape.rs
+++ b/src/comp/middle/trans/shape.rs
@@ -8,7 +8,7 @@ import driver::session::session;
 import trans::base;
 import middle::trans::common::{crate_ctxt, val_ty, C_bytes, C_int,
                                C_named_struct, C_struct, T_enum_variant,
-                               block_ctxt, result, rslt, bcx_ccx, bcx_tcx,
+                               block, result, rslt, bcx_ccx, bcx_tcx,
                                type_has_static_size, umax, umin, align_to,
                                tydesc_info};
 import back::abi;
@@ -593,19 +593,19 @@ fn gen_shape_tables(ccx: @crate_ctxt) {
 // compute sizeof / alignof
 
 type metrics = {
-    bcx: @block_ctxt,
+    bcx: block,
     sz: ValueRef,
     align: ValueRef
 };
 
 type tag_metrics = {
-    bcx: @block_ctxt,
+    bcx: block,
     sz: ValueRef,
     align: ValueRef,
     payload_align: ValueRef
 };
 
-fn size_of(bcx: @block_ctxt, t: ty::t) -> result {
+fn size_of(bcx: block, t: ty::t) -> result {
     let ccx = bcx_ccx(bcx);
     if check type_has_static_size(ccx, t) {
         rslt(bcx, llsize_of(ccx, base::type_of(ccx, t)))
@@ -615,7 +615,7 @@ fn size_of(bcx: @block_ctxt, t: ty::t) -> result {
     }
 }
 
-fn align_of(bcx: @block_ctxt, t: ty::t) -> result {
+fn align_of(bcx: block, t: ty::t) -> result {
     let ccx = bcx_ccx(bcx);
     if check type_has_static_size(ccx, t) {
         rslt(bcx, llalign_of(ccx, base::type_of(ccx, t)))
@@ -625,7 +625,7 @@ fn align_of(bcx: @block_ctxt, t: ty::t) -> result {
     }
 }
 
-fn metrics(bcx: @block_ctxt, t: ty::t) -> metrics {
+fn metrics(bcx: block, t: ty::t) -> metrics {
     let ccx = bcx_ccx(bcx);
     if check type_has_static_size(ccx, t) {
         let llty = base::type_of(ccx, t);
@@ -688,8 +688,8 @@ fn static_size_of_enum(cx: @crate_ctxt, t: ty::t)
     }
 }
 
-fn dynamic_metrics(cx: @block_ctxt, t: ty::t) -> metrics {
-    fn align_elements(cx: @block_ctxt, elts: [ty::t]) -> metrics {
+fn dynamic_metrics(cx: block, t: ty::t) -> metrics {
+    fn align_elements(cx: block, elts: [ty::t]) -> metrics {
         //
         // C padding rules:
         //
@@ -736,7 +736,7 @@ fn dynamic_metrics(cx: @block_ctxt, t: ty::t) -> metrics {
         let bcx = cx;
         let ccx = bcx_ccx(bcx);
 
-        let compute_max_variant_size = fn@(bcx: @block_ctxt) -> result {
+        let compute_max_variant_size = fn@(bcx: block) -> result {
             // Compute max(variant sizes).
             let bcx = bcx;
             let max_size: ValueRef = C_int(ccx, 0);
@@ -799,7 +799,7 @@ fn simplify_type(tcx: ty::ctxt, typ: ty::t) -> ty::t {
 }
 
 // Given a tag type `ty`, returns the offset of the payload.
-//fn tag_payload_offs(bcx: @block_ctxt, tag_id: ast::def_id, tps: [ty::t])
+//fn tag_payload_offs(bcx: block, tag_id: ast::def_id, tps: [ty::t])
 //    -> ValueRef {
 //    alt tag_kind(tag_id) {
 //      tk_unit | tk_enum | tk_newtype { C_int(bcx_ccx(bcx), 0) }
diff --git a/src/comp/middle/trans/tvec.rs b/src/comp/middle/trans/tvec.rs
index b0f4ced69ae..35494cc82dd 100644
--- a/src/comp/middle/trans/tvec.rs
+++ b/src/comp/middle/trans/tvec.rs
@@ -4,28 +4,28 @@ import lib::llvm::{ValueRef, TypeRef};
 import back::abi;
 import base::{call_memmove, trans_shared_malloc, type_of_or_i8,
                INIT, copy_val, load_if_immediate, get_tydesc,
-               new_sub_block_ctxt, do_spill_noroot,
+               sub_block, do_spill_noroot,
                dest};
 import shape::{llsize_of, size_of};
 import build::*;
 import common::*;
 
-fn get_fill(bcx: @block_ctxt, vptr: ValueRef) -> ValueRef {
+fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef {
     Load(bcx, GEPi(bcx, vptr, [0, abi::vec_elt_fill]))
 }
-fn get_dataptr(bcx: @block_ctxt, vptr: ValueRef, unit_ty: TypeRef)
+fn get_dataptr(bcx: block, vptr: ValueRef, unit_ty: TypeRef)
     -> ValueRef {
     let ptr = GEPi(bcx, vptr, [0, abi::vec_elt_elems]);
     PointerCast(bcx, ptr, T_ptr(unit_ty))
 }
 
-fn pointer_add(bcx: @block_ctxt, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
+fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
     let old_ty = val_ty(ptr);
     let bptr = PointerCast(bcx, ptr, T_ptr(T_i8()));
     ret PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty);
 }
 
-fn alloc_raw(bcx: @block_ctxt, fill: ValueRef, alloc: ValueRef) -> result {
+fn alloc_raw(bcx: block, fill: ValueRef, alloc: ValueRef) -> result {
     let ccx = bcx_ccx(bcx);
     let llvecty = ccx.opaque_vec_type;
     let vecsize = Add(bcx, alloc, llsize_of(ccx, llvecty));
@@ -37,13 +37,13 @@ fn alloc_raw(bcx: @block_ctxt, fill: ValueRef, alloc: ValueRef) -> result {
 }
 
 type alloc_result =
-    {bcx: @block_ctxt,
+    {bcx: block,
      val: ValueRef,
      unit_ty: ty::t,
      llunitsz: ValueRef,
      llunitty: TypeRef};
 
-fn alloc(bcx: @block_ctxt, vec_ty: ty::t, elts: uint) -> alloc_result {
+fn alloc(bcx: block, vec_ty: ty::t, elts: uint) -> alloc_result {
     let ccx = bcx_ccx(bcx);
     let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
     let llunitty = type_of_or_i8(ccx, unit_ty);
@@ -66,7 +66,7 @@ fn alloc(bcx: @block_ctxt, vec_ty: ty::t, elts: uint) -> alloc_result {
          llunitty: llunitty};
 }
 
-fn duplicate(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) -> result {
+fn duplicate(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> result {
     let ccx = bcx_ccx(bcx);
     let fill = get_fill(bcx, vptr);
     let size = Add(bcx, fill, llsize_of(ccx, ccx.opaque_vec_type));
@@ -80,23 +80,19 @@ fn duplicate(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) -> result {
     }
     ret rslt(bcx, newptr);
 }
-fn make_free_glue(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t) ->
-   @block_ctxt {
-    let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
-    let drop_cx = new_sub_block_ctxt(bcx, "drop");
-    let next_cx = new_sub_block_ctxt(bcx, "next");
-    let null_test = IsNull(bcx, vptr);
-    CondBr(bcx, null_test, next_cx.llbb, drop_cx.llbb);
-    if ty::type_needs_drop(bcx_tcx(bcx), unit_ty) {
-        drop_cx = iter_vec(drop_cx, vptr, vec_ty, base::drop_ty);
+fn make_free_glue(bcx: block, vptr: ValueRef, vec_ty: ty::t) ->
+   block {
+    let tcx = bcx_tcx(bcx), unit_ty = ty::sequence_element_type(tcx, vec_ty);
+    base::with_cond(bcx, IsNotNull(bcx, vptr)) {|bcx|
+        let bcx = if ty::type_needs_drop(tcx, unit_ty) {
+            iter_vec(bcx, vptr, vec_ty, base::drop_ty)
+        } else { bcx };
+        base::trans_shared_free(bcx, vptr)
     }
-    drop_cx = base::trans_shared_free(drop_cx, vptr);
-    Br(drop_cx, next_cx.llbb);
-    ret next_cx;
 }
 
-fn trans_vec(bcx: @block_ctxt, args: [@ast::expr], id: ast::node_id,
-             dest: dest) -> @block_ctxt {
+fn trans_vec(bcx: block, args: [@ast::expr], id: ast::node_id,
+             dest: dest) -> block {
     let ccx = bcx_ccx(bcx), bcx = bcx;
     if dest == base::ignore {
         for arg in args {
@@ -129,7 +125,7 @@ fn trans_vec(bcx: @block_ctxt, args: [@ast::expr], id: ast::node_id,
     ret base::store_in_dest(bcx, vptr, dest);
 }
 
-fn trans_str(bcx: @block_ctxt, s: str, dest: dest) -> @block_ctxt {
+fn trans_str(bcx: block, s: str, dest: dest) -> block {
     let veclen = str::len_bytes(s) + 1u; // +1 for \0
     let {bcx: bcx, val: sptr, _} =
         alloc(bcx, ty::mk_str(bcx_tcx(bcx)), veclen);
@@ -142,8 +138,8 @@ fn trans_str(bcx: @block_ctxt, s: str, dest: dest) -> @block_ctxt {
     ret base::store_in_dest(bcx, sptr, dest);
 }
 
-fn trans_append(cx: @block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
-                rhs: ValueRef) -> @block_ctxt {
+fn trans_append(cx: block, vec_ty: ty::t, lhsptr: ValueRef,
+                rhs: ValueRef) -> block {
     // Cast to opaque interior vector types if necessary.
     let ccx = bcx_ccx(cx);
     let unit_ty = ty::sequence_element_type(bcx_tcx(cx), vec_ty);
@@ -206,8 +202,8 @@ fn trans_append(cx: @block_ctxt, vec_ty: ty::t, lhsptr: ValueRef,
     ret bcx;
 }
 
-fn trans_append_literal(bcx: @block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
-                        vals: [@ast::expr]) -> @block_ctxt {
+fn trans_append_literal(bcx: block, vptrptr: ValueRef, vec_ty: ty::t,
+                        vals: [@ast::expr]) -> block {
     let ccx = bcx_ccx(bcx);
     let elt_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
     let ti = none;
@@ -227,8 +223,8 @@ fn trans_append_literal(bcx: @block_ctxt, vptrptr: ValueRef, vec_ty: ty::t,
     ret bcx;
 }
 
-fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
-             rhs: ValueRef, dest: dest) -> @block_ctxt {
+fn trans_add(bcx: block, vec_ty: ty::t, lhs: ValueRef,
+             rhs: ValueRef, dest: dest) -> block {
     let ccx = bcx_ccx(bcx);
     let strings = alt ty::get(vec_ty).struct {
       ty::ty_str { true }
@@ -247,8 +243,8 @@ fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
 
     let write_ptr_ptr = do_spill_noroot
         (bcx, get_dataptr(bcx, new_vec_ptr, llunitty));
-    let copy_fn = fn@(bcx: @block_ctxt, addr: ValueRef,
-                      _ty: ty::t) -> @block_ctxt {
+    let copy_fn = fn@(bcx: block, addr: ValueRef,
+                      _ty: ty::t) -> block {
         let ccx = bcx_ccx(bcx);
         let write_ptr = Load(bcx, write_ptr_ptr);
         let bcx = copy_val(bcx, INIT, write_ptr,
@@ -269,12 +265,12 @@ fn trans_add(bcx: @block_ctxt, vec_ty: ty::t, lhs: ValueRef,
     ret base::store_in_dest(bcx, new_vec_ptr, dest);
 }
 
-type val_and_ty_fn = fn@(@block_ctxt, ValueRef, ty::t) -> result;
+type val_and_ty_fn = fn@(block, ValueRef, ty::t) -> result;
 
-type iter_vec_block = fn(@block_ctxt, ValueRef, ty::t) -> @block_ctxt;
+type iter_vec_block = fn(block, ValueRef, ty::t) -> block;
 
-fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
-                fill: ValueRef, f: iter_vec_block) -> @block_ctxt {
+fn iter_vec_raw(bcx: block, vptr: ValueRef, vec_ty: ty::t,
+                fill: ValueRef, f: iter_vec_block) -> block {
     let ccx = bcx_ccx(bcx);
     let unit_ty = ty::sequence_element_type(bcx_tcx(bcx), vec_ty);
     let llunitty = type_of_or_i8(ccx, unit_ty);
@@ -288,13 +284,13 @@ fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
     let data_end_ptr = pointer_add(bcx, data_ptr, fill);
 
     // Now perform the iteration.
-    let header_cx = new_sub_block_ctxt(bcx, "iter_vec_loop_header");
+    let header_cx = sub_block(bcx, "iter_vec_loop_header");
     Br(bcx, header_cx.llbb);
     let data_ptr = Phi(header_cx, val_ty(data_ptr), [data_ptr], [bcx.llbb]);
     let not_yet_at_end =
         ICmp(header_cx, lib::llvm::IntULT, data_ptr, data_end_ptr);
-    let body_cx = new_sub_block_ctxt(header_cx, "iter_vec_loop_body");
-    let next_cx = new_sub_block_ctxt(header_cx, "iter_vec_next");
+    let body_cx = sub_block(header_cx, "iter_vec_loop_body");
+    let next_cx = sub_block(header_cx, "iter_vec_next");
     CondBr(header_cx, not_yet_at_end, body_cx.llbb, next_cx.llbb);
     body_cx = f(body_cx, data_ptr, unit_ty);
     let increment =
@@ -307,8 +303,8 @@ fn iter_vec_raw(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
     ret next_cx;
 }
 
-fn iter_vec(bcx: @block_ctxt, vptr: ValueRef, vec_ty: ty::t,
-            f: iter_vec_block) -> @block_ctxt {
+fn iter_vec(bcx: block, vptr: ValueRef, vec_ty: ty::t,
+            f: iter_vec_block) -> block {
     let ccx = bcx_ccx(bcx);
     let vptr = PointerCast(bcx, vptr, T_ptr(ccx.opaque_vec_type));
     ret iter_vec_raw(bcx, vptr, vec_ty, get_fill(bcx, vptr), f);
diff --git a/src/comp/middle/trans/uniq.rs b/src/comp/middle/trans/uniq.rs
index 46cddf4a7e3..b1b701fdc66 100644
--- a/src/comp/middle/trans/uniq.rs
+++ b/src/comp/middle/trans/uniq.rs
@@ -2,31 +2,22 @@ import syntax::ast;
 import lib::llvm::ValueRef;
 import common::*;
 import build::*;
-import base::{
-    trans_shared_malloc,
-    type_of,
-    INIT,
-    trans_shared_free,
-    drop_ty,
-    new_sub_block_ctxt,
-    load_if_immediate,
-    dest
-};
-import shape::{size_of};
+import base::*;
+import shape::size_of;
 
 export trans_uniq, make_free_glue, autoderef, duplicate, alloc_uniq;
 
-fn trans_uniq(bcx: @block_ctxt, contents: @ast::expr,
-              node_id: ast::node_id, dest: dest) -> @block_ctxt {
+fn trans_uniq(bcx: block, contents: @ast::expr,
+              node_id: ast::node_id, dest: dest) -> block {
     let uniq_ty = node_id_type(bcx, node_id);
     let {bcx, val: llptr} = alloc_uniq(bcx, uniq_ty);
     add_clean_free(bcx, llptr, true);
-    bcx = base::trans_expr_save_in(bcx, contents, llptr);
+    bcx = trans_expr_save_in(bcx, contents, llptr);
     revoke_clean(bcx, llptr);
-    ret base::store_in_dest(bcx, llptr, dest);
+    ret store_in_dest(bcx, llptr, dest);
 }
 
-fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t) -> result {
+fn alloc_uniq(cx: block, uniq_ty: ty::t) -> result {
     let bcx = cx;
     let contents_ty = content_ty(uniq_ty);
     let r = size_of(bcx, contents_ty);
@@ -42,19 +33,12 @@ fn alloc_uniq(cx: @block_ctxt, uniq_ty: ty::t) -> result {
     ret rslt(bcx, llptr);
 }
 
-fn make_free_glue(cx: @block_ctxt, vptr: ValueRef, t: ty::t)
-    -> @block_ctxt {
-    let bcx = cx;
-    let free_cx = new_sub_block_ctxt(bcx, "uniq_free");
-    let next_cx = new_sub_block_ctxt(bcx, "uniq_free_next");
-    let null_test = IsNull(bcx, vptr);
-    CondBr(bcx, null_test, next_cx.llbb, free_cx.llbb);
-
-    let bcx = free_cx;
-    let bcx = drop_ty(bcx, vptr, content_ty(t));
-    let bcx = trans_shared_free(bcx, vptr);
-    Br(bcx, next_cx.llbb);
-    next_cx
+fn make_free_glue(bcx: block, vptr: ValueRef, t: ty::t)
+    -> block {
+    with_cond(bcx, IsNotNull(bcx, vptr)) {|bcx|
+        let bcx = drop_ty(bcx, vptr, content_ty(t));
+        trans_shared_free(bcx, vptr)
+    }
 }
 
 fn content_ty(t: ty::t) -> ty::t {
@@ -69,12 +53,12 @@ fn autoderef(v: ValueRef, t: ty::t) -> {v: ValueRef, t: ty::t} {
     ret {v: v, t: content_ty};
 }
 
-fn duplicate(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> result {
+fn duplicate(bcx: block, v: ValueRef, t: ty::t) -> result {
     let content_ty = content_ty(t);
     let {bcx, val: llptr} = alloc_uniq(bcx, t);
 
     let src = load_if_immediate(bcx, v, content_ty);
     let dst = llptr;
-    let bcx = base::copy_val(bcx, INIT, dst, src, content_ty);
+    let bcx = copy_val(bcx, INIT, dst, src, content_ty);
     ret rslt(bcx, dst);
 }
\ No newline at end of file