about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author: Marijn Haverbeke <marijnh@gmail.com> 2011-08-30 09:59:30 +0200
committer: Marijn Haverbeke <marijnh@gmail.com> 2011-08-30 13:04:57 +0200
commit: c1ba28c7779a68b74b26ef70acd81d324d97e7dc (patch)
tree: a5f8bf7b1f265ff300a10c8a5ca624f6fecb1f9e /src
parent: 41fb042def23e93742357238e0ea4fd2c6ec0f19 (diff)
download: rust-c1ba28c7779a68b74b26ef70acd81d324d97e7dc.tar.gz
download: rust-c1ba28c7779a68b74b26ef70acd81d324d97e7dc.zip
Glob-import trans_build in other trans files
The capitalization already prevents name clashes. Being able to refer to the
bitcode-construction primitives directly makes the code cleaner.
Diffstat (limited to 'src')
-rw-r--r--  src/comp/middle/trans.rs          | 750
-rw-r--r--  src/comp/middle/trans_alt.rs      | 43
-rw-r--r--  src/comp/middle/trans_objects.rs  | 106
3 files changed, 449 insertions, 450 deletions
diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs
index 60704036c22..d3f169716d3 100644
--- a/src/comp/middle/trans.rs
+++ b/src/comp/middle/trans.rs
@@ -70,7 +70,7 @@ import syntax::print::pprust::expr_to_str;
 import syntax::print::pprust::path_to_str;
 
 import trans_common::*;
-import bld = trans_build;
+import trans_build::*;
 
 import trans_objects::trans_anon_obj;
 import trans_objects::trans_obj;
@@ -399,38 +399,38 @@ fn trans_native_call(cx: &@block_ctxt, externs: &hashmap<istr, ValueRef>,
     let llnative: ValueRef = get_simple_extern_fn(externs, llmod, name, n);
     let call_args: [ValueRef] = [];
     for a: ValueRef in args {
-        call_args += [bld::ZExtOrBitCast(cx, a, T_int())];
+        call_args += [ZExtOrBitCast(cx, a, T_int())];
     }
-    ret bld::Call(cx, llnative, call_args);
+    ret Call(cx, llnative, call_args);
 }
 
 fn trans_non_gc_free(cx: &@block_ctxt, v: ValueRef) -> result {
-    bld::Call(cx, bcx_ccx(cx).upcalls.free,
-                  [cx.fcx.lltaskptr, bld::PointerCast(cx, v, T_ptr(T_i8())),
+    Call(cx, bcx_ccx(cx).upcalls.free,
+                  [cx.fcx.lltaskptr, PointerCast(cx, v, T_ptr(T_i8())),
                    C_int(0)]);
     ret rslt(cx, C_int(0));
 }
 
 fn trans_shared_free(cx: &@block_ctxt, v: ValueRef) -> result {
-    bld::Call(cx, bcx_ccx(cx).upcalls.shared_free,
-                  [cx.fcx.lltaskptr, bld::PointerCast(cx, v, T_ptr(T_i8()))]);
+    Call(cx, bcx_ccx(cx).upcalls.shared_free,
+                  [cx.fcx.lltaskptr, PointerCast(cx, v, T_ptr(T_i8()))]);
     ret rslt(cx, C_int(0));
 }
 
 fn umax(cx: &@block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
-    let cond = bld::ICmp(cx, lib::llvm::LLVMIntULT, a, b);
-    ret bld::Select(cx, cond, b, a);
+    let cond = ICmp(cx, lib::llvm::LLVMIntULT, a, b);
+    ret Select(cx, cond, b, a);
 }
 
 fn umin(cx: &@block_ctxt, a: ValueRef, b: ValueRef) -> ValueRef {
-    let cond = bld::ICmp(cx, lib::llvm::LLVMIntULT, a, b);
-    ret bld::Select(cx, cond, a, b);
+    let cond = ICmp(cx, lib::llvm::LLVMIntULT, a, b);
+    ret Select(cx, cond, a, b);
 }
 
 fn align_to(cx: &@block_ctxt, off: ValueRef, align: ValueRef) -> ValueRef {
-    let mask = bld::Sub(cx, align, C_int(1));
-    let bumped = bld::Add(cx, off, mask);
-    ret bld::And(cx, bumped, bld::Not(cx, mask));
+    let mask = Sub(cx, align, C_int(1));
+    let bumped = Add(cx, off, mask);
+    ret And(cx, bumped, Not(cx, mask));
 }
 
 
@@ -469,7 +469,7 @@ fn align_of(cx: &@block_ctxt, t: ty::t) -> result {
 }
 
 fn alloca(cx: &@block_ctxt, t: TypeRef) -> ValueRef {
-    ret bld::Alloca(new_raw_block_ctxt(cx.fcx, cx.fcx.llstaticallocas), t);
+    ret Alloca(new_raw_block_ctxt(cx.fcx, cx.fcx.llstaticallocas), t);
 }
 
 fn array_alloca(cx: &@block_ctxt, t: TypeRef, n: ValueRef) -> ValueRef {
@@ -485,15 +485,15 @@ fn array_alloca(cx: &@block_ctxt, t: TypeRef, n: ValueRef) -> ValueRef {
     }
 
     let dynastack_alloc = bcx_ccx(bcx).upcalls.dynastack_alloc;
-    let llsz = bld::Mul(dy_cx, C_uint(llsize_of_real(bcx_ccx(bcx), t)), n);
-    let llresult = bld::Call(dy_cx, dynastack_alloc, [lltaskptr, llsz]);
-    ret bld::PointerCast(dy_cx, llresult, T_ptr(t));
+    let llsz = Mul(dy_cx, C_uint(llsize_of_real(bcx_ccx(bcx), t)), n);
+    let llresult = Call(dy_cx, dynastack_alloc, [lltaskptr, llsz]);
+    ret PointerCast(dy_cx, llresult, T_ptr(t));
 }
 
 fn mk_obstack_token(ccx: &@crate_ctxt, fcx: @fn_ctxt,
                     lltaskptr: ValueRef) -> ValueRef {
     let cx = new_raw_block_ctxt(fcx, fcx.lldynamicallocas);
-    ret bld::Call(cx, ccx.upcalls.dynastack_mark, [lltaskptr]);
+    ret Call(cx, ccx.upcalls.dynastack_mark, [lltaskptr]);
 }
 
 
@@ -581,7 +581,7 @@ fn dynamic_size_of(cx: &@block_ctxt, t: ty::t) -> result {
             let elt_size = size_of(bcx, e);
             bcx = elt_size.bcx;
             let aligned_off = align_to(bcx, off, elt_align.val);
-            off = bld::Add(bcx, aligned_off, elt_size.val);
+            off = Add(bcx, aligned_off, elt_size.val);
             max_align = umax(bcx, max_align, elt_align.val);
         }
         off = align_to(bcx, off, max_align);
@@ -590,7 +590,7 @@ fn dynamic_size_of(cx: &@block_ctxt, t: ty::t) -> result {
     alt ty::struct(bcx_tcx(cx), t) {
       ty::ty_param(p, _) {
         let szptr = field_of_tydesc(cx, t, false, abi::tydesc_field_size);
-        ret rslt(szptr.bcx, bld::Load(szptr.bcx, szptr.val));
+        ret rslt(szptr.bcx, Load(szptr.bcx, szptr.val));
       }
       ty::ty_rec(flds) {
         let tys: [ty::t] = [];
@@ -607,7 +607,7 @@ fn dynamic_size_of(cx: &@block_ctxt, t: ty::t) -> result {
         // Compute max(variant sizes).
 
         let max_size: ValueRef = alloca(bcx, T_int());
-        bld::Store(bcx, C_int(0), max_size);
+        Store(bcx, C_int(0), max_size);
         let variants = ty::tag_variants(bcx_tcx(bcx), tid);
         for variant: ty::variant_info in variants {
             // Perform type substitution on the raw argument types.
@@ -621,13 +621,13 @@ fn dynamic_size_of(cx: &@block_ctxt, t: ty::t) -> result {
             let rslt = align_elements(bcx, tys);
             bcx = rslt.bcx;
             let this_size = rslt.val;
-            let old_max_size = bld::Load(bcx, max_size);
-            bld::Store(bcx, umax(bcx, this_size, old_max_size), max_size);
+            let old_max_size = Load(bcx, max_size);
+            Store(bcx, umax(bcx, this_size, old_max_size), max_size);
         }
-        let max_size_val = bld::Load(bcx, max_size);
+        let max_size_val = Load(bcx, max_size);
         let total_size =
             if std::vec::len(variants) != 1u {
-                bld::Add(bcx, max_size_val, llsize_of(T_int()))
+                Add(bcx, max_size_val, llsize_of(T_int()))
             } else { max_size_val };
         ret rslt(bcx, total_size);
       }
@@ -638,7 +638,7 @@ fn dynamic_align_of(cx: &@block_ctxt, t: ty::t) -> result {
     alt ty::struct(bcx_tcx(cx), t) {
       ty::ty_param(p, _) {
         let aptr = field_of_tydesc(cx, t, false, abi::tydesc_field_align);
-        ret rslt(aptr.bcx, bld::Load(aptr.bcx, aptr.val));
+        ret rslt(aptr.bcx, Load(aptr.bcx, aptr.val));
       }
       ty::ty_rec(flds) {
         let a = C_int(1);
@@ -671,18 +671,18 @@ fn dynamic_align_of(cx: &@block_ctxt, t: ty::t) -> result {
 fn GEPi(cx: &@block_ctxt, base: ValueRef, ixs: &[int]) -> ValueRef {
     let v: [ValueRef] = [];
     for i: int in ixs { v += [C_int(i)]; }
-    ret bld::InBoundsGEP(cx, base, v);
+    ret InBoundsGEP(cx, base, v);
 }
 
 // Increment a pointer by a given amount and then cast it to be a pointer
 // to a given type.
 fn bump_ptr(bcx: &@block_ctxt, t: ty::t, base: ValueRef, sz: ValueRef) ->
    ValueRef {
-    let raw = bld::PointerCast(bcx, base, T_ptr(T_i8()));
-    let bumped = bld::GEP(bcx, raw, [sz]);
+    let raw = PointerCast(bcx, base, T_ptr(T_i8()));
+    let bumped = GEP(bcx, raw, [sz]);
     if ty::type_has_dynamic_size(bcx_tcx(bcx), t) { ret bumped; }
     let typ = T_ptr(type_of(bcx_ccx(bcx), bcx.sp, t));
-    ret bld::PointerCast(bcx, bumped, typ);
+    ret PointerCast(bcx, bumped, typ);
 }
 
 // Replacement for the LLVM 'GEP' instruction when field-indexing into a
@@ -796,7 +796,7 @@ fn GEP_tag(cx: @block_ctxt, llblobptr: ValueRef, tag_id: &ast::def_id,
     let llunionptr: ValueRef;
     if !ty::type_has_dynamic_size(bcx_tcx(cx), tup_ty) {
         let llty = type_of(bcx_ccx(cx), cx.sp, tup_ty);
-        llunionptr = bld::TruncOrBitCast(cx, llblobptr, T_ptr(llty));
+        llunionptr = TruncOrBitCast(cx, llblobptr, T_ptr(llty));
     } else { llunionptr = llblobptr; }
     // Do the GEP_tup_like().
 
@@ -806,7 +806,7 @@ fn GEP_tag(cx: @block_ctxt, llblobptr: ValueRef, tag_id: &ast::def_id,
     let val;
     if !ty::type_has_dynamic_size(bcx_tcx(cx), elem_ty) {
         let llelemty = type_of(bcx_ccx(rs.bcx), cx.sp, elem_ty);
-        val = bld::PointerCast(rs.bcx, rs.val, T_ptr(llelemty));
+        val = PointerCast(rs.bcx, rs.val, T_ptr(llelemty));
     } else { val = rs.val; }
     ret rslt(rs.bcx, val);
 }
@@ -819,9 +819,9 @@ fn trans_raw_malloc(cx: &@block_ctxt, llptr_ty: TypeRef, llsize: ValueRef) ->
 
     let tydesc = C_null(T_ptr(bcx_ccx(cx).tydesc_type));
     let rval =
-        bld::Call(cx, bcx_ccx(cx).upcalls.malloc,
+        Call(cx, bcx_ccx(cx).upcalls.malloc,
                       [cx.fcx.lltaskptr, llsize, tydesc]);
-    ret rslt(cx, bld::PointerCast(cx, rval, llptr_ty));
+    ret rslt(cx, PointerCast(cx, rval, llptr_ty));
 }
 
 // trans_shared_malloc: expects a type indicating which pointer type we want
@@ -832,9 +832,9 @@ fn trans_shared_malloc(cx: &@block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
 
     let tydesc = C_null(T_ptr(bcx_ccx(cx).tydesc_type));
     let rval =
-        bld::Call(cx, bcx_ccx(cx).upcalls.shared_malloc,
+        Call(cx, bcx_ccx(cx).upcalls.shared_malloc,
                       [cx.fcx.lltaskptr, llsize, tydesc]);
-    ret rslt(cx, bld::PointerCast(cx, rval, llptr_ty));
+    ret rslt(cx, PointerCast(cx, rval, llptr_ty));
 }
 
 // trans_malloc_boxed_raw: expects an unboxed type and returns a pointer to
@@ -868,7 +868,7 @@ fn trans_malloc_boxed(cx: &@block_ctxt, t: ty::t) ->
     let res = trans_malloc_boxed_raw(cx, t);
     let box = res.val;
     let rc = GEPi(res.bcx, box, [0, abi::box_rc_field_refcnt]);
-    bld::Store(res.bcx, C_int(1), rc);
+    Store(res.bcx, C_int(1), rc);
     let body = GEPi(res.bcx, box, [0, abi::box_rc_field_body]);
     ret {bcx: res.bcx, box: res.val, body: body};
 }
@@ -883,7 +883,7 @@ fn field_of_tydesc(cx: &@block_ctxt, t: ty::t, escapes: bool, field: int) ->
     let ti = none::<@tydesc_info>;
     let tydesc = get_tydesc(cx, t, escapes, tps_normal, ti).result;
     ret rslt(tydesc.bcx,
-             bld::GEP(tydesc.bcx, tydesc.val, [C_int(0), C_int(field)]));
+             GEP(tydesc.bcx, tydesc.val, [C_int(0), C_int(field)]));
 }
 
 
@@ -922,8 +922,8 @@ fn trans_stack_local_derived_tydesc(cx: &@block_ctxt, llsz: ValueRef,
     let llmyroottydesc = alloca(cx, bcx_ccx(cx).tydesc_type);
 
     // By convention, desc 0 is the root descriptor.
-    llroottydesc = bld::Load(cx, llroottydesc);
-    bld::Store(cx, llroottydesc, llmyroottydesc);
+    llroottydesc = Load(cx, llroottydesc);
+    Store(cx, llroottydesc, llmyroottydesc);
 
     // Store a pointer to the rest of the descriptors.
     store_inbounds(cx, llfirstparam, llmyroottydesc,
@@ -989,17 +989,17 @@ fn get_derived_tydesc(cx: &@block_ctxt, t: ty::t, escapes: bool,
     // If the type descriptor escapes, we need to add in the root as
     // the first parameter, because upcall_get_type_desc() expects it.
     if escapes {
-        bld::Store(bcx, root, GEPi(bcx, llparamtydescs, [0, 0]));
+        Store(bcx, root, GEPi(bcx, llparamtydescs, [0, 0]));
         i += 1;
     }
 
     for td: ValueRef in tys.descs {
-        bld::Store(bcx, td, GEPi(bcx, llparamtydescs, [0, i]));
+        Store(bcx, td, GEPi(bcx, llparamtydescs, [0, i]));
         i += 1;
     }
 
     let llfirstparam =
-        bld::PointerCast(bcx, llparamtydescs,
+        PointerCast(bcx, llparamtydescs,
                          T_ptr(T_ptr(bcx_ccx(bcx).tydesc_type)));
 
     // The top bit indicates whether this type descriptor describes an object
@@ -1014,7 +1014,7 @@ fn get_derived_tydesc(cx: &@block_ctxt, t: ty::t, escapes: bool,
     let v;
     if escapes {
         let td_val =
-            bld::Call(bcx, bcx_ccx(bcx).upcalls.get_type_desc,
+            Call(bcx, bcx_ccx(bcx).upcalls.get_type_desc,
                            [bcx.fcx.lltaskptr, C_null(T_ptr(T_nil())), sz.val,
                             align.val, C_uint(1u + n_params),
                             llfirstparam, C_uint(obj_params)]);
@@ -1192,8 +1192,8 @@ fn make_generic_glue_inner(cx: &@local_ctxt, sp: &span, t: ty::t,
     let lltydescs = [mutable];
     let p = 0u;
     while p < ty_param_count {
-        let llparam = bld::GEP(copy_args_bcx, lltyparams, [C_int(p as int)]);
-        llparam = bld::Load(copy_args_bcx, llparam);
+        let llparam = GEP(copy_args_bcx, lltyparams, [C_int(p as int)]);
+        llparam = Load(copy_args_bcx, llparam);
         std::vec::grow_set(lltydescs, ty_params[p], 0 as ValueRef, llparam);
         p += 1u;
     }
@@ -1206,14 +1206,14 @@ fn make_generic_glue_inner(cx: &@local_ctxt, sp: &span, t: ty::t,
     let bcx = new_top_block_ctxt(fcx);
     let lltop = bcx.llbb;
     let llrawptr0 = llvm::LLVMGetParam(llfn, 4u);
-    let llval0 = bld::BitCast(bcx, llrawptr0, llty);
+    let llval0 = BitCast(bcx, llrawptr0, llty);
     alt helper {
       default_helper(helper) {
         helper(bcx, llval0, t);
       }
       copy_helper(helper) {
         let llrawptr1 = llvm::LLVMGetParam(llfn, 5u);
-        let llval1 = bld::BitCast(bcx, llrawptr1, llty);
+        let llval1 = BitCast(bcx, llrawptr1, llty);
         helper(bcx, llval0, llval1, t);
       }
     }
@@ -1312,7 +1312,7 @@ fn make_take_glue(cx: &@block_ctxt, v: ValueRef, t: ty::t) {
     let bcx = cx;
     // NB: v is an *alias* of type t here, not a direct value.
     if ty::type_is_boxed(bcx_tcx(bcx), t) {
-        bcx = incr_refcnt_of_boxed(bcx, bld::Load(bcx, v)).bcx;
+        bcx = incr_refcnt_of_boxed(bcx, Load(bcx, v)).bcx;
     } else if ty::type_is_structural(bcx_tcx(bcx), t) {
         bcx = iter_structural_ty(bcx, v, t, take_ty).bcx;
     } else if ty::type_is_ivec(bcx_tcx(bcx), t) {
@@ -1325,17 +1325,17 @@ fn make_take_glue(cx: &@block_ctxt, v: ValueRef, t: ty::t) {
 
 fn incr_refcnt_of_boxed(cx: &@block_ctxt, box_ptr: ValueRef) -> result {
     let rc_ptr =
-        bld::GEP(cx, box_ptr, [C_int(0), C_int(abi::box_rc_field_refcnt)]);
-    let rc = bld::Load(cx, rc_ptr);
+        GEP(cx, box_ptr, [C_int(0), C_int(abi::box_rc_field_refcnt)]);
+    let rc = Load(cx, rc_ptr);
     let rc_adj_cx = new_sub_block_ctxt(cx, ~"rc++");
     let next_cx = new_sub_block_ctxt(cx, ~"next");
     let const_test =
-        bld::ICmp(cx, lib::llvm::LLVMIntEQ, C_int(abi::const_refcount as int),
+        ICmp(cx, lib::llvm::LLVMIntEQ, C_int(abi::const_refcount as int),
                       rc);
-    bld::CondBr(cx, const_test, next_cx.llbb, rc_adj_cx.llbb);
-    rc = bld::Add(rc_adj_cx, rc, C_int(1));
-    bld::Store(rc_adj_cx, rc, rc_ptr);
-    bld::Br(rc_adj_cx, next_cx.llbb);
+    CondBr(cx, const_test, next_cx.llbb, rc_adj_cx.llbb);
+    rc = Add(rc_adj_cx, rc, C_int(1));
+    Store(rc_adj_cx, rc, rc_ptr);
+    Br(rc_adj_cx, next_cx.llbb);
     ret rslt(next_cx, C_nil());
 }
 
@@ -1344,15 +1344,15 @@ fn make_free_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
     let rs =
         alt ty::struct(bcx_tcx(cx), t) {
           ty::ty_str. {
-            let v = bld::Load(cx, v0);
+            let v = Load(cx, v0);
             if !bcx_ccx(cx).sess.get_opts().do_gc {
                 trans_non_gc_free(cx, v)
             } else { rslt(cx, C_nil()) }
           }
           ty::ty_box(body_mt) {
-            let v = bld::Load(cx, v0);
+            let v = Load(cx, v0);
             let body =
-                bld::GEP(cx, v, [C_int(0), C_int(abi::box_rc_field_body)]);
+                GEP(cx, v, [C_int(0), C_int(abi::box_rc_field_body)]);
             let body_ty = body_mt.ty;
             let rs = drop_ty(cx, body, body_ty);
             if !bcx_ccx(cx).sess.get_opts().do_gc {
@@ -1364,17 +1364,17 @@ fn make_free_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
             // Call through the obj's own fields-drop glue first.
             // Then free the body.
             let box_cell =
-                bld::GEP(cx, v0, [C_int(0), C_int(abi::obj_field_box)]);
-            let b = bld::Load(cx, box_cell);
+                GEP(cx, v0, [C_int(0), C_int(abi::obj_field_box)]);
+            let b = Load(cx, box_cell);
             let ccx = bcx_ccx(cx);
             let llbox_ty = T_opaque_obj_ptr(*ccx);
-            b = bld::PointerCast(cx, b, llbox_ty);
+            b = PointerCast(cx, b, llbox_ty);
             let body =
-                bld::GEP(cx, b, [C_int(0), C_int(abi::box_rc_field_body)]);
+                GEP(cx, b, [C_int(0), C_int(abi::box_rc_field_body)]);
             let tydescptr =
-                bld::GEP(cx, body,
+                GEP(cx, body,
                              [C_int(0), C_int(abi::obj_body_elt_tydesc)]);
-            let tydesc = bld::Load(cx, tydescptr);
+            let tydesc = Load(cx, tydescptr);
             let ti = none::<@tydesc_info>;
             call_tydesc_glue_full(cx, body, tydesc,
                                   abi::tydesc_field_drop_glue, ti);
@@ -1386,18 +1386,18 @@ fn make_free_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
             // Call through the closure's own fields-drop glue first.
             // Then free the body.
             let box_cell =
-                bld::GEP(cx, v0, [C_int(0), C_int(abi::fn_field_box)]);
-            let v = bld::Load(cx, box_cell);
+                GEP(cx, v0, [C_int(0), C_int(abi::fn_field_box)]);
+            let v = Load(cx, box_cell);
             let body =
-                bld::GEP(cx, v, [C_int(0), C_int(abi::box_rc_field_body)]);
+                GEP(cx, v, [C_int(0), C_int(abi::box_rc_field_body)]);
             let bindings =
-                bld::GEP(cx, body,
+                GEP(cx, body,
                              [C_int(0), C_int(abi::closure_elt_bindings)]);
             let tydescptr =
-                bld::GEP(cx, body,
+                GEP(cx, body,
                              [C_int(0), C_int(abi::closure_elt_tydesc)]);
             let ti = none::<@tydesc_info>;
-            call_tydesc_glue_full(cx, bindings, bld::Load(cx, tydescptr),
+            call_tydesc_glue_full(cx, bindings, Load(cx, tydescptr),
                                   abi::tydesc_field_drop_glue, ti);
             if !bcx_ccx(cx).sess.get_opts().do_gc {
                 trans_non_gc_free(cx, v)
@@ -1422,10 +1422,10 @@ fn make_drop_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
             rslt(ivec::make_drop_glue(cx, v0, t), C_nil())
           }
           ty::ty_box(_) { decr_refcnt_maybe_free(cx, v0, v0, t) }
-          ty::ty_uniq(_) { trans_shared_free(cx, bld::Load(cx, v0)) }
+          ty::ty_uniq(_) { trans_shared_free(cx, Load(cx, v0)) }
           ty::ty_obj(_) {
             let box_cell =
-                bld::GEP(cx, v0, [C_int(0), C_int(abi::obj_field_box)]);
+                GEP(cx, v0, [C_int(0), C_int(abi::obj_field_box)]);
             decr_refcnt_maybe_free(cx, box_cell, v0, t)
           }
           ty::ty_res(did, inner, tps) {
@@ -1433,7 +1433,7 @@ fn make_drop_glue(cx: &@block_ctxt, v0: ValueRef, t: ty::t) {
           }
           ty::ty_fn(_, _, _, _, _) {
             let box_cell =
-                bld::GEP(cx, v0, [C_int(0), C_int(abi::fn_field_box)]);
+                GEP(cx, v0, [C_int(0), C_int(abi::fn_field_box)]);
             decr_refcnt_maybe_free(cx, box_cell, v0, t)
           }
           _ {
@@ -1457,8 +1457,8 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: &ast::def_id,
 
     let drop_flag = GEP_tup_like(cx, tup_ty, rs, [0, 0]);
     cx = drop_flag.bcx;
-    let null_test = bld::IsNull(cx, bld::Load(cx, drop_flag.val));
-    bld::CondBr(cx, null_test, next_cx.llbb, drop_cx.llbb);
+    let null_test = IsNull(cx, Load(cx, drop_flag.val));
+    CondBr(cx, null_test, next_cx.llbb, drop_cx.llbb);
     cx = drop_cx;
 
     let val = GEP_tup_like(cx, tup_ty, rs, [0, 1]);
@@ -1466,10 +1466,10 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: &ast::def_id,
     // Find and call the actual destructor.
     let dtor_pair = trans_common::get_res_dtor(ccx, cx.sp, did, inner_t);
     let dtor_addr =
-        bld::Load(cx, bld::GEP(cx, dtor_pair,
+        Load(cx, GEP(cx, dtor_pair,
                                    [C_int(0), C_int(abi::fn_field_code)]));
     let dtor_env =
-        bld::Load(cx, bld::GEP(cx, dtor_pair,
+        Load(cx, GEP(cx, dtor_pair,
                                    [C_int(0), C_int(abi::fn_field_box)]));
     let args = [cx.fcx.llretptr, cx.fcx.lltaskptr, dtor_env];
     for tp: ty::t in tps {
@@ -1485,12 +1485,12 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: &ast::def_id,
         lib::llvm::fn_ty_param_tys(
             llvm::LLVMGetElementType(
                 llvm::LLVMTypeOf(dtor_addr)))[std::vec::len(args)];
-    let val_cast = bld::BitCast(cx, val.val, val_llty);
-    bld::FastCall(cx, dtor_addr, args + [val_cast]);
+    let val_cast = BitCast(cx, val.val, val_llty);
+    FastCall(cx, dtor_addr, args + [val_cast]);
 
     cx = drop_ty(cx, val.val, inner_t_s).bcx;
-    bld::Store(cx, C_int(0), drop_flag.val);
-    bld::Br(cx, next_cx.llbb);
+    Store(cx, C_int(0), drop_flag.val);
+    Br(cx, next_cx.llbb);
     ret rslt(next_cx, C_nil());
 }
 
@@ -1501,30 +1501,30 @@ fn decr_refcnt_maybe_free(cx: &@block_ctxt, box_ptr_alias: ValueRef,
     let rc_adj_cx = new_sub_block_ctxt(cx, ~"rc--");
     let free_cx = new_sub_block_ctxt(cx, ~"free");
     let next_cx = new_sub_block_ctxt(cx, ~"next");
-    let box_ptr = bld::Load(cx, box_ptr_alias);
+    let box_ptr = Load(cx, box_ptr_alias);
     let llbox_ty = T_opaque_obj_ptr(*ccx);
-    box_ptr = bld::PointerCast(cx, box_ptr, llbox_ty);
-    let null_test = bld::IsNull(cx, box_ptr);
-    bld::CondBr(cx, null_test, next_cx.llbb, load_rc_cx.llbb);
+    box_ptr = PointerCast(cx, box_ptr, llbox_ty);
+    let null_test = IsNull(cx, box_ptr);
+    CondBr(cx, null_test, next_cx.llbb, load_rc_cx.llbb);
     let rc_ptr =
-        bld::GEP(load_rc_cx, box_ptr,
+        GEP(load_rc_cx, box_ptr,
                              [C_int(0), C_int(abi::box_rc_field_refcnt)]);
-    let rc = bld::Load(load_rc_cx, rc_ptr);
+    let rc = Load(load_rc_cx, rc_ptr);
     let const_test =
-        bld::ICmp(load_rc_cx, lib::llvm::LLVMIntEQ,
+        ICmp(load_rc_cx, lib::llvm::LLVMIntEQ,
                               C_int(abi::const_refcount as int), rc);
-    bld::CondBr(load_rc_cx, const_test, next_cx.llbb, rc_adj_cx.llbb);
-    rc = bld::Sub(rc_adj_cx, rc, C_int(1));
-    bld::Store(rc_adj_cx, rc, rc_ptr);
-    let zero_test = bld::ICmp(rc_adj_cx, lib::llvm::LLVMIntEQ, C_int(0), rc);
-    bld::CondBr(rc_adj_cx, zero_test, free_cx.llbb, next_cx.llbb);
+    CondBr(load_rc_cx, const_test, next_cx.llbb, rc_adj_cx.llbb);
+    rc = Sub(rc_adj_cx, rc, C_int(1));
+    Store(rc_adj_cx, rc, rc_ptr);
+    let zero_test = ICmp(rc_adj_cx, lib::llvm::LLVMIntEQ, C_int(0), rc);
+    CondBr(rc_adj_cx, zero_test, free_cx.llbb, next_cx.llbb);
     let free_res =
         free_ty(free_cx, full_alias, t);
-    bld::Br(free_res.bcx, next_cx.llbb);
+    Br(free_res.bcx, next_cx.llbb);
     let t_else = T_nil();
     let v_else = C_nil();
     let phi =
-        bld::Phi(next_cx, t_else, [v_else, v_else, v_else, free_res.val],
+        Phi(next_cx, t_else, [v_else, v_else, v_else, free_res.val],
                           [cx.llbb, load_rc_cx.llbb, rc_adj_cx.llbb,
                            free_res.bcx.llbb]);
     ret rslt(next_cx, phi);
@@ -1622,7 +1622,7 @@ fn compare_scalar_values(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef,
     }
     // FIXME: This wouldn't be necessary if we could bind methods off of
     // objects and therefore abstract over FCmp and ICmp (issue #435).  Then
-    // we could just write, e.g., "cmp_fn = bind bld::FCmp(cx, _, _, _);" in
+    // we could just write, e.g., "cmp_fn = bind FCmp(cx, _, _, _);" in
     // the above, and "auto eq_result = cmp_fn(eq_cmp, lhs, rhs);" in the
     // below.
 
@@ -1632,28 +1632,28 @@ fn compare_scalar_values(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef,
         if nt == nil_type {
             r = C_bool(op != 0u);
         } else if nt == floating_point {
-            r = bld::FCmp(cx, op, lhs, rhs);
-        } else { r = bld::ICmp(cx, op, lhs, rhs); }
+            r = FCmp(cx, op, lhs, rhs);
+        } else { r = ICmp(cx, op, lhs, rhs); }
         ret r;
     }
     let last_cx = new_sub_block_ctxt(cx, ~"last");
     let eq_cx = new_sub_block_ctxt(cx, ~"eq");
     let eq_result = generic_cmp(eq_cx, nt, eq_cmp, lhs, rhs);
-    bld::Br(eq_cx, last_cx.llbb);
+    Br(eq_cx, last_cx.llbb);
     let lt_cx = new_sub_block_ctxt(cx, ~"lt");
     let lt_result = generic_cmp(lt_cx, nt, lt_cmp, lhs, rhs);
-    bld::Br(lt_cx, last_cx.llbb);
+    Br(lt_cx, last_cx.llbb);
     let le_cx = new_sub_block_ctxt(cx, ~"le");
     let le_result = generic_cmp(le_cx, nt, le_cmp, lhs, rhs);
-    bld::Br(le_cx, last_cx.llbb);
+    Br(le_cx, last_cx.llbb);
     let unreach_cx = new_sub_block_ctxt(cx, ~"unreach");
-    bld::Unreachable(unreach_cx);
-    let llswitch = bld::Switch(cx, llop, unreach_cx.llbb, 3u);
+    Unreachable(unreach_cx);
+    let llswitch = Switch(cx, llop, unreach_cx.llbb, 3u);
     llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_eq), eq_cx.llbb);
     llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_lt), lt_cx.llbb);
     llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_le), le_cx.llbb);
     let last_result =
-        bld::Phi(last_cx, T_i1(), [eq_result, lt_result, le_result],
+        Phi(last_cx, T_i1(), [eq_result, lt_result, le_result],
                           [eq_cx.llbb, lt_cx.llbb, le_cx.llbb]);
     ret rslt(last_cx, last_result);
 }
@@ -1676,33 +1676,33 @@ fn iter_structural_ty(cx: &@block_ctxt, v: ValueRef, t: ty::t,
 
 fn load_inbounds(cx: &@block_ctxt, p: ValueRef, idxs: &[ValueRef]) ->
    ValueRef {
-    ret bld::Load(cx, bld::InBoundsGEP(cx, p, idxs));
+    ret Load(cx, InBoundsGEP(cx, p, idxs));
 }
 
 fn store_inbounds(cx: &@block_ctxt, v: ValueRef, p: ValueRef,
                   idxs: &[ValueRef]) {
-    bld::Store(cx, v, bld::InBoundsGEP(cx, p, idxs));
+    Store(cx, v, InBoundsGEP(cx, p, idxs));
 }
 
 // This uses store and inboundsGEP, but it only doing so superficially; it's
 // really storing an incremented pointer to another pointer.
 fn incr_ptr(cx: &@block_ctxt, p: ValueRef, incr: ValueRef, pp: ValueRef) {
-    bld::Store(cx, bld::InBoundsGEP(cx, p, [incr]), pp);
+    Store(cx, InBoundsGEP(cx, p, [incr]), pp);
 }
 
 fn iter_structural_ty_full(cx: &@block_ctxt, av: ValueRef, t: ty::t,
                            f: &val_and_ty_fn) -> result {
     fn iter_boxpp(cx: @block_ctxt, box_cell: ValueRef, f: &val_and_ty_fn) ->
        result {
-        let box_ptr = bld::Load(cx, box_cell);
+        let box_ptr = Load(cx, box_cell);
         let tnil = ty::mk_nil(bcx_tcx(cx));
         let tbox = ty::mk_imm_box(bcx_tcx(cx), tnil);
         let inner_cx = new_sub_block_ctxt(cx, ~"iter box");
         let next_cx = new_sub_block_ctxt(cx, ~"next");
-        let null_test = bld::IsNull(cx, box_ptr);
-        bld::CondBr(cx, null_test, next_cx.llbb, inner_cx.llbb);
+        let null_test = IsNull(cx, box_ptr);
+        CondBr(cx, null_test, next_cx.llbb, inner_cx.llbb);
         let r = f(inner_cx, box_cell, tbox);
-        bld::Br(r.bcx, next_cx.llbb);
+        Br(r.bcx, next_cx.llbb);
         ret rslt(next_cx, C_nil());
     }
 
@@ -1770,18 +1770,18 @@ fn iter_structural_ty_full(cx: &@block_ctxt, av: ValueRef, t: ty::t,
         }
 
         let lltagty = T_opaque_tag_ptr(bcx_ccx(cx).tn);
-        let av_tag = bld::PointerCast(cx, av, lltagty);
-        let lldiscrim_a_ptr = bld::GEP(cx, av_tag, [C_int(0), C_int(0)]);
-        let llunion_a_ptr = bld::GEP(cx, av_tag, [C_int(0), C_int(1)]);
-        let lldiscrim_a = bld::Load(cx, lldiscrim_a_ptr);
+        let av_tag = PointerCast(cx, av, lltagty);
+        let lldiscrim_a_ptr = GEP(cx, av_tag, [C_int(0), C_int(0)]);
+        let llunion_a_ptr = GEP(cx, av_tag, [C_int(0), C_int(1)]);
+        let lldiscrim_a = Load(cx, lldiscrim_a_ptr);
 
         // NB: we must hit the discriminant first so that structural
         // comparison know not to proceed when the discriminants differ.
         let bcx = cx;
         bcx = f(bcx, lldiscrim_a_ptr, ty::mk_int(bcx_tcx(cx))).bcx;
         let unr_cx = new_sub_block_ctxt(bcx, ~"tag-iter-unr");
-        bld::Unreachable(unr_cx);
-        let llswitch = bld::Switch(bcx, lldiscrim_a, unr_cx.llbb, n_variants);
+        Unreachable(unr_cx);
+        let llswitch = Switch(bcx, lldiscrim_a, unr_cx.llbb, n_variants);
         let next_cx = new_sub_block_ctxt(bcx, ~"tag-iter-next");
         let i = 0u;
         for variant: ty::variant_info in variants {
@@ -1793,19 +1793,19 @@ fn iter_structural_ty_full(cx: &@block_ctxt, av: ValueRef, t: ty::t,
             variant_cx =
                 iter_variant(variant_cx, llunion_a_ptr, variant, tps, tid,
                              f).bcx;
-            bld::Br(variant_cx, next_cx.llbb);
+            Br(variant_cx, next_cx.llbb);
             i += 1u;
         }
         ret rslt(next_cx, C_nil());
       }
       ty::ty_fn(_, _, _, _, _) {
         let box_cell_a =
-            bld::GEP(cx, av, [C_int(0), C_int(abi::fn_field_box)]);
+            GEP(cx, av, [C_int(0), C_int(abi::fn_field_box)]);
         ret iter_boxpp(cx, box_cell_a, f);
       }
       ty::ty_obj(_) {
         let box_cell_a =
-            bld::GEP(cx, av, [C_int(0), C_int(abi::obj_field_box)]);
+            GEP(cx, av, [C_int(0), C_int(abi::obj_field_box)]);
         ret iter_boxpp(cx, box_cell_a, f);
       }
       _ { bcx_ccx(cx).sess.unimpl(~"type in iter_structural_ty_full"); }
@@ -1832,23 +1832,23 @@ fn iter_sequence_raw(cx: @block_ctxt, dst: ValueRef,
     let cond_cx = new_scope_block_ctxt(cx, ~"sequence-iter cond");
     let body_cx = new_scope_block_ctxt(cx, ~"sequence-iter body");
     let next_cx = new_sub_block_ctxt(cx, ~"next");
-    bld::Br(bcx, cond_cx.llbb);
+    Br(bcx, cond_cx.llbb);
     let dst_curr: ValueRef =
-        bld::Phi(cond_cx, T_int(), [dst_int], [bcx.llbb]);
+        Phi(cond_cx, T_int(), [dst_int], [bcx.llbb]);
     let src_curr: ValueRef =
-        bld::Phi(cond_cx, T_int(), [src_int], [bcx.llbb]);
+        Phi(cond_cx, T_int(), [src_int], [bcx.llbb]);
     let end_test =
-        bld::ICmp(cond_cx, lib::llvm::LLVMIntULT, src_curr, src_lim_int);
-    bld::CondBr(cond_cx, end_test, body_cx.llbb, next_cx.llbb);
+        ICmp(cond_cx, lib::llvm::LLVMIntULT, src_curr, src_lim_int);
+    CondBr(cond_cx, end_test, body_cx.llbb, next_cx.llbb);
     let dst_curr_ptr = vi2p(body_cx, dst_curr, T_ptr(T_i8()));
     let src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
     let body_res = f(body_cx, dst_curr_ptr, src_curr_ptr);
     body_cx = body_res.bcx;
-    let dst_next = bld::Add(body_cx, dst_curr, elt_sz);
-    let src_next = bld::Add(body_cx, src_curr, elt_sz);
-    bld::Br(body_cx, cond_cx.llbb);
-    bld::AddIncomingToPhi(dst_curr, [dst_next], [body_cx.llbb]);
-    bld::AddIncomingToPhi(src_curr, [src_next], [body_cx.llbb]);
+    let dst_next = Add(body_cx, dst_curr, elt_sz);
+    let src_next = Add(body_cx, src_curr, elt_sz);
+    Br(body_cx, cond_cx.llbb);
+    AddIncomingToPhi(dst_curr, [dst_next], [body_cx.llbb]);
+    AddIncomingToPhi(src_curr, [src_next], [body_cx.llbb]);
     ret rslt(next_cx, C_nil());
 }
 
@@ -1865,7 +1865,7 @@ fn iter_sequence_inner(cx: &@block_ctxt, src: ValueRef,
             let llty = type_of(bcx_ccx(cx), cx.sp, elt_ty);
             llptrty = T_ptr(llty);
         } else { llptrty = T_ptr(T_ptr(T_i8())); }
-        let p = bld::PointerCast(cx, src, llptrty);
+        let p = PointerCast(cx, src, llptrty);
         ret f(cx, load_if_immediate(cx, p, elt_ty), elt_ty);
     }
     let elt_sz = size_of(cx, elt_ty);
@@ -1884,9 +1884,9 @@ fn iter_sequence(cx: @block_ctxt, v: ValueRef, t: ty::t, f: &val_and_ty_fn)
         let len;
         let llunit_ty = type_of_or_i8(bcx, elt_ty);
         if !interior {
-            p0 = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_data)]);
-            let lp = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
-            len = bld::Load(bcx, lp);
+            p0 = GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_data)]);
+            let lp = GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
+            len = Load(bcx, lp);
         } else {
             len = ivec::get_fill(bcx, v);
             p0 = ivec::get_dataptr(bcx, v, llunit_ty);
@@ -1895,10 +1895,10 @@ fn iter_sequence(cx: @block_ctxt, v: ValueRef, t: ty::t, f: &val_and_ty_fn)
         if trailing_null {
             let unit_sz = size_of(bcx, elt_ty);
             bcx = unit_sz.bcx;
-            len = bld::Sub(bcx, len, unit_sz.val);
+            len = Sub(bcx, len, unit_sz.val);
         }
         let p1 =
-            vi2p(bcx, bld::Add(bcx, vp2i(bcx, p0), len), T_ptr(llunit_ty));
+            vi2p(bcx, Add(bcx, vp2i(bcx, p0), len), T_ptr(llunit_ty));
         ret iter_sequence_inner(bcx, p0, p1, elt_ty, f);
     }
 
@@ -2046,22 +2046,22 @@ fn call_tydesc_glue_full(cx: &@block_ctxt, v: ValueRef, tydesc: ValueRef,
       }
     }
 
-    let llrawptr = bld::PointerCast(cx, v, T_ptr(T_i8()));
+    let llrawptr = PointerCast(cx, v, T_ptr(T_i8()));
     let lltydescs =
-        bld::GEP(cx, tydesc,
+        GEP(cx, tydesc,
                      [C_int(0), C_int(abi::tydesc_field_first_param)]);
-    lltydescs = bld::Load(cx, lltydescs);
+    lltydescs = Load(cx, lltydescs);
 
     let llfn;
     alt static_glue_fn {
       none. {
-        let llfnptr = bld::GEP(cx, tydesc, [C_int(0), C_int(field)]);
-        llfn = bld::Load(cx, llfnptr);
+        let llfnptr = GEP(cx, tydesc, [C_int(0), C_int(field)]);
+        llfn = Load(cx, llfnptr);
       }
       some(sgf) { llfn = sgf; }
     }
 
-    bld::Call(cx, llfn,
+    Call(cx, llfn,
                   [C_null(T_ptr(T_nil())), cx.fcx.lltaskptr,
                    C_null(T_ptr(T_nil())), lltydescs, llrawptr]);
 }
@@ -2081,24 +2081,24 @@ fn call_cmp_glue(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
 
     let lllhs = spill_if_immediate(cx, lhs, t);
     let llrhs = spill_if_immediate(cx, rhs, t);
-    let llrawlhsptr = bld::BitCast(cx, lllhs, T_ptr(T_i8()));
-    let llrawrhsptr = bld::BitCast(cx, llrhs, T_ptr(T_i8()));
+    let llrawlhsptr = BitCast(cx, lllhs, T_ptr(T_i8()));
+    let llrawrhsptr = BitCast(cx, llrhs, T_ptr(T_i8()));
     let ti = none::<@tydesc_info>;
     let r = get_tydesc(cx, t, false, tps_normal, ti).result;
     lazily_emit_tydesc_glue(cx, abi::tydesc_field_cmp_glue, ti);
     let lltydesc = r.val;
     let lltydescs =
-        bld::GEP(r.bcx, lltydesc,
+        GEP(r.bcx, lltydesc,
                         [C_int(0), C_int(abi::tydesc_field_first_param)]);
-    lltydescs = bld::Load(r.bcx, lltydescs);
+    lltydescs = Load(r.bcx, lltydescs);
 
     let llfn;
     alt ti {
       none. {
         let llfnptr =
-            bld::GEP(r.bcx, lltydesc,
+            GEP(r.bcx, lltydesc,
                             [C_int(0), C_int(abi::tydesc_field_cmp_glue)]);
-        llfn = bld::Load(r.bcx, llfnptr);
+        llfn = Load(r.bcx, llfnptr);
       }
       some(sti) { llfn = option::get(sti.cmp_glue); }
     }
@@ -2107,8 +2107,8 @@ fn call_cmp_glue(cx: &@block_ctxt, lhs: ValueRef, rhs: ValueRef, t: ty::t,
     let llargs: [ValueRef] =
         [llcmpresultptr, r.bcx.fcx.lltaskptr, lltydesc, lltydescs,
          llrawlhsptr, llrawrhsptr, llop];
-    bld::Call(r.bcx, llfn, llargs);
-    ret rslt(r.bcx, bld::Load(r.bcx, llcmpresultptr));
+    Call(r.bcx, llfn, llargs);
+    ret rslt(r.bcx, Load(r.bcx, llcmpresultptr));
 }
 
 fn call_copy_glue(cx: &@block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t,
@@ -2116,36 +2116,36 @@ fn call_copy_glue(cx: &@block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t,
     // You can't call this on immediate types. Those are simply copied with
     // Load/Store.
     assert !type_is_immediate(bcx_ccx(cx), t);
-    let srcptr = bld::BitCast(cx, src, T_ptr(T_i8()));
-    let dstptr = bld::BitCast(cx, dst, T_ptr(T_i8()));
+    let srcptr = BitCast(cx, src, T_ptr(T_i8()));
+    let dstptr = BitCast(cx, dst, T_ptr(T_i8()));
     let ti = none;
     let {bcx, val: lltydesc} =
         get_tydesc(cx, t, false, tps_normal, ti).result;
     lazily_emit_tydesc_glue(cx, abi::tydesc_field_copy_glue, ti);
-    let lltydescs = bld::GEP
+    let lltydescs = GEP
         (bcx, lltydesc, [C_int(0), C_int(abi::tydesc_field_first_param)]);
-    lltydescs = bld::Load(bcx, lltydescs);
+    lltydescs = Load(bcx, lltydescs);
 
     let llfn = alt ti {
       none. {
-        bld::Load(bcx, bld::GEP
+        Load(bcx, GEP
             (bcx, lltydesc, [C_int(0), C_int(abi::tydesc_field_copy_glue)]))
       }
       some(sti) { option::get(sti.copy_glue) }
     };
-    bld::Call(bcx, llfn, [C_null(T_ptr(T_nil())), bcx.fcx.lltaskptr,
+    Call(bcx, llfn, [C_null(T_ptr(T_nil())), bcx.fcx.lltaskptr,
                           C_null(T_ptr(T_nil())), lltydescs, srcptr, dstptr]);
     if take {
         lazily_emit_tydesc_glue(cx, abi::tydesc_field_take_glue, ti);
         llfn = alt ti {
           none. {
-            bld::Load(bcx, bld::GEP(bcx, lltydesc,
+            Load(bcx, GEP(bcx, lltydesc,
                                     [C_int(0),
                                      C_int(abi::tydesc_field_take_glue)]))
           }
           some(sti) { option::get(sti.take_glue) }
         };
-        bld::Call(bcx, llfn, [C_null(T_ptr(T_nil())), bcx.fcx.lltaskptr,
+        Call(bcx, llfn, [C_null(T_ptr(T_nil())), bcx.fcx.lltaskptr,
                               C_null(T_ptr(T_nil())), lltydescs, dstptr]);
     }
     ret bcx;
@@ -2193,13 +2193,13 @@ fn call_memmove(cx: &@block_ctxt, dst: ValueRef, src: ValueRef,
     let i = bcx_ccx(cx).intrinsics;
     assert (i.contains_key(~"llvm.memmove.p0i8.p0i8.i32"));
     let memmove = i.get(~"llvm.memmove.p0i8.p0i8.i32");
-    let src_ptr = bld::PointerCast(cx, src, T_ptr(T_i8()));
-    let dst_ptr = bld::PointerCast(cx, dst, T_ptr(T_i8()));
-    let size = bld::IntCast(cx, n_bytes, T_i32());
+    let src_ptr = PointerCast(cx, src, T_ptr(T_i8()));
+    let dst_ptr = PointerCast(cx, dst, T_ptr(T_i8()));
+    let size = IntCast(cx, n_bytes, T_i32());
     let align = C_int(1);
     let volatile = C_bool(false);
     ret rslt(cx,
-             bld::Call(cx, memmove,
+             Call(cx, memmove,
                            [dst_ptr, src_ptr, size, align, volatile]));
 }
 
@@ -2210,15 +2210,15 @@ fn call_bzero(cx: &@block_ctxt, dst: ValueRef, n_bytes: ValueRef,
     let i = bcx_ccx(cx).intrinsics;
     assert (i.contains_key(~"llvm.memset.p0i8.i32"));
     let memset = i.get(~"llvm.memset.p0i8.i32");
-    let dst_ptr = bld::PointerCast(cx, dst, T_ptr(T_i8()));
-    let size = bld::IntCast(cx, n_bytes, T_i32());
+    let dst_ptr = PointerCast(cx, dst, T_ptr(T_i8()));
+    let size = IntCast(cx, n_bytes, T_i32());
     let align =
         if lib::llvm::llvm::LLVMIsConstant(align_bytes) == True {
-            bld::IntCast(cx, align_bytes, T_i32())
-        } else { bld::IntCast(cx, C_int(0), T_i32()) };
+            IntCast(cx, align_bytes, T_i32())
+        } else { IntCast(cx, C_int(0), T_i32()) };
     let volatile = C_bool(false);
     ret rslt(cx,
-             bld::Call(cx, memset,
+             Call(cx, memset,
                            [dst_ptr, C_u8(0u), size, align, volatile]));
 }
 
@@ -2230,7 +2230,7 @@ fn memmove_ty(cx: &@block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t) ->
     } else if ty::type_is_structural(bcx_tcx(cx), t) {
         let llsz = llsize_of(type_of(bcx_ccx(cx), cx.sp, t));
         ret call_memmove(cx, dst, src, llsz);
-    } else { ret rslt(cx, bld::Store(cx, bld::Load(cx, src), dst)); }
+    } else { ret rslt(cx, Store(cx, Load(cx, src), dst)); }
 }
 
 tag copy_action { INIT; DROP_EXISTING; }
@@ -2251,11 +2251,11 @@ fn copy_val(cx: &@block_ctxt, action: copy_action, dst: ValueRef,
         let do_copy_cx = new_sub_block_ctxt(cx, ~"do_copy");
         let next_cx = new_sub_block_ctxt(cx, ~"next");
         let self_assigning =
-            bld::ICmp(cx, lib::llvm::LLVMIntNE,
-                          bld::PointerCast(cx, dst, val_ty(src)), src);
-        bld::CondBr(cx, self_assigning, do_copy_cx.llbb, next_cx.llbb);
+            ICmp(cx, lib::llvm::LLVMIntNE,
+                          PointerCast(cx, dst, val_ty(src)), src);
+        CondBr(cx, self_assigning, do_copy_cx.llbb, next_cx.llbb);
         do_copy_cx = copy_val_no_check(do_copy_cx, action, dst, src, t);
-        bld::Br(do_copy_cx, next_cx.llbb);
+        Br(do_copy_cx, next_cx.llbb);
         ret next_cx;
     }
     ret copy_val_no_check(cx, action, dst, src, t);
@@ -2271,7 +2271,7 @@ fn copy_val_no_check(cx: &@block_ctxt, action: copy_action, dst: ValueRef,
     }
 
     if ty::type_is_scalar(ccx.tcx, t) || ty::type_is_native(ccx.tcx, t) {
-        bld::Store(cx, src, dst);
+        Store(cx, src, dst);
         ret cx;
     } else if ty::type_is_nil(ccx.tcx, t) || ty::type_is_bot(ccx.tcx, t) {
         ret cx;
@@ -2280,7 +2280,7 @@ fn copy_val_no_check(cx: &@block_ctxt, action: copy_action, dst: ValueRef,
         let bcx = if action == DROP_EXISTING {
             drop_ty(cx, dst, t).bcx
         } else { cx };
-        bld::Store(bcx, src, dst);
+        Store(bcx, src, dst);
         bcx = take_ty(bcx, dst, t).bcx;
         ret bcx;
     } else if type_is_structural_or_param(ccx.tcx, t) {
@@ -2306,18 +2306,18 @@ fn move_val(cx: @block_ctxt, action: copy_action, dst: ValueRef,
     let tcx = bcx_tcx(cx);
     if ty::type_is_scalar(tcx, t) ||
            ty::type_is_native(tcx, t) {
-        if src.is_mem { src_val = bld::Load(cx, src_val); }
-        bld::Store(cx, src_val, dst);
+        if src.is_mem { src_val = Load(cx, src_val); }
+        Store(cx, src_val, dst);
         ret cx;
     } else if ty::type_is_nil(tcx, t) || ty::type_is_bot(tcx, t) {
         ret cx;
     } else if ty::type_is_unique(tcx, t) ||
               ty::type_is_boxed(tcx, t) {
-        if src.is_mem { src_val = bld::Load(cx, src_val); }
+        if src.is_mem { src_val = Load(cx, src_val); }
         if action == DROP_EXISTING {
             cx = drop_ty(cx, dst, t).bcx;
         }
-        bld::Store(cx, src_val, dst);
+        Store(cx, src_val, dst);
         if src.is_mem { ret zero_alloca(cx, src.res.val, t).bcx; }
 
         // If we're here, it must be a temporary.
@@ -2410,13 +2410,13 @@ fn trans_unary(cx: &@block_ctxt, op: ast::unop, e: &@ast::expr,
     alt op {
       ast::not. {
         let sub = trans_expr(cx, e);
-        ret rslt(sub.bcx, bld::Not(sub.bcx, sub.val));
+        ret rslt(sub.bcx, Not(sub.bcx, sub.val));
       }
       ast::neg. {
         let sub = trans_expr(cx, e);
         if ty::struct(bcx_tcx(cx), e_ty) == ty::ty_float {
-            ret rslt(sub.bcx, bld::FNeg(sub.bcx, sub.val));
-        } else { ret rslt(sub.bcx, bld::Neg(sub.bcx, sub.val)); }
+            ret rslt(sub.bcx, FNeg(sub.bcx, sub.val));
+        } else { ret rslt(sub.bcx, Neg(sub.bcx, sub.val)); }
       }
       ast::box(_) {
         let lv = trans_lval(cx, e);
@@ -2430,7 +2430,7 @@ fn trans_unary(cx: &@block_ctxt, op: ast::unop, e: &@ast::expr,
         // on whether they're boxed or not.
         if !ty::type_has_dynamic_size(bcx_tcx(cx), e_ty) {
             let llety = T_ptr(type_of(bcx_ccx(sub.bcx), e.span, e_ty));
-            body = bld::PointerCast(sub.bcx, body, llety);
+            body = PointerCast(sub.bcx, body, llety);
         }
         let bcx = move_val_if_temp(sub.bcx, INIT, body, lv, e_ty);
         ret rslt(bcx, sub.box);
@@ -2459,7 +2459,7 @@ fn trans_compare(cx: &@block_ctxt, op: ast::binop, lhs: ValueRef,
     alt op {
       ast::eq. | ast::lt. | ast::le. { ret rslt(rs.bcx, rs.val); }
       ast::ne. | ast::ge. | ast::gt. {
-        ret rslt(rs.bcx, bld::Not(rs.bcx, rs.val));
+        ret rslt(rs.bcx, Not(rs.bcx, rs.val));
       }
     }
 }
@@ -2482,10 +2482,10 @@ fn trans_evec_append(cx: &@block_ctxt, t: ty::t, lhs: ValueRef,
     lazily_emit_tydesc_glue(cx, abi::tydesc_field_drop_glue, ti);
     lazily_emit_tydesc_glue(cx, abi::tydesc_field_free_glue, ti);
     bcx = llelt_tydesc.bcx;
-    let dst = bld::PointerCast(bcx, lhs, T_ptr(T_opaque_vec_ptr()));
-    let src = bld::PointerCast(bcx, rhs, T_opaque_vec_ptr());
+    let dst = PointerCast(bcx, lhs, T_ptr(T_opaque_vec_ptr()));
+    let src = PointerCast(bcx, rhs, T_opaque_vec_ptr());
     ret rslt(bcx,
-             bld::Call(bcx, bcx_ccx(cx).upcalls.evec_append,
+             Call(bcx, bcx_ccx(cx).upcalls.evec_append,
                             [cx.fcx.lltaskptr, llvec_tydesc.val,
                              llelt_tydesc.val, dst, src, skip_null]));
 }
@@ -2510,7 +2510,7 @@ fn trans_eager_binop(cx: &@block_ctxt, op: ast::binop, lhs: ValueRef,
     // operation.
     if ty::type_is_bot(bcx_tcx(cx), lhs_t) ||
            ty::type_is_bot(bcx_tcx(cx), rhs_t) {
-        ret rslt(cx, bld::Unreachable(cx));
+        ret rslt(cx, Unreachable(cx));
     }
 
     let is_float = false;
@@ -2530,37 +2530,37 @@ fn trans_eager_binop(cx: &@block_ctxt, op: ast::binop, lhs: ValueRef,
             ret trans_evec_add(cx, intype, lhs, rhs);
         }
         if is_float {
-            ret rslt(cx, bld::FAdd(cx, lhs, rhs));
-        } else { ret rslt(cx, bld::Add(cx, lhs, rhs)); }
+            ret rslt(cx, FAdd(cx, lhs, rhs));
+        } else { ret rslt(cx, Add(cx, lhs, rhs)); }
       }
       ast::sub. {
         if is_float {
-            ret rslt(cx, bld::FSub(cx, lhs, rhs));
-        } else { ret rslt(cx, bld::Sub(cx, lhs, rhs)); }
+            ret rslt(cx, FSub(cx, lhs, rhs));
+        } else { ret rslt(cx, Sub(cx, lhs, rhs)); }
       }
       ast::mul. {
         if is_float {
-            ret rslt(cx, bld::FMul(cx, lhs, rhs));
-        } else { ret rslt(cx, bld::Mul(cx, lhs, rhs)); }
+            ret rslt(cx, FMul(cx, lhs, rhs));
+        } else { ret rslt(cx, Mul(cx, lhs, rhs)); }
       }
       ast::div. {
-        if is_float { ret rslt(cx, bld::FDiv(cx, lhs, rhs)); }
+        if is_float { ret rslt(cx, FDiv(cx, lhs, rhs)); }
         if ty::type_is_signed(bcx_tcx(cx), intype) {
-            ret rslt(cx, bld::SDiv(cx, lhs, rhs));
-        } else { ret rslt(cx, bld::UDiv(cx, lhs, rhs)); }
+            ret rslt(cx, SDiv(cx, lhs, rhs));
+        } else { ret rslt(cx, UDiv(cx, lhs, rhs)); }
       }
       ast::rem. {
-        if is_float { ret rslt(cx, bld::FRem(cx, lhs, rhs)); }
+        if is_float { ret rslt(cx, FRem(cx, lhs, rhs)); }
         if ty::type_is_signed(bcx_tcx(cx), intype) {
-            ret rslt(cx, bld::SRem(cx, lhs, rhs));
-        } else { ret rslt(cx, bld::URem(cx, lhs, rhs)); }
-      }
-      ast::bitor. { ret rslt(cx, bld::Or(cx, lhs, rhs)); }
-      ast::bitand. { ret rslt(cx, bld::And(cx, lhs, rhs)); }
-      ast::bitxor. { ret rslt(cx, bld::Xor(cx, lhs, rhs)); }
-      ast::lsl. { ret rslt(cx, bld::Shl(cx, lhs, rhs)); }
-      ast::lsr. { ret rslt(cx, bld::LShr(cx, lhs, rhs)); }
-      ast::asr. { ret rslt(cx, bld::AShr(cx, lhs, rhs)); }
+            ret rslt(cx, SRem(cx, lhs, rhs));
+        } else { ret rslt(cx, URem(cx, lhs, rhs)); }
+      }
+      ast::bitor. { ret rslt(cx, Or(cx, lhs, rhs)); }
+      ast::bitand. { ret rslt(cx, And(cx, lhs, rhs)); }
+      ast::bitxor. { ret rslt(cx, Xor(cx, lhs, rhs)); }
+      ast::lsl. { ret rslt(cx, Shl(cx, lhs, rhs)); }
+      ast::lsr. { ret rslt(cx, LShr(cx, lhs, rhs)); }
+      ast::asr. { ret rslt(cx, AShr(cx, lhs, rhs)); }
       _ { ret trans_compare(cx, op, lhs, lhs_t, rhs, rhs_t); }
     }
 }
@@ -2573,7 +2573,7 @@ fn autoderef(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result_t {
         alt ty::struct(ccx.tcx, t1) {
           ty::ty_box(mt) {
             let body =
-                bld::GEP(cx, v1, [C_int(0), C_int(abi::box_rc_field_body)]);
+                GEP(cx, v1, [C_int(0), C_int(abi::box_rc_field_body)]);
             t1 = mt.ty;
 
             // Since we're changing levels of box indirection, we may have
@@ -2582,13 +2582,13 @@ fn autoderef(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result_t {
             // or not.
             if !ty::type_has_dynamic_size(ccx.tcx, mt.ty) {
                 let llty = type_of(ccx, cx.sp, mt.ty);
-                v1 = bld::PointerCast(cx, body, T_ptr(llty));
+                v1 = PointerCast(cx, body, T_ptr(llty));
             } else { v1 = body; }
           }
           ty::ty_uniq(t) { fail "autoderef uniq unimplemented"; }
           ty::ty_res(did, inner, tps) {
             t1 = ty::substitute_type_params(ccx.tcx, tps, inner);
-            v1 = bld::GEP(cx, v1, [C_int(0), C_int(1)]);
+            v1 = GEP(cx, v1, [C_int(0), C_int(1)]);
           }
           ty::ty_tag(did, tps) {
             let variants = ty::tag_variants(ccx.tcx, did);
@@ -2599,7 +2599,7 @@ fn autoderef(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> result_t {
             t1 =
                 ty::substitute_type_params(ccx.tcx, tps, variants[0].args[0]);
             if !ty::type_has_dynamic_size(ccx.tcx, t1) {
-                v1 = bld::PointerCast(cx, v1, T_ptr(type_of(ccx, cx.sp, t1)));
+                v1 = PointerCast(cx, v1, T_ptr(type_of(ccx, cx.sp, t1)));
             }
           }
           _ { break; }
@@ -2628,7 +2628,7 @@ fn trans_binary(cx: &@block_ctxt, op: ast::binop, a: &@ast::expr,
         // because and/or are lazy. So the rhs may never execute,
         // and the cleanups can't be pushed into later code.
         let rhs_bcx = trans_block_cleanups(rhs_res.bcx, rhs_cx);
-        bld::CondBr(lhs_res.bcx, lhs_res.val, rhs_cx.llbb, lhs_false_cx.llbb);
+        CondBr(lhs_res.bcx, lhs_res.val, rhs_cx.llbb, lhs_false_cx.llbb);
         ret join_results(cx, T_bool(),
                          [lhs_false_res, {bcx: rhs_bcx, val: rhs_res.val}]);
       }
@@ -2642,7 +2642,7 @@ fn trans_binary(cx: &@block_ctxt, op: ast::binop, a: &@ast::expr,
 
         // see the and case for an explanation
         let rhs_bcx = trans_block_cleanups(rhs_res.bcx, rhs_cx);
-        bld::CondBr(lhs_res.bcx, lhs_res.val, lhs_true_cx.llbb, rhs_cx.llbb);
+        CondBr(lhs_res.bcx, lhs_res.val, lhs_true_cx.llbb, rhs_cx.llbb);
         ret join_results(cx, T_bool(),
                          [lhs_true_res, {bcx: rhs_bcx, val: rhs_res.val}]);
       }
@@ -2684,15 +2684,15 @@ fn join_results(parent_cx: &@block_ctxt, t: TypeRef, ins: &[result]) ->
     // We have >1 incoming edges. Make a join block and br+phi them into it.
 
     let join_cx = new_sub_block_ctxt(parent_cx, ~"join");
-    for r: result in live { bld::Br(r.bcx, join_cx.llbb); }
-    let phi = bld::Phi(join_cx, t, vals, bbs);
+    for r: result in live { Br(r.bcx, join_cx.llbb); }
+    let phi = Phi(join_cx, t, vals, bbs);
     ret rslt(join_cx, phi);
 }
 
 fn join_branches(parent_cx: &@block_ctxt, ins: &[result]) -> @block_ctxt {
     let out = new_sub_block_ctxt(parent_cx, ~"join");
     for r: result in ins {
-        if !is_terminated(r.bcx) { bld::Br(r.bcx, out.llbb); }
+        if !is_terminated(r.bcx) { Br(r.bcx, out.llbb); }
     }
     ret out;
 }
@@ -2708,7 +2708,7 @@ fn trans_if(cx: &@block_ctxt, cond: &@ast::expr, thn: &ast::blk,
         // No need to generate code for comparison,
         // since the cond diverges.
         if !is_terminated(cx) {
-            ret rslt(cx, bld::Unreachable(cx));
+            ret rslt(cx, Unreachable(cx));
         } else { ret cond_res; }
     }
 
@@ -2736,7 +2736,7 @@ fn trans_if(cx: &@block_ctxt, cond: &@ast::expr, thn: &ast::blk,
           }
           _ { rslt(else_cx, C_nil()) }
         };
-    bld::CondBr(cond_res.bcx, cond_res.val, then_cx.llbb, else_cx.llbb);
+    CondBr(cond_res.bcx, cond_res.val, then_cx.llbb, else_cx.llbb);
     ret rslt(join_branches(cx, [then_res, else_res]), C_nil());
 }
 
@@ -2751,7 +2751,7 @@ fn trans_for(cx: &@block_ctxt, local: &@ast::local, seq: &@ast::expr,
             new_loop_scope_block_ctxt(cx,
                                       option::some::<@block_ctxt>(next_cx),
                                       outer_next_cx, ~"for loop scope");
-        bld::Br(cx, scope_cx.llbb);
+        Br(cx, scope_cx.llbb);
         let local_res = alloc_local(scope_cx, local);
         let bcx = copy_val(local_res.bcx, INIT, local_res.val, curr, t);
         add_clean(scope_cx, local_res.val, t);
@@ -2761,7 +2761,7 @@ fn trans_for(cx: &@block_ctxt, local: &@ast::local, seq: &@ast::expr,
                                             false);
         bcx = trans_block(bcx, body, return).bcx;
         if !is_terminated(bcx) {
-            bld::Br(bcx, next_cx.llbb);
+            Br(bcx, next_cx.llbb);
             // otherwise, this code is unreachable
         }
         ret rslt(next_cx, C_nil());
@@ -2772,7 +2772,7 @@ fn trans_for(cx: &@block_ctxt, local: &@ast::local, seq: &@ast::expr,
     let it =
         iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
                       bind inner(_, local, _, _, body, next_cx));
-    bld::Br(it.bcx, next_cx.llbb);
+    Br(it.bcx, next_cx.llbb);
     ret rslt(next_cx, it.val);
 }
 
@@ -2841,7 +2841,7 @@ fn build_environment(bcx: @block_ctxt, lltydescs: [ValueRef],
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
         bcx = bindings_tydesc.bcx;
-        bld::Store(bcx, bindings_tydesc.val, bound_tydesc);
+        Store(bcx, bindings_tydesc.val, bound_tydesc);
     }
 
     // Copy expr values into boxed bindings.
@@ -2856,7 +2856,7 @@ fn build_environment(bcx: @block_ctxt, lltydescs: [ValueRef],
         bcx = bound.bcx;
         if copying {
             bcx = move_val_if_temp(bcx, INIT, bound.val, lv, bound_tys[i]);
-        } else { bld::Store(bcx, lv.res.val, bound.val); }
+        } else { Store(bcx, lv.res.val, bound.val); }
         i += 1u;
     }
 
@@ -2869,7 +2869,7 @@ fn build_environment(bcx: @block_ctxt, lltydescs: [ValueRef],
     i = 0u;
     for td: ValueRef in lltydescs {
         let ty_param_slot = GEPi(bcx, ty_params_slot.val, [0, i as int]);
-        bld::Store(bcx, td, ty_param_slot);
+        Store(bcx, td, ty_param_slot);
         i += 1u;
     }
 
@@ -2920,10 +2920,10 @@ fn find_environment_tydescs(bcx: &@block_ctxt, envty: ty::t,
             let llenv = GEPi(bcx, closure, [0, abi::box_rc_field_body]);
             // Load the tydesc and find the size of the body
             let lldesc =
-                bld::Load(bcx, GEPi(bcx, llenv,
+                Load(bcx, GEPi(bcx, llenv,
                                     [0, abi::closure_elt_tydesc]));
             let llsz =
-                bld::Load(bcx, GEPi(bcx, lldesc,
+                Load(bcx, GEPi(bcx, lldesc,
                                     [0, abi::tydesc_field_size]));
 
             // Get the bindings pointer and add the size to it
@@ -2941,7 +2941,7 @@ fn load_environment(enclosing_cx: &@block_ctxt, fcx: &@fn_ctxt, envty: ty::t,
 
     let ty = ty::mk_imm_box(bcx_tcx(bcx), envty);
     let llty = type_of(bcx_ccx(bcx), bcx.sp, ty);
-    let llclosure = bld::PointerCast(bcx, fcx.llenv, llty);
+    let llclosure = PointerCast(bcx, fcx.llenv, llty);
 
     // Populate the type parameters from the environment. We need to
     // do this first because the tydescs are needed to index into
@@ -2951,7 +2951,7 @@ fn load_environment(enclosing_cx: &@block_ctxt, fcx: &@fn_ctxt, envty: ty::t,
     let i = 0u;
     while i < tydesc_count {
         let lltydescptr = GEPi(bcx, lltydescs, [0, i as int]);
-        fcx.lltydescs += [bld::Load(bcx, lltydescptr)];
+        fcx.lltydescs += [Load(bcx, lltydescptr)];
         i += 1u;
     }
 
@@ -2962,7 +2962,7 @@ fn load_environment(enclosing_cx: &@block_ctxt, fcx: &@fn_ctxt, envty: ty::t,
     // the iterbody.
     if !copying && !option::is_none(enclosing_cx.fcx.lliterbody) {
         let iterbodyptr = GEP_tup_like(bcx, ty, llclosure, path + [0]);
-        fcx.lliterbody = some(bld::Load(bcx, iterbodyptr.val));
+        fcx.lliterbody = some(Load(bcx, iterbodyptr.val));
         bcx = iterbodyptr.bcx;
         i += 1u;
     }
@@ -2972,7 +2972,7 @@ fn load_environment(enclosing_cx: &@block_ctxt, fcx: &@fn_ctxt, envty: ty::t,
         let upvarptr = GEP_tup_like(bcx, ty, llclosure, path + [i as int]);
         bcx = upvarptr.bcx;
         let llupvarptr = upvarptr.val;
-        if !copying { llupvarptr = bld::Load(bcx, llupvarptr); }
+        if !copying { llupvarptr = Load(bcx, llupvarptr); }
         let def_id = ast_util::def_id_of_def(bcx_tcx(bcx).
                                              def_map.get(upvar_id));
         fcx.llupvars.insert(def_id.node, llupvarptr);
@@ -3073,10 +3073,10 @@ fn trans_while(cx: &@block_ctxt, cond: &@ast::expr, body: &ast::blk) ->
         new_scope_block_ctxt(cond_cx, ~"while loop body");
     let body_res = trans_block(body_cx, body, return);
     let cond_res = trans_expr(cond_cx, cond);
-    bld::Br(body_res.bcx, cond_cx.llbb);
+    Br(body_res.bcx, cond_cx.llbb);
     let cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
-    bld::CondBr(cond_bcx, cond_res.val, body_cx.llbb, next_cx.llbb);
-    bld::Br(cx, cond_cx.llbb);
+    CondBr(cond_bcx, cond_res.val, body_cx.llbb, next_cx.llbb);
+    Br(cx, cond_cx.llbb);
     ret rslt(next_cx, C_nil());
 }
 
@@ -3091,13 +3091,13 @@ fn trans_do_while(cx: &@block_ctxt, body: &ast::blk, cond: &@ast::expr) ->
         // This is kind of ridiculous, but no permutations
         // involving body_res or body_cx.val worked.
         let rs = trans_block(cx, body, return);
-        if !is_terminated(next_cx) { bld::Unreachable(next_cx); }
-        if !is_terminated(body_cx) { bld::Unreachable(body_cx); }
+        if !is_terminated(next_cx) { Unreachable(next_cx); }
+        if !is_terminated(body_cx) { Unreachable(body_cx); }
         ret rs;
     }
     let cond_res = trans_expr(body_res.bcx, cond);
-    bld::CondBr(cond_res.bcx, cond_res.val, body_cx.llbb, next_cx.llbb);
-    bld::Br(cx, body_cx.llbb);
+    CondBr(cond_res.bcx, cond_res.val, body_cx.llbb, next_cx.llbb);
+    Br(cx, body_cx.llbb);
     ret rslt(next_cx, body_res.val);
 }
 
@@ -3230,13 +3230,13 @@ fn trans_var(cx: &@block_ctxt, sp: &span, id: ast::node_id) -> lval_result {
             let lltagblob = alloc_result.val;
             let lltagty = type_of_tag(ccx, sp, tid, tag_ty);
             let bcx = alloc_result.bcx;
-            let lltagptr = bld::PointerCast(bcx, lltagblob, T_ptr(lltagty));
+            let lltagptr = PointerCast(bcx, lltagblob, T_ptr(lltagty));
             if std::vec::len(ty::tag_variants(ccx.tcx, tid)) != 1u {
                 let lldiscrim_gv = lookup_discriminant(bcx.fcx.lcx, vid);
-                let lldiscrim = bld::Load(bcx, lldiscrim_gv);
+                let lldiscrim = Load(bcx, lldiscrim_gv);
                 let lldiscrimptr =
-                    bld::GEP(bcx, lltagptr, [C_int(0), C_int(0)]);
-                bld::Store(bcx, lldiscrim, lldiscrimptr);
+                    GEP(bcx, lltagptr, [C_int(0), C_int(0)]);
+                Store(bcx, lldiscrim, lldiscrimptr);
             }
             ret lval_val(bcx, lltagptr);
           }
@@ -3283,20 +3283,20 @@ fn trans_field(cx: &@block_ctxt, sp: &span, v: ValueRef, t0: ty::t,
       ty::ty_obj(methods) {
         let ix: uint = ty::method_idx(bcx_ccx(cx).sess, sp, field, methods);
         let vtbl =
-            bld::GEP(r.bcx, r.val, [C_int(0), C_int(abi::obj_field_vtbl)]);
-        vtbl = bld::Load(r.bcx, vtbl);
+            GEP(r.bcx, r.val, [C_int(0), C_int(abi::obj_field_vtbl)]);
+        vtbl = Load(r.bcx, vtbl);
 
         let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
-        vtbl = bld::PointerCast(cx, vtbl, vtbl_type);
+        vtbl = PointerCast(cx, vtbl, vtbl_type);
 
-        let v = bld::GEP(r.bcx, vtbl, [C_int(0), C_int(ix as int)]);
+        let v = GEP(r.bcx, vtbl, [C_int(0), C_int(ix as int)]);
         let fn_ty: ty::t = ty::method_ty_to_fn_ty(bcx_tcx(cx), methods[ix]);
         let tcx = bcx_tcx(cx);
         let ll_fn_ty =
             type_of_fn_full(bcx_ccx(cx), sp, ty::ty_fn_proto(tcx, fn_ty),
                             true, ty::ty_fn_args(tcx, fn_ty),
                             ty::ty_fn_ret(tcx, fn_ty), 0u);
-        v = bld::PointerCast(r.bcx, v, T_ptr(T_ptr(ll_fn_ty)));
+        v = PointerCast(r.bcx, v, T_ptr(T_ptr(ll_fn_ty)));
         let lvo = lval_mem(r.bcx, v);
         ret {llobj: some::<ValueRef>(r.val), method_ty: some::<ty::t>(fn_ty)
                 with lvo};
@@ -3323,15 +3323,15 @@ fn trans_index(cx: &@block_ctxt, sp: &span, base: &@ast::expr,
     let ix_size = llsize_of_real(bcx_ccx(cx), val_ty(ix.val));
     let int_size = llsize_of_real(bcx_ccx(cx), T_int());
     if ix_size < int_size {
-        ix_val = bld::ZExt(bcx, ix.val, T_int());
+        ix_val = ZExt(bcx, ix.val, T_int());
     } else if ix_size > int_size {
-        ix_val = bld::Trunc(bcx, ix.val, T_int());
+        ix_val = Trunc(bcx, ix.val, T_int());
     } else { ix_val = ix.val; }
     let unit_ty = node_id_type(bcx_ccx(cx), id);
     let unit_sz = size_of(bcx, unit_ty);
     bcx = unit_sz.bcx;
     maybe_name_value(bcx_ccx(cx), unit_sz.val, ~"unit_sz");
-    let scaled_ix = bld::Mul(bcx, ix_val, unit_sz.val);
+    let scaled_ix = Mul(bcx, ix_val, unit_sz.val);
     maybe_name_value(bcx_ccx(cx), scaled_ix, ~"scaled_ix");
     let interior_len_and_data;
     if is_interior {
@@ -3343,14 +3343,14 @@ fn trans_index(cx: &@block_ctxt, sp: &span, base: &@ast::expr,
     alt interior_len_and_data {
       some(lad) { lim = lad.len; }
       none. {
-        lim = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
-        lim = bld::Load(bcx, lim);
+        lim = GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_fill)]);
+        lim = Load(bcx, lim);
       }
     }
-    let bounds_check = bld::ICmp(bcx, lib::llvm::LLVMIntULT, scaled_ix, lim);
+    let bounds_check = ICmp(bcx, lib::llvm::LLVMIntULT, scaled_ix, lim);
     let fail_cx = new_sub_block_ctxt(bcx, ~"fail");
     let next_cx = new_sub_block_ctxt(bcx, ~"next");
-    bld::CondBr(bcx, bounds_check, next_cx.llbb, fail_cx.llbb);
+    CondBr(bcx, bounds_check, next_cx.llbb, fail_cx.llbb);
     // fail: bad bounds check.
 
     trans_fail(fail_cx, some::<span>(sp), ~"bounds check");
@@ -3359,20 +3359,20 @@ fn trans_index(cx: &@block_ctxt, sp: &span, base: &@ast::expr,
       some(lad) { body = lad.data; }
       none. {
         body =
-            bld::GEP(next_cx, v,
+            GEP(next_cx, v,
                               [C_int(0), C_int(abi::vec_elt_data), C_int(0)]);
       }
     }
     let elt;
     if ty::type_has_dynamic_size(bcx_tcx(cx), unit_ty) {
-        body = bld::PointerCast(next_cx, body, T_ptr(T_i8()));
-        elt = bld::GEP(next_cx, body, [scaled_ix]);
+        body = PointerCast(next_cx, body, T_ptr(T_i8()));
+        elt = GEP(next_cx, body, [scaled_ix]);
     } else {
-        elt = bld::GEP(next_cx, body, [ix_val]);
+        elt = GEP(next_cx, body, [ix_val]);
         // We're crossing a box boundary here, so we may need to pointer cast.
 
         let llunitty = type_of(bcx_ccx(next_cx), sp, unit_ty);
-        elt = bld::PointerCast(next_cx, elt, T_ptr(llunitty));
+        elt = PointerCast(next_cx, elt, T_ptr(llunitty));
     }
     ret lval_mem(next_cx, elt);
 }
@@ -3399,13 +3399,13 @@ fn trans_lval_gen(cx: &@block_ctxt, e: &@ast::expr) -> lval_result {
         let val =
             alt ty::struct(ccx.tcx, t) {
               ty::ty_box(_) {
-                bld::InBoundsGEP(sub.bcx, sub.val,
+                InBoundsGEP(sub.bcx, sub.val,
                                           [C_int(0),
                                            C_int(abi::box_rc_field_body)])
               }
               ty::ty_uniq(_) { fail "uniq lval translation unimplemented" }
               ty::ty_res(_, _, _) {
-                bld::InBoundsGEP(sub.bcx, sub.val, [C_int(0), C_int(1)])
+                InBoundsGEP(sub.bcx, sub.val, [C_int(0), C_int(1)])
               }
               ty::ty_tag(_, _) {
                 let ety = ty::expr_ty(ccx.tcx, e);
@@ -3413,7 +3413,7 @@ fn trans_lval_gen(cx: &@block_ctxt, e: &@ast::expr) -> lval_result {
                 if ty::type_has_dynamic_size(ccx.tcx, ety) {
                     ellty = T_typaram_ptr(ccx.tn);
                 } else { ellty = T_ptr(type_of(ccx, e.span, ety)); }
-                bld::PointerCast(sub.bcx, sub.val, ellty)
+                PointerCast(sub.bcx, sub.val, ellty)
               }
               ty::ty_ptr(_) { sub.val }
             };
@@ -3464,12 +3464,12 @@ fn int_cast(bcx: &@block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
     let srcsz = llvm::LLVMGetIntTypeWidth(llsrctype);
     let dstsz = llvm::LLVMGetIntTypeWidth(lldsttype);
     ret if dstsz == srcsz {
-            bld::BitCast(bcx, llsrc, lldsttype)
+            BitCast(bcx, llsrc, lldsttype)
         } else if srcsz > dstsz {
-            bld::TruncOrBitCast(bcx, llsrc, lldsttype)
+            TruncOrBitCast(bcx, llsrc, lldsttype)
         } else if signed {
-            bld::SExtOrBitCast(bcx, llsrc, lldsttype)
-        } else { bld::ZExtOrBitCast(bcx, llsrc, lldsttype) };
+            SExtOrBitCast(bcx, llsrc, lldsttype)
+        } else { ZExtOrBitCast(bcx, llsrc, lldsttype) };
 }
 
 fn float_cast(bcx: &@block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
@@ -3477,9 +3477,9 @@ fn float_cast(bcx: &@block_ctxt, lldsttype: TypeRef, llsrctype: TypeRef,
     let srcsz = lib::llvm::float_width(llsrctype);
     let dstsz = lib::llvm::float_width(lldsttype);
     ret if dstsz > srcsz {
-            bld::FPExt(bcx, llsrc, lldsttype)
+            FPExt(bcx, llsrc, lldsttype)
         } else if srcsz > dstsz {
-            bld::FPTrunc(bcx, llsrc, lldsttype)
+            FPTrunc(bcx, llsrc, lldsttype)
         } else { llsrc };
 }
 
@@ -3515,22 +3515,22 @@ fn trans_cast(cx: &@block_ctxt, e: &@ast::expr, id: ast::node_id) -> result {
           }
           {in: integral., out: float.} {
             if s_in {
-                bld::SIToFP(e_res.bcx, e_res.val, ll_t_out)
-            } else { bld::UIToFP(e_res.bcx, e_res.val, ll_t_out) }
+                SIToFP(e_res.bcx, e_res.val, ll_t_out)
+            } else { UIToFP(e_res.bcx, e_res.val, ll_t_out) }
           }
           {in: float., out: integral.} {
             if ty::type_is_signed(ccx.tcx, t_out) {
-                bld::FPToSI(e_res.bcx, e_res.val, ll_t_out)
-            } else { bld::FPToUI(e_res.bcx, e_res.val, ll_t_out) }
+                FPToSI(e_res.bcx, e_res.val, ll_t_out)
+            } else { FPToUI(e_res.bcx, e_res.val, ll_t_out) }
           }
           {in: integral., out: native_.} {
-            bld::IntToPtr(e_res.bcx, e_res.val, ll_t_out)
+            IntToPtr(e_res.bcx, e_res.val, ll_t_out)
           }
           {in: native_., out: integral.} {
-            bld::PtrToInt(e_res.bcx, e_res.val, ll_t_out)
+            PtrToInt(e_res.bcx, e_res.val, ll_t_out)
           }
           {in: native_., out: native_.} {
-            bld::PointerCast(e_res.bcx, e_res.val, ll_t_out)
+            PointerCast(e_res.bcx, e_res.val, ll_t_out)
           }
           _ { ccx.sess.bug(~"Translating unsupported cast.") }
         };
@@ -3595,7 +3595,7 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
     let closure_ty = ty::mk_imm_box(cx.ccx.tcx, env_ty);
     let llclosure_ptr_ty = type_of(cx.ccx, sp, closure_ty);
     let llclosure =
-        bld::PointerCast(copy_args_bcx, fcx.llenv, llclosure_ptr_ty);
+        PointerCast(copy_args_bcx, fcx.llenv, llclosure_ptr_ty);
 
     // "target", in this context, means the function that's having some of its
     // arguments bound and that will be called inside the thunk we're
@@ -3618,8 +3618,8 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
     // And then, pick out the target function's own environment.  That's what
     // we'll use as the environment the thunk gets.
     let lltargetclosure =
-        bld::GEP(bcx, lltarget, [C_int(0), C_int(abi::fn_field_box)]);
-    lltargetclosure = bld::Load(bcx, lltargetclosure);
+        GEP(bcx, lltarget, [C_int(0), C_int(abi::fn_field_box)]);
+    lltargetclosure = Load(bcx, lltargetclosure);
 
     // Get f's return type, which will also be the return type of the entire
     // bind expression.
@@ -3633,7 +3633,7 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
     let llretptr = fcx.llretptr;
     if ty::type_contains_params(cx.ccx.tcx, outgoing_ret_ty) {
         let llretty = type_of_inner(cx.ccx, sp, outgoing_ret_ty);
-        llretptr = bld::PointerCast(bcx, llretptr, T_ptr(llretty));
+        llretptr = PointerCast(bcx, llretptr, T_ptr(llretty));
     }
 
     // Set up the three implicit arguments to the thunk.
@@ -3647,7 +3647,7 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
                          [0, abi::box_rc_field_body,
                           abi::closure_elt_ty_params, i as int]);
         copy_args_bcx = lltyparam_ptr.bcx;
-        let td = bld::Load(copy_args_bcx, lltyparam_ptr.val);
+        let td = Load(copy_args_bcx, lltyparam_ptr.val);
         llargs += [td];
         fcx.lltydescs += [td];
         i += 1u;
@@ -3679,10 +3679,10 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
             if ty::type_contains_params(cx.ccx.tcx, out_arg.ty) {
                 let ty =
                     if is_val { T_ptr(llout_arg_ty) } else { llout_arg_ty };
-                val = bld::PointerCast(bcx, val, ty);
+                val = PointerCast(bcx, val, ty);
             }
             if is_val && type_is_immediate(cx.ccx, e_ty) {
-                val = bld::Load(bcx, val);
+                val = Load(bcx, val);
             }
             llargs += [val];
             b += 1;
@@ -3693,7 +3693,7 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
           none. {
             let arg: ValueRef = llvm::LLVMGetParam(llthunk, a);
             if ty::type_contains_params(cx.ccx.tcx, out_arg.ty) {
-                arg = bld::PointerCast(bcx, arg, llout_arg_ty);
+                arg = PointerCast(bcx, arg, llout_arg_ty);
             }
             llargs += [arg];
             a += 1u;
@@ -3703,7 +3703,7 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
     }
 
     let lltargetfn =
-        bld::GEP(bcx, lltarget, [C_int(0), C_int(abi::fn_field_code)]);
+        GEP(bcx, lltarget, [C_int(0), C_int(abi::fn_field_code)]);
 
     // Cast the outgoing function to the appropriate type.
     // This is necessary because the type of the function that we have
@@ -3711,9 +3711,9 @@ fn trans_bind_thunk(cx: &@local_ctxt, sp: &span, incoming_fty: ty::t,
     // needs to take.
     let lltargetty =
         type_of_fn_from_ty(bcx_ccx(bcx), sp, outgoing_fty, ty_param_count);
-    lltargetfn = bld::PointerCast(bcx, lltargetfn, T_ptr(T_ptr(lltargetty)));
-    lltargetfn = bld::Load(bcx, lltargetfn);
-    bld::FastCall(bcx, lltargetfn, llargs);
+    lltargetfn = PointerCast(bcx, lltargetfn, T_ptr(T_ptr(lltargetty)));
+    lltargetfn = Load(bcx, lltargetfn);
+    FastCall(bcx, lltargetfn, llargs);
     build_return(bcx);
     finish_fn(fcx, lltop);
     ret {val: llthunk, ty: llthunk_ty};
@@ -3762,7 +3762,7 @@ fn trans_bind_1(cx: &@block_ctxt, f: &@ast::expr, f_res: &lval_result,
             // about has the type parameters substituted with the real types.
             let llclosurety =
                 T_ptr(type_of(bcx_ccx(cx), cx.sp, outgoing_fty));
-            let src_loc = bld::PointerCast(bcx, f_res.res.val, llclosurety);
+            let src_loc = PointerCast(bcx, f_res.res.val, llclosurety);
             let bound_f = {res: {bcx: bcx, val: src_loc} with f_res};
             ([outgoing_fty], [bound_f], none)
         } else { ([], [], some(f_res.res.val)) };
@@ -3830,9 +3830,9 @@ fn trans_arg_expr(cx: &@block_ctxt, arg: &ty::arg, lldestty0: TypeRef,
             add_clean_temp(bcx, val, e_ty);
         } else {
             if ty::type_is_ivec(ccx.tcx, e_ty) {
-                let arg_copy = do_spill(bcx, bld::Load(bcx, val));
+                let arg_copy = do_spill(bcx, Load(bcx, val));
                 bcx = take_ty(bcx, arg_copy, e_ty).bcx;
-                val = bld::Load(bcx, arg_copy);
+                val = Load(bcx, arg_copy);
             } else if lv.is_mem {
                 bcx = take_ty(bcx, val, e_ty).bcx;
                 val = load_if_immediate(bcx, val, e_ty);
@@ -3848,7 +3848,7 @@ fn trans_arg_expr(cx: &@block_ctxt, arg: &ty::arg, lldestty0: TypeRef,
 
     if !is_bot && ty::type_contains_params(ccx.tcx, arg.ty) {
         let lldestty = lldestty0;
-        val = bld::PointerCast(bcx, val, lldestty);
+        val = PointerCast(bcx, val, lldestty);
     }
 
     // Collect arg for later if it happens to be one we've moving out.
@@ -3918,7 +3918,7 @@ fn trans_args(cx: &@block_ctxt, llenv: ValueRef,
         // of. If so, cast the caller's view of the restlot to the callee's
         // view, for the sake of making a type-compatible call.
         let llretty = T_ptr(type_of_inner(bcx_ccx(bcx), bcx.sp, retty));
-        llargs += [bld::PointerCast(cx, llretslot, llretty)];
+        llargs += [PointerCast(cx, llretslot, llretty)];
     } else { llargs += [llretslot]; }
 
     // Arg 1: task pointer.
@@ -3938,9 +3938,9 @@ fn trans_args(cx: &@block_ctxt, llenv: ValueRef,
             if ty::type_contains_params(bcx_tcx(cx), retty) {
                 let body_ty = ty::mk_iter_body_fn(bcx_tcx(cx), retty);
                 let body_llty = type_of_inner(bcx_ccx(cx), cx.sp, body_ty);
-                bld::PointerCast(bcx, lli, T_ptr(body_llty))
+                PointerCast(bcx, lli, T_ptr(body_llty))
             } else { lli };
-        llargs += [bld::Load(cx, lli)];
+        llargs += [Load(cx, lli)];
       }
     }
 
@@ -3977,7 +3977,7 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
     // expression because of the hack that allows us to process self-calls
     // with trans_call.
     let cx = new_scope_block_ctxt(in_cx, ~"call");
-    bld::Br(in_cx, cx.llbb);
+    Br(in_cx, cx.llbb);
     let f_res = trans_lval_gen(cx, f);
     let fn_ty: ty::t;
     alt f_res.method_ty {
@@ -3995,7 +3995,7 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
     alt f_res.llobj {
       some(ob) {
         // It's a vtbl entry.
-        faddr = bld::Load(bcx, faddr);
+        faddr = Load(bcx, faddr);
         llenv = ob;
       }
       none. {
@@ -4006,11 +4006,11 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
         fn_ty = res.ty;
 
         let pair = res.val;
-        faddr = bld::GEP(bcx, pair, [C_int(0), C_int(abi::fn_field_code)]);
-        faddr = bld::Load(bcx, faddr);
+        faddr = GEP(bcx, pair, [C_int(0), C_int(abi::fn_field_code)]);
+        faddr = Load(bcx, faddr);
         let llclosure =
-            bld::GEP(bcx, pair, [C_int(0), C_int(abi::fn_field_box)]);
-        llenv = bld::Load(bcx, llclosure);
+            GEP(bcx, pair, [C_int(0), C_int(abi::fn_field_box)]);
+        llenv = Load(bcx, llclosure);
       }
     }
 
@@ -4034,7 +4034,7 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
        for the call itself is unreachable. */
     let retval = C_nil();
     if !is_terminated(bcx) {
-        bld::FastCall(bcx, faddr, llargs);
+        FastCall(bcx, faddr, llargs);
         alt lliterbody {
           none. {
             if !ty::type_is_nil(bcx_tcx(cx), ret_ty) {
@@ -4061,7 +4061,7 @@ fn trans_call(in_cx: &@block_ctxt, f: &@ast::expr,
         }
         bcx = trans_block_cleanups(bcx, cx);
         let next_cx = new_sub_block_ctxt(in_cx, ~"next");
-        bld::Br(bcx, next_cx.llbb);
+        Br(bcx, next_cx.llbb);
         bcx = next_cx;
     }
     ret rslt(bcx, retval);
@@ -4197,8 +4197,8 @@ fn trans_expr_out(cx: &@block_ctxt, e: &@ast::expr, output: out_method) ->
         let sub =
             with_out_method(bind trans_block(sub_cx, blk, _), cx, e.id,
                             output);
-        bld::Br(cx, sub_cx.llbb);
-        if !is_terminated(sub.bcx) { bld::Br(sub.bcx, next_cx.llbb); }
+        Br(cx, sub_cx.llbb);
+        if !is_terminated(sub.bcx) { Br(sub.bcx, next_cx.llbb); }
         ret rslt(next_cx, sub.val);
       }
       ast::expr_copy(a) {
@@ -4314,14 +4314,14 @@ fn trans_expr_out(cx: &@block_ctxt, e: &@ast::expr, output: out_method) ->
         let c =
             get_extern_const(bcx_ccx(cx).externs, bcx_ccx(cx).llmod,
                              ~"check_claims", T_bool());
-        let cond = bld::Load(cx, c);
+        let cond = Load(cx, c);
 
         let then_cx = new_scope_block_ctxt(cx, ~"claim_then");
         let check_res = trans_check_expr(then_cx, a, ~"Claim");
         let else_cx = new_scope_block_ctxt(cx, ~"else");
         let els = rslt(else_cx, C_nil());
 
-        bld::CondBr(cx, cond, then_cx.llbb, else_cx.llbb);
+        CondBr(cx, cond, then_cx.llbb, else_cx.llbb);
         ret rslt(join_branches(cx, [check_res, els]), C_nil());
       }
       ast::expr_break. { ret trans_break(e.span, cx); }
@@ -4387,7 +4387,7 @@ fn type_is_immediate(ccx: &@crate_ctxt, t: ty::t) -> bool {
 fn do_spill(cx: &@block_ctxt, v: ValueRef) -> ValueRef {
     // We have a value but we have to spill it to pass by alias.
     let llptr = alloca(cx, val_ty(v));
-    bld::Store(cx, v, llptr);
+    Store(cx, v, llptr);
     ret llptr;
 }
 
@@ -4397,7 +4397,7 @@ fn spill_if_immediate(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> ValueRef {
 }
 
 fn load_if_immediate(cx: &@block_ctxt, v: ValueRef, t: ty::t) -> ValueRef {
-    if type_is_immediate(bcx_ccx(cx), t) { ret bld::Load(cx, v); }
+    if type_is_immediate(bcx_ccx(cx), t) { ret Load(cx, v); }
     ret v;
 }
 
@@ -4424,9 +4424,9 @@ fn trans_log(lvl: int, cx: &@block_ctxt, e: &@ast::expr) -> result {
     }
     let log_cx = new_scope_block_ctxt(cx, ~"log");
     let after_cx = new_sub_block_ctxt(cx, ~"after");
-    let load = bld::Load(cx, global);
-    let test = bld::ICmp(cx, lib::llvm::LLVMIntSGE, load, C_int(lvl));
-    bld::CondBr(cx, test, log_cx.llbb, after_cx.llbb);
+    let load = Load(cx, global);
+    let test = ICmp(cx, lib::llvm::LLVMIntSGE, load, C_int(lvl));
+    CondBr(cx, test, log_cx.llbb, after_cx.llbb);
     let sub = trans_expr(log_cx, e);
     let e_ty = ty::expr_ty(bcx_tcx(cx), e);
     let log_bcx = sub.bcx;
@@ -4437,13 +4437,13 @@ fn trans_log(lvl: int, cx: &@block_ctxt, e: &@ast::expr) -> result {
 
     // Call the polymorphic log function.
     let llvalptr = spill_if_immediate(log_bcx, sub.val, e_ty);
-    let llval_i8 = bld::PointerCast(log_bcx, llvalptr, T_ptr(T_i8()));
+    let llval_i8 = PointerCast(log_bcx, llvalptr, T_ptr(T_i8()));
 
-    bld::Call(log_bcx, bcx_ccx(log_bcx).upcalls.log_type,
+    Call(log_bcx, bcx_ccx(log_bcx).upcalls.log_type,
                        [log_bcx.fcx.lltaskptr, r.val, llval_i8, C_int(lvl)]);
 
     log_bcx = trans_block_cleanups(log_bcx, log_cx);
-    bld::Br(log_bcx, after_cx.llbb);
+    Br(log_bcx, after_cx.llbb);
     ret rslt(after_cx, C_nil());
 }
 
@@ -4453,7 +4453,7 @@ fn trans_check_expr(cx: &@block_ctxt, e: &@ast::expr, s: &istr) -> result {
     let fail_cx = new_sub_block_ctxt(cx, ~"fail");
     trans_fail(fail_cx, some::<span>(e.span), expr_str);
     let next_cx = new_sub_block_ctxt(cx, ~"next");
-    bld::CondBr(cond_res.bcx, cond_res.val, next_cx.llbb, fail_cx.llbb);
+    CondBr(cond_res.bcx, cond_res.val, next_cx.llbb, fail_cx.llbb);
     ret rslt(next_cx, C_nil());
 }
 
@@ -4470,7 +4470,7 @@ fn trans_fail_expr(cx: &@block_ctxt, sp_opt: &option::t<span>,
 
         if ty::type_is_str(tcx, e_ty) {
             let elt =
-                bld::GEP(bcx, expr_res.val,
+                GEP(bcx, expr_res.val,
                               [C_int(0), C_int(abi::vec_elt_data)]);
             ret trans_fail_value(bcx, sp_opt, elt);
         } else {
@@ -4501,27 +4501,27 @@ fn trans_fail_value(cx: &@block_ctxt, sp_opt: &option::t<span>,
       }
       none. { V_filename = C_cstr(bcx_ccx(cx), ~"<runtime>"); V_line = 0; }
     }
-    let V_str = bld::PointerCast(cx, V_fail_str, T_ptr(T_i8()));
-    V_filename = bld::PointerCast(cx, V_filename, T_ptr(T_i8()));
+    let V_str = PointerCast(cx, V_fail_str, T_ptr(T_i8()));
+    V_filename = PointerCast(cx, V_filename, T_ptr(T_i8()));
     let args = [cx.fcx.lltaskptr, V_str, V_filename, C_int(V_line)];
-    bld::Call(cx, bcx_ccx(cx).upcalls._fail, args);
-    bld::Unreachable(cx);
+    Call(cx, bcx_ccx(cx).upcalls._fail, args);
+    Unreachable(cx);
     ret rslt(cx, C_nil());
 }
 
 fn trans_put(in_cx: &@block_ctxt, e: &option::t<@ast::expr>) -> result {
     let cx = new_scope_block_ctxt(in_cx, ~"put");
-    bld::Br(in_cx, cx.llbb);
+    Br(in_cx, cx.llbb);
     let llcallee = C_nil();
     let llenv = C_nil();
     alt { cx.fcx.lliterbody } {
       some(lli) {
         let slot = alloca(cx, val_ty(lli));
-        bld::Store(cx, lli, slot);
-        llcallee = bld::GEP(cx, slot, [C_int(0), C_int(abi::fn_field_code)]);
-        llcallee = bld::Load(cx, llcallee);
-        llenv = bld::GEP(cx, slot, [C_int(0), C_int(abi::fn_field_box)]);
-        llenv = bld::Load(cx, llenv);
+        Store(cx, lli, slot);
+        llcallee = GEP(cx, slot, [C_int(0), C_int(abi::fn_field_code)]);
+        llcallee = Load(cx, llcallee);
+        llenv = GEP(cx, slot, [C_int(0), C_int(abi::fn_field_box)]);
+        llenv = Load(cx, llenv);
       }
     }
     let bcx = cx;
@@ -4540,10 +4540,10 @@ fn trans_put(in_cx: &@block_ctxt, e: &option::t<@ast::expr>) -> result {
         llargs += [r.val];
       }
     }
-    bld::FastCall(bcx, llcallee, llargs);
+    FastCall(bcx, llcallee, llargs);
     bcx = trans_block_cleanups(bcx, cx);
     let next_cx = new_sub_block_ctxt(in_cx, ~"next");
-    bld::Br(bcx, next_cx.llbb);
+    Br(bcx, next_cx.llbb);
     ret rslt(next_cx, C_nil());
 }
 
@@ -4561,7 +4561,7 @@ fn trans_uniq(cx: &@block_ctxt, contents: &@ast::expr) -> lval_result {
     bcx = r.bcx;
     let llptrptr = r.val;
 
-    let llptr = bld::Load(bcx, llptrptr);
+    let llptr = Load(bcx, llptrptr);
     r = trans_expr_out(bcx, contents, save_in(llptr));
     ret lval_val(r.bcx, llptrptr);
 }
@@ -4576,11 +4576,11 @@ fn trans_break_cont(sp: &span, cx: &@block_ctxt, to_end: bool) -> result {
         alt { cleanup_cx.kind } {
           LOOP_SCOPE_BLOCK(_cont, _break) {
             if to_end {
-                bld::Br(bcx, _break.llbb);
+                Br(bcx, _break.llbb);
             } else {
                 alt _cont {
-                  option::some(_cont) { bld::Br(bcx, _cont.llbb); }
-                  _ { bld::Br(bcx, cleanup_cx.llbb); }
+                  option::some(_cont) { Br(bcx, _cont.llbb); }
+                  _ { Br(bcx, cleanup_cx.llbb); }
                 }
             }
             ret rslt(new_sub_block_ctxt(bcx, ~"break_cont.unreachable"),
@@ -4638,7 +4638,7 @@ fn trans_ret(cx: &@block_ctxt, e: &option::t<@ast::expr>) -> result {
       }
       _ {
         let t = llvm::LLVMGetElementType(val_ty(cx.fcx.llretptr));
-        bld::Store(bcx, C_null(t), cx.fcx.llretptr);
+        Store(bcx, C_null(t), cx.fcx.llretptr);
       }
     }
     // run all cleanups and back out.
@@ -4656,7 +4656,7 @@ fn trans_ret(cx: &@block_ctxt, e: &option::t<@ast::expr>) -> result {
     ret rslt(new_sub_block_ctxt(bcx, ~"ret.unreachable"), C_nil());
 }
 
-fn build_return(bcx: &@block_ctxt) { bld::Br(bcx, bcx_fcx(bcx).llreturn); }
+fn build_return(bcx: &@block_ctxt) { Br(bcx, bcx_fcx(bcx).llreturn); }
 
 // fn trans_be(cx: &@block_ctxt, e: &@ast::expr) -> result {
 fn trans_be(cx: &@block_ctxt, e: &@ast::expr)
@@ -4747,7 +4747,7 @@ fn zero_alloca(cx: &@block_ctxt, llptr: ValueRef, t: ty::t) -> result {
         bcx = call_bzero(llsz.bcx, llptr, llsz.val, C_int(0)).bcx;
     } else {
         let llty = type_of(bcx_ccx(bcx), cx.sp, t);
-        bld::Store(bcx, C_null(llty), llptr);
+        Store(bcx, C_null(llty), llptr);
     }
     ret rslt(bcx, llptr);
 }
@@ -4859,7 +4859,7 @@ fn trans_fn_cleanups(fcx: &@fn_ctxt, cx: &@block_ctxt) {
     alt fcx.llobstacktoken {
       some(lltoken_) {
         let lltoken = lltoken_; // satisfy alias checker
-        bld::Call(cx, fcx_ccx(fcx).upcalls.dynastack_free,
+        Call(cx, fcx_ccx(fcx).upcalls.dynastack_free,
                   [fcx.lltaskptr, lltoken]);
       }
       none. {/* nothing to do */ }
@@ -5207,18 +5207,18 @@ fn populate_fn_ctxt_from_llself(fcx: @fn_ctxt, llself: val_self_pair) {
     let n_typarams = std::vec::len::<ast::ty_param>(bcx.fcx.lcx.obj_typarams);
     let llobj_box_ty: TypeRef = T_obj_ptr(*bcx_ccx(bcx), n_typarams);
     let box_cell =
-        bld::GEP(bcx, llself.v, [C_int(0), C_int(abi::obj_field_box)]);
-    let box_ptr = bld::Load(bcx, box_cell);
-    box_ptr = bld::PointerCast(bcx, box_ptr, llobj_box_ty);
+        GEP(bcx, llself.v, [C_int(0), C_int(abi::obj_field_box)]);
+    let box_ptr = Load(bcx, box_cell);
+    box_ptr = PointerCast(bcx, box_ptr, llobj_box_ty);
     let obj_typarams =
-        bld::GEP(bcx, box_ptr,
+        GEP(bcx, box_ptr,
                       [C_int(0), C_int(abi::box_rc_field_body),
                        C_int(abi::obj_body_elt_typarams)]);
     // The object fields immediately follow the type parameters, so we skip
     // over them to get the pointer.
 
     let et = llvm::LLVMGetElementType(val_ty(obj_typarams));
-    let obj_fields = bld::Add(bcx, vp2i(bcx, obj_typarams), llsize_of(et));
+    let obj_fields = Add(bcx, vp2i(bcx, obj_typarams), llsize_of(et));
     // If we can (i.e. the type is statically sized), then cast the resulting
     // fields pointer to the appropriate LLVM type. If not, just leave it as
     // i8 *.
@@ -5230,8 +5230,8 @@ fn populate_fn_ctxt_from_llself(fcx: @fn_ctxt, llself: val_self_pair) {
     let i: int = 0;
     for p: ast::ty_param in fcx.lcx.obj_typarams {
         let lltyparam: ValueRef =
-            bld::GEP(bcx, obj_typarams, [C_int(0), C_int(i)]);
-        lltyparam = bld::Load(bcx, lltyparam);
+            GEP(bcx, obj_typarams, [C_int(0), C_int(i)]);
+        lltyparam = Load(bcx, lltyparam);
         fcx.lltydescs += [lltyparam];
         i += 1;
     }
@@ -5250,16 +5250,16 @@ fn populate_fn_ctxt_from_llself(fcx: @fn_ctxt, llself: val_self_pair) {
 // Ties up the llstaticallocas -> llcopyargs -> llderivedtydescs ->
 // lldynamicallocas -> lltop edges, and builds the return block.
 fn finish_fn(fcx: &@fn_ctxt, lltop: BasicBlockRef) {
-    bld::Br(new_raw_block_ctxt(fcx, fcx.llstaticallocas), fcx.llcopyargs);
-    bld::Br(new_raw_block_ctxt(fcx, fcx.llcopyargs),
+    Br(new_raw_block_ctxt(fcx, fcx.llstaticallocas), fcx.llcopyargs);
+    Br(new_raw_block_ctxt(fcx, fcx.llcopyargs),
             fcx.llderivedtydescs_first);
-    bld::Br(new_raw_block_ctxt(fcx, fcx.llderivedtydescs),
+    Br(new_raw_block_ctxt(fcx, fcx.llderivedtydescs),
             fcx.lldynamicallocas);
-    bld::Br(new_raw_block_ctxt(fcx, fcx.lldynamicallocas), lltop);
+    Br(new_raw_block_ctxt(fcx, fcx.lldynamicallocas), lltop);
 
     let ret_cx = new_raw_block_ctxt(fcx, fcx.llreturn);
     trans_fn_cleanups(fcx, ret_cx);
-    bld::RetVoid(ret_cx);
+    RetVoid(ret_cx);
 }
 
 // trans_closure: Builds an LLVM function out of a source function.
@@ -5387,7 +5387,7 @@ fn trans_res_ctor(cx: @local_ctxt, sp: &span, dtor: &ast::_fn,
     let llretptr = fcx.llretptr;
     if ty::type_has_dynamic_size(cx.ccx.tcx, ret_t) {
         let llret_t = T_ptr(T_struct([T_i32(), llvm::LLVMTypeOf(arg)]));
-        llretptr = bld::BitCast(bcx, llretptr, llret_t);
+        llretptr = BitCast(bcx, llretptr, llret_t);
     }
 
     let dst = GEP_tup_like(bcx, tup_t, llretptr, [0, 1]);
@@ -5395,7 +5395,7 @@ fn trans_res_ctor(cx: @local_ctxt, sp: &span, dtor: &ast::_fn,
     bcx = copy_val(bcx, INIT, dst.val, arg, arg_t);
     let flag = GEP_tup_like(bcx, tup_t, llretptr, [0, 0]);
     bcx = flag.bcx;
-    bld::Store(bcx, C_int(1), flag.val);
+    Store(bcx, C_int(1), flag.val);
     build_return(bcx);
     finish_fn(fcx, lltop);
 }
@@ -5449,11 +5449,11 @@ fn trans_tag_variant(cx: @local_ctxt, tag_id: ast::node_id,
             fcx.llretptr
         } else {
             let lltagptr =
-                bld::PointerCast(bcx, fcx.llretptr,
+                PointerCast(bcx, fcx.llretptr,
                                       T_opaque_tag_ptr(fcx.lcx.ccx.tn));
-            let lldiscrimptr = bld::GEP(bcx, lltagptr, [C_int(0), C_int(0)]);
-            bld::Store(bcx, C_int(index), lldiscrimptr);
-            bld::GEP(bcx, lltagptr, [C_int(0), C_int(1)])
+            let lldiscrimptr = GEP(bcx, lltagptr, [C_int(0), C_int(0)]);
+            Store(bcx, C_int(index), lldiscrimptr);
+            GEP(bcx, lltagptr, [C_int(0), C_int(1)])
         };
     i = 0u;
     for va: ast::variant_arg in variant.node.args {
@@ -5469,7 +5469,7 @@ fn trans_tag_variant(cx: @local_ctxt, tag_id: ast::node_id,
 
         let llargptr;
         alt fcx.llargs.find(va.id) {
-          some(x) { llargptr = bld::PointerCast(bcx, x, val_ty(lldestptr)); }
+          some(x) { llargptr = PointerCast(bcx, x, val_ty(lldestptr)); }
           none. {
             bcx_ccx(bcx).sess.bug(~"unbound argptr in \
                                       trans_tag_variant");
@@ -5480,7 +5480,7 @@ fn trans_tag_variant(cx: @local_ctxt, tag_id: ast::node_id,
         if ty::type_is_structural(cx.ccx.tcx, arg_ty) ||
                ty::type_has_dynamic_size(cx.ccx.tcx, arg_ty) {
             llargval = llargptr;
-        } else { llargval = bld::Load(bcx, llargptr); }
+        } else { llargval = Load(bcx, llargptr); }
         bcx = copy_val(bcx, INIT, lldestptr, llargval, arg_ty);
         i += 1u;
     }
@@ -5658,7 +5658,7 @@ fn create_main_wrapper(ccx: &@crate_ctxt, sp: &span, main_llfn: ValueRef,
         let llargvarg = llvm::LLVMGetParam(llfdecl, 3u);
         let args = [lloutputarg, lltaskarg, llenvarg];
         if takes_ivec { args += [llargvarg]; }
-        bld::FastCall(bcx, main_llfn, args);
+        FastCall(bcx, main_llfn, args);
         build_return(bcx);
 
         finish_fn(fcx, lltop);
@@ -5694,12 +5694,12 @@ fn create_real_fn_pair(cx: &@block_ctxt, llfnty: TypeRef, llfn: ValueRef,
     let lcx = cx.fcx.lcx;
 
     let pair = alloca(cx, T_fn_pair(*lcx.ccx, llfnty));
-    let code_cell = bld::GEP(cx, pair, [C_int(0), C_int(abi::fn_field_code)]);
-    bld::Store(cx, llfn, code_cell);
-    let env_cell = bld::GEP(cx, pair, [C_int(0), C_int(abi::fn_field_box)]);
+    let code_cell = GEP(cx, pair, [C_int(0), C_int(abi::fn_field_code)]);
+    Store(cx, llfn, code_cell);
+    let env_cell = GEP(cx, pair, [C_int(0), C_int(abi::fn_field_box)]);
     let llenvblobptr =
-        bld::PointerCast(cx, llenvptr, T_opaque_closure_ptr(*lcx.ccx));
-    bld::Store(cx, llenvblobptr, env_cell);
+        PointerCast(cx, llenvptr, T_opaque_closure_ptr(*lcx.ccx));
+    Store(cx, llenvblobptr, env_cell);
     ret pair;
 }
 
@@ -5832,12 +5832,12 @@ fn decl_native_fn_and_pair(ccx: &@crate_ctxt, sp: &span, path: &[istr],
                 let llsrctype = type_of(bcx_ccx(cx), cx.sp, t);
                 if llvm::LLVMGetIntTypeWidth(lldsttype) >
                        llvm::LLVMGetIntTypeWidth(llsrctype) {
-                    ret bld::ZExtOrBitCast(cx, v, T_int());
+                    ret ZExtOrBitCast(cx, v, T_int());
                 }
-                ret bld::TruncOrBitCast(cx, v, T_int());
+                ret TruncOrBitCast(cx, v, T_int());
             }
             if ty::type_is_fp(bcx_tcx(cx), t) {
-                ret bld::FPToSI(cx, v, T_int());
+                ret FPToSI(cx, v, T_int());
             }
         }
         ret vp2i(cx, v);
@@ -5865,8 +5865,8 @@ fn decl_native_fn_and_pair(ccx: &@crate_ctxt, sp: &span, path: &[istr],
                           llnativefnty);
         let r =
             if cc == lib::llvm::LLVMCCallConv {
-                bld::Call(bcx, llnativefn, call_args)
-            } else { bld::CallWithConv(bcx, llnativefn, call_args, cc) };
+                Call(bcx, llnativefn, call_args)
+            } else { CallWithConv(bcx, llnativefn, call_args, cc) };
         let rptr = bcx.fcx.llretptr;
         ret {val: r, rptr: rptr};
     }
@@ -5913,14 +5913,14 @@ fn decl_native_fn_and_pair(ccx: &@crate_ctxt, sp: &span, path: &[istr],
       _ {
         r = trans_native_call(new_raw_block_ctxt(bcx.fcx, bcx.llbb),
                               ccx.externs, ccx.llmod, name, call_args);
-        rptr = bld::BitCast(bcx, fcx.llretptr, T_ptr(T_i32()));
+        rptr = BitCast(bcx, fcx.llretptr, T_ptr(T_i32()));
       }
     }
     // We don't store the return value if it's nil, to avoid stomping on a nil
     // pointer. This is the only concession made to non-i32 return values. See
     // the FIXME above.
 
-    if !rty_is_nil && !uses_retptr { bld::Store(bcx, r, rptr); }
+    if !rty_is_nil && !uses_retptr { Store(bcx, r, rptr); }
 
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -6066,11 +6066,11 @@ fn trans_constants(ccx: &@crate_ctxt, crate: @ast::crate) {
 }
 
 fn vp2i(cx: &@block_ctxt, v: ValueRef) -> ValueRef {
-    ret bld::PtrToInt(cx, v, T_int());
+    ret PtrToInt(cx, v, T_int());
 }
 
 fn vi2p(cx: &@block_ctxt, v: ValueRef, t: TypeRef) -> ValueRef {
-    ret bld::IntToPtr(cx, v, t);
+    ret IntToPtr(cx, v, t);
 }
 
 fn p2i(v: ValueRef) -> ValueRef { ret llvm::LLVMConstPtrToInt(v, T_int()); }
@@ -6119,7 +6119,7 @@ fn declare_intrinsics(llmod: ModuleRef) -> hashmap<istr, ValueRef> {
 fn trap(bcx: &@block_ctxt) {
     let v: [ValueRef] = [];
     alt bcx_ccx(bcx).intrinsics.find(~"llvm.trap") {
-      some(x) { bld::Call(bcx, x, v); }
+      some(x) { Call(bcx, x, v); }
       _ { bcx_ccx(bcx).sess.bug(~"unbound llvm.trap in trap"); }
     }
 }
@@ -6131,13 +6131,13 @@ fn decl_no_op_type_glue(llmod: ModuleRef, taskptr_type: TypeRef) -> ValueRef {
 }
 
 fn vec_fill(bcx: &@block_ctxt, v: ValueRef) -> ValueRef {
-    ret bld::Load(bcx, bld::GEP(bcx, v,
+    ret Load(bcx, GEP(bcx, v,
                                      [C_int(0), C_int(abi::vec_elt_fill)]));
 }
 
 fn vec_p0(bcx: &@block_ctxt, v: ValueRef) -> ValueRef {
-    let p = bld::GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_data)]);
-    ret bld::PointerCast(bcx, p, T_ptr(T_i8()));
+    let p = GEP(bcx, v, [C_int(0), C_int(abi::vec_elt_data)]);
+    ret PointerCast(bcx, p, T_ptr(T_i8()));
 }
 
 fn make_glues(llmod: ModuleRef, taskptr_type: TypeRef) -> @glue_fns {
diff --git a/src/comp/middle/trans_alt.rs b/src/comp/middle/trans_alt.rs
index 53193ad98be..76f4ef1ec0b 100644
--- a/src/comp/middle/trans_alt.rs
+++ b/src/comp/middle/trans_alt.rs
@@ -10,7 +10,7 @@ import lib::llvm::llvm;
 import lib::llvm::llvm::ValueRef;
 import lib::llvm::llvm::TypeRef;
 import lib::llvm::llvm::BasicBlockRef;
-import bld = trans_build;
+import trans_build::*;
 import trans::new_sub_block_ctxt;
 import trans::new_scope_block_ctxt;
 import trans::load_if_immediate;
@@ -212,9 +212,9 @@ fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id,
         vec::len(ty::tag_variant_with_id(ccx.tcx, vdefs.tg, vdefs.var).args);
     if size > 0u && vec::len(variants) != 1u {
         let tagptr =
-            bld::PointerCast(bcx, val,
+            PointerCast(bcx, val,
                                   trans_common::T_opaque_tag_ptr(ccx.tn));
-        blobptr = bld::GEP(bcx, tagptr, [C_int(0), C_int(1)]);
+        blobptr = GEP(bcx, tagptr, [C_int(0), C_int(1)]);
     }
     let i = 0u;
     while i < size {
@@ -291,7 +291,7 @@ fn pick_col(m: &match) -> uint {
 
 fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
                     f: &mk_fail, exits: &mutable [exit_node]) {
-    if vec::len(m) == 0u { bld::Br(bcx, f()); ret; }
+    if vec::len(m) == 0u { Br(bcx, f()); ret; }
     if vec::len(m[0].pats) == 0u {
         let data = m[0].data;
         alt data.guard {
@@ -299,7 +299,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
             let guard_cx = new_scope_block_ctxt(bcx, ~"guard");
             let next_cx = new_sub_block_ctxt(bcx, ~"next");
             let else_cx = new_sub_block_ctxt(bcx, ~"else");
-            bld::Br(bcx, guard_cx.llbb);
+            Br(bcx, guard_cx.llbb);
             // Temporarily set bindings. They'll be rewritten to PHI nodes for
             // the actual arm block.
             for each @{key, val} in data.id_map.items() {
@@ -310,7 +310,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
             let {bcx: guard_bcx, val: guard_val} =
                 trans::trans_expr(guard_cx, e);
             guard_bcx = trans::trans_block_cleanups(guard_bcx, guard_cx);
-            bld::CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb);
+            CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb);
             compile_submatch(else_cx, vec::slice(m, 1u, vec::len(m)),
                              vals, f, exits);
             bcx = next_cx;
@@ -318,7 +318,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
           _ {}
         }
         exits += [{bound: m[0].bound, from: bcx.llbb, to: data.body}];
-        bld::Br(bcx, data.body);
+        Br(bcx, data.body);
         ret;
     }
 
@@ -375,9 +375,9 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
 
     // Unbox in case of a box field
     if any_box_pat(m, col) {
-        let box = bld::Load(bcx, val);
+        let box = Load(bcx, val);
         let unboxed =
-            bld::InBoundsGEP(bcx, box,
+            InBoundsGEP(bcx, box,
                                   [C_int(0),
                                    C_int(back::abi::box_rc_field_body)]);
         compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left,
@@ -397,15 +397,15 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
                 kind = single;
             } else {
                 let tagptr =
-                    bld::PointerCast(bcx, val,
+                    PointerCast(bcx, val,
                         trans_common::T_opaque_tag_ptr(ccx.tn));
-                let discrimptr = bld::GEP(bcx, tagptr, [C_int(0), C_int(0)]);
-                test_val = bld::Load(bcx, discrimptr);
+                let discrimptr = GEP(bcx, tagptr, [C_int(0), C_int(0)]);
+                test_val = Load(bcx, discrimptr);
                 kind = switch;
             }
           }
           lit(l) {
-            test_val = bld::Load(bcx, val);
+            test_val = Load(bcx, val);
             kind = alt l.node { ast::lit_str(_, _) { compare } _ { switch } };
           }
         }
@@ -417,14 +417,14 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
         };
     let sw =
         if kind == switch {
-            bld::Switch(bcx, test_val, else_cx.llbb, vec::len(opts))
+            Switch(bcx, test_val, else_cx.llbb, vec::len(opts))
         } else { C_int(0) }; // Placeholder for when not using a switch
 
      // Compile subtrees for each option
     for opt: opt in opts {
         let opt_cx = new_sub_block_ctxt(bcx, ~"match_case");
         alt kind {
-          single. { bld::Br(bcx, opt_cx.llbb); }
+          single. { Br(bcx, opt_cx.llbb); }
           switch. {
             let r = trans_opt(bcx, opt);
             bcx = r.bcx;
@@ -437,7 +437,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
             let eq =
                 trans::trans_compare(bcx, ast::eq, test_val, t, r.val, t);
             bcx = new_sub_block_ctxt(bcx, ~"next");
-            bld::CondBr(eq.bcx, eq.val, opt_cx.llbb, bcx.llbb);
+            CondBr(eq.bcx, eq.val, opt_cx.llbb, bcx.llbb);
           }
           _ { }
         }
@@ -457,7 +457,7 @@ fn compile_submatch(bcx: @block_ctxt, m: &match, vals: [ValueRef],
     }
 
     // Compile the fall-through case
-    if kind == compare { bld::Br(bcx, else_cx.llbb); }
+    if kind == compare { Br(bcx, else_cx.llbb); }
     if kind != single {
         compile_submatch(else_cx, enter_default(m, col, val), vals_left, f,
                          exits);
@@ -481,7 +481,7 @@ fn make_phi_bindings(bcx: &@block_ctxt, map: &[exit_node],
             }
         }
         if vec::len(vals) > 0u {
-            let phi = bld::Phi(bcx, val_ty(vals[0]), vals, llbbs);
+            let phi = Phi(bcx, val_ty(vals[0]), vals, llbbs);
             bcx.fcx.lllocals.insert(item.val, phi);
         } else { success = false; }
     }
@@ -498,7 +498,7 @@ fn trans_alt(cx: &@block_ctxt, expr: &@ast::expr, arms: &[ast::arm],
         // No need to generate code for alt,
         // since the disc diverges.
         if !is_terminated(cx) {
-            ret rslt(cx, bld::Unreachable(cx));
+            ret rslt(cx, Unreachable(cx));
         } else { ret er; }
     }
 
@@ -596,9 +596,8 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: &@ast::pat, val: ValueRef,
         }
       }
       ast::pat_box(inner) {
-        let box = bld::Load(bcx, val);
-        let unboxed =
-            bld::InBoundsGEP(bcx, box,
+        let box = Load(bcx, val);
+        let unboxed = InBoundsGEP(bcx, box,
                                   [C_int(0),
                                    C_int(back::abi::box_rc_field_body)]);
         bcx = bind_irrefutable_pat(bcx, inner, unboxed, table, true);
diff --git a/src/comp/middle/trans_objects.rs b/src/comp/middle/trans_objects.rs
index 44b025c2e84..1268fa1ee17 100644
--- a/src/comp/middle/trans_objects.rs
+++ b/src/comp/middle/trans_objects.rs
@@ -22,7 +22,7 @@ import syntax::codemap::span;
 
 import trans_common::*;
 import trans::*;
-import bld = trans_build;
+import trans_build::*;
 
 export trans_anon_obj;
 export trans_obj;
@@ -81,17 +81,17 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
     // abi::obj_field_vtbl and abi::obj_field_box simply specify words 0 and 1
     // of 'pair'.
     let pair_vtbl =
-        bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    let pair_box = bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
+        GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    let pair_box = GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
 
     // Make a vtable for this object: a static array of pointers to functions.
     // It will be located in the read-only memory of the executable we're
     // creating and will contain ValueRefs for all of this object's methods.
     // create_vtbl returns a pointer to the vtable, which we store.
     let vtbl = create_vtbl(cx, sp, self_ty, ob, ty_params, none, []);
-    vtbl = bld::PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
+    vtbl = PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
 
-    bld::Store(bcx, vtbl, pair_vtbl);
+    Store(bcx, vtbl, pair_vtbl);
 
     // Next we have to take care of the other half of the pair we're
     // returning: a boxed (reference-counted) tuple containing a tydesc,
@@ -104,7 +104,7 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         // there's not much to do.
 
         // Store null into pair, if no args or typarams.
-        bld::Store(bcx, C_null(llbox_ty), pair_box);
+        Store(bcx, C_null(llbox_ty), pair_box);
     } else {
         let obj_fields: [ty::t] = [];
         for a: ty::arg in arg_tys { obj_fields += [a.ty]; }
@@ -146,7 +146,7 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
         bcx = body_td.bcx;
-        bld::Store(bcx, body_td.val, body_tydesc.val);
+        Store(bcx, body_td.val, body_tydesc.val);
 
         // Copy the object's type parameters and fields into the space we
         // allocated for the object body.  (This is something like saving the
@@ -193,8 +193,8 @@ fn trans_obj(cx: @local_ctxt, sp: &span, ob: &ast::_obj,
         }
 
         // Store box ptr in outer pair.
-        let p = bld::PointerCast(bcx, box.box, llbox_ty);
-        bld::Store(bcx, p, pair_box);
+        let p = PointerCast(bcx, box.box, llbox_ty);
+        Store(bcx, p, pair_box);
     }
     build_return(bcx);
 
@@ -287,11 +287,11 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
 
     // Grab onto the first and second elements of the pair.
     let pair_vtbl =
-        bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    let pair_box = bld::GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
+        GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    let pair_box = GEP(bcx, pair, [C_int(0), C_int(abi::obj_field_box)]);
 
-    vtbl = bld::PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
-    bld::Store(bcx, vtbl, pair_vtbl);
+    vtbl = PointerCast(bcx, vtbl, T_ptr(T_empty_struct()));
+    Store(bcx, vtbl, pair_vtbl);
 
     // Next we have to take care of the other half of the pair we're
     // returning: a boxed (reference-counted) tuple containing a tydesc,
@@ -303,7 +303,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
 
         // If the object we're translating has no fields and no inner_obj,
         // there's not much to do.
-        bld::Store(bcx, C_null(llbox_ty), pair_box);
+        Store(bcx, C_null(llbox_ty), pair_box);
 
     } else {
 
@@ -333,7 +333,7 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
         lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
         bcx = body_td.bcx;
-        bld::Store(bcx, body_td.val, body_tydesc.val);
+        Store(bcx, body_td.val, body_tydesc.val);
 
         // Copy the object's fields into the space we allocated for the object
         // body.  (This is something like saving the lexical environment of a
@@ -377,8 +377,8 @@ fn trans_anon_obj(bcx: @block_ctxt, sp: &span, anon_obj: &ast::anon_obj,
         }
 
         // Store box ptr in outer pair.
-        let p = bld::PointerCast(bcx, box.box, llbox_ty);
-        bld::Store(bcx, p, pair_box);
+        let p = PointerCast(bcx, box.box, llbox_ty);
+        Store(bcx, p, pair_box);
     }
 
     // return the object we built.
@@ -630,15 +630,15 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
 
     // Cast to self-stack's type.
     let llenv =
-        bld::PointerCast(bcx, fcx.llenv,
+        PointerCast(bcx, fcx.llenv,
             T_ptr(T_struct([cx.ccx.rust_object_type,
                             T_ptr(cx.ccx.rust_object_type)])));
-    let llself_obj_ptr = bld::GEP(bcx, llenv, [C_int(0), C_int(1)]);
-    llself_obj_ptr = bld::Load(bcx, llself_obj_ptr);
+    let llself_obj_ptr = GEP(bcx, llenv, [C_int(0), C_int(1)]);
+    llself_obj_ptr = Load(bcx, llself_obj_ptr);
 
     // Cast it back to pointer-to-object-type, so LLVM won't complain.
     llself_obj_ptr =
-        bld::PointerCast(bcx, llself_obj_ptr, T_ptr(cx.ccx.rust_object_type));
+        PointerCast(bcx, llself_obj_ptr, T_ptr(cx.ccx.rust_object_type));
 
     // The 'llretptr' that will arrive in the backwarding function we're
     // creating also needs to be the correct type.  Cast it to the method's
@@ -646,7 +646,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let llretptr = fcx.llretptr;
     if ty::type_contains_params(cx.ccx.tcx, m.output) {
         let llretty = type_of_inner(cx.ccx, sp, m.output);
-        llretptr = bld::PointerCast(bcx, llretptr, T_ptr(llretty));
+        llretptr = PointerCast(bcx, llretptr, T_ptr(llretty));
     }
 
     // Get the index of the method we want.
@@ -666,12 +666,12 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
 
     let llouter_obj_vtbl =
-        bld::GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_vtbl)]);
-    llouter_obj_vtbl = bld::Load(bcx, llouter_obj_vtbl);
-    llouter_obj_vtbl = bld::PointerCast(bcx, llouter_obj_vtbl, vtbl_type);
+        GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_vtbl)]);
+    llouter_obj_vtbl = Load(bcx, llouter_obj_vtbl);
+    llouter_obj_vtbl = PointerCast(bcx, llouter_obj_vtbl, vtbl_type);
 
     let llouter_mthd =
-        bld::GEP(bcx, llouter_obj_vtbl, [C_int(0), C_int(ix as int)]);
+        GEP(bcx, llouter_obj_vtbl, [C_int(0), C_int(ix as int)]);
 
     // Set up the outer method to be called.
     let outer_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@@ -681,8 +681,8 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                         m.inputs, m.output,
                         std::vec::len::<ast::ty_param>(ty_params));
     llouter_mthd =
-        bld::PointerCast(bcx, llouter_mthd, T_ptr(T_ptr(llouter_mthd_ty)));
-    llouter_mthd = bld::Load(bcx, llouter_mthd);
+        PointerCast(bcx, llouter_mthd, T_ptr(T_ptr(llouter_mthd_ty)));
+    llouter_mthd = Load(bcx, llouter_mthd);
 
     // Set up the three implicit arguments to the outer method we'll need to
     // call.
@@ -703,7 +703,7 @@ fn process_bkwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     }
 
     // And, finally, call the outer method.
-    bld::FastCall(bcx, llouter_mthd, llouter_mthd_args);
+    FastCall(bcx, llouter_mthd, llouter_mthd_args);
 
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -765,7 +765,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     let llretptr = fcx.llretptr;
     if ty::type_contains_params(cx.ccx.tcx, m.output) {
         let llretty = type_of_inner(cx.ccx, sp, m.output);
-        llretptr = bld::PointerCast(bcx, llretptr, T_ptr(llretty));
+        llretptr = PointerCast(bcx, llretptr, T_ptr(llretty));
     }
 
     // Now, we have to get the the inner_obj's vtbl out of the self_obj.  This
@@ -774,16 +774,16 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // First, grab the box out of the self_obj.  It contains a refcount and a
     // body.
     let llself_obj_box =
-        bld::GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_box)]);
-    llself_obj_box = bld::Load(bcx, llself_obj_box);
+        GEP(bcx, llself_obj_ptr, [C_int(0), C_int(abi::obj_field_box)]);
+    llself_obj_box = Load(bcx, llself_obj_box);
 
     let ccx = bcx_ccx(bcx);
     let llbox_ty = T_opaque_obj_ptr(*ccx);
-    llself_obj_box = bld::PointerCast(bcx, llself_obj_box, llbox_ty);
+    llself_obj_box = PointerCast(bcx, llself_obj_box, llbox_ty);
 
     // Now, reach into the box and grab the body.
     let llself_obj_body =
-        bld::GEP(bcx, llself_obj_box,
+        GEP(bcx, llself_obj_box,
                       [C_int(0), C_int(abi::box_rc_field_body)]);
 
     // Now, we need to figure out exactly what type the body is supposed to be
@@ -793,7 +793,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                                 some(inner_obj_ty));
     // And cast to that type.
     llself_obj_body =
-        bld::PointerCast(bcx, llself_obj_body,
+        PointerCast(bcx, llself_obj_body,
                               T_ptr(type_of(cx.ccx, sp, body_ty)));
 
     // Now, reach into the body and grab the inner_obj.
@@ -807,13 +807,13 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     // method's entry out of the vtable so that the forwarding function can
     // call it.
     let llinner_obj_vtbl =
-        bld::GEP(bcx, llinner_obj.val,
+        GEP(bcx, llinner_obj.val,
                       [C_int(0), C_int(abi::obj_field_vtbl)]);
-    llinner_obj_vtbl = bld::Load(bcx, llinner_obj_vtbl);
+    llinner_obj_vtbl = Load(bcx, llinner_obj_vtbl);
 
     let llinner_obj_body =
-        bld::GEP(bcx, llinner_obj.val, [C_int(0), C_int(abi::obj_field_box)]);
-    llinner_obj_body = bld::Load(bcx, llinner_obj_body);
+        GEP(bcx, llinner_obj.val, [C_int(0), C_int(abi::obj_field_box)]);
+    llinner_obj_body = Load(bcx, llinner_obj_body);
 
     // Get the index of the method we want.
     let ix: uint = 0u;
@@ -830,10 +830,10 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
 
     // Pick out the original method from the vtable.
     let vtbl_type = T_ptr(T_array(T_ptr(T_nil()), ix + 1u));
-    llinner_obj_vtbl = bld::PointerCast(bcx, llinner_obj_vtbl, vtbl_type);
+    llinner_obj_vtbl = PointerCast(bcx, llinner_obj_vtbl, vtbl_type);
 
     let llorig_mthd =
-        bld::GEP(bcx, llinner_obj_vtbl, [C_int(0), C_int(ix as int)]);
+        GEP(bcx, llinner_obj_vtbl, [C_int(0), C_int(ix as int)]);
 
     // Set up the original method to be called.
     let orig_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
@@ -843,8 +843,8 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
                         m.inputs, m.output,
                         std::vec::len::<ast::ty_param>(ty_params));
     llorig_mthd =
-        bld::PointerCast(bcx, llorig_mthd, T_ptr(T_ptr(llorig_mthd_ty)));
-    llorig_mthd = bld::Load(bcx, llorig_mthd);
+        PointerCast(bcx, llorig_mthd, T_ptr(T_ptr(llorig_mthd_ty)));
+    llorig_mthd = Load(bcx, llorig_mthd);
 
     // Set up the self-stack.
     let self_stack =
@@ -857,7 +857,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
 
     // Cast self_stack back to pointer-to-object-type to make LLVM happy.
     self_stack =
-        bld::PointerCast(bcx, self_stack, T_ptr(cx.ccx.rust_object_type));
+        PointerCast(bcx, self_stack, T_ptr(cx.ccx.rust_object_type));
 
     // Set up the three implicit arguments to the original method we'll need
     // to call.
@@ -877,7 +877,7 @@ fn process_fwding_mthd(cx: @local_ctxt, sp: &span, m: @ty::method,
     }
 
     // And, finally, call the original (inner) method.
-    bld::FastCall(bcx, llorig_mthd, llorig_mthd_args);
+    FastCall(bcx, llorig_mthd, llorig_mthd_args);
 
     build_return(bcx);
     finish_fn(fcx, lltop);
@@ -955,19 +955,19 @@ fn populate_self_stack(bcx: @block_ctxt, self_stack: ValueRef,
                        inner_obj_body: ValueRef) -> ValueRef {
 
     // Drop the outer obj into the second slot.
-    let self_pair_ptr = bld::GEP(bcx, self_stack, [C_int(0), C_int(1)]);
-    bld::Store(bcx, outer_obj, self_pair_ptr);
+    let self_pair_ptr = GEP(bcx, self_stack, [C_int(0), C_int(1)]);
+    Store(bcx, outer_obj, self_pair_ptr);
 
     // Drop in the backwarding vtbl.
-    let wrapper_pair = bld::GEP(bcx, self_stack, [C_int(0), C_int(0)]);
-    let wrapper_vtbl_ptr = bld::GEP(bcx, wrapper_pair, [C_int(0), C_int(0)]);
+    let wrapper_pair = GEP(bcx, self_stack, [C_int(0), C_int(0)]);
+    let wrapper_vtbl_ptr = GEP(bcx, wrapper_pair, [C_int(0), C_int(0)]);
     let backwarding_vtbl_cast =
-        bld::PointerCast(bcx, backwarding_vtbl, T_ptr(T_empty_struct()));
-    bld::Store(bcx, backwarding_vtbl_cast, wrapper_vtbl_ptr);
+        PointerCast(bcx, backwarding_vtbl, T_ptr(T_empty_struct()));
+    Store(bcx, backwarding_vtbl_cast, wrapper_vtbl_ptr);
 
     // Drop in the inner obj body.
-    let wrapper_body_ptr = bld::GEP(bcx, wrapper_pair, [C_int(0), C_int(1)]);
-    bld::Store(bcx, inner_obj_body, wrapper_body_ptr);
+    let wrapper_body_ptr = GEP(bcx, wrapper_pair, [C_int(0), C_int(1)]);
+    Store(bcx, inner_obj_body, wrapper_body_ptr);
 
     ret self_stack;
 }