diff options
| author | Marijn Haverbeke <marijnh@gmail.com> | 2012-01-17 13:36:00 +0100 |
|---|---|---|
| committer | Marijn Haverbeke <marijnh@gmail.com> | 2012-01-17 14:04:55 +0100 |
| commit | 1c7a62c93b21ad1958db80b39bca14a09ea8b4bc (patch) | |
| tree | 6b3df97b7023bc3c5ac8b6ba51993c88d878bc96 /src/comp | |
| parent | e046360e690534229bd674ff2b1f61d324820230 (diff) | |
| download | rust-1c7a62c93b21ad1958db80b39bca14a09ea8b4bc.tar.gz rust-1c7a62c93b21ad1958db80b39bca14a09ea8b4bc.zip | |
Use a memset upcall to zero things without static alignment
This fixes issues #843 and #1546. The cost of an upcall is unfortunate, though. I assume there must be a way to manually compute the aligned pointer or size, using something akin to the formula in `align_to` in `rust_util.h`. I was unable to get this to work, however.
Diffstat (limited to 'src/comp')
| -rw-r--r-- | src/comp/back/upcall.rs | 2 | ||||
| -rw-r--r-- | src/comp/middle/trans.rs | 32 |
2 files changed, 18 insertions, 16 deletions
diff --git a/src/comp/back/upcall.rs b/src/comp/back/upcall.rs index a6bf286a2a8..115cd53276d 100644 --- a/src/comp/back/upcall.rs +++ b/src/comp/back/upcall.rs @@ -16,6 +16,7 @@ type upcalls = free: ValueRef, shared_malloc: ValueRef, shared_free: ValueRef, + memset: ValueRef, mark: ValueRef, create_shared_type_desc: ValueRef, free_shared_type_desc: ValueRef, @@ -64,6 +65,7 @@ fn declare_upcalls(targ_cfg: @session::config, T_ptr(T_i8())), shared_free: dv("shared_free", [T_ptr(T_i8())]), + memset: dv("memset", [T_ptr(T_i8()), T_i8(), T_i32(), T_i32()]), mark: d("mark", [T_ptr(T_i8())], int_t), create_shared_type_desc: diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs index ba7882331b1..2a029c22b3d 100644 --- a/src/comp/middle/trans.rs +++ b/src/comp/middle/trans.rs @@ -1942,21 +1942,21 @@ fn call_memmove(cx: @block_ctxt, dst: ValueRef, src: ValueRef, } fn call_bzero(cx: @block_ctxt, dst: ValueRef, n_bytes: ValueRef, - align_bytes: ValueRef) -> result { + align_bytes: ValueRef) -> @block_ctxt { // FIXME: switch to the 64-bit variant when on such a platform. - let ccx = bcx_ccx(cx); - let i = ccx.intrinsics; - assert (i.contains_key("llvm.memset.p0i8.i32")); - let memset = i.get("llvm.memset.p0i8.i32"); + let ccx = bcx_ccx(cx), dst = dst; let dst_ptr = PointerCast(cx, dst, T_ptr(T_i8())); let size = IntCast(cx, n_bytes, T_i32()); - let align = - if lib::llvm::llvm::LLVMIsConstant(align_bytes) == True { - IntCast(cx, align_bytes, T_i32()) - } else { IntCast(cx, C_int(ccx, 0), T_i32()) }; - let volatile = C_bool(false); - ret rslt(cx, - Call(cx, memset, [dst_ptr, C_u8(0u), size, align, volatile])); + let align = IntCast(cx, align_bytes, T_i32()); + if lib::llvm::llvm::LLVMIsConstant(align_bytes) != True { + // Use our own upcall (see issue 843), since the LLVM intrinsic can + // only handle constant alignments. 
+ Call(cx, ccx.upcalls.memset, [dst_ptr, C_u8(0u), size, align]); + } else { + let memset = ccx.intrinsics.get("llvm.memset.p0i8.i32"); + Call(cx, memset, [dst_ptr, C_u8(0u), size, align, C_bool(false)]); + } + cx } fn memmove_ty(bcx: @block_ctxt, dst: ValueRef, src: ValueRef, t: ty::t) -> @@ -3977,13 +3977,13 @@ fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t) let sp = cx.sp; let llty = type_of(ccx, sp, t); Store(bcx, C_null(llty), llptr); + bcx } else { - let llsz = size_of(bcx, t); + let {bcx, val: llsz} = size_of(bcx, t); // FIXME passing in the align here is correct, but causes issue #843 - // let llalign = align_of(llsz.bcx, t); - bcx = call_bzero(llsz.bcx, llptr, llsz.val, C_int(ccx, 0)).bcx; + let {bcx, val: align} = align_of(bcx, t); + call_bzero(bcx, llptr, llsz, align) } - ret bcx; } fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt { |
