| author | Alex Crichton <alex@alexcrichton.com> | 2013-04-23 19:33:33 -0400 |
|---|---|---|
| committer | Alex Crichton <alex@alexcrichton.com> | 2013-04-23 19:59:13 -0400 |
| commit | 4c08a8d6c31f55bfeed0ef0c2bf8b91f90415cfe (patch) | |
| tree | a21a928f49a8acf805d87b3e7d11a43b342d3cdd | |
| parent | c089a17854669925c008a5944d0490f1692dde7e (diff) | |
| download | rust-4c08a8d6c31f55bfeed0ef0c2bf8b91f90415cfe.tar.gz, rust-4c08a8d6c31f55bfeed0ef0c2bf8b91f90415cfe.zip | |
Removing more unnecessary unsafe blocks throughout
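For context: at the time of this change (and in every Rust edition before 2024), the body of an `unsafe fn` already counts as an unsafe context, so an `unsafe { ... }` block nested inside one is redundant and only draws an "unnecessary unsafe block" warning. The sketch below illustrates that rule; it is not code from this commit, and `read_first` and its caller are made-up names.

```rust
// Hypothetical illustration (pre-2024 editions): the body of an
// `unsafe fn` is already an unsafe context, so the raw-pointer
// dereference needs no extra `unsafe { ... }` wrapper.
unsafe fn read_first(p: *const u8) -> u8 {
    // Before this kind of cleanup:  unsafe { *p }
    // After:
    *p
}

fn main() {
    let data = [42u8, 7];
    // Callers still need their own unsafe block; only the redundant
    // block *inside* the unsafe fn goes away.
    let first = unsafe { read_first(data.as_ptr()) };
    println!("first byte = {}", first);
}
```

Call sites keep their `unsafe` blocks; only the inner, redundant blocks are deleted in the diff below.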
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libcore/gc.rs | 102 |
| -rw-r--r-- | src/libcore/pipes.rs | 4 |
| -rw-r--r-- | src/libcore/rt/sched/local_sched.rs | 6 |
| -rw-r--r-- | src/libcore/rt/uvll.rs | 4 |
| -rw-r--r-- | src/libcore/unstable.rs | 20 |
| -rw-r--r-- | src/libcore/unstable/weak_task.rs | 4 |
| -rw-r--r-- | src/librustc/middle/trans/base.rs | 6 |
| -rw-r--r-- | src/libstd/arc.rs | 34 |
8 files changed, 83 insertions, 97 deletions
diff --git a/src/libcore/gc.rs b/src/libcore/gc.rs
index 71d9ab439f3..a6bae3c7663 100644
--- a/src/libcore/gc.rs
+++ b/src/libcore/gc.rs
@@ -231,66 +231,64 @@ unsafe fn walk_gc_roots(mem: Memory, sentinel: **Word, visitor: Visitor) {
     // the stack.
     let mut reached_sentinel = ptr::is_null(sentinel);
     for stackwalk::walk_stack |frame| {
-        unsafe {
-            let pc = last_ret;
-            let Segment {segment: next_segment, boundary: boundary} =
-                find_segment_for_frame(frame.fp, segment);
-            segment = next_segment;
-            // Each stack segment is bounded by a morestack frame. The
-            // morestack frame includes two return addresses, one for
-            // morestack itself, at the normal offset from the frame
-            // pointer, and then a second return address for the
-            // function prologue (which called morestack after
-            // determining that it had hit the end of the stack).
-            // Since morestack itself takes two parameters, the offset
-            // for this second return address is 3 greater than the
-            // return address for morestack.
-            let ret_offset = if boundary { 4 } else { 1 };
-            last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;
-
-            if ptr::is_null(pc) {
-                loop;
-            }
+        let pc = last_ret;
+        let Segment {segment: next_segment, boundary: boundary} =
+            find_segment_for_frame(frame.fp, segment);
+        segment = next_segment;
+        // Each stack segment is bounded by a morestack frame. The
+        // morestack frame includes two return addresses, one for
+        // morestack itself, at the normal offset from the frame
+        // pointer, and then a second return address for the
+        // function prologue (which called morestack after
+        // determining that it had hit the end of the stack).
+        // Since morestack itself takes two parameters, the offset
+        // for this second return address is 3 greater than the
+        // return address for morestack.
+        let ret_offset = if boundary { 4 } else { 1 };
+        last_ret = *ptr::offset(frame.fp, ret_offset) as *Word;
+
+        if ptr::is_null(pc) {
+            loop;
+        }

-            let mut delay_reached_sentinel = reached_sentinel;
-            let sp = is_safe_point(pc);
-            match sp {
-                Some(sp_info) => {
-                    for walk_safe_point(frame.fp, sp_info) |root, tydesc| {
-                        // Skip roots until we see the sentinel.
-                        if !reached_sentinel {
-                            if root == sentinel {
-                                delay_reached_sentinel = true;
-                            }
-                            loop;
+        let mut delay_reached_sentinel = reached_sentinel;
+        let sp = is_safe_point(pc);
+        match sp {
+            Some(sp_info) => {
+                for walk_safe_point(frame.fp, sp_info) |root, tydesc| {
+                    // Skip roots until we see the sentinel.
+                    if !reached_sentinel {
+                        if root == sentinel {
+                            delay_reached_sentinel = true;
                         }
+                        loop;
+                    }

-                        // Skip null pointers, which can occur when a
-                        // unique pointer has already been freed.
-                        if ptr::is_null(*root) {
-                            loop;
-                        }
+                    // Skip null pointers, which can occur when a
+                    // unique pointer has already been freed.
+                    if ptr::is_null(*root) {
+                        loop;
+                    }

-                        if ptr::is_null(tydesc) {
-                            // Root is a generic box.
-                            let refcount = **root;
-                            if mem | task_local_heap != 0 && refcount != -1 {
-                                if !visitor(root, tydesc) { return; }
-                            } else if mem | exchange_heap != 0 && refcount == -1 {
-                                if !visitor(root, tydesc) { return; }
-                            }
-                        } else {
-                            // Root is a non-immediate.
-                            if mem | stack != 0 {
-                                if !visitor(root, tydesc) { return; }
-                            }
+                    if ptr::is_null(tydesc) {
+                        // Root is a generic box.
+                        let refcount = **root;
+                        if mem | task_local_heap != 0 && refcount != -1 {
+                            if !visitor(root, tydesc) { return; }
+                        } else if mem | exchange_heap != 0 && refcount == -1 {
+                            if !visitor(root, tydesc) { return; }
+                        }
+                    } else {
+                        // Root is a non-immediate.
+                        if mem | stack != 0 {
+                            if !visitor(root, tydesc) { return; }
                         }
                     }
                 }
-                None => ()
             }
-            reached_sentinel = delay_reached_sentinel;
+            None => ()
         }
+        reached_sentinel = delay_reached_sentinel;
     }
 }
diff --git a/src/libcore/pipes.rs b/src/libcore/pipes.rs
index 36cfdbf5617..2ec3afca612 100644
--- a/src/libcore/pipes.rs
+++ b/src/libcore/pipes.rs
@@ -156,9 +156,7 @@ pub impl PacketHeader {
     unsafe fn unblock(&self) {
         let old_task = swap_task(&mut self.blocked_task, ptr::null());
         if !old_task.is_null() {
-            unsafe {
-                rustrt::rust_task_deref(old_task)
-            }
+            rustrt::rust_task_deref(old_task)
         }
         match swap_state_acq(&mut self.state, Empty) {
             Empty | Blocked => (),
diff --git a/src/libcore/rt/sched/local_sched.rs b/src/libcore/rt/sched/local_sched.rs
index 77fbadf0bb7..2d1e06163be 100644
--- a/src/libcore/rt/sched/local_sched.rs
+++ b/src/libcore/rt/sched/local_sched.rs
@@ -80,10 +80,8 @@ pub unsafe fn unsafe_borrow() -> &mut Scheduler {
 }

 pub unsafe fn unsafe_borrow_io() -> &mut IoFactoryObject {
-    unsafe {
-        let sched = unsafe_borrow();
-        return sched.event_loop.io().unwrap();
-    }
+    let sched = unsafe_borrow();
+    return sched.event_loop.io().unwrap();
 }

 fn tls_key() -> tls::Key {
diff --git a/src/libcore/rt/uvll.rs b/src/libcore/rt/uvll.rs
index 640a69743ba..b7eff217ff8 100644
--- a/src/libcore/rt/uvll.rs
+++ b/src/libcore/rt/uvll.rs
@@ -98,7 +98,7 @@ pub enum uv_req_type {

 pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void {
     assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
-    let size = unsafe { rust_uv_handle_size(handle as uint) };
+    let size = rust_uv_handle_size(handle as uint);
     let p = malloc(size);
     assert!(p.is_not_null());
     return p;
@@ -110,7 +110,7 @@ pub unsafe fn free_handle(v: *c_void) {

 pub unsafe fn malloc_req(req: uv_req_type) -> *c_void {
     assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
-    let size = unsafe { rust_uv_req_size(req as uint) };
+    let size = rust_uv_req_size(req as uint);
     let p = malloc(size);
     assert!(p.is_not_null());
     return p;
diff --git a/src/libcore/unstable.rs b/src/libcore/unstable.rs
index a6bb93c20cd..4a69de26f6b 100644
--- a/src/libcore/unstable.rs
+++ b/src/libcore/unstable.rs
@@ -262,18 +262,16 @@ pub impl<T:Owned> Exclusive<T> {
     // the exclusive. Supporting that is a work in progress.
     #[inline(always)]
     unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
-        unsafe {
-            let rec = get_shared_mutable_state(&self.x);
-            do (*rec).lock.lock {
-                if (*rec).failed {
-                    fail!(
-                        ~"Poisoned exclusive - another task failed inside!");
-                }
-                (*rec).failed = true;
-                let result = f(&mut (*rec).data);
-                (*rec).failed = false;
-                result
+        let rec = get_shared_mutable_state(&self.x);
+        do (*rec).lock.lock {
+            if (*rec).failed {
+                fail!(
+                    ~"Poisoned exclusive - another task failed inside!");
             }
+            (*rec).failed = true;
+            let result = f(&mut (*rec).data);
+            (*rec).failed = false;
+            result
         }
     }

diff --git a/src/libcore/unstable/weak_task.rs b/src/libcore/unstable/weak_task.rs
index 4e2174fd5d2..7a30bb92111 100644
--- a/src/libcore/unstable/weak_task.rs
+++ b/src/libcore/unstable/weak_task.rs
@@ -43,11 +43,11 @@ pub unsafe fn weaken_task(f: &fn(Port<ShutdownMsg>)) {
     let task = get_task_id();
     // Expect the weak task service to be alive
     assert!(service.try_send(RegisterWeakTask(task, shutdown_chan)));
-    unsafe { rust_dec_kernel_live_count(); }
+    rust_dec_kernel_live_count();
     do (|| {
         f(shutdown_port.take())
     }).finally || {
-        unsafe { rust_inc_kernel_live_count(); }
+        rust_inc_kernel_live_count();
         // Service my have already exited
         service.send(UnregisterWeakTask(task));
     }
diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs
index b86e9a51293..68405f4fc5f 100644
--- a/src/librustc/middle/trans/base.rs
+++ b/src/librustc/middle/trans/base.rs
@@ -2628,13 +2628,11 @@ pub fn get_item_val(ccx: @CrateContext, id: ast::node_id) -> ValueRef {
         let class_ty = ty::lookup_item_type(tcx, parent_id).ty;
         // This code shouldn't be reached if the class is generic
         assert!(!ty::type_has_params(class_ty));
-        let lldty = unsafe {
-            T_fn(~[
+        let lldty = T_fn(~[
                 T_ptr(T_i8()),
                 T_ptr(type_of(ccx, class_ty))
             ],
-            T_nil())
-        };
+            T_nil());
         let s = get_dtor_symbol(ccx, /*bad*/copy *pt, dt.node.id, None);

         /* Make the declaration for the dtor */
diff --git a/src/libstd/arc.rs b/src/libstd/arc.rs
index 8abe0262314..33aa6171de4 100644
--- a/src/libstd/arc.rs
+++ b/src/libstd/arc.rs
@@ -177,15 +177,13 @@ pub impl<T:Owned> MutexARC<T> {
      */
     #[inline(always)]
     unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
-        unsafe {
-            let state = get_shared_mutable_state(&self.x);
-            // Borrowck would complain about this if the function were
-            // not already unsafe. See borrow_rwlock, far below.
-            do (&(*state).lock).lock {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail(&mut (*state).failed);
-                blk(&mut (*state).data)
-            }
+        let state = get_shared_mutable_state(&self.x);
+        // Borrowck would complain about this if the function were
+        // not already unsafe. See borrow_rwlock, far below.
+        do (&(*state).lock).lock {
+            check_poison(true, (*state).failed);
+            let _z = PoisonOnFail(&mut (*state).failed);
+            blk(&mut (*state).data)
         }
     }

@@ -195,16 +193,14 @@ pub impl<T:Owned> MutexARC<T> {
         &self,
         blk: &fn(x: &'x mut T, c: &'c Condvar) -> U) -> U
     {
-        unsafe {
-            let state = get_shared_mutable_state(&self.x);
-            do (&(*state).lock).lock_cond |cond| {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail(&mut (*state).failed);
-                blk(&mut (*state).data,
-                    &Condvar {is_mutex: true,
-                              failed: &mut (*state).failed,
-                              cond: cond })
-            }
+        let state = get_shared_mutable_state(&self.x);
+        do (&(*state).lock).lock_cond |cond| {
+            check_poison(true, (*state).failed);
+            let _z = PoisonOnFail(&mut (*state).failed);
+            blk(&mut (*state).data,
+                &Condvar {is_mutex: true,
+                          failed: &mut (*state).failed,
+                          cond: cond })
         }
     }
 }
