diff options
| author | Patrick Walton <pcwalton@mimiga.net> | 2014-03-05 15:28:08 -0800 |
|---|---|---|
| committer | Huon Wilson <dbau.pp+github@gmail.com> | 2014-03-21 23:37:21 +1100 |
| commit | af79a5aa7da4f42fc0939a19f46fa73b894d6e9a (patch) | |
| tree | 60c2e72eea83a8a4c70c76d6fe91967aeaf77632 /src/libsync | |
| parent | 579eb2400b3cb5d9cf03a5c8792d63630489193a (diff) | |
| download | rust-af79a5aa7da4f42fc0939a19f46fa73b894d6e9a.tar.gz rust-af79a5aa7da4f42fc0939a19f46fa73b894d6e9a.zip | |
test: Make manual changes to deal with the fallout from removal of
`~[T]` in test, libgetopts, compiletest, librustdoc, and libnum.
Diffstat (limited to 'src/libsync')
| -rw-r--r-- | src/libsync/arc.rs | 15 | ||||
| -rw-r--r-- | src/libsync/sync/mod.rs | 25 | ||||
| -rw-r--r-- | src/libsync/task_pool.rs | 12 |
3 files changed, 28 insertions(+), 24 deletions(-)
diff --git a/src/libsync/arc.rs b/src/libsync/arc.rs index 71adab71734..513ca799997 100644 --- a/src/libsync/arc.rs +++ b/src/libsync/arc.rs @@ -583,25 +583,26 @@ mod tests { use super::{Arc, RWArc, MutexArc, CowArc}; use std::task; + use std::vec_ng::Vec; #[test] fn manually_share_arc() { - let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); let arc_v = Arc::new(v); let (tx, rx) = channel(); task::spawn(proc() { - let arc_v: Arc<~[int]> = rx.recv(); + let arc_v: Arc<Vec<int>> = rx.recv(); let v = arc_v.get().clone(); - assert_eq!(v[3], 4); + assert_eq!(*v.get(3), 4); }); tx.send(arc_v.clone()); - assert_eq!(arc_v.get()[2], 3); - assert_eq!(arc_v.get()[4], 5); + assert_eq!(*arc_v.get().get(2), 3); + assert_eq!(*arc_v.get().get(4), 5); info!("{:?}", arc_v); } @@ -803,7 +804,7 @@ mod tests { }); // Readers try to catch the writer in the act - let mut children = ~[]; + let mut children = Vec::new(); for _ in range(0, 5) { let arc3 = arc.clone(); let mut builder = task::task(); @@ -857,7 +858,7 @@ mod tests { let arc = RWArc::new(0); // Reader tasks - let mut reader_convos = ~[]; + let mut reader_convos = Vec::new(); for _ in range(0, 10) { let ((tx1, rx1), (tx2, rx2)) = (channel(), channel()); reader_convos.push((tx1, rx2)); diff --git a/src/libsync/sync/mod.rs b/src/libsync/sync/mod.rs index 3bb60046b03..c50eeae18d2 100644 --- a/src/libsync/sync/mod.rs +++ b/src/libsync/sync/mod.rs @@ -161,10 +161,10 @@ impl<Q:Send> Sem<Q> { } #[doc(hidden)] -impl Sem<~[WaitQueue]> { +impl Sem<Vec<WaitQueue> > { fn new_and_signal(count: int, num_condvars: uint) - -> Sem<~[WaitQueue]> { - let mut queues = ~[]; + -> Sem<Vec<WaitQueue> > { + let mut queues = Vec::new(); for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); } Sem::new(count, queues) } @@ -182,7 +182,7 @@ enum ReacquireOrderLock<'a> { pub struct Condvar<'a> { // The 'Sem' object associated with this condvar. 
This is the one that's // atomically-unlocked-and-descheduled upon and reacquired during wakeup. - priv sem: &'a Sem<~[WaitQueue]>, + priv sem: &'a Sem<Vec<WaitQueue> >, // This is (can be) an extra semaphore which is held around the reacquire // operation on the first one. This is only used in cvars associated with // rwlocks, and is needed to ensure that, when a downgrader is trying to @@ -230,7 +230,7 @@ impl<'a> Condvar<'a> { } // Create waiter nobe, and enqueue ourself to // be woken up by a signaller. - wait_end = Some(state.blocked[condvar_id].wait_end()); + wait_end = Some(state.blocked.get(condvar_id).wait_end()); } else { out_of_bounds = Some(state.blocked.len()); } @@ -265,7 +265,7 @@ impl<'a> Condvar<'a> { let mut result = false; self.sem.with(|state| { if condvar_id < state.blocked.len() { - result = state.blocked[condvar_id].signal(); + result = state.blocked.get(condvar_id).signal(); } else { out_of_bounds = Some(state.blocked.len()); } @@ -290,7 +290,7 @@ impl<'a> Condvar<'a> { // To avoid :broadcast_heavy, we make a new waitqueue, // swap it out with the old one, and broadcast on the // old one outside of the little-lock. - queue = Some(replace(&mut state.blocked[condvar_id], + queue = Some(replace(state.blocked.get_mut(condvar_id), WaitQueue::new())); } else { out_of_bounds = Some(state.blocked.len()); @@ -326,7 +326,7 @@ fn check_cvar_bounds<U>( } #[doc(hidden)] -impl Sem<~[WaitQueue]> { +impl Sem<Vec<WaitQueue> > { // The only other places that condvars get built are rwlock.write_cond() // and rwlock_write_mode. pub fn access_cond<U>(&self, blk: |c: &Condvar| -> U) -> U { @@ -391,7 +391,7 @@ impl Semaphore { * unwinds. */ -pub struct Mutex { priv sem: Sem<~[WaitQueue]> } +pub struct Mutex { priv sem: Sem<Vec<WaitQueue> > } impl Clone for Mutex { /// Create a new handle to the mutex. 
fn clone(&self) -> Mutex { @@ -461,7 +461,7 @@ struct RWLockInner { */ pub struct RWLock { priv order_lock: Semaphore, - priv access_lock: Sem<~[WaitQueue]>, + priv access_lock: Sem<Vec<WaitQueue> >, priv state: UnsafeArc<RWLockInner>, } @@ -765,6 +765,7 @@ mod tests { use std::result; use std::task; use std::comm::Empty; + use std::vec_ng::Vec; /************************************************************************ * Semaphore tests @@ -931,7 +932,7 @@ mod tests { #[cfg(test)] fn test_mutex_cond_broadcast_helper(num_waiters: uint) { let m = Mutex::new(); - let mut rxs = ~[]; + let mut rxs = vec!(); for _ in range(0, num_waiters) { let mi = m.clone(); @@ -1200,7 +1201,7 @@ mod tests { } } let x = RWLock::new(); - let mut rxs = ~[]; + let mut rxs = vec!(); for _ in range(0, num_waiters) { let xi = x.clone(); diff --git a/src/libsync/task_pool.rs b/src/libsync/task_pool.rs index 7670e9cf50a..709dafd5b93 100644 --- a/src/libsync/task_pool.rs +++ b/src/libsync/task_pool.rs @@ -14,7 +14,6 @@ /// parallelism. use std::task; -use std::slice; enum Msg<T> { Execute(proc(&T)), @@ -22,7 +21,7 @@ enum Msg<T> { } pub struct TaskPool<T> { - priv channels: ~[Sender<Msg<T>>], + priv channels: Vec<Sender<Msg<T>>>, priv next_index: uint, } @@ -46,7 +45,7 @@ impl<T> TaskPool<T> { -> TaskPool<T> { assert!(n_tasks >= 1); - let channels = slice::from_fn(n_tasks, |i| { + let channels = Vec::from_fn(n_tasks, |i| { let (tx, rx) = channel::<Msg<T>>(); let init_fn = init_fn_factory(); @@ -66,13 +65,16 @@ impl<T> TaskPool<T> { tx }); - return TaskPool { channels: channels, next_index: 0 }; + return TaskPool { + channels: channels, + next_index: 0, + }; } /// Executes the function `f` on a task in the pool. The function /// receives a reference to the local data returned by the `init_fn`. 
pub fn execute(&mut self, f: proc(&T)) { - self.channels[self.next_index].send(Execute(f)); + self.channels.get(self.next_index).send(Execute(f)); self.next_index += 1; if self.next_index == self.channels.len() { self.next_index = 0; } } |
