diff options
| author | Niko Matsakis <niko@alum.mit.edu> | 2013-02-26 14:34:00 -0500 |
|---|---|---|
| committer | Niko Matsakis <niko@alum.mit.edu> | 2013-03-06 15:12:57 -0500 |
| commit | 3168fe06ff69970be329583f560a3ccd9c00c874 (patch) | |
| tree | 1c84b080a887c23434f4049fd701413d7b2a49af /src/libstd | |
| parent | 876b6ba792f83f1b50d1356e7305f334b5ba2f05 (diff) | |
| download | rust-3168fe06ff69970be329583f560a3ccd9c00c874.tar.gz rust-3168fe06ff69970be329583f560a3ccd9c00c874.zip | |
Add manual &self/ and &static/ and /&self declarations that
are currently inferred. New rules are coming that will require them to be explicit. Also add some explicit self declarations.
Diffstat (limited to 'src/libstd')
| -rw-r--r-- | src/libstd/arc.rs | 55 | ||||
| -rw-r--r-- | src/libstd/arena.rs | 16 | ||||
| -rw-r--r-- | src/libstd/base64.rs | 4 | ||||
| -rw-r--r-- | src/libstd/bigint.rs | 28 | ||||
| -rw-r--r-- | src/libstd/flatpipes.rs | 4 | ||||
| -rw-r--r-- | src/libstd/json.rs | 6 | ||||
| -rw-r--r-- | src/libstd/serialize.rs | 6 | ||||
| -rw-r--r-- | src/libstd/smallintmap.rs | 4 | ||||
| -rw-r--r-- | src/libstd/sort.rs | 11 | ||||
| -rw-r--r-- | src/libstd/stats.rs | 2 | ||||
| -rw-r--r-- | src/libstd/sync.rs | 97 | ||||
| -rw-r--r-- | src/libstd/treemap.rs | 13 | ||||
| -rw-r--r-- | src/libstd/workcache.rs | 27 |
13 files changed, 159 insertions(+), 114 deletions(-)
diff --git a/src/libstd/arc.rs b/src/libstd/arc.rs index e29474f82ef..b9b39063667 100644 --- a/src/libstd/arc.rs +++ b/src/libstd/arc.rs @@ -25,12 +25,17 @@ use core::ptr; use core::task; /// As sync::condvar, a mechanism for unlock-and-descheduling and signalling. -pub struct Condvar { is_mutex: bool, failed: &mut bool, cond: &sync::Condvar } +pub struct Condvar { + is_mutex: bool, + failed: &self/mut bool, + cond: &self/sync::Condvar/&self +} -pub impl &Condvar { +pub impl Condvar/&self { /// Atomically exit the associated ARC and block until a signal is sent. #[inline(always)] - fn wait() { self.wait_on(0) } + fn wait(&self) { self.wait_on(0) } + /** * Atomically exit the associated ARC and block on a specified condvar * until a signal is sent on that same condvar (as sync::cond.wait_on). @@ -38,33 +43,37 @@ pub impl &Condvar { * wait() is equivalent to wait_on(0). */ #[inline(always)] - fn wait_on(condvar_id: uint) { + fn wait_on(&self, condvar_id: uint) { assert !*self.failed; self.cond.wait_on(condvar_id); // This is why we need to wrap sync::condvar. check_poison(self.is_mutex, *self.failed); } + /// Wake up a blocked task. Returns false if there was no blocked task. #[inline(always)] - fn signal() -> bool { self.signal_on(0) } + fn signal(&self) -> bool { self.signal_on(0) } + /** * Wake up a blocked task on a specified condvar (as * sync::cond.signal_on). Returns false if there was no blocked task. */ #[inline(always)] - fn signal_on(condvar_id: uint) -> bool { + fn signal_on(&self, condvar_id: uint) -> bool { assert !*self.failed; self.cond.signal_on(condvar_id) } + /// Wake up all blocked tasks. Returns the number of tasks woken. #[inline(always)] - fn broadcast() -> uint { self.broadcast_on(0) } + fn broadcast(&self) -> uint { self.broadcast_on(0) } + /** * Wake up all blocked tasks on a specified condvar (as * sync::cond.broadcast_on). Returns Returns the number of tasks woken. 
*/ #[inline(always)] - fn broadcast_on(condvar_id: uint) -> uint { + fn broadcast_on(&self, condvar_id: uint) -> uint { assert !*self.failed; self.cond.broadcast_on(condvar_id) } @@ -141,7 +150,7 @@ impl<T:Owned> Clone for MutexARC<T> { } } -pub impl<T:Owned> &MutexARC<T> { +pub impl<T:Owned> MutexARC<T> { /** * Access the underlying mutable data with mutual exclusion from other @@ -167,7 +176,7 @@ pub impl<T:Owned> &MutexARC<T> { * blocked on the mutex) will also fail immediately. */ #[inline(always)] - unsafe fn access<U>(blk: fn(x: &mut T) -> U) -> U { + unsafe fn access<U>(&self, blk: fn(x: &mut T) -> U) -> U { unsafe { let state = get_shared_mutable_state(&self.x); // Borrowck would complain about this if the function were @@ -179,9 +188,13 @@ pub impl<T:Owned> &MutexARC<T> { } } } + /// As access(), but with a condvar, as sync::mutex.lock_cond(). #[inline(always)] - unsafe fn access_cond<U>(blk: fn(x: &x/mut T, c: &c/Condvar) -> U) -> U { + unsafe fn access_cond<U>( + &self, + blk: &fn(x: &x/mut T, c: &c/Condvar) -> U) -> U + { unsafe { let state = get_shared_mutable_state(&self.x); do (&(*state).lock).lock_cond |cond| { @@ -276,7 +289,7 @@ pub impl<T:Const + Owned> RWARC<T> { } -pub impl<T:Const + Owned> &RWARC<T> { +pub impl<T:Const + Owned> RWARC<T> { /** * Access the underlying data mutably. Locks the rwlock in write mode; * other readers and writers will block. @@ -288,7 +301,7 @@ pub impl<T:Const + Owned> &RWARC<T> { * poison the ARC, so subsequent readers and writers will both also fail. */ #[inline(always)] - fn write<U>(blk: fn(x: &mut T) -> U) -> U { + fn write<U>(&self, blk: fn(x: &mut T) -> U) -> U { unsafe { let state = get_shared_mutable_state(&self.x); do (*borrow_rwlock(state)).write { @@ -300,7 +313,7 @@ pub impl<T:Const + Owned> &RWARC<T> { } /// As write(), but with a condvar, as sync::rwlock.write_cond(). 
#[inline(always)] - fn write_cond<U>(blk: fn(x: &x/mut T, c: &c/Condvar) -> U) -> U { + fn write_cond<U>(&self, blk: fn(x: &x/mut T, c: &c/Condvar) -> U) -> U { unsafe { let state = get_shared_mutable_state(&self.x); do (*borrow_rwlock(state)).write_cond |cond| { @@ -389,13 +402,13 @@ fn borrow_rwlock<T:Const + Owned>(state: *const RWARCInner<T>) -> *RWlock { /// The "write permission" token used for RWARC.write_downgrade(). pub enum RWWriteMode<T> = - (&mut T, sync::RWlockWriteMode, PoisonOnFail); + (&self/mut T, sync::RWlockWriteMode/&self, PoisonOnFail); /// The "read permission" token used for RWARC.write_downgrade(). -pub enum RWReadMode<T> = (&T, sync::RWlockReadMode); +pub enum RWReadMode<T> = (&self/T, sync::RWlockReadMode/&self); -pub impl<T:Const + Owned> &RWWriteMode<T> { +pub impl<T:Const + Owned> RWWriteMode/&self<T> { /// Access the pre-downgrade RWARC in write mode. - fn write<U>(blk: fn(x: &mut T) -> U) -> U { + fn write<U>(&self, blk: fn(x: &mut T) -> U) -> U { match *self { RWWriteMode((ref data, ref token, _)) => { do token.write { @@ -405,7 +418,7 @@ pub impl<T:Const + Owned> &RWWriteMode<T> { } } /// Access the pre-downgrade RWARC in write mode with a condvar. - fn write_cond<U>(blk: fn(x: &x/mut T, c: &c/Condvar) -> U) -> U { + fn write_cond<U>(&self, blk: fn(x: &x/mut T, c: &c/Condvar) -> U) -> U { match *self { RWWriteMode((ref data, ref token, ref poison)) => { do token.write_cond |cond| { @@ -423,9 +436,9 @@ pub impl<T:Const + Owned> &RWWriteMode<T> { } } -pub impl<T:Const + Owned> &RWReadMode<T> { +pub impl<T:Const + Owned> RWReadMode/&self<T> { /// Access the post-downgrade rwlock in read mode. 
- fn read<U>(blk: fn(x: &T) -> U) -> U { + fn read<U>(&self, blk: fn(x: &T) -> U) -> U { match *self { RWReadMode((data, ref token)) => { do token.read { blk(data) } diff --git a/src/libstd/arena.rs b/src/libstd/arena.rs index c7b50bf8908..7bbd5cd41a3 100644 --- a/src/libstd/arena.rs +++ b/src/libstd/arena.rs @@ -160,9 +160,9 @@ unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) { (reinterpret_cast(&(p & !1)), p & 1 == 1) } -pub impl &Arena { +pub impl Arena { // Functions for the POD part of the arena - fn alloc_pod_grow(n_bytes: uint, align: uint) -> *u8 { + fn alloc_pod_grow(&self, n_bytes: uint, align: uint) -> *u8 { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.pod_head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); @@ -174,7 +174,7 @@ pub impl &Arena { } #[inline(always)] - fn alloc_pod_inner(n_bytes: uint, align: uint) -> *u8 { + fn alloc_pod_inner(&self, n_bytes: uint, align: uint) -> *u8 { let head = &mut self.pod_head; let start = round_up_to(head.fill, align); @@ -193,7 +193,7 @@ pub impl &Arena { } #[inline(always)] - fn alloc_pod<T>(op: fn() -> T) -> &self/T { + fn alloc_pod<T>(&self, op: fn() -> T) -> &self/T { unsafe { let tydesc = sys::get_type_desc::<T>(); let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align); @@ -204,7 +204,7 @@ pub impl &Arena { } // Functions for the non-POD part of the arena - fn alloc_nonpod_grow(n_bytes: uint, align: uint) -> (*u8, *u8) { + fn alloc_nonpod_grow(&self, n_bytes: uint, align: uint) -> (*u8, *u8) { // Allocate a new chunk. 
let chunk_size = at_vec::capacity(self.head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); @@ -216,7 +216,7 @@ pub impl &Arena { } #[inline(always)] - fn alloc_nonpod_inner(n_bytes: uint, align: uint) -> (*u8, *u8) { + fn alloc_nonpod_inner(&self, n_bytes: uint, align: uint) -> (*u8, *u8) { let head = &mut self.head; let tydesc_start = head.fill; @@ -238,7 +238,7 @@ pub impl &Arena { } #[inline(always)] - fn alloc_nonpod<T>(op: fn() -> T) -> &self/T { + fn alloc_nonpod<T>(&self, op: fn() -> T) -> &self/T { unsafe { let tydesc = sys::get_type_desc::<T>(); let (ty_ptr, ptr) = @@ -260,7 +260,7 @@ pub impl &Arena { // The external interface #[inline(always)] - fn alloc<T>(op: fn() -> T) -> &self/T { + fn alloc<T>(&self, op: fn() -> T) -> &self/T { unsafe { if !rusti::needs_drop::<T>() { self.alloc_pod(op) diff --git a/src/libstd/base64.rs b/src/libstd/base64.rs index 17b3cda0713..dceb39312da 100644 --- a/src/libstd/base64.rs +++ b/src/libstd/base64.rs @@ -16,7 +16,7 @@ pub trait ToBase64 { pure fn to_base64() -> ~str; } -impl ToBase64 for &[u8] { +impl ToBase64 for &self/[u8] { pure fn to_base64() -> ~str { let chars = str::chars( ~"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" @@ -69,7 +69,7 @@ impl ToBase64 for &[u8] { } } -impl ToBase64 for &str { +impl ToBase64 for &self/str { pure fn to_base64() -> ~str { str::to_bytes(self).to_base64() } diff --git a/src/libstd/bigint.rs b/src/libstd/bigint.rs index e8836c58662..a3109c00c01 100644 --- a/src/libstd/bigint.rs +++ b/src/libstd/bigint.rs @@ -1045,7 +1045,9 @@ mod biguint_tests { assert BigUint::new(~[0, 0, -1]).to_uint() == uint::max_value; } - const sum_triples: &[(&[BigDigit], &[BigDigit], &[BigDigit])] = &[ + const sum_triples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[], &[], &[]), (&[], &[ 1], &[ 1]), (&[ 1], &[ 1], &[ 2]), @@ -1083,7 +1085,9 @@ mod biguint_tests { } } - const mul_triples: &[(&[BigDigit], &[BigDigit], &[BigDigit])] = 
&[ + const mul_triples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[], &[], &[]), (&[], &[ 1], &[]), (&[ 2], &[], &[]), @@ -1107,8 +1111,10 @@ mod biguint_tests { (&[ 0, 0, 1], &[ 0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]) ]; - const divmod_quadruples: &[(&[BigDigit], &[BigDigit], - &[BigDigit], &[BigDigit])] + const divmod_quadruples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[ 1], &[ 2], &[], &[1]), (&[ 1, 1], &[ 2], &[-1/2+1], &[1]), @@ -1393,7 +1399,9 @@ mod bigint_tests { ).to_uint() == 0; } - const sum_triples: &[(&[BigDigit], &[BigDigit], &[BigDigit])] = &[ + const sum_triples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[], &[], &[]), (&[], &[ 1], &[ 1]), (&[ 1], &[ 1], &[ 2]), @@ -1443,7 +1451,9 @@ mod bigint_tests { } } - const mul_triples: &[(&[BigDigit], &[BigDigit], &[BigDigit])] = &[ + const mul_triples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[], &[], &[]), (&[], &[ 1], &[]), (&[ 2], &[], &[]), @@ -1467,8 +1477,10 @@ mod bigint_tests { (&[ 0, 0, 1], &[ 0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]) ]; - const divmod_quadruples: &[(&[BigDigit], &[BigDigit], - &[BigDigit], &[BigDigit])] + const divmod_quadruples: &static/[(&static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit], + &static/[BigDigit])] = &[ (&[ 1], &[ 2], &[], &[1]), (&[ 1, 1], &[ 2], &[-1/2+1], &[1]), diff --git a/src/libstd/flatpipes.rs b/src/libstd/flatpipes.rs index 8f6ec2b9cd9..798c5ae57c7 100644 --- a/src/libstd/flatpipes.rs +++ b/src/libstd/flatpipes.rs @@ -468,8 +468,8 @@ pub mod flatteners { static fn from_writer(w: Writer) -> Self; } - impl FromReader for json::Decoder { - static fn from_reader(r: Reader) -> json::Decoder { + impl FromReader for json::Decoder/&self { + static fn from_reader(r: Reader) -> json::Decoder/&self { match json::from_reader(r) { Ok(json) => { json::Decoder(json) diff --git 
a/src/libstd/json.rs b/src/libstd/json.rs index 7993f15f622..cfa66ae000a 100644 --- a/src/libstd/json.rs +++ b/src/libstd/json.rs @@ -749,14 +749,14 @@ pub fn from_str(s: &str) -> Result<Json, Error> { pub struct Decoder { priv json: Json, - priv mut stack: ~[&Json], + priv mut stack: ~[&self/Json], } pub fn Decoder(json: Json) -> Decoder { Decoder { json: json, stack: ~[] } } -priv impl Decoder { +priv impl Decoder/&self { fn peek(&self) -> &self/Json { if self.stack.len() == 0 { self.stack.push(&self.json); } self.stack[self.stack.len() - 1] @@ -768,7 +768,7 @@ priv impl Decoder { } } -impl serialize::Decoder for Decoder { +impl serialize::Decoder for Decoder/&self { fn read_nil(&self) -> () { debug!("read_nil"); match *self.pop() { diff --git a/src/libstd/serialize.rs b/src/libstd/serialize.rs index 66db951e12b..5bbd926ba6b 100644 --- a/src/libstd/serialize.rs +++ b/src/libstd/serialize.rs @@ -213,7 +213,7 @@ impl<D:Decoder> Decodable<D> for i64 { } } -impl<S:Encoder> Encodable<S> for &str { +impl<S:Encoder> Encodable<S> for &self/str { fn encode(&self, s: &S) { s.emit_borrowed_str(*self) } } @@ -286,7 +286,7 @@ impl<D:Decoder> Decodable<D> for () { } } -impl<S:Encoder,T:Encodable<S>> Encodable<S> for &T { +impl<S:Encoder,T:Encodable<S>> Encodable<S> for &self/T { fn encode(&self, s: &S) { s.emit_borrowed(|| (**self).encode(s)) } @@ -316,7 +316,7 @@ impl<D:Decoder,T:Decodable<D>> Decodable<D> for @T { } } -impl<S:Encoder,T:Encodable<S>> Encodable<S> for &[T] { +impl<S:Encoder,T:Encodable<S>> Encodable<S> for &self/[T] { fn encode(&self, s: &S) { do s.emit_borrowed_vec(self.len()) { for self.eachi |i, e| { diff --git a/src/libstd/smallintmap.rs b/src/libstd/smallintmap.rs index 68d22f7c919..aad8fab834f 100644 --- a/src/libstd/smallintmap.rs +++ b/src/libstd/smallintmap.rs @@ -22,7 +22,7 @@ pub struct SmallIntMap<T> { priv v: ~[Option<T>], } -impl<V> BaseIter<(uint, &V)> for SmallIntMap<V> { +impl<V> BaseIter<(uint, &self/V)> for SmallIntMap<V> { /// Visit all 
key-value pairs in order pure fn each(&self, it: fn(&(uint, &self/V)) -> bool) { for uint::range(0, self.v.len()) |i| { @@ -36,7 +36,7 @@ impl<V> BaseIter<(uint, &V)> for SmallIntMap<V> { pure fn size_hint(&self) -> Option<uint> { Some(self.len()) } } -impl<V> ReverseIter<(uint, &V)> for SmallIntMap<V> { +impl<V> ReverseIter<(uint, &self/V)> for SmallIntMap<V> { /// Visit all key-value pairs in reverse order pure fn each_reverse(&self, it: fn(&(uint, &self/V)) -> bool) { for uint::range_rev(self.v.len(), 0) |i| { diff --git a/src/libstd/sort.rs b/src/libstd/sort.rs index 43fab9df163..5c037b5bac5 100644 --- a/src/libstd/sort.rs +++ b/src/libstd/sort.rs @@ -17,7 +17,7 @@ use core::util; use core::vec::{len, push}; use core::vec; -type Le<T> = pure fn(v1: &T, v2: &T) -> bool; +type Le<T> = &self/pure fn(v1: &T, v2: &T) -> bool; /** * Merge sort. Returns a new vector containing the sorted list. @@ -169,7 +169,7 @@ pub trait Sort { fn qsort(self); } -impl<T:Copy + Ord + Eq> Sort for &mut [T] { +impl<T:Copy + Ord + Eq> Sort for &self/mut [T] { fn qsort(self) { quick_sort3(self); } } @@ -1178,11 +1178,10 @@ mod big_tests { struct LVal { val: uint, - key: fn(@uint), - + key: &self/fn(@uint), } - impl Drop for LVal { + impl Drop for LVal/&self { fn finalize(&self) { let x = unsafe { task::local_data::local_data_get(self.key) }; match x { @@ -1196,7 +1195,7 @@ mod big_tests { } } - impl Ord for LVal { + impl Ord for LVal/&self { pure fn lt(&self, other: &a/LVal/&self) -> bool { (*self).val < other.val } diff --git a/src/libstd/stats.rs b/src/libstd/stats.rs index 7dafdec95e0..b786699351e 100644 --- a/src/libstd/stats.rs +++ b/src/libstd/stats.rs @@ -30,7 +30,7 @@ pub trait Stats { fn median_abs_dev_pct(self) -> f64; } -impl Stats for &[f64] { +impl Stats for &self/[f64] { fn sum(self) -> f64 { vec::foldl(0.0, self, |p,q| p + *q) } diff --git a/src/libstd/sync.rs b/src/libstd/sync.rs index 83f80f94382..e02d09954d3 100644 --- a/src/libstd/sync.rs +++ b/src/libstd/sync.rs @@ 
-98,7 +98,7 @@ fn new_sem_and_signal(count: int, num_condvars: uint) } #[doc(hidden)] -pub impl<Q:Owned> &Sem<Q> { +pub impl<Q:Owned> &self/Sem<Q> { fn acquire() { let mut waiter_nobe = None; unsafe { @@ -134,7 +134,7 @@ pub impl<Q:Owned> &Sem<Q> { } // FIXME(#3154) move both copies of this into Sem<Q>, and unify the 2 structs #[doc(hidden)] -pub impl &Sem<()> { +pub impl &self/Sem<()> { fn access<U>(blk: fn() -> U) -> U { let mut release = None; unsafe { @@ -147,7 +147,7 @@ pub impl &Sem<()> { } } #[doc(hidden)] -pub impl &Sem<~[Waitqueue]> { +pub impl &self/Sem<~[Waitqueue]> { fn access<U>(blk: fn() -> U) -> U { let mut release = None; unsafe { @@ -162,11 +162,11 @@ pub impl &Sem<~[Waitqueue]> { // FIXME(#3588) should go inside of access() #[doc(hidden)] -type SemRelease = SemReleaseGeneric<()>; -type SemAndSignalRelease = SemReleaseGeneric<~[Waitqueue]>; -struct SemReleaseGeneric<Q> { sem: &Sem<Q> } +type SemRelease = SemReleaseGeneric/&self<()>; +type SemAndSignalRelease = SemReleaseGeneric/&self<~[Waitqueue]>; +struct SemReleaseGeneric<Q> { sem: &self/Sem<Q> } -impl<Q:Owned> Drop for SemReleaseGeneric<Q> { +impl<Q:Owned> Drop for SemReleaseGeneric/&self<Q> { fn finalize(&self) { self.sem.release(); } @@ -186,11 +186,11 @@ fn SemAndSignalRelease(sem: &r/Sem<~[Waitqueue]>) } /// A mechanism for atomic-unlock-and-deschedule blocking and signalling. -pub struct Condvar { priv sem: &Sem<~[Waitqueue]> } +pub struct Condvar { priv sem: &self/Sem<~[Waitqueue]> } -impl Drop for Condvar { fn finalize(&self) {} } +impl Drop for Condvar/&self { fn finalize(&self) {} } -pub impl &Condvar { +pub impl Condvar/&self { /** * Atomically drop the associated lock, and block until a signal is sent. * @@ -199,7 +199,8 @@ pub impl &Condvar { * while waiting on a condition variable will wake up, fail, and unlock * the associated lock as it unwinds. 
*/ - fn wait() { self.wait_on(0) } + fn wait(&self) { self.wait_on(0) } + /** * As wait(), but can specify which of multiple condition variables to * wait on. Only a signal_on() or broadcast_on() with the same condvar_id @@ -211,7 +212,7 @@ pub impl &Condvar { * * wait() is equivalent to wait_on(0). */ - fn wait_on(condvar_id: uint) { + fn wait_on(&self, condvar_id: uint) { // Create waiter nobe. let (WaitEnd, SignalEnd) = comm::oneshot(); let mut WaitEnd = Some(WaitEnd); @@ -256,10 +257,10 @@ pub impl &Condvar { // mutex during unwinding. As long as the wrapper (mutex, etc) is // bounded in when it gets released, this shouldn't hang forever. struct SemAndSignalReacquire { - sem: &Sem<~[Waitqueue]>, + sem: &self/Sem<~[Waitqueue]>, } - impl Drop for SemAndSignalReacquire { + impl Drop for SemAndSignalReacquire/&self { fn finalize(&self) { unsafe { // Needs to succeed, instead of itself dying. @@ -279,9 +280,10 @@ pub impl &Condvar { } /// Wake up a blocked task. Returns false if there was no blocked task. - fn signal() -> bool { self.signal_on(0) } + fn signal(&self) -> bool { self.signal_on(0) } + /// As signal, but with a specified condvar_id. See wait_on. - fn signal_on(condvar_id: uint) -> bool { + fn signal_on(&self, condvar_id: uint) -> bool { let mut out_of_bounds = None; let mut result = false; unsafe { @@ -299,9 +301,10 @@ pub impl &Condvar { } /// Wake up all blocked tasks. Returns the number of tasks woken. - fn broadcast() -> uint { self.broadcast_on(0) } + fn broadcast(&self) -> uint { self.broadcast_on(0) } + /// As broadcast, but with a specified condvar_id. See wait_on. 
- fn broadcast_on(condvar_id: uint) -> uint { + fn broadcast_on(&self, condvar_id: uint) -> uint { let mut out_of_bounds = None; let mut queue = None; unsafe { @@ -342,9 +345,9 @@ fn check_cvar_bounds<U>(out_of_bounds: Option<uint>, id: uint, act: &str, } #[doc(hidden)] -pub impl &Sem<~[Waitqueue]> { +pub impl Sem<~[Waitqueue]> { // The only other place that condvars get built is rwlock_write_mode. - fn access_cond<U>(blk: fn(c: &Condvar) -> U) -> U { + fn access_cond<U>(&self, blk: fn(c: &Condvar) -> U) -> U { do self.access { blk(&Condvar { sem: self }) } } } @@ -368,18 +371,18 @@ impl Clone for Semaphore { } } -pub impl &Semaphore { +pub impl Semaphore { /** * Acquire a resource represented by the semaphore. Blocks if necessary * until resource(s) become available. */ - fn acquire() { (&self.sem).acquire() } + fn acquire(&self) { (&self.sem).acquire() } /** * Release a held resource represented by the semaphore. Wakes a blocked * contending task, if any exist. Won't block the caller. */ - fn release() { (&self.sem).release() } + fn release(&self) { (&self.sem).release() } /// Run a function with ownership of one of the semaphore's resources. fn access<U>(blk: fn() -> U) -> U { (&self.sem).access(blk) } @@ -416,12 +419,12 @@ impl Clone for Mutex { fn clone(&self) -> Mutex { Mutex { sem: Sem((*self.sem).clone()) } } } -pub impl &Mutex { +pub impl Mutex { /// Run a function with ownership of the mutex. - fn lock<U>(blk: fn() -> U) -> U { (&self.sem).access(blk) } + fn lock<U>(&self, blk: fn() -> U) -> U { (&self.sem).access(blk) } /// Run a function with ownership of the mutex and a handle to a condvar. - fn lock_cond<U>(blk: fn(c: &Condvar) -> U) -> U { + fn lock_cond<U>(&self, blk: fn(c: &Condvar) -> U) -> U { (&self.sem).access_cond(blk) } } @@ -465,9 +468,9 @@ pub fn rwlock_with_condvars(num_condvars: uint) -> RWlock { read_count: 0 }) } } -pub impl &RWlock { +pub impl RWlock { /// Create a new handle to the rwlock. 
- fn clone() -> RWlock { + fn clone(&self) -> RWlock { RWlock { order_lock: (&(self.order_lock)).clone(), access_lock: Sem((*self.access_lock).clone()), state: self.state.clone() } @@ -477,7 +480,7 @@ pub impl &RWlock { * Run a function with the rwlock in read mode. Calls to 'read' from other * tasks may run concurrently with this one. */ - fn read<U>(blk: fn() -> U) -> U { + fn read<U>(&self, blk: fn() -> U) -> U { let mut release = None; unsafe { do task::unkillable { @@ -508,7 +511,7 @@ pub impl &RWlock { * Run a function with the rwlock in write mode. No calls to 'read' or * 'write' from other tasks will run concurrently with this one. */ - fn write<U>(blk: fn() -> U) -> U { + fn write<U>(&self, blk: fn() -> U) -> U { unsafe { do task::unkillable { (&self.order_lock).acquire(); @@ -526,7 +529,7 @@ pub impl &RWlock { * the waiting task is signalled. (Note: a writer that waited and then * was signalled might reacquire the lock before other waiting writers.) */ - fn write_cond<U>(blk: fn(c: &Condvar) -> U) -> U { + fn write_cond<U>(&self, blk: fn(c: &Condvar) -> U) -> U { // NB: You might think I should thread the order_lock into the cond // wait call, so that it gets waited on before access_lock gets // reacquired upon being woken up. However, (a) this would be not @@ -561,7 +564,7 @@ pub impl &RWlock { * } * ~~~ */ - fn write_downgrade<U>(blk: fn(v: RWlockWriteMode) -> U) -> U { + fn write_downgrade<U>(&self, blk: fn(v: RWlockWriteMode) -> U) -> U { // Implementation slightly different from the slicker 'write's above. // The exit path is conditional on whether the caller downgrades. let mut _release = None; @@ -577,7 +580,7 @@ pub impl &RWlock { } /// To be called inside of the write_downgrade block. 
- fn downgrade(token: RWlockWriteMode/&a) -> RWlockReadMode/&a { + fn downgrade(&self, token: RWlockWriteMode/&a) -> RWlockReadMode/&a { if !ptr::ref_eq(self, token.lock) { fail!(~"Can't downgrade() with a different rwlock's write_mode!"); } @@ -606,10 +609,10 @@ pub impl &RWlock { // FIXME(#3588) should go inside of read() #[doc(hidden)] struct RWlockReleaseRead { - lock: &RWlock, + lock: &self/RWlock, } -impl Drop for RWlockReleaseRead { +impl Drop for RWlockReleaseRead/&self { fn finalize(&self) { unsafe { do task::unkillable { @@ -640,10 +643,10 @@ fn RWlockReleaseRead(lock: &r/RWlock) -> RWlockReleaseRead/&r { // FIXME(#3588) should go inside of downgrade() #[doc(hidden)] struct RWlockReleaseDowngrade { - lock: &RWlock, + lock: &self/RWlock, } -impl Drop for RWlockReleaseDowngrade { +impl Drop for RWlockReleaseDowngrade/&self { fn finalize(&self) { unsafe { do task::unkillable { @@ -680,23 +683,25 @@ fn RWlockReleaseDowngrade(lock: &r/RWlock) -> RWlockReleaseDowngrade/&r { } /// The "write permission" token used for rwlock.write_downgrade(). -pub struct RWlockWriteMode { priv lock: &RWlock } -impl Drop for RWlockWriteMode { fn finalize(&self) {} } +pub struct RWlockWriteMode { priv lock: &self/RWlock } +impl Drop for RWlockWriteMode/&self { fn finalize(&self) {} } + /// The "read permission" token used for rwlock.write_downgrade(). -pub struct RWlockReadMode { priv lock: &RWlock } -impl Drop for RWlockReadMode { fn finalize(&self) {} } +pub struct RWlockReadMode { priv lock: &self/RWlock } +impl Drop for RWlockReadMode/&self { fn finalize(&self) {} } -pub impl &RWlockWriteMode { +pub impl RWlockWriteMode/&self { /// Access the pre-downgrade rwlock in write mode. - fn write<U>(blk: fn() -> U) -> U { blk() } + fn write<U>(&self, blk: fn() -> U) -> U { blk() } /// Access the pre-downgrade rwlock in write mode with a condvar. 
- fn write_cond<U>(blk: fn(c: &Condvar) -> U) -> U { + fn write_cond<U>(&self, blk: fn(c: &Condvar) -> U) -> U { blk(&Condvar { sem: &self.lock.access_lock }) } } -pub impl &RWlockReadMode { + +pub impl RWlockReadMode/&self { /// Access the post-downgrade rwlock in read mode. - fn read<U>(blk: fn() -> U) -> U { blk() } + fn read<U>(&self, blk: fn() -> U) -> U { blk() } } /**************************************************************************** diff --git a/src/libstd/treemap.rs b/src/libstd/treemap.rs index a8d343f8098..d1fe1d4c67a 100644 --- a/src/libstd/treemap.rs +++ b/src/libstd/treemap.rs @@ -94,7 +94,7 @@ impl<K: Ord + TotalOrd, V> Ord for TreeMap<K, V> { } } -impl<K: TotalOrd, V> BaseIter<(&K, &V)> for TreeMap<K, V> { +impl<'self, K: TotalOrd, V> BaseIter<(&'self K, &'self V)> for TreeMap<K, V> { /// Visit all key-value pairs in order pure fn each(&self, f: fn(&(&self/K, &self/V)) -> bool) { each(&self.root, f) @@ -102,7 +102,10 @@ impl<K: TotalOrd, V> BaseIter<(&K, &V)> for TreeMap<K, V> { pure fn size_hint(&self) -> Option<uint> { Some(self.len()) } } -impl<K: TotalOrd, V> ReverseIter<(&K, &V)> for TreeMap<K, V> { +impl<'self, K: TotalOrd, V> + ReverseIter<(&'self K, &'self V)> + for TreeMap<K, V> +{ /// Visit all key-value pairs in reverse order pure fn each_reverse(&self, f: fn(&(&self/K, &self/V)) -> bool) { each_reverse(&self.root, f); @@ -195,8 +198,8 @@ pub impl<K: TotalOrd, V> TreeMap<K, V> { /// Lazy forward iterator over a map pub struct TreeMapIterator<K, V> { - priv stack: ~[&~TreeNode<K, V>], - priv node: &Option<~TreeNode<K, V>> + priv stack: ~[&self/~TreeNode<K, V>], + priv node: &self/Option<~TreeNode<K, V>> } /// Advance the iterator to the next node (in order) and return a @@ -494,7 +497,7 @@ pub impl <T: TotalOrd> TreeSet<T> { /// Lazy forward iterator over a set pub struct TreeSetIterator<T> { - priv iter: TreeMapIterator<T, ()> + priv iter: TreeMapIterator/&self<T, ()> } /// Advance the iterator to the next node (in order). 
If this iterator is diff --git a/src/libstd/workcache.rs b/src/libstd/workcache.rs index c46c2d17ed0..e5435ca18b7 100644 --- a/src/libstd/workcache.rs +++ b/src/libstd/workcache.rs @@ -168,7 +168,8 @@ struct Database { } pub impl Database { - fn prepare(&mut self, fn_name: &str, + fn prepare(&mut self, + fn_name: &str, declared_inputs: &WorkMap) -> Option<(WorkMap, WorkMap, ~str)> { let k = json_encode(&(fn_name, declared_inputs)); @@ -233,7 +234,9 @@ fn json_encode<T:Encodable<json::Encoder>>(t: &T) -> ~str { } } -fn json_decode<T:Decodable<json::Decoder>>(s: &str) -> T { +fn json_decode<T:Decodable<json::Decoder/&static>>( // FIXME(#5121) + s: &str) -> T +{ do io::with_str_reader(s) |rdr| { let j = result::unwrap(json::from_reader(rdr)); Decodable::decode(&json::Decoder(j)) @@ -261,7 +264,9 @@ pub impl Context { Context{db: db, logger: lg, cfg: cfg, freshness: LinearMap::new()} } - fn prep<T:Owned + Encodable<json::Encoder> + Decodable<json::Decoder>>( + fn prep<T:Owned + + Encodable<json::Encoder> + + Decodable<json::Decoder/&static>>( // FIXME(#5121) @self, fn_name:&str, blk: fn(@Mut<Prep>)->Work<T>) -> Work<T> { @@ -277,7 +282,9 @@ trait TPrep { fn declare_input(&self, kind:&str, name:&str, val:&str); fn is_fresh(&self, cat:&str, kind:&str, name:&str, val:&str) -> bool; fn all_fresh(&self, cat:&str, map:&WorkMap) -> bool; - fn exec<T:Owned + Encodable<json::Encoder> + Decodable<json::Decoder>>( + fn exec<T:Owned + + Encodable<json::Encoder> + + Decodable<json::Decoder/&static>>( // FIXME(#5121) &self, blk: ~fn(&Exec) -> T) -> Work<T>; } @@ -316,7 +323,9 @@ impl TPrep for @Mut<Prep> { return true; } - fn exec<T:Owned + Encodable<json::Encoder> + Decodable<json::Decoder>>( + fn exec<T:Owned + + Encodable<json::Encoder> + + Decodable<json::Decoder/&static>>( // FIXME(#5121) &self, blk: ~fn(&Exec) -> T) -> Work<T> { let mut bo = Some(blk); @@ -355,14 +364,18 @@ impl TPrep for @Mut<Prep> { } } -pub impl<T:Owned+Encodable<json::Encoder>+Decodable<json::Decoder>> 
Work<T> { +pub impl<T:Owned + + Encodable<json::Encoder> + + Decodable<json::Decoder/&static>> Work<T> { // FIXME(#5121) static fn new(p: @Mut<Prep>, e: Either<T,PortOne<(Exec,T)>>) -> Work<T> { Work { prep: p, res: Some(e) } } } // FIXME (#3724): movable self. This should be in impl Work. -fn unwrap<T:Owned + Encodable<json::Encoder> + Decodable<json::Decoder>>( +fn unwrap<T:Owned + + Encodable<json::Encoder> + + Decodable<json::Decoder/&static>>( // FIXME(#5121) w: Work<T>) -> T { let mut ww = w; let mut s = None; |
