Diffstat (limited to 'src/libsync')
-rw-r--r--  src/libsync/arc.rs  1168
-rw-r--r--  src/libsync/lib.rs  22
-rw-r--r--  src/libsync/lock.rs  816
-rw-r--r--  src/libsync/mpsc_intrusive.rs (renamed from src/libsync/sync/mpsc_intrusive.rs)  19
-rw-r--r--  src/libsync/mutex.rs (renamed from src/libsync/sync/mutex.rs)  143
-rw-r--r--  src/libsync/one.rs (renamed from src/libsync/sync/one.rs)  18
-rw-r--r--  src/libsync/raw.rs (renamed from src/libsync/sync/mod.rs)  1023
7 files changed, 1578 insertions, 1631 deletions
diff --git a/src/libsync/arc.rs b/src/libsync/arc.rs
index 0bc3b121a88..28841b780a4 100644
--- a/src/libsync/arc.rs
+++ b/src/libsync/arc.rs
@@ -11,571 +11,247 @@
 /*!
  * Concurrency-enabled mechanisms for sharing mutable and/or immutable state
  * between tasks.
- *
- * # Example
- *
- * In this example, a large vector of floats is shared between several tasks.
- * With simple pipes, without Arc, a copy would have to be made for each task.
- *
- * ```rust
- * extern crate sync;
- * extern crate rand;
- *
- * use std::slice;
- * use sync::Arc;
- *
- * fn main() {
- *     let numbers = slice::from_fn(100, |i| (i as f32) * rand::random());
- *     let shared_numbers = Arc::new(numbers);
- *
- *     for _ in range(0, 10) {
- *         let (tx, rx) = channel();
- *         tx.send(shared_numbers.clone());
- *
- *         spawn(proc() {
- *             let shared_numbers = rx.recv();
- *             let local_numbers = shared_numbers.get();
- *
- *             // Work with the local numbers
- *         });
- *     }
- * }
- * ```
  */
 
-#[allow(missing_doc, dead_code)];
-
-
-use sync;
-use sync::{Mutex, RWLock};
-
 use std::cast;
-use std::kinds::{Share, marker};
-use std::sync::arc::UnsafeArc;
-use std::task;
-
-/// As sync::condvar, a mechanism for unlock-and-descheduling and
-/// signaling, for use with the Arc types.
-pub struct ArcCondvar<'a> {
-    priv is_mutex: bool,
-    priv failed: &'a bool,
-    priv cond: &'a sync::Condvar<'a>
+use std::ptr;
+use std::rt::global_heap;
+use std::sync::atomics;
+
+/// An atomically reference counted wrapper for shared state.
+///
+/// # Example
+///
+/// In this example, a large vector of floats is shared between several tasks.
+/// With simple pipes, without `Arc`, a copy would have to be made for each
+/// task.
+///
+/// ```rust
+/// use sync::Arc;
+///
+/// fn main() {
+///     let numbers = Vec::from_fn(100, |i| i as f32);
+///     let shared_numbers = Arc::new(numbers);
+///
+///     for _ in range(0, 10) {
+///         let child_numbers = shared_numbers.clone();
+///
+///         spawn(proc() {
+///             let local_numbers = child_numbers.as_slice();
+///
+///             // Work with the local numbers
+///         });
+///     }
+/// }
+/// ```
+#[unsafe_no_drop_flag]
+pub struct Arc<T> {
+    priv x: *mut ArcInner<T>,
 }
 
-impl<'a> ArcCondvar<'a> {
-    /// Atomically exit the associated Arc and block until a signal is sent.
-    #[inline]
-    pub fn wait(&self) { self.wait_on(0) }
-
-    /**
-     * Atomically exit the associated Arc and block on a specified condvar
-     * until a signal is sent on that same condvar (as sync::cond.wait_on).
-     *
-     * wait() is equivalent to wait_on(0).
-     */
-    #[inline]
-    pub fn wait_on(&self, condvar_id: uint) {
-        assert!(!*self.failed);
-        self.cond.wait_on(condvar_id);
-        // This is why we need to wrap sync::condvar.
-        check_poison(self.is_mutex, *self.failed);
-    }
-
-    /// Wake up a blocked task. Returns false if there was no blocked task.
-    #[inline]
-    pub fn signal(&self) -> bool { self.signal_on(0) }
-
-    /**
-     * Wake up a blocked task on a specified condvar (as
-     * sync::cond.signal_on). Returns false if there was no blocked task.
-     */
-    #[inline]
-    pub fn signal_on(&self, condvar_id: uint) -> bool {
-        assert!(!*self.failed);
-        self.cond.signal_on(condvar_id)
-    }
-
-    /// Wake up all blocked tasks. Returns the number of tasks woken.
-    #[inline]
-    pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
-
-    /**
-     * Wake up all blocked tasks on a specified condvar (as
-     * sync::cond.broadcast_on). Returns the number of tasks woken.
-     */
-    #[inline]
-    pub fn broadcast_on(&self, condvar_id: uint) -> uint {
-        assert!(!*self.failed);
-        self.cond.broadcast_on(condvar_id)
-    }
+/// A weak pointer to an `Arc`.
+///
+/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
+/// used to break cycles between `Arc` pointers.
+#[unsafe_no_drop_flag]
+pub struct Weak<T> {
+    priv x: *mut ArcInner<T>,
 }
 
-/****************************************************************************
- * Immutable Arc
- ****************************************************************************/
-
-/// An atomically reference counted wrapper for shared immutable state.
-pub struct Arc<T> { priv x: UnsafeArc<T> }
-
+struct ArcInner<T> {
+    strong: atomics::AtomicUint,
+    weak: atomics::AtomicUint,
+    data: T,
+}
 
-/**
- * Access the underlying data in an atomically reference counted
- * wrapper.
- */
 impl<T: Share + Send> Arc<T> {
     /// Create an atomically reference counted wrapper.
     #[inline]
     pub fn new(data: T) -> Arc<T> {
-        Arc { x: UnsafeArc::new(data) }
+        // Start the weak pointer count as 1 which is the weak pointer that's
+        // held by all the strong pointers (kinda), see std/rc.rs for more info
+        let x = ~ArcInner {
+            strong: atomics::AtomicUint::new(1),
+            weak: atomics::AtomicUint::new(1),
+            data: data,
+        };
+        Arc { x: unsafe { cast::transmute(x) } }
     }
 
     #[inline]
-    pub fn get<'a>(&'a self) -> &'a T {
-        unsafe { &*self.x.get_immut() }
+    fn inner<'a>(&'a self) -> &'a ArcInner<T> {
+        // This unsafety is ok because while this arc is alive we're guaranteed
+        // that the inner pointer is valid. Furthermore, we know that the
+        // `ArcInner` structure itself is `Share` because the inner data is
+        // `Share` as well, so we're ok loaning out an immutable pointer to
+        // these contents.
+        unsafe { &*self.x }
+    }
+
+    /// Downgrades a strong pointer to a weak pointer
+    ///
+    /// Weak pointers will not keep the data alive. Once all strong references
+    /// to the underlying data have been dropped, the data itself will be
+    /// destroyed.
+    pub fn downgrade(&self) -> Weak<T> {
+        // See the clone() impl for why this is relaxed
+        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        Weak { x: self.x }
     }
 }
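
As an editorial aside, a short sketch of the downgrade/upgrade round trip, mirroring the `test_live` and `test_dead` tests at the end of this file: a `Weak` only upgrades while at least one strong reference survives.

```rust
use sync::Arc;

fn main() {
    let strong = Arc::new(5);
    let weak = strong.downgrade();

    // While a strong reference is alive, upgrading succeeds.
    assert!(weak.upgrade().is_some());

    // Once the last strong reference is dropped, the data is destroyed
    // and upgrading yields None.
    drop(strong);
    assert!(weak.upgrade().is_none());
}
```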
 
 impl<T: Share + Send> Clone for Arc<T> {
-    /**
-    * Duplicate an atomically reference counted wrapper.
-    *
-    * The resulting two `arc` objects will point to the same underlying data
-    * object. However, one of the `arc` objects can be sent to another task,
-    * allowing them to share the underlying data.
-    */
+    /// Duplicate an atomically reference counted wrapper.
+    ///
+    /// The resulting two `Arc` objects will point to the same underlying data
+    /// object. However, one of the `Arc` objects can be sent to another task,
+    /// allowing them to share the underlying data.
     #[inline]
     fn clone(&self) -> Arc<T> {
-        Arc { x: self.x.clone() }
+        // Using a relaxed ordering is alright here, as knowledge of the
+        // original reference prevents other threads from erroneously deleting
+        // the object.
+        //
+        // As explained in the [Boost documentation][1], increasing the
+        // reference counter can always be done with memory_order_relaxed: New
+        // references to an object can only be formed from an existing
+        // reference, and passing an existing reference from one thread to
+        // another must already provide any required synchronization.
+        //
+        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+        self.inner().strong.fetch_add(1, atomics::Relaxed);
+        Arc { x: self.x }
     }
 }
 
-/****************************************************************************
- * Mutex protected Arc (unsafe)
- ****************************************************************************/
-
-#[doc(hidden)]
-struct MutexArcInner<T> { lock: Mutex, failed: bool, data: T }
-
-/// An Arc with mutable data protected by a blocking mutex.
-pub struct MutexArc<T> {
-    priv x: UnsafeArc<MutexArcInner<T>>,
-}
-
-impl<T:Send> Clone for MutexArc<T> {
-    /// Duplicate a mutex-protected Arc. See arc::clone for more details.
+// FIXME(#13042): this should have T: Send, and use self.inner()
+impl<T> Deref<T> for Arc<T> {
     #[inline]
-    fn clone(&self) -> MutexArc<T> {
-        // NB: Cloning the underlying mutex is not necessary. Its reference
-        // count would be exactly the same as the shared state's.
-        MutexArc { x: self.x.clone() }
+    fn deref<'a>(&'a self) -> &'a T {
+        let inner = unsafe { &*self.x };
+        &inner.data
     }
 }
 
-impl<T:Send> MutexArc<T> {
-    /// Create a mutex-protected Arc with the supplied data.
-    pub fn new(user_data: T) -> MutexArc<T> {
-        MutexArc::new_with_condvars(user_data, 1)
-    }
-
-    /**
-     * Create a mutex-protected Arc with the supplied data and a specified number
-     * of condvars (as sync::Mutex::new_with_condvars).
-     */
-    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> MutexArc<T> {
-        let data = MutexArcInner {
-            lock: Mutex::new_with_condvars(num_condvars),
-            failed: false, data: user_data
-        };
-        MutexArc { x: UnsafeArc::new(data) }
-    }
-
-    /**
-     * Access the underlying mutable data with mutual exclusion from other
-     * tasks. The argument closure will be run with the mutex locked; all
-     * other tasks wishing to access the data will block until the closure
-     * finishes running.
-     *
-     * If you wish to nest MutexArcs, one strategy for ensuring safety at
-     * runtime is to add a "nesting level counter" inside the stored data, and
-     * when traversing the arcs, assert that they monotonically decrease.
-     *
-     * # Failure
-     *
-     * Failing while inside the Arc will unlock the Arc while unwinding, so
-     * that other tasks won't block forever. It will also poison the Arc:
-     * any tasks that subsequently try to access it (including those already
-     * blocked on the mutex) will also fail immediately.
-     */
-    #[inline]
-    pub fn access<U>(&self, blk: |x: &mut T| -> U) -> U {
-        let state = self.x.get();
-        unsafe {
-            // Borrowck would complain about this if the code were
-            // not already unsafe. See borrow_rwlock, far below.
-            (&(*state).lock).lock(|| {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail::new(&mut (*state).failed);
-                blk(&mut (*state).data)
-            })
-        }
-    }
-
-    /// As access(), but with a condvar, as sync::mutex.lock_cond().
+impl<T: Send + Share + Clone> Arc<T> {
+    /// Acquires a mutable pointer to the inner contents by guaranteeing that
+    /// the reference count is one (no sharing is possible).
+    ///
+    /// This is also referred to as a copy-on-write operation because the inner
+    /// data is cloned if the reference count is greater than one.
     #[inline]
-    pub fn access_cond<U>(&self, blk: |x: &mut T, c: &ArcCondvar| -> U) -> U {
-        let state = self.x.get();
-        unsafe {
-            (&(*state).lock).lock_cond(|cond| {
-                check_poison(true, (*state).failed);
-                let _z = PoisonOnFail::new(&mut (*state).failed);
-                blk(&mut (*state).data,
-                    &ArcCondvar {is_mutex: true,
-                            failed: &(*state).failed,
-                            cond: cond })
-            })
-        }
-    }
-}
-
-// Common code for {mutex.access,rwlock.write}{,_cond}.
-#[inline]
-#[doc(hidden)]
-fn check_poison(is_mutex: bool, failed: bool) {
-    if failed {
-        if is_mutex {
-            fail!("Poisoned MutexArc - another task failed inside!");
-        } else {
-            fail!("Poisoned rw_arc - another task failed inside!");
+    #[experimental]
+    pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
+        if self.inner().strong.load(atomics::SeqCst) != 1 {
+            *self = Arc::new(self.deref().clone())
         }
+        // This unsafety is ok because we're guaranteed that the pointer
+        // returned is the *only* pointer that will ever be returned to T. Our
+        // reference count is guaranteed to be 1 at this point, and we required
+        // the Arc itself to be `mut`, so we're returning the only possible
+        // reference to the inner data.
+        unsafe { cast::transmute_mut(self.deref()) }
     }
 }
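
A minimal copy-on-write sketch (compare the `test_cowarc_*` tests below), assuming only the `Arc` API in this file: cloned handles share one allocation until `make_unique` finds the reference count above one and clones the data first.

```rust
use sync::Arc;

fn main() {
    let mut a = Arc::new(75u);
    let b = a.clone();       // strong count is now 2

    *a.make_unique() += 1;   // count > 1, so the data is cloned first

    assert_eq!(*a, 76);      // the writer sees its private copy
    assert_eq!(*b, 75);      // the other handle is untouched
}
```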
 
-#[doc(hidden)]
-struct PoisonOnFail {
-    flag: *mut bool,
-    failed: bool,
-}
-
-impl Drop for PoisonOnFail {
+#[unsafe_destructor]
+impl<T: Share + Send> Drop for Arc<T> {
     fn drop(&mut self) {
-        unsafe {
-            /* assert!(!*self.failed);
-               -- might be false in case of cond.wait() */
-            if !self.failed && task::failing() {
-                *self.flag = true;
-            }
+        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
+        // more than once (but it is guaranteed to be zeroed after the first if
+        // it's run more than once)
+        if self.x.is_null() { return }
+
+        // Because `fetch_sub` is already atomic, we do not need to synchronize
+        // with other threads unless we are going to delete the object. This
+        // same logic applies to the `fetch_sub` on the `weak` count below.
+        // Note that `fetch_sub` returns the *previous* value, so a result of
+        // 1 means this was the last strong reference.
+        if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }
+
+        // This fence is needed to prevent reordering of use of the data and
+        // deletion of the data. Because it is marked `Release`, the
+        // decreasing of the reference count synchronizes with this `Acquire`
+        // fence. This means that use of the data happens before decreasing
+        // the reference count, which happens before this fence, which
+        // happens before the deletion of the data.
+        //
+        // As explained in the [Boost documentation][1],
+        //
+        // It is important to enforce any possible access to the object in
+        // one thread (through an existing reference) to *happen before*
+        // deleting the object in a different thread. This is achieved by a
+        // "release" operation after dropping a reference (any access to the
+        // object through this reference must obviously have happened before),
+        // and an "acquire" operation before deleting the object.
+        //
+        // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+        atomics::fence(atomics::Acquire);
+
+        // Destroy the data at this time, even though we may not free the box
+        // allocation itself (there may still be weak pointers lying around).
+        unsafe { drop(ptr::read(&self.inner().data)); }
+
+        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
+            atomics::fence(atomics::Acquire);
+            unsafe { global_heap::exchange_free(self.x as *u8) }
         }
     }
 }
 
-impl PoisonOnFail {
-    fn new<'a>(flag: &'a mut bool) -> PoisonOnFail {
-        PoisonOnFail {
-            flag: flag,
-            failed: task::failing()
+impl<T: Share + Send> Weak<T> {
+    /// Attempts to upgrade this weak reference to a strong reference.
+    ///
+    /// This method will fail to upgrade this reference if the strong reference
+    /// count has already reached 0, but if there are still other active strong
+    /// references, this function will return a new strong reference to the data.
+    pub fn upgrade(&self) -> Option<Arc<T>> {
+        // We use a CAS loop to increment the strong count instead of a
+        // fetch_add because once the count hits 0 it must never rise above 0.
+        let inner = self.inner();
+        loop {
+            let n = inner.strong.load(atomics::SeqCst);
+            if n == 0 { return None }
+            let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
+            if old == n { return Some(Arc { x: self.x }) }
         }
     }
-}
 
-/****************************************************************************
- * R/W lock protected Arc
- ****************************************************************************/
-
-#[doc(hidden)]
-struct RWArcInner<T> { lock: RWLock, failed: bool, data: T }
-/**
- * A dual-mode Arc protected by a reader-writer lock. The data can be accessed
- * mutably or immutably, and immutably-accessing tasks may run concurrently.
- *
- * Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
- */
-pub struct RWArc<T> {
-    priv x: UnsafeArc<RWArcInner<T>>,
-    priv marker: marker::NoShare,
-}
-
-impl<T: Share + Send> Clone for RWArc<T> {
-    /// Duplicate a rwlock-protected Arc. See arc::clone for more details.
     #[inline]
-    fn clone(&self) -> RWArc<T> {
-        RWArc {
-            x: self.x.clone(),
-            marker: marker::NoShare
-        }
+    fn inner<'a>(&'a self) -> &'a ArcInner<T> {
+        // See comments above for why this is "safe"
+        unsafe { &*self.x }
     }
-
 }
 
-impl<T: Share + Send> RWArc<T> {
-    /// Create a reader/writer Arc with the supplied data.
-    pub fn new(user_data: T) -> RWArc<T> {
-        RWArc::new_with_condvars(user_data, 1)
-    }
-
-    /**
-     * Create a reader/writer Arc with the supplied data and a specified number
-     * of condvars (as sync::RWLock::new_with_condvars).
-     */
-    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWArc<T> {
-        let data = RWArcInner {
-            lock: RWLock::new_with_condvars(num_condvars),
-            failed: false, data: user_data
-        };
-        RWArc {
-            x: UnsafeArc::new(data),
-            marker: marker::NoShare
-        }
-    }
-
-    /**
-     * Access the underlying data mutably. Locks the rwlock in write mode;
-     * other readers and writers will block.
-     *
-     * # Failure
-     *
-     * Failing while inside the Arc will unlock the Arc while unwinding, so
-     * that other tasks won't block forever. As MutexArc.access, it will also
-     * poison the Arc, so subsequent readers and writers will both also fail.
-     */
+impl<T: Share + Send> Clone for Weak<T> {
     #[inline]
-    pub fn write<U>(&self, blk: |x: &mut T| -> U) -> U {
-        unsafe {
-            let state = self.x.get();
-            (*borrow_rwlock(state)).write(|| {
-                check_poison(false, (*state).failed);
-                let _z = PoisonOnFail::new(&mut (*state).failed);
-                blk(&mut (*state).data)
-            })
-        }
-    }
-
-    /// As write(), but with a condvar, as sync::rwlock.write_cond().
-    #[inline]
-    pub fn write_cond<U>(&self,
-                         blk: |x: &mut T, c: &ArcCondvar| -> U)
-                         -> U {
-        unsafe {
-            let state = self.x.get();
-            (*borrow_rwlock(state)).write_cond(|cond| {
-                check_poison(false, (*state).failed);
-                let _z = PoisonOnFail::new(&mut (*state).failed);
-                blk(&mut (*state).data,
-                    &ArcCondvar {is_mutex: false,
-                              failed: &(*state).failed,
-                              cond: cond})
-            })
-        }
-    }
-
-    /**
-     * Access the underlying data immutably. May run concurrently with other
-     * reading tasks.
-     *
-     * # Failure
-     *
-     * Failing will unlock the Arc while unwinding. However, unlike all other
-     * access modes, this will not poison the Arc.
-     */
-    pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
-        unsafe {
-            let state = self.x.get();
-            (*state).lock.read(|| {
-                check_poison(false, (*state).failed);
-                blk(&(*state).data)
-            })
-        }
-    }
-
-    /**
-     * As write(), but with the ability to atomically 'downgrade' the lock.
-     * See sync::rwlock.write_downgrade(). The RWWriteMode token must be used
-     * to obtain the &mut T, and can be transformed into a RWReadMode token by
-     * calling downgrade(), after which a &T can be obtained instead.
-     *
-     * # Example
-     *
-     * ```rust
-     * use sync::RWArc;
-     *
-     * let arc = RWArc::new(1);
-     * arc.write_downgrade(|mut write_token| {
-     *     write_token.write_cond(|state, condvar| {
-     *         // ... exclusive access with mutable state ...
-     *     });
-     *     let read_token = arc.downgrade(write_token);
-     *     read_token.read(|state| {
-     *         // ... shared access with immutable state ...
-     *     });
-     * })
-     * ```
-     */
-    pub fn write_downgrade<U>(&self, blk: |v: RWWriteMode<T>| -> U) -> U {
-        unsafe {
-            let state = self.x.get();
-            (*borrow_rwlock(state)).write_downgrade(|write_mode| {
-                check_poison(false, (*state).failed);
-                blk(RWWriteMode {
-                    data: &mut (*state).data,
-                    token: write_mode,
-                    poison: PoisonOnFail::new(&mut (*state).failed)
-                })
-            })
-        }
-    }
-
-    /// To be called inside of the write_downgrade block.
-    pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>)
-                         -> RWReadMode<'a, T> {
-        unsafe {
-            // The rwlock should assert that the token belongs to us for us.
-            let state = self.x.get();
-            let RWWriteMode {
-                data: data,
-                token: t,
-                poison: _poison
-            } = token;
-            // Let readers in
-            let new_token = (*state).lock.downgrade(t);
-            // Whatever region the input reference had, it will be safe to use
-            // the same region for the output reference. (The only 'unsafe' part
-            // of this cast is removing the mutability.)
-            let new_data = data;
-            // Downgrade ensured the token belonged to us. Just a sanity check.
-            assert!((&(*state).data as *T as uint) == (new_data as *mut T as uint));
-            // Produce new token
-            RWReadMode {
-                data: new_data,
-                token: new_token,
-            }
-        }
-    }
-}
-
-// Borrowck rightly complains about immutably aliasing the rwlock in order to
-// lock it. This wraps the unsafety, with the justification that the 'lock'
-// field is never overwritten; only 'failed' and 'data'.
-#[doc(hidden)]
-fn borrow_rwlock<T: Share + Send>(state: *mut RWArcInner<T>) -> *RWLock {
-    unsafe { cast::transmute(&(*state).lock) }
-}
-
-/// The "write permission" token used for RWArc.write_downgrade().
-pub struct RWWriteMode<'a, T> {
-    priv data: &'a mut T,
-    priv token: sync::RWLockWriteMode<'a>,
-    priv poison: PoisonOnFail,
-}
-
-/// The "read permission" token used for RWArc.write_downgrade().
-pub struct RWReadMode<'a, T> {
-    priv data: &'a T,
-    priv token: sync::RWLockReadMode<'a>,
-}
-
-impl<'a, T: Share + Send> RWWriteMode<'a, T> {
-    /// Access the pre-downgrade RWArc in write mode.
-    pub fn write<U>(&mut self, blk: |x: &mut T| -> U) -> U {
-        match *self {
-            RWWriteMode {
-                data: &ref mut data,
-                token: ref token,
-                poison: _
-            } => {
-                token.write(|| blk(data))
-            }
-        }
-    }
-
-    /// Access the pre-downgrade RWArc in write mode with a condvar.
-    pub fn write_cond<U>(&mut self,
-                         blk: |x: &mut T, c: &ArcCondvar| -> U)
-                         -> U {
-        match *self {
-            RWWriteMode {
-                data: &ref mut data,
-                token: ref token,
-                poison: ref poison
-            } => {
-                token.write_cond(|cond| {
-                    unsafe {
-                        let cvar = ArcCondvar {
-                            is_mutex: false,
-                            failed: &*poison.flag,
-                            cond: cond
-                        };
-                        blk(data, &cvar)
-                    }
-                })
-            }
-        }
-    }
-}
-
-impl<'a, T: Share + Send> RWReadMode<'a, T> {
-    /// Access the post-downgrade rwlock in read mode.
-    pub fn read<U>(&self, blk: |x: &T| -> U) -> U {
-        match *self {
-            RWReadMode {
-                data: data,
-                token: ref token
-            } => {
-                token.read(|| blk(data))
-            }
-        }
+    fn clone(&self) -> Weak<T> {
+        // See comments in Arc::clone() for why this is relaxed
+        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        Weak { x: self.x }
     }
 }
 
-/****************************************************************************
- * Copy-on-write Arc
- ****************************************************************************/
-
-pub struct CowArc<T> { priv x: UnsafeArc<T> }
-
-/// A Copy-on-write Arc functions the same way as an `arc` except it allows
-/// mutation of the contents if there is only a single reference to
-/// the data. If there are multiple references the data is automatically
-/// cloned and the task modifies the cloned data in place of the shared data.
-impl<T: Clone + Send + Share> CowArc<T> {
-    /// Create a copy-on-write atomically reference counted wrapper
-    #[inline]
-    pub fn new(data: T) -> CowArc<T> {
-        CowArc { x: UnsafeArc::new(data) }
-    }
-
-    #[inline]
-    pub fn get<'a>(&'a self) -> &'a T {
-        unsafe { &*self.x.get_immut() }
-    }
-
-    /// get a mutable reference to the contents. If there are more then one
-    /// reference to the contents of the `CowArc` will be cloned
-    /// and this reference updated to point to the cloned data.
-    #[inline]
-    pub fn get_mut<'a>(&'a mut self) -> &'a mut T {
-        if !self.x.is_owned() {
-            *self = CowArc::new(self.get().clone())
+#[unsafe_destructor]
+impl<T: Share + Send> Drop for Weak<T> {
+    fn drop(&mut self) {
+        // see comments above for why this check is here
+        if self.x.is_null() { return }
+
+        // If we find out that we were the last weak pointer, then it's time to
+        // deallocate the data entirely. See the discussion in Arc::drop() about
+        // the memory orderings
+        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
+            atomics::fence(atomics::Acquire);
+            unsafe { global_heap::exchange_free(self.x as *u8) }
         }
-        unsafe { &mut *self.x.get() }
-    }
-}
-
-impl<T: Clone + Send + Share> Clone for CowArc<T> {
-    /// Duplicate a Copy-on-write Arc. See arc::clone for more details.
-    fn clone(&self) -> CowArc<T> {
-        CowArc { x: self.x.clone() }
     }
 }
 
-
-
-/****************************************************************************
- * Tests
- ****************************************************************************/
-
 #[cfg(test)]
+#[allow(experimental)]
 mod tests {
-
-    use super::{Arc, RWArc, MutexArc, CowArc};
+    use super::{Arc, Weak};
+    use Mutex;
 
     use std::task;
 
@@ -588,455 +264,89 @@ mod tests {
 
         task::spawn(proc() {
             let arc_v: Arc<Vec<int>> = rx.recv();
-
-            let v = arc_v.get().clone();
-            assert_eq!(*v.get(3), 4);
+            assert_eq!(*arc_v.get(3), 4);
         });
 
         tx.send(arc_v.clone());
 
-        assert_eq!(*arc_v.get().get(2), 3);
-        assert_eq!(*arc_v.get().get(4), 5);
+        assert_eq!(*arc_v.get(2), 3);
+        assert_eq!(*arc_v.get(4), 5);
 
         info!("{:?}", arc_v);
     }
 
     #[test]
-    fn test_mutex_arc_condvar() {
-        let arc = ~MutexArc::new(false);
-        let arc2 = ~arc.clone();
-        let (tx, rx) = channel();
-        task::spawn(proc() {
-            // wait until parent gets in
-            rx.recv();
-            arc2.access_cond(|state, cond| {
-                *state = true;
-                cond.signal();
-            })
-        });
-
-        arc.access_cond(|state, cond| {
-            tx.send(());
-            assert!(!*state);
-            while !*state {
-                cond.wait();
-            }
-        })
-    }
-
-    #[test] #[should_fail]
-    fn test_arc_condvar_poison() {
-        let arc = ~MutexArc::new(1);
-        let arc2 = ~arc.clone();
-        let (tx, rx) = channel();
-
-        spawn(proc() {
-            let _ = rx.recv();
-            arc2.access_cond(|one, cond| {
-                cond.signal();
-                // Parent should fail when it wakes up.
-                assert_eq!(*one, 0);
-            })
-        });
-
-        arc.access_cond(|one, cond| {
-            tx.send(());
-            while *one == 1 {
-                cond.wait();
-            }
-        })
-    }
-
-    #[test] #[should_fail]
-    fn test_mutex_arc_poison() {
-        let arc = ~MutexArc::new(1);
-        let arc2 = ~arc.clone();
-        let _ = task::try(proc() {
-            arc2.access(|one| {
-                assert_eq!(*one, 2);
-            })
-        });
-        arc.access(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-
-    #[test]
-    fn test_mutex_arc_nested() {
-        // Tests nested mutexes and access
-        // to underlaying data.
-        let arc = ~MutexArc::new(1);
-        let arc2 = ~MutexArc::new(*arc);
-        task::spawn(proc() {
-            (*arc2).access(|mutex| {
-                (*mutex).access(|one| {
-                    assert!(*one == 1);
-                })
-            })
-        });
-    }
-
-    #[test]
-    fn test_mutex_arc_access_in_unwind() {
-        let arc = MutexArc::new(1i);
-        let arc2 = arc.clone();
-        let _ = task::try::<()>(proc() {
-            struct Unwinder {
-                i: MutexArc<int>
-            }
-            impl Drop for Unwinder {
-                fn drop(&mut self) {
-                    self.i.access(|num| *num += 1);
-                }
-            }
-            let _u = Unwinder { i: arc2 };
-            fail!();
-        });
-        assert_eq!(2, arc.access(|n| *n));
-    }
-
-    #[test] #[should_fail]
-    fn test_rw_arc_poison_wr() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.write(|one| {
-                assert_eq!(*one, 2);
-            })
-        });
-        arc.read(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-
-    #[test] #[should_fail]
-    fn test_rw_arc_poison_ww() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.write(|one| {
-                assert_eq!(*one, 2);
-            })
-        });
-        arc.write(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-    #[test] #[should_fail]
-    fn test_rw_arc_poison_dw() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.write_downgrade(|mut write_mode| {
-                write_mode.write(|one| {
-                    assert_eq!(*one, 2);
-                })
-            })
-        });
-        arc.write(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-    #[test]
-    fn test_rw_arc_no_poison_rr() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.read(|one| {
-                assert_eq!(*one, 2);
-            })
-        });
-        arc.read(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-    #[test]
-    fn test_rw_arc_no_poison_rw() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.read(|one| {
-                assert_eq!(*one, 2);
-            })
-        });
-        arc.write(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-    #[test]
-    fn test_rw_arc_no_poison_dr() {
-        let arc = RWArc::new(1);
-        let arc2 = arc.clone();
-        let _ = task::try(proc() {
-            arc2.write_downgrade(|write_mode| {
-                let read_mode = arc2.downgrade(write_mode);
-                read_mode.read(|one| {
-                    assert_eq!(*one, 2);
-                })
-            })
-        });
-        arc.write(|one| {
-            assert_eq!(*one, 1);
-        })
-    }
-    #[test]
-    fn test_rw_arc() {
-        let arc = RWArc::new(0);
-        let arc2 = arc.clone();
-        let (tx, rx) = channel();
-
-        task::spawn(proc() {
-            arc2.write(|num| {
-                for _ in range(0, 10) {
-                    let tmp = *num;
-                    *num = -1;
-                    task::deschedule();
-                    *num = tmp + 1;
-                }
-                tx.send(());
-            })
-        });
+    fn test_cowarc_clone_make_unique() {
+        let mut cow0 = Arc::new(75u);
+        let mut cow1 = cow0.clone();
+        let mut cow2 = cow1.clone();
 
-        // Readers try to catch the writer in the act
-        let mut children = Vec::new();
-        for _ in range(0, 5) {
-            let arc3 = arc.clone();
-            let mut builder = task::task();
-            children.push(builder.future_result());
-            builder.spawn(proc() {
-                arc3.read(|num| {
-                    assert!(*num >= 0);
-                })
-            });
-        }
+        assert!(75 == *cow0.make_unique());
+        assert!(75 == *cow1.make_unique());
+        assert!(75 == *cow2.make_unique());
 
-        // Wait for children to pass their asserts
-        for r in children.mut_iter() {
-            let _ = r.recv();
-        }
+        *cow0.make_unique() += 1;
+        *cow1.make_unique() += 2;
+        *cow2.make_unique() += 3;
 
-        // Wait for writer to finish
-        rx.recv();
-        arc.read(|num| {
-            assert_eq!(*num, 10);
-        })
-    }
+        assert!(76 == *cow0);
+        assert!(77 == *cow1);
+        assert!(78 == *cow2);
 
-    #[test]
-    fn test_rw_arc_access_in_unwind() {
-        let arc = RWArc::new(1i);
-        let arc2 = arc.clone();
-        let _ = task::try::<()>(proc() {
-            struct Unwinder {
-                i: RWArc<int>
-            }
-            impl Drop for Unwinder {
-                fn drop(&mut self) {
-                    self.i.write(|num| *num += 1);
-                }
-            }
-            let _u = Unwinder { i: arc2 };
-            fail!();
-        });
-        assert_eq!(2, arc.read(|n| *n));
+        // none should point to the same backing memory
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 != *cow2);
     }
 
     #[test]
-    fn test_rw_downgrade() {
-        // (1) A downgrader gets in write mode and does cond.wait.
-        // (2) A writer gets in write mode, sets state to 42, and does signal.
-        // (3) Downgrader wakes, sets state to 31337.
-        // (4) tells writer and all other readers to contend as it downgrades.
-        // (5) Writer attempts to set state back to 42, while downgraded task
-        //     and all reader tasks assert that it's 31337.
-        let arc = RWArc::new(0);
-
-        // Reader tasks
-        let mut reader_convos = Vec::new();
-        for _ in range(0, 10) {
-            let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
-            reader_convos.push((tx1, rx2));
-            let arcn = arc.clone();
-            task::spawn(proc() {
-                rx1.recv(); // wait for downgrader to give go-ahead
-                arcn.read(|state| {
-                    assert_eq!(*state, 31337);
-                    tx2.send(());
-                })
-            });
-        }
-
-        // Writer task
-        let arc2 = arc.clone();
-        let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
-        task::spawn(proc() {
-            rx1.recv();
-            arc2.write_cond(|state, cond| {
-                assert_eq!(*state, 0);
-                *state = 42;
-                cond.signal();
-            });
-            rx1.recv();
-            arc2.write(|state| {
-                // This shouldn't happen until after the downgrade read
-                // section, and all other readers, finish.
-                assert_eq!(*state, 31337);
-                *state = 42;
-            });
-            tx2.send(());
-        });
-
-        // Downgrader (us)
-        arc.write_downgrade(|mut write_mode| {
-            write_mode.write_cond(|state, cond| {
-                tx1.send(()); // send to another writer who will wake us up
-                while *state == 0 {
-                    cond.wait();
-                }
-                assert_eq!(*state, 42);
-                *state = 31337;
-                // send to other readers
-                for &(ref mut rc, _) in reader_convos.mut_iter() {
-                    rc.send(())
-                }
-            });
-            let read_mode = arc.downgrade(write_mode);
-            read_mode.read(|state| {
-                // complete handshake with other readers
-                for &(_, ref mut rp) in reader_convos.mut_iter() {
-                    rp.recv()
-                }
-                tx1.send(()); // tell writer to try again
-                assert_eq!(*state, 31337);
-            });
-        });
-
-        rx2.recv(); // complete handshake with writer
-    }
-    #[cfg(test)]
-    fn test_rw_write_cond_downgrade_read_race_helper() {
-        // Tests that when a downgrader hands off the "reader cloud" lock
-        // because of a contending reader, a writer can't race to get it
-        // instead, which would result in readers_and_writers. This tests
-        // the sync module rather than this one, but it's here because an
-        // rwarc gives us extra shared state to help check for the race.
-        // If you want to see this test fail, go to sync.rs and replace the
-        // line in RWLock::write_cond() that looks like:
-        //     "blk(&ArcCondvar { order: opt_lock, ..*cond })"
-        // with just "blk(cond)".
-        let x = RWArc::new(true);
-        let (tx, rx) = channel();
+    fn test_cowarc_clone_unique2() {
+        let mut cow0 = Arc::new(75u);
+        let cow1 = cow0.clone();
+        let cow2 = cow1.clone();
 
-        // writer task
-        let xw = x.clone();
-        task::spawn(proc() {
-            xw.write_cond(|state, c| {
-                tx.send(()); // tell downgrader it's ok to go
-                c.wait();
-                // The core of the test is here: the condvar reacquire path
-                // must involve order_lock, so that it cannot race with a reader
-                // trying to receive the "reader cloud lock hand-off".
-                *state = false;
-            })
-        });
+        assert!(75 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
 
-        rx.recv(); // wait for writer to get in
+        *cow0.make_unique() += 1;
 
-        x.write_downgrade(|mut write_mode| {
-            write_mode.write_cond(|state, c| {
-                assert!(*state);
-                // make writer contend in the cond-reacquire path
-                c.signal();
-            });
-            // make a reader task to trigger the "reader cloud lock" handoff
-            let xr = x.clone();
-            let (tx, rx) = channel();
-            task::spawn(proc() {
-                tx.send(());
-                xr.read(|_state| { })
-            });
-            rx.recv(); // wait for reader task to exist
+        assert!(76 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
 
-            let read_mode = x.downgrade(write_mode);
-            read_mode.read(|state| {
-                // if writer mistakenly got in, make sure it mutates state
-                // before we assert on it
-                for _ in range(0, 5) { task::deschedule(); }
-                // make sure writer didn't get in.
-                assert!(*state);
-            })
-        });
-    }
-    #[test]
-    fn test_rw_write_cond_downgrade_read_race() {
-        // Ideally the above test case would have deschedule statements in it that
-        // helped to expose the race nearly 100% of the time... but adding
-        // deschedules in the intuitively-right locations made it even less likely,
-        // and I wasn't sure why :( . This is a mediocre "next best" option.
-        for _ in range(0, 8) { test_rw_write_cond_downgrade_read_race_helper(); }
+        // cow1 and cow2 should share the same contents
+        // cow0 should have a unique reference
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 == *cow2);
     }
 
     #[test]
-    fn test_cowarc_clone()
-    {
-        let cow0 = CowArc::new(75u);
-        let cow1 = cow0.clone();
-        let cow2 = cow1.clone();
-
-        assert!(75 == *cow0.get());
-        assert!(75 == *cow1.get());
-        assert!(75 == *cow2.get());
-
-        assert!(cow0.get() == cow1.get());
-        assert!(cow0.get() == cow2.get());
+    fn test_live() {
+        let x = Arc::new(5);
+        let y = x.downgrade();
+        assert!(y.upgrade().is_some());
     }
 
     #[test]
-    fn test_cowarc_clone_get_mut()
-    {
-        let mut cow0 = CowArc::new(75u);
-        let mut cow1 = cow0.clone();
-        let mut cow2 = cow1.clone();
-
-        assert!(75 == *cow0.get_mut());
-        assert!(75 == *cow1.get_mut());
-        assert!(75 == *cow2.get_mut());
-
-        *cow0.get_mut() += 1;
-        *cow1.get_mut() += 2;
-        *cow2.get_mut() += 3;
-
-        assert!(76 == *cow0.get());
-        assert!(77 == *cow1.get());
-        assert!(78 == *cow2.get());
-
-        // none should point to the same backing memory
-        assert!(cow0.get() != cow1.get());
-        assert!(cow0.get() != cow2.get());
-        assert!(cow1.get() != cow2.get());
+    fn test_dead() {
+        let x = Arc::new(5);
+        let y = x.downgrade();
+        drop(x);
+        assert!(y.upgrade().is_none());
     }
 
     #[test]
-    fn test_cowarc_clone_get_mut2()
-    {
-        let mut cow0 = CowArc::new(75u);
-        let cow1 = cow0.clone();
-        let cow2 = cow1.clone();
-
-        assert!(75 == *cow0.get());
-        assert!(75 == *cow1.get());
-        assert!(75 == *cow2.get());
-
-        *cow0.get_mut() += 1;
+    fn weak_self_cyclic() {
+        struct Cycle {
+            x: Mutex<Option<Weak<Cycle>>>
+        }
 
-        assert!(76 == *cow0.get());
-        assert!(75 == *cow1.get());
-        assert!(75 == *cow2.get());
+        let a = Arc::new(Cycle { x: Mutex::new(None) });
+        let b = a.clone().downgrade();
+        *a.deref().x.lock().deref_mut() = Some(b);
 
-        // cow1 and cow2 should share the same contents
-        // cow0 should have a unique reference
-        assert!(cow0.get() != cow1.get());
-        assert!(cow0.get() != cow2.get());
-        assert!(cow1.get() == cow2.get());
+        // hopefully we don't double-free (or leak)...
     }
 }
diff --git a/src/libsync/lib.rs b/src/libsync/lib.rs
index 70874a029ac..d166076e96e 100644
--- a/src/libsync/lib.rs
+++ b/src/libsync/lib.rs
@@ -20,18 +20,28 @@
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://static.rust-lang.org/doc/master")];
 #[feature(phase)];
+#[deny(missing_doc, deprecated_owned_vector)];
 
-#[cfg(test)] #[phase(syntax, link)] extern crate log;
+#[cfg(test)]
+#[phase(syntax, link)] extern crate log;
 
-pub use arc::{Arc, MutexArc, RWArc, RWWriteMode, RWReadMode, ArcCondvar, CowArc};
-pub use sync::{Mutex, RWLock, Condvar, Semaphore, RWLockWriteMode,
-               RWLockReadMode, Barrier, one, mutex};
 pub use comm::{DuplexStream, SyncSender, SyncReceiver, rendezvous, duplex};
 pub use task_pool::TaskPool;
 pub use future::Future;
+pub use arc::{Arc, Weak};
+pub use lock::{Mutex, MutexGuard, Condvar, Barrier,
+               RWLock, RWLockReadGuard, RWLockWriteGuard};
+
+// The mutex/rwlock in this module are not meant for reexport
+pub use raw::{Semaphore, SemaphoreGuard};
 
 mod arc;
-mod sync;
 mod comm;
-mod task_pool;
 mod future;
+mod lock;
+mod mpsc_intrusive;
+mod task_pool;
+
+pub mod raw;
+pub mod mutex;
+pub mod one;
diff --git a/src/libsync/lock.rs b/src/libsync/lock.rs
new file mode 100644
index 00000000000..6ddd0d400f2
--- /dev/null
+++ b/src/libsync/lock.rs
@@ -0,0 +1,816 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Wrappers for safe, shared, mutable memory between tasks
+//!
+//! The wrappers in this module build on the primitives from `sync::raw` to
+//! provide safe interfaces around using the primitive locks. These primitives
+//! implement a technique called "poisoning": if a task fails while it holds a
+//! lock, all future attempts to use that lock will also fail.
+//!
+//! For example, if two tasks are contending on a mutex and one of them fails
+//! after grabbing the lock, the second task will immediately fail because the
+//! lock is now poisoned.
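
As an editorial aside, a minimal sketch of what poisoning looks like to a user, assuming `std::task::try` to contain the failure (as the tests in this file do); the final `lock()` fails with the "Poisoned Mutex" message produced by the code below.

```rust
use std::task;
use sync::{Arc, Mutex};

fn main() {
    let lock = Arc::new(Mutex::new(1));
    let lock2 = lock.clone();

    // This task fails while holding the lock; the guard's destructor
    // runs during unwinding and marks the mutex as poisoned.
    let _ = task::try(proc() {
        let _guard = lock2.lock();
        fail!("oops");
    });

    // Any subsequent attempt to take the lock fails immediately.
    let _guard = lock.lock(); // fails: "Poisoned Mutex - another task failed inside!"
}
```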
+
+use std::task;
+use std::ty::Unsafe;
+
+use raw;
+
+/****************************************************************************
+ * Poisoning helpers
+ ****************************************************************************/
+
+struct PoisonOnFail<'a> {
+    flag: &'a mut bool,
+    failed: bool,
+}
+
+impl<'a> PoisonOnFail<'a> {
+    fn check(flag: bool, name: &str) {
+        if flag {
+            fail!("Poisoned {} - another task failed inside!", name);
+        }
+    }
+
+    fn new<'a>(flag: &'a mut bool, name: &str) -> PoisonOnFail<'a> {
+        PoisonOnFail::check(*flag, name);
+        PoisonOnFail {
+            flag: flag,
+            failed: task::failing()
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for PoisonOnFail<'a> {
+    fn drop(&mut self) {
+        if !self.failed && task::failing() {
+            *self.flag = true;
+        }
+    }
+}
+
+/****************************************************************************
+ * Condvar
+ ****************************************************************************/
+
+enum Inner<'a> {
+    InnerMutex(raw::MutexGuard<'a>),
+    InnerRWLock(raw::RWLockWriteGuard<'a>),
+}
+
+impl<'b> Inner<'b> {
+    fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> {
+        match *self {
+            InnerMutex(ref m) => &m.cond,
+            InnerRWLock(ref m) => &m.cond,
+        }
+    }
+}
+
+/// A condition variable, a mechanism for unlock-and-descheduling and
+/// signaling, for use with the lock types.
+pub struct Condvar<'a> {
+    priv name: &'static str,
+    // n.b. Inner must be after PoisonOnFail because we must set the poison flag
+    //      *inside* the mutex, and struct fields are destroyed top-to-bottom
+    //      (destroy the lock guard last).
+    priv poison: PoisonOnFail<'a>,
+    priv inner: Inner<'a>,
+}
+
+impl<'a> Condvar<'a> {
+    /// Atomically exit the associated lock and block until a signal is sent.
+    ///
+    /// wait() is equivalent to wait_on(0).
+    ///
+    /// # Failure
+    ///
+    /// A task which is killed while waiting on a condition variable will wake
+    /// up, fail, and unlock the associated lock as it unwinds.
+    #[inline]
+    pub fn wait(&self) { self.wait_on(0) }
+
+    /// Atomically exit the associated lock and block on a specified condvar
+    /// until a signal is sent on that same condvar.
+    ///
+    /// The associated lock must have been initialised with an appropriate
+    /// number of condvars. The condvar_id must be between 0 and num_condvars-1
+    /// or else this call will fail.
+    #[inline]
+    pub fn wait_on(&self, condvar_id: uint) {
+        assert!(!*self.poison.flag);
+        self.inner.cond().wait_on(condvar_id);
+        // This is why we need to wrap sync::condvar.
+        PoisonOnFail::check(*self.poison.flag, self.name);
+    }
+
+    /// Wake up a blocked task. Returns false if there was no blocked task.
+    #[inline]
+    pub fn signal(&self) -> bool { self.signal_on(0) }
+
+    /// Wake up a blocked task on a specified condvar (as
+    /// sync::cond.signal_on). Returns false if there was no blocked task.
+    #[inline]
+    pub fn signal_on(&self, condvar_id: uint) -> bool {
+        assert!(!*self.poison.flag);
+        self.inner.cond().signal_on(condvar_id)
+    }
+
+    /// Wake up all blocked tasks. Returns the number of tasks woken.
+    #[inline]
+    pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
+
+    /// Wake up all blocked tasks on a specified condvar (as
+    /// sync::cond.broadcast_on). Returns the number of tasks woken.
+    #[inline]
+    pub fn broadcast_on(&self, condvar_id: uint) -> uint {
+        assert!(!*self.poison.flag);
+        self.inner.cond().broadcast_on(condvar_id)
+    }
+}
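
The numbered-condvar API above may be easier to see in a sketch. This hypothetical example assumes a `Mutex` (defined below) built with two condvars; the two IDs act as independent wait queues on the same lock.

```rust
use sync::{Arc, Mutex};

fn main() {
    let m = Arc::new(Mutex::new_with_condvars(0, 2));
    let m2 = m.clone();

    spawn(proc() {
        let mut val = m2.lock();
        *val = 1;
        val.cond.signal_on(1); // wake only waiters on condvar 1
    });

    let val = m.lock();
    while *val != 1 {
        val.cond.wait_on(1);   // block on condvar 1, not condvar 0
    }
}
```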
+
+/****************************************************************************
+ * Mutex
+ ****************************************************************************/
+
+/// A wrapper type which provides synchronized access to the underlying data, of
+/// type `T`. A mutex always provides exclusive access, and concurrent requests
+/// will block while the mutex is already locked.
+///
+/// # Example
+///
+/// ```
+/// use sync::{Mutex, Arc};
+///
+/// let mutex = Arc::new(Mutex::new(1));
+/// let mutex2 = mutex.clone();
+///
+/// spawn(proc() {
+///     let mut val = mutex2.lock();
+///     *val += 1;
+///     val.cond.signal();
+/// });
+///
+/// let mut value = mutex.lock();
+/// while *value != 2 {
+///     value.cond.wait();
+/// }
+/// ```
+pub struct Mutex<T> {
+    priv lock: raw::Mutex,
+    priv failed: Unsafe<bool>,
+    priv data: Unsafe<T>,
+}
+
+/// A guard which is created by locking a mutex. Through this guard the
+/// underlying data can be accessed.
+pub struct MutexGuard<'a, T> {
+    priv data: &'a mut T,
+    /// Inner condition variable connected to the locked mutex that this guard
+    /// was created from. This can be used for atomic-unlock-and-deschedule.
+    cond: Condvar<'a>,
+}
+
+impl<T: Send> Mutex<T> {
+    /// Creates a new mutex to protect the user-supplied data.
+    pub fn new(user_data: T) -> Mutex<T> {
+        Mutex::new_with_condvars(user_data, 1)
+    }
+
+    /// Create a new mutex, with a specified number of associated condvars.
+    ///
+    /// This will allow calling wait_on/signal_on/broadcast_on with condvar IDs
+    /// between 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be
+    /// allowed but any operations on the condvar will fail.)
+    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex<T> {
+        Mutex {
+            lock: raw::Mutex::new_with_condvars(num_condvars),
+            failed: Unsafe::new(false),
+            data: Unsafe::new(user_data),
+        }
+    }
+
+    /// Access the underlying mutable data with mutual exclusion from other
+    /// tasks. The returned value is an RAII guard which will unlock the mutex
+    /// when dropped. All concurrent tasks attempting to lock the mutex will
+    /// block while the returned value is still alive.
+    ///
+    /// # Failure
+    ///
+    /// Failing while inside the Mutex will unlock the Mutex while unwinding, so
+    /// that other tasks won't block forever. It will also poison the Mutex:
+    /// any tasks that subsequently try to access it (including those already
+    /// blocked on the mutex) will also fail immediately.
+    #[inline]
+    pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
+        let guard = self.lock.lock();
+
+        // These two accesses are safe because we're guaranteed at this point
+        // that we have exclusive access to this mutex. We are indeed able to
+        // promote ourselves from &Mutex to `&mut T`
+        let poison = unsafe { &mut *self.failed.get() };
+        let data = unsafe { &mut *self.data.get() };
+
+        MutexGuard {
+            data: data,
+            cond: Condvar {
+                name: "Mutex",
+                poison: PoisonOnFail::new(poison, "Mutex"),
+                inner: InnerMutex(guard),
+            },
+        }
+    }
+}
+
+// FIXME(#13042): these should both have T: Send
+impl<'a, T> Deref<T> for MutexGuard<'a, T> {
+    fn deref<'a>(&'a self) -> &'a T { &*self.data }
+}
+impl<'a, T> DerefMut<T> for MutexGuard<'a, T> {
+    fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
+}
+
+/****************************************************************************
+ * R/W lock protected lock
+ ****************************************************************************/
+
+/// A dual-mode reader-writer lock. The data can be accessed mutably or
+/// immutably, and immutably-accessing tasks may run concurrently.
+///
+/// # Example
+///
+/// ```
+/// use sync::{RWLock, Arc};
+///
+/// let lock1 = Arc::new(RWLock::new(1));
+/// let lock2 = lock1.clone();
+///
+/// spawn(proc() {
+///     let mut val = lock2.write();
+///     *val = 3;
+///     let val = val.downgrade();
+///     println!("{}", *val);
+/// });
+///
+/// let val = lock1.read();
+/// println!("{}", *val);
+/// ```
+pub struct RWLock<T> {
+    priv lock: raw::RWLock,
+    priv failed: Unsafe<bool>,
+    priv data: Unsafe<T>,
+}
+
+/// A guard which is created by locking an rwlock in write mode. Through this
+/// guard the underlying data can be accessed.
+pub struct RWLockWriteGuard<'a, T> {
+    priv data: &'a mut T,
+    /// Inner condition variable that can be used to sleep on the write mode of
+    /// this rwlock.
+    cond: Condvar<'a>,
+}
+
+/// A guard which is created by locking an rwlock in read mode. Through this
+/// guard the underlying data can be accessed.
+pub struct RWLockReadGuard<'a, T> {
+    priv data: &'a T,
+    priv guard: raw::RWLockReadGuard<'a>,
+}
+
+impl<T: Send + Share> RWLock<T> {
+    /// Create a reader/writer lock with the supplied data.
+    pub fn new(user_data: T) -> RWLock<T> {
+        RWLock::new_with_condvars(user_data, 1)
+    }
+
+    /// Create a reader/writer lock with the supplied data and a specified number
+    /// of condvars (as sync::RWLock::new_with_condvars).
+    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock<T> {
+        RWLock {
+            lock: raw::RWLock::new_with_condvars(num_condvars),
+            failed: Unsafe::new(false),
+            data: Unsafe::new(user_data),
+        }
+    }
+
+    /// Access the underlying data mutably. Locks the rwlock in write mode;
+    /// other readers and writers will block.
+    ///
+    /// # Failure
+    ///
+    /// Failing while inside the lock will unlock the lock while unwinding, so
+    /// that other tasks won't block forever. As Mutex.lock, it will also poison
+    /// the lock, so subsequent readers and writers will both also fail.
+    #[inline]
+    pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a, T> {
+        let guard = self.lock.write();
+
+        // These two accesses are safe because we're guaranteed at this point
+        // that we have exclusive access to this rwlock. We are indeed able to
+        // promote ourselves from &RWLock to `&mut T`
+        let poison = unsafe { &mut *self.failed.get() };
+        let data = unsafe { &mut *self.data.get() };
+
+        RWLockWriteGuard {
+            data: data,
+            cond: Condvar {
+                name: "RWLock",
+                poison: PoisonOnFail::new(poison, "RWLock"),
+                inner: InnerRWLock(guard),
+            },
+        }
+    }
+
+    /// Access the underlying data immutably. May run concurrently with other
+    /// reading tasks.
+    ///
+    /// # Failure
+    ///
+    /// Failing will unlock the lock while unwinding. However, unlike all other
+    /// access modes, this will not poison the lock.
+    pub fn read<'a>(&'a self) -> RWLockReadGuard<'a, T> {
+        let guard = self.lock.read();
+        PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
+        RWLockReadGuard {
+            guard: guard,
+            data: unsafe { &*self.data.get() },
+        }
+    }
+}
+
+impl<'a, T: Send + Share> RWLockWriteGuard<'a, T> {
+    /// Consumes this write lock token, returning a new read lock token.
+    ///
+    /// This will allow pending readers to come into the lock.
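+    ///
+    /// # Example
+    ///
+    /// A minimal single-task sketch of the downgrade flow (the type-level
+    /// example above shows the cross-task version):
+    ///
+    /// ```rust
+    /// use sync::RWLock;
+    ///
+    /// let lock = RWLock::new(1);
+    /// let mut write = lock.write();
+    /// *write = 2;
+    /// let read = write.downgrade(); // pending readers may now enter
+    /// assert_eq!(*read, 2);
+    /// ```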
+    pub fn downgrade(self) -> RWLockReadGuard<'a, T> {
+        let RWLockWriteGuard { data, cond } = self;
+        // convert the data to read-only explicitly
+        let data = &*data;
+        let guard = match cond.inner {
+            InnerMutex(..) => unreachable!(),
+            InnerRWLock(guard) => guard.downgrade()
+        };
+        RWLockReadGuard { guard: guard, data: data }
+    }
+}
+
+// FIXME(#13042): these should all have T: Send + Share
+impl<'a, T> Deref<T> for RWLockReadGuard<'a, T> {
+    fn deref<'a>(&'a self) -> &'a T { self.data }
+}
+impl<'a, T> Deref<T> for RWLockWriteGuard<'a, T> {
+    fn deref<'a>(&'a self) -> &'a T { &*self.data }
+}
+impl<'a, T> DerefMut<T> for RWLockWriteGuard<'a, T> {
+    fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
+}
+
+/****************************************************************************
+ * Barrier
+ ****************************************************************************/
+
+/// A barrier enables multiple tasks to synchronize the beginning
+/// of some computation.
+///
+/// ```rust
+/// use sync::{Arc, Barrier};
+///
+/// let barrier = Arc::new(Barrier::new(10));
+/// for _ in range(0, 10) {
+///     let c = barrier.clone();
+///     // The same messages will be printed together.
+///     // You will NOT see any interleaving.
+///     spawn(proc() {
+///         println!("before wait");
+///         c.wait();
+///         println!("after wait");
+///     });
+/// }
+/// ```
+pub struct Barrier {
+    priv lock: Mutex<BarrierState>,
+    priv num_tasks: uint,
+}
+
+// The inner state of a barrier. The `generation_id` counts barrier cycles so
+// that a task from a previous cycle never consumes a wakeup meant for the
+// next one.
+struct BarrierState {
+    count: uint,
+    generation_id: uint,
+}
+
+impl Barrier {
+    /// Create a new barrier that can block a given number of tasks.
+    pub fn new(num_tasks: uint) -> Barrier {
+        Barrier {
+            lock: Mutex::new(BarrierState {
+                count: 0,
+                generation_id: 0,
+            }),
+            num_tasks: num_tasks,
+        }
+    }
+
+    /// Block the current task until a certain number of tasks is waiting.
+    pub fn wait(&self) {
+        let mut lock = self.lock.lock();
+        let local_gen = lock.generation_id;
+        lock.count += 1;
+        if lock.count < self.num_tasks {
+            // We need a while loop to guard against spurious wakeups.
+            // http://en.wikipedia.org/wiki/Spurious_wakeup
+            while local_gen == lock.generation_id &&
+                  lock.count < self.num_tasks {
+                lock.cond.wait();
+            }
+        } else {
+            lock.count = 0;
+            lock.generation_id += 1;
+            lock.cond.broadcast();
+        }
+    }
+}
+
+/****************************************************************************
+ * Tests
+ ****************************************************************************/
+
+#[cfg(test)]
+mod tests {
+    use std::comm::Empty;
+    use std::task;
+
+    use arc::Arc;
+    use super::{Mutex, Barrier, RWLock};
+
+    #[test]
+    fn test_mutex_arc_condvar() {
+        let arc = Arc::new(Mutex::new(false));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+        task::spawn(proc() {
+            // wait until parent gets in
+            rx.recv();
+            let mut lock = arc2.lock();
+            *lock = true;
+            lock.cond.signal();
+        });
+
+        let lock = arc.lock();
+        tx.send(());
+        assert!(!*lock);
+        while !*lock {
+            lock.cond.wait();
+        }
+    }
+
+    #[test] #[should_fail]
+    fn test_arc_condvar_poison() {
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        spawn(proc() {
+            rx.recv();
+            let lock = arc2.lock();
+            lock.cond.signal();
+            // Parent should fail when it wakes up.
+            fail!();
+        });
+
+        let lock = arc.lock();
+        tx.send(());
+        while *lock == 1 {
+            lock.cond.wait();
+        }
+    }
+
+    #[test] #[should_fail]
+    fn test_mutex_arc_poison() {
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.lock();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.lock();
+        assert_eq!(*lock, 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access to the underlying data.
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = Arc::new(Mutex::new(arc));
+        task::spawn(proc() {
+            let lock = arc2.lock();
+            let lock2 = lock.deref().lock();
+            assert_eq!(*lock2, 1);
+        });
+    }
+
+    #[test]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(Mutex::new(1i));
+        let arc2 = arc.clone();
+        let _ = task::try::<()>(proc() {
+            struct Unwinder {
+                i: Arc<Mutex<int>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = self.i.lock();
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            fail!();
+        });
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test] #[should_fail]
+    fn test_rw_arc_poison_wr() {
+        let arc = Arc::new(RWLock::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.write();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.read();
+        assert_eq!(*lock, 1);
+    }
+    #[test] #[should_fail]
+    fn test_rw_arc_poison_ww() {
+        let arc = Arc::new(RWLock::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.write();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.write();
+        assert_eq!(*lock, 1);
+    }
+    #[test]
+    fn test_rw_arc_no_poison_rr() {
+        let arc = Arc::new(RWLock::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.read();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.read();
+        assert_eq!(*lock, 1);
+    }
+    #[test]
+    fn test_rw_arc_no_poison_rw() {
+        let arc = Arc::new(RWLock::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.read();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.write();
+        assert_eq!(*lock, 1);
+    }
+    #[test]
+    fn test_rw_arc_no_poison_dr() {
+        let arc = Arc::new(RWLock::new(1));
+        let arc2 = arc.clone();
+        let _ = task::try(proc() {
+            let lock = arc2.write().downgrade();
+            assert_eq!(*lock, 2);
+        });
+        let lock = arc.write();
+        assert_eq!(*lock, 1);
+    }
+
+    #[test]
+    fn test_rw_arc() {
+        let arc = Arc::new(RWLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        task::spawn(proc() {
+            let mut lock = arc2.write();
+            for _ in range(0, 10) {
+                let tmp = *lock;
+                *lock = -1;
+                task::deschedule();
+                *lock = tmp + 1;
+            }
+            tx.send(());
+        });
+
+        // Readers try to catch the writer in the act
+        let mut children = Vec::new();
+        for _ in range(0, 5) {
+            let arc3 = arc.clone();
+            let mut builder = task::task();
+            children.push(builder.future_result());
+            builder.spawn(proc() {
+                let lock = arc3.read();
+                assert!(*lock >= 0);
+            });
+        }
+
+        // Wait for children to pass their asserts
+        for r in children.mut_iter() {
+            assert!(r.recv().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv();
+        let lock = arc.read();
+        assert_eq!(*lock, 10);
+    }
+
+    #[test]
+    fn test_rw_arc_access_in_unwind() {
+        let arc = Arc::new(RWLock::new(1i));
+        let arc2 = arc.clone();
+        let _ = task::try::<()>(proc() {
+            struct Unwinder {
+                i: Arc<RWLock<int>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = self.i.write();
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            fail!();
+        });
+        let lock = arc.read();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_rw_downgrade() {
+        // (1) A downgrader gets in write mode and does cond.wait.
+        // (2) A writer gets in write mode, sets state to 42, and does signal.
+        // (3) Downgrader wakes, sets state to 31337.
+        // (4) Downgrader tells writer and all other readers to contend as it
+        //     downgrades.
+        // (5) Writer attempts to set state back to 42, while downgraded task
+        //     and all reader tasks assert that it's 31337.
+        let arc = Arc::new(RWLock::new(0));
+
+        // Reader tasks
+        let mut reader_convos = Vec::new();
+        for _ in range(0, 10) {
+            let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
+            reader_convos.push((tx1, rx2));
+            let arcn = arc.clone();
+            task::spawn(proc() {
+                rx1.recv(); // wait for downgrader to give go-ahead
+                let lock = arcn.read();
+                assert_eq!(*lock, 31337);
+                tx2.send(());
+            });
+        }
+
+        // Writer task
+        let arc2 = arc.clone();
+        let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
+        task::spawn(proc() {
+            rx1.recv();
+            {
+                let mut lock = arc2.write();
+                assert_eq!(*lock, 0);
+                *lock = 42;
+                lock.cond.signal();
+            }
+            rx1.recv();
+            {
+                let mut lock = arc2.write();
+                // This shouldn't happen until after the downgrade read
+                // section, and all other readers, finish.
+                assert_eq!(*lock, 31337);
+                *lock = 42;
+            }
+            tx2.send(());
+        });
+
+        // Downgrader (us)
+        let mut lock = arc.write();
+        tx1.send(()); // send to another writer who will wake us up
+        while *lock == 0 {
+            lock.cond.wait();
+        }
+        assert_eq!(*lock, 42);
+        *lock = 31337;
+        // send to other readers
+        for &(ref mut rc, _) in reader_convos.mut_iter() {
+            rc.send(())
+        }
+        let lock = lock.downgrade();
+        // complete handshake with other readers
+        for &(_, ref mut rp) in reader_convos.mut_iter() {
+            rp.recv()
+        }
+        tx1.send(()); // tell writer to try again
+        assert_eq!(*lock, 31337);
+        drop(lock);
+
+        rx2.recv(); // complete handshake with writer
+    }
+
+    #[cfg(test)]
+    fn test_rw_write_cond_downgrade_read_race_helper() {
+        // Tests that when a downgrader hands off the "reader cloud" lock
+        // because of a contending reader, a writer can't race to get it
+        // instead, which would result in readers_and_writers. This tests
+        // the raw module rather than this one, but it's here because an
+        // rwarc gives us extra shared state to help check for the race.
+        let x = Arc::new(RWLock::new(true));
+        let (tx, rx) = channel();
+
+        // writer task
+        let xw = x.clone();
+        task::spawn(proc() {
+            let mut lock = xw.write();
+            tx.send(()); // tell downgrader it's ok to go
+            lock.cond.wait();
+            // The core of the test is here: the condvar reacquire path
+            // must involve order_lock, so that it cannot race with a reader
+            // trying to receive the "reader cloud lock hand-off".
+            *lock = false;
+        });
+
+        rx.recv(); // wait for writer to get in
+
+        let lock = x.write();
+        assert!(*lock);
+        // make writer contend in the cond-reacquire path
+        lock.cond.signal();
+        // make a reader task to trigger the "reader cloud lock" handoff
+        let xr = x.clone();
+        let (tx, rx) = channel();
+        task::spawn(proc() {
+            tx.send(());
+            drop(xr.read());
+        });
+        rx.recv(); // wait for reader task to exist
+
+        let lock = lock.downgrade();
+        // if writer mistakenly got in, make sure it mutates state
+        // before we assert on it
+        for _ in range(0, 5) { task::deschedule(); }
+        // make sure writer didn't get in.
+        assert!(*lock);
+    }
+    #[test]
+    fn test_rw_write_cond_downgrade_read_race() {
+        // Ideally the above test case would have deschedule statements in it
+        // that helped to expose the race nearly 100% of the time... but adding
+        // deschedules in the intuitively-right locations made it even less
+        // likely, and I wasn't sure why :(. This is a mediocre "next best"
+        // option.
+        for _ in range(0, 8) {
+            test_rw_write_cond_downgrade_read_race_helper();
+        }
+    }
+
+    /************************************************************************
+     * Barrier tests
+     ************************************************************************/
+    #[test]
+    fn test_barrier() {
+        let barrier = Arc::new(Barrier::new(10));
+        let (tx, rx) = channel();
+
+        for _ in range(0, 9) {
+            let c = barrier.clone();
+            let tx = tx.clone();
+            spawn(proc() {
+                c.wait();
+                tx.send(true);
+            });
+        }
+
+        // At this point, all spawned tasks should be blocked,
+        // so we shouldn't get anything from the port
+        assert!(match rx.try_recv() {
+            Empty => true,
+            _ => false,
+        });
+
+        barrier.wait();
+        // Now, the barrier is cleared and we should get data.
+        for _ in range(0, 9) {
+            rx.recv();
+        }
+    }
+}
+
diff --git a/src/libsync/sync/mpsc_intrusive.rs b/src/libsync/mpsc_intrusive.rs
index 0f13a4980d9..12e8ca48ba1 100644
--- a/src/libsync/sync/mpsc_intrusive.rs
+++ b/src/libsync/mpsc_intrusive.rs
@@ -35,6 +35,7 @@
 
 use std::cast;
 use std::sync::atomics;
+use std::ty::Unsafe;
 
 // NB: all links are done as AtomicUint instead of AtomicPtr to allow for static
 // initialization.
@@ -50,7 +51,7 @@ pub struct DummyNode {
 
 pub struct Queue<T> {
     head: atomics::AtomicUint,
-    tail: *mut Node<T>,
+    tail: Unsafe<*mut Node<T>>,
     stub: DummyNode,
 }
 
@@ -58,14 +59,14 @@ impl<T: Send> Queue<T> {
     pub fn new() -> Queue<T> {
         Queue {
             head: atomics::AtomicUint::new(0),
-            tail: 0 as *mut Node<T>,
+            tail: Unsafe::new(0 as *mut Node<T>),
             stub: DummyNode {
                 next: atomics::AtomicUint::new(0),
             },
         }
     }
 
-    pub unsafe fn push(&mut self, node: *mut Node<T>) {
+    pub unsafe fn push(&self, node: *mut Node<T>) {
         (*node).next.store(0, atomics::Release);
         let prev = self.head.swap(node as uint, atomics::AcqRel);
 
@@ -93,8 +94,8 @@ impl<T: Send> Queue<T> {
     /// Right now consumers of this queue must be ready for this fact. Just
     /// because `pop` returns `None` does not mean that there is not data
     /// on the queue.
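+    ///
+    /// A hypothetical caller sketch (names here are illustrative only):
+    ///
+    /// ```ignore
+    /// match unsafe { queue.pop() } {
+    ///     Some(node) => { /* consume `node` */ }
+    ///     None => { /* empty *or* momentarily inconsistent; retry or park */ }
+    /// }
+    /// ```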
-    pub unsafe fn pop(&mut self) -> Option<*mut Node<T>> {
-        let tail = self.tail;
+    pub unsafe fn pop(&self) -> Option<*mut Node<T>> {
+        let tail = *self.tail.get();
         let mut tail = if !tail.is_null() {tail} else {
             cast::transmute(&self.stub)
         };
@@ -103,12 +104,12 @@ impl<T: Send> Queue<T> {
             if next.is_null() {
                 return None;
             }
-            self.tail = next;
+            *self.tail.get() = next;
             tail = next;
             next = (*next).next(atomics::Relaxed);
         }
         if !next.is_null() {
-            self.tail = next;
+            *self.tail.get() = next;
             return Some(tail);
         }
         let head = self.head.load(atomics::Acquire) as *mut Node<T>;
@@ -119,7 +120,7 @@ impl<T: Send> Queue<T> {
         self.push(stub);
         next = (*tail).next(atomics::Relaxed);
         if !next.is_null() {
-            self.tail = next;
+            *self.tail.get() = next;
             return Some(tail);
         }
         return None
@@ -133,7 +134,7 @@ impl<T: Send> Node<T> {
             next: atomics::AtomicUint::new(0),
         }
     }
-    pub unsafe fn next(&mut self, ord: atomics::Ordering) -> *mut Node<T> {
+    pub unsafe fn next(&self, ord: atomics::Ordering) -> *mut Node<T> {
         cast::transmute::<uint, *mut Node<T>>(self.next.load(ord))
     }
 }
diff --git a/src/libsync/sync/mutex.rs b/src/libsync/mutex.rs
index 9901cda423b..b01c82eb7ac 100644
--- a/src/libsync/sync/mutex.rs
+++ b/src/libsync/mutex.rs
@@ -57,13 +57,16 @@
 // times in order to manage a few flags about who's blocking where and whether
 // it's locked or not.
 
+use std::kinds::marker;
+use std::mem;
 use std::rt::local::Local;
 use std::rt::task::{BlockedTask, Task};
 use std::rt::thread::Thread;
 use std::sync::atomics;
+use std::ty::Unsafe;
 use std::unstable::mutex;
 
-use q = sync::mpsc_intrusive;
+use q = mpsc_intrusive;
 
 pub static LOCKED: uint = 1 << 0;
 pub static GREEN_BLOCKED: uint = 1 << 1;
@@ -85,7 +88,7 @@ pub static NATIVE_BLOCKED: uint = 1 << 2;
 /// ```rust
 /// use sync::mutex::Mutex;
 ///
-/// let mut m = Mutex::new();
+/// let m = Mutex::new();
 /// let guard = m.lock();
 /// // do some work
 /// drop(guard); // unlock the lock
@@ -126,14 +129,15 @@ enum Flavor {
 pub struct StaticMutex {
     /// Current set of flags on this mutex
     priv state: atomics::AtomicUint,
+    /// an OS mutex used by native threads
+    priv lock: mutex::StaticNativeMutex,
+
     /// Type of locking operation currently on this mutex
-    priv flavor: Flavor,
+    priv flavor: Unsafe<Flavor>,
     /// uint-cast of the green thread waiting for this mutex
-    priv green_blocker: uint,
+    priv green_blocker: Unsafe<uint>,
     /// uint-cast of the native thread waiting for this mutex
-    priv native_blocker: uint,
-    /// an OS mutex used by native threads
-    priv lock: mutex::StaticNativeMutex,
+    priv native_blocker: Unsafe<uint>,
 
     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
@@ -145,7 +149,7 @@ pub struct StaticMutex {
 /// dropped (falls out of scope), the lock will be unlocked.
 #[must_use]
 pub struct Guard<'a> {
-    priv lock: &'a mut StaticMutex,
+    priv lock: &'a StaticMutex,
 }
 
 /// Static initialization of a mutex. This constant can be used to initialize
@@ -153,13 +157,16 @@ pub struct Guard<'a> {
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
     lock: mutex::NATIVE_MUTEX_INIT,
     state: atomics::INIT_ATOMIC_UINT,
-    flavor: Unlocked,
-    green_blocker: 0,
-    native_blocker: 0,
+    flavor: Unsafe { value: Unlocked, marker1: marker::InvariantType },
+    green_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
+    native_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
     green_cnt: atomics::INIT_ATOMIC_UINT,
     q: q::Queue {
         head: atomics::INIT_ATOMIC_UINT,
-        tail: 0 as *mut q::Node<uint>,
+        tail: Unsafe {
+            value: 0 as *mut q::Node<uint>,
+            marker1: marker::InvariantType,
+        },
         stub: q::DummyNode {
             next: atomics::INIT_ATOMIC_UINT,
         }
@@ -168,14 +175,18 @@ pub static MUTEX_INIT: StaticMutex = StaticMutex {
 
 impl StaticMutex {
     /// Attempts to grab this lock, see `Mutex::try_lock`
-    pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
+    pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
         // Attempt to steal the mutex from an unlocked state.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
         match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
             0 => {
-                assert!(self.flavor == Unlocked);
-                self.flavor = TryLockAcquisition;
+                // After acquiring the mutex, we can safely access the inner
+                // fields.
+                let prev = unsafe {
+                    mem::replace(&mut *self.flavor.get(), TryLockAcquisition)
+                };
+                assert_eq!(prev, Unlocked);
                 Some(Guard::new(self))
             }
             _ => None
@@ -183,19 +194,15 @@ impl StaticMutex {
     }
 
     /// Acquires this lock, see `Mutex::lock`
-    pub fn lock<'a>(&'a mut self) -> Guard<'a> {
+    pub fn lock<'a>(&'a self) -> Guard<'a> {
         // First, attempt to steal the mutex from an unlocked state. The "fast
         // path" needs to have as few atomic instructions as possible, and this
         // one cmpxchg is already pretty expensive.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
-        match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
-            0 => {
-                assert!(self.flavor == Unlocked);
-                self.flavor = TryLockAcquisition;
-                return Guard::new(self)
-            }
-            _ => {}
+        match self.try_lock() {
+            Some(guard) => return guard,
+            None => {}
         }
 
         // After we've failed the fast path, then we delegate to the different
@@ -219,11 +226,14 @@ impl StaticMutex {
         let mut old = match self.state.compare_and_swap(0, LOCKED,
                                                         atomics::SeqCst) {
             0 => {
-                self.flavor = if can_block {
+                let flavor = if can_block {
                     NativeAcquisition
                 } else {
                     GreenAcquisition
                 };
+                // We've acquired the lock, so this unsafe access to flavor is
+                // allowed.
+                unsafe { *self.flavor.get() = flavor; }
                 return Guard::new(self)
             }
             old => old,
@@ -237,13 +247,15 @@ impl StaticMutex {
         let t: ~Task = Local::take();
         t.deschedule(1, |task| {
             let task = unsafe { task.cast_to_uint() };
-            if can_block {
-                assert_eq!(self.native_blocker, 0);
-                self.native_blocker = task;
+
+            // These accesses are protected by the respective native/green
+            // mutexes which were acquired above.
+            let prev = if can_block {
+                unsafe { mem::replace(&mut *self.native_blocker.get(), task) }
             } else {
-                assert_eq!(self.green_blocker, 0);
-                self.green_blocker = task;
-            }
+                unsafe { mem::replace(&mut *self.green_blocker.get(), task) }
+            };
+            assert_eq!(prev, 0);
 
             loop {
                 assert_eq!(old & native_bit, 0);
@@ -264,14 +276,23 @@ impl StaticMutex {
                                                             old | LOCKED,
                                                             atomics::SeqCst) {
                         n if n == old => {
-                            assert_eq!(self.flavor, Unlocked);
-                            if can_block {
-                                self.native_blocker = 0;
-                                self.flavor = NativeAcquisition;
+                            // After acquiring the lock, we have access to the
+                            // flavor field, and we've regained access to our
+                            // respective native/green blocker field.
+                            let prev = if can_block {
+                                unsafe {
+                                    *self.native_blocker.get() = 0;
+                                    mem::replace(&mut *self.flavor.get(),
+                                                 NativeAcquisition)
+                                }
                             } else {
-                                self.green_blocker = 0;
-                                self.flavor = GreenAcquisition;
-                            }
+                                unsafe {
+                                    *self.green_blocker.get() = 0;
+                                    mem::replace(&mut *self.flavor.get(),
+                                                 GreenAcquisition)
+                                }
+                            };
+                            assert_eq!(prev, Unlocked);
                             return Err(unsafe {
                                 BlockedTask::cast_from_uint(task)
                             })
@@ -287,16 +308,16 @@ impl StaticMutex {
 
     // Tasks which can block are super easy. These tasks just call the blocking
     // `lock()` function on an OS mutex
-    fn native_lock(&mut self, t: ~Task) {
+    fn native_lock(&self, t: ~Task) {
         Local::put(t);
         unsafe { self.lock.lock_noguard(); }
     }
 
-    fn native_unlock(&mut self) {
+    fn native_unlock(&self) {
         unsafe { self.lock.unlock_noguard(); }
     }
 
-    fn green_lock(&mut self, t: ~Task) {
+    fn green_lock(&self, t: ~Task) {
         // Green threads flag their presence with an atomic counter, and if they
         // fail to be the first to the mutex, they enqueue themselves on a
         // concurrent internal queue with a stack-allocated node.
@@ -318,7 +339,7 @@ impl StaticMutex {
         });
     }
 
-    fn green_unlock(&mut self) {
+    fn green_unlock(&self) {
         // If we're the only green thread, then no need to check the queue,
         // otherwise the fixme above forces us to spin for a bit.
         if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
@@ -333,7 +354,7 @@ impl StaticMutex {
         task.wake().map(|t| t.reawaken());
     }
 
-    fn unlock(&mut self) {
+    fn unlock(&self) {
         // Unlocking this mutex is a little tricky. We favor any task that is
         // manually blocked (not in each of the separate locks) in order to help
         // provide a little fairness (green threads will wake up the pending
@@ -351,8 +372,7 @@ impl StaticMutex {
         // task needs to be woken, and in this case it's ok that the "mutex
         // halves" are unlocked, we're just mainly dealing with the atomic state
         // of the outer mutex.
-        let flavor = self.flavor;
-        self.flavor = Unlocked;
+        let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };
 
         let mut state = self.state.load(atomics::SeqCst);
         let mut unlocked = false;
@@ -362,18 +382,18 @@ impl StaticMutex {
             if state & GREEN_BLOCKED != 0 {
                 self.unset(state, GREEN_BLOCKED);
                 task = unsafe {
-                    BlockedTask::cast_from_uint(self.green_blocker)
+                    *self.flavor.get() = GreenAcquisition;
+                    let task = mem::replace(&mut *self.green_blocker.get(), 0);
+                    BlockedTask::cast_from_uint(task)
                 };
-                self.green_blocker = 0;
-                self.flavor = GreenAcquisition;
                 break;
             } else if state & NATIVE_BLOCKED != 0 {
                 self.unset(state, NATIVE_BLOCKED);
                 task = unsafe {
-                    BlockedTask::cast_from_uint(self.native_blocker)
+                    *self.flavor.get() = NativeAcquisition;
+                    let task = mem::replace(&mut *self.native_blocker.get(), 0);
+                    BlockedTask::cast_from_uint(task)
                 };
-                self.native_blocker = 0;
-                self.flavor = NativeAcquisition;
                 break;
             } else {
                 assert_eq!(state, LOCKED);
@@ -405,7 +425,7 @@ impl StaticMutex {
     }
 
     /// Loops around a CAS to unset the `bit` in `state`
-    fn unset(&mut self, mut state: uint, bit: uint) {
+    fn unset(&self, mut state: uint, bit: uint) {
         loop {
             assert!(state & bit != 0);
             let new = state ^ bit;
@@ -426,7 +446,7 @@ impl StaticMutex {
     /// *all* platforms. It may be the case that some platforms do not leak
     /// memory if this method is not called, but this is not guaranteed to be
     /// true on all platforms.
-    pub unsafe fn destroy(&mut self) {
+    pub unsafe fn destroy(&self) {
         self.lock.destroy()
     }
 }
@@ -437,9 +457,9 @@ impl Mutex {
         Mutex {
             lock: StaticMutex {
                 state: atomics::AtomicUint::new(0),
-                flavor: Unlocked,
-                green_blocker: 0,
-                native_blocker: 0,
+                flavor: Unsafe::new(Unlocked),
+                green_blocker: Unsafe::new(0),
+                native_blocker: Unsafe::new(0),
                 green_cnt: atomics::AtomicUint::new(0),
                 q: q::Queue::new(),
                 lock: unsafe { mutex::StaticNativeMutex::new() },
@@ -454,7 +474,7 @@ impl Mutex {
     /// guard is dropped.
     ///
     /// This function does not block.
-    pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
+    pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
         self.lock.try_lock()
     }
 
@@ -464,13 +484,14 @@ impl Mutex {
     /// the mutex. Upon returning, the task is the only task with the mutex
     /// held. An RAII guard is returned to allow scoped unlock of the lock. When
     /// the guard goes out of scope, the mutex will be unlocked.
-    pub fn lock<'a>(&'a mut self) -> Guard<'a> { self.lock.lock() }
+    pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
 }
 
 impl<'a> Guard<'a> {
-    fn new<'b>(lock: &'b mut StaticMutex) -> Guard<'b> {
+    fn new<'b>(lock: &'b StaticMutex) -> Guard<'b> {
         if cfg!(debug) {
-            assert!(lock.flavor != Unlocked);
+            // Once we've acquired the lock, it's ok to access the flavor field.
+            assert!(unsafe { *lock.flavor.get() != Unlocked });
             assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
         }
         Guard { lock: lock }
@@ -501,7 +522,7 @@ mod test {
 
     #[test]
     fn smoke() {
-        let mut m = Mutex::new();
+        let m = Mutex::new();
         drop(m.lock());
         drop(m.lock());
     }
@@ -552,7 +573,7 @@ mod test {
 
     #[test]
     fn trylock() {
-        let mut m = Mutex::new();
+        let m = Mutex::new();
         assert!(m.try_lock().is_some());
     }
 }
diff --git a/src/libsync/sync/one.rs b/src/libsync/one.rs
index c5e83bed0ed..161f759ca2d 100644
--- a/src/libsync/sync/one.rs
+++ b/src/libsync/one.rs
@@ -15,7 +15,8 @@
 
 use std::int;
 use std::sync::atomics;
-use sync::mutex::{StaticMutex, MUTEX_INIT};
+
+use mutex::{StaticMutex, MUTEX_INIT};
 
 /// A type which can be used to run a one-time global initialization. This type
 /// is *unsafe* to use because it is built on top of the `Mutex` in this module.
@@ -62,7 +63,7 @@ impl Once {
     ///
     /// When this function returns, it is guaranteed that some initialization
     /// has run and completed (it may not be the closure specified).
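+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming this module's `ONCE_INIT` static
+    /// initializer:
+    ///
+    /// ```rust
+    /// use sync::one::{Once, ONCE_INIT};
+    ///
+    /// static mut START: Once = ONCE_INIT;
+    ///
+    /// unsafe {
+    ///     START.doit(|| {
+    ///         // run the one-time initialization here
+    ///     });
+    /// }
+    /// ```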
-    pub fn doit(&mut self, f: ||) {
+    pub fn doit(&self, f: ||) {
         // Implementation-wise, this would seem like a fairly trivial primitive.
         // The stickler part is where our mutexes currently require an
         // allocation, and usage of a `Once` shouldn't leak this allocation.
@@ -101,14 +102,13 @@ impl Once {
         // If the count is negative, then someone else finished the job,
         // otherwise we run the job and record how many people will try to grab
         // this lock
-        {
-            let _guard = self.mutex.lock();
-            if self.cnt.load(atomics::SeqCst) > 0 {
-                f();
-                let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
-                self.lock_cnt.store(prev, atomics::SeqCst);
-            }
+        let guard = self.mutex.lock();
+        if self.cnt.load(atomics::SeqCst) > 0 {
+            f();
+            let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
+            self.lock_cnt.store(prev, atomics::SeqCst);
         }
+        drop(guard);
 
         // Last one out cleans up after everyone else, no leaks!
         if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
diff --git a/src/libsync/sync/mod.rs b/src/libsync/raw.rs
index 2217706d4f0..36f0748fe71 100644
--- a/src/libsync/sync/mod.rs
+++ b/src/libsync/raw.rs
@@ -8,42 +8,34 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-#[allow(missing_doc)];
-
-/**
- * The concurrency primitives you know and love.
- *
- * Maybe once we have a "core exports x only to std" mechanism, these can be
- * in std.
- */
+//! Raw concurrency primitives you know and love.
+//!
+//! These primitives are not recommended for general use, but are provided for
+//! specialized use cases. Prefer the types at the top level of the `sync`
+//! crate, which wrap values directly and provide safer abstractions for
+//! containing data.
 
 use std::cast;
 use std::comm;
 use std::kinds::marker;
 use std::mem::replace;
-use std::sync::arc::UnsafeArc;
 use std::sync::atomics;
 use std::unstable::finally::Finally;
 
-use arc::MutexArc;
+use mutex;
 
 /****************************************************************************
  * Internals
  ****************************************************************************/
 
-pub mod mutex;
-pub mod one;
-mod mpsc_intrusive;
-
 // Each waiting task receives on one of these.
-#[doc(hidden)]
 type WaitEnd = Receiver<()>;
-#[doc(hidden)]
 type SignalEnd = Sender<()>;
 // A doubly-ended queue of waiting tasks.
-#[doc(hidden)]
-struct WaitQueue { head: Receiver<SignalEnd>,
-                   tail: Sender<SignalEnd> }
+struct WaitQueue {
+    head: Receiver<SignalEnd>,
+    tail: Sender<SignalEnd>,
+}
 
 impl WaitQueue {
     fn new() -> WaitQueue {
@@ -90,33 +82,49 @@ impl WaitQueue {
 }
 
 // The building-block used to make semaphores, mutexes, and rwlocks.
-struct SemInner<Q> {
+struct Sem<Q> {
     lock: mutex::Mutex,
+    // N.B.: we need `Sem` to be `Share`, but the `WaitQueue` type is not
+    //      `Send`/`Share` (for good reason). We maintain an internal invariant
+    //      on this semaphore, however, that the queue is never accessed
+    //      outside of a locked context. For this reason, we shove the queue
+    //      behind a raw pointer, which will be inferred to be `Share`.
+    //
+    // FIXME: this requires an extra allocation, which is bad.
+    inner: *()
+}
+
+struct SemInner<Q> {
     count: int,
-    waiters:   WaitQueue,
+    waiters: WaitQueue,
     // Can be either unit or another waitqueue. Some sems shouldn't come with
     // a condition variable attached, others should.
-    blocked:   Q
+    blocked: Q,
 }
 
-struct Sem<Q>(UnsafeArc<SemInner<Q>>);
+#[must_use]
+struct SemGuard<'a, Q> {
+    sem: &'a Sem<Q>,
+}
 
-#[doc(hidden)]
-impl<Q:Send> Sem<Q> {
+impl<Q: Send> Sem<Q> {
     fn new(count: int, q: Q) -> Sem<Q> {
-        Sem(UnsafeArc::new(SemInner {
-            count: count,
-            waiters: WaitQueue::new(),
-            blocked: q,
+        let inner = unsafe {
+            cast::transmute(~SemInner {
+                waiters: WaitQueue::new(),
+                count: count,
+                blocked: q,
+            })
+        };
+        Sem {
             lock: mutex::Mutex::new(),
-        }))
+            inner: inner,
+        }
     }
 
     unsafe fn with(&self, f: |&mut SemInner<Q>|) {
-        let Sem(ref arc) = *self;
-        let state = arc.get();
-        let _g = (*state).lock.lock();
-        f(cast::transmute(state));
+        let _g = self.lock.lock();
+        f(&mut *(self.inner as *mut SemInner<Q>))
     }
 
     pub fn acquire(&self) {
@@ -130,7 +138,8 @@ impl<Q:Send> Sem<Q> {
                     waiter_nobe = Some(state.waiters.wait_end());
                 }
             });
-            // Uncomment if you wish to test for sem races. Not valgrind-friendly.
+            // Uncomment if you wish to test for sem races. Not
+            // valgrind-friendly.
             /* for _ in range(0, 1000) { task::deschedule(); } */
             // Need to wait outside the exclusive.
             if waiter_nobe.is_some() {
@@ -150,24 +159,42 @@ impl<Q:Send> Sem<Q> {
         }
     }
 
-    pub fn access<U>(&self, blk: || -> U) -> U {
-        (|| {
-            self.acquire();
-            blk()
-        }).finally(|| {
-            self.release();
-        })
+    pub fn access<'a>(&'a self) -> SemGuard<'a, Q> {
+        self.acquire();
+        SemGuard { sem: self }
     }
 }
 
-#[doc(hidden)]
-impl Sem<Vec<WaitQueue> > {
-    fn new_and_signal(count: int, num_condvars: uint)
-        -> Sem<Vec<WaitQueue> > {
+#[unsafe_destructor]
+impl<Q: Send> Drop for Sem<Q> {
+    fn drop(&mut self) {
+        let _waiters: ~SemInner<Q> = unsafe { cast::transmute(self.inner) };
+        self.inner = 0 as *();
+    }
+}
+
+#[unsafe_destructor]
+impl<'a, Q: Send> Drop for SemGuard<'a, Q> {
+    fn drop(&mut self) {
+        self.sem.release();
+    }
+}
+
+impl Sem<Vec<WaitQueue>> {
+    fn new_and_signal(count: int, num_condvars: uint) -> Sem<Vec<WaitQueue>> {
         let mut queues = Vec::new();
         for _ in range(0, num_condvars) { queues.push(WaitQueue::new()); }
         Sem::new(count, queues)
     }
+
+    // The only other place that condvars get built is `RWLock::write`, whose
+    // guard threads the order lock into its condvar.
+    pub fn access_cond<'a>(&'a self) -> SemCondGuard<'a> {
+        SemCondGuard {
+            guard: self.access(),
+            cvar: Condvar { sem: self, order: Nothing, nopod: marker::NoPod },
+        }
+    }
 }
 
 // FIXME(#3598): Want to use an Option down below, but we need a custom enum
@@ -195,27 +222,23 @@ pub struct Condvar<'a> {
 }
 
 impl<'a> Condvar<'a> {
-    /**
-     * Atomically drop the associated lock, and block until a signal is sent.
-     *
-     * # Failure
-     * A task which is killed (i.e., by linked failure with another task)
-     * while waiting on a condition variable will wake up, fail, and unlock
-     * the associated lock as it unwinds.
-     */
+    /// Atomically drop the associated lock, and block until a signal is sent.
+    ///
+    /// # Failure
+    ///
+    /// A task which is killed while waiting on a condition variable will wake
+    /// up, fail, and unlock the associated lock as it unwinds.
     pub fn wait(&self) { self.wait_on(0) }
 
-    /**
-     * As wait(), but can specify which of multiple condition variables to
-     * wait on. Only a signal_on() or broadcast_on() with the same condvar_id
-     * will wake this thread.
-     *
-     * The associated lock must have been initialised with an appropriate
-     * number of condvars. The condvar_id must be between 0 and num_condvars-1
-     * or else this call will fail.
-     *
-     * wait() is equivalent to wait_on(0).
-     */
+    /// As wait(), but can specify which of multiple condition variables to
+    /// wait on. Only a signal_on() or broadcast_on() with the same condvar_id
+    /// will wake this thread.
+    ///
+    /// The associated lock must have been initialized with an appropriate
+    /// number of condvars. The condvar_id must be between 0 and num_condvars-1
+    /// or else this call will fail.
+    ///
+    /// wait() is equivalent to wait_on(0).
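+    ///
+    /// A tiny runnable sketch of condvar ids: with nobody blocked in
+    /// `wait_on(1)`, `signal_on(1)` reports that no task was woken.
+    ///
+    /// ```rust
+    /// use sync::raw::Mutex;
+    ///
+    /// let m = Mutex::new_with_condvars(2);
+    /// let guard = m.lock();
+    /// assert!(!guard.cond.signal_on(1));
+    /// ```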
     pub fn wait_on(&self, condvar_id: uint) {
         let mut wait_end = None;
         let mut out_of_bounds = None;
@@ -248,7 +271,10 @@ impl<'a> Condvar<'a> {
             }).finally(|| {
                 // Reacquire the condvar.
                 match self.order {
-                    Just(lock) => lock.access(|| self.sem.acquire()),
+                    Just(lock) => {
+                        let _g = lock.access();
+                        self.sem.acquire();
+                    }
                     Nothing => self.sem.acquire(),
                 }
             })
@@ -309,7 +335,6 @@ impl<'a> Condvar<'a> {
 // Checks whether a condvar ID was out of bounds; fails if so, otherwise runs
 // the given success closure.
 #[inline]
-#[doc(hidden)]
 fn check_cvar_bounds<U>(
                      out_of_bounds: Option<uint>,
                      id: uint,
@@ -325,19 +350,10 @@ fn check_cvar_bounds<U>(
     }
 }
 
-#[doc(hidden)]
-impl Sem<Vec<WaitQueue> > {
-    // The only other places that condvars get built are rwlock.write_cond()
-    // and rwlock_write_mode.
-    pub fn access_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
-        self.access(|| {
-            blk(&Condvar {
-                sem: self,
-                order: Nothing,
-                nopod: marker::NoPod
-            })
-        })
-    }
+#[must_use]
+struct SemCondGuard<'a> {
+    guard: SemGuard<'a, Vec<WaitQueue>>,
+    cvar: Condvar<'a>,
 }
 
 /****************************************************************************
@@ -345,15 +361,15 @@ impl Sem<Vec<WaitQueue> > {
  ****************************************************************************/
 
 /// A counting, blocking, bounded-waiting semaphore.
-pub struct Semaphore { priv sem: Sem<()> }
-
+pub struct Semaphore {
+    priv sem: Sem<()>,
+}
 
-impl Clone for Semaphore {
-    /// Create a new handle to the semaphore.
-    fn clone(&self) -> Semaphore {
-        let Sem(ref lock) = self.sem;
-        Semaphore { sem: Sem(lock.clone()) }
-    }
+/// An RAII guard representing a resource acquired from a semaphore. When
+/// dropped, this value will release the resource back to the semaphore.
+#[must_use]
+pub struct SemaphoreGuard<'a> {
+    priv guard: SemGuard<'a, ()>,
 }
 
 impl Semaphore {
@@ -362,66 +378,64 @@ impl Semaphore {
         Semaphore { sem: Sem::new(count, ()) }
     }
 
-    /**
-     * Acquire a resource represented by the semaphore. Blocks if necessary
-     * until resource(s) become available.
-     */
-    pub fn acquire(&self) { (&self.sem).acquire() }
+    /// Acquire a resource represented by the semaphore. Blocks if necessary
+    /// until resource(s) become available.
+    pub fn acquire(&self) { self.sem.acquire() }
 
-    /**
-     * Release a held resource represented by the semaphore. Wakes a blocked
-     * contending task, if any exist. Won't block the caller.
-     */
-    pub fn release(&self) { (&self.sem).release() }
+    /// Release a held resource represented by the semaphore. Wakes a blocked
+    /// contending task, if any exist. Won't block the caller.
+    pub fn release(&self) { self.sem.release() }
 
-    /// Run a function with ownership of one of the semaphore's resources.
-    pub fn access<U>(&self, blk: || -> U) -> U { (&self.sem).access(blk) }
+    /// Acquire a resource of this semaphore, returning an RAII guard which will
+    /// release the resource when dropped.
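+    ///
+    /// A brief sketch of the RAII style this enables (paths assume the
+    /// `sync::raw` module introduced by this move):
+    ///
+    /// ```rust
+    /// use sync::raw::Semaphore;
+    ///
+    /// let sem = Semaphore::new(1);
+    /// {
+    ///     let _g = sem.access(); // one resource held here
+    /// } // resource released when `_g` is dropped
+    /// ```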
+    pub fn access<'a>(&'a self) -> SemaphoreGuard<'a> {
+        SemaphoreGuard { guard: self.sem.access() }
+    }
 }
 
 /****************************************************************************
  * Mutexes
  ****************************************************************************/
 
-/**
- * A blocking, bounded-waiting, mutual exclusion lock with an associated
- * FIFO condition variable.
- *
- * # Failure
- * A task which fails while holding a mutex will unlock the mutex as it
- * unwinds.
- */
-
-pub struct Mutex { priv sem: Sem<Vec<WaitQueue> > }
-impl Clone for Mutex {
-    /// Create a new handle to the mutex.
-    fn clone(&self) -> Mutex {
-        let Sem(ref queue) = self.sem;
-        Mutex { sem: Sem(queue.clone()) } }
+/// A blocking, bounded-waiting, mutual exclusion lock with an associated
+/// FIFO condition variable.
+///
+/// # Failure
+///
+/// A task which fails while holding a mutex will unlock the mutex as it
+/// unwinds.
+pub struct Mutex {
+    priv sem: Sem<Vec<WaitQueue>>,
+}
+
+/// An RAII structure which is used to gain access to a mutex's condition
+/// variable. Additionally, when a value of this type is dropped, the
+/// corresponding mutex is also unlocked.
+#[must_use]
+pub struct MutexGuard<'a> {
+    priv guard: SemGuard<'a, Vec<WaitQueue>>,
+    /// Inner condition variable which is connected to the outer mutex, and can
+    /// be used for atomic-unlock-and-deschedule.
+    cond: Condvar<'a>,
 }
 
 impl Mutex {
     /// Create a new mutex, with one associated condvar.
     pub fn new() -> Mutex { Mutex::new_with_condvars(1) }
 
-    /**
-    * Create a new mutex, with a specified number of associated condvars. This
-    * will allow calling wait_on/signal_on/broadcast_on with condvar IDs between
-    * 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be allowed but
-    * any operations on the condvar will fail.)
-    */
+    /// Create a new mutex, with a specified number of associated condvars. This
+    /// will allow calling wait_on/signal_on/broadcast_on with condvar IDs
+    /// between 0 and num_condvars-1. (If num_condvars is 0, locking will still
+    /// be allowed, but any operations on the condvar will fail.)
     pub fn new_with_condvars(num_condvars: uint) -> Mutex {
         Mutex { sem: Sem::new_and_signal(1, num_condvars) }
     }
 
-
-    /// Run a function with ownership of the mutex.
-    pub fn lock<U>(&self, blk: || -> U) -> U {
-        (&self.sem).access(blk)
-    }
-
-    /// Run a function with ownership of the mutex and a handle to a condvar.
-    pub fn lock_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
-        (&self.sem).access_cond(blk)
+    /// Acquires ownership of this mutex, returning an RAII guard which will
+    /// unlock the mutex when dropped. The associated condition variable can
+    /// also be accessed through the returned guard.
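+    ///
+    /// # Example
+    ///
+    /// A small sketch of guard-based locking with the attached condvar
+    /// (single task shown; `signal()` simply finds no waiters here):
+    ///
+    /// ```rust
+    /// use sync::raw::Mutex;
+    ///
+    /// let m = Mutex::new();
+    /// {
+    ///     let guard = m.lock();
+    ///     guard.cond.signal(); // wakes one task blocked in `wait()`, if any
+    /// } // mutex unlocked here as `guard` is dropped
+    /// ```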
+    pub fn lock<'a>(&'a self) -> MutexGuard<'a> {
+        let SemCondGuard { guard, cvar } = self.sem.access_cond();
+        MutexGuard { guard: guard, cond: cvar }
     }
 }
 
@@ -431,118 +445,95 @@ impl Mutex {
 
 // NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
 
-#[doc(hidden)]
-struct RWLockInner {
-    // You might ask, "Why don't you need to use an atomic for the mode flag?"
-    // This flag affects the behaviour of readers (for plain readers, they
-    // assert on it; for downgraders, they use it to decide which mode to
-    // unlock for). Consider that the flag is only unset when the very last
-    // reader exits; therefore, it can never be unset during a reader/reader
-    // (or reader/downgrader) race.
-    // By the way, if we didn't care about the assert in the read unlock path,
-    // we could instead store the mode flag in write_downgrade's stack frame,
-    // and have the downgrade tokens store a reference to it.
-    read_mode:  bool,
+/// A blocking, no-starvation, reader-writer lock with an associated condvar.
+///
+/// # Failure
+///
+/// A task which fails while holding an rwlock will unlock the rwlock as it
+/// unwinds.
+pub struct RWLock {
+    priv order_lock:  Semaphore,
+    priv access_lock: Sem<Vec<WaitQueue>>,
+
     // The only way the count flag is ever accessed is with xadd. Since it is
     // a read-modify-write operation, multiple xadds on different cores will
     // always be consistent with respect to each other, so a monotonic/relaxed
     // consistency ordering suffices (i.e., no extra barriers are needed).
+    //
     // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
     // acquire/release orderings superfluously. Change these someday.
-    read_count: atomics::AtomicUint,
+    priv read_count: atomics::AtomicUint,
 }
 
-/**
- * A blocking, no-starvation, reader-writer lock with an associated condvar.
- *
- * # Failure
- * A task which fails while holding an rwlock will unlock the rwlock as it
- * unwinds.
- */
-pub struct RWLock {
-    priv order_lock:  Semaphore,
-    priv access_lock: Sem<Vec<WaitQueue> >,
-    priv state:       UnsafeArc<RWLockInner>,
+/// An RAII helper which is created by acquiring a read lock on an RWLock. When
+/// dropped, this will unlock the RWLock.
+#[must_use]
+pub struct RWLockReadGuard<'a> {
+    priv lock: &'a RWLock,
+}
+
+/// An RAII helper which is created by acquiring a write lock on an RWLock. When
+/// dropped, this will unlock the RWLock.
+///
+/// A value of this type can also be consumed to downgrade to a read-only lock.
+#[must_use]
+pub struct RWLockWriteGuard<'a> {
+    priv lock: &'a RWLock,
+    /// Inner condition variable that is connected to the write-mode of the
+    /// outer rwlock.
+    cond: Condvar<'a>,
 }
 
 impl RWLock {
     /// Create a new rwlock, with one associated condvar.
     pub fn new() -> RWLock { RWLock::new_with_condvars(1) }
 
-    /**
-    * Create a new rwlock, with a specified number of associated condvars.
-    * Similar to mutex_with_condvars.
-    */
+    /// Create a new rwlock, with a specified number of associated condvars.
+    /// Similar to `Mutex::new_with_condvars`.
     pub fn new_with_condvars(num_condvars: uint) -> RWLock {
-        let state = UnsafeArc::new(RWLockInner {
-            read_mode:  false,
+        RWLock {
+            order_lock: Semaphore::new(1),
+            access_lock: Sem::new_and_signal(1, num_condvars),
             read_count: atomics::AtomicUint::new(0),
-        });
-        RWLock { order_lock:  Semaphore::new(1),
-                access_lock: Sem::new_and_signal(1, num_condvars),
-                state:       state, }
-    }
-
-    /// Create a new handle to the rwlock.
-    pub fn clone(&self) -> RWLock {
-        let Sem(ref access_lock_queue) = self.access_lock;
-        RWLock { order_lock:  (&(self.order_lock)).clone(),
-                 access_lock: Sem(access_lock_queue.clone()),
-                 state:       self.state.clone() }
-    }
-
-    /**
-     * Run a function with the rwlock in read mode. Calls to 'read' from other
-     * tasks may run concurrently with this one.
-     */
-    pub fn read<U>(&self, blk: || -> U) -> U {
-        unsafe {
-            (&self.order_lock).access(|| {
-                let state = &mut *self.state.get();
-                let old_count = state.read_count.fetch_add(1, atomics::Acquire);
-                if old_count == 0 {
-                    (&self.access_lock).acquire();
-                    state.read_mode = true;
-                }
-            });
-            (|| {
-                blk()
-            }).finally(|| {
-                let state = &mut *self.state.get();
-                assert!(state.read_mode);
-                let old_count = state.read_count.fetch_sub(1, atomics::Release);
-                assert!(old_count > 0);
-                if old_count == 1 {
-                    state.read_mode = false;
-                    // Note: this release used to be outside of a locked access
-                    // to exclusive-protected state. If this code is ever
-                    // converted back to such (instead of using atomic ops),
-                    // this access MUST NOT go inside the exclusive access.
-                    (&self.access_lock).release();
-                }
-            })
         }
     }
 
-    /**
-     * Run a function with the rwlock in write mode. No calls to 'read' or
-     * 'write' from other tasks will run concurrently with this one.
-     */
-    pub fn write<U>(&self, blk: || -> U) -> U {
-        (&self.order_lock).acquire();
-        (&self.access_lock).access(|| {
-            (&self.order_lock).release();
-            blk()
-        })
-    }
+    /// Acquires a read-lock, returning an RAII guard that will unlock the lock
+    /// when dropped. Calls to 'read' from other tasks may run concurrently with
+    /// this one.
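+    ///
+    /// A short sketch (one task shown; the guards may just as well live in
+    /// different tasks):
+    ///
+    /// ```rust
+    /// use sync::raw::RWLock;
+    ///
+    /// let lock = RWLock::new();
+    /// {
+    ///     let _r1 = lock.read();
+    ///     let _r2 = lock.read(); // a second reader may enter concurrently
+    /// } // both read locks released
+    /// ```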
+    pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
+        let _guard = self.order_lock.access();
+        let old_count = self.read_count.fetch_add(1, atomics::Acquire);
+        if old_count == 0 {
+            self.access_lock.acquire();
+        }
+        RWLockReadGuard { lock: self }
+    }
+
+    /// Acquire a write-lock, returning an RAII guard that will unlock the lock
+    /// when dropped. No calls to 'read' or 'write' from other tasks will run
+    /// concurrently with this one.
+    ///
+    /// You can also downgrade a write to a read by calling the `downgrade`
+    /// method on the returned guard. Additionally, the guard will contain a
+    /// `Condvar` attached to this lock.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use sync::raw::RWLock;
+    ///
+    /// let lock = RWLock::new();
+    /// let write = lock.write();
+    /// // ... exclusive access ...
+    /// let read = write.downgrade();
+    /// // ... shared access ...
+    /// drop(read);
+    /// ```
+    pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a> {
+        let _g = self.order_lock.access();
+        self.access_lock.acquire();
 
-    /**
-     * As write(), but also with a handle to a condvar. Waiting on this
-     * condvar will allow readers and writers alike to take the rwlock before
-     * the waiting task is signalled. (Note: a writer that waited and then
-     * was signalled might reacquire the lock before other waiting writers.)
-     */
-    pub fn write_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
         // It's important to thread our order lock into the condvar, so that
         // when a cond.wait() wakes up, it uses it while reacquiring the
         // access lock. If we permitted a waking-up writer to "cut in line",
@@ -569,188 +560,60 @@ impl RWLock {
         // which can't happen until T2 finishes the downgrade-read entirely.
         // The astute reader will also note that making waking writers use the
         // order_lock is better for not starving readers.
-        (&self.order_lock).acquire();
-        (&self.access_lock).access_cond(|cond| {
-            (&self.order_lock).release();
-            let opt_lock = Just(&self.order_lock);
-            blk(&Condvar { sem: cond.sem, order: opt_lock,
-                           nopod: marker::NoPod })
-        })
-    }
-
-    /**
-     * As write(), but with the ability to atomically 'downgrade' the lock;
-     * i.e., to become a reader without letting other writers get the lock in
-     * the meantime (such as unlocking and then re-locking as a reader would
-     * do). The block takes a "write mode token" argument, which can be
-     * transformed into a "read mode token" by calling downgrade(). Example:
-     *
-     * # Example
-     *
-     * ```rust
-     * use sync::RWLock;
-     *
-     * let lock = RWLock::new();
-     * lock.write_downgrade(|mut write_token| {
-     *     write_token.write_cond(|condvar| {
-     *         // ... exclusive access ...
-     *     });
-     *     let read_token = lock.downgrade(write_token);
-     *     read_token.read(|| {
-     *         // ... shared access ...
-     *     })
-     * })
-     * ```
-     */
-    pub fn write_downgrade<U>(&self, blk: |v: RWLockWriteMode| -> U) -> U {
-        // Implementation slightly different from the slicker 'write's above.
-        // The exit path is conditional on whether the caller downgrades.
-        (&self.order_lock).acquire();
-        (&self.access_lock).acquire();
-        (&self.order_lock).release();
-        (|| {
-            blk(RWLockWriteMode { lock: self, nopod: marker::NoPod })
-        }).finally(|| {
-            let writer_or_last_reader;
-            // Check if we're releasing from read mode or from write mode.
-            let state = unsafe { &mut *self.state.get() };
-            if state.read_mode {
-                // Releasing from read mode.
-                let old_count = state.read_count.fetch_sub(1, atomics::Release);
-                assert!(old_count > 0);
-                // Check if other readers remain.
-                if old_count == 1 {
-                    // Case 1: Writer downgraded & was the last reader
-                    writer_or_last_reader = true;
-                    state.read_mode = false;
-                } else {
-                    // Case 2: Writer downgraded & was not the last reader
-                    writer_or_last_reader = false;
-                }
-            } else {
-                // Case 3: Writer did not downgrade
-                writer_or_last_reader = true;
-            }
-            if writer_or_last_reader {
-                // Nobody left inside; release the "reader cloud" lock.
-                (&self.access_lock).release();
-            }
-        })
-    }
-
-    /// To be called inside of the write_downgrade block.
-    pub fn downgrade<'a>(&self, token: RWLockWriteMode<'a>)
-                         -> RWLockReadMode<'a> {
-        if !((self as *RWLock) == (token.lock as *RWLock)) {
-            fail!("Can't downgrade() with a different rwlock's write_mode!");
-        }
-        unsafe {
-            let state = &mut *self.state.get();
-            assert!(!state.read_mode);
-            state.read_mode = true;
-            // If a reader attempts to enter at this point, both the
-            // downgrader and reader will set the mode flag. This is fine.
-            let old_count = state.read_count.fetch_add(1, atomics::Release);
-            // If another reader was already blocking, we need to hand-off
-            // the "reader cloud" access lock to them.
-            if old_count != 0 {
-                // Guaranteed not to let another writer in, because
-                // another reader was holding the order_lock. Hence they
-                // must be the one to get the access_lock (because all
-                // access_locks are acquired with order_lock held). See
-                // the comment in write_cond for more justification.
-                (&self.access_lock).release();
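+        // Hand back a guard whose condvar is threaded with the order lock,
+        // per the reasoning above.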
+        RWLockWriteGuard {
+            lock: self,
+            cond: Condvar {
+                sem: &self.access_lock,
+                order: Just(&self.order_lock),
+                nopod: marker::NoPod,
             }
         }
-        RWLockReadMode { lock: token.lock, nopod: marker::NoPod }
     }
 }
 
-/// The "write permission" token used for rwlock.write_downgrade().
-
-pub struct RWLockWriteMode<'a> { priv lock: &'a RWLock, priv nopod: marker::NoPod }
-/// The "read permission" token used for rwlock.write_downgrade().
-pub struct RWLockReadMode<'a> { priv lock: &'a RWLock,
-                                   priv nopod: marker::NoPod }
-
-impl<'a> RWLockWriteMode<'a> {
-    /// Access the pre-downgrade rwlock in write mode.
-    pub fn write<U>(&self, blk: || -> U) -> U { blk() }
-    /// Access the pre-downgrade rwlock in write mode with a condvar.
-    pub fn write_cond<U>(&self, blk: |c: &Condvar| -> U) -> U {
-        // Need to make the condvar use the order lock when reacquiring the
-        // access lock. See comment in RWLock::write_cond for why.
-        blk(&Condvar { sem:        &self.lock.access_lock,
-                       order: Just(&self.lock.order_lock),
-                       nopod: marker::NoPod })
+impl<'a> RWLockWriteGuard<'a> {
+    /// Consumes this write lock and converts it into a read lock.
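+    ///
+    /// A minimal sketch of the intended use, mirroring the example on
+    /// `write` above:
+    ///
+    /// ```rust
+    /// use sync::raw::RWLock;
+    ///
+    /// let lock = RWLock::new();
+    /// let write = lock.write();
+    /// // ... exclusive access ...
+    /// let read = write.downgrade();
+    /// // ... shared access, with no window for other writers in between ...
+    /// drop(read);
+    /// ```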
+    pub fn downgrade(self) -> RWLockReadGuard<'a> {
+        let lock = self.lock;
+        // Don't run the destructor of the write guard; we're in charge of
+        // things from now on.
+        unsafe { cast::forget(self) }
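+        // (Forgetting `self` skips RWLockWriteGuard's Drop impl, which would
+        // otherwise release the access lock we still logically hold.)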
+
+        let old_count = lock.read_count.fetch_add(1, atomics::Release);
+        // If another reader was already blocking, we need to hand-off
+        // the "reader cloud" access lock to them.
+        if old_count != 0 {
+            // Guaranteed not to let another writer in, because
+            // another reader was holding the order_lock. Hence they
+            // must be the one to get the access_lock (because all
+            // access_locks are acquired with order_lock held). See
+            // the comment in `write` for more justification.
+            lock.access_lock.release();
+        }
+        RWLockReadGuard { lock: lock }
     }
 }
 
-impl<'a> RWLockReadMode<'a> {
-    /// Access the post-downgrade rwlock in read mode.
-    pub fn read<U>(&self, blk: || -> U) -> U { blk() }
-}
-
-/// A barrier enables multiple tasks to synchronize the beginning
-/// of some computation.
-///
-/// ```rust
-/// use sync::Barrier;
-///
-/// let barrier = Barrier::new(10);
-/// for _ in range(0, 10) {
-///     let c = barrier.clone();
-///     // The same messages will be printed together.
-///     // You will NOT see any interleaving.
-///     spawn(proc() {
-///         println!("before wait");
-///         c.wait();
-///         println!("after wait");
-///     });
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct Barrier {
-    priv arc: MutexArc<BarrierState>,
-    priv num_tasks: uint,
-}
-
-// The inner state of a double barrier
-struct BarrierState {
-    count: uint,
-    generation_id: uint,
+#[unsafe_destructor]
+impl<'a> Drop for RWLockWriteGuard<'a> {
+    fn drop(&mut self) {
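+        // A writer holds the access lock exclusively, so releasing it is the
+        // only teardown a write guard needs.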
+        self.lock.access_lock.release();
+    }
 }
 
-impl Barrier {
-    /// Create a new barrier that can block a given number of tasks.
-    pub fn new(num_tasks: uint) -> Barrier {
-        Barrier {
-            arc: MutexArc::new(BarrierState {
-                count: 0,
-                generation_id: 0,
-            }),
-            num_tasks: num_tasks,
+#[unsafe_destructor]
+impl<'a> Drop for RWLockReadGuard<'a> {
+    fn drop(&mut self) {
+        let old_count = self.lock.read_count.fetch_sub(1, atomics::Release);
+        assert!(old_count > 0);
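+        // fetch_sub returns the pre-decrement value, so seeing 1 here means
+        // this guard was the last reader inside the lock.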
+        if old_count == 1 {
+            // Note: this release used to sit outside of a locked access to
+            // the exclusive-protected state. If this code is ever converted
+            // back to such a scheme (instead of using atomic ops), this
+            // release MUST NOT move inside the exclusive access.
+            self.lock.access_lock.release();
         }
     }
-
-    /// Block the current task until a certain number of tasks is waiting.
-    pub fn wait(&self) {
-        self.arc.access_cond(|state, cond| {
-            let local_gen = state.generation_id;
-            state.count += 1;
-            if state.count < self.num_tasks {
-                // We need a while loop to guard against spurious wakeups.
-                // http://en.wikipedia.org/wiki/Spurious_wakeup
-                while local_gen == state.generation_id && state.count < self.num_tasks {
-                    cond.wait();
-                }
-            } else {
-                state.count = 0;
-                state.generation_id += 1;
-                cond.broadcast();
-            }
-        });
-    }
 }
 
 /****************************************************************************
@@ -759,12 +622,12 @@ impl Barrier {
 
 #[cfg(test)]
 mod tests {
-    use sync::{Semaphore, Mutex, RWLock, Barrier, Condvar};
+    use arc::Arc;
+    use super::{Semaphore, Mutex, RWLock, Condvar};
 
     use std::cast;
     use std::result;
     use std::task;
-    use std::comm::Empty;
 
     /************************************************************************
      * Semaphore tests
@@ -779,26 +642,24 @@ mod tests {
     #[test]
     fn test_sem_basic() {
         let s = Semaphore::new(1);
-        s.access(|| { })
+        let _g = s.access();
     }
     #[test]
     fn test_sem_as_mutex() {
-        let s = Semaphore::new(1);
+        let s = Arc::new(Semaphore::new(1));
         let s2 = s.clone();
         task::spawn(proc() {
-            s2.access(|| {
-                for _ in range(0, 5) { task::deschedule(); }
-            })
-        });
-        s.access(|| {
+            let _g = s2.access();
             for _ in range(0, 5) { task::deschedule(); }
-        })
+        });
+        let _g = s.access();
+        for _ in range(0, 5) { task::deschedule(); }
     }
     #[test]
     fn test_sem_as_cvar() {
         /* Child waits and parent signals */
         let (tx, rx) = channel();
-        let s = Semaphore::new(0);
+        let s = Arc::new(Semaphore::new(0));
         let s2 = s.clone();
         task::spawn(proc() {
             s2.acquire();
@@ -810,7 +671,7 @@ mod tests {
 
         /* Parent waits and child signals */
         let (tx, rx) = channel();
-        let s = Semaphore::new(0);
+        let s = Arc::new(Semaphore::new(0));
         let s2 = s.clone();
         task::spawn(proc() {
             for _ in range(0, 5) { task::deschedule(); }
@@ -824,40 +685,37 @@ mod tests {
     fn test_sem_multi_resource() {
         // Parent and child both get in the critical section at the same
         // time, and shake hands.
-        let s = Semaphore::new(2);
+        let s = Arc::new(Semaphore::new(2));
         let s2 = s.clone();
         let (tx1, rx1) = channel();
         let (tx2, rx2) = channel();
         task::spawn(proc() {
-            s2.access(|| {
-                let _ = rx2.recv();
-                tx1.send(());
-            })
+            let _g = s2.access();
+            let _ = rx2.recv();
+            tx1.send(());
         });
-        s.access(|| {
-            tx2.send(());
-            let _ = rx1.recv();
-        })
+        let _g = s.access();
+        tx2.send(());
+        let _ = rx1.recv();
     }
     #[test]
     fn test_sem_runtime_friendly_blocking() {
         // Force the runtime to schedule two threads on the same sched_loop.
         // When one blocks, it should schedule the other one.
-        let s = Semaphore::new(1);
+        let s = Arc::new(Semaphore::new(1));
         let s2 = s.clone();
         let (tx, rx) = channel();
-        let mut child_data = Some((s2, tx));
-        s.access(|| {
-            let (s2, tx) = child_data.take_unwrap();
+        {
+            let _g = s.access();
             task::spawn(proc() {
                 tx.send(());
-                s2.access(|| { });
+                drop(s2.access());
                 tx.send(());
             });
-            let _ = rx.recv(); // wait for child to come alive
+            rx.recv(); // wait for child to come alive
             for _ in range(0, 5) { task::deschedule(); } // let the child contend
-        });
-        let _ = rx.recv(); // wait for child to be done
+        }
+        rx.recv(); // wait for child to be done
     }
     /************************************************************************
      * Mutex tests
@@ -867,93 +725,90 @@ mod tests {
         // Unsafely achieve shared state, and do the textbook
         // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance.
         let (tx, rx) = channel();
-        let m = Mutex::new();
+        let m = Arc::new(Mutex::new());
         let m2 = m.clone();
         let mut sharedstate = ~0;
         {
-            let ptr: *int = &*sharedstate;
+            let ptr: *mut int = &mut *sharedstate;
             task::spawn(proc() {
-                let sharedstate: &mut int =
-                    unsafe { cast::transmute(ptr) };
-                access_shared(sharedstate, &m2, 10);
+                access_shared(ptr, &m2, 10);
                 tx.send(());
             });
         }
         {
-            access_shared(sharedstate, &m, 10);
+            access_shared(&mut *sharedstate, &m, 10);
             let _ = rx.recv();
 
             assert_eq!(*sharedstate, 20);
         }
 
-        fn access_shared(sharedstate: &mut int, m: &Mutex, n: uint) {
+        fn access_shared(sharedstate: *mut int, m: &Arc<Mutex>, n: uint) {
             for _ in range(0, n) {
-                m.lock(|| {
-                    let oldval = *sharedstate;
-                    task::deschedule();
-                    *sharedstate = oldval + 1;
-                })
+                let _g = m.lock();
+                let oldval = unsafe { *sharedstate };
+                task::deschedule();
+                unsafe { *sharedstate = oldval + 1; }
             }
         }
     }
     #[test]
     fn test_mutex_cond_wait() {
-        let m = Mutex::new();
+        let m = Arc::new(Mutex::new());
 
         // Child wakes up parent
-        m.lock_cond(|cond| {
+        {
+            let lock = m.lock();
             let m2 = m.clone();
             task::spawn(proc() {
-                m2.lock_cond(|cond| {
-                    let woken = cond.signal();
-                    assert!(woken);
-                })
+                let lock = m2.lock();
+                let woken = lock.cond.signal();
+                assert!(woken);
             });
-            cond.wait();
-        });
+            lock.cond.wait();
+        }
         // Parent wakes up child
         let (tx, rx) = channel();
         let m3 = m.clone();
         task::spawn(proc() {
-            m3.lock_cond(|cond| {
-                tx.send(());
-                cond.wait();
-                tx.send(());
-            })
+            let lock = m3.lock();
+            tx.send(());
+            lock.cond.wait();
+            tx.send(());
         });
-        let _ = rx.recv(); // Wait until child gets in the mutex
-        m.lock_cond(|cond| {
-            let woken = cond.signal();
+        rx.recv(); // Wait until child gets in the mutex
+        {
+            let lock = m.lock();
+            let woken = lock.cond.signal();
             assert!(woken);
-        });
-        let _ = rx.recv(); // Wait until child wakes up
+        }
+        rx.recv(); // Wait until child wakes up
     }
-    #[cfg(test)]
+
     fn test_mutex_cond_broadcast_helper(num_waiters: uint) {
-        let m = Mutex::new();
-        let mut rxs = vec!();
+        let m = Arc::new(Mutex::new());
+        let mut rxs = Vec::new();
 
         for _ in range(0, num_waiters) {
             let mi = m.clone();
             let (tx, rx) = channel();
             rxs.push(rx);
             task::spawn(proc() {
-                mi.lock_cond(|cond| {
-                    tx.send(());
-                    cond.wait();
-                    tx.send(());
-                })
+                let lock = mi.lock();
+                tx.send(());
+                lock.cond.wait();
+                tx.send(());
             });
         }
 
         // wait until all children get in the mutex
-        for rx in rxs.mut_iter() { let _ = rx.recv(); }
-        m.lock_cond(|cond| {
-            let num_woken = cond.broadcast();
+        for rx in rxs.mut_iter() { rx.recv(); }
+        {
+            let lock = m.lock();
+            let num_woken = lock.cond.broadcast();
             assert_eq!(num_woken, num_waiters);
-        });
+        }
         // wait until all children wake up
-        for rx in rxs.mut_iter() { let _ = rx.recv(); }
+        for rx in rxs.mut_iter() { rx.recv(); }
     }
     #[test]
     fn test_mutex_cond_broadcast() {
@@ -965,61 +820,57 @@ mod tests {
     }
     #[test]
     fn test_mutex_cond_no_waiter() {
-        let m = Mutex::new();
+        let m = Arc::new(Mutex::new());
         let m2 = m.clone();
         let _ = task::try(proc() {
-            m.lock_cond(|_x| { })
+            drop(m.lock());
         });
-        m2.lock_cond(|cond| {
-            assert!(!cond.signal());
-        })
+        let lock = m2.lock();
+        assert!(!lock.cond.signal());
     }
     #[test]
     fn test_mutex_killed_simple() {
         use std::any::Any;
 
         // Mutex must get automatically unlocked if failed/killed within.
-        let m = Mutex::new();
+        let m = Arc::new(Mutex::new());
         let m2 = m.clone();
 
         let result: result::Result<(), ~Any> = task::try(proc() {
-            m2.lock(|| {
-                fail!();
-            })
+            let _lock = m2.lock();
+            fail!();
         });
         assert!(result.is_err());
         // child task must have finished by the time try returns
-        m.lock(|| { })
+        drop(m.lock());
     }
     #[test]
     fn test_mutex_cond_signal_on_0() {
         // Tests that signal_on(0) is equivalent to signal().
-        let m = Mutex::new();
-        m.lock_cond(|cond| {
-            let m2 = m.clone();
-            task::spawn(proc() {
-                m2.lock_cond(|cond| {
-                    cond.signal_on(0);
-                })
-            });
-            cond.wait();
-        })
+        let m = Arc::new(Mutex::new());
+        let lock = m.lock();
+        let m2 = m.clone();
+        task::spawn(proc() {
+            let lock = m2.lock();
+            lock.cond.signal_on(0);
+        });
+        lock.cond.wait();
     }
     #[test]
     fn test_mutex_no_condvars() {
         let result = task::try(proc() {
             let m = Mutex::new_with_condvars(0);
-            m.lock_cond(|cond| { cond.wait(); })
+            m.lock().cond.wait();
         });
         assert!(result.is_err());
         let result = task::try(proc() {
             let m = Mutex::new_with_condvars(0);
-            m.lock_cond(|cond| { cond.signal(); })
+            m.lock().cond.signal();
         });
         assert!(result.is_err());
         let result = task::try(proc() {
             let m = Mutex::new_with_condvars(0);
-            m.lock_cond(|cond| { cond.broadcast(); })
+            m.lock().cond.broadcast();
         });
         assert!(result.is_err());
     }
@@ -1029,23 +880,16 @@ mod tests {
     #[cfg(test)]
     pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead }
     #[cfg(test)]
-    fn lock_rwlock_in_mode(x: &RWLock, mode: RWLockMode, blk: ||) {
+    fn lock_rwlock_in_mode(x: &Arc<RWLock>, mode: RWLockMode, blk: ||) {
         match mode {
-            Read => x.read(blk),
-            Write => x.write(blk),
-            Downgrade =>
-                x.write_downgrade(|mode| {
-                    mode.write(|| { blk() });
-                }),
-            DowngradeRead =>
-                x.write_downgrade(|mode| {
-                    let mode = x.downgrade(mode);
-                    mode.read(|| { blk() });
-                }),
+            Read => { let _g = x.read(); blk() }
+            Write => { let _g = x.write(); blk() }
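+            // A downgrade now consumes the write guard, so Downgrade reduces
+            // to a plain write here, while DowngradeRead downgrades before
+            // running `blk`.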
+            Downgrade => { let _g = x.write(); blk() }
+            DowngradeRead => { let _g = x.write().downgrade(); blk() }
         }
     }
     #[cfg(test)]
-    fn test_rwlock_exclusion(x: &RWLock,
+    fn test_rwlock_exclusion(x: Arc<RWLock>,
                              mode1: RWLockMode,
                              mode2: RWLockMode) {
         // Test mutual exclusion between readers and writers. Just like the
@@ -1063,14 +907,14 @@ mod tests {
             });
         }
         {
-            access_shared(sharedstate, x, mode2, 10);
+            access_shared(sharedstate, &x, mode2, 10);
             let _ = rx.recv();
 
             assert_eq!(*sharedstate, 20);
         }
 
-        fn access_shared(sharedstate: &mut int, x: &RWLock, mode: RWLockMode,
-                         n: uint) {
+        fn access_shared(sharedstate: &mut int, x: &Arc<RWLock>,
+                         mode: RWLockMode, n: uint) {
             for _ in range(0, n) {
                 lock_rwlock_in_mode(x, mode, || {
                     let oldval = *sharedstate;
@@ -1082,132 +926,127 @@ mod tests {
     }
     #[test]
     fn test_rwlock_readers_wont_modify_the_data() {
-        test_rwlock_exclusion(&RWLock::new(), Read, Write);
-        test_rwlock_exclusion(&RWLock::new(), Write, Read);
-        test_rwlock_exclusion(&RWLock::new(), Read, Downgrade);
-        test_rwlock_exclusion(&RWLock::new(), Downgrade, Read);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Read);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Downgrade);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Read);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write);
     }
     #[test]
     fn test_rwlock_writers_and_writers() {
-        test_rwlock_exclusion(&RWLock::new(), Write, Write);
-        test_rwlock_exclusion(&RWLock::new(), Write, Downgrade);
-        test_rwlock_exclusion(&RWLock::new(), Downgrade, Write);
-        test_rwlock_exclusion(&RWLock::new(), Downgrade, Downgrade);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Downgrade);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write);
+        test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade);
     }
     #[cfg(test)]
-    fn test_rwlock_handshake(x: &RWLock,
-                                 mode1: RWLockMode,
-                                 mode2: RWLockMode,
-                                 make_mode2_go_first: bool) {
+    fn test_rwlock_handshake(x: Arc<RWLock>,
+                             mode1: RWLockMode,
+                             mode2: RWLockMode,
+                             make_mode2_go_first: bool) {
         // Much like sem_multi_resource.
         let x2 = x.clone();
         let (tx1, rx1) = channel();
         let (tx2, rx2) = channel();
         task::spawn(proc() {
             if !make_mode2_go_first {
-                let _ = rx2.recv(); // parent sends to us once it locks, or ...
+                rx2.recv(); // parent sends to us once it locks, or ...
             }
             lock_rwlock_in_mode(&x2, mode2, || {
                 if make_mode2_go_first {
                     tx1.send(()); // ... we send to it once we lock
                 }
-                let _ = rx2.recv();
+                rx2.recv();
                 tx1.send(());
             })
         });
         if make_mode2_go_first {
-            let _ = rx1.recv(); // child sends to us once it locks, or ...
+            rx1.recv(); // child sends to us once it locks, or ...
         }
-        lock_rwlock_in_mode(x, mode1, || {
+        lock_rwlock_in_mode(&x, mode1, || {
             if !make_mode2_go_first {
                 tx2.send(()); // ... we send to it once we lock
             }
             tx2.send(());
-            let _ = rx1.recv();
+            rx1.recv();
         })
     }
     #[test]
     fn test_rwlock_readers_and_readers() {
-        test_rwlock_handshake(&RWLock::new(), Read, Read, false);
+        test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false);
         // The downgrader needs to get in before the reader gets in, otherwise
         // they cannot end up reading at the same time.
-        test_rwlock_handshake(&RWLock::new(), DowngradeRead, Read, false);
-        test_rwlock_handshake(&RWLock::new(), Read, DowngradeRead, true);
+        test_rwlock_handshake(Arc::new(RWLock::new()), DowngradeRead, Read, false);
+        test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true);
         // Two downgrade_reads can never both end up reading at the same time.
     }
     #[test]
     fn test_rwlock_downgrade_unlock() {
         // Tests that downgrade can unlock the lock in both modes
-        let x = RWLock::new();
+        let x = Arc::new(RWLock::new());
         lock_rwlock_in_mode(&x, Downgrade, || { });
-        test_rwlock_handshake(&x, Read, Read, false);
-        let y = RWLock::new();
+        test_rwlock_handshake(x, Read, Read, false);
+        let y = Arc::new(RWLock::new());
         lock_rwlock_in_mode(&y, DowngradeRead, || { });
-        test_rwlock_exclusion(&y, Write, Write);
+        test_rwlock_exclusion(y, Write, Write);
     }
     #[test]
     fn test_rwlock_read_recursive() {
         let x = RWLock::new();
-        x.read(|| { x.read(|| { }) })
+        let _g1 = x.read();
+        let _g2 = x.read();
     }
     #[test]
     fn test_rwlock_cond_wait() {
         // As test_mutex_cond_wait above.
-        let x = RWLock::new();
+        let x = Arc::new(RWLock::new());
 
         // Child wakes up parent
-        x.write_cond(|cond| {
+        {
+            let lock = x.write();
             let x2 = x.clone();
             task::spawn(proc() {
-                x2.write_cond(|cond| {
-                    let woken = cond.signal();
-                    assert!(woken);
-                })
+                let lock = x2.write();
+                assert!(lock.cond.signal());
             });
-            cond.wait();
-        });
+            lock.cond.wait();
+        }
         // Parent wakes up child
         let (tx, rx) = channel();
         let x3 = x.clone();
         task::spawn(proc() {
-            x3.write_cond(|cond| {
-                tx.send(());
-                cond.wait();
-                tx.send(());
-            })
-        });
-        let _ = rx.recv(); // Wait until child gets in the rwlock
-        x.read(|| { }); // Must be able to get in as a reader in the meantime
-        x.write_cond(|cond| { // Or as another writer
-            let woken = cond.signal();
-            assert!(woken);
+            let lock = x3.write();
+            tx.send(());
+            lock.cond.wait();
+            tx.send(());
         });
-        let _ = rx.recv(); // Wait until child wakes up
-        x.read(|| { }); // Just for good measure
+        rx.recv(); // Wait until child gets in the rwlock
+        drop(x.read()); // Must be able to get in as a reader
+        {
+            let x = x.write();
+            assert!(x.cond.signal());
+        }
+        rx.recv(); // Wait until child wakes up
+        drop(x.read()); // Just for good measure
     }
     #[cfg(test)]
-    fn test_rwlock_cond_broadcast_helper(num_waiters: uint,
-                                             dg1: bool,
-                                             dg2: bool) {
+    fn test_rwlock_cond_broadcast_helper(num_waiters: uint) {
-        // Much like the mutex broadcast test. Downgrade-enabled.
+        // Much like the mutex broadcast test.
-        fn lock_cond(x: &RWLock, downgrade: bool, blk: |c: &Condvar|) {
-            if downgrade {
-                x.write_downgrade(|mode| {
-                    mode.write_cond(|c| { blk(c) });
-                });
-            } else {
-                x.write_cond(|c| { blk(c) });
-            }
+        fn lock_cond(x: &Arc<RWLock>, blk: |c: &Condvar|) {
+            let lock = x.write();
+            blk(&lock.cond);
         }
-        let x = RWLock::new();
-        let mut rxs = vec!();
+
+        let x = Arc::new(RWLock::new());
+        let mut rxs = Vec::new();
 
         for _ in range(0, num_waiters) {
             let xi = x.clone();
             let (tx, rx) = channel();
             rxs.push(rx);
             task::spawn(proc() {
-                lock_cond(&xi, dg1, |cond| {
+                lock_cond(&xi, |cond| {
                     tx.send(());
                     cond.wait();
                     tx.send(());
@@ -1217,7 +1056,7 @@ mod tests {
 
         // wait until all children get in the mutex
         for rx in rxs.mut_iter() { let _ = rx.recv(); }
-        lock_cond(&x, dg2, |cond| {
+        lock_cond(&x, |cond| {
             let num_woken = cond.broadcast();
             assert_eq!(num_woken, num_waiters);
         });
@@ -1226,21 +1065,15 @@ mod tests {
     }
     #[test]
     fn test_rwlock_cond_broadcast() {
-        test_rwlock_cond_broadcast_helper(0, true, true);
-        test_rwlock_cond_broadcast_helper(0, true, false);
-        test_rwlock_cond_broadcast_helper(0, false, true);
-        test_rwlock_cond_broadcast_helper(0, false, false);
-        test_rwlock_cond_broadcast_helper(12, true, true);
-        test_rwlock_cond_broadcast_helper(12, true, false);
-        test_rwlock_cond_broadcast_helper(12, false, true);
-        test_rwlock_cond_broadcast_helper(12, false, false);
+        test_rwlock_cond_broadcast_helper(0);
+        test_rwlock_cond_broadcast_helper(12);
     }
     #[cfg(test)]
     fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) {
         use std::any::Any;
 
-        // Mutex must get automatically unlocked if failed/killed within.
+        // RWLock must get automatically unlocked if failed/killed within.
-        let x = RWLock::new();
+        let x = Arc::new(RWLock::new());
         let x2 = x.clone();
 
         let result: result::Result<(), ~Any> = task::try(proc() {
@@ -1283,48 +1116,4 @@ mod tests {
         rwlock_kill_helper(Downgrade, DowngradeRead);
         rwlock_kill_helper(Downgrade, DowngradeRead);
     }
-    #[test] #[should_fail]
-    fn test_rwlock_downgrade_cant_swap() {
-        // Tests that you can't downgrade with a different rwlock's token.
-        let x = RWLock::new();
-        let y = RWLock::new();
-        x.write_downgrade(|xwrite| {
-            let mut xopt = Some(xwrite);
-            y.write_downgrade(|_ywrite| {
-                y.downgrade(xopt.take_unwrap());
-                error!("oops, y.downgrade(x) should have failed!");
-            })
-        })
-    }
-
-    /************************************************************************
-     * Barrier tests
-     ************************************************************************/
-    #[test]
-    fn test_barrier() {
-        let barrier = Barrier::new(10);
-        let (tx, rx) = channel();
-
-        for _ in range(0, 9) {
-            let c = barrier.clone();
-            let tx = tx.clone();
-            spawn(proc() {
-                c.wait();
-                tx.send(true);
-            });
-        }
-
-        // At this point, all spawned tasks should be blocked,
-        // so we shouldn't get anything from the port
-        assert!(match rx.try_recv() {
-            Empty => true,
-            _ => false,
-        });
-
-        barrier.wait();
-        // Now, the barrier is cleared and we should get data.
-        for _ in range(0, 9) {
-            rx.recv();
-        }
-    }
 }