author     Steven Stewart-Gallus <sstewartgallus00@mylangara.bc.ca>   2013-07-22 13:57:40 -0700
committer  Steven Stewart-Gallus <sstewartgallus00@mylangara.bc.ca>   2013-07-27 22:06:29 -0700
commit     d0b7515aedcaa161bb206e651a374d7ff27e52a7
tree       9497eea70b9db687a4d6bd4f84bcefa836451b1d
parent     3078e83c3f1a643ddbdefa78095e4fbda3cecc02
Change concurrency primitives to follow standard naming conventions
To be more specific:

`UPPERCASETYPE` was changed to `UppercaseType`
`type_new` was changed to `Type::new`
`type_function(value)` was changed to `value.method()`
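
As a quick orientation before the diff, a few representative call sites taken from the patch below illustrate the three rules (new form on each line, old form in the trailing comment). This is an excerpt of the changes that follow, not a standalone program:

~~~ {.rust}
// UPPERCASETYPE -> UppercaseType
let local_arc : Arc<~[float]> = port.recv();   // was: ARC<~[float]>

// type_new -> Type::new
let numbers_arc = Arc::new(numbers);           // was: ARC(numbers)
let m = ~Mutex::new_with_condvars(2);          // was: ~mutex_with_condvars(2)
let s = ~Semaphore::new(1);                    // was: ~semaphore(1)

// type_function(value) -> value.method()
state.waiters.signal();                        // was: signal_waitqueue(&state.waiters)
~~~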
-rw-r--r--  doc/po/tutorial-tasks.md.pot  34
-rw-r--r--  doc/tutorial-tasks.md  34
-rw-r--r--  src/libextra/arc.rs  220
-rw-r--r--  src/libextra/sync.rs  334
-rw-r--r--  src/libextra/workcache.rs  24
-rw-r--r--  src/libstd/comm.rs  4
-rw-r--r--  src/libstd/rt/kill.rs  4
-rw-r--r--  src/libstd/rt/message_queue.rs  4
-rw-r--r--  src/libstd/rt/sleeper_list.rs  4
-rw-r--r--  src/libstd/rt/uv/uvio.rs  4
-rw-r--r--  src/libstd/rt/work_queue.rs  4
-rw-r--r--  src/libstd/task/mod.rs  2
-rw-r--r--  src/libstd/task/spawn.rs  10
-rw-r--r--  src/libstd/unstable/sync.rs  100
-rw-r--r--  src/libstd/vec.rs  6
-rw-r--r--  src/test/bench/graph500-bfs.rs  6
-rw-r--r--  src/test/bench/msgsend-ring-mutex-arcs.rs  8
-rw-r--r--  src/test/bench/msgsend-ring-rw-arcs.rs  8
-rw-r--r--  src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/arc-rw-state-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/no-capture-arc.rs  2
-rw-r--r--  src/test/compile-fail/no-reuse-move-arc.rs  2
-rw-r--r--  src/test/compile-fail/once-cant-call-twice-on-heap.rs  2
-rw-r--r--  src/test/compile-fail/once-cant-call-twice-on-stack.rs  2
-rw-r--r--  src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs  2
-rw-r--r--  src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs  2
-rw-r--r--  src/test/compile-fail/sync-cond-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs  2
-rw-r--r--  src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs  2
-rw-r--r--  src/test/run-fail/issue-2444.rs  2
-rw-r--r--  src/test/run-pass/bind-by-move.rs  4
-rw-r--r--  src/test/run-pass/match-ref-binding-in-guard-3256.rs  2
-rw-r--r--  src/test/run-pass/once-move-out-on-heap.rs  2
-rw-r--r--  src/test/run-pass/once-move-out-on-stack.rs  2
-rw-r--r--  src/test/run-pass/trait-bounds-in-arc.rs  10
-rw-r--r--  src/test/run-pass/writealias.rs  2
41 files changed, 435 insertions(+), 431 deletions(-)
diff --git a/doc/po/tutorial-tasks.md.pot b/doc/po/tutorial-tasks.md.pot
index 43c0f4c0924..1315dbf65b2 100644
--- a/doc/po/tutorial-tasks.md.pot
+++ b/doc/po/tutorial-tasks.md.pot
@@ -135,7 +135,7 @@ msgstr ""
 #. type: Bullet: '* '
 #: doc/tutorial-tasks.md:56
 msgid ""
-"[`extra::arc`] - The ARC (atomically reference counted) type, for safely "
+"[`extra::arc`] - The Arc (atomically reference counted) type, for safely "
 "sharing immutable data,"
 msgstr ""
 
@@ -597,7 +597,7 @@ msgstr ""
 
 #. type: Plain text
 #: doc/tutorial-tasks.md:338
-msgid "## Sharing immutable data without copy: ARC"
+msgid "## Sharing immutable data without copy: Arc"
 msgstr ""
 
 #. type: Plain text
@@ -613,18 +613,18 @@ msgstr ""
 #: doc/tutorial-tasks.md:347
 msgid ""
 "To tackle this issue, one can use an Atomically Reference Counted wrapper "
-"(`ARC`) as implemented in the `extra` library of Rust. With an ARC, the data "
-"will no longer be copied for each task. The ARC acts as a reference to the "
+"(`Arc`) as implemented in the `extra` library of Rust. With an Arc, the data "
+"will no longer be copied for each task. The Arc acts as a reference to the "
 "shared data and only this reference is shared and cloned."
 msgstr ""
 
 #. type: Plain text
 #: doc/tutorial-tasks.md:355
 msgid ""
-"Here is a small example showing how to use ARCs. We wish to run concurrently "
+"Here is a small example showing how to use Arcs. We wish to run concurrently "
 "several computations on a single large vector of floats. Each task needs the "
 "full vector to perform its duty.  ~~~ # use std::vec; # use std::uint; # use "
-"std::rand; use extra::arc::ARC;"
+"std::rand; use extra::arc::Arc;"
 msgstr ""
 
 #. type: Plain text
@@ -648,7 +648,7 @@ msgstr ""
 #. type: Plain text
 #: doc/tutorial-tasks.md:365
 #, no-wrap
-msgid "    let numbers_arc = ARC(numbers);\n"
+msgid "    let numbers_arc = Arc::new(numbers);\n"
 msgstr ""
 
 #. type: Plain text
@@ -665,7 +665,7 @@ msgstr ""
 #, no-wrap
 msgid ""
 "        do spawn {\n"
-"            let local_arc : ARC<~[float]> = port.recv();\n"
+"            let local_arc : Arc<~[float]> = port.recv();\n"
 "            let task_numbers = local_arc.get();\n"
 "            println(fmt!(\"%u-norm = %?\", num, pnorm(task_numbers, num)));\n"
 "        }\n"
@@ -679,23 +679,23 @@ msgstr ""
 msgid ""
 "The function `pnorm` performs a simple computation on the vector (it "
 "computes the sum of its items at the power given as argument and takes the "
-"inverse power of this value). The ARC on the vector is created by the line "
-"~~~ # use extra::arc::ARC; # use std::vec; # use std::rand; # let numbers = "
+"inverse power of this value). The Arc on the vector is created by the line "
+"~~~ # use extra::arc::Arc; # use std::vec; # use std::rand; # let numbers = "
 "vec::from_fn(1000000, |_| rand::random::<float>()); let "
-"numbers_arc=ARC(numbers); ~~~ and a clone of it is sent to each task ~~~ # "
-"use extra::arc::ARC; # use std::vec; # use std::rand; # let numbers=vec::"
+"numbers_arc=Arc::new(numbers); ~~~ and a clone of it is sent to each task ~~~ # "
+"use extra::arc::Arc; # use std::vec; # use std::rand; # let numbers=vec::"
 "from_fn(1000000, |_| rand::random::<float>()); # let numbers_arc = "
-"ARC(numbers); # let (port, chan)  = stream(); chan.send(numbers_arc."
+"Arc::new(numbers); # let (port, chan)  = stream(); chan.send(numbers_arc."
 "clone()); ~~~ copying only the wrapper and not its contents."
 msgstr ""
 
 #. type: Plain text
 #: doc/tutorial-tasks.md:414
 msgid ""
-"Each task recovers the underlying data by ~~~ # use extra::arc::ARC; # use "
+"Each task recovers the underlying data by ~~~ # use extra::arc::Arc; # use "
 "std::vec; # use std::rand; # let numbers=vec::from_fn(1000000, |_| rand::"
-"random::<float>()); # let numbers_arc=ARC(numbers); # let (port, chan)  = "
-"stream(); # chan.send(numbers_arc.clone()); # let local_arc : ARC<~[float]> "
+"random::<float>()); # let numbers_arc=Arc::new(numbers); # let (port, chan)  = "
+"stream(); # chan.send(numbers_arc.clone()); # let local_arc : Arc<~[float]> "
 "= port.recv(); let task_numbers = local_arc.get(); ~~~ and can use it as if "
 "it were local."
 msgstr ""
@@ -703,7 +703,7 @@ msgstr ""
 #. type: Plain text
 #: doc/tutorial-tasks.md:416
 msgid ""
-"The `arc` module also implements ARCs around mutable data that are not "
+"The `arc` module also implements Arcs around mutable data that are not "
 "covered here."
 msgstr ""
 
diff --git a/doc/tutorial-tasks.md b/doc/tutorial-tasks.md
index d302916025c..2b48bd63fff 100644
--- a/doc/tutorial-tasks.md
+++ b/doc/tutorial-tasks.md
@@ -50,7 +50,7 @@ concurrency at this writing:
 * [`std::pipes`] - The underlying messaging infrastructure,
 * [`extra::comm`] - Additional messaging types based on `std::pipes`,
 * [`extra::sync`] - More exotic synchronization tools, including locks,
-* [`extra::arc`] - The ARC (atomically reference counted) type,
+* [`extra::arc`] - The Arc (atomically reference counted) type,
   for safely sharing immutable data,
 * [`extra::future`] - A type representing values that may be computed concurrently and retrieved at a later time.
 
@@ -334,24 +334,24 @@ fn main() {
 }
 ~~~
 
-## Sharing immutable data without copy: ARC
+## Sharing immutable data without copy: Arc
 
 To share immutable data between tasks, a first approach would be to only use pipes as we have seen
 previously. A copy of the data to share would then be made for each task. In some cases, this would
 add up to a significant amount of wasted memory and would require copying the same data more than
 necessary.
 
-To tackle this issue, one can use an Atomically Reference Counted wrapper (`ARC`) as implemented in
-the `extra` library of Rust. With an ARC, the data will no longer be copied for each task. The ARC
+To tackle this issue, one can use an Atomically Reference Counted wrapper (`Arc`) as implemented in
+the `extra` library of Rust. With an Arc, the data will no longer be copied for each task. The Arc
 acts as a reference to the shared data and only this reference is shared and cloned.
 
-Here is a small example showing how to use ARCs. We wish to run concurrently several computations on
+Here is a small example showing how to use Arcs. We wish to run concurrently several computations on
 a single large vector of floats. Each task needs the full vector to perform its duty.
 ~~~
 # use std::vec;
 # use std::uint;
 # use std::rand;
-use extra::arc::ARC;
+use extra::arc::Arc;
 
 fn pnorm(nums: &~[float], p: uint) -> float {
     nums.iter().fold(0.0, |a,b| a+(*b).pow(&(p as float)) ).pow(&(1f / (p as float)))
@@ -361,14 +361,14 @@ fn main() {
     let numbers = vec::from_fn(1000000, |_| rand::random::<float>());
     println(fmt!("Inf-norm = %?",  *numbers.iter().max().unwrap()));
 
-    let numbers_arc = ARC(numbers);
+    let numbers_arc = Arc::new(numbers);
 
     for uint::range(1,10) |num| {
         let (port, chan)  = stream();
         chan.send(numbers_arc.clone());
 
         do spawn {
-            let local_arc : ARC<~[float]> = port.recv();
+            let local_arc : Arc<~[float]> = port.recv();
             let task_numbers = local_arc.get();
             println(fmt!("%u-norm = %?", num, pnorm(task_numbers, num)));
         }
@@ -377,22 +377,22 @@ fn main() {
 ~~~
 
 The function `pnorm` performs a simple computation on the vector (it computes the sum of its items
-at the power given as argument and takes the inverse power of this value). The ARC on the vector is
+at the power given as argument and takes the inverse power of this value). The Arc on the vector is
 created by the line
 ~~~
-# use extra::arc::ARC;
+# use extra::arc::Arc;
 # use std::vec;
 # use std::rand;
 # let numbers = vec::from_fn(1000000, |_| rand::random::<float>());
-let numbers_arc=ARC(numbers);
+let numbers_arc=Arc::new(numbers);
 ~~~
 and a clone of it is sent to each task
 ~~~
-# use extra::arc::ARC;
+# use extra::arc::Arc;
 # use std::vec;
 # use std::rand;
 # let numbers=vec::from_fn(1000000, |_| rand::random::<float>());
-# let numbers_arc = ARC(numbers);
+# let numbers_arc = Arc::new(numbers);
 # let (port, chan)  = stream();
 chan.send(numbers_arc.clone());
 ~~~
@@ -400,19 +400,19 @@ copying only the wrapper and not its contents.
 
 Each task recovers the underlying data by
 ~~~
-# use extra::arc::ARC;
+# use extra::arc::Arc;
 # use std::vec;
 # use std::rand;
 # let numbers=vec::from_fn(1000000, |_| rand::random::<float>());
-# let numbers_arc=ARC(numbers);
+# let numbers_arc=Arc::new(numbers);
 # let (port, chan)  = stream();
 # chan.send(numbers_arc.clone());
-# let local_arc : ARC<~[float]> = port.recv();
+# let local_arc : Arc<~[float]> = port.recv();
 let task_numbers = local_arc.get();
 ~~~
 and can use it as if it were local.
 
-The `arc` module also implements ARCs around mutable data that are not covered here.
+The `arc` module also implements Arcs around mutable data that are not covered here.
 
 # Handling task failure
 
diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs
index 7a8d6950120..ebcfd9770eb 100644
--- a/src/libextra/arc.rs
+++ b/src/libextra/arc.rs
@@ -15,13 +15,13 @@
  * # Example
  *
  * In this example, a large vector of floats is shared between several tasks.
- * With simple pipes, without ARC, a copy would have to be made for each task.
+ * With simple pipes, without Arc, a copy would have to be made for each task.
  *
  * ~~~ {.rust}
  * extern mod std;
  * use extra::arc;
  * let numbers=vec::from_fn(100, |ind| (ind as float)*rand::random());
- * let shared_numbers=arc::ARC(numbers);
+ * let shared_numbers=arc::Arc::new(numbers);
  *
  *   for 10.times {
  *       let (port, chan)  = stream();
@@ -41,7 +41,7 @@
 
 
 use sync;
-use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars};
+use sync::{Mutex, RWLock};
 
 use std::cast;
 use std::unstable::sync::UnsafeAtomicRcBox;
@@ -56,12 +56,12 @@ pub struct Condvar<'self> {
 }
 
 impl<'self> Condvar<'self> {
-    /// Atomically exit the associated ARC and block until a signal is sent.
+    /// Atomically exit the associated Arc and block until a signal is sent.
     #[inline]
     pub fn wait(&self) { self.wait_on(0) }
 
     /**
-     * Atomically exit the associated ARC and block on a specified condvar
+     * Atomically exit the associated Arc and block on a specified condvar
      * until a signal is sent on that same condvar (as sync::cond.wait_on).
      *
      * wait() is equivalent to wait_on(0).
@@ -104,37 +104,38 @@ impl<'self> Condvar<'self> {
 }
 
 /****************************************************************************
- * Immutable ARC
+ * Immutable Arc
  ****************************************************************************/
 
 /// An atomically reference counted wrapper for shared immutable state.
-pub struct ARC<T> { priv x: UnsafeAtomicRcBox<T> }
+pub struct Arc<T> { priv x: UnsafeAtomicRcBox<T> }
 
-/// Create an atomically reference counted wrapper.
-pub fn ARC<T:Freeze + Send>(data: T) -> ARC<T> {
-    ARC { x: UnsafeAtomicRcBox::new(data) }
-}
 
 /**
  * Access the underlying data in an atomically reference counted
  * wrapper.
  */
-impl<T:Freeze+Send> ARC<T> {
+impl<T:Freeze+Send> Arc<T> {
+    /// Create an atomically reference counted wrapper.
+    pub fn new(data: T) -> Arc<T> {
+        Arc { x: UnsafeAtomicRcBox::new(data) }
+    }
+
     pub fn get<'a>(&'a self) -> &'a T {
         unsafe { &*self.x.get_immut() }
     }
 
     /**
-     * Retrieve the data back out of the ARC. This function blocks until the
+     * Retrieve the data back out of the Arc. This function blocks until the
      * reference given to it is the last existing one, and then unwrap the data
      * instead of destroying it.
      *
      * If multiple tasks call unwrap, all but the first will fail. Do not call
-     * unwrap from a task that holds another reference to the same ARC; it is
+     * unwrap from a task that holds another reference to the same Arc; it is
      * guaranteed to deadlock.
      */
     pub fn unwrap(self) -> T {
-        let ARC { x: x } = self;
+        let Arc { x: x } = self;
         unsafe { x.unwrap() }
     }
 }
@@ -146,47 +147,48 @@ impl<T:Freeze+Send> ARC<T> {
  * object. However, one of the `arc` objects can be sent to another task,
  * allowing them to share the underlying data.
  */
-impl<T:Freeze + Send> Clone for ARC<T> {
-    fn clone(&self) -> ARC<T> {
-        ARC { x: self.x.clone() }
+impl<T:Freeze + Send> Clone for Arc<T> {
+    fn clone(&self) -> Arc<T> {
+        Arc { x: self.x.clone() }
     }
 }
 
 /****************************************************************************
- * Mutex protected ARC (unsafe)
+ * Mutex protected Arc (unsafe)
  ****************************************************************************/
 
 #[doc(hidden)]
-struct MutexARCInner<T> { priv lock: Mutex, priv failed: bool, priv data: T }
-/// An ARC with mutable data protected by a blocking mutex.
-struct MutexARC<T> { priv x: UnsafeAtomicRcBox<MutexARCInner<T>> }
+struct MutexArcInner<T> { priv lock: Mutex, priv failed: bool, priv data: T }
+/// An Arc with mutable data protected by a blocking mutex.
+struct MutexArc<T> { priv x: UnsafeAtomicRcBox<MutexArcInner<T>> }
 
-/// Create a mutex-protected ARC with the supplied data.
-pub fn MutexARC<T:Send>(user_data: T) -> MutexARC<T> {
-    mutex_arc_with_condvars(user_data, 1)
-}
-/**
- * Create a mutex-protected ARC with the supplied data and a specified number
- * of condvars (as sync::mutex_with_condvars).
- */
-pub fn mutex_arc_with_condvars<T:Send>(user_data: T,
-                                    num_condvars: uint) -> MutexARC<T> {
-    let data =
-        MutexARCInner { lock: mutex_with_condvars(num_condvars),
-                          failed: false, data: user_data };
-    MutexARC { x: UnsafeAtomicRcBox::new(data) }
-}
 
-impl<T:Send> Clone for MutexARC<T> {
-    /// Duplicate a mutex-protected ARC, as arc::clone.
-    fn clone(&self) -> MutexARC<T> {
+impl<T:Send> Clone for MutexArc<T> {
+    /// Duplicate a mutex-protected Arc, as arc::clone.
+    fn clone(&self) -> MutexArc<T> {
         // NB: Cloning the underlying mutex is not necessary. Its reference
         // count would be exactly the same as the shared state's.
-        MutexARC { x: self.x.clone() }
+        MutexArc { x: self.x.clone() }
     }
 }
 
-impl<T:Send> MutexARC<T> {
+impl<T:Send> MutexArc<T> {
+    /// Create a mutex-protected Arc with the supplied data.
+    pub fn new(user_data: T) -> MutexArc<T> {
+        MutexArc::new_with_condvars(user_data, 1)
+    }
+
+    /**
+     * Create a mutex-protected Arc with the supplied data and a specified number
+     * of condvars (as sync::Mutex::new_with_condvars).
+     */
+    pub fn new_with_condvars(user_data: T,
+                             num_condvars: uint) -> MutexArc<T> {
+        let data =
+            MutexArcInner { lock: Mutex::new_with_condvars(num_condvars),
+                           failed: false, data: user_data };
+        MutexArc { x: UnsafeAtomicRcBox::new(data) }
+    }
 
     /**
      * Access the underlying mutable data with mutual exclusion from other
@@ -195,10 +197,10 @@ impl<T:Send> MutexARC<T> {
      * finishes running.
      *
      * The reason this function is 'unsafe' is because it is possible to
-     * construct a circular reference among multiple ARCs by mutating the
+     * construct a circular reference among multiple Arcs by mutating the
      * underlying data. This creates potential for deadlock, but worse, this
-     * will guarantee a memory leak of all involved ARCs. Using mutex ARCs
-     * inside of other ARCs is safe in absence of circular references.
+     * will guarantee a memory leak of all involved Arcs. Using mutex Arcs
+     * inside of other Arcs is safe in absence of circular references.
      *
      * If you wish to nest mutex_arcs, one strategy for ensuring safety at
      * runtime is to add a "nesting level counter" inside the stored data, and
@@ -206,8 +208,8 @@ impl<T:Send> MutexARC<T> {
      *
      * # Failure
      *
-     * Failing while inside the ARC will unlock the ARC while unwinding, so
-     * that other tasks won't block forever. It will also poison the ARC:
+     * Failing while inside the Arc will unlock the Arc while unwinding, so
+     * that other tasks won't block forever. It will also poison the Arc:
      * any tasks that subsequently try to access it (including those already
      * blocked on the mutex) will also fail immediately.
      */
@@ -247,11 +249,11 @@ impl<T:Send> MutexARC<T> {
      * Will additionally fail if another task has failed while accessing the arc.
      */
     pub fn unwrap(self) -> T {
-        let MutexARC { x: x } = self;
+        let MutexArc { x: x } = self;
         let inner = unsafe { x.unwrap() };
-        let MutexARCInner { failed: failed, data: data, _ } = inner;
+        let MutexArcInner { failed: failed, data: data, _ } = inner;
         if failed {
-            fail!(~"Can't unwrap poisoned MutexARC - another task failed inside!");
+            fail!(~"Can't unwrap poisoned MutexArc - another task failed inside!");
         }
         data
     }
@@ -263,7 +265,7 @@ impl<T:Send> MutexARC<T> {
 fn check_poison(is_mutex: bool, failed: bool) {
     if failed {
         if is_mutex {
-            fail!("Poisoned MutexARC - another task failed inside!");
+            fail!("Poisoned MutexArc - another task failed inside!");
         } else {
             fail!("Poisoned rw_arc - another task failed inside!");
         }
@@ -294,60 +296,58 @@ fn PoisonOnFail<'r>(failed: &'r mut bool) -> PoisonOnFail {
 }
 
 /****************************************************************************
- * R/W lock protected ARC
+ * R/W lock protected Arc
  ****************************************************************************/
 
 #[doc(hidden)]
-struct RWARCInner<T> { priv lock: RWlock, priv failed: bool, priv data: T }
+struct RWArcInner<T> { priv lock: RWLock, priv failed: bool, priv data: T }
 /**
- * A dual-mode ARC protected by a reader-writer lock. The data can be accessed
+ * A dual-mode Arc protected by a reader-writer lock. The data can be accessed
  * mutably or immutably, and immutably-accessing tasks may run concurrently.
  *
  * Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
  */
 #[no_freeze]
-struct RWARC<T> {
-    priv x: UnsafeAtomicRcBox<RWARCInner<T>>,
+struct RWArc<T> {
+    priv x: UnsafeAtomicRcBox<RWArcInner<T>>,
 }
 
-/// Create a reader/writer ARC with the supplied data.
-pub fn RWARC<T:Freeze + Send>(user_data: T) -> RWARC<T> {
-    rw_arc_with_condvars(user_data, 1)
-}
-/**
- * Create a reader/writer ARC with the supplied data and a specified number
- * of condvars (as sync::rwlock_with_condvars).
- */
-pub fn rw_arc_with_condvars<T:Freeze + Send>(
-    user_data: T,
-    num_condvars: uint) -> RWARC<T>
-{
-    let data =
-        RWARCInner { lock: rwlock_with_condvars(num_condvars),
-                     failed: false, data: user_data };
-    RWARC { x: UnsafeAtomicRcBox::new(data), }
-}
-
-impl<T:Freeze + Send> RWARC<T> {
-    /// Duplicate a rwlock-protected ARC, as arc::clone.
-    pub fn clone(&self) -> RWARC<T> {
-        RWARC {
+impl<T:Freeze + Send> RWArc<T> {
+    /// Duplicate a rwlock-protected Arc, as arc::clone.
+    pub fn clone(&self) -> RWArc<T> {
+        RWArc {
             x: self.x.clone(),
         }
     }
 
 }
 
-impl<T:Freeze + Send> RWARC<T> {
+impl<T:Freeze + Send> RWArc<T> {
+    /// Create a reader/writer Arc with the supplied data.
+    pub fn new(user_data: T) -> RWArc<T> {
+        RWArc::new_with_condvars(user_data, 1)
+    }
+
+    /**
+     * Create a reader/writer Arc with the supplied data and a specified number
+     * of condvars (as sync::RWLock::new_with_condvars).
+     */
+    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWArc<T> {
+        let data =
+            RWArcInner { lock: RWLock::new_with_condvars(num_condvars),
+                        failed: false, data: user_data };
+        RWArc { x: UnsafeAtomicRcBox::new(data), }
+    }
+
     /**
      * Access the underlying data mutably. Locks the rwlock in write mode;
      * other readers and writers will block.
      *
      * # Failure
      *
-     * Failing while inside the ARC will unlock the ARC while unwinding, so
-     * that other tasks won't block forever. As MutexARC.access, it will also
-     * poison the ARC, so subsequent readers and writers will both also fail.
+     * Failing while inside the Arc will unlock the Arc while unwinding, so
+     * that other tasks won't block forever. As MutexArc.access, it will also
+     * poison the Arc, so subsequent readers and writers will both also fail.
      */
     #[inline]
     pub fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
@@ -385,8 +385,8 @@ impl<T:Freeze + Send> RWARC<T> {
      *
      * # Failure
      *
-     * Failing will unlock the ARC while unwinding. However, unlike all other
-     * access modes, this will not poison the ARC.
+     * Failing will unlock the Arc while unwinding. However, unlike all other
+     * access modes, this will not poison the Arc.
      */
     pub fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
         unsafe {
@@ -467,11 +467,11 @@ impl<T:Freeze + Send> RWARC<T> {
      * in write mode.
      */
     pub fn unwrap(self) -> T {
-        let RWARC { x: x, _ } = self;
+        let RWArc { x: x, _ } = self;
         let inner = unsafe { x.unwrap() };
-        let RWARCInner { failed: failed, data: data, _ } = inner;
+        let RWArcInner { failed: failed, data: data, _ } = inner;
         if failed {
-            fail!(~"Can't unwrap poisoned RWARC - another task failed inside!")
+            fail!(~"Can't unwrap poisoned RWArc - another task failed inside!")
         }
         data
     }
@@ -481,25 +481,25 @@ impl<T:Freeze + Send> RWARC<T> {
 // lock it. This wraps the unsafety, with the justification that the 'lock'
 // field is never overwritten; only 'failed' and 'data'.
 #[doc(hidden)]
-fn borrow_rwlock<T:Freeze + Send>(state: *mut RWARCInner<T>) -> *RWlock {
+fn borrow_rwlock<T:Freeze + Send>(state: *mut RWArcInner<T>) -> *RWLock {
     unsafe { cast::transmute(&(*state).lock) }
 }
 
-/// The "write permission" token used for RWARC.write_downgrade().
+/// The "write permission" token used for RWArc.write_downgrade().
 pub struct RWWriteMode<'self, T> {
     data: &'self mut T,
-    token: sync::RWlockWriteMode<'self>,
+    token: sync::RWLockWriteMode<'self>,
     poison: PoisonOnFail,
 }
 
-/// The "read permission" token used for RWARC.write_downgrade().
+/// The "read permission" token used for RWArc.write_downgrade().
 pub struct RWReadMode<'self, T> {
     data: &'self T,
-    token: sync::RWlockReadMode<'self>,
+    token: sync::RWLockReadMode<'self>,
 }
 
 impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
-    /// Access the pre-downgrade RWARC in write mode.
+    /// Access the pre-downgrade RWArc in write mode.
     pub fn write<U>(&mut self, blk: &fn(x: &mut T) -> U) -> U {
         match *self {
             RWWriteMode {
@@ -514,7 +514,7 @@ impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
         }
     }
 
-    /// Access the pre-downgrade RWARC in write mode with a condvar.
+    /// Access the pre-downgrade RWArc in write mode with a condvar.
     pub fn write_cond<'x, 'c, U>(&mut self,
                                  blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
                                  -> U {
@@ -570,7 +570,7 @@ mod tests {
     #[test]
     fn manually_share_arc() {
         let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-        let arc_v = ARC(v);
+        let arc_v = Arc::new(v);
 
         let (p, c) = comm::stream();
 
@@ -578,7 +578,7 @@ mod tests {
             let p = comm::PortSet::new();
             c.send(p.chan());
 
-            let arc_v : ARC<~[int]> = p.recv();
+            let arc_v : Arc<~[int]> = p.recv();
 
             let v = (*arc_v.get()).clone();
             assert_eq!(v[3], 4);
@@ -596,7 +596,7 @@ mod tests {
     #[test]
     fn test_mutex_arc_condvar() {
         unsafe {
-            let arc = ~MutexARC(false);
+            let arc = ~MutexArc::new(false);
             let arc2 = ~arc.clone();
             let (p,c) = comm::oneshot();
             let (c,p) = (Cell::new(c), Cell::new(p));
@@ -620,7 +620,7 @@ mod tests {
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_arc_condvar_poison() {
         unsafe {
-            let arc = ~MutexARC(1);
+            let arc = ~MutexArc::new(1);
             let arc2 = ~arc.clone();
             let (p, c) = comm::stream();
 
@@ -644,7 +644,7 @@ mod tests {
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_mutex_arc_poison() {
         unsafe {
-            let arc = ~MutexARC(1);
+            let arc = ~MutexArc::new(1);
             let arc2 = ~arc.clone();
             do task::try || {
                 do arc2.access |one| {
@@ -658,7 +658,7 @@ mod tests {
     }
     #[test] #[should_fail] #[ignore(cfg(windows))]
     pub fn test_mutex_arc_unwrap_poison() {
-        let arc = MutexARC(1);
+        let arc = MutexArc::new(1);
         let arc2 = ~(&arc).clone();
         let (p, c) = comm::stream();
         do task::spawn {
@@ -675,7 +675,7 @@ mod tests {
     }
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_rw_arc_poison_wr() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.write |one| {
@@ -688,7 +688,7 @@ mod tests {
     }
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_rw_arc_poison_ww() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.write |one| {
@@ -701,7 +701,7 @@ mod tests {
     }
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_rw_arc_poison_dw() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.write_downgrade |mut write_mode| {
@@ -716,7 +716,7 @@ mod tests {
     }
     #[test] #[ignore(cfg(windows))]
     fn test_rw_arc_no_poison_rr() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.read |one| {
@@ -729,7 +729,7 @@ mod tests {
     }
     #[test] #[ignore(cfg(windows))]
     fn test_rw_arc_no_poison_rw() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.read |one| {
@@ -742,7 +742,7 @@ mod tests {
     }
     #[test] #[ignore(cfg(windows))]
     fn test_rw_arc_no_poison_dr() {
-        let arc = ~RWARC(1);
+        let arc = ~RWArc::new(1);
         let arc2 = (*arc).clone();
         do task::try || {
             do arc2.write_downgrade |write_mode| {
@@ -758,7 +758,7 @@ mod tests {
     }
     #[test]
     fn test_rw_arc() {
-        let arc = ~RWARC(0);
+        let arc = ~RWArc::new(0);
         let arc2 = (*arc).clone();
         let (p,c) = comm::stream();
 
@@ -806,7 +806,7 @@ mod tests {
         // (4) tells writer and all other readers to contend as it downgrades.
         // (5) Writer attempts to set state back to 42, while downgraded task
         //     and all reader tasks assert that it's 31337.
-        let arc = ~RWARC(0);
+        let arc = ~RWArc::new(0);
 
         // Reader tasks
         let mut reader_convos = ~[];
@@ -884,10 +884,10 @@ mod tests {
         // the sync module rather than this one, but it's here because an
         // rwarc gives us extra shared state to help check for the race.
         // If you want to see this test fail, go to sync.rs and replace the
-        // line in RWlock::write_cond() that looks like:
+        // line in RWLock::write_cond() that looks like:
         //     "blk(&Condvar { order: opt_lock, ..*cond })"
         // with just "blk(cond)".
-        let x = ~RWARC(true);
+        let x = ~RWArc::new(true);
         let (wp, wc) = comm::stream();
 
         // writer task
diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs
index 632f5d7827d..743c4347a4b 100644
--- a/src/libextra/sync.rs
+++ b/src/libextra/sync.rs
@@ -19,7 +19,7 @@
 use std::borrow;
 use std::comm;
 use std::task;
-use std::unstable::sync::{Exclusive, exclusive, UnsafeAtomicRcBox};
+use std::unstable::sync::{Exclusive, UnsafeAtomicRcBox};
 use std::unstable::atomics;
 use std::util;
 
@@ -34,48 +34,47 @@ type WaitEnd = comm::PortOne<()>;
 type SignalEnd = comm::ChanOne<()>;
 // A doubly-ended queue of waiting tasks.
 #[doc(hidden)]
-struct Waitqueue { head: comm::Port<SignalEnd>,
+struct WaitQueue { head: comm::Port<SignalEnd>,
                    tail: comm::Chan<SignalEnd> }
 
-#[doc(hidden)]
-fn new_waitqueue() -> Waitqueue {
-    let (block_head, block_tail) = comm::stream();
-    Waitqueue { head: block_head, tail: block_tail }
-}
+impl WaitQueue {
+    fn new() -> WaitQueue {
+        let (block_head, block_tail) = comm::stream();
+        WaitQueue { head: block_head, tail: block_tail }
+    }
 
-// Signals one live task from the queue.
-#[doc(hidden)]
-fn signal_waitqueue(q: &Waitqueue) -> bool {
-    // The peek is mandatory to make sure recv doesn't block.
-    if q.head.peek() {
-        // Pop and send a wakeup signal. If the waiter was killed, its port
-        // will have closed. Keep trying until we get a live task.
-        if comm::try_send_one(q.head.recv(), ()) {
-            true
+    // Signals one live task from the queue.
+    fn signal(&self) -> bool {
+        // The peek is mandatory to make sure recv doesn't block.
+        if self.head.peek() {
+            // Pop and send a wakeup signal. If the waiter was killed, its port
+            // will have closed. Keep trying until we get a live task.
+            if comm::try_send_one(self.head.recv(), ()) {
+                true
+            } else {
+                self.signal()
+            }
         } else {
-            signal_waitqueue(q)
+            false
         }
-    } else {
-        false
     }
-}
 
-#[doc(hidden)]
-fn broadcast_waitqueue(q: &Waitqueue) -> uint {
-    let mut count = 0;
-    while q.head.peek() {
-        if comm::try_send_one(q.head.recv(), ()) {
-            count += 1;
+    fn broadcast(&self) -> uint {
+        let mut count = 0;
+        while self.head.peek() {
+            if comm::try_send_one(self.head.recv(), ()) {
+                count += 1;
+            }
         }
+        count
     }
-    count
 }
 
 // The building-block used to make semaphores, mutexes, and rwlocks.
 #[doc(hidden)]
 struct SemInner<Q> {
     count: int,
-    waiters:   Waitqueue,
+    waiters:   WaitQueue,
     // Can be either unit or another waitqueue. Some sems shouldn't come with
     // a condition variable attached, others should.
     blocked:   Q
@@ -84,23 +83,14 @@ struct SemInner<Q> {
 #[doc(hidden)]
 struct Sem<Q>(Exclusive<SemInner<Q>>);
 
-#[doc(hidden)]
-fn new_sem<Q:Send>(count: int, q: Q) -> Sem<Q> {
-    Sem(exclusive(SemInner {
-        count: count, waiters: new_waitqueue(), blocked: q }))
-}
-#[doc(hidden)]
-fn new_sem_and_signal(count: int, num_condvars: uint)
-        -> Sem<~[Waitqueue]> {
-    let mut queues = ~[];
-    for num_condvars.times {
-        queues.push(new_waitqueue());
-    }
-    new_sem(count, queues)
-}
 
 #[doc(hidden)]
 impl<Q:Send> Sem<Q> {
+    fn new(count: int, q: Q) -> Sem<Q> {
+        Sem(Exclusive::new(SemInner {
+            count: count, waiters: WaitQueue::new(), blocked: q }))
+    }
+
     pub fn acquire(&self) {
         unsafe {
             let mut waiter_nobe = None;
@@ -129,7 +119,7 @@ impl<Q:Send> Sem<Q> {
             do (**self).with |state| {
                 state.count += 1;
                 if state.count <= 0 {
-                    signal_waitqueue(&state.waiters);
+                    state.waiters.signal();
                 }
             }
         }
@@ -151,7 +141,16 @@ impl Sem<()> {
 }
 
 #[doc(hidden)]
-impl Sem<~[Waitqueue]> {
+impl Sem<~[WaitQueue]> {
+    fn new_and_signal(count: int, num_condvars: uint)
+        -> Sem<~[WaitQueue]> {
+        let mut queues = ~[];
+        for num_condvars.times {
+            queues.push(WaitQueue::new());
+        }
+        Sem::new(count, queues)
+    }
+
     pub fn access_waitqueue<U>(&self, blk: &fn() -> U) -> U {
         let mut release = None;
         unsafe {
@@ -168,7 +167,7 @@ impl Sem<~[Waitqueue]> {
 #[doc(hidden)]
 type SemRelease<'self> = SemReleaseGeneric<'self, ()>;
 #[doc(hidden)]
-type SemAndSignalRelease<'self> = SemReleaseGeneric<'self, ~[Waitqueue]>;
+type SemAndSignalRelease<'self> = SemReleaseGeneric<'self, ~[WaitQueue]>;
 #[doc(hidden)]
 struct SemReleaseGeneric<'self, Q> { sem: &'self Sem<Q> }
 
@@ -188,7 +187,7 @@ fn SemRelease<'r>(sem: &'r Sem<()>) -> SemRelease<'r> {
 }
 
 #[doc(hidden)]
-fn SemAndSignalRelease<'r>(sem: &'r Sem<~[Waitqueue]>)
+fn SemAndSignalRelease<'r>(sem: &'r Sem<~[WaitQueue]>)
                         -> SemAndSignalRelease<'r> {
     SemReleaseGeneric {
         sem: sem
@@ -207,7 +206,7 @@ enum ReacquireOrderLock<'self> {
 pub struct Condvar<'self> {
     // The 'Sem' object associated with this condvar. This is the one that's
     // atomically-unlocked-and-descheduled upon and reacquired during wakeup.
-    priv sem: &'self Sem<~[Waitqueue]>,
+    priv sem: &'self Sem<~[WaitQueue]>,
     // This is (can be) an extra semaphore which is held around the reacquire
     // operation on the first one. This is only used in cvars associated with
     // rwlocks, and is needed to ensure that, when a downgrader is trying to
@@ -257,7 +256,7 @@ impl<'self> Condvar<'self> {
                         // Drop the lock.
                         state.count += 1;
                         if state.count <= 0 {
-                            signal_waitqueue(&state.waiters);
+                            state.waiters.signal();
                         }
                         // Enqueue ourself to be woken up by a signaller.
                         let SignalEnd = SignalEnd.take_unwrap();
@@ -288,7 +287,7 @@ impl<'self> Condvar<'self> {
         // mutex during unwinding. As long as the wrapper (mutex, etc) is
         // bounded in when it gets released, this shouldn't hang forever.
         struct CondvarReacquire<'self> {
-            sem: &'self Sem<~[Waitqueue]>,
+            sem: &'self Sem<~[WaitQueue]>,
             order: ReacquireOrderLock<'self>,
         }
 
@@ -322,7 +321,7 @@ impl<'self> Condvar<'self> {
             let mut result = false;
             do (**self.sem).with |state| {
                 if condvar_id < state.blocked.len() {
-                    result = signal_waitqueue(&state.blocked[condvar_id]);
+                    result = state.blocked[condvar_id].signal();
                 } else {
                     out_of_bounds = Some(state.blocked.len());
                 }
@@ -347,14 +346,14 @@ impl<'self> Condvar<'self> {
                     // swap it out with the old one, and broadcast on the
                     // old one outside of the little-lock.
                     queue = Some(util::replace(&mut state.blocked[condvar_id],
-                                               new_waitqueue()));
+                                               WaitQueue::new()));
                 } else {
                     out_of_bounds = Some(state.blocked.len());
                 }
             }
             do check_cvar_bounds(out_of_bounds, condvar_id, "cond.signal_on()") {
                 let queue = queue.take_unwrap();
-                broadcast_waitqueue(&queue)
+                queue.broadcast()
             }
         }
     }
@@ -376,7 +375,7 @@ fn check_cvar_bounds<U>(out_of_bounds: Option<uint>, id: uint, act: &str,
 }
 
 #[doc(hidden)]
-impl Sem<~[Waitqueue]> {
+impl Sem<~[WaitQueue]> {
     // The only other places that condvars get built are rwlock.write_cond()
     // and rwlock_write_mode.
     pub fn access_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
@@ -393,10 +392,6 @@ impl Sem<~[Waitqueue]> {
 /// A counting, blocking, bounded-waiting semaphore.
 struct Semaphore { priv sem: Sem<()> }
 
-/// Create a new semaphore with the specified count.
-pub fn semaphore(count: int) -> Semaphore {
-    Semaphore { sem: new_sem(count, ()) }
-}
 
 impl Clone for Semaphore {
     /// Create a new handle to the semaphore.
@@ -406,6 +401,11 @@ impl Clone for Semaphore {
 }
 
 impl Semaphore {
+    /// Create a new semaphore with the specified count.
+    pub fn new(count: int) -> Semaphore {
+        Semaphore { sem: Sem::new(count, ()) }
+    }
+
     /**
      * Acquire a resource represented by the semaphore. Blocks if necessary
      * until resource(s) become available.
@@ -434,19 +434,7 @@ impl Semaphore {
  * A task which fails while holding a mutex will unlock the mutex as it
  * unwinds.
  */
-pub struct Mutex { priv sem: Sem<~[Waitqueue]> }
-
-/// Create a new mutex, with one associated condvar.
-pub fn Mutex() -> Mutex { mutex_with_condvars(1) }
-/**
- * Create a new mutex, with a specified number of associated condvars. This
- * will allow calling wait_on/signal_on/broadcast_on with condvar IDs between
- * 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be allowed but
- * any operations on the condvar will fail.)
- */
-pub fn mutex_with_condvars(num_condvars: uint) -> Mutex {
-    Mutex { sem: new_sem_and_signal(1, num_condvars) }
-}
+pub struct Mutex { priv sem: Sem<~[WaitQueue]> }
 
 impl Clone for Mutex {
     /// Create a new handle to the mutex.
@@ -454,6 +442,20 @@ impl Clone for Mutex {
 }
 
 impl Mutex {
+    /// Create a new mutex, with one associated condvar.
+    pub fn new() -> Mutex { Mutex::new_with_condvars(1) }
+
+    /**
+    * Create a new mutex, with a specified number of associated condvars. This
+    * will allow calling wait_on/signal_on/broadcast_on with condvar IDs between
+    * 0 and num_condvars-1. (If num_condvars is 0, lock_cond will be allowed but
+    * any operations on the condvar will fail.)
+    */
+    pub fn new_with_condvars(num_condvars: uint) -> Mutex {
+        Mutex { sem: Sem::new_and_signal(1, num_condvars) }
+    }
+
+
     /// Run a function with ownership of the mutex.
     pub fn lock<U>(&self, blk: &fn() -> U) -> U {
         (&self.sem).access_waitqueue(blk)
@@ -472,7 +474,7 @@ impl Mutex {
 // NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
 
 #[doc(hidden)]
-struct RWlockInner {
+struct RWLockInner {
     // You might ask, "Why don't you need to use an atomic for the mode flag?"
     // This flag affects the behaviour of readers (for plain readers, they
     // assert on it; for downgraders, they use it to decide which mode to
@@ -499,33 +501,33 @@ struct RWlockInner {
  * A task which fails while holding an rwlock will unlock the rwlock as it
  * unwinds.
  */
-pub struct RWlock {
+pub struct RWLock {
     priv order_lock:  Semaphore,
-    priv access_lock: Sem<~[Waitqueue]>,
-    priv state:       UnsafeAtomicRcBox<RWlockInner>,
+    priv access_lock: Sem<~[WaitQueue]>,
+    priv state:       UnsafeAtomicRcBox<RWLockInner>,
 }
 
-/// Create a new rwlock, with one associated condvar.
-pub fn RWlock() -> RWlock { rwlock_with_condvars(1) }
+impl RWLock {
+    /// Create a new rwlock, with one associated condvar.
+    pub fn new() -> RWLock { RWLock::new_with_condvars(1) }
 
-/**
- * Create a new rwlock, with a specified number of associated condvars.
- * Similar to mutex_with_condvars.
- */
-pub fn rwlock_with_condvars(num_condvars: uint) -> RWlock {
-    let state = UnsafeAtomicRcBox::new(RWlockInner {
-        read_mode:  false,
-        read_count: atomics::AtomicUint::new(0),
-    });
-    RWlock { order_lock:  semaphore(1),
-             access_lock: new_sem_and_signal(1, num_condvars),
-             state:       state, }
-}
+    /**
+    * Create a new rwlock, with a specified number of associated condvars.
+    * Similar to mutex_with_condvars.
+    */
+    pub fn new_with_condvars(num_condvars: uint) -> RWLock {
+        let state = UnsafeAtomicRcBox::new(RWLockInner {
+            read_mode:  false,
+            read_count: atomics::AtomicUint::new(0),
+        });
+        RWLock { order_lock:  Semaphore::new(1),
+                access_lock: Sem::new_and_signal(1, num_condvars),
+                state:       state, }
+    }
 
-impl RWlock {
     /// Create a new handle to the rwlock.
-    pub fn clone(&self) -> RWlock {
-        RWlock { order_lock:  (&(self.order_lock)).clone(),
+    pub fn clone(&self) -> RWLock {
+        RWLock { order_lock:  (&(self.order_lock)).clone(),
                  access_lock: Sem((*self.access_lock).clone()),
                  state:       self.state.clone() }
     }
@@ -546,7 +548,7 @@ impl RWlock {
                         state.read_mode = true;
                     }
                 }
-                release = Some(RWlockReleaseRead(self));
+                release = Some(RWLockReleaseRead(self));
             }
         }
         blk()
@@ -638,7 +640,7 @@ impl RWlock {
      * }
      * ~~~
      */
-    pub fn write_downgrade<U>(&self, blk: &fn(v: RWlockWriteMode) -> U) -> U {
+    pub fn write_downgrade<U>(&self, blk: &fn(v: RWLockWriteMode) -> U) -> U {
         // Implementation slightly different from the slicker 'write's above.
         // The exit path is conditional on whether the caller downgrades.
         let mut _release = None;
@@ -648,14 +650,14 @@ impl RWlock {
                 (&self.access_lock).acquire();
                 (&self.order_lock).release();
             }
-            _release = Some(RWlockReleaseDowngrade(self));
+            _release = Some(RWLockReleaseDowngrade(self));
         }
-        blk(RWlockWriteMode { lock: self })
+        blk(RWLockWriteMode { lock: self })
     }
 
     /// To be called inside of the write_downgrade block.
-    pub fn downgrade<'a>(&self, token: RWlockWriteMode<'a>)
-                         -> RWlockReadMode<'a> {
+    pub fn downgrade<'a>(&self, token: RWLockWriteMode<'a>)
+                         -> RWLockReadMode<'a> {
         if !borrow::ref_eq(self, token.lock) {
             fail!("Can't downgrade() with a different rwlock's write_mode!");
         }
@@ -679,19 +681,19 @@ impl RWlock {
                 }
             }
         }
-        RWlockReadMode { lock: token.lock }
+        RWLockReadMode { lock: token.lock }
     }
 }
 
 // FIXME(#3588) should go inside of read()
 #[doc(hidden)]
-struct RWlockReleaseRead<'self> {
-    lock: &'self RWlock,
+struct RWLockReleaseRead<'self> {
+    lock: &'self RWLock,
 }
 
 #[doc(hidden)]
 #[unsafe_destructor]
-impl<'self> Drop for RWlockReleaseRead<'self> {
+impl<'self> Drop for RWLockReleaseRead<'self> {
     fn drop(&self) {
         unsafe {
             do task::unkillable {
@@ -713,8 +715,8 @@ impl<'self> Drop for RWlockReleaseRead<'self> {
 }
 
 #[doc(hidden)]
-fn RWlockReleaseRead<'r>(lock: &'r RWlock) -> RWlockReleaseRead<'r> {
-    RWlockReleaseRead {
+fn RWLockReleaseRead<'r>(lock: &'r RWLock) -> RWLockReleaseRead<'r> {
+    RWLockReleaseRead {
         lock: lock
     }
 }
@@ -722,13 +724,13 @@ fn RWlockReleaseRead<'r>(lock: &'r RWlock) -> RWlockReleaseRead<'r> {
 // FIXME(#3588) should go inside of downgrade()
 #[doc(hidden)]
 #[unsafe_destructor]
-struct RWlockReleaseDowngrade<'self> {
-    lock: &'self RWlock,
+struct RWLockReleaseDowngrade<'self> {
+    lock: &'self RWLock,
 }
 
 #[doc(hidden)]
 #[unsafe_destructor]
-impl<'self> Drop for RWlockReleaseDowngrade<'self> {
+impl<'self> Drop for RWLockReleaseDowngrade<'self> {
     fn drop(&self) {
         unsafe {
             do task::unkillable {
@@ -762,36 +764,36 @@ impl<'self> Drop for RWlockReleaseDowngrade<'self> {
 }
 
 #[doc(hidden)]
-fn RWlockReleaseDowngrade<'r>(lock: &'r RWlock)
-                           -> RWlockReleaseDowngrade<'r> {
-    RWlockReleaseDowngrade {
+fn RWLockReleaseDowngrade<'r>(lock: &'r RWLock)
+                           -> RWLockReleaseDowngrade<'r> {
+    RWLockReleaseDowngrade {
         lock: lock
     }
 }
 
 /// The "write permission" token used for rwlock.write_downgrade().
-pub struct RWlockWriteMode<'self> { priv lock: &'self RWlock }
+pub struct RWLockWriteMode<'self> { priv lock: &'self RWLock }
 #[unsafe_destructor]
-impl<'self> Drop for RWlockWriteMode<'self> { fn drop(&self) {} }
+impl<'self> Drop for RWLockWriteMode<'self> { fn drop(&self) {} }
 
 /// The "read permission" token used for rwlock.write_downgrade().
-pub struct RWlockReadMode<'self> { priv lock: &'self RWlock }
+pub struct RWLockReadMode<'self> { priv lock: &'self RWLock }
 #[unsafe_destructor]
-impl<'self> Drop for RWlockReadMode<'self> { fn drop(&self) {} }
+impl<'self> Drop for RWLockReadMode<'self> { fn drop(&self) {} }
 
-impl<'self> RWlockWriteMode<'self> {
+impl<'self> RWLockWriteMode<'self> {
     /// Access the pre-downgrade rwlock in write mode.
     pub fn write<U>(&self, blk: &fn() -> U) -> U { blk() }
     /// Access the pre-downgrade rwlock in write mode with a condvar.
     pub fn write_cond<U>(&self, blk: &fn(c: &Condvar) -> U) -> U {
         // Need to make the condvar use the order lock when reacquiring the
-        // access lock. See comment in RWlock::write_cond for why.
+        // access lock. See comment in RWLock::write_cond for why.
         blk(&Condvar { sem:        &self.lock.access_lock,
                        order: Just(&self.lock.order_lock), })
     }
 }
 
-impl<'self> RWlockReadMode<'self> {
+impl<'self> RWLockReadMode<'self> {
     /// Access the post-downgrade rwlock in read mode.
     pub fn read<U>(&self, blk: &fn() -> U) -> U { blk() }
 }
@@ -816,19 +818,19 @@ mod tests {
      ************************************************************************/
     #[test]
     fn test_sem_acquire_release() {
-        let s = ~semaphore(1);
+        let s = ~Semaphore::new(1);
         s.acquire();
         s.release();
         s.acquire();
     }
     #[test]
     fn test_sem_basic() {
-        let s = ~semaphore(1);
+        let s = ~Semaphore::new(1);
         do s.access { }
     }
     #[test]
     fn test_sem_as_mutex() {
-        let s = ~semaphore(1);
+        let s = ~Semaphore::new(1);
         let s2 = ~s.clone();
         do task::spawn || {
             do s2.access {
@@ -843,7 +845,7 @@ mod tests {
     fn test_sem_as_cvar() {
         /* Child waits and parent signals */
         let (p,c) = comm::stream();
-        let s = ~semaphore(0);
+        let s = ~Semaphore::new(0);
         let s2 = ~s.clone();
         do task::spawn || {
             s2.acquire();
@@ -855,7 +857,7 @@ mod tests {
 
         /* Parent waits and child signals */
         let (p,c) = comm::stream();
-        let s = ~semaphore(0);
+        let s = ~Semaphore::new(0);
         let s2 = ~s.clone();
         do task::spawn || {
             for 5.times { task::yield(); }
@@ -869,7 +871,7 @@ mod tests {
     fn test_sem_multi_resource() {
         // Parent and child both get in the critical section at the same
         // time, and shake hands.
-        let s = ~semaphore(2);
+        let s = ~Semaphore::new(2);
         let s2 = ~s.clone();
         let (p1,c1) = comm::stream();
         let (p2,c2) = comm::stream();
@@ -889,7 +891,7 @@ mod tests {
         // Force the runtime to schedule two threads on the same sched_loop.
         // When one blocks, it should schedule the other one.
         do task::spawn_sched(task::ManualThreads(1)) {
-            let s = ~semaphore(1);
+            let s = ~Semaphore::new(1);
             let s2 = ~s.clone();
             let (p,c) = comm::stream();
             let child_data = Cell::new((s2, c));
@@ -914,7 +916,7 @@ mod tests {
         // Unsafely achieve shared state, and do the textbook
         // "load tmp = move ptr; inc tmp; store ptr <- tmp" dance.
         let (p,c) = comm::stream();
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let m2 = m.clone();
         let mut sharedstate = ~0;
         {
@@ -946,7 +948,7 @@ mod tests {
     }
     #[test]
     fn test_mutex_cond_wait() {
-        let m = ~Mutex();
+        let m = ~Mutex::new();
 
         // Child wakes up parent
         do m.lock_cond |cond| {
@@ -978,7 +980,7 @@ mod tests {
     }
     #[cfg(test)]
     fn test_mutex_cond_broadcast_helper(num_waiters: uint) {
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let mut ports = ~[];
 
         for num_waiters.times {
@@ -1013,7 +1015,7 @@ mod tests {
     }
     #[test]
     fn test_mutex_cond_no_waiter() {
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let m2 = ~m.clone();
         do task::try || {
             do m.lock_cond |_x| { }
@@ -1025,7 +1027,7 @@ mod tests {
     #[test] #[ignore(cfg(windows))]
     fn test_mutex_killed_simple() {
         // Mutex must get automatically unlocked if failed/killed within.
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let m2 = ~m.clone();
 
         let result: result::Result<(),()> = do task::try || {
@@ -1041,7 +1043,7 @@ mod tests {
     fn test_mutex_killed_cond() {
         // Getting killed during cond wait must not corrupt the mutex while
         // unwinding (e.g. double unlock).
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let m2 = ~m.clone();
 
         let result: result::Result<(),()> = do task::try || {
@@ -1065,7 +1067,7 @@ mod tests {
     }
     #[test] #[ignore(cfg(windows))]
     fn test_mutex_killed_broadcast() {
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         let m2 = ~m.clone();
         let (p,c) = comm::stream();
 
@@ -1120,7 +1122,7 @@ mod tests {
     #[test]
     fn test_mutex_cond_signal_on_0() {
         // Tests that signal_on(0) is equivalent to signal().
-        let m = ~Mutex();
+        let m = ~Mutex::new();
         do m.lock_cond |cond| {
             let m2 = ~m.clone();
             do task::spawn || {
@@ -1134,7 +1136,7 @@ mod tests {
     #[test] #[ignore(cfg(windows))]
     fn test_mutex_different_conds() {
         let result = do task::try {
-            let m = ~mutex_with_condvars(2);
+            let m = ~Mutex::new_with_condvars(2);
             let m2 = ~m.clone();
             let (p,c) = comm::stream();
             do task::spawn || {
@@ -1155,17 +1157,17 @@ mod tests {
     #[test] #[ignore(cfg(windows))]
     fn test_mutex_no_condvars() {
         let result = do task::try {
-            let m = ~mutex_with_condvars(0);
+            let m = ~Mutex::new_with_condvars(0);
             do m.lock_cond |cond| { cond.wait(); }
         };
         assert!(result.is_err());
         let result = do task::try {
-            let m = ~mutex_with_condvars(0);
+            let m = ~Mutex::new_with_condvars(0);
             do m.lock_cond |cond| { cond.signal(); }
         };
         assert!(result.is_err());
         let result = do task::try {
-            let m = ~mutex_with_condvars(0);
+            let m = ~Mutex::new_with_condvars(0);
             do m.lock_cond |cond| { cond.broadcast(); }
         };
         assert!(result.is_err());
@@ -1174,9 +1176,9 @@ mod tests {
      * Reader/writer lock tests
      ************************************************************************/
     #[cfg(test)]
-    pub enum RWlockMode { Read, Write, Downgrade, DowngradeRead }
+    pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead }
     #[cfg(test)]
-    fn lock_rwlock_in_mode(x: &RWlock, mode: RWlockMode, blk: &fn()) {
+    fn lock_rwlock_in_mode(x: &RWLock, mode: RWLockMode, blk: &fn()) {
         match mode {
             Read => x.read(blk),
             Write => x.write(blk),
@@ -1192,9 +1194,9 @@ mod tests {
         }
     }
     #[cfg(test)]
-    fn test_rwlock_exclusion(x: ~RWlock,
-                                 mode1: RWlockMode,
-                                 mode2: RWlockMode) {
+    fn test_rwlock_exclusion(x: ~RWLock,
+                                 mode1: RWLockMode,
+                                 mode2: RWLockMode) {
         // Test mutual exclusion between readers and writers. Just like the
         // mutex mutual exclusion test, a ways above.
         let (p,c) = comm::stream();
@@ -1216,7 +1218,7 @@ mod tests {
             assert_eq!(*sharedstate, 20);
         }
 
-        fn access_shared(sharedstate: &mut int, x: &RWlock, mode: RWlockMode,
+        fn access_shared(sharedstate: &mut int, x: &RWLock, mode: RWLockMode,
                          n: uint) {
             for n.times {
                 do lock_rwlock_in_mode(x, mode) {
@@ -1229,22 +1231,22 @@ mod tests {
     }
     #[test]
     fn test_rwlock_readers_wont_modify_the_data() {
-        test_rwlock_exclusion(~RWlock(), Read, Write);
-        test_rwlock_exclusion(~RWlock(), Write, Read);
-        test_rwlock_exclusion(~RWlock(), Read, Downgrade);
-        test_rwlock_exclusion(~RWlock(), Downgrade, Read);
+        test_rwlock_exclusion(~RWLock::new(), Read, Write);
+        test_rwlock_exclusion(~RWLock::new(), Write, Read);
+        test_rwlock_exclusion(~RWLock::new(), Read, Downgrade);
+        test_rwlock_exclusion(~RWLock::new(), Downgrade, Read);
     }
     #[test]
     fn test_rwlock_writers_and_writers() {
-        test_rwlock_exclusion(~RWlock(), Write, Write);
-        test_rwlock_exclusion(~RWlock(), Write, Downgrade);
-        test_rwlock_exclusion(~RWlock(), Downgrade, Write);
-        test_rwlock_exclusion(~RWlock(), Downgrade, Downgrade);
+        test_rwlock_exclusion(~RWLock::new(), Write, Write);
+        test_rwlock_exclusion(~RWLock::new(), Write, Downgrade);
+        test_rwlock_exclusion(~RWLock::new(), Downgrade, Write);
+        test_rwlock_exclusion(~RWLock::new(), Downgrade, Downgrade);
     }
     #[cfg(test)]
-    fn test_rwlock_handshake(x: ~RWlock,
-                                 mode1: RWlockMode,
-                                 mode2: RWlockMode,
+    fn test_rwlock_handshake(x: ~RWLock,
+                                 mode1: RWLockMode,
+                                 mode2: RWLockMode,
                                  make_mode2_go_first: bool) {
         // Much like sem_multi_resource.
         let x2 = (*x).clone();
@@ -1275,32 +1277,32 @@ mod tests {
     }
     #[test]
     fn test_rwlock_readers_and_readers() {
-        test_rwlock_handshake(~RWlock(), Read, Read, false);
+        test_rwlock_handshake(~RWLock::new(), Read, Read, false);
         // The downgrader needs to get in before the reader gets in, otherwise
         // they cannot end up reading at the same time.
-        test_rwlock_handshake(~RWlock(), DowngradeRead, Read, false);
-        test_rwlock_handshake(~RWlock(), Read, DowngradeRead, true);
+        test_rwlock_handshake(~RWLock::new(), DowngradeRead, Read, false);
+        test_rwlock_handshake(~RWLock::new(), Read, DowngradeRead, true);
         // Two downgrade_reads can never both end up reading at the same time.
     }
     #[test]
     fn test_rwlock_downgrade_unlock() {
         // Tests that downgrade can unlock the lock in both modes
-        let x = ~RWlock();
+        let x = ~RWLock::new();
         do lock_rwlock_in_mode(x, Downgrade) { }
         test_rwlock_handshake(x, Read, Read, false);
-        let y = ~RWlock();
+        let y = ~RWLock::new();
         do lock_rwlock_in_mode(y, DowngradeRead) { }
         test_rwlock_exclusion(y, Write, Write);
     }
     #[test]
     fn test_rwlock_read_recursive() {
-        let x = ~RWlock();
+        let x = ~RWLock::new();
         do x.read { do x.read { } }
     }
     #[test]
     fn test_rwlock_cond_wait() {
         // As test_mutex_cond_wait above.
-        let x = ~RWlock();
+        let x = ~RWLock::new();
 
         // Child wakes up parent
         do x.write_cond |cond| {
@@ -1337,7 +1339,7 @@ mod tests {
                                              dg1: bool,
                                              dg2: bool) {
         // Much like the mutex broadcast test. Downgrade-enabled.
-        fn lock_cond(x: &RWlock, downgrade: bool, blk: &fn(c: &Condvar)) {
+        fn lock_cond(x: &RWLock, downgrade: bool, blk: &fn(c: &Condvar)) {
             if downgrade {
                 do x.write_downgrade |mode| {
                     do mode.write_cond |c| { blk(c) }
@@ -1346,7 +1348,7 @@ mod tests {
                 do x.write_cond |c| { blk(c) }
             }
         }
-        let x = ~RWlock();
+        let x = ~RWLock::new();
         let mut ports = ~[];
 
         for num_waiters.times {
@@ -1383,9 +1385,9 @@ mod tests {
         test_rwlock_cond_broadcast_helper(12, false, false);
     }
     #[cfg(test)] #[ignore(cfg(windows))]
-    fn rwlock_kill_helper(mode1: RWlockMode, mode2: RWlockMode) {
+    fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) {
         // Mutex must get automatically unlocked if failed/killed within.
-        let x = ~RWlock();
+        let x = ~RWLock::new();
         let x2 = (*x).clone();
 
         let result: result::Result<(),()> = do task::try || {
@@ -1431,8 +1433,8 @@ mod tests {
     #[test] #[should_fail] #[ignore(cfg(windows))]
     fn test_rwlock_downgrade_cant_swap() {
         // Tests that you can't downgrade with a different rwlock's token.
-        let x = ~RWlock();
-        let y = ~RWlock();
+        let x = ~RWLock::new();
+        let y = ~RWLock::new();
         do x.write_downgrade |xwrite| {
             let mut xopt = Some(xwrite);
             do y.write_downgrade |_ywrite| {
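A minimal standalone sketch (not part of the patch) of how the renamed extra::sync constructors read after this change; it reuses only calls that appear verbatim in the hunks above, with fn main() and the use lines added as glue:

    // Sketch: constructor-style naming in extra::sync after this patch.
    extern mod extra;
    use extra::sync;

    fn main() {
        let m = ~sync::Mutex::new();             // was ~Mutex()
        do m.lock_cond |cond| { cond.signal(); } // signal with no waiters is a no-op

        let x = ~sync::RWLock::new();            // was ~RWlock()
        do x.read { }                            // shared read access
        do x.write_cond |_cond| { }              // exclusive write access with a condvar
    }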
diff --git a/src/libextra/workcache.rs b/src/libextra/workcache.rs
index 42210d0cd89..125f3a5cd4a 100644
--- a/src/libextra/workcache.rs
+++ b/src/libextra/workcache.rs
@@ -15,7 +15,7 @@ use digest::DigestUtil;
 use json;
 use sha1::Sha1;
 use serialize::{Encoder, Encodable, Decoder, Decodable};
-use arc::{ARC,RWARC};
+use arc::{Arc,RWArc};
 use treemap::TreeMap;
 
 use std::cell::Cell;
@@ -176,10 +176,10 @@ impl Logger {
 
 #[deriving(Clone)]
 struct Context {
-    db: RWARC<Database>,
-    logger: RWARC<Logger>,
-    cfg: ARC<json::Object>,
-    freshness: ARC<TreeMap<~str,extern fn(&str,&str)->bool>>
+    db: RWArc<Database>,
+    logger: RWArc<Logger>,
+    cfg: Arc<json::Object>,
+    freshness: Arc<TreeMap<~str,extern fn(&str,&str)->bool>>
 }
 
 struct Prep<'self> {
@@ -229,14 +229,14 @@ fn digest_file(path: &Path) -> ~str {
 
 impl Context {
 
-    pub fn new(db: RWARC<Database>,
-               lg: RWARC<Logger>,
-               cfg: ARC<json::Object>) -> Context {
+    pub fn new(db: RWArc<Database>,
+               lg: RWArc<Logger>,
+               cfg: Arc<json::Object>) -> Context {
         Context {
             db: db,
             logger: lg,
             cfg: cfg,
-            freshness: ARC(TreeMap::new())
+            freshness: Arc::new(TreeMap::new())
         }
     }
 
@@ -383,9 +383,9 @@ fn test() {
         r.get_ref().write_str("int main() { return 0; }");
     }
 
-    let cx = Context::new(RWARC(Database::new(Path("db.json"))),
-                          RWARC(Logger::new()),
-                          ARC(TreeMap::new()));
+    let cx = Context::new(RWArc::new(Database::new(Path("db.json"))),
+                          RWArc::new(Logger::new()),
+                          Arc::new(TreeMap::new()));
 
     let s = do cx.with_prep("test1") |prep| {
 
diff --git a/src/libstd/comm.rs b/src/libstd/comm.rs
index b9dacc142ce..9fe6aa57958 100644
--- a/src/libstd/comm.rs
+++ b/src/libstd/comm.rs
@@ -22,7 +22,7 @@ use option::{Option, Some, None};
 use uint;
 use vec::OwnedVector;
 use util::replace;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rtcomm = rt::comm;
 use rt;
 
@@ -228,7 +228,7 @@ impl<T: Send> SharedChan<T> {
     pub fn new(c: Chan<T>) -> SharedChan<T> {
         let Chan { inner } = c;
         let c = match inner {
-            Left(c) => Left(exclusive(c)),
+            Left(c) => Left(Exclusive::new(c)),
             Right(c) => Right(rtcomm::SharedChan::new(c))
         };
         SharedChan { inner: c }
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index cfd8e46dfdb..2bf4543df50 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -57,7 +57,7 @@ struct KillHandleInner {
 
     // Shared state between task and children for exit code propagation. These
     // are here so we can re-use the kill handle to implement watched children
-    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
+    // tasks. Using a separate Arc-like would introduce extra atomic adds/subs
     // into common spawn paths, so this is just for speed.
 
     // Locklessly accessed; protected by the enclosing refcount's barriers.
@@ -217,7 +217,7 @@ impl KillHandle {
             // Exit code propagation fields
             any_child_failed: false,
             child_tombstones: None,
-            graveyard_lock:   LittleLock(),
+            graveyard_lock:   LittleLock::new(),
         }));
         (handle, flag_clone)
     }
diff --git a/src/libstd/rt/message_queue.rs b/src/libstd/rt/message_queue.rs
index 6ef07577415..8518ddaeae1 100644
--- a/src/libstd/rt/message_queue.rs
+++ b/src/libstd/rt/message_queue.rs
@@ -16,7 +16,7 @@ use kinds::Send;
 use vec::OwnedVector;
 use cell::Cell;
 use option::*;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use clone::Clone;
 
 pub struct MessageQueue<T> {
@@ -27,7 +27,7 @@ pub struct MessageQueue<T> {
 impl<T: Send> MessageQueue<T> {
     pub fn new() -> MessageQueue<T> {
         MessageQueue {
-            queue: ~exclusive(~[])
+            queue: ~Exclusive::new(~[])
         }
     }
 
diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs
index 3d6e9ef5635..d327023de97 100644
--- a/src/libstd/rt/sleeper_list.rs
+++ b/src/libstd/rt/sleeper_list.rs
@@ -15,7 +15,7 @@ use container::Container;
 use vec::OwnedVector;
 use option::{Option, Some, None};
 use cell::Cell;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rt::sched::SchedHandle;
 use clone::Clone;
 
@@ -26,7 +26,7 @@ pub struct SleeperList {
 impl SleeperList {
     pub fn new() -> SleeperList {
         SleeperList {
-            stack: ~exclusive(~[])
+            stack: ~Exclusive::new(~[])
         }
     }
 
diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs
index fc4a668bdf6..5397b5f2c5c 100644
--- a/src/libstd/rt/uv/uvio.rs
+++ b/src/libstd/rt/uv/uvio.rs
@@ -28,7 +28,7 @@ use rt::io::{standard_error, OtherIoError};
 use rt::tube::Tube;
 use rt::local::Local;
 use str::StrSlice;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 
 #[cfg(test)] use container::Container;
 #[cfg(test)] use uint;
@@ -158,7 +158,7 @@ pub struct UvRemoteCallback {
 
 impl UvRemoteCallback {
     pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback {
-        let exit_flag = exclusive(false);
+        let exit_flag = Exclusive::new(false);
         let exit_flag_clone = exit_flag.clone();
         let async = do AsyncWatcher::new(loop_) |watcher, status| {
             assert!(status.is_none());
diff --git a/src/libstd/rt/work_queue.rs b/src/libstd/rt/work_queue.rs
index 00d27744268..24792f3904e 100644
--- a/src/libstd/rt/work_queue.rs
+++ b/src/libstd/rt/work_queue.rs
@@ -11,7 +11,7 @@
 use container::Container;
 use option::*;
 use vec::OwnedVector;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use cell::Cell;
 use kinds::Send;
 use clone::Clone;
@@ -24,7 +24,7 @@ pub struct WorkQueue<T> {
 impl<T: Send> WorkQueue<T> {
     pub fn new() -> WorkQueue<T> {
         WorkQueue {
-            queue: ~exclusive(~[])
+            queue: ~Exclusive::new(~[])
         }
     }
 
diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs
index 1ce8641085b..df927cb6a7a 100644
--- a/src/libstd/task/mod.rs
+++ b/src/libstd/task/mod.rs
@@ -677,7 +677,7 @@ pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
 
 /**
  * A stronger version of unkillable that also inhibits scheduling operations.
- * For use with exclusive ARCs, which use pthread mutexes directly.
+ * For use with exclusive Arcs, which use pthread mutexes directly.
  */
 pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
     use rt::task::Task;
diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs
index a17bb2b1632..61dcc33c629 100644
--- a/src/libstd/task/spawn.rs
+++ b/src/libstd/task/spawn.rs
@@ -91,7 +91,7 @@ use task::unkillable;
 use to_bytes::IterBytes;
 use uint;
 use util;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rt::{OldTaskContext, TaskContext, SchedulerContext, GlobalContext, context};
 use rt::local::Local;
 use rt::task::Task;
@@ -545,7 +545,7 @@ impl RuntimeGlue {
                             // Main task, doing first spawn ever. Lazily initialise here.
                             let mut members = TaskSet::new();
                             members.insert(OldTask(me));
-                            let tasks = exclusive(Some(TaskGroupData {
+                            let tasks = Exclusive::new(Some(TaskGroupData {
                                 members: members,
                                 descendants: TaskSet::new(),
                             }));
@@ -569,7 +569,7 @@ impl RuntimeGlue {
                         let mut members = TaskSet::new();
                         let my_handle = (*me).death.kill_handle.get_ref().clone();
                         members.insert(NewTask(my_handle));
-                        let tasks = exclusive(Some(TaskGroupData {
+                        let tasks = Exclusive::new(Some(TaskGroupData {
                             members: members,
                             descendants: TaskSet::new(),
                         }));
@@ -596,7 +596,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             (spawner_group.tasks.clone(), ancestors, spawner_group.is_main)
         } else {
             // Child is in a separate group from spawner.
-            let g = exclusive(Some(TaskGroupData {
+            let g = Exclusive::new(Some(TaskGroupData {
                 members:     TaskSet::new(),
                 descendants: TaskSet::new(),
             }));
@@ -605,7 +605,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
                 assert!(new_generation < uint::max_value);
                 // Child's ancestors start with the spawner.
                 // Build a new node in the ancestor list.
-                AncestorList(Some(exclusive(AncestorNode {
+                AncestorList(Some(Exclusive::new(AncestorNode {
                     generation: new_generation,
                     parent_group: spawner_group.tasks.clone(),
                     ancestors: ancestors,
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index d4de402a33e..e83f3a53e4c 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -85,7 +85,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     }
 
     /// Wait until all other handles are dropped, then retrieve the enclosed
-    /// data. See extra::arc::ARC for specific semantics documentation.
+    /// data. See extra::arc::Arc for specific semantics documentation.
     /// If called when the task is already unkillable, unwrap will unkillably
     /// block; otherwise, an unwrapping task can be killed by linked failure.
     pub unsafe fn unwrap(self) -> T {
@@ -146,7 +146,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                 // If 'put' returns the server end back to us, we were rejected;
                 // someone else was trying to unwrap. Avoid guaranteed deadlock.
                 cast::forget(data);
-                fail!("Another task is already unwrapping this ARC!");
+                fail!("Another task is already unwrapping this Arc!");
             }
         }
     }
@@ -236,11 +236,13 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
 
 /****************************************************************************/
 
-#[allow(non_camel_case_types)] // runtime type
-type rust_little_lock = *libc::c_void;
+enum RTLittleLock {
+    // We know nothing about the runtime's representation of the
+    // little lock so we leave the definition empty.
+}
 
 pub struct LittleLock {
-    l: rust_little_lock,
+    l: *RTLittleLock,
 }
 
 impl Drop for LittleLock {
@@ -251,15 +253,15 @@ impl Drop for LittleLock {
     }
 }
 
-pub fn LittleLock() -> LittleLock {
-    unsafe {
-        LittleLock {
-            l: rust_create_little_lock()
+impl LittleLock {
+    pub fn new() -> LittleLock {
+        unsafe {
+            LittleLock {
+                l: rust_create_little_lock()
+            }
         }
     }
-}
 
-impl LittleLock {
     #[inline]
     pub unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
         do atomically {
@@ -285,45 +287,45 @@ struct ExData<T> {
  * # Safety note
  *
  * This uses a pthread mutex, not one that's aware of the userspace scheduler.
- * The user of an exclusive must be careful not to invoke any functions that may
+ * The user of an Exclusive must be careful not to invoke any functions that may
  * reschedule the task while holding the lock, or deadlock may result. If you
- * need to block or yield while accessing shared state, use extra::sync::RWARC.
+ * need to block or yield while accessing shared state, use extra::arc::RWArc.
  */
 pub struct Exclusive<T> {
     x: UnsafeAtomicRcBox<ExData<T>>
 }
 
-pub fn exclusive<T:Send>(user_data: T) -> Exclusive<T> {
-    let data = ExData {
-        lock: LittleLock(),
-        failed: false,
-        data: user_data
-    };
-    Exclusive {
-        x: UnsafeAtomicRcBox::new(data)
-    }
-}
-
 impl<T:Send> Clone for Exclusive<T> {
-    // Duplicate an exclusive ARC, as std::arc::clone.
+    // Duplicate an Exclusive Arc, as std::arc::clone.
     fn clone(&self) -> Exclusive<T> {
         Exclusive { x: self.x.clone() }
     }
 }
 
 impl<T:Send> Exclusive<T> {
-    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
+    pub fn new(user_data: T) -> Exclusive<T> {
+        let data = ExData {
+            lock: LittleLock::new(),
+            failed: false,
+            data: user_data
+        };
+        Exclusive {
+            x: UnsafeAtomicRcBox::new(data)
+        }
+    }
+
+    // Exactly like std::arc::MutexArc::access(), but with the LittleLock
     // instead of a proper mutex. Same reason for being unsafe.
     //
     // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
     // accessing the provided condition variable) are prohibited while inside
-    // the exclusive. Supporting that is a work in progress.
+    // the Exclusive. Supporting that is a work in progress.
     #[inline]
     pub unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
         let rec = self.x.get();
         do (*rec).lock.lock {
             if (*rec).failed {
-                fail!("Poisoned exclusive - another task failed inside!");
+                fail!("Poisoned Exclusive::new - another task failed inside!");
             }
             (*rec).failed = true;
             let result = f(&mut (*rec).data);
@@ -341,7 +343,7 @@ impl<T:Send> Exclusive<T> {
 
     pub fn unwrap(self) -> T {
         let Exclusive { x: x } = self;
-        // Someday we might need to unkillably unwrap an exclusive, but not today.
+        // Someday we might need to unkillably unwrap an Exclusive, but not today.
         let inner = unsafe { x.unwrap() };
         let ExData { data: user_data, _ } = inner; // will destroy the LittleLock
         user_data
@@ -349,10 +351,10 @@ impl<T:Send> Exclusive<T> {
 }
 
 extern {
-    fn rust_create_little_lock() -> rust_little_lock;
-    fn rust_destroy_little_lock(lock: rust_little_lock);
-    fn rust_lock_little_lock(lock: rust_little_lock);
-    fn rust_unlock_little_lock(lock: rust_little_lock);
+    fn rust_create_little_lock() -> *RTLittleLock;
+    fn rust_destroy_little_lock(lock: *RTLittleLock);
+    fn rust_lock_little_lock(lock: *RTLittleLock);
+    fn rust_unlock_little_lock(lock: *RTLittleLock);
 }
 
 #[cfg(test)]
@@ -360,20 +362,20 @@ mod tests {
     use cell::Cell;
     use comm;
     use option::*;
-    use super::{exclusive, UnsafeAtomicRcBox};
+    use super::{Exclusive, UnsafeAtomicRcBox};
     use task;
     use uint;
     use util;
 
     #[test]
-    fn exclusive_arc() {
+    fn exclusive_new_arc() {
         unsafe {
             let mut futures = ~[];
 
             let num_tasks = 10;
             let count = 10;
 
-            let total = exclusive(~0);
+            let total = Exclusive::new(~0);
 
             for uint::range(0, num_tasks) |_i| {
                 let total = total.clone();
@@ -399,11 +401,11 @@ mod tests {
     }
 
     #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_poison() {
+    fn exclusive_new_poison() {
         unsafe {
-            // Tests that if one task fails inside of an exclusive, subsequent
+            // Tests that if one task fails inside of an Exclusive, subsequent
             // accesses will also fail.
-            let x = exclusive(1);
+            let x = Exclusive::new(1);
             let x2 = x.clone();
             do task::try || {
                 do x2.with |one| {
@@ -466,15 +468,15 @@ mod tests {
     }
 
     #[test]
-    fn exclusive_unwrap_basic() {
+    fn exclusive_new_unwrap_basic() {
         // Unlike the above, also tests no double-freeing of the LittleLock.
-        let x = exclusive(~~"hello");
+        let x = Exclusive::new(~~"hello");
         assert!(x.unwrap() == ~~"hello");
     }
 
     #[test]
-    fn exclusive_unwrap_contended() {
-        let x = exclusive(~~"hello");
+    fn exclusive_new_unwrap_contended() {
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         do task::spawn {
             let x2 = x2.take();
@@ -484,7 +486,7 @@ mod tests {
         assert!(x.unwrap() == ~~"hello");
 
         // Now try the same thing, but with the child task blocking.
-        let x = exclusive(~~"hello");
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let mut res = None;
         let mut builder = task::task();
@@ -499,8 +501,8 @@ mod tests {
     }
 
     #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_unwrap_conflict() {
-        let x = exclusive(~~"hello");
+    fn exclusive_new_unwrap_conflict() {
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let mut res = None;
         let mut builder = task::task();
@@ -515,14 +517,14 @@ mod tests {
     }
 
     #[test] #[ignore(cfg(windows))]
-    fn exclusive_unwrap_deadlock() {
+    fn exclusive_new_unwrap_deadlock() {
         // This is not guaranteed to get to the deadlock before being killed,
         // but it will show up sometimes, and if the deadlock were not there,
         // the test would nondeterministically fail.
         let result = do task::try {
-            // a task that has two references to the same exclusive will
+            // a task that has two references to the same Exclusive will
             // deadlock when it unwraps. nothing to be done about that.
-            let x = exclusive(~~"hello");
+            let x = Exclusive::new(~~"hello");
             let x2 = x.clone();
             do task::spawn {
                 for 10.times { task::yield(); } // try to let the unwrapper go
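A minimal standalone sketch (not part of the patch) of the unstable::sync rename; only Exclusive::new, with, and unwrap from the hunks above are used, and the literal values plus fn main() are glue added for the example:

    // Sketch: Exclusive::new replaces the free function exclusive().
    use std::unstable::sync::Exclusive;

    fn main() {
        let x = Exclusive::new(1);          // was exclusive(1)
        unsafe {
            // with() takes the pthread-based LittleLock around the data.
            do x.with |val| { *val += 1; }
        }
        // Sole handle, so unwrap() returns the data without blocking.
        assert!(x.unwrap() == 2);
    }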
diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs
index 379deff233c..54057be5268 100644
--- a/src/libstd/vec.rs
+++ b/src/libstd/vec.rs
@@ -2563,9 +2563,9 @@ mod tests {
     #[test]
     fn test_swap_remove_noncopyable() {
         // Tests that we don't accidentally run destructors twice.
-        let mut v = ~[::unstable::sync::exclusive(()),
-                      ::unstable::sync::exclusive(()),
-                      ::unstable::sync::exclusive(())];
+        let mut v = ~[::unstable::sync::Exclusive::new(()),
+                      ::unstable::sync::Exclusive::new(()),
+                      ::unstable::sync::Exclusive::new(())];
         let mut _e = v.swap_remove(0);
         assert_eq!(v.len(), 2);
         _e = v.swap_remove(1);
diff --git a/src/test/bench/graph500-bfs.rs b/src/test/bench/graph500-bfs.rs
index f17b6658f9d..00add20fb7d 100644
--- a/src/test/bench/graph500-bfs.rs
+++ b/src/test/bench/graph500-bfs.rs
@@ -230,7 +230,7 @@ fn bfs2(graph: graph, key: node_id) -> bfs_result {
 }
 
 /// A parallel version of the bfs function.
-fn pbfs(graph: &arc::ARC<graph>, key: node_id) -> bfs_result {
+fn pbfs(graph: &arc::Arc<graph>, key: node_id) -> bfs_result {
     // This works by doing functional updates of a color vector.
 
     let graph_vec = graph.get(); // FIXME #3387 requires this temp
@@ -263,7 +263,7 @@ fn pbfs(graph: &arc::ARC<graph>, key: node_id) -> bfs_result {
         i += 1;
         let old_len = colors.len();
 
-        let color = arc::ARC(colors);
+        let color = arc::Arc::new(colors);
 
         let color_vec = color.get(); // FIXME #3387 requires this temp
         colors = do par::mapi(*color_vec) {
@@ -444,7 +444,7 @@ fn main() {
     let mut total_seq = 0.0;
     let mut total_par = 0.0;
 
-    let graph_arc = arc::ARC(graph.clone());
+    let graph_arc = arc::Arc::new(graph.clone());
 
     do gen_search_keys(graph, num_keys).map() |root| {
         io::stdout().write_line("");
diff --git a/src/test/bench/msgsend-ring-mutex-arcs.rs b/src/test/bench/msgsend-ring-mutex-arcs.rs
index 0fa641e395e..c685057874a 100644
--- a/src/test/bench/msgsend-ring-mutex-arcs.rs
+++ b/src/test/bench/msgsend-ring-mutex-arcs.rs
@@ -11,9 +11,9 @@
 // This test creates a bunch of tasks that simultaneously send to each
 // other in a ring. The messages should all be basically
 // independent.
-// This is like msgsend-ring-pipes but adapted to use ARCs.
+// This is like msgsend-ring-pipes but adapted to use Arcs.
 
-// This also serves as a pipes test, because ARCs are implemented with pipes.
+// This also serves as a pipes test, because Arcs are implemented with pipes.
 
 extern mod extra;
 
@@ -26,7 +26,7 @@ use std::os;
 use std::uint;
 
 // A poor man's pipe.
-type pipe = arc::MutexARC<~[uint]>;
+type pipe = arc::MutexArc<~[uint]>;
 
 fn send(p: &pipe, msg: uint) {
     unsafe {
@@ -48,7 +48,7 @@ fn recv(p: &pipe) -> uint {
 }
 
 fn init() -> (pipe,pipe) {
-    let m = arc::MutexARC(~[]);
+    let m = arc::MutexArc::new(~[]);
     ((&m).clone(), m)
 }
 
diff --git a/src/test/bench/msgsend-ring-rw-arcs.rs b/src/test/bench/msgsend-ring-rw-arcs.rs
index 09d1c632d0e..e7def11b266 100644
--- a/src/test/bench/msgsend-ring-rw-arcs.rs
+++ b/src/test/bench/msgsend-ring-rw-arcs.rs
@@ -11,9 +11,9 @@
 // This test creates a bunch of tasks that simultaneously send to each
 // other in a ring. The messages should all be basically
 // independent.
-// This is like msgsend-ring-pipes but adapted to use ARCs.
+// This is like msgsend-ring-pipes but adapted to use Arcs.
 
-// This also serves as a pipes test, because ARCs are implemented with pipes.
+// This also serves as a pipes test, because Arcs are implemented with pipes.
 
 extern mod extra;
 
@@ -26,7 +26,7 @@ use std::os;
 use std::uint;
 
 // A poor man's pipe.
-type pipe = arc::RWARC<~[uint]>;
+type pipe = arc::RWArc<~[uint]>;
 
 fn send(p: &pipe, msg: uint) {
     do p.write_cond |state, cond| {
@@ -44,7 +44,7 @@ fn recv(p: &pipe) -> uint {
 }
 
 fn init() -> (pipe,pipe) {
-    let x = arc::RWARC(~[]);
+    let x = arc::RWArc::new(~[]);
     ((&x).clone(), x)
 }
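A minimal standalone sketch (not part of the patch) of the extra::arc constructors after the rename, reusing only calls that appear in the hunks (Arc::new, get, MutexArc::new, RWArc::new, write); the literal values and fn main() are glue added for the example:

    // Sketch: Arc::new, MutexArc::new and RWArc::new replace ARC, MutexARC and RWARC.
    extern mod extra;
    use extra::arc;

    fn main() {
        let a = arc::Arc::new(true);        // was arc::ARC(true)
        assert!(*a.get());

        let _m = arc::MutexArc::new(~[0u]); // was arc::MutexARC(~[...])

        let r = arc::RWArc::new(0);         // was arc::RWARC(0)
        do r.write |val| {
            *val += 1;
            assert!(*val == 1);
        }
    }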
 
diff --git a/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs
index b00b701191e..f8a3c5c573a 100644
--- a/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs
+++ b/src/test/compile-fail/arc-rw-cond-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::arc;
 fn main() {
-    let x = ~arc::RWARC(1);
+    let x = ~arc::RWArc::new(1);
     let mut y = None;
     do x.write_cond |_one, cond| {
         y = Some(cond);
diff --git a/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs
index 6d4b774fd5f..451616a074f 100644
--- a/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs
+++ b/src/test/compile-fail/arc-rw-read-mode-shouldnt-escape.rs
@@ -11,7 +11,7 @@
 extern mod extra;
 use extra::arc;
 fn main() {
-    let x = ~arc::RWARC(1);
+    let x = ~arc::RWArc::new(1);
     let mut y = None;
     do x.write_downgrade |write_mode| {
         y = Some(x.downgrade(write_mode));
diff --git a/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs
index 001e6cf922f..44657dfd0ef 100644
--- a/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs
+++ b/src/test/compile-fail/arc-rw-state-shouldnt-escape.rs
@@ -11,7 +11,7 @@
 extern mod extra;
 use extra::arc;
 fn main() {
-    let x = ~arc::RWARC(1);
+    let x = ~arc::RWArc::new(1);
     let mut y = None; //~ ERROR lifetime of variable does not enclose its declaration
     do x.write |one| {
         y = Some(one);
diff --git a/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs
index 59e899dbbf2..44ac0e722d3 100644
--- a/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs
+++ b/src/test/compile-fail/arc-rw-write-mode-cond-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::arc;
 fn main() {
-    let x = ~arc::RWARC(1);
+    let x = ~arc::RWArc::new(1);
     let mut y = None;
     do x.write_downgrade |write_mode| {
         do (&write_mode).write_cond |_one, cond| {
diff --git a/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs b/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs
index 2599fb4dfa0..b6535b6189a 100644
--- a/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs
+++ b/src/test/compile-fail/arc-rw-write-mode-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::arc;
 fn main() {
-    let x = ~arc::RWARC(1);
+    let x = ~arc::RWArc::new(1);
     let mut y = None;
     do x.write_downgrade |write_mode| {
         y = Some(write_mode);
diff --git a/src/test/compile-fail/no-capture-arc.rs b/src/test/compile-fail/no-capture-arc.rs
index b036071fd87..fd54b6638c6 100644
--- a/src/test/compile-fail/no-capture-arc.rs
+++ b/src/test/compile-fail/no-capture-arc.rs
@@ -17,7 +17,7 @@ use std::task;
 
 fn main() {
     let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-    let arc_v = arc::ARC(v);
+    let arc_v = arc::Arc::new(v);
 
     do task::spawn() {
         let v = arc_v.get();
diff --git a/src/test/compile-fail/no-reuse-move-arc.rs b/src/test/compile-fail/no-reuse-move-arc.rs
index 28f3ea7af9f..a17196ae298 100644
--- a/src/test/compile-fail/no-reuse-move-arc.rs
+++ b/src/test/compile-fail/no-reuse-move-arc.rs
@@ -15,7 +15,7 @@ use std::task;
 
 fn main() {
     let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-    let arc_v = arc::ARC(v);
+    let arc_v = arc::Arc::new(v);
 
     do task::spawn() {
         let v = arc_v.get();
diff --git a/src/test/compile-fail/once-cant-call-twice-on-heap.rs b/src/test/compile-fail/once-cant-call-twice-on-heap.rs
index 4436675d69a..dddd9a1868c 100644
--- a/src/test/compile-fail/once-cant-call-twice-on-heap.rs
+++ b/src/test/compile-fail/once-cant-call-twice-on-heap.rs
@@ -21,7 +21,7 @@ fn foo(blk: ~once fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x);
diff --git a/src/test/compile-fail/once-cant-call-twice-on-stack.rs b/src/test/compile-fail/once-cant-call-twice-on-stack.rs
index 10877be549e..45110bd4bc6 100644
--- a/src/test/compile-fail/once-cant-call-twice-on-stack.rs
+++ b/src/test/compile-fail/once-cant-call-twice-on-stack.rs
@@ -22,7 +22,7 @@ fn foo(blk: &once fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x);
diff --git a/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs b/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs
index 61f158cec27..cc40fb6b8d8 100644
--- a/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs
+++ b/src/test/compile-fail/once-cant-move-out-of-non-once-on-heap.rs
@@ -21,7 +21,7 @@ fn foo(blk: ~fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x); //~ ERROR cannot move out of captured outer variable
diff --git a/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs b/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs
index 42c8b9a9998..aedeb92df78 100644
--- a/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs
+++ b/src/test/compile-fail/once-cant-move-out-of-non-once-on-stack.rs
@@ -21,7 +21,7 @@ fn foo(blk: &fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x); //~ ERROR cannot move out of captured outer variable
diff --git a/src/test/compile-fail/sync-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-cond-shouldnt-escape.rs
index 2006027e797..928953de390 100644
--- a/src/test/compile-fail/sync-cond-shouldnt-escape.rs
+++ b/src/test/compile-fail/sync-cond-shouldnt-escape.rs
@@ -13,7 +13,7 @@ extern mod extra;
 use extra::sync;
 
 fn main() {
-    let m = ~sync::Mutex();
+    let m = ~sync::Mutex::new();
     let mut cond = None;
     do m.lock_cond |c| {
         cond = Some(c);
diff --git a/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs
index 4108201f911..03779b3ffe3 100644
--- a/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs
+++ b/src/test/compile-fail/sync-rwlock-cond-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::sync;
 fn main() {
-    let x = ~sync::RWlock();
+    let x = ~sync::RWLock::new();
     let mut y = None;
     do x.write_cond |cond| {
         y = Some(cond);
diff --git a/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs
index 4bec5fa270a..6ce3869bab2 100644
--- a/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs
+++ b/src/test/compile-fail/sync-rwlock-read-mode-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::sync;
 fn main() {
-    let x = ~sync::RWlock();
+    let x = ~sync::RWLock::new();
     let mut y = None;
     do x.write_downgrade |write_mode| {
         y = Some(x.downgrade(write_mode));
diff --git a/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs
index 43b4d9aabb8..fab16894a65 100644
--- a/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs
+++ b/src/test/compile-fail/sync-rwlock-write-mode-cond-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::sync;
 fn main() {
-    let x = ~sync::RWlock();
+    let x = ~sync::RWLock::new();
     let mut y = None;
     do x.write_downgrade |write_mode| {
         do (&write_mode).write_cond |cond| {
diff --git a/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs b/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs
index 15af7be5246..7210fbf37b4 100644
--- a/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs
+++ b/src/test/compile-fail/sync-rwlock-write-mode-shouldnt-escape.rs
@@ -12,7 +12,7 @@
 extern mod extra;
 use extra::sync;
 fn main() {
-    let x = ~sync::RWlock();
+    let x = ~sync::RWLock::new();
     let mut y = None;
     do x.write_downgrade |write_mode| {
         y = Some(write_mode);
diff --git a/src/test/run-fail/issue-2444.rs b/src/test/run-fail/issue-2444.rs
index 0ab1528e4fb..5e7c7cf519b 100644
--- a/src/test/run-fail/issue-2444.rs
+++ b/src/test/run-fail/issue-2444.rs
@@ -13,7 +13,7 @@
 extern mod extra;
 use extra::arc;
 
-enum e<T> { e(arc::ARC<T>) }
+enum e<T> { e(arc::Arc<T>) }
 
 fn foo() -> e<int> {fail!();}
 
diff --git a/src/test/run-pass/bind-by-move.rs b/src/test/run-pass/bind-by-move.rs
index 5cde389ff75..9d808d19af2 100644
--- a/src/test/run-pass/bind-by-move.rs
+++ b/src/test/run-pass/bind-by-move.rs
@@ -11,10 +11,10 @@
 // xfail-fast
 extern mod extra;
 use extra::arc;
-fn dispose(_x: arc::ARC<bool>) { unsafe { } }
+fn dispose(_x: arc::Arc<bool>) { unsafe { } }
 
 pub fn main() {
-    let p = arc::ARC(true);
+    let p = arc::Arc::new(true);
     let x = Some(p);
     match x {
         Some(z) => { dispose(z); },
diff --git a/src/test/run-pass/match-ref-binding-in-guard-3256.rs b/src/test/run-pass/match-ref-binding-in-guard-3256.rs
index a199a75de56..e1d2f0e1c48 100644
--- a/src/test/run-pass/match-ref-binding-in-guard-3256.rs
+++ b/src/test/run-pass/match-ref-binding-in-guard-3256.rs
@@ -12,7 +12,7 @@ use std::unstable;
 
 pub fn main() {
     unsafe {
-        let x = Some(unstable::sync::exclusive(true));
+        let x = Some(unstable::sync::Exclusive::new(true));
         match x {
             Some(ref z) if z.with(|b| *b) => {
                 do z.with |b| { assert!(*b); }
diff --git a/src/test/run-pass/once-move-out-on-heap.rs b/src/test/run-pass/once-move-out-on-heap.rs
index 38b23fd128d..744cd1066cf 100644
--- a/src/test/run-pass/once-move-out-on-heap.rs
+++ b/src/test/run-pass/once-move-out-on-heap.rs
@@ -21,7 +21,7 @@ fn foo(blk: ~once fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x);
diff --git a/src/test/run-pass/once-move-out-on-stack.rs b/src/test/run-pass/once-move-out-on-stack.rs
index e881f576673..002503a5566 100644
--- a/src/test/run-pass/once-move-out-on-stack.rs
+++ b/src/test/run-pass/once-move-out-on-stack.rs
@@ -22,7 +22,7 @@ fn foo(blk: &once fn()) {
 }
 
 fn main() {
-    let x = arc::ARC(true);
+    let x = arc::Arc::new(true);
     do foo {
         assert!(*x.get());
         util::ignore(x);
diff --git a/src/test/run-pass/trait-bounds-in-arc.rs b/src/test/run-pass/trait-bounds-in-arc.rs
index a3b2ea02db3..0677b63b5aa 100644
--- a/src/test/run-pass/trait-bounds-in-arc.rs
+++ b/src/test/run-pass/trait-bounds-in-arc.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// Tests that a heterogeneous list of existential types can be put inside an ARC
+// Tests that a heterogeneous list of existential types can be put inside an Arc
 // and shared between tasks as long as all types fulfill Freeze+Send.
 
 // xfail-fast
@@ -64,7 +64,7 @@ fn main() {
     let dogge1 = Dogge { bark_decibels: 100, tricks_known: 42, name: ~"alan_turing" };
     let dogge2 = Dogge { bark_decibels: 55,  tricks_known: 11, name: ~"albert_einstein" };
     let fishe = Goldfyshe { swim_speed: 998, name: ~"alec_guinness" };
-    let arc = arc::ARC(~[~catte  as ~Pet:Freeze+Send,
+    let arc = arc::Arc::new(~[~catte  as ~Pet:Freeze+Send,
                          ~dogge1 as ~Pet:Freeze+Send,
                          ~fishe  as ~Pet:Freeze+Send,
                          ~dogge2 as ~Pet:Freeze+Send]);
@@ -82,21 +82,21 @@ fn main() {
     p3.recv();
 }
 
-fn check_legs(arc: arc::ARC<~[~Pet:Freeze+Send]>) {
+fn check_legs(arc: arc::Arc<~[~Pet:Freeze+Send]>) {
     let mut legs = 0;
     for arc.get().iter().advance |pet| {
         legs += pet.num_legs();
     }
     assert!(legs == 12);
 }
-fn check_names(arc: arc::ARC<~[~Pet:Freeze+Send]>) {
+fn check_names(arc: arc::Arc<~[~Pet:Freeze+Send]>) {
     for arc.get().iter().advance |pet| {
         do pet.name |name| {
             assert!(name[0] == 'a' as u8 && name[1] == 'l' as u8);
         }
     }
 }
-fn check_pedigree(arc: arc::ARC<~[~Pet:Freeze+Send]>) {
+fn check_pedigree(arc: arc::Arc<~[~Pet:Freeze+Send]>) {
     for arc.get().iter().advance |pet| {
         assert!(pet.of_good_pedigree());
     }
diff --git a/src/test/run-pass/writealias.rs b/src/test/run-pass/writealias.rs
index 8f990e07015..2db954d27c1 100644
--- a/src/test/run-pass/writealias.rs
+++ b/src/test/run-pass/writealias.rs
@@ -17,7 +17,7 @@ fn f(p: &mut Point) { p.z = 13; }
 
 pub fn main() {
     unsafe {
-        let x = Some(unstable::sync::exclusive(true));
+        let x = Some(unstable::sync::Exclusive::new(true));
         match x {
             Some(ref z) if z.with(|b| *b) => {
                 do z.with |b| { assert!(*b); }