Diffstat (limited to 'src/libstd')
 src/libstd/comm.rs             |  4 ++--
 src/libstd/rt/kill.rs          |  4 ++--
 src/libstd/rt/message_queue.rs |  4 ++--
 src/libstd/rt/sleeper_list.rs  |  4 ++--
 src/libstd/rt/uv/uvio.rs       |  4 ++--
 src/libstd/rt/work_queue.rs    |  4 ++--
 src/libstd/task/mod.rs         |  2 +-
 src/libstd/task/spawn.rs       | 10 +++++-----
 src/libstd/unstable/sync.rs    | 84 ++++++++++++++++++++-----------------------
 src/libstd/vec.rs              |  6 +++---
 10 files changed, 63 insertions(+), 63 deletions(-)
diff --git a/src/libstd/comm.rs b/src/libstd/comm.rs
index b9dacc142ce..9fe6aa57958 100644
--- a/src/libstd/comm.rs
+++ b/src/libstd/comm.rs
@@ -22,7 +22,7 @@ use option::{Option, Some, None};
 use uint;
 use vec::OwnedVector;
 use util::replace;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rtcomm = rt::comm;
 use rt;
 
@@ -228,7 +228,7 @@ impl<T: Send> SharedChan<T> {
     pub fn new(c: Chan<T>) -> SharedChan<T> {
         let Chan { inner } = c;
         let c = match inner {
-            Left(c) => Left(exclusive(c)),
+            Left(c) => Left(Exclusive::new(c)),
             Right(c) => Right(rtcomm::SharedChan::new(c))
         };
         SharedChan { inner: c }
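The comm.rs hunk is mechanical, but it shows the convention this commit rolls out across libstd: constructors move from free functions (exclusive(c)) to associated functions (Exclusive::new(c)). A minimal sketch of the two styles in today's Rust, with a hypothetical Queue type standing in for Exclusive:

    // Old convention: a free function doubling as the constructor.
    pub struct Queue {
        items: Vec<u32>,
    }

    pub fn queue() -> Queue {
        Queue { items: Vec::new() }
    }

    // New convention adopted by this commit: an associated `new`.
    impl Queue {
        pub fn new() -> Queue {
            Queue { items: Vec::new() }
        }
    }

    fn main() {
        let _old_style = queue();      // the form the diff removes
        let _new_style = Queue::new(); // the form every call site migrates to
    }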
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index cfd8e46dfdb..2bf4543df50 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -57,7 +57,7 @@ struct KillHandleInner {
 
     // Shared state between task and children for exit code propagation. These
     // are here so we can re-use the kill handle to implement watched children
-    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
+    // tasks. Using a separate Arc-like would introduce extra atomic adds/subs
     // into common spawn paths, so this is just for speed.
 
     // Locklessly accessed; protected by the enclosing refcount's barriers.
@@ -217,7 +217,7 @@ impl KillHandle {
             // Exit code propagation fields
             any_child_failed: false,
             child_tombstones: None,
-            graveyard_lock:   LittleLock(),
+            graveyard_lock:   LittleLock::new(),
         }));
         (handle, flag_clone)
     }
diff --git a/src/libstd/rt/message_queue.rs b/src/libstd/rt/message_queue.rs
index 6ef07577415..8518ddaeae1 100644
--- a/src/libstd/rt/message_queue.rs
+++ b/src/libstd/rt/message_queue.rs
@@ -16,7 +16,7 @@ use kinds::Send;
 use vec::OwnedVector;
 use cell::Cell;
 use option::*;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use clone::Clone;
 
 pub struct MessageQueue<T> {
@@ -27,7 +27,7 @@ pub struct MessageQueue<T> {
 impl<T: Send> MessageQueue<T> {
     pub fn new() -> MessageQueue<T> {
         MessageQueue {
-            queue: ~exclusive(~[])
+            queue: ~Exclusive::new(~[])
         }
     }
 
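MessageQueue wraps its buffer in ~Exclusive::new(~[]) so that clones on different scheduler threads share one locked vector. A rough modern analogue of that shape, assuming Arc<Mutex<Vec<T>>> in place of ~Exclusive<~[T]> (names here are illustrative, not the runtime's API):

    use std::sync::{Arc, Mutex};

    struct MessageQueue<T> {
        queue: Arc<Mutex<Vec<T>>>,
    }

    impl<T: Send> MessageQueue<T> {
        fn new() -> MessageQueue<T> {
            MessageQueue { queue: Arc::new(Mutex::new(Vec::new())) }
        }

        fn push(&self, value: T) {
            self.queue.lock().unwrap().push(value);
        }

        fn pop(&self) -> Option<T> {
            self.queue.lock().unwrap().pop()
        }
    }

    // Cloning a handle shares the underlying queue, as Exclusive's Clone does.
    impl<T> Clone for MessageQueue<T> {
        fn clone(&self) -> Self {
            MessageQueue { queue: Arc::clone(&self.queue) }
        }
    }

    fn main() {
        let q = MessageQueue::new();
        q.push(1u32);
        assert_eq!(q.pop(), Some(1));
    }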
diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs
index 3d6e9ef5635..d327023de97 100644
--- a/src/libstd/rt/sleeper_list.rs
+++ b/src/libstd/rt/sleeper_list.rs
@@ -15,7 +15,7 @@ use container::Container;
 use vec::OwnedVector;
 use option::{Option, Some, None};
 use cell::Cell;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rt::sched::SchedHandle;
 use clone::Clone;
 
@@ -26,7 +26,7 @@ pub struct SleeperList {
 impl SleeperList {
     pub fn new() -> SleeperList {
         SleeperList {
-            stack: ~exclusive(~[])
+            stack: ~Exclusive::new(~[])
         }
     }
 
diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs
index fc4a668bdf6..5397b5f2c5c 100644
--- a/src/libstd/rt/uv/uvio.rs
+++ b/src/libstd/rt/uv/uvio.rs
@@ -28,7 +28,7 @@ use rt::io::{standard_error, OtherIoError};
 use rt::tube::Tube;
 use rt::local::Local;
 use str::StrSlice;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 
 #[cfg(test)] use container::Container;
 #[cfg(test)] use uint;
@@ -158,7 +158,7 @@ pub struct UvRemoteCallback {
 
 impl UvRemoteCallback {
     pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback {
-        let exit_flag = exclusive(false);
+        let exit_flag = Exclusive::new(false);
         let exit_flag_clone = exit_flag.clone();
         let async = do AsyncWatcher::new(loop_) |watcher, status| {
             assert!(status.is_none());
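UvRemoteCallback keeps one clone of the exit flag for itself and moves the other into the async callback, so the close path can signal the event-loop thread. A hedged sketch of that shared-exit-flag shape in today's Rust, with Arc<AtomicBool> and a plain thread standing in for Exclusive<bool> and the libuv watcher:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;

    fn main() {
        let exit_flag = Arc::new(AtomicBool::new(false));
        let exit_flag_clone = Arc::clone(&exit_flag);

        let worker = thread::spawn(move || {
            // Stand-in for the AsyncWatcher callback polling its flag.
            while !exit_flag_clone.load(Ordering::Acquire) {
                thread::yield_now();
            }
        });

        // What the close/drop path does: flip the flag, wake the loop.
        exit_flag.store(true, Ordering::Release);
        worker.join().unwrap();
    }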
diff --git a/src/libstd/rt/work_queue.rs b/src/libstd/rt/work_queue.rs
index 00d27744268..24792f3904e 100644
--- a/src/libstd/rt/work_queue.rs
+++ b/src/libstd/rt/work_queue.rs
@@ -11,7 +11,7 @@
 use container::Container;
 use option::*;
 use vec::OwnedVector;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use cell::Cell;
 use kinds::Send;
 use clone::Clone;
@@ -24,7 +24,7 @@ pub struct WorkQueue<T> {
 impl<T: Send> WorkQueue<T> {
     pub fn new() -> WorkQueue<T> {
         WorkQueue {
-            queue: ~exclusive(~[])
+            queue: ~Exclusive::new(~[])
         }
     }
 
diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs
index 1ce8641085b..df927cb6a7a 100644
--- a/src/libstd/task/mod.rs
+++ b/src/libstd/task/mod.rs
@@ -677,7 +677,7 @@ pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
 
 /**
  * A stronger version of unkillable that also inhibits scheduling operations.
- * For use with exclusive ARCs, which use pthread mutexes directly.
+ * For use with exclusive Arcs, which use pthread mutexes directly.
  */
 pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
     use rt::task::Task;
diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs
index a17bb2b1632..61dcc33c629 100644
--- a/src/libstd/task/spawn.rs
+++ b/src/libstd/task/spawn.rs
@@ -91,7 +91,7 @@ use task::unkillable;
 use to_bytes::IterBytes;
 use uint;
 use util;
-use unstable::sync::{Exclusive, exclusive};
+use unstable::sync::Exclusive;
 use rt::{OldTaskContext, TaskContext, SchedulerContext, GlobalContext, context};
 use rt::local::Local;
 use rt::task::Task;
@@ -545,7 +545,7 @@ impl RuntimeGlue {
                             // Main task, doing first spawn ever. Lazily initialise here.
                             let mut members = TaskSet::new();
                             members.insert(OldTask(me));
-                            let tasks = exclusive(Some(TaskGroupData {
+                            let tasks = Exclusive::new(Some(TaskGroupData {
                                 members: members,
                                 descendants: TaskSet::new(),
                             }));
@@ -569,7 +569,7 @@ impl RuntimeGlue {
                         let mut members = TaskSet::new();
                         let my_handle = (*me).death.kill_handle.get_ref().clone();
                         members.insert(NewTask(my_handle));
-                        let tasks = exclusive(Some(TaskGroupData {
+                        let tasks = Exclusive::new(Some(TaskGroupData {
                             members: members,
                             descendants: TaskSet::new(),
                         }));
@@ -596,7 +596,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             (spawner_group.tasks.clone(), ancestors, spawner_group.is_main)
         } else {
             // Child is in a separate group from spawner.
-            let g = exclusive(Some(TaskGroupData {
+            let g = Exclusive::new(Some(TaskGroupData {
                 members:     TaskSet::new(),
                 descendants: TaskSet::new(),
             }));
@@ -605,7 +605,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
                 assert!(new_generation < uint::max_value);
                 // Child's ancestors start with the spawner.
                 // Build a new node in the ancestor list.
-                AncestorList(Some(exclusive(AncestorNode {
+                AncestorList(Some(Exclusive::new(AncestorNode {
                     generation: new_generation,
                     parent_group: spawner_group.tasks.clone(),
                    ancestors: ancestors,
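Both RuntimeGlue and gen_child_taskgroup build an Exclusive<Option<TaskGroupData>>: the slot stays None until the first spawn, then is lazily filled with the member and descendant sets. A sketch of that lazy-init shape under modern stand-ins, with Arc<Mutex<Option<...>>> for the Exclusive, HashSet<u64> for TaskSet, and a hypothetical task id `me`:

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    struct TaskGroupData {
        members: HashSet<u64>,
        descendants: HashSet<u64>,
    }

    type TaskGroupArc = Arc<Mutex<Option<TaskGroupData>>>;

    // Lazily create the group on first spawn, as the main-task path does.
    fn ensure_group(slot: &TaskGroupArc, me: u64) {
        let mut guard = slot.lock().unwrap();
        if guard.is_none() {
            let mut members = HashSet::new();
            members.insert(me);
            *guard = Some(TaskGroupData {
                members,
                descendants: HashSet::new(),
            });
        }
    }

    fn main() {
        let tasks: TaskGroupArc = Arc::new(Mutex::new(None));
        ensure_group(&tasks, 0);
        assert!(tasks.lock().unwrap().is_some());
    }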
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index d4de402a33e..b6fc5b1f662 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -85,7 +85,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
     }
 
     /// Wait until all other handles are dropped, then retrieve the enclosed
-    /// data. See extra::arc::ARC for specific semantics documentation.
+    /// data. See extra::arc::Arc for specific semantics documentation.
     /// If called when the task is already unkillable, unwrap will unkillably
     /// block; otherwise, an unwrapping task can be killed by linked failure.
     pub unsafe fn unwrap(self) -> T {
@@ -146,7 +146,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
                 // If 'put' returns the server end back to us, we were rejected;
                 // someone else was trying to unwrap. Avoid guaranteed deadlock.
                 cast::forget(data);
-                fail!("Another task is already unwrapping this ARC!");
+                fail!("Another task is already unwrapping this Arc!");
             }
         }
     }
@@ -251,15 +251,15 @@ impl Drop for LittleLock {
     }
 }
 
-pub fn LittleLock() -> LittleLock {
-    unsafe {
-        LittleLock {
-            l: rust_create_little_lock()
+impl LittleLock {
+    pub fn new() -> LittleLock {
+        unsafe {
+            LittleLock {
+                l: rust_create_little_lock()
+            }
         }
     }
-}
 
-impl LittleLock {
     #[inline]
     pub unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
         do atomically {
@@ -285,45 +285,45 @@ struct ExData<T> {
  * # Safety note
  *
  * This uses a pthread mutex, not one that's aware of the userspace scheduler.
- * The user of an exclusive must be careful not to invoke any functions that may
+ * The user of an Exclusive must be careful not to invoke any functions that may
  * reschedule the task while holding the lock, or deadlock may result. If you
- * need to block or yield while accessing shared state, use extra::sync::RWARC.
+ * need to block or yield while accessing shared state, use extra::sync::RWArc.
  */
 pub struct Exclusive<T> {
     x: UnsafeAtomicRcBox<ExData<T>>
 }
 
-pub fn exclusive<T:Send>(user_data: T) -> Exclusive<T> {
-    let data = ExData {
-        lock: LittleLock(),
-        failed: false,
-        data: user_data
-    };
-    Exclusive {
-        x: UnsafeAtomicRcBox::new(data)
-    }
-}
-
 impl<T:Send> Clone for Exclusive<T> {
-    // Duplicate an exclusive ARC, as std::arc::clone.
+    // Duplicate an Exclusive, as std::arc::clone does for an Arc.
     fn clone(&self) -> Exclusive<T> {
         Exclusive { x: self.x.clone() }
     }
 }
 
 impl<T:Send> Exclusive<T> {
-    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
+    pub fn new(user_data: T) -> Exclusive<T> {
+        let data = ExData {
+            lock: LittleLock::new(),
+            failed: false,
+            data: user_data
+        };
+        Exclusive {
+            x: UnsafeAtomicRcBox::new(data)
+        }
+    }
+
+    // Exactly like std::arc::MutexArc::access(), but with the LittleLock
     // instead of a proper mutex. Same reason for being unsafe.
     //
     // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
     // accessing the provided condition variable) are prohibited while inside
-    // the exclusive. Supporting that is a work in progress.
+    // the Exclusive. Supporting that is a work in progress.
     #[inline]
     pub unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
         let rec = self.x.get();
         do (*rec).lock.lock {
             if (*rec).failed {
-                fail!("Poisoned exclusive - another task failed inside!");
+                fail!("Poisoned Exclusive::new - another task failed inside!");
             }
             (*rec).failed = true;
             let result = f(&mut (*rec).data);
@@ -341,7 +341,7 @@ impl<T:Send> Exclusive<T> {
 
     pub fn unwrap(self) -> T {
         let Exclusive { x: x } = self;
-        // Someday we might need to unkillably unwrap an exclusive, but not today.
+        // Someday we might need to unkillably unwrap an Exclusive, but not today.
         let inner = unsafe { x.unwrap() };
         let ExData { data: user_data, _ } = inner; // will destroy the LittleLock
         user_data
@@ -360,20 +360,20 @@ mod tests {
     use cell::Cell;
     use comm;
     use option::*;
-    use super::{exclusive, UnsafeAtomicRcBox};
+    use super::{Exclusive, UnsafeAtomicRcBox};
     use task;
     use uint;
     use util;
 
     #[test]
-    fn exclusive_arc() {
+    fn exclusive_new_arc() {
         unsafe {
             let mut futures = ~[];
 
             let num_tasks = 10;
             let count = 10;
 
-            let total = exclusive(~0);
+            let total = Exclusive::new(~0);
 
             for uint::range(0, num_tasks) |_i| {
                 let total = total.clone();
@@ -399,11 +399,11 @@ mod tests {
     }
 
     #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_poison() {
+    fn exclusive_new_poison() {
         unsafe {
-            // Tests that if one task fails inside of an exclusive, subsequent
+            // Tests that if one task fails inside of an Exclusive, subsequent
             // accesses will also fail.
-            let x = exclusive(1);
+            let x = Exclusive::new(1);
             let x2 = x.clone();
             do task::try || {
                 do x2.with |one| {
@@ -466,15 +466,15 @@ mod tests {
     }
 
     #[test]
-    fn exclusive_unwrap_basic() {
+    fn exclusive_new_unwrap_basic() {
         // Unlike the above, also tests no double-freeing of the LittleLock.
-        let x = exclusive(~~"hello");
+        let x = Exclusive::new(~~"hello");
         assert!(x.unwrap() == ~~"hello");
     }
 
     #[test]
-    fn exclusive_unwrap_contended() {
-        let x = exclusive(~~"hello");
+    fn exclusive_new_unwrap_contended() {
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         do task::spawn {
             let x2 = x2.take();
@@ -484,7 +484,7 @@ mod tests {
         assert!(x.unwrap() == ~~"hello");
 
         // Now try the same thing, but with the child task blocking.
-        let x = exclusive(~~"hello");
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let mut res = None;
         let mut builder = task::task();
@@ -499,8 +499,8 @@ mod tests {
     }
 
     #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_unwrap_conflict() {
-        let x = exclusive(~~"hello");
+    fn exclusive_new_unwrap_conflict() {
+        let x = Exclusive::new(~~"hello");
         let x2 = Cell::new(x.clone());
         let mut res = None;
         let mut builder = task::task();
@@ -515,14 +515,14 @@ mod tests {
     }
 
     #[test] #[ignore(cfg(windows))]
-    fn exclusive_unwrap_deadlock() {
+    fn exclusive_new_unwrap_deadlock() {
         // This is not guaranteed to get to the deadlock before being killed,
         // but it will show up sometimes, and if the deadlock were not there,
         // the test would nondeterministically fail.
         let result = do task::try {
-            // a task that has two references to the same exclusive will
+            // a task that has two references to the same Exclusive will
             // deadlock when it unwraps. nothing to be done about that.
-            let x = exclusive(~~"hello");
+            let x = Exclusive::new(~~"hello");
             let x2 = x.clone();
             do task::spawn {
                 for 10.times { task::yield(); } // try to let the unwrapper go
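Two behaviours make Exclusive more than a bare lock: with() poisons the data when a task fails inside the critical section, and unwrap() waits for every other handle to drop, failing if two tasks race to unwrap. Both survive in recognisable form in today's std; a small sketch using Mutex poisoning and Arc::try_unwrap as approximations (not the unstable::sync machinery itself):

    use std::sync::{Arc, Mutex};
    use std::thread;

    fn main() {
        // (1) Poisoning: a panic while the lock is held poisons the mutex,
        // so later accessors see an Err, much like the fail! in with().
        let x = Arc::new(Mutex::new(1));
        let x2 = Arc::clone(&x);
        let _ = thread::spawn(move || {
            let _guard = x2.lock().unwrap();
            panic!("fail inside the lock");
        })
        .join();
        assert!(x.lock().is_err());

        // (2) Unwrapping: Arc::try_unwrap succeeds only for the last
        // handle, echoing Exclusive::unwrap waiting out other references.
        let y = Arc::new(Mutex::new(String::from("hello")));
        let inner = Arc::try_unwrap(y).expect("no other handles remain");
        assert_eq!(inner.into_inner().unwrap(), "hello");
    }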
diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs
index 379deff233c..54057be5268 100644
--- a/src/libstd/vec.rs
+++ b/src/libstd/vec.rs
@@ -2563,9 +2563,9 @@ mod tests {
     #[test]
     fn test_swap_remove_noncopyable() {
         // Tests that we don't accidentally run destructors twice.
-        let mut v = ~[::unstable::sync::exclusive(()),
-                      ::unstable::sync::exclusive(()),
-                      ::unstable::sync::exclusive(())];
+        let mut v = ~[::unstable::sync::Exclusive::new(()),
+                      ::unstable::sync::Exclusive::new(()),
+                      ::unstable::sync::Exclusive::new(())];
         let mut _e = v.swap_remove(0);
         assert_eq!(v.len(), 2);
         _e = v.swap_remove(1);
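The vec.rs test leans on Exclusive values being noncopyable, so a buggy swap_remove that duplicated an element would run its destructor twice. The same property can be checked in today's Rust with a drop-counting type (DROPS and NonCopy are illustrative, not from the diff):

    use std::sync::atomic::{AtomicUsize, Ordering};

    static DROPS: AtomicUsize = AtomicUsize::new(0);

    struct NonCopy;

    impl Drop for NonCopy {
        fn drop(&mut self) {
            DROPS.fetch_add(1, Ordering::SeqCst);
        }
    }

    fn main() {
        let mut v = vec![NonCopy, NonCopy, NonCopy];
        drop(v.swap_remove(0));
        assert_eq!(v.len(), 2);
        drop(v.swap_remove(1));
        assert_eq!(v.len(), 1);
        drop(v.swap_remove(0));
        assert_eq!(v.len(), 0);
        // Each value must have been dropped exactly once.
        assert_eq!(DROPS.load(Ordering::SeqCst), 3);
    }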