about summary refs log tree commit diff
path: root/src/libstd
diff options
context:
space:
mode:
author    bors <bors@rust-lang.org>  2014-03-24 18:11:51 -0700
committer bors <bors@rust-lang.org>  2014-03-24 18:11:51 -0700
commit    6bf3fca8ff90bbeff8d5c437aa784d0dbf8f9455 (patch)
tree      7fe1f4e9c71ec942f54defdd4b1be123f212804f /src/libstd
parent    bcaaffbe1e1c6a6a3abdabdb4fdaef36358dae33 (diff)
parent    218461d01049242e3544337055b7f6d06943344b (diff)
download  rust-6bf3fca8ff90bbeff8d5c437aa784d0dbf8f9455.tar.gz
          rust-6bf3fca8ff90bbeff8d5c437aa784d0dbf8f9455.zip
auto merge of #12900 : alexcrichton/rust/rewrite-sync, r=brson
* Remove clone-ability from all primitives. All shared state will now come
  from the usage of the primitives being shared, not the primitives being
  inherently shareable. This allows for fewer allocations for stack-allocated
  primitives.
* Add `Mutex<T>` and `RWLock<T>` which are stack-allocated primitives for purely
  wrapping a piece of data
* Remove `RWArc<T>` in favor of `Arc<RWLock<T>>`
* Remove `MutexArc<T>` in favor of `Arc<Mutex<T>>`
* Shuffle around where things are located
  * The `arc` module now only contains `Arc`
  * A new `lock` module contains `Mutex`, `RWLock`, and `Barrier`
  * A new `raw` module contains the primitive implementations of `Semaphore`,
    `Mutex`, and `RWLock`
* The Deref/DerefMut trait was implemented where appropriate
* `CowArc` was removed, the functionality is now part of `Arc` and is tagged
  with `#[experimental]`.
* The crate now has #[deny(missing_doc)]
* `Arc` now supports weak pointers

This is not a large-scale rewrite of the functionality contained within the
`sync` crate, but rather a shuffling of who does what, and a thinner hierarchy of
ownership to allow for better composability.
Diffstat (limited to 'src/libstd')
-rw-r--r--src/libstd/comm/shared.rs2
-rw-r--r--src/libstd/intrinsics.rs1
-rw-r--r--src/libstd/rt/bookkeeping.rs4
-rw-r--r--src/libstd/sync/atomics.rs6
-rw-r--r--src/libstd/unstable/mutex.rs99
-rw-r--r--src/libstd/unstable/sync.rs4
6 files changed, 61 insertions, 55 deletions
diff --git a/src/libstd/comm/shared.rs b/src/libstd/comm/shared.rs
index 8c8ae85e4ea..e8ba9d6e628 100644
--- a/src/libstd/comm/shared.rs
+++ b/src/libstd/comm/shared.rs
@@ -68,7 +68,7 @@ pub enum Failure {
 impl<T: Send> Packet<T> {
     // Creation of a packet *must* be followed by a call to inherit_blocker
     pub fn new() -> Packet<T> {
-        let mut p = Packet {
+        let p = Packet {
             queue: mpsc::Queue::new(),
             cnt: atomics::AtomicInt::new(0),
             steals: 0,
diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs
index 78e3df4b6f8..55e7746a44d 100644
--- a/src/libstd/intrinsics.rs
+++ b/src/libstd/intrinsics.rs
@@ -164,7 +164,6 @@ pub trait TyVisitor {
     fn visit_self(&mut self) -> bool;
 }
 
-
 extern "rust-intrinsic" {
 
     // NB: These intrinsics take unsafe pointers because they mutate aliased
diff --git a/src/libstd/rt/bookkeeping.rs b/src/libstd/rt/bookkeeping.rs
index 5851a6a39c6..932cd7af033 100644
--- a/src/libstd/rt/bookkeeping.rs
+++ b/src/libstd/rt/bookkeeping.rs
@@ -34,7 +34,7 @@ pub fn increment() {
 pub fn decrement() {
     unsafe {
         if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
-            let mut guard = TASK_LOCK.lock();
+            let guard = TASK_LOCK.lock();
             guard.signal();
         }
     }
@@ -44,7 +44,7 @@ pub fn decrement() {
 /// the entry points of native programs
 pub fn wait_for_other_tasks() {
     unsafe {
-        let mut guard = TASK_LOCK.lock();
+        let guard = TASK_LOCK.lock();
         while TASK_COUNT.load(atomics::SeqCst) > 0 {
             guard.wait();
         }
diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs
index 32e0ec3ad9d..d5f6fac2296 100644
--- a/src/libstd/sync/atomics.rs
+++ b/src/libstd/sync/atomics.rs
@@ -39,8 +39,7 @@
 //!
 //! A simple spinlock:
 //!
-//! ```ignore
-//! # // FIXME: Needs PR #12430
+//! ```
 //! extern crate sync;
 //!
 //! use sync::Arc;
@@ -68,8 +67,7 @@
 //!
 //! Transferring a heap object with `AtomicOption`:
 //!
-//! ```ignore
-//! # // FIXME: Needs PR #12430
+//! ```
 //! extern crate sync;
 //!
 //! use sync::Arc;
diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs
index 2a3c8963980..9802271e28f 100644
--- a/src/libstd/unstable/mutex.rs
+++ b/src/libstd/unstable/mutex.rs
@@ -86,7 +86,7 @@ pub struct NativeMutex {
 /// then.
 #[must_use]
 pub struct LockGuard<'a> {
-    priv lock: &'a mut StaticNativeMutex
+    priv lock: &'a StaticNativeMutex
 }
 
 pub static NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
@@ -106,6 +106,7 @@ impl StaticNativeMutex {
     /// already hold the lock.
     ///
     /// # Example
+    ///
     /// ```rust
     /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
     /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
@@ -114,7 +115,7 @@ impl StaticNativeMutex {
     ///     // critical section...
     /// } // automatically unlocked in `_guard`'s destructor
     /// ```
-    pub unsafe fn lock<'a>(&'a mut self) -> LockGuard<'a> {
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
         self.inner.lock();
 
         LockGuard { lock: self }
@@ -122,7 +123,7 @@ impl StaticNativeMutex {
 
     /// Attempts to acquire the lock. The value returned is `Some` if
     /// the attempt succeeded.
-    pub unsafe fn trylock<'a>(&'a mut self) -> Option<LockGuard<'a>> {
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
         if self.inner.trylock() {
             Some(LockGuard { lock: self })
         } else {
@@ -134,7 +135,7 @@ impl StaticNativeMutex {
     ///
     /// These needs to be paired with a call to `.unlock_noguard`. Prefer using
     /// `.lock`.
-    pub unsafe fn lock_noguard(&mut self) { self.inner.lock() }
+    pub unsafe fn lock_noguard(&self) { self.inner.lock() }
 
     /// Attempts to acquire the lock without creating a
     /// `LockGuard`. The value returned is whether the lock was
@@ -142,28 +143,28 @@ impl StaticNativeMutex {
     ///
     /// If `true` is returned, this needs to be paired with a call to
     /// `.unlock_noguard`. Prefer using `.trylock`.
-    pub unsafe fn trylock_noguard(&mut self) -> bool {
+    pub unsafe fn trylock_noguard(&self) -> bool {
         self.inner.trylock()
     }
 
     /// Unlocks the lock. This assumes that the current thread already holds the
     /// lock.
-    pub unsafe fn unlock_noguard(&mut self) { self.inner.unlock() }
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }
 
     /// Block on the internal condition variable.
     ///
     /// This function assumes that the lock is already held. Prefer
     /// using `LockGuard.wait` since that guarantees that the lock is
     /// held.
-    pub unsafe fn wait_noguard(&mut self) { self.inner.wait() }
+    pub unsafe fn wait_noguard(&self) { self.inner.wait() }
 
     /// Signals a thread in `wait` to wake up
-    pub unsafe fn signal_noguard(&mut self) { self.inner.signal() }
+    pub unsafe fn signal_noguard(&self) { self.inner.signal() }
 
     /// This function is especially unsafe because there are no guarantees made
     /// that no other thread is currently holding the lock or waiting on the
     /// condition variable contained inside.
-    pub unsafe fn destroy(&mut self) { self.inner.destroy() }
+    pub unsafe fn destroy(&self) { self.inner.destroy() }
 }
 
 impl NativeMutex {
@@ -190,13 +191,13 @@ impl NativeMutex {
     ///     } // automatically unlocked in `_guard`'s destructor
     /// }
     /// ```
-    pub unsafe fn lock<'a>(&'a mut self) -> LockGuard<'a> {
+    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
         self.inner.lock()
     }
 
     /// Attempts to acquire the lock. The value returned is `Some` if
     /// the attempt succeeded.
-    pub unsafe fn trylock<'a>(&'a mut self) -> Option<LockGuard<'a>> {
+    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
         self.inner.trylock()
     }
 
@@ -204,7 +205,7 @@ impl NativeMutex {
     ///
     /// These needs to be paired with a call to `.unlock_noguard`. Prefer using
     /// `.lock`.
-    pub unsafe fn lock_noguard(&mut self) { self.inner.lock_noguard() }
+    pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }
 
     /// Attempts to acquire the lock without creating a
     /// `LockGuard`. The value returned is whether the lock was
@@ -212,23 +213,23 @@ impl NativeMutex {
     ///
     /// If `true` is returned, this needs to be paired with a call to
     /// `.unlock_noguard`. Prefer using `.trylock`.
-    pub unsafe fn trylock_noguard(&mut self) -> bool {
+    pub unsafe fn trylock_noguard(&self) -> bool {
         self.inner.trylock_noguard()
     }
 
     /// Unlocks the lock. This assumes that the current thread already holds the
     /// lock.
-    pub unsafe fn unlock_noguard(&mut self) { self.inner.unlock_noguard() }
+    pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }
 
     /// Block on the internal condition variable.
     ///
     /// This function assumes that the lock is already held. Prefer
     /// using `LockGuard.wait` since that guarantees that the lock is
     /// held.
-    pub unsafe fn wait_noguard(&mut self) { self.inner.wait_noguard() }
+    pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }
 
     /// Signals a thread in `wait` to wake up
-    pub unsafe fn signal_noguard(&mut self) { self.inner.signal_noguard() }
+    pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
 }
 
 impl Drop for NativeMutex {
@@ -239,12 +240,12 @@ impl Drop for NativeMutex {
 
 impl<'a> LockGuard<'a> {
     /// Block on the internal condition variable.
-    pub unsafe fn wait(&mut self) {
+    pub unsafe fn wait(&self) {
         self.lock.wait_noguard()
     }
 
     /// Signals a thread in `wait` to wake up.
-    pub unsafe fn signal(&mut self) {
+    pub unsafe fn signal(&self) {
         self.lock.signal_noguard()
     }
 }
@@ -262,6 +263,8 @@ mod imp {
     use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                    pthread_mutex_t, pthread_cond_t};
     use mem;
+    use ty::Unsafe;
+    use kinds::marker;
 
     type pthread_mutexattr_t = libc::c_void;
     type pthread_condattr_t = libc::c_void;
@@ -369,40 +372,46 @@ mod imp {
     }
 
     pub struct Mutex {
-        priv lock: pthread_mutex_t,
-        priv cond: pthread_cond_t,
+        priv lock: Unsafe<pthread_mutex_t>,
+        priv cond: Unsafe<pthread_cond_t>,
     }
 
     pub static MUTEX_INIT: Mutex = Mutex {
-        lock: PTHREAD_MUTEX_INITIALIZER,
-        cond: PTHREAD_COND_INITIALIZER,
+        lock: Unsafe {
+            value: PTHREAD_MUTEX_INITIALIZER,
+            marker1: marker::InvariantType,
+        },
+        cond: Unsafe {
+            value: PTHREAD_COND_INITIALIZER,
+            marker1: marker::InvariantType,
+        },
     };
 
     impl Mutex {
         pub unsafe fn new() -> Mutex {
-            let mut m = Mutex {
-                lock: mem::init(),
-                cond: mem::init(),
+            let m = Mutex {
+                lock: Unsafe::new(mem::init()),
+                cond: Unsafe::new(mem::init()),
             };
 
-            pthread_mutex_init(&mut m.lock, 0 as *libc::c_void);
-            pthread_cond_init(&mut m.cond, 0 as *libc::c_void);
+            pthread_mutex_init(m.lock.get(), 0 as *libc::c_void);
+            pthread_cond_init(m.cond.get(), 0 as *libc::c_void);
 
             return m;
         }
 
-        pub unsafe fn lock(&mut self) { pthread_mutex_lock(&mut self.lock); }
-        pub unsafe fn unlock(&mut self) { pthread_mutex_unlock(&mut self.lock); }
-        pub unsafe fn signal(&mut self) { pthread_cond_signal(&mut self.cond); }
-        pub unsafe fn wait(&mut self) {
-            pthread_cond_wait(&mut self.cond, &mut self.lock);
+        pub unsafe fn lock(&self) { pthread_mutex_lock(self.lock.get()); }
+        pub unsafe fn unlock(&self) { pthread_mutex_unlock(self.lock.get()); }
+        pub unsafe fn signal(&self) { pthread_cond_signal(self.cond.get()); }
+        pub unsafe fn wait(&self) {
+            pthread_cond_wait(self.cond.get(), self.lock.get());
         }
-        pub unsafe fn trylock(&mut self) -> bool {
-            pthread_mutex_trylock(&mut self.lock) == 0
+        pub unsafe fn trylock(&self) -> bool {
+            pthread_mutex_trylock(self.lock.get()) == 0
         }
-        pub unsafe fn destroy(&mut self) {
-            pthread_mutex_destroy(&mut self.lock);
-            pthread_cond_destroy(&mut self.cond);
+        pub unsafe fn destroy(&self) {
+            pthread_mutex_destroy(self.lock.get());
+            pthread_cond_destroy(self.cond.get());
         }
     }
 
@@ -454,37 +463,37 @@ mod imp {
                 cond: atomics::AtomicUint::new(init_cond()),
             }
         }
-        pub unsafe fn lock(&mut self) {
+        pub unsafe fn lock(&self) {
             EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION)
         }
-        pub unsafe fn trylock(&mut self) -> bool {
+        pub unsafe fn trylock(&self) -> bool {
             TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0
         }
-        pub unsafe fn unlock(&mut self) {
+        pub unsafe fn unlock(&self) {
             LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION)
         }
 
-        pub unsafe fn wait(&mut self) {
+        pub unsafe fn wait(&self) {
             self.unlock();
             WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE);
             self.lock();
         }
 
-        pub unsafe fn signal(&mut self) {
+        pub unsafe fn signal(&self) {
             assert!(SetEvent(self.getcond() as HANDLE) != 0);
         }
 
         /// This function is especially unsafe because there are no guarantees made
         /// that no other thread is currently holding the lock or waiting on the
         /// condition variable contained inside.
-        pub unsafe fn destroy(&mut self) {
+        pub unsafe fn destroy(&self) {
             let lock = self.lock.swap(0, atomics::SeqCst);
             let cond = self.cond.swap(0, atomics::SeqCst);
             if lock != 0 { free_lock(lock) }
             if cond != 0 { free_cond(cond) }
         }
 
-        unsafe fn getlock(&mut self) -> *mut c_void {
+        unsafe fn getlock(&self) -> *mut c_void {
             match self.lock.load(atomics::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
@@ -498,7 +507,7 @@ mod imp {
             return self.lock.load(atomics::SeqCst) as *mut c_void;
         }
 
-        unsafe fn getcond(&mut self) -> *mut c_void {
+        unsafe fn getcond(&self) -> *mut c_void {
             match self.cond.load(atomics::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index de004f0af3e..367967b8e67 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -79,7 +79,7 @@ impl<T:Send> Exclusive<T> {
     #[inline]
     pub unsafe fn hold_and_signal(&self, f: |x: &mut T|) {
         let rec = self.x.get();
-        let mut guard = (*rec).lock.lock();
+        let guard = (*rec).lock.lock();
         if (*rec).failed {
             fail!("Poisoned Exclusive::new - another task failed inside!");
         }
@@ -92,7 +92,7 @@ impl<T:Send> Exclusive<T> {
     #[inline]
     pub unsafe fn hold_and_wait(&self, f: |x: &T| -> bool) {
         let rec = self.x.get();
-        let mut l = (*rec).lock.lock();
+        let l = (*rec).lock.lock();
         if (*rec).failed {
             fail!("Poisoned Exclusive::new - another task failed inside!");
         }