about summary refs log tree commit diff
path: root/src/libsync
diff options
context:
space:
mode:
author	Alex Crichton <alex@alexcrichton.com>	2014-07-02 11:08:21 -0700
committer	Alex Crichton <alex@alexcrichton.com>	2014-07-02 11:08:21 -0700
commit	ff1dd44b40a7243f43a8d32ba8bd6026197c320b (patch)
tree	4460cbf0a917a289d1d3744d9645c5ab131ea9df /src/libsync
parent	aa1163b92de7717eb7c5eba002b4012e0574a7fe (diff)
parent	ca2778ede7c21efc3cf2e4e1152875ec09360770 (diff)
download	rust-ff1dd44b40a7243f43a8d32ba8bd6026197c320b.tar.gz
	rust-ff1dd44b40a7243f43a8d32ba8bd6026197c320b.zip
Merge remote-tracking branch 'origin/master' into 0.11.0-release
Conflicts:
	src/libstd/lib.rs
Diffstat (limited to 'src/libsync')
-rw-r--r--	src/libsync/atomics.rs	4
-rw-r--r--	src/libsync/comm/duplex.rs	6
-rw-r--r--	src/libsync/comm/mod.rs	29
-rw-r--r--	src/libsync/comm/select.rs	7
-rw-r--r--	src/libsync/deque.rs	17
-rw-r--r--	src/libsync/lock.rs	4
-rw-r--r--	src/libsync/mpsc_intrusive.rs	2
-rw-r--r--	src/libsync/mpsc_queue.rs	4
-rw-r--r--	src/libsync/raw.rs	2
-rw-r--r--	src/libsync/spsc_queue.rs	6
10 files changed, 60 insertions, 21 deletions
diff --git a/src/libsync/atomics.rs b/src/libsync/atomics.rs
index 8ce17b9bf3b..195efb844a7 100644
--- a/src/libsync/atomics.rs
+++ b/src/libsync/atomics.rs
@@ -143,7 +143,7 @@ impl<T> AtomicOption<T> {
     /// Remove the value, leaving the `AtomicOption` empty.
     #[inline]
     pub fn take(&self, order: Ordering) -> Option<Box<T>> {
-        unsafe { self.swap(mem::transmute(0), order) }
+        unsafe { self.swap(mem::transmute(0u), order) }
     }
 
     /// Replace an empty value with a non-empty value.
@@ -155,7 +155,7 @@ impl<T> AtomicOption<T> {
     pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
         unsafe {
             let val = mem::transmute(val);
-            let expected = mem::transmute(0);
+            let expected = mem::transmute(0u);
             let oldval = self.p.compare_and_swap(expected, val, order);
             if oldval == expected {
                 None
diff --git a/src/libsync/comm/duplex.rs b/src/libsync/comm/duplex.rs
index 3840e55bb42..44dd63cbf6c 100644
--- a/src/libsync/comm/duplex.rs
+++ b/src/libsync/comm/duplex.rs
@@ -15,6 +15,10 @@ Higher level communication abstractions.
 */
 
 #![allow(missing_doc)]
+#![deprecated = "This type is replaced by having a pair of channels. This type \
+                 is not fully composable with other channels in terms of \
+                 selection or possible semantics on a duplex stream. It will be removed \
+                 soon"]
 
 use core::prelude::*;
 
@@ -64,7 +68,7 @@ mod test {
         let (left, right) = duplex();
 
         left.send("abc".to_string());
-        right.send(123);
+        right.send(123i);
 
         assert!(left.recv() == 123);
         assert!(right.recv() == "abc".to_string());
diff --git a/src/libsync/comm/mod.rs b/src/libsync/comm/mod.rs
index 3e8f4eef370..6c09a021c43 100644
--- a/src/libsync/comm/mod.rs
+++ b/src/libsync/comm/mod.rs
@@ -370,6 +370,7 @@ static RESCHED_FREQ: int = 256;
 
 /// The receiving-half of Rust's channel type. This half can only be owned by
 /// one task
+#[unstable]
 pub struct Receiver<T> {
     inner: Unsafe<Flavor<T>>,
     receives: Cell<uint>,
@@ -380,12 +381,14 @@ pub struct Receiver<T> {
 /// An iterator over messages on a receiver, this iterator will block
 /// whenever `next` is called, waiting for a new message, and `None` will be
 /// returned when the corresponding channel has hung up.
+#[unstable]
 pub struct Messages<'a, T> {
     rx: &'a Receiver<T>
 }
 
 /// The sending-half of Rust's asynchronous channel type. This half can only be
 /// owned by one task, but it can be cloned to send to other tasks.
+#[unstable]
 pub struct Sender<T> {
     inner: Unsafe<Flavor<T>>,
     sends: Cell<uint>,
@@ -395,6 +398,7 @@ pub struct Sender<T> {
 
 /// The sending-half of Rust's synchronous channel type. This half can only be
 /// owned by one task, but it can be cloned to send to other tasks.
+#[unstable = "this type may be renamed, but it will always exist"]
 pub struct SyncSender<T> {
     inner: Arc<Unsafe<sync::Packet<T>>>,
     // can't share in an arc
@@ -404,6 +408,7 @@ pub struct SyncSender<T> {
 /// This enumeration is the list of the possible reasons that try_recv could not
 /// return data when called.
 #[deriving(PartialEq, Clone, Show)]
+#[experimental = "this is likely to be removed in changing try_recv()"]
 pub enum TryRecvError {
     /// This channel is currently empty, but the sender(s) have not yet
     /// disconnected, so data may yet become available.
@@ -416,6 +421,7 @@ pub enum TryRecvError {
 /// This enumeration is the list of the possible error outcomes for the
 /// `SyncSender::try_send` method.
 #[deriving(PartialEq, Clone, Show)]
+#[experimental = "this is likely to be removed in changing try_send()"]
 pub enum TrySendError<T> {
     /// The data could not be sent on the channel because it would require that
     /// the callee block to send the data.
@@ -478,6 +484,7 @@ impl<T> UnsafeFlavor<T> for Receiver<T> {
 /// // Let's see what that answer was
 /// println!("{}", rx.recv());
 /// ```
+#[unstable]
 pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
     let a = Arc::new(Unsafe::new(oneshot::Packet::new()));
     (Sender::new(Oneshot(a.clone())), Receiver::new(Oneshot(a)))
@@ -514,6 +521,8 @@ pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
 /// assert_eq!(rx.recv(), 1i);
 /// assert_eq!(rx.recv(), 2i);
 /// ```
+#[unstable = "this function may be renamed to more accurately reflect the type \
+              of channel that it is creating"]
 pub fn sync_channel<T: Send>(bound: uint) -> (SyncSender<T>, Receiver<T>) {
     let a = Arc::new(Unsafe::new(sync::Packet::new(bound)));
     (SyncSender::new(a.clone()), Receiver::new(Sync(a)))
@@ -547,6 +556,8 @@ impl<T: Send> Sender<T> {
     ///
     /// The purpose of this functionality is to propagate failure among tasks.
     /// If failure is not desired, then consider using the `send_opt` method
+    #[experimental = "this function is being considered candidate for removal \
+                      to adhere to the general guidelines of rust"]
     pub fn send(&self, t: T) {
         if self.send_opt(t).is_err() {
             fail!("sending on a closed channel");
@@ -583,6 +594,7 @@ impl<T: Send> Sender<T> {
     /// drop(rx);
     /// assert_eq!(tx.send_opt(1i), Err(1));
     /// ```
+    #[unstable = "this function may be renamed to send() in the future"]
     pub fn send_opt(&self, t: T) -> Result<(), T> {
         // In order to prevent starvation of other tasks in situations where
         // a task sends repeatedly without ever receiving, we occasionally
@@ -638,6 +650,7 @@ impl<T: Send> Sender<T> {
     }
 }
 
+#[unstable]
 impl<T: Send> Clone for Sender<T> {
     fn clone(&self) -> Sender<T> {
         let (packet, sleeper) = match *unsafe { self.inner() } {
@@ -719,6 +732,8 @@ impl<T: Send> SyncSender<T> {
     /// If failure is not desired, you can achieve the same semantics with the
     /// `SyncSender::send_opt` method which will not fail if the receiver
     /// disconnects.
+    #[experimental = "this function is being considered candidate for removal \
+                      to adhere to the general guidelines of rust"]
     pub fn send(&self, t: T) {
         if self.send_opt(t).is_err() {
             fail!("sending on a closed channel");
@@ -736,6 +751,7 @@ impl<T: Send> SyncSender<T> {
     /// # Failure
     ///
     /// This function cannot fail.
+    #[unstable = "this function may be renamed to send() in the future"]
     pub fn send_opt(&self, t: T) -> Result<(), T> {
         unsafe { (*self.inner.get()).send(t) }
     }
@@ -753,11 +769,14 @@ impl<T: Send> SyncSender<T> {
     /// # Failure
     ///
     /// This function cannot fail
+    #[unstable = "the return type of this function is candidate for \
+                  modification"]
     pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
         unsafe { (*self.inner.get()).try_send(t) }
     }
 }
 
+#[unstable]
 impl<T: Send> Clone for SyncSender<T> {
     fn clone(&self) -> SyncSender<T> {
         unsafe { (*self.inner.get()).clone_chan(); }
@@ -800,6 +819,8 @@ impl<T: Send> Receiver<T> {
     ///
     /// * If blocking is not desired, then the `try_recv` method will attempt to
     ///   peek at a value on this receiver.
+    #[experimental = "this function is being considered candidate for removal \
+                      to adhere to the general guidelines of rust"]
     pub fn recv(&self) -> T {
         match self.recv_opt() {
             Ok(t) => t,
@@ -817,6 +838,7 @@ impl<T: Send> Receiver<T> {
     /// block on a receiver.
     ///
     /// This function cannot fail.
+    #[unstable = "the return type of this function may be altered"]
     pub fn try_recv(&self) -> Result<T, TryRecvError> {
         // If a thread is spinning in try_recv, we should take the opportunity
         // to reschedule things occasionally. See notes above in scheduling on
@@ -881,6 +903,7 @@ impl<T: Send> Receiver<T> {
     ///
     /// If the channel has hung up, then `Err` is returned. Otherwise `Ok` of
     /// the value found on the receiver is returned.
+    #[unstable = "this function may be renamed to recv()"]
     pub fn recv_opt(&self) -> Result<T, ()> {
         loop {
             let new_port = match *unsafe { self.inner() } {
@@ -917,6 +940,7 @@ impl<T: Send> Receiver<T> {
 
     /// Returns an iterator which will block waiting for messages, but never
     /// `fail!`. It will return `None` when the channel has hung up.
+    #[unstable]
     pub fn iter<'a>(&'a self) -> Messages<'a, T> {
         Messages { rx: self }
     }
@@ -1009,6 +1033,7 @@ impl<T: Send> select::Packet for Receiver<T> {
     }
 }
 
+#[unstable]
 impl<'a, T: Send> Iterator<T> for Messages<'a, T> {
     fn next(&mut self) -> Option<T> { self.rx.recv_opt().ok() }
 }
@@ -1543,7 +1568,7 @@ mod test {
         let (tx, rx) = channel();
         let (cdone, pdone) = channel();
         let t = Thread::start(proc() {
-            let mut hits = 0;
+            let mut hits = 0u;
             while hits < 10 {
                 match rx.try_recv() {
                     Ok(()) => { hits += 1; }
@@ -1993,7 +2018,7 @@ mod sync_tests {
         let (tx, rx) = sync_channel::<()>(0);
         let (cdone, pdone) = channel();
         let t = Thread::start(proc() {
-            let mut hits = 0;
+            let mut hits = 0u;
             while hits < 10 {
                 match rx.try_recv() {
                     Ok(()) => { hits += 1; }
diff --git a/src/libsync/comm/select.rs b/src/libsync/comm/select.rs
index 8d56f9a003b..230bca624f5 100644
--- a/src/libsync/comm/select.rs
+++ b/src/libsync/comm/select.rs
@@ -44,6 +44,13 @@
 //! ```
 
 #![allow(dead_code)]
+#![experimental = "This implementation, while likely sufficient, is unsafe and \
+                   likely to be error prone. At some point in the future this \
+                   module will likely be replaced, and it is currently \
+                   unknown how much API breakage that will cause. The ability \
+                   to select over a number of channels will remain forever, \
+                   but no guarantees beyond this are being made"]
+
 
 use core::prelude::*;
 
diff --git a/src/libsync/deque.rs b/src/libsync/deque.rs
index 18608a0a370..8d2192aeb53 100644
--- a/src/libsync/deque.rs
+++ b/src/libsync/deque.rs
@@ -30,15 +30,15 @@
 //!     let (mut worker, mut stealer) = pool.deque();
 //!
 //!     // Only the worker may push/pop
-//!     worker.push(1);
+//!     worker.push(1i);
 //!     worker.pop();
 //!
 //!     // Stealers take data from the other end of the deque
-//!     worker.push(1);
+//!     worker.push(1i);
 //!     stealer.steal();
 //!
 //!     // Stealers can be cloned to have many stealers stealing in parallel
-//!     worker.push(1);
+//!     worker.push(1i);
 //!     let mut stealer2 = stealer.clone();
 //!     stealer2.steal();
 
@@ -137,7 +137,7 @@ pub struct BufferPool<T> {
 ///   2. We can certainly avoid bounds checks using *T instead of Vec<T>, although
 ///      LLVM is probably pretty good at doing this already.
 struct Buffer<T> {
-    storage: *T,
+    storage: *const T,
     log_size: uint,
 }
 
@@ -354,7 +354,7 @@ impl<T: Send> Buffer<T> {
         let size = buffer_alloc_size::<T>(log_size);
         let buffer = allocate(size, min_align_of::<T>());
         Buffer {
-            storage: buffer as *T,
+            storage: buffer as *const T,
             log_size: log_size,
         }
     }
@@ -364,7 +364,9 @@ impl<T: Send> Buffer<T> {
     // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
     fn mask(&self) -> int { (1 << self.log_size) - 1 }
 
-    unsafe fn elem(&self, i: int) -> *T { self.storage.offset(i & self.mask()) }
+    unsafe fn elem(&self, i: int) -> *const T {
+        self.storage.offset(i & self.mask())
+    }
 
     // This does not protect against loading duplicate values of the same cell,
     // nor does this clear out the contents contained within. Hence, this is a
@@ -610,7 +612,8 @@ mod tests {
             let s = s.clone();
             let unique_box = box AtomicUint::new(0);
             let thread_box = unsafe {
-                *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
+                *mem::transmute::<&Box<AtomicUint>,
+                                  *const *mut AtomicUint>(&unique_box)
             };
             (Thread::start(proc() {
                 unsafe {
diff --git a/src/libsync/lock.rs b/src/libsync/lock.rs
index dff9fee2b77..1d119bafea1 100644
--- a/src/libsync/lock.rs
+++ b/src/libsync/lock.rs
@@ -158,7 +158,7 @@ impl<'a> Condvar<'a> {
 /// ```
 /// use sync::{Mutex, Arc};
 ///
-/// let mutex = Arc::new(Mutex::new(1));
+/// let mutex = Arc::new(Mutex::new(1i));
 /// let mutex2 = mutex.clone();
 ///
 /// spawn(proc() {
@@ -487,7 +487,7 @@ mod tests {
 
     #[test] #[should_fail]
     fn test_arc_condvar_poison() {
-        let arc = Arc::new(Mutex::new(1));
+        let arc = Arc::new(Mutex::new(1i));
         let arc2 = arc.clone();
         let (tx, rx) = channel();
 
diff --git a/src/libsync/mpsc_intrusive.rs b/src/libsync/mpsc_intrusive.rs
index 6af733ddb4b..2b6886ab7f4 100644
--- a/src/libsync/mpsc_intrusive.rs
+++ b/src/libsync/mpsc_intrusive.rs
@@ -104,7 +104,7 @@ impl<T: Send> Queue<T> {
             mem::transmute(&self.stub)
         };
         let mut next = (*tail).next(atomics::Relaxed);
-        if tail as uint == &self.stub as *DummyNode as uint {
+        if tail as uint == &self.stub as *const DummyNode as uint {
             if next.is_null() {
                 return None;
             }
diff --git a/src/libsync/mpsc_queue.rs b/src/libsync/mpsc_queue.rs
index 4bb0acf580c..ecd37e68880 100644
--- a/src/libsync/mpsc_queue.rs
+++ b/src/libsync/mpsc_queue.rs
@@ -167,8 +167,8 @@ mod tests {
     #[test]
     fn test_full() {
         let q = Queue::new();
-        q.push(box 1);
-        q.push(box 2);
+        q.push(box 1i);
+        q.push(box 2i);
     }
 
     #[test]
diff --git a/src/libsync/raw.rs b/src/libsync/raw.rs
index 35865e65612..26cc0b2c6a2 100644
--- a/src/libsync/raw.rs
+++ b/src/libsync/raw.rs
@@ -890,7 +890,7 @@ mod tests {
         let x2 = x.clone();
         let mut sharedstate = box 0;
         {
-            let ptr: *int = &*sharedstate;
+            let ptr: *const int = &*sharedstate;
             task::spawn(proc() {
                 let sharedstate: &mut int =
                     unsafe { mem::transmute(ptr) };
diff --git a/src/libsync/spsc_queue.rs b/src/libsync/spsc_queue.rs
index a4da1fd2335..2834d404c18 100644
--- a/src/libsync/spsc_queue.rs
+++ b/src/libsync/spsc_queue.rs
@@ -252,8 +252,8 @@ mod test {
     #[test]
     fn drop_full() {
         let q = Queue::new(0);
-        q.push(box 1);
-        q.push(box 2);
+        q.push(box 1i);
+        q.push(box 2i);
     }
 
     #[test]
@@ -284,7 +284,7 @@ mod test {
                 for _ in range(0u, 100000) {
                     loop {
                         match b.pop() {
-                            Some(1) => break,
+                            Some(1i) => break,
                             Some(_) => fail!(),
                             None => {}
                         }