| field | value | date / path |
|---|---|---|
| author | Aaron Turon <aturon@mozilla.com> | 2014-08-04 15:42:36 -0700 |
| committer | Aaron Turon <aturon@mozilla.com> | 2014-08-04 16:03:21 -0700 |
| commit | 68bde0a07396efb415d61047c6b2a8183f47ef30 | |
| tree | 2a0d63e3153abbe4f62f15d06ee72c94c7772b2d | /src/libsync |
| parent | 9de20198aedb3c3419ee503755e04bcc198d3a94 | |
stabilize atomics (now atomic)
This commit stabilizes the `std::sync::atomics` module, renaming it to `std::sync::atomic` to match library precedent elsewhere, and tightening up behavior around incorrect memory ordering annotations.

The vast majority of the module is now `stable`. However, the `AtomicOption` type has been deprecated, since it is essentially unused and is not truly a primitive atomic type. It will eventually be replaced by a higher-level abstraction like MVars.

Due to deprecations, this is a:

[breaking-change]
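For users of the module, the change is a path rename plus the `AtomicOption` deprecation. A minimal sketch of the new import path (illustrative code in the Rust of this period, not taken from the commit itself):

```rust
// Previously: use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::Arc;
use std::sync::atomic::{AtomicUint, SeqCst};

fn main() {
    // A counter shared between two tasks; every operation names its ordering.
    let count = Arc::new(AtomicUint::new(0));
    let count2 = count.clone();

    spawn(proc() {
        count2.fetch_add(1, SeqCst);
    });

    count.fetch_add(1, SeqCst);
    println!("count so far: {}", count.load(SeqCst));
}
```

Only the import path changes; the atomic types, the orderings, and the `INIT_ATOMIC_*` constants keep their names.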
Diffstat (limited to 'src/libsync')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libsync/atomic.rs (renamed from src/libsync/atomics.rs) | 17 |
| -rw-r--r-- | src/libsync/comm/oneshot.rs | 32 |
| -rw-r--r-- | src/libsync/comm/shared.rs | 90 |
| -rw-r--r-- | src/libsync/comm/stream.rs | 62 |
| -rw-r--r-- | src/libsync/comm/sync.rs | 12 |
| -rw-r--r-- | src/libsync/deque.rs | 4 |
| -rw-r--r-- | src/libsync/lib.rs | 2 |
| -rw-r--r-- | src/libsync/mpmc_bounded_queue.rs | 2 |
| -rw-r--r-- | src/libsync/mpsc_intrusive.rs | 32 |
| -rw-r--r-- | src/libsync/mpsc_queue.rs | 2 |
| -rw-r--r-- | src/libsync/mutex.rs | 38 |
| -rw-r--r-- | src/libsync/one.rs | 24 |
| -rw-r--r-- | src/libsync/raw.rs | 12 |
| -rw-r--r-- | src/libsync/spsc_queue.rs | 2 |
14 files changed, 166 insertions, 165 deletions
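For context on the deprecation, the pattern `AtomicOption` supported looks roughly like the following sketch, adapted from the module's own documentation example (partly visible in the diff below). The `empty`/`swap`/`take` call sites are reconstructed from the era's API and should be treated as illustrative, not as the exact doc text:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicOption, SeqCst};

fn main() {
    struct BigObject;

    // An atomically swappable, nullable owned pointer shared between tasks.
    let shared = Arc::new(AtomicOption::empty());
    let shared2 = shared.clone();

    spawn(proc() {
        // Publish an owned value; swap returns whatever was previously stored.
        let _previous = shared2.swap(box BigObject, SeqCst);
    });

    // The other side may or may not have sent the object yet.
    match shared.take(SeqCst) {
        Some(_) => println!("got a BigObject from the other task"),
        None => println!("other task hasn't sent it yet"),
    }
}
```

This is the kind of usage the deprecation message expects to migrate to a higher-level abstraction (such as an MVar) rather than a raw atomic primitive.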
diff --git a/src/libsync/atomics.rs b/src/libsync/atomic.rs
index 0be124ad584..101d869451c 100644
--- a/src/libsync/atomics.rs
+++ b/src/libsync/atomic.rs
@@ -41,7 +41,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicUint, SeqCst};
+//! use std::sync::atomic::{AtomicUint, SeqCst};
 //! use std::task::deschedule;
 //!
 //! fn main() {
@@ -67,7 +67,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicOption, SeqCst};
+//! use std::sync::atomic::{AtomicOption, SeqCst};
 //!
 //! fn main() {
 //!     struct BigObject;
@@ -91,7 +91,7 @@
 //! Keep a global count of live tasks:
 //!
 //! ```
-//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
 //!
 //! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
 //!
@@ -106,16 +106,18 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use core::mem;
 
-pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
-pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
-pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
-pub use core::atomics::fence;
+pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
+pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
+pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomic::fence;
 
 /// An atomic, nullable unique pointer
 ///
 /// This can be used as the concurrency primitive for operations that transfer
 /// owned heap objects across tasks.
 #[unsafe_no_drop_flag]
+#[deprecated = "no longer used; will eventually be replaced by a higher-level\
+                concept like MVar"]
 pub struct AtomicOption<T> {
     p: AtomicUint,
 }
@@ -227,4 +229,3 @@ mod test {
         assert!(p.take(SeqCst) == Some(box 2));
     }
 }
-
diff --git a/src/libsync/comm/oneshot.rs b/src/libsync/comm/oneshot.rs
index c9782db5c24..188bea83ac8 100644
--- a/src/libsync/comm/oneshot.rs
+++ b/src/libsync/comm/oneshot.rs
@@ -39,7 +39,7 @@ use core::mem;
 use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};
 
-use atomics;
+use atomic;
 use comm::Receiver;
 
 // Various states you can find a port in.
@@ -49,7 +49,7 @@ static DISCONNECTED: uint = 2;
 
 pub struct Packet<T> {
     // Internal state of the chan/port pair (stores the blocked task as well)
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     // One-shot data slot location
     data: Option<T>,
     // when used for the second time, a oneshot channel must be upgraded, and
@@ -86,7 +86,7 @@ impl<T: Send> Packet<T> {
         Packet {
             data: None,
             upgrade: NothingSent,
-            state: atomics::AtomicUint::new(EMPTY),
+            state: atomic::AtomicUint::new(EMPTY),
         }
     }
 
@@ -100,7 +100,7 @@ impl<T: Send> Packet<T> {
         self.data = Some(t);
         self.upgrade = SendUsed;
 
-        match self.state.swap(DATA, atomics::SeqCst) {
+        match self.state.swap(DATA, atomic::SeqCst) {
             // Sent the data, no one was waiting
             EMPTY => Ok(()),
 
@@ -136,11 +136,11 @@ impl<T: Send> Packet<T> {
     pub fn recv(&mut self) -> Result<T, Failure<T>> {
         // Attempt to not block the task (it's a little expensive). If it looks
         // like we're not empty, then immediately go through to `try_recv`.
-        if self.state.load(atomics::SeqCst) == EMPTY {
+        if self.state.load(atomic::SeqCst) == EMPTY {
             let t: Box<Task> = Local::take();
             t.deschedule(1, |task| {
                 let n = unsafe { task.cast_to_uint() };
-                match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+                match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
                     // Nothing on the channel, we legitimately block
                     EMPTY => Ok(()),
 
@@ -160,7 +160,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Err(Empty),
 
             // We saw some data on the channel, but the channel can be used
@@ -170,7 +170,7 @@ impl<T: Send> Packet<T> {
             // the state changes under our feet we'd rather just see that state
             // change.
             DATA => {
-                self.state.compare_and_swap(DATA, EMPTY, atomics::SeqCst);
+                self.state.compare_and_swap(DATA, EMPTY, atomic::SeqCst);
                 match self.data.take() {
                     Some(data) => Ok(data),
                     None => unreachable!(),
@@ -207,7 +207,7 @@ impl<T: Send> Packet<T> {
         };
         self.upgrade = GoUp(up);
 
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // If the channel is empty or has data on it, then we're good to go.
             // Senders will check the data before the upgrade (in case we
             // plastered over the DATA state).
@@ -223,7 +223,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn drop_chan(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             DATA | DISCONNECTED | EMPTY => {}
 
             // If someone's waiting, we gotta wake them up
@@ -235,7 +235,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn drop_port(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // An empty channel has nothing to do, and a remotely disconnected
             // channel also has nothing to do b/c we're about to run the drop
             // glue
@@ -258,7 +258,7 @@ impl<T: Send> Packet<T> {
     // If Ok, the value is whether this port has data, if Err, then the upgraded
     // port needs to be checked instead of this one.
     pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Ok(false), // Welp, we tried
             DATA => Ok(true), // we have some un-acquired data
             DISCONNECTED if self.data.is_some() => Ok(true), // we have data
@@ -283,7 +283,7 @@ impl<T: Send> Packet<T> {
     // because there is data, or fail because there is an upgrade pending.
     pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
         let n = unsafe { task.cast_to_uint() };
-        match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+        match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
             EMPTY => SelSuccess,
             DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
             DISCONNECTED if self.data.is_some() => {
@@ -317,7 +317,7 @@ impl<T: Send> Packet<T> {
     //
     // The return value indicates whether there's data on this port.
     pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
-        let state = match self.state.load(atomics::SeqCst) {
+        let state = match self.state.load(atomic::SeqCst) {
             // Each of these states means that no further activity will happen
             // with regard to abortion selection
             s @ EMPTY |
@@ -326,7 +326,7 @@ impl<T: Send> Packet<T> {
 
             // If we've got a blocked task, then use an atomic to gain ownership
             // of it (may fail)
-            n => self.state.compare_and_swap(n, EMPTY, atomics::SeqCst)
+            n => self.state.compare_and_swap(n, EMPTY, atomic::SeqCst)
         };
 
         // Now that we've got ownership of our state, figure out what to do
@@ -367,6 +367,6 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.state.load(atomics::SeqCst), DISCONNECTED);
+        assert_eq!(self.state.load(atomic::SeqCst), DISCONNECTED);
     }
 }
diff --git a/src/libsync/comm/shared.rs b/src/libsync/comm/shared.rs
index d13b2c32978..979b0ebcf8f 100644
--- a/src/libsync/comm/shared.rs
+++ b/src/libsync/comm/shared.rs
@@ -28,7 +28,7 @@ use rustrt::mutex::NativeMutex;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;
 
-use atomics;
+use atomic;
 use mpsc = mpsc_queue;
 
 static DISCONNECTED: int = int::MIN;
@@ -40,17 +40,17 @@ static MAX_STEALS: int = 1 << 20;
 
 pub struct Packet<T> {
     queue: mpsc::Queue<T>,
-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up
 
     // The number of channels which are currently using this packet.
-    channels: atomics::AtomicInt,
+    channels: atomic::AtomicInt,
 
     // See the discussion in Port::drop and the channel send methods for what
     // these are used for
-    port_dropped: atomics::AtomicBool,
-    sender_drain: atomics::AtomicInt,
+    port_dropped: atomic::AtomicBool,
+    sender_drain: atomic::AtomicInt,
 
     // this lock protects various portions of this implementation during
     // select()
@@ -68,12 +68,12 @@ impl<T: Send> Packet<T> {
     pub fn new() -> Packet<T> {
         let p = Packet {
             queue: mpsc::Queue::new(),
-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
-            channels: atomics::AtomicInt::new(2),
-            port_dropped: atomics::AtomicBool::new(false),
-            sender_drain: atomics::AtomicInt::new(0),
+            to_wake: atomic::AtomicUint::new(0),
+            channels: atomic::AtomicInt::new(2),
+            port_dropped: atomic::AtomicBool::new(false),
+            sender_drain: atomic::AtomicInt::new(0),
             select_lock: unsafe { NativeMutex::new() },
         };
         return p;
@@ -96,11 +96,11 @@ impl<T: Send> Packet<T> {
     pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
         match task {
             Some(task) => {
-                assert_eq!(self.cnt.load(atomics::SeqCst), 0);
-                assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+                assert_eq!(self.cnt.load(atomic::SeqCst), 0);
+                assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
                 self.to_wake.store(unsafe { task.cast_to_uint() },
-                                   atomics::SeqCst);
-                self.cnt.store(-1, atomics::SeqCst);
+                                   atomic::SeqCst);
+                self.cnt.store(-1, atomic::SeqCst);
 
                 // This store is a little sketchy. What's happening here is
                 // that we're transferring a blocker from a oneshot or stream
@@ -138,7 +138,7 @@ impl<T: Send> Packet<T> {
 
     pub fn send(&mut self, t: T) -> Result<(), T> {
         // See Port::drop for what's going on
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
 
         // Note that the multiple sender case is a little trickier
         // semantically than the single sender case. The logic for
@@ -165,12 +165,12 @@ impl<T: Send> Packet<T> {
         // preflight check serves as the definitive "this will never be
         // received". Once we get beyond this check, we have permanently
         // entered the realm of "this may be received"
-        if self.cnt.load(atomics::SeqCst) < DISCONNECTED + FUDGE {
+        if self.cnt.load(atomic::SeqCst) < DISCONNECTED + FUDGE {
             return Err(t)
         }
 
         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             -1 => {
                 self.take_to_wake().wake().map(|t| t.reawaken());
             }
@@ -187,9 +187,9 @@ impl<T: Send> Packet<T> {
             n if n < DISCONNECTED + FUDGE => {
                 // see the comment in 'try' for a shared channel for why this
                 // window of "not disconnected" is ok.
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
 
-                if self.sender_drain.fetch_add(1, atomics::SeqCst) == 0 {
+                if self.sender_drain.fetch_add(1, atomic::SeqCst) == 0 {
                     loop {
                         // drain the queue, for info on the thread yield see the
                         // discussion in try_recv
@@ -202,7 +202,7 @@ impl<T: Send> Packet<T> {
                         }
                         // maybe we're done, if we're not the last ones
                         // here, then we need to go try again.
-                        if self.sender_drain.fetch_sub(1, atomics::SeqCst) == 1 {
+                        if self.sender_drain.fetch_sub(1, atomic::SeqCst) == 1 {
                             break
                         }
                     }
@@ -242,15 +242,15 @@ impl<T: Send> Packet<T> {
 
     // Essentially the exact same thing as the stream decrement function.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);
 
         let steals = self.steals;
         self.steals = 0;
 
-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -259,7 +259,7 @@ impl<T: Send> Packet<T> {
             }
         }
 
-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }
 
@@ -311,9 +311,9 @@ impl<T: Send> Packet<T> {
             // might decrement steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -330,7 +330,7 @@ impl<T: Send> Packet<T> {
             // See the discussion in the stream implementation for why we try
             // again.
             None => {
-                match self.cnt.load(atomics::SeqCst) {
+                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),
                     _ => {
                         match self.queue.pop() {
@@ -348,20 +348,20 @@ impl<T: Send> Packet<T> {
 
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&mut self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }
 
     // Decrement the reference count on a channel. This is called whenever a
     // Chan is dropped and may end up waking up a receiver. It's the receiver's
     // responsibility on the other end to figure out that we've disconnected.
     pub fn drop_chan(&mut self) {
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             n if n > 1 => return,
             n => fail!("bad number of channels left {}", n),
         }
 
-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -371,11 +371,11 @@ impl<T: Send> Packet<T> {
     // See the long discussion inside of stream.rs for why the queue is drained,
     // and why it is done in this fashion.
     pub fn drop_port(&mut self) {
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                            steals, DISCONNECTED, atomics::SeqCst);
+                            steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             // See the discussion in 'try_recv' for why we yield
@@ -391,8 +391,8 @@ impl<T: Send> Packet<T> {
 
     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
@@ -407,15 +407,15 @@ impl<T: Send> Packet<T> {
     // This is different than the stream version because there's no need to peek
     // at the queue, we can just look at the local count.
     pub fn can_recv(&mut self) -> bool {
-        let cnt = self.cnt.load(atomics::SeqCst);
+        let cnt = self.cnt.load(atomic::SeqCst);
         cnt == DISCONNECTED || cnt - self.steals > 0
     }
 
     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -460,13 +460,13 @@ impl<T: Send> Packet<T> {
         // the channel count and figure out what we should do to make it
         // positive.
         let steals = {
-            let cnt = self.cnt.load(atomics::SeqCst);
+            let cnt = self.cnt.load(atomic::SeqCst);
             if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
         };
         let prev = self.bump(steals + 1);
 
         if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true
         } else {
             let cur = prev + steals + 1;
@@ -474,7 +474,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -495,8 +495,8 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed with also removing
         // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
     }
 }
diff --git a/src/libsync/comm/stream.rs b/src/libsync/comm/stream.rs
index f8a28b7600f..11c563301e0 100644
--- a/src/libsync/comm/stream.rs
+++ b/src/libsync/comm/stream.rs
@@ -26,7 +26,7 @@ use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;
 
-use atomics;
+use atomic;
 use comm::Receiver;
 use spsc = spsc_queue;
 
@@ -39,11 +39,11 @@ static MAX_STEALS: int = 1 << 20;
 
 pub struct Packet<T> {
     queue: spsc::Queue<Message<T>>, // internal queue for all message
-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up
 
-    port_dropped: atomics::AtomicBool, // flag if the channel has been destroyed.
+    port_dropped: atomic::AtomicBool, // flag if the channel has been destroyed.
 }
 
 pub enum Failure<T> {
@@ -76,11 +76,11 @@ impl<T: Send> Packet<T> {
         Packet {
             queue: unsafe { spsc::Queue::new(128) },
 
-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
+            to_wake: atomic::AtomicUint::new(0),
 
-            port_dropped: atomics::AtomicBool::new(false),
+            port_dropped: atomic::AtomicBool::new(false),
         }
     }
 
@@ -89,7 +89,7 @@ impl<T: Send> Packet<T> {
         // If the other port has deterministically gone away, then definitely
         // must return the data back up the stack. Otherwise, the data is
         // considered as being sent.
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
 
         match self.do_send(Data(t)) {
             UpSuccess | UpDisconnected => {},
@@ -100,14 +100,14 @@ impl<T: Send> Packet<T> {
     pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
         // If the port has gone away, then there's no need to proceed any
         // further.
-        if self.port_dropped.load(atomics::SeqCst) { return UpDisconnected }
+        if self.port_dropped.load(atomic::SeqCst) { return UpDisconnected }
 
         self.do_send(GoUp(up))
     }
 
     fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             // As described in the mod's doc comment, -1 == wakeup
             -1 => UpWoke(self.take_to_wake()),
             // As as described before, SPSC queues must be >= -2
@@ -121,7 +121,7 @@ impl<T: Send> Packet<T> {
             // will never remove this data. We can only have at most one item to
             // drain (the port drains the rest).
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 let first = self.queue.pop();
                 let second = self.queue.pop();
                 assert!(second.is_none());
@@ -140,8 +140,8 @@ impl<T: Send> Packet<T> {
 
     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
 
@@ -150,15 +150,15 @@ impl<T: Send> Packet<T> {
     // back if it shouldn't sleep. Note that this is the location where we take
     // steals into account.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);
 
         let steals = self.steals;
         self.steals = 0;
 
-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -167,7 +167,7 @@ impl<T: Send> Packet<T> {
             }
         }
 
-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }
 
@@ -214,9 +214,9 @@ impl<T: Send> Packet<T> {
             // adding back in whatever we couldn't factor into steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -234,7 +234,7 @@ impl<T: Send> Packet<T> {
             }
 
             None => {
-                match self.cnt.load(atomics::SeqCst) {
+                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),
 
                     // This is a little bit of a tricky case. We failed to pop
@@ -263,7 +263,7 @@ impl<T: Send> Packet<T> {
     pub fn drop_chan(&mut self) {
         // Dropping a channel is pretty simple, we just flag it as disconnected
         // and then wakeup a blocker if there is one.
-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -290,7 +290,7 @@ impl<T: Send> Packet<T> {
         // sends are gated on this flag, so we're immediately guaranteed that
         // there are a bounded number of active sends that we'll have to deal
         // with.
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);
 
         // Now that we're guaranteed to deal with a bounded number of senders,
         // we need to drain the queue. This draining process happens atomically
@@ -303,7 +303,7 @@ impl<T: Send> Packet<T> {
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                            steals, DISCONNECTED, atomics::SeqCst);
+                            steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             loop {
@@ -348,9 +348,9 @@ impl<T: Send> Packet<T> {
 
     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -400,7 +400,7 @@ impl<T: Send> Packet<T> {
         // of time until the data is actually sent.
         if was_upgrade {
             assert_eq!(self.steals, 0);
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             return Ok(true)
         }
 
@@ -413,7 +413,7 @@ impl<T: Send> Packet<T> {
         // If we were previously disconnected, then we know for sure that there
         // is no task in to_wake, so just keep going
        let has_data = if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true // there is data, that data is that we're disconnected
         } else {
             let cur = prev + steals + 1;
@@ -436,7 +436,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -475,7 +475,7 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed with also removing
         // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
     }
 }
diff --git a/src/libsync/comm/sync.rs b/src/libsync/comm/sync.rs
index e872952d9ee..aef02f654c1 100644
--- a/src/libsync/comm/sync.rs
+++ b/src/libsync/comm/sync.rs
@@ -44,12 +44,12 @@ use rustrt::local::Local;
 use rustrt::mutex::{NativeMutex, LockGuard};
 use rustrt::task::{Task, BlockedTask};
 
-use atomics;
+use atomic;
 
 pub struct Packet<T> {
     /// Only field outside of the mutex. Just done for kicks, but mainly because
     /// the other shared channel already had the code implemented
-    channels: atomics::AtomicUint,
+    channels: atomic::AtomicUint,
 
     /// The state field is protected by this mutex
     lock: NativeMutex,
@@ -131,7 +131,7 @@ fn wakeup(task: BlockedTask, guard: LockGuard) {
 impl<T: Send> Packet<T> {
     pub fn new(cap: uint) -> Packet<T> {
         Packet {
-            channels: atomics::AtomicUint::new(1),
+            channels: atomic::AtomicUint::new(1),
             lock: unsafe { NativeMutex::new() },
             state: UnsafeCell::new(State {
                 disconnected: false,
@@ -303,12 +303,12 @@ impl<T: Send> Packet<T> {
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }
 
     pub fn drop_chan(&self) {
         // Only flag the channel as disconnected if we're the last channel
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             _ => return
         }
@@ -411,7 +411,7 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
         let (_g, state) = self.lock();
         assert!(state.queue.dequeue().is_none());
         assert!(state.canceled.is_none());
diff --git a/src/libsync/deque.rs b/src/libsync/deque.rs
index c541cc02774..d5a05e7a681 100644
--- a/src/libsync/deque.rs
+++ b/src/libsync/deque.rs
@@ -61,7 +61,7 @@ use core::mem::{forget, min_align_of, size_of, transmute};
 use core::ptr;
 
 use rustrt::exclusive::Exclusive;
-use atomics::{AtomicInt, AtomicPtr, SeqCst};
+use atomic::{AtomicInt, AtomicPtr, SeqCst};
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
 // the deque requires that this number be less than 2.
@@ -414,7 +414,7 @@ mod tests {
     use std::rt::thread::Thread;
     use std::rand;
     use std::rand::Rng;
-    use atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+    use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
                   AtomicUint, INIT_ATOMIC_UINT};
     use std::vec;
 
diff --git a/src/libsync/lib.rs b/src/libsync/lib.rs
index f6a1684b669..de98f79093e 100644
--- a/src/libsync/lib.rs
+++ b/src/libsync/lib.rs
@@ -49,7 +49,7 @@ pub use raw::{Semaphore, SemaphoreGuard};
 
 // Core building blocks for all primitives in this crate
 
-pub mod atomics;
+pub mod atomic;
 
 // Concurrent data structures
 
diff --git a/src/libsync/mpmc_bounded_queue.rs b/src/libsync/mpmc_bounded_queue.rs
index d54186dc221..949ef3bc34c 100644
--- a/src/libsync/mpmc_bounded_queue.rs
+++ b/src/libsync/mpmc_bounded_queue.rs
@@ -37,7 +37,7 @@ use collections::Vec;
 use core::num::next_power_of_two;
 use core::cell::UnsafeCell;
 
-use atomics::{AtomicUint,Relaxed,Release,Acquire};
+use atomic::{AtomicUint,Relaxed,Release,Acquire};
 
 struct Node<T> {
     sequence: AtomicUint,
diff --git a/src/libsync/mpsc_intrusive.rs b/src/libsync/mpsc_intrusive.rs
index 11f124293b1..1f7841de7c1 100644
--- a/src/libsync/mpsc_intrusive.rs
+++ b/src/libsync/mpsc_intrusive.rs
@@ -37,7 +37,7 @@
 
 use core::prelude::*;
 
-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;
 
@@ -45,16 +45,16 @@ use core::cell::UnsafeCell;
 // initialization.
 
 pub struct Node<T> {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
     pub data: T,
 }
 
 pub struct DummyNode {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
 }
 
 pub struct Queue<T> {
-    pub head: atomics::AtomicUint,
+    pub head: atomic::AtomicUint,
     pub tail: UnsafeCell<*mut Node<T>>,
     pub stub: DummyNode,
 }
@@ -62,26 +62,26 @@ pub struct Queue<T> {
 impl<T: Send> Queue<T> {
     pub fn new() -> Queue<T> {
         Queue {
-            head: atomics::AtomicUint::new(0),
+            head: atomic::AtomicUint::new(0),
             tail: UnsafeCell::new(0 as *mut Node<T>),
             stub: DummyNode {
-                next: atomics::AtomicUint::new(0),
+                next: atomic::AtomicUint::new(0),
             },
         }
     }
 
     pub unsafe fn push(&self, node: *mut Node<T>) {
-        (*node).next.store(0, atomics::Release);
-        let prev = self.head.swap(node as uint, atomics::AcqRel);
+        (*node).next.store(0, atomic::Release);
+        let prev = self.head.swap(node as uint, atomic::AcqRel);
 
         // Note that this code is slightly modified to allow static
         // initialization of these queues with rust's flavor of static
         // initialization.
         if prev == 0 {
-            self.stub.next.store(node as uint, atomics::Release);
+            self.stub.next.store(node as uint, atomic::Release);
         } else {
             let prev = prev as *mut Node<T>;
-            (*prev).next.store(node as uint, atomics::Release);
+            (*prev).next.store(node as uint, atomic::Release);
         }
     }
 
@@ -103,26 +103,26 @@ impl<T: Send> Queue<T> {
         let mut tail = if !tail.is_null() {tail} else {
             mem::transmute(&self.stub)
         };
-        let mut next = (*tail).next(atomics::Relaxed);
+        let mut next = (*tail).next(atomic::Relaxed);
         if tail as uint == &self.stub as *const DummyNode as uint {
             if next.is_null() { return None; }
             *self.tail.get() = next;
             tail = next;
-            next = (*next).next(atomics::Relaxed);
+            next = (*next).next(atomic::Relaxed);
         }
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
         }
-        let head = self.head.load(atomics::Acquire) as *mut Node<T>;
+        let head = self.head.load(atomic::Acquire) as *mut Node<T>;
         if tail != head { return None; }
         let stub = mem::transmute(&self.stub);
         self.push(stub);
-        next = (*tail).next(atomics::Relaxed);
+        next = (*tail).next(atomic::Relaxed);
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
@@ -135,10 +135,10 @@ impl<T: Send> Node<T> {
     pub fn new(t: T) -> Node<T> {
         Node {
             data: t,
-            next: atomics::AtomicUint::new(0),
+            next: atomic::AtomicUint::new(0),
         }
     }
-    pub unsafe fn next(&self, ord: atomics::Ordering) -> *mut Node<T> {
+    pub unsafe fn next(&self, ord: atomic::Ordering) -> *mut Node<T> {
         mem::transmute::<uint, *mut Node<T>>(self.next.load(ord))
     }
 }
diff --git a/src/libsync/mpsc_queue.rs b/src/libsync/mpsc_queue.rs
index 4f5dd07a6e5..012574808f3 100644
--- a/src/libsync/mpsc_queue.rs
+++ b/src/libsync/mpsc_queue.rs
@@ -46,7 +46,7 @@ use alloc::boxed::Box;
 use core::mem;
 use core::cell::UnsafeCell;
 
-use atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+use atomic::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
 
 /// A result of the `pop` function.
 pub enum PopResult<T> {
diff --git a/src/libsync/mutex.rs b/src/libsync/mutex.rs
index 1aa84e8f8d1..61d895dd406 100644
--- a/src/libsync/mutex.rs
+++ b/src/libsync/mutex.rs
@@ -60,7 +60,7 @@ use core::prelude::*;
 
 use alloc::boxed::Box;
-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;
 use rustrt::local::Local;
@@ -137,7 +137,7 @@ enum Flavor {
 /// ```
 pub struct StaticMutex {
     /// Current set of flags on this mutex
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     /// an OS mutex used by native threads
     lock: mutex::StaticNativeMutex,
 
@@ -151,7 +151,7 @@ pub struct StaticMutex {
     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
     q: q::Queue<uint>,
-    green_cnt: atomics::AtomicUint,
+    green_cnt: atomic::AtomicUint,
 }
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
@@ -165,16 +165,16 @@ pub struct Guard<'a> {
 /// other mutex constants.
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
     lock: mutex::NATIVE_MUTEX_INIT,
-    state: atomics::INIT_ATOMIC_UINT,
+    state: atomic::INIT_ATOMIC_UINT,
     flavor: UnsafeCell { value: Unlocked },
     green_blocker: UnsafeCell { value: 0 },
     native_blocker: UnsafeCell { value: 0 },
-    green_cnt: atomics::INIT_ATOMIC_UINT,
+    green_cnt: atomic::INIT_ATOMIC_UINT,
     q: q::Queue {
-        head: atomics::INIT_ATOMIC_UINT,
+        head: atomic::INIT_ATOMIC_UINT,
         tail: UnsafeCell { value: 0 as *mut q::Node<uint> },
         stub: q::DummyNode {
-            next: atomics::INIT_ATOMIC_UINT,
+            next: atomic::INIT_ATOMIC_UINT,
         }
     }
 };
@@ -185,7 +185,7 @@ impl StaticMutex {
         // Attempt to steal the mutex from an unlocked state.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
-        match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
+        match self.state.compare_and_swap(0, LOCKED, atomic::SeqCst) {
             0 => {
                 // After acquiring the mutex, we can safely access the inner
                 // fields.
@@ -230,7 +230,7 @@ impl StaticMutex {
         // allow threads coming out of the native_lock function to try their
         // best to not hit a cvar in deschedule.
         let mut old = match self.state.compare_and_swap(0, LOCKED,
-                                                        atomics::SeqCst) {
+                                                        atomic::SeqCst) {
             0 => {
                 let flavor = if can_block {
                     NativeAcquisition
@@ -272,7 +272,7 @@ impl StaticMutex {
             if old & LOCKED != 0 {
                 old = match self.state.compare_and_swap(old,
                                                         old | native_bit,
-                                                        atomics::SeqCst) {
+                                                        atomic::SeqCst) {
                     n if n == old => return Ok(()),
                     n => n
                 };
@@ -280,7 +280,7 @@ impl StaticMutex {
                 assert_eq!(old, 0);
                 old = match self.state.compare_and_swap(old,
                                                         old | LOCKED,
-                                                        atomics::SeqCst) {
+                                                        atomic::SeqCst) {
                     n if n == old => {
                         // After acquiring the lock, we have access to the
                         // flavor field, and we've regained access to our
@@ -330,7 +330,7 @@ impl StaticMutex {
         //
         // FIXME: There isn't a cancellation currently of an enqueue, forcing
         // the unlocker to spin for a bit.
-        if self.green_cnt.fetch_add(1, atomics::SeqCst) == 0 {
+        if self.green_cnt.fetch_add(1, atomic::SeqCst) == 0 {
             Local::put(t);
             return
         }
@@ -348,7 +348,7 @@ impl StaticMutex {
     fn green_unlock(&self) {
         // If we're the only green thread, then no need to check the queue,
         // otherwise the fixme above forces us to spin for a bit.
-        if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
+        if self.green_cnt.fetch_sub(1, atomic::SeqCst) == 1 { return }
         let node;
         loop {
             match unsafe { self.q.pop() } {
@@ -380,7 +380,7 @@ impl StaticMutex {
         // of the outer mutex.
         let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };
 
-        let mut state = self.state.load(atomics::SeqCst);
+        let mut state = self.state.load(atomic::SeqCst);
         let mut unlocked = false;
         let task;
         loop {
@@ -412,7 +412,7 @@ impl StaticMutex {
                 }
                 unlocked = true;
             }
-            match self.state.compare_and_swap(LOCKED, 0, atomics::SeqCst) {
+            match self.state.compare_and_swap(LOCKED, 0, atomic::SeqCst) {
                 LOCKED => return,
                 n => { state = n; }
             }
@@ -435,7 +435,7 @@ impl StaticMutex {
         loop {
             assert!(state & bit != 0);
             let new = state ^ bit;
-            match self.state.compare_and_swap(state, new, atomics::SeqCst) {
+            match self.state.compare_and_swap(state, new, atomic::SeqCst) {
                 n if n == state => break,
                 n => { state = n; }
             }
@@ -462,11 +462,11 @@ impl Mutex {
     pub fn new() -> Mutex {
         Mutex {
             lock: box StaticMutex {
-                state: atomics::AtomicUint::new(0),
+                state: atomic::AtomicUint::new(0),
                 flavor: UnsafeCell::new(Unlocked),
                 green_blocker: UnsafeCell::new(0),
                 native_blocker: UnsafeCell::new(0),
-                green_cnt: atomics::AtomicUint::new(0),
+                green_cnt: atomic::AtomicUint::new(0),
                 q: q::Queue::new(),
                 lock: unsafe { mutex::StaticNativeMutex::new() },
             }
@@ -498,7 +498,7 @@ impl<'a> Guard<'a> {
         if cfg!(debug) {
             // once we've acquired a lock, it's ok to access the flavor
             assert!(unsafe { *lock.flavor.get() != Unlocked });
-            assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
+            assert!(lock.state.load(atomic::SeqCst) & LOCKED != 0);
         }
         Guard { lock: lock }
     }
diff --git a/src/libsync/one.rs b/src/libsync/one.rs
index 6fad2c8aa40..4594345d2a3 100644
--- a/src/libsync/one.rs
+++ b/src/libsync/one.rs
@@ -16,7 +16,7 @@
 use core::prelude::*;
 
 use core::int;
-use core::atomics;
+use core::atomic;
 
 use mutex::{StaticMutex, MUTEX_INIT};
 
@@ -40,15 +40,15 @@ use mutex::{StaticMutex, MUTEX_INIT};
 /// ```
 pub struct Once {
     mutex: StaticMutex,
-    cnt: atomics::AtomicInt,
-    lock_cnt: atomics::AtomicInt,
+    cnt: atomic::AtomicInt,
+    lock_cnt: atomic::AtomicInt,
 }
 
 /// Initialization value for static `Once` values.
 pub static ONCE_INIT: Once = Once {
     mutex: MUTEX_INIT,
-    cnt: atomics::INIT_ATOMIC_INT,
-    lock_cnt: atomics::INIT_ATOMIC_INT,
+    cnt: atomic::INIT_ATOMIC_INT,
+    lock_cnt: atomic::INIT_ATOMIC_INT,
 };
 
 impl Once {
@@ -63,7 +63,7 @@ impl Once {
     /// has run and completed (it may not be the closure specified).
     pub fn doit(&self, f: ||) {
         // Optimize common path: load is much cheaper than fetch_add.
-        if self.cnt.load(atomics::SeqCst) < 0 {
+        if self.cnt.load(atomic::SeqCst) < 0 {
             return
         }
 
@@ -94,11 +94,11 @@ impl Once {
         // calling `doit` will return immediately before the initialization has
         // completed.
 
-        let prev = self.cnt.fetch_add(1, atomics::SeqCst);
+        let prev = self.cnt.fetch_add(1, atomic::SeqCst);
         if prev < 0 {
             // Make sure we never overflow, we'll never have int::MIN
             // simultaneous calls to `doit` to make this value go back to 0
-            self.cnt.store(int::MIN, atomics::SeqCst);
+            self.cnt.store(int::MIN, atomic::SeqCst);
             return
         }
 
@@ -106,15 +106,15 @@ impl Once {
         // otherwise we run the job and record how many people will try to grab
         // this lock
         let guard = self.mutex.lock();
-        if self.cnt.load(atomics::SeqCst) > 0 {
+        if self.cnt.load(atomic::SeqCst) > 0 {
             f();
-            let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
-            self.lock_cnt.store(prev, atomics::SeqCst);
+            let prev = self.cnt.swap(int::MIN, atomic::SeqCst);
+            self.lock_cnt.store(prev, atomic::SeqCst);
         }
         drop(guard);
 
         // Last one out cleans up after everyone else, no leaks!
-        if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
+        if self.lock_cnt.fetch_add(-1, atomic::SeqCst) == 1 {
             unsafe { self.mutex.destroy() }
         }
     }
diff --git a/src/libsync/raw.rs b/src/libsync/raw.rs
index e7a2d3e0639..49f60fe6f00 100644
--- a/src/libsync/raw.rs
+++ b/src/libsync/raw.rs
@@ -17,7 +17,7 @@
 
 use core::prelude::*;
 
-use core::atomics;
+use core::atomic;
 use core::finally::Finally;
 use core::kinds::marker;
 use core::mem;
@@ -458,7 +458,7 @@ pub struct RWLock {
     //
     // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
     // acquire/release orderings superfluously. Change these someday.
-    read_count: atomics::AtomicUint,
+    read_count: atomic::AtomicUint,
 }
 
 /// An RAII helper which is created by acquiring a read lock on an RWLock. When
@@ -490,7 +490,7 @@ impl RWLock {
         RWLock {
             order_lock: Semaphore::new(1),
             access_lock: Sem::new_and_signal(1, num_condvars),
-            read_count: atomics::AtomicUint::new(0),
+            read_count: atomic::AtomicUint::new(0),
         }
     }
 
@@ -499,7 +499,7 @@ impl RWLock {
     /// this one.
     pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
         let _guard = self.order_lock.access();
-        let old_count = self.read_count.fetch_add(1, atomics::Acquire);
+        let old_count = self.read_count.fetch_add(1, atomic::Acquire);
         if old_count == 0 {
             self.access_lock.acquire();
         }
@@ -575,7 +575,7 @@ impl<'a> RWLockWriteGuard<'a> {
         // things from now on
         unsafe { mem::forget(self) }
 
-        let old_count = lock.read_count.fetch_add(1, atomics::Release);
+        let old_count = lock.read_count.fetch_add(1, atomic::Release);
         // If another reader was already blocking, we need to hand-off
         // the "reader cloud" access lock to them.
         if old_count != 0 {
@@ -600,7 +600,7 @@ impl<'a> Drop for RWLockWriteGuard<'a> {
 #[unsafe_destructor]
 impl<'a> Drop for RWLockReadGuard<'a> {
     fn drop(&mut self) {
-        let old_count = self.lock.read_count.fetch_sub(1, atomics::Release);
+        let old_count = self.lock.read_count.fetch_sub(1, atomic::Release);
         assert!(old_count > 0);
         if old_count == 1 {
             // Note: this release used to be outside of a locked access
diff --git a/src/libsync/spsc_queue.rs b/src/libsync/spsc_queue.rs
index d8cd44f9935..578e518cb8f 100644
--- a/src/libsync/spsc_queue.rs
+++ b/src/libsync/spsc_queue.rs
@@ -42,7 +42,7 @@ use core::mem;
 use core::cell::UnsafeCell;
 
 use alloc::arc::Arc;
-use atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+use atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
