author    Aaron Turon <aturon@mozilla.com>  2014-08-04 15:42:36 -0700
committer Aaron Turon <aturon@mozilla.com>  2014-08-04 16:03:21 -0700
commit    68bde0a07396efb415d61047c6b2a8183f47ef30 (patch)
tree      2a0d63e3153abbe4f62f15d06ee72c94c7772b2d /src
parent    9de20198aedb3c3419ee503755e04bcc198d3a94 (diff)
stabilize atomics (now atomic)
This commit stabilizes the `std::sync::atomics` module, renaming it to
`std::sync::atomic` to match library precedent elsewhere, and tightening
up behavior around incorrect memory ordering annotations.
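
For example (a minimal sketch against the renamed module; `AtomicUint` and
the free-standing ordering variants are the pre-1.0 API shown in the diff
below):

    use std::sync::atomic::{AtomicUint, SeqCst};

    fn main() {
        let a = AtomicUint::new(0);
        a.store(1, SeqCst);             // SeqCst is valid for stores
        assert_eq!(a.load(SeqCst), 1);  // ... and for loads
        // These annotations are meaningless for the operation and now
        // fail at runtime instead of being silently treated as SeqCst:
        // a.load(Release);     fails: "there is no such thing as a release load"
        // a.store(2, Acquire); fails: "there is no such thing as an acquire store"
    }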

The vast majority of the module is now `stable`. However, the
`AtomicOption` type has been deprecated, since it is essentially unused
and is not truly a primitive atomic type. It will eventually be replaced
by a higher-level abstraction like MVars.
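
Until then, a lock-based slot covers the common take/swap pattern. A hedged
sketch of one possible replacement (the `Mutex<Option<T>>` pairing is an
illustration, not an API introduced by this commit):

    use std::sync::Mutex;

    fn main() {
        // Roughly what AtomicOption::new(box v) + take(SeqCst) provided:
        let slot = Mutex::new(Some(box 1i));
        let taken = slot.lock().take();
        assert!(taken == Some(box 1i));
        assert!(slot.lock().is_none());
    }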

Due to deprecations, this is a:

[breaking-change]
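
The old path is kept as a deprecated re-export during the transition (see the
`src/libstd/sync/mod.rs` hunk below), so existing code keeps compiling with a
deprecation warning:

    use std::sync::atomics::{AtomicInt, SeqCst}; // warning: deprecated, use atomic

    fn main() {
        let n = AtomicInt::new(0);
        n.fetch_add(1, SeqCst);
        assert_eq!(n.load(SeqCst), 1);
    }
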
Diffstat (limited to 'src')
-rw-r--r--  src/liballoc/arc.rs | 56
-rw-r--r--  src/libcore/atomic.rs (renamed from src/libcore/atomics.rs) | 101
-rw-r--r--  src/libcore/lib.rs | 2
-rw-r--r--  src/libcoretest/atomic.rs (renamed from src/libcoretest/atomics.rs) | 2
-rw-r--r--  src/libcoretest/lib.rs | 2
-rw-r--r--  src/libgreen/basic.rs | 18
-rw-r--r--  src/libgreen/lib.rs | 2
-rw-r--r--  src/libgreen/stack.rs | 8
-rw-r--r--  src/libnative/io/pipe_win32.rs | 18
-rw-r--r--  src/libnative/io/timer_unix.rs | 6
-rw-r--r--  src/librustrt/at_exit_imp.rs | 18
-rw-r--r--  src/librustrt/bookkeeping.rs | 10
-rw-r--r--  src/librustrt/mutex.rs | 30
-rw-r--r--  src/librustrt/task.rs | 2
-rw-r--r--  src/librustrt/unwind.rs | 32
-rw-r--r--  src/libstd/io/tempfile.rs | 6
-rw-r--r--  src/libstd/io/test.rs | 2
-rw-r--r--  src/libstd/os.rs | 2
-rw-r--r--  src/libstd/rt/backtrace.rs | 8
-rw-r--r--  src/libstd/rt/util.rs | 8
-rw-r--r--  src/libstd/sync/mod.rs | 8
-rw-r--r--  src/libsync/atomic.rs (renamed from src/libsync/atomics.rs) | 17
-rw-r--r--  src/libsync/comm/oneshot.rs | 32
-rw-r--r--  src/libsync/comm/shared.rs | 90
-rw-r--r--  src/libsync/comm/stream.rs | 62
-rw-r--r--  src/libsync/comm/sync.rs | 12
-rw-r--r--  src/libsync/deque.rs | 4
-rw-r--r--  src/libsync/lib.rs | 2
-rw-r--r--  src/libsync/mpmc_bounded_queue.rs | 2
-rw-r--r--  src/libsync/mpsc_intrusive.rs | 32
-rw-r--r--  src/libsync/mpsc_queue.rs | 2
-rw-r--r--  src/libsync/mutex.rs | 38
-rw-r--r--  src/libsync/one.rs | 24
-rw-r--r--  src/libsync/raw.rs | 12
-rw-r--r--  src/libsync/spsc_queue.rs | 2
-rw-r--r--  src/test/compile-fail/std-uncopyable-atomics.rs | 2
36 files changed, 366 insertions(+), 308 deletions(-)
diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index bf477781aab..3b0ed528060 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -13,7 +13,7 @@
 //! Concurrency-enabled mechanisms for sharing mutable and/or immutable state
 //! between tasks.
 
-use core::atomics;
+use core::atomic;
 use core::clone::Clone;
 use core::kinds::{Share, Send};
 use core::mem::{min_align_of, size_of, drop};
@@ -71,8 +71,8 @@ pub struct Weak<T> {
 }
 
 struct ArcInner<T> {
-    strong: atomics::AtomicUint,
-    weak: atomics::AtomicUint,
+    strong: atomic::AtomicUint,
+    weak: atomic::AtomicUint,
     data: T,
 }
 
@@ -84,8 +84,8 @@ impl<T: Share + Send> Arc<T> {
         // Start the weak pointer count as 1 which is the weak pointer that's
         // held by all the strong pointers (kinda), see std/rc.rs for more info
         let x = box ArcInner {
-            strong: atomics::AtomicUint::new(1),
-            weak: atomics::AtomicUint::new(1),
+            strong: atomic::AtomicUint::new(1),
+            weak: atomic::AtomicUint::new(1),
             data: data,
         };
         Arc { _ptr: unsafe { mem::transmute(x) } }
@@ -109,7 +109,7 @@ impl<T: Share + Send> Arc<T> {
     #[experimental = "Weak pointers may not belong in this module."]
     pub fn downgrade(&self) -> Weak<T> {
         // See the clone() impl for why this is relaxed
-        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        self.inner().weak.fetch_add(1, atomic::Relaxed);
         Weak { _ptr: self._ptr }
     }
 }
@@ -134,7 +134,7 @@ impl<T: Share + Send> Clone for Arc<T> {
         // another must already provide any required synchronization.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        self.inner().strong.fetch_add(1, atomics::Relaxed);
+        self.inner().strong.fetch_add(1, atomic::Relaxed);
         Arc { _ptr: self._ptr }
     }
 }
@@ -159,8 +159,8 @@ impl<T: Send + Share + Clone> Arc<T> {
         // Note that we hold a strong reference, which also counts as
         // a weak reference, so we only clone if there is an
         // additional reference of either kind.
-        if self.inner().strong.load(atomics::SeqCst) != 1 ||
-           self.inner().weak.load(atomics::SeqCst) != 1 {
+        if self.inner().strong.load(atomic::SeqCst) != 1 ||
+           self.inner().weak.load(atomic::SeqCst) != 1 {
             *self = Arc::new(self.deref().clone())
         }
         // This unsafety is ok because we're guaranteed that the pointer
@@ -185,7 +185,7 @@ impl<T: Share + Send> Drop for Arc<T> {
         // Because `fetch_sub` is already atomic, we do not need to synchronize
         // with other threads unless we are going to delete the object. This
         // same logic applies to the below `fetch_sub` to the `weak` count.
-        if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }
+        if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return }
 
         // This fence is needed to prevent reordering of use of the data and
         // deletion of the data. Because it is marked `Release`, the
@@ -204,14 +204,14 @@ impl<T: Share + Send> Drop for Arc<T> {
         // and an "acquire" operation before deleting the object.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        atomics::fence(atomics::Acquire);
+        atomic::fence(atomic::Acquire);
 
         // Destroy the data at this time, even though we may not free the box
         // allocation itself (there may still be weak pointers lying around).
         unsafe { drop(ptr::read(&self.inner().data)); }
 
-        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
-            atomics::fence(atomics::Acquire);
+        if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
+            atomic::fence(atomic::Acquire);
             unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                 min_align_of::<ArcInner<T>>()) }
         }
@@ -230,9 +230,9 @@ impl<T: Share + Send> Weak<T> {
         // fetch_add because once the count hits 0 it must never be above 0.
         let inner = self.inner();
         loop {
-            let n = inner.strong.load(atomics::SeqCst);
+            let n = inner.strong.load(atomic::SeqCst);
             if n == 0 { return None }
-            let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
+            let old = inner.strong.compare_and_swap(n, n + 1, atomic::SeqCst);
             if old == n { return Some(Arc { _ptr: self._ptr }) }
         }
     }
@@ -249,7 +249,7 @@ impl<T: Share + Send> Clone for Weak<T> {
     #[inline]
     fn clone(&self) -> Weak<T> {
         // See comments in Arc::clone() for why this is relaxed
-        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        self.inner().weak.fetch_add(1, atomic::Relaxed);
         Weak { _ptr: self._ptr }
     }
 }
@@ -264,8 +264,8 @@ impl<T: Share + Send> Drop for Weak<T> {
         // If we find out that we were the last weak pointer, then it's time to
         // deallocate the data entirely. See the discussion in Arc::drop() about
         // the memory orderings
-        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
-            atomics::fence(atomics::Acquire);
+        if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
+            atomic::fence(atomic::Acquire);
             unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                 min_align_of::<ArcInner<T>>()) }
         }
@@ -281,13 +281,13 @@ mod tests {
     use std::mem::drop;
     use std::ops::Drop;
     use std::option::{Option, Some, None};
-    use std::sync::atomics;
+    use std::sync::atomic;
     use std::task;
     use std::vec::Vec;
     use super::{Arc, Weak};
     use std::sync::Mutex;
 
-    struct Canary(*mut atomics::AtomicUint);
+    struct Canary(*mut atomic::AtomicUint);
 
     impl Drop for Canary
     {
@@ -295,7 +295,7 @@ mod tests {
             unsafe {
                 match *self {
                     Canary(c) => {
-                        (*c).fetch_add(1, atomics::SeqCst);
+                        (*c).fetch_add(1, atomic::SeqCst);
                     }
                 }
             }
@@ -413,20 +413,20 @@ mod tests {
 
     #[test]
     fn drop_arc() {
-        let mut canary = atomics::AtomicUint::new(0);
-        let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+        let mut canary = atomic::AtomicUint::new(0);
+        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
         drop(x);
-        assert!(canary.load(atomics::Acquire) == 1);
+        assert!(canary.load(atomic::Acquire) == 1);
     }
 
     #[test]
     fn drop_arc_weak() {
-        let mut canary = atomics::AtomicUint::new(0);
-        let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+        let mut canary = atomic::AtomicUint::new(0);
+        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
         let arc_weak = arc.downgrade();
-        assert!(canary.load(atomics::Acquire) == 0);
+        assert!(canary.load(atomic::Acquire) == 0);
         drop(arc);
-        assert!(canary.load(atomics::Acquire) == 1);
+        assert!(canary.load(atomic::Acquire) == 1);
         drop(arc_weak);
     }
 }
diff --git a/src/libcore/atomics.rs b/src/libcore/atomic.rs
index 466a1738e82..e248b934b69 100644
--- a/src/libcore/atomics.rs
+++ b/src/libcore/atomic.rs
@@ -10,29 +10,35 @@
 
 //! Core atomic primitives
 
+#![stable]
+
 use intrinsics;
 use std::kinds::marker;
 use cell::UnsafeCell;
 
 /// An atomic boolean type.
+#[stable]
 pub struct AtomicBool {
     v: UnsafeCell<uint>,
     nocopy: marker::NoCopy
 }
 
 /// A signed atomic integer type, supporting basic atomic arithmetic operations
+#[stable]
 pub struct AtomicInt {
     v: UnsafeCell<int>,
     nocopy: marker::NoCopy
 }
 
 /// An unsigned atomic integer type, supporting basic atomic arithmetic operations
+#[stable]
 pub struct AtomicUint {
     v: UnsafeCell<uint>,
     nocopy: marker::NoCopy
 }
 
 /// An unsafe atomic pointer. Only supports basic atomic operations
+#[stable]
 pub struct AtomicPtr<T> {
     p: UnsafeCell<uint>,
     nocopy: marker::NoCopy
@@ -49,6 +55,7 @@ pub struct AtomicPtr<T> {
 /// Rust's memory orderings are the same as in C++[1].
 ///
 /// 1: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
+#[stable]
 pub enum Ordering {
     /// No ordering constraints, only atomic operations
     Relaxed,
@@ -69,18 +76,22 @@ pub enum Ordering {
 }
 
 /// An `AtomicBool` initialized to `false`
+#[unstable = "may be renamed, pending conventions for static initializers"]
 pub static INIT_ATOMIC_BOOL: AtomicBool =
         AtomicBool { v: UnsafeCell { value: 0 }, nocopy: marker::NoCopy };
 /// An `AtomicInt` initialized to `0`
+#[unstable = "may be renamed, pending conventions for static initializers"]
 pub static INIT_ATOMIC_INT: AtomicInt =
         AtomicInt { v: UnsafeCell { value: 0 }, nocopy: marker::NoCopy };
 /// An `AtomicUint` initialized to `0`
+#[unstable = "may be renamed, pending conventions for static initializers"]
 pub static INIT_ATOMIC_UINT: AtomicUint =
         AtomicUint { v: UnsafeCell { value: 0, }, nocopy: marker::NoCopy };
 
 // NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
 static UINT_TRUE: uint = -1;
 
+#[stable]
 impl AtomicBool {
     /// Create a new `AtomicBool`
     pub fn new(v: bool) -> AtomicBool {
@@ -89,12 +100,20 @@ impl AtomicBool {
     }
 
     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> bool {
         unsafe { atomic_load(self.v.get() as *const uint, order) > 0 }
     }
 
     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: bool, order: Ordering) {
         let val = if val { UINT_TRUE } else { 0 };
@@ -120,7 +139,7 @@ impl AtomicBool {
     ///
     /// ```rust
     /// use std::sync::Arc;
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     /// use std::task::deschedule;
     ///
     /// fn main() {
@@ -170,7 +189,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_and(false, SeqCst));
@@ -200,7 +219,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
@@ -231,7 +250,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_or(false, SeqCst));
@@ -261,7 +280,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
@@ -283,6 +302,7 @@ impl AtomicBool {
     }
 }
 
+#[stable]
 impl AtomicInt {
     /// Create a new `AtomicInt`
     pub fn new(v: int) -> AtomicInt {
@@ -290,12 +310,20 @@ impl AtomicInt {
     }
 
     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> int {
         unsafe { atomic_load(self.v.get() as *const int, order) }
     }
 
     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: int, order: Ordering) {
         unsafe { atomic_store(self.v.get(), val, order); }
@@ -322,7 +350,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
+    /// use std::sync::atomic::{AtomicInt, SeqCst};
     ///
     /// let foo = AtomicInt::new(0);
     /// assert_eq!(0, foo.fetch_add(10, SeqCst));
@@ -338,7 +366,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
+    /// use std::sync::atomic::{AtomicInt, SeqCst};
     ///
     /// let foo = AtomicInt::new(0);
     /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
@@ -354,7 +382,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
@@ -369,7 +397,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
@@ -384,7 +412,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
@@ -395,6 +423,7 @@ impl AtomicInt {
     }
 }
 
+#[stable]
 impl AtomicUint {
     /// Create a new `AtomicUint`
     pub fn new(v: uint) -> AtomicUint {
@@ -402,12 +431,20 @@ impl AtomicUint {
     }
 
     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> uint {
         unsafe { atomic_load(self.v.get() as *const uint, order) }
     }
 
     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: uint, order: Ordering) {
         unsafe { atomic_store(self.v.get(), val, order); }
@@ -434,7 +471,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0);
     /// assert_eq!(0, foo.fetch_add(10, SeqCst));
@@ -450,7 +487,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(10);
     /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
@@ -466,7 +503,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
@@ -481,7 +518,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
@@ -496,7 +533,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
@@ -507,6 +544,7 @@ impl AtomicUint {
     }
 }
 
+#[stable]
 impl<T> AtomicPtr<T> {
     /// Create a new `AtomicPtr`
     pub fn new(p: *mut T) -> AtomicPtr<T> {
@@ -514,6 +552,10 @@ impl<T> AtomicPtr<T> {
     }
 
     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> *mut T {
         unsafe {
@@ -522,6 +564,10 @@ impl<T> AtomicPtr<T> {
     }
 
     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, ptr: *mut T, order: Ordering) {
         unsafe { atomic_store(self.p.get(), ptr as uint, order); }
@@ -552,7 +598,9 @@ unsafe fn atomic_store<T>(dst: *mut T, val: T, order:Ordering) {
     match order {
         Release => intrinsics::atomic_store_rel(dst, val),
         Relaxed => intrinsics::atomic_store_relaxed(dst, val),
-        _       => intrinsics::atomic_store(dst, val)
+        SeqCst  => intrinsics::atomic_store(dst, val),
+        Acquire => fail!("there is no such thing as an acquire store"),
+        AcqRel  => fail!("there is no such thing as an acquire/release store"),
     }
 }
 
@@ -561,7 +609,9 @@ unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
     match order {
         Acquire => intrinsics::atomic_load_acq(dst),
         Relaxed => intrinsics::atomic_load_relaxed(dst),
-        _       => intrinsics::atomic_load(dst)
+        SeqCst  => intrinsics::atomic_load(dst),
+        Release => fail!("there is no such thing as a release load"),
+        AcqRel  => fail!("there is no such thing as an acquire/release load"),
     }
 }
 
@@ -572,7 +622,7 @@ unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xchg_rel(dst, val),
         AcqRel  => intrinsics::atomic_xchg_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
-        _       => intrinsics::atomic_xchg(dst, val)
+        SeqCst  => intrinsics::atomic_xchg(dst, val)
     }
 }
 
@@ -584,7 +634,7 @@ unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xadd_rel(dst, val),
         AcqRel  => intrinsics::atomic_xadd_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
-        _       => intrinsics::atomic_xadd(dst, val)
+        SeqCst  => intrinsics::atomic_xadd(dst, val)
     }
 }
 
@@ -596,7 +646,7 @@ unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xsub_rel(dst, val),
         AcqRel  => intrinsics::atomic_xsub_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
-        _       => intrinsics::atomic_xsub(dst, val)
+        SeqCst  => intrinsics::atomic_xsub(dst, val)
     }
 }
 
@@ -607,7 +657,7 @@ unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering)
         Release => intrinsics::atomic_cxchg_rel(dst, old, new),
         AcqRel  => intrinsics::atomic_cxchg_acqrel(dst, old, new),
         Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
-        _       => intrinsics::atomic_cxchg(dst, old, new),
+        SeqCst  => intrinsics::atomic_cxchg(dst, old, new),
     }
 }
 
@@ -618,7 +668,7 @@ unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_and_rel(dst, val),
         AcqRel  => intrinsics::atomic_and_acqrel(dst, val),
         Relaxed => intrinsics::atomic_and_relaxed(dst, val),
-        _       => intrinsics::atomic_and(dst, val)
+        SeqCst  => intrinsics::atomic_and(dst, val)
     }
 }
 
@@ -629,7 +679,7 @@ unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_nand_rel(dst, val),
         AcqRel  => intrinsics::atomic_nand_acqrel(dst, val),
         Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
-        _       => intrinsics::atomic_nand(dst, val)
+        SeqCst  => intrinsics::atomic_nand(dst, val)
     }
 }
 
@@ -641,7 +691,7 @@ unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_or_rel(dst, val),
         AcqRel  => intrinsics::atomic_or_acqrel(dst, val),
         Relaxed => intrinsics::atomic_or_relaxed(dst, val),
-        _       => intrinsics::atomic_or(dst, val)
+        SeqCst  => intrinsics::atomic_or(dst, val)
     }
 }
 
@@ -653,7 +703,7 @@ unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xor_rel(dst, val),
         AcqRel  => intrinsics::atomic_xor_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
-        _       => intrinsics::atomic_xor(dst, val)
+        SeqCst  => intrinsics::atomic_xor(dst, val)
     }
 }
 
@@ -679,6 +729,7 @@ unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
 ///
 /// Fails if `order` is `Relaxed`
 #[inline]
+#[stable]
 pub fn fence(order: Ordering) {
     unsafe {
         match order {
diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index 2809bda4f6e..e4285ed597c 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -113,7 +113,7 @@ pub mod ty {
 /* Core types and methods on primitives */
 
 pub mod any;
-pub mod atomics;
+pub mod atomic;
 pub mod bool;
 pub mod cell;
 pub mod char;
diff --git a/src/libcoretest/atomics.rs b/src/libcoretest/atomic.rs
index 3f960ae1f26..e8fae3fa6df 100644
--- a/src/libcoretest/atomics.rs
+++ b/src/libcoretest/atomic.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use core::atomics::*;
+use core::atomic::*;
 
 #[test]
 fn bool_() {
diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs
index 3a3cac542c9..0b8ae09c9a3 100644
--- a/src/libcoretest/lib.rs
+++ b/src/libcoretest/lib.rs
@@ -14,7 +14,7 @@ extern crate test;
 extern crate libc;
 
 mod any;
-mod atomics;
+mod atomic;
 mod cell;
 mod char;
 mod cmp;
diff --git a/src/libgreen/basic.rs b/src/libgreen/basic.rs
index d0224749781..e48786f3374 100644
--- a/src/libgreen/basic.rs
+++ b/src/libgreen/basic.rs
@@ -16,7 +16,7 @@
 //! loop if no other one is provided (and M:N scheduling is desired).
 
 use alloc::arc::Arc;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::mem;
 use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback};
 use std::rt::rtio::{PausableIdleCallback, Callback};
@@ -33,7 +33,7 @@ struct BasicLoop {
     next_remote: uint,
     messages: Arc<Exclusive<Vec<Message>>>,
     idle: Option<Box<Callback + Send>>,
-    idle_active: Option<Arc<atomics::AtomicBool>>,
+    idle_active: Option<Arc<atomic::AtomicBool>>,
 }
 
 enum Message { RunRemote(uint), RemoveRemote(uint) }
@@ -89,7 +89,7 @@ impl BasicLoop {
     fn idle(&mut self) {
         match self.idle {
             Some(ref mut idle) => {
-                if self.idle_active.get_ref().load(atomics::SeqCst) {
+                if self.idle_active.get_ref().load(atomic::SeqCst) {
                     idle.call();
                 }
             }
@@ -98,7 +98,7 @@ impl BasicLoop {
     }
 
     fn has_idle(&self) -> bool {
-        self.idle.is_some() && self.idle_active.get_ref().load(atomics::SeqCst)
+        self.idle.is_some() && self.idle_active.get_ref().load(atomic::SeqCst)
     }
 }
 
@@ -136,7 +136,7 @@ impl EventLoop for BasicLoop {
                               -> Box<PausableIdleCallback + Send> {
         rtassert!(self.idle.is_none());
         self.idle = Some(cb);
-        let a = Arc::new(atomics::AtomicBool::new(true));
+        let a = Arc::new(atomic::AtomicBool::new(true));
         self.idle_active = Some(a.clone());
         box BasicPausable { active: a } as Box<PausableIdleCallback + Send>
     }
@@ -183,21 +183,21 @@ impl Drop for BasicRemote {
 }
 
 struct BasicPausable {
-    active: Arc<atomics::AtomicBool>,
+    active: Arc<atomic::AtomicBool>,
 }
 
 impl PausableIdleCallback for BasicPausable {
     fn pause(&mut self) {
-        self.active.store(false, atomics::SeqCst);
+        self.active.store(false, atomic::SeqCst);
     }
     fn resume(&mut self) {
-        self.active.store(true, atomics::SeqCst);
+        self.active.store(true, atomic::SeqCst);
     }
 }
 
 impl Drop for BasicPausable {
     fn drop(&mut self) {
-        self.active.store(false, atomics::SeqCst);
+        self.active.store(false, atomic::SeqCst);
     }
 }
 
diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs
index 6e4e2ac0fee..5c280a31db7 100644
--- a/src/libgreen/lib.rs
+++ b/src/libgreen/lib.rs
@@ -232,7 +232,7 @@ use std::rt::rtio;
 use std::rt::thread::Thread;
 use std::rt::task::TaskOpts;
 use std::rt;
-use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
+use std::sync::atomic::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
 use std::sync::deque;
 use std::task::{TaskBuilder, Spawner};
 
diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs
index af53766617c..601758f8a25 100644
--- a/src/libgreen/stack.rs
+++ b/src/libgreen/stack.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use std::ptr;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
               MapNonStandardFlags, getenv};
 use libc;
@@ -158,8 +158,8 @@ impl StackPool {
 }
 
 fn max_cached_stacks() -> uint {
-    static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-    match unsafe { AMT.load(atomics::SeqCst) } {
+    static mut AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+    match unsafe { AMT.load(atomic::SeqCst) } {
         0 => {}
         n => return n - 1,
     }
@@ -169,7 +169,7 @@ fn max_cached_stacks() -> uint {
     let amt = amt.unwrap_or(10);
     // 0 is our sentinel value, so ensure that we'll never see 0 after
     // initialization has run
-    unsafe { AMT.store(amt + 1, atomics::SeqCst); }
+    unsafe { AMT.store(amt + 1, atomic::SeqCst); }
     return amt;
 }
 
diff --git a/src/libnative/io/pipe_win32.rs b/src/libnative/io/pipe_win32.rs
index d5b22b4b018..87129bba845 100644
--- a/src/libnative/io/pipe_win32.rs
+++ b/src/libnative/io/pipe_win32.rs
@@ -92,7 +92,7 @@ use std::os;
 use std::ptr;
 use std::rt::rtio;
 use std::rt::rtio::{IoResult, IoError};
-use std::sync::atomics;
+use std::sync::atomic;
 use std::rt::mutex;
 
 use super::c;
@@ -128,8 +128,8 @@ impl Drop for Event {
 struct Inner {
     handle: libc::HANDLE,
     lock: mutex::NativeMutex,
-    read_closed: atomics::AtomicBool,
-    write_closed: atomics::AtomicBool,
+    read_closed: atomic::AtomicBool,
+    write_closed: atomic::AtomicBool,
 }
 
 impl Inner {
@@ -137,8 +137,8 @@ impl Inner {
         Inner {
             handle: handle,
             lock: unsafe { mutex::NativeMutex::new() },
-            read_closed: atomics::AtomicBool::new(false),
-            write_closed: atomics::AtomicBool::new(false),
+            read_closed: atomic::AtomicBool::new(false),
+            write_closed: atomic::AtomicBool::new(false),
         }
     }
 }
@@ -326,11 +326,11 @@ impl UnixStream {
     fn handle(&self) -> libc::HANDLE { self.inner.handle }
 
     fn read_closed(&self) -> bool {
-        self.inner.read_closed.load(atomics::SeqCst)
+        self.inner.read_closed.load(atomic::SeqCst)
     }
 
     fn write_closed(&self) -> bool {
-        self.inner.write_closed.load(atomics::SeqCst)
+        self.inner.write_closed.load(atomic::SeqCst)
     }
 
     fn cancel_io(&self) -> IoResult<()> {
@@ -525,14 +525,14 @@ impl rtio::RtioPipe for UnixStream {
         // and 2 with a lock with respect to close_read(), we're guaranteed that
         // no thread will erroneously sit in a read forever.
         let _guard = unsafe { self.inner.lock.lock() };
-        self.inner.read_closed.store(true, atomics::SeqCst);
+        self.inner.read_closed.store(true, atomic::SeqCst);
         self.cancel_io()
     }
 
     fn close_write(&mut self) -> IoResult<()> {
         // see comments in close_read() for why this lock is necessary
         let _guard = unsafe { self.inner.lock.lock() };
-        self.inner.write_closed.store(true, atomics::SeqCst);
+        self.inner.write_closed.store(true, atomic::SeqCst);
         self.cancel_io()
     }
 
diff --git a/src/libnative/io/timer_unix.rs b/src/libnative/io/timer_unix.rs
index 87c320e0457..06d48f2f886 100644
--- a/src/libnative/io/timer_unix.rs
+++ b/src/libnative/io/timer_unix.rs
@@ -52,7 +52,7 @@ use std::os;
 use std::ptr;
 use std::rt::rtio;
 use std::rt::rtio::IoResult;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::comm;
 
 use io::c;
@@ -207,8 +207,8 @@ impl Timer {
         // instead of ()
         unsafe { HELPER.boot(|| {}, helper); }
 
-        static mut ID: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-        let id = unsafe { ID.fetch_add(1, atomics::Relaxed) };
+        static mut ID: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+        let id = unsafe { ID.fetch_add(1, atomic::Relaxed) };
         Ok(Timer {
             id: id,
             inner: Some(box Inner {
diff --git a/src/librustrt/at_exit_imp.rs b/src/librustrt/at_exit_imp.rs
index 9e4c4229676..81376692981 100644
--- a/src/librustrt/at_exit_imp.rs
+++ b/src/librustrt/at_exit_imp.rs
@@ -17,21 +17,21 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use collections::MutableSeq;
 use collections::vec::Vec;
-use core::atomics;
+use core::atomic;
 use core::mem;
 
 use exclusive::Exclusive;
 
 type Queue = Exclusive<Vec<proc():Send>>;
 
-static mut QUEUE: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-static mut RUNNING: atomics::AtomicBool = atomics::INIT_ATOMIC_BOOL;
+static mut QUEUE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static mut RUNNING: atomic::AtomicBool = atomic::INIT_ATOMIC_BOOL;
 
 pub fn init() {
     let state: Box<Queue> = box Exclusive::new(Vec::new());
     unsafe {
-        rtassert!(!RUNNING.load(atomics::SeqCst));
-        assert!(QUEUE.swap(mem::transmute(state), atomics::SeqCst) == 0);
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        assert!(QUEUE.swap(mem::transmute(state), atomic::SeqCst) == 0);
     }
 }
 
@@ -41,8 +41,8 @@ pub fn push(f: proc():Send) {
         // all with respect to `run`, meaning that this could theoretically be a
         // use-after-free. There's not much we can do to protect against that,
         // however. Let's just assume a well-behaved runtime and go from there!
-        rtassert!(!RUNNING.load(atomics::SeqCst));
-        let queue = QUEUE.load(atomics::SeqCst);
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.load(atomic::SeqCst);
         rtassert!(queue != 0);
         (*(queue as *const Queue)).lock().push(f);
     }
@@ -50,8 +50,8 @@ pub fn push(f: proc():Send) {
 
 pub fn run() {
     let cur = unsafe {
-        rtassert!(!RUNNING.load(atomics::SeqCst));
-        let queue = QUEUE.swap(0, atomics::SeqCst);
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.swap(0, atomic::SeqCst);
         rtassert!(queue != 0);
 
         let queue: Box<Queue> = mem::transmute(queue);
diff --git a/src/librustrt/bookkeeping.rs b/src/librustrt/bookkeeping.rs
index dc96aecff80..a88bc86828f 100644
--- a/src/librustrt/bookkeeping.rs
+++ b/src/librustrt/bookkeeping.rs
@@ -18,12 +18,12 @@
 //! each respective runtime to make sure that they call increment() and
 //! decrement() manually.
 
-use core::atomics;
+use core::atomic;
 use core::ops::Drop;
 
 use mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
 
-static mut TASK_COUNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+static mut TASK_COUNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
 static mut TASK_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
 pub struct Token { _private: () }
@@ -35,13 +35,13 @@ impl Drop for Token {
 /// Increment the number of live tasks, returning a token which will decrement
 /// the count when dropped.
 pub fn increment() -> Token {
-    let _ = unsafe { TASK_COUNT.fetch_add(1, atomics::SeqCst) };
+    let _ = unsafe { TASK_COUNT.fetch_add(1, atomic::SeqCst) };
     Token { _private: () }
 }
 
 pub fn decrement() {
     unsafe {
-        if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
+        if TASK_COUNT.fetch_sub(1, atomic::SeqCst) == 1 {
             let guard = TASK_LOCK.lock();
             guard.signal();
         }
@@ -53,7 +53,7 @@ pub fn decrement() {
 pub fn wait_for_other_tasks() {
     unsafe {
         let guard = TASK_LOCK.lock();
-        while TASK_COUNT.load(atomics::SeqCst) > 0 {
+        while TASK_COUNT.load(atomic::SeqCst) > 0 {
             guard.wait();
         }
     }
diff --git a/src/librustrt/mutex.rs b/src/librustrt/mutex.rs
index f24c2609d09..08da9b8aad1 100644
--- a/src/librustrt/mutex.rs
+++ b/src/librustrt/mutex.rs
@@ -519,7 +519,7 @@ mod imp {
 #[cfg(windows)]
 mod imp {
     use alloc::libc_heap::malloc_raw;
-    use core::atomics;
+    use core::atomic;
     use core::ptr;
     use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
     use libc;
@@ -533,20 +533,20 @@ mod imp {
 
     pub struct Mutex {
         // pointers for the lock/cond handles, atomically updated
-        lock: atomics::AtomicUint,
-        cond: atomics::AtomicUint,
+        lock: atomic::AtomicUint,
+        cond: atomic::AtomicUint,
     }
 
     pub static MUTEX_INIT: Mutex = Mutex {
-        lock: atomics::INIT_ATOMIC_UINT,
-        cond: atomics::INIT_ATOMIC_UINT,
+        lock: atomic::INIT_ATOMIC_UINT,
+        cond: atomic::INIT_ATOMIC_UINT,
     };
 
     impl Mutex {
         pub unsafe fn new() -> Mutex {
             Mutex {
-                lock: atomics::AtomicUint::new(init_lock()),
-                cond: atomics::AtomicUint::new(init_cond()),
+                lock: atomic::AtomicUint::new(init_lock()),
+                cond: atomic::AtomicUint::new(init_cond()),
             }
         }
         pub unsafe fn lock(&self) {
@@ -573,38 +573,38 @@ mod imp {
         /// that no other thread is currently holding the lock or waiting on the
         /// condition variable contained inside.
         pub unsafe fn destroy(&self) {
-            let lock = self.lock.swap(0, atomics::SeqCst);
-            let cond = self.cond.swap(0, atomics::SeqCst);
+            let lock = self.lock.swap(0, atomic::SeqCst);
+            let cond = self.cond.swap(0, atomic::SeqCst);
             if lock != 0 { free_lock(lock) }
             if cond != 0 { free_cond(cond) }
         }
 
         unsafe fn getlock(&self) -> *mut c_void {
-            match self.lock.load(atomics::SeqCst) {
+            match self.lock.load(atomic::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
             }
             let lock = init_lock();
-            match self.lock.compare_and_swap(0, lock, atomics::SeqCst) {
+            match self.lock.compare_and_swap(0, lock, atomic::SeqCst) {
                 0 => return lock as *mut c_void,
                 _ => {}
             }
             free_lock(lock);
-            return self.lock.load(atomics::SeqCst) as *mut c_void;
+            return self.lock.load(atomic::SeqCst) as *mut c_void;
         }
 
         unsafe fn getcond(&self) -> *mut c_void {
-            match self.cond.load(atomics::SeqCst) {
+            match self.cond.load(atomic::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
             }
             let cond = init_cond();
-            match self.cond.compare_and_swap(0, cond, atomics::SeqCst) {
+            match self.cond.compare_and_swap(0, cond, atomic::SeqCst) {
                 0 => return cond as *mut c_void,
                 _ => {}
             }
             free_cond(cond);
-            return self.cond.load(atomics::SeqCst) as *mut c_void;
+            return self.cond.load(atomic::SeqCst) as *mut c_void;
         }
     }
 
diff --git a/src/librustrt/task.rs b/src/librustrt/task.rs
index 5873c8273e7..881756c5266 100644
--- a/src/librustrt/task.rs
+++ b/src/librustrt/task.rs
@@ -18,7 +18,7 @@ use core::prelude::*;
 use alloc::arc::Arc;
 use alloc::boxed::{BoxAny, Box};
 use core::any::Any;
-use core::atomics::{AtomicUint, SeqCst};
+use core::atomic::{AtomicUint, SeqCst};
 use core::iter::Take;
 use core::kinds::marker;
 use core::mem;
diff --git a/src/librustrt/unwind.rs b/src/librustrt/unwind.rs
index 344d3a0f103..cd2fb2efcec 100644
--- a/src/librustrt/unwind.rs
+++ b/src/librustrt/unwind.rs
@@ -63,7 +63,7 @@ use alloc::boxed::Box;
 use collections::string::String;
 use collections::vec::Vec;
 use core::any::Any;
-use core::atomics;
+use core::atomic;
 use core::cmp;
 use core::fmt;
 use core::intrinsics;
@@ -91,16 +91,16 @@ pub type Callback = fn(msg: &Any + Send, file: &'static str, line: uint);
 //
 // For more information, see below.
 static MAX_CALLBACKS: uint = 16;
-static mut CALLBACKS: [atomics::AtomicUint, ..MAX_CALLBACKS] =
-        [atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT];
-static mut CALLBACK_CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+static mut CALLBACKS: [atomic::AtomicUint, ..MAX_CALLBACKS] =
+        [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
+static mut CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
 
 impl Unwinder {
     pub fn new() -> Unwinder {
@@ -466,11 +466,11 @@ fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) ->
     // callback. Additionally, CALLBACK_CNT may briefly be higher than
     // MAX_CALLBACKS, so we're sure to clamp it as necessary.
     let callbacks = unsafe {
-        let amt = CALLBACK_CNT.load(atomics::SeqCst);
+        let amt = CALLBACK_CNT.load(atomic::SeqCst);
         CALLBACKS.slice_to(cmp::min(amt, MAX_CALLBACKS))
     };
     for cb in callbacks.iter() {
-        match cb.load(atomics::SeqCst) {
+        match cb.load(atomic::SeqCst) {
             0 => {}
             n => {
                 let f: Callback = unsafe { mem::transmute(n) };
@@ -518,18 +518,18 @@ fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) ->
 /// currently possible to unregister a callback once it has been registered.
 #[experimental]
 pub unsafe fn register(f: Callback) -> bool {
-    match CALLBACK_CNT.fetch_add(1, atomics::SeqCst) {
+    match CALLBACK_CNT.fetch_add(1, atomic::SeqCst) {
         // The invocation code has knowledge of this window where the count has
         // been incremented, but the callback has not been stored. We're
         // guaranteed that the slot we're storing into is 0.
         n if n < MAX_CALLBACKS => {
-            let prev = CALLBACKS[n].swap(mem::transmute(f), atomics::SeqCst);
+            let prev = CALLBACKS[n].swap(mem::transmute(f), atomic::SeqCst);
             rtassert!(prev == 0);
             true
         }
         // If we accidentally bumped the count too high, pull it back.
         _ => {
-            CALLBACK_CNT.store(MAX_CALLBACKS, atomics::SeqCst);
+            CALLBACK_CNT.store(MAX_CALLBACKS, atomic::SeqCst);
             false
         }
     }
diff --git a/src/libstd/io/tempfile.rs b/src/libstd/io/tempfile.rs
index f580dfd80f0..1d53ed81437 100644
--- a/src/libstd/io/tempfile.rs
+++ b/src/libstd/io/tempfile.rs
@@ -19,7 +19,7 @@ use option::{Option, None, Some};
 use os;
 use path::{Path, GenericPath};
 use result::{Ok, Err};
-use sync::atomics;
+use sync::atomic;
 
 #[cfg(stage0)]
 use iter::Iterator; // NOTE(stage0): Remove after snapshot.
@@ -42,13 +42,13 @@ impl TempDir {
             return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
         }
 
-        static mut CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+        static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
 
         for _ in range(0u, 1000) {
             let filename =
                 format!("rs-{}-{}-{}",
                         unsafe { libc::getpid() },
-                        unsafe { CNT.fetch_add(1, atomics::SeqCst) },
+                        unsafe { CNT.fetch_add(1, atomic::SeqCst) },
                         suffix);
             let p = tmpdir.join(filename);
             match fs::mkdir(&p, io::UserRWX) {
diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs
index 26e854d9d99..331cfa1a59e 100644
--- a/src/libstd/io/test.rs
+++ b/src/libstd/io/test.rs
@@ -16,7 +16,7 @@ use libc;
 use os;
 use prelude::*;
 use std::io::net::ip::*;
-use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
+use sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
 
 macro_rules! iotest (
     { fn $name:ident() $b:block $(#[$a:meta])* } => (
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index dfa691d1823..6ba964441ee 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -48,7 +48,7 @@ use result::{Err, Ok, Result};
 use slice::{Vector, ImmutableVector, MutableVector, ImmutableEqVector};
 use str::{Str, StrSlice, StrAllocating};
 use string::String;
-use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
+use sync::atomic::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 use vec::Vec;
 
 #[cfg(unix)]
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index 80493ebb4a9..6c02cc631f5 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -20,7 +20,7 @@ use option::{Some, None};
 use os;
 use result::{Ok, Err};
 use str::StrSlice;
-use sync::atomics;
+use sync::atomic;
 use unicode::char::UnicodeChar;
 
 pub use self::imp::write;
@@ -28,9 +28,9 @@ pub use self::imp::write;
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
 pub fn log_enabled() -> bool {
-    static mut ENABLED: atomics::AtomicInt = atomics::INIT_ATOMIC_INT;
+    static mut ENABLED: atomic::AtomicInt = atomic::INIT_ATOMIC_INT;
     unsafe {
-        match ENABLED.load(atomics::SeqCst) {
+        match ENABLED.load(atomic::SeqCst) {
             1 => return false,
             2 => return true,
             _ => {}
@@ -41,7 +41,7 @@ pub fn log_enabled() -> bool {
         Some(..) => 2,
         None => 1,
     };
-    unsafe { ENABLED.store(val, atomics::SeqCst); }
+    unsafe { ENABLED.store(val, atomic::SeqCst); }
     val == 2
 }
 
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index fa30ddbcc48..ed24ed2a569 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -14,7 +14,7 @@ use libc::uintptr_t;
 use option::{Some, None, Option};
 use os;
 use str::Str;
-use sync::atomics;
+use sync::atomic;
 
 /// Dynamically inquire about whether we're running under V.
 /// You should usually not use this unless your test definitely
@@ -41,8 +41,8 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
 }
 
 pub fn min_stack() -> uint {
-    static mut MIN: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-    match unsafe { MIN.load(atomics::SeqCst) } {
+    static mut MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+    match unsafe { MIN.load(atomic::SeqCst) } {
         0 => {}
         n => return n - 1,
     }
@@ -50,7 +50,7 @@ pub fn min_stack() -> uint {
     let amt = amt.unwrap_or(2 * 1024 * 1024);
     // 0 is our sentinel value, so ensure that we'll never see 0 after
     // initialization has run
-    unsafe { MIN.store(amt + 1, atomics::SeqCst); }
+    unsafe { MIN.store(amt + 1, atomic::SeqCst); }
     return amt;
 }
 
diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs
index cc32818baa4..1d189f8d4bc 100644
--- a/src/libstd/sync/mod.rs
+++ b/src/libstd/sync/mod.rs
@@ -17,12 +17,18 @@
 
 #![experimental]
 
-pub use core_sync::{atomics, deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
+#[stable]
+pub use core_sync::atomic;
+
+pub use core_sync::{deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
 pub use core_sync::{Arc, Weak, Mutex, MutexGuard, Condvar, Barrier};
 pub use core_sync::{RWLock, RWLockReadGuard, RWLockWriteGuard};
 pub use core_sync::{Semaphore, SemaphoreGuard};
 pub use core_sync::one::{Once, ONCE_INIT};
 
+#[deprecated = "use atomic instead"]
+pub use atomics = core_sync::atomic;
+
 pub use self::future::Future;
 pub use self::task_pool::TaskPool;
 
diff --git a/src/libsync/atomics.rs b/src/libsync/atomic.rs
index 0be124ad584..101d869451c 100644
--- a/src/libsync/atomics.rs
+++ b/src/libsync/atomic.rs
@@ -41,7 +41,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicUint, SeqCst};
+//! use std::sync::atomic::{AtomicUint, SeqCst};
 //! use std::task::deschedule;
 //!
 //! fn main() {
@@ -67,7 +67,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicOption, SeqCst};
+//! use std::sync::atomic::{AtomicOption, SeqCst};
 //!
 //! fn main() {
 //!     struct BigObject;
@@ -91,7 +91,7 @@
 //! Keep a global count of live tasks:
 //!
 //! ```
-//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
 //!
 //! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
 //!
@@ -106,16 +106,18 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use core::mem;
 
-pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
-pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
-pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
-pub use core::atomics::fence;
+pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
+pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
+pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomic::fence;
 
 /// An atomic, nullable unique pointer
 ///
 /// This can be used as the concurrency primitive for operations that transfer
 /// owned heap objects across tasks.
 #[unsafe_no_drop_flag]
+#[deprecated = "no longer used; will eventually be replaced by a higher-level\
+                concept like MVar"]
 pub struct AtomicOption<T> {
     p: AtomicUint,
 }
@@ -227,4 +229,3 @@ mod test {
         assert!(p.take(SeqCst) == Some(box 2));
     }
 }
-
diff --git a/src/libsync/comm/oneshot.rs b/src/libsync/comm/oneshot.rs
index c9782db5c24..188bea83ac8 100644
--- a/src/libsync/comm/oneshot.rs
+++ b/src/libsync/comm/oneshot.rs
@@ -39,7 +39,7 @@ use core::mem;
 use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};
 
-use atomics;
+use atomic;
 use comm::Receiver;
 
 // Various states you can find a port in.
@@ -49,7 +49,7 @@ static DISCONNECTED: uint = 2;
 
 pub struct Packet<T> {
     // Internal state of the chan/port pair (stores the blocked task as well)
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     // One-shot data slot location
     data: Option<T>,
     // when used for the second time, a oneshot channel must be upgraded, and
@@ -86,7 +86,7 @@ impl<T: Send> Packet<T> {
         Packet {
             data: None,
             upgrade: NothingSent,
-            state: atomics::AtomicUint::new(EMPTY),
+            state: atomic::AtomicUint::new(EMPTY),
         }
     }
 
@@ -100,7 +100,7 @@ impl<T: Send> Packet<T> {
         self.data = Some(t);
         self.upgrade = SendUsed;
 
-        match self.state.swap(DATA, atomics::SeqCst) {
+        match self.state.swap(DATA, atomic::SeqCst) {
             // Sent the data, no one was waiting
             EMPTY => Ok(()),
 
@@ -136,11 +136,11 @@ impl<T: Send> Packet<T> {
     pub fn recv(&mut self) -> Result<T, Failure<T>> {
         // Attempt to not block the task (it's a little expensive). If it looks
         // like we're not empty, then immediately go through to `try_recv`.
-        if self.state.load(atomics::SeqCst) == EMPTY {
+        if self.state.load(atomic::SeqCst) == EMPTY {
             let t: Box<Task> = Local::take();
             t.deschedule(1, |task| {
                 let n = unsafe { task.cast_to_uint() };
-                match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+                match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
                     // Nothing on the channel, we legitimately block
                     EMPTY => Ok(()),
 
@@ -160,7 +160,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Err(Empty),
 
             // We saw some data on the channel, but the channel can be used
@@ -170,7 +170,7 @@ impl<T: Send> Packet<T> {
             // the state changes under our feet we'd rather just see that state
             // change.
             DATA => {
-                self.state.compare_and_swap(DATA, EMPTY, atomics::SeqCst);
+                self.state.compare_and_swap(DATA, EMPTY, atomic::SeqCst);
                 match self.data.take() {
                     Some(data) => Ok(data),
                     None => unreachable!(),
@@ -207,7 +207,7 @@ impl<T: Send> Packet<T> {
         };
         self.upgrade = GoUp(up);
 
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // If the channel is empty or has data on it, then we're good to go.
             // Senders will check the data before the upgrade (in case we
             // plastered over the DATA state).
@@ -223,7 +223,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn drop_chan(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             DATA | DISCONNECTED | EMPTY => {}
 
             // If someone's waiting, we gotta wake them up
@@ -235,7 +235,7 @@ impl<T: Send> Packet<T> {
     }
 
     pub fn drop_port(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // An empty channel has nothing to do, and a remotely disconnected
             // channel also has nothing to do b/c we're about to run the drop
             // glue
@@ -258,7 +258,7 @@ impl<T: Send> Packet<T> {
     // If Ok, the value is whether this port has data, if Err, then the upgraded
     // port needs to be checked instead of this one.
     pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Ok(false), // Welp, we tried
             DATA => Ok(true),   // we have some un-acquired data
             DISCONNECTED if self.data.is_some() => Ok(true), // we have data
@@ -283,7 +283,7 @@ impl<T: Send> Packet<T> {
     // because there is data, or fail because there is an upgrade pending.
     pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
         let n = unsafe { task.cast_to_uint() };
-        match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+        match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
             EMPTY => SelSuccess,
             DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
             DISCONNECTED if self.data.is_some() => {
@@ -317,7 +317,7 @@ impl<T: Send> Packet<T> {
     //
     // The return value indicates whether there's data on this port.
     pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
-        let state = match self.state.load(atomics::SeqCst) {
+        let state = match self.state.load(atomic::SeqCst) {
             // Each of these states means that no further activity will happen
             // with regard to abortion selection
             s @ EMPTY |
@@ -326,7 +326,7 @@ impl<T: Send> Packet<T> {
 
             // If we've got a blocked task, then use an atomic to gain ownership
             // of it (may fail)
-            n => self.state.compare_and_swap(n, EMPTY, atomics::SeqCst)
+            n => self.state.compare_and_swap(n, EMPTY, atomic::SeqCst)
         };
 
         // Now that we've got ownership of our state, figure out what to do
@@ -367,6 +367,6 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.state.load(atomics::SeqCst), DISCONNECTED);
+        assert_eq!(self.state.load(atomic::SeqCst), DISCONNECTED);
     }
 }
diff --git a/src/libsync/comm/shared.rs b/src/libsync/comm/shared.rs
index d13b2c32978..979b0ebcf8f 100644
--- a/src/libsync/comm/shared.rs
+++ b/src/libsync/comm/shared.rs
@@ -28,7 +28,7 @@ use rustrt::mutex::NativeMutex;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;
 
-use atomics;
+use atomic;
 use mpsc = mpsc_queue;
 
 static DISCONNECTED: int = int::MIN;
@@ -40,17 +40,17 @@ static MAX_STEALS: int = 1 << 20;
 
 pub struct Packet<T> {
     queue: mpsc::Queue<T>,
-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up
 
     // The number of channels which are currently using this packet.
-    channels: atomics::AtomicInt,
+    channels: atomic::AtomicInt,
 
     // See the discussion in Port::drop and the channel send methods for what
     // these are used for
-    port_dropped: atomics::AtomicBool,
-    sender_drain: atomics::AtomicInt,
+    port_dropped: atomic::AtomicBool,
+    sender_drain: atomic::AtomicInt,
 
     // this lock protects various portions of this implementation during
     // select()
@@ -68,12 +68,12 @@ impl<T: Send> Packet<T> {
     pub fn new() -> Packet<T> {
         let p = Packet {
             queue: mpsc::Queue::new(),
-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
-            channels: atomics::AtomicInt::new(2),
-            port_dropped: atomics::AtomicBool::new(false),
-            sender_drain: atomics::AtomicInt::new(0),
+            to_wake: atomic::AtomicUint::new(0),
+            channels: atomic::AtomicInt::new(2),
+            port_dropped: atomic::AtomicBool::new(false),
+            sender_drain: atomic::AtomicInt::new(0),
             select_lock: unsafe { NativeMutex::new() },
         };
         return p;
@@ -96,11 +96,11 @@ impl<T: Send> Packet<T> {
     pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
         match task {
             Some(task) => {
-                assert_eq!(self.cnt.load(atomics::SeqCst), 0);
-                assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+                assert_eq!(self.cnt.load(atomic::SeqCst), 0);
+                assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
                 self.to_wake.store(unsafe { task.cast_to_uint() },
-                                   atomics::SeqCst);
-                self.cnt.store(-1, atomics::SeqCst);
+                                   atomic::SeqCst);
+                self.cnt.store(-1, atomic::SeqCst);
 
                 // This store is a little sketchy. What's happening here is
                 // that we're transferring a blocker from a oneshot or stream
@@ -138,7 +138,7 @@ impl<T: Send> Packet<T> {
 
     pub fn send(&mut self, t: T) -> Result<(), T> {
         // See Port::drop for what's going on
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
 
         // Note that the multiple sender case is a little trickier
         // semantically than the single sender case. The logic for
@@ -165,12 +165,12 @@ impl<T: Send> Packet<T> {
         // preflight check serves as the definitive "this will never be
         // received". Once we get beyond this check, we have permanently
         // entered the realm of "this may be received"
-        if self.cnt.load(atomics::SeqCst) < DISCONNECTED + FUDGE {
+        if self.cnt.load(atomic::SeqCst) < DISCONNECTED + FUDGE {
             return Err(t)
         }
 
         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             -1 => {
                 self.take_to_wake().wake().map(|t| t.reawaken());
             }
@@ -187,9 +187,9 @@ impl<T: Send> Packet<T> {
             n if n < DISCONNECTED + FUDGE => {
                 // see the comment in 'try' for a shared channel for why this
                 // window of "not disconnected" is ok.
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
 
-                if self.sender_drain.fetch_add(1, atomics::SeqCst) == 0 {
+                if self.sender_drain.fetch_add(1, atomic::SeqCst) == 0 {
                     loop {
                         // drain the queue; for info on the thread yield see the
                         // discussion in try_recv
@@ -202,7 +202,7 @@ impl<T: Send> Packet<T> {
                         }
                         // maybe we're done; if we're not the last ones
                         // here, then we need to go try again.
-                        if self.sender_drain.fetch_sub(1, atomics::SeqCst) == 1 {
+                        if self.sender_drain.fetch_sub(1, atomic::SeqCst) == 1 {
                             break
                         }
                     }
@@ -242,15 +242,15 @@ impl<T: Send> Packet<T> {
 
     // Essentially the same thing as the stream decrement function.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);
 
         let steals = self.steals;
         self.steals = 0;
 
-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -259,7 +259,7 @@ impl<T: Send> Packet<T> {
             }
         }
 
-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }
 
@@ -311,9 +311,9 @@ impl<T: Send> Packet<T> {
             // might decrement steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -330,7 +330,7 @@ impl<T: Send> Packet<T> {
             // See the discussion in the stream implementation for why we try
             // again.
             None => {
-                match self.cnt.load(atomics::SeqCst) {
+                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),
                     _ => {
                         match self.queue.pop() {
@@ -348,20 +348,20 @@ impl<T: Send> Packet<T> {
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&mut self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }
 
     // Decrement the reference count on a channel. This is called whenever a
     // Chan is dropped and may end up waking up a receiver. It's the receiver's
     // responsibility on the other end to figure out that we've disconnected.
     pub fn drop_chan(&mut self) {
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             n if n > 1 => return,
             n => fail!("bad number of channels left {}", n),
         }
 
-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -371,11 +371,11 @@ impl<T: Send> Packet<T> {
     // See the long discussion inside of stream.rs for why the queue is drained,
     // and why it is done in this fashion.
     pub fn drop_port(&mut self) {
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                            steals, DISCONNECTED, atomics::SeqCst);
+                            steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             // See the discussion in 'try_recv' for why we yield
@@ -391,8 +391,8 @@ impl<T: Send> Packet<T> {
 
     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
@@ -407,15 +407,15 @@ impl<T: Send> Packet<T> {
     // This is different from the stream version because there's no need to peek
     // at the queue, we can just look at the local count.
     pub fn can_recv(&mut self) -> bool {
-        let cnt = self.cnt.load(atomics::SeqCst);
+        let cnt = self.cnt.load(atomic::SeqCst);
         cnt == DISCONNECTED || cnt - self.steals > 0
     }
 
     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -460,13 +460,13 @@ impl<T: Send> Packet<T> {
         // the channel count and figure out what we should do to make it
         // positive.
         let steals = {
-            let cnt = self.cnt.load(atomics::SeqCst);
+            let cnt = self.cnt.load(atomic::SeqCst);
             if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
         };
         let prev = self.bump(steals + 1);
 
         if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true
         } else {
             let cur = prev + steals + 1;
@@ -474,7 +474,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -495,8 +495,8 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed without also removing
         // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
     }
 }
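
The shared packet's send path above hinges on one convention: a sender pushes to the queue and then fetch_adds the count, where a previous value of -1 means a receiver is parked in to_wake, and the int::MIN DISCONNECTED sentinel must be re-stored whenever an RMW operation may have clobbered it. A condensed sketch of that branch (the in-tree code additionally checks the FUDGE window), in today's std::sync::atomic names (AtomicIsize for the old AtomicInt); wake_receiver is a hypothetical stand-in for take_to_wake().wake():

    use std::sync::atomic::{AtomicIsize, Ordering};

    const DISCONNECTED: isize = isize::MIN; // same sentinel as shared.rs

    // Sender-side sketch: the data is already in the queue; now bump the
    // count. A previous value of -1 means exactly one receiver is asleep and
    // must be woken. If the RMW hit the DISCONNECTED sentinel, put it back so
    // later readers still observe MIN.
    fn after_push(cnt: &AtomicIsize, wake_receiver: impl FnOnce()) {
        match cnt.fetch_add(1, Ordering::SeqCst) {
            -1 => wake_receiver(),
            DISCONNECTED => cnt.store(DISCONNECTED, Ordering::SeqCst),
            _ => {}
        }
    }
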
diff --git a/src/libsync/comm/stream.rs b/src/libsync/comm/stream.rs
index f8a28b7600f..11c563301e0 100644
--- a/src/libsync/comm/stream.rs
+++ b/src/libsync/comm/stream.rs
@@ -26,7 +26,7 @@ use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;
 
-use atomics;
+use atomic;
 use comm::Receiver;
 use spsc = spsc_queue;
 
@@ -39,11 +39,11 @@ static MAX_STEALS: int = 1 << 20;
 pub struct Packet<T> {
     queue: spsc::Queue<Message<T>>, // internal queue for all messages
 
-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up
 
-    port_dropped: atomics::AtomicBool, // flag if the channel has been destroyed.
+    port_dropped: atomic::AtomicBool, // flag if the channel has been destroyed.
 }
 
 pub enum Failure<T> {
@@ -76,11 +76,11 @@ impl<T: Send> Packet<T> {
         Packet {
             queue: unsafe { spsc::Queue::new(128) },
 
-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
+            to_wake: atomic::AtomicUint::new(0),
 
-            port_dropped: atomics::AtomicBool::new(false),
+            port_dropped: atomic::AtomicBool::new(false),
         }
     }
 
@@ -89,7 +89,7 @@ impl<T: Send> Packet<T> {
         // If the other port has deterministically gone away, then we definitely
         // must return the data back up the stack. Otherwise, the data is
         // considered as being sent.
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }
 
         match self.do_send(Data(t)) {
             UpSuccess | UpDisconnected => {},
@@ -100,14 +100,14 @@ impl<T: Send> Packet<T> {
     pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
         // If the port has gone away, then there's no need to proceed any
         // further.
-        if self.port_dropped.load(atomics::SeqCst) { return UpDisconnected }
+        if self.port_dropped.load(atomic::SeqCst) { return UpDisconnected }
 
         self.do_send(GoUp(up))
     }
 
     fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             // As described in the mod's doc comment, -1 == wakeup
             -1 => UpWoke(self.take_to_wake()),
             // As described before, SPSC queues must be >= -2
@@ -121,7 +121,7 @@ impl<T: Send> Packet<T> {
             // will never remove this data. We can only have at most one item to
             // drain (the port drains the rest).
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 let first = self.queue.pop();
                 let second = self.queue.pop();
                 assert!(second.is_none());
@@ -140,8 +140,8 @@ impl<T: Send> Packet<T> {
 
     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
@@ -150,15 +150,15 @@ impl<T: Send> Packet<T> {
     // back if it shouldn't sleep. Note that this is the location where we take
     // steals into account.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);
 
         let steals = self.steals;
         self.steals = 0;
 
-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -167,7 +167,7 @@ impl<T: Send> Packet<T> {
             }
         }
 
-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }
 
@@ -214,9 +214,9 @@ impl<T: Send> Packet<T> {
             // adding back in whatever we couldn't factor into steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -234,7 +234,7 @@ impl<T: Send> Packet<T> {
             }
 
             None => {
-                match self.cnt.load(atomics::SeqCst) {
+                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),
 
                     // This is a little bit of a tricky case. We failed to pop
@@ -263,7 +263,7 @@ impl<T: Send> Packet<T> {
     pub fn drop_chan(&mut self) {
         // Dropping a channel is pretty simple: we just flag it as disconnected
         // and then wake up a blocker if there is one.
-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -290,7 +290,7 @@ impl<T: Send> Packet<T> {
         // sends are gated on this flag, so we're immediately guaranteed that
         // there are a bounded number of active sends that we'll have to deal
         // with.
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);
 
         // Now that we're guaranteed to deal with a bounded number of senders,
         // we need to drain the queue. This draining process happens atomically
@@ -303,7 +303,7 @@ impl<T: Send> Packet<T> {
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                            steals, DISCONNECTED, atomics::SeqCst);
+                            steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             loop {
@@ -348,9 +348,9 @@ impl<T: Send> Packet<T> {
 
     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -400,7 +400,7 @@ impl<T: Send> Packet<T> {
         // of time until the data is actually sent.
         if was_upgrade {
             assert_eq!(self.steals, 0);
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             return Ok(true)
         }
 
@@ -413,7 +413,7 @@ impl<T: Send> Packet<T> {
         // If we were previously disconnected, then we know for sure that there
         // is no task in to_wake, so just keep going
         let has_data = if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true // there is data, that data is that we're disconnected
         } else {
             let cur = prev + steals + 1;
@@ -436,7 +436,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -475,7 +475,7 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed with also removing
         // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
     }
 }
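
Both the stream and shared receivers above cap the local steals counter at MAX_STEALS and periodically fold it back into the shared count so the two never drift apart without bound. A simplified sketch of that rebalance, again in modern std::sync::atomic terms; the in-tree code funnels the re-add through bump() so the DISCONNECTED sentinel is preserved, which the match below imitates directly:

    use std::cmp;
    use std::sync::atomic::{AtomicIsize, Ordering};

    const DISCONNECTED: isize = isize::MIN;
    const MAX_STEALS: isize = 1 << 20; // same bound as stream.rs/shared.rs

    // Receiver-side sketch: once the local steals counter passes MAX_STEALS,
    // zero the shared count and credit back whatever portion of it the steals
    // can absorb. Swapping may clobber the DISCONNECTED sentinel, so restore
    // it when observed.
    fn rebalance(cnt: &AtomicIsize, steals: &mut isize) {
        if *steals > MAX_STEALS {
            match cnt.swap(0, Ordering::SeqCst) {
                DISCONNECTED => cnt.store(DISCONNECTED, Ordering::SeqCst),
                n => {
                    let m = cmp::min(n, *steals);
                    *steals -= m;
                    cnt.fetch_add(n - m, Ordering::SeqCst); // i.e. bump(n - m)
                }
            }
        }
    }
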
diff --git a/src/libsync/comm/sync.rs b/src/libsync/comm/sync.rs
index e872952d9ee..aef02f654c1 100644
--- a/src/libsync/comm/sync.rs
+++ b/src/libsync/comm/sync.rs
@@ -44,12 +44,12 @@ use rustrt::local::Local;
 use rustrt::mutex::{NativeMutex, LockGuard};
 use rustrt::task::{Task, BlockedTask};
 
-use atomics;
+use atomic;
 
 pub struct Packet<T> {
     /// The only field outside of the mutex. Just done for kicks, but mainly because
     /// the other shared channel already had the code implemented
-    channels: atomics::AtomicUint,
+    channels: atomic::AtomicUint,
 
     /// The state field is protected by this mutex
     lock: NativeMutex,
@@ -131,7 +131,7 @@ fn wakeup(task: BlockedTask, guard: LockGuard) {
 impl<T: Send> Packet<T> {
     pub fn new(cap: uint) -> Packet<T> {
         Packet {
-            channels: atomics::AtomicUint::new(1),
+            channels: atomic::AtomicUint::new(1),
             lock: unsafe { NativeMutex::new() },
             state: UnsafeCell::new(State {
                 disconnected: false,
@@ -303,12 +303,12 @@ impl<T: Send> Packet<T> {
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }
 
     pub fn drop_chan(&self) {
         // Only flag the channel as disconnected if we're the last channel
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             _ => return
         }
@@ -411,7 +411,7 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
         let (_g, state) = self.lock();
         assert!(state.queue.dequeue().is_none());
         assert!(state.canceled.is_none());
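
The channels field above is a plain atomic refcount: clone_chan bumps it, drop_chan decrements it, and only the dropper that observes a previous value of 1 proceeds to tear the channel down. A sketch of that pattern in today's atomic types, with disconnect as a hypothetical stand-in for the mutex-protected teardown in sync.rs:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn clone_chan(channels: &AtomicUsize) {
        channels.fetch_add(1, Ordering::SeqCst);
    }

    // The dropper that observes a previous count of 1 was the last sender
    // and performs the disconnect; everyone else just returns.
    fn drop_chan(channels: &AtomicUsize, disconnect: impl FnOnce()) {
        if channels.fetch_sub(1, Ordering::SeqCst) == 1 {
            disconnect();
        }
    }
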
diff --git a/src/libsync/deque.rs b/src/libsync/deque.rs
index c541cc02774..d5a05e7a681 100644
--- a/src/libsync/deque.rs
+++ b/src/libsync/deque.rs
@@ -61,7 +61,7 @@ use core::mem::{forget, min_align_of, size_of, transmute};
 use core::ptr;
 use rustrt::exclusive::Exclusive;
 
-use atomics::{AtomicInt, AtomicPtr, SeqCst};
+use atomic::{AtomicInt, AtomicPtr, SeqCst};
 
 // Once the queue is less than 1/K full, it will be downsized. Note that
 // the deque requires that this number be less than 2.
@@ -414,7 +414,7 @@ mod tests {
     use std::rt::thread::Thread;
     use std::rand;
     use std::rand::Rng;
-    use atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+    use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
                   AtomicUint, INIT_ATOMIC_UINT};
     use std::vec;
 
diff --git a/src/libsync/lib.rs b/src/libsync/lib.rs
index f6a1684b669..de98f79093e 100644
--- a/src/libsync/lib.rs
+++ b/src/libsync/lib.rs
@@ -49,7 +49,7 @@ pub use raw::{Semaphore, SemaphoreGuard};
 
 // Core building blocks for all primitives in this crate
 
-pub mod atomics;
+pub mod atomic;
 
 // Concurrent data structures
 
diff --git a/src/libsync/mpmc_bounded_queue.rs b/src/libsync/mpmc_bounded_queue.rs
index d54186dc221..949ef3bc34c 100644
--- a/src/libsync/mpmc_bounded_queue.rs
+++ b/src/libsync/mpmc_bounded_queue.rs
@@ -37,7 +37,7 @@ use collections::Vec;
 use core::num::next_power_of_two;
 use core::cell::UnsafeCell;
 
-use atomics::{AtomicUint,Relaxed,Release,Acquire};
+use atomic::{AtomicUint,Relaxed,Release,Acquire};
 
 struct Node<T> {
     sequence: AtomicUint,
diff --git a/src/libsync/mpsc_intrusive.rs b/src/libsync/mpsc_intrusive.rs
index 11f124293b1..1f7841de7c1 100644
--- a/src/libsync/mpsc_intrusive.rs
+++ b/src/libsync/mpsc_intrusive.rs
@@ -37,7 +37,7 @@
 
 use core::prelude::*;
 
-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;
 
@@ -45,16 +45,16 @@ use core::cell::UnsafeCell;
 // initialization.
 
 pub struct Node<T> {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
     pub data: T,
 }
 
 pub struct DummyNode {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
 }
 
 pub struct Queue<T> {
-    pub head: atomics::AtomicUint,
+    pub head: atomic::AtomicUint,
     pub tail: UnsafeCell<*mut Node<T>>,
     pub stub: DummyNode,
 }
@@ -62,26 +62,26 @@ pub struct Queue<T> {
 impl<T: Send> Queue<T> {
     pub fn new() -> Queue<T> {
         Queue {
-            head: atomics::AtomicUint::new(0),
+            head: atomic::AtomicUint::new(0),
             tail: UnsafeCell::new(0 as *mut Node<T>),
             stub: DummyNode {
-                next: atomics::AtomicUint::new(0),
+                next: atomic::AtomicUint::new(0),
             },
         }
     }
 
     pub unsafe fn push(&self, node: *mut Node<T>) {
-        (*node).next.store(0, atomics::Release);
-        let prev = self.head.swap(node as uint, atomics::AcqRel);
+        (*node).next.store(0, atomic::Release);
+        let prev = self.head.swap(node as uint, atomic::AcqRel);
 
         // Note that this code is slightly modified to allow static
         // initialization of these queues with Rust's flavor of static
         // initialization.
         if prev == 0 {
-            self.stub.next.store(node as uint, atomics::Release);
+            self.stub.next.store(node as uint, atomic::Release);
         } else {
             let prev = prev as *mut Node<T>;
-            (*prev).next.store(node as uint, atomics::Release);
+            (*prev).next.store(node as uint, atomic::Release);
         }
     }
 
@@ -103,26 +103,26 @@ impl<T: Send> Queue<T> {
         let mut tail = if !tail.is_null() {tail} else {
             mem::transmute(&self.stub)
         };
-        let mut next = (*tail).next(atomics::Relaxed);
+        let mut next = (*tail).next(atomic::Relaxed);
         if tail as uint == &self.stub as *const DummyNode as uint {
             if next.is_null() {
                 return None;
             }
             *self.tail.get() = next;
             tail = next;
-            next = (*next).next(atomics::Relaxed);
+            next = (*next).next(atomic::Relaxed);
         }
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
         }
-        let head = self.head.load(atomics::Acquire) as *mut Node<T>;
+        let head = self.head.load(atomic::Acquire) as *mut Node<T>;
         if tail != head {
             return None;
         }
         let stub = mem::transmute(&self.stub);
         self.push(stub);
-        next = (*tail).next(atomics::Relaxed);
+        next = (*tail).next(atomic::Relaxed);
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
@@ -135,10 +135,10 @@ impl<T: Send> Node<T> {
     pub fn new(t: T) -> Node<T> {
         Node {
             data: t,
-            next: atomics::AtomicUint::new(0),
+            next: atomic::AtomicUint::new(0),
         }
     }
-    pub unsafe fn next(&self, ord: atomics::Ordering) -> *mut Node<T> {
+    pub unsafe fn next(&self, ord: atomic::Ordering) -> *mut Node<T> {
         mem::transmute::<uint, *mut Node<T>>(self.next.load(ord))
     }
 }
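
The push above is the producer half of a Vyukov-style intrusive MPSC queue: publish by swapping the node into head, then link the previous head to it; between those two steps the list is momentarily broken, which is why pop must tolerate a null next and retry. A sketch in today's AtomicPtr terms, assuming head has been seeded with a stub node so the previous head is never null (the in-tree version instead special-cases a zero head to permit static initialization):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct Node<T> {
        next: AtomicPtr<Node<T>>,
        data: T,
    }

    // Producer half: publish the node by swapping it into `head`, then link
    // the old head to it. Between the swap and the final store the chain is
    // momentarily broken, so the consumer must treat a null `next` as
    // "try again later". Assumes `head` was seeded with a stub node.
    unsafe fn push<T>(head: &AtomicPtr<Node<T>>, node: *mut Node<T>) {
        (*node).next.store(ptr::null_mut(), Ordering::Release);
        let prev = head.swap(node, Ordering::AcqRel);
        (*prev).next.store(node, Ordering::Release);
    }
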
diff --git a/src/libsync/mpsc_queue.rs b/src/libsync/mpsc_queue.rs
index 4f5dd07a6e5..012574808f3 100644
--- a/src/libsync/mpsc_queue.rs
+++ b/src/libsync/mpsc_queue.rs
@@ -46,7 +46,7 @@ use alloc::boxed::Box;
 use core::mem;
 use core::cell::UnsafeCell;
 
-use atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+use atomic::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
 
 /// A result of the `pop` function.
 pub enum PopResult<T> {
diff --git a/src/libsync/mutex.rs b/src/libsync/mutex.rs
index 1aa84e8f8d1..61d895dd406 100644
--- a/src/libsync/mutex.rs
+++ b/src/libsync/mutex.rs
@@ -60,7 +60,7 @@
 use core::prelude::*;
 
 use alloc::boxed::Box;
-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;
 use rustrt::local::Local;
@@ -137,7 +137,7 @@ enum Flavor {
 /// ```
 pub struct StaticMutex {
     /// Current set of flags on this mutex
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     /// an OS mutex used by native threads
     lock: mutex::StaticNativeMutex,
 
@@ -151,7 +151,7 @@ pub struct StaticMutex {
     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
     q: q::Queue<uint>,
-    green_cnt: atomics::AtomicUint,
+    green_cnt: atomic::AtomicUint,
 }
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
@@ -165,16 +165,16 @@ pub struct Guard<'a> {
 /// other mutex constants.
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
     lock: mutex::NATIVE_MUTEX_INIT,
-    state: atomics::INIT_ATOMIC_UINT,
+    state: atomic::INIT_ATOMIC_UINT,
     flavor: UnsafeCell { value: Unlocked },
     green_blocker: UnsafeCell { value: 0 },
     native_blocker: UnsafeCell { value: 0 },
-    green_cnt: atomics::INIT_ATOMIC_UINT,
+    green_cnt: atomic::INIT_ATOMIC_UINT,
     q: q::Queue {
-        head: atomics::INIT_ATOMIC_UINT,
+        head: atomic::INIT_ATOMIC_UINT,
         tail: UnsafeCell { value: 0 as *mut q::Node<uint> },
         stub: q::DummyNode {
-            next: atomics::INIT_ATOMIC_UINT,
+            next: atomic::INIT_ATOMIC_UINT,
         }
     }
 };
@@ -185,7 +185,7 @@ impl StaticMutex {
         // Attempt to steal the mutex from an unlocked state.
         //
         // FIXME: this can mess up the fairness of the mutex, which seems bad
-        match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
+        match self.state.compare_and_swap(0, LOCKED, atomic::SeqCst) {
             0 => {
                 // After acquiring the mutex, we can safely access the inner
                 // fields.
@@ -230,7 +230,7 @@ impl StaticMutex {
         // allow threads coming out of the native_lock function to try their
         // best to not hit a cvar in deschedule.
         let mut old = match self.state.compare_and_swap(0, LOCKED,
-                                                        atomics::SeqCst) {
+                                                        atomic::SeqCst) {
             0 => {
                 let flavor = if can_block {
                     NativeAcquisition
@@ -272,7 +272,7 @@ impl StaticMutex {
                 if old & LOCKED != 0 {
                     old = match self.state.compare_and_swap(old,
                                                             old | native_bit,
-                                                            atomics::SeqCst) {
+                                                            atomic::SeqCst) {
                         n if n == old => return Ok(()),
                         n => n
                     };
@@ -280,7 +280,7 @@ impl StaticMutex {
                     assert_eq!(old, 0);
                     old = match self.state.compare_and_swap(old,
                                                             old | LOCKED,
-                                                            atomics::SeqCst) {
+                                                            atomic::SeqCst) {
                         n if n == old => {
                             // After acquiring the lock, we have access to the
                             // flavor field, and we've regained access to our
@@ -330,7 +330,7 @@ impl StaticMutex {
         //
         // FIXME: There isn't a cancellation currently of an enqueue, forcing
         //        the unlocker to spin for a bit.
-        if self.green_cnt.fetch_add(1, atomics::SeqCst) == 0 {
+        if self.green_cnt.fetch_add(1, atomic::SeqCst) == 0 {
             Local::put(t);
             return
         }
@@ -348,7 +348,7 @@ impl StaticMutex {
     fn green_unlock(&self) {
         // If we're the only green thread, then there's no need to check the
         // queue; otherwise the FIXME above forces us to spin for a bit.
-        if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
+        if self.green_cnt.fetch_sub(1, atomic::SeqCst) == 1 { return }
         let node;
         loop {
             match unsafe { self.q.pop() } {
@@ -380,7 +380,7 @@ impl StaticMutex {
         // of the outer mutex.
         let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };
 
-        let mut state = self.state.load(atomics::SeqCst);
+        let mut state = self.state.load(atomic::SeqCst);
         let mut unlocked = false;
         let task;
         loop {
@@ -412,7 +412,7 @@ impl StaticMutex {
                     }
                     unlocked = true;
                 }
-                match self.state.compare_and_swap(LOCKED, 0, atomics::SeqCst) {
+                match self.state.compare_and_swap(LOCKED, 0, atomic::SeqCst) {
                     LOCKED => return,
                     n => { state = n; }
                 }
@@ -435,7 +435,7 @@ impl StaticMutex {
         loop {
             assert!(state & bit != 0);
             let new = state ^ bit;
-            match self.state.compare_and_swap(state, new, atomics::SeqCst) {
+            match self.state.compare_and_swap(state, new, atomic::SeqCst) {
                 n if n == state => break,
                 n => { state = n; }
             }
@@ -462,11 +462,11 @@ impl Mutex {
     pub fn new() -> Mutex {
         Mutex {
             lock: box StaticMutex {
-                state: atomics::AtomicUint::new(0),
+                state: atomic::AtomicUint::new(0),
                 flavor: UnsafeCell::new(Unlocked),
                 green_blocker: UnsafeCell::new(0),
                 native_blocker: UnsafeCell::new(0),
-                green_cnt: atomics::AtomicUint::new(0),
+                green_cnt: atomic::AtomicUint::new(0),
                 q: q::Queue::new(),
                 lock: unsafe { mutex::StaticNativeMutex::new() },
             }
@@ -498,7 +498,7 @@ impl<'a> Guard<'a> {
         if cfg!(debug) {
             // once we've acquired a lock, it's ok to access the flavor
             assert!(unsafe { *lock.flavor.get() != Unlocked });
-            assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
+            assert!(lock.state.load(atomic::SeqCst) & LOCKED != 0);
         }
         Guard { lock: lock }
     }
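
The lock path above always starts with the same fast path: one compare-and-swap of the state word from "no flags set" to LOCKED wins the mutex outright, and only on failure does it fall through to the green/native blocker machinery. A sketch of just that fast path using compare_exchange, the modern replacement for compare_and_swap; the LOCKED value is an assumed stand-in, since the real state word packs blocker bits alongside it:

    use std::sync::atomic::{AtomicUsize, Ordering};

    const LOCKED: usize = 1; // assumed flag value; the real word also
                             // carries green/native blocker bits

    // Fast path of StaticMutex::lock: one CAS from "no flags set" to LOCKED
    // takes the mutex outright. On failure the caller falls through to the
    // slow path (blocker queues, OS mutex), elided here.
    fn try_fast_lock(state: &AtomicUsize) -> bool {
        state
            .compare_exchange(0, LOCKED, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
    }
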
diff --git a/src/libsync/one.rs b/src/libsync/one.rs
index 6fad2c8aa40..4594345d2a3 100644
--- a/src/libsync/one.rs
+++ b/src/libsync/one.rs
@@ -16,7 +16,7 @@
 use core::prelude::*;
 
 use core::int;
-use core::atomics;
+use core::atomic;
 
 use mutex::{StaticMutex, MUTEX_INIT};
 
@@ -40,15 +40,15 @@ use mutex::{StaticMutex, MUTEX_INIT};
 /// ```
 pub struct Once {
     mutex: StaticMutex,
-    cnt: atomics::AtomicInt,
-    lock_cnt: atomics::AtomicInt,
+    cnt: atomic::AtomicInt,
+    lock_cnt: atomic::AtomicInt,
 }
 
 /// Initialization value for static `Once` values.
 pub static ONCE_INIT: Once = Once {
     mutex: MUTEX_INIT,
-    cnt: atomics::INIT_ATOMIC_INT,
-    lock_cnt: atomics::INIT_ATOMIC_INT,
+    cnt: atomic::INIT_ATOMIC_INT,
+    lock_cnt: atomic::INIT_ATOMIC_INT,
 };
 
 impl Once {
@@ -63,7 +63,7 @@ impl Once {
     /// has run and completed (it may not be the closure specified).
     pub fn doit(&self, f: ||) {
         // Optimize common path: load is much cheaper than fetch_add.
-        if self.cnt.load(atomics::SeqCst) < 0 {
+        if self.cnt.load(atomic::SeqCst) < 0 {
             return
         }
 
@@ -94,11 +94,11 @@ impl Once {
         // calling `doit` will return immediately before the initialization has
         // completed.
 
-        let prev = self.cnt.fetch_add(1, atomics::SeqCst);
+        let prev = self.cnt.fetch_add(1, atomic::SeqCst);
         if prev < 0 {
             // Make sure we never overflow: we'll never have int::MIN
             // simultaneous calls to `doit`, so this value cannot wrap back to 0
-            self.cnt.store(int::MIN, atomics::SeqCst);
+            self.cnt.store(int::MIN, atomic::SeqCst);
             return
         }
 
@@ -106,15 +106,15 @@ impl Once {
         // otherwise we run the job and record how many people will try to grab
         // this lock
         let guard = self.mutex.lock();
-        if self.cnt.load(atomics::SeqCst) > 0 {
+        if self.cnt.load(atomic::SeqCst) > 0 {
             f();
-            let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
-            self.lock_cnt.store(prev, atomics::SeqCst);
+            let prev = self.cnt.swap(int::MIN, atomic::SeqCst);
+            self.lock_cnt.store(prev, atomic::SeqCst);
         }
         drop(guard);
 
         // Last one out cleans up after everyone else, no leaks!
-        if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
+        if self.lock_cnt.fetch_add(-1, atomic::SeqCst) == 1 {
             unsafe { self.mutex.destroy() }
         }
     }
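
Once::doit above is built on two counters: cnt goes negative once the closure has run, so the common path is a single load; positive values are tickets held by racers, and lock_cnt later records how many of them must drain before the internal mutex can be destroyed. A sketch of the fast path and the ticket grab, in modern atomic types (isize::MIN standing in for int::MIN):

    use std::sync::atomic::{AtomicIsize, Ordering};

    // Fast path: once the closure has run, `cnt` is pinned at a negative
    // value, so a single load answers most calls.
    fn already_done(cnt: &AtomicIsize) -> bool {
        cnt.load(Ordering::SeqCst) < 0
    }

    // Slow path entry: grab a ticket. Seeing a negative previous value means
    // initialization finished while we raced in; re-pin the counter at MIN so
    // ticket increments can never wrap it back past zero.
    fn take_ticket(cnt: &AtomicIsize) -> bool {
        if cnt.fetch_add(1, Ordering::SeqCst) < 0 {
            cnt.store(isize::MIN, Ordering::SeqCst);
            false // done; no need to touch the mutex
        } else {
            true // caller must now contend on the mutex
        }
    }
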
diff --git a/src/libsync/raw.rs b/src/libsync/raw.rs
index e7a2d3e0639..49f60fe6f00 100644
--- a/src/libsync/raw.rs
+++ b/src/libsync/raw.rs
@@ -17,7 +17,7 @@
 
 use core::prelude::*;
 
-use core::atomics;
+use core::atomic;
 use core::finally::Finally;
 use core::kinds::marker;
 use core::mem;
@@ -458,7 +458,7 @@ pub struct RWLock {
     //
     // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
     // acquire/release orderings superfluously. Change these someday.
-    read_count: atomics::AtomicUint,
+    read_count: atomic::AtomicUint,
 }
 
 /// An RAII helper which is created by acquiring a read lock on an RWLock. When
@@ -490,7 +490,7 @@ impl RWLock {
         RWLock {
             order_lock: Semaphore::new(1),
             access_lock: Sem::new_and_signal(1, num_condvars),
-            read_count: atomics::AtomicUint::new(0),
+            read_count: atomic::AtomicUint::new(0),
         }
     }
 
@@ -499,7 +499,7 @@ impl RWLock {
     /// this one.
     pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
         let _guard = self.order_lock.access();
-        let old_count = self.read_count.fetch_add(1, atomics::Acquire);
+        let old_count = self.read_count.fetch_add(1, atomic::Acquire);
         if old_count == 0 {
             self.access_lock.acquire();
         }
@@ -575,7 +575,7 @@ impl<'a> RWLockWriteGuard<'a> {
         // things from now on
         unsafe { mem::forget(self) }
 
-        let old_count = lock.read_count.fetch_add(1, atomics::Release);
+        let old_count = lock.read_count.fetch_add(1, atomic::Release);
         // If another reader was already blocking, we need to hand off
         // the "reader cloud" access lock to them.
         if old_count != 0 {
@@ -600,7 +600,7 @@ impl<'a> Drop for RWLockWriteGuard<'a> {
 #[unsafe_destructor]
 impl<'a> Drop for RWLockReadGuard<'a> {
     fn drop(&mut self) {
-        let old_count = self.lock.read_count.fetch_sub(1, atomics::Release);
+        let old_count = self.lock.read_count.fetch_sub(1, atomic::Release);
         assert!(old_count > 0);
         if old_count == 1 {
             // Note: this release used to be outside of a locked access
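
The read_count choreography above is the classic reader-cloud handoff: the first reader in acquires the writer-exclusion lock on behalf of all readers, and the last reader out releases it. A sketch with acquire/release as hypothetical stand-ins for the semaphore operations on access_lock; the Acquire/Release orderings mirror the pairing that the FIXME in this file calls superfluous:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // First reader in takes the writer-exclusion lock for the whole
    // "reader cloud".
    fn reader_enter(read_count: &AtomicUsize, acquire: impl FnOnce()) {
        if read_count.fetch_add(1, Ordering::Acquire) == 0 {
            acquire();
        }
    }

    // Last reader out hands the access lock back to writers.
    fn reader_exit(read_count: &AtomicUsize, release: impl FnOnce()) {
        let old = read_count.fetch_sub(1, Ordering::Release);
        assert!(old > 0);
        if old == 1 {
            release();
        }
    }
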
diff --git a/src/libsync/spsc_queue.rs b/src/libsync/spsc_queue.rs
index d8cd44f9935..578e518cb8f 100644
--- a/src/libsync/spsc_queue.rs
+++ b/src/libsync/spsc_queue.rs
@@ -42,7 +42,7 @@ use core::mem;
 use core::cell::UnsafeCell;
 use alloc::arc::Arc;
 
-use atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+use atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
diff --git a/src/test/compile-fail/std-uncopyable-atomics.rs b/src/test/compile-fail/std-uncopyable-atomics.rs
index cd853a2cf4d..fa2cf6bda33 100644
--- a/src/test/compile-fail/std-uncopyable-atomics.rs
+++ b/src/test/compile-fail/std-uncopyable-atomics.rs
@@ -12,7 +12,7 @@
 
 #![feature(globs)]
 
-use std::sync::atomics::*;
+use std::sync::atomic::*;
 use std::ptr;
 
 fn main() {