author    Flavio Percoco <flaper87@gmail.com>  2014-03-23 11:37:31 +0100
committer Flavio Percoco <flaper87@gmail.com>  2014-03-23 11:37:31 +0100
commit    576e36e674d645cd4682cf2df43fb25c3d1a71d4 (patch)
tree      3d53cbf1db37999af25297e739d495a718721faa /src/libstd
parent    2ddb60565423bdc225ccc8dd4ebfb653c5650ba2 (diff)
Register new snapshots
Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/intrinsics.rs            83
-rw-r--r--  src/libstd/kinds.rs                 10
-rw-r--r--  src/libstd/lib.rs                    4
-rw-r--r--  src/libstd/sync/atomics_stage0.rs  930
-rw-r--r--  src/libstd/sync/mod.rs               4
-rw-r--r--  src/libstd/ty.rs                    11
6 files changed, 0 insertions(+), 1042 deletions(-)
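This commit is the routine cleanup that follows registering new snapshot compilers: every `#[cfg(stage0)]` copy of an item that was kept only so the previous snapshot could still build the tree is now dead code and gets deleted. Schematically, the bootstrap pattern being removed looks like this (a sketch assembled from the `kinds.rs` and `sync/mod.rs` hunks below, not new code):

```
// While a change bootstraps, two copies of an item coexist:

#[cfg(stage0)]           // compiled only by the old snapshot compiler
pub trait Share {}

#[cfg(not(stage0))]      // compiled by compilers built from current sources
#[lang="share"]
pub trait Share {}

// The same trick works at module granularity via #[path]:
#[cfg(stage0)]
#[path = "atomics_stage0.rs"]
pub mod atomics;
#[cfg(not(stage0))]
pub mod atomics;

// Once a snapshot that understands the new form is registered, the
// stage0 arms can never be compiled again and are safe to delete.
```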
diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs
index 95607bd8f79..78e3df4b6f8 100644
--- a/src/libstd/intrinsics.rs
+++ b/src/libstd/intrinsics.rs
@@ -164,90 +164,7 @@ pub trait TyVisitor {
     fn visit_self(&mut self) -> bool;
 }
 
-#[cfg(stage0)]
-extern "rust-intrinsic" {
-    pub fn atomic_cxchg<T>(dst: &mut T, old: T, src: T) -> T;
-    pub fn atomic_cxchg_acq<T>(dst: &mut T, old: T, src: T) -> T;
-    pub fn atomic_cxchg_rel<T>(dst: &mut T, old: T, src: T) -> T;
-    pub fn atomic_cxchg_acqrel<T>(dst: &mut T, old: T, src: T) -> T;
-    pub fn atomic_cxchg_relaxed<T>(dst: &mut T, old: T, src: T) -> T;
-
-    pub fn atomic_load<T>(src: &T) -> T;
-    pub fn atomic_load_acq<T>(src: &T) -> T;
-    pub fn atomic_load_relaxed<T>(src: &T) -> T;
-
-    pub fn atomic_store<T>(dst: &mut T, val: T);
-    pub fn atomic_store_rel<T>(dst: &mut T, val: T);
-    pub fn atomic_store_relaxed<T>(dst: &mut T, val: T);
-
-    pub fn atomic_xchg<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xchg_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xchg_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xchg_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xchg_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_xadd<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xadd_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xadd_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xadd_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xadd_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_xsub<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xsub_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xsub_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xsub_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xsub_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_and<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_and_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_and_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_and_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_and_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_nand<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_nand_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_nand_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_nand_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_nand_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_or<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_or_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_or_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_or_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_or_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_xor<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xor_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xor_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xor_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_xor_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_max<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_max_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_max_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_max_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_max_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_min<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_min_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_min_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_min_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_min_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_umin<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umin_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umin_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umin_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umin_relaxed<T>(dst: &mut T, src: T) -> T;
-
-    pub fn atomic_umax<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umax_acq<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umax_rel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umax_acqrel<T>(dst: &mut T, src: T) -> T;
-    pub fn atomic_umax_relaxed<T>(dst: &mut T, src: T) -> T;
-}
 
-#[cfg(not(stage0))]
 extern "rust-intrinsic" {
 
     // NB: These intrinsics take unsafe pointers because they mutate aliased
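The surviving `extern "rust-intrinsic"` block falls outside the displayed context, but the `NB` line above hints at why the stage0 copy existed at all: the old intrinsics took references, while the replacements take raw pointers so that mutation of aliased memory is not expressed through `&mut`. A hedged sketch of the assumed post-snapshot shape (these signatures are not shown in this diff):

```
extern "rust-intrinsic" {
    // Assumed replacements (period syntax): raw pointers instead of
    // references, because these operations mutate aliased memory.
    pub fn atomic_load<T>(src: *T) -> T;                      // was src: &T
    pub fn atomic_store<T>(dst: *mut T, val: T);              // was dst: &mut T
    pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
}
```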
diff --git a/src/libstd/kinds.rs b/src/libstd/kinds.rs
index c0a442a6141..2ce41a33c29 100644
--- a/src/libstd/kinds.rs
+++ b/src/libstd/kinds.rs
@@ -41,16 +41,6 @@ pub trait Pod {
 }
 
 /// Types that can be safely shared between threads, hence thread-safe.
-#[cfg(stage0)]
-pub trait Share {
-    // Empty
-}
-
-#[cfg(stage0)]
-impl<T> Share for T {}
-
-/// Types that can be safely shared between threads, hence thread-safe.
-#[cfg(not(stage0))]
 #[lang="share"]
 pub trait Share {
     // Empty
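With the lang item in place on all build stages, `Share` is an ordinary bound from the user's point of view. A hypothetical illustration (not part of this diff; the function name is made up):

```
// Hypothetical: the Share bound restricts T to types that are safe
// to share between tasks, e.g. the atomics in sync::atomics.
fn publish<T: Share>(value: &T) {
    // ... hand `value` to another task, typically via Arc<T> ...
}
```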
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index 8a890d0293c..021a932fc8b 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -58,7 +58,6 @@
 #[no_std];
 
 #[deny(missing_doc)];
-#[allow(deprecated_owned_vector)]; // NOTE: remove after stage0
 
 // When testing libstd, bring in libuv as the I/O backend so tests can print
 // things and all of the std::io tests have an I/O interface to run on top
@@ -78,9 +77,6 @@
 #[cfg(test)] pub use cmp = realstd::cmp;
 #[cfg(test)] pub use ty = realstd::ty;
 
-#[cfg(stage0)]
-pub use vec_ng = vec;
-
 // Run tests with libgreen instead of libnative.
 //
 // FIXME: This egregiously hacks around starting the test runner in a different
diff --git a/src/libstd/sync/atomics_stage0.rs b/src/libstd/sync/atomics_stage0.rs
deleted file mode 100644
index b501972532d..00000000000
--- a/src/libstd/sync/atomics_stage0.rs
+++ /dev/null
@@ -1,930 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Atomic types
-//!
-//! Atomic types provide primitive shared-memory communication between
-//! threads, and are the building blocks of other concurrent
-//! types.
-//!
-//! This module defines atomic versions of a selection of primitive
-//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, and `AtomicOption`.
-//! Atomic types present operations that, when used correctly, synchronize
-//! updates between threads.
-//!
-//! Each method takes an `Ordering` which represents the strength of
-//! the memory barrier for that operation. These orderings are the
-//! same as [C++11 atomic orderings][1].
-//!
-//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
-//!
-//! Atomic variables are safe to share between threads (they implement `Share`)
-//! but they do not themselves provide the mechanism for sharing. The most
-//! common way to share an atomic variable is to put it into an `Arc` (an
-//! atomically-reference-counted shared pointer).
-//!
-//! Most atomic types may be stored in static variables, initialized using
-//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
-//! are often used for lazy global initialization.
-//!
-//!
-//! # Examples
-//!
-//! A simple spinlock:
-//!
-//! ```ignore
-//! # // FIXME: Needs PR #12430
-//! extern crate sync;
-//!
-//! use sync::Arc;
-//! use std::sync::atomics::{AtomicUint, SeqCst};
-//! use std::task::deschedule;
-//!
-//! fn main() {
-//!     let spinlock = Arc::new(AtomicUint::new(1));
-//!
-//!     let spinlock_clone = spinlock.clone();
-//!     spawn(proc() {
-//!         spinlock_clone.store(0, SeqCst);
-//!     });
-//!
-//!     // Wait for the other task to release the lock
-//!     while spinlock.load(SeqCst) != 0 {
-//!         // Since tasks may not be preemptive (if they are green threads)
-//!         // yield to the scheduler to let the other task run. Low level
-//!         // concurrent code needs to take into account Rust's two threading
-//!         // models.
-//!         deschedule();
-//!     }
-//! }
-//! ```
-//!
-//! Transferring a heap object with `AtomicOption`:
-//!
-//! ```ignore
-//! # // FIXME: Needs PR #12430
-//! extern crate sync;
-//!
-//! use sync::Arc;
-//! use std::sync::atomics::{AtomicOption, SeqCst};
-//!
-//! fn main() {
-//!     struct BigObject;
-//!
-//!     let shared_big_object = Arc::new(AtomicOption::empty());
-//!
-//!     let shared_big_object_clone = shared_big_object.clone();
-//!     spawn(proc() {
-//!         let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
-//!         if unwrapped_big_object.is_some() {
-//!             println!("got a big object from another task");
-//!         } else {
-//!             println!("other task hasn't sent big object yet");
-//!         }
-//!     });
-//!
-//!     shared_big_object.swap(~BigObject, SeqCst);
-//! }
-//! ```
-//!
-//! Keep a global count of live tasks:
-//!
-//! ```
-//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
-//!
-//! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
-//!
-//! unsafe {
-//!     let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
-//!     println!("live tasks: {}", old_task_count + 1);
-//! }
-//! ```
-
-#[allow(missing_doc)];
-
-use intrinsics;
-use cast;
-use std::kinds::marker;
-use option::{Option,Some,None};
-use ops::Drop;
-use ty::Unsafe;
-
-/// An atomic boolean type.
-pub struct AtomicBool {
-    priv v: Unsafe<uint>,
-    priv nopod: marker::NoPod
-}
-
-/// A signed atomic integer type, supporting basic atomic arithmetic operations
-pub struct AtomicInt {
-    priv v: Unsafe<int>,
-    priv nopod: marker::NoPod
-}
-
-/// An unsigned atomic integer type, supporting basic atomic arithmetic operations
-pub struct AtomicUint {
-    priv v: Unsafe<uint>,
-    priv nopod: marker::NoPod
-}
-
-/// An unsigned atomic integer type that is forced to be 64 bits. This does not
-/// support all operations.
-pub struct AtomicU64 {
-    priv v: Unsafe<u64>,
-    priv nopod: marker::NoPod
-}
-
-/// An unsafe atomic pointer. Only supports basic atomic operations
-pub struct AtomicPtr<T> {
-    priv p: Unsafe<uint>,
-    priv nopod: marker::NoPod
-}
-
-/// An atomic, nullable unique pointer
-///
-/// This can be used as the concurrency primitive for operations that transfer
-/// owned heap objects across tasks.
-#[unsafe_no_drop_flag]
-pub struct AtomicOption<T> {
-    priv p: Unsafe<uint>,
-}
-
-/// Atomic memory orderings
-///
-/// Memory orderings limit the ways that both the compiler and CPU may reorder
-/// instructions around atomic operations. At its most restrictive,
-/// "sequentially consistent" atomics allow neither reads nor writes
-/// to be moved either before or after the atomic operation; on the other end
-/// "relaxed" atomics allow all reorderings.
-///
-/// Rust's memory orderings are the same as in C++[1].
-///
-/// [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
-pub enum Ordering {
-    /// No ordering constraints, only atomic operations
-    Relaxed,
-    /// When coupled with a store, all previous writes become visible
-    /// to another thread that performs a load with `Acquire` ordering
-    /// on the same value
-    Release,
-    /// When coupled with a load, all subsequent loads will see data
-    /// written before a store with `Release` ordering on the same value
-    /// in another thread
-    Acquire,
-    /// When coupled with a load, uses `Acquire` ordering, and with a store
-    /// `Release` ordering
-    AcqRel,
-    /// Like `AcqRel` with the additional guarantee that all threads see all
-    /// sequentially consistent operations in the same order.
-    SeqCst
-}
-
-/// An `AtomicBool` initialized to `false`
-pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: Unsafe{value: 0,
-                                                                  marker1: marker::InvariantType},
-                                                        nopod: marker::NoPod };
-/// An `AtomicInt` initialized to `0`
-pub static INIT_ATOMIC_INT  : AtomicInt  = AtomicInt  { v: Unsafe{value: 0,
-                                                                  marker1: marker::InvariantType},
-                                                        nopod: marker::NoPod };
-/// An `AtomicUint` initialized to `0`
-pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: Unsafe{value: 0,
-                                                                  marker1: marker::InvariantType},
-                                                        nopod: marker::NoPod };
-/// An `AtomicU64` initialized to `0`
-pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: Unsafe{value: 0,
-                                                               marker1: marker::InvariantType},
-                                                     nopod: marker::NoPod };
-
-
-// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
-static UINT_TRUE: uint = -1;
-
-impl AtomicBool {
-    /// Create a new `AtomicBool`
-    pub fn new(v: bool) -> AtomicBool {
-        let val = if v { UINT_TRUE } else { 0 };
-        AtomicBool { v: Unsafe::new(val), nopod: marker::NoPod }
-    }
-
-    /// Load the value
-    #[inline]
-    pub fn load(&self, order: Ordering) -> bool {
-        unsafe { atomic_load(&*self.v.get(), order) > 0 }
-    }
-
-    /// Store the value
-    #[inline]
-    pub fn store(&mut self, val: bool, order: Ordering) {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_store(&mut *self.v.get(), val, order); }
-    }
-
-    /// Store a value, returning the old value
-    #[inline]
-    pub fn swap(&mut self, val: bool, order: Ordering) -> bool {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_swap(&mut *self.v.get(), val, order) > 0 }
-    }
-
-    /// If the current value is the same as expected, store a new value
-    ///
-    /// Compare the current value with `old`; if they are the same then
-    /// replace the current value with `new`. Return the previous value.
-    /// If the return value is equal to `old` then the value was updated.
-    ///
-    /// # Examples
-    ///
-    /// ```ignore
-    /// # // FIXME: Needs PR #12430
-    /// extern crate sync;
-    ///
-    /// use sync::Arc;
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
-    /// use std::task::deschedule;
-    ///
-    /// fn main() {
-    ///     let spinlock = Arc::new(AtomicBool::new(false));
-    ///     let spinlock_clone = spinlock.clone();
-    ///
-    ///     spawn(proc() {
-    ///         with_lock(&spinlock, || println!("task 1 in lock"));
-    ///     });
-    ///
-    ///     spawn(proc() {
-    ///         with_lock(&spinlock_clone, || println!("task 2 in lock"));
-    ///     });
-    /// }
-    ///
-    /// fn with_lock(spinlock: &Arc<AtomicBool>, f: || -> ()) {
-    ///     // CAS loop until we are able to replace `false` with `true`
-    ///     while spinlock.compare_and_swap(false, true, SeqCst) {
-    ///         // Since tasks may not be preemptive (if they are green threads)
-    ///         // yield to the scheduler to let the other task run. Low level
-    ///         // concurrent code needs to take into account Rust's two threading
-    ///         // models.
-    ///         deschedule();
-    ///     }
-    ///
-    ///     // Now we have the spinlock
-    ///     f();
-    ///
-    ///     // Release the lock
-    ///     spinlock.store(false, SeqCst);
-    /// }
-    /// ```
-    #[inline]
-    pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool {
-        let old = if old { UINT_TRUE } else { 0 };
-        let new = if new { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_compare_and_swap(&mut *self.v.get(), old, new, order) > 0 }
-    }
-
-    /// A logical "and" operation
-    ///
-    /// Performs a logical "and" operation on the current value and the
-    /// argument `val`, and sets the new value to the result.
-    /// Returns the previous value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_and(false, SeqCst));
-    /// assert_eq!(false, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_and(true, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(false);
-    /// assert_eq!(false, foo.fetch_and(false, SeqCst));
-    /// assert_eq!(false, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_and(&mut *self.v.get(), val, order) > 0 }
-    }
-
-    /// A logical "nand" operation
-    ///
-    /// Performs a logical "nand" operation on the current value and the
-    /// argument `val`, and sets the new value to the result.
-    /// Returns the previous value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_nand(true, SeqCst));
-    /// assert_eq!(0, foo.load(SeqCst) as int);
-    /// assert_eq!(false, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(false);
-    /// assert_eq!(false, foo.fetch_nand(false, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_nand(&mut *self.v.get(), val, order) > 0 }
-    }
-
-    /// A logical "or" operation
-    ///
-    /// Performs a logical "or" operation on the current value and the
-    /// argument `val`, and sets the new value to the result.
-    /// Returns the previous value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_or(false, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_or(true, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(false);
-    /// assert_eq!(false, foo.fetch_or(false, SeqCst));
-    /// assert_eq!(false, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_or(&mut *self.v.get(), val, order) > 0 }
-    }
-
-    /// A logical "xor" operation
-    ///
-    /// Performs a logical "xor" operation on the current value and the
-    /// argument `val`, and sets the new value to the result.
-    /// Returns the previous value.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
-    /// assert_eq!(true, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(true);
-    /// assert_eq!(true, foo.fetch_xor(true, SeqCst));
-    /// assert_eq!(false, foo.load(SeqCst));
-    ///
-    /// let mut foo = AtomicBool::new(false);
-    /// assert_eq!(false, foo.fetch_xor(false, SeqCst));
-    /// assert_eq!(false, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool {
-        let val = if val { UINT_TRUE } else { 0 };
-
-        unsafe { atomic_xor(&mut *self.v.get(), val, order) > 0 }
-    }
-}
-
-impl AtomicInt {
-    /// Create a new `AtomicInt`
-    pub fn new(v: int) -> AtomicInt {
-        AtomicInt {v: Unsafe::new(v), nopod: marker::NoPod}
-    }
-
-    /// Load the value
-    #[inline]
-    pub fn load(&self, order: Ordering) -> int {
-        unsafe { atomic_load(&*self.v.get(), order) }
-    }
-
-    /// Store the value
-    #[inline]
-    pub fn store(&mut self, val: int, order: Ordering) {
-        unsafe { atomic_store(&mut *self.v.get(), val, order); }
-    }
-
-    /// Store a value, returning the old value
-    #[inline]
-    pub fn swap(&mut self, val: int, order: Ordering) -> int {
-        unsafe { atomic_swap(&mut *self.v.get(), val, order) }
-    }
-
-    /// If the current value is the same as expected, store a new value
-    ///
-    /// Compare the current value with `old`; if they are the same then
-    /// replace the current value with `new`. Return the previous value.
-    /// If the return value is equal to `old` then the value was updated.
-    #[inline]
-    pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int {
-        unsafe { atomic_compare_and_swap(&mut *self.v.get(), old, new, order) }
-    }
-
-    /// Add to the current value, returning the previous
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
-    ///
-    /// let mut foo = AtomicInt::new(0);
-    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
-    /// assert_eq!(10, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_add(&mut self, val: int, order: Ordering) -> int {
-        unsafe { atomic_add(&mut *self.v.get(), val, order) }
-    }
-
-    /// Subtract from the current value, returning the previous
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
-    ///
-    /// let mut foo = AtomicInt::new(0);
-    /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
-    /// assert_eq!(-10, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int {
-        unsafe { atomic_sub(&mut *self.v.get(), val, order) }
-    }
-}
-
-// Temporary workaround: 64-bit atomics cause a link failure on the
-// MIPS target because libgcc doesn't implement 64-bit atomic
-// operations for MIPS32.
-#[cfg(not(target_arch = "mips"))]
-impl AtomicU64 {
-    pub fn new(v: u64) -> AtomicU64 {
-        AtomicU64 { v: Unsafe::new(v), nopod: marker::NoPod }
-    }
-
-    #[inline]
-    pub fn load(&self, order: Ordering) -> u64 {
-        unsafe { atomic_load(&*self.v.get(), order) }
-    }
-
-    #[inline]
-    pub fn store(&mut self, val: u64, order: Ordering) {
-        unsafe { atomic_store(&mut *self.v.get(), val, order); }
-    }
-
-    #[inline]
-    pub fn swap(&mut self, val: u64, order: Ordering) -> u64 {
-        unsafe { atomic_swap(&mut *self.v.get(), val, order) }
-    }
-
-    #[inline]
-    pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 {
-        unsafe { atomic_compare_and_swap(&mut *self.v.get(), old, new, order) }
-    }
-
-    #[inline]
-    pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 {
-        unsafe { atomic_add(&mut *self.v.get(), val, order) }
-    }
-
-    #[inline]
-    pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 {
-        unsafe { atomic_sub(&mut *self.v.get(), val, order) }
-    }
-}
-
-impl AtomicUint {
-    /// Create a new `AtomicUint`
-    pub fn new(v: uint) -> AtomicUint {
-        AtomicUint { v: Unsafe::new(v), nopod: marker::NoPod }
-    }
-
-    /// Load the value
-    #[inline]
-    pub fn load(&self, order: Ordering) -> uint {
-        unsafe { atomic_load(&*self.v.get(), order) }
-    }
-
-    /// Store the value
-    #[inline]
-    pub fn store(&mut self, val: uint, order: Ordering) {
-        unsafe { atomic_store(&mut *self.v.get(), val, order); }
-    }
-
-    /// Store a value, returning the old value
-    #[inline]
-    pub fn swap(&mut self, val: uint, order: Ordering) -> uint {
-        unsafe { atomic_swap(&mut *self.v.get(), val, order) }
-    }
-
-    /// If the current value is the same as expected, store a new value
-    ///
-    /// Compare the current value with `old`; if they are the same then
-    /// replace the current value with `new`. Return the previous value.
-    /// If the return value is equal to `old` then the value was updated.
-    #[inline]
-    pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint {
-        unsafe { atomic_compare_and_swap(&mut *self.v.get(), old, new, order) }
-    }
-
-    /// Add to the current value, returning the previous
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
-    ///
-    /// let mut foo = AtomicUint::new(0);
-    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
-    /// assert_eq!(10, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint {
-        unsafe { atomic_add(&mut *self.v.get(), val, order) }
-    }
-
-    /// Subtract from the current value, returning the previous
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
-    ///
-    /// let mut foo = AtomicUint::new(10);
-    /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
-    /// assert_eq!(0, foo.load(SeqCst));
-    /// ```
-    #[inline]
-    pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint {
-        unsafe { atomic_sub(&mut *self.v.get(), val, order) }
-    }
-}
-
-impl<T> AtomicPtr<T> {
-    /// Create a new `AtomicPtr`
-    pub fn new(p: *mut T) -> AtomicPtr<T> {
-        AtomicPtr { p: Unsafe::new(p as uint), nopod: marker::NoPod }
-    }
-
-    /// Load the value
-    #[inline]
-    pub fn load(&self, order: Ordering) -> *mut T {
-        unsafe {
-            atomic_load(&*self.p.get(), order) as *mut T
-        }
-    }
-
-    /// Store the value
-    #[inline]
-    pub fn store(&mut self, ptr: *mut T, order: Ordering) {
-        unsafe { atomic_store(&mut *self.p.get(), ptr as uint, order); }
-    }
-
-    /// Store a value, returning the old value
-    #[inline]
-    pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T {
-        unsafe { atomic_swap(&mut *self.p.get(), ptr as uint, order) as *mut T }
-    }
-
-    /// If the current value is the same as expected, store a new value
-    ///
-    /// Compare the current value with `old`; if they are the same then
-    /// replace the current value with `new`. Return the previous value.
-    /// If the return value is equal to `old` then the value was updated.
-    #[inline]
-    pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
-        unsafe {
-            atomic_compare_and_swap(&mut *self.p.get(), old as uint,
-                                    new as uint, order) as *mut T
-        }
-    }
-}
-
-impl<T> AtomicOption<T> {
-    /// Create a new `AtomicOption`
-    pub fn new(p: ~T) -> AtomicOption<T> {
-        unsafe { AtomicOption { p: Unsafe::new(cast::transmute(p)) } }
-    }
-
-    /// Create a new `AtomicOption` that doesn't contain a value
-    pub fn empty() -> AtomicOption<T> { AtomicOption { p: Unsafe::new(0) } }
-
-    /// Store a value, returning the old value
-    #[inline]
-    pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> {
-        unsafe {
-            let val = cast::transmute(val);
-
-            let p = atomic_swap(&mut *self.p.get(), val, order);
-            if p as uint == 0 {
-                None
-            } else {
-                Some(cast::transmute(p))
-            }
-        }
-    }
-
-    /// Remove the value, leaving the `AtomicOption` empty.
-    #[inline]
-    pub fn take(&mut self, order: Ordering) -> Option<~T> {
-        unsafe { self.swap(cast::transmute(0), order) }
-    }
-
-    /// Replace an empty value with a non-empty value.
-    ///
-    /// Succeeds if the option is `None` and returns `None` if so. If
-    /// the option was already `Some`, returns `Some` of the rejected
-    /// value.
-    #[inline]
-    pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> {
-        unsafe {
-            let val = cast::transmute(val);
-            let expected = cast::transmute(0);
-            let oldval = atomic_compare_and_swap(&mut *self.p.get(), expected, val, order);
-            if oldval == expected {
-                None
-            } else {
-                Some(cast::transmute(val))
-            }
-        }
-    }
-
-    /// Returns `true` if the `AtomicOption` is empty.
-    ///
-    /// Be careful: The caller must have some external method of ensuring the
-    /// result does not get invalidated by another task after this returns.
-    #[inline]
-    pub fn is_empty(&mut self, order: Ordering) -> bool {
-        unsafe { atomic_load(&*self.p.get(), order) as uint == 0 }
-    }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for AtomicOption<T> {
-    fn drop(&mut self) {
-        let _ = self.take(SeqCst);
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_store<T>(dst: &mut T, val: T, order:Ordering) {
-    match order {
-        Release => intrinsics::atomic_store_rel(dst, val),
-        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
-        _       => intrinsics::atomic_store(dst, val)
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_load<T>(dst: &T, order:Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_load_acq(dst),
-        Relaxed => intrinsics::atomic_load_relaxed(dst),
-        _       => intrinsics::atomic_load(dst)
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_swap<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xchg_acq(dst, val),
-        Release => intrinsics::atomic_xchg_rel(dst, val),
-        AcqRel  => intrinsics::atomic_xchg_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
-        _       => intrinsics::atomic_xchg(dst, val)
-    }
-}
-
-/// Returns the old value (like __sync_fetch_and_add).
-#[inline]
-pub unsafe fn atomic_add<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xadd_acq(dst, val),
-        Release => intrinsics::atomic_xadd_rel(dst, val),
-        AcqRel  => intrinsics::atomic_xadd_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
-        _       => intrinsics::atomic_xadd(dst, val)
-    }
-}
-
-/// Returns the old value (like __sync_fetch_and_sub).
-#[inline]
-pub unsafe fn atomic_sub<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xsub_acq(dst, val),
-        Release => intrinsics::atomic_xsub_rel(dst, val),
-        AcqRel  => intrinsics::atomic_xsub_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
-        _       => intrinsics::atomic_xsub(dst, val)
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_compare_and_swap<T>(dst:&mut T, old:T, new:T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
-        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
-        AcqRel  => intrinsics::atomic_cxchg_acqrel(dst, old, new),
-        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
-        _       => intrinsics::atomic_cxchg(dst, old, new),
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_and<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_and_acq(dst, val),
-        Release => intrinsics::atomic_and_rel(dst, val),
-        AcqRel  => intrinsics::atomic_and_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
-        _       => intrinsics::atomic_and(dst, val)
-    }
-}
-
-#[inline]
-pub unsafe fn atomic_nand<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_nand_acq(dst, val),
-        Release => intrinsics::atomic_nand_rel(dst, val),
-        AcqRel  => intrinsics::atomic_nand_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
-        _       => intrinsics::atomic_nand(dst, val)
-    }
-}
-
-
-#[inline]
-pub unsafe fn atomic_or<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_or_acq(dst, val),
-        Release => intrinsics::atomic_or_rel(dst, val),
-        AcqRel  => intrinsics::atomic_or_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
-        _       => intrinsics::atomic_or(dst, val)
-    }
-}
-
-
-#[inline]
-pub unsafe fn atomic_xor<T>(dst: &mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xor_acq(dst, val),
-        Release => intrinsics::atomic_xor_rel(dst, val),
-        AcqRel  => intrinsics::atomic_xor_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
-        _       => intrinsics::atomic_xor(dst, val)
-    }
-}
-
-
-/// An atomic fence.
-///
-/// A fence 'A' which has `Release` ordering semantics synchronizes with a
-/// fence 'B' with (at least) `Acquire` semantics if and only if there exist
-/// atomic operations X and Y, both operating on some atomic object 'M', such
-/// that A is sequenced before X, Y is sequenced before B, and Y observes
-/// the change to M. This provides a happens-before dependence between A and B.
-///
-/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
-/// with a fence.
-///
-/// A fence with `SeqCst` ordering, in addition to having both `Acquire` and
-/// `Release` semantics, participates in the global program order of the other
-/// `SeqCst` operations and/or fences.
-///
-/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
-///
-/// # Failure
-///
-/// Fails if `order` is `Relaxed`
-#[inline]
-pub fn fence(order: Ordering) {
-    unsafe {
-        match order {
-            Acquire => intrinsics::atomic_fence_acq(),
-            Release => intrinsics::atomic_fence_rel(),
-            AcqRel  => intrinsics::atomic_fence_acqrel(),
-            SeqCst  => intrinsics::atomic_fence(),
-            Relaxed => fail!("there is no such thing as a relaxed fence")
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use option::*;
-    use super::*;
-
-    #[test]
-    fn bool_() {
-        let mut a = AtomicBool::new(false);
-        assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
-        assert_eq!(a.compare_and_swap(false, true, SeqCst), true);
-
-        a.store(false, SeqCst);
-        assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
-    }
-
-    #[test]
-    fn option_empty() {
-        let mut option: AtomicOption<()> = AtomicOption::empty();
-        assert!(option.is_empty(SeqCst));
-    }
-
-    #[test]
-    fn option_swap() {
-        let mut p = AtomicOption::new(~1);
-        let a = ~2;
-
-        let b = p.swap(a, SeqCst);
-
-        assert_eq!(b, Some(~1));
-        assert_eq!(p.take(SeqCst), Some(~2));
-    }
-
-    #[test]
-    fn option_take() {
-        let mut p = AtomicOption::new(~1);
-
-        assert_eq!(p.take(SeqCst), Some(~1));
-        assert_eq!(p.take(SeqCst), None);
-
-        let p2 = ~2;
-        p.swap(p2, SeqCst);
-
-        assert_eq!(p.take(SeqCst), Some(~2));
-    }
-
-    #[test]
-    fn option_fill() {
-        let mut p = AtomicOption::new(~1);
-        assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak!
-        assert_eq!(p.take(SeqCst), Some(~1));
-
-        assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail
-        assert_eq!(p.take(SeqCst), Some(~2));
-    }
-
-    #[test]
-    fn bool_and() {
-        let mut a = AtomicBool::new(true);
-        assert_eq!(a.fetch_and(false, SeqCst),true);
-        assert_eq!(a.load(SeqCst),false);
-    }
-
-    static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL;
-    static mut S_INT  : AtomicInt  = INIT_ATOMIC_INT;
-    static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT;
-
-    #[test]
-    fn static_init() {
-        unsafe {
-            assert!(!S_BOOL.load(SeqCst));
-            assert!(S_INT.load(SeqCst) == 0);
-            assert!(S_UINT.load(SeqCst) == 0);
-        }
-    }
-
-    #[test]
-    fn different_sizes() {
-        unsafe {
-            let mut slot = 0u16;
-            assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0);
-
-            let mut slot = 0u8;
-            assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0);
-
-            let mut slot = 0u32;
-            assert_eq!(super::atomic_load(&mut slot, SeqCst), 0);
-
-            let mut slot = 0u64;
-            super::atomic_store(&mut slot, 2, SeqCst);
-        }
-    }
-}
-
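Only the stage0 copy of the module is deleted here; the same public API remains available as `std::sync::atomics` (see the `sync/mod.rs` hunk below). As a reminder of what survives, a minimal self-contained program adapted from the module documentation above (period syntax, `uint` and all):

```
use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};

static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;

fn main() {
    unsafe {
        // fetch_add returns the value *before* the addition.
        let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
        println!("live tasks: {}", old_task_count + 1);
    }
}
```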
diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs
index 994d12b34e5..3213c538152 100644
--- a/src/libstd/sync/mod.rs
+++ b/src/libstd/sync/mod.rs
@@ -16,10 +16,6 @@
 //! other types of concurrent primitives.
 
 pub mod arc;
-#[cfg(stage0)]
-#[path = "atomics_stage0.rs"]
-pub mod atomics;
-#[cfg(not(stage0))]
 pub mod atomics;
 pub mod deque;
 pub mod mpmc_bounded_queue;
diff --git a/src/libstd/ty.rs b/src/libstd/ty.rs
index 344235053f3..ae8be25205d 100644
--- a/src/libstd/ty.rs
+++ b/src/libstd/ty.rs
@@ -45,7 +45,6 @@ use kinds::marker;
 ///
 /// **NOTE:** Unsafe<T> fields are public to allow static initializers. It is not recommended
 /// to access its fields directly; `get` should be used instead.
-#[cfg(not(stage0))]
 #[lang="unsafe"]
 pub struct Unsafe<T> {
     /// Wrapped value
@@ -55,16 +54,6 @@ pub struct Unsafe<T> {
     marker1: marker::InvariantType<T>
 }
 
-/// Unsafe type for stage0
-#[cfg(stage0)]
-pub struct Unsafe<T> {
-    /// Wrapped value
-    value: T,
-
-    /// Invariance marker
-    marker1: marker::InvariantType<T>
-}
-
 impl<T> Unsafe<T> {
 
     /// Static constructor
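The diff is truncated inside the `Unsafe<T>` impl, but the deleted atomics above already demonstrate the intended access pattern: construct with `Unsafe::new` and reach the value through `get()`, which yields a raw pointer, rather than touching the public `value` field. A small sketch based on those call sites (the `Counter` type is hypothetical):

```
use std::ty::Unsafe;

// Hypothetical interior-mutability cell built on Unsafe<T>.
struct Counter {
    count: Unsafe<uint>,
}

impl Counter {
    fn new() -> Counter {
        Counter { count: Unsafe::new(0) }
    }

    fn bump(&self) -> uint {
        unsafe {
            // get() returns a raw pointer to the wrapped value.
            *self.count.get() += 1;
            *self.count.get()
        }
    }
}
```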