Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/collections/hash/map.rs    |  74
-rw-r--r--  src/libstd/collections/hash/set.rs    |  30
-rw-r--r--  src/libstd/collections/hash/table.rs  |  24
-rw-r--r--  src/libstd/io/buffered.rs             |  33
-rw-r--r--  src/libstd/io/test.rs                 |   6
-rw-r--r--  src/libstd/io/util.rs                 |  23
-rw-r--r--  src/libstd/lib.rs                     |   2
-rw-r--r--  src/libstd/os.rs                      |   4
-rw-r--r--  src/libstd/rt/backtrace.rs            |   2
-rw-r--r--  src/libstd/rt/unwind.rs               |  20
-rw-r--r--  src/libstd/rt/util.rs                 |  52
-rw-r--r--  src/libstd/sync/condvar.rs            |   8
-rw-r--r--  src/libstd/sync/mpsc/oneshot.rs       |   6
-rw-r--r--  src/libstd/sync/mpsc/shared.rs        |  18
-rw-r--r--  src/libstd/sync/mpsc/spsc_queue.rs    |  10
-rw-r--r--  src/libstd/sync/mpsc/stream.rs        |  10
-rw-r--r--  src/libstd/sync/mpsc/sync.rs          |   6
-rw-r--r--  src/libstd/sync/once.rs               |  10
-rw-r--r--  src/libstd/sys/common/thread_local.rs |   6
-rw-r--r--  src/libstd/sys/unix/thread.rs         |  14
-rw-r--r--  src/libstd/sys/unix/timer.rs          |   2
-rw-r--r--  src/libstd/sys/windows/condvar.rs     |  14
-rw-r--r--  src/libstd/sys/windows/mutex.rs       |  72
-rw-r--r--  src/libstd/sys/windows/sync.rs        |  46
24 files changed, 262 insertions(+), 230 deletions(-)
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index c3bdfbb12d8..80ae3076df3 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -20,7 +20,7 @@ use cmp::{max, Eq, PartialEq};
 use default::Default;
 use fmt::{self, Show};
 use hash::{self, Hash, SipHasher};
-use iter::{self, Iterator, IteratorExt, FromIterator, Extend, Map};
+use iter::{self, Iterator, ExactSizeIterator, IteratorExt, FromIterator, Extend, Map};
 use marker::Sized;
 use mem::{self, replace};
 use num::{Int, UnsignedInt};
@@ -1384,7 +1384,11 @@ impl<'a, K, V> Iterator for Iter<'a, K, V> {
     type Item = (&'a K, &'a V);
 
     #[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
-    #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[stable]
@@ -1392,7 +1396,11 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> {
     type Item = (&'a K, &'a mut V);
 
     #[inline] fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next() }
-    #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[stable]
@@ -1400,7 +1408,11 @@ impl<K, V> Iterator for IntoIter<K, V> {
     type Item = (K, V);
 
     #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
-    #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[stable]
@@ -1408,7 +1420,11 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> {
     type Item = &'a K;
 
     #[inline] fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
-    #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[stable]
@@ -1416,21 +1432,23 @@ impl<'a, K, V> Iterator for Values<'a, K, V> {
     type Item = &'a V;
 
     #[inline] fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
-    #[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[stable]
-impl<'a, K: 'a, V: 'a> Iterator for Drain<'a, K, V> {
+impl<'a, K, V> Iterator for Drain<'a, K, V> {
     type Item = (K, V);
 
-    #[inline]
-    fn next(&mut self) -> Option<(K, V)> {
-        self.inner.next()
-    }
-    #[inline]
-    fn size_hint(&self) -> (uint, Option<uint>) {
-        self.inner.size_hint()
-    }
+    #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
+    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
+}
+#[stable]
+impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
+    #[inline] fn len(&self) -> usize { self.inner.len() }
 }
 
 #[unstable = "matches collection reform v2 specification, waiting for dust to settle"]
@@ -2136,6 +2154,19 @@ mod test_map {
     }
 
     #[test]
+    fn test_iter_len() {
+        let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+        let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+        let mut iter = map.iter();
+
+        for _ in iter.by_ref().take(3) {}
+
+        assert_eq!(iter.len(), 3);
+    }
+
+    #[test]
     fn test_mut_size_hint() {
         let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
 
@@ -2149,6 +2180,19 @@ mod test_map {
     }
 
     #[test]
+    fn test_iter_mut_len() {
+        let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+        let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+        let mut iter = map.iter_mut();
+
+        for _ in iter.by_ref().take(3) {}
+
+        assert_eq!(iter.len(), 3);
+    }
+
+    #[test]
     fn test_index() {
         let mut map: HashMap<int, int> = HashMap::new();
 
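The new ExactSizeIterator impls are exactly what the test_iter_len/test_iter_mut_len tests above exercise. For reference, the same guarantee still holds in today's std; a minimal sketch in current syntax (i32/usize rather than the int/uint used in this diff):

    use std::collections::HashMap;

    fn main() {
        let map: HashMap<i32, i32> = (1..=6).map(|x| (x, x)).collect();

        // An ExactSizeIterator reports an exact size_hint of (len, Some(len)).
        let iter = map.iter();
        assert_eq!(iter.size_hint(), (6, Some(6)));
        assert_eq!(iter.len(), 6);

        // Consuming part of the iterator shrinks len(), as in test_iter_len above.
        let mut iter = map.iter();
        for _ in iter.by_ref().take(3) {}
        assert_eq!(iter.len(), 3);
    }
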
diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs
index 4003d3addf1..1293f45161d 100644
--- a/src/libstd/collections/hash/set.rs
+++ b/src/libstd/collections/hash/set.rs
@@ -18,7 +18,7 @@ use default::Default;
 use fmt::Show;
 use fmt;
 use hash::{self, Hash};
-use iter::{Iterator, IteratorExt, FromIterator, Map, Chain, Extend};
+use iter::{Iterator, ExactSizeIterator, IteratorExt, FromIterator, Map, Chain, Extend};
 use ops::{BitOr, BitAnd, BitXor, Sub};
 use option::Option::{Some, None, self};
 
@@ -837,7 +837,11 @@ impl<'a, K> Iterator for Iter<'a, K> {
     type Item = &'a K;
 
     fn next(&mut self) -> Option<&'a K> { self.iter.next() }
-    fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
+    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+#[stable]
+impl<'a, K> ExactSizeIterator for Iter<'a, K> {
+    fn len(&self) -> usize { self.iter.len() }
 }
 
 #[stable]
@@ -845,15 +849,23 @@ impl<K> Iterator for IntoIter<K> {
     type Item = K;
 
     fn next(&mut self) -> Option<K> { self.iter.next() }
-    fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
+    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+#[stable]
+impl<K> ExactSizeIterator for IntoIter<K> {
+    fn len(&self) -> usize { self.iter.len() }
 }
 
 #[stable]
-impl<'a, K: 'a> Iterator for Drain<'a, K> {
+impl<'a, K> Iterator for Drain<'a, K> {
     type Item = K;
 
     fn next(&mut self) -> Option<K> { self.iter.next() }
-    fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
+    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+#[stable]
+impl<'a, K> ExactSizeIterator for Drain<'a, K> {
+    fn len(&self) -> usize { self.iter.len() }
 }
 
 #[stable]
@@ -875,7 +887,7 @@ impl<'a, T, S, H> Iterator for Intersection<'a, T, S>
         }
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         let (_, upper) = self.iter.size_hint();
         (0, upper)
     }
@@ -900,7 +912,7 @@ impl<'a, T, S, H> Iterator for Difference<'a, T, S>
         }
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         let (_, upper) = self.iter.size_hint();
         (0, upper)
     }
@@ -915,7 +927,7 @@ impl<'a, T, S, H> Iterator for SymmetricDifference<'a, T, S>
     type Item = &'a T;
 
     fn next(&mut self) -> Option<&'a T> { self.iter.next() }
-    fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
+    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
 }
 
 #[stable]
@@ -927,7 +939,7 @@ impl<'a, T, S, H> Iterator for Union<'a, T, S>
     type Item = &'a T;
 
     fn next(&mut self) -> Option<&'a T> { self.iter.next() }
-    fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
+    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
 }
 
 #[cfg(test)]
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index 456f3763b39..f28b95dbe95 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -15,7 +15,7 @@ use self::BucketState::*;
 use clone::Clone;
 use cmp;
 use hash::{Hash, Hasher};
-use iter::{Iterator, count};
+use iter::{Iterator, ExactSizeIterator, count};
 use marker::{Copy, Sized, self};
 use mem::{min_align_of, size_of};
 use mem;
@@ -838,10 +838,13 @@ impl<'a, K, V> Iterator for Iter<'a, K, V> {
         })
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         (self.elems_left, Some(self.elems_left))
     }
 }
+impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
+    fn len(&self) -> usize { self.elems_left }
+}
 
 impl<'a, K, V> Iterator for IterMut<'a, K, V> {
     type Item = (&'a K, &'a mut V);
@@ -856,10 +859,13 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> {
         })
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         (self.elems_left, Some(self.elems_left))
     }
 }
+impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
+    fn len(&self) -> usize { self.elems_left }
+}
 
 impl<K, V> Iterator for IntoIter<K, V> {
     type Item = (SafeHash, K, V);
@@ -879,13 +885,16 @@ impl<K, V> Iterator for IntoIter<K, V> {
         })
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         let size = self.table.size();
         (size, Some(size))
     }
 }
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+    fn len(&self) -> usize { self.table.size() }
+}
 
-impl<'a, K: 'a, V: 'a> Iterator for Drain<'a, K, V> {
+impl<'a, K, V> Iterator for Drain<'a, K, V> {
     type Item = (SafeHash, K, V);
 
     #[inline]
@@ -904,11 +913,14 @@ impl<'a, K: 'a, V: 'a> Iterator for Drain<'a, K, V> {
         })
     }
 
-    fn size_hint(&self) -> (uint, Option<uint>) {
+    fn size_hint(&self) -> (usize, Option<usize>) {
         let size = self.table.size();
         (size, Some(size))
     }
 }
+impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
+    fn len(&self) -> usize { self.table.size() }
+}
 
 #[unsafe_destructor]
 impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
diff --git a/src/libstd/io/buffered.rs b/src/libstd/io/buffered.rs
index ba13bd05dc5..36def48b88b 100644
--- a/src/libstd/io/buffered.rs
+++ b/src/libstd/io/buffered.rs
@@ -13,6 +13,7 @@
 //! Buffering wrappers for I/O traits
 
 use cmp;
+use fmt;
 use io::{Reader, Writer, Stream, Buffer, DEFAULT_BUF_SIZE, IoResult};
 use iter::{IteratorExt, ExactSizeIterator};
 use ops::Drop;
@@ -51,6 +52,13 @@ pub struct BufferedReader<R> {
     cap: uint,
 }
 
+impl<R> fmt::Show for BufferedReader<R> where R: fmt::Show {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "BufferedReader {{ reader: {:?}, buffer: {}/{} }}",
+               self.inner, self.cap - self.pos, self.buf.len())
+    }
+}
+
 impl<R: Reader> BufferedReader<R> {
     /// Creates a new `BufferedReader` with the specified buffer capacity
     pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
@@ -148,6 +156,13 @@ pub struct BufferedWriter<W> {
     pos: uint
 }
 
+impl<W> fmt::Show for BufferedWriter<W> where W: fmt::Show {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "BufferedWriter {{ writer: {:?}, buffer: {}/{} }}",
+               self.inner.as_ref().unwrap(), self.pos, self.buf.len())
+    }
+}
+
 impl<W: Writer> BufferedWriter<W> {
     /// Creates a new `BufferedWriter` with the specified buffer capacity
     pub fn with_capacity(cap: uint, inner: W) -> BufferedWriter<W> {
@@ -235,6 +250,13 @@ pub struct LineBufferedWriter<W> {
     inner: BufferedWriter<W>,
 }
 
+impl<W> fmt::Show for LineBufferedWriter<W> where W: fmt::Show {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "LineBufferedWriter {{ writer: {:?}, buffer: {}/{} }}",
+               self.inner.inner, self.inner.pos, self.inner.buf.len())
+    }
+}
+
 impl<W: Writer> LineBufferedWriter<W> {
     /// Creates a new `LineBufferedWriter`
     pub fn new(inner: W) -> LineBufferedWriter<W> {
@@ -318,6 +340,17 @@ pub struct BufferedStream<S> {
     inner: BufferedReader<InternalBufferedWriter<S>>
 }
 
+impl<S> fmt::Show for BufferedStream<S> where S: fmt::Show {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        let reader = &self.inner;
+        let writer = &self.inner.inner.0;
+        write!(fmt, "BufferedStream {{ stream: {:?}, write_buffer: {}/{}, read_buffer: {}/{} }}",
+               writer.inner,
+               writer.pos, writer.buf.len(),
+               reader.cap - reader.pos, reader.buf.len())
+    }
+}
+
 impl<S: Stream> BufferedStream<S> {
     /// Creates a new buffered stream with explicitly listed capacities for the
     /// reader/writer buffer.
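All four Show impls added above follow the same pattern: print the wrapped reader/writer plus how much of the internal buffer is in use. A hypothetical stand-alone version of that pattern in today's Rust (fmt::Debug in place of the old fmt::Show; the Buffered type and its fields are illustrative only):

    use std::fmt;

    struct Buffered<R> {
        inner: R,
        buf: Vec<u8>,
        pos: usize,
        cap: usize,
    }

    impl<R: fmt::Debug> fmt::Debug for Buffered<R> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Same shape as the impls above: wrapped value, then filled/total bytes.
            write!(f, "Buffered {{ reader: {:?}, buffer: {}/{} }}",
                   self.inner, self.cap - self.pos, self.buf.len())
        }
    }

    fn main() {
        let b = Buffered { inner: "some reader", buf: vec![0; 8], pos: 2, cap: 5 };
        println!("{:?}", b); // Buffered { reader: "some reader", buffer: 3/8 }
    }
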
diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs
index 67c14dc2dc1..6de466eb20b 100644
--- a/src/libstd/io/test.rs
+++ b/src/libstd/io/test.rs
@@ -15,18 +15,18 @@ use prelude::v1::*;
 use libc;
 use os;
 use std::io::net::ip::*;
-use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering};
+use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 
 /// Get a port number, starting at 9600, for use in tests
 pub fn next_test_port() -> u16 {
-    static NEXT_OFFSET: AtomicUint = ATOMIC_UINT_INIT;
+    static NEXT_OFFSET: AtomicUsize = ATOMIC_USIZE_INIT;
     base_port() + NEXT_OFFSET.fetch_add(1, Ordering::Relaxed) as u16
 }
 
 // iOS has a pretty long tmpdir path which causes pipe creation
// to fail like: invalid argument: path must be smaller than SUN_LEN
 fn next_test_unix_socket() -> String {
-    static COUNT: AtomicUint = ATOMIC_UINT_INIT;
+    static COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
     // base port and pid are an attempt to be unique between multiple
     // test-runners of different configurations running on one
     // buildbot, the count is to be unique within this executable.
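The AtomicUint to AtomicUsize rename does not change the counter pattern next_test_port() relies on. A minimal sketch of that pattern in current Rust (the 9600 base is hard-coded here; the real function derives it from base_port()):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Each call hands out the next offset, so concurrent tests get distinct ports.
    static NEXT_OFFSET: AtomicUsize = AtomicUsize::new(0);

    fn next_test_port() -> u16 {
        9600 + NEXT_OFFSET.fetch_add(1, Ordering::Relaxed) as u16
    }

    fn main() {
        assert_eq!(next_test_port(), 9600);
        assert_eq!(next_test_port(), 9601);
    }
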
diff --git a/src/libstd/io/util.rs b/src/libstd/io/util.rs
index 5a7219495f5..ac7fb3f9cdb 100644
--- a/src/libstd/io/util.rs
+++ b/src/libstd/io/util.rs
@@ -16,6 +16,7 @@ use io;
 use slice::bytes::MutableByteVector;
 
 /// Wraps a `Reader`, limiting the number of bytes that can be read from it.
+#[derive(Show)]
 pub struct LimitReader<R> {
     limit: uint,
     inner: R
@@ -77,7 +78,7 @@ impl<R: Buffer> Buffer for LimitReader<R> {
 }
 
 /// A `Writer` which ignores bytes written to it, like /dev/null.
-#[derive(Copy)]
+#[derive(Copy, Show)]
 pub struct NullWriter;
 
 impl Writer for NullWriter {
@@ -86,7 +87,7 @@ impl Writer for NullWriter {
 }
 
 /// A `Reader` which returns an infinite stream of 0 bytes, like /dev/zero.
-#[derive(Copy)]
+#[derive(Copy, Show)]
 pub struct ZeroReader;
 
 impl Reader for ZeroReader {
@@ -107,7 +108,7 @@ impl Buffer for ZeroReader {
 }
 
 /// A `Reader` which is always at EOF, like /dev/null.
-#[derive(Copy)]
+#[derive(Copy, Show)]
 pub struct NullReader;
 
 impl Reader for NullReader {
@@ -128,18 +129,19 @@ impl Buffer for NullReader {
 ///
 /// The `Writer`s are delegated to in order. If any `Writer` returns an error,
 /// that error is returned immediately and remaining `Writer`s are not called.
-pub struct MultiWriter {
-    writers: Vec<Box<Writer+'static>>
+#[derive(Show)]
+pub struct MultiWriter<W> {
+    writers: Vec<W>
 }
 
-impl MultiWriter {
+impl<W> MultiWriter<W> where W: Writer {
     /// Creates a new `MultiWriter`
-    pub fn new(writers: Vec<Box<Writer+'static>>) -> MultiWriter {
+    pub fn new(writers: Vec<W>) -> MultiWriter<W> {
         MultiWriter { writers: writers }
     }
 }
 
-impl Writer for MultiWriter {
+impl<W> Writer for MultiWriter<W> where W: Writer {
     #[inline]
     fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
         for writer in self.writers.iter_mut() {
@@ -159,7 +161,7 @@ impl Writer for MultiWriter {
 
 /// A `Reader` which chains input from multiple `Reader`s, reading each to
 /// completion before moving onto the next.
-#[derive(Clone)]
+#[derive(Clone, Show)]
 pub struct ChainedReader<I, R> {
     readers: I,
     cur_reader: Option<R>,
@@ -198,6 +200,7 @@ impl<R: Reader, I: Iterator<Item=R>> Reader for ChainedReader<I, R> {
 
 /// A `Reader` which forwards input from another `Reader`, passing it along to
 /// a `Writer` as well. Similar to the `tee(1)` command.
+#[derive(Show)]
 pub struct TeeReader<R, W> {
     reader: R,
     writer: W,
@@ -239,7 +242,7 @@ pub fn copy<R: Reader, W: Writer>(r: &mut R, w: &mut W) -> io::IoResult<()> {
 }
 
 /// An adaptor converting an `Iterator<u8>` to a `Reader`.
-#[derive(Clone)]
+#[derive(Clone, Show)]
 pub struct IterReader<T> {
     iter: T,
 }
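Making MultiWriter generic over W means every fan-out target must now be the same concrete writer type (the old Vec<Box<Writer>> allowed mixing). A usage sketch against today's std::io::Write, with MultiWriter re-implemented here since the original type predates it:

    use std::io::{self, Write};

    struct MultiWriter<W> {
        writers: Vec<W>,
    }

    impl<W: Write> Write for MultiWriter<W> {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            // Delegate in order; the first error aborts the remaining writers.
            for w in &mut self.writers {
                w.write_all(buf)?;
            }
            Ok(buf.len())
        }
        fn flush(&mut self) -> io::Result<()> {
            for w in &mut self.writers {
                w.flush()?;
            }
            Ok(())
        }
    }

    fn main() -> io::Result<()> {
        let mut m: MultiWriter<Vec<u8>> = MultiWriter { writers: vec![Vec::new(), Vec::new()] };
        m.write_all(b"hello")?;
        assert_eq!(m.writers[0], b"hello".to_vec());
        assert_eq!(m.writers[1], b"hello".to_vec());
        Ok(())
    }
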
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index e15e611adc0..03d9030b025 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -73,7 +73,7 @@
 //!
 //! ## Concurrency, I/O, and the runtime
 //!
-//! The [`task`](task/index.html) module contains Rust's threading abstractions,
+//! The [`thread`](thread/index.html) module contains Rust's threading abstractions,
 //! while [`comm`](comm/index.html) contains the channel types for message
 //! passing. [`sync`](sync/index.html) contains further, primitive, shared
 //! memory types, including [`atomic`](sync/atomic/index.html).
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 6e3949b9e22..588f729d800 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -54,7 +54,7 @@ use result::Result::{Err, Ok};
 use slice::{AsSlice, SliceExt};
 use str::{Str, StrExt};
 use string::{String, ToString};
-use sync::atomic::{AtomicInt, ATOMIC_INT_INIT, Ordering};
+use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
 use vec::Vec;
 
 #[cfg(unix)] use ffi::{self, CString};
@@ -590,7 +590,7 @@ pub fn last_os_error() -> String {
     error_string(errno() as uint)
 }
 
-static EXIT_STATUS: AtomicInt = ATOMIC_INT_INIT;
+static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
 
 /// Sets the process exit code
 ///
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index bb0b6fe804b..f2d66e1a4d7 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -22,7 +22,7 @@ pub use sys::backtrace::write;
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
 pub fn log_enabled() -> bool {
-    static ENABLED: atomic::AtomicInt = atomic::ATOMIC_INT_INIT;
+    static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
     match ENABLED.load(Ordering::SeqCst) {
         1 => return false,
         2 => return true,
diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs
index 4cd0b29688a..6326e4c08f1 100644
--- a/src/libstd/rt/unwind.rs
+++ b/src/libstd/rt/unwind.rs
@@ -83,16 +83,16 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
 //
 // For more information, see below.
 const MAX_CALLBACKS: uint = 16;
-static CALLBACKS: [atomic::AtomicUint; MAX_CALLBACKS] =
-        [atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
-         atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT];
-static CALLBACK_CNT: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
+static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
+        [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
+         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
+static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
 
 thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
 
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index c076f0a7c6c..83f646d89dc 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -46,7 +46,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
 }
 
 pub fn min_stack() -> uint {
-    static MIN: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
+    static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
     match MIN.load(Ordering::SeqCst) {
         0 => {}
         n => return n - 1,
@@ -144,56 +144,6 @@ pub fn abort(args: fmt::Arguments) -> ! {
     let _ = write!(&mut w, "{}", args);
     let msg = str::from_utf8(&w.buf[0..w.pos]).unwrap_or("aborted");
     let msg = if msg.is_empty() {"aborted"} else {msg};
-
-    // Give some context to the message
-    let hash = msg.bytes().fold(0, |accum, val| accum + (val as uint) );
-    let quote = match hash % 10 {
-        0 => "
-It was from the artists and poets that the pertinent answers came, and I
-know that panic would have broken loose had they been able to compare notes.
-As it was, lacking their original letters, I half suspected the compiler of
-having asked leading questions, or of having edited the correspondence in
-corroboration of what he had latently resolved to see.",
-        1 => "
-There are not many persons who know what wonders are opened to them in the
-stories and visions of their youth; for when as children we listen and dream,
-we think but half-formed thoughts, and when as men we try to remember, we are
-dulled and prosaic with the poison of life. But some of us awake in the night
-with strange phantasms of enchanted hills and gardens, of fountains that sing
-in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
-down to sleeping cities of bronze and stone, and of shadowy companies of heroes
-that ride caparisoned white horses along the edges of thick forests; and then
-we know that we have looked back through the ivory gates into that world of
-wonder which was ours before we were wise and unhappy.",
-        2 => "
-Instead of the poems I had hoped for, there came only a shuddering blackness
-and ineffable loneliness; and I saw at last a fearful truth which no one had
-ever dared to breathe before — the unwhisperable secret of secrets — The fact
-that this city of stone and stridor is not a sentient perpetuation of Old New
-York as London is of Old London and Paris of Old Paris, but that it is in fact
-quite dead, its sprawling body imperfectly embalmed and infested with queer
-animate things which have nothing to do with it as it was in life.",
-        3 => "
-The ocean ate the last of the land and poured into the smoking gulf, thereby
-giving up all it had ever conquered. From the new-flooded lands it flowed
-again, uncovering death and decay; and from its ancient and immemorial bed it
-trickled loathsomely, uncovering nighted secrets of the years when Time was
-young and the gods unborn. Above the waves rose weedy remembered spires. The
-moon laid pale lilies of light on dead London, and Paris stood up from its damp
-grave to be sanctified with star-dust. Then rose spires and monoliths that were
-weedy but not remembered; terrible spires and monoliths of lands that men never
-knew were lands...",
-        4 => "
-There was a night when winds from unknown spaces whirled us irresistibly into
-limitless vacuum beyond all thought and entity. Perceptions of the most
-maddeningly untransmissible sort thronged upon us; perceptions of infinity
-which at the time convulsed us with joy, yet which are now partly lost to my
-memory and partly incapable of presentation to others.",
-        _ => "You've met with a terrible fate, haven't you?"
-    };
-    rterrln!("{}", "");
-    rterrln!("{}", quote);
-    rterrln!("{}", "");
     rterrln!("fatal runtime error: {}", msg);
     unsafe { intrinsics::abort(); }
 }
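min_stack() stores the cached result as value + 1 in the atomic so that 0 can keep meaning "not computed yet". A sketch of that trick in current Rust, assuming a RUST_MIN_STACK environment-variable lookup stands in for whatever the real function consults:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn min_stack() -> usize {
        static MIN: AtomicUsize = AtomicUsize::new(0);
        match MIN.load(Ordering::Relaxed) {
            0 => {}            // not cached yet, fall through
            n => return n - 1, // undo the +1 encoding
        }
        let amt = std::env::var("RUST_MIN_STACK")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(2 * 1024 * 1024);
        // Racing threads may all compute this once; they store the same value.
        MIN.store(amt + 1, Ordering::Relaxed);
        amt
    }

    fn main() {
        println!("min stack: {} bytes", min_stack());
    }
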
diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs
index 3c0ae71255d..bcd5f56a353 100644
--- a/src/libstd/sync/condvar.rs
+++ b/src/libstd/sync/condvar.rs
@@ -10,7 +10,7 @@
 
 use prelude::v1::*;
 
-use sync::atomic::{AtomicUint, Ordering, ATOMIC_UINT_INIT};
+use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 use sync::poison::{self, LockResult};
 use sys_common::condvar as sys;
 use sys_common::mutex as sys_mutex;
@@ -78,7 +78,7 @@ unsafe impl Sync for Condvar {}
 #[unstable = "may be merged with Condvar in the future"]
 pub struct StaticCondvar {
     inner: sys::Condvar,
-    mutex: AtomicUint,
+    mutex: AtomicUsize,
 }
 
 unsafe impl Send for StaticCondvar {}
@@ -88,7 +88,7 @@ unsafe impl Sync for StaticCondvar {}
 #[unstable = "may be merged with Condvar in the future"]
 pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
     inner: sys::CONDVAR_INIT,
-    mutex: ATOMIC_UINT_INIT,
+    mutex: ATOMIC_USIZE_INIT,
 };
 
 impl Condvar {
@@ -99,7 +99,7 @@ impl Condvar {
         Condvar {
             inner: box StaticCondvar {
                 inner: unsafe { sys::Condvar::new() },
-                mutex: AtomicUint::new(0),
+                mutex: AtomicUsize::new(0),
             }
         }
     }
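Usage of the Condvar type whose internals change above is unaffected: wait() must be called with the associated Mutex held and re-checked in a loop. A standard sketch in current Rust:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = Arc::clone(&pair);

        thread::spawn(move || {
            let (lock, cvar) = &*pair2;
            *lock.lock().unwrap() = true;
            cvar.notify_one();
        });

        let (lock, cvar) = &*pair;
        let mut ready = lock.lock().unwrap();
        while !*ready {
            // The loop guards against spurious wakeups.
            ready = cvar.wait(ready).unwrap();
        }
        println!("condition signalled");
    }
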
diff --git a/src/libstd/sync/mpsc/oneshot.rs b/src/libstd/sync/mpsc/oneshot.rs
index 5c2331d0f2e..ca667e65e30 100644
--- a/src/libstd/sync/mpsc/oneshot.rs
+++ b/src/libstd/sync/mpsc/oneshot.rs
@@ -42,7 +42,7 @@ use core::prelude::*;
 use sync::mpsc::Receiver;
 use sync::mpsc::blocking::{self, SignalToken};
 use core::mem;
-use sync::atomic::{AtomicUint, Ordering};
+use sync::atomic::{AtomicUsize, Ordering};
 
 // Various states you can find a port in.
const EMPTY: uint = 0;          // initial state: no data, no blocked receiver
@@ -56,7 +56,7 @@ const DISCONNECTED: uint = 2;   // channel is disconnected OR upgraded
 
 pub struct Packet<T> {
     // Internal state of the chan/port pair (stores the blocked task as well)
-    state: AtomicUint,
+    state: AtomicUsize,
     // One-shot data slot location
     data: Option<T>,
     // when used for the second time, a oneshot channel must be upgraded, and
@@ -93,7 +93,7 @@ impl<T: Send> Packet<T> {
         Packet {
             data: None,
             upgrade: NothingSent,
-            state: AtomicUint::new(EMPTY),
+            state: AtomicUsize::new(EMPTY),
         }
     }
 
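The oneshot Packet packs its whole lifecycle into that single AtomicUsize state word. A toy sketch of the idea in current Rust, using the EMPTY and DISCONNECTED values visible above and assuming DATA = 1 for the constant not shown in this hunk:

    use std::sync::atomic::{AtomicUsize, Ordering};

    const EMPTY: usize = 0;        // no data, no blocked receiver
    const DATA: usize = 1;         // assumed: data is ready to be picked up
    const DISCONNECTED: usize = 2; // channel is disconnected or upgraded

    struct State(AtomicUsize);

    impl State {
        fn send(&self) -> bool {
            // Only an EMPTY channel accepts data; compare_exchange makes the
            // EMPTY -> DATA transition race-free.
            self.0
                .compare_exchange(EMPTY, DATA, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
        }
        fn disconnect(&self) {
            self.0.store(DISCONNECTED, Ordering::SeqCst);
        }
    }

    fn main() {
        let s = State(AtomicUsize::new(EMPTY));
        assert!(s.send());
        assert!(!s.send()); // second send fails: the state is no longer EMPTY
        s.disconnect();
    }
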
diff --git a/src/libstd/sync/mpsc/shared.rs b/src/libstd/sync/mpsc/shared.rs
index 4295d116aed..c97af4c6bca 100644
--- a/src/libstd/sync/mpsc/shared.rs
+++ b/src/libstd/sync/mpsc/shared.rs
@@ -25,7 +25,7 @@ use core::prelude::*;
 use core::cmp;
 use core::int;
 
-use sync::atomic::{AtomicUint, AtomicInt, AtomicBool, Ordering};
+use sync::atomic::{AtomicUsize, AtomicIsize, AtomicBool, Ordering};
 use sync::mpsc::blocking::{self, SignalToken};
 use sync::mpsc::mpsc_queue as mpsc;
 use sync::mpsc::select::StartResult::*;
@@ -42,17 +42,17 @@ const MAX_STEALS: int = 1 << 20;
 
 pub struct Packet<T> {
     queue: mpsc::Queue<T>,
-    cnt: AtomicInt, // How many items are on this channel
+    cnt: AtomicIsize, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: AtomicUint, // SignalToken for wake up
+    to_wake: AtomicUsize, // SignalToken for wake up
 
     // The number of channels which are currently using this packet.
-    channels: AtomicInt,
+    channels: AtomicIsize,
 
     // See the discussion in Port::drop and the channel send methods for what
     // these are used for
     port_dropped: AtomicBool,
-    sender_drain: AtomicInt,
+    sender_drain: AtomicIsize,
 
     // this lock protects various portions of this implementation during
     // select()
@@ -70,12 +70,12 @@ impl<T: Send> Packet<T> {
     pub fn new() -> Packet<T> {
         let p = Packet {
             queue: mpsc::Queue::new(),
-            cnt: AtomicInt::new(0),
+            cnt: AtomicIsize::new(0),
             steals: 0,
-            to_wake: AtomicUint::new(0),
-            channels: AtomicInt::new(2),
+            to_wake: AtomicUsize::new(0),
+            channels: AtomicIsize::new(2),
             port_dropped: AtomicBool::new(false),
-            sender_drain: AtomicInt::new(0),
+            sender_drain: AtomicIsize::new(0),
             select_lock: Mutex::new(()),
         };
         return p;
diff --git a/src/libstd/sync/mpsc/spsc_queue.rs b/src/libstd/sync/mpsc/spsc_queue.rs
index 46c69f6f547..34fd6bb70dc 100644
--- a/src/libstd/sync/mpsc/spsc_queue.rs
+++ b/src/libstd/sync/mpsc/spsc_queue.rs
@@ -41,7 +41,7 @@ use alloc::boxed::Box;
 use core::mem;
 use core::cell::UnsafeCell;
 
-use sync::atomic::{AtomicPtr, AtomicUint, Ordering};
+use sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
@@ -69,8 +69,8 @@ pub struct Queue<T> {
     // Cache maintenance fields. Additions and subtractions are stored
     // separately in order to allow them to use nonatomic addition/subtraction.
     cache_bound: uint,
-    cache_additions: AtomicUint,
-    cache_subtractions: AtomicUint,
+    cache_additions: AtomicUsize,
+    cache_subtractions: AtomicUsize,
 }
 
 unsafe impl<T: Send> Send for Queue<T> { }
@@ -117,8 +117,8 @@ impl<T: Send> Queue<T> {
             first: UnsafeCell::new(n1),
             tail_copy: UnsafeCell::new(n1),
             cache_bound: bound,
-            cache_additions: AtomicUint::new(0),
-            cache_subtractions: AtomicUint::new(0),
+            cache_additions: AtomicUsize::new(0),
+            cache_subtractions: AtomicUsize::new(0),
         }
     }
 
diff --git a/src/libstd/sync/mpsc/stream.rs b/src/libstd/sync/mpsc/stream.rs
index f4b20c7b742..a03add8c532 100644
--- a/src/libstd/sync/mpsc/stream.rs
+++ b/src/libstd/sync/mpsc/stream.rs
@@ -28,7 +28,7 @@ use core::cmp;
 use core::int;
 use thread::Thread;
 
-use sync::atomic::{AtomicInt, AtomicUint, Ordering, AtomicBool};
+use sync::atomic::{AtomicIsize, AtomicUsize, Ordering, AtomicBool};
 use sync::mpsc::Receiver;
 use sync::mpsc::blocking::{self, SignalToken};
 use sync::mpsc::spsc_queue as spsc;
@@ -42,9 +42,9 @@ const MAX_STEALS: int = 1 << 20;
 pub struct Packet<T> {
     queue: spsc::Queue<Message<T>>, // internal queue for all messages
 
-    cnt: AtomicInt, // How many items are on this channel
+    cnt: AtomicIsize, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: AtomicUint, // SignalToken for the blocked thread to wake up
+    to_wake: AtomicUsize, // SignalToken for the blocked thread to wake up
 
     port_dropped: AtomicBool, // flag if the channel has been destroyed.
 }
@@ -79,9 +79,9 @@ impl<T: Send> Packet<T> {
         Packet {
             queue: unsafe { spsc::Queue::new(128) },
 
-            cnt: AtomicInt::new(0),
+            cnt: AtomicIsize::new(0),
             steals: 0,
-            to_wake: AtomicUint::new(0),
+            to_wake: AtomicUsize::new(0),
 
             port_dropped: AtomicBool::new(false),
         }
diff --git a/src/libstd/sync/mpsc/sync.rs b/src/libstd/sync/mpsc/sync.rs
index b2cc807eb11..30304dffb75 100644
--- a/src/libstd/sync/mpsc/sync.rs
+++ b/src/libstd/sync/mpsc/sync.rs
@@ -41,7 +41,7 @@ use self::Blocker::*;
 use vec::Vec;
 use core::mem;
 
-use sync::atomic::{Ordering, AtomicUint};
+use sync::atomic::{Ordering, AtomicUsize};
 use sync::mpsc::blocking::{self, WaitToken, SignalToken};
 use sync::mpsc::select::StartResult::{self, Installed, Abort};
 use sync::{Mutex, MutexGuard};
@@ -49,7 +49,7 @@ use sync::{Mutex, MutexGuard};
 pub struct Packet<T> {
     /// Only field outside of the mutex. Just done for kicks, but mainly because
     /// the other shared channel already had the code implemented
-    channels: AtomicUint,
+    channels: AtomicUsize,
 
     lock: Mutex<State<T>>,
 }
@@ -138,7 +138,7 @@ fn wakeup<T>(token: SignalToken, guard: MutexGuard<State<T>>) {
 impl<T: Send> Packet<T> {
     pub fn new(cap: uint) -> Packet<T> {
         Packet {
-            channels: AtomicUint::new(1),
+            channels: AtomicUsize::new(1),
             lock: Mutex::new(State {
                 disconnected: false,
                 blocker: NoneBlocked,
diff --git a/src/libstd/sync/once.rs b/src/libstd/sync/once.rs
index 3bf2ae277e0..6231a91833d 100644
--- a/src/libstd/sync/once.rs
+++ b/src/libstd/sync/once.rs
@@ -17,7 +17,7 @@ use int;
 use marker::Sync;
 use mem::drop;
 use ops::FnOnce;
-use sync::atomic::{AtomicInt, Ordering, ATOMIC_INT_INIT};
+use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT};
 use sync::{StaticMutex, MUTEX_INIT};
 
 /// A synchronization primitive which can be used to run a one-time global
@@ -39,8 +39,8 @@ use sync::{StaticMutex, MUTEX_INIT};
 #[stable]
 pub struct Once {
     mutex: StaticMutex,
-    cnt: AtomicInt,
-    lock_cnt: AtomicInt,
+    cnt: AtomicIsize,
+    lock_cnt: AtomicIsize,
 }
 
 unsafe impl Sync for Once {}
@@ -49,8 +49,8 @@ unsafe impl Sync for Once {}
 #[stable]
 pub const ONCE_INIT: Once = Once {
     mutex: MUTEX_INIT,
-    cnt: ATOMIC_INT_INIT,
-    lock_cnt: ATOMIC_INT_INIT,
+    cnt: ATOMIC_ISIZE_INIT,
+    lock_cnt: ATOMIC_ISIZE_INIT,
 };
 
 impl Once {
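The counters inside Once are what call_once() coordinates on; its public behaviour is unchanged by this patch. A minimal usage sketch in current Rust (Once::new() has since replaced the ONCE_INIT constant):

    use std::sync::Once;

    static INIT: Once = Once::new();

    fn init_logging() {
        INIT.call_once(|| {
            // One-time global setup; later calls are no-ops.
            println!("initialized exactly once");
        });
    }

    fn main() {
        init_logging();
        init_logging(); // prints nothing the second time
    }
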
diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs
index e9af796c674..edd16e0c062 100644
--- a/src/libstd/sys/common/thread_local.rs
+++ b/src/libstd/sys/common/thread_local.rs
@@ -58,7 +58,7 @@
 
 use prelude::v1::*;
 
-use sync::atomic::{self, AtomicUint, Ordering};
+use sync::atomic::{self, AtomicUsize, Ordering};
 use sync::{Mutex, Once, ONCE_INIT};
 
 use sys::thread_local as imp;
@@ -97,7 +97,7 @@ pub struct StaticKey {
 
 /// Inner contents of `StaticKey`, created by the `INIT_INNER` constant.
 pub struct StaticKeyInner {
-    key: AtomicUint,
+    key: AtomicUsize,
 }
 
 /// A type for a safely managed OS-based TLS slot.
@@ -137,7 +137,7 @@ pub const INIT: StaticKey = StaticKey {
 ///
 /// This value allows specific configuration of the destructor for a TLS key.
 pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
-    key: atomic::ATOMIC_UINT_INIT,
+    key: atomic::ATOMIC_USIZE_INIT,
 };
 
 static INIT_KEYS: Once = ONCE_INIT;
diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs
index 2416b64f98f..ac51b68795f 100644
--- a/src/libstd/sys/unix/thread.rs
+++ b/src/libstd/sys/unix/thread.rs
@@ -93,7 +93,19 @@ pub mod guard {
 
         PAGE_SIZE = psize as uint;
 
-        let stackaddr = get_stack_start();
+        let mut stackaddr = get_stack_start();
+
+        // Ensure stackaddr is page aligned! A parent process might
+        // have reset RLIMIT_STACK to be non-page aligned. The
+        // pthread_attr_getstack() reports the usable stack area
+        // stackaddr < stackaddr + stacksize, so if stackaddr is not
+        // page-aligned, calculate the fix such that stackaddr <
+        // new_page_aligned_stackaddr < stackaddr + stacksize
+        let remainder = (stackaddr as usize) % (PAGE_SIZE as usize);
+        if remainder != 0 {
+            stackaddr = ((stackaddr as usize) + (PAGE_SIZE as usize) - remainder)
+                as *mut libc::c_void;
+        }
 
         // Reallocate the last page of the stack.
         // This ensures SIGBUS will be raised on
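The alignment fix above rounds a non-page-aligned stack base up to the next page boundary before the guard page is mapped. The arithmetic in isolation, as a standalone sketch using plain usize addresses:

    fn align_up(addr: usize, page_size: usize) -> usize {
        let remainder = addr % page_size;
        if remainder == 0 {
            addr // already page aligned
        } else {
            addr + page_size - remainder // next page boundary above addr
        }
    }

    fn main() {
        assert_eq!(align_up(0x1000, 0x1000), 0x1000); // aligned address is unchanged
        assert_eq!(align_up(0x1234, 0x1000), 0x2000); // rounded up to the next page
    }
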
diff --git a/src/libstd/sys/unix/timer.rs b/src/libstd/sys/unix/timer.rs
index 62f3242a206..c0c231a9e73 100644
--- a/src/libstd/sys/unix/timer.rs
+++ b/src/libstd/sys/unix/timer.rs
@@ -211,7 +211,7 @@ impl Timer {
         // instead of ()
         HELPER.boot(|| {}, helper);
 
-        static ID: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
+        static ID: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
         let id = ID.fetch_add(1, Ordering::Relaxed);
         Ok(Timer {
             id: id,
diff --git a/src/libstd/sys/windows/condvar.rs b/src/libstd/sys/windows/condvar.rs
index 291a8024cfc..db8038006fd 100644
--- a/src/libstd/sys/windows/condvar.rs
+++ b/src/libstd/sys/windows/condvar.rs
@@ -27,16 +27,18 @@ impl Condvar {
 
     #[inline]
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              libc::INFINITE);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               libc::INFINITE,
+                                               0);
         debug_assert!(r != 0);
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              dur.num_milliseconds() as DWORD);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               dur.num_milliseconds() as DWORD,
+                                               0);
         if r == 0 {
             const ERROR_TIMEOUT: DWORD = 0x5B4;
             debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint);
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index 1def99a3741..828ad795ed3 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -8,73 +8,51 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use prelude::v1::*;
-
-use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering};
-use alloc::{self, heap};
-
-use libc::DWORD;
+use marker::Sync;
+use cell::UnsafeCell;
 use sys::sync as ffi;
 
-const SPIN_COUNT: DWORD = 4000;
+pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
 
-pub struct Mutex { inner: AtomicUint }
-
-pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_UINT_INIT };
+pub const MUTEX_INIT: Mutex = Mutex {
+    inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
+};
 
 unsafe impl Sync for Mutex {}
 
 #[inline]
-pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
-    m.get()
+pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
+    m.inner.get()
 }
 
+// So you might be asking why we're using SRWLock instead of CriticalSection?
+//
+// 1. SRWLock is several times faster than CriticalSection according to benchmarks performed on both
+// Windows 8 and Windows 7.
+//
+// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix implementation
+// deadlocks so consistency is preferred. See #19962 for more details.
+//
+// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is that there are
+// no guarantees of fairness.
+
 impl Mutex {
     #[inline]
-    pub unsafe fn new() -> Mutex {
-        Mutex { inner: AtomicUint::new(init_lock() as uint) }
-    }
+    pub unsafe fn new() -> Mutex { MUTEX_INIT }
     #[inline]
     pub unsafe fn lock(&self) {
-        ffi::EnterCriticalSection(self.get())
+        ffi::AcquireSRWLockExclusive(self.inner.get())
     }
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        ffi::TryEnterCriticalSection(self.get()) != 0
+        ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
     }
     #[inline]
     pub unsafe fn unlock(&self) {
-        ffi::LeaveCriticalSection(self.get())
+        ffi::ReleaseSRWLockExclusive(self.inner.get())
     }
+    #[inline]
     pub unsafe fn destroy(&self) {
-        let lock = self.inner.swap(0, Ordering::SeqCst);
-        if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
-    }
-
-    unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
-        match self.inner.load(Ordering::SeqCst) {
-            0 => {}
-            n => return n as ffi::LPCRITICAL_SECTION
-        }
-        let lock = init_lock();
-        match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) {
-            0 => return lock as ffi::LPCRITICAL_SECTION,
-            _ => {}
-        }
-        free_lock(lock);
-        return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION;
+        // ...
     }
 }
-
-unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
-    let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
-                        as ffi::LPCRITICAL_SECTION;
-    if block.is_null() { alloc::oom() }
-    ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
-    return block;
-}
-
-unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
-    ffi::DeleteCriticalSection(h);
-    heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
-}
diff --git a/src/libstd/sys/windows/sync.rs b/src/libstd/sys/windows/sync.rs
index cbca47912b5..d60646b7db9 100644
--- a/src/libstd/sys/windows/sync.rs
+++ b/src/libstd/sys/windows/sync.rs
@@ -8,17 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use libc::{BOOL, DWORD, c_void, LPVOID};
+use libc::{BOOL, DWORD, c_void, LPVOID, c_ulong};
 use libc::types::os::arch::extra::BOOLEAN;
 
-pub type LPCRITICAL_SECTION = *mut c_void;
-pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
-pub type LPSRWLOCK = *mut SRWLOCK;
-
-#[cfg(target_arch = "x86")]
-pub const CRITICAL_SECTION_SIZE: uint = 24;
-#[cfg(target_arch = "x86_64")]
-pub const CRITICAL_SECTION_SIZE: uint = 40;
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PSRWLOCK = *mut SRWLOCK;
+pub type ULONG = c_ulong;
 
 #[repr(C)]
 pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
@@ -31,28 +26,19 @@ pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
 pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
 
 extern "system" {
-    // critical sections
-    pub fn InitializeCriticalSectionAndSpinCount(
-                    lpCriticalSection: LPCRITICAL_SECTION,
-                    dwSpinCount: DWORD) -> BOOL;
-    pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
-
     // condition variables
-    pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE,
-                                    CriticalSection: LPCRITICAL_SECTION,
-                                    dwMilliseconds: DWORD) -> BOOL;
-    pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
-    pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
+    pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
+                                     SRWLock: PSRWLOCK,
+                                     dwMilliseconds: DWORD,
+                                     Flags: ULONG) -> BOOL;
+    pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+    pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
 
     // slim rwlocks
-    pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN;
-    pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN;
+    pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
+    pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
 }
-