Diffstat (limited to 'src/libstd')
-rw-r--r--  src/libstd/fs/mod.rs                    | 76
-rw-r--r--  src/libstd/io/buffered.rs               | 18
-rw-r--r--  src/libstd/io/lazy.rs                   | 24
-rw-r--r--  src/libstd/io/mod.rs                    | 44
-rw-r--r--  src/libstd/lib.rs                       |  2
-rw-r--r--  src/libstd/old_io/buffered.rs           | 18
-rw-r--r--  src/libstd/old_io/stdio.rs              |  6
-rw-r--r--  src/libstd/rt/at_exit_imp.rs            | 59
-rw-r--r--  src/libstd/rt/mod.rs                    | 24
-rw-r--r--  src/libstd/sync/mpsc/mod.rs             | 16
-rw-r--r--  src/libstd/sync/mpsc/mpsc_queue.rs      |  2
-rw-r--r--  src/libstd/sync/mpsc/oneshot.rs         |  8
-rw-r--r--  src/libstd/sync/mpsc/select.rs          |  2
-rw-r--r--  src/libstd/sync/mpsc/shared.rs          |  2
-rw-r--r--  src/libstd/sync/mpsc/spsc_queue.rs      |  2
-rw-r--r--  src/libstd/sync/mpsc/stream.rs          |  8
-rw-r--r--  src/libstd/sync/mpsc/sync.rs            |  2
-rw-r--r--  src/libstd/sync/mutex.rs                |  4
-rw-r--r--  src/libstd/sys/common/helper_thread.rs  |  4
-rw-r--r--  src/libstd/sys/windows/net.rs           |  2
-rw-r--r--  src/libstd/sys/windows/os.rs            |  2
-rw-r--r--  src/libstd/sys/windows/thread_local.rs  |  8
-rw-r--r--  src/libstd/thread/local.rs              |  1
-rw-r--r--  src/libstd/thread/mod.rs                |  2
24 files changed, 211 insertions, 125 deletions
diff --git a/src/libstd/fs/mod.rs b/src/libstd/fs/mod.rs
index 2546aace265..c1253706832 100644
--- a/src/libstd/fs/mod.rs
+++ b/src/libstd/fs/mod.rs
@@ -128,6 +128,17 @@ impl File {
     ///
     /// This function will return an error if `path` does not already exist.
     /// Other errors may also be returned according to `OpenOptions::open`.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::open("foo.txt"));
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn open<P: AsRef<Path>>(path: P) -> io::Result<File> {
         OpenOptions::new().read(true).open(path)
@@ -139,6 +150,17 @@ impl File {
     /// and will truncate it if it does.
     ///
     /// See the `OpenOptions::open` function for more details.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::create("foo.txt"));
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn create<P: AsRef<Path>>(path: P) -> io::Result<File> {
         OpenOptions::new().write(true).create(true).truncate(true).open(path)
@@ -156,6 +178,21 @@ impl File {
     ///
     /// This function will attempt to ensure that all in-core data reaches the
     /// filesystem before returning.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// use std::io::prelude::*;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::create("foo.txt"));
+    /// try!(f.write_all(b"Hello, world!"));
+    ///
+    /// try!(f.sync_all());
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn sync_all(&self) -> io::Result<()> {
         self.inner.fsync()
@@ -170,6 +207,21 @@ impl File {
     ///
     /// Note that some platforms may simply implement this in terms of
     /// `sync_all`.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    /// use std::io::prelude::*;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::create("foo.txt"));
+    /// try!(f.write_all(b"Hello, world!"));
+    ///
+    /// try!(f.sync_data());
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn sync_data(&self) -> io::Result<()> {
         self.inner.datasync()
@@ -182,12 +234,36 @@ impl File {
     /// be shrunk. If it is greater than the current file's size, then the file
     /// will be extended to `size` and have all of the intermediate data filled
     /// in with 0s.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::open("foo.txt"));
+    /// try!(f.set_len(0));
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn set_len(&self, size: u64) -> io::Result<()> {
         self.inner.truncate(size)
     }
 
     /// Queries metadata about the underlying file.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use std::fs::File;
+    ///
+    /// # fn foo() -> std::io::Result<()> {
+    /// let mut f = try!(File::open("foo.txt"));
+    /// let metadata = try!(f.metadata());
+    /// # Ok(())
+    /// # }
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn metadata(&self) -> io::Result<Metadata> {
         self.inner.file_attr().map(Metadata)
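The doc comments above defer to `OpenOptions::open` for the details; for reference, a short sketch of the `OpenOptions` calls that `File::open` and `File::create` expand to (same `try!` style as the new examples; the file paths are placeholders):

```rust
use std::fs::OpenOptions;
use std::io;

fn open_both() -> io::Result<()> {
    // Equivalent to File::open("foo.txt"): read-only, the file must exist.
    let _readable = try!(OpenOptions::new().read(true).open("foo.txt"));

    // Equivalent to File::create("bar.txt"): write-only, create the file if
    // it is missing, and truncate any existing contents.
    let _writable = try!(OpenOptions::new().write(true)
                                           .create(true)
                                           .truncate(true)
                                           .open("bar.txt"));
    Ok(())
}

fn main() {
    // Like the `no_run` doc examples above, this touches the filesystem, so
    // treat it as illustrative rather than something to run blindly.
    let _ = open_both();
}
```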
diff --git a/src/libstd/io/buffered.rs b/src/libstd/io/buffered.rs
index 4def601f1c0..2a1294f23b2 100644
--- a/src/libstd/io/buffered.rs
+++ b/src/libstd/io/buffered.rs
@@ -120,7 +120,7 @@ impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug {
 ///
 /// The buffer will be written out when the writer is dropped.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct BufWriter<W> {
+pub struct BufWriter<W: Write> {
     inner: Option<W>,
     buf: Vec<u8>,
 }
@@ -220,7 +220,7 @@ impl<W: Write> Write for BufWriter<W> {
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<W> fmt::Debug for BufWriter<W> where W: fmt::Debug {
+impl<W: Write> fmt::Debug for BufWriter<W> where W: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(fmt, "BufWriter {{ writer: {:?}, buffer: {}/{} }}",
                self.inner.as_ref().unwrap(), self.buf.len(), self.buf.capacity())
@@ -276,7 +276,7 @@ impl<W> fmt::Display for IntoInnerError<W> {
 ///
 /// The buffer will be written out when the writer is dropped.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct LineWriter<W> {
+pub struct LineWriter<W: Write> {
     inner: BufWriter<W>,
 }
 
@@ -335,7 +335,7 @@ impl<W: Write> Write for LineWriter<W> {
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<W> fmt::Debug for LineWriter<W> where W: fmt::Debug {
+impl<W: Write> fmt::Debug for LineWriter<W> where W: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(fmt, "LineWriter {{ writer: {:?}, buffer: {}/{} }}",
                self.inner.inner, self.inner.buf.len(),
@@ -343,16 +343,16 @@ impl<W> fmt::Debug for LineWriter<W> where W: fmt::Debug {
     }
 }
 
-struct InternalBufWriter<W>(BufWriter<W>);
+struct InternalBufWriter<W: Write>(BufWriter<W>);
 
-impl<W> InternalBufWriter<W> {
+impl<W: Read + Write> InternalBufWriter<W> {
     fn get_mut(&mut self) -> &mut BufWriter<W> {
         let InternalBufWriter(ref mut w) = *self;
         return w;
     }
 }
 
-impl<W: Read> Read for InternalBufWriter<W> {
+impl<W: Read + Write> Read for InternalBufWriter<W> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         self.get_mut().inner.as_mut().unwrap().read(buf)
     }
@@ -367,7 +367,7 @@ impl<W: Read> Read for InternalBufWriter<W> {
 ///
 /// The output buffer will be written out when this stream is dropped.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct BufStream<S> {
+pub struct BufStream<S: Write> {
     inner: BufReader<InternalBufWriter<S>>
 }
 
@@ -448,7 +448,7 @@ impl<S: Read + Write> Write for BufStream<S> {
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<S> fmt::Debug for BufStream<S> where S: fmt::Debug {
+impl<S: Write> fmt::Debug for BufStream<S> where S: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         let reader = &self.inner;
         let writer = &self.inner.inner.0;
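The tightened `W: Write` bounds do not change behavior; the docs' claim that "the buffer will be written out when the writer is dropped" can be checked with a minimal sketch, assuming `&mut Vec<u8>` as the underlying writer (it implements `Write`):

```rust
use std::io::{BufWriter, Write};

fn main() {
    let mut backing = Vec::new();
    {
        let mut w = BufWriter::new(&mut backing);
        w.write_all(b"buffered").unwrap();
        // The bytes are still sitting in BufWriter's internal buffer here;
        // nothing has been pushed into `backing` yet.
    } // <- dropping the BufWriter flushes the buffer into `backing`
    assert_eq!(&backing[..], &b"buffered"[..]);
}
```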
diff --git a/src/libstd/io/lazy.rs b/src/libstd/io/lazy.rs
index c9b105f72a5..df280dab37d 100644
--- a/src/libstd/io/lazy.rs
+++ b/src/libstd/io/lazy.rs
@@ -35,25 +35,33 @@ impl<T: Send + Sync + 'static> Lazy<T> {
     pub fn get(&'static self) -> Option<Arc<T>> {
         let _g = self.lock.lock();
         unsafe {
-            let mut ptr = *self.ptr.get();
+            let ptr = *self.ptr.get();
             if ptr.is_null() {
-                ptr = boxed::into_raw(self.init());
-                *self.ptr.get() = ptr;
+                Some(self.init())
             } else if ptr as usize == 1 {
-                return None
+                None
+            } else {
+                Some((*ptr).clone())
             }
-            Some((*ptr).clone())
         }
     }
 
-    fn init(&'static self) -> Box<Arc<T>> {
-        rt::at_exit(move || unsafe {
+    unsafe fn init(&'static self) -> Arc<T> {
+        // If we successfully register an at exit handler, then we cache the
+        // `Arc` allocation in our own internal box (it will get deallocated by
+        // the at exit handler). Otherwise we just return the freshly allocated
+        // `Arc`.
+        let registered = rt::at_exit(move || {
             let g = self.lock.lock();
             let ptr = *self.ptr.get();
             *self.ptr.get() = 1 as *mut _;
             drop(g);
             drop(Box::from_raw(ptr))
         });
-        Box::new((self.init)())
+        let ret = (self.init)();
+        if registered.is_ok() {
+            *self.ptr.get() = boxed::into_raw(Box::new(ret.clone()));
+        }
+        return ret
     }
 }
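The new control flow is easier to follow outside the `unsafe` pointer juggling. Below is a simplified, self-contained model of `init`; the `at_exit` stub stands in for `rt::at_exit` and is not the real API, and the cache is an `Option` rather than a leaked raw pointer:

```rust
use std::sync::Arc;

// Stand-in for the new rt::at_exit, which now reports whether the handler
// was actually registered.
fn at_exit<F: FnOnce() + Send + 'static>(_f: F) -> Result<(), ()> { Ok(()) }

// Mirrors Lazy::init above: always hand back a fresh Arc, but only cache a
// clone for later get() calls when the exit handler that will eventually
// free that cache was successfully registered.
fn init<T, F: Fn() -> Arc<T>>(make: F, cache: &mut Option<Arc<T>>) -> Arc<T> {
    let registered = at_exit(|| { /* the real code drops the cached Arc here */ });
    let ret = make();
    if registered.is_ok() {
        *cache = Some(ret.clone());
    }
    ret
}

fn main() {
    let mut cache = None;
    let value = init(|| Arc::new(42), &mut cache);
    assert_eq!(*value, 42);
    assert!(cache.is_some());
}
```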
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index 39c718c96b3..c6ae4d0dbec 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -16,13 +16,12 @@ use cmp;
 use unicode::str as core_str;
 use error as std_error;
 use fmt;
-use iter::Iterator;
+use iter::{self, Iterator, IteratorExt, Extend};
 use marker::Sized;
 use ops::{Drop, FnOnce};
 use option::Option::{self, Some, None};
 use result::Result::{Ok, Err};
 use result;
-use slice;
 use string::String;
 use str;
 use vec::Vec;
@@ -50,41 +49,26 @@ mod stdio;
 const DEFAULT_BUF_SIZE: usize = 64 * 1024;
 
 // Acquires a slice of the vector `v` from its length to its capacity
-// (uninitialized data), reads into it, and then updates the length.
+// (after initializing the data), reads into it, and then updates the length.
 //
 // This function is leveraged to efficiently read some bytes into a destination
 // vector without extra copying and taking advantage of the space that's already
 // in `v`.
-//
-// The buffer we're passing down, however, is pointing at uninitialized data
-// (the end of a `Vec`), and many operations will be *much* faster if we don't
-// have to zero it out. In order to prevent LLVM from generating an `undef`
-// value when reads happen from this uninitialized memory, we force LLVM to
-// think it's initialized by sending it through a black box. This should prevent
-// actual undefined behavior after optimizations.
 fn with_end_to_cap<F>(v: &mut Vec<u8>, f: F) -> Result<usize>
     where F: FnOnce(&mut [u8]) -> Result<usize>
 {
-    unsafe {
-        let n = try!(f({
-            let base = v.as_mut_ptr().offset(v.len() as isize);
-            black_box(slice::from_raw_parts_mut(base,
-                                                v.capacity() - v.len()))
-        }));
-
-        // If the closure (typically a `read` implementation) reported that it
-        // read a larger number of bytes than the vector actually has, we need
-        // to be sure to clamp the vector to at most its capacity.
-        let new_len = cmp::min(v.capacity(), v.len() + n);
-        v.set_len(new_len);
-        return Ok(n);
-    }
-
-    // Semi-hack used to prevent LLVM from retaining any assumptions about
-    // `dummy` over this function call
-    unsafe fn black_box<T>(mut dummy: T) -> T {
-        asm!("" :: "r"(&mut dummy) : "memory");
-        dummy
+    let len = v.len();
+    let new_area = v.capacity() - len;
+    v.extend(iter::repeat(0).take(new_area));
+    match f(&mut v[len..]) {
+        Ok(n) => {
+            v.truncate(len + n);
+            Ok(n)
+        }
+        Err(e) => {
+            v.truncate(len);
+            Err(e)
+        }
     }
 }
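The rewritten `with_end_to_cap` trades the old uninitialized-memory trick for a simpler scheme: zero-fill the spare capacity, let the reader write into it, then truncate back to what was actually read. A standalone sketch of the same technique against an arbitrary `Read` source (the function name here is illustrative):

```rust
use std::io::{self, Read};
use std::iter;

// Same shape as the new with_end_to_cap: expose v[len..capacity] as an
// initialized buffer, read into it, then keep only the bytes actually read.
fn read_into_spare<R: Read>(r: &mut R, v: &mut Vec<u8>) -> io::Result<usize> {
    let len = v.len();
    let spare = v.capacity() - len;
    v.extend(iter::repeat(0).take(spare));
    match r.read(&mut v[len..]) {
        Ok(n) => { v.truncate(len + n); Ok(n) }
        Err(e) => { v.truncate(len); Err(e) }
    }
}

fn main() {
    let mut src: &[u8] = b"hello world";
    let mut buf = Vec::with_capacity(32);
    let n = read_into_spare(&mut src, &mut buf).unwrap();
    assert_eq!(n, 11);
    assert_eq!(&buf[..], &b"hello world"[..]);
}
```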
 
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index 90eca6168f2..cca6bb747d4 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -135,6 +135,8 @@
 #![feature(no_std)]
 #![no_std]
 
+#![allow(trivial_casts)]
+#![allow(trivial_numeric_casts)]
 #![deny(missing_docs)]
 
 #[cfg(test)] extern crate test;
diff --git a/src/libstd/old_io/buffered.rs b/src/libstd/old_io/buffered.rs
index cb67d709a14..9a9d421dfe1 100644
--- a/src/libstd/old_io/buffered.rs
+++ b/src/libstd/old_io/buffered.rs
@@ -148,14 +148,14 @@ impl<R: Reader> Reader for BufferedReader<R> {
 /// writer.write_str("hello, world").unwrap();
 /// writer.flush().unwrap();
 /// ```
-pub struct BufferedWriter<W> {
+pub struct BufferedWriter<W: Writer> {
     inner: Option<W>,
     buf: Vec<u8>,
     pos: uint
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<W> fmt::Debug for BufferedWriter<W> where W: fmt::Debug {
+impl<W: Writer> fmt::Debug for BufferedWriter<W> where W: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(fmt, "BufferedWriter {{ writer: {:?}, buffer: {}/{} }}",
                self.inner.as_ref().unwrap(), self.pos, self.buf.len())
@@ -250,12 +250,12 @@ impl<W: Writer> Drop for BufferedWriter<W> {
 /// `'\n'`) is detected.
 ///
 /// This writer will be flushed when it is dropped.
-pub struct LineBufferedWriter<W> {
+pub struct LineBufferedWriter<W: Writer> {
     inner: BufferedWriter<W>,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<W> fmt::Debug for LineBufferedWriter<W> where W: fmt::Debug {
+impl<W: Writer> fmt::Debug for LineBufferedWriter<W> where W: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         write!(fmt, "LineBufferedWriter {{ writer: {:?}, buffer: {}/{} }}",
                self.inner.inner, self.inner.pos, self.inner.buf.len())
@@ -299,16 +299,16 @@ impl<W: Writer> Writer for LineBufferedWriter<W> {
     fn flush(&mut self) -> IoResult<()> { self.inner.flush() }
 }
 
-struct InternalBufferedWriter<W>(BufferedWriter<W>);
+struct InternalBufferedWriter<W: Writer>(BufferedWriter<W>);
 
-impl<W> InternalBufferedWriter<W> {
+impl<W: Writer> InternalBufferedWriter<W> {
     fn get_mut<'a>(&'a mut self) -> &'a mut BufferedWriter<W> {
         let InternalBufferedWriter(ref mut w) = *self;
         return w;
     }
 }
 
-impl<W: Reader> Reader for InternalBufferedWriter<W> {
+impl<W: Reader + Writer> Reader for InternalBufferedWriter<W> {
     fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
         self.get_mut().inner.as_mut().unwrap().read(buf)
     }
@@ -343,12 +343,12 @@ impl<W: Reader> Reader for InternalBufferedWriter<W> {
 ///     Err(e) => println!("error reading: {}", e)
 /// }
 /// ```
-pub struct BufferedStream<S> {
+pub struct BufferedStream<S: Writer> {
     inner: BufferedReader<InternalBufferedWriter<S>>
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<S> fmt::Debug for BufferedStream<S> where S: fmt::Debug {
+impl<S: Writer> fmt::Debug for BufferedStream<S> where S: fmt::Debug {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         let reader = &self.inner;
         let writer = &self.inner.inner.0;
diff --git a/src/libstd/old_io/stdio.rs b/src/libstd/old_io/stdio.rs
index ef811f832b3..90d27084911 100644
--- a/src/libstd/old_io/stdio.rs
+++ b/src/libstd/old_io/stdio.rs
@@ -240,7 +240,7 @@ pub fn stdin() -> StdinReader {
             STDIN = boxed::into_raw(box stdin);
 
             // Make sure to free it at exit
-            rt::at_exit(|| {
+            let _ = rt::at_exit(|| {
                 Box::from_raw(STDIN);
                 STDIN = ptr::null_mut();
             });
@@ -337,10 +337,10 @@ pub fn set_stderr(_stderr: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
 //      })
 //  })
 fn with_task_stdout<F>(f: F) where F: FnOnce(&mut Writer) -> IoResult<()> {
-    let mut my_stdout = LOCAL_STDOUT.with(|slot| {
+    let mut my_stdout: Box<Writer + Send> = LOCAL_STDOUT.with(|slot| {
         slot.borrow_mut().take()
     }).unwrap_or_else(|| {
-        box stdout() as Box<Writer + Send>
+        box stdout()
     });
     let result = f(&mut *my_stdout);
     let mut var = Some(my_stdout);
diff --git a/src/libstd/rt/at_exit_imp.rs b/src/libstd/rt/at_exit_imp.rs
index 9fa12b1ef30..9079c0aaffb 100644
--- a/src/libstd/rt/at_exit_imp.rs
+++ b/src/libstd/rt/at_exit_imp.rs
@@ -12,6 +12,10 @@
 //!
 //! Documentation can be found on the `rt::at_exit` function.
 
+// FIXME: switch this to use atexit. Currently this
+// segfaults (the queue's memory is mysteriously gone), so
+// instead the cleanup is tied to the `std::rt` entry point.
+
 use boxed;
 use boxed::Box;
 use vec::Vec;
@@ -27,47 +31,56 @@ type Queue = Vec<Thunk<'static>>;
 static LOCK: Mutex = MUTEX_INIT;
 static mut QUEUE: *mut Queue = 0 as *mut Queue;
 
-unsafe fn init() {
+// The maximum number of times the cleanup routines will be run. While running
+// the at_exit closures new ones may be registered, and this count is the number
+// of times the new closures will be allowed to register successfully. After
+// this number of iterations all new registrations will return `false`.
+const ITERS: usize = 10;
+
+unsafe fn init() -> bool {
     if QUEUE.is_null() {
         let state: Box<Queue> = box Vec::new();
         QUEUE = boxed::into_raw(state);
-    } else {
+    } else if QUEUE as usize == 1 {
         // can't re-init after a cleanup
-        rtassert!(QUEUE as uint != 1);
+        return false
     }
 
-    // FIXME: switch this to use atexit as below. Currently this
-    // segfaults (the queue's memory is mysteriously gone), so
-    // instead the cleanup is tied to the `std::rt` entry point.
-    //
-    // ::libc::atexit(cleanup);
+    return true
 }
 
 pub fn cleanup() {
-    unsafe {
-        LOCK.lock();
-        let queue = QUEUE;
-        QUEUE = 1 as *mut _;
-        LOCK.unlock();
+    for i in 0..ITERS {
+        unsafe {
+            LOCK.lock();
+            let queue = QUEUE;
+            QUEUE = if i == ITERS - 1 {1} else {0} as *mut _;
+            LOCK.unlock();
 
-        // make sure we're not recursively cleaning up
-        rtassert!(queue as uint != 1);
+            // make sure we're not recursively cleaning up
+            rtassert!(queue as usize != 1);
 
-        // If we never called init, not need to cleanup!
-        if queue as uint != 0 {
-            let queue: Box<Queue> = Box::from_raw(queue);
-            for to_run in *queue {
-                to_run.invoke(());
+            // If we never called init, no need to clean up!
+            if queue as usize != 0 {
+                let queue: Box<Queue> = Box::from_raw(queue);
+                for to_run in *queue {
+                    to_run.invoke(());
+                }
             }
         }
     }
 }
 
-pub fn push(f: Thunk<'static>) {
+pub fn push(f: Thunk<'static>) -> bool {
+    let mut ret = true;
     unsafe {
         LOCK.lock();
-        init();
-        (*QUEUE).push(f);
+        if init() {
+            (*QUEUE).push(f);
+        } else {
+            ret = false;
+        }
         LOCK.unlock();
     }
+    return ret
 }
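The interplay between the new `push` return value and the bounded `cleanup` loop can be modeled without the raw-pointer queue and locks. A standalone sketch of the same state machine, with illustrative names rather than the real internals:

```rust
const ITERS: usize = 10;

// Minimal model: Some(handlers) while registration is open, None once the
// final cleanup pass has closed registration for good.
struct Registry {
    queue: Option<Vec<String>>, // stand-in for Vec<Thunk<'static>>
}

impl Registry {
    // Like the new push(): report whether the handler was accepted.
    fn push(&mut self, name: &str) -> bool {
        match self.queue {
            Some(ref mut q) => { q.push(name.to_string()); true }
            None => false,
        }
    }

    // Like the new cleanup(): drain the queue in rounds. Handlers that run
    // during a round may register more work, but after ITERS rounds the
    // queue is closed and push() starts returning false.
    fn cleanup(&mut self) {
        for i in 0..ITERS {
            let batch = self.queue.take().unwrap_or(Vec::new());
            if i < ITERS - 1 {
                self.queue = Some(Vec::new()); // re-open for re-registration
            }
            for name in batch {
                println!("running handler: {}", name);
            }
        }
    }
}

fn main() {
    let mut reg = Registry { queue: Some(Vec::new()) };
    assert!(reg.push("flush stdout"));
    reg.cleanup();
    assert!(!reg.push("too late")); // registration refused after shutdown
}
```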
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index e52e68dad23..497076cc6ac 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -17,14 +17,9 @@
 //! time being.
 
 #![unstable(feature = "std_misc")]
-
-// FIXME: this should not be here.
 #![allow(missing_docs)]
 
-#![allow(dead_code)]
-
-use marker::Send;
-use ops::FnOnce;
+use prelude::v1::*;
 use sys;
 use thunk::Thunk;
 use usize;
@@ -73,7 +68,7 @@ fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
     use thread::Thread;
 
     let something_around_the_top_of_the_stack = 1;
-    let addr = &something_around_the_top_of_the_stack as *const int;
+    let addr = &something_around_the_top_of_the_stack as *const _ as *const int;
     let my_stack_top = addr as uint;
 
     // FIXME #11359 we just assume that this thread has a stack of a
@@ -149,13 +144,16 @@ fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int {
 
 /// Enqueues a procedure to run when the main thread exits.
 ///
-/// It is forbidden for procedures to register more `at_exit` handlers when they
-/// are running, and doing so will lead to a process abort.
+/// Currently these closures are only run once the main *Rust* thread exits.
+/// Once the `at_exit` handlers begin running, more may be enqueued, but not
+/// infinitely so. Eventually a handler registration will be forced to fail.
 ///
-/// Note that other threads may still be running when `at_exit` routines start
-/// running.
-pub fn at_exit<F: FnOnce() + Send + 'static>(f: F) {
-    at_exit_imp::push(Thunk::new(f));
+/// Returns `Ok` if the handler was successfully registered, meaning that the
+/// closure will be run once the main thread exits. Returns `Err` to indicate
+/// that the closure could not be registered, meaning that it is not scheduled
+/// to be run.
+pub fn at_exit<F: FnOnce() + Send + 'static>(f: F) -> Result<(), ()> {
+    if at_exit_imp::push(Thunk::new(f)) {Ok(())} else {Err(())}
 }
 
 /// One-time runtime cleanup.
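Call sites elsewhere in this patch handle the new `Result` in one of two ways. A compact, compilable sketch of both patterns; the `at_exit` stub below is a stand-in for the internal, unstable `rt::at_exit`:

```rust
// Stand-in with the same signature as the new rt::at_exit; the real function
// lives in libstd's `rt` module behind the unstable `std_misc` feature.
fn at_exit<F: FnOnce() + Send + 'static>(_f: F) -> Result<(), ()> { Ok(()) }

fn main() {
    // Most callers only want best-effort cleanup, so a refused registration
    // is simply ignored (old_io/stdio.rs, sys/windows/net.rs, helper_thread.rs):
    let _ = at_exit(|| println!("running after the main thread exits"));

    // Callers that hand out a resource the handler must later free check the
    // result first (io/lazy.rs and sys/windows/thread_local.rs above):
    match at_exit(|| { /* free the resource */ }) {
        Ok(()) => println!("handler registered"),
        Err(()) => println!("shutdown already finished; don't cache the resource"),
    }
}
```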
diff --git a/src/libstd/sync/mpsc/mod.rs b/src/libstd/sync/mpsc/mod.rs
index 7adfd9154ac..eb421fe55a4 100644
--- a/src/libstd/sync/mpsc/mod.rs
+++ b/src/libstd/sync/mpsc/mod.rs
@@ -342,7 +342,7 @@ mod spsc_queue;
 /// The receiving-half of Rust's channel type. This half can only be owned by
 /// one task
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct Receiver<T> {
+pub struct Receiver<T:Send> {
     inner: UnsafeCell<Flavor<T>>,
 }
 
@@ -354,14 +354,14 @@ unsafe impl<T: Send> Send for Receiver<T> { }
 /// whenever `next` is called, waiting for a new message, and `None` will be
 /// returned when the corresponding channel has hung up.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct Iter<'a, T:'a> {
+pub struct Iter<'a, T:Send+'a> {
     rx: &'a Receiver<T>
 }
 
 /// The sending-half of Rust's asynchronous channel type. This half can only be
 /// owned by one task, but it can be cloned to send to other tasks.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct Sender<T> {
+pub struct Sender<T:Send> {
     inner: UnsafeCell<Flavor<T>>,
 }
 
@@ -372,7 +372,7 @@ unsafe impl<T: Send> Send for Sender<T> { }
 /// The sending-half of Rust's synchronous channel type. This half can only be
 /// owned by one task, but it can be cloned to send to other tasks.
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct SyncSender<T> {
+pub struct SyncSender<T: Send> {
     inner: Arc<UnsafeCell<sync::Packet<T>>>,
 }
 
@@ -433,7 +433,7 @@ pub enum TrySendError<T> {
     Disconnected(T),
 }
 
-enum Flavor<T> {
+enum Flavor<T:Send> {
     Oneshot(Arc<UnsafeCell<oneshot::Packet<T>>>),
     Stream(Arc<UnsafeCell<stream::Packet<T>>>),
     Shared(Arc<UnsafeCell<shared::Packet<T>>>),
@@ -441,7 +441,7 @@ enum Flavor<T> {
 }
 
 #[doc(hidden)]
-trait UnsafeFlavor<T> {
+trait UnsafeFlavor<T:Send> {
     fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>>;
     unsafe fn inner_mut<'a>(&'a self) -> &'a mut Flavor<T> {
         &mut *self.inner_unsafe().get()
@@ -450,12 +450,12 @@ trait UnsafeFlavor<T> {
         &*self.inner_unsafe().get()
     }
 }
-impl<T> UnsafeFlavor<T> for Sender<T> {
+impl<T:Send> UnsafeFlavor<T> for Sender<T> {
     fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
         &self.inner
     }
 }
-impl<T> UnsafeFlavor<T> for Receiver<T> {
+impl<T:Send> UnsafeFlavor<T> for Receiver<T> {
     fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>> {
         &self.inner
     }
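The `T: Send` bounds being added throughout this module only spell out what the channel types already assumed. The documented ownership rules (one receiver, freely cloned senders moved across threads) look like this with the public API:

```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel();

    // The sending half can be cloned and moved to other threads...
    for i in 0..3 {
        let tx = tx.clone();
        thread::spawn(move || {
            tx.send(i).unwrap(); // i: i32 is Send, as the bounds now require
        });
    }
    drop(tx); // drop the original so the channel closes once the clones do

    // ...while the receiving half stays owned by exactly one thread.
    let mut received: Vec<i32> = rx.iter().collect();
    received.sort();
    assert_eq!(received, vec![0, 1, 2]);
}
```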
diff --git a/src/libstd/sync/mpsc/mpsc_queue.rs b/src/libstd/sync/mpsc/mpsc_queue.rs
index 14ed253d8e2..1be8b0dd862 100644
--- a/src/libstd/sync/mpsc/mpsc_queue.rs
+++ b/src/libstd/sync/mpsc/mpsc_queue.rs
@@ -72,7 +72,7 @@ struct Node<T> {
 /// The multi-producer single-consumer structure. This is not cloneable, but it
 /// may be safely shared so long as it is guaranteed that there is only one
 /// popper at a time (many pushers are allowed).
-pub struct Queue<T> {
+pub struct Queue<T: Send> {
     head: AtomicPtr<Node<T>>,
     tail: UnsafeCell<*mut Node<T>>,
 }
diff --git a/src/libstd/sync/mpsc/oneshot.rs b/src/libstd/sync/mpsc/oneshot.rs
index f287712d9d4..13578ce0517 100644
--- a/src/libstd/sync/mpsc/oneshot.rs
+++ b/src/libstd/sync/mpsc/oneshot.rs
@@ -54,7 +54,7 @@ const DISCONNECTED: usize = 2;   // channel is disconnected OR upgraded
 // moves *from* a pointer, ownership of the token is transferred to
 // whoever changed the state.
 
-pub struct Packet<T> {
+pub struct Packet<T:Send> {
     // Internal state of the chan/port pair (stores the blocked task as well)
     state: AtomicUsize,
     // One-shot data slot location
@@ -64,7 +64,7 @@ pub struct Packet<T> {
     upgrade: MyUpgrade<T>,
 }
 
-pub enum Failure<T> {
+pub enum Failure<T:Send> {
     Empty,
     Disconnected,
     Upgraded(Receiver<T>),
@@ -76,13 +76,13 @@ pub enum UpgradeResult {
     UpWoke(SignalToken),
 }
 
-pub enum SelectionResult<T> {
+pub enum SelectionResult<T:Send> {
     SelCanceled,
     SelUpgraded(SignalToken, Receiver<T>),
     SelSuccess,
 }
 
-enum MyUpgrade<T> {
+enum MyUpgrade<T:Send> {
     NothingSent,
     SendUsed,
     GoUp(Receiver<T>),
diff --git a/src/libstd/sync/mpsc/select.rs b/src/libstd/sync/mpsc/select.rs
index 0f936641cdc..b509b3472ee 100644
--- a/src/libstd/sync/mpsc/select.rs
+++ b/src/libstd/sync/mpsc/select.rs
@@ -80,7 +80,7 @@ impl !marker::Send for Select {}
 /// A handle to a receiver which is currently a member of a `Select` set of
 /// receivers.  This handle is used to keep the receiver in the set as well as
 /// interact with the underlying receiver.
-pub struct Handle<'rx, T:'rx> {
+pub struct Handle<'rx, T:Send+'rx> {
     /// The ID of this handle, used to compare against the return value of
     /// `Select::wait()`
     id: usize,
diff --git a/src/libstd/sync/mpsc/shared.rs b/src/libstd/sync/mpsc/shared.rs
index 8d14824d37f..f3930a8a5d6 100644
--- a/src/libstd/sync/mpsc/shared.rs
+++ b/src/libstd/sync/mpsc/shared.rs
@@ -40,7 +40,7 @@ const MAX_STEALS: isize = 5;
 #[cfg(not(test))]
 const MAX_STEALS: isize = 1 << 20;
 
-pub struct Packet<T> {
+pub struct Packet<T: Send> {
     queue: mpsc::Queue<T>,
     cnt: AtomicIsize, // How many items are on this channel
     steals: isize, // How many times has a port received without blocking?
diff --git a/src/libstd/sync/mpsc/spsc_queue.rs b/src/libstd/sync/mpsc/spsc_queue.rs
index 3fb13739aa7..cd6d1ee05c7 100644
--- a/src/libstd/sync/mpsc/spsc_queue.rs
+++ b/src/libstd/sync/mpsc/spsc_queue.rs
@@ -57,7 +57,7 @@ struct Node<T> {
 /// but it can be safely shared in an Arc if it is guaranteed that there
 /// is only one popper and one pusher touching the queue at any one point in
 /// time.
-pub struct Queue<T> {
+pub struct Queue<T: Send> {
     // consumer fields
     tail: UnsafeCell<*mut Node<T>>, // where to pop from
     tail_prev: AtomicPtr<Node<T>>, // where to pop from
diff --git a/src/libstd/sync/mpsc/stream.rs b/src/libstd/sync/mpsc/stream.rs
index 5a1e05f9c15..a5a73314a6d 100644
--- a/src/libstd/sync/mpsc/stream.rs
+++ b/src/libstd/sync/mpsc/stream.rs
@@ -39,7 +39,7 @@ const MAX_STEALS: isize = 5;
 #[cfg(not(test))]
 const MAX_STEALS: isize = 1 << 20;
 
-pub struct Packet<T> {
+pub struct Packet<T:Send> {
     queue: spsc::Queue<Message<T>>, // internal queue for all message
 
     cnt: AtomicIsize, // How many items are on this channel
@@ -49,7 +49,7 @@ pub struct Packet<T> {
     port_dropped: AtomicBool, // flag if the channel has been destroyed.
 }
 
-pub enum Failure<T> {
+pub enum Failure<T:Send> {
     Empty,
     Disconnected,
     Upgraded(Receiver<T>),
@@ -61,7 +61,7 @@ pub enum UpgradeResult {
     UpWoke(SignalToken),
 }
 
-pub enum SelectionResult<T> {
+pub enum SelectionResult<T:Send> {
     SelSuccess,
     SelCanceled,
     SelUpgraded(SignalToken, Receiver<T>),
@@ -69,7 +69,7 @@ pub enum SelectionResult<T> {
 
 // Any message could contain an "upgrade request" to a new shared port, so the
 // internal queue isn't a queue of T, but rather Message<T>
-enum Message<T> {
+enum Message<T:Send> {
     Data(T),
     GoUp(Receiver<T>),
 }
diff --git a/src/libstd/sync/mpsc/sync.rs b/src/libstd/sync/mpsc/sync.rs
index 33c1614e1b2..71236269487 100644
--- a/src/libstd/sync/mpsc/sync.rs
+++ b/src/libstd/sync/mpsc/sync.rs
@@ -47,7 +47,7 @@ use sync::mpsc::blocking::{self, WaitToken, SignalToken};
 use sync::mpsc::select::StartResult::{self, Installed, Abort};
 use sync::{Mutex, MutexGuard};
 
-pub struct Packet<T> {
+pub struct Packet<T: Send> {
     /// Only field outside of the mutex. Just done for kicks, but mainly because
     /// the other shared channel already had the code implemented
     channels: AtomicUsize,
diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs
index 2bf75cf1d37..b24cfbb6899 100644
--- a/src/libstd/sync/mutex.rs
+++ b/src/libstd/sync/mutex.rs
@@ -112,7 +112,7 @@ use fmt;
 /// *guard += 1;
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct Mutex<T> {
+pub struct Mutex<T: Send> {
     // Note that this static mutex is in a *box*, not inlined into the struct
     // itself. Once a native mutex has been used once, its address can never
     // change (it can't be moved). This mutex type can be safely moved at any
@@ -366,7 +366,7 @@ mod test {
     use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar};
     use thread;
 
-    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+    struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
 
     unsafe impl<T: Send> Send for Packet<T> {}
     unsafe impl<T> Sync for Packet<T> {}
diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs
index 2a852fbcd57..10077dfd1b8 100644
--- a/src/libstd/sys/common/helper_thread.rs
+++ b/src/libstd/sys/common/helper_thread.rs
@@ -38,7 +38,7 @@ use thread;
 ///
 /// The fields of this helper are all public, but they should not be used, this
 /// is for static initialization.
-pub struct Helper<M> {
+pub struct Helper<M:Send> {
     /// Internal lock which protects the remaining fields
     pub lock: StaticMutex,
     pub cond: StaticCondvar,
@@ -112,7 +112,7 @@ impl<M: Send> Helper<M> {
                     self.cond.notify_one()
                 });
 
-                rt::at_exit(move || { self.shutdown() });
+                let _ = rt::at_exit(move || { self.shutdown() });
                 *self.initialized.get() = true;
             } else if *self.chan.get() as uint == 1 {
                 panic!("cannot continue usage after shutdown");
diff --git a/src/libstd/sys/windows/net.rs b/src/libstd/sys/windows/net.rs
index e092faf4935..734268c70ac 100644
--- a/src/libstd/sys/windows/net.rs
+++ b/src/libstd/sys/windows/net.rs
@@ -36,7 +36,7 @@ pub fn init() {
                                 &mut data);
         assert_eq!(ret, 0);
 
-        rt::at_exit(|| { c::WSACleanup(); })
+        let _ = rt::at_exit(|| { c::WSACleanup(); });
     });
 }
 
diff --git a/src/libstd/sys/windows/os.rs b/src/libstd/sys/windows/os.rs
index 83d06371734..167db1e8ac2 100644
--- a/src/libstd/sys/windows/os.rs
+++ b/src/libstd/sys/windows/os.rs
@@ -109,7 +109,7 @@ impl Iterator for Env {
             if *self.cur == 0 { return None }
             let p = &*self.cur;
             let mut len = 0;
-            while *(p as *const _).offset(len) != 0 {
+            while *(p as *const u16).offset(len) != 0 {
                 len += 1;
             }
             let p = p as *const u16;
diff --git a/src/libstd/sys/windows/thread_local.rs b/src/libstd/sys/windows/thread_local.rs
index 1359803070a..c908c791247 100644
--- a/src/libstd/sys/windows/thread_local.rs
+++ b/src/libstd/sys/windows/thread_local.rs
@@ -133,9 +133,8 @@ unsafe fn init_dtors() {
     if !DTORS.is_null() { return }
 
     let dtors = box Vec::<(Key, Dtor)>::new();
-    DTORS = boxed::into_raw(dtors);
 
-    rt::at_exit(move|| {
+    let res = rt::at_exit(move|| {
         DTOR_LOCK.lock();
         let dtors = DTORS;
         DTORS = 1 as *mut _;
@@ -143,6 +142,11 @@ unsafe fn init_dtors() {
         assert!(DTORS as uint == 1); // can't re-init after destructing
         DTOR_LOCK.unlock();
     });
+    if res.is_ok() {
+        DTORS = boxed::into_raw(dtors);
+    } else {
+        DTORS = 1 as *mut _;
+    }
 }
 
 unsafe fn register_dtor(key: Key, dtor: Dtor) {
diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs
index 1bf1b09681c..a2b824bb016 100644
--- a/src/libstd/thread/local.rs
+++ b/src/libstd/thread/local.rs
@@ -176,6 +176,7 @@ macro_rules! __thread_local_inner {
             }
         };
 
+        #[allow(trivial_casts)]
         #[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))]
         const _INIT: ::std::thread::__local::__impl::KeyInner<$t> = {
             ::std::thread::__local::__impl::KeyInner {
diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs
index 57baeb1fb74..27b50fc9aaa 100644
--- a/src/libstd/thread/mod.rs
+++ b/src/libstd/thread/mod.rs
@@ -698,7 +698,7 @@ impl Drop for JoinHandle {
 /// permission.
 #[must_use = "thread will be immediately joined if `JoinGuard` is not used"]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub struct JoinGuard<'a, T: 'a> {
+pub struct JoinGuard<'a, T: Send + 'a> {
     inner: JoinInner<T>,
     _marker: PhantomData<&'a T>,
 }