author     bors <bors@rust-lang.org>    2019-03-16 14:46:43 +0000
committer  bors <bors@rust-lang.org>    2019-03-16 14:46:43 +0000
commit     2c8bbf50db0ef90a33f986ba8fc2e1fe129197ff (patch)
tree       0d1a620d973a7dff4f6ca02a51bdd3e4814ba5c9 /src/libstd/sys
parent     52e885628e4317aa3f158622435927eb29b812e9 (diff)
parent     7c009a4df7214290e6e5cf73d5bd5652fa8015dc (diff)
Auto merge of #59226 - kennytm:rollup, r=kennytm
Rollup of 37 pull requests

Successful merges:

 - #58854 (appveyor: Use VS2017 for all our images)
 - #58855 (std: Spin for a global malloc lock on wasm32)
 - #58873 (Fix "Auto-hide item methods documentation" setting)
 - #58901 (Change `std::fs::copy` to use `copyfile` on MacOS and iOS)
 - #58933 (Move alloc::prelude::* to alloc::prelude::v1, make alloc a subset of std)
 - #58938 (core: ensure VaList passes improper_ctypes lint)
 - #58941 (MIPS: add r6 support)
 - #58949 (SGX target: Expose thread id function in os module)
 - #58959 (Add release notes for PR #56243)
 - #58976 (Default to integrated `rust-lld` linker for UEFI targets)
 - #59009 (Fix SGX implementations of read/write_vectored.)
 - #59025 (Fix generic argument lookup for Self)
 - #59036 (Fix ICE in MIR pretty printing)
 - #59037 (Avoid some common false positives in intra doc link checking)
 - #59072 (we can now skip should_panic tests with the libtest harness)
 - #59079 (add suggestions to invalid macro item error)
 - #59082 (A few improvements to comments in user-facing crates)
 - #59102 (Consistent naming for duration_float methods and additional f32 methods)
 - #59118 (rustc: fix ICE when trait alias has bare Self)
 - #59139 (Unregress using scalar unions in constants.)
 - #59146 (Suggest return lifetime when there's only one named lifetime)
 - #59147 (Make std time tests more robust for platform differences)
 - #59152 (Stabilize Range*::contains.)
 - #59156 ([wg-async-await] Add regression test for #55809.)
 - #59158 (Revert "Don't generate minification variable if minification disabled")
 - #59169 (Add `-Z allow_features=...` flag)
 - #59173 (bootstrap: Default to a sensible llvm-suffix.)
 - #59175 (Don't run test launching `echo` since that doesn't exist on Windows)
 - #59180 (Use try blocks in rustc_codegen_ssa)
 - #59185 (No old chestnuts in iter::repeat docs)
 - #59201 (Remove restriction on isize/usize in repr(simd))
 - #59204 (Output diagnostic information for rustdoc)
 - #59206 (Improved test output)
 - #59208 (Reduce a Code Repetition Related to Bit Operation)
 - #59212 (Add x86_64 musl host to the manifest)
 - #59221 (Option and Result: Add references to documentation of as_ref and as_mut)
 - #59231 (Stabilize Option::copied)
Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/redox/net/tcp.rs    10
-rw-r--r--  src/libstd/sys/sgx/abi/thread.rs    1
-rw-r--r--  src/libstd/sys/sgx/net.rs          16
-rw-r--r--  src/libstd/sys/unix/fs.rs          87
-rw-r--r--  src/libstd/sys/wasm/alloc.rs       95
-rw-r--r--  src/libstd/sys/windows/pipe.rs      6
6 files changed, 176 insertions, 39 deletions
diff --git a/src/libstd/sys/redox/net/tcp.rs b/src/libstd/sys/redox/net/tcp.rs
index 5081c3de73c..3f2f6166a79 100644
--- a/src/libstd/sys/redox/net/tcp.rs
+++ b/src/libstd/sys/redox/net/tcp.rs
@@ -35,10 +35,7 @@ impl TcpStream {
     }
 
     pub fn read_vectored(&self, bufs: &mut [IoVecMut<'_>]) -> io::Result<usize> {
-        match bufs.iter_mut().find(|b| !b.is_empty()) {
-            Some(buf) => self.read(buf),
-            None => Ok(0),
-        }
+        io::default_read_vectored(|b| self.read(b), bufs)
     }
 
     pub fn write(&self, buf: &[u8]) -> Result<usize> {
@@ -46,10 +43,7 @@ impl TcpStream {
     }
 
     pub fn write_vectored(&self, bufs: &[IoVec<'_>]) -> io::Result<usize> {
-        match bufs.iter().find(|b| !b.is_empty()) {
-            Some(buf) => self.write(buf),
-            None => Ok(0),
-        }
+        io::default_write_vectored(|b| self.write(b), bufs)
     }
 
     pub fn take_error(&self) -> Result<Option<Error>> {
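
For context, the `io::default_read_vectored` and `io::default_write_vectored` helpers that
these hunks (and the SGX ones below) switch to centralize the "first non-empty buffer"
fallback that was previously open-coded here. A rough, self-contained sketch of that
behavior, using plain byte slices in place of `IoVecMut` (an illustration only, not the
actual libstd source):

    use std::io;

    // Sketch: delegate a vectored read to a plain `read` on the first
    // non-empty buffer, reporting 0 bytes when every buffer is empty.
    fn default_read_vectored<F>(read: F, bufs: &mut [&mut [u8]]) -> io::Result<usize>
    where
        F: FnOnce(&mut [u8]) -> io::Result<usize>,
    {
        match bufs.iter_mut().find(|b| !b.is_empty()) {
            Some(buf) => read(&mut **buf),
            None => Ok(0),
        }
    }

    fn main() -> io::Result<()> {
        let mut empty = [0u8; 0];
        let mut buf = [0u8; 4];
        // Stand-in "reader" that fills whatever buffer it is handed.
        let n = default_read_vectored(
            |b| {
                for byte in b.iter_mut() {
                    *byte = 0x2a;
                }
                Ok(b.len())
            },
            &mut [&mut empty, &mut buf],
        )?;
        assert_eq!(n, 4);
        assert_eq!(buf, [0x2a; 4]);
        Ok(())
    }
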
diff --git a/src/libstd/sys/sgx/abi/thread.rs b/src/libstd/sys/sgx/abi/thread.rs
index 86fe09d0035..c17fa2d0015 100644
--- a/src/libstd/sys/sgx/abi/thread.rs
+++ b/src/libstd/sys/sgx/abi/thread.rs
@@ -4,6 +4,7 @@ use fortanix_sgx_abi::Tcs;
 /// all currently running threads in the enclave, and it is guaranteed to be
 /// constant for the lifetime of the thread. More specifically for SGX, there
 /// is a one-to-one correspondence of the ID to the address of the TCS.
+#[unstable(feature = "sgx_platform", issue = "56975")]
 pub fn current() -> Tcs {
     extern "C" { fn get_tcs_addr() -> Tcs; }
     unsafe { get_tcs_addr() }
diff --git a/src/libstd/sys/sgx/net.rs b/src/libstd/sys/sgx/net.rs
index e5e42e3d0b0..e167e917957 100644
--- a/src/libstd/sys/sgx/net.rs
+++ b/src/libstd/sys/sgx/net.rs
@@ -103,24 +103,16 @@ impl TcpStream {
         self.inner.inner.read(buf)
     }
 
-    pub fn read_vectored(&self, buf: &mut [IoVecMut<'_>]) -> io::Result<usize> {
-        let buf = match buf.get_mut(0) {
-            Some(buf) => buf,
-            None => return Ok(0),
-        };
-        self.read(buf)
+    pub fn read_vectored(&self, bufs: &mut [IoVecMut<'_>]) -> io::Result<usize> {
+        io::default_read_vectored(|b| self.read(b), bufs)
     }
 
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         self.inner.inner.write(buf)
     }
 
-    pub fn write_vectored(&self, buf: &[IoVec<'_>]) -> io::Result<usize> {
-        let buf = match buf.get(0) {
-            Some(buf) => buf,
-            None => return Ok(0),
-        };
-        self.write(buf)
+    pub fn write_vectored(&self, bufs: &[IoVec<'_>]) -> io::Result<usize> {
+        io::default_write_vectored(|b| self.write(b), bufs)
     }
 
     pub fn peer_addr(&self) -> io::Result<SocketAddr> {
diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs
index 3b80b475a93..7ff098bc9e1 100644
--- a/src/libstd/sys/unix/fs.rs
+++ b/src/libstd/sys/unix/fs.rs
@@ -827,7 +827,10 @@ pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
     Ok(PathBuf::from(OsString::from_vec(buf)))
 }
 
-#[cfg(not(any(target_os = "linux", target_os = "android")))]
+#[cfg(not(any(target_os = "linux",
+              target_os = "android",
+              target_os = "macos",
+              target_os = "ios")))]
 pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
     use crate::fs::File;
     if !from.is_file() {
@@ -937,3 +940,85 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
     writer.set_permissions(perm)?;
     Ok(written)
 }
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+    const COPYFILE_ACL: u32 = 1 << 0;
+    const COPYFILE_STAT: u32 = 1 << 1;
+    const COPYFILE_XATTR: u32 = 1 << 2;
+    const COPYFILE_DATA: u32 = 1 << 3;
+
+    const COPYFILE_SECURITY: u32 = COPYFILE_STAT | COPYFILE_ACL;
+    const COPYFILE_METADATA: u32 = COPYFILE_SECURITY | COPYFILE_XATTR;
+    const COPYFILE_ALL: u32 = COPYFILE_METADATA | COPYFILE_DATA;
+
+    const COPYFILE_STATE_COPIED: u32 = 8;
+
+    #[allow(non_camel_case_types)]
+    type copyfile_state_t = *mut libc::c_void;
+    #[allow(non_camel_case_types)]
+    type copyfile_flags_t = u32;
+
+    extern "C" {
+        fn copyfile(
+            from: *const libc::c_char,
+            to: *const libc::c_char,
+            state: copyfile_state_t,
+            flags: copyfile_flags_t,
+        ) -> libc::c_int;
+        fn copyfile_state_alloc() -> copyfile_state_t;
+        fn copyfile_state_free(state: copyfile_state_t) -> libc::c_int;
+        fn copyfile_state_get(
+            state: copyfile_state_t,
+            flag: u32,
+            dst: *mut libc::c_void,
+        ) -> libc::c_int;
+    }
+
+    struct FreeOnDrop(copyfile_state_t);
+    impl Drop for FreeOnDrop {
+        fn drop(&mut self) {
+            // The code below ensures that `FreeOnDrop` never contains a null pointer
+            unsafe {
+                // `copyfile_state_free` returns -1 if the `to` or `from` files
+                // cannot be closed. However, this is not considered an
+                // error.
+                copyfile_state_free(self.0);
+            }
+        }
+    }
+
+    if !from.is_file() {
+        return Err(Error::new(ErrorKind::InvalidInput,
+                              "the source path is not an existing regular file"))
+    }
+
+    // We ensure that `FreeOnDrop` never contains a null pointer so it is
+    // always safe to call `copyfile_state_free`
+    let state = unsafe {
+        let state = copyfile_state_alloc();
+        if state.is_null() {
+            return Err(crate::io::Error::last_os_error());
+        }
+        FreeOnDrop(state)
+    };
+
+    cvt(unsafe {
+        copyfile(
+            cstr(from)?.as_ptr(),
+            cstr(to)?.as_ptr(),
+            state.0,
+            COPYFILE_ALL,
+        )
+    })?;
+
+    let mut bytes_copied: libc::off_t = 0;
+    cvt(unsafe {
+        copyfile_state_get(
+            state.0,
+            COPYFILE_STATE_COPIED,
+            &mut bytes_copied as *mut libc::off_t as *mut libc::c_void,
+        )
+    })?;
+    Ok(bytes_copied as u64)
+}
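
This platform-specific `copy` only changes the implementation; the public `std::fs::copy`
API and its return value (the number of bytes copied) stay the same. A minimal usage
sketch, with made-up file names:

    use std::fs;
    use std::io;

    fn main() -> io::Result<()> {
        // Hypothetical paths. On macOS/iOS this now goes through copyfile(3);
        // other platforms keep their existing implementations.
        let bytes_copied = fs::copy("source.txt", "dest.txt")?;
        println!("copied {} bytes", bytes_copied);
        Ok(())
    }
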
diff --git a/src/libstd/sys/wasm/alloc.rs b/src/libstd/sys/wasm/alloc.rs
index b9098548b9c..c1af6ec1262 100644
--- a/src/libstd/sys/wasm/alloc.rs
+++ b/src/libstd/sys/wasm/alloc.rs
@@ -49,7 +49,6 @@ unsafe impl GlobalAlloc for System {
 
 #[cfg(target_feature = "atomics")]
 mod lock {
-    use crate::arch::wasm32;
     use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
@@ -61,14 +60,76 @@ mod lock {
             if LOCKED.swap(1, SeqCst) == 0 {
                 return DropLock
             }
-            unsafe {
-                let r = wasm32::i32_atomic_wait(
-                    &LOCKED as *const AtomicI32 as *mut i32,
-                    1,  // expected value
-                    -1, // timeout
-                );
-                debug_assert!(r == 0 || r == 1);
-            }
+            // Ok so here's where things get a little depressing. At this point
+            // in time we need to synchronously acquire a lock, but we're
+            // contending with some other thread. Typically we'd execute some
+            // form of `i32.atomic.wait` like so:
+            //
+            //     unsafe {
+            //         let r = core::arch::wasm32::i32_atomic_wait(
+            //             &LOCKED as *const AtomicI32 as *mut i32,
+            //             1,  //     expected value
+            //             -1, //     timeout
+            //         );
+            //         debug_assert!(r == 0 || r == 1);
+            //     }
+            //
+            // Unfortunately though in doing so we would cause issues for the
+            // main thread. The main thread in a web browser *cannot ever
+            // block*, no exceptions. This means that the main thread can't
+            // actually execute the `i32.atomic.wait` instruction.
+            //
+            // As a result if we want to work within the context of browsers we
+            // need to figure out some sort of allocation scheme for the main
+            // thread where when there's contention on the global malloc lock we
+            // do... something.
+            //
+            // Possible ideas include:
+            //
+            // 1. Attempt to acquire the global lock. If it fails, fall back to
+            //    memory allocation via `memory.grow`. Later just ... somehow
+            //    ... inject this raw page back into the main allocator as it
+            //    gets sliced up over time. This strategy has the downside of
+            //    forcing allocation of a page to happen whenever the main
+            //    thread contends with other threads, which is unfortunate.
+            //
+            // 2. Maintain a form of "two level" allocator scheme where the main
+            //    thread has its own allocator. Somehow this allocator would
+            //    also be balanced with a global allocator, not only to have
+            //    allocations cross between threads but also to ensure that the
+            //    two allocators stay "balanced" in terms of free'd memory and
+            //    such. This, however, seems significantly complicated.
+            //
+            // Out of a lack of other ideas, the current strategy implemented
+            // here is to simply spin. Typical spin loop algorithms include
+            // some form of "hint" instruction here to tell the CPU that we
+            // are busy-waiting, so that the CPU doesn't run too hot, but wasm
+            // has no such instruction.
+            //
+            // To be clear, spinning here is not a great solution.
+            // Another thread with the lock may take quite a long time to wake
+            // up. For example it could be in `memory.grow` or it could be
+            // evicted from the CPU for a timeslice like 10ms. For these periods
+            // of time our thread will "helpfully" sit here and eat CPU time
+            // until it itself is evicted or the lock holder finishes. This
+            // means we're just burning and wasting CPU time to no one's
+            // benefit.
+            //
+            // Spinning does have the nice properties, though, of being
+            // semantically correct, being fair to all threads for memory
+            // allocation, and being simple enough to implement.
+            //
+            // This will surely (hopefully) be replaced in the future with a
+            // real memory allocator that can handle the restriction of the main
+            // thread.
+            //
+            //
+            // FIXME: We could also possibly add an optimization here to
+            // detect whether a thread is the main thread or not and block on
+            // all non-main threads. Currently, however, we have no way of
+            // knowing which wasm thread is the browser main thread, but if we
+            // could figure that out we could at least somewhat mitigate the
+            // cost of this spinning.
         }
     }
 
@@ -76,12 +137,16 @@ mod lock {
         fn drop(&mut self) {
             let r = LOCKED.swap(0, SeqCst);
             debug_assert_eq!(r, 1);
-            unsafe {
-                wasm32::atomic_notify(
-                    &LOCKED as *const AtomicI32 as *mut i32,
-                    1, // only one thread
-                );
-            }
+
+            // Note that due to the above logic we don't actually need to wake
+            // anyone up, but if we did it'd likely look something like this:
+            //
+            //     unsafe {
+            //         core::arch::wasm32::atomic_notify(
+            //             &LOCKED as *const AtomicI32 as *mut i32,
+            //             1, //     only one thread
+            //         );
+            //     }
         }
     }
 }
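
The spin-only acquisition described in the long comment above comes down to retrying the
atomic swap until the unlocked value is observed. A condensed, platform-neutral sketch of
that pattern (not the libstd code itself; `std::hint::spin_loop` is the kind of hint the
comment notes wasm lacks):

    use std::hint::spin_loop;
    use std::sync::atomic::{AtomicI32, Ordering::SeqCst};

    static LOCKED: AtomicI32 = AtomicI32::new(0);

    struct DropLock;

    // Spin until this thread is the one that flips LOCKED from 0 to 1.
    fn lock() -> DropLock {
        loop {
            if LOCKED.swap(1, SeqCst) == 0 {
                return DropLock;
            }
            // On most targets a pause/yield hint goes here; wasm has no such
            // instruction, which is why the version above simply loops.
            spin_loop();
        }
    }

    impl Drop for DropLock {
        fn drop(&mut self) {
            // Release the lock; the previous value must have been "locked".
            let prev = LOCKED.swap(0, SeqCst);
            debug_assert_eq!(prev, 1);
        }
    }

    fn main() {
        let _guard = lock(); // released when `_guard` goes out of scope
    }
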
diff --git a/src/libstd/sys/windows/pipe.rs b/src/libstd/sys/windows/pipe.rs
index 07f4f5f0e58..b38727830f3 100644
--- a/src/libstd/sys/windows/pipe.rs
+++ b/src/libstd/sys/windows/pipe.rs
@@ -37,9 +37,9 @@ pub struct Pipes {
 ///
 /// The ours/theirs pipes are *not* specifically readable or writable. Each
 /// one only supports a read or a write, but which is which depends on the
-/// boolean flag given. If `ours_readable` is true then `ours` is readable where
-/// `theirs` is writable. Conversely if `ours_readable` is false then `ours` is
-/// writable where `theirs` is readable.
+/// boolean flag given. If `ours_readable` is `true`, then `ours` is readable and
+/// `theirs` is writable. Conversely, if `ours_readable` is `false`, then `ours`
+/// is writable and `theirs` is readable.
 ///
 /// Also note that the `ours` pipe is always a handle opened up in overlapped
 /// mode. This means that technically speaking it should only ever be used
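
The readable/writable mapping described in this doc comment can be summarized by a small,
purely illustrative helper (hypothetical, not part of libstd):

    /// Hypothetical illustration of the mapping documented above: which end
    /// of the anonymous pipe pair is readable, given `ours_readable`.
    fn pipe_directions(ours_readable: bool) -> (&'static str, &'static str) {
        if ours_readable {
            ("ours: read (overlapped)", "theirs: write")
        } else {
            ("ours: write (overlapped)", "theirs: read")
        }
    }

    fn main() {
        assert_eq!(pipe_directions(true), ("ours: read (overlapped)", "theirs: write"));
    }
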