| author | bors <bors@rust-lang.org> | 2019-03-16 14:46:43 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2019-03-16 14:46:43 +0000 |
| commit | 2c8bbf50db0ef90a33f986ba8fc2e1fe129197ff (patch) | |
| tree | 0d1a620d973a7dff4f6ca02a51bdd3e4814ba5c9 /src/libstd | |
| parent | 52e885628e4317aa3f158622435927eb29b812e9 (diff) | |
| parent | 7c009a4df7214290e6e5cf73d5bd5652fa8015dc (diff) | |
| download | rust-2c8bbf50db0ef90a33f986ba8fc2e1fe129197ff.tar.gz rust-2c8bbf50db0ef90a33f986ba8fc2e1fe129197ff.zip | |
Auto merge of #59226 - kennytm:rollup, r=kennytm
Rollup of 37 pull requests

Successful merges:

- #58854 (appveyor: Use VS2017 for all our images)
- #58855 (std: Spin for a global malloc lock on wasm32)
- #58873 (Fix "Auto-hide item methods documentation" setting)
- #58901 (Change `std::fs::copy` to use `copyfile` on MacOS and iOS)
- #58933 (Move alloc::prelude::* to alloc::prelude::v1, make alloc a subset of std)
- #58938 (core: ensure VaList passes improper_ctypes lint)
- #58941 (MIPS: add r6 support)
- #58949 (SGX target: Expose thread id function in os module)
- #58959 (Add release notes for PR #56243)
- #58976 (Default to integrated `rust-lld` linker for UEFI targets)
- #59009 (Fix SGX implementations of read/write_vectored.)
- #59025 (Fix generic argument lookup for Self)
- #59036 (Fix ICE in MIR pretty printing)
- #59037 (Avoid some common false positives in intra doc link checking)
- #59072 (we can now skip should_panic tests with the libtest harness)
- #59079 (add suggestions to invalid macro item error)
- #59082 (A few improvements to comments in user-facing crates)
- #59102 (Consistent naming for duration_float methods and additional f32 methods)
- #59118 (rustc: fix ICE when trait alias has bare Self)
- #59139 (Unregress using scalar unions in constants.)
- #59146 (Suggest return lifetime when there's only one named lifetime)
- #59147 (Make std time tests more robust for platform differences)
- #59152 (Stabilize Range*::contains.)
- #59156 ([wg-async-await] Add regression test for #55809.)
- #59158 (Revert "Don't generate minification variable if minification disabled")
- #59169 (Add `-Z allow_features=...` flag)
- #59173 (bootstrap: Default to a sensible llvm-suffix.)
- #59175 (Don't run test launching `echo` since that doesn't exist on Windows)
- #59180 (Use try blocks in rustc_codegen_ssa)
- #59185 (No old chestnuts in iter::repeat docs)
- #59201 (Remove restriction on isize/usize in repr(simd))
- #59204 (Output diagnostic information for rustdoc)
- #59206 (Improved test output)
- #59208 (Reduce a Code Repetition Related to Bit Operation)
- #59212 (Add x86_64 musl host to the manifest)
- #59221 (Option and Result: Add references to documentation of as_ref and as_mut)
- #59231 (Stabilize Option::copied)
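Two of the user-facing stabilizations in the list above (#59152, `Range*::contains`, and #59231, `Option::copied`) are easy to demonstrate. A minimal sketch, assuming a toolchain new enough to include this rollup:

```rust
fn main() {
    // #59152: `contains` is now stable on the range types.
    let r = 10..20;
    assert!(r.contains(&15));
    assert!(!(0.0..1.0).contains(&1.0)); // half-open ranges exclude the end

    // #59231: `Option::copied` turns an Option<&T> into an Option<T> for T: Copy.
    let x = 42;
    let opt_ref: Option<&i32> = Some(&x);
    assert_eq!(opt_ref.copied(), Some(42));
}
```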
Diffstat (limited to 'src/libstd')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libstd/collections/hash/map.rs | 2 |
| -rw-r--r-- | src/libstd/collections/hash/set.rs | 2 |
| -rw-r--r-- | src/libstd/fs.rs | 25 |
| -rw-r--r-- | src/libstd/io/mod.rs | 40 |
| -rw-r--r-- | src/libstd/lib.rs | 2 |
| -rw-r--r-- | src/libstd/os/fortanix_sgx/mod.rs | 5 |
| -rw-r--r-- | src/libstd/process.rs | 2 |
| -rw-r--r-- | src/libstd/sync/condvar.rs | 12 |
| -rw-r--r-- | src/libstd/sys/redox/net/tcp.rs | 10 |
| -rw-r--r-- | src/libstd/sys/sgx/abi/thread.rs | 1 |
| -rw-r--r-- | src/libstd/sys/sgx/net.rs | 16 |
| -rw-r--r-- | src/libstd/sys/unix/fs.rs | 87 |
| -rw-r--r-- | src/libstd/sys/wasm/alloc.rs | 95 |
| -rw-r--r-- | src/libstd/sys/windows/pipe.rs | 6 |
| -rw-r--r-- | src/libstd/time.rs | 11 |
15 files changed, 244 insertions, 72 deletions
diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index 928de29b297..1d45df499d8 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -19,7 +19,7 @@ use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucke
 use super::table::BucketState::{Empty, Full};
 use super::table::Fallibility::{Fallible, Infallible};
 
-const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
+const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
 
 /// The default behavior of HashMap implements a maximum load factor of 90.9%.
 #[derive(Clone)]
diff --git a/src/libstd/collections/hash/set.rs b/src/libstd/collections/hash/set.rs
index f2111f2d9e0..c026de35da6 100644
--- a/src/libstd/collections/hash/set.rs
+++ b/src/libstd/collections/hash/set.rs
@@ -8,7 +8,7 @@ use super::Recover;
 use super::map::{self, HashMap, Keys, RandomState};
 
 // Future Optimization (FIXME!)
-// =============================
+// ============================
 //
 // Iteration over zero sized values is a noop. There is no need
 // for `bucket.val` in the case of HashSet. I suppose we would need HKT
diff --git a/src/libstd/fs.rs b/src/libstd/fs.rs
index 25f2dd73504..8c3d0da0a7e 100644
--- a/src/libstd/fs.rs
+++ b/src/libstd/fs.rs
@@ -211,7 +211,7 @@ pub struct DirBuilder {
     recursive: bool,
 }
 
-/// How large a buffer to pre-allocate before reading the entire file.
+/// Indicates how large a buffer to pre-allocate before reading the entire file.
 fn initial_buffer_size(file: &File) -> usize {
     // Allocate one extra byte so the buffer doesn't need to grow before the
     // final `read` call at the end of the file. Don't worry about `usize`
@@ -1581,7 +1581,8 @@ pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()>
 /// `O_CLOEXEC` is set for returned file descriptors.
 /// On Windows, this function currently corresponds to `CopyFileEx`. Alternate
 /// NTFS streams are copied but only the size of the main stream is returned by
-/// this function.
+/// this function. On MacOS, this function corresponds to `copyfile` with
+/// `COPYFILE_ALL`.
 /// Note that, this [may change in the future][changes].
 ///
 /// [changes]: ../io/index.html#platform-specific-behavior
@@ -2837,6 +2838,26 @@ mod tests {
     }
 
     #[test]
+    fn copy_file_follows_dst_symlink() {
+        let tmp = tmpdir();
+        if !got_symlink_permission(&tmp) { return };
+
+        let in_path = tmp.join("in.txt");
+        let out_path = tmp.join("out.txt");
+        let out_path_symlink = tmp.join("out_symlink.txt");
+
+        check!(fs::write(&in_path, "foo"));
+        check!(fs::write(&out_path, "bar"));
+        check!(symlink_file(&out_path, &out_path_symlink));
+
+        check!(fs::copy(&in_path, &out_path_symlink));
+
+        assert!(check!(out_path_symlink.symlink_metadata()).file_type().is_symlink());
+        assert_eq!(check!(fs::read(&out_path_symlink)), b"foo".to_vec());
+        assert_eq!(check!(fs::read(&out_path)), b"foo".to_vec());
+    }
+
+    #[test]
     fn symlinks_work() {
         let tmpdir = tmpdir();
         if !got_symlink_permission(&tmpdir) { return };
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index e3e2754a7aa..1a2152a79af 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -390,6 +390,28 @@ fn read_to_end_with_reservation<R: Read + ?Sized>(r: &mut R,
     ret
 }
 
+pub(crate) fn default_read_vectored<F>(read: F, bufs: &mut [IoVecMut<'_>]) -> Result<usize>
+where
+    F: FnOnce(&mut [u8]) -> Result<usize>
+{
+    let buf = bufs
+        .iter_mut()
+        .find(|b| !b.is_empty())
+        .map_or(&mut [][..], |b| &mut **b);
+    read(buf)
+}
+
+pub(crate) fn default_write_vectored<F>(write: F, bufs: &[IoVec<'_>]) -> Result<usize>
+where
+    F: FnOnce(&[u8]) -> Result<usize>
+{
+    let buf = bufs
+        .iter()
+        .find(|b| !b.is_empty())
+        .map_or(&[][..], |b| &**b);
+    write(buf)
+}
+
 /// The `Read` trait allows for reading bytes from a source.
 ///
 /// Implementors of the `Read` trait are called 'readers'.
@@ -528,14 +550,11 @@ pub trait Read {
     /// written to possibly being only partially filled. This method must behave
     /// as a single call to `read` with the buffers concatenated would.
     ///
-    /// The default implementation simply passes the first nonempty buffer to
-    /// `read`.
+    /// The default implementation calls `read` with either the first nonempty
+    /// buffer provided, or an empty one if none exists.
     #[unstable(feature = "iovec", issue = "58452")]
     fn read_vectored(&mut self, bufs: &mut [IoVecMut<'_>]) -> Result<usize> {
-        match bufs.iter_mut().find(|b| !b.is_empty()) {
-            Some(buf) => self.read(buf),
-            None => Ok(0),
-        }
+        default_read_vectored(|b| self.read(b), bufs)
     }
 
     /// Determines if this `Read`er can work with buffers of uninitialized
@@ -1107,14 +1126,11 @@ pub trait Write {
     /// read from possibly being only partially consumed. This method must
     /// behave as a call to `write` with the buffers concatenated would.
     ///
-    /// The default implementation simply passes the first nonempty buffer to
-    /// `write`.
+    /// The default implementation calls `write` with either the first nonempty
+    /// buffer provided, or an empty one if none exists.
#[unstable(feature = "iovec", issue = "58452")] fn write_vectored(&mut self, bufs: &[IoVec<'_>]) -> Result<usize> { - match bufs.iter().find(|b| !b.is_empty()) { - Some(buf) => self.write(buf), - None => Ok(0), - } + default_write_vectored(|b| self.write(b), bufs) } /// Flush this output stream, ensuring that all intermediately buffered diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index b830c90cc43..fc8ac9a0b3e 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -221,7 +221,7 @@ #![cfg_attr(test, feature(print_internals, set_stdio, test, update_panic_count))] #![cfg_attr(all(target_vendor = "fortanix", target_env = "sgx"), - feature(global_asm, range_contains, slice_index_methods, + feature(global_asm, slice_index_methods, decl_macro, coerce_unsized, sgx_platform, ptr_wrapping_offset_from))] // std is implemented with unstable features, many of which are internal diff --git a/src/libstd/os/fortanix_sgx/mod.rs b/src/libstd/os/fortanix_sgx/mod.rs index bca22e717d7..4e30b1edd15 100644 --- a/src/libstd/os/fortanix_sgx/mod.rs +++ b/src/libstd/os/fortanix_sgx/mod.rs @@ -43,3 +43,8 @@ pub mod mem { } pub use crate::sys::ext::{io, arch, ffi}; + +/// Functions for querying thread-related information. +pub mod thread { + pub use crate::sys::abi::thread::current; +} diff --git a/src/libstd/process.rs b/src/libstd/process.rs index 56840009344..ad86acbb47d 100644 --- a/src/libstd/process.rs +++ b/src/libstd/process.rs @@ -8,7 +8,7 @@ //! //! The [`Command`] struct is used to configure and spawn processes: //! -//! ``` +//! ```no_run //! use std::process::Command; //! //! let output = Command::new("echo") diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs index 5ebb61754e1..c383f21dcd7 100644 --- a/src/libstd/sync/condvar.rs +++ b/src/libstd/sync/condvar.rs @@ -190,7 +190,7 @@ impl Condvar { /// // Wait for the thread to start up. /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock().unwrap(); - /// // As long as the value inside the `Mutex` is false, we wait. + /// // As long as the value inside the `Mutex<bool>` is `false`, we wait. /// while !*started { /// started = cvar.wait(started).unwrap(); /// } @@ -254,7 +254,7 @@ impl Condvar { /// /// // Wait for the thread to start up. /// let &(ref lock, ref cvar) = &*pair; - /// // As long as the value inside the `Mutex` is false, we wait. + /// // As long as the value inside the `Mutex<bool>` is `false`, we wait. /// let _guard = cvar.wait_until(lock.lock().unwrap(), |started| { *started }).unwrap(); /// ``` #[unstable(feature = "wait_until", issue = "47960")] @@ -311,7 +311,7 @@ impl Condvar { /// // Wait for the thread to start up. /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock().unwrap(); - /// // As long as the value inside the `Mutex` is false, we wait. + /// // As long as the value inside the `Mutex<bool>` is `false`, we wait. /// loop { /// let result = cvar.wait_timeout_ms(started, 10).unwrap(); /// // 10 milliseconds have passed, or maybe the value changed! @@ -384,7 +384,7 @@ impl Condvar { /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock().unwrap(); - /// // as long as the value inside the `Mutex` is false, we wait + /// // as long as the value inside the `Mutex<bool>` is `false`, we wait /// loop { /// let result = cvar.wait_timeout(started, Duration::from_millis(10)).unwrap(); /// // 10 milliseconds have passed, or maybe the value changed! 
@@ -518,7 +518,7 @@ impl Condvar {
     /// // Wait for the thread to start up.
     /// let &(ref lock, ref cvar) = &*pair;
     /// let mut started = lock.lock().unwrap();
-    /// // As long as the value inside the `Mutex` is false, we wait.
+    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
     /// while !*started {
     ///     started = cvar.wait(started).unwrap();
     /// }
@@ -558,7 +558,7 @@ impl Condvar {
     /// // Wait for the thread to start up.
     /// let &(ref lock, ref cvar) = &*pair;
     /// let mut started = lock.lock().unwrap();
-    /// // As long as the value inside the `Mutex` is false, we wait.
+    /// // As long as the value inside the `Mutex<bool>` is `false`, we wait.
     /// while !*started {
     ///     started = cvar.wait(started).unwrap();
     /// }
diff --git a/src/libstd/sys/redox/net/tcp.rs b/src/libstd/sys/redox/net/tcp.rs
index 5081c3de73c..3f2f6166a79 100644
--- a/src/libstd/sys/redox/net/tcp.rs
+++ b/src/libstd/sys/redox/net/tcp.rs
@@ -35,10 +35,7 @@ impl TcpStream {
     }
 
     pub fn read_vectored(&self, bufs: &mut [IoVecMut<'_>]) -> io::Result<usize> {
-        match bufs.iter_mut().find(|b| !b.is_empty()) {
-            Some(buf) => self.read(buf),
-            None => Ok(0),
-        }
+        io::default_read_vectored(|b| self.read(b), bufs)
     }
 
     pub fn write(&self, buf: &[u8]) -> Result<usize> {
@@ -46,10 +43,7 @@ impl TcpStream {
     }
 
     pub fn write_vectored(&self, bufs: &[IoVec<'_>]) -> io::Result<usize> {
-        match bufs.iter().find(|b| !b.is_empty()) {
-            Some(buf) => self.write(buf),
-            None => Ok(0),
-        }
+        io::default_write_vectored(|b| self.write(b), bufs)
     }
 
     pub fn take_error(&self) -> Result<Option<Error>> {
diff --git a/src/libstd/sys/sgx/abi/thread.rs b/src/libstd/sys/sgx/abi/thread.rs
index 86fe09d0035..c17fa2d0015 100644
--- a/src/libstd/sys/sgx/abi/thread.rs
+++ b/src/libstd/sys/sgx/abi/thread.rs
@@ -4,6 +4,7 @@ use fortanix_sgx_abi::Tcs;
 /// all currently running threads in the enclave, and it is guaranteed to be
 /// constant for the lifetime of the thread. More specifically for SGX, there
 /// is a one-to-one correspondence of the ID to the address of the TCS.
+#[unstable(feature = "sgx_platform", issue = "56975")] pub fn current() -> Tcs { extern "C" { fn get_tcs_addr() -> Tcs; } unsafe { get_tcs_addr() } diff --git a/src/libstd/sys/sgx/net.rs b/src/libstd/sys/sgx/net.rs index e5e42e3d0b0..e167e917957 100644 --- a/src/libstd/sys/sgx/net.rs +++ b/src/libstd/sys/sgx/net.rs @@ -103,24 +103,16 @@ impl TcpStream { self.inner.inner.read(buf) } - pub fn read_vectored(&self, buf: &mut [IoVecMut<'_>]) -> io::Result<usize> { - let buf = match buf.get_mut(0) { - Some(buf) => buf, - None => return Ok(0), - }; - self.read(buf) + pub fn read_vectored(&self, bufs: &mut [IoVecMut<'_>]) -> io::Result<usize> { + io::default_read_vectored(|b| self.read(b), bufs) } pub fn write(&self, buf: &[u8]) -> io::Result<usize> { self.inner.inner.write(buf) } - pub fn write_vectored(&self, buf: &[IoVec<'_>]) -> io::Result<usize> { - let buf = match buf.get(0) { - Some(buf) => buf, - None => return Ok(0), - }; - self.write(buf) + pub fn write_vectored(&self, bufs: &[IoVec<'_>]) -> io::Result<usize> { + io::default_write_vectored(|b| self.write(b), bufs) } pub fn peer_addr(&self) -> io::Result<SocketAddr> { diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs index 3b80b475a93..7ff098bc9e1 100644 --- a/src/libstd/sys/unix/fs.rs +++ b/src/libstd/sys/unix/fs.rs @@ -827,7 +827,10 @@ pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { Ok(PathBuf::from(OsString::from_vec(buf))) } -#[cfg(not(any(target_os = "linux", target_os = "android")))] +#[cfg(not(any(target_os = "linux", + target_os = "android", + target_os = "macos", + target_os = "ios")))] pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { use crate::fs::File; if !from.is_file() { @@ -937,3 +940,85 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { writer.set_permissions(perm)?; Ok(written) } + +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + const COPYFILE_ACL: u32 = 1 << 0; + const COPYFILE_STAT: u32 = 1 << 1; + const COPYFILE_XATTR: u32 = 1 << 2; + const COPYFILE_DATA: u32 = 1 << 3; + + const COPYFILE_SECURITY: u32 = COPYFILE_STAT | COPYFILE_ACL; + const COPYFILE_METADATA: u32 = COPYFILE_SECURITY | COPYFILE_XATTR; + const COPYFILE_ALL: u32 = COPYFILE_METADATA | COPYFILE_DATA; + + const COPYFILE_STATE_COPIED: u32 = 8; + + #[allow(non_camel_case_types)] + type copyfile_state_t = *mut libc::c_void; + #[allow(non_camel_case_types)] + type copyfile_flags_t = u32; + + extern "C" { + fn copyfile( + from: *const libc::c_char, + to: *const libc::c_char, + state: copyfile_state_t, + flags: copyfile_flags_t, + ) -> libc::c_int; + fn copyfile_state_alloc() -> copyfile_state_t; + fn copyfile_state_free(state: copyfile_state_t) -> libc::c_int; + fn copyfile_state_get( + state: copyfile_state_t, + flag: u32, + dst: *mut libc::c_void, + ) -> libc::c_int; + } + + struct FreeOnDrop(copyfile_state_t); + impl Drop for FreeOnDrop { + fn drop(&mut self) { + // The code below ensures that `FreeOnDrop` is never a null pointer + unsafe { + // `copyfile_state_free` returns -1 if the `to` or `from` files + // cannot be closed. However, this is not considerd this an + // error. 
+                copyfile_state_free(self.0);
+            }
+        }
+    }
+
+    if !from.is_file() {
+        return Err(Error::new(ErrorKind::InvalidInput,
+                              "the source path is not an existing regular file"))
+    }
+
+    // We ensure that `FreeOnDrop` never contains a null pointer so it is
+    // always safe to call `copyfile_state_free`
+    let state = unsafe {
+        let state = copyfile_state_alloc();
+        if state.is_null() {
+            return Err(crate::io::Error::last_os_error());
+        }
+        FreeOnDrop(state)
+    };
+
+    cvt(unsafe {
+        copyfile(
+            cstr(from)?.as_ptr(),
+            cstr(to)?.as_ptr(),
+            state.0,
+            COPYFILE_ALL,
+        )
+    })?;
+
+    let mut bytes_copied: libc::off_t = 0;
+    cvt(unsafe {
+        copyfile_state_get(
+            state.0,
+            COPYFILE_STATE_COPIED,
+            &mut bytes_copied as *mut libc::off_t as *mut libc::c_void,
+        )
+    })?;
+    Ok(bytes_copied as u64)
+}
diff --git a/src/libstd/sys/wasm/alloc.rs b/src/libstd/sys/wasm/alloc.rs
index b9098548b9c..c1af6ec1262 100644
--- a/src/libstd/sys/wasm/alloc.rs
+++ b/src/libstd/sys/wasm/alloc.rs
@@ -49,7 +49,6 @@ unsafe impl GlobalAlloc for System {
 
 #[cfg(target_feature = "atomics")]
 mod lock {
-    use crate::arch::wasm32;
     use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
@@ -61,14 +60,76 @@ mod lock {
             if LOCKED.swap(1, SeqCst) == 0 {
                 return DropLock
             }
-            unsafe {
-                let r = wasm32::i32_atomic_wait(
-                    &LOCKED as *const AtomicI32 as *mut i32,
-                    1, // expected value
-                    -1, // timeout
-                );
-                debug_assert!(r == 0 || r == 1);
-            }
+
+            // Ok so here's where things get a little depressing. At this point
+            // in time we need to synchronously acquire a lock, but we're
+            // contending with some other thread. Typically we'd execute some
+            // form of `i32.atomic.wait` like so:
+            //
+            // unsafe {
+            //     let r = core::arch::wasm32::i32_atomic_wait(
+            //         &LOCKED as *const AtomicI32 as *mut i32,
+            //         1, // expected value
+            //         -1, // timeout
+            //     );
+            //     debug_assert!(r == 0 || r == 1);
+            // }
+            //
+            // Unfortunately though in doing so we would cause issues for the
+            // main thread. The main thread in a web browser *cannot ever
+            // block*, no exceptions. This means that the main thread can't
+            // actually execute the `i32.atomic.wait` instruction.
+            //
+            // As a result if we want to work within the context of browsers we
+            // need to figure out some sort of allocation scheme for the main
+            // thread where when there's contention on the global malloc lock we
+            // do... something.
+            //
+            // Possible ideas include:
+            //
+            // 1. Attempt to acquire the global lock. If it fails, fall back to
+            //    memory allocation via `memory.grow`. Later just ... somehow
+            //    ... inject this raw page back into the main allocator as it
+            //    gets sliced up over time. This strategy has the downside of
+            //    forcing allocation of a page to happen whenever the main
+            //    thread contents with other threads, which is unfortunate.
+            //
+            // 2. Maintain a form of "two level" allocator scheme where the main
+            //    thread has its own allocator. Somehow this allocator would
+            //    also be balanced with a global allocator, not only to have
+            //    allocations cross between threads but also to ensure that the
+            //    two allocators stay "balanced" in terms of free'd memory and
+            //    such. This, however, seems significantly complicated.
+            //
+            // Out of a lack of other ideas, the current strategy implemented
+            // here is to simply spin. Typical spin loop algorithms have some
+            // form of "hint" here to the CPU that it's what we're doing to
+            // ensure that the CPU doesn't get too hot, but wasm doesn't have
+            // such an instruction.
+            //
+            // To be clear, spinning here is not a great solution.
+            // Another thread with the lock may take quite a long time to wake
+            // up. For example it could be in `memory.grow` or it could be
+            // evicted from the CPU for a timeslice like 10ms. For these periods
+            // of time our thread will "helpfully" sit here and eat CPU time
+            // until it itself is evicted or the lock holder finishes. This
+            // means we're just burning and wasting CPU time to no one's
+            // benefit.
+            //
+            // Spinning does have the nice properties, though, of being
+            // semantically correct, being fair to all threads for memory
+            // allocation, and being simple enough to implement.
+            //
+            // This will surely (hopefully) be replaced in the future with a
+            // real memory allocator that can handle the restriction of the main
+            // thread.
+            //
+            //
+            // FIXME: We can also possibly add an optimization here to detect
+            // when a thread is the main thread or not and block on all
+            // non-main-thread threads. Currently, however, we have no way
+            // of knowing which wasm thread is on the browser main thread, but
+            // if we could figure out we could at least somewhat mitigate the
+            // cost of this spinning.
         }
     }
 
@@ -76,12 +137,16 @@ mod lock {
         fn drop(&mut self) {
             let r = LOCKED.swap(0, SeqCst);
             debug_assert_eq!(r, 1);
-            unsafe {
-                wasm32::atomic_notify(
-                    &LOCKED as *const AtomicI32 as *mut i32,
-                    1, // only one thread
-                );
-            }
+
+            // Note that due to the above logic we don't actually need to wake
+            // anyone up, but if we did it'd likely look something like this:
+            //
+            // unsafe {
+            //     core::arch::wasm32::atomic_notify(
+            //         &LOCKED as *const AtomicI32 as *mut i32,
+            //         1, // only one thread
+            //     );
+            // }
         }
     }
 }
diff --git a/src/libstd/sys/windows/pipe.rs b/src/libstd/sys/windows/pipe.rs
index 07f4f5f0e58..b38727830f3 100644
--- a/src/libstd/sys/windows/pipe.rs
+++ b/src/libstd/sys/windows/pipe.rs
@@ -37,9 +37,9 @@ pub struct Pipes {
 ///
 /// The ours/theirs pipes are *not* specifically readable or writable. Each
 /// one only supports a read or a write, but which is which depends on the
-/// boolean flag given. If `ours_readable` is true then `ours` is readable where
-/// `theirs` is writable. Conversely if `ours_readable` is false then `ours` is
-/// writable where `theirs` is readable.
+/// boolean flag given. If `ours_readable` is `true`, then `ours` is readable and
+/// `theirs` is writable. Conversely, if `ours_readable` is `false`, then `ours`
+/// is writable and `theirs` is readable.
 ///
 /// Also note that the `ours` pipe is always a handle opened up in overlapped
 /// mode. This means that technically speaking it should only ever be used
diff --git a/src/libstd/time.rs b/src/libstd/time.rs
index 6d7093ac33e..4c86f70ad87 100644
--- a/src/libstd/time.rs
+++ b/src/libstd/time.rs
@@ -712,13 +712,6 @@ mod tests {
         assert_almost_eq!(a - second + second, a);
         assert_almost_eq!(a.checked_sub(second).unwrap().checked_add(second).unwrap(), a);
 
-        // A difference of 80 and 800 years cannot fit inside a 32-bit time_t
-        if !(cfg!(unix) && crate::mem::size_of::<libc::time_t>() <= 4) {
-            let eighty_years = second * 60 * 60 * 24 * 365 * 80;
-            assert_almost_eq!(a - eighty_years + eighty_years, a);
-            assert_almost_eq!(a - (eighty_years * 10) + (eighty_years * 10), a);
-        }
-
         let one_second_from_epoch = UNIX_EPOCH + Duration::new(1, 0);
         let one_second_from_epoch2 = UNIX_EPOCH + Duration::new(0, 500_000_000)
             + Duration::new(0, 500_000_000);
@@ -747,8 +740,8 @@ mod tests {
     #[test]
     fn since_epoch() {
         let ts = SystemTime::now();
-        let a = ts.duration_since(UNIX_EPOCH).unwrap();
-        let b = ts.duration_since(UNIX_EPOCH - Duration::new(1, 0)).unwrap();
+        let a = ts.duration_since(UNIX_EPOCH + Duration::new(1, 0)).unwrap();
+        let b = ts.duration_since(UNIX_EPOCH).unwrap();
         assert!(b > a);
         assert_eq!(b - a, Duration::new(1, 0));
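A few illustrative notes on the changes above. First, the `default_read_vectored`/`default_write_vectored` helpers added to `io/mod.rs` are `pub(crate)`, so they are not callable from user code, and the `IoVec`/`IoVecMut` types were still unstable at this point. The sketch below is a standalone rendering of the same fallback logic, written against today's stable `IoSliceMut` name (an assumption, not part of this commit): pick the first non-empty buffer, or an empty one, and issue a single `read`.

```rust
use std::io::{self, IoSliceMut, Read};

// Mirror of the fallback used by the default `Read::read_vectored`:
// a vectored read degrades to one plain `read` into the first
// non-empty buffer (or an empty buffer if every slice is empty).
fn read_first_nonempty<R: Read>(r: &mut R, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
    let buf = bufs
        .iter_mut()
        .find(|b| !b.is_empty())
        .map_or(&mut [][..], |b| &mut **b);
    r.read(buf)
}

fn main() -> io::Result<()> {
    let mut reader: &[u8] = b"hello world"; // &[u8] implements Read
    let mut skipped = [0u8; 0]; // empty slice is skipped by the fallback
    let mut target = [0u8; 5];
    let n = read_first_nonempty(
        &mut reader,
        &mut [IoSliceMut::new(&mut skipped), IoSliceMut::new(&mut target)],
    )?;
    assert_eq!(&target[..n], b"hello"); // only the first non-empty buffer was filled
    Ok(())
}
```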
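Second, the macOS/iOS `copyfile` backend added in `sys/unix/fs.rs` changes nothing at the call site. A small usage sketch (the file names are hypothetical); per the constants in the diff, `COPYFILE_ALL` carries the file data along with stat, ACL, and xattr metadata:

```rust
use std::{fs, io};

fn main() -> io::Result<()> {
    // On macOS and iOS this call now goes through copyfile(3) with COPYFILE_ALL;
    // other platforms keep their existing copy implementations.
    let bytes_copied = fs::copy("in.txt", "out.txt")?;
    println!("copied {} bytes", bytes_copied);
    Ok(())
}
```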
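Finally, the wasm allocator change replaces `i32.atomic.wait` with a plain spin loop because the browser main thread is never allowed to block. As a rough, portable illustration of that swap-until-zero locking pattern (not the wasm-specific code: this sketch uses `std::hint::spin_loop`, the kind of pause hint the diff notes wasm does not have):

```rust
use std::sync::atomic::{AtomicI32, Ordering::SeqCst};

static LOCKED: AtomicI32 = AtomicI32::new(0);

struct DropLock;

// Acquire the lock: swapping in 1 and reading back 0 means it was free.
fn lock() -> DropLock {
    loop {
        if LOCKED.swap(1, SeqCst) == 0 {
            return DropLock;
        }
        // No blocking wait here; just spin until the holder releases the lock.
        std::hint::spin_loop();
    }
}

impl Drop for DropLock {
    fn drop(&mut self) {
        // Release the lock; it must have been held (value 1) by this guard.
        let prev = LOCKED.swap(0, SeqCst);
        debug_assert_eq!(prev, 1);
    }
}

fn main() {
    let _guard = lock();
    // ...critical section (in libstd, the global allocator) runs here...
} // _guard dropped: lock released
```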
