Diffstat (limited to 'library/std/src/sys')
242 files changed, 41827 insertions, 0 deletions
diff --git a/library/std/src/sys/cloudabi/abi/bitflags.rs b/library/std/src/sys/cloudabi/abi/bitflags.rs
new file mode 100644
index 00000000000..2383277ad72
--- /dev/null
+++ b/library/std/src/sys/cloudabi/abi/bitflags.rs
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 Nuxi (https://nuxi.nl/) and contributors.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+// SUCH DAMAGE.
+
+#[cfg(feature = "bitflags")]
+use bitflags::bitflags;
+
+// Minimal implementation of bitflags! in case we can't depend on the bitflags
+// crate. Only implements `bits()` and a `from_bits_truncate()` that doesn't
+// actually truncate.
+#[cfg(not(feature = "bitflags"))]
+macro_rules! bitflags {
+    (
+        $(#[$attr:meta])*
+        pub struct $name:ident: $type:ty {
+            $($(#[$const_attr:meta])* const $const:ident = $val:expr;)*
+        }
+    ) => {
+        $(#[$attr])*
+        #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+        pub struct $name { bits: $type }
+        impl $name {
+            $($(#[$const_attr])* pub const $const: $name = $name{ bits: $val };)*
+            pub fn bits(&self) -> $type { self.bits }
+            pub fn from_bits_truncate(bits: $type) -> Self { $name{ bits } }
+        }
+    }
+}
diff --git a/library/std/src/sys/cloudabi/abi/cloudabi.rs b/library/std/src/sys/cloudabi/abi/cloudabi.rs
new file mode 100644
index 00000000000..b02faf1830c
--- /dev/null
+++ b/library/std/src/sys/cloudabi/abi/cloudabi.rs
@@ -0,0 +1,2947 @@
+// Copyright (c) 2016-2017 Nuxi <https://nuxi.nl/> and contributors.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED.
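// Illustrative sketch, not part of the diff: how a type produced by the
// minimal bitflags! fallback in bitflags.rs above behaves. The `example`
// type and its FOO/BAR constants are hypothetical names, not from CloudABI.
bitflags! {
    pub struct example: u8 {
        const FOO = 0x01;
        const BAR = 0x02;
    }
}

fn bitflags_fallback_demo() {
    // Combine flags via their raw bits; the fallback does not implement the
    // bit-operator traits that the real bitflags crate provides.
    let flags = example::from_bits_truncate(example::FOO.bits() | example::BAR.bits());
    assert_eq!(flags.bits(), 0x03);
    // Despite its name, the fallback's from_bits_truncate() keeps unknown
    // bits instead of masking them off.
    assert_eq!(example::from_bits_truncate(0xff).bits(), 0xff);
}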
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// This file is automatically generated. Do not edit. +// +// Source: https://github.com/NuxiNL/cloudabi + +// Appease Rust's tidy. +// ignore-tidy-linelength + +//! **PLEASE NOTE: This entire crate including this +//! documentation is automatically generated from +//! [`cloudabi.txt`](https://github.com/NuxiNL/cloudabi/blob/master/cloudabi.txt)** +//! +//! # Nuxi CloudABI +//! +//! CloudABI is what you get if you take POSIX, add capability-based +//! security, and remove everything that's incompatible with that. The +//! result is a minimal ABI consisting of only 49 syscalls. +//! +//! CloudABI doesn't have its own kernel, but instead is implemented in existing +//! kernels: FreeBSD has CloudABI support for x86-64 and arm64, and [a patch-set +//! for NetBSD](https://github.com/NuxiNL/netbsd) and [a patch-set for +//! Linux](https://github.com/NuxiNL/linux) are available as well. This means that +//! CloudABI binaries can be executed on different operating systems, without any +//! modification. +//! +//! ## Capability-Based Security +//! +//! Capability-based security means that processes can only perform +//! actions that have no global impact. Processes cannot open files by +//! their absolute path, cannot open network connections, and cannot +//! observe global system state such as the process table. +//! +//! The capabilities of a process are fully determined by its set of open +//! file descriptors (fds). For example, files can only be opened if the +//! process already has a file descriptor to a directory the file is in. +//! +//! Unlike in POSIX, where processes are normally started with file +//! descriptors 0, 1, and 2 reserved for standard input, output, and +//! error, CloudABI does not reserve any file descriptor numbers for +//! specific purposes. +//! +//! In CloudABI, a process depends on its parent process to launch it with +//! the right set of resources, since the process will not be able to open +//! any new resources. For example, a simple static web server would need +//! to be started with a file descriptor to a [TCP +//! listener](https://github.com/NuxiNL/flower), and a file descriptor to +//! the directory for which to serve files. The web server will then be +//! unable to do anything other than reading files in that directory, and +//! process incoming network connections. +//! +//! So, unknown CloudABI binaries can safely be executed without the need +//! for containers, virtual machines, or other sandboxing technologies. +//! +//! Watch [Ed Schouten's Talk at +//! 32C3](https://www.youtube.com/watch?v=3N29vrPoDv8) for more +//! information about what capability-based security for UNIX means. +//! +//! ## Cloudlibc +//! +//! [Cloudlibc](https://github.com/NuxiNL/cloudlibc) is an implementation +//! of the C standard library, without all CloudABI-incompatible +//! functions. For example, Cloudlibc does not have `printf`, but does +//! have `fprintf`. It does not have `open`, but does have `openat`. +//! +//! ## CloudABI-Ports +//! 
+//! [CloudABI-Ports](https://github.com/NuxiNL/cloudabi-ports) is a +//! collection of ports of commonly used libraries and applications to +//! CloudABI. It contains software such as `zlib`, `libpng`, `boost`, +//! `memcached`, and much more. The software is patched to not depend on +//! any global state, such as files in `/etc` or `/dev`, using `open()`, +//! etc. +//! +//! ## Using CloudABI +//! +//! Instructions for using CloudABI (including kernel modules/patches, +//! toolchain, and ports) are available for several operating systems: +//! +//! - [Arch Linux](https://nuxi.nl/cloudabi/archlinux/) +//! - [Debian, Ubuntu, and other Debian derivatives](https://nuxi.nl/cloudabi/debian/) +//! - [FreeBSD, PC-BSD and DragonFly BSD](https://nuxi.nl/cloudabi/freebsd/) +//! - [Mac OS X](https://nuxi.nl/cloudabi/mac/) +//! - [NetBSD](https://nuxi.nl/cloudabi/netbsd/) +//! +//! ## Specification of the ABI +//! +//! The entire ABI is specified in a file called +//! [`cloudabi.txt`](https://github.com/NuxiNL/cloudabi/blob/master/cloudabi.txt), +//! from which all +//! [headers](https://github.com/NuxiNL/cloudabi/tree/master/headers) +//! and documentation (including the one you're reading now) is generated. + +#![no_std] +#![allow(non_camel_case_types)] +#![allow(deprecated)] // FIXME: using `mem::uninitialized()` + +include!("bitflags.rs"); + +/// File or memory access pattern advisory information. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum advice { + /// The application expects that it will not access the + /// specified data in the near future. + DONTNEED = 1, + /// The application expects to access the specified data + /// once and then not reuse it thereafter. + NOREUSE = 2, + /// The application has no advice to give on its behavior + /// with respect to the specified data. + NORMAL = 3, + /// The application expects to access the specified data + /// in a random order. + RANDOM = 4, + /// The application expects to access the specified data + /// sequentially from lower offsets to higher offsets. + SEQUENTIAL = 5, + /// The application expects to access the specified data + /// in the near future. + WILLNEED = 6, +} + +/// Enumeration describing the kind of value stored in [`auxv`](struct.auxv.html). +#[repr(u32)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum auxtype { + /// Base address of the binary argument data provided to + /// [`proc_exec()`](fn.proc_exec.html). + ARGDATA = 256, + /// Length of the binary argument data provided to + /// [`proc_exec()`](fn.proc_exec.html). + ARGDATALEN = 257, + /// Base address at which the executable is placed in + /// memory. + BASE = 7, + /// Base address of a buffer of random data that may be + /// used for non-cryptographic purposes, for example as a + /// canary for stack smashing protection. + CANARY = 258, + /// Length of a buffer of random data that may be used + /// for non-cryptographic purposes, for example as a + /// canary for stack smashing protection. + CANARYLEN = 259, + /// Number of CPUs that the system this process is running + /// on has. + NCPUS = 260, + /// Terminator of the auxiliary vector. + NULL = 0, + /// Smallest memory object size for which individual + /// memory protection controls can be configured. + PAGESZ = 6, + /// Address of the first ELF program header of the + /// executable. + PHDR = 3, + /// Number of ELF program headers of the executable. + PHNUM = 4, + /// Identifier of the process. 
+ /// + /// This environment does not provide any simple numerical + /// process identifiers, for the reason that these are not + /// useful in distributed contexts. Instead, processes are + /// identified by a UUID. + /// + /// This record should point to sixteen bytes of binary + /// data, containing a version 4 UUID (fully random). + PID = 263, + /// Address of the ELF header of the vDSO. + /// + /// The vDSO is a shared library that is mapped in the + /// address space of the process. It provides entry points + /// for every system call supported by the environment, + /// all having a corresponding symbol that is prefixed + /// with `cloudabi_sys_`. System calls should be invoked + /// through these entry points. + /// + /// The first advantage of letting processes call into a + /// vDSO to perform system calls instead of raising + /// hardware traps is that it allows for easy emulation of + /// executables on top of existing operating systems. The + /// second advantage is that in cases where an operating + /// system provides native support for CloudABI executables, + /// it may still implement partial userspace + /// implementations of these system calls to improve + /// performance (e.g., [`clock_time_get()`](fn.clock_time_get.html)). It also provides + /// a more dynamic way of adding, removing or replacing + /// system calls. + SYSINFO_EHDR = 262, + /// Thread ID of the initial thread of the process. + TID = 261, +} + +/// Identifiers for clocks. +#[repr(u32)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum clockid { + /// The system-wide monotonic clock, which is defined as a + /// clock measuring real time, whose value cannot be + /// adjusted and which cannot have negative clock jumps. + /// + /// The epoch of this clock is undefined. The absolute + /// time value of this clock therefore has no meaning. + MONOTONIC = 1, + /// The CPU-time clock associated with the current + /// process. + PROCESS_CPUTIME_ID = 2, + /// The system-wide clock measuring real time. Time value + /// zero corresponds with 1970-01-01T00:00:00Z. + REALTIME = 3, + /// The CPU-time clock associated with the current thread. + THREAD_CPUTIME_ID = 4, +} + +/// A userspace condition variable. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct condvar(pub u32); +/// The condition variable is in its initial state. There +/// are no threads waiting to be woken up. If the +/// condition variable has any other value, the kernel +/// must be called to wake up any sleeping threads. +pub const CONDVAR_HAS_NO_WAITERS: condvar = condvar(0); + +/// Identifier for a device containing a file system. Can be used +/// in combination with [`inode`](struct.inode.html) to uniquely identify a file on the +/// local system. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct device(pub u64); + +/// A reference to the offset of a directory entry. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct dircookie(pub u64); +/// Permanent reference to the first directory entry +/// within a directory. +pub const DIRCOOKIE_START: dircookie = dircookie(0); + +/// Error codes returned by system calls. +/// +/// Not all of these error codes are returned by the system calls +/// provided by this environment, but are either used in userspace +/// exclusively or merely provided for alignment with POSIX. +#[repr(u16)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum errno { + /// No error occurred. 
System call completed successfully. + SUCCESS = 0, + /// Argument list too long. + TOOBIG = 1, + /// Permission denied. + ACCES = 2, + /// Address in use. + ADDRINUSE = 3, + /// Address not available. + ADDRNOTAVAIL = 4, + /// Address family not supported. + AFNOSUPPORT = 5, + /// Resource unavailable, or operation would block. + AGAIN = 6, + /// Connection already in progress. + ALREADY = 7, + /// Bad file descriptor. + BADF = 8, + /// Bad message. + BADMSG = 9, + /// Device or resource busy. + BUSY = 10, + /// Operation canceled. + CANCELED = 11, + /// No child processes. + CHILD = 12, + /// Connection aborted. + CONNABORTED = 13, + /// Connection refused. + CONNREFUSED = 14, + /// Connection reset. + CONNRESET = 15, + /// Resource deadlock would occur. + DEADLK = 16, + /// Destination address required. + DESTADDRREQ = 17, + /// Mathematics argument out of domain of function. + DOM = 18, + /// Reserved. + DQUOT = 19, + /// File exists. + EXIST = 20, + /// Bad address. + FAULT = 21, + /// File too large. + FBIG = 22, + /// Host is unreachable. + HOSTUNREACH = 23, + /// Identifier removed. + IDRM = 24, + /// Illegal byte sequence. + ILSEQ = 25, + /// Operation in progress. + INPROGRESS = 26, + /// Interrupted function. + INTR = 27, + /// Invalid argument. + INVAL = 28, + /// I/O error. + IO = 29, + /// Socket is connected. + ISCONN = 30, + /// Is a directory. + ISDIR = 31, + /// Too many levels of symbolic links. + LOOP = 32, + /// File descriptor value too large. + MFILE = 33, + /// Too many links. + MLINK = 34, + /// Message too large. + MSGSIZE = 35, + /// Reserved. + MULTIHOP = 36, + /// Filename too long. + NAMETOOLONG = 37, + /// Network is down. + NETDOWN = 38, + /// Connection aborted by network. + NETRESET = 39, + /// Network unreachable. + NETUNREACH = 40, + /// Too many files open in system. + NFILE = 41, + /// No buffer space available. + NOBUFS = 42, + /// No such device. + NODEV = 43, + /// No such file or directory. + NOENT = 44, + /// Executable file format error. + NOEXEC = 45, + /// No locks available. + NOLCK = 46, + /// Reserved. + NOLINK = 47, + /// Not enough space. + NOMEM = 48, + /// No message of the desired type. + NOMSG = 49, + /// Protocol not available. + NOPROTOOPT = 50, + /// No space left on device. + NOSPC = 51, + /// Function not supported. + NOSYS = 52, + /// The socket is not connected. + NOTCONN = 53, + /// Not a directory or a symbolic link to a directory. + NOTDIR = 54, + /// Directory not empty. + NOTEMPTY = 55, + /// State not recoverable. + NOTRECOVERABLE = 56, + /// Not a socket. + NOTSOCK = 57, + /// Not supported, or operation not supported on socket. + NOTSUP = 58, + /// Inappropriate I/O control operation. + NOTTY = 59, + /// No such device or address. + NXIO = 60, + /// Value too large to be stored in data type. + OVERFLOW = 61, + /// Previous owner died. + OWNERDEAD = 62, + /// Operation not permitted. + PERM = 63, + /// Broken pipe. + PIPE = 64, + /// Protocol error. + PROTO = 65, + /// Protocol not supported. + PROTONOSUPPORT = 66, + /// Protocol wrong type for socket. + PROTOTYPE = 67, + /// Result too large. + RANGE = 68, + /// Read-only file system. + ROFS = 69, + /// Invalid seek. + SPIPE = 70, + /// No such process. + SRCH = 71, + /// Reserved. + STALE = 72, + /// Connection timed out. + TIMEDOUT = 73, + /// Text file busy. + TXTBSY = 74, + /// Cross-device link. + XDEV = 75, + /// Extension: Capabilities insufficient. + NOTCAPABLE = 76, +} + +bitflags! 
{ + /// The state of the file descriptor subscribed to with + /// [`FD_READ`](enum.eventtype.html#variant.FD_READ) or [`FD_WRITE`](enum.eventtype.html#variant.FD_WRITE). + #[repr(C)] + pub struct eventrwflags: u16 { + /// The peer of this socket has closed or disconnected. + const HANGUP = 0x0001; + } +} + +/// Type of a subscription to an event or its occurrence. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum eventtype { + /// The time value of clock [`subscription.union.clock.clock_id`](struct.subscription_clock.html#structfield.clock_id) + /// has reached timestamp [`subscription.union.clock.timeout`](struct.subscription_clock.html#structfield.timeout). + CLOCK = 1, + /// Condition variable [`subscription.union.condvar.condvar`](struct.subscription_condvar.html#structfield.condvar) has + /// been woken up and [`subscription.union.condvar.lock`](struct.subscription_condvar.html#structfield.lock) has been + /// acquired for writing. + CONDVAR = 2, + /// File descriptor [`subscription.union.fd_readwrite.fd`](struct.subscription_fd_readwrite.html#structfield.fd) has + /// data available for reading. This event always triggers + /// for regular files. + FD_READ = 3, + /// File descriptor [`subscription.union.fd_readwrite.fd`](struct.subscription_fd_readwrite.html#structfield.fd) has + /// capacity available for writing. This event always + /// triggers for regular files. + FD_WRITE = 4, + /// Lock [`subscription.union.lock.lock`](struct.subscription_lock.html#structfield.lock) has been acquired for + /// reading. + LOCK_RDLOCK = 5, + /// Lock [`subscription.union.lock.lock`](struct.subscription_lock.html#structfield.lock) has been acquired for + /// writing. + LOCK_WRLOCK = 6, + /// The process associated with process descriptor + /// [`subscription.union.proc_terminate.fd`](struct.subscription_proc_terminate.html#structfield.fd) has terminated. + PROC_TERMINATE = 7, +} + +/// Exit code generated by a process when exiting. +pub type exitcode = u32; + +/// A file descriptor number. +/// +/// Unlike on POSIX-compliant systems, none of the file descriptor +/// numbers are reserved for a purpose (e.g., stdin, stdout, +/// stderr). Operating systems are not required to allocate new +/// file descriptors in ascending order. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct fd(pub u32); +/// Returned to the child process by [`proc_fork()`](fn.proc_fork.html). +pub const PROCESS_CHILD: fd = fd(0xffffffff); +/// Passed to [`mem_map()`](fn.mem_map.html) when creating a mapping to +/// anonymous memory. +pub const MAP_ANON_FD: fd = fd(0xffffffff); + +bitflags! { + /// File descriptor flags. + #[repr(C)] + pub struct fdflags: u16 { + /// Append mode: Data written to the file is always + /// appended to the file's end. + const APPEND = 0x0001; + /// Write according to synchronized I/O data integrity + /// completion. Only the data stored in the file is + /// synchronized. + const DSYNC = 0x0002; + /// Non-blocking mode. + const NONBLOCK = 0x0004; + /// Synchronized read I/O operations. + const RSYNC = 0x0008; + /// Write according to synchronized I/O file integrity + /// completion. In addition to synchronizing the data + /// stored in the file, the system may also synchronously + /// update the file's metadata. + const SYNC = 0x0010; + } +} + +bitflags! { + /// Which file descriptor attributes to adjust. 
+ #[repr(C)] + pub struct fdsflags: u16 { + /// Adjust the file descriptor flags stored in + /// [`fdstat.fs_flags`](struct.fdstat.html#structfield.fs_flags). + const FLAGS = 0x0001; + /// Restrict the rights of the file descriptor to the + /// rights stored in [`fdstat.fs_rights_base`](struct.fdstat.html#structfield.fs_rights_base) and + /// [`fdstat.fs_rights_inheriting`](struct.fdstat.html#structfield.fs_rights_inheriting). + const RIGHTS = 0x0002; + } +} + +/// Relative offset within a file. +pub type filedelta = i64; + +/// Non-negative file size or length of a region within a file. +pub type filesize = u64; + +/// The type of a file descriptor or file. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum filetype { + /// The type of the file descriptor or file is unknown or + /// is different from any of the other types specified. + UNKNOWN = 0, + /// The file descriptor or file refers to a block device + /// inode. + BLOCK_DEVICE = 16, + /// The file descriptor or file refers to a character + /// device inode. + CHARACTER_DEVICE = 17, + /// The file descriptor or file refers to a directory + /// inode. + DIRECTORY = 32, + /// The file descriptor refers to a process handle. + PROCESS = 80, + /// The file descriptor or file refers to a regular file + /// inode. + REGULAR_FILE = 96, + /// The file descriptor refers to a shared memory object. + SHARED_MEMORY = 112, + /// The file descriptor or file refers to a datagram + /// socket. + SOCKET_DGRAM = 128, + /// The file descriptor or file refers to a byte-stream + /// socket. + SOCKET_STREAM = 130, + /// The file refers to a symbolic link inode. + SYMBOLIC_LINK = 144, +} + +bitflags! { + /// Which file attributes to adjust. + #[repr(C)] + pub struct fsflags: u16 { + /// Adjust the last data access timestamp to the value + /// stored in [`filestat.st_atim`](struct.filestat.html#structfield.st_atim). + const ATIM = 0x0001; + /// Adjust the last data access timestamp to the time + /// of clock [`REALTIME`](enum.clockid.html#variant.REALTIME). + const ATIM_NOW = 0x0002; + /// Adjust the last data modification timestamp to the + /// value stored in [`filestat.st_mtim`](struct.filestat.html#structfield.st_mtim). + const MTIM = 0x0004; + /// Adjust the last data modification timestamp to the + /// time of clock [`REALTIME`](enum.clockid.html#variant.REALTIME). + const MTIM_NOW = 0x0008; + /// Truncate or extend the file to the size stored in + /// [`filestat.st_size`](struct.filestat.html#structfield.st_size). + const SIZE = 0x0010; + } +} + +/// File serial number that is unique within its file system. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct inode(pub u64); + +/// Number of hard links to an inode. +pub type linkcount = u32; + +/// A userspace read-recursive readers-writer lock, similar to a +/// Linux futex or a FreeBSD umtx. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct lock(pub u32); +/// Value indicating that the lock is in its initial +/// unlocked state. +pub const LOCK_UNLOCKED: lock = lock(0x00000000); +/// Bitmask indicating that the lock is write-locked. If +/// set, the lower 30 bits of the lock contain the +/// identifier of the thread that owns the write lock. +/// Otherwise, the lower 30 bits of the lock contain the +/// number of acquired read locks. 
+pub const LOCK_WRLOCKED: lock = lock(0x40000000); +/// Bitmask indicating that the lock is either read locked +/// or write locked, and that one or more threads have +/// their execution suspended, waiting to acquire the +/// lock. The last owner of the lock must call the +/// kernel to unlock. +/// +/// When the lock is acquired for reading and this bit is +/// set, it means that one or more threads are attempting +/// to acquire this lock for writing. In that case, other +/// threads should only acquire additional read locks if +/// suspending execution would cause a deadlock. It is +/// preferred to suspend execution, as this prevents +/// starvation of writers. +pub const LOCK_KERNEL_MANAGED: lock = lock(0x80000000); +/// Value indicating that the lock is in an incorrect +/// state. A lock cannot be in its initial unlocked state, +/// while also managed by the kernel. +pub const LOCK_BOGUS: lock = lock(0x80000000); + +bitflags! { + /// Flags determining the method of how paths are resolved. + #[repr(C)] + pub struct lookupflags: u32 { + /// As long as the resolved path corresponds to a symbolic + /// link, it is expanded. + const SYMLINK_FOLLOW = 0x00000001; + } +} + +bitflags! { + /// Memory mapping flags. + #[repr(C)] + pub struct mflags: u8 { + /// Instead of mapping the contents of the file provided, + /// create a mapping to anonymous memory. The file + /// descriptor argument must be set to [`MAP_ANON_FD`](constant.MAP_ANON_FD.html), + /// and the offset must be set to zero. + const ANON = 0x01; + /// Require that the mapping is performed at the base + /// address provided. + const FIXED = 0x02; + /// Changes are private. + const PRIVATE = 0x04; + /// Changes are shared. + const SHARED = 0x08; + } +} + +bitflags! { + /// Memory page protection options. + /// + /// This implementation enforces the `W^X` property: Pages cannot be + /// mapped for execution while also mapped for writing. + #[repr(C)] + pub struct mprot: u8 { + /// Page can be executed. + const EXEC = 0x01; + /// Page can be written. + const WRITE = 0x02; + /// Page can be read. + const READ = 0x04; + } +} + +bitflags! { + /// Methods of synchronizing memory with physical storage. + #[repr(C)] + pub struct msflags: u8 { + /// Performs asynchronous writes. + const ASYNC = 0x01; + /// Invalidates cached data. + const INVALIDATE = 0x02; + /// Performs synchronous writes. + const SYNC = 0x04; + } +} + +/// Specifies the number of threads sleeping on a condition +/// variable that should be woken up. +pub type nthreads = u32; + +bitflags! { + /// Open flags used by [`file_open()`](fn.file_open.html). + #[repr(C)] + pub struct oflags: u16 { + /// Create file if it does not exist. + const CREAT = 0x0001; + /// Fail if not a directory. + const DIRECTORY = 0x0002; + /// Fail if file already exists. + const EXCL = 0x0004; + /// Truncate file to size 0. + const TRUNC = 0x0008; + } +} + +bitflags! { + /// Flags provided to [`sock_recv()`](fn.sock_recv.html). + #[repr(C)] + pub struct riflags: u16 { + /// Returns the message without removing it from the + /// socket's receive queue. + const PEEK = 0x0004; + /// On byte-stream sockets, block until the full amount + /// of data can be returned. + const WAITALL = 0x0010; + } +} + +bitflags! { + /// File descriptor rights, determining which actions may be + /// performed. + #[repr(C)] + pub struct rights: u64 { + /// The right to invoke [`fd_datasync()`](fn.fd_datasync.html). 
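// Illustrative sketch, not part of the diff: decoding a userspace `lock`
// word using the LOCK_WRLOCKED and LOCK_KERNEL_MANAGED bitmasks defined
// above. The function name and tuple layout are hypothetical.
fn decode_lock(l: lock) -> (bool, bool, u32) {
    // Bit 31: the lock is kernel managed because threads sleep on it.
    let kernel_managed = l.0 & LOCK_KERNEL_MANAGED.0 != 0;
    // Bit 30: write-locked; the lower 30 bits then hold the owner's thread id.
    let write_locked = l.0 & LOCK_WRLOCKED.0 != 0;
    // Otherwise the lower 30 bits hold the number of acquired read locks.
    let low30 = l.0 & 0x3fff_ffff;
    (kernel_managed, write_locked, low30)
}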
+ /// + /// If [`FILE_OPEN`](struct.rights.html#associatedconstant.FILE_OPEN) is set, includes the right to + /// invoke [`file_open()`](fn.file_open.html) with [`DSYNC`](struct.fdflags.html#associatedconstant.DSYNC). + const FD_DATASYNC = 0x0000000000000001; + /// The right to invoke [`fd_read()`](fn.fd_read.html) and [`sock_recv()`](fn.sock_recv.html). + /// + /// If [`MEM_MAP`](struct.rights.html#associatedconstant.MEM_MAP) is set, includes the right to + /// invoke [`mem_map()`](fn.mem_map.html) with memory protection option + /// [`READ`](struct.mprot.html#associatedconstant.READ). + /// + /// If [`FD_SEEK`](struct.rights.html#associatedconstant.FD_SEEK) is set, includes the right to invoke + /// [`fd_pread()`](fn.fd_pread.html). + const FD_READ = 0x0000000000000002; + /// The right to invoke [`fd_seek()`](fn.fd_seek.html). This flag implies + /// [`FD_TELL`](struct.rights.html#associatedconstant.FD_TELL). + const FD_SEEK = 0x0000000000000004; + /// The right to invoke [`fd_stat_put()`](fn.fd_stat_put.html) with + /// [`FLAGS`](struct.fdsflags.html#associatedconstant.FLAGS). + const FD_STAT_PUT_FLAGS = 0x0000000000000008; + /// The right to invoke [`fd_sync()`](fn.fd_sync.html). + /// + /// If [`FILE_OPEN`](struct.rights.html#associatedconstant.FILE_OPEN) is set, includes the right to + /// invoke [`file_open()`](fn.file_open.html) with [`RSYNC`](struct.fdflags.html#associatedconstant.RSYNC) and + /// [`DSYNC`](struct.fdflags.html#associatedconstant.DSYNC). + const FD_SYNC = 0x0000000000000010; + /// The right to invoke [`fd_seek()`](fn.fd_seek.html) in such a way that the + /// file offset remains unaltered (i.e., [`CUR`](enum.whence.html#variant.CUR) with + /// offset zero). + const FD_TELL = 0x0000000000000020; + /// The right to invoke [`fd_write()`](fn.fd_write.html) and [`sock_send()`](fn.sock_send.html). + /// + /// If [`MEM_MAP`](struct.rights.html#associatedconstant.MEM_MAP) is set, includes the right to + /// invoke [`mem_map()`](fn.mem_map.html) with memory protection option + /// [`WRITE`](struct.mprot.html#associatedconstant.WRITE). + /// + /// If [`FD_SEEK`](struct.rights.html#associatedconstant.FD_SEEK) is set, includes the right to + /// invoke [`fd_pwrite()`](fn.fd_pwrite.html). + const FD_WRITE = 0x0000000000000040; + /// The right to invoke [`file_advise()`](fn.file_advise.html). + const FILE_ADVISE = 0x0000000000000080; + /// The right to invoke [`file_allocate()`](fn.file_allocate.html). + const FILE_ALLOCATE = 0x0000000000000100; + /// The right to invoke [`file_create()`](fn.file_create.html) with + /// [`DIRECTORY`](enum.filetype.html#variant.DIRECTORY). + const FILE_CREATE_DIRECTORY = 0x0000000000000200; + /// If [`FILE_OPEN`](struct.rights.html#associatedconstant.FILE_OPEN) is set, the right to invoke + /// [`file_open()`](fn.file_open.html) with [`CREAT`](struct.oflags.html#associatedconstant.CREAT). + const FILE_CREATE_FILE = 0x0000000000000400; + /// The right to invoke [`file_link()`](fn.file_link.html) with the file + /// descriptor as the source directory. + const FILE_LINK_SOURCE = 0x0000000000001000; + /// The right to invoke [`file_link()`](fn.file_link.html) with the file + /// descriptor as the target directory. + const FILE_LINK_TARGET = 0x0000000000002000; + /// The right to invoke [`file_open()`](fn.file_open.html). + const FILE_OPEN = 0x0000000000004000; + /// The right to invoke [`file_readdir()`](fn.file_readdir.html). + const FILE_READDIR = 0x0000000000008000; + /// The right to invoke [`file_readlink()`](fn.file_readlink.html). 
+ const FILE_READLINK = 0x0000000000010000; + /// The right to invoke [`file_rename()`](fn.file_rename.html) with the file + /// descriptor as the source directory. + const FILE_RENAME_SOURCE = 0x0000000000020000; + /// The right to invoke [`file_rename()`](fn.file_rename.html) with the file + /// descriptor as the target directory. + const FILE_RENAME_TARGET = 0x0000000000040000; + /// The right to invoke [`file_stat_fget()`](fn.file_stat_fget.html). + const FILE_STAT_FGET = 0x0000000000080000; + /// The right to invoke [`file_stat_fput()`](fn.file_stat_fput.html) with + /// [`SIZE`](struct.fsflags.html#associatedconstant.SIZE). + /// + /// If [`FILE_OPEN`](struct.rights.html#associatedconstant.FILE_OPEN) is set, includes the right to + /// invoke [`file_open()`](fn.file_open.html) with [`TRUNC`](struct.oflags.html#associatedconstant.TRUNC). + const FILE_STAT_FPUT_SIZE = 0x0000000000100000; + /// The right to invoke [`file_stat_fput()`](fn.file_stat_fput.html) with + /// [`ATIM`](struct.fsflags.html#associatedconstant.ATIM), [`ATIM_NOW`](struct.fsflags.html#associatedconstant.ATIM_NOW), [`MTIM`](struct.fsflags.html#associatedconstant.MTIM), + /// and [`MTIM_NOW`](struct.fsflags.html#associatedconstant.MTIM_NOW). + const FILE_STAT_FPUT_TIMES = 0x0000000000200000; + /// The right to invoke [`file_stat_get()`](fn.file_stat_get.html). + const FILE_STAT_GET = 0x0000000000400000; + /// The right to invoke [`file_stat_put()`](fn.file_stat_put.html) with + /// [`ATIM`](struct.fsflags.html#associatedconstant.ATIM), [`ATIM_NOW`](struct.fsflags.html#associatedconstant.ATIM_NOW), [`MTIM`](struct.fsflags.html#associatedconstant.MTIM), + /// and [`MTIM_NOW`](struct.fsflags.html#associatedconstant.MTIM_NOW). + const FILE_STAT_PUT_TIMES = 0x0000000000800000; + /// The right to invoke [`file_symlink()`](fn.file_symlink.html). + const FILE_SYMLINK = 0x0000000001000000; + /// The right to invoke [`file_unlink()`](fn.file_unlink.html). + const FILE_UNLINK = 0x0000000002000000; + /// The right to invoke [`mem_map()`](fn.mem_map.html) with [`mprot`](struct.mprot.html) set to + /// zero. + const MEM_MAP = 0x0000000004000000; + /// If [`MEM_MAP`](struct.rights.html#associatedconstant.MEM_MAP) is set, the right to invoke + /// [`mem_map()`](fn.mem_map.html) with [`EXEC`](struct.mprot.html#associatedconstant.EXEC). + const MEM_MAP_EXEC = 0x0000000008000000; + /// If [`FD_READ`](struct.rights.html#associatedconstant.FD_READ) is set, includes the right to + /// invoke [`poll()`](fn.poll.html) to subscribe to [`FD_READ`](enum.eventtype.html#variant.FD_READ). + /// + /// If [`FD_WRITE`](struct.rights.html#associatedconstant.FD_WRITE) is set, includes the right to + /// invoke [`poll()`](fn.poll.html) to subscribe to [`FD_WRITE`](enum.eventtype.html#variant.FD_WRITE). + const POLL_FD_READWRITE = 0x0000000010000000; + /// The right to invoke [`poll()`](fn.poll.html) to subscribe to + /// [`PROC_TERMINATE`](enum.eventtype.html#variant.PROC_TERMINATE). + const POLL_PROC_TERMINATE = 0x0000000040000000; + /// The right to invoke [`proc_exec()`](fn.proc_exec.html). + const PROC_EXEC = 0x0000000100000000; + /// The right to invoke [`sock_shutdown()`](fn.sock_shutdown.html). + const SOCK_SHUTDOWN = 0x0000008000000000; + } +} + +bitflags! { + /// Flags returned by [`sock_recv()`](fn.sock_recv.html). + #[repr(C)] + pub struct roflags: u16 { + /// Returned by [`sock_recv()`](fn.sock_recv.html): List of file descriptors + /// has been truncated. 
+ const FDS_TRUNCATED = 0x0001; + /// Returned by [`sock_recv()`](fn.sock_recv.html): Message data has been + /// truncated. + const DATA_TRUNCATED = 0x0008; + } +} + +/// Indicates whether an object is stored in private or shared +/// memory. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum scope { + /// The object is stored in private memory. + PRIVATE = 4, + /// The object is stored in shared memory. + SHARED = 8, +} + +bitflags! { + /// Which channels on a socket need to be shut down. + #[repr(C)] + pub struct sdflags: u8 { + /// Disables further receive operations. + const RD = 0x01; + /// Disables further send operations. + const WR = 0x02; + } +} + +bitflags! { + /// Flags provided to [`sock_send()`](fn.sock_send.html). As there are currently no flags + /// defined, it must be set to zero. + #[repr(C)] + pub struct siflags: u16 { + const DEFAULT = 0; + } +} + +/// Signal condition. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum signal { + /// Process abort signal. + /// + /// Action: Terminates the process. + ABRT = 1, + /// Alarm clock. + /// + /// Action: Terminates the process. + ALRM = 2, + /// Access to an undefined portion of a memory object. + /// + /// Action: Terminates the process. + BUS = 3, + /// Child process terminated, stopped, or continued. + /// + /// Action: Ignored. + CHLD = 4, + /// Continue executing, if stopped. + /// + /// Action: Continues executing, if stopped. + CONT = 5, + /// Erroneous arithmetic operation. + /// + /// Action: Terminates the process. + FPE = 6, + /// Hangup. + /// + /// Action: Terminates the process. + HUP = 7, + /// Illegal instruction. + /// + /// Action: Terminates the process. + ILL = 8, + /// Terminate interrupt signal. + /// + /// Action: Terminates the process. + INT = 9, + /// Kill. + /// + /// Action: Terminates the process. + KILL = 10, + /// Write on a pipe with no one to read it. + /// + /// Action: Ignored. + PIPE = 11, + /// Terminal quit signal. + /// + /// Action: Terminates the process. + QUIT = 12, + /// Invalid memory reference. + /// + /// Action: Terminates the process. + SEGV = 13, + /// Stop executing. + /// + /// Action: Stops executing. + STOP = 14, + /// Bad system call. + /// + /// Action: Terminates the process. + SYS = 15, + /// Termination signal. + /// + /// Action: Terminates the process. + TERM = 16, + /// Trace/breakpoint trap. + /// + /// Action: Terminates the process. + TRAP = 17, + /// Terminal stop signal. + /// + /// Action: Stops executing. + TSTP = 18, + /// Background process attempting read. + /// + /// Action: Stops executing. + TTIN = 19, + /// Background process attempting write. + /// + /// Action: Stops executing. + TTOU = 20, + /// High bandwidth data is available at a socket. + /// + /// Action: Ignored. + URG = 21, + /// User-defined signal 1. + /// + /// Action: Terminates the process. + USR1 = 22, + /// User-defined signal 2. + /// + /// Action: Terminates the process. + USR2 = 23, + /// Virtual timer expired. + /// + /// Action: Terminates the process. + VTALRM = 24, + /// CPU time limit exceeded. + /// + /// Action: Terminates the process. + XCPU = 25, + /// File size limit exceeded. + /// + /// Action: Terminates the process. + XFSZ = 26, +} + +bitflags! { + /// Flags determining how the timestamp provided in + /// [`subscription.union.clock.timeout`](struct.subscription_clock.html#structfield.timeout) should be interpreted. 
+ #[repr(C)] + pub struct subclockflags: u16 { + /// If set, treat the timestamp provided in + /// [`subscription.union.clock.timeout`](struct.subscription_clock.html#structfield.timeout) as an absolute timestamp + /// of clock [`subscription.union.clock.clock_id`](struct.subscription_clock.html#structfield.clock_id). + /// + /// If clear, treat the timestamp provided in + /// [`subscription.union.clock.timeout`](struct.subscription_clock.html#structfield.timeout) relative to the current + /// time value of clock [`subscription.union.clock.clock_id`](struct.subscription_clock.html#structfield.clock_id). + const ABSTIME = 0x0001; + } +} + +bitflags! { + /// Flags influencing the method of polling for read or writing on + /// a file descriptor. + #[repr(C)] + pub struct subrwflags: u16 { + /// Deprecated. Must be set by callers and ignored by + /// implementations. + const POLL = 0x0001; + } +} + +/// Unique system-local identifier of a thread. This identifier is +/// only valid during the lifetime of the thread. +/// +/// Threads must be aware of their thread identifier, as it is +/// written it into locks when acquiring them for writing. It is +/// not advised to use these identifiers for any other purpose. +/// +/// As the thread identifier is also stored in [`lock`](struct.lock.html) when +/// [`LOCK_WRLOCKED`](constant.LOCK_WRLOCKED.html) is set, the top two bits of the thread +/// must always be set to zero. +#[repr(C)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct tid(pub u32); + +/// Timestamp in nanoseconds. +pub type timestamp = u64; + +bitflags! { + /// Specifies whether files are unlinked or directories are + /// removed. + #[repr(C)] + pub struct ulflags: u8 { + /// If set, removes a directory. Otherwise, unlinks any + /// non-directory file. + const REMOVEDIR = 0x01; + } +} + +/// User-provided value that can be attached to objects that is +/// retained when extracted from the kernel. +pub type userdata = u64; + +/// Relative to which position the offset of the file descriptor +/// should be set. +#[repr(u8)] +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum whence { + /// Seek relative to current position. + CUR = 1, + /// Seek relative to end-of-file. + END = 2, + /// Seek relative to start-of-file. + SET = 3, +} + +/// Auxiliary vector entry. +/// +/// The auxiliary vector is a list of key-value pairs that is +/// provided to the process on startup. Unlike structures, it is +/// extensible, as it is possible to add new records later on. +/// The auxiliary vector is always terminated by an entry having +/// type [`NULL`](enum.auxtype.html#variant.NULL). +/// +/// The auxiliary vector is part of the x86-64 ABI, but is used by +/// this environment on all architectures. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct auxv { + /// The type of the auxiliary vector entry. + pub a_type: auxtype, + pub union: auxv_union, +} +/// A union inside `auxv`. +#[repr(C)] +#[derive(Copy, Clone)] +pub union auxv_union { + /// Used when `a_type` is [`ARGDATALEN`](enum.auxtype.html#variant.ARGDATALEN), [`CANARYLEN`](enum.auxtype.html#variant.CANARYLEN), [`NCPUS`](enum.auxtype.html#variant.NCPUS), [`PAGESZ`](enum.auxtype.html#variant.PAGESZ), [`PHNUM`](enum.auxtype.html#variant.PHNUM), or [`TID`](enum.auxtype.html#variant.TID). + /// A numerical value. 
+ pub a_val: usize, + /// Used when `a_type` is [`ARGDATA`](enum.auxtype.html#variant.ARGDATA), [`BASE`](enum.auxtype.html#variant.BASE), [`CANARY`](enum.auxtype.html#variant.CANARY), [`PHDR`](enum.auxtype.html#variant.PHDR), [`PID`](enum.auxtype.html#variant.PID), or [`SYSINFO_EHDR`](enum.auxtype.html#variant.SYSINFO_EHDR). + /// A pointer value. + pub a_ptr: *mut (), +} +#[test] +#[cfg(target_pointer_width = "32")] +fn auxv_layout_test_32() { + assert_eq!(core::mem::size_of::<auxv>(), 8); + assert_eq!(core::mem::align_of::<auxv>(), 4); + unsafe { + let obj: auxv = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.a_type as *const _ as usize - base, 0); + assert_eq!(&obj.union.a_val as *const _ as usize - base, 4); + assert_eq!(&obj.union.a_ptr as *const _ as usize - base, 4); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn auxv_layout_test_64() { + assert_eq!(core::mem::size_of::<auxv>(), 16); + assert_eq!(core::mem::align_of::<auxv>(), 8); + unsafe { + let obj: auxv = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.a_type as *const _ as usize - base, 0); + assert_eq!(&obj.union.a_val as *const _ as usize - base, 8); + assert_eq!(&obj.union.a_ptr as *const _ as usize - base, 8); + } +} + +/// A region of memory for scatter/gather writes. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct ciovec { + /// The address and length of the buffer to be written. + pub buf: (*const (), usize), +} +#[test] +#[cfg(target_pointer_width = "32")] +fn ciovec_layout_test_32() { + assert_eq!(core::mem::size_of::<ciovec>(), 8); + assert_eq!(core::mem::align_of::<ciovec>(), 4); + unsafe { + let obj: ciovec = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.buf.0 as *const _ as usize - base, 0); + assert_eq!(&obj.buf.1 as *const _ as usize - base, 4); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn ciovec_layout_test_64() { + assert_eq!(core::mem::size_of::<ciovec>(), 16); + assert_eq!(core::mem::align_of::<ciovec>(), 8); + unsafe { + let obj: ciovec = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.buf.0 as *const _ as usize - base, 0); + assert_eq!(&obj.buf.1 as *const _ as usize - base, 8); + } +} + +/// A directory entry. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct dirent { + /// The offset of the next directory entry stored in this + /// directory. + pub d_next: dircookie, + /// The serial number of the file referred to by this + /// directory entry. + pub d_ino: inode, + /// The length of the name of the directory entry. + pub d_namlen: u32, + /// The type of the file referred to by this directory + /// entry. + pub d_type: filetype, +} +#[test] +fn dirent_layout_test() { + assert_eq!(core::mem::size_of::<dirent>(), 24); + assert_eq!(core::mem::align_of::<dirent>(), 8); + unsafe { + let obj: dirent = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.d_next as *const _ as usize - base, 0); + assert_eq!(&obj.d_ino as *const _ as usize - base, 8); + assert_eq!(&obj.d_namlen as *const _ as usize - base, 16); + assert_eq!(&obj.d_type as *const _ as usize - base, 20); + } +} + +/// An event that occurred. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct event { + /// User-provided value that got attached to + /// [`subscription.userdata`](struct.subscription.html#structfield.userdata). + pub userdata: userdata, + /// If non-zero, an error that occurred while processing + /// the subscription request. 
+ pub error: errno, + /// The type of the event that occurred. + pub type_: eventtype, + pub union: event_union, +} +/// A union inside `event`. +#[repr(C)] +#[derive(Copy, Clone)] +pub union event_union { + /// Used when `type_` is [`FD_READ`](enum.eventtype.html#variant.FD_READ) or [`FD_WRITE`](enum.eventtype.html#variant.FD_WRITE). + pub fd_readwrite: event_fd_readwrite, + /// Used when `type_` is [`PROC_TERMINATE`](enum.eventtype.html#variant.PROC_TERMINATE). + pub proc_terminate: event_proc_terminate, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct event_fd_readwrite { + /// The number of bytes available + /// for reading or writing. + pub nbytes: filesize, + /// Obsolete. + pub unused: [u8; 4], + /// The state of the file + /// descriptor. + pub flags: eventrwflags, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct event_proc_terminate { + /// Obsolete. + pub unused: [u8; 4], + /// If zero, the process has + /// exited. + /// Otherwise, the signal + /// condition causing it to + /// terminated. + pub signal: signal, + /// If exited, the exit code of + /// the process. + pub exitcode: exitcode, +} +#[test] +fn event_layout_test() { + assert_eq!(core::mem::size_of::<event>(), 32); + assert_eq!(core::mem::align_of::<event>(), 8); + unsafe { + let obj: event = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.userdata as *const _ as usize - base, 0); + assert_eq!(&obj.error as *const _ as usize - base, 8); + assert_eq!(&obj.type_ as *const _ as usize - base, 10); + assert_eq!(&obj.union.fd_readwrite.nbytes as *const _ as usize - base, 16); + assert_eq!(&obj.union.fd_readwrite.unused as *const _ as usize - base, 24); + assert_eq!(&obj.union.fd_readwrite.flags as *const _ as usize - base, 28); + assert_eq!(&obj.union.proc_terminate.unused as *const _ as usize - base, 16); + assert_eq!(&obj.union.proc_terminate.signal as *const _ as usize - base, 20); + assert_eq!(&obj.union.proc_terminate.exitcode as *const _ as usize - base, 24); + } +} + +/// File descriptor attributes. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fdstat { + /// File type. + pub fs_filetype: filetype, + /// File descriptor flags. + pub fs_flags: fdflags, + /// Rights that apply to this file descriptor. + pub fs_rights_base: rights, + /// Maximum set of rights that can be installed on new + /// file descriptors that are created through this file + /// descriptor, e.g., through [`file_open()`](fn.file_open.html). + pub fs_rights_inheriting: rights, +} +#[test] +fn fdstat_layout_test() { + assert_eq!(core::mem::size_of::<fdstat>(), 24); + assert_eq!(core::mem::align_of::<fdstat>(), 8); + unsafe { + let obj: fdstat = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.fs_filetype as *const _ as usize - base, 0); + assert_eq!(&obj.fs_flags as *const _ as usize - base, 2); + assert_eq!(&obj.fs_rights_base as *const _ as usize - base, 8); + assert_eq!(&obj.fs_rights_inheriting as *const _ as usize - base, 16); + } +} + +/// File attributes. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct filestat { + /// Device ID of device containing the file. + pub st_dev: device, + /// File serial number. + pub st_ino: inode, + /// File type. + pub st_filetype: filetype, + /// Number of hard links to the file. + pub st_nlink: linkcount, + /// For regular files, the file size in bytes. For + /// symbolic links, the length in bytes of the pathname + /// contained in the symbolic link. + pub st_size: filesize, + /// Last data access timestamp. 
+ pub st_atim: timestamp, + /// Last data modification timestamp. + pub st_mtim: timestamp, + /// Last file status change timestamp. + pub st_ctim: timestamp, +} +#[test] +fn filestat_layout_test() { + assert_eq!(core::mem::size_of::<filestat>(), 56); + assert_eq!(core::mem::align_of::<filestat>(), 8); + unsafe { + let obj: filestat = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.st_dev as *const _ as usize - base, 0); + assert_eq!(&obj.st_ino as *const _ as usize - base, 8); + assert_eq!(&obj.st_filetype as *const _ as usize - base, 16); + assert_eq!(&obj.st_nlink as *const _ as usize - base, 20); + assert_eq!(&obj.st_size as *const _ as usize - base, 24); + assert_eq!(&obj.st_atim as *const _ as usize - base, 32); + assert_eq!(&obj.st_mtim as *const _ as usize - base, 40); + assert_eq!(&obj.st_ctim as *const _ as usize - base, 48); + } +} + +/// A region of memory for scatter/gather reads. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct iovec { + /// The address and length of the buffer to be filled. + pub buf: (*mut (), usize), +} +#[test] +#[cfg(target_pointer_width = "32")] +fn iovec_layout_test_32() { + assert_eq!(core::mem::size_of::<iovec>(), 8); + assert_eq!(core::mem::align_of::<iovec>(), 4); + unsafe { + let obj: iovec = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.buf.0 as *const _ as usize - base, 0); + assert_eq!(&obj.buf.1 as *const _ as usize - base, 4); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn iovec_layout_test_64() { + assert_eq!(core::mem::size_of::<iovec>(), 16); + assert_eq!(core::mem::align_of::<iovec>(), 8); + unsafe { + let obj: iovec = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.buf.0 as *const _ as usize - base, 0); + assert_eq!(&obj.buf.1 as *const _ as usize - base, 8); + } +} + +/// Path lookup properties. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct lookup { + /// The working directory at which the resolution of the + /// path starts. + pub fd: fd, + /// Flags determining the method of how the path is + /// resolved. + pub flags: lookupflags, +} +#[test] +fn lookup_layout_test() { + assert_eq!(core::mem::size_of::<lookup>(), 8); + assert_eq!(core::mem::align_of::<lookup>(), 4); + unsafe { + let obj: lookup = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.fd as *const _ as usize - base, 0); + assert_eq!(&obj.flags as *const _ as usize - base, 4); + } +} + +/// Entry point for a process (`_start`). +/// +/// **auxv**: +/// The auxiliary vector. See [`auxv`](struct.auxv.html). +pub type processentry = unsafe extern "C" fn(auxv: *const auxv) -> (); + +/// Arguments of [`sock_recv()`](fn.sock_recv.html). +#[repr(C)] +#[derive(Copy, Clone)] +pub struct recv_in { + /// List of scatter/gather vectors where message data + /// should be stored. + pub ri_data: (*const iovec, usize), + /// Buffer where numbers of incoming file descriptors + /// should be stored. + pub ri_fds: (*mut fd, usize), + /// Message flags. 
+ pub ri_flags: riflags, +} +#[test] +#[cfg(target_pointer_width = "32")] +fn recv_in_layout_test_32() { + assert_eq!(core::mem::size_of::<recv_in>(), 20); + assert_eq!(core::mem::align_of::<recv_in>(), 4); + unsafe { + let obj: recv_in = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.ri_data.0 as *const _ as usize - base, 0); + assert_eq!(&obj.ri_data.1 as *const _ as usize - base, 4); + assert_eq!(&obj.ri_fds.0 as *const _ as usize - base, 8); + assert_eq!(&obj.ri_fds.1 as *const _ as usize - base, 12); + assert_eq!(&obj.ri_flags as *const _ as usize - base, 16); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn recv_in_layout_test_64() { + assert_eq!(core::mem::size_of::<recv_in>(), 40); + assert_eq!(core::mem::align_of::<recv_in>(), 8); + unsafe { + let obj: recv_in = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.ri_data.0 as *const _ as usize - base, 0); + assert_eq!(&obj.ri_data.1 as *const _ as usize - base, 8); + assert_eq!(&obj.ri_fds.0 as *const _ as usize - base, 16); + assert_eq!(&obj.ri_fds.1 as *const _ as usize - base, 24); + assert_eq!(&obj.ri_flags as *const _ as usize - base, 32); + } +} + +/// Results of [`sock_recv()`](fn.sock_recv.html). +#[repr(C)] +#[derive(Copy, Clone)] +pub struct recv_out { + /// Number of bytes stored in [`recv_in.ri_data`](struct.recv_in.html#structfield.ri_data). + pub ro_datalen: usize, + /// Number of file descriptors stored in [`recv_in.ri_fds`](struct.recv_in.html#structfield.ri_fds). + pub ro_fdslen: usize, + /// Fields that were used by previous implementations. + pub ro_unused: [u8; 40], + /// Message flags. + pub ro_flags: roflags, +} +#[test] +#[cfg(target_pointer_width = "32")] +fn recv_out_layout_test_32() { + assert_eq!(core::mem::size_of::<recv_out>(), 52); + assert_eq!(core::mem::align_of::<recv_out>(), 4); + unsafe { + let obj: recv_out = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.ro_datalen as *const _ as usize - base, 0); + assert_eq!(&obj.ro_fdslen as *const _ as usize - base, 4); + assert_eq!(&obj.ro_unused as *const _ as usize - base, 8); + assert_eq!(&obj.ro_flags as *const _ as usize - base, 48); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn recv_out_layout_test_64() { + assert_eq!(core::mem::size_of::<recv_out>(), 64); + assert_eq!(core::mem::align_of::<recv_out>(), 8); + unsafe { + let obj: recv_out = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.ro_datalen as *const _ as usize - base, 0); + assert_eq!(&obj.ro_fdslen as *const _ as usize - base, 8); + assert_eq!(&obj.ro_unused as *const _ as usize - base, 16); + assert_eq!(&obj.ro_flags as *const _ as usize - base, 56); + } +} + +/// Arguments of [`sock_send()`](fn.sock_send.html). +#[repr(C)] +#[derive(Copy, Clone)] +pub struct send_in { + /// List of scatter/gather vectors where message data + /// should be retrieved. + pub si_data: (*const ciovec, usize), + /// File descriptors that need to be attached to the + /// message. + pub si_fds: (*const fd, usize), + /// Message flags. 
+ pub si_flags: siflags, +} +#[test] +#[cfg(target_pointer_width = "32")] +fn send_in_layout_test_32() { + assert_eq!(core::mem::size_of::<send_in>(), 20); + assert_eq!(core::mem::align_of::<send_in>(), 4); + unsafe { + let obj: send_in = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.si_data.0 as *const _ as usize - base, 0); + assert_eq!(&obj.si_data.1 as *const _ as usize - base, 4); + assert_eq!(&obj.si_fds.0 as *const _ as usize - base, 8); + assert_eq!(&obj.si_fds.1 as *const _ as usize - base, 12); + assert_eq!(&obj.si_flags as *const _ as usize - base, 16); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn send_in_layout_test_64() { + assert_eq!(core::mem::size_of::<send_in>(), 40); + assert_eq!(core::mem::align_of::<send_in>(), 8); + unsafe { + let obj: send_in = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.si_data.0 as *const _ as usize - base, 0); + assert_eq!(&obj.si_data.1 as *const _ as usize - base, 8); + assert_eq!(&obj.si_fds.0 as *const _ as usize - base, 16); + assert_eq!(&obj.si_fds.1 as *const _ as usize - base, 24); + assert_eq!(&obj.si_flags as *const _ as usize - base, 32); + } +} + +/// Results of [`sock_send()`](fn.sock_send.html). +#[repr(C)] +#[derive(Copy, Clone)] +pub struct send_out { + /// Number of bytes transmitted. + pub so_datalen: usize, +} +#[test] +#[cfg(target_pointer_width = "32")] +fn send_out_layout_test_32() { + assert_eq!(core::mem::size_of::<send_out>(), 4); + assert_eq!(core::mem::align_of::<send_out>(), 4); + unsafe { + let obj: send_out = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.so_datalen as *const _ as usize - base, 0); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn send_out_layout_test_64() { + assert_eq!(core::mem::size_of::<send_out>(), 8); + assert_eq!(core::mem::align_of::<send_out>(), 8); + unsafe { + let obj: send_out = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.so_datalen as *const _ as usize - base, 0); + } +} + +/// Subscription to an event. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription { + /// User-provided value that is attached to the + /// subscription in the kernel and returned through + /// [`event.userdata`](struct.event.html#structfield.userdata). + pub userdata: userdata, + /// Used by previous implementations. Ignored. + pub unused: u16, + /// The type of the event to which to subscribe. + /// + /// Currently, [`CONDVAR`](enum.eventtype.html#variant.CONDVAR), + /// [`LOCK_RDLOCK`](enum.eventtype.html#variant.LOCK_RDLOCK), and [`LOCK_WRLOCK`](enum.eventtype.html#variant.LOCK_WRLOCK) + /// must be provided as the first subscription and may + /// only be followed by up to one other subscription, + /// having type [`CLOCK`](enum.eventtype.html#variant.CLOCK). + pub type_: eventtype, + pub union: subscription_union, +} +/// A union inside `subscription`. +#[repr(C)] +#[derive(Copy, Clone)] +pub union subscription_union { + /// Used when `type_` is [`CLOCK`](enum.eventtype.html#variant.CLOCK). + pub clock: subscription_clock, + /// Used when `type_` is [`CONDVAR`](enum.eventtype.html#variant.CONDVAR). + pub condvar: subscription_condvar, + /// Used when `type_` is [`FD_READ`](enum.eventtype.html#variant.FD_READ) or [`FD_WRITE`](enum.eventtype.html#variant.FD_WRITE). 
+ pub fd_readwrite: subscription_fd_readwrite, + /// Used when `type_` is [`LOCK_RDLOCK`](enum.eventtype.html#variant.LOCK_RDLOCK) or [`LOCK_WRLOCK`](enum.eventtype.html#variant.LOCK_WRLOCK). + pub lock: subscription_lock, + /// Used when `type_` is [`PROC_TERMINATE`](enum.eventtype.html#variant.PROC_TERMINATE). + pub proc_terminate: subscription_proc_terminate, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription_clock { + /// The user-defined unique + /// identifier of the clock. + pub identifier: userdata, + /// The clock against which the + /// timestamp should be compared. + pub clock_id: clockid, + /// The absolute or relative + /// timestamp. + pub timeout: timestamp, + /// The amount of time that the + /// kernel may wait additionally + /// to coalesce with other events. + pub precision: timestamp, + /// Flags specifying whether the + /// timeout is absolute or + /// relative. + pub flags: subclockflags, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription_condvar { + /// The condition variable on + /// which to wait to be woken up. + pub condvar: *mut condvar, + /// The lock that will be + /// released while waiting. + /// + /// The lock will be reacquired + /// for writing when the condition + /// variable triggers. + pub lock: *mut lock, + /// Whether the condition variable + /// is stored in private or shared + /// memory. + pub condvar_scope: scope, + /// Whether the lock is stored in + /// private or shared memory. + pub lock_scope: scope, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription_fd_readwrite { + /// The file descriptor on which + /// to wait for it to become ready + /// for reading or writing. + pub fd: fd, + /// Under which conditions to + /// trigger. + pub flags: subrwflags, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription_lock { + /// The lock that will be acquired + /// for reading or writing. + pub lock: *mut lock, + /// Whether the lock is stored in + /// private or shared memory. + pub lock_scope: scope, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct subscription_proc_terminate { + /// The process descriptor on + /// which to wait for process + /// termination. 
+ pub fd: fd, +} +#[test] +#[cfg(target_pointer_width = "32")] +fn subscription_layout_test_32() { + assert_eq!(core::mem::size_of::<subscription>(), 56); + assert_eq!(core::mem::align_of::<subscription>(), 8); + unsafe { + let obj: subscription = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.userdata as *const _ as usize - base, 0); + assert_eq!(&obj.unused as *const _ as usize - base, 8); + assert_eq!(&obj.type_ as *const _ as usize - base, 10); + assert_eq!(&obj.union.clock.identifier as *const _ as usize - base, 16); + assert_eq!(&obj.union.clock.clock_id as *const _ as usize - base, 24); + assert_eq!(&obj.union.clock.timeout as *const _ as usize - base, 32); + assert_eq!(&obj.union.clock.precision as *const _ as usize - base, 40); + assert_eq!(&obj.union.clock.flags as *const _ as usize - base, 48); + assert_eq!(&obj.union.condvar.condvar as *const _ as usize - base, 16); + assert_eq!(&obj.union.condvar.lock as *const _ as usize - base, 20); + assert_eq!(&obj.union.condvar.condvar_scope as *const _ as usize - base, 24); + assert_eq!(&obj.union.condvar.lock_scope as *const _ as usize - base, 25); + assert_eq!(&obj.union.fd_readwrite.fd as *const _ as usize - base, 16); + assert_eq!(&obj.union.fd_readwrite.flags as *const _ as usize - base, 20); + assert_eq!(&obj.union.lock.lock as *const _ as usize - base, 16); + assert_eq!(&obj.union.lock.lock_scope as *const _ as usize - base, 20); + assert_eq!(&obj.union.proc_terminate.fd as *const _ as usize - base, 16); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn subscription_layout_test_64() { + assert_eq!(core::mem::size_of::<subscription>(), 56); + assert_eq!(core::mem::align_of::<subscription>(), 8); + unsafe { + let obj: subscription = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.userdata as *const _ as usize - base, 0); + assert_eq!(&obj.unused as *const _ as usize - base, 8); + assert_eq!(&obj.type_ as *const _ as usize - base, 10); + assert_eq!(&obj.union.clock.identifier as *const _ as usize - base, 16); + assert_eq!(&obj.union.clock.clock_id as *const _ as usize - base, 24); + assert_eq!(&obj.union.clock.timeout as *const _ as usize - base, 32); + assert_eq!(&obj.union.clock.precision as *const _ as usize - base, 40); + assert_eq!(&obj.union.clock.flags as *const _ as usize - base, 48); + assert_eq!(&obj.union.condvar.condvar as *const _ as usize - base, 16); + assert_eq!(&obj.union.condvar.lock as *const _ as usize - base, 24); + assert_eq!(&obj.union.condvar.condvar_scope as *const _ as usize - base, 32); + assert_eq!(&obj.union.condvar.lock_scope as *const _ as usize - base, 33); + assert_eq!(&obj.union.fd_readwrite.fd as *const _ as usize - base, 16); + assert_eq!(&obj.union.fd_readwrite.flags as *const _ as usize - base, 20); + assert_eq!(&obj.union.lock.lock as *const _ as usize - base, 16); + assert_eq!(&obj.union.lock.lock_scope as *const _ as usize - base, 24); + assert_eq!(&obj.union.proc_terminate.fd as *const _ as usize - base, 16); + } +} + +/// The Thread Control Block (TCB). +/// +/// After a thread begins execution (at program startup or when +/// created through [`thread_create()`](fn.thread_create.html)), the CPU's registers +/// controlling Thread-Local Storage (TLS) will already be +/// initialized. They will point to an area only containing the +/// TCB. 
+/// +/// If the thread needs space for storing thread-specific +/// variables, the thread may allocate a larger area and adjust +/// the CPU's registers to point to that area instead. However, it +/// does need to make sure that the TCB is copied over to the new +/// TLS area. +/// +/// The purpose of the TCB is that it allows light-weight +/// emulators to store information related to individual threads. +/// For example, it may be used to store a copy of the CPU +/// registers prior emulation, so that TLS for the host system +/// can be restored if needed. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct tcb { + /// Pointer that may be freely assigned by the system. Its + /// value cannot be interpreted by the application. + pub parent: *mut (), +} +#[test] +#[cfg(target_pointer_width = "32")] +fn tcb_layout_test_32() { + assert_eq!(core::mem::size_of::<tcb>(), 4); + assert_eq!(core::mem::align_of::<tcb>(), 4); + unsafe { + let obj: tcb = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.parent as *const _ as usize - base, 0); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn tcb_layout_test_64() { + assert_eq!(core::mem::size_of::<tcb>(), 8); + assert_eq!(core::mem::align_of::<tcb>(), 8); + unsafe { + let obj: tcb = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.parent as *const _ as usize - base, 0); + } +} + +/// Entry point for additionally created threads. +/// +/// `tid`: thread ID of the current thread. +/// +/// `aux`: copy of the value stored in +/// [`threadattr.argument`](struct.threadattr.html#structfield.argument). +pub type threadentry = unsafe extern "C" fn(tid: tid, aux: *mut ()) -> (); + +/// Attributes for thread creation. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct threadattr { + /// Initial program counter value. + pub entry_point: threadentry, + /// Region allocated to serve as stack space. + pub stack: (*mut (), usize), + /// Argument to be forwarded to the entry point function. + pub argument: *mut (), +} +#[test] +#[cfg(target_pointer_width = "32")] +fn threadattr_layout_test_32() { + assert_eq!(core::mem::size_of::<threadattr>(), 16); + assert_eq!(core::mem::align_of::<threadattr>(), 4); + unsafe { + let obj: threadattr = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.entry_point as *const _ as usize - base, 0); + assert_eq!(&obj.stack.0 as *const _ as usize - base, 4); + assert_eq!(&obj.stack.1 as *const _ as usize - base, 8); + assert_eq!(&obj.argument as *const _ as usize - base, 12); + } +} +#[test] +#[cfg(target_pointer_width = "64")] +fn threadattr_layout_test_64() { + assert_eq!(core::mem::size_of::<threadattr>(), 32); + assert_eq!(core::mem::align_of::<threadattr>(), 8); + unsafe { + let obj: threadattr = core::mem::uninitialized(); + let base = &obj as *const _ as usize; + assert_eq!(&obj.entry_point as *const _ as usize - base, 0); + assert_eq!(&obj.stack.0 as *const _ as usize - base, 8); + assert_eq!(&obj.stack.1 as *const _ as usize - base, 16); + assert_eq!(&obj.argument as *const _ as usize - base, 24); + } +} + +/// The table with pointers to all syscall implementations. 
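+// The raw externs in the block below are the kernel entry points; they take
+// explicit `(pointer, length)` pairs. The `pub unsafe fn` wrappers defined
+// after the block accept Rust slices and forward `slice.as_ptr()` /
+// `slice.len()` to them (for example, `fd_read(fd, iovs, nread)` calls
+// `cloudabi_sys_fd_read(fd, iovs.as_ptr(), iovs.len(), nread)`).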
+#[allow(improper_ctypes)] +extern "C" { + fn cloudabi_sys_clock_res_get(_: clockid, _: *mut timestamp) -> errno; + fn cloudabi_sys_clock_time_get(_: clockid, _: timestamp, _: *mut timestamp) -> errno; + fn cloudabi_sys_condvar_signal(_: *mut condvar, _: scope, _: nthreads) -> errno; + fn cloudabi_sys_fd_close(_: fd) -> errno; + fn cloudabi_sys_fd_create1(_: filetype, _: *mut fd) -> errno; + fn cloudabi_sys_fd_create2(_: filetype, _: *mut fd, _: *mut fd) -> errno; + fn cloudabi_sys_fd_datasync(_: fd) -> errno; + fn cloudabi_sys_fd_dup(_: fd, _: *mut fd) -> errno; + fn cloudabi_sys_fd_pread(_: fd, _: *const iovec, _: usize, _: filesize, _: *mut usize) + -> errno; + fn cloudabi_sys_fd_pwrite( + _: fd, + _: *const ciovec, + _: usize, + _: filesize, + _: *mut usize, + ) -> errno; + fn cloudabi_sys_fd_read(_: fd, _: *const iovec, _: usize, _: *mut usize) -> errno; + fn cloudabi_sys_fd_replace(_: fd, _: fd) -> errno; + fn cloudabi_sys_fd_seek(_: fd, _: filedelta, _: whence, _: *mut filesize) -> errno; + fn cloudabi_sys_fd_stat_get(_: fd, _: *mut fdstat) -> errno; + fn cloudabi_sys_fd_stat_put(_: fd, _: *const fdstat, _: fdsflags) -> errno; + fn cloudabi_sys_fd_sync(_: fd) -> errno; + fn cloudabi_sys_fd_write(_: fd, _: *const ciovec, _: usize, _: *mut usize) -> errno; + fn cloudabi_sys_file_advise(_: fd, _: filesize, _: filesize, _: advice) -> errno; + fn cloudabi_sys_file_allocate(_: fd, _: filesize, _: filesize) -> errno; + fn cloudabi_sys_file_create(_: fd, _: *const u8, _: usize, _: filetype) -> errno; + fn cloudabi_sys_file_link( + _: lookup, + _: *const u8, + _: usize, + _: fd, + _: *const u8, + _: usize, + ) -> errno; + fn cloudabi_sys_file_open( + _: lookup, + _: *const u8, + _: usize, + _: oflags, + _: *const fdstat, + _: *mut fd, + ) -> errno; + fn cloudabi_sys_file_readdir(_: fd, _: *mut (), _: usize, _: dircookie, _: *mut usize) + -> errno; + fn cloudabi_sys_file_readlink( + _: fd, + _: *const u8, + _: usize, + _: *mut u8, + _: usize, + _: *mut usize, + ) -> errno; + fn cloudabi_sys_file_rename( + _: fd, + _: *const u8, + _: usize, + _: fd, + _: *const u8, + _: usize, + ) -> errno; + fn cloudabi_sys_file_stat_fget(_: fd, _: *mut filestat) -> errno; + fn cloudabi_sys_file_stat_fput(_: fd, _: *const filestat, _: fsflags) -> errno; + fn cloudabi_sys_file_stat_get(_: lookup, _: *const u8, _: usize, _: *mut filestat) -> errno; + fn cloudabi_sys_file_stat_put( + _: lookup, + _: *const u8, + _: usize, + _: *const filestat, + _: fsflags, + ) -> errno; + fn cloudabi_sys_file_symlink(_: *const u8, _: usize, _: fd, _: *const u8, _: usize) -> errno; + fn cloudabi_sys_file_unlink(_: fd, _: *const u8, _: usize, _: ulflags) -> errno; + fn cloudabi_sys_lock_unlock(_: *mut lock, _: scope) -> errno; + fn cloudabi_sys_mem_advise(_: *mut (), _: usize, _: advice) -> errno; + fn cloudabi_sys_mem_map( + _: *mut (), + _: usize, + _: mprot, + _: mflags, + _: fd, + _: filesize, + _: *mut *mut (), + ) -> errno; + fn cloudabi_sys_mem_protect(_: *mut (), _: usize, _: mprot) -> errno; + fn cloudabi_sys_mem_sync(_: *mut (), _: usize, _: msflags) -> errno; + fn cloudabi_sys_mem_unmap(_: *mut (), _: usize) -> errno; + fn cloudabi_sys_poll(_: *const subscription, _: *mut event, _: usize, _: *mut usize) -> errno; + fn cloudabi_sys_proc_exec(_: fd, _: *const (), _: usize, _: *const fd, _: usize) -> errno; + fn cloudabi_sys_proc_exit(_: exitcode) -> !; + fn cloudabi_sys_proc_fork(_: *mut fd, _: *mut tid) -> errno; + fn cloudabi_sys_proc_raise(_: signal) -> errno; + fn cloudabi_sys_random_get(_: *mut (), _: usize) -> 
errno; + fn cloudabi_sys_sock_recv(_: fd, _: *const recv_in, _: *mut recv_out) -> errno; + fn cloudabi_sys_sock_send(_: fd, _: *const send_in, _: *mut send_out) -> errno; + fn cloudabi_sys_sock_shutdown(_: fd, _: sdflags) -> errno; + fn cloudabi_sys_thread_create(_: *mut threadattr, _: *mut tid) -> errno; + fn cloudabi_sys_thread_exit(_: *mut lock, _: scope) -> !; + fn cloudabi_sys_thread_yield() -> errno; +} + +/// Obtains the resolution of a clock. +/// +/// ## Parameters +/// +/// **clock_id**: +/// The clock for which the resolution needs to be +/// returned. +/// +/// **resolution**: +/// The resolution of the clock. +#[inline] +pub unsafe fn clock_res_get(clock_id_: clockid, resolution_: &mut timestamp) -> errno { + cloudabi_sys_clock_res_get(clock_id_, resolution_) +} + +/// Obtains the time value of a clock. +/// +/// ## Parameters +/// +/// **clock_id**: +/// The clock for which the time needs to be +/// returned. +/// +/// **precision**: +/// The maximum lag (exclusive) that the returned +/// time value may have, compared to its actual +/// value. +/// +/// **time**: +/// The time value of the clock. +#[inline] +pub unsafe fn clock_time_get( + clock_id_: clockid, + precision_: timestamp, + time_: *mut timestamp, +) -> errno { + cloudabi_sys_clock_time_get(clock_id_, precision_, time_) +} + +/// Wakes up threads waiting on a userspace condition variable. +/// +/// If an invocation of this system call causes all waiting +/// threads to be woken up, the value of the condition variable +/// is set to [`CONDVAR_HAS_NO_WAITERS`](constant.CONDVAR_HAS_NO_WAITERS.html). As long as the condition +/// variable is set to this value, it is not needed to invoke this +/// system call. +/// +/// ## Parameters +/// +/// **condvar**: +/// The userspace condition variable that has +/// waiting threads. +/// +/// **scope**: +/// Whether the condition variable is stored in +/// private or shared memory. +/// +/// **nwaiters**: +/// The number of threads that need to be woken +/// up. If it exceeds the number of waiting +/// threads, all threads are woken up. +#[inline] +pub unsafe fn condvar_signal(condvar_: *mut condvar, scope_: scope, nwaiters_: nthreads) -> errno { + cloudabi_sys_condvar_signal(condvar_, scope_, nwaiters_) +} + +/// Closes a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor that needs to be closed. +#[inline] +pub unsafe fn fd_close(fd_: fd) -> errno { + cloudabi_sys_fd_close(fd_) +} + +/// Creates a file descriptor. +/// +/// ## Parameters +/// +/// **type**: +/// Possible values: +/// +/// - [`SHARED_MEMORY`](enum.filetype.html#variant.SHARED_MEMORY): +/// Creates an anonymous shared memory +/// object. +/// +/// **fd**: +/// The file descriptor that has been created. +#[inline] +pub unsafe fn fd_create1(type_: filetype, fd_: &mut fd) -> errno { + cloudabi_sys_fd_create1(type_, fd_) +} + +/// Creates a pair of file descriptors. +/// +/// ## Parameters +/// +/// **type**: +/// Possible values: +/// +/// - [`SOCKET_DGRAM`](enum.filetype.html#variant.SOCKET_DGRAM): +/// Creates a UNIX datagram socket pair. +/// - [`SOCKET_STREAM`](enum.filetype.html#variant.SOCKET_STREAM): +/// Creates a UNIX byte-stream socket +/// pair. +/// +/// **fd1**: +/// The first file descriptor of the pair. +/// +/// **fd2**: +/// The second file descriptor of the pair. +#[inline] +pub unsafe fn fd_create2(type_: filetype, fd1_: &mut fd, fd2_: &mut fd) -> errno { + cloudabi_sys_fd_create2(type_, fd1_, fd2_) +} + +/// Synchronizes the data of a file to disk. 
+/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor of the file whose data +/// needs to be synchronized to disk. +#[inline] +pub unsafe fn fd_datasync(fd_: fd) -> errno { + cloudabi_sys_fd_datasync(fd_) +} + +/// Duplicates a file descriptor. +/// +/// ## Parameters +/// +/// **from**: +/// The file descriptor that needs to be +/// duplicated. +/// +/// **fd**: +/// The new file descriptor. +#[inline] +pub unsafe fn fd_dup(from_: fd, fd_: &mut fd) -> errno { + cloudabi_sys_fd_dup(from_, fd_) +} + +/// Reads from a file descriptor, without using and updating the +/// file descriptor's offset. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor from which data should be +/// read. +/// +/// **iovs**: +/// List of scatter/gather vectors where data +/// should be stored. +/// +/// **offset**: +/// The offset within the file at which reading +/// should start. +/// +/// **nread**: +/// The number of bytes read. +#[inline] +pub unsafe fn fd_pread(fd_: fd, iovs_: &[iovec], offset_: filesize, nread_: &mut usize) -> errno { + cloudabi_sys_fd_pread(fd_, iovs_.as_ptr(), iovs_.len(), offset_, nread_) +} + +/// Writes to a file descriptor, without using and updating the +/// file descriptor's offset. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor to which data should be +/// written. +/// +/// **iovs**: +/// List of scatter/gather vectors where data +/// should be retrieved. +/// +/// **offset**: +/// The offset within the file at which writing +/// should start. +/// +/// **nwritten**: +/// The number of bytes written. +#[inline] +pub unsafe fn fd_pwrite( + fd_: fd, + iovs_: &[ciovec], + offset_: filesize, + nwritten_: &mut usize, +) -> errno { + cloudabi_sys_fd_pwrite(fd_, iovs_.as_ptr(), iovs_.len(), offset_, nwritten_) +} + +/// Reads from a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor from which data should be +/// read. +/// +/// **iovs**: +/// List of scatter/gather vectors where data +/// should be stored. +/// +/// **nread**: +/// The number of bytes read. +#[inline] +pub unsafe fn fd_read(fd_: fd, iovs_: &[iovec], nread_: &mut usize) -> errno { + cloudabi_sys_fd_read(fd_, iovs_.as_ptr(), iovs_.len(), nread_) +} + +/// Atomically replaces a file descriptor by a copy of another +/// file descriptor. +/// +/// Due to the strong focus on thread safety, this environment +/// does not provide a mechanism to duplicate a file descriptor to +/// an arbitrary number, like dup2(). This would be prone to race +/// conditions, as an actual file descriptor with the same number +/// could be allocated by a different thread at the same time. +/// +/// This system call provides a way to atomically replace file +/// descriptors, which would disappear if dup2() were to be +/// removed entirely. +/// +/// ## Parameters +/// +/// **from**: +/// The file descriptor that needs to be copied. +/// +/// **to**: +/// The file descriptor that needs to be +/// overwritten. +#[inline] +pub unsafe fn fd_replace(from_: fd, to_: fd) -> errno { + cloudabi_sys_fd_replace(from_, to_) +} + +/// Moves the offset of the file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor whose offset has to be +/// moved. +/// +/// **offset**: +/// The number of bytes to move. +/// +/// **whence**: +/// Relative to which position the move should +/// take place. +/// +/// **newoffset**: +/// The new offset of the file descriptor, +/// relative to the start of the file. 
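+// A rough usage sketch (illustrative only; `fd` is hypothetical, and it
+// assumes the `whence` enum defined earlier in this file has a `CUR`
+// variant): the current offset of a descriptor can be read back by seeking
+// zero bytes relative to the current position.
+//
+//     let mut pos: filesize = 0;
+//     let err = fd_seek(fd, 0, whence::CUR, &mut pos);
+//     // on errno::SUCCESS, `pos` holds the offset from the start of the file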
+#[inline] +pub unsafe fn fd_seek( + fd_: fd, + offset_: filedelta, + whence_: whence, + newoffset_: &mut filesize, +) -> errno { + cloudabi_sys_fd_seek(fd_, offset_, whence_, newoffset_) +} + +/// Gets attributes of a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor whose attributes have to +/// be obtained. +/// +/// **buf**: +/// The buffer where the file descriptor's +/// attributes are stored. +#[inline] +pub unsafe fn fd_stat_get(fd_: fd, buf_: *mut fdstat) -> errno { + cloudabi_sys_fd_stat_get(fd_, buf_) +} + +/// Adjusts attributes of a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor whose attributes have to +/// be adjusted. +/// +/// **buf**: +/// The desired values of the file descriptor +/// attributes that are adjusted. +/// +/// **flags**: +/// A bitmask indicating which attributes have to +/// be adjusted. +#[inline] +pub unsafe fn fd_stat_put(fd_: fd, buf_: *const fdstat, flags_: fdsflags) -> errno { + cloudabi_sys_fd_stat_put(fd_, buf_, flags_) +} + +/// Synchronizes the data and metadata of a file to disk. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor of the file whose data +/// and metadata needs to be synchronized to disk. +#[inline] +pub unsafe fn fd_sync(fd_: fd) -> errno { + cloudabi_sys_fd_sync(fd_) +} + +/// Writes to a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor to which data should be +/// written. +/// +/// **iovs**: +/// List of scatter/gather vectors where data +/// should be retrieved. +/// +/// **nwritten**: +/// The number of bytes written. +#[inline] +pub unsafe fn fd_write(fd_: fd, iovs_: &[ciovec], nwritten_: &mut usize) -> errno { + cloudabi_sys_fd_write(fd_, iovs_.as_ptr(), iovs_.len(), nwritten_) +} + +/// Provides file advisory information on a file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor for which to provide file +/// advisory information. +/// +/// **offset**: +/// The offset within the file to which the +/// advisory applies. +/// +/// **len**: +/// The length of the region to which the advisory +/// applies. +/// +/// **advice**: +/// The advice. +#[inline] +pub unsafe fn file_advise(fd_: fd, offset_: filesize, len_: filesize, advice_: advice) -> errno { + cloudabi_sys_file_advise(fd_, offset_, len_, advice_) +} + +/// Forces the allocation of space in a file. +/// +/// ## Parameters +/// +/// **fd**: +/// The file in which the space should be +/// allocated. +/// +/// **offset**: +/// The offset at which the allocation should +/// start. +/// +/// **len**: +/// The length of the area that is allocated. +#[inline] +pub unsafe fn file_allocate(fd_: fd, offset_: filesize, len_: filesize) -> errno { + cloudabi_sys_file_allocate(fd_, offset_, len_) +} + +/// Creates a file of a specified type. +/// +/// ## Parameters +/// +/// **fd**: +/// The working directory at which the resolution +/// of the file to be created starts. +/// +/// **path**: +/// The path at which the file should be created. +/// +/// **type**: +/// Possible values: +/// +/// - [`DIRECTORY`](enum.filetype.html#variant.DIRECTORY): +/// Creates a directory. +#[inline] +pub unsafe fn file_create(fd_: fd, path_: &[u8], type_: filetype) -> errno { + cloudabi_sys_file_create(fd_, path_.as_ptr(), path_.len(), type_) +} + +/// Creates a hard link. +/// +/// ## Parameters +/// +/// **fd1**: +/// The working directory at which the resolution +/// of the source path starts. 
+/// +/// **path1**: +/// The source path of the file that should be +/// hard linked. +/// +/// **fd2**: +/// The working directory at which the resolution +/// of the destination path starts. +/// +/// **path2**: +/// The destination path at which the hard link +/// should be created. +#[inline] +pub unsafe fn file_link(fd1_: lookup, path1_: &[u8], fd2_: fd, path2_: &[u8]) -> errno { + cloudabi_sys_file_link(fd1_, path1_.as_ptr(), path1_.len(), fd2_, path2_.as_ptr(), path2_.len()) +} + +/// Opens a file. +/// +/// ## Parameters +/// +/// **dirfd**: +/// The working directory at which the resolution +/// of the file to be opened starts. +/// +/// **path**: +/// The path of the file that should be opened. +/// +/// **oflags**: +/// The method at which the file should be opened. +/// +/// **fds**: +/// [`fdstat.fs_rights_base`](struct.fdstat.html#structfield.fs_rights_base) and +/// [`fdstat.fs_rights_inheriting`](struct.fdstat.html#structfield.fs_rights_inheriting) specify the +/// initial rights of the newly created file +/// descriptor. The operating system is allowed to +/// return a file descriptor with fewer rights +/// than specified, if and only if those rights do +/// not apply to the type of file being opened. +/// +/// [`fdstat.fs_flags`](struct.fdstat.html#structfield.fs_flags) specifies the initial flags +/// of the file descriptor. +/// +/// [`fdstat.fs_filetype`](struct.fdstat.html#structfield.fs_filetype) is ignored. +/// +/// **fd**: +/// The file descriptor of the file that has been +/// opened. +#[inline] +pub unsafe fn file_open( + dirfd_: lookup, + path_: &[u8], + oflags_: oflags, + fds_: *const fdstat, + fd_: &mut fd, +) -> errno { + cloudabi_sys_file_open(dirfd_, path_.as_ptr(), path_.len(), oflags_, fds_, fd_) +} + +/// Reads directory entries from a directory. +/// +/// When successful, the contents of the output buffer consist of +/// a sequence of directory entries. Each directory entry consists +/// of a [`dirent`](struct.dirent.html) object, followed by [`dirent.d_namlen`](struct.dirent.html#structfield.d_namlen) bytes +/// holding the name of the directory entry. +/// +/// This system call fills the output buffer as much as possible, +/// potentially truncating the last directory entry. This allows +/// the caller to grow its read buffer size in case it's too small +/// to fit a single large directory entry, or skip the oversized +/// directory entry. +/// +/// ## Parameters +/// +/// **fd**: +/// The directory from which to read the directory +/// entries. +/// +/// **buf**: +/// The buffer where directory entries are stored. +/// +/// **cookie**: +/// The location within the directory to start +/// reading. +/// +/// **bufused**: +/// The number of bytes stored in the read buffer. +/// If less than the size of the read buffer, the +/// end of the directory has been reached. +#[inline] +pub unsafe fn file_readdir( + fd_: fd, + buf_: &mut [u8], + cookie_: dircookie, + bufused_: &mut usize, +) -> errno { + cloudabi_sys_file_readdir(fd_, buf_.as_mut_ptr() as *mut (), buf_.len(), cookie_, bufused_) +} + +/// Reads the contents of a symbolic link. +/// +/// ## Parameters +/// +/// **fd**: +/// The working directory at which the resolution +/// of the path of the symbolic starts. +/// +/// **path**: +/// The path of the symbolic link whose contents +/// should be read. +/// +/// **buf**: +/// The buffer where the contents of the symbolic +/// link should be stored. +/// +/// **bufused**: +/// The number of bytes placed in the buffer. 
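+// The buffer-growing protocol described above for `file_readdir()` is
+// typically driven by code along these lines (illustrative only; `dirfd` and
+// the initial buffer size are hypothetical, and `DIRCOOKIE_START` is assumed
+// to be the "start of directory" cookie defined elsewhere in this file):
+//
+//     let mut buf = vec![0u8; 256];
+//     let mut used = 0;
+//     let err = file_readdir(dirfd, &mut buf, DIRCOOKIE_START, &mut used);
+//     // Parse the `dirent` records in `buf[..used]`. If `used == buf.len()`,
+//     // the last entry may be truncated: grow `buf` (or skip the oversized
+//     // entry) and call `file_readdir()` again with the next cookie. If
+//     // `used < buf.len()`, the end of the directory has been reached.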
+#[inline] +pub unsafe fn file_readlink(fd_: fd, path_: &[u8], buf_: &mut [u8], bufused_: &mut usize) -> errno { + cloudabi_sys_file_readlink( + fd_, + path_.as_ptr(), + path_.len(), + buf_.as_mut_ptr(), + buf_.len(), + bufused_, + ) +} + +/// Renames a file. +/// +/// ## Parameters +/// +/// **fd1**: +/// The working directory at which the resolution +/// of the source path starts. +/// +/// **path1**: +/// The source path of the file that should be +/// renamed. +/// +/// **fd2**: +/// The working directory at which the resolution +/// of the destination path starts. +/// +/// **path2**: +/// The destination path to which the file should +/// be renamed. +#[inline] +pub unsafe fn file_rename(fd1_: fd, path1_: &[u8], fd2_: fd, path2_: &[u8]) -> errno { + cloudabi_sys_file_rename( + fd1_, + path1_.as_ptr(), + path1_.len(), + fd2_, + path2_.as_ptr(), + path2_.len(), + ) +} + +/// Gets attributes of a file by file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor whose attributes have to +/// be obtained. +/// +/// **buf**: +/// The buffer where the file's attributes are +/// stored. +#[inline] +pub unsafe fn file_stat_fget(fd_: fd, buf_: *mut filestat) -> errno { + cloudabi_sys_file_stat_fget(fd_, buf_) +} + +/// Adjusts attributes of a file by file descriptor. +/// +/// ## Parameters +/// +/// **fd**: +/// The file descriptor whose attributes have to +/// be adjusted. +/// +/// **buf**: +/// The desired values of the file attributes that +/// are adjusted. +/// +/// **flags**: +/// A bitmask indicating which attributes have to +/// be adjusted. +#[inline] +pub unsafe fn file_stat_fput(fd_: fd, buf_: *const filestat, flags_: fsflags) -> errno { + cloudabi_sys_file_stat_fput(fd_, buf_, flags_) +} + +/// Gets attributes of a file by path. +/// +/// ## Parameters +/// +/// **fd**: +/// The working directory at which the resolution +/// of the path whose attributes have to be +/// obtained starts. +/// +/// **path**: +/// The path of the file whose attributes have to +/// be obtained. +/// +/// **buf**: +/// The buffer where the file's attributes are +/// stored. +#[inline] +pub unsafe fn file_stat_get(fd_: lookup, path_: &[u8], buf_: *mut filestat) -> errno { + cloudabi_sys_file_stat_get(fd_, path_.as_ptr(), path_.len(), buf_) +} + +/// Adjusts attributes of a file by path. +/// +/// ## Parameters +/// +/// **fd**: +/// The working directory at which the resolution +/// of the path whose attributes have to be +/// adjusted starts. +/// +/// **path**: +/// The path of the file whose attributes have to +/// be adjusted. +/// +/// **buf**: +/// The desired values of the file attributes that +/// are adjusted. +/// +/// **flags**: +/// A bitmask indicating which attributes have to +/// be adjusted. +#[inline] +pub unsafe fn file_stat_put( + fd_: lookup, + path_: &[u8], + buf_: *const filestat, + flags_: fsflags, +) -> errno { + cloudabi_sys_file_stat_put(fd_, path_.as_ptr(), path_.len(), buf_, flags_) +} + +/// Creates a symbolic link. +/// +/// ## Parameters +/// +/// **path1**: +/// The contents of the symbolic link. +/// +/// **fd**: +/// The working directory at which the resolution +/// of the destination path starts. +/// +/// **path2**: +/// The destination path at which the symbolic +/// link should be created. +#[inline] +pub unsafe fn file_symlink(path1_: &[u8], fd_: fd, path2_: &[u8]) -> errno { + cloudabi_sys_file_symlink(path1_.as_ptr(), path1_.len(), fd_, path2_.as_ptr(), path2_.len()) +} + +/// Unlinks a file, or removes a directory. 
+/// +/// ## Parameters +/// +/// **fd**: +/// The working directory at which the resolution +/// of the path starts. +/// +/// **path**: +/// The path that needs to be unlinked or removed. +/// +/// **flags**: +/// Possible values: +/// +/// - [`REMOVEDIR`](struct.ulflags.html#associatedconstant.REMOVEDIR): +/// If set, attempt to remove a directory. +/// Otherwise, unlink a file. +#[inline] +pub unsafe fn file_unlink(fd_: fd, path_: &[u8], flags_: ulflags) -> errno { + cloudabi_sys_file_unlink(fd_, path_.as_ptr(), path_.len(), flags_) +} + +/// Unlocks a write-locked userspace lock. +/// +/// If a userspace lock is unlocked while having its +/// [`LOCK_KERNEL_MANAGED`](constant.LOCK_KERNEL_MANAGED.html) flag set, the lock cannot be unlocked in +/// userspace directly. This system call needs to be performed +/// instead, so that any waiting threads can be woken up. +/// +/// To prevent spurious invocations of this system call, the lock +/// must be locked for writing. This prevents other threads from +/// acquiring additional read locks while the system call is in +/// progress. If the lock is acquired for reading, it must first +/// be upgraded to a write lock. +/// +/// ## Parameters +/// +/// **lock**: +/// The userspace lock that is locked for writing +/// by the calling thread. +/// +/// **scope**: +/// Whether the lock is stored in private or +/// shared memory. +#[inline] +pub unsafe fn lock_unlock(lock_: *mut lock, scope_: scope) -> errno { + cloudabi_sys_lock_unlock(lock_, scope_) +} + +/// Provides memory advisory information on a region of memory. +/// +/// ## Parameters +/// +/// **mapping**: +/// The pages for which to provide memory advisory +/// information. +/// +/// **advice**: +/// The advice. +#[inline] +pub unsafe fn mem_advise(mapping_: &mut [u8], advice_: advice) -> errno { + cloudabi_sys_mem_advise(mapping_.as_mut_ptr() as *mut (), mapping_.len(), advice_) +} + +/// Creates a memory mapping, making the contents of a file +/// accessible through memory. +/// +/// ## Parameters +/// +/// **addr**: +/// If [`FIXED`](struct.mflags.html#associatedconstant.FIXED) is set, specifies to which +/// address the file region is mapped. Otherwise, +/// the mapping is performed at an unused +/// location. +/// +/// **len**: +/// The length of the memory mapping to be +/// created. +/// +/// **prot**: +/// Initial memory protection options for the +/// memory mapping. +/// +/// **flags**: +/// Memory mapping flags. +/// +/// **fd**: +/// If [`ANON`](struct.mflags.html#associatedconstant.ANON) is set, this argument must be +/// [`MAP_ANON_FD`](constant.MAP_ANON_FD.html). Otherwise, this argument +/// specifies the file whose contents need to be +/// mapped. +/// +/// **off**: +/// If [`ANON`](struct.mflags.html#associatedconstant.ANON) is set, this argument must be +/// zero. Otherwise, this argument specifies the +/// offset within the file at which the mapping +/// starts. +/// +/// **mem**: +/// The starting address of the memory mapping. +#[inline] +pub unsafe fn mem_map( + addr_: *mut (), + len_: usize, + prot_: mprot, + flags_: mflags, + fd_: fd, + off_: filesize, + mem_: &mut *mut (), +) -> errno { + cloudabi_sys_mem_map(addr_, len_, prot_, flags_, fd_, off_, mem_) +} + +/// Changes the protection of a memory mapping. +/// +/// ## Parameters +/// +/// **mapping**: +/// The pages that need their protection changed. +/// +/// **prot**: +/// New protection options. 
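+// A rough sketch of the anonymous-mapping case described above (illustrative
+// only; it assumes `mprot` and `mflags` expose `READ`/`WRITE` and
+// `ANON`/`PRIVATE` constants with a working `|` operator):
+//
+//     let mut mem: *mut () = core::ptr::null_mut();
+//     let err = mem_map(
+//         core::ptr::null_mut(),            // no FIXED: kernel picks the address
+//         4096,                             // length of the mapping
+//         mprot::READ | mprot::WRITE,
+//         mflags::ANON | mflags::PRIVATE,
+//         MAP_ANON_FD,                      // required fd value for ANON mappings
+//         0,                                // offset must be zero for ANON
+//         &mut mem,
+//     );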
+#[inline] +pub unsafe fn mem_protect(mapping_: &mut [u8], prot_: mprot) -> errno { + cloudabi_sys_mem_protect(mapping_.as_mut_ptr() as *mut (), mapping_.len(), prot_) +} + +/// Synchronizes a region of memory with its physical storage. +/// +/// ## Parameters +/// +/// **mapping**: +/// The pages that need to be synchronized. +/// +/// **flags**: +/// The method of synchronization. +#[inline] +pub unsafe fn mem_sync(mapping_: &mut [u8], flags_: msflags) -> errno { + cloudabi_sys_mem_sync(mapping_.as_mut_ptr() as *mut (), mapping_.len(), flags_) +} + +/// Unmaps a region of memory. +/// +/// ## Parameters +/// +/// **mapping**: +/// The pages that needs to be unmapped. +#[inline] +pub unsafe fn mem_unmap(mapping_: &mut [u8]) -> errno { + cloudabi_sys_mem_unmap(mapping_.as_mut_ptr() as *mut (), mapping_.len()) +} + +/// Concurrently polls for the occurrence of a set of events. +/// +/// ## Parameters +/// +/// **in**: +/// The events to which to subscribe. +/// +/// **out**: +/// The events that have occurred. +/// +/// **nsubscriptions**: +/// Both the number of subscriptions and events. +/// +/// **nevents**: +/// The number of events stored. +#[inline] +pub unsafe fn poll( + in_: *const subscription, + out_: *mut event, + nsubscriptions_: usize, + nevents_: *mut usize, +) -> errno { + cloudabi_sys_poll(in_, out_, nsubscriptions_, nevents_) +} + +/// Replaces the process by a new executable. +/// +/// Process execution in CloudABI differs from POSIX in two ways: +/// handling of arguments and inheritance of file descriptors. +/// +/// CloudABI does not use string command line arguments. Instead, +/// a buffer with binary data is copied into the address space of +/// the new executable. The kernel does not enforce any specific +/// structure to this data, although CloudABI's C library uses it +/// to store a tree structure that is semantically identical to +/// YAML. +/// +/// Due to the strong focus on thread safety, file descriptors +/// aren't inherited through close-on-exec flags. An explicit +/// list of file descriptors that need to be retained needs to be +/// provided. After execution, file descriptors are placed in the +/// order in which they are stored in the array. This not only +/// makes the execution process deterministic. It also prevents +/// potential information disclosures about the layout of the +/// original process. +/// +/// ## Parameters +/// +/// **fd**: +/// A file descriptor of the new executable. +/// +/// **data**: +/// Binary argument data that is passed on to the +/// new executable. +/// +/// **fds**: +/// The layout of the file descriptor table after +/// execution. +#[inline] +pub unsafe fn proc_exec(fd_: fd, data_: &[u8], fds_: &[fd]) -> errno { + cloudabi_sys_proc_exec(fd_, data_.as_ptr() as *const (), data_.len(), fds_.as_ptr(), fds_.len()) +} + +/// Terminates the process normally. +/// +/// ## Parameters +/// +/// **rval**: +/// The exit code returned by the process. The +/// exit code can be obtained by other processes +/// through [`event.union.proc_terminate.exitcode`](struct.event_proc_terminate.html#structfield.exitcode). +#[inline] +pub unsafe fn proc_exit(rval_: exitcode) -> ! { + cloudabi_sys_proc_exit(rval_) +} + +/// Forks the process of the calling thread. +/// +/// After forking, a new process shall be created, having only a +/// copy of the calling thread. The parent process will obtain a +/// process descriptor. When closed, the child process is +/// automatically signaled with [`KILL`](enum.signal.html#variant.KILL). 
+/// +/// ## Parameters +/// +/// **fd**: +/// In the parent process: the file descriptor +/// number of the process descriptor. +/// +/// In the child process: [`PROCESS_CHILD`](constant.PROCESS_CHILD.html). +/// +/// **tid**: +/// In the parent process: undefined. +/// +/// In the child process: the thread ID of the +/// initial thread of the child process. +#[inline] +pub unsafe fn proc_fork(fd_: &mut fd, tid_: &mut tid) -> errno { + cloudabi_sys_proc_fork(fd_, tid_) +} + +/// Sends a signal to the process of the calling thread. +/// +/// ## Parameters +/// +/// **sig**: +/// The signal condition that should be triggered. +/// If the signal causes the process to terminate, +/// its condition can be obtained by other +/// processes through +/// [`event.union.proc_terminate.signal`](struct.event_proc_terminate.html#structfield.signal). +#[inline] +pub unsafe fn proc_raise(sig_: signal) -> errno { + cloudabi_sys_proc_raise(sig_) +} + +/// Obtains random data from the kernel random number generator. +/// +/// As this interface is not guaranteed to be fast, it is advised +/// that the random data obtained through this system call is used +/// as the seed for a userspace pseudo-random number generator. +/// +/// ## Parameters +/// +/// **buf**: +/// The buffer that needs to be filled with random +/// data. +#[inline] +pub unsafe fn random_get(buf_: &mut [u8]) -> errno { + cloudabi_sys_random_get(buf_.as_mut_ptr() as *mut (), buf_.len()) +} + +/// Receives a message on a socket. +/// +/// ## Parameters +/// +/// **sock**: +/// The socket on which a message should be +/// received. +/// +/// **in**: +/// Input parameters. +/// +/// **out**: +/// Output parameters. +#[inline] +pub unsafe fn sock_recv(sock_: fd, in_: *const recv_in, out_: *mut recv_out) -> errno { + cloudabi_sys_sock_recv(sock_, in_, out_) +} + +/// Sends a message on a socket. +/// +/// ## Parameters +/// +/// **sock**: +/// The socket on which a message should be sent. +/// +/// **in**: +/// Input parameters. +/// +/// **out**: +/// Output parameters. +#[inline] +pub unsafe fn sock_send(sock_: fd, in_: *const send_in, out_: *mut send_out) -> errno { + cloudabi_sys_sock_send(sock_, in_, out_) +} + +/// Shuts down socket send and receive channels. +/// +/// ## Parameters +/// +/// **sock**: +/// The socket that needs its channels shut down. +/// +/// **how**: +/// Which channels on the socket need to be shut +/// down. +#[inline] +pub unsafe fn sock_shutdown(sock_: fd, how_: sdflags) -> errno { + cloudabi_sys_sock_shutdown(sock_, how_) +} + +/// Creates a new thread within the current process. +/// +/// ## Parameters +/// +/// **attr**: +/// The desired attributes of the new thread. +/// +/// **tid**: +/// The thread ID of the new thread. +#[inline] +pub unsafe fn thread_create(attr_: *mut threadattr, tid_: &mut tid) -> errno { + cloudabi_sys_thread_create(attr_, tid_) +} + +/// Terminates the calling thread. +/// +/// This system call can also unlock a single userspace lock +/// after termination, which can be used to implement thread +/// joining. +/// +/// ## Parameters +/// +/// **lock**: +/// Userspace lock that is locked for writing by +/// the calling thread. +/// +/// **scope**: +/// Whether the lock is stored in private or +/// shared memory. +#[inline] +pub unsafe fn thread_exit(lock_: *mut lock, scope_: scope) -> ! { + cloudabi_sys_thread_exit(lock_, scope_) +} + +/// Temporarily yields execution of the calling thread. 
+#[inline] +pub unsafe fn thread_yield() -> errno { + cloudabi_sys_thread_yield() +} diff --git a/library/std/src/sys/cloudabi/abi/mod.rs b/library/std/src/sys/cloudabi/abi/mod.rs new file mode 100644 index 00000000000..9d01d24ea83 --- /dev/null +++ b/library/std/src/sys/cloudabi/abi/mod.rs @@ -0,0 +1,3 @@ +#[allow(warnings)] +mod cloudabi; +pub use self::cloudabi::*; diff --git a/library/std/src/sys/cloudabi/args.rs b/library/std/src/sys/cloudabi/args.rs new file mode 100644 index 00000000000..dea562abad3 --- /dev/null +++ b/library/std/src/sys/cloudabi/args.rs @@ -0,0 +1,7 @@ +pub use crate::sys::cloudabi::shims::args::*; + +#[allow(dead_code)] +pub fn init(_: isize, _: *const *const u8) {} + +#[allow(dead_code)] +pub fn cleanup() {} diff --git a/library/std/src/sys/cloudabi/condvar.rs b/library/std/src/sys/cloudabi/condvar.rs new file mode 100644 index 00000000000..dabdc0c9b51 --- /dev/null +++ b/library/std/src/sys/cloudabi/condvar.rs @@ -0,0 +1,152 @@ +use crate::cell::UnsafeCell; +use crate::mem; +use crate::sync::atomic::{AtomicU32, Ordering}; +use crate::sys::cloudabi::abi; +use crate::sys::mutex::{self, Mutex}; +use crate::sys::time::checked_dur2intervals; +use crate::time::Duration; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +pub struct Condvar { + condvar: UnsafeCell<AtomicU32>, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +const NEW: Condvar = + Condvar { condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)) }; + +impl Condvar { + pub const fn new() -> Condvar { + NEW + } + + pub unsafe fn init(&mut self) {} + + pub unsafe fn notify_one(&self) { + let condvar = self.condvar.get(); + if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { + let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to signal on condition variable"); + } + } + + pub unsafe fn notify_all(&self) { + let condvar = self.condvar.get(); + if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 { + let ret = abi::condvar_signal( + condvar as *mut abi::condvar, + abi::scope::PRIVATE, + abi::nthreads::MAX, + ); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to broadcast on condition variable"); + } + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + let mutex = mutex::raw(mutex); + assert_eq!( + (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This lock is not write-locked by this thread" + ); + + // Call into the kernel to wait on the condition variable. 
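+        // The CONDVAR subscription asks the kernel to atomically release the
+        // write lock passed as `lock` and put this thread to sleep until
+        // condvar_signal() wakes it; the lock is reacquired for writing
+        // before poll() returns, so the caller still holds the mutex when
+        // wait() completes.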
+ let condvar = self.condvar.get(); + let subscription = abi::subscription { + type_: abi::eventtype::CONDVAR, + union: abi::subscription_union { + condvar: abi::subscription_condvar { + condvar: condvar as *mut abi::condvar, + condvar_scope: abi::scope::PRIVATE, + lock: mutex as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event: mem::MaybeUninit<abi::event> = mem::MaybeUninit::uninit(); + let mut nevents: mem::MaybeUninit<usize> = mem::MaybeUninit::uninit(); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to wait on condition variable"); + assert_eq!( + event.assume_init().error, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let mutex = mutex::raw(mutex); + assert_eq!( + (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This lock is not write-locked by this thread" + ); + + // Call into the kernel to wait on the condition variable. + let condvar = self.condvar.get(); + let timeout = + checked_dur2intervals(&dur).expect("overflow converting duration to nanoseconds"); + let subscriptions = [ + abi::subscription { + type_: abi::eventtype::CONDVAR, + union: abi::subscription_union { + condvar: abi::subscription_condvar { + condvar: condvar as *mut abi::condvar, + condvar_scope: abi::scope::PRIVATE, + lock: mutex as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }, + abi::subscription { + type_: abi::eventtype::CLOCK, + union: abi::subscription_union { + clock: abi::subscription_clock { + clock_id: abi::clockid::MONOTONIC, + timeout, + ..mem::zeroed() + }, + }, + ..mem::zeroed() + }, + ]; + let mut events: [mem::MaybeUninit<abi::event>; 2] = [mem::MaybeUninit::uninit(); 2]; + let mut nevents: mem::MaybeUninit<usize> = mem::MaybeUninit::uninit(); + let ret = abi::poll( + subscriptions.as_ptr(), + mem::MaybeUninit::first_ptr_mut(&mut events), + 2, + nevents.as_mut_ptr(), + ); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to wait on condition variable"); + let nevents = nevents.assume_init(); + for i in 0..nevents { + assert_eq!( + events[i].assume_init().error, + abi::errno::SUCCESS, + "Failed to wait on condition variable" + ); + if events[i].assume_init().type_ == abi::eventtype::CONDVAR { + return true; + } + } + false + } + + pub unsafe fn destroy(&self) { + let condvar = self.condvar.get(); + assert_eq!( + (*condvar).load(Ordering::Relaxed), + abi::CONDVAR_HAS_NO_WAITERS.0, + "Attempted to destroy a condition variable with blocked threads" + ); + } +} diff --git a/library/std/src/sys/cloudabi/io.rs b/library/std/src/sys/cloudabi/io.rs new file mode 100644 index 00000000000..d5f475b4310 --- /dev/null +++ b/library/std/src/sys/cloudabi/io.rs @@ -0,0 +1,47 @@ +use crate::mem; + +#[derive(Copy, Clone)] +pub struct IoSlice<'a>(&'a [u8]); + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + self.0 = &self.0[n..] 
+ } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } +} + +pub struct IoSliceMut<'a>(&'a mut [u8]); + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + let slice = mem::replace(&mut self.0, &mut []); + let (_, remaining) = slice.split_at_mut(n); + self.0 = remaining; + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + self.0 + } +} diff --git a/library/std/src/sys/cloudabi/mod.rs b/library/std/src/sys/cloudabi/mod.rs new file mode 100644 index 00000000000..f7dd2c8d00f --- /dev/null +++ b/library/std/src/sys/cloudabi/mod.rs @@ -0,0 +1,66 @@ +use crate::io::ErrorKind; +use crate::mem; + +#[path = "../unix/alloc.rs"] +pub mod alloc; +pub mod args; +#[path = "../unix/cmath.rs"] +pub mod cmath; +pub mod condvar; +pub mod io; +#[path = "../unix/memchr.rs"] +pub mod memchr; +pub mod mutex; +pub mod os; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +#[path = "../unix/thread_local_key.rs"] +pub mod thread_local_key; +pub mod time; + +pub use crate::sys_common::os_str_bytes as os_str; + +mod abi; + +mod shims; +pub use self::shims::*; + +#[allow(dead_code)] +pub fn init() {} + +pub fn decode_error_kind(errno: i32) -> ErrorKind { + match errno { + x if x == abi::errno::ACCES as i32 => ErrorKind::PermissionDenied, + x if x == abi::errno::ADDRINUSE as i32 => ErrorKind::AddrInUse, + x if x == abi::errno::ADDRNOTAVAIL as i32 => ErrorKind::AddrNotAvailable, + x if x == abi::errno::AGAIN as i32 => ErrorKind::WouldBlock, + x if x == abi::errno::CONNABORTED as i32 => ErrorKind::ConnectionAborted, + x if x == abi::errno::CONNREFUSED as i32 => ErrorKind::ConnectionRefused, + x if x == abi::errno::CONNRESET as i32 => ErrorKind::ConnectionReset, + x if x == abi::errno::EXIST as i32 => ErrorKind::AlreadyExists, + x if x == abi::errno::INTR as i32 => ErrorKind::Interrupted, + x if x == abi::errno::INVAL as i32 => ErrorKind::InvalidInput, + x if x == abi::errno::NOENT as i32 => ErrorKind::NotFound, + x if x == abi::errno::NOTCONN as i32 => ErrorKind::NotConnected, + x if x == abi::errno::PERM as i32 => ErrorKind::PermissionDenied, + x if x == abi::errno::PIPE as i32 => ErrorKind::BrokenPipe, + x if x == abi::errno::TIMEDOUT as i32 => ErrorKind::TimedOut, + _ => ErrorKind::Other, + } +} + +pub fn abort_internal() -> ! { + core::intrinsics::abort(); +} + +pub use libc::strlen; + +pub fn hashmap_random_keys() -> (u64, u64) { + unsafe { + let mut v: mem::MaybeUninit<(u64, u64)> = mem::MaybeUninit::uninit(); + libc::arc4random_buf(v.as_mut_ptr() as *mut libc::c_void, mem::size_of_val(&v)); + v.assume_init() + } +} diff --git a/library/std/src/sys/cloudabi/mutex.rs b/library/std/src/sys/cloudabi/mutex.rs new file mode 100644 index 00000000000..580ab0e8ad8 --- /dev/null +++ b/library/std/src/sys/cloudabi/mutex.rs @@ -0,0 +1,153 @@ +use crate::cell::UnsafeCell; +use crate::mem; +use crate::mem::MaybeUninit; +use crate::sync::atomic::{AtomicU32, Ordering}; +use crate::sys::cloudabi::abi; +use crate::sys::rwlock::{self, RWLock}; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +// Implement Mutex using an RWLock. This doesn't introduce any +// performance overhead in this environment, as the operations would be +// implemented identically. 
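+//
+// Both primitives wrap the same 32-bit userspace lock word (an AtomicU32
+// holding the owning thread id plus the LOCK_WRLOCKED / LOCK_KERNEL_MANAGED
+// bits); a mutex is simply the write half of that lock. `raw()` exposes the
+// underlying word so that Condvar::wait() can hand it to the kernel as the
+// lock of a CONDVAR subscription.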
+pub struct Mutex(RWLock); + +pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 { + rwlock::raw(&m.0) +} + +impl Mutex { + pub const fn new() -> Mutex { + Mutex(RWLock::new()) + } + + pub unsafe fn init(&mut self) { + // This function should normally reinitialize the mutex after + // moving it to a different memory address. This implementation + // does not require adjustments after moving. + } + + pub unsafe fn try_lock(&self) -> bool { + self.0.try_write() + } + + pub unsafe fn lock(&self) { + self.0.write() + } + + pub unsafe fn unlock(&self) { + self.0.write_unlock() + } + + pub unsafe fn destroy(&self) { + self.0.destroy() + } +} + +pub struct ReentrantMutex { + lock: UnsafeCell<MaybeUninit<AtomicU32>>, + recursion: UnsafeCell<MaybeUninit<u32>>, +} + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex { + lock: UnsafeCell::new(MaybeUninit::uninit()), + recursion: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + pub unsafe fn init(&self) { + *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)); + *self.recursion.get() = MaybeUninit::new(0); + } + + pub unsafe fn try_lock(&self) -> bool { + // Attempt to acquire the lock. + let lock = (*self.lock.get()).as_mut_ptr(); + let recursion = (*self.recursion.get()).as_mut_ptr(); + if let Err(old) = (*lock).compare_exchange( + abi::LOCK_UNLOCKED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + // If we fail to acquire the lock, it may be the case + // that we've already acquired it and may need to recurse. + if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 { + *recursion += 1; + true + } else { + false + } + } else { + // Success. + assert_eq!(*recursion, 0, "Mutex has invalid recursion count"); + true + } + } + + pub unsafe fn lock(&self) { + if !self.try_lock() { + // Call into the kernel to acquire a write lock. + let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_WRLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event = MaybeUninit::<abi::event>::uninit(); + let mut nevents = MaybeUninit::<usize>::uninit(); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex"); + let event = event.assume_init(); + assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex"); + } + } + + pub unsafe fn unlock(&self) { + let lock = (*self.lock.get()).as_mut_ptr(); + let recursion = (*self.recursion.get()).as_mut_ptr(); + assert_eq!( + (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This mutex is locked by a different thread" + ); + + if *recursion > 0 { + *recursion -= 1; + } else if !(*lock) + .compare_exchange( + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + abi::LOCK_UNLOCKED.0, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + // Lock is managed by kernelspace. Call into the kernel + // to unblock waiting threads. 
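+            // The compare_exchange above can only fail here because the
+            // kernel has set LOCK_KERNEL_MANAGED on the lock word (the
+            // ownership bits were already checked by the assert), so
+            // lock_unlock() must be used to wake the blocked threads.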
+ let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex"); + } + } + + pub unsafe fn destroy(&self) { + let lock = (*self.lock.get()).as_mut_ptr(); + let recursion = (*self.recursion.get()).as_mut_ptr(); + assert_eq!( + (*lock).load(Ordering::Relaxed), + abi::LOCK_UNLOCKED.0, + "Attempted to destroy locked mutex" + ); + assert_eq!(*recursion, 0, "Recursion counter invalid"); + } +} diff --git a/library/std/src/sys/cloudabi/os.rs b/library/std/src/sys/cloudabi/os.rs new file mode 100644 index 00000000000..326faaa852a --- /dev/null +++ b/library/std/src/sys/cloudabi/os.rs @@ -0,0 +1,26 @@ +use crate::ffi::CStr; +use crate::str; + +use libc::c_int; + +pub use crate::sys::cloudabi::shims::os::*; + +pub fn errno() -> i32 { + extern "C" { + #[thread_local] + static errno: c_int; + } + + unsafe { errno as i32 } +} + +/// Gets a detailed string description for the given error number. +pub fn error_string(errno: i32) -> String { + // cloudlibc's strerror() is guaranteed to be thread-safe. There is + // thus no need to use strerror_r(). + str::from_utf8(unsafe { CStr::from_ptr(libc::strerror(errno)) }.to_bytes()).unwrap().to_owned() +} + +pub fn exit(code: i32) -> ! { + unsafe { libc::exit(code as c_int) } +} diff --git a/library/std/src/sys/cloudabi/rwlock.rs b/library/std/src/sys/cloudabi/rwlock.rs new file mode 100644 index 00000000000..b8af5af1d70 --- /dev/null +++ b/library/std/src/sys/cloudabi/rwlock.rs @@ -0,0 +1,218 @@ +use crate::cell::UnsafeCell; +use crate::mem; +use crate::mem::MaybeUninit; +use crate::sync::atomic::{AtomicU32, Ordering}; +use crate::sys::cloudabi::abi; + +extern "C" { + #[thread_local] + static __pthread_thread_id: abi::tid; +} + +#[thread_local] +static mut RDLOCKS_ACQUIRED: u32 = 0; + +pub struct RWLock { + lock: UnsafeCell<AtomicU32>, +} + +pub unsafe fn raw(r: &RWLock) -> *mut AtomicU32 { + r.lock.get() +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +const NEW: RWLock = RWLock { lock: UnsafeCell::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)) }; + +impl RWLock { + pub const fn new() -> RWLock { + NEW + } + + pub unsafe fn try_read(&self) -> bool { + let lock = self.lock.get(); + let mut old = abi::LOCK_UNLOCKED.0; + while let Err(cur) = + (*lock).compare_exchange_weak(old, old + 1, Ordering::Acquire, Ordering::Relaxed) + { + if (cur & abi::LOCK_WRLOCKED.0) != 0 { + // Another thread already has a write lock. + assert_ne!( + old & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "Attempted to acquire a read lock while holding a write lock" + ); + return false; + } else if (old & abi::LOCK_KERNEL_MANAGED.0) != 0 && RDLOCKS_ACQUIRED == 0 { + // Lock has threads waiting for the lock. Only acquire + // the lock if we have already acquired read locks. In + // that case, it is justified to acquire this lock to + // prevent a deadlock. + return false; + } + old = cur; + } + + RDLOCKS_ACQUIRED += 1; + true + } + + pub unsafe fn read(&self) { + if !self.try_read() { + // Call into the kernel to acquire a read lock. 
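+            // The LOCK_RDLOCK subscription blocks in poll() until the kernel
+            // grants the read lock. While threads are blocked on the lock the
+            // kernel marks the word with LOCK_KERNEL_MANAGED, which is why
+            // read_unlock() may have to perform the final unlock through
+            // lock_unlock() rather than a plain userspace decrement.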
+ let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_RDLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event = MaybeUninit::<abi::event>::uninit(); + let mut nevents = MaybeUninit::<usize>::uninit(); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire read lock"); + let event = event.assume_init(); + assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire read lock"); + + RDLOCKS_ACQUIRED += 1; + } + } + + pub unsafe fn read_unlock(&self) { + // Perform a read unlock. We can do this in userspace, except when + // other threads are blocked and we are performing the last unlock. + // In that case, call into the kernel. + // + // Other threads may attempt to increment the read lock count, + // meaning that the call into the kernel could be spurious. To + // prevent this from happening, upgrade to a write lock first. This + // allows us to call into the kernel, having the guarantee that the + // lock value will not change in the meantime. + assert!(RDLOCKS_ACQUIRED > 0, "Bad lock count"); + let mut old = 1; + loop { + let lock = self.lock.get(); + if old == 1 | abi::LOCK_KERNEL_MANAGED.0 { + // Last read lock while threads are waiting. Attempt to upgrade + // to a write lock before calling into the kernel to unlock. + if let Err(cur) = (*lock).compare_exchange_weak( + old, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 | abi::LOCK_KERNEL_MANAGED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + old = cur; + } else { + // Call into the kernel to unlock. + let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock"); + break; + } + } else { + // No threads waiting or not the last read lock. Just decrement + // the read lock count. + assert_ne!(old & !abi::LOCK_KERNEL_MANAGED.0, 0, "This rwlock is not locked"); + assert_eq!( + old & abi::LOCK_WRLOCKED.0, + 0, + "Attempted to read-unlock a write-locked rwlock" + ); + if let Err(cur) = (*lock).compare_exchange_weak( + old, + old - 1, + Ordering::Acquire, + Ordering::Relaxed, + ) { + old = cur; + } else { + break; + } + } + } + + RDLOCKS_ACQUIRED -= 1; + } + + pub unsafe fn try_write(&self) -> bool { + // Attempt to acquire the lock. + let lock = self.lock.get(); + if let Err(old) = (*lock).compare_exchange( + abi::LOCK_UNLOCKED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + Ordering::Acquire, + Ordering::Relaxed, + ) { + // Failure. Crash upon recursive acquisition. + assert_ne!( + old & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "Attempted to recursive write-lock a rwlock", + ); + false + } else { + // Success. + true + } + } + + pub unsafe fn write(&self) { + if !self.try_write() { + // Call into the kernel to acquire a write lock. 
+ let lock = self.lock.get(); + let subscription = abi::subscription { + type_: abi::eventtype::LOCK_WRLOCK, + union: abi::subscription_union { + lock: abi::subscription_lock { + lock: lock as *mut abi::lock, + lock_scope: abi::scope::PRIVATE, + }, + }, + ..mem::zeroed() + }; + let mut event = MaybeUninit::<abi::event>::uninit(); + let mut nevents = MaybeUninit::<usize>::uninit(); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire write lock"); + let event = event.assume_init(); + assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire write lock"); + } + } + + pub unsafe fn write_unlock(&self) { + let lock = self.lock.get(); + assert_eq!( + (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0, + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + "This rwlock is not write-locked by this thread" + ); + + if !(*lock) + .compare_exchange( + __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0, + abi::LOCK_UNLOCKED.0, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + // Lock is managed by kernelspace. Call into the kernel + // to unblock waiting threads. + let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE); + assert_eq!(ret, abi::errno::SUCCESS, "Failed to write unlock a rwlock"); + } + } + + pub unsafe fn destroy(&self) { + let lock = self.lock.get(); + assert_eq!( + (*lock).load(Ordering::Relaxed), + abi::LOCK_UNLOCKED.0, + "Attempted to destroy locked rwlock" + ); + } +} diff --git a/library/std/src/sys/cloudabi/shims/args.rs b/library/std/src/sys/cloudabi/shims/args.rs new file mode 100644 index 00000000000..f5cf71caf6c --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/args.rs @@ -0,0 +1,35 @@ +use crate::ffi::OsString; + +pub struct Args(()); + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + &[] + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + None + } + fn size_hint(&self) -> (usize, Option<usize>) { + (0, Some(0)) + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + 0 + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + None + } +} + +pub fn args() -> Args { + Args(()) +} diff --git a/library/std/src/sys/cloudabi/shims/env.rs b/library/std/src/sys/cloudabi/shims/env.rs new file mode 100644 index 00000000000..de165a864b9 --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = "cloudabi"; + pub const OS: &str = "cloudabi"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} diff --git a/library/std/src/sys/cloudabi/shims/fs.rs b/library/std/src/sys/cloudabi/shims/fs.rs new file mode 100644 index 00000000000..ecb5b51cccd --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/fs.rs @@ -0,0 +1,308 @@ +use crate::ffi::OsString; +use crate::fmt; +use crate::hash::{Hash, Hasher}; +use crate::io::{self, IoSlice, IoSliceMut, SeekFrom}; +use crate::path::{Path, PathBuf}; +use crate::sys::time::SystemTime; +use crate::sys::{unsupported, Void}; + +pub struct File(Void); + +pub struct FileAttr(Void); + +pub struct ReadDir(Void); + +pub struct DirEntry(Void); + +#[derive(Clone, Debug)] +pub struct OpenOptions {} + +pub struct FilePermissions(Void); + +pub struct FileType(Void); + +#[derive(Debug)] +pub struct DirBuilder {} + +impl FileAttr { 
+ pub fn size(&self) -> u64 { + match self.0 {} + } + + pub fn perm(&self) -> FilePermissions { + match self.0 {} + } + + pub fn file_type(&self) -> FileType { + match self.0 {} + } + + pub fn modified(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn created(&self) -> io::Result<SystemTime> { + match self.0 {} + } +} + +impl Clone for FileAttr { + fn clone(&self) -> FileAttr { + match self.0 {} + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + match self.0 {} + } + + pub fn set_readonly(&mut self, _readonly: bool) { + match self.0 {} + } +} + +impl Clone for FilePermissions { + fn clone(&self) -> FilePermissions { + match self.0 {} + } +} + +impl PartialEq for FilePermissions { + fn eq(&self, _other: &FilePermissions) -> bool { + match self.0 {} + } +} + +impl Eq for FilePermissions {} + +impl fmt::Debug for FilePermissions { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + match self.0 {} + } + + pub fn is_file(&self) -> bool { + match self.0 {} + } + + pub fn is_symlink(&self) -> bool { + match self.0 {} + } +} + +impl Clone for FileType { + fn clone(&self) -> FileType { + match self.0 {} + } +} + +impl Copy for FileType {} + +impl PartialEq for FileType { + fn eq(&self, _other: &FileType) -> bool { + match self.0 {} + } +} + +impl Eq for FileType {} + +impl Hash for FileType { + fn hash<H: Hasher>(&self, _h: &mut H) { + match self.0 {} + } +} + +impl fmt::Debug for FileType { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + fn next(&mut self) -> Option<io::Result<DirEntry>> { + match self.0 {} + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + match self.0 {} + } + + pub fn file_name(&self) -> OsString { + match self.0 {} + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn file_type(&self) -> io::Result<FileType> { + match self.0 {} + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions {} + } + + pub fn read(&mut self, _read: bool) {} + pub fn write(&mut self, _write: bool) {} + pub fn append(&mut self, _append: bool) {} + pub fn truncate(&mut self, _truncate: bool) {} + pub fn create(&mut self, _create: bool) {} + pub fn create_new(&mut self, _create_new: bool) {} +} + +impl File { + pub fn open(_path: &Path, _opts: &OpenOptions) -> io::Result<File> { + unsupported() + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn fsync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn datasync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn truncate(&self, _size: u64) -> io::Result<()> { + match self.0 {} + } + + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn flush(&self) -> 
io::Result<()> { + match self.0 {} + } + + pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<File> { + match self.0 {} + } + + pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> { + match self.0 {} + } + + pub fn diverge(&self) -> ! { + match self.0 {} + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder {} + } + + pub fn mkdir(&self, _p: &Path) -> io::Result<()> { + unsupported() + } +} + +impl fmt::Debug for File { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub fn readdir(_p: &Path) -> io::Result<ReadDir> { + unsupported() +} + +pub fn unlink(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { + unsupported() +} + +pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> { + match perm.0 {} +} + +pub fn rmdir(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn remove_dir_all(_path: &Path) -> io::Result<()> { + unsupported() +} + +pub fn readlink(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn stat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn lstat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn copy(_from: &Path, _to: &Path) -> io::Result<u64> { + unsupported() +} diff --git a/library/std/src/sys/cloudabi/shims/mod.rs b/library/std/src/sys/cloudabi/shims/mod.rs new file mode 100644 index 00000000000..b1b5f142f45 --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/mod.rs @@ -0,0 +1,19 @@ +use crate::io; + +pub mod args; +pub mod env; +pub mod fs; +pub mod net; +pub mod os; +#[path = "../../unix/path.rs"] +pub mod path; +pub mod pipe; +pub mod process; + +// This enum is used as the storage for a bunch of types which can't actually exist. 
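+// Because `Void` has no variants it can never be constructed, so the stub
+// types in the sibling modules wrap it and implement their methods as
+// `match self.0 {}`: a match with no arms on an uninhabited value
+// type-checks as diverging, letting each method "return" whatever type its
+// signature demands without panicking or resorting to unsafe code.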
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Void {} + +pub fn unsupported<T>() -> io::Result<T> { + Err(io::Error::new(io::ErrorKind::Other, "This function is not available on CloudABI.")) +} diff --git a/library/std/src/sys/cloudabi/shims/net.rs b/library/std/src/sys/cloudabi/shims/net.rs new file mode 100644 index 00000000000..375aaab405d --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/net.rs @@ -0,0 +1,326 @@ +use crate::convert::TryFrom; +use crate::fmt; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; +use crate::sys::{unsupported, Void}; +use crate::time::Duration; + +#[allow(unused_extern_crates)] +pub extern crate libc as netc; + +pub struct TcpStream(Void); + +impl TcpStream { + pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + unsupported() + } + + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> { + unsupported() + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + match self.0 {} + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn nodelay(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct TcpListener(Void); + +impl TcpListener { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + unsupported() + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn only_v6(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn 
take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct UdpSocket(Void); + +impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + unsupported() + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + match self.0 {} + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn broadcast(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct LookupHost(Void); + +impl LookupHost { + pub fn port(&self) -> u16 { + match self.0 {} + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + match self.0 {} + } +} + +impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(_v: &str) -> io::Result<LookupHost> { + 
unsupported() + } +} + +impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> { + unsupported() + } +} diff --git a/library/std/src/sys/cloudabi/shims/os.rs b/library/std/src/sys/cloudabi/shims/os.rs new file mode 100644 index 00000000000..779e6d54b7c --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/os.rs @@ -0,0 +1,86 @@ +use crate::error::Error as StdError; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::iter; +use crate::path::{self, PathBuf}; +use crate::sys::{unsupported, Void}; + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub type Env = iter::Empty<(OsString, OsString)>; + +pub fn env() -> Env { + iter::empty() +} + +pub fn getenv(_: &OsStr) -> io::Result<Option<OsString>> { + Ok(None) +} + +pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> { + unsupported() +} + +pub fn unsetenv(_: &OsStr) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(&'a Void); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + match *self.0 {} + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on CloudABI yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on CloudABI yet" + } +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn temp_dir() -> PathBuf { + PathBuf::from("/tmp") +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} + +pub fn getpid() -> u32 { + 1 +} diff --git a/library/std/src/sys/cloudabi/shims/pipe.rs b/library/std/src/sys/cloudabi/shims/pipe.rs new file mode 100644 index 00000000000..10d0925823e --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/pipe.rs @@ -0,0 +1,38 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::sys::Void; + +pub struct AnonPipe(Void); + +impl AnonPipe { + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn diverge(&self) -> ! 
{ + match self.0 {} + } +} + +pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> { + match p1.0 {} +} diff --git a/library/std/src/sys/cloudabi/shims/process.rs b/library/std/src/sys/cloudabi/shims/process.rs new file mode 100644 index 00000000000..4702e5c5492 --- /dev/null +++ b/library/std/src/sys/cloudabi/shims/process.rs @@ -0,0 +1,149 @@ +use crate::ffi::OsStr; +use crate::fmt; +use crate::io; +use crate::sys::fs::File; +use crate::sys::pipe::AnonPipe; +use crate::sys::{unsupported, Void}; +use crate::sys_common::process::CommandEnv; + +pub use crate::ffi::OsString as EnvKey; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + env: CommandEnv, +} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, +} + +impl Command { + pub fn new(_program: &OsStr) -> Command { + Command { env: Default::default() } + } + + pub fn arg(&mut self, _arg: &OsStr) {} + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn cwd(&mut self, _dir: &OsStr) {} + + pub fn stdin(&mut self, _stdin: Stdio) {} + + pub fn stdout(&mut self, _stdout: Stdio) {} + + pub fn stderr(&mut self, _stderr: Stdio) {} + + pub fn spawn( + &mut self, + _default: Stdio, + _needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + unsupported() + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + pipe.diverge() + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + file.diverge() + } +} + +impl fmt::Debug for Command { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct ExitStatus(Void); + +impl ExitStatus { + pub fn success(&self) -> bool { + match self.0 {} + } + + pub fn code(&self) -> Option<i32> { + match self.0 {} + } +} + +impl Clone for ExitStatus { + fn clone(&self) -> ExitStatus { + match self.0 {} + } +} + +impl Copy for ExitStatus {} + +impl PartialEq for ExitStatus { + fn eq(&self, _other: &ExitStatus) -> bool { + match self.0 {} + } +} + +impl Eq for ExitStatus {} + +impl fmt::Debug for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(bool); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(false); + pub const FAILURE: ExitCode = ExitCode(true); + + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +pub struct Process(Void); + +impl Process { + pub fn id(&self) -> u32 { + match self.0 {} + } + + pub fn kill(&mut self) -> io::Result<()> { + match self.0 {} + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + match self.0 {} + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + match self.0 {} + } +} diff --git a/library/std/src/sys/cloudabi/stack_overflow.rs b/library/std/src/sys/cloudabi/stack_overflow.rs new file mode 100644 index 00000000000..9339b143731 --- /dev/null +++ b/library/std/src/sys/cloudabi/stack_overflow.rs @@ -0,0 +1,5 @@ +#![cfg_attr(test, allow(dead_code))] + +pub unsafe fn init() {} + +pub 
unsafe fn cleanup() {} diff --git a/library/std/src/sys/cloudabi/stdio.rs b/library/std/src/sys/cloudabi/stdio.rs new file mode 100644 index 00000000000..601563c5b1f --- /dev/null +++ b/library/std/src/sys/cloudabi/stdio.rs @@ -0,0 +1,66 @@ +use crate::io; +use crate::sys::cloudabi::abi; + +pub struct Stdin(()); +pub struct Stdout(()); +pub struct Stderr(()); + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin(())) + } +} + +impl io::Read for Stdin { + fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { + Ok(0) + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout(())) + } +} + +impl io::Write for Stdout { + fn write(&mut self, _buf: &[u8]) -> io::Result<usize> { + Err(io::Error::new( + io::ErrorKind::BrokenPipe, + "Stdout is not connected to any output in this environment", + )) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr(())) + } +} + +impl io::Write for Stderr { + fn write(&mut self, _buf: &[u8]) -> io::Result<usize> { + Err(io::Error::new( + io::ErrorKind::BrokenPipe, + "Stderr is not connected to any output in this environment", + )) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(abi::errno::BADF as i32) +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/cloudabi/thread.rs b/library/std/src/sys/cloudabi/thread.rs new file mode 100644 index 00000000000..a15dc8653e8 --- /dev/null +++ b/library/std/src/sys/cloudabi/thread.rs @@ -0,0 +1,118 @@ +use crate::cmp; +use crate::ffi::CStr; +use crate::io; +use crate::mem; +use crate::ptr; +use crate::sys::cloudabi::abi; +use crate::sys::time::checked_dur2intervals; +use crate::time::Duration; + +pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024; + +pub struct Thread { + id: libc::pthread_t, +} + +// CloudABI has pthread_t as a pointer in which case we still want +// a thread to be Send/Sync +unsafe impl Send for Thread {} +unsafe impl Sync for Thread {} + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + let p = Box::into_raw(box p); + let mut native: libc::pthread_t = mem::zeroed(); + let mut attr: libc::pthread_attr_t = mem::zeroed(); + assert_eq!(libc::pthread_attr_init(&mut attr), 0); + + let stack_size = cmp::max(stack, min_stack_size(&attr)); + assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0); + + let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _); + // Note: if the thread creation fails and this assert fails, then p will + // be leaked. However, an alternative design could cause double-free + // which is clearly worse. + assert_eq!(libc::pthread_attr_destroy(&mut attr), 0); + + return if ret != 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + drop(Box::from_raw(p)); + Err(io::Error::from_raw_os_error(ret)) + } else { + Ok(Thread { id: native }) + }; + + extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { + unsafe { + // Let's run some code. 
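+                // `p` was passed in as a raw `Box<Box<dyn FnOnce()>>`: the
+                // extra box turns the fat trait-object pointer into a thin
+                // pointer that fits pthread_create's `*mut c_void` argument.
+                // Reconstruct it here and call the closure; both allocations
+                // are freed once the call returns.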
+ Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + } + ptr::null_mut() + } + } + + pub fn yield_now() { + let ret = unsafe { abi::thread_yield() }; + debug_assert_eq!(ret, abi::errno::SUCCESS); + } + + pub fn set_name(_name: &CStr) { + // CloudABI has no way to set a thread name. + } + + pub fn sleep(dur: Duration) { + let timeout = + checked_dur2intervals(&dur).expect("overflow converting duration to nanoseconds"); + unsafe { + let subscription = abi::subscription { + type_: abi::eventtype::CLOCK, + union: abi::subscription_union { + clock: abi::subscription_clock { + clock_id: abi::clockid::MONOTONIC, + timeout, + ..mem::zeroed() + }, + }, + ..mem::zeroed() + }; + let mut event = mem::MaybeUninit::<abi::event>::uninit(); + let mut nevents = mem::MaybeUninit::<usize>::uninit(); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS); + let event = event.assume_init(); + assert_eq!(event.error, abi::errno::SUCCESS); + } + } + + pub fn join(self) { + unsafe { + let ret = libc::pthread_join(self.id, ptr::null_mut()); + mem::forget(self); + assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret)); + } + } +} + +impl Drop for Thread { + fn drop(&mut self) { + let ret = unsafe { libc::pthread_detach(self.id) }; + debug_assert_eq!(ret, 0); + } +} + +#[cfg_attr(test, allow(dead_code))] +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} + +fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { + libc::PTHREAD_STACK_MIN +} diff --git a/library/std/src/sys/cloudabi/time.rs b/library/std/src/sys/cloudabi/time.rs new file mode 100644 index 00000000000..c209231cf8c --- /dev/null +++ b/library/std/src/sys/cloudabi/time.rs @@ -0,0 +1,82 @@ +use crate::mem; +use crate::sys::cloudabi::abi; +use crate::time::Duration; + +const NSEC_PER_SEC: abi::timestamp = 1_000_000_000; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant { + t: abi::timestamp, +} + +pub fn checked_dur2intervals(dur: &Duration) -> Option<abi::timestamp> { + dur.as_secs().checked_mul(NSEC_PER_SEC)?.checked_add(dur.subsec_nanos() as abi::timestamp) +} + +impl Instant { + pub fn now() -> Instant { + unsafe { + let mut t: mem::MaybeUninit<abi::timestamp> = mem::MaybeUninit::uninit(); + let ret = abi::clock_time_get(abi::clockid::MONOTONIC, 0, t.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS); + Instant { t: t.assume_init() } + } + } + + pub fn actually_monotonic() -> bool { + true + } + + pub const fn zero() -> Instant { + Instant { t: 0 } + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + let diff = self.t.checked_sub(other.t)?; + Some(Duration::new(diff / NSEC_PER_SEC, (diff % NSEC_PER_SEC) as u32)) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add(checked_dur2intervals(other)?)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub(checked_dur2intervals(other)?)? 
}) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime { + t: abi::timestamp, +} + +impl SystemTime { + pub fn now() -> SystemTime { + unsafe { + let mut t: mem::MaybeUninit<abi::timestamp> = mem::MaybeUninit::uninit(); + let ret = abi::clock_time_get(abi::clockid::REALTIME, 0, t.as_mut_ptr()); + assert_eq!(ret, abi::errno::SUCCESS); + SystemTime { t: t.assume_init() } + } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + if self.t >= other.t { + let diff = self.t - other.t; + Ok(Duration::new(diff / NSEC_PER_SEC, (diff % NSEC_PER_SEC) as u32)) + } else { + let diff = other.t - self.t; + Err(Duration::new(diff / NSEC_PER_SEC, (diff % NSEC_PER_SEC) as u32)) + } + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add(checked_dur2intervals(other)?)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub(checked_dur2intervals(other)?)? }) + } +} + +pub const UNIX_EPOCH: SystemTime = SystemTime { t: 0 }; diff --git a/library/std/src/sys/hermit/alloc.rs b/library/std/src/sys/hermit/alloc.rs new file mode 100644 index 00000000000..d153914e77e --- /dev/null +++ b/library/std/src/sys/hermit/alloc.rs @@ -0,0 +1,31 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr; +use crate::sys::hermit::abi; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + abi::malloc(layout.size(), layout.align()) + } + + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let addr = abi::malloc(layout.size(), layout.align()); + + if !addr.is_null() { + ptr::write_bytes(addr, 0x00, layout.size()); + } + + addr + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + abi::free(ptr, layout.size(), layout.align()) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + abi::realloc(ptr, layout.size(), layout.align(), new_size) + } +} diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs new file mode 100644 index 00000000000..72c1b8511ca --- /dev/null +++ b/library/std/src/sys/hermit/args.rs @@ -0,0 +1,93 @@ +use crate::ffi::OsString; +use crate::marker::PhantomData; +use crate::vec; + +/// One-time global initialization. +pub unsafe fn init(argc: isize, argv: *const *const u8) { + imp::init(argc, argv) +} + +/// One-time global cleanup. 
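+///
+/// On HermitCore this only resets the `argc`/`argv` pointers captured by
+/// `init` (see `imp::cleanup` below); it does not free anything.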
+pub unsafe fn cleanup() { + imp::cleanup() +} + +/// Returns the command line arguments +pub fn args() -> Args { + imp::args() +} + +pub struct Args { + iter: vec::IntoIter<OsString>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.iter.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.iter.next_back() + } +} + +mod imp { + use super::Args; + use crate::ffi::{CStr, OsString}; + use crate::marker::PhantomData; + use crate::ptr; + use crate::sys_common::os_str_bytes::*; + + use crate::sys_common::mutex::Mutex; + + static mut ARGC: isize = 0; + static mut ARGV: *const *const u8 = ptr::null(); + static LOCK: Mutex = Mutex::new(); + + pub unsafe fn init(argc: isize, argv: *const *const u8) { + let _guard = LOCK.lock(); + ARGC = argc; + ARGV = argv; + } + + pub unsafe fn cleanup() { + let _guard = LOCK.lock(); + ARGC = 0; + ARGV = ptr::null(); + } + + pub fn args() -> Args { + Args { iter: clone().into_iter(), _dont_send_or_sync_me: PhantomData } + } + + fn clone() -> Vec<OsString> { + unsafe { + let _guard = LOCK.lock(); + (0..ARGC) + .map(|i| { + let cstr = CStr::from_ptr(*ARGV.offset(i) as *const i8); + OsStringExt::from_vec(cstr.to_bytes().to_vec()) + }) + .collect() + } + } +} diff --git a/library/std/src/sys/hermit/cmath.rs b/library/std/src/sys/hermit/cmath.rs new file mode 100644 index 00000000000..304cf906b2a --- /dev/null +++ b/library/std/src/sys/hermit/cmath.rs @@ -0,0 +1,29 @@ +// These symbols are all defined in `compiler-builtins` +extern "C" { + pub fn acos(n: f64) -> f64; + pub fn acosf(n: f32) -> f32; + pub fn asin(n: f64) -> f64; + pub fn asinf(n: f32) -> f32; + pub fn atan(n: f64) -> f64; + pub fn atan2(a: f64, b: f64) -> f64; + pub fn atan2f(a: f32, b: f32) -> f32; + pub fn atanf(n: f32) -> f32; + pub fn cbrt(n: f64) -> f64; + pub fn cbrtf(n: f32) -> f32; + pub fn cosh(n: f64) -> f64; + pub fn coshf(n: f32) -> f32; + pub fn expm1(n: f64) -> f64; + pub fn expm1f(n: f32) -> f32; + pub fn fdim(a: f64, b: f64) -> f64; + pub fn fdimf(a: f32, b: f32) -> f32; + pub fn hypot(x: f64, y: f64) -> f64; + pub fn hypotf(x: f32, y: f32) -> f32; + pub fn log1p(n: f64) -> f64; + pub fn log1pf(n: f32) -> f32; + pub fn sinh(n: f64) -> f64; + pub fn sinhf(n: f32) -> f32; + pub fn tan(n: f64) -> f64; + pub fn tanf(n: f32) -> f32; + pub fn tanh(n: f64) -> f64; + pub fn tanhf(n: f32) -> f32; +} diff --git a/library/std/src/sys/hermit/condvar.rs b/library/std/src/sys/hermit/condvar.rs new file mode 100644 index 00000000000..52c8c3b17e8 --- /dev/null +++ b/library/std/src/sys/hermit/condvar.rs @@ -0,0 +1,64 @@ +use crate::ffi::c_void; +use crate::ptr; +use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst}; +use crate::sys::hermit::abi; +use crate::sys::mutex::Mutex; +use crate::time::Duration; + +// The implementation is inspired by Andrew D. 
Birrell's paper +// "Implementing Condition Variables with Semaphores" + +pub struct Condvar { + counter: AtomicUsize, + sem1: *const c_void, + sem2: *const c_void, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { counter: AtomicUsize::new(0), sem1: ptr::null(), sem2: ptr::null() } + } + + pub unsafe fn init(&mut self) { + let _ = abi::sem_init(&mut self.sem1 as *mut *const c_void, 0); + let _ = abi::sem_init(&mut self.sem2 as *mut *const c_void, 0); + } + + pub unsafe fn notify_one(&self) { + if self.counter.load(SeqCst) > 0 { + self.counter.fetch_sub(1, SeqCst); + abi::sem_post(self.sem1); + abi::sem_timedwait(self.sem2, 0); + } + } + + pub unsafe fn notify_all(&self) { + let counter = self.counter.swap(0, SeqCst); + for _ in 0..counter { + abi::sem_post(self.sem1); + } + for _ in 0..counter { + abi::sem_timedwait(self.sem2, 0); + } + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + self.counter.fetch_add(1, SeqCst); + mutex.unlock(); + abi::sem_timedwait(self.sem1, 0); + abi::sem_post(self.sem2); + mutex.lock(); + } + + pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool { + panic!("wait_timeout not supported on hermit"); + } + + pub unsafe fn destroy(&self) { + let _ = abi::sem_destroy(self.sem1); + let _ = abi::sem_destroy(self.sem2); + } +} diff --git a/library/std/src/sys/hermit/env.rs b/library/std/src/sys/hermit/env.rs new file mode 100644 index 00000000000..7a0fcb31ef2 --- /dev/null +++ b/library/std/src/sys/hermit/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = ""; + pub const OS: &str = "hermit"; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ""; + pub const DLL_EXTENSION: &str = ""; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} diff --git a/library/std/src/sys/hermit/ext/ffi.rs b/library/std/src/sys/hermit/ext/ffi.rs new file mode 100644 index 00000000000..07b59a02556 --- /dev/null +++ b/library/std/src/sys/hermit/ext/ffi.rs @@ -0,0 +1,38 @@ +//! HermitCore-specific extension to the primitives in the `std::ffi` module +//! +//! # Examples +//! +//! ``` +//! use std::ffi::OsString; +//! use std::os::hermit::ffi::OsStringExt; +//! +//! let bytes = b"foo".to_vec(); +//! +//! // OsStringExt::from_vec +//! let os_string = OsString::from_vec(bytes); +//! assert_eq!(os_string.to_str(), Some("foo")); +//! +//! // OsStringExt::into_vec +//! let bytes = os_string.into_vec(); +//! assert_eq!(bytes, b"foo"); +//! ``` +//! +//! ``` +//! use std::ffi::OsStr; +//! use std::os::hermit::ffi::OsStrExt; +//! +//! let bytes = b"foo"; +//! +//! // OsStrExt::from_bytes +//! let os_str = OsStr::from_bytes(bytes); +//! assert_eq!(os_str.to_str(), Some("foo")); +//! +//! // OsStrExt::as_bytes +//! let bytes = os_str.as_bytes(); +//! assert_eq!(bytes, b"foo"); +//! ``` + +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use crate::sys_common::os_str_bytes::*; diff --git a/library/std/src/sys/hermit/ext/mod.rs b/library/std/src/sys/hermit/ext/mod.rs new file mode 100644 index 00000000000..ea87d0ad2c9 --- /dev/null +++ b/library/std/src/sys/hermit/ext/mod.rs @@ -0,0 +1,14 @@ +#![stable(feature = "rust1", since = "1.0.0")] +#![allow(missing_docs)] + +pub mod ffi; + +/// A prelude for conveniently writing platform-specific code. +/// +/// Includes all extension traits, and some important type definitions. 
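+///
+/// Typically glob-imported, e.g. `use std::os::hermit::prelude::*;`.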
+#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::ffi::{OsStrExt, OsStringExt}; +} diff --git a/library/std/src/sys/hermit/fd.rs b/library/std/src/sys/hermit/fd.rs new file mode 100644 index 00000000000..97d1a38b41a --- /dev/null +++ b/library/std/src/sys/hermit/fd.rs @@ -0,0 +1,86 @@ +#![unstable(reason = "not public", issue = "none", feature = "fd")] + +use crate::io::{self, ErrorKind, Read}; +use crate::mem; +use crate::sys::cvt; +use crate::sys::hermit::abi; +use crate::sys_common::AsInner; + +#[derive(Debug)] +pub struct FileDesc { + fd: i32, +} + +impl FileDesc { + pub fn new(fd: i32) -> FileDesc { + FileDesc { fd } + } + + pub fn raw(&self) -> i32 { + self.fd + } + + /// Extracts the actual file descriptor without closing it. + pub fn into_raw(self) -> i32 { + let fd = self.fd; + mem::forget(self); + fd + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + let result = unsafe { abi::read(self.fd, buf.as_mut_ptr(), buf.len()) }; + cvt(result as i32) + } + + pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> { + let mut me = self; + (&mut me).read_to_end(buf) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let result = unsafe { abi::write(self.fd, buf.as_ptr(), buf.len()) }; + cvt(result as i32) + } + + pub fn duplicate(&self) -> io::Result<FileDesc> { + self.duplicate_path(&[]) + } + pub fn duplicate_path(&self, _path: &[u8]) -> io::Result<FileDesc> { + Err(io::Error::new(ErrorKind::Other, "duplicate isn't supported")) + } + + pub fn nonblocking(&self) -> io::Result<bool> { + Ok(false) + } + + pub fn set_cloexec(&self) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "cloexec isn't supported")) + } + + pub fn set_nonblocking(&self, _nonblocking: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "nonblocking isn't supported")) + } +} + +impl<'a> Read for &'a FileDesc { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + (**self).read(buf) + } +} + +impl AsInner<i32> for FileDesc { + fn as_inner(&self) -> &i32 { + &self.fd + } +} + +impl Drop for FileDesc { + fn drop(&mut self) { + // Note that errors are ignored when closing a file descriptor. The + // reason for this is that if an error occurs we don't actually know if + // the file descriptor was closed or not, and if we retried (for + // something like EINTR), we might close another valid file descriptor + // (opened after we closed ours. + let _ = unsafe { abi::close(self.fd) }; + } +} diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs new file mode 100644 index 00000000000..82ccab1462b --- /dev/null +++ b/library/std/src/sys/hermit/fs.rs @@ -0,0 +1,402 @@ +use crate::ffi::{CStr, CString, OsString}; +use crate::fmt; +use crate::hash::{Hash, Hasher}; +use crate::io::{self, Error, ErrorKind}; +use crate::io::{IoSlice, IoSliceMut, SeekFrom}; +use crate::path::{Path, PathBuf}; +use crate::sys::cvt; +use crate::sys::hermit::abi; +use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY}; +use crate::sys::hermit::fd::FileDesc; +use crate::sys::time::SystemTime; +use crate::sys::{unsupported, Void}; +use crate::sys_common::os_str_bytes::OsStrExt; + +pub use crate::sys_common::fs::copy; +//pub use crate::sys_common::fs::remove_dir_all; + +fn cstr(path: &Path) -> io::Result<CString> { + Ok(CString::new(path.as_os_str().as_bytes())?) 
+} + +#[derive(Debug)] +pub struct File(FileDesc); + +pub struct FileAttr(Void); + +pub struct ReadDir(Void); + +pub struct DirEntry(Void); + +#[derive(Clone, Debug)] +pub struct OpenOptions { + // generic + read: bool, + write: bool, + append: bool, + truncate: bool, + create: bool, + create_new: bool, + // system-specific + mode: i32, +} + +pub struct FilePermissions(Void); + +pub struct FileType(Void); + +#[derive(Debug)] +pub struct DirBuilder {} + +impl FileAttr { + pub fn size(&self) -> u64 { + match self.0 {} + } + + pub fn perm(&self) -> FilePermissions { + match self.0 {} + } + + pub fn file_type(&self) -> FileType { + match self.0 {} + } + + pub fn modified(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn created(&self) -> io::Result<SystemTime> { + match self.0 {} + } +} + +impl Clone for FileAttr { + fn clone(&self) -> FileAttr { + match self.0 {} + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + match self.0 {} + } + + pub fn set_readonly(&mut self, _readonly: bool) { + match self.0 {} + } +} + +impl Clone for FilePermissions { + fn clone(&self) -> FilePermissions { + match self.0 {} + } +} + +impl PartialEq for FilePermissions { + fn eq(&self, _other: &FilePermissions) -> bool { + match self.0 {} + } +} + +impl Eq for FilePermissions {} + +impl fmt::Debug for FilePermissions { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + match self.0 {} + } + + pub fn is_file(&self) -> bool { + match self.0 {} + } + + pub fn is_symlink(&self) -> bool { + match self.0 {} + } +} + +impl Clone for FileType { + fn clone(&self) -> FileType { + match self.0 {} + } +} + +impl Copy for FileType {} + +impl PartialEq for FileType { + fn eq(&self, _other: &FileType) -> bool { + match self.0 {} + } +} + +impl Eq for FileType {} + +impl Hash for FileType { + fn hash<H: Hasher>(&self, _h: &mut H) { + match self.0 {} + } +} + +impl fmt::Debug for FileType { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + fn next(&mut self) -> Option<io::Result<DirEntry>> { + match self.0 {} + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + match self.0 {} + } + + pub fn file_name(&self) -> OsString { + match self.0 {} + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn file_type(&self) -> io::Result<FileType> { + match self.0 {} + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions { + // generic + read: false, + write: false, + append: false, + truncate: false, + create: false, + create_new: false, + // system-specific + mode: 0x777, + } + } + + pub fn read(&mut self, read: bool) { + self.read = read; + } + pub fn write(&mut self, write: bool) { + self.write = write; + } + pub fn append(&mut self, append: bool) { + self.append = append; + } + pub fn truncate(&mut self, truncate: bool) { + self.truncate = truncate; + } + pub fn create(&mut self, create: bool) { + self.create = create; + } + pub fn create_new(&mut self, create_new: bool) { + self.create_new = create_new; + } + + fn get_access_mode(&self) -> io::Result<i32> { + match (self.read, self.write, self.append) { + (true, false, false) => Ok(O_RDONLY), + (false, 
true, false) => Ok(O_WRONLY), + (true, true, false) => Ok(O_RDWR), + (false, _, true) => Ok(O_WRONLY | O_APPEND), + (true, _, true) => Ok(O_RDWR | O_APPEND), + (false, false, false) => { + Err(io::Error::new(ErrorKind::InvalidInput, "invalid access mode")) + } + } + } + + fn get_creation_mode(&self) -> io::Result<i32> { + match (self.write, self.append) { + (true, false) => {} + (false, false) => { + if self.truncate || self.create || self.create_new { + return Err(io::Error::new(ErrorKind::InvalidInput, "invalid creation mode")); + } + } + (_, true) => { + if self.truncate && !self.create_new { + return Err(io::Error::new(ErrorKind::InvalidInput, "invalid creation mode")); + } + } + } + + Ok(match (self.create, self.truncate, self.create_new) { + (false, false, false) => 0, + (true, false, false) => O_CREAT, + (false, true, false) => O_TRUNC, + (true, true, false) => O_CREAT | O_TRUNC, + (_, _, true) => O_CREAT | O_EXCL, + }) + } +} + +impl File { + pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { + let path = cstr(path)?; + File::open_c(&path, opts) + } + + pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { + let mut flags = opts.get_access_mode()?; + flags = flags | opts.get_creation_mode()?; + + let mode; + if flags & O_CREAT == O_CREAT { + mode = opts.mode; + } else { + mode = 0; + } + + let fd = unsafe { cvt(abi::open(path.as_ptr(), flags, mode))? }; + Ok(File(FileDesc::new(fd as i32))) + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + Err(Error::from_raw_os_error(22)) + } + + pub fn fsync(&self) -> io::Result<()> { + Err(Error::from_raw_os_error(22)) + } + + pub fn datasync(&self) -> io::Result<()> { + self.fsync() + } + + pub fn truncate(&self, _size: u64) -> io::Result<()> { + Err(Error::from_raw_os_error(22)) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + crate::io::default_read_vectored(|buf| self.read(buf), bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + false + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + crate::io::default_write_vectored(|buf| self.write(buf), bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + false + } + + pub fn flush(&self) -> io::Result<()> { + Ok(()) + } + + pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> { + Err(Error::from_raw_os_error(22)) + } + + pub fn duplicate(&self) -> io::Result<File> { + Err(Error::from_raw_os_error(22)) + } + + pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> { + Err(Error::from_raw_os_error(22)) + } + + pub fn diverge(&self) -> ! { + loop {} + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder {} + } + + pub fn mkdir(&self, _p: &Path) -> io::Result<()> { + unsupported() + } +} + +pub fn readdir(_p: &Path) -> io::Result<ReadDir> { + unsupported() +} + +pub fn unlink(path: &Path) -> io::Result<()> { + let name = cstr(path)?; + let _ = unsafe { cvt(abi::unlink(name.as_ptr()))? 
}; + Ok(()) +} + +pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { + unsupported() +} + +pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> { + match perm.0 {} +} + +pub fn rmdir(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn remove_dir_all(_path: &Path) -> io::Result<()> { + //unsupported() + Ok(()) +} + +pub fn readlink(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn stat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn lstat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} diff --git a/library/std/src/sys/hermit/io.rs b/library/std/src/sys/hermit/io.rs new file mode 100644 index 00000000000..d5f475b4310 --- /dev/null +++ b/library/std/src/sys/hermit/io.rs @@ -0,0 +1,47 @@ +use crate::mem; + +#[derive(Copy, Clone)] +pub struct IoSlice<'a>(&'a [u8]); + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + self.0 = &self.0[n..] + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } +} + +pub struct IoSliceMut<'a>(&'a mut [u8]); + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + let slice = mem::replace(&mut self.0, &mut []); + let (_, remaining) = slice.split_at_mut(n); + self.0 = remaining; + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + self.0 + } +} diff --git a/library/std/src/sys/hermit/memchr.rs b/library/std/src/sys/hermit/memchr.rs new file mode 100644 index 00000000000..9967482197e --- /dev/null +++ b/library/std/src/sys/hermit/memchr.rs @@ -0,0 +1 @@ +pub use core::slice::memchr::{memchr, memrchr}; diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs new file mode 100644 index 00000000000..675b82ceb77 --- /dev/null +++ b/library/std/src/sys/hermit/mod.rs @@ -0,0 +1,146 @@ +//! System bindings for HermitCore +//! +//! This module contains the facade (aka platform-specific) implementations of +//! OS level functionality for HermitCore. +//! +//! This is all super highly experimental and not actually intended for +//! wide/production use yet, it's still all in the experimental category. This +//! will likely change over time. +//! +//! Currently all functions here are basically stubs that immediately return +//! errors. The hope is that with a portability lint we can turn actually just +//! remove all this and just omit parts of the standard library if we're +//! compiling for wasm. That way it's a compile time error for something that's +//! guaranteed to be a runtime error! 
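+//!
+//! Most of the stubbed operations funnel through the `unsupported()` helper
+//! defined further down in this module, so a typical failure (sketch only)
+//! looks like:
+//!
+//! ```ignore
+//! // Directory iteration is one of the unimplemented shims, so the caller
+//! // just sees an `io::Error` of kind `Other` with the message
+//! // "operation not supported on HermitCore yet".
+//! let err = std::fs::read_dir("/").unwrap_err();
+//! assert_eq!(err.kind(), std::io::ErrorKind::Other);
+//! ```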
+ +use crate::intrinsics; +use crate::os::raw::c_char; + +pub mod alloc; +pub mod args; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod ext; +pub mod fd; +pub mod fs; +pub mod io; +pub mod memchr; +pub mod mutex; +pub mod net; +pub mod os; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +pub mod thread_local_dtor; +pub mod thread_local_key; +pub mod time; + +use crate::io::ErrorKind; +pub use crate::sys_common::os_str_bytes as os_str; + +#[allow(unused_extern_crates)] +pub extern crate hermit_abi as abi; + +pub fn unsupported<T>() -> crate::io::Result<T> { + Err(unsupported_err()) +} + +pub fn unsupported_err() -> crate::io::Error { + crate::io::Error::new(crate::io::ErrorKind::Other, "operation not supported on HermitCore yet") +} + +// This enum is used as the storage for a bunch of types which can't actually +// exist. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Void {} + +pub unsafe fn strlen(start: *const c_char) -> usize { + let mut str = start; + + while *str != 0 { + str = str.offset(1); + } + + (str as usize) - (start as usize) +} + +#[no_mangle] +pub extern "C" fn floor(x: f64) -> f64 { + unsafe { intrinsics::floorf64(x) } +} + +pub fn abort_internal() -> ! { + unsafe { + abi::abort(); + } +} + +// FIXME: just a workaround to test the system +pub fn hashmap_random_keys() -> (u64, u64) { + (1, 2) +} + +// This function is needed by the panic runtime. The symbol is named in +// pre-link args for the target specification, so keep that in sync. +#[cfg(not(test))] +#[no_mangle] +// NB. used by both libunwind and libpanic_abort +pub extern "C" fn __rust_abort() { + abort_internal(); +} + +#[cfg(not(test))] +pub fn init() { + let _ = net::init(); +} + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn runtime_entry( + argc: i32, + argv: *const *const c_char, + env: *const *const c_char, +) -> ! 
{ + use crate::sys::hermit::fast_thread_local::run_dtors; + extern "C" { + fn main(argc: isize, argv: *const *const c_char) -> i32; + } + + // initialize environment + os::init_environment(env as *const *const i8); + + let result = main(argc as isize, argv); + + run_dtors(); + abi::exit(result); +} + +pub fn decode_error_kind(errno: i32) -> ErrorKind { + match errno { + x if x == 13 as i32 => ErrorKind::PermissionDenied, + x if x == 98 as i32 => ErrorKind::AddrInUse, + x if x == 99 as i32 => ErrorKind::AddrNotAvailable, + x if x == 11 as i32 => ErrorKind::WouldBlock, + x if x == 103 as i32 => ErrorKind::ConnectionAborted, + x if x == 111 as i32 => ErrorKind::ConnectionRefused, + x if x == 104 as i32 => ErrorKind::ConnectionReset, + x if x == 17 as i32 => ErrorKind::AlreadyExists, + x if x == 4 as i32 => ErrorKind::Interrupted, + x if x == 22 as i32 => ErrorKind::InvalidInput, + x if x == 2 as i32 => ErrorKind::NotFound, + x if x == 107 as i32 => ErrorKind::NotConnected, + x if x == 1 as i32 => ErrorKind::PermissionDenied, + x if x == 32 as i32 => ErrorKind::BrokenPipe, + x if x == 110 as i32 => ErrorKind::TimedOut, + _ => ErrorKind::Other, + } +} + +pub fn cvt(result: i32) -> crate::io::Result<usize> { + if result < 0 { Err(crate::io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) } +} diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs new file mode 100644 index 00000000000..3d4813209cb --- /dev/null +++ b/library/std/src/sys/hermit/mutex.rs @@ -0,0 +1,77 @@ +use crate::ffi::c_void; +use crate::ptr; +use crate::sys::hermit::abi; + +pub struct Mutex { + inner: *const c_void, +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +impl Mutex { + pub const fn new() -> Mutex { + Mutex { inner: ptr::null() } + } + + #[inline] + pub unsafe fn init(&mut self) { + let _ = abi::sem_init(&mut self.inner as *mut *const c_void, 1); + } + + #[inline] + pub unsafe fn lock(&self) { + let _ = abi::sem_timedwait(self.inner, 0); + } + + #[inline] + pub unsafe fn unlock(&self) { + let _ = abi::sem_post(self.inner); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + let result = abi::sem_trywait(self.inner); + result == 0 + } + + #[inline] + pub unsafe fn destroy(&self) { + let _ = abi::sem_destroy(self.inner); + } +} + +pub struct ReentrantMutex { + inner: *const c_void, +} + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex { inner: ptr::null() } + } + + #[inline] + pub unsafe fn init(&self) { + let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _); + } + + #[inline] + pub unsafe fn lock(&self) { + let _ = abi::recmutex_lock(self.inner); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + true + } + + #[inline] + pub unsafe fn unlock(&self) { + let _ = abi::recmutex_unlock(self.inner); + } + + #[inline] + pub unsafe fn destroy(&self) { + let _ = abi::recmutex_destroy(self.inner); + } +} diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs new file mode 100644 index 00000000000..8a788a9265f --- /dev/null +++ b/library/std/src/sys/hermit/net.rs @@ -0,0 +1,473 @@ +use crate::convert::TryFrom; +use crate::fmt; +use crate::io::{self, ErrorKind, IoSlice, IoSliceMut}; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; +use crate::str; +use crate::sync::Arc; +use crate::sys::hermit::abi; +use crate::sys::hermit::abi::IpAddress::{Ipv4, Ipv6}; +use crate::sys::{unsupported, Void}; +use crate::sys_common::AsInner; 
+use crate::time::Duration; + +/// Checks whether the HermitCore's socket interface has been started already, and +/// if not, starts it. +pub fn init() -> io::Result<()> { + if abi::network_init() < 0 { + return Err(io::Error::new(ErrorKind::Other, "Unable to initialize network interface")); + } + + Ok(()) +} + +#[derive(Debug, Clone)] +pub struct Socket(abi::Handle); + +impl AsInner<abi::Handle> for Socket { + fn as_inner(&self) -> &abi::Handle { + &self.0 + } +} + +impl Drop for Socket { + fn drop(&mut self) { + let _ = abi::tcpstream::close(self.0); + } +} + +// Arc is used to count the number of used sockets. +// Only if all sockets are released, the drop +// method will close the socket. +#[derive(Clone)] +pub struct TcpStream(Arc<Socket>); + +impl TcpStream { + pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + let addr = addr?; + + match abi::tcpstream::connect(addr.ip().to_string().as_bytes(), addr.port(), None) { + Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))), + _ => { + Err(io::Error::new(ErrorKind::Other, "Unable to initiate a connection on a socket")) + } + } + } + + pub fn connect_timeout(saddr: &SocketAddr, duration: Duration) -> io::Result<TcpStream> { + match abi::tcpstream::connect( + saddr.ip().to_string().as_bytes(), + saddr.port(), + Some(duration.as_millis() as u64), + ) { + Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))), + _ => { + Err(io::Error::new(ErrorKind::Other, "Unable to initiate a connection on a socket")) + } + } + } + + pub fn set_read_timeout(&self, duration: Option<Duration>) -> io::Result<()> { + abi::tcpstream::set_read_timeout(*self.0.as_inner(), duration.map(|d| d.as_millis() as u64)) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to set timeout value")) + } + + pub fn set_write_timeout(&self, duration: Option<Duration>) -> io::Result<()> { + abi::tcpstream::set_write_timeout( + *self.0.as_inner(), + duration.map(|d| d.as_millis() as u64), + ) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to set timeout value")) + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + let duration = abi::tcpstream::get_read_timeout(*self.0.as_inner()) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to determine timeout value"))?; + + Ok(duration.map(|d| Duration::from_millis(d))) + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + let duration = abi::tcpstream::get_write_timeout(*self.0.as_inner()) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to determine timeout value"))?; + + Ok(duration.map(|d| Duration::from_millis(d))) + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + abi::tcpstream::peek(*self.0.as_inner(), buf) + .map_err(|_| io::Error::new(ErrorKind::Other, "set_nodelay failed")) + } + + pub fn read(&self, buffer: &mut [u8]) -> io::Result<usize> { + self.read_vectored(&mut [IoSliceMut::new(buffer)]) + } + + pub fn read_vectored(&self, ioslice: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + let mut size: usize = 0; + + for i in ioslice.iter_mut() { + let ret = abi::tcpstream::read(*self.0.as_inner(), &mut i[0..]) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to read on socket"))?; + + if ret != 0 { + size += ret; + } + } + + Ok(size) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn write(&self, buffer: &[u8]) -> io::Result<usize> { + self.write_vectored(&[IoSlice::new(buffer)]) + } + + pub fn write_vectored(&self, ioslice: &[IoSlice<'_>]) -> io::Result<usize> { + let mut size: usize = 0; 
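+        // Each slice is handed to the ABI in turn below; `size` accumulates the
+        // number of bytes reported as written.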
+ + for i in ioslice.iter() { + size += abi::tcpstream::write(*self.0.as_inner(), i) + .map_err(|_| io::Error::new(ErrorKind::Other, "Unable to write on socket"))?; + } + + Ok(size) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + let (ipaddr, port) = abi::tcpstream::peer_addr(*self.0.as_inner()) + .map_err(|_| io::Error::new(ErrorKind::Other, "peer_addr failed"))?; + + let saddr = match ipaddr { + Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port), + Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port), + _ => { + return Err(io::Error::new(ErrorKind::Other, "peer_addr failed")); + } + }; + + Ok(saddr) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + Err(io::Error::new(ErrorKind::Other, "socket_addr isn't supported")) + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + abi::tcpstream::shutdown(*self.0.as_inner(), how as i32) + .map_err(|_| io::Error::new(ErrorKind::Other, "unable to shutdown socket")) + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + Ok(self.clone()) + } + + pub fn set_nodelay(&self, mode: bool) -> io::Result<()> { + abi::tcpstream::set_nodelay(*self.0.as_inner(), mode) + .map_err(|_| io::Error::new(ErrorKind::Other, "set_nodelay failed")) + } + + pub fn nodelay(&self) -> io::Result<bool> { + abi::tcpstream::nodelay(*self.0.as_inner()) + .map_err(|_| io::Error::new(ErrorKind::Other, "nodelay failed")) + } + + pub fn set_ttl(&self, tll: u32) -> io::Result<()> { + abi::tcpstream::set_tll(*self.0.as_inner(), tll) + .map_err(|_| io::Error::new(ErrorKind::Other, "unable to set TTL")) + } + + pub fn ttl(&self) -> io::Result<u32> { + abi::tcpstream::get_tll(*self.0.as_inner()) + .map_err(|_| io::Error::new(ErrorKind::Other, "unable to get TTL")) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Err(io::Error::new(ErrorKind::Other, "take_error isn't supported")) + } + + pub fn set_nonblocking(&self, mode: bool) -> io::Result<()> { + abi::tcpstream::set_nonblocking(*self.0.as_inner(), mode) + .map_err(|_| io::Error::new(ErrorKind::Other, "unable to set blocking mode")) + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +#[derive(Clone)] +pub struct TcpListener(SocketAddr); + +impl TcpListener { + pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + let addr = addr?; + + Ok(TcpListener(*addr)) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + Ok(self.0) + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + let (handle, ipaddr, port) = abi::tcplistener::accept(self.0.port()) + .map_err(|_| io::Error::new(ErrorKind::Other, "accept failed"))?; + let saddr = match ipaddr { + Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port), + Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port), + _ => { + return Err(io::Error::new(ErrorKind::Other, "accept failed")); + } + }; + + Ok((TcpStream(Arc::new(Socket(handle))), saddr)) + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + Ok(self.clone()) + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn ttl(&self) -> io::Result<u32> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } 
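+
+    // Like the TTL and IPv6 accessors above, the options below are not wired up
+    // to the HermitCore ABI yet and simply report an error.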
+ + pub fn only_v6(&self) -> io::Result<bool> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct UdpSocket(abi::Handle); + +impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn broadcast(&self) -> io::Result<bool> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_ttl(&self, _: u32) -> 
io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn ttl(&self) -> io::Result<u32> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + Err(io::Error::new(ErrorKind::Other, "not supported")) + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct LookupHost(Void); + +impl LookupHost { + pub fn port(&self) -> u16 { + match self.0 {} + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + match self.0 {} + } +} + +impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(_v: &str) -> io::Result<LookupHost> { + unsupported() + } +} + +impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> { + unsupported() + } +} + +#[allow(nonstandard_style)] +pub mod netc { + pub const AF_INET: u8 = 0; + pub const AF_INET6: u8 = 1; + pub type sa_family_t = u8; + + #[derive(Copy, Clone)] + pub struct in_addr { + pub s_addr: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: u16, + pub sin_addr: in_addr, + } + + #[derive(Copy, Clone)] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: u16, + pub sin6_addr: in6_addr, + pub sin6_flowinfo: u32, + pub sin6_scope_id: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr {} + + pub type socklen_t = usize; +} diff --git a/library/std/src/sys/hermit/os.rs b/library/std/src/sys/hermit/os.rs new file mode 100644 index 00000000000..78eabf8f81e --- /dev/null +++ b/library/std/src/sys/hermit/os.rs @@ -0,0 +1,181 @@ +use crate::collections::HashMap; +use crate::error::Error as StdError; +use crate::ffi::{CStr, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::marker::PhantomData; +use crate::memchr; +use crate::path::{self, PathBuf}; +use crate::str; +use crate::sync::Mutex; +use crate::sys::hermit::abi; +use crate::sys::{unsupported, Void}; +use crate::sys_common::os_str_bytes::*; +use crate::vec; + +pub fn errno() -> i32 { + 0 +} + +pub fn error_string(_errno: i32) -> String { + "operation successful".to_string() +} + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(&'a Void); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + match *self.0 {} + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: 
Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on hermit yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on hermit yet" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} + +static mut ENV: Option<Mutex<HashMap<OsString, OsString>>> = None; + +pub fn init_environment(env: *const *const i8) { + unsafe { + ENV = Some(Mutex::new(HashMap::new())); + + if env.is_null() { + return; + } + + let mut guard = ENV.as_ref().unwrap().lock().unwrap(); + let mut environ = env; + while !(*environ).is_null() { + if let Some((key, value)) = parse(CStr::from_ptr(*environ).to_bytes()) { + guard.insert(key, value); + } + environ = environ.add(1); + } + } + + fn parse(input: &[u8]) -> Option<(OsString, OsString)> { + // Strategy (copied from glibc): Variable name and value are separated + // by an ASCII equals sign '='. Since a variable name must not be + // empty, allow variable names starting with an equals sign. Skip all + // malformed lines. + if input.is_empty() { + return None; + } + let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1); + pos.map(|p| { + ( + OsStringExt::from_vec(input[..p].to_vec()), + OsStringExt::from_vec(input[p + 1..].to_vec()), + ) + }) + } +} + +pub struct Env { + iter: vec::IntoIter<(OsString, OsString)>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +/// Returns a vector of (variable, value) byte-vector pairs for all the +/// environment variables of the current process. +pub fn env() -> Env { + unsafe { + let guard = ENV.as_ref().unwrap().lock().unwrap(); + let mut result = Vec::new(); + + for (key, value) in guard.iter() { + result.push((key.clone(), value.clone())); + } + + return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData }; + } +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + unsafe { + match ENV.as_ref().unwrap().lock().unwrap().get_mut(k) { + Some(value) => Ok(Some(value.clone())), + None => Ok(None), + } + } +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + unsafe { + let (k, v) = (k.to_owned(), v.to_owned()); + ENV.as_ref().unwrap().lock().unwrap().insert(k, v); + } + Ok(()) +} + +pub fn unsetenv(k: &OsStr) -> io::Result<()> { + unsafe { + ENV.as_ref().unwrap().lock().unwrap().remove(k); + } + Ok(()) +} + +pub fn temp_dir() -> PathBuf { + panic!("no filesystem on hermit") +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn exit(code: i32) -> ! 
{ + unsafe { + abi::exit(code); + } +} + +pub fn getpid() -> u32 { + unsafe { abi::getpid() } +} diff --git a/library/std/src/sys/hermit/path.rs b/library/std/src/sys/hermit/path.rs new file mode 100644 index 00000000000..840a7ae0426 --- /dev/null +++ b/library/std/src/sys/hermit/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/hermit/pipe.rs b/library/std/src/sys/hermit/pipe.rs new file mode 100644 index 00000000000..10d0925823e --- /dev/null +++ b/library/std/src/sys/hermit/pipe.rs @@ -0,0 +1,38 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::sys::Void; + +pub struct AnonPipe(Void); + +impl AnonPipe { + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn diverge(&self) -> ! { + match self.0 {} + } +} + +pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> { + match p1.0 {} +} diff --git a/library/std/src/sys/hermit/process.rs b/library/std/src/sys/hermit/process.rs new file mode 100644 index 00000000000..4702e5c5492 --- /dev/null +++ b/library/std/src/sys/hermit/process.rs @@ -0,0 +1,149 @@ +use crate::ffi::OsStr; +use crate::fmt; +use crate::io; +use crate::sys::fs::File; +use crate::sys::pipe::AnonPipe; +use crate::sys::{unsupported, Void}; +use crate::sys_common::process::CommandEnv; + +pub use crate::ffi::OsString as EnvKey; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + env: CommandEnv, +} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, +} + +impl Command { + pub fn new(_program: &OsStr) -> Command { + Command { env: Default::default() } + } + + pub fn arg(&mut self, _arg: &OsStr) {} + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn cwd(&mut self, _dir: &OsStr) {} + + pub fn stdin(&mut self, _stdin: Stdio) {} + + pub fn stdout(&mut self, _stdout: Stdio) {} + + pub fn stderr(&mut self, _stderr: Stdio) {} + + pub fn spawn( + &mut self, + _default: Stdio, + _needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + unsupported() + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + pipe.diverge() + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + file.diverge() + } +} + +impl fmt::Debug for Command { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct ExitStatus(Void); + +impl ExitStatus { + pub fn success(&self) -> bool { + match self.0 {} + } + + 
pub fn code(&self) -> Option<i32> { + match self.0 {} + } +} + +impl Clone for ExitStatus { + fn clone(&self) -> ExitStatus { + match self.0 {} + } +} + +impl Copy for ExitStatus {} + +impl PartialEq for ExitStatus { + fn eq(&self, _other: &ExitStatus) -> bool { + match self.0 {} + } +} + +impl Eq for ExitStatus {} + +impl fmt::Debug for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(bool); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(false); + pub const FAILURE: ExitCode = ExitCode(true); + + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +pub struct Process(Void); + +impl Process { + pub fn id(&self) -> u32 { + match self.0 {} + } + + pub fn kill(&mut self) -> io::Result<()> { + match self.0 {} + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + match self.0 {} + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + match self.0 {} + } +} diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs new file mode 100644 index 00000000000..06442e925f4 --- /dev/null +++ b/library/std/src/sys/hermit/rwlock.rs @@ -0,0 +1,144 @@ +use crate::cell::UnsafeCell; +use crate::sys::condvar::Condvar; +use crate::sys::mutex::Mutex; + +pub struct RWLock { + lock: Mutex, + cond: Condvar, + state: UnsafeCell<State>, +} + +enum State { + Unlocked, + Reading(usize), + Writing, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +// This rwlock implementation is a relatively simple implementation which has a +// condition variable for readers/writers as well as a mutex protecting the +// internal state of the lock. A current downside of the implementation is that +// unlocking the lock will notify *all* waiters rather than just readers or just +// writers. This can cause lots of "thundering stampede" problems. While +// hopefully correct this implementation is very likely to want to be changed in +// the future. 
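+//
+// As a rough sketch, the state guarded by `lock` moves like this:
+//
+//   Unlocked   --read()-->         Reading(1)  --read()-->  Reading(n)
+//   Reading(n) --read_unlock()-->  Reading(n - 1), or Unlocked + notify when n == 1
+//   Unlocked   --write()-->        Writing
+//   Writing    --write_unlock()--> Unlocked + notify
+//
+// A reader or writer that cannot make its transition blocks on `cond` and
+// retries after being notified.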
+ +impl RWLock { + pub const fn new() -> RWLock { + RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } + } + + #[inline] + pub unsafe fn read(&self) { + self.lock.lock(); + while !(*self.state.get()).inc_readers() { + self.cond.wait(&self.lock); + } + self.lock.unlock(); + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + self.lock.lock(); + let ok = (*self.state.get()).inc_readers(); + self.lock.unlock(); + return ok; + } + + #[inline] + pub unsafe fn write(&self) { + self.lock.lock(); + while !(*self.state.get()).inc_writers() { + self.cond.wait(&self.lock); + } + self.lock.unlock(); + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + self.lock.lock(); + let ok = (*self.state.get()).inc_writers(); + self.lock.unlock(); + return ok; + } + + #[inline] + pub unsafe fn read_unlock(&self) { + self.lock.lock(); + let notify = (*self.state.get()).dec_readers(); + self.lock.unlock(); + if notify { + // FIXME: should only wake up one of these some of the time + self.cond.notify_all(); + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + self.lock.lock(); + (*self.state.get()).dec_writers(); + self.lock.unlock(); + // FIXME: should only wake up one of these some of the time + self.cond.notify_all(); + } + + #[inline] + pub unsafe fn destroy(&self) { + self.lock.destroy(); + self.cond.destroy(); + } +} + +impl State { + fn inc_readers(&mut self) -> bool { + match *self { + State::Unlocked => { + *self = State::Reading(1); + true + } + State::Reading(ref mut cnt) => { + *cnt += 1; + true + } + State::Writing => false, + } + } + + fn inc_writers(&mut self) -> bool { + match *self { + State::Unlocked => { + *self = State::Writing; + true + } + State::Reading(_) | State::Writing => false, + } + } + + fn dec_readers(&mut self) -> bool { + let zero = match *self { + State::Reading(ref mut cnt) => { + *cnt -= 1; + *cnt == 0 + } + State::Unlocked | State::Writing => invalid(), + }; + if zero { + *self = State::Unlocked; + } + zero + } + + fn dec_writers(&mut self) { + match *self { + State::Writing => {} + State::Unlocked | State::Reading(_) => invalid(), + } + *self = State::Unlocked; + } +} + +fn invalid() -> ! 
{ + panic!("inconsistent rwlock"); +} diff --git a/library/std/src/sys/hermit/stack_overflow.rs b/library/std/src/sys/hermit/stack_overflow.rs new file mode 100644 index 00000000000..121fe42011d --- /dev/null +++ b/library/std/src/sys/hermit/stack_overflow.rs @@ -0,0 +1,5 @@ +#[inline] +pub unsafe fn init() {} + +#[inline] +pub unsafe fn cleanup() {} diff --git a/library/std/src/sys/hermit/stdio.rs b/library/std/src/sys/hermit/stdio.rs new file mode 100644 index 00000000000..f3654ee3871 --- /dev/null +++ b/library/std/src/sys/hermit/stdio.rs @@ -0,0 +1,120 @@ +use crate::io; +use crate::io::{IoSlice, IoSliceMut}; +use crate::sys::hermit::abi; + +pub struct Stdin; +pub struct Stdout; +pub struct Stderr; + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin) + } +} + +impl io::Read for Stdin { + fn read(&mut self, data: &mut [u8]) -> io::Result<usize> { + self.read_vectored(&mut [IoSliceMut::new(data)]) + } + + fn read_vectored(&mut self, _data: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + Ok(0) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + true + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout) + } +} + +impl io::Write for Stdout { + fn write(&mut self, data: &[u8]) -> io::Result<usize> { + let len; + + unsafe { len = abi::write(1, data.as_ptr() as *const u8, data.len()) } + + if len < 0 { + Err(io::Error::new(io::ErrorKind::Other, "Stdout is not able to print")) + } else { + Ok(len as usize) + } + } + + fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> { + let len; + + unsafe { len = abi::write(1, data.as_ptr() as *const u8, data.len()) } + + if len < 0 { + Err(io::Error::new(io::ErrorKind::Other, "Stdout is not able to print")) + } else { + Ok(len as usize) + } + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr) + } +} + +impl io::Write for Stderr { + fn write(&mut self, data: &[u8]) -> io::Result<usize> { + let len; + + unsafe { len = abi::write(2, data.as_ptr() as *const u8, data.len()) } + + if len < 0 { + Err(io::Error::new(io::ErrorKind::Other, "Stderr is not able to print")) + } else { + Ok(len as usize) + } + } + + fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> { + let len; + + unsafe { len = abi::write(2, data.as_ptr() as *const u8, data.len()) } + + if len < 0 { + Err(io::Error::new(io::ErrorKind::Other, "Stderr is not able to print")) + } else { + Ok(len as usize) + } + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub const STDIN_BUF_SIZE: usize = 0; + +pub fn is_ebadf(_err: &io::Error) -> bool { + true +} + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs new file mode 100644 index 00000000000..e11afed6687 --- /dev/null +++ b/library/std/src/sys/hermit/thread.rs @@ -0,0 +1,106 @@ +#![allow(dead_code)] + +use crate::ffi::CStr; +use crate::io; +use crate::mem; +use crate::sys::hermit::abi; +use crate::sys::hermit::fast_thread_local::run_dtors; +use crate::time::Duration; + +pub type Tid = abi::Tid; + +pub struct Thread { + tid: Tid, +} + +unsafe impl Send for Thread {} +unsafe impl Sync for Thread {} + +pub const DEFAULT_MIN_STACK_SIZE: usize = 1 << 20; + +impl Thread { + pub unsafe fn new_with_coreid( + stack: usize, + p: Box<dyn FnOnce()>, + 
core_id: isize, + ) -> io::Result<Thread> { + let p = Box::into_raw(box p); + let tid = abi::spawn2( + thread_start, + p as usize, + abi::Priority::into(abi::NORMAL_PRIO), + stack, + core_id, + ); + + return if tid == 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + drop(Box::from_raw(p)); + Err(io::Error::new(io::ErrorKind::Other, "Unable to create thread!")) + } else { + Ok(Thread { tid: tid }) + }; + + extern "C" fn thread_start(main: usize) { + unsafe { + // Finally, let's run some code. + Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + + // run all destructors + run_dtors(); + } + } + } + + pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + Thread::new_with_coreid(stack, p, -1 /* = no specific core */) + } + + #[inline] + pub fn yield_now() { + unsafe { + abi::yield_now(); + } + } + + #[inline] + pub fn set_name(_name: &CStr) { + // nope + } + + #[inline] + pub fn sleep(dur: Duration) { + unsafe { + abi::usleep(dur.as_micros() as u64); + } + } + + pub fn join(self) { + unsafe { + let _ = abi::join(self.tid); + } + } + + #[inline] + pub fn id(&self) -> Tid { + self.tid + } + + #[inline] + pub fn into_id(self) -> Tid { + let id = self.tid; + mem::forget(self); + id + } +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/hermit/thread_local_dtor.rs b/library/std/src/sys/hermit/thread_local_dtor.rs new file mode 100644 index 00000000000..9b683fce157 --- /dev/null +++ b/library/std/src/sys/hermit/thread_local_dtor.rs @@ -0,0 +1,36 @@ +#![cfg(target_thread_local)] +#![unstable(feature = "thread_local_internals", issue = "none")] + +// Simplify dtor registration by using a list of destructors. 
+// The this solution works like the implementation of macOS and +// doesn't additional OS support + +use crate::cell::Cell; +use crate::ptr; + +#[thread_local] +static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut()); + +type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>; + +pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { + if DTORS.get().is_null() { + let v: Box<List> = box Vec::new(); + DTORS.set(Box::into_raw(v)); + } + + let list: &mut List = &mut *DTORS.get(); + list.push((t, dtor)); +} + +// every thread call this function to run through all possible destructors +pub unsafe fn run_dtors() { + let mut ptr = DTORS.replace(ptr::null_mut()); + while !ptr.is_null() { + let list = Box::from_raw(ptr); + for (ptr, dtor) in list.into_iter() { + dtor(ptr); + } + ptr = DTORS.replace(ptr::null_mut()); + } +} diff --git a/library/std/src/sys/hermit/thread_local_key.rs b/library/std/src/sys/hermit/thread_local_key.rs new file mode 100644 index 00000000000..bf1b49eb83b --- /dev/null +++ b/library/std/src/sys/hermit/thread_local_key.rs @@ -0,0 +1,26 @@ +pub type Key = usize; + +#[inline] +pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + panic!("should not be used on the hermit target"); +} + +#[inline] +pub unsafe fn set(_key: Key, _value: *mut u8) { + panic!("should not be used on the hermit target"); +} + +#[inline] +pub unsafe fn get(_key: Key) -> *mut u8 { + panic!("should not be used on the hermit target"); +} + +#[inline] +pub unsafe fn destroy(_key: Key) { + panic!("should not be used on the hermit target"); +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + panic!("should not be used on the hermit target"); +} diff --git a/library/std/src/sys/hermit/time.rs b/library/std/src/sys/hermit/time.rs new file mode 100644 index 00000000000..c02de17c1fc --- /dev/null +++ b/library/std/src/sys/hermit/time.rs @@ -0,0 +1,165 @@ +#![allow(dead_code)] + +use crate::cmp::Ordering; +use crate::convert::TryInto; +use crate::sys::hermit::abi; +use crate::sys::hermit::abi::timespec; +use crate::sys::hermit::abi::{CLOCK_MONOTONIC, CLOCK_REALTIME, NSEC_PER_SEC}; +use crate::time::Duration; +use core::hash::{Hash, Hasher}; + +#[derive(Copy, Clone, Debug)] +struct Timespec { + t: timespec, +} + +impl Timespec { + const fn zero() -> Timespec { + Timespec { t: timespec { tv_sec: 0, tv_nsec: 0 } } + } + + fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> { + if self >= other { + Ok(if self.t.tv_nsec >= other.t.tv_nsec { + Duration::new( + (self.t.tv_sec - other.t.tv_sec) as u64, + (self.t.tv_nsec - other.t.tv_nsec) as u32, + ) + } else { + Duration::new( + (self.t.tv_sec - 1 - other.t.tv_sec) as u64, + self.t.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.t.tv_nsec as u32, + ) + }) + } else { + match other.sub_timespec(self) { + Ok(d) => Err(d), + Err(d) => Ok(d), + } + } + } + + fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_add(secs))?; + + // Nano calculations can't overflow because nanos are <1B which fit + // in a u32. 
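+        // (Both `subsec_nanos()` and `tv_nsec` are below 1_000_000_000, so their
+        // sum stays well under `u32::MAX`.)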
+ let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32; + if nsec >= NSEC_PER_SEC as u32 { + nsec -= NSEC_PER_SEC as u32; + secs = secs.checked_add(1)?; + } + Some(Timespec { t: timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } + + fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_sub(secs))?; + + // Similar to above, nanos can't overflow. + let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32; + if nsec < 0 { + nsec += NSEC_PER_SEC as i32; + secs = secs.checked_sub(1)?; + } + Some(Timespec { t: timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } +} + +impl PartialEq for Timespec { + fn eq(&self, other: &Timespec) -> bool { + self.t.tv_sec == other.t.tv_sec && self.t.tv_nsec == other.t.tv_nsec + } +} + +impl Eq for Timespec {} + +impl PartialOrd for Timespec { + fn partial_cmp(&self, other: &Timespec) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for Timespec { + fn cmp(&self, other: &Timespec) -> Ordering { + let me = (self.t.tv_sec, self.t.tv_nsec); + let other = (other.t.tv_sec, other.t.tv_nsec); + me.cmp(&other) + } +} + +impl Hash for Timespec { + fn hash<H: Hasher>(&self, state: &mut H) { + self.t.tv_sec.hash(state); + self.t.tv_nsec.hash(state); + } +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant { + t: Timespec, +} + +impl Instant { + pub fn now() -> Instant { + let mut time: Timespec = Timespec::zero(); + let _ = unsafe { abi::clock_gettime(CLOCK_MONOTONIC, &mut time.t as *mut timespec) }; + + Instant { t: time } + } + + pub const fn zero() -> Instant { + Instant { t: Timespec::zero() } + } + + pub fn actually_monotonic() -> bool { + true + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.t.sub_timespec(&other.t).ok() + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub_duration(other)? }) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct SystemTime { + t: Timespec, +} + +pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; + +impl SystemTime { + pub fn now() -> SystemTime { + let mut time: Timespec = Timespec::zero(); + let _ = unsafe { abi::clock_gettime(CLOCK_REALTIME, &mut time.t as *mut timespec) }; + + SystemTime { t: time } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.t.sub_timespec(&other.t) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + } +} diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs new file mode 100644 index 00000000000..7b5fac922d0 --- /dev/null +++ b/library/std/src/sys/mod.rs @@ -0,0 +1,112 @@ +//! Platform-dependent platform abstraction. +//! +//! The `std::sys` module is the abstracted interface through which +//! `std` talks to the underlying operating system. It has different +//! implementations for different operating system families, today +//! just Unix and Windows, and initial support for Redox. 
+//! +//! The centralization of platform-specific code in this module is +//! enforced by the "platform abstraction layer" tidy script in +//! `tools/tidy/src/pal.rs`. +//! +//! This module is closely related to the platform-independent system +//! integration code in `std::sys_common`. See that module's +//! documentation for details. +//! +//! In the future it would be desirable for the independent +//! implementations of this module to be extracted to their own crates +//! that `std` can link to, thus enabling their implementation +//! out-of-tree via crate replacement. Though due to the complex +//! inter-dependencies within `std` that will be a challenging goal to +//! achieve. + +#![allow(missing_debug_implementations)] + +cfg_if::cfg_if! { + if #[cfg(target_os = "vxworks")] { + mod vxworks; + pub use self::vxworks::*; + } else if #[cfg(unix)] { + mod unix; + pub use self::unix::*; + } else if #[cfg(windows)] { + mod windows; + pub use self::windows::*; + } else if #[cfg(target_os = "cloudabi")] { + mod cloudabi; + pub use self::cloudabi::*; + } else if #[cfg(target_os = "hermit")] { + mod hermit; + pub use self::hermit::*; + } else if #[cfg(target_os = "wasi")] { + mod wasi; + pub use self::wasi::*; + } else if #[cfg(target_arch = "wasm32")] { + mod wasm; + pub use self::wasm::*; + } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { + mod sgx; + pub use self::sgx::*; + } else { + mod unsupported; + pub use self::unsupported::*; + } +} + +// Import essential modules from both platforms when documenting. These are +// then later used in the `std::os` module when documenting, for example, +// Windows when we're compiling for Linux. + +#[cfg(doc)] +cfg_if::cfg_if! { + if #[cfg(unix)] { + // On unix we'll document what's already available + #[stable(feature = "rust1", since = "1.0.0")] + pub use self::ext as unix_ext; + } else if #[cfg(any(target_os = "cloudabi", + target_os = "hermit", + target_arch = "wasm32", + all(target_vendor = "fortanix", target_env = "sgx")))] { + // On CloudABI and wasm right now the module below doesn't compile + // (missing things in `libc` which is empty) so just omit everything + // with an empty module + #[unstable(issue = "none", feature = "std_internals")] + #[allow(missing_docs)] + pub mod unix_ext {} + } else { + // On other platforms like Windows document the bare bones of unix + use crate::os::linux as platform; + #[path = "unix/ext/mod.rs"] + pub mod unix_ext; + } +} + +#[cfg(doc)] +cfg_if::cfg_if! 
{ + if #[cfg(windows)] { + // On windows we'll just be documenting what's already available + #[allow(missing_docs)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use self::ext as windows_ext; + } else if #[cfg(any(target_os = "cloudabi", + target_arch = "wasm32", + all(target_vendor = "fortanix", target_env = "sgx")))] { + // On CloudABI and wasm right now the shim below doesn't compile, so + // just omit it + #[unstable(issue = "none", feature = "std_internals")] + #[allow(missing_docs)] + pub mod windows_ext {} + } else { + // On all other platforms (aka linux/osx/etc) then pull in a "minimal" + // amount of windows goop which ends up compiling + #[macro_use] + #[path = "windows/compat.rs"] + mod compat; + + #[path = "windows/c.rs"] + mod c; + + #[path = "windows/ext/mod.rs"] + pub mod windows_ext; + } +} diff --git a/library/std/src/sys/sgx/abi/entry.S b/library/std/src/sys/sgx/abi/entry.S new file mode 100644 index 00000000000..f61bcf06f08 --- /dev/null +++ b/library/std/src/sys/sgx/abi/entry.S @@ -0,0 +1,372 @@ +/* This symbol is used at runtime to figure out the virtual address that the */ +/* enclave is loaded at. */ +.section absolute +.global IMAGE_BASE +IMAGE_BASE: + +.section ".note.x86_64-fortanix-unknown-sgx", "", @note + .align 4 + .long 1f - 0f /* name length (not including padding) */ + .long 3f - 2f /* desc length (not including padding) */ + .long 1 /* type = NT_VERSION */ +0: .asciz "toolchain-version" /* name */ +1: .align 4 +2: .long 1 /* desc - toolchain version number, 32-bit LE */ +3: .align 4 + +.section .rodata +/* The XSAVE area needs to be a large chunk of readable memory, but since we are */ +/* going to restore everything to its initial state (XSTATE_BV=0), only certain */ +/* parts need to have a defined value. In particular: */ +/* */ +/* * MXCSR in the legacy area. This register is always restored if RFBM[1] or */ +/* RFBM[2] is set, regardless of the value of XSTATE_BV */ +/* * XSAVE header */ +.align 64 +.Lxsave_clear: +.org .+24 +.Lxsave_mxcsr: + .short 0x1f80 + +/* We can store a bunch of data in the gap between MXCSR and the XSAVE header */ + +/* The following symbols point at read-only data that will be filled in by the */ +/* post-linker. */ + +/* When using this macro, don't forget to adjust the linker version script! 
*/ +.macro globvar name:req size:req + .global \name + .protected \name + .align \size + .size \name , \size + \name : + .org .+\size +.endm + /* The base address (relative to enclave start) of the heap area */ + globvar HEAP_BASE 8 + /* The heap size in bytes */ + globvar HEAP_SIZE 8 + /* Value of the RELA entry in the dynamic table */ + globvar RELA 8 + /* Value of the RELACOUNT entry in the dynamic table */ + globvar RELACOUNT 8 + /* The enclave size in bytes */ + globvar ENCLAVE_SIZE 8 + /* The base address (relative to enclave start) of the enclave configuration area */ + globvar CFGDATA_BASE 8 + /* Non-zero if debugging is enabled, zero otherwise */ + globvar DEBUG 1 + /* The base address (relative to enclave start) of the enclave text section */ + globvar TEXT_BASE 8 + /* The size in bytes of enclacve text section */ + globvar TEXT_SIZE 8 + /* The base address (relative to enclave start) of the enclave .eh_frame_hdr section */ + globvar EH_FRM_HDR_OFFSET 8 + /* The size in bytes of enclave .eh_frame_hdr section */ + globvar EH_FRM_HDR_LEN 8 + /* The base address (relative to enclave start) of the enclave .eh_frame section */ + globvar EH_FRM_OFFSET 8 + /* The size in bytes of enclacve .eh_frame section */ + globvar EH_FRM_LEN 8 + +.org .Lxsave_clear+512 +.Lxsave_header: + .int 0, 0 /* XSTATE_BV */ + .int 0, 0 /* XCOMP_BV */ + .org .+48 /* reserved bits */ + +.data +.Laborted: + .byte 0 + +/* TCS local storage section */ +.equ tcsls_tos, 0x00 /* initialized by loader to *offset* from image base to TOS */ +.equ tcsls_flags, 0x08 /* initialized by loader */ +.equ tcsls_flag_secondary, 0 /* initialized by loader; 0 = standard TCS, 1 = secondary TCS */ +.equ tcsls_flag_init_once, 1 /* initialized by loader to 0 */ +/* 14 unused bits */ +.equ tcsls_user_fcw, 0x0a +.equ tcsls_user_mxcsr, 0x0c +.equ tcsls_last_rsp, 0x10 /* initialized by loader to 0 */ +.equ tcsls_panic_last_rsp, 0x18 /* initialized by loader to 0 */ +.equ tcsls_debug_panic_buf_ptr, 0x20 /* initialized by loader to 0 */ +.equ tcsls_user_rsp, 0x28 +.equ tcsls_user_retip, 0x30 +.equ tcsls_user_rbp, 0x38 +.equ tcsls_user_r12, 0x40 +.equ tcsls_user_r13, 0x48 +.equ tcsls_user_r14, 0x50 +.equ tcsls_user_r15, 0x58 +.equ tcsls_tls_ptr, 0x60 +.equ tcsls_tcs_addr, 0x68 + +.macro load_tcsls_flag_secondary_bool reg:req comments:vararg + .ifne tcsls_flag_secondary /* to convert to a bool, must be the first bit */ + .abort + .endif + mov $(1<<tcsls_flag_secondary),%e\reg + and %gs:tcsls_flags,%\reg +.endm + +/* We place the ELF entry point in a separate section so it can be removed by + elf2sgxs */ +.section .text_no_sgx, "ax" +.Lelf_entry_error_msg: + .ascii "Error: This file is an SGX enclave which cannot be executed as a standard Linux binary.\nSee the installation guide at https://edp.fortanix.com/docs/installation/guide/ on how to use 'cargo run' or follow the steps at https://edp.fortanix.com/docs/tasks/deployment/ for manual deployment.\n" +.Lelf_entry_error_msg_end: + +.global elf_entry +.type elf_entry,function +elf_entry: +/* print error message */ + movq $2,%rdi /* write to stderr (fd 2) */ + lea .Lelf_entry_error_msg(%rip),%rsi + movq $.Lelf_entry_error_msg_end-.Lelf_entry_error_msg,%rdx +.Lelf_entry_call: + movq $1,%rax /* write() syscall */ + syscall + test %rax,%rax + jle .Lelf_exit /* exit on error */ + add %rax,%rsi + sub %rax,%rdx /* all chars written? 
*/ + jnz .Lelf_entry_call + +.Lelf_exit: + movq $60,%rax /* exit() syscall */ + movq $1,%rdi /* exit code 1 */ + syscall + ud2 /* should not be reached */ +/* end elf_entry */ + +/* This code needs to be called *after* the enclave stack has been setup. */ +/* There are 3 places where this needs to happen, so this is put in a macro. */ +.macro entry_sanitize_final +/* Sanitize rflags received from user */ +/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ +/* - AC flag: AEX on misaligned memory accesses leaks side channel info */ + pushfq + andq $~0x40400, (%rsp) + popfq +/* check for abort */ + bt $0,.Laborted(%rip) + jc .Lreentry_panic +.endm + +.text +.global sgx_entry +.type sgx_entry,function +sgx_entry: +/* save user registers */ + mov %rcx,%gs:tcsls_user_retip + mov %rsp,%gs:tcsls_user_rsp + mov %rbp,%gs:tcsls_user_rbp + mov %r12,%gs:tcsls_user_r12 + mov %r13,%gs:tcsls_user_r13 + mov %r14,%gs:tcsls_user_r14 + mov %r15,%gs:tcsls_user_r15 + mov %rbx,%gs:tcsls_tcs_addr + stmxcsr %gs:tcsls_user_mxcsr + fnstcw %gs:tcsls_user_fcw + +/* check for debug buffer pointer */ + testb $0xff,DEBUG(%rip) + jz .Lskip_debug_init + mov %r10,%gs:tcsls_debug_panic_buf_ptr +.Lskip_debug_init: +/* reset cpu state */ + mov %rdx, %r10 + mov $-1, %rax + mov $-1, %rdx + xrstor .Lxsave_clear(%rip) + mov %r10, %rdx + +/* check if returning from usercall */ + mov %gs:tcsls_last_rsp,%r11 + test %r11,%r11 + jnz .Lusercall_ret +/* setup stack */ + mov %gs:tcsls_tos,%rsp /* initially, RSP is not set to the correct value */ + /* here. This is fixed below under "adjust stack". */ +/* check for thread init */ + bts $tcsls_flag_init_once,%gs:tcsls_flags + jc .Lskip_init +/* adjust stack */ + lea IMAGE_BASE(%rip),%rax + add %rax,%rsp + mov %rsp,%gs:tcsls_tos + entry_sanitize_final +/* call tcs_init */ +/* store caller-saved registers in callee-saved registers */ + mov %rdi,%rbx + mov %rsi,%r12 + mov %rdx,%r13 + mov %r8,%r14 + mov %r9,%r15 + load_tcsls_flag_secondary_bool di /* RDI = tcs_init() argument: secondary: bool */ + call tcs_init +/* reload caller-saved registers */ + mov %rbx,%rdi + mov %r12,%rsi + mov %r13,%rdx + mov %r14,%r8 + mov %r15,%r9 + jmp .Lafter_init +.Lskip_init: + entry_sanitize_final +.Lafter_init: +/* call into main entry point */ + load_tcsls_flag_secondary_bool cx /* RCX = entry() argument: secondary: bool */ + call entry /* RDI, RSI, RDX, R8, R9 passed in from userspace */ + mov %rax,%rsi /* RSI = return value */ + /* NOP: mov %rdx,%rdx */ /* RDX = return value */ + xor %rdi,%rdi /* RDI = normal exit */ +.Lexit: +/* clear general purpose register state */ + /* RAX overwritten by ENCLU */ + /* RBX set later */ + /* RCX overwritten by ENCLU */ + /* RDX contains return value */ + /* RSP set later */ + /* RBP set later */ + /* RDI contains exit mode */ + /* RSI contains return value */ + xor %r8,%r8 + xor %r9,%r9 + xor %r10,%r10 + xor %r11,%r11 + /* R12 ~ R15 set by sgx_exit */ +.Lsgx_exit: +/* clear extended register state */ + mov %rdx, %rcx /* save RDX */ + mov $-1, %rax + mov %rax, %rdx + xrstor .Lxsave_clear(%rip) + mov %rcx, %rdx /* restore RDX */ +/* clear flags */ + pushq $0 + popfq +/* restore user registers */ + mov %gs:tcsls_user_r12,%r12 + mov %gs:tcsls_user_r13,%r13 + mov %gs:tcsls_user_r14,%r14 + mov %gs:tcsls_user_r15,%r15 + mov %gs:tcsls_user_retip,%rbx + mov %gs:tcsls_user_rsp,%rsp + mov %gs:tcsls_user_rbp,%rbp + fldcw %gs:tcsls_user_fcw + ldmxcsr %gs:tcsls_user_mxcsr +/* exit enclave */ + mov $0x4,%eax /* EEXIT */ + enclu +/* end sgx_entry */ + 
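+/* Reached from entry_sanitize_final when the enclave is re-entered after */
+/* the abort flag has been set: adjust the stack pointer and divert into */
+/* abort_reentry, which must not return. */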
+.Lreentry_panic: + orq $8,%rsp + jmp abort_reentry + +/* This *MUST* be called with 6 parameters, otherwise register information */ +/* might leak! */ +.global usercall +usercall: + test %rcx,%rcx /* check `abort` function argument */ + jnz .Lusercall_abort /* abort is set, jump to abort code (unlikely forward conditional) */ + jmp .Lusercall_save_state /* non-aborting usercall */ +.Lusercall_abort: +/* set aborted bit */ + movb $1,.Laborted(%rip) +/* save registers in DEBUG mode, so that debugger can reconstruct the stack */ + testb $0xff,DEBUG(%rip) + jz .Lusercall_noreturn +.Lusercall_save_state: +/* save callee-saved state */ + push %r15 + push %r14 + push %r13 + push %r12 + push %rbp + push %rbx + sub $8, %rsp + fstcw 4(%rsp) + stmxcsr (%rsp) + movq %rsp,%gs:tcsls_last_rsp +.Lusercall_noreturn: +/* clear general purpose register state */ + /* RAX overwritten by ENCLU */ + /* RBX set by sgx_exit */ + /* RCX overwritten by ENCLU */ + /* RDX contains parameter */ + /* RSP set by sgx_exit */ + /* RBP set by sgx_exit */ + /* RDI contains parameter */ + /* RSI contains parameter */ + /* R8 contains parameter */ + /* R9 contains parameter */ + xor %r10,%r10 + xor %r11,%r11 + /* R12 ~ R15 set by sgx_exit */ +/* extended registers/flags cleared by sgx_exit */ +/* exit */ + jmp .Lsgx_exit +.Lusercall_ret: + movq $0,%gs:tcsls_last_rsp +/* restore callee-saved state, cf. "save" above */ + mov %r11,%rsp + ldmxcsr (%rsp) + fldcw 4(%rsp) + add $8, %rsp + entry_sanitize_final + pop %rbx + pop %rbp + pop %r12 + pop %r13 + pop %r14 + pop %r15 +/* return */ + mov %rsi,%rax /* RAX = return value */ + /* NOP: mov %rdx,%rdx */ /* RDX = return value */ + pop %r11 + lfence + jmp *%r11 + +/* +The following functions need to be defined externally: +``` +// Called by entry code on re-entry after exit +extern "C" fn abort_reentry() -> !; + +// Called once when a TCS is first entered +extern "C" fn tcs_init(secondary: bool); + +// Standard TCS entrypoint +extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> (u64, u64); +``` +*/ + +.global get_tcs_addr +get_tcs_addr: + mov %gs:tcsls_tcs_addr,%rax + pop %r11 + lfence + jmp *%r11 + +.global get_tls_ptr +get_tls_ptr: + mov %gs:tcsls_tls_ptr,%rax + pop %r11 + lfence + jmp *%r11 + +.global set_tls_ptr +set_tls_ptr: + mov %rdi,%gs:tcsls_tls_ptr + pop %r11 + lfence + jmp *%r11 + +.global take_debug_panic_buf_ptr +take_debug_panic_buf_ptr: + xor %rax,%rax + xchg %gs:tcsls_debug_panic_buf_ptr,%rax + pop %r11 + lfence + jmp *%r11 diff --git a/library/std/src/sys/sgx/abi/mem.rs b/library/std/src/sys/sgx/abi/mem.rs new file mode 100644 index 00000000000..57fd7efdd49 --- /dev/null +++ b/library/std/src/sys/sgx/abi/mem.rs @@ -0,0 +1,47 @@ +// Do not remove inline: will result in relocation failure +#[inline(always)] +pub(crate) unsafe fn rel_ptr<T>(offset: u64) -> *const T { + (image_base() + offset) as *const T +} + +// Do not remove inline: will result in relocation failure +#[inline(always)] +pub(crate) unsafe fn rel_ptr_mut<T>(offset: u64) -> *mut T { + (image_base() + offset) as *mut T +} + +extern "C" { + static ENCLAVE_SIZE: usize; +} + +// Do not remove inline: will result in relocation failure +// For the same reason we use inline ASM here instead of an extern static to +// locate the base +/// Returns address at which current enclave is loaded. 
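+///
+/// A small sketch (not a doctest; `p` stands for any pointer that is known to
+/// point into the enclave image):
+///
+/// ```ignore
+/// let offset = (p as u64) - image_base(); // image-relative offset of `p`
+/// ```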
+#[inline(always)] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn image_base() -> u64 { + let base; + unsafe { llvm_asm!("lea IMAGE_BASE(%rip),$0":"=r"(base)) }; + base +} + +/// Returns `true` if the specified memory range is in the enclave. +/// +/// `p + len` must not overflow. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn is_enclave_range(p: *const u8, len: usize) -> bool { + let start = p as u64; + let end = start + (len as u64); + start >= image_base() && end <= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant +} + +/// Returns `true` if the specified memory range is in userspace. +/// +/// `p + len` must not overflow. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn is_user_range(p: *const u8, len: usize) -> bool { + let start = p as u64; + let end = start + (len as u64); + end <= image_base() || start >= image_base() + (unsafe { ENCLAVE_SIZE } as u64) // unsafe ok: link-time constant +} diff --git a/library/std/src/sys/sgx/abi/mod.rs b/library/std/src/sys/sgx/abi/mod.rs new file mode 100644 index 00000000000..b0693b63a48 --- /dev/null +++ b/library/std/src/sys/sgx/abi/mod.rs @@ -0,0 +1,105 @@ +#![cfg_attr(test, allow(unused))] // RT initialization logic is not compiled for test + +use crate::io::Write; +use core::sync::atomic::{AtomicUsize, Ordering}; + +// runtime features +pub(super) mod panic; +mod reloc; + +// library features +pub mod mem; +pub mod thread; +pub mod tls; +#[macro_use] +pub mod usercalls; + +#[cfg(not(test))] +global_asm!(include_str!("entry.S")); + +#[repr(C)] +struct EntryReturn(u64, u64); + +#[cfg(not(test))] +#[no_mangle] +unsafe extern "C" fn tcs_init(secondary: bool) { + // Be very careful when changing this code: it runs before the binary has been + // relocated. Any indirect accesses to symbols will likely fail. + const UNINIT: usize = 0; + const BUSY: usize = 1; + const DONE: usize = 2; + // Three-state spin-lock + static RELOC_STATE: AtomicUsize = AtomicUsize::new(UNINIT); + + if secondary && RELOC_STATE.load(Ordering::Relaxed) != DONE { + rtabort!("Entered secondary TCS before main TCS!") + } + + // Try to atomically swap UNINIT with BUSY. The returned state can be: + match RELOC_STATE.compare_and_swap(UNINIT, BUSY, Ordering::Acquire) { + // This thread just obtained the lock and other threads will observe BUSY + UNINIT => { + reloc::relocate_elf_rela(); + RELOC_STATE.store(DONE, Ordering::Release); + } + // We need to wait until the initialization is done. + BUSY => { + while RELOC_STATE.load(Ordering::Acquire) == BUSY { + core::arch::x86_64::_mm_pause() + } + } + // Initialization is done. + DONE => {} + _ => unreachable!(), + } +} + +// FIXME: this item should only exist if this is linked into an executable +// (main function exists). If this is a library, the crate author should be +// able to specify this +#[cfg(not(test))] +#[no_mangle] +extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> EntryReturn { + // FIXME: how to support TLS in library mode? + let tls = Box::new(tls::Tls::new()); + let _tls_guard = unsafe { tls.activate() }; + + if secondary { + super::thread::Thread::entry(); + + EntryReturn(0, 0) + } else { + extern "C" { + fn main(argc: isize, argv: *const *const u8) -> isize; + } + + // check entry is being called according to ABI + rtassert!(p3 == 0); + rtassert!(p4 == 0); + rtassert!(p5 == 0); + + unsafe { + // The actual types of these arguments are `p1: *const Arg, p2: + // usize`. 
We can't currently customize the argument list of Rust's + // main function, so we pass these in as the standard pointer-sized + // values in `argc` and `argv`. + let ret = main(p2 as _, p1 as _); + exit_with_code(ret) + } + } +} + +pub(super) fn exit_with_code(code: isize) -> ! { + if code != 0 { + if let Some(mut out) = panic::SgxPanicOutput::new() { + let _ = write!(out, "Exited with status code {}", code); + } + } + usercalls::exit(code != 0); +} + +#[cfg(not(test))] +#[no_mangle] +extern "C" fn abort_reentry() -> ! { + usercalls::exit(false) +} diff --git a/library/std/src/sys/sgx/abi/panic.rs b/library/std/src/sys/sgx/abi/panic.rs new file mode 100644 index 00000000000..229b3b3291f --- /dev/null +++ b/library/std/src/sys/sgx/abi/panic.rs @@ -0,0 +1,42 @@ +use super::usercalls::alloc::UserRef; +use crate::cmp; +use crate::io::{self, Write}; +use crate::mem; + +extern "C" { + fn take_debug_panic_buf_ptr() -> *mut u8; + static DEBUG: u8; +} + +pub(crate) struct SgxPanicOutput(Option<&'static mut UserRef<[u8]>>); + +fn empty_user_slice() -> &'static mut UserRef<[u8]> { + unsafe { UserRef::from_raw_parts_mut(1 as *mut u8, 0) } +} + +impl SgxPanicOutput { + pub(crate) fn new() -> Option<Self> { + if unsafe { DEBUG == 0 } { None } else { Some(SgxPanicOutput(None)) } + } + + fn init(&mut self) -> &mut &'static mut UserRef<[u8]> { + self.0.get_or_insert_with(|| unsafe { + let ptr = take_debug_panic_buf_ptr(); + if ptr.is_null() { empty_user_slice() } else { UserRef::from_raw_parts_mut(ptr, 1024) } + }) + } +} + +impl Write for SgxPanicOutput { + fn write(&mut self, src: &[u8]) -> io::Result<usize> { + let dst = mem::replace(self.init(), empty_user_slice()); + let written = cmp::min(src.len(), dst.len()); + dst[..written].copy_from_enclave(&src[..written]); + self.0 = Some(&mut dst[written..]); + Ok(written) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} diff --git a/library/std/src/sys/sgx/abi/reloc.rs b/library/std/src/sys/sgx/abi/reloc.rs new file mode 100644 index 00000000000..02dff0ad29f --- /dev/null +++ b/library/std/src/sys/sgx/abi/reloc.rs @@ -0,0 +1,32 @@ +use super::mem; +use crate::slice::from_raw_parts; + +const R_X86_64_RELATIVE: u32 = 8; + +#[repr(packed)] +struct Rela<T> { + offset: T, + info: T, + addend: T, +} + +pub fn relocate_elf_rela() { + extern "C" { + static RELA: u64; + static RELACOUNT: usize; + } + + if unsafe { RELACOUNT } == 0 { + return; + } // unsafe ok: link-time constant + + let relas = unsafe { + from_raw_parts::<Rela<u64>>(mem::rel_ptr(RELA), RELACOUNT) // unsafe ok: link-time constant + }; + for rela in relas { + if rela.info != (/*0 << 32 |*/R_X86_64_RELATIVE as u64) { + rtabort!("Invalid relocation"); + } + unsafe { *mem::rel_ptr_mut::<*const ()>(rela.offset) = mem::rel_ptr(rela.addend) }; + } +} diff --git a/library/std/src/sys/sgx/abi/thread.rs b/library/std/src/sys/sgx/abi/thread.rs new file mode 100644 index 00000000000..ef55b821a2b --- /dev/null +++ b/library/std/src/sys/sgx/abi/thread.rs @@ -0,0 +1,13 @@ +use fortanix_sgx_abi::Tcs; + +/// Gets the ID for the current thread. The ID is guaranteed to be unique among +/// all currently running threads in the enclave, and it is guaranteed to be +/// constant for the lifetime of the thread. More specifically for SGX, there +/// is a one-to-one correspondence of the ID to the address of the TCS. 
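+///
+/// Illustrative sketch (not a doctest): because the value is constant per
+/// thread, it can be captured once and reused as a cheap thread token.
+///
+/// ```ignore
+/// let tcs = current(); // same TCS address on every later call from this thread
+/// ```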
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn current() -> Tcs { + extern "C" { + fn get_tcs_addr() -> Tcs; + } + unsafe { get_tcs_addr() } +} diff --git a/library/std/src/sys/sgx/abi/tls.rs b/library/std/src/sys/sgx/abi/tls.rs new file mode 100644 index 00000000000..2b0485c4f03 --- /dev/null +++ b/library/std/src/sys/sgx/abi/tls.rs @@ -0,0 +1,241 @@ +use self::sync_bitset::*; +use crate::cell::Cell; +use crate::mem; +use crate::num::NonZeroUsize; +use crate::ptr; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +#[cfg(target_pointer_width = "64")] +const USIZE_BITS: usize = 64; +const TLS_KEYS: usize = 128; // Same as POSIX minimum +const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS; + +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"] +static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT; +macro_rules! dup { + ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* )); + (() $($val:tt)*) => ([$($val),*]) +} +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"] +static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0))); + +extern "C" { + fn get_tls_ptr() -> *const u8; + fn set_tls_ptr(tls: *const u8); +} + +#[derive(Copy, Clone)] +#[repr(C)] +pub struct Key(NonZeroUsize); + +impl Key { + fn to_index(self) -> usize { + self.0.get() - 1 + } + + fn from_index(index: usize) -> Self { + Key(NonZeroUsize::new(index + 1).unwrap()) + } + + pub fn as_usize(self) -> usize { + self.0.get() + } + + pub fn from_usize(index: usize) -> Self { + Key(NonZeroUsize::new(index).unwrap()) + } +} + +#[repr(C)] +pub struct Tls { + data: [Cell<*mut u8>; TLS_KEYS], +} + +pub struct ActiveTls<'a> { + tls: &'a Tls, +} + +impl<'a> Drop for ActiveTls<'a> { + fn drop(&mut self) { + let value_with_destructor = |key: usize| { + let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed); + unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) } + .map(|dtor| (&self.tls.data[key], dtor)) + }; + + let mut any_non_null_dtor = true; + while any_non_null_dtor { + any_non_null_dtor = false; + for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) { + let value = value.replace(ptr::null_mut()); + if !value.is_null() { + any_non_null_dtor = true; + unsafe { dtor(value) } + } + } + } + } +} + +impl Tls { + pub fn new() -> Tls { + Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) } + } + + pub unsafe fn activate(&self) -> ActiveTls<'_> { + set_tls_ptr(self as *const Tls as _); + ActiveTls { tls: self } + } + + #[allow(unused)] + pub unsafe fn activate_persistent(self: Box<Self>) { + set_tls_ptr((&*self) as *const Tls as _); + mem::forget(self); + } + + unsafe fn current<'a>() -> &'a Tls { + &*(get_tls_ptr() as *const Tls) + } + + pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + let index = if let Some(index) = TLS_KEY_IN_USE.set() { + index + } else { + rtabort!("TLS limit exceeded") + }; + TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed); + Key::from_index(index) + } + + pub fn set(key: Key, value: *mut u8) { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() }.data[index].set(value); + } + + pub fn get(key: Key) -> *mut u8 { + let index = key.to_index(); + rtassert!(TLS_KEY_IN_USE.get(index)); + unsafe { Self::current() 
}.data[index].get() + } + + pub fn destroy(key: Key) { + TLS_KEY_IN_USE.clear(key.to_index()); + } +} + +mod sync_bitset { + use super::{TLS_KEYS_BITSET_SIZE, USIZE_BITS}; + use crate::iter::{Enumerate, Peekable}; + use crate::slice::Iter; + use crate::sync::atomic::{AtomicUsize, Ordering}; + + /// A bitset that can be used synchronously. + pub(super) struct SyncBitset([AtomicUsize; TLS_KEYS_BITSET_SIZE]); + + pub(super) const SYNC_BITSET_INIT: SyncBitset = + SyncBitset([AtomicUsize::new(0), AtomicUsize::new(0)]); + + impl SyncBitset { + pub fn get(&self, index: usize) -> bool { + let (hi, lo) = Self::split(index); + (self.0[hi].load(Ordering::Relaxed) & lo) != 0 + } + + /// Not atomic. + pub fn iter(&self) -> SyncBitsetIter<'_> { + SyncBitsetIter { iter: self.0.iter().enumerate().peekable(), elem_idx: 0 } + } + + pub fn clear(&self, index: usize) { + let (hi, lo) = Self::split(index); + self.0[hi].fetch_and(!lo, Ordering::Relaxed); + } + + /// Sets any unset bit. Not atomic. Returns `None` if all bits were + /// observed to be set. + pub fn set(&self) -> Option<usize> { + 'elems: for (idx, elem) in self.0.iter().enumerate() { + let mut current = elem.load(Ordering::Relaxed); + loop { + if 0 == !current { + continue 'elems; + } + let trailing_ones = (!current).trailing_zeros() as usize; + match elem.compare_exchange( + current, + current | (1 << trailing_ones), + Ordering::AcqRel, + Ordering::Relaxed, + ) { + Ok(_) => return Some(idx * USIZE_BITS + trailing_ones), + Err(previous) => current = previous, + } + } + } + None + } + + fn split(index: usize) -> (usize, usize) { + (index / USIZE_BITS, 1 << (index % USIZE_BITS)) + } + } + + pub(super) struct SyncBitsetIter<'a> { + iter: Peekable<Enumerate<Iter<'a, AtomicUsize>>>, + elem_idx: usize, + } + + impl<'a> Iterator for SyncBitsetIter<'a> { + type Item = usize; + + fn next(&mut self) -> Option<usize> { + self.iter.peek().cloned().and_then(|(idx, elem)| { + let elem = elem.load(Ordering::Relaxed); + let low_mask = (1 << self.elem_idx) - 1; + let next = elem & !low_mask; + let next_idx = next.trailing_zeros() as usize; + self.elem_idx = next_idx + 1; + if self.elem_idx >= 64 { + self.elem_idx = 0; + self.iter.next(); + } + match next_idx { + 64 => self.next(), + _ => Some(idx * USIZE_BITS + next_idx), + } + }) + } + } + + #[cfg(test)] + mod tests { + use super::*; + + fn test_data(bitset: [usize; 2], bit_indices: &[usize]) { + let set = SyncBitset([AtomicUsize::new(bitset[0]), AtomicUsize::new(bitset[1])]); + assert_eq!(set.iter().collect::<Vec<_>>(), bit_indices); + for &i in bit_indices { + assert!(set.get(i)); + } + } + + #[test] + fn iter() { + test_data([0b0110_1001, 0], &[0, 3, 5, 6]); + test_data([0x8000_0000_0000_0000, 0x8000_0000_0000_0001], &[63, 64, 127]); + test_data([0, 0], &[]); + } + + #[test] + fn set_get_clear() { + let set = SYNC_BITSET_INIT; + let key = set.set().unwrap(); + assert!(set.get(key)); + set.clear(key); + assert!(!set.get(key)); + } + } +} diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs new file mode 100644 index 00000000000..76a9b427b39 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs @@ -0,0 +1,603 @@ +#![allow(unused)] + +use crate::cell::UnsafeCell; +use crate::mem; +use crate::ops::{CoerceUnsized, Deref, DerefMut, Index, IndexMut}; +use crate::ptr::{self, NonNull}; +use crate::slice; +use crate::slice::SliceIndex; + +use super::super::mem::is_user_range; +use fortanix_sgx_abi::*; + +/// A type that can be safely read from or 
written to userspace. +/// +/// Non-exhaustive list of specific requirements for reading and writing: +/// * **Type is `Copy`** (and therefore also not `Drop`). Copies will be +/// created when copying from/to userspace. Destructors will not be called. +/// * **No references or Rust-style owned pointers** (`Vec`, `Arc`, etc.). When +/// reading from userspace, references into enclave memory must not be +/// created. Also, only enclave memory is considered managed by the Rust +/// compiler's static analysis. When reading from userspace, there can be no +/// guarantee that the value correctly adheres to the expectations of the +/// type. When writing to userspace, memory addresses of data in enclave +/// memory must not be leaked for confidentiality reasons. `User` and +/// `UserRef` are also not allowed for the same reasons. +/// * **No fat pointers.** When reading from userspace, the size or vtable +/// pointer could be automatically interpreted and used by the code. When +/// writing to userspace, memory addresses of data in enclave memory (such +/// as vtable pointers) must not be leaked for confidentiality reasons. +/// +/// Non-exhaustive list of specific requirements for reading from userspace: +/// * **Any bit pattern is valid** for this type (no `enum`s). There can be no +/// guarantee that the value correctly adheres to the expectations of the +/// type, so any value must be valid for this type. +/// +/// Non-exhaustive list of specific requirements for writing to userspace: +/// * **No pointers to enclave memory.** Memory addresses of data in enclave +/// memory must not be leaked for confidentiality reasons. +/// * **No internal padding.** Padding might contain previously-initialized +/// secret data stored at that memory location and must not be leaked for +/// confidentiality reasons. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe trait UserSafeSized: Copy + Sized {} + +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for u8 {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T> UserSafeSized for FifoDescriptor<T> {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for ByteBuffer {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for Usercall {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl UserSafeSized for Return {} +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {} + +/// A type that can be represented in memory as one or more `UserSafeSized`s. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe trait UserSafe { + /// Equivalent to `mem::align_of::<Self>`. + fn align_of() -> usize; + + /// Construct a pointer to `Self` given a memory range in user space. + /// + /// N.B., this takes a size, not a length! + /// + /// # Safety + /// + /// The caller must ensure the memory range is in user memory, is the + /// correct size and is correctly aligned and points to the right type. + unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self; + + /// Construct a pointer to `Self` given a memory range. + /// + /// N.B., this takes a size, not a length! + /// + /// # Safety + /// + /// The caller must ensure the memory range points to the correct type. + /// + /// # Panics + /// + /// This function panics if: + /// + /// * the pointer is not aligned. + /// * the pointer is null. 
+ /// * the pointed-to range does not fit in the address space. + /// * the pointed-to range is not in user memory. + unsafe fn from_raw_sized(ptr: *mut u8, size: usize) -> NonNull<Self> { + assert!(ptr.wrapping_add(size) >= ptr); + let ret = Self::from_raw_sized_unchecked(ptr, size); + Self::check_ptr(ret); + NonNull::new_unchecked(ret as _) + } + + /// Checks if a pointer may point to `Self` in user memory. + /// + /// # Safety + /// + /// The caller must ensure the memory range points to the correct type and + /// length (if this is a slice). + /// + /// # Panics + /// + /// This function panics if: + /// + /// * the pointer is not aligned. + /// * the pointer is null. + /// * the pointed-to range is not in user memory. + unsafe fn check_ptr(ptr: *const Self) { + let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) }; + + assert!(is_aligned(ptr as *const u8)); + assert!(is_user_range(ptr as _, mem::size_of_val(&*ptr))); + assert!(!ptr.is_null()); + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T: UserSafeSized> UserSafe for T { + fn align_of() -> usize { + mem::align_of::<T>() + } + + unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self { + assert_eq!(size, mem::size_of::<T>()); + ptr as _ + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +unsafe impl<T: UserSafeSized> UserSafe for [T] { + fn align_of() -> usize { + mem::align_of::<T>() + } + + unsafe fn from_raw_sized_unchecked(ptr: *mut u8, size: usize) -> *mut Self { + let elem_size = mem::size_of::<T>(); + assert_eq!(size % elem_size, 0); + let len = size / elem_size; + slice::from_raw_parts_mut(ptr as _, len) + } +} + +/// A reference to some type in userspace memory. `&UserRef<T>` is equivalent +/// to `&T` in enclave memory. Access to the memory is only allowed by copying +/// to avoid TOCTTOU issues. After copying, code should make sure to completely +/// check the value before use. +/// +/// It is also possible to obtain a mutable reference `&mut UserRef<T>`. Unlike +/// regular mutable references, these are not exclusive. Userspace may always +/// write to the backing memory at any time, so it can't be assumed that there +/// the pointed-to memory is uniquely borrowed. The two different reference types +/// are used solely to indicate intent: a mutable reference is for writing to +/// user memory, an immutable reference for reading from user memory. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct UserRef<T: ?Sized>(UnsafeCell<T>); +/// An owned type in userspace memory. `User<T>` is equivalent to `Box<T>` in +/// enclave memory. Access to the memory is only allowed by copying to avoid +/// TOCTTOU issues. The user memory will be freed when the value is dropped. +/// After copying, code should make sure to completely check the value before +/// use. 
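As an aside to the `UserSafeSized` requirements listed above, a minimal sketch of what does and does not qualify; both types here are hypothetical and are not part of this patch.

// Illustrative only.
#[derive(Clone, Copy)]
#[repr(C)]
struct HypotheticalPair {
    lo: u64,
    hi: u64,
}

// Plausible: `HypotheticalPair` is `Copy`, contains no pointers, has no
// internal padding (two u64 fields), and every bit pattern is a valid value.
unsafe impl UserSafeSized for HypotheticalPair {}

// Not acceptable: an enum has bit patterns that are not valid values, so a
// malicious enclave runner could hand back a value that violates the type's
// invariants.
// enum HypotheticalTag { A, B }
// unsafe impl UserSafeSized for HypotheticalTag {} // unsound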
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct User<T: UserSafe + ?Sized>(NonNull<UserRef<T>>); + +trait NewUserRef<T: ?Sized> { + unsafe fn new_userref(v: T) -> Self; +} + +impl<T: ?Sized> NewUserRef<*mut T> for NonNull<UserRef<T>> { + unsafe fn new_userref(v: *mut T) -> Self { + NonNull::new_unchecked(v as _) + } +} + +impl<T: ?Sized> NewUserRef<NonNull<T>> for NonNull<UserRef<T>> { + unsafe fn new_userref(v: NonNull<T>) -> Self { + NonNull::new_userref(v.as_ptr()) + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> User<T> +where + T: UserSafe, +{ + // This function returns memory that is practically uninitialized, but is + // not considered "unspecified" or "undefined" for purposes of an + // optimizing compiler. This is achieved by returning a pointer from + // from outside as obtained by `super::alloc`. + fn new_uninit_bytes(size: usize) -> Self { + unsafe { + // Mustn't call alloc with size 0. + let ptr = if size > 0 { + rtunwrap!(Ok, super::alloc(size, T::align_of())) as _ + } else { + T::align_of() as _ // dangling pointer ok for size 0 + }; + if let Ok(v) = crate::panic::catch_unwind(|| T::from_raw_sized(ptr, size)) { + User(NonNull::new_userref(v)) + } else { + rtabort!("Got invalid pointer from alloc() usercall") + } + } + } + + /// Copies `val` into freshly allocated space in user memory. + pub fn new_from_enclave(val: &T) -> Self { + unsafe { + let ret = Self::new_uninit_bytes(mem::size_of_val(val)); + ptr::copy( + val as *const T as *const u8, + ret.0.as_ptr() as *mut u8, + mem::size_of_val(val), + ); + ret + } + } + + /// Creates an owned `User<T>` from a raw pointer. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`, is freeable with the `free` + /// usercall and the alignment of `T`, and is uniquely owned. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw(ptr: *mut T) -> Self { + T::check_ptr(ptr); + User(NonNull::new_userref(ptr)) + } + + /// Converts this value into a raw pointer. The value will no longer be + /// automatically freed. + pub fn into_raw(self) -> *mut T { + let ret = self.0; + mem::forget(self); + ret.as_ptr() as _ + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> User<T> +where + T: UserSafe, +{ + /// Allocate space for `T` in user memory. + pub fn uninitialized() -> Self { + Self::new_uninit_bytes(mem::size_of::<T>()) + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> User<[T]> +where + [T]: UserSafe, +{ + /// Allocate space for a `[T]` of `n` elements in user memory. + pub fn uninitialized(n: usize) -> Self { + Self::new_uninit_bytes(n * mem::size_of::<T>()) + } + + /// Creates an owned `User<[T]>` from a raw thin pointer and a slice length. + /// + /// # Safety + /// The caller must ensure `ptr` points to `len` elements of `T`, is + /// freeable with the `free` usercall and the alignment of `T`, and is + /// uniquely owned. 
+ /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts(ptr: *mut T, len: usize) -> Self { + User(NonNull::new_userref(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()))) + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> UserRef<T> +where + T: UserSafe, +{ + /// Creates a `&UserRef<[T]>` from a raw pointer. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_ptr<'a>(ptr: *const T) -> &'a Self { + T::check_ptr(ptr); + &*(ptr as *const Self) + } + + /// Creates a `&mut UserRef<[T]>` from a raw pointer. See the struct + /// documentation for the nuances regarding a `&mut UserRef<T>`. + /// + /// # Safety + /// The caller must ensure `ptr` points to `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range is not in user memory + pub unsafe fn from_mut_ptr<'a>(ptr: *mut T) -> &'a mut Self { + T::check_ptr(ptr); + &mut *(ptr as *mut Self) + } + + /// Copies `val` into user memory. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. This can happen for dynamically-sized types such as slices. + pub fn copy_from_enclave(&mut self, val: &T) { + unsafe { + assert_eq!(mem::size_of_val(val), mem::size_of_val(&*self.0.get())); + ptr::copy( + val as *const T as *const u8, + self.0.get() as *mut T as *mut u8, + mem::size_of_val(val), + ); + } + } + + /// Copies the value from user memory and place it into `dest`. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. This can happen for dynamically-sized types such as slices. + pub fn copy_to_enclave(&self, dest: &mut T) { + unsafe { + assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get())); + ptr::copy( + self.0.get() as *const T as *const u8, + dest as *mut T as *mut u8, + mem::size_of_val(dest), + ); + } + } + + /// Obtain a raw pointer from this reference. + pub fn as_raw_ptr(&self) -> *const T { + self as *const _ as _ + } + + /// Obtain a raw pointer from this reference. + pub fn as_raw_mut_ptr(&mut self) -> *mut T { + self as *mut _ as _ + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> UserRef<T> +where + T: UserSafe, +{ + /// Copies the value from user memory into enclave memory. + pub fn to_enclave(&self) -> T { + unsafe { ptr::read(self.0.get()) } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T> UserRef<[T]> +where + [T]: UserSafe, +{ + /// Creates a `&UserRef<[T]>` from a raw thin pointer and a slice length. + /// + /// # Safety + /// The caller must ensure `ptr` points to `n` elements of `T`. 
+ /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts<'a>(ptr: *const T, len: usize) -> &'a Self { + &*(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *const Self) + } + + /// Creates a `&mut UserRef<[T]>` from a raw thin pointer and a slice length. + /// See the struct documentation for the nuances regarding a + /// `&mut UserRef<T>`. + /// + /// # Safety + /// The caller must ensure `ptr` points to `n` elements of `T`. + /// + /// # Panics + /// This function panics if: + /// + /// * The pointer is not aligned + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut T, len: usize) -> &'a mut Self { + &mut *(<[T]>::from_raw_sized(ptr as _, len * mem::size_of::<T>()).as_ptr() as *mut Self) + } + + /// Obtain a raw pointer to the first element of this user slice. + pub fn as_ptr(&self) -> *const T { + self.0.get() as _ + } + + /// Obtain a raw pointer to the first element of this user slice. + pub fn as_mut_ptr(&mut self) -> *mut T { + self.0.get() as _ + } + + /// Obtain the number of elements in this user slice. + pub fn len(&self) -> usize { + unsafe { (*self.0.get()).len() } + } + + /// Copies the value from user memory and place it into `dest`. Afterwards, + /// `dest` will contain exactly `self.len()` elements. + /// + /// # Panics + /// This function panics if the destination doesn't have the same size as + /// the source. This can happen for dynamically-sized types such as slices. + pub fn copy_to_enclave_vec(&self, dest: &mut Vec<T>) { + unsafe { + if let Some(missing) = self.len().checked_sub(dest.capacity()) { + dest.reserve(missing) + } + dest.set_len(self.len()); + self.copy_to_enclave(&mut dest[..]); + } + } + + /// Copies the value from user memory into a vector in enclave memory. + pub fn to_enclave(&self) -> Vec<T> { + let mut ret = Vec::with_capacity(self.len()); + self.copy_to_enclave_vec(&mut ret); + ret + } + + /// Returns an iterator over the slice. + pub fn iter(&self) -> Iter<'_, T> + where + T: UserSafe, // FIXME: should be implied by [T]: UserSafe? + { + unsafe { Iter((&*self.as_raw_ptr()).iter()) } + } + + /// Returns an iterator that allows modifying each value. + pub fn iter_mut(&mut self) -> IterMut<'_, T> + where + T: UserSafe, // FIXME: should be implied by [T]: UserSafe? + { + unsafe { IterMut((&mut *self.as_raw_mut_ptr()).iter_mut()) } + } +} + +/// Immutable user slice iterator +/// +/// This struct is created by the `iter` method on `UserRef<[T]>`. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Iter<'a, T: 'a + UserSafe>(slice::Iter<'a, T>); + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<'a, T: UserSafe> Iterator for Iter<'a, T> { + type Item = &'a UserRef<T>; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + unsafe { self.0.next().map(|e| UserRef::from_ptr(e)) } + } +} + +/// Mutable user slice iterator +/// +/// This struct is created by the `iter_mut` method on `UserRef<[T]>`. 
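As a usage aside, a sketch of how these accessors compose; the `sum_user_bytes` helper and its arguments are invented for illustration. The iterator yields `&UserRef<u8>` items, and each byte is copied into the enclave before it is used, in line with the copy-only access model described above.

// Illustrative only: sum a byte slice that lives in user memory.
// `ptr`/`len` are assumed to describe a valid, caller-checked user range.
unsafe fn sum_user_bytes(ptr: *const u8, len: usize) -> u64 {
    let user: &UserRef<[u8]> = UserRef::from_raw_parts(ptr, len);
    let mut sum = 0u64;
    for byte in user.iter() {
        // `to_enclave` makes a snapshot copy; later changes made by
        // userspace cannot affect the value already read.
        sum += byte.to_enclave() as u64;
    }
    sum
}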
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct IterMut<'a, T: 'a + UserSafe>(slice::IterMut<'a, T>); + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<'a, T: UserSafe> Iterator for IterMut<'a, T> { + type Item = &'a mut UserRef<T>; + + #[inline] + fn next(&mut self) -> Option<Self::Item> { + unsafe { self.0.next().map(|e| UserRef::from_mut_ptr(e)) } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> Deref for User<T> +where + T: UserSafe, +{ + type Target = UserRef<T>; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.0.as_ptr() } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> DerefMut for User<T> +where + T: UserSafe, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.0.as_ptr() } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: ?Sized> Drop for User<T> +where + T: UserSafe, +{ + fn drop(&mut self) { + unsafe { + let ptr = (*self.0.as_ptr()).0.get(); + super::free(ptr as _, mem::size_of_val(&mut *ptr), T::align_of()); + } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T: CoerceUnsized<U>, U> CoerceUnsized<UserRef<U>> for UserRef<T> {} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T, I> Index<I> for UserRef<[T]> +where + [T]: UserSafe, + I: SliceIndex<[T], Output: UserSafe>, +{ + type Output = UserRef<I::Output>; + + #[inline] + fn index(&self, index: I) -> &UserRef<I::Output> { + unsafe { + if let Some(slice) = index.get(&*self.as_raw_ptr()) { + UserRef::from_ptr(slice) + } else { + rtabort!("index out of range for user slice"); + } + } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl<T, I> IndexMut<I> for UserRef<[T]> +where + [T]: UserSafe, + I: SliceIndex<[T], Output: UserSafe>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut UserRef<I::Output> { + unsafe { + if let Some(slice) = index.get_mut(&mut *self.as_raw_mut_ptr()) { + UserRef::from_mut_ptr(slice) + } else { + rtabort!("index out of range for user slice"); + } + } + } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +impl UserRef<super::raw::ByteBuffer> { + /// Copies the user memory range pointed to by the user `ByteBuffer` to + /// enclave memory. + /// + /// # Panics + /// This function panics if, in the user `ByteBuffer`: + /// + /// * The pointer is null + /// * The pointed-to range does not fit in the address space + /// * The pointed-to range is not in user memory + pub fn copy_user_buffer(&self) -> Vec<u8> { + unsafe { + let buf = self.to_enclave(); + if buf.len > 0 { + User::from_raw_parts(buf.data as _, buf.len).to_enclave() + } else { + // Mustn't look at `data` or call `free` if `len` is `0`. + Vec::with_capacity(0) + } + } + } +} diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs new file mode 100644 index 00000000000..73f1b951e74 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs @@ -0,0 +1,321 @@ +use crate::cmp; +use crate::convert::TryFrom; +use crate::io::{Error as IoError, ErrorKind, IoSlice, IoSliceMut, Result as IoResult}; +use crate::sys::rand::rdrand64; +use crate::time::{Duration, Instant}; + +pub(crate) mod alloc; +#[macro_use] +pub(crate) mod raw; + +use self::raw::*; + +/// Usercall `read`. See the ABI documentation for more information. +/// +/// This will do a single `read` usercall and scatter the read data among +/// `bufs`. To read to a single buffer, just pass a slice of length one. 
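For example, a caller that only has one buffer would wrap it in a single `IoSliceMut`. The `read_into` helper below is hypothetical, but this is the same pattern `FileDesc::read` uses further down in this patch.

// Illustrative only.
fn read_into(fd: Fd, buf: &mut [u8]) -> IoResult<usize> {
    read(fd, &mut [IoSliceMut::new(buf)])
}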
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn read(fd: Fd, bufs: &mut [IoSliceMut<'_>]) -> IoResult<usize> { + unsafe { + let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len())); + let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len); + let ret_len = raw::read(fd, userbuf.as_mut_ptr(), userbuf.len()).from_sgx_result()?; + let userbuf = &userbuf[..ret_len]; + let mut index = 0; + for buf in bufs { + let end = cmp::min(index + buf.len(), userbuf.len()); + if let Some(buflen) = end.checked_sub(index) { + userbuf[index..end].copy_to_enclave(&mut buf[..buflen]); + index += buf.len(); + } else { + break; + } + } + Ok(userbuf.len()) + } +} + +/// Usercall `read_alloc`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn read_alloc(fd: Fd) -> IoResult<Vec<u8>> { + unsafe { + let userbuf = ByteBuffer { data: crate::ptr::null_mut(), len: 0 }; + let mut userbuf = alloc::User::new_from_enclave(&userbuf); + raw::read_alloc(fd, userbuf.as_raw_mut_ptr()).from_sgx_result()?; + Ok(userbuf.copy_user_buffer()) + } +} + +/// Usercall `write`. See the ABI documentation for more information. +/// +/// This will do a single `write` usercall and gather the written data from +/// `bufs`. To write from a single buffer, just pass a slice of length one. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn write(fd: Fd, bufs: &[IoSlice<'_>]) -> IoResult<usize> { + unsafe { + let total_len = bufs.iter().fold(0usize, |sum, buf| sum.saturating_add(buf.len())); + let mut userbuf = alloc::User::<[u8]>::uninitialized(total_len); + let mut index = 0; + for buf in bufs { + let end = cmp::min(index + buf.len(), userbuf.len()); + if let Some(buflen) = end.checked_sub(index) { + userbuf[index..end].copy_from_enclave(&buf[..buflen]); + index += buf.len(); + } else { + break; + } + } + raw::write(fd, userbuf.as_ptr(), userbuf.len()).from_sgx_result() + } +} + +/// Usercall `flush`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn flush(fd: Fd) -> IoResult<()> { + unsafe { raw::flush(fd).from_sgx_result() } +} + +/// Usercall `close`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn close(fd: Fd) { + unsafe { raw::close(fd) } +} + +fn string_from_bytebuffer(buf: &alloc::UserRef<ByteBuffer>, usercall: &str, arg: &str) -> String { + String::from_utf8(buf.copy_user_buffer()) + .unwrap_or_else(|_| rtabort!("Usercall {}: expected {} to be valid UTF-8", usercall, arg)) +} + +/// Usercall `bind_stream`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn bind_stream(addr: &str) -> IoResult<(Fd, String)> { + unsafe { + let addr_user = alloc::User::new_from_enclave(addr.as_bytes()); + let mut local = alloc::User::<ByteBuffer>::uninitialized(); + let fd = raw::bind_stream(addr_user.as_ptr(), addr_user.len(), local.as_raw_mut_ptr()) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "bind_stream", "local_addr"); + Ok((fd, local)) + } +} + +/// Usercall `accept_stream`. See the ABI documentation for more information. 
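A hedged sketch of how `bind_stream` (above) and `accept_stream` (below) compose; the address literal and the lack of error handling are placeholders.

// Illustrative only: bind a TCP listener via the enclave runner and accept
// one connection, receiving the runner-reported addresses as strings.
fn accept_one() -> IoResult<Fd> {
    let (listener, _local_addr) = bind_stream("0.0.0.0:7878")?;
    let (stream, _local_addr, _peer_addr) = accept_stream(listener)?;
    Ok(stream)
}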
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn accept_stream(fd: Fd) -> IoResult<(Fd, String, String)> { + unsafe { + let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized(); + let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done + // without forcing coercion? + let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap()); + let fd = raw::accept_stream(fd, local.as_raw_mut_ptr(), peer.as_raw_mut_ptr()) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "accept_stream", "local_addr"); + let peer = string_from_bytebuffer(&peer, "accept_stream", "peer_addr"); + Ok((fd, local, peer)) + } +} + +/// Usercall `connect_stream`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn connect_stream(addr: &str) -> IoResult<(Fd, String, String)> { + unsafe { + let addr_user = alloc::User::new_from_enclave(addr.as_bytes()); + let mut bufs = alloc::User::<[ByteBuffer; 2]>::uninitialized(); + let mut buf_it = alloc::UserRef::iter_mut(&mut *bufs); // FIXME: can this be done + // without forcing coercion? + let (local, peer) = (buf_it.next().unwrap(), buf_it.next().unwrap()); + let fd = raw::connect_stream( + addr_user.as_ptr(), + addr_user.len(), + local.as_raw_mut_ptr(), + peer.as_raw_mut_ptr(), + ) + .from_sgx_result()?; + let local = string_from_bytebuffer(&local, "connect_stream", "local_addr"); + let peer = string_from_bytebuffer(&peer, "connect_stream", "peer_addr"); + Ok((fd, local, peer)) + } +} + +/// Usercall `launch_thread`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub unsafe fn launch_thread() -> IoResult<()> { + raw::launch_thread().from_sgx_result() +} + +/// Usercall `exit`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn exit(panic: bool) -> ! { + unsafe { raw::exit(panic) } +} + +/// Usercall `wait`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn wait(event_mask: u64, mut timeout: u64) -> IoResult<u64> { + if timeout != WAIT_NO && timeout != WAIT_INDEFINITE { + // We don't want people to rely on accuracy of timeouts to make + // security decisions in an SGX enclave. That's why we add a random + // amount not exceeding +/- 10% to the timeout value to discourage + // people from relying on accuracy of timeouts while providing a way + // to make things work in other cases. Note that in the SGX threat + // model the enclave runner which is serving the wait usercall is not + // trusted to ensure accurate timeouts. + if let Ok(timeout_signed) = i64::try_from(timeout) { + let tenth = timeout_signed / 10; + let deviation = (rdrand64() as i64).checked_rem(tenth).unwrap_or(0); + timeout = timeout_signed.saturating_add(deviation) as _; + } + } + unsafe { raw::wait(event_mask, timeout).from_sgx_result() } +} + +/// This function makes an effort to wait for a non-spurious event at least as +/// long as `duration`. Note that in general there is no guarantee about accuracy +/// of time and timeouts in SGX model. The enclave runner serving usercalls may +/// lie about current time and/or ignore timeout values. +/// +/// Once the event is observed, `should_wake_up` will be used to determine +/// whether or not the event was spurious. 
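A sketch of a typical caller of `wait_timeout` (defined below); the `park_current` helper and the atomic flag are hypothetical, and `EV_UNPARK` stands in for whichever event bit the caller actually cares about. The closure re-checks the real wake-up condition so that spurious events are ignored.

// Illustrative only: block until unparked or until roughly one second has
// passed, tolerating spurious events.
fn park_current(unparked: &crate::sync::atomic::AtomicBool) {
    use crate::sync::atomic::Ordering;
    wait_timeout(EV_UNPARK, Duration::from_secs(1), || unparked.load(Ordering::Acquire));
}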
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn wait_timeout<F>(event_mask: u64, duration: Duration, should_wake_up: F) +where + F: Fn() -> bool, +{ + // Calls the wait usercall and checks the result. Returns true if event was + // returned, and false if WouldBlock/TimedOut was returned. + // If duration is None, it will use WAIT_NO. + fn wait_checked(event_mask: u64, duration: Option<Duration>) -> bool { + let timeout = duration.map_or(raw::WAIT_NO, |duration| { + cmp::min((u64::MAX - 1) as u128, duration.as_nanos()) as u64 + }); + match wait(event_mask, timeout) { + Ok(eventset) => { + if event_mask == 0 { + rtabort!("expected wait() to return Err, found Ok."); + } + rtassert!(eventset != 0 && eventset & !event_mask == 0); + true + } + Err(e) => { + rtassert!(e.kind() == ErrorKind::TimedOut || e.kind() == ErrorKind::WouldBlock); + false + } + } + } + + match wait_checked(event_mask, Some(duration)) { + false => return, // timed out + true if should_wake_up() => return, // woken up + true => {} // spurious event + } + + // Drain all cached events. + // Note that `event_mask != 0` is implied if we get here. + loop { + match wait_checked(event_mask, None) { + false => break, // no more cached events + true if should_wake_up() => return, // woken up + true => {} // spurious event + } + } + + // Continue waiting, but take note of time spent waiting so we don't wait + // forever. We intentionally don't call `Instant::now()` before this point + // to avoid the cost of the `insecure_time` usercall in case there are no + // spurious wakeups. + + let start = Instant::now(); + let mut remaining = duration; + loop { + match wait_checked(event_mask, Some(remaining)) { + false => return, // timed out + true if should_wake_up() => return, // woken up + true => {} // spurious event + } + remaining = match duration.checked_sub(start.elapsed()) { + Some(remaining) => remaining, + None => break, + } + } +} + +/// Usercall `send`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn send(event_set: u64, tcs: Option<Tcs>) -> IoResult<()> { + unsafe { raw::send(event_set, tcs).from_sgx_result() } +} + +/// Usercall `insecure_time`. See the ABI documentation for more information. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn insecure_time() -> Duration { + let t = unsafe { raw::insecure_time() }; + Duration::new(t / 1_000_000_000, (t % 1_000_000_000) as _) +} + +/// Usercall `alloc`. See the ABI documentation for more information. 
+#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn alloc(size: usize, alignment: usize) -> IoResult<*mut u8> { + unsafe { raw::alloc(size, alignment).from_sgx_result() } +} + +#[unstable(feature = "sgx_platform", issue = "56975")] +#[doc(inline)] +pub use self::raw::free; + +fn check_os_error(err: Result) -> i32 { + // FIXME: not sure how to make sure all variants of Error are covered + if err == Error::NotFound as _ + || err == Error::PermissionDenied as _ + || err == Error::ConnectionRefused as _ + || err == Error::ConnectionReset as _ + || err == Error::ConnectionAborted as _ + || err == Error::NotConnected as _ + || err == Error::AddrInUse as _ + || err == Error::AddrNotAvailable as _ + || err == Error::BrokenPipe as _ + || err == Error::AlreadyExists as _ + || err == Error::WouldBlock as _ + || err == Error::InvalidInput as _ + || err == Error::InvalidData as _ + || err == Error::TimedOut as _ + || err == Error::WriteZero as _ + || err == Error::Interrupted as _ + || err == Error::Other as _ + || err == Error::UnexpectedEof as _ + || ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&err) + { + err + } else { + rtabort!("Usercall: returned invalid error value {}", err) + } +} + +trait FromSgxResult { + type Return; + + fn from_sgx_result(self) -> IoResult<Self::Return>; +} + +impl<T> FromSgxResult for (Result, T) { + type Return = T; + + fn from_sgx_result(self) -> IoResult<Self::Return> { + if self.0 == RESULT_SUCCESS { + Ok(self.1) + } else { + Err(IoError::from_raw_os_error(check_os_error(self.0))) + } + } +} + +impl FromSgxResult for Result { + type Return = (); + + fn from_sgx_result(self) -> IoResult<Self::Return> { + if self == RESULT_SUCCESS { + Ok(()) + } else { + Err(IoError::from_raw_os_error(check_os_error(self))) + } + } +} diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs new file mode 100644 index 00000000000..e0ebf860618 --- /dev/null +++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs @@ -0,0 +1,251 @@ +#![allow(unused)] + +#[unstable(feature = "sgx_platform", issue = "56975")] +pub use fortanix_sgx_abi::*; + +use crate::num::NonZeroU64; +use crate::ptr::NonNull; + +#[repr(C)] +struct UsercallReturn(u64, u64); + +extern "C" { + fn usercall(nr: NonZeroU64, p1: u64, p2: u64, abort: u64, p3: u64, p4: u64) -> UsercallReturn; +} + +/// Performs the raw usercall operation as defined in the ABI calling convention. +/// +/// # Safety +/// +/// The caller must ensure to pass parameters appropriate for the usercall `nr` +/// and to observe all requirements specified in the ABI. +/// +/// # Panics +/// +/// Panics if `nr` is `0`. +#[unstable(feature = "sgx_platform", issue = "56975")] +#[inline] +pub unsafe fn do_usercall( + nr: NonZeroU64, + p1: u64, + p2: u64, + p3: u64, + p4: u64, + abort: bool, +) -> (u64, u64) { + let UsercallReturn(a, b) = usercall(nr, p1, p2, abort as _, p3, p4); + (a, b) +} + +type Register = u64; + +trait RegisterArgument { + fn from_register(_: Register) -> Self; + fn into_register(self) -> Register; +} + +trait ReturnValue { + fn from_registers(call: &'static str, regs: (Register, Register)) -> Self; +} + +macro_rules! define_usercalls { + ($(fn $f:ident($($n:ident: $t:ty),*) $(-> $r:tt)*; )*) => { + /// Usercall numbers as per the ABI. 
+ #[repr(u64)] + #[unstable(feature = "sgx_platform", issue = "56975")] + #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] + #[allow(missing_docs, non_camel_case_types)] + #[non_exhaustive] + pub enum Usercalls { + #[doc(hidden)] + __enclave_usercalls_invalid = 0, + $($f,)* + } + + $(enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) $(-> $r)*);)* + }; +} + +macro_rules! define_ra { + (< $i:ident > $t:ty) => { + impl<$i> RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as _ + } + fn into_register(self) -> Register { + self as _ + } + } + }; + ($i:ty as $t:ty) => { + impl RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as $i as _ + } + fn into_register(self) -> Register { + self as $i as _ + } + } + }; + ($t:ty) => { + impl RegisterArgument for $t { + fn from_register(a: Register) -> Self { + a as _ + } + fn into_register(self) -> Register { + self as _ + } + } + }; +} + +define_ra!(Register); +define_ra!(i64); +define_ra!(u32); +define_ra!(u32 as i32); +define_ra!(u16); +define_ra!(u16 as i16); +define_ra!(u8); +define_ra!(u8 as i8); +define_ra!(usize); +define_ra!(usize as isize); +define_ra!(<T> *const T); +define_ra!(<T> *mut T); + +impl RegisterArgument for bool { + fn from_register(a: Register) -> bool { + if a != 0 { true } else { false } + } + fn into_register(self) -> Register { + self as _ + } +} + +impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> { + fn from_register(a: Register) -> Option<NonNull<T>> { + NonNull::new(a as _) + } + fn into_register(self) -> Register { + self.map_or(0 as _, NonNull::as_ptr) as _ + } +} + +impl ReturnValue for ! { + fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self { + rtabort!("Usercall {}: did not expect to be re-entered", call); + } +} + +impl ReturnValue for () { + fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { + rtassert!(usercall_retval.0 == 0); + rtassert!(usercall_retval.1 == 0); + () + } +} + +impl<T: RegisterArgument> ReturnValue for T { + fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self { + rtassert!(usercall_retval.1 == 0); + T::from_register(usercall_retval.0) + } +} + +impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) { + fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self { + (T::from_register(regs.0), U::from_register(regs.1)) + } +} + +macro_rules! return_type_is_abort { + (!) => { + true + }; + ($r:ty) => { + false + }; +} + +// In this macro: using `$r:tt` because `$r:ty` doesn't match ! in `return_type_is_abort` +macro_rules! enclave_usercalls_internal_define_usercalls { + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty, + $n3:ident: $t3:ty, $n4:ident: $t4:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. 
+ #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3, $n4: $t4) -> $r { + ReturnValue::from_registers(stringify!($f), do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + RegisterArgument::into_register($n3), + RegisterArgument::into_register($n4), + return_type_is_abort!($r) + )) + } + ); + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty, $n3:ident: $t3:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2, $n3: $t3) -> $r { + ReturnValue::from_registers(stringify!($f), do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + RegisterArgument::into_register($n3), + 0, + return_type_is_abort!($r) + )) + } + ); + (def fn $f:ident($n1:ident: $t1:ty, $n2:ident: $t2:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1, $n2: $t2) -> $r { + ReturnValue::from_registers(stringify!($f), do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + RegisterArgument::into_register($n2), + 0,0, + return_type_is_abort!($r) + )) + } + ); + (def fn $f:ident($n1:ident: $t1:ty) -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f($n1: $t1) -> $r { + ReturnValue::from_registers(stringify!($f), do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + RegisterArgument::into_register($n1), + 0,0,0, + return_type_is_abort!($r) + )) + } + ); + (def fn $f:ident() -> $r:tt) => ( + /// This is the raw function definition, see the ABI documentation for + /// more information. + #[unstable(feature = "sgx_platform", issue = "56975")] + #[inline(always)] + pub unsafe fn $f() -> $r { + ReturnValue::from_registers(stringify!($f), do_usercall( + rtunwrap!(Some, NonZeroU64::new(Usercalls::$f as Register)), + 0,0,0,0, + return_type_is_abort!($r) + )) + } + ); + (def fn $f:ident($($n:ident: $t:ty),*)) => ( + enclave_usercalls_internal_define_usercalls!(def fn $f($($n: $t),*) -> ()); + ); +} + +invoke_with_usercalls!(define_usercalls); diff --git a/library/std/src/sys/sgx/alloc.rs b/library/std/src/sys/sgx/alloc.rs new file mode 100644 index 00000000000..40daec758a9 --- /dev/null +++ b/library/std/src/sys/sgx/alloc.rs @@ -0,0 +1,46 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; + +use super::waitqueue::SpinMutex; + +// Using a SpinMutex because we never want to exit the enclave waiting for the +// allocator. 
+#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx5alloc8DLMALLOCE"] +static DLMALLOC: SpinMutex<dlmalloc::Dlmalloc> = SpinMutex::new(dlmalloc::DLMALLOC_INIT); + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + DLMALLOC.lock().malloc(layout.size(), layout.align()) + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + DLMALLOC.lock().calloc(layout.size(), layout.align()) + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + DLMALLOC.lock().free(ptr, layout.size(), layout.align()) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + DLMALLOC.lock().realloc(ptr, layout.size(), layout.align(), new_size) + } +} + +// The following functions are needed by libunwind. These symbols are named +// in pre-link args for the target specification, so keep that in sync. +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_c_alloc(size: usize, align: usize) -> *mut u8 { + crate::alloc::alloc(Layout::from_size_align_unchecked(size, align)) +} + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_c_dealloc(ptr: *mut u8, size: usize, align: usize) { + crate::alloc::dealloc(ptr, Layout::from_size_align_unchecked(size, align)) +} diff --git a/library/std/src/sys/sgx/args.rs b/library/std/src/sys/sgx/args.rs new file mode 100644 index 00000000000..5a53695a846 --- /dev/null +++ b/library/std/src/sys/sgx/args.rs @@ -0,0 +1,60 @@ +use super::abi::usercalls::{alloc, raw::ByteBuffer}; +use crate::ffi::OsString; +use crate::slice; +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sys::os_str::Buf; +use crate::sys_common::FromInner; + +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"] +static ARGS: AtomicUsize = AtomicUsize::new(0); +type ArgsStore = Vec<OsString>; + +#[cfg_attr(test, allow(dead_code))] +pub unsafe fn init(argc: isize, argv: *const *const u8) { + if argc != 0 { + let args = alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _); + let args = args + .iter() + .map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() })) + .collect::<ArgsStore>(); + ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed); + } +} + +pub unsafe fn cleanup() {} + +pub fn args() -> Args { + let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() }; + if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) } +} + +pub struct Args(slice::Iter<'static, OsString>); + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.0.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.0.next().cloned() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.0.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.0.next_back().cloned() + } +} diff --git a/library/std/src/sys/sgx/cmath.rs b/library/std/src/sys/sgx/cmath.rs new file mode 100644 index 00000000000..b89238f1da8 --- /dev/null +++ b/library/std/src/sys/sgx/cmath.rs @@ -0,0 +1,31 @@ +#![cfg(not(test))] + +// These symbols are all defined in `compiler-builtins` +extern "C" { + pub fn acos(n: f64) -> f64; + pub fn 
acosf(n: f32) -> f32; + pub fn asin(n: f64) -> f64; + pub fn asinf(n: f32) -> f32; + pub fn atan(n: f64) -> f64; + pub fn atan2(a: f64, b: f64) -> f64; + pub fn atan2f(a: f32, b: f32) -> f32; + pub fn atanf(n: f32) -> f32; + pub fn cbrt(n: f64) -> f64; + pub fn cbrtf(n: f32) -> f32; + pub fn cosh(n: f64) -> f64; + pub fn coshf(n: f32) -> f32; + pub fn expm1(n: f64) -> f64; + pub fn expm1f(n: f32) -> f32; + pub fn fdim(a: f64, b: f64) -> f64; + pub fn fdimf(a: f32, b: f32) -> f32; + pub fn hypot(x: f64, y: f64) -> f64; + pub fn hypotf(x: f32, y: f32) -> f32; + pub fn log1p(n: f64) -> f64; + pub fn log1pf(n: f32) -> f32; + pub fn sinh(n: f64) -> f64; + pub fn sinhf(n: f32) -> f32; + pub fn tan(n: f64) -> f64; + pub fn tanf(n: f32) -> f32; + pub fn tanh(n: f64) -> f64; + pub fn tanhf(n: f32) -> f32; +} diff --git a/library/std/src/sys/sgx/condvar.rs b/library/std/src/sys/sgx/condvar.rs new file mode 100644 index 00000000000..ed6dbcf4971 --- /dev/null +++ b/library/std/src/sys/sgx/condvar.rs @@ -0,0 +1,42 @@ +use crate::sys::mutex::Mutex; +use crate::time::Duration; + +use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; + +pub struct Condvar { + inner: SpinMutex<WaitVariable<()>>, +} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { inner: SpinMutex::new(WaitVariable::new(())) } + } + + #[inline] + pub unsafe fn init(&mut self) {} + + #[inline] + pub unsafe fn notify_one(&self) { + let _ = WaitQueue::notify_one(self.inner.lock()); + } + + #[inline] + pub unsafe fn notify_all(&self) { + let _ = WaitQueue::notify_all(self.inner.lock()); + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + let guard = self.inner.lock(); + WaitQueue::wait(guard, || mutex.unlock()); + mutex.lock() + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let success = WaitQueue::wait_timeout(&self.inner, dur, || mutex.unlock()); + mutex.lock(); + success + } + + #[inline] + pub unsafe fn destroy(&self) {} +} diff --git a/library/std/src/sys/sgx/env.rs b/library/std/src/sys/sgx/env.rs new file mode 100644 index 00000000000..6fa0ed7bcf4 --- /dev/null +++ b/library/std/src/sys/sgx/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &'static str = ""; + pub const OS: &'static str = ""; + pub const DLL_PREFIX: &'static str = ""; + pub const DLL_SUFFIX: &'static str = ".sgxs"; + pub const DLL_EXTENSION: &'static str = "sgxs"; + pub const EXE_SUFFIX: &'static str = ".sgxs"; + pub const EXE_EXTENSION: &'static str = "sgxs"; +} diff --git a/library/std/src/sys/sgx/ext/arch.rs b/library/std/src/sys/sgx/ext/arch.rs new file mode 100644 index 00000000000..0c97a87e2e4 --- /dev/null +++ b/library/std/src/sys/sgx/ext/arch.rs @@ -0,0 +1,74 @@ +//! SGX-specific access to architectural features. +//! +//! The functionality in this module is further documented in the Intel +//! Software Developer's Manual, Volume 3, Chapter 40. +#![unstable(feature = "sgx_platform", issue = "56975")] + +use crate::mem::MaybeUninit; + +/// Wrapper struct to force 16-byte alignment. +#[repr(align(16))] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Align16<T>(pub T); + +/// Wrapper struct to force 128-byte alignment. +#[repr(align(128))] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Align128<T>(pub T); + +/// Wrapper struct to force 512-byte alignment. 
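These alignment wrappers exist because the `EGETKEY` and `EREPORT` leaf functions (below) require their operands at fixed alignments. A caller might build an aligned request buffer as sketched here; the helper is hypothetical, and a real KEYREQUEST must be filled in per the SDM rather than left zeroed.

// Illustrative only: an all-zero, correctly aligned EGETKEY request buffer.
fn blank_keyrequest() -> Align512<[u8; 512]> {
    Align512([0u8; 512])
}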
+#[repr(align(512))] +#[unstable(feature = "sgx_platform", issue = "56975")] +pub struct Align512<T>(pub T); + +const ENCLU_EREPORT: u32 = 0; +const ENCLU_EGETKEY: u32 = 1; + +/// Call the `EGETKEY` instruction to obtain a 128-bit secret key. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn egetkey(request: &Align512<[u8; 512]>) -> Result<Align16<[u8; 16]>, u32> { + unsafe { + let mut out = MaybeUninit::uninit(); + let error; + + llvm_asm!( + "enclu" + : "={eax}"(error) + : "{eax}"(ENCLU_EGETKEY), + "{rbx}"(request), + "{rcx}"(out.as_mut_ptr()) + : "flags" + ); + + match error { + 0 => Ok(out.assume_init()), + err => Err(err), + } + } +} + +/// Call the `EREPORT` instruction. +/// +/// This creates a cryptographic report describing the contents of the current +/// enclave. The report may be verified by the enclave described in +/// `targetinfo`. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub fn ereport( + targetinfo: &Align512<[u8; 512]>, + reportdata: &Align128<[u8; 64]>, +) -> Align512<[u8; 432]> { + unsafe { + let mut report = MaybeUninit::uninit(); + + llvm_asm!( + "enclu" + : /* no output registers */ + : "{eax}"(ENCLU_EREPORT), + "{rbx}"(targetinfo), + "{rcx}"(reportdata), + "{rdx}"(report.as_mut_ptr()) + ); + + report.assume_init() + } +} diff --git a/library/std/src/sys/sgx/ext/ffi.rs b/library/std/src/sys/sgx/ext/ffi.rs new file mode 100644 index 00000000000..63fc5ff2866 --- /dev/null +++ b/library/std/src/sys/sgx/ext/ffi.rs @@ -0,0 +1,38 @@ +//! SGX-specific extension to the primitives in the `std::ffi` module +//! +//! # Examples +//! +//! ``` +//! use std::ffi::OsString; +//! use std::os::fortanix_sgx::ffi::OsStringExt; +//! +//! let bytes = b"foo".to_vec(); +//! +//! // OsStringExt::from_vec +//! let os_string = OsString::from_vec(bytes); +//! assert_eq!(os_string.to_str(), Some("foo")); +//! +//! // OsStringExt::into_vec +//! let bytes = os_string.into_vec(); +//! assert_eq!(bytes, b"foo"); +//! ``` +//! +//! ``` +//! use std::ffi::OsStr; +//! use std::os::fortanix_sgx::ffi::OsStrExt; +//! +//! let bytes = b"foo"; +//! +//! // OsStrExt::from_bytes +//! let os_str = OsStr::from_bytes(bytes); +//! assert_eq!(os_str.to_str(), Some("foo")); +//! +//! // OsStrExt::as_bytes +//! let bytes = os_str.as_bytes(); +//! assert_eq!(bytes, b"foo"); +//! ``` + +#![unstable(feature = "sgx_platform", issue = "56975")] + +#[unstable(feature = "sgx_platform", issue = "56975")] +pub use crate::sys_common::os_str_bytes::*; diff --git a/library/std/src/sys/sgx/ext/io.rs b/library/std/src/sys/sgx/ext/io.rs new file mode 100644 index 00000000000..8aa84a550d2 --- /dev/null +++ b/library/std/src/sys/sgx/ext/io.rs @@ -0,0 +1,113 @@ +//! SGX-specific extensions to general I/O primitives +//! +//! SGX file descriptors behave differently from Unix file descriptors. See the +//! description of [`TryIntoRawFd`](trait.TryIntoRawFd.html) for more details. +#![unstable(feature = "sgx_platform", issue = "56975")] + +use crate::net; +pub use crate::sys::abi::usercalls::raw::Fd as RawFd; +use crate::sys::{self, AsInner, FromInner, IntoInner, TryIntoInner}; + +/// A trait to extract the raw SGX file descriptor from an underlying +/// object. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait AsRawFd { + /// Extracts the raw file descriptor. + /// + /// This method does **not** pass ownership of the raw file descriptor + /// to the caller. The descriptor is only guaranteed to be valid while + /// the original object has not yet been destroyed. 
+ #[unstable(feature = "sgx_platform", issue = "56975")] + fn as_raw_fd(&self) -> RawFd; +} + +/// A trait to express the ability to construct an object from a raw file +/// descriptor. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait FromRawFd { + /// Constructs a new instance of `Self` from the given raw file + /// descriptor. + /// + /// This function **consumes ownership** of the specified file + /// descriptor. The returned object will take responsibility for closing + /// it when the object goes out of scope. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + #[unstable(feature = "sgx_platform", issue = "56975")] + unsafe fn from_raw_fd(fd: RawFd) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw file descriptor. +#[unstable(feature = "sgx_platform", issue = "56975")] +pub trait TryIntoRawFd: Sized { + /// Consumes this object, returning the raw underlying file descriptor, if + /// this object is not cloned. + /// + /// This function **transfers ownership** of the underlying file descriptor + /// to the caller. Callers are then the unique owners of the file descriptor + /// and must close the descriptor once it's no longer needed. + /// + /// Unlike other platforms, on SGX, the file descriptor is shared between + /// all clones of an object. To avoid race conditions, this function will + /// only return `Ok` when called on the final clone. + #[unstable(feature = "sgx_platform", issue = "56975")] + fn try_into_raw_fd(self) -> Result<RawFd, Self>; +} + +impl AsRawFd for net::TcpStream { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().as_inner().as_inner().as_inner() + } +} + +impl AsRawFd for net::TcpListener { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().as_inner().as_inner().as_inner() + } +} + +impl FromRawFd for net::TcpStream { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { + let fd = sys::fd::FileDesc::from_inner(fd); + let socket = sys::net::Socket::from_inner(fd); + net::TcpStream::from_inner(sys::net::TcpStream::from_inner((socket, None))) + } +} + +impl FromRawFd for net::TcpListener { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpListener { + let fd = sys::fd::FileDesc::from_inner(fd); + let socket = sys::net::Socket::from_inner(fd); + net::TcpListener::from_inner(sys::net::TcpListener::from_inner(socket)) + } +} + +impl TryIntoRawFd for net::TcpStream { + fn try_into_raw_fd(self) -> Result<RawFd, Self> { + let (socket, peer_addr) = self.into_inner().into_inner(); + match socket.try_into_inner() { + Ok(fd) => Ok(fd.into_inner()), + Err(socket) => { + let sys = sys::net::TcpStream::from_inner((socket, peer_addr)); + Err(net::TcpStream::from_inner(sys)) + } + } + } +} + +impl TryIntoRawFd for net::TcpListener { + fn try_into_raw_fd(self) -> Result<RawFd, Self> { + match self.into_inner().into_inner().try_into_inner() { + Ok(fd) => Ok(fd.into_inner()), + Err(socket) => { + let sys = sys::net::TcpListener::from_inner(socket); + Err(net::TcpListener::from_inner(sys)) + } + } + } +} diff --git a/library/std/src/sys/sgx/ext/mod.rs b/library/std/src/sys/sgx/ext/mod.rs new file mode 100644 index 00000000000..258ad3cd218 --- /dev/null +++ b/library/std/src/sys/sgx/ext/mod.rs @@ -0,0 +1,5 @@ +#![unstable(feature 
= "sgx_platform", issue = "56975")] + +pub mod arch; +pub mod ffi; +pub mod io; diff --git a/library/std/src/sys/sgx/fd.rs b/library/std/src/sys/sgx/fd.rs new file mode 100644 index 00000000000..e5dc5b5adaa --- /dev/null +++ b/library/std/src/sys/sgx/fd.rs @@ -0,0 +1,84 @@ +use fortanix_sgx_abi::Fd; + +use super::abi::usercalls; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::sys::{AsInner, FromInner, IntoInner}; + +#[derive(Debug)] +pub struct FileDesc { + fd: Fd, +} + +impl FileDesc { + pub fn new(fd: Fd) -> FileDesc { + FileDesc { fd: fd } + } + + pub fn raw(&self) -> Fd { + self.fd + } + + /// Extracts the actual file descriptor without closing it. + pub fn into_raw(self) -> Fd { + let fd = self.fd; + mem::forget(self); + fd + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + usercalls::read(self.fd, &mut [IoSliceMut::new(buf)]) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + usercalls::read(self.fd, bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + usercalls::write(self.fd, &[IoSlice::new(buf)]) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + usercalls::write(self.fd, bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn flush(&self) -> io::Result<()> { + usercalls::flush(self.fd) + } +} + +impl AsInner<Fd> for FileDesc { + fn as_inner(&self) -> &Fd { + &self.fd + } +} + +impl IntoInner<Fd> for FileDesc { + fn into_inner(self) -> Fd { + let fd = self.fd; + mem::forget(self); + fd + } +} + +impl FromInner<Fd> for FileDesc { + fn from_inner(fd: Fd) -> FileDesc { + FileDesc { fd } + } +} + +impl Drop for FileDesc { + fn drop(&mut self) { + usercalls::close(self.fd) + } +} diff --git a/library/std/src/sys/sgx/fs.rs b/library/std/src/sys/sgx/fs.rs new file mode 100644 index 00000000000..ecb5b51cccd --- /dev/null +++ b/library/std/src/sys/sgx/fs.rs @@ -0,0 +1,308 @@ +use crate::ffi::OsString; +use crate::fmt; +use crate::hash::{Hash, Hasher}; +use crate::io::{self, IoSlice, IoSliceMut, SeekFrom}; +use crate::path::{Path, PathBuf}; +use crate::sys::time::SystemTime; +use crate::sys::{unsupported, Void}; + +pub struct File(Void); + +pub struct FileAttr(Void); + +pub struct ReadDir(Void); + +pub struct DirEntry(Void); + +#[derive(Clone, Debug)] +pub struct OpenOptions {} + +pub struct FilePermissions(Void); + +pub struct FileType(Void); + +#[derive(Debug)] +pub struct DirBuilder {} + +impl FileAttr { + pub fn size(&self) -> u64 { + match self.0 {} + } + + pub fn perm(&self) -> FilePermissions { + match self.0 {} + } + + pub fn file_type(&self) -> FileType { + match self.0 {} + } + + pub fn modified(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn created(&self) -> io::Result<SystemTime> { + match self.0 {} + } +} + +impl Clone for FileAttr { + fn clone(&self) -> FileAttr { + match self.0 {} + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + match self.0 {} + } + + pub fn set_readonly(&mut self, _readonly: bool) { + match self.0 {} + } +} + +impl Clone for FilePermissions { + fn clone(&self) -> FilePermissions { + match self.0 {} + } +} + +impl PartialEq for FilePermissions { + fn eq(&self, _other: &FilePermissions) -> bool { + match self.0 {} + } +} + +impl Eq for FilePermissions {} + +impl fmt::Debug for FilePermissions { + fn fmt(&self, 
_f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + match self.0 {} + } + + pub fn is_file(&self) -> bool { + match self.0 {} + } + + pub fn is_symlink(&self) -> bool { + match self.0 {} + } +} + +impl Clone for FileType { + fn clone(&self) -> FileType { + match self.0 {} + } +} + +impl Copy for FileType {} + +impl PartialEq for FileType { + fn eq(&self, _other: &FileType) -> bool { + match self.0 {} + } +} + +impl Eq for FileType {} + +impl Hash for FileType { + fn hash<H: Hasher>(&self, _h: &mut H) { + match self.0 {} + } +} + +impl fmt::Debug for FileType { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + fn next(&mut self) -> Option<io::Result<DirEntry>> { + match self.0 {} + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + match self.0 {} + } + + pub fn file_name(&self) -> OsString { + match self.0 {} + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn file_type(&self) -> io::Result<FileType> { + match self.0 {} + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions {} + } + + pub fn read(&mut self, _read: bool) {} + pub fn write(&mut self, _write: bool) {} + pub fn append(&mut self, _append: bool) {} + pub fn truncate(&mut self, _truncate: bool) {} + pub fn create(&mut self, _create: bool) {} + pub fn create_new(&mut self, _create_new: bool) {} +} + +impl File { + pub fn open(_path: &Path, _opts: &OpenOptions) -> io::Result<File> { + unsupported() + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn fsync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn datasync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn truncate(&self, _size: u64) -> io::Result<()> { + match self.0 {} + } + + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn flush(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<File> { + match self.0 {} + } + + pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> { + match self.0 {} + } + + pub fn diverge(&self) -> ! 
{ + match self.0 {} + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder {} + } + + pub fn mkdir(&self, _p: &Path) -> io::Result<()> { + unsupported() + } +} + +impl fmt::Debug for File { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub fn readdir(_p: &Path) -> io::Result<ReadDir> { + unsupported() +} + +pub fn unlink(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { + unsupported() +} + +pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> { + match perm.0 {} +} + +pub fn rmdir(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn remove_dir_all(_path: &Path) -> io::Result<()> { + unsupported() +} + +pub fn readlink(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn stat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn lstat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn copy(_from: &Path, _to: &Path) -> io::Result<u64> { + unsupported() +} diff --git a/library/std/src/sys/sgx/io.rs b/library/std/src/sys/sgx/io.rs new file mode 100644 index 00000000000..d5f475b4310 --- /dev/null +++ b/library/std/src/sys/sgx/io.rs @@ -0,0 +1,47 @@ +use crate::mem; + +#[derive(Copy, Clone)] +pub struct IoSlice<'a>(&'a [u8]); + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + self.0 = &self.0[n..] + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } +} + +pub struct IoSliceMut<'a>(&'a mut [u8]); + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + let slice = mem::replace(&mut self.0, &mut []); + let (_, remaining) = slice.split_at_mut(n); + self.0 = remaining; + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + self.0 + } +} diff --git a/library/std/src/sys/sgx/memchr.rs b/library/std/src/sys/sgx/memchr.rs new file mode 100644 index 00000000000..9967482197e --- /dev/null +++ b/library/std/src/sys/sgx/memchr.rs @@ -0,0 +1 @@ +pub use core::slice::memchr::{memchr, memrchr}; diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs new file mode 100644 index 00000000000..1d32eb25424 --- /dev/null +++ b/library/std/src/sys/sgx/mod.rs @@ -0,0 +1,162 @@ +//! System bindings for the Fortanix SGX platform +//! +//! This module contains the facade (aka platform-specific) implementations of +//! OS level functionality for Fortanix SGX. 
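The `IoSliceMut::advance` implementation in `sgx/io.rs` above relies on a small borrow-checker trick: the `&'a mut [u8]` held in the struct cannot be re-sliced in place directly, so it is first moved out with `mem::replace` (leaving an empty slice behind) and only the remaining tail is stored back. A minimal, self-contained sketch of the same pattern; the `Cursor` type is illustrative and not part of the diff:

    use std::mem;

    /// Illustrative only: hold a mutable byte slice and consume it from the front.
    struct Cursor<'a>(&'a mut [u8]);

    impl<'a> Cursor<'a> {
        fn advance(&mut self, n: usize) {
            // Move the slice out so it can be split without holding two
            // overlapping mutable borrows, then store the tail back.
            let slice = mem::replace(&mut self.0, &mut []);
            let (_, remaining) = slice.split_at_mut(n);
            self.0 = remaining;
        }
    }

    fn main() {
        let mut buf = [1u8, 2, 3, 4];
        let mut cur = Cursor(&mut buf);
        cur.advance(2);
        assert_eq!(&cur.0[..], &[3, 4][..]);
    }

The body of `sgx/mod.rs`, whose header doc appears just above, continues below.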
+ +use crate::io::ErrorKind; +use crate::os::raw::c_char; +use crate::sync::atomic::{AtomicBool, Ordering}; + +pub mod abi; +mod waitqueue; + +pub mod alloc; +pub mod args; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod ext; +pub mod fd; +pub mod fs; +pub mod io; +pub mod memchr; +pub mod mutex; +pub mod net; +pub mod os; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +pub mod thread_local_key; +pub mod time; + +pub use crate::sys_common::os_str_bytes as os_str; + +#[cfg(not(test))] +pub fn init() {} + +/// This function is used to implement functionality that simply doesn't exist. +/// Programs relying on this functionality will need to deal with the error. +pub fn unsupported<T>() -> crate::io::Result<T> { + Err(unsupported_err()) +} + +pub fn unsupported_err() -> crate::io::Error { + crate::io::Error::new(ErrorKind::Other, "operation not supported on SGX yet") +} + +/// This function is used to implement various functions that don't exist, +/// but the lack of which might not be a reason for error. If no error is +/// returned, the program might very well be able to function normally. This is +/// what happens when `SGX_INEFFECTIVE_ERROR` is set to `false`. If it is +/// `true`, the behavior is the same as `unsupported`. +pub fn sgx_ineffective<T>(v: T) -> crate::io::Result<T> { + static SGX_INEFFECTIVE_ERROR: AtomicBool = AtomicBool::new(false); + if SGX_INEFFECTIVE_ERROR.load(Ordering::Relaxed) { + Err(crate::io::Error::new( + ErrorKind::Other, + "operation can't be trusted to have any effect on SGX", + )) + } else { + Ok(v) + } +} + +pub fn decode_error_kind(code: i32) -> ErrorKind { + use fortanix_sgx_abi::Error; + + // FIXME: not sure how to make sure all variants of Error are covered + if code == Error::NotFound as _ { + ErrorKind::NotFound + } else if code == Error::PermissionDenied as _ { + ErrorKind::PermissionDenied + } else if code == Error::ConnectionRefused as _ { + ErrorKind::ConnectionRefused + } else if code == Error::ConnectionReset as _ { + ErrorKind::ConnectionReset + } else if code == Error::ConnectionAborted as _ { + ErrorKind::ConnectionAborted + } else if code == Error::NotConnected as _ { + ErrorKind::NotConnected + } else if code == Error::AddrInUse as _ { + ErrorKind::AddrInUse + } else if code == Error::AddrNotAvailable as _ { + ErrorKind::AddrNotAvailable + } else if code == Error::BrokenPipe as _ { + ErrorKind::BrokenPipe + } else if code == Error::AlreadyExists as _ { + ErrorKind::AlreadyExists + } else if code == Error::WouldBlock as _ { + ErrorKind::WouldBlock + } else if code == Error::InvalidInput as _ { + ErrorKind::InvalidInput + } else if code == Error::InvalidData as _ { + ErrorKind::InvalidData + } else if code == Error::TimedOut as _ { + ErrorKind::TimedOut + } else if code == Error::WriteZero as _ { + ErrorKind::WriteZero + } else if code == Error::Interrupted as _ { + ErrorKind::Interrupted + } else if code == Error::Other as _ { + ErrorKind::Other + } else if code == Error::UnexpectedEof as _ { + ErrorKind::UnexpectedEof + } else { + ErrorKind::Other + } +} + +// This enum is used as the storage for a bunch of types which can't actually +// exist. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Void {} + +pub unsafe fn strlen(mut s: *const c_char) -> usize { + let mut n = 0; + while *s != 0 { + n += 1; + s = s.offset(1); + } + return n; +} + +pub fn abort_internal() -> !
{ + abi::usercalls::exit(true) +} + +// This function is needed by the panic runtime. The symbol is named in +// pre-link args for the target specification, so keep that in sync. +#[cfg(not(test))] +#[no_mangle] +// NB. used by both libunwind and libpanic_abort +pub extern "C" fn __rust_abort() { + abort_internal(); +} + +pub mod rand { + pub fn rdrand64() -> u64 { + unsafe { + let mut ret: u64 = 0; + for _ in 0..10 { + if crate::arch::x86_64::_rdrand64_step(&mut ret) == 1 { + return ret; + } + } + rtabort!("Failed to obtain random data"); + } + } +} + +pub fn hashmap_random_keys() -> (u64, u64) { + (self::rand::rdrand64(), self::rand::rdrand64()) +} + +pub use crate::sys_common::{AsInner, FromInner, IntoInner}; + +pub trait TryIntoInner<Inner>: Sized { + fn try_into_inner(self) -> Result<Inner, Self>; +} diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs new file mode 100644 index 00000000000..4911c2f5387 --- /dev/null +++ b/library/std/src/sys/sgx/mutex.rs @@ -0,0 +1,140 @@ +use fortanix_sgx_abi::Tcs; + +use super::abi::thread; + +use super::waitqueue::{try_lock_or_false, NotifiedTcs, SpinMutex, WaitQueue, WaitVariable}; + +pub struct Mutex { + inner: SpinMutex<WaitVariable<bool>>, +} + +// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28 +impl Mutex { + pub const fn new() -> Mutex { + Mutex { inner: SpinMutex::new(WaitVariable::new(false)) } + } + + #[inline] + pub unsafe fn init(&mut self) {} + + #[inline] + pub unsafe fn lock(&self) { + let mut guard = self.inner.lock(); + if *guard.lock_var() { + // Another thread has the lock, wait + WaitQueue::wait(guard, || {}) + // Another thread has passed the lock to us + } else { + // We are just now obtaining the lock + *guard.lock_var_mut() = true; + } + } + + #[inline] + pub unsafe fn unlock(&self) { + let guard = self.inner.lock(); + if let Err(mut guard) = WaitQueue::notify_one(guard) { + // No other waiters, unlock + *guard.lock_var_mut() = false; + } else { + // There was a thread waiting, just pass the lock + } + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + let mut guard = try_lock_or_false!(self.inner); + if *guard.lock_var() { + // Another thread has the lock + false + } else { + // We are just now obtaining the lock + *guard.lock_var_mut() = true; + true + } + } + + #[inline] + pub unsafe fn destroy(&self) {} +} + +struct ReentrantLock { + owner: Option<Tcs>, + count: usize, +} + +pub struct ReentrantMutex { + inner: SpinMutex<WaitVariable<ReentrantLock>>, +} + +impl ReentrantMutex { + pub const fn uninitialized() -> ReentrantMutex { + ReentrantMutex { + inner: SpinMutex::new(WaitVariable::new(ReentrantLock { owner: None, count: 0 })), + } + } + + #[inline] + pub unsafe fn init(&self) {} + + #[inline] + pub unsafe fn lock(&self) { + let mut guard = self.inner.lock(); + match guard.lock_var().owner { + Some(tcs) if tcs != thread::current() => { + // Another thread has the lock, wait + WaitQueue::wait(guard, || {}); + // Another thread has passed the lock to us + } + _ => { + // We are just now obtaining the lock + guard.lock_var_mut().owner = Some(thread::current()); + guard.lock_var_mut().count += 1; + } + } + } + + #[inline] + pub unsafe fn unlock(&self) { + let mut guard = self.inner.lock(); + if guard.lock_var().count > 1 { + guard.lock_var_mut().count -= 1; + } else { + match WaitQueue::notify_one(guard) { + Err(mut guard) => { + // No other waiters, unlock + guard.lock_var_mut().count = 0; + guard.lock_var_mut().owner = None; + } + Ok(mut guard) => { + 
// There was a thread waiting, just pass the lock + if let NotifiedTcs::Single(tcs) = guard.notified_tcs() { + guard.lock_var_mut().owner = Some(tcs) + } else { + unreachable!() // called notify_one + } + } + } + } + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + let mut guard = try_lock_or_false!(self.inner); + match guard.lock_var().owner { + Some(tcs) if tcs != thread::current() => { + // Another thread has the lock + false + } + _ => { + // We are just now obtaining the lock + guard.lock_var_mut().owner = Some(thread::current()); + guard.lock_var_mut().count += 1; + true + } + } + } + + #[inline] + pub unsafe fn destroy(&self) {} +} diff --git a/library/std/src/sys/sgx/net.rs b/library/std/src/sys/sgx/net.rs new file mode 100644 index 00000000000..666a157b09c --- /dev/null +++ b/library/std/src/sys/sgx/net.rs @@ -0,0 +1,536 @@ +use crate::convert::TryFrom; +use crate::error; +use crate::fmt; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, ToSocketAddrs}; +use crate::sync::Arc; +use crate::sys::fd::FileDesc; +use crate::sys::{sgx_ineffective, unsupported, AsInner, FromInner, IntoInner, TryIntoInner, Void}; +use crate::time::Duration; + +use super::abi::usercalls; + +const DEFAULT_FAKE_TTL: u32 = 64; + +#[derive(Debug, Clone)] +pub struct Socket { + inner: Arc<FileDesc>, + local_addr: Option<String>, +} + +impl Socket { + fn new(fd: usercalls::raw::Fd, local_addr: String) -> Socket { + Socket { inner: Arc::new(FileDesc::new(fd)), local_addr: Some(local_addr) } + } +} + +impl AsInner<FileDesc> for Socket { + fn as_inner(&self) -> &FileDesc { + &self.inner + } +} + +impl TryIntoInner<FileDesc> for Socket { + fn try_into_inner(self) -> Result<FileDesc, Socket> { + let Socket { inner, local_addr } = self; + Arc::try_unwrap(inner).map_err(|inner| Socket { inner, local_addr }) + } +} + +impl FromInner<FileDesc> for Socket { + fn from_inner(inner: FileDesc) -> Socket { + Socket { inner: Arc::new(inner), local_addr: None } + } +} + +#[derive(Clone)] +pub struct TcpStream { + inner: Socket, + peer_addr: Option<String>, +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut res = f.debug_struct("TcpStream"); + + if let Some(ref addr) = self.inner.local_addr { + res.field("addr", addr); + } + + if let Some(ref peer) = self.peer_addr { + res.field("peer", peer); + } + + res.field("fd", &self.inner.inner.as_inner()).finish() + } +} + +fn io_err_to_addr(result: io::Result<&SocketAddr>) -> io::Result<String> { + match result { + Ok(saddr) => Ok(saddr.to_string()), + // need to downcast twice because io::Error::into_inner doesn't return the original + // value if the conversion fails + Err(e) => { + if e.get_ref().and_then(|e| e.downcast_ref::<NonIpSockAddr>()).is_some() { + Ok(e.into_inner().unwrap().downcast::<NonIpSockAddr>().unwrap().host) + } else { + Err(e) + } + } + } +} + +fn addr_to_sockaddr(addr: &Option<String>) -> io::Result<SocketAddr> { + addr.as_ref() + .ok_or(io::ErrorKind::AddrNotAvailable)? 
+ .to_socket_addrs() + // unwrap OK: if an iterator is returned, we're guaranteed to get exactly one entry + .map(|mut it| it.next().unwrap()) +} + +impl TcpStream { + pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + let addr = io_err_to_addr(addr)?; + let (fd, local_addr, peer_addr) = usercalls::connect_stream(&addr)?; + Ok(TcpStream { inner: Socket::new(fd, local_addr), peer_addr: Some(peer_addr) }) + } + + pub fn connect_timeout(addr: &SocketAddr, dur: Duration) -> io::Result<TcpStream> { + if dur == Duration::default() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + Self::connect(Ok(addr)) // FIXME: ignoring timeout + } + + pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> { + match dur { + Some(dur) if dur == Duration::default() => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + _ => sgx_ineffective(()), + } + } + + pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> { + match dur { + Some(dur) if dur == Duration::default() => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + _ => sgx_ineffective(()), + } + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + sgx_ineffective(None) + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + sgx_ineffective(None) + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + Ok(0) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.inner.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.inner.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.inner.inner.is_read_vectored() + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.inner.inner.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.inner.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.inner.inner.is_write_vectored() + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + addr_to_sockaddr(&self.peer_addr) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + addr_to_sockaddr(&self.inner.local_addr) + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + sgx_ineffective(()) + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + Ok(self.clone()) + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + sgx_ineffective(()) + } + + pub fn nodelay(&self) -> io::Result<bool> { + sgx_ineffective(false) + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + sgx_ineffective(()) + } + + pub fn ttl(&self) -> io::Result<u32> { + sgx_ineffective(DEFAULT_FAKE_TTL) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Ok(None) + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + sgx_ineffective(()) + } +} + +impl AsInner<Socket> for TcpStream { + fn as_inner(&self) -> &Socket { + &self.inner + } +} + +// `Inner` includes `peer_addr` so that a `TcpStream` may be correctly +// reconstructed if `Socket::try_into_inner` fails.
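The comment above is also what makes the `TryIntoRawFd` impl for `net::TcpStream` (earlier in this diff) workable: the raw fd can only be handed out when this is the last clone, and on failure the stream has to be rebuilt with its peer address intact, which is why the inner type is the pair `(Socket, Option<String>)`. A rough caller-side sketch, assuming a nightly crate with `#![feature(sgx_platform)]` built for the `x86_64-fortanix-unknown-sgx` target, where these traits are re-exported under `std::os::fortanix_sgx::io`:

    // Illustrative only: hand out the raw fd when this is the last clone,
    // otherwise give the fully reconstructed stream back to the caller.
    use std::net::TcpStream;
    use std::os::fortanix_sgx::io::{RawFd, TryIntoRawFd};

    fn take_fd_if_unique(stream: TcpStream) -> Result<RawFd, TcpStream> {
        match stream.try_into_raw_fd() {
            // Sole owner: we now hold the fd and are responsible for closing it.
            Ok(fd) => Ok(fd),
            // Other clones exist: the stream comes back intact, with its peer
            // address preserved via the (Socket, Option<String>) inner value.
            Err(stream) => Err(stream),
        }
    }

The `IntoInner`/`FromInner` impls that carry this pair follow below.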
+impl IntoInner<(Socket, Option<String>)> for TcpStream { + fn into_inner(self) -> (Socket, Option<String>) { + (self.inner, self.peer_addr) + } +} + +impl FromInner<(Socket, Option<String>)> for TcpStream { + fn from_inner((inner, peer_addr): (Socket, Option<String>)) -> TcpStream { + TcpStream { inner, peer_addr } + } +} + +#[derive(Clone)] +pub struct TcpListener { + inner: Socket, +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut res = f.debug_struct("TcpListener"); + + if let Some(ref addr) = self.inner.local_addr { + res.field("addr", addr); + } + + res.field("fd", &self.inner.inner.as_inner()).finish() + } +} + +impl TcpListener { + pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + let addr = io_err_to_addr(addr)?; + let (fd, local_addr) = usercalls::bind_stream(&addr)?; + Ok(TcpListener { inner: Socket::new(fd, local_addr) }) + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + addr_to_sockaddr(&self.inner.local_addr) + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + let (fd, local_addr, peer_addr) = usercalls::accept_stream(self.inner.inner.raw())?; + let peer_addr = Some(peer_addr); + let ret_peer = addr_to_sockaddr(&peer_addr).unwrap_or_else(|_| ([0; 4], 0).into()); + Ok((TcpStream { inner: Socket::new(fd, local_addr), peer_addr }, ret_peer)) + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + Ok(self.clone()) + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + sgx_ineffective(()) + } + + pub fn ttl(&self) -> io::Result<u32> { + sgx_ineffective(DEFAULT_FAKE_TTL) + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + sgx_ineffective(()) + } + + pub fn only_v6(&self) -> io::Result<bool> { + sgx_ineffective(false) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + Ok(None) + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + sgx_ineffective(()) + } +} + +impl AsInner<Socket> for TcpListener { + fn as_inner(&self) -> &Socket { + &self.inner + } +} + +impl IntoInner<Socket> for TcpListener { + fn into_inner(self) -> Socket { + self.inner + } +} + +impl FromInner<Socket> for TcpListener { + fn from_inner(inner: Socket) -> TcpListener { + TcpListener { inner } + } +} + +pub struct UdpSocket(Void); + +impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + unsupported() + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + match self.0 {} + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn broadcast(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> 
io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(Debug)] +pub struct NonIpSockAddr { + host: String, +} + +impl error::Error for NonIpSockAddr { + #[allow(deprecated)] + fn description(&self) -> &str { + "Failed to convert address to SocketAddr" + } +} + +impl fmt::Display for NonIpSockAddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Failed to convert address to SocketAddr: {}", self.host) + } +} + +pub struct LookupHost(Void); + +impl LookupHost { + fn new(host: String) -> io::Result<LookupHost> { + Err(io::Error::new(io::ErrorKind::Other, NonIpSockAddr { host })) + } + + pub fn port(&self) -> u16 { + match self.0 {} + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + match self.0 {} + } +} + +impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(v: &str) -> io::Result<LookupHost> { + LookupHost::new(v.to_owned()) + } +} + +impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> { + LookupHost::new(format!("{}:{}", host, port)) + } +} + +#[allow(bad_style)] +pub mod netc { + pub const AF_INET: u8 = 0; + pub const AF_INET6: u8 = 1; + pub type sa_family_t = u8; + + #[derive(Copy, Clone)] + pub struct in_addr { + pub s_addr: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: u16, + pub sin_addr: in_addr, + } + + #[derive(Copy, Clone)] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: u16, + pub sin6_addr: in6_addr, + pub sin6_flowinfo: u32, + pub sin6_scope_id: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr {} + + pub type socklen_t = usize; +} diff --git a/library/std/src/sys/sgx/os.rs b/library/std/src/sys/sgx/os.rs new file mode 
100644 index 00000000000..56fc84b4a3f --- /dev/null +++ b/library/std/src/sys/sgx/os.rs @@ -0,0 +1,139 @@ +use fortanix_sgx_abi::{Error, RESULT_SUCCESS}; + +use crate::collections::HashMap; +use crate::error::Error as StdError; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::path::{self, PathBuf}; +use crate::str; +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sync::Mutex; +use crate::sync::Once; +use crate::sys::{decode_error_kind, sgx_ineffective, unsupported, Void}; +use crate::vec; + +pub fn errno() -> i32 { + RESULT_SUCCESS +} + +pub fn error_string(errno: i32) -> String { + if errno == RESULT_SUCCESS { + "operation successful".into() + } else if ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&errno) { + format!("user-specified error {:08x}", errno) + } else { + decode_error_kind(errno).as_str().into() + } +} + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + sgx_ineffective(()) +} + +pub struct SplitPaths<'a>(&'a Void); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + match *self.0 {} + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported in SGX yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported in SGX yet" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} + +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx2os3ENVE"] +static ENV: AtomicUsize = AtomicUsize::new(0); +#[cfg_attr(test, linkage = "available_externally")] +#[export_name = "_ZN16__rust_internals3std3sys3sgx2os8ENV_INITE"] +static ENV_INIT: Once = Once::new(); +type EnvStore = Mutex<HashMap<OsString, OsString>>; + +fn get_env_store() -> Option<&'static EnvStore> { + unsafe { (ENV.load(Ordering::Relaxed) as *const EnvStore).as_ref() } +} + +fn create_env_store() -> &'static EnvStore { + ENV_INIT.call_once(|| { + ENV.store(Box::into_raw(Box::new(EnvStore::default())) as _, Ordering::Relaxed) + }); + unsafe { &*(ENV.load(Ordering::Relaxed) as *const EnvStore) } +} + +pub type Env = vec::IntoIter<(OsString, OsString)>; + +pub fn env() -> Env { + let clone_to_vec = |map: &HashMap<OsString, OsString>| -> Vec<_> { + map.iter().map(|(k, v)| (k.clone(), v.clone())).collect() + }; + + get_env_store().map(|env| clone_to_vec(&env.lock().unwrap())).unwrap_or_default().into_iter() +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + Ok(get_env_store().and_then(|s| s.lock().unwrap().get(k).cloned())) +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + let (k, v) = (k.to_owned(), v.to_owned()); + create_env_store().lock().unwrap().insert(k, v); + Ok(()) +} + +pub fn unsetenv(k: &OsStr) -> io::Result<()> { + if let Some(env) = get_env_store() { + env.lock().unwrap().remove(k); + } + Ok(()) +} + +pub fn temp_dir() -> PathBuf { + panic!("no filesystem in SGX") +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn exit(code: i32) -> ! 
{ + super::abi::exit_with_code(code as _) +} + +pub fn getpid() -> u32 { + panic!("no pids in SGX") +} diff --git a/library/std/src/sys/sgx/path.rs b/library/std/src/sys/sgx/path.rs new file mode 100644 index 00000000000..06c9df3ff54 --- /dev/null +++ b/library/std/src/sys/sgx/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &'static str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/sgx/pipe.rs b/library/std/src/sys/sgx/pipe.rs new file mode 100644 index 00000000000..10d0925823e --- /dev/null +++ b/library/std/src/sys/sgx/pipe.rs @@ -0,0 +1,38 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::sys::Void; + +pub struct AnonPipe(Void); + +impl AnonPipe { + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn diverge(&self) -> ! { + match self.0 {} + } +} + +pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> { + match p1.0 {} +} diff --git a/library/std/src/sys/sgx/process.rs b/library/std/src/sys/sgx/process.rs new file mode 100644 index 00000000000..4702e5c5492 --- /dev/null +++ b/library/std/src/sys/sgx/process.rs @@ -0,0 +1,149 @@ +use crate::ffi::OsStr; +use crate::fmt; +use crate::io; +use crate::sys::fs::File; +use crate::sys::pipe::AnonPipe; +use crate::sys::{unsupported, Void}; +use crate::sys_common::process::CommandEnv; + +pub use crate::ffi::OsString as EnvKey; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + env: CommandEnv, +} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, +} + +impl Command { + pub fn new(_program: &OsStr) -> Command { + Command { env: Default::default() } + } + + pub fn arg(&mut self, _arg: &OsStr) {} + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn cwd(&mut self, _dir: &OsStr) {} + + pub fn stdin(&mut self, _stdin: Stdio) {} + + pub fn stdout(&mut self, _stdout: Stdio) {} + + pub fn stderr(&mut self, _stderr: Stdio) {} + + pub fn spawn( + &mut self, + _default: Stdio, + _needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + unsupported() + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + pipe.diverge() + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + file.diverge() + } +} + +impl fmt::Debug for Command { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct ExitStatus(Void); + +impl ExitStatus { + pub fn success(&self) -> bool { + match self.0 {} + } + + pub fn 
code(&self) -> Option<i32> { + match self.0 {} + } +} + +impl Clone for ExitStatus { + fn clone(&self) -> ExitStatus { + match self.0 {} + } +} + +impl Copy for ExitStatus {} + +impl PartialEq for ExitStatus { + fn eq(&self, _other: &ExitStatus) -> bool { + match self.0 {} + } +} + +impl Eq for ExitStatus {} + +impl fmt::Debug for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(bool); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(false); + pub const FAILURE: ExitCode = ExitCode(true); + + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +pub struct Process(Void); + +impl Process { + pub fn id(&self) -> u32 { + match self.0 {} + } + + pub fn kill(&mut self) -> io::Result<()> { + match self.0 {} + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + match self.0 {} + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + match self.0 {} + } +} diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs new file mode 100644 index 00000000000..722b4f5e0ba --- /dev/null +++ b/library/std/src/sys/sgx/rwlock.rs @@ -0,0 +1,247 @@ +use crate::num::NonZeroUsize; + +use super::waitqueue::{ + try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable, +}; +use crate::mem; + +pub struct RWLock { + readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>, + writer: SpinMutex<WaitVariable<bool>>, +} + +// Check at compile time that RWLock size matches C definition (see test_c_rwlock_initializer below) +#[allow(dead_code)] +unsafe fn rw_lock_size_assert(r: RWLock) { + mem::transmute::<RWLock, [u8; 144]>(r); +} + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { + readers: SpinMutex::new(WaitVariable::new(None)), + writer: SpinMutex::new(WaitVariable::new(false)), + } + } + + #[inline] + pub unsafe fn read(&self) { + let mut rguard = self.readers.lock(); + let wguard = self.writer.lock(); + if *wguard.lock_var() || !wguard.queue_empty() { + // Another thread has or is waiting for the write lock, wait + drop(wguard); + WaitQueue::wait(rguard, || {}); + // Another thread has passed the lock to us + } else { + // No waiting writers, acquire the read lock + *rguard.lock_var_mut() = + NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + let mut rguard = try_lock_or_false!(self.readers); + let wguard = try_lock_or_false!(self.writer); + if *wguard.lock_var() || !wguard.queue_empty() { + // Another thread has or is waiting for the write lock + false + } else { + // No waiting writers, acquire the read lock + *rguard.lock_var_mut() = + NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1); + true + } + } + + #[inline] + pub unsafe fn write(&self) { + let rguard = self.readers.lock(); + let mut wguard = self.writer.lock(); + if *wguard.lock_var() || rguard.lock_var().is_some() { + // Another thread has the lock, wait + drop(rguard); + WaitQueue::wait(wguard, || {}); + // Another thread has passed the lock to us + } else { + // We are just now obtaining the lock + *wguard.lock_var_mut() = true; + } + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + let rguard = try_lock_or_false!(self.readers); + let mut wguard = try_lock_or_false!(self.writer); + if *wguard.lock_var() || 
rguard.lock_var().is_some() { + // Another thread has the lock + false + } else { + // We are just now obtaining the lock + *wguard.lock_var_mut() = true; + true + } + } + + #[inline] + unsafe fn __read_unlock( + &self, + mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>, + wguard: SpinMutexGuard<'_, WaitVariable<bool>>, + ) { + *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1); + if rguard.lock_var().is_some() { + // There are other active readers + } else { + if let Ok(mut wguard) = WaitQueue::notify_one(wguard) { + // A writer was waiting, pass the lock + *wguard.lock_var_mut() = true; + wguard.drop_after(rguard); + } else { + // No writers were waiting, the lock is released + rtassert!(rguard.queue_empty()); + } + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + let rguard = self.readers.lock(); + let wguard = self.writer.lock(); + self.__read_unlock(rguard, wguard); + } + + #[inline] + unsafe fn __write_unlock( + &self, + rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>, + wguard: SpinMutexGuard<'_, WaitVariable<bool>>, + ) { + match WaitQueue::notify_one(wguard) { + Err(mut wguard) => { + // No writers waiting, release the write lock + *wguard.lock_var_mut() = false; + if let Ok(mut rguard) = WaitQueue::notify_all(rguard) { + // One or more readers were waiting, pass the lock to them + if let NotifiedTcs::All { count } = rguard.notified_tcs() { + *rguard.lock_var_mut() = Some(count) + } else { + unreachable!() // called notify_all + } + rguard.drop_after(wguard); + } else { + // No readers waiting, the lock is released + } + } + Ok(wguard) => { + // There was a thread waiting for write, just pass the lock + wguard.drop_after(rguard); + } + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + let rguard = self.readers.lock(); + let wguard = self.writer.lock(); + self.__write_unlock(rguard, wguard); + } + + // only used by __rust_rwlock_unlock below + #[inline] + #[cfg_attr(test, allow(dead_code))] + unsafe fn unlock(&self) { + let rguard = self.readers.lock(); + let wguard = self.writer.lock(); + if *wguard.lock_var() == true { + self.__write_unlock(rguard, wguard); + } else { + self.__read_unlock(rguard, wguard); + } + } + + #[inline] + pub unsafe fn destroy(&self) {} +} + +// The following functions are needed by libunwind. These symbols are named +// in pre-link args for the target specification, so keep that in sync. +#[cfg(not(test))] +const EINVAL: i32 = 22; + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 { + if p.is_null() { + return EINVAL; + } + (*p).read(); + return 0; +} + +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 { + if p.is_null() { + return EINVAL; + } + (*p).write(); + return 0; +} +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RWLock) -> i32 { + if p.is_null() { + return EINVAL; + } + (*p).unlock(); + return 0; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mem::{self, MaybeUninit}; + use core::array::FixedSizeArray; + + // Verify that the bytes of initialized RWLock are the same as in + // libunwind. If they change, `src/UnwindRustSgx.h` in libunwind needs to + // be changed too. 
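One detail worth noting in the reader path above: the reader count is kept as an `Option<NonZeroUsize>`, so "no readers" is represented by `None` rather than by a zero that could be confused with a held lock, and the arithmetic in `read`/`try_read`/`__read_unlock` converts to and from that representation. The idiom in isolation, with illustrative function names:

    use std::num::NonZeroUsize;

    // Illustrative only: the counting idiom used for RWLock's reader state.
    fn add_reader(count: &mut Option<NonZeroUsize>) {
        // `None` is treated as zero readers; the +1 makes the result non-zero,
        // so `NonZeroUsize::new` always returns `Some` here.
        *count = NonZeroUsize::new(count.map_or(0, |n| n.get()) + 1);
    }

    fn remove_reader(count: &mut Option<NonZeroUsize>) {
        // Caller must hold at least one reader. When the last reader leaves,
        // `new(0)` yields `None`, i.e. the unlocked state.
        *count = NonZeroUsize::new(count.unwrap().get() - 1);
    }

    fn main() {
        let mut readers = None;
        add_reader(&mut readers);
        add_reader(&mut readers);
        remove_reader(&mut readers);
        remove_reader(&mut readers);
        assert!(readers.is_none());
    }

The libunwind layout check mentioned just above follows as `test_c_rwlock_initializer`.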
+ #[test] + fn test_c_rwlock_initializer() { + #[rustfmt::skip] + const RWLOCK_INIT: &[u8] = &[ + /* 0x00 */ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x10 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x20 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x30 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x40 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x50 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x60 */ 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x70 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ]; + + #[inline(never)] + fn zero_stack() { + test::black_box(MaybeUninit::<[RWLock; 16]>::zeroed()); + } + + #[inline(never)] + unsafe fn rwlock_new(init: &mut MaybeUninit<RWLock>) { + init.write(RWLock::new()); + } + + unsafe { + // try hard to make sure that the padding/unused bytes in RWLock + // get initialized as 0. If the assertion below fails, that might + // just be an issue with the test code and not with the value of + // RWLOCK_INIT. + zero_stack(); + let mut init = MaybeUninit::<RWLock>::zeroed(); + rwlock_new(&mut init); + assert_eq!(mem::transmute::<_, [u8; 144]>(init.assume_init()).as_slice(), RWLOCK_INIT) + }; + } +} diff --git a/library/std/src/sys/sgx/stack_overflow.rs b/library/std/src/sys/sgx/stack_overflow.rs new file mode 100644 index 00000000000..b96652a8330 --- /dev/null +++ b/library/std/src/sys/sgx/stack_overflow.rs @@ -0,0 +1,4 @@ +#[cfg_attr(test, allow(dead_code))] +pub unsafe fn init() {} + +pub unsafe fn cleanup() {} diff --git a/library/std/src/sys/sgx/stdio.rs b/library/std/src/sys/sgx/stdio.rs new file mode 100644 index 00000000000..716c174bd53 --- /dev/null +++ b/library/std/src/sys/sgx/stdio.rs @@ -0,0 +1,88 @@ +use fortanix_sgx_abi as abi; + +use crate::io; +#[cfg(not(test))] +use crate::slice; +#[cfg(not(test))] +use crate::str; +use crate::sys::fd::FileDesc; + +pub struct Stdin(()); +pub struct Stdout(()); +pub struct Stderr(()); + +fn with_std_fd<F: FnOnce(&FileDesc) -> R, R>(fd: abi::Fd, f: F) -> R { + let fd = FileDesc::new(fd); + let ret = f(&fd); + fd.into_raw(); + ret +} + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin(())) + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + with_std_fd(abi::FD_STDIN, |fd| fd.read(buf)) + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout(())) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + with_std_fd(abi::FD_STDOUT, |fd| fd.write(buf)) + } + + fn flush(&mut self) -> io::Result<()> { + with_std_fd(abi::FD_STDOUT, |fd| fd.flush()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr(())) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + with_std_fd(abi::FD_STDERR, |fd| fd.write(buf)) + } + + fn flush(&mut self) -> io::Result<()> { + with_std_fd(abi::FD_STDERR, |fd| fd.flush()) + } +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn is_ebadf(err: &io::Error) -> bool { + // FIXME: Rust normally maps Unix EBADF to `Other` + err.raw_os_error() == 
Some(abi::Error::BrokenPipe as _) +} + +pub fn panic_output() -> Option<impl io::Write> { + super::abi::panic::SgxPanicOutput::new() +} + +// This function is needed by libunwind. The symbol is named in pre-link args +// for the target specification, so keep that in sync. +#[cfg(not(test))] +#[no_mangle] +pub unsafe extern "C" fn __rust_print_err(m: *mut u8, s: i32) { + if s < 0 { + return; + } + let buf = slice::from_raw_parts(m as *const u8, s as _); + if let Ok(s) = str::from_utf8(&buf[..buf.iter().position(|&b| b == 0).unwrap_or(buf.len())]) { + eprint!("{}", s); + } +} diff --git a/library/std/src/sys/sgx/thread.rs b/library/std/src/sys/sgx/thread.rs new file mode 100644 index 00000000000..5895f70436e --- /dev/null +++ b/library/std/src/sys/sgx/thread.rs @@ -0,0 +1,93 @@ +#![cfg_attr(test, allow(dead_code))] // why is this necessary? +use crate::ffi::CStr; +use crate::io; +use crate::time::Duration; + +use super::abi::usercalls; + +pub struct Thread(task_queue::JoinHandle); + +pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; + +mod task_queue { + use crate::sync::mpsc; + use crate::sync::{Mutex, MutexGuard, Once}; + + pub type JoinHandle = mpsc::Receiver<()>; + + pub(super) struct Task { + p: Box<dyn FnOnce()>, + done: mpsc::Sender<()>, + } + + impl Task { + pub(super) fn new(p: Box<dyn FnOnce()>) -> (Task, JoinHandle) { + let (done, recv) = mpsc::channel(); + (Task { p, done }, recv) + } + + pub(super) fn run(self) { + (self.p)(); + let _ = self.done.send(()); + } + } + + #[cfg_attr(test, linkage = "available_externally")] + #[export_name = "_ZN16__rust_internals3std3sys3sgx6thread15TASK_QUEUE_INITE"] + static TASK_QUEUE_INIT: Once = Once::new(); + #[cfg_attr(test, linkage = "available_externally")] + #[export_name = "_ZN16__rust_internals3std3sys3sgx6thread10TASK_QUEUEE"] + static mut TASK_QUEUE: Option<Mutex<Vec<Task>>> = None; + + pub(super) fn lock() -> MutexGuard<'static, Vec<Task>> { + unsafe { + TASK_QUEUE_INIT.call_once(|| TASK_QUEUE = Some(Default::default())); + TASK_QUEUE.as_ref().unwrap().lock().unwrap() + } + } +} + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(_stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + let mut queue_lock = task_queue::lock(); + usercalls::launch_thread()?; + let (task, handle) = task_queue::Task::new(p); + queue_lock.push(task); + Ok(Thread(handle)) + } + + pub(super) fn entry() { + let mut pending_tasks = task_queue::lock(); + let task = rtunwrap!(Some, pending_tasks.pop()); + drop(pending_tasks); // make sure to not hold the task queue lock longer than necessary + task.run() + } + + pub fn yield_now() { + let wait_error = rtunwrap!(Err, usercalls::wait(0, usercalls::raw::WAIT_NO)); + rtassert!(wait_error.kind() == io::ErrorKind::WouldBlock); + } + + pub fn set_name(_name: &CStr) { + // FIXME: could store this pointer in TLS somewhere + } + + pub fn sleep(dur: Duration) { + usercalls::wait_timeout(0, dur, || true); + } + + pub fn join(self) { + let _ = self.0.recv(); + } +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs new file mode 100644 index 00000000000..b21784475f0 --- /dev/null +++ b/library/std/src/sys/sgx/thread_local_key.rs @@ -0,0 +1,28 @@ +use super::abi::tls::{Key as AbiKey, Tls}; + +pub type Key = usize; + +#[inline] +pub unsafe fn create(dtor: 
Option<unsafe extern "C" fn(*mut u8)>) -> Key { + Tls::create(dtor).as_usize() +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + Tls::set(AbiKey::from_usize(key), value) +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + Tls::get(AbiKey::from_usize(key)) +} + +#[inline] +pub unsafe fn destroy(key: Key) { + Tls::destroy(AbiKey::from_usize(key)) +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + false +} diff --git a/library/std/src/sys/sgx/time.rs b/library/std/src/sys/sgx/time.rs new file mode 100644 index 00000000000..e2f6e6dba69 --- /dev/null +++ b/library/std/src/sys/sgx/time.rs @@ -0,0 +1,54 @@ +use super::abi::usercalls; +use crate::time::Duration; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant(Duration); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime(Duration); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0)); + +impl Instant { + pub fn now() -> Instant { + Instant(usercalls::insecure_time()) + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.0.checked_sub(other.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_sub(*other)?)) + } + + pub fn actually_monotonic() -> bool { + false + } + + pub const fn zero() -> Instant { + Instant(Duration::from_secs(0)) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + SystemTime(usercalls::insecure_time()) + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_sub(*other)?)) + } +} diff --git a/library/std/src/sys/sgx/waitqueue.rs b/library/std/src/sys/sgx/waitqueue.rs new file mode 100644 index 00000000000..070afa55f30 --- /dev/null +++ b/library/std/src/sys/sgx/waitqueue.rs @@ -0,0 +1,619 @@ +//! A simple queue implementation for synchronization primitives. +//! +//! This queue is used to implement condition variables and mutexes. +//! +//! Users of this API are expected to use the `WaitVariable<T>` type. Since +//! that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to +//! allow shared access. +//! +//! Since userspace may send spurious wake-ups, the wakeup event state is +//! recorded in the enclave. The wakeup event state is protected by a spinlock. +//! The queue and associated wait state are stored in a `WaitVariable`. +use crate::num::NonZeroUsize; +use crate::ops::{Deref, DerefMut}; +use crate::time::Duration; + +use super::abi::thread; +use super::abi::usercalls; +use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE}; + +pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard}; +use self::unsafe_list::{UnsafeList, UnsafeListEntry}; + +/// A queue entry in a `WaitQueue`. +struct WaitEntry { + /// TCS address of the thread that is waiting + tcs: Tcs, + /// Whether this thread has been notified to be awoken + wake: bool, +} + +/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the +/// queue and the data are synchronized, since the type itself is not `Sync`.
+/// +/// Consumers of this API should use a synchronization primitive for shared +/// access, such as `SpinMutex`. +#[derive(Default)] +pub struct WaitVariable<T> { + queue: WaitQueue, + lock: T, +} + +impl<T> WaitVariable<T> { + pub const fn new(var: T) -> Self { + WaitVariable { queue: WaitQueue::new(), lock: var } + } + + pub fn queue_empty(&self) -> bool { + self.queue.is_empty() + } + + pub fn lock_var(&self) -> &T { + &self.lock + } + + pub fn lock_var_mut(&mut self) -> &mut T { + &mut self.lock + } +} + +#[derive(Copy, Clone)] +pub enum NotifiedTcs { + Single(Tcs), + All { count: NonZeroUsize }, +} + +/// An RAII guard that will notify a set of target threads as well as unlock +/// a mutex on drop. +pub struct WaitGuard<'a, T: 'a> { + mutex_guard: Option<SpinMutexGuard<'a, WaitVariable<T>>>, + notified_tcs: NotifiedTcs, +} + +/// A queue of threads that are waiting on some synchronization primitive. +/// +/// `UnsafeList` entries are allocated on the waiting thread's stack. This +/// avoids any global locking that might happen in the heap allocator. This is +/// safe because the waiting thread will not return from that stack frame until +/// after it is notified. The notifying thread ensures to clean up any +/// references to the list entries before sending the wakeup event. +pub struct WaitQueue { + // We use an inner Mutex here to protect the data in the face of spurious + // wakeups. + inner: UnsafeList<SpinMutex<WaitEntry>>, +} +unsafe impl Send for WaitQueue {} + +impl Default for WaitQueue { + fn default() -> Self { + Self::new() + } +} + +impl<'a, T> WaitGuard<'a, T> { + /// Returns which TCSes will be notified when this guard drops. + pub fn notified_tcs(&self) -> NotifiedTcs { + self.notified_tcs + } + + /// Drop this `WaitGuard`, after dropping another `guard`. + pub fn drop_after<U>(self, guard: U) { + drop(guard); + drop(self); + } +} + +impl<'a, T> Deref for WaitGuard<'a, T> { + type Target = SpinMutexGuard<'a, WaitVariable<T>>; + + fn deref(&self) -> &Self::Target { + self.mutex_guard.as_ref().unwrap() + } +} + +impl<'a, T> DerefMut for WaitGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.mutex_guard.as_mut().unwrap() + } +} + +impl<'a, T> Drop for WaitGuard<'a, T> { + fn drop(&mut self) { + drop(self.mutex_guard.take()); + let target_tcs = match self.notified_tcs { + NotifiedTcs::Single(tcs) => Some(tcs), + NotifiedTcs::All { .. } => None, + }; + rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs)); + } +} + +impl WaitQueue { + pub const fn new() -> Self { + WaitQueue { inner: UnsafeList::new() } + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait + /// until a wakeup event. + /// + /// This function does not return until this thread has been awoken. + pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) { + // very unsafe: check requirements of UnsafeList::push + unsafe { + let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry { + tcs: thread::current(), + wake: false, + })); + let entry = guard.queue.inner.push(&mut entry); + drop(guard); + before_wait(); + while !entry.lock().wake { + // don't panic, this would invalidate `entry` during unwinding + let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE)); + rtassert!(eventset & EV_UNPARK == EV_UNPARK); + } + } + } + + /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait + /// until a wakeup event or timeout. 
If event was observed, returns true. + /// If not, it will remove the calling thread from the wait queue. + pub fn wait_timeout<T, F: FnOnce()>( + lock: &SpinMutex<WaitVariable<T>>, + timeout: Duration, + before_wait: F, + ) -> bool { + // very unsafe: check requirements of UnsafeList::push + unsafe { + let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry { + tcs: thread::current(), + wake: false, + })); + let entry_lock = lock.lock().queue.inner.push(&mut entry); + before_wait(); + usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake); + // acquire the wait queue's lock first to avoid deadlock. + let mut guard = lock.lock(); + let success = entry_lock.lock().wake; + if !success { + // nobody is waking us up, so remove our entry from the wait queue. + guard.queue.inner.remove(&mut entry); + } + success + } + } + + /// Either find the next waiter on the wait queue, or return the mutex + /// guard unchanged. + /// + /// If a waiter is found, a `WaitGuard` is returned which will notify the + /// waiter when it is dropped. + pub fn notify_one<T>( + mut guard: SpinMutexGuard<'_, WaitVariable<T>>, + ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> { + unsafe { + if let Some(entry) = guard.queue.inner.pop() { + let mut entry_guard = entry.lock(); + let tcs = entry_guard.tcs; + entry_guard.wake = true; + drop(entry); + Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) }) + } else { + Err(guard) + } + } + } + + /// Either find any and all waiters on the wait queue, or return the mutex + /// guard unchanged. + /// + /// If at least one waiter is found, a `WaitGuard` is returned which will + /// notify all waiters when it is dropped. + pub fn notify_all<T>( + mut guard: SpinMutexGuard<'_, WaitVariable<T>>, + ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> { + unsafe { + let mut count = 0; + while let Some(entry) = guard.queue.inner.pop() { + count += 1; + let mut entry_guard = entry.lock(); + entry_guard.wake = true; + } + if let Some(count) = NonZeroUsize::new(count) { + Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } }) + } else { + Err(guard) + } + } + } +} + +/// A doubly-linked list where callers are in charge of memory allocation +/// of the nodes in the list. 
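The core protocol in `WaitQueue::wait` above is: publish a per-waiter `wake` flag, block on the platform's park primitive, and re-check the flag in a loop because userspace may deliver spurious `EV_UNPARK` events. The same shape can be sketched with stable std APIs; this is only an illustration of the protocol, not how the enclave code itself is built.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let wake = Arc::new(AtomicBool::new(false));
    let wake2 = Arc::clone(&wake);

    let waiter = thread::spawn(move || {
        // Analogue of the `while !entry.lock().wake` loop: `park` may return
        // spuriously, so the flag is the source of truth.
        while !wake2.load(Ordering::Acquire) {
            thread::park();
        }
    });

    // Analogue of `notify_one`: set the flag *before* sending the wakeup event.
    wake.store(true, Ordering::Release);
    waiter.thread().unpark();
    waiter.join().unwrap();
}
```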
+mod unsafe_list { + use crate::mem; + use crate::ptr::NonNull; + + pub struct UnsafeListEntry<T> { + next: NonNull<UnsafeListEntry<T>>, + prev: NonNull<UnsafeListEntry<T>>, + value: Option<T>, + } + + impl<T> UnsafeListEntry<T> { + fn dummy() -> Self { + UnsafeListEntry { next: NonNull::dangling(), prev: NonNull::dangling(), value: None } + } + + pub fn new(value: T) -> Self { + UnsafeListEntry { value: Some(value), ..Self::dummy() } + } + } + + pub struct UnsafeList<T> { + head_tail: NonNull<UnsafeListEntry<T>>, + head_tail_entry: Option<UnsafeListEntry<T>>, + } + + impl<T> UnsafeList<T> { + pub const fn new() -> Self { + unsafe { + UnsafeList { head_tail: NonNull::new_unchecked(1 as _), head_tail_entry: None } + } + } + + unsafe fn init(&mut self) { + if self.head_tail_entry.is_none() { + self.head_tail_entry = Some(UnsafeListEntry::dummy()); + self.head_tail = NonNull::new_unchecked(self.head_tail_entry.as_mut().unwrap()); + self.head_tail.as_mut().next = self.head_tail; + self.head_tail.as_mut().prev = self.head_tail; + } + } + + pub fn is_empty(&self) -> bool { + unsafe { + if self.head_tail_entry.is_some() { + let first = self.head_tail.as_ref().next; + if first == self.head_tail { + // ,-------> /---------\ next ---, + // | |head_tail| | + // `--- prev \---------/ <-------` + rtassert!(self.head_tail.as_ref().prev == first); + true + } else { + false + } + } else { + true + } + } + } + + /// Pushes an entry onto the back of the list. + /// + /// # Safety + /// + /// The entry must remain allocated until the entry is removed from the + /// list AND the caller who popped is done using the entry. Special + /// care must be taken in the caller of `push` to ensure unwinding does + /// not destroy the stack frame containing the entry. + pub unsafe fn push<'a>(&mut self, entry: &'a mut UnsafeListEntry<T>) -> &'a T { + self.init(); + + // BEFORE: + // /---------\ next ---> /---------\ + // ... |prev_tail| |head_tail| ... + // \---------/ <--- prev \---------/ + // + // AFTER: + // /---------\ next ---> /-----\ next ---> /---------\ + // ... |prev_tail| |entry| |head_tail| ... + // \---------/ <--- prev \-----/ <--- prev \---------/ + let mut entry = NonNull::new_unchecked(entry); + let mut prev_tail = mem::replace(&mut self.head_tail.as_mut().prev, entry); + entry.as_mut().prev = prev_tail; + entry.as_mut().next = self.head_tail; + prev_tail.as_mut().next = entry; + // unwrap ok: always `Some` on non-dummy entries + (*entry.as_ptr()).value.as_ref().unwrap() + } + + /// Pops an entry from the front of the list. + /// + /// # Safety + /// + /// The caller must make sure to synchronize ending the borrow of the + /// return value and deallocation of the containing entry. + pub unsafe fn pop<'a>(&mut self) -> Option<&'a T> { + self.init(); + + if self.is_empty() { + None + } else { + // BEFORE: + // /---------\ next ---> /-----\ next ---> /------\ + // ... |head_tail| |first| |second| ... + // \---------/ <--- prev \-----/ <--- prev \------/ + // + // AFTER: + // /---------\ next ---> /------\ + // ... |head_tail| |second| ... + // \---------/ <--- prev \------/ + let mut first = self.head_tail.as_mut().next; + let mut second = first.as_mut().next; + self.head_tail.as_mut().next = second; + second.as_mut().prev = self.head_tail; + first.as_mut().next = NonNull::dangling(); + first.as_mut().prev = NonNull::dangling(); + // unwrap ok: always `Some` on non-dummy entries + Some((*first.as_ptr()).value.as_ref().unwrap()) + } + } + + /// Removes an entry from the list. 
+ /// + /// # Safety + /// + /// The caller must ensure that `entry` has been pushed onto `self` + /// prior to this call and has not moved since then. + pub unsafe fn remove(&mut self, entry: &mut UnsafeListEntry<T>) { + rtassert!(!self.is_empty()); + // BEFORE: + // /----\ next ---> /-----\ next ---> /----\ + // ... |prev| |entry| |next| ... + // \----/ <--- prev \-----/ <--- prev \----/ + // + // AFTER: + // /----\ next ---> /----\ + // ... |prev| |next| ... + // \----/ <--- prev \----/ + let mut prev = entry.prev; + let mut next = entry.next; + prev.as_mut().next = next; + next.as_mut().prev = prev; + entry.next = NonNull::dangling(); + entry.prev = NonNull::dangling(); + } + } + + #[cfg(test)] + mod tests { + use super::*; + use crate::cell::Cell; + + unsafe fn assert_empty<T>(list: &mut UnsafeList<T>) { + assert!(list.pop().is_none(), "assertion failed: list is not empty"); + } + + #[test] + fn init_empty() { + unsafe { + assert_empty(&mut UnsafeList::<i32>::new()); + } + } + + #[test] + fn push_pop() { + unsafe { + let mut node = UnsafeListEntry::new(1234); + let mut list = UnsafeList::new(); + assert_eq!(list.push(&mut node), &1234); + assert_eq!(list.pop().unwrap(), &1234); + assert_empty(&mut list); + } + } + + #[test] + fn push_remove() { + unsafe { + let mut node = UnsafeListEntry::new(1234); + let mut list = UnsafeList::new(); + assert_eq!(list.push(&mut node), &1234); + list.remove(&mut node); + assert_empty(&mut list); + } + } + + #[test] + fn push_remove_pop() { + unsafe { + let mut node1 = UnsafeListEntry::new(11); + let mut node2 = UnsafeListEntry::new(12); + let mut node3 = UnsafeListEntry::new(13); + let mut node4 = UnsafeListEntry::new(14); + let mut node5 = UnsafeListEntry::new(15); + let mut list = UnsafeList::new(); + assert_eq!(list.push(&mut node1), &11); + assert_eq!(list.push(&mut node2), &12); + assert_eq!(list.push(&mut node3), &13); + assert_eq!(list.push(&mut node4), &14); + assert_eq!(list.push(&mut node5), &15); + + list.remove(&mut node1); + assert_eq!(list.pop().unwrap(), &12); + list.remove(&mut node3); + assert_eq!(list.pop().unwrap(), &14); + list.remove(&mut node5); + assert_empty(&mut list); + + assert_eq!(list.push(&mut node1), &11); + assert_eq!(list.pop().unwrap(), &11); + assert_empty(&mut list); + + assert_eq!(list.push(&mut node3), &13); + assert_eq!(list.push(&mut node4), &14); + list.remove(&mut node3); + list.remove(&mut node4); + assert_empty(&mut list); + } + } + + #[test] + fn complex_pushes_pops() { + unsafe { + let mut node1 = UnsafeListEntry::new(1234); + let mut node2 = UnsafeListEntry::new(4567); + let mut node3 = UnsafeListEntry::new(9999); + let mut node4 = UnsafeListEntry::new(8642); + let mut list = UnsafeList::new(); + list.push(&mut node1); + list.push(&mut node2); + assert_eq!(list.pop().unwrap(), &1234); + list.push(&mut node3); + assert_eq!(list.pop().unwrap(), &4567); + assert_eq!(list.pop().unwrap(), &9999); + assert_empty(&mut list); + list.push(&mut node4); + assert_eq!(list.pop().unwrap(), &8642); + assert_empty(&mut list); + } + } + + #[test] + fn cell() { + unsafe { + let mut node = UnsafeListEntry::new(Cell::new(0)); + let mut list = UnsafeList::new(); + let noderef = list.push(&mut node); + assert_eq!(noderef.get(), 0); + list.pop().unwrap().set(1); + assert_empty(&mut list); + assert_eq!(noderef.get(), 1); + } + } + } +} + +/// Trivial spinlock-based implementation of `sync::Mutex`. +// FIXME: Perhaps use Intel TSX to avoid locking? 
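In the `spin_mutex` module that follows, `try_lock` acquires the flag with `AtomicBool::compare_and_swap`. That method was later deprecated in favour of `compare_exchange`; a small sketch of the equivalent acquisition, written outside libstd, looks like this:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// `false -> true` transition succeeds only if the lock was free.
fn try_acquire(lock: &AtomicBool) -> bool {
    lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok()
}

fn main() {
    let lock = AtomicBool::new(false);
    assert!(try_acquire(&lock));            // first acquisition succeeds
    assert!(!try_acquire(&lock));           // second one observes the lock as held
    lock.store(false, Ordering::Release);   // release, as the guard's `Drop` does below
}
```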
+mod spin_mutex { + use crate::cell::UnsafeCell; + use crate::ops::{Deref, DerefMut}; + use crate::sync::atomic::{spin_loop_hint, AtomicBool, Ordering}; + + #[derive(Default)] + pub struct SpinMutex<T> { + value: UnsafeCell<T>, + lock: AtomicBool, + } + + unsafe impl<T: Send> Send for SpinMutex<T> {} + unsafe impl<T: Send> Sync for SpinMutex<T> {} + + pub struct SpinMutexGuard<'a, T: 'a> { + mutex: &'a SpinMutex<T>, + } + + impl<'a, T> !Send for SpinMutexGuard<'a, T> {} + unsafe impl<'a, T: Sync> Sync for SpinMutexGuard<'a, T> {} + + impl<T> SpinMutex<T> { + pub const fn new(value: T) -> Self { + SpinMutex { value: UnsafeCell::new(value), lock: AtomicBool::new(false) } + } + + #[inline(always)] + pub fn lock(&self) -> SpinMutexGuard<'_, T> { + loop { + match self.try_lock() { + None => { + while self.lock.load(Ordering::Relaxed) { + spin_loop_hint() + } + } + Some(guard) => return guard, + } + } + } + + #[inline(always)] + pub fn try_lock(&self) -> Option<SpinMutexGuard<'_, T>> { + if !self.lock.compare_and_swap(false, true, Ordering::Acquire) { + Some(SpinMutexGuard { mutex: self }) + } else { + None + } + } + } + + /// Lock the Mutex or return false. + pub macro try_lock_or_false($e:expr) { + if let Some(v) = $e.try_lock() { v } else { return false } + } + + impl<'a, T> Deref for SpinMutexGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.mutex.value.get() } + } + } + + impl<'a, T> DerefMut for SpinMutexGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.mutex.value.get() } + } + } + + impl<'a, T> Drop for SpinMutexGuard<'a, T> { + fn drop(&mut self) { + self.mutex.lock.store(false, Ordering::Release) + } + } + + #[cfg(test)] + mod tests { + #![allow(deprecated)] + + use super::*; + use crate::sync::Arc; + use crate::thread; + use crate::time::Duration; + + #[test] + fn sleep() { + let mutex = Arc::new(SpinMutex::<i32>::default()); + let mutex2 = mutex.clone(); + let guard = mutex.lock(); + let t1 = thread::spawn(move || { + *mutex2.lock() = 1; + }); + + thread::sleep(Duration::from_millis(50)); + + assert_eq!(*guard, 0); + drop(guard); + t1.join().unwrap(); + assert_eq!(*mutex.lock(), 1); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::sync::Arc; + use crate::thread; + + #[test] + fn queue() { + let wq = Arc::new(SpinMutex::<WaitVariable<()>>::default()); + let wq2 = wq.clone(); + + let locked = wq.lock(); + + let t1 = thread::spawn(move || { + // if we obtain the lock, the main thread should be waiting + assert!(WaitQueue::notify_one(wq2.lock()).is_ok()); + }); + + WaitQueue::wait(locked, || {}); + + t1.join().unwrap(); + } +} diff --git a/library/std/src/sys/unix/alloc.rs b/library/std/src/sys/unix/alloc.rs new file mode 100644 index 00000000000..8e193935460 --- /dev/null +++ b/library/std/src/sys/unix/alloc.rs @@ -0,0 +1,97 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr; +use crate::sys_common::alloc::{realloc_fallback, MIN_ALIGN}; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + // jemalloc provides alignment less than MIN_ALIGN for small allocations. + // So only rely on MIN_ALIGN if size >= align. + // Also see <https://github.com/rust-lang/rust/issues/45955> and + // <https://github.com/rust-lang/rust/issues/62251#issuecomment-507580914>. 
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::malloc(layout.size()) as *mut u8 + } else { + #[cfg(target_os = "macos")] + { + if layout.align() > (1 << 31) { + return ptr::null_mut(); + } + } + aligned_malloc(&layout) + } + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + // See the comment above in `alloc` for why this check looks the way it does. + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::calloc(layout.size(), 1) as *mut u8 + } else { + let ptr = self.alloc(layout); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + libc::free(ptr as *mut libc::c_void) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 + } else { + realloc_fallback(self, ptr, layout, new_size) + } + } +} + +#[cfg(any( + target_os = "android", + target_os = "illumos", + target_os = "redox", + target_os = "solaris" +))] +#[inline] +unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + // On android we currently target API level 9 which unfortunately + // doesn't have the `posix_memalign` API used below. Instead we use + // `memalign`, but this unfortunately has the property on some systems + // where the memory returned cannot be deallocated by `free`! + // + // Upon closer inspection, however, this appears to work just fine with + // Android, so for this platform we should be fine to call `memalign` + // (which is present in API level 9). Some helpful references could + // possibly be chromium using memalign [1], attempts at documenting that + // memalign + free is ok [2] [3], or the current source of chromium + // which still uses memalign on android [4]. + // + // [1]: https://codereview.chromium.org/10796020/ + // [2]: https://code.google.com/p/android/issues/detail?id=35391 + // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579 + // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/ + // /memory/aligned_memory.cc + libc::memalign(layout.align(), layout.size()) as *mut u8 +} + +#[cfg(not(any( + target_os = "android", + target_os = "illumos", + target_os = "redox", + target_os = "solaris" +)))] +#[inline] +unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + let mut out = ptr::null_mut(); + // posix_memalign requires that the alignment be a multiple of `sizeof(void*)`. + // Since these are all powers of 2, we can just use max. + let align = layout.align().max(crate::mem::size_of::<usize>()); + let ret = libc::posix_memalign(&mut out, align, layout.size()); + if ret != 0 { ptr::null_mut() } else { out as *mut u8 } +} diff --git a/library/std/src/sys/unix/android.rs b/library/std/src/sys/unix/android.rs new file mode 100644 index 00000000000..ea05ee3d7ce --- /dev/null +++ b/library/std/src/sys/unix/android.rs @@ -0,0 +1,167 @@ +//! Android ABI-compatibility module +//! +//! The ABI of Android has changed quite a bit over time, and libstd attempts to +//! be both forwards and backwards compatible as much as possible. We want to +//! always work with the most recent version of Android, but we also want to +//! work with older versions of Android for whenever projects need to. +//! +//! Our current minimum supported Android version is `android-9`, e.g., Android +//! with API level 9. 
We then in theory want to work on that and all future +//! versions of Android! +//! +//! Some of the detection here is done at runtime via `dlopen` and +//! introspection. Other times no detection is performed at all and we just +//! provide a fallback implementation as some versions of Android we support +//! don't have the function. +//! +//! You'll find more details below about why each compatibility shim is needed. + +#![cfg(target_os = "android")] + +use libc::{c_int, c_void, sighandler_t, size_t, ssize_t}; +use libc::{ftruncate, pread, pwrite}; + +use super::{cvt, cvt_r}; +use crate::io; + +// The `log2` and `log2f` functions apparently appeared in android-18, or at +// least you can see they're not present in the android-17 header [1] and they +// are present in android-18 [2]. +// +// [1]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms +// /android-17/arch-arm/usr/include/math.h +// [2]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms +// /android-18/arch-arm/usr/include/math.h +// +// Note that these shims are likely less precise than directly calling `log2`, +// but hopefully that should be enough for now... +// +// Note that mathematically, for any arbitrary `y`: +// +// log_2(x) = log_y(x) / log_y(2) +// = log_y(x) / (1 / log_2(y)) +// = log_y(x) * log_2(y) +// +// Hence because `ln` (log_e) is available on all Android we just choose `y = e` +// and get: +// +// log_2(x) = ln(x) * log_2(e) + +#[cfg(not(test))] +pub fn log2f32(f: f32) -> f32 { + f.ln() * crate::f32::consts::LOG2_E +} + +#[cfg(not(test))] +pub fn log2f64(f: f64) -> f64 { + f.ln() * crate::f64::consts::LOG2_E +} + +// Back in the day [1] the `signal` function was just an inline wrapper +// around `bsd_signal`, but starting in API level android-20 the `signal` +// symbols was introduced [2]. Finally, in android-21 the API `bsd_signal` was +// removed [3]. +// +// Basically this means that if we want to be binary compatible with multiple +// Android releases (oldest being 9 and newest being 21) then we need to check +// for both symbols and not actually link against either. +// +// [1]: https://chromium.googlesource.com/android_tools/+/20ee6d20/ndk/platforms +// /android-18/arch-arm/usr/include/signal.h +// [2]: https://chromium.googlesource.com/android_tools/+/fbd420/ndk_experimental +// /platforms/android-20/arch-arm +// /usr/include/signal.h +// [3]: https://chromium.googlesource.com/android_tools/+/20ee6d/ndk/platforms +// /android-21/arch-arm/usr/include/signal.h +pub unsafe fn signal(signum: c_int, handler: sighandler_t) -> sighandler_t { + weak!(fn signal(c_int, sighandler_t) -> sighandler_t); + weak!(fn bsd_signal(c_int, sighandler_t) -> sighandler_t); + + let f = signal.get().or_else(|| bsd_signal.get()); + let f = f.expect("neither `signal` nor `bsd_signal` symbols found"); + f(signum, handler) +} + +// The `ftruncate64` symbol apparently appeared in android-12, so we do some +// dynamic detection to see if we can figure out whether `ftruncate64` exists. +// +// If it doesn't we just fall back to `ftruncate`, generating an error for +// too-large values. 
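The `log2f32`/`log2f64` shims above rely on the change-of-base identity log2(x) = ln(x) · log2(e). A quick standalone numeric check of that identity (not part of the shim itself):

```rust
fn main() {
    let x: f64 = 123.456;
    let via_ln = x.ln() * std::f64::consts::LOG2_E;
    let direct = x.log2();
    // The two should agree to within ordinary floating-point error.
    assert!((via_ln - direct).abs() < 1e-12);
    println!("log2({}) = {}", x, via_ln);
}
```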
+#[cfg(target_pointer_width = "32")] +pub fn ftruncate64(fd: c_int, size: u64) -> io::Result<()> { + weak!(fn ftruncate64(c_int, i64) -> c_int); + + unsafe { + match ftruncate64.get() { + Some(f) => cvt_r(|| f(fd, size as i64)).map(drop), + None => { + if size > i32::MAX as u64 { + Err(io::Error::new(io::ErrorKind::InvalidInput, "cannot truncate >2GB")) + } else { + cvt_r(|| ftruncate(fd, size as i32)).map(drop) + } + } + } + } +} + +#[cfg(target_pointer_width = "64")] +pub fn ftruncate64(fd: c_int, size: u64) -> io::Result<()> { + unsafe { cvt_r(|| ftruncate(fd, size as i64)).map(drop) } +} + +#[cfg(target_pointer_width = "32")] +pub unsafe fn cvt_pread64( + fd: c_int, + buf: *mut c_void, + count: size_t, + offset: i64, +) -> io::Result<ssize_t> { + use crate::convert::TryInto; + weak!(fn pread64(c_int, *mut c_void, size_t, i64) -> ssize_t); + pread64.get().map(|f| cvt(f(fd, buf, count, offset))).unwrap_or_else(|| { + if let Ok(o) = offset.try_into() { + cvt(pread(fd, buf, count, o)) + } else { + Err(io::Error::new(io::ErrorKind::InvalidInput, "cannot pread >2GB")) + } + }) +} + +#[cfg(target_pointer_width = "32")] +pub unsafe fn cvt_pwrite64( + fd: c_int, + buf: *const c_void, + count: size_t, + offset: i64, +) -> io::Result<ssize_t> { + use crate::convert::TryInto; + weak!(fn pwrite64(c_int, *const c_void, size_t, i64) -> ssize_t); + pwrite64.get().map(|f| cvt(f(fd, buf, count, offset))).unwrap_or_else(|| { + if let Ok(o) = offset.try_into() { + cvt(pwrite(fd, buf, count, o)) + } else { + Err(io::Error::new(io::ErrorKind::InvalidInput, "cannot pwrite >2GB")) + } + }) +} + +#[cfg(target_pointer_width = "64")] +pub unsafe fn cvt_pread64( + fd: c_int, + buf: *mut c_void, + count: size_t, + offset: i64, +) -> io::Result<ssize_t> { + cvt(pread(fd, buf, count, offset)) +} + +#[cfg(target_pointer_width = "64")] +pub unsafe fn cvt_pwrite64( + fd: c_int, + buf: *const c_void, + count: size_t, + offset: i64, +) -> io::Result<ssize_t> { + cvt(pwrite(fd, buf, count, offset)) +} diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs new file mode 100644 index 00000000000..9bc44a59482 --- /dev/null +++ b/library/std/src/sys/unix/args.rs @@ -0,0 +1,251 @@ +//! Global initialization and retrieval of command line arguments. +//! +//! On some platforms these are stored during runtime startup, +//! and on some they are retrieved from the system on demand. + +#![allow(dead_code)] // runtime init functions not used during testing + +use crate::ffi::OsString; +use crate::marker::PhantomData; +use crate::vec; + +/// One-time global initialization. +pub unsafe fn init(argc: isize, argv: *const *const u8) { + imp::init(argc, argv) +} + +/// One-time global cleanup. 
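The `weak!` lookups used by `signal` and `ftruncate64` above resolve optional symbols at runtime; conceptually they come down to a `dlsym` query against the already-loaded libc. A hedged sketch of that idea using the `libc` crate directly follows; `lookup_ftruncate64` is a hypothetical helper, and the transmuted signature has to be trusted by the caller.

```rust
use std::ffi::CString;
use std::mem;

// Signature of the optional symbol we are probing for.
type Ftruncate64 = unsafe extern "C" fn(libc::c_int, i64) -> libc::c_int;

fn lookup_ftruncate64() -> Option<Ftruncate64> {
    let name = CString::new("ftruncate64").unwrap();
    // Ask the dynamic linker whether the symbol exists in the current process.
    let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) };
    if sym.is_null() { None } else { Some(unsafe { mem::transmute(sym) }) }
}

fn main() {
    match lookup_ftruncate64() {
        Some(_f) => println!("ftruncate64 is available"),
        None => println!("ftruncate64 not found; falling back to ftruncate"),
    }
}
```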
+pub unsafe fn cleanup() { + imp::cleanup() +} + +/// Returns the command line arguments +pub fn args() -> Args { + imp::args() +} + +pub struct Args { + iter: vec::IntoIter<OsString>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.iter.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.iter.next_back() + } +} + +#[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + target_os = "dragonfly", + target_os = "netbsd", + target_os = "openbsd", + target_os = "solaris", + target_os = "illumos", + target_os = "emscripten", + target_os = "haiku", + target_os = "l4re", + target_os = "fuchsia", + target_os = "redox" +))] +mod imp { + use super::Args; + use crate::ffi::{CStr, OsString}; + use crate::marker::PhantomData; + use crate::os::unix::prelude::*; + use crate::ptr; + use crate::sync::atomic::{AtomicIsize, AtomicPtr, Ordering}; + + use crate::sys_common::mutex::Mutex; + + static ARGC: AtomicIsize = AtomicIsize::new(0); + static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut()); + // We never call `ENV_LOCK.init()`, so it is UB to attempt to + // acquire this mutex reentrantly! + static LOCK: Mutex = Mutex::new(); + + unsafe fn really_init(argc: isize, argv: *const *const u8) { + let _guard = LOCK.lock(); + ARGC.store(argc, Ordering::Relaxed); + ARGV.store(argv as *mut _, Ordering::Relaxed); + } + + #[inline(always)] + pub unsafe fn init(_argc: isize, _argv: *const *const u8) { + // On Linux-GNU, we rely on `ARGV_INIT_ARRAY` below to initialize + // `ARGC` and `ARGV`. But in Miri that does not actually happen so we + // still initialize here. + #[cfg(any(miri, not(all(target_os = "linux", target_env = "gnu"))))] + really_init(_argc, _argv); + } + + /// glibc passes argc, argv, and envp to functions in .init_array, as a non-standard extension. + /// This allows `std::env::args` to work even in a `cdylib`, as it does on macOS and Windows. 
+ #[cfg(all(target_os = "linux", target_env = "gnu"))] + #[used] + #[link_section = ".init_array.00099"] + static ARGV_INIT_ARRAY: extern "C" fn( + crate::os::raw::c_int, + *const *const u8, + *const *const u8, + ) = { + extern "C" fn init_wrapper( + argc: crate::os::raw::c_int, + argv: *const *const u8, + _envp: *const *const u8, + ) { + unsafe { + really_init(argc as isize, argv); + } + } + init_wrapper + }; + + pub unsafe fn cleanup() { + let _guard = LOCK.lock(); + ARGC.store(0, Ordering::Relaxed); + ARGV.store(ptr::null_mut(), Ordering::Relaxed); + } + + pub fn args() -> Args { + Args { iter: clone().into_iter(), _dont_send_or_sync_me: PhantomData } + } + + fn clone() -> Vec<OsString> { + unsafe { + let _guard = LOCK.lock(); + let argc = ARGC.load(Ordering::Relaxed); + let argv = ARGV.load(Ordering::Relaxed); + (0..argc) + .map(|i| { + let cstr = CStr::from_ptr(*argv.offset(i) as *const libc::c_char); + OsStringExt::from_vec(cstr.to_bytes().to_vec()) + }) + .collect() + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +mod imp { + use super::Args; + use crate::ffi::CStr; + use crate::marker::PhantomData; + + pub unsafe fn init(_argc: isize, _argv: *const *const u8) {} + + pub fn cleanup() {} + + #[cfg(target_os = "macos")] + pub fn args() -> Args { + use crate::os::unix::prelude::*; + extern "C" { + // These functions are in crt_externs.h. + fn _NSGetArgc() -> *mut libc::c_int; + fn _NSGetArgv() -> *mut *mut *mut libc::c_char; + } + + let vec = unsafe { + let (argc, argv) = + (*_NSGetArgc() as isize, *_NSGetArgv() as *const *const libc::c_char); + (0..argc as isize) + .map(|i| { + let bytes = CStr::from_ptr(*argv.offset(i)).to_bytes().to_vec(); + OsStringExt::from_vec(bytes) + }) + .collect::<Vec<_>>() + }; + Args { iter: vec.into_iter(), _dont_send_or_sync_me: PhantomData } + } + + // As _NSGetArgc and _NSGetArgv aren't mentioned in iOS docs + // and use underscores in their names - they're most probably + // are considered private and therefore should be avoided + // Here is another way to get arguments using Objective C + // runtime + // + // In general it looks like: + // res = Vec::new() + // let args = [[NSProcessInfo processInfo] arguments] + // for i in (0..[args count]) + // res.push([args objectAtIndex:i]) + // res + #[cfg(target_os = "ios")] + pub fn args() -> Args { + use crate::ffi::OsString; + use crate::mem; + use crate::str; + + extern "C" { + fn sel_registerName(name: *const libc::c_uchar) -> Sel; + fn objc_getClass(class_name: *const libc::c_uchar) -> NsId; + } + + #[cfg(target_arch = "aarch64")] + extern "C" { + fn objc_msgSend(obj: NsId, sel: Sel) -> NsId; + #[allow(clashing_extern_declarations)] + #[link_name = "objc_msgSend"] + fn objc_msgSend_ul(obj: NsId, sel: Sel, i: libc::c_ulong) -> NsId; + } + + #[cfg(not(target_arch = "aarch64"))] + extern "C" { + fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId; + #[allow(clashing_extern_declarations)] + #[link_name = "objc_msgSend"] + fn objc_msgSend_ul(obj: NsId, sel: Sel, ...) 
-> NsId; + } + + type Sel = *const libc::c_void; + type NsId = *const libc::c_void; + + let mut res = Vec::new(); + + unsafe { + let process_info_sel = sel_registerName("processInfo\0".as_ptr()); + let arguments_sel = sel_registerName("arguments\0".as_ptr()); + let utf8_sel = sel_registerName("UTF8String\0".as_ptr()); + let count_sel = sel_registerName("count\0".as_ptr()); + let object_at_sel = sel_registerName("objectAtIndex:\0".as_ptr()); + + let klass = objc_getClass("NSProcessInfo\0".as_ptr()); + let info = objc_msgSend(klass, process_info_sel); + let args = objc_msgSend(info, arguments_sel); + + let cnt: usize = mem::transmute(objc_msgSend(args, count_sel)); + for i in 0..cnt { + let tmp = objc_msgSend_ul(args, object_at_sel, i as libc::c_ulong); + let utf_c_str: *const libc::c_char = mem::transmute(objc_msgSend(tmp, utf8_sel)); + let bytes = CStr::from_ptr(utf_c_str).to_bytes(); + res.push(OsString::from(str::from_utf8(bytes).unwrap())) + } + } + + Args { iter: res.into_iter(), _dont_send_or_sync_me: PhantomData } + } +} diff --git a/library/std/src/sys/unix/cmath.rs b/library/std/src/sys/unix/cmath.rs new file mode 100644 index 00000000000..f327b69fc75 --- /dev/null +++ b/library/std/src/sys/unix/cmath.rs @@ -0,0 +1,32 @@ +#![cfg(not(test))] + +use libc::{c_double, c_float}; + +extern "C" { + pub fn acos(n: c_double) -> c_double; + pub fn acosf(n: c_float) -> c_float; + pub fn asin(n: c_double) -> c_double; + pub fn asinf(n: c_float) -> c_float; + pub fn atan(n: c_double) -> c_double; + pub fn atan2(a: c_double, b: c_double) -> c_double; + pub fn atan2f(a: c_float, b: c_float) -> c_float; + pub fn atanf(n: c_float) -> c_float; + pub fn cbrt(n: c_double) -> c_double; + pub fn cbrtf(n: c_float) -> c_float; + pub fn cosh(n: c_double) -> c_double; + pub fn coshf(n: c_float) -> c_float; + pub fn expm1(n: c_double) -> c_double; + pub fn expm1f(n: c_float) -> c_float; + pub fn fdim(a: c_double, b: c_double) -> c_double; + pub fn fdimf(a: c_float, b: c_float) -> c_float; + pub fn hypot(x: c_double, y: c_double) -> c_double; + pub fn hypotf(x: c_float, y: c_float) -> c_float; + pub fn log1p(n: c_double) -> c_double; + pub fn log1pf(n: c_float) -> c_float; + pub fn sinh(n: c_double) -> c_double; + pub fn sinhf(n: c_float) -> c_float; + pub fn tan(n: c_double) -> c_double; + pub fn tanf(n: c_float) -> c_float; + pub fn tanh(n: c_double) -> c_double; + pub fn tanhf(n: c_float) -> c_float; +} diff --git a/library/std/src/sys/unix/condvar.rs b/library/std/src/sys/unix/condvar.rs new file mode 100644 index 00000000000..9f1847943f3 --- /dev/null +++ b/library/std/src/sys/unix/condvar.rs @@ -0,0 +1,174 @@ +use crate::cell::UnsafeCell; +use crate::sys::mutex::{self, Mutex}; +use crate::time::Duration; + +pub struct Condvar { + inner: UnsafeCell<libc::pthread_cond_t>, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +const TIMESPEC_MAX: libc::timespec = + libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 }; + +fn saturating_cast_to_time_t(value: u64) -> libc::time_t { + if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t } +} + +impl Condvar { + pub const fn new() -> Condvar { + // Might be moved and address is changing it is better to avoid + // initialization of potentially opaque OS data before it landed + Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) } + } + + #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "l4re", + target_os = "android", + target_os = "redox" + ))] + pub 
unsafe fn init(&mut self) {} + + #[cfg(not(any( + target_os = "macos", + target_os = "ios", + target_os = "l4re", + target_os = "android", + target_os = "redox" + )))] + pub unsafe fn init(&mut self) { + use crate::mem::MaybeUninit; + let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit(); + let r = libc::pthread_condattr_init(attr.as_mut_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); + assert_eq!(r, 0); + let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); + assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn notify_one(&self) { + let r = libc::pthread_cond_signal(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn notify_all(&self) { + let r = libc::pthread_cond_broadcast(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = libc::pthread_cond_wait(self.inner.get(), mutex::raw(mutex)); + debug_assert_eq!(r, 0); + } + + // This implementation is used on systems that support pthread_condattr_setclock + // where we configure condition variable to use monotonic clock (instead of + // default system clock). This approach avoids all problems that result + // from changes made to the system time. + #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))] + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::mem; + + let mut now: libc::timespec = mem::zeroed(); + let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now); + assert_eq!(r, 0); + + // Nanosecond calculations can't overflow because both values are below 1e9. + let nsec = dur.subsec_nanos() + now.tv_nsec as u32; + + let sec = saturating_cast_to_time_t(dur.as_secs()) + .checked_add((nsec / 1_000_000_000) as libc::time_t) + .and_then(|s| s.checked_add(now.tv_sec)); + let nsec = nsec % 1_000_000_000; + + let timeout = + sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX); + + let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), &timeout); + assert!(r == libc::ETIMEDOUT || r == 0); + r == 0 + } + + // This implementation is modeled after libcxx's condition_variable + // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46 + // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367 + #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))] + pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool { + use crate::ptr; + use crate::time::Instant; + + // 1000 years + let max_dur = Duration::from_secs(1000 * 365 * 86400); + + if dur > max_dur { + // OSX implementation of `pthread_cond_timedwait` is buggy + // with super long durations. When duration is greater than + // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` + // in macOS Sierra return error 316. + // + // This program demonstrates the issue: + // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c + // + // To work around this issue, and possible bugs of other OSes, timeout + // is clamped to 1000 years, which is allowable per the API of `wait_timeout` + // because of spurious wakeups. + + dur = max_dur; + } + + // First, figure out what time it currently is, in both system and + // stable time. pthread_cond_timedwait uses system time, but we want to + // report timeout based on stable time. 
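Both timeout paths in this condvar have to turn "now plus a relative `Duration`" into an absolute `timespec`, taking care that the addition can overflow `time_t`; overflow is treated as "no deadline" (the `TIMESPEC_MAX` case). A standalone sketch of that arithmetic, without the libc types, is shown below; `absolute_deadline` is a hypothetical helper, not the function used here.

```rust
use std::convert::TryFrom;
use std::time::Duration;

const NSEC_PER_SEC: u64 = 1_000_000_000;

/// Returns `None` to mean "no usable deadline" (the `TIMESPEC_MAX` case above).
fn absolute_deadline(now_sec: i64, now_nsec: u32, dur: Duration) -> Option<(i64, u32)> {
    // Both nanosecond parts are below 1e9, so this addition cannot overflow u64.
    let nsec = now_nsec as u64 + dur.subsec_nanos() as u64;
    let sec = i64::try_from(dur.as_secs()).ok()?
        .checked_add((nsec / NSEC_PER_SEC) as i64)?
        .checked_add(now_sec)?;
    Some((sec, (nsec % NSEC_PER_SEC) as u32))
}

fn main() {
    // 100s + 0.9s of "now", plus a 1.2s timeout, lands at 102s + 0.1s.
    assert_eq!(
        absolute_deadline(100, 900_000_000, Duration::new(1, 200_000_000)),
        Some((102, 100_000_000))
    );
    // Absurdly large timeouts overflow and degrade to "no deadline".
    assert_eq!(absolute_deadline(100, 0, Duration::from_secs(u64::MAX)), None);
}
```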
+ let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 }; + let stable_now = Instant::now(); + let r = libc::gettimeofday(&mut sys_now, ptr::null_mut()); + debug_assert_eq!(r, 0); + + let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long; + let extra = (nsec / 1_000_000_000) as libc::time_t; + let nsec = nsec % 1_000_000_000; + let seconds = saturating_cast_to_time_t(dur.as_secs()); + + let timeout = sys_now + .tv_sec + .checked_add(extra) + .and_then(|s| s.checked_add(seconds)) + .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec }) + .unwrap_or(TIMESPEC_MAX); + + // And wait! + let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), &timeout); + debug_assert!(r == libc::ETIMEDOUT || r == 0); + + // ETIMEDOUT is not a totally reliable method of determining timeout due + // to clock shifts, so do the check ourselves + stable_now.elapsed() < dur + } + + #[inline] + #[cfg(not(target_os = "dragonfly"))] + pub unsafe fn destroy(&self) { + let r = libc::pthread_cond_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + #[cfg(target_os = "dragonfly")] + pub unsafe fn destroy(&self) { + let r = libc::pthread_cond_destroy(self.inner.get()); + // On DragonFly pthread_cond_destroy() returns EINVAL if called on + // a condvar that was just initialized with + // libc::PTHREAD_COND_INITIALIZER. Once it is used or + // pthread_cond_init() is called, this behaviour no longer occurs. + debug_assert!(r == 0 || r == libc::EINVAL); + } +} diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs new file mode 100644 index 00000000000..7f5e9b04dba --- /dev/null +++ b/library/std/src/sys/unix/env.rs @@ -0,0 +1,175 @@ +#[cfg(target_os = "linux")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "linux"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "macos")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "macos"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".dylib"; + pub const DLL_EXTENSION: &str = "dylib"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "ios")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "ios"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".dylib"; + pub const DLL_EXTENSION: &str = "dylib"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "freebsd")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "freebsd"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "dragonfly")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "dragonfly"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "netbsd")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "netbsd"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str 
= ""; +} + +#[cfg(target_os = "openbsd")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "openbsd"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "android")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "android"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "solaris")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "solaris"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "illumos")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "illumos"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "haiku")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "haiku"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(all(target_os = "emscripten", target_arch = "asmjs"))] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "emscripten"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ".js"; + pub const EXE_EXTENSION: &str = "js"; +} + +#[cfg(all(target_os = "emscripten", target_arch = "wasm32"))] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "emscripten"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ".js"; + pub const EXE_EXTENSION: &str = "js"; +} + +#[cfg(target_os = "fuchsia")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "fuchsia"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "l4re")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "l4re"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} + +#[cfg(target_os = "redox")] +pub mod os { + pub const FAMILY: &str = "unix"; + pub const OS: &str = "redox"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} diff --git a/library/std/src/sys/unix/ext/ffi.rs b/library/std/src/sys/unix/ext/ffi.rs new file mode 100644 index 00000000000..76b34a6b5d8 --- /dev/null +++ b/library/std/src/sys/unix/ext/ffi.rs @@ -0,0 +1,38 @@ +//! Unix-specific extension to the primitives in the `std::ffi` module +//! +//! # Examples +//! +//! ``` +//! use std::ffi::OsString; +//! use std::os::unix::ffi::OsStringExt; +//! +//! 
let bytes = b"foo".to_vec(); +//! +//! // OsStringExt::from_vec +//! let os_string = OsString::from_vec(bytes); +//! assert_eq!(os_string.to_str(), Some("foo")); +//! +//! // OsStringExt::into_vec +//! let bytes = os_string.into_vec(); +//! assert_eq!(bytes, b"foo"); +//! ``` +//! +//! ``` +//! use std::ffi::OsStr; +//! use std::os::unix::ffi::OsStrExt; +//! +//! let bytes = b"foo"; +//! +//! // OsStrExt::from_bytes +//! let os_str = OsStr::from_bytes(bytes); +//! assert_eq!(os_str.to_str(), Some("foo")); +//! +//! // OsStrExt::as_bytes +//! let bytes = os_str.as_bytes(); +//! assert_eq!(bytes, b"foo"); +//! ``` + +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use crate::sys_common::os_str_bytes::*; diff --git a/library/std/src/sys/unix/ext/fs.rs b/library/std/src/sys/unix/ext/fs.rs new file mode 100644 index 00000000000..f174a59b49a --- /dev/null +++ b/library/std/src/sys/unix/ext/fs.rs @@ -0,0 +1,904 @@ +//! Unix-specific extensions to primitives in the `std::fs` module. + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs::{self, OpenOptions, Permissions}; +use crate::io; +use crate::path::Path; +use crate::sys; +use crate::sys::platform::fs::MetadataExt as UnixMetadataExt; +use crate::sys_common::{AsInner, AsInnerMut, FromInner}; + +/// Unix-specific extensions to [`File`]. +/// +/// [`File`]: ../../../../std/fs/struct.File.html +#[stable(feature = "file_offset", since = "1.15.0")] +pub trait FileExt { + /// Reads a number of bytes starting from a given offset. + /// + /// Returns the number of bytes read. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Note that similar to [`File::read`], it is not an error to return with a + /// short read. + /// + /// [`File::read`]: ../../../../std/fs/struct.File.html#method.read + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs::File; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let mut buf = [0u8; 8]; + /// let file = File::open("foo.txt")?; + /// + /// // We now read 8 bytes from the offset 10. + /// let num_bytes_read = file.read_at(&mut buf, 10)?; + /// println!("read {} bytes: {:?}", num_bytes_read, buf); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>; + + /// Reads the exact number of byte required to fill `buf` from the given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Similar to [`Read::read_exact`] but uses [`read_at`] instead of `read`. + /// + /// [`Read::read_exact`]: ../../../../std/io/trait.Read.html#method.read_exact + /// [`read_at`]: #tymethod.read_at + /// + /// # Errors + /// + /// If this function encounters an error of the kind + /// [`ErrorKind::Interrupted`] then the error is ignored and the operation + /// will continue. + /// + /// If this function encounters an "end of file" before completely filling + /// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`]. + /// The contents of `buf` are unspecified in this case. + /// + /// If any other read error is encountered then this function immediately + /// returns. 
The contents of `buf` are unspecified in this case. + /// + /// If this function returns an error, it is unspecified how many bytes it + /// has read, but it will never read more than would be necessary to + /// completely fill the buffer. + /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`ErrorKind::UnexpectedEof`]: ../../../../std/io/enum.ErrorKind.html#variant.UnexpectedEof + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs::File; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let mut buf = [0u8; 8]; + /// let file = File::open("foo.txt")?; + /// + /// // We now read exactly 8 bytes from the offset 10. + /// file.read_exact_at(&mut buf, 10)?; + /// println!("read {} bytes: {:?}", buf.len(), buf); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.read_at(buf, offset) { + Ok(0) => break, + Ok(n) => { + let tmp = buf; + buf = &mut tmp[n..]; + offset += n as u64; + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + if !buf.is_empty() { + Err(io::Error::new(io::ErrorKind::UnexpectedEof, "failed to fill whole buffer")) + } else { + Ok(()) + } + } + + /// Writes a number of bytes starting from a given offset. + /// + /// Returns the number of bytes written. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// When writing beyond the end of the file, the file is appropriately + /// extended and the intermediate bytes are initialized with the value 0. + /// + /// Note that similar to [`File::write`], it is not an error to return a + /// short write. + /// + /// [`File::write`]: ../../../../std/fs/struct.File.html#method.write + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::io; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let file = File::open("foo.txt")?; + /// + /// // We now write at the offset 10. + /// file.write_at(b"sushi", 10)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>; + + /// Attempts to write an entire buffer starting from a given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// This method will continuously call [`write_at`] until there is no more data + /// to be written or an error of non-[`ErrorKind::Interrupted`] kind is + /// returned. This method will not return until the entire buffer has been + /// successfully written or such an error occurs. The first error that is + /// not of [`ErrorKind::Interrupted`] kind generated from this method will be + /// returned. + /// + /// # Errors + /// + /// This function will return the first error of + /// non-[`ErrorKind::Interrupted`] kind that [`write_at`] returns. 
+ /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`write_at`]: #tymethod.write_at + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::io; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let file = File::open("foo.txt")?; + /// + /// // We now write at the offset 10. + /// file.write_all_at(b"sushi", 10)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.write_at(buf, offset) { + Ok(0) => { + return Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to write whole buffer", + )); + } + Ok(n) => { + buf = &buf[n..]; + offset += n as u64 + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + Ok(()) + } +} + +#[stable(feature = "file_offset", since = "1.15.0")] +impl FileExt for fs::File { + fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.as_inner().read_at(buf, offset) + } + fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.as_inner().write_at(buf, offset) + } +} + +/// Unix-specific extensions to [`fs::Permissions`]. +/// +/// [`fs::Permissions`]: ../../../../std/fs/struct.Permissions.html +#[stable(feature = "fs_ext", since = "1.1.0")] +pub trait PermissionsExt { + /// Returns the underlying raw `st_mode` bits that contain the standard + /// Unix permissions for this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::os::unix::fs::PermissionsExt; + /// + /// fn main() -> std::io::Result<()> { + /// let f = File::create("foo.txt")?; + /// let metadata = f.metadata()?; + /// let permissions = metadata.permissions(); + /// + /// println!("permissions: {:o}", permissions.mode()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn mode(&self) -> u32; + + /// Sets the underlying raw bits for this set of permissions. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::os::unix::fs::PermissionsExt; + /// + /// fn main() -> std::io::Result<()> { + /// let f = File::create("foo.txt")?; + /// let metadata = f.metadata()?; + /// let mut permissions = metadata.permissions(); + /// + /// permissions.set_mode(0o644); // Read/write for owner and read for others. + /// assert_eq!(permissions.mode(), 0o644); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn set_mode(&mut self, mode: u32); + + /// Creates a new instance of `Permissions` from the given set of Unix + /// permission bits. + /// + /// # Examples + /// + /// ``` + /// use std::fs::Permissions; + /// use std::os::unix::fs::PermissionsExt; + /// + /// // Read/write for owner and read for others. + /// let permissions = Permissions::from_mode(0o644); + /// assert_eq!(permissions.mode(), 0o644); + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn from_mode(mode: u32) -> Self; +} + +#[stable(feature = "fs_ext", since = "1.1.0")] +impl PermissionsExt for Permissions { + fn mode(&self) -> u32 { + self.as_inner().mode() + } + + fn set_mode(&mut self, mode: u32) { + *self = Permissions::from_inner(FromInner::from_inner(mode)); + } + + fn from_mode(mode: u32) -> Permissions { + Permissions::from_inner(FromInner::from_inner(mode)) + } +} + +/// Unix-specific extensions to [`fs::OpenOptions`]. 
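Each method above carries its own example, but it may help to see the positional-I/O calls composed: `write_all_at` followed by `read_exact_at`, neither of which moves the file cursor. A small round-trip sketch; the temporary file name is arbitrary and the example assumes a writable working directory.

```rust
use std::fs::OpenOptions;
use std::io;
use std::os::unix::fs::FileExt;

fn main() -> io::Result<()> {
    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open("positional-io-demo.bin")?;

    // The file is extended and zero-filled up to offset 10 before the write.
    file.write_all_at(b"sushi", 10)?;

    let mut buf = [0u8; 5];
    file.read_exact_at(&mut buf, 10)?;
    assert_eq!(&buf, b"sushi");

    std::fs::remove_file("positional-io-demo.bin")?;
    Ok(())
}
```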
+/// +/// [`fs::OpenOptions`]: ../../../../std/fs/struct.OpenOptions.html +#[stable(feature = "fs_ext", since = "1.1.0")] +pub trait OpenOptionsExt { + /// Sets the mode bits that a new file will be created with. + /// + /// If a new file is created as part of an `OpenOptions::open` call then this + /// specified `mode` will be used as the permission bits for the new file. + /// If no `mode` is set, the default of `0o666` will be used. + /// The operating system masks out bits with the system's `umask`, to produce + /// the final permissions. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::OpenOptionsExt; + /// + /// # fn main() { + /// let mut options = OpenOptions::new(); + /// options.mode(0o644); // Give read/write for owner and read for others. + /// let file = options.open("foo.txt"); + /// # } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn mode(&mut self, mode: u32) -> &mut Self; + + /// Pass custom flags to the `flags` argument of `open`. + /// + /// The bits that define the access mode are masked out with `O_ACCMODE`, to + /// ensure they do not interfere with the access mode set by Rusts options. + /// + /// Custom flags can only set flags, not remove flags set by Rusts options. + /// This options overwrites any previously set custom flags. + /// + /// # Examples + /// + /// ```no_run + /// # #![feature(rustc_private)] + /// extern crate libc; + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::OpenOptionsExt; + /// + /// # fn main() { + /// let mut options = OpenOptions::new(); + /// options.write(true); + /// if cfg!(unix) { + /// options.custom_flags(libc::O_NOFOLLOW); + /// } + /// let file = options.open("foo.txt"); + /// # } + /// ``` + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn custom_flags(&mut self, flags: i32) -> &mut Self; +} + +#[stable(feature = "fs_ext", since = "1.1.0")] +impl OpenOptionsExt for OpenOptions { + fn mode(&mut self, mode: u32) -> &mut OpenOptions { + self.as_inner_mut().mode(mode); + self + } + + fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions { + self.as_inner_mut().custom_flags(flags); + self + } +} + +/// Unix-specific extensions to [`fs::Metadata`]. +/// +/// [`fs::Metadata`]: ../../../../std/fs/struct.Metadata.html +#[stable(feature = "metadata_ext", since = "1.1.0")] +pub trait MetadataExt { + /// Returns the ID of the device containing the file. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let dev_id = meta.dev(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn dev(&self) -> u64; + /// Returns the inode number. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let inode = meta.ino(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn ino(&self) -> u64; + /// Returns the rights applied to this file. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let mode = meta.mode(); + /// let user_has_write_access = mode & 0o200; + /// let user_has_read_write_access = mode & 0o600; + /// let group_has_read_access = mode & 0o040; + /// let others_have_exec_access = mode & 0o001; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn mode(&self) -> u32; + /// Returns the number of hard links pointing to this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nb_hard_links = meta.nlink(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn nlink(&self) -> u64; + /// Returns the user ID of the owner of this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let user_id = meta.uid(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn uid(&self) -> u32; + /// Returns the group ID of the owner of this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let group_id = meta.gid(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn gid(&self) -> u32; + /// Returns the device ID of this file (if it is a special one). + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let device_id = meta.rdev(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn rdev(&self) -> u64; + /// Returns the total size of this file in bytes. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let file_size = meta.size(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn size(&self) -> u64; + /// Returns the last access time of the file, in seconds since Unix Epoch. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let last_access_time = meta.atime(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn atime(&self) -> i64; + /// Returns the last access time of the file, in nanoseconds since [`atime`]. 
+ /// + /// [`atime`]: #tymethod.atime + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_access_time = meta.atime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn atime_nsec(&self) -> i64; + /// Returns the last modification time of the file, in seconds since Unix Epoch. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let last_modification_time = meta.mtime(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn mtime(&self) -> i64; + /// Returns the last modification time of the file, in nanoseconds since [`mtime`]. + /// + /// [`mtime`]: #tymethod.mtime + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_modification_time = meta.mtime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn mtime_nsec(&self) -> i64; + /// Returns the last status change time of the file, in seconds since Unix Epoch. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let last_status_change_time = meta.ctime(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn ctime(&self) -> i64; + /// Returns the last status change time of the file, in nanoseconds since [`ctime`]. + /// + /// [`ctime`]: #tymethod.ctime + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_status_change_time = meta.ctime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn ctime_nsec(&self) -> i64; + /// Returns the block size for filesystem I/O. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let block_size = meta.blksize(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn blksize(&self) -> u64; + /// Returns the number of blocks allocated to the file, in 512-byte units. + /// + /// Please note that this may be smaller than `st_size / 512` when the file has holes. 
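Because the `*_nsec` accessors above carry only the fractional part of the corresponding timestamp, a full point in time is recovered by combining both fields. A small sketch, assuming the timestamp is not earlier than the Unix epoch:

```rust
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Combine the whole-second and nanosecond parts of st_mtime into a SystemTime.
fn modified_at(path: &str) -> io::Result<SystemTime> {
    let meta = fs::metadata(path)?;
    let secs = meta.mtime() as u64;       // assumed non-negative (post-1970)
    let nanos = meta.mtime_nsec() as u32; // always < 1_000_000_000
    Ok(UNIX_EPOCH + Duration::new(secs, nanos))
}
```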
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let blocks = meta.blocks(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn blocks(&self) -> u64; +} + +#[stable(feature = "metadata_ext", since = "1.1.0")] +impl MetadataExt for fs::Metadata { + fn dev(&self) -> u64 { + self.st_dev() + } + fn ino(&self) -> u64 { + self.st_ino() + } + fn mode(&self) -> u32 { + self.st_mode() + } + fn nlink(&self) -> u64 { + self.st_nlink() + } + fn uid(&self) -> u32 { + self.st_uid() + } + fn gid(&self) -> u32 { + self.st_gid() + } + fn rdev(&self) -> u64 { + self.st_rdev() + } + fn size(&self) -> u64 { + self.st_size() + } + fn atime(&self) -> i64 { + self.st_atime() + } + fn atime_nsec(&self) -> i64 { + self.st_atime_nsec() + } + fn mtime(&self) -> i64 { + self.st_mtime() + } + fn mtime_nsec(&self) -> i64 { + self.st_mtime_nsec() + } + fn ctime(&self) -> i64 { + self.st_ctime() + } + fn ctime_nsec(&self) -> i64 { + self.st_ctime_nsec() + } + fn blksize(&self) -> u64 { + self.st_blksize() + } + fn blocks(&self) -> u64 { + self.st_blocks() + } +} + +/// Unix-specific extensions for [`FileType`]. +/// +/// Adds support for special Unix file types such as block/character devices, +/// pipes, and sockets. +/// +/// [`FileType`]: ../../../../std/fs/struct.FileType.html +#[stable(feature = "file_type_ext", since = "1.5.0")] +pub trait FileTypeExt { + /// Returns `true` if this file type is a block device. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("block_device_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_block_device()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_block_device(&self) -> bool; + /// Returns `true` if this file type is a char device. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("char_device_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_char_device()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_char_device(&self) -> bool; + /// Returns `true` if this file type is a fifo. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("fifo_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_fifo()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_fifo(&self) -> bool; + /// Returns `true` if this file type is a socket. 
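As the note on `blocks` above points out, the allocated size can fall short of the logical size when a file has holes. A rough sketch of that comparison, using the 512-byte unit stated in the `blocks` documentation:

```rust
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;

// A file is likely sparse when fewer bytes are allocated than its length claims.
fn is_probably_sparse(path: &str) -> io::Result<bool> {
    let meta = fs::metadata(path)?;
    Ok(meta.blocks() * 512 < meta.size())
}
```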
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("unix.socket")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_socket()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_socket(&self) -> bool; +} + +#[stable(feature = "file_type_ext", since = "1.5.0")] +impl FileTypeExt for fs::FileType { + fn is_block_device(&self) -> bool { + self.as_inner().is(libc::S_IFBLK) + } + fn is_char_device(&self) -> bool { + self.as_inner().is(libc::S_IFCHR) + } + fn is_fifo(&self) -> bool { + self.as_inner().is(libc::S_IFIFO) + } + fn is_socket(&self) -> bool { + self.as_inner().is(libc::S_IFSOCK) + } +} + +/// Unix-specific extension methods for [`fs::DirEntry`]. +/// +/// [`fs::DirEntry`]: ../../../../std/fs/struct.DirEntry.html +#[stable(feature = "dir_entry_ext", since = "1.1.0")] +pub trait DirEntryExt { + /// Returns the underlying `d_ino` field in the contained `dirent` + /// structure. + /// + /// # Examples + /// + /// ``` + /// use std::fs; + /// use std::os::unix::fs::DirEntryExt; + /// + /// if let Ok(entries) = fs::read_dir(".") { + /// for entry in entries { + /// if let Ok(entry) = entry { + /// // Here, `entry` is a `DirEntry`. + /// println!("{:?}: {}", entry.file_name(), entry.ino()); + /// } + /// } + /// } + /// ``` + #[stable(feature = "dir_entry_ext", since = "1.1.0")] + fn ino(&self) -> u64; +} + +#[stable(feature = "dir_entry_ext", since = "1.1.0")] +impl DirEntryExt for fs::DirEntry { + fn ino(&self) -> u64 { + self.as_inner().ino() + } +} + +/// Creates a new symbolic link on the filesystem. +/// +/// The `dst` path will be a symbolic link pointing to the `src` path. +/// +/// # Note +/// +/// On Windows, you must specify whether a symbolic link points to a file +/// or directory. Use `os::windows::fs::symlink_file` to create a +/// symbolic link to a file, or `os::windows::fs::symlink_dir` to create a +/// symbolic link to a directory. Additionally, the process must have +/// `SeCreateSymbolicLinkPrivilege` in order to be able to create a +/// symbolic link. +/// +/// # Examples +/// +/// ```no_run +/// use std::os::unix::fs; +/// +/// fn main() -> std::io::Result<()> { +/// fs::symlink("a.txt", "b.txt")?; +/// Ok(()) +/// } +/// ``` +#[stable(feature = "symlink", since = "1.1.0")] +pub fn symlink<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()> { + sys::fs::symlink(src.as_ref(), dst.as_ref()) +} + +/// Unix-specific extensions to [`fs::DirBuilder`]. +/// +/// [`fs::DirBuilder`]: ../../../../std/fs/struct.DirBuilder.html +#[stable(feature = "dir_builder", since = "1.6.0")] +pub trait DirBuilderExt { + /// Sets the mode to create new directories with. This option defaults to + /// 0o777. 
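The `FileTypeExt` and `DirEntryExt` traits above combine naturally when walking a directory. A minimal sketch that reports FIFOs and sockets together with their inode numbers:

```rust
use std::fs;
use std::io;
use std::os::unix::fs::{DirEntryExt, FileTypeExt};

// Print every FIFO or socket in `dir`, along with its d_ino value.
fn list_special(dir: &str) -> io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let file_type = entry.file_type()?;
        if file_type.is_fifo() || file_type.is_socket() {
            println!("{:?}: inode {}", entry.file_name(), entry.ino());
        }
    }
    Ok(())
}
```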
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs::DirBuilder; + /// use std::os::unix::fs::DirBuilderExt; + /// + /// let mut builder = DirBuilder::new(); + /// builder.mode(0o755); + /// ``` + #[stable(feature = "dir_builder", since = "1.6.0")] + fn mode(&mut self, mode: u32) -> &mut Self; +} + +#[stable(feature = "dir_builder", since = "1.6.0")] +impl DirBuilderExt for fs::DirBuilder { + fn mode(&mut self, mode: u32) -> &mut fs::DirBuilder { + self.as_inner_mut().set_mode(mode); + self + } +} diff --git a/library/std/src/sys/unix/ext/io.rs b/library/std/src/sys/unix/ext/io.rs new file mode 100644 index 00000000000..5077e2e28d1 --- /dev/null +++ b/library/std/src/sys/unix/ext/io.rs @@ -0,0 +1,124 @@ +//! Unix-specific extensions to general I/O primitives + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs; +use crate::io; +use crate::os::raw; +use crate::sys; +use crate::sys_common::{AsInner, FromInner, IntoInner}; + +/// Raw file descriptors. +#[stable(feature = "rust1", since = "1.0.0")] +pub type RawFd = raw::c_int; + +/// A trait to extract the raw unix file descriptor from an underlying +/// object. +/// +/// This is only available on unix platforms and must be imported in order +/// to call the method. Windows platforms have a corresponding `AsRawHandle` +/// and `AsRawSocket` set of traits. +#[stable(feature = "rust1", since = "1.0.0")] +pub trait AsRawFd { + /// Extracts the raw file descriptor. + /// + /// This method does **not** pass ownership of the raw file descriptor + /// to the caller. The descriptor is only guaranteed to be valid while + /// the original object has not yet been destroyed. + #[stable(feature = "rust1", since = "1.0.0")] + fn as_raw_fd(&self) -> RawFd; +} + +/// A trait to express the ability to construct an object from a raw file +/// descriptor. +#[stable(feature = "from_raw_os", since = "1.1.0")] +pub trait FromRawFd { + /// Constructs a new instance of `Self` from the given raw file + /// descriptor. + /// + /// This function **consumes ownership** of the specified file + /// descriptor. The returned object will take responsibility for closing + /// it when the object goes out of scope. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + #[stable(feature = "from_raw_os", since = "1.1.0")] + unsafe fn from_raw_fd(fd: RawFd) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw file descriptor. +#[stable(feature = "into_raw_os", since = "1.4.0")] +pub trait IntoRawFd { + /// Consumes this object, returning the raw underlying file descriptor. + /// + /// This function **transfers ownership** of the underlying file descriptor + /// to the caller. Callers are then the unique owners of the file descriptor + /// and must close the descriptor once it's no longer needed. 
+ #[stable(feature = "into_raw_os", since = "1.4.0")] + fn into_raw_fd(self) -> RawFd; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for fs::File { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for fs::File { + unsafe fn from_raw_fd(fd: RawFd) -> fs::File { + fs::File::from_inner(sys::fs::File::from_inner(fd)) + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for fs::File { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { + libc::STDIN_FILENO + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { + libc::STDOUT_FILENO + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { + libc::STDERR_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StdinLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDIN_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StdoutLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDOUT_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StderrLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDERR_FILENO + } +} diff --git a/library/std/src/sys/unix/ext/mod.rs b/library/std/src/sys/unix/ext/mod.rs new file mode 100644 index 00000000000..cbdb1c10049 --- /dev/null +++ b/library/std/src/sys/unix/ext/mod.rs @@ -0,0 +1,66 @@ +//! Platform-specific extensions to `std` for Unix platforms. +//! +//! Provides access to platform-level information on Unix platforms, and +//! exposes Unix-specific functions that would otherwise be inappropriate as +//! part of the core `std` library. +//! +//! It exposes more ways to deal with platform-specific strings (`OsStr`, +//! `OsString`), allows to set permissions more granularly, extract low-level +//! file descriptors from files and sockets, and has platform-specific helpers +//! for spawning processes. +//! +//! # Examples +//! +//! ```no_run +//! use std::fs::File; +//! use std::os::unix::prelude::*; +//! +//! fn main() -> std::io::Result<()> { +//! let f = File::create("foo.txt")?; +//! let fd = f.as_raw_fd(); +//! +//! // use fd with native unix bindings +//! +//! Ok(()) +//! } +//! ``` + +#![stable(feature = "rust1", since = "1.0.0")] +#![doc(cfg(unix))] +#![allow(missing_docs)] + +pub mod ffi; +pub mod fs; +pub mod io; +pub mod net; +pub mod process; +pub mod raw; +pub mod thread; + +/// A prelude for conveniently writing platform-specific code. +/// +/// Includes all extension traits, and some important type definitions. 
+#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::ffi::{OsStrExt, OsStringExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::fs::DirEntryExt; + #[doc(no_inline)] + #[stable(feature = "file_offset", since = "1.15.0")] + pub use super::fs::FileExt; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::fs::{FileTypeExt, MetadataExt, OpenOptionsExt, PermissionsExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::process::{CommandExt, ExitStatusExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::thread::JoinHandleExt; +} diff --git a/library/std/src/sys/unix/ext/net.rs b/library/std/src/sys/unix/ext/net.rs new file mode 100644 index 00000000000..ada8eaa1c97 --- /dev/null +++ b/library/std/src/sys/unix/ext/net.rs @@ -0,0 +1,2024 @@ +#![stable(feature = "unix_socket", since = "1.10.0")] + +//! Unix-specific networking functionality + +// FIXME(#43348): Make libc adapt #[doc(cfg(...))] so we don't need these fake definitions here? +#[cfg(not(unix))] +#[allow(non_camel_case_types)] +mod libc { + pub use libc::c_int; + pub type socklen_t = u32; + pub struct sockaddr; + #[derive(Clone)] + pub struct sockaddr_un; +} + +use crate::ascii; +use crate::ffi::OsStr; +use crate::fmt; +use crate::io::{self, Initializer, IoSlice, IoSliceMut}; +use crate::mem; +use crate::net::{self, Shutdown}; +use crate::os::unix::ffi::OsStrExt; +use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use crate::path::Path; +use crate::sys::net::Socket; +use crate::sys::{self, cvt}; +use crate::sys_common::{self, AsInner, FromInner, IntoInner}; +use crate::time::Duration; + +#[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "haiku" +))] +use libc::MSG_NOSIGNAL; +#[cfg(not(any( + target_os = "linux", + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "haiku" +)))] +const MSG_NOSIGNAL: libc::c_int = 0x0; + +fn sun_path_offset(addr: &libc::sockaddr_un) -> usize { + // Work with an actual instance of the type since using a null pointer is UB + let base = addr as *const _ as usize; + let path = &addr.sun_path as *const _ as usize; + path - base +} + +unsafe fn sockaddr_un(path: &Path) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> { + let mut addr: libc::sockaddr_un = mem::zeroed(); + addr.sun_family = libc::AF_UNIX as libc::sa_family_t; + + let bytes = path.as_os_str().as_bytes(); + + if bytes.contains(&0) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "paths may not contain interior null bytes", + )); + } + + if bytes.len() >= addr.sun_path.len() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path must be shorter than SUN_LEN", + )); + } + for (dst, src) in addr.sun_path.iter_mut().zip(bytes.iter()) { + *dst = *src as libc::c_char; + } + // null byte for pathname addresses is already there because we zeroed the + // struct + + let mut len = sun_path_offset(&addr) + bytes.len(); + match bytes.get(0) { + Some(&0) | None => {} + Some(_) => len += 1, + } + Ok((addr, len as libc::socklen_t)) +} + +enum 
AddressKind<'a> { + Unnamed, + Pathname(&'a Path), + Abstract(&'a [u8]), +} + +/// An address associated with a Unix socket. +/// +/// # Examples +/// +/// ``` +/// use std::os::unix::net::UnixListener; +/// +/// let socket = match UnixListener::bind("/tmp/sock") { +/// Ok(sock) => sock, +/// Err(e) => { +/// println!("Couldn't bind: {:?}", e); +/// return +/// } +/// }; +/// let addr = socket.local_addr().expect("Couldn't get local address"); +/// ``` +#[derive(Clone)] +#[stable(feature = "unix_socket", since = "1.10.0")] +pub struct SocketAddr { + addr: libc::sockaddr_un, + len: libc::socklen_t, +} + +impl SocketAddr { + fn new<F>(f: F) -> io::Result<SocketAddr> + where + F: FnOnce(*mut libc::sockaddr, *mut libc::socklen_t) -> libc::c_int, + { + unsafe { + let mut addr: libc::sockaddr_un = mem::zeroed(); + let mut len = mem::size_of::<libc::sockaddr_un>() as libc::socklen_t; + cvt(f(&mut addr as *mut _ as *mut _, &mut len))?; + SocketAddr::from_parts(addr, len) + } + } + + fn from_parts(addr: libc::sockaddr_un, mut len: libc::socklen_t) -> io::Result<SocketAddr> { + if len == 0 { + // When there is a datagram from unnamed unix socket + // linux returns zero bytes of address + len = sun_path_offset(&addr) as libc::socklen_t; // i.e., zero-length address + } else if addr.sun_family != libc::AF_UNIX as libc::sa_family_t { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "file descriptor did not correspond to a Unix socket", + )); + } + + Ok(SocketAddr { addr, len }) + } + + /// Returns `true` if the address is unnamed. + /// + /// # Examples + /// + /// A named address: + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixListener::bind("/tmp/sock")?; + /// let addr = socket.local_addr().expect("Couldn't get local address"); + /// assert_eq!(addr.is_unnamed(), false); + /// Ok(()) + /// } + /// ``` + /// + /// An unnamed address: + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixDatagram::unbound()?; + /// let addr = socket.local_addr().expect("Couldn't get local address"); + /// assert_eq!(addr.is_unnamed(), true); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn is_unnamed(&self) -> bool { + if let AddressKind::Unnamed = self.address() { true } else { false } + } + + /// Returns the contents of this address if it is a `pathname` address. 
+ /// + /// # Examples + /// + /// With a pathname: + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// use std::path::Path; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixListener::bind("/tmp/sock")?; + /// let addr = socket.local_addr().expect("Couldn't get local address"); + /// assert_eq!(addr.as_pathname(), Some(Path::new("/tmp/sock"))); + /// Ok(()) + /// } + /// ``` + /// + /// Without a pathname: + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixDatagram::unbound()?; + /// let addr = socket.local_addr().expect("Couldn't get local address"); + /// assert_eq!(addr.as_pathname(), None); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn as_pathname(&self) -> Option<&Path> { + if let AddressKind::Pathname(path) = self.address() { Some(path) } else { None } + } + + fn address(&self) -> AddressKind<'_> { + let len = self.len as usize - sun_path_offset(&self.addr); + let path = unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.addr.sun_path) }; + + // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses + if len == 0 + || (cfg!(not(any(target_os = "linux", target_os = "android"))) + && self.addr.sun_path[0] == 0) + { + AddressKind::Unnamed + } else if self.addr.sun_path[0] == 0 { + AddressKind::Abstract(&path[1..len]) + } else { + AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref()) + } + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl fmt::Debug for SocketAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.address() { + AddressKind::Unnamed => write!(fmt, "(unnamed)"), + AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)), + AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path), + } + } +} + +struct AsciiEscaped<'a>(&'a [u8]); + +impl<'a> fmt::Display for AsciiEscaped<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "\"")?; + for byte in self.0.iter().cloned().flat_map(ascii::escape_default) { + write!(fmt, "{}", byte as char)?; + } + write!(fmt, "\"") + } +} + +/// A Unix stream socket. +/// +/// # Examples +/// +/// ```no_run +/// use std::os::unix::net::UnixStream; +/// use std::io::prelude::*; +/// +/// fn main() -> std::io::Result<()> { +/// let mut stream = UnixStream::connect("/path/to/my/socket")?; +/// stream.write_all(b"hello world")?; +/// let mut response = String::new(); +/// stream.read_to_string(&mut response)?; +/// println!("{}", response); +/// Ok(()) +/// } +/// ``` +#[stable(feature = "unix_socket", since = "1.10.0")] +pub struct UnixStream(Socket); + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl fmt::Debug for UnixStream { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut builder = fmt.debug_struct("UnixStream"); + builder.field("fd", self.0.as_inner()); + if let Ok(addr) = self.local_addr() { + builder.field("local", &addr); + } + if let Ok(addr) = self.peer_addr() { + builder.field("peer", &addr); + } + builder.finish() + } +} + +impl UnixStream { + /// Connects to the socket named by `path`. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// let socket = match UnixStream::connect("/tmp/sock") { + /// Ok(sock) => sock, + /// Err(e) => { + /// println!("Couldn't connect: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> { + fn inner(path: &Path) -> io::Result<UnixStream> { + unsafe { + let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?; + let (addr, len) = sockaddr_un(path)?; + + cvt(libc::connect(*inner.as_inner(), &addr as *const _ as *const _, len))?; + Ok(UnixStream(inner)) + } + } + inner(path.as_ref()) + } + + /// Creates an unnamed pair of connected sockets. + /// + /// Returns two `UnixStream`s which are connected to each other. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// let (sock1, sock2) = match UnixStream::pair() { + /// Ok((sock1, sock2)) => (sock1, sock2), + /// Err(e) => { + /// println!("Couldn't create a pair of sockets: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn pair() -> io::Result<(UnixStream, UnixStream)> { + let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_STREAM)?; + Ok((UnixStream(i1), UnixStream(i2))) + } + + /// Creates a new independently owned handle to the underlying socket. + /// + /// The returned `UnixStream` is a reference to the same stream that this + /// object references. Both handles will read and write the same stream of + /// data, and options set on one stream will be propagated to the other + /// stream. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// let sock_copy = socket.try_clone().expect("Couldn't clone socket"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn try_clone(&self) -> io::Result<UnixStream> { + self.0.duplicate().map(UnixStream) + } + + /// Returns the socket address of the local half of this connection. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// let addr = socket.local_addr().expect("Couldn't get local address"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn local_addr(&self) -> io::Result<SocketAddr> { + SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) }) + } + + /// Returns the socket address of the remote half of this connection. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// let addr = socket.peer_addr().expect("Couldn't get peer address"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + SocketAddr::new(|addr, len| unsafe { libc::getpeername(*self.0.as_inner(), addr, len) }) + } + + /// Sets the read timeout for the socket. + /// + /// If the provided value is [`None`], then [`read`] calls will block + /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method. 
+ /// + /// [`None`]: ../../../../std/option/enum.Option.html#variant.None + /// [`Err`]: ../../../../std/result/enum.Result.html#variant.Err + /// [`read`]: ../../../../std/io/trait.Read.html#tymethod.read + /// [`Duration`]: ../../../../std/time/struct.Duration.html + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout"); + /// Ok(()) + /// } + /// ``` + /// + /// An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method: + /// + /// ```no_run + /// use std::io; + /// use std::os::unix::net::UnixStream; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// let result = socket.set_read_timeout(Some(Duration::new(0, 0))); + /// let err = result.unwrap_err(); + /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + self.0.set_timeout(timeout, libc::SO_RCVTIMEO) + } + + /// Sets the write timeout for the socket. + /// + /// If the provided value is [`None`], then [`write`] calls will block + /// indefinitely. An [`Err`] is returned if the zero [`Duration`] is + /// passed to this method. + /// + /// [`None`]: ../../../../std/option/enum.Option.html#variant.None + /// [`Err`]: ../../../../std/result/enum.Result.html#variant.Err + /// [`write`]: ../../../../std/io/trait.Write.html#tymethod.write + /// [`Duration`]: ../../../../std/time/struct.Duration.html + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.set_write_timeout(Some(Duration::new(1, 0))) + /// .expect("Couldn't set write timeout"); + /// Ok(()) + /// } + /// ``` + /// + /// An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method: + /// + /// ```no_run + /// use std::io; + /// use std::net::UdpSocket; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UdpSocket::bind("127.0.0.1:34254")?; + /// let result = socket.set_write_timeout(Some(Duration::new(0, 0))); + /// let err = result.unwrap_err(); + /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + self.0.set_timeout(timeout, libc::SO_SNDTIMEO) + } + + /// Returns the read timeout of this socket. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout"); + /// assert_eq!(socket.read_timeout()?, Some(Duration::new(1, 0))); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + self.0.timeout(libc::SO_RCVTIMEO) + } + + /// Returns the write timeout of this socket. 
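As described above, passing `None` restores blocking behaviour. A short sketch that sets a write timeout and then clears it again, reading the value back each time:

```rust
use std::io;
use std::os::unix::net::UnixStream;
use std::time::Duration;

// Set a one-second write timeout, then remove it so writes block again.
fn toggle_write_timeout(socket: &UnixStream) -> io::Result<()> {
    socket.set_write_timeout(Some(Duration::new(1, 0)))?;
    assert_eq!(socket.write_timeout()?, Some(Duration::new(1, 0)));
    socket.set_write_timeout(None)?;
    assert_eq!(socket.write_timeout()?, None);
    Ok(())
}
```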
+ /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.set_write_timeout(Some(Duration::new(1, 0))) + /// .expect("Couldn't set write timeout"); + /// assert_eq!(socket.write_timeout()?, Some(Duration::new(1, 0))); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + self.0.timeout(libc::SO_SNDTIMEO) + } + + /// Moves the socket into or out of nonblocking mode. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.set_nonblocking(true).expect("Couldn't set nonblocking"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.0.set_nonblocking(nonblocking) + } + + /// Returns the value of the `SO_ERROR` option. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// if let Ok(Some(err)) = socket.take_error() { + /// println!("Got error: {:?}", err); + /// } + /// Ok(()) + /// } + /// ``` + /// + /// # Platform specific + /// On Redox this always returns `None`. + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.0.take_error() + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of [`Shutdown`]). 
+ /// + /// [`Shutdown`]: ../../../../std/net/enum.Shutdown.html + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixStream; + /// use std::net::Shutdown; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixStream::connect("/tmp/sock")?; + /// socket.shutdown(Shutdown::Both).expect("shutdown function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.0.shutdown(how) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl io::Read for UnixStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + io::Read::read(&mut &*self, buf) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + io::Read::read_vectored(&mut &*self, bufs) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + io::Read::is_read_vectored(&&*self) + } + + #[inline] + unsafe fn initializer(&self) -> Initializer { + Initializer::nop() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl<'a> io::Read for &'a UnixStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + #[inline] + unsafe fn initializer(&self) -> Initializer { + Initializer::nop() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl io::Write for UnixStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + io::Write::write(&mut &*self, buf) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + io::Write::write_vectored(&mut &*self, bufs) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + io::Write::is_write_vectored(&&*self) + } + + fn flush(&mut self) -> io::Result<()> { + io::Write::flush(&mut &*self) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl<'a> io::Write for &'a UnixStream { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl AsRawFd for UnixStream { + fn as_raw_fd(&self) -> RawFd { + *self.0.as_inner() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl FromRawFd for UnixStream { + unsafe fn from_raw_fd(fd: RawFd) -> UnixStream { + UnixStream(Socket::from_inner(fd)) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl IntoRawFd for UnixStream { + fn into_raw_fd(self) -> RawFd { + self.0.into_inner() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::TcpStream { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::TcpListener { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::UdpSocket { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::TcpStream { + unsafe fn from_raw_fd(fd: RawFd) -> 
net::TcpStream { + let socket = sys::net::Socket::from_inner(fd); + net::TcpStream::from_inner(sys_common::net::TcpStream::from_inner(socket)) + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::TcpListener { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpListener { + let socket = sys::net::Socket::from_inner(fd); + net::TcpListener::from_inner(sys_common::net::TcpListener::from_inner(socket)) + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::UdpSocket { + unsafe fn from_raw_fd(fd: RawFd) -> net::UdpSocket { + let socket = sys::net::Socket::from_inner(fd); + net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(socket)) + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::TcpStream { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::TcpListener { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::UdpSocket { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} + +/// A structure representing a Unix domain socket server. +/// +/// # Examples +/// +/// ```no_run +/// use std::thread; +/// use std::os::unix::net::{UnixStream, UnixListener}; +/// +/// fn handle_client(stream: UnixStream) { +/// // ... +/// } +/// +/// fn main() -> std::io::Result<()> { +/// let listener = UnixListener::bind("/path/to/the/socket")?; +/// +/// // accept connections and process them, spawning a new thread for each one +/// for stream in listener.incoming() { +/// match stream { +/// Ok(stream) => { +/// /* connection succeeded */ +/// thread::spawn(|| handle_client(stream)); +/// } +/// Err(err) => { +/// /* connection failed */ +/// break; +/// } +/// } +/// } +/// Ok(()) +/// } +/// ``` +#[stable(feature = "unix_socket", since = "1.10.0")] +pub struct UnixListener(Socket); + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl fmt::Debug for UnixListener { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut builder = fmt.debug_struct("UnixListener"); + builder.field("fd", self.0.as_inner()); + if let Ok(addr) = self.local_addr() { + builder.field("local", &addr); + } + builder.finish() + } +} + +impl UnixListener { + /// Creates a new `UnixListener` bound to the specified socket. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// let listener = match UnixListener::bind("/path/to/the/socket") { + /// Ok(sock) => sock, + /// Err(e) => { + /// println!("Couldn't connect: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> { + fn inner(path: &Path) -> io::Result<UnixListener> { + unsafe { + let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_STREAM)?; + let (addr, len) = sockaddr_un(path)?; + + cvt(libc::bind(*inner.as_inner(), &addr as *const _ as *const _, len as _))?; + cvt(libc::listen(*inner.as_inner(), 128))?; + + Ok(UnixListener(inner)) + } + } + inner(path.as_ref()) + } + + /// Accepts a new incoming connection to this listener. + /// + /// This function will block the calling thread until a new Unix connection + /// is established. 
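Because `Read` and `Write` are also implemented for `&UnixStream` (see the impls above), I/O needs only a shared reference. A minimal sketch:

```rust
use std::io::{self, Read, Write};
use std::os::unix::net::UnixStream;

// A &UnixStream is enough for both directions; no &mut UnixStream is required.
fn ping(stream: &UnixStream) -> io::Result<[u8; 4]> {
    let mut writer = stream; // &UnixStream implements Write
    writer.write_all(b"ping")?;
    let mut reader = stream; // ...and Read
    let mut buf = [0u8; 4];
    reader.read_exact(&mut buf)?;
    Ok(buf)
}
```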
When established, the corresponding [`UnixStream`] and + /// the remote peer's address will be returned. + /// + /// [`UnixStream`]: ../../../../std/os/unix/net/struct.UnixStream.html + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/path/to/the/socket")?; + /// + /// match listener.accept() { + /// Ok((socket, addr)) => println!("Got a client: {:?}", addr), + /// Err(e) => println!("accept function failed: {:?}", e), + /// } + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { + let mut storage: libc::sockaddr_un = unsafe { mem::zeroed() }; + let mut len = mem::size_of_val(&storage) as libc::socklen_t; + let sock = self.0.accept(&mut storage as *mut _ as *mut _, &mut len)?; + let addr = SocketAddr::from_parts(storage, len)?; + Ok((UnixStream(sock), addr)) + } + + /// Creates a new independently owned handle to the underlying socket. + /// + /// The returned `UnixListener` is a reference to the same socket that this + /// object references. Both handles can be used to accept incoming + /// connections and options set on one listener will affect the other. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/path/to/the/socket")?; + /// let listener_copy = listener.try_clone().expect("try_clone failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn try_clone(&self) -> io::Result<UnixListener> { + self.0.duplicate().map(UnixListener) + } + + /// Returns the local socket address of this listener. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/path/to/the/socket")?; + /// let addr = listener.local_addr().expect("Couldn't get local address"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn local_addr(&self) -> io::Result<SocketAddr> { + SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) }) + } + + /// Moves the socket into or out of nonblocking mode. + /// + /// This will result in the `accept` operation becoming nonblocking, + /// i.e., immediately returning from their calls. If the IO operation is + /// successful, `Ok` is returned and no further action is required. If the + /// IO operation could not be completed and needs to be retried, an error + /// with kind [`io::ErrorKind::WouldBlock`] is returned. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/path/to/the/socket")?; + /// listener.set_nonblocking(true).expect("Couldn't set non blocking"); + /// Ok(()) + /// } + /// ``` + /// + /// [`io::ErrorKind::WouldBlock`]: ../../../io/enum.ErrorKind.html#variant.WouldBlock + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.0.set_nonblocking(nonblocking) + } + + /// Returns the value of the `SO_ERROR` option. 
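As described for `set_nonblocking` above, a nonblocking listener signals a not-yet-ready `accept` with `WouldBlock`. A sketch of handling that case explicitly:

```rust
use std::io;
use std::os::unix::net::{UnixListener, UnixStream};

// Poll the listener once: Some(stream) if a client was waiting, None if the
// call would have blocked; any other error is passed through unchanged.
fn try_accept(listener: &UnixListener) -> io::Result<Option<UnixStream>> {
    match listener.accept() {
        Ok((stream, _addr)) => Ok(Some(stream)),
        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(None),
        Err(e) => Err(e),
    }
}
```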
+ /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixListener; + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/tmp/sock")?; + /// + /// if let Ok(Some(err)) = listener.take_error() { + /// println!("Got error: {:?}", err); + /// } + /// Ok(()) + /// } + /// ``` + /// + /// # Platform specific + /// On Redox this always returns `None`. + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.0.take_error() + } + + /// Returns an iterator over incoming connections. + /// + /// The iterator will never return [`None`] and will also not yield the + /// peer's [`SocketAddr`] structure. + /// + /// [`None`]: ../../../../std/option/enum.Option.html#variant.None + /// [`SocketAddr`]: struct.SocketAddr.html + /// + /// # Examples + /// + /// ```no_run + /// use std::thread; + /// use std::os::unix::net::{UnixStream, UnixListener}; + /// + /// fn handle_client(stream: UnixStream) { + /// // ... + /// } + /// + /// fn main() -> std::io::Result<()> { + /// let listener = UnixListener::bind("/path/to/the/socket")?; + /// + /// for stream in listener.incoming() { + /// match stream { + /// Ok(stream) => { + /// thread::spawn(|| handle_client(stream)); + /// } + /// Err(err) => { + /// break; + /// } + /// } + /// } + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn incoming(&self) -> Incoming<'_> { + Incoming { listener: self } + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl AsRawFd for UnixListener { + fn as_raw_fd(&self) -> RawFd { + *self.0.as_inner() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl FromRawFd for UnixListener { + unsafe fn from_raw_fd(fd: RawFd) -> UnixListener { + UnixListener(Socket::from_inner(fd)) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl IntoRawFd for UnixListener { + fn into_raw_fd(self) -> RawFd { + self.0.into_inner() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl<'a> IntoIterator for &'a UnixListener { + type Item = io::Result<UnixStream>; + type IntoIter = Incoming<'a>; + + fn into_iter(self) -> Incoming<'a> { + self.incoming() + } +} + +/// An iterator over incoming connections to a [`UnixListener`]. +/// +/// It will never return [`None`]. +/// +/// [`None`]: ../../../../std/option/enum.Option.html#variant.None +/// [`UnixListener`]: struct.UnixListener.html +/// +/// # Examples +/// +/// ```no_run +/// use std::thread; +/// use std::os::unix::net::{UnixStream, UnixListener}; +/// +/// fn handle_client(stream: UnixStream) { +/// // ... +/// } +/// +/// fn main() -> std::io::Result<()> { +/// let listener = UnixListener::bind("/path/to/the/socket")?; +/// +/// for stream in listener.incoming() { +/// match stream { +/// Ok(stream) => { +/// thread::spawn(|| handle_client(stream)); +/// } +/// Err(err) => { +/// break; +/// } +/// } +/// } +/// Ok(()) +/// } +/// ``` +#[derive(Debug)] +#[stable(feature = "unix_socket", since = "1.10.0")] +pub struct Incoming<'a> { + listener: &'a UnixListener, +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl<'a> Iterator for Incoming<'a> { + type Item = io::Result<UnixStream>; + + fn next(&mut self) -> Option<io::Result<UnixStream>> { + Some(self.listener.accept().map(|s| s.0)) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (usize::MAX, None) + } +} + +/// A Unix datagram socket. 
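The `IntoIterator` impl above lets a `&UnixListener` be iterated directly, which is equivalent to calling `incoming`. A minimal sketch:

```rust
use std::io;
use std::os::unix::net::UnixListener;

// Iterating over &UnixListener yields io::Result<UnixStream> without end.
fn serve(listener: &UnixListener) -> io::Result<()> {
    for stream in listener {
        let _stream = stream?;
        // handle the connection here
    }
    Ok(())
}
```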
+/// +/// # Examples +/// +/// ```no_run +/// use std::os::unix::net::UnixDatagram; +/// +/// fn main() -> std::io::Result<()> { +/// let socket = UnixDatagram::bind("/path/to/my/socket")?; +/// socket.send_to(b"hello world", "/path/to/other/socket")?; +/// let mut buf = [0; 100]; +/// let (count, address) = socket.recv_from(&mut buf)?; +/// println!("socket {:?} sent {:?}", address, &buf[..count]); +/// Ok(()) +/// } +/// ``` +#[stable(feature = "unix_socket", since = "1.10.0")] +pub struct UnixDatagram(Socket); + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl fmt::Debug for UnixDatagram { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut builder = fmt.debug_struct("UnixDatagram"); + builder.field("fd", self.0.as_inner()); + if let Ok(addr) = self.local_addr() { + builder.field("local", &addr); + } + if let Ok(addr) = self.peer_addr() { + builder.field("peer", &addr); + } + builder.finish() + } +} + +impl UnixDatagram { + /// Creates a Unix datagram socket bound to the given path. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// let sock = match UnixDatagram::bind("/path/to/the/socket") { + /// Ok(sock) => sock, + /// Err(e) => { + /// println!("Couldn't bind: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> { + fn inner(path: &Path) -> io::Result<UnixDatagram> { + unsafe { + let socket = UnixDatagram::unbound()?; + let (addr, len) = sockaddr_un(path)?; + + cvt(libc::bind(*socket.0.as_inner(), &addr as *const _ as *const _, len as _))?; + + Ok(socket) + } + } + inner(path.as_ref()) + } + + /// Creates a Unix Datagram socket which is not bound to any address. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// let sock = match UnixDatagram::unbound() { + /// Ok(sock) => sock, + /// Err(e) => { + /// println!("Couldn't unbound: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn unbound() -> io::Result<UnixDatagram> { + let inner = Socket::new_raw(libc::AF_UNIX, libc::SOCK_DGRAM)?; + Ok(UnixDatagram(inner)) + } + + /// Creates an unnamed pair of connected sockets. + /// + /// Returns two `UnixDatagrams`s which are connected to each other. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// let (sock1, sock2) = match UnixDatagram::pair() { + /// Ok((sock1, sock2)) => (sock1, sock2), + /// Err(e) => { + /// println!("Couldn't unbound: {:?}", e); + /// return + /// } + /// }; + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { + let (i1, i2) = Socket::new_pair(libc::AF_UNIX, libc::SOCK_DGRAM)?; + Ok((UnixDatagram(i1), UnixDatagram(i2))) + } + + /// Connects the socket to the specified address. + /// + /// The [`send`] method may be used to send data to the specified address. + /// [`recv`] and [`recv_from`] will only receive data from that address. 
+ /// + /// [`send`]: #method.send + /// [`recv`]: #method.recv + /// [`recv_from`]: #method.recv_from + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// match sock.connect("/path/to/the/socket") { + /// Ok(sock) => sock, + /// Err(e) => { + /// println!("Couldn't connect: {:?}", e); + /// return Err(e) + /// } + /// }; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { + fn inner(d: &UnixDatagram, path: &Path) -> io::Result<()> { + unsafe { + let (addr, len) = sockaddr_un(path)?; + + cvt(libc::connect(*d.0.as_inner(), &addr as *const _ as *const _, len))?; + + Ok(()) + } + } + inner(self, path.as_ref()) + } + + /// Creates a new independently owned handle to the underlying socket. + /// + /// The returned `UnixDatagram` is a reference to the same socket that this + /// object references. Both handles can be used to accept incoming + /// connections and options set on one side will affect the other. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::bind("/path/to/the/socket")?; + /// let sock_copy = sock.try_clone().expect("try_clone failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn try_clone(&self) -> io::Result<UnixDatagram> { + self.0.duplicate().map(UnixDatagram) + } + + /// Returns the address of this socket. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::bind("/path/to/the/socket")?; + /// let addr = sock.local_addr().expect("Couldn't get local address"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn local_addr(&self) -> io::Result<SocketAddr> { + SocketAddr::new(|addr, len| unsafe { libc::getsockname(*self.0.as_inner(), addr, len) }) + } + + /// Returns the address of this socket's peer. + /// + /// The [`connect`] method will connect the socket to a peer. + /// + /// [`connect`]: #method.connect + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.connect("/path/to/the/socket")?; + /// + /// let addr = sock.peer_addr().expect("Couldn't get peer address"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + SocketAddr::new(|addr, len| unsafe { libc::getpeername(*self.0.as_inner(), addr, len) }) + } + + /// Receives data from the socket. + /// + /// On success, returns the number of bytes read and the address from + /// whence the data came. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// let mut buf = vec![0; 10]; + /// let (size, sender) = sock.recv_from(buf.as_mut_slice())?; + /// println!("received {} bytes from {:?}", size, sender); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + let mut count = 0; + let addr = SocketAddr::new(|addr, len| unsafe { + count = libc::recvfrom( + *self.0.as_inner(), + buf.as_mut_ptr() as *mut _, + buf.len(), + 0, + addr, + len, + ); + if count > 0 { + 1 + } else if count == 0 { + 0 + } else { + -1 + } + })?; + + Ok((count as usize, addr)) + } + + /// Receives data from the socket. + /// + /// On success, returns the number of bytes read. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::bind("/path/to/the/socket")?; + /// let mut buf = vec![0; 10]; + /// sock.recv(buf.as_mut_slice()).expect("recv function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + /// Sends data on the socket to the specified address. + /// + /// On success, returns the number of bytes written. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.send_to(b"omelette au fromage", "/some/sock").expect("send_to function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> { + fn inner(d: &UnixDatagram, buf: &[u8], path: &Path) -> io::Result<usize> { + unsafe { + let (addr, len) = sockaddr_un(path)?; + + let count = cvt(libc::sendto( + *d.0.as_inner(), + buf.as_ptr() as *const _, + buf.len(), + MSG_NOSIGNAL, + &addr as *const _ as *const _, + len, + ))?; + Ok(count as usize) + } + } + inner(self, buf, path.as_ref()) + } + + /// Sends data on the socket to the socket's peer. + /// + /// The peer address may be set by the `connect` method, and this method + /// will return an error if the socket has not already been connected. + /// + /// On success, returns the number of bytes written. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.connect("/some/sock").expect("Couldn't connect"); + /// sock.send(b"omelette au fromage").expect("send_to function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn send(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + /// Sets the read timeout for the socket. + /// + /// If the provided value is [`None`], then [`recv`] and [`recv_from`] calls will + /// block indefinitely. An [`Err`] is returned if the zero [`Duration`] + /// is passed to this method. 
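`pair`, `send`, and `recv` shown above fit together for in-process messaging. A small sketch that exchanges one datagram over an unnamed, connected pair:

```rust
use std::io;
use std::os::unix::net::UnixDatagram;

// The two halves of a pair are already connected, so plain send/recv suffice.
fn round_trip() -> io::Result<()> {
    let (tx, rx) = UnixDatagram::pair()?;
    tx.send(b"ping")?;
    let mut buf = [0u8; 16];
    let n = rx.recv(&mut buf)?;
    assert_eq!(&buf[..n], b"ping");
    Ok(())
}
```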
+ /// + /// [`None`]: ../../../../std/option/enum.Option.html#variant.None + /// [`Err`]: ../../../../std/result/enum.Result.html#variant.Err + /// [`recv`]: #method.recv + /// [`recv_from`]: #method.recv_from + /// [`Duration`]: ../../../../std/time/struct.Duration.html + /// + /// # Examples + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.set_read_timeout(Some(Duration::new(1, 0))) + /// .expect("set_read_timeout function failed"); + /// Ok(()) + /// } + /// ``` + /// + /// An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method: + /// + /// ```no_run + /// use std::io; + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixDatagram::unbound()?; + /// let result = socket.set_read_timeout(Some(Duration::new(0, 0))); + /// let err = result.unwrap_err(); + /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + self.0.set_timeout(timeout, libc::SO_RCVTIMEO) + } + + /// Sets the write timeout for the socket. + /// + /// If the provided value is [`None`], then [`send`] and [`send_to`] calls will + /// block indefinitely. An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method. + /// + /// [`None`]: ../../../../std/option/enum.Option.html#variant.None + /// [`send`]: #method.send + /// [`send_to`]: #method.send_to + /// [`Duration`]: ../../../../std/time/struct.Duration.html + /// + /// # Examples + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.set_write_timeout(Some(Duration::new(1, 0))) + /// .expect("set_write_timeout function failed"); + /// Ok(()) + /// } + /// ``` + /// + /// An [`Err`] is returned if the zero [`Duration`] is passed to this + /// method: + /// + /// ```no_run + /// use std::io; + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let socket = UnixDatagram::unbound()?; + /// let result = socket.set_write_timeout(Some(Duration::new(0, 0))); + /// let err = result.unwrap_err(); + /// assert_eq!(err.kind(), io::ErrorKind::InvalidInput); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> { + self.0.set_timeout(timeout, libc::SO_SNDTIMEO) + } + + /// Returns the read timeout of this socket. + /// + /// # Examples + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.set_read_timeout(Some(Duration::new(1, 0))) + /// .expect("set_read_timeout function failed"); + /// assert_eq!(sock.read_timeout()?, Some(Duration::new(1, 0))); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + self.0.timeout(libc::SO_RCVTIMEO) + } + + /// Returns the write timeout of this socket. 
+ /// + /// # Examples + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// use std::time::Duration; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.set_write_timeout(Some(Duration::new(1, 0))) + /// .expect("set_write_timeout function failed"); + /// assert_eq!(sock.write_timeout()?, Some(Duration::new(1, 0))); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + self.0.timeout(libc::SO_SNDTIMEO) + } + + /// Moves the socket into or out of nonblocking mode. + /// + /// # Examples + /// + /// ``` + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.set_nonblocking(true).expect("set_nonblocking function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + self.0.set_nonblocking(nonblocking) + } + + /// Returns the value of the `SO_ERROR` option. + /// + /// # Examples + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// if let Ok(Some(err)) = sock.take_error() { + /// println!("Got error: {:?}", err); + /// } + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.0.take_error() + } + + /// Shut down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of [`Shutdown`]). + /// + /// [`Shutdown`]: ../../../../std/net/enum.Shutdown.html + /// + /// ```no_run + /// use std::os::unix::net::UnixDatagram; + /// use std::net::Shutdown; + /// + /// fn main() -> std::io::Result<()> { + /// let sock = UnixDatagram::unbound()?; + /// sock.shutdown(Shutdown::Both).expect("shutdown function failed"); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "unix_socket", since = "1.10.0")] + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.0.shutdown(how) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl AsRawFd for UnixDatagram { + fn as_raw_fd(&self) -> RawFd { + *self.0.as_inner() + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl FromRawFd for UnixDatagram { + unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram { + UnixDatagram(Socket::from_inner(fd)) + } +} + +#[stable(feature = "unix_socket", since = "1.10.0")] +impl IntoRawFd for UnixDatagram { + fn into_raw_fd(self) -> RawFd { + self.0.into_inner() + } +} + +#[cfg(all(test, not(target_os = "emscripten")))] +mod test { + use crate::io::prelude::*; + use crate::io::{self, ErrorKind}; + use crate::sys_common::io::test::tmpdir; + use crate::thread; + use crate::time::Duration; + + use super::*; + + macro_rules! 
or_panic { + ($e:expr) => { + match $e { + Ok(e) => e, + Err(e) => panic!("{}", e), + } + }; + } + + #[test] + fn basic() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + let msg1 = b"hello"; + let msg2 = b"world!"; + + let listener = or_panic!(UnixListener::bind(&socket_path)); + let thread = thread::spawn(move || { + let mut stream = or_panic!(listener.accept()).0; + let mut buf = [0; 5]; + or_panic!(stream.read(&mut buf)); + assert_eq!(&msg1[..], &buf[..]); + or_panic!(stream.write_all(msg2)); + }); + + let mut stream = or_panic!(UnixStream::connect(&socket_path)); + assert_eq!(Some(&*socket_path), stream.peer_addr().unwrap().as_pathname()); + or_panic!(stream.write_all(msg1)); + let mut buf = vec![]; + or_panic!(stream.read_to_end(&mut buf)); + assert_eq!(&msg2[..], &buf[..]); + drop(stream); + + thread.join().unwrap(); + } + + #[test] + fn vectored() { + let (mut s1, mut s2) = or_panic!(UnixStream::pair()); + + let len = or_panic!(s1.write_vectored(&[ + IoSlice::new(b"hello"), + IoSlice::new(b" "), + IoSlice::new(b"world!") + ],)); + assert_eq!(len, 12); + + let mut buf1 = [0; 6]; + let mut buf2 = [0; 7]; + let len = or_panic!( + s2.read_vectored(&mut [IoSliceMut::new(&mut buf1), IoSliceMut::new(&mut buf2)],) + ); + assert_eq!(len, 12); + assert_eq!(&buf1, b"hello "); + assert_eq!(&buf2, b"world!\0"); + } + + #[test] + fn pair() { + let msg1 = b"hello"; + let msg2 = b"world!"; + + let (mut s1, mut s2) = or_panic!(UnixStream::pair()); + let thread = thread::spawn(move || { + // s1 must be moved in or the test will hang! + let mut buf = [0; 5]; + or_panic!(s1.read(&mut buf)); + assert_eq!(&msg1[..], &buf[..]); + or_panic!(s1.write_all(msg2)); + }); + + or_panic!(s2.write_all(msg1)); + let mut buf = vec![]; + or_panic!(s2.read_to_end(&mut buf)); + assert_eq!(&msg2[..], &buf[..]); + drop(s2); + + thread.join().unwrap(); + } + + #[test] + fn try_clone() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + let msg1 = b"hello"; + let msg2 = b"world"; + + let listener = or_panic!(UnixListener::bind(&socket_path)); + let thread = thread::spawn(move || { + let mut stream = or_panic!(listener.accept()).0; + or_panic!(stream.write_all(msg1)); + or_panic!(stream.write_all(msg2)); + }); + + let mut stream = or_panic!(UnixStream::connect(&socket_path)); + let mut stream2 = or_panic!(stream.try_clone()); + + let mut buf = [0; 5]; + or_panic!(stream.read(&mut buf)); + assert_eq!(&msg1[..], &buf[..]); + or_panic!(stream2.read(&mut buf)); + assert_eq!(&msg2[..], &buf[..]); + + thread.join().unwrap(); + } + + #[test] + fn iter() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + + let listener = or_panic!(UnixListener::bind(&socket_path)); + let thread = thread::spawn(move || { + for stream in listener.incoming().take(2) { + let mut stream = or_panic!(stream); + let mut buf = [0]; + or_panic!(stream.read(&mut buf)); + } + }); + + for _ in 0..2 { + let mut stream = or_panic!(UnixStream::connect(&socket_path)); + or_panic!(stream.write_all(&[0])); + } + + thread.join().unwrap(); + } + + #[test] + fn long_path() { + let dir = tmpdir(); + let socket_path = dir.path().join( + "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfa\ + sasdfasdfasdasdfasdfasdfadfasdfasdfasdfasdfasdf", + ); + match UnixStream::connect(&socket_path) { + Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {} + Err(e) => panic!("unexpected error {}", e), + Ok(_) => panic!("unexpected success"), + } + + match UnixListener::bind(&socket_path) { + Err(ref e) if 
e.kind() == io::ErrorKind::InvalidInput => {} + Err(e) => panic!("unexpected error {}", e), + Ok(_) => panic!("unexpected success"), + } + + match UnixDatagram::bind(&socket_path) { + Err(ref e) if e.kind() == io::ErrorKind::InvalidInput => {} + Err(e) => panic!("unexpected error {}", e), + Ok(_) => panic!("unexpected success"), + } + } + + #[test] + fn timeouts() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + + let _listener = or_panic!(UnixListener::bind(&socket_path)); + + let stream = or_panic!(UnixStream::connect(&socket_path)); + let dur = Duration::new(15410, 0); + + assert_eq!(None, or_panic!(stream.read_timeout())); + + or_panic!(stream.set_read_timeout(Some(dur))); + assert_eq!(Some(dur), or_panic!(stream.read_timeout())); + + assert_eq!(None, or_panic!(stream.write_timeout())); + + or_panic!(stream.set_write_timeout(Some(dur))); + assert_eq!(Some(dur), or_panic!(stream.write_timeout())); + + or_panic!(stream.set_read_timeout(None)); + assert_eq!(None, or_panic!(stream.read_timeout())); + + or_panic!(stream.set_write_timeout(None)); + assert_eq!(None, or_panic!(stream.write_timeout())); + } + + #[test] + fn test_read_timeout() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + + let _listener = or_panic!(UnixListener::bind(&socket_path)); + + let mut stream = or_panic!(UnixStream::connect(&socket_path)); + or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000)))); + + let mut buf = [0; 10]; + let kind = stream.read_exact(&mut buf).err().expect("expected error").kind(); + assert!( + kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut, + "unexpected_error: {:?}", + kind + ); + } + + #[test] + fn test_read_with_timeout() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + + let listener = or_panic!(UnixListener::bind(&socket_path)); + + let mut stream = or_panic!(UnixStream::connect(&socket_path)); + or_panic!(stream.set_read_timeout(Some(Duration::from_millis(1000)))); + + let mut other_end = or_panic!(listener.accept()).0; + or_panic!(other_end.write_all(b"hello world")); + + let mut buf = [0; 11]; + or_panic!(stream.read(&mut buf)); + assert_eq!(b"hello world", &buf[..]); + + let kind = stream.read_exact(&mut buf).err().expect("expected error").kind(); + assert!( + kind == ErrorKind::WouldBlock || kind == ErrorKind::TimedOut, + "unexpected_error: {:?}", + kind + ); + } + + // Ensure the `set_read_timeout` and `set_write_timeout` calls return errors + // when passed zero Durations + #[test] + fn test_unix_stream_timeout_zero_duration() { + let dir = tmpdir(); + let socket_path = dir.path().join("sock"); + + let listener = or_panic!(UnixListener::bind(&socket_path)); + let stream = or_panic!(UnixStream::connect(&socket_path)); + + let result = stream.set_write_timeout(Some(Duration::new(0, 0))); + let err = result.unwrap_err(); + assert_eq!(err.kind(), ErrorKind::InvalidInput); + + let result = stream.set_read_timeout(Some(Duration::new(0, 0))); + let err = result.unwrap_err(); + assert_eq!(err.kind(), ErrorKind::InvalidInput); + + drop(listener); + } + + #[test] + fn test_unix_datagram() { + let dir = tmpdir(); + let path1 = dir.path().join("sock1"); + let path2 = dir.path().join("sock2"); + + let sock1 = or_panic!(UnixDatagram::bind(&path1)); + let sock2 = or_panic!(UnixDatagram::bind(&path2)); + + let msg = b"hello world"; + or_panic!(sock1.send_to(msg, &path2)); + let mut buf = [0; 11]; + or_panic!(sock2.recv_from(&mut buf)); + assert_eq!(msg, &buf[..]); + } + + #[test] + fn 
test_unnamed_unix_datagram() { + let dir = tmpdir(); + let path1 = dir.path().join("sock1"); + + let sock1 = or_panic!(UnixDatagram::bind(&path1)); + let sock2 = or_panic!(UnixDatagram::unbound()); + + let msg = b"hello world"; + or_panic!(sock2.send_to(msg, &path1)); + let mut buf = [0; 11]; + let (usize, addr) = or_panic!(sock1.recv_from(&mut buf)); + assert_eq!(usize, 11); + assert!(addr.is_unnamed()); + assert_eq!(msg, &buf[..]); + } + + #[test] + fn test_connect_unix_datagram() { + let dir = tmpdir(); + let path1 = dir.path().join("sock1"); + let path2 = dir.path().join("sock2"); + + let bsock1 = or_panic!(UnixDatagram::bind(&path1)); + let bsock2 = or_panic!(UnixDatagram::bind(&path2)); + let sock = or_panic!(UnixDatagram::unbound()); + or_panic!(sock.connect(&path1)); + + // Check send() + let msg = b"hello there"; + or_panic!(sock.send(msg)); + let mut buf = [0; 11]; + let (usize, addr) = or_panic!(bsock1.recv_from(&mut buf)); + assert_eq!(usize, 11); + assert!(addr.is_unnamed()); + assert_eq!(msg, &buf[..]); + + // Changing default socket works too + or_panic!(sock.connect(&path2)); + or_panic!(sock.send(msg)); + or_panic!(bsock2.recv_from(&mut buf)); + } + + #[test] + fn test_unix_datagram_recv() { + let dir = tmpdir(); + let path1 = dir.path().join("sock1"); + + let sock1 = or_panic!(UnixDatagram::bind(&path1)); + let sock2 = or_panic!(UnixDatagram::unbound()); + or_panic!(sock2.connect(&path1)); + + let msg = b"hello world"; + or_panic!(sock2.send(msg)); + let mut buf = [0; 11]; + let size = or_panic!(sock1.recv(&mut buf)); + assert_eq!(size, 11); + assert_eq!(msg, &buf[..]); + } + + #[test] + fn datagram_pair() { + let msg1 = b"hello"; + let msg2 = b"world!"; + + let (s1, s2) = or_panic!(UnixDatagram::pair()); + let thread = thread::spawn(move || { + // s1 must be moved in or the test will hang! + let mut buf = [0; 5]; + or_panic!(s1.recv(&mut buf)); + assert_eq!(&msg1[..], &buf[..]); + or_panic!(s1.send(msg2)); + }); + + or_panic!(s2.send(msg1)); + let mut buf = [0; 6]; + or_panic!(s2.recv(&mut buf)); + assert_eq!(&msg2[..], &buf[..]); + drop(s2); + + thread.join().unwrap(); + } + + // Ensure the `set_read_timeout` and `set_write_timeout` calls return errors + // when passed zero Durations + #[test] + fn test_unix_datagram_timeout_zero_duration() { + let dir = tmpdir(); + let path = dir.path().join("sock"); + + let datagram = or_panic!(UnixDatagram::bind(&path)); + + let result = datagram.set_write_timeout(Some(Duration::new(0, 0))); + let err = result.unwrap_err(); + assert_eq!(err.kind(), ErrorKind::InvalidInput); + + let result = datagram.set_read_timeout(Some(Duration::new(0, 0))); + let err = result.unwrap_err(); + assert_eq!(err.kind(), ErrorKind::InvalidInput); + } + + #[test] + fn abstract_namespace_not_allowed() { + assert!(UnixStream::connect("\0asdf").is_err()); + } +} diff --git a/library/std/src/sys/unix/ext/process.rs b/library/std/src/sys/unix/ext/process.rs new file mode 100644 index 00000000000..048ce24d6ba --- /dev/null +++ b/library/std/src/sys/unix/ext/process.rs @@ -0,0 +1,234 @@ +//! Unix-specific extensions to primitives in the `std::process` module. + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::ffi::OsStr; +use crate::io; +use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use crate::process; +use crate::sys; +use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; + +/// Unix-specific extensions to the [`process::Command`] builder. 
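+///
+/// As a rough sketch, the builder methods below combine with the cross-platform
+/// `Command` API; the uid value used here is arbitrary and only illustrative:
+///
+/// ```no_run
+/// use std::os::unix::process::CommandExt;
+/// use std::process::Command;
+///
+/// let output = Command::new("id")
+///     // Run the child under uid 1000 (translates to a `setuid` call in the child).
+///     .uid(1000)
+///     .output()
+///     .expect("failed to run `id`");
+/// println!("{}", String::from_utf8_lossy(&output.stdout));
+/// ```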
+/// +/// [`process::Command`]: ../../../../std/process/struct.Command.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait CommandExt { + /// Sets the child process's user ID. This translates to a + /// `setuid` call in the child process. Failure in the `setuid` + /// call will cause the spawn to fail. + #[stable(feature = "rust1", since = "1.0.0")] + fn uid(&mut self, id: u32) -> &mut process::Command; + + /// Similar to `uid`, but sets the group ID of the child process. This has + /// the same semantics as the `uid` field. + #[stable(feature = "rust1", since = "1.0.0")] + fn gid(&mut self, id: u32) -> &mut process::Command; + + /// Schedules a closure to be run just before the `exec` function is + /// invoked. + /// + /// The closure is allowed to return an I/O error whose OS error code will + /// be communicated back to the parent and returned as an error from when + /// the spawn was requested. + /// + /// Multiple closures can be registered and they will be called in order of + /// their registration. If a closure returns `Err` then no further closures + /// will be called and the spawn operation will immediately return with a + /// failure. + /// + /// # Notes and Safety + /// + /// This closure will be run in the context of the child process after a + /// `fork`. This primarily means that any modifications made to memory on + /// behalf of this closure will **not** be visible to the parent process. + /// This is often a very constrained environment where normal operations + /// like `malloc` or acquiring a mutex are not guaranteed to work (due to + /// other threads perhaps still running when the `fork` was run). + /// + /// This also means that all resources such as file descriptors and + /// memory-mapped regions got duplicated. It is your responsibility to make + /// sure that the closure does not violate library invariants by making + /// invalid use of these duplicates. + /// + /// When this closure is run, aspects such as the stdio file descriptors and + /// working directory have successfully been changed, so output to these + /// locations may not appear where intended. + #[stable(feature = "process_pre_exec", since = "1.34.0")] + unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command + where + F: FnMut() -> io::Result<()> + Send + Sync + 'static; + + /// Schedules a closure to be run just before the `exec` function is + /// invoked. + /// + /// This method is stable and usable, but it should be unsafe. To fix + /// that, it got deprecated in favor of the unsafe [`pre_exec`]. + /// + /// [`pre_exec`]: #tymethod.pre_exec + #[stable(feature = "process_exec", since = "1.15.0")] + #[rustc_deprecated(since = "1.37.0", reason = "should be unsafe, use `pre_exec` instead")] + fn before_exec<F>(&mut self, f: F) -> &mut process::Command + where + F: FnMut() -> io::Result<()> + Send + Sync + 'static, + { + unsafe { self.pre_exec(f) } + } + + /// Performs all the required setup by this `Command`, followed by calling + /// the `execvp` syscall. + /// + /// On success this function will not return, and otherwise it will return + /// an error indicating why the exec (or another part of the setup of the + /// `Command`) failed. + /// + /// `exec` not returning has the same implications as calling + /// [`process::exit`] – no destructors on the current stack or any other + /// thread’s stack will be run. Therefore, it is recommended to only call + /// `exec` at a point where it is fine to not run any destructors. 
+    /// Note that the `execvp` syscall independently guarantees that all memory is
+    /// freed and all file descriptors with the `CLOEXEC` option (set by default
+    /// on all file descriptors opened by the standard library) are closed.
+    ///
+    /// This function, unlike `spawn`, will **not** `fork` the process to create
+    /// a new child. Like `spawn`, however, the default behavior for the stdio
+    /// descriptors will be to inherit them from the current process.
+    ///
+    /// [`process::exit`]: ../../../process/fn.exit.html
+    ///
+    /// # Notes
+    ///
+    /// The process may be in a "broken state" if this function returns in
+    /// error. For example, the working directory, environment variables, signal
+    /// handling settings, various user/group information, or aspects of stdio
+    /// file descriptors may have changed. If a "transactional spawn" is
+    /// required to gracefully handle errors it is recommended to use the
+    /// cross-platform `spawn` instead.
+    #[stable(feature = "process_exec2", since = "1.9.0")]
+    fn exec(&mut self) -> io::Error;
+
+    /// Sets the executable argument.
+    ///
+    /// Sets the first process argument, `argv[0]`, to something other than the
+    /// default executable path.
+    #[stable(feature = "process_set_argv0", since = "1.45.0")]
+    fn arg0<S>(&mut self, arg: S) -> &mut process::Command
+    where
+        S: AsRef<OsStr>;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl CommandExt for process::Command {
+    fn uid(&mut self, id: u32) -> &mut process::Command {
+        self.as_inner_mut().uid(id);
+        self
+    }
+
+    fn gid(&mut self, id: u32) -> &mut process::Command {
+        self.as_inner_mut().gid(id);
+        self
+    }
+
+    unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command
+    where
+        F: FnMut() -> io::Result<()> + Send + Sync + 'static,
+    {
+        self.as_inner_mut().pre_exec(Box::new(f));
+        self
+    }
+
+    fn exec(&mut self) -> io::Error {
+        self.as_inner_mut().exec(sys::process::Stdio::Inherit)
+    }
+
+    fn arg0<S>(&mut self, arg: S) -> &mut process::Command
+    where
+        S: AsRef<OsStr>,
+    {
+        self.as_inner_mut().set_arg_0(arg.as_ref());
+        self
+    }
+}
+
+/// Unix-specific extensions to [`process::ExitStatus`].
+///
+/// [`process::ExitStatus`]: ../../../../std/process/struct.ExitStatus.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ExitStatusExt {
+    /// Creates a new `ExitStatus` from the raw underlying `i32` return value of
+    /// a process.
+    #[stable(feature = "exit_status_from", since = "1.12.0")]
+    fn from_raw(raw: i32) -> Self;
+
+    /// If the process was terminated by a signal, returns that signal.
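+    ///
+    /// A rough usage sketch (assumes an `ls` binary is available on `PATH`):
+    ///
+    /// ```no_run
+    /// use std::os::unix::process::ExitStatusExt;
+    /// use std::process::Command;
+    ///
+    /// let status = Command::new("ls").status().expect("failed to run `ls`");
+    /// match status.signal() {
+    ///     // The child was killed by a signal, e.g. 9 for SIGKILL.
+    ///     Some(signal) => println!("terminated by signal {}", signal),
+    ///     // The child exited normally; `code()` carries its exit status.
+    ///     None => println!("exited with {:?}", status.code()),
+    /// }
+    /// ```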
+ #[stable(feature = "rust1", since = "1.0.0")] + fn signal(&self) -> Option<i32>; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExitStatusExt for process::ExitStatus { + fn from_raw(raw: i32) -> Self { + process::ExitStatus::from_inner(From::from(raw)) + } + + fn signal(&self) -> Option<i32> { + self.as_inner().signal() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl FromRawFd for process::Stdio { + unsafe fn from_raw_fd(fd: RawFd) -> process::Stdio { + let fd = sys::fd::FileDesc::new(fd); + let io = sys::process::Stdio::Fd(fd); + process::Stdio::from_inner(io) + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStdin { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStdout { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStderr { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStdin { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStdout { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStderr { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +/// Returns the OS-assigned process identifier associated with this process's parent. +#[stable(feature = "unix_ppid", since = "1.27.0")] +pub fn parent_id() -> u32 { + crate::sys::os::getppid() +} diff --git a/library/std/src/sys/unix/ext/raw.rs b/library/std/src/sys/unix/ext/raw.rs new file mode 100644 index 00000000000..40fa53d484f --- /dev/null +++ b/library/std/src/sys/unix/ext/raw.rs @@ -0,0 +1,33 @@ +//! Unix-specific primitives available on all unix platforms + +#![stable(feature = "raw_ext", since = "1.1.0")] +#![rustc_deprecated( + since = "1.8.0", + reason = "these type aliases are no longer supported by \ + the standard library, the `libc` crate on \ + crates.io should be used instead for the correct \ + definitions" +)] +#![allow(deprecated)] + +#[stable(feature = "raw_ext", since = "1.1.0")] +#[allow(non_camel_case_types)] +pub type uid_t = u32; + +#[stable(feature = "raw_ext", since = "1.1.0")] +#[allow(non_camel_case_types)] +pub type gid_t = u32; + +#[stable(feature = "raw_ext", since = "1.1.0")] +#[allow(non_camel_case_types)] +pub type pid_t = i32; + +#[doc(inline)] +#[stable(feature = "pthread_t", since = "1.8.0")] +pub use crate::sys::platform::raw::pthread_t; +#[doc(inline)] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub use crate::sys::platform::raw::{blkcnt_t, time_t}; +#[doc(inline)] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub use crate::sys::platform::raw::{blksize_t, dev_t, ino_t, mode_t, nlink_t, off_t}; diff --git a/library/std/src/sys/unix/ext/thread.rs b/library/std/src/sys/unix/ext/thread.rs new file mode 100644 index 00000000000..759ef6236e8 --- /dev/null +++ b/library/std/src/sys/unix/ext/thread.rs @@ -0,0 +1,41 @@ +//! Unix-specific extensions to primitives in the `std::thread` module. 
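+//!
+//! A minimal sketch of how the extension trait below can be used:
+//!
+//! ```no_run
+//! use std::os::unix::thread::{JoinHandleExt, RawPthread};
+//! use std::thread;
+//!
+//! let handle = thread::spawn(|| ());
+//! // Borrow the raw pthread_t without giving up ownership of the handle.
+//! let _raw: RawPthread = handle.as_pthread_t();
+//! handle.join().unwrap();
+//! ```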
+ +#![stable(feature = "thread_extensions", since = "1.9.0")] + +#[allow(deprecated)] +use crate::os::unix::raw::pthread_t; +use crate::sys_common::{AsInner, IntoInner}; +use crate::thread::JoinHandle; + +#[stable(feature = "thread_extensions", since = "1.9.0")] +#[allow(deprecated)] +pub type RawPthread = pthread_t; + +/// Unix-specific extensions to [`thread::JoinHandle`]. +/// +/// [`thread::JoinHandle`]: ../../../../std/thread/struct.JoinHandle.html +#[stable(feature = "thread_extensions", since = "1.9.0")] +pub trait JoinHandleExt { + /// Extracts the raw pthread_t without taking ownership + #[stable(feature = "thread_extensions", since = "1.9.0")] + fn as_pthread_t(&self) -> RawPthread; + + /// Consumes the thread, returning the raw pthread_t + /// + /// This function **transfers ownership** of the underlying pthread_t to + /// the caller. Callers are then the unique owners of the pthread_t and + /// must either detach or join the pthread_t once it's no longer needed. + #[stable(feature = "thread_extensions", since = "1.9.0")] + fn into_pthread_t(self) -> RawPthread; +} + +#[stable(feature = "thread_extensions", since = "1.9.0")] +impl<T> JoinHandleExt for JoinHandle<T> { + fn as_pthread_t(&self) -> RawPthread { + self.as_inner().id() as RawPthread + } + + fn into_pthread_t(self) -> RawPthread { + self.into_inner().into_id() as RawPthread + } +} diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs new file mode 100644 index 00000000000..84c4d662161 --- /dev/null +++ b/library/std/src/sys/unix/fd.rs @@ -0,0 +1,258 @@ +#![unstable(reason = "not public", issue = "none", feature = "fd")] + +use crate::cmp; +use crate::io::{self, Initializer, IoSlice, IoSliceMut, Read}; +use crate::mem; +use crate::sys::cvt; +use crate::sys_common::AsInner; + +use libc::{c_int, c_void}; + +#[derive(Debug)] +pub struct FileDesc { + fd: c_int, +} + +// The maximum read limit on most POSIX-like systems is `SSIZE_MAX`, +// with the man page quoting that if the count of bytes to read is +// greater than `SSIZE_MAX` the result is "unspecified". +// +// On macOS, however, apparently the 64-bit libc is either buggy or +// intentionally showing odd behavior by rejecting any read with a size +// larger than or equal to INT_MAX. To handle both of these the read +// size is capped on both platforms. +#[cfg(target_os = "macos")] +const READ_LIMIT: usize = c_int::MAX as usize - 1; +#[cfg(not(target_os = "macos"))] +const READ_LIMIT: usize = libc::ssize_t::MAX as usize; + +impl FileDesc { + pub fn new(fd: c_int) -> FileDesc { + FileDesc { fd } + } + + pub fn raw(&self) -> c_int { + self.fd + } + + /// Extracts the actual file descriptor without closing it. 
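+    ///
+    /// Ownership of the descriptor moves to the caller: the implementation
+    /// uses `mem::forget` so the `Drop` impl never runs and the descriptor
+    /// stays open.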
+ pub fn into_raw(self) -> c_int { + let fd = self.fd; + mem::forget(self); + fd + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT)) + })?; + Ok(ret as usize) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::readv( + self.fd, + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), c_int::MAX as usize) as c_int, + ) + })?; + Ok(ret as usize) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> { + let mut me = self; + (&mut me).read_to_end(buf) + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + #[cfg(target_os = "android")] + use super::android::cvt_pread64; + + #[cfg(not(target_os = "android"))] + unsafe fn cvt_pread64( + fd: c_int, + buf: *mut c_void, + count: usize, + offset: i64, + ) -> io::Result<isize> { + #[cfg(not(target_os = "linux"))] + use libc::pread as pread64; + #[cfg(target_os = "linux")] + use libc::pread64; + cvt(pread64(fd, buf, count, offset)) + } + + unsafe { + cvt_pread64( + self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), READ_LIMIT), + offset as i64, + ) + .map(|n| n as usize) + } + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT)) + })?; + Ok(ret as usize) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::writev( + self.fd, + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), c_int::MAX as usize) as c_int, + ) + })?; + Ok(ret as usize) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + #[cfg(target_os = "android")] + use super::android::cvt_pwrite64; + + #[cfg(not(target_os = "android"))] + unsafe fn cvt_pwrite64( + fd: c_int, + buf: *const c_void, + count: usize, + offset: i64, + ) -> io::Result<isize> { + #[cfg(not(target_os = "linux"))] + use libc::pwrite as pwrite64; + #[cfg(target_os = "linux")] + use libc::pwrite64; + cvt(pwrite64(fd, buf, count, offset)) + } + + unsafe { + cvt_pwrite64( + self.fd, + buf.as_ptr() as *const c_void, + cmp::min(buf.len(), READ_LIMIT), + offset as i64, + ) + .map(|n| n as usize) + } + } + + #[cfg(target_os = "linux")] + pub fn get_cloexec(&self) -> io::Result<bool> { + unsafe { Ok((cvt(libc::fcntl(self.fd, libc::F_GETFD))? 
& libc::FD_CLOEXEC) != 0) } + } + + #[cfg(not(any( + target_env = "newlib", + target_os = "solaris", + target_os = "illumos", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "l4re", + target_os = "linux", + target_os = "haiku", + target_os = "redox" + )))] + pub fn set_cloexec(&self) -> io::Result<()> { + unsafe { + cvt(libc::ioctl(self.fd, libc::FIOCLEX))?; + Ok(()) + } + } + #[cfg(any( + target_env = "newlib", + target_os = "solaris", + target_os = "illumos", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "l4re", + target_os = "linux", + target_os = "haiku", + target_os = "redox" + ))] + pub fn set_cloexec(&self) -> io::Result<()> { + unsafe { + let previous = cvt(libc::fcntl(self.fd, libc::F_GETFD))?; + let new = previous | libc::FD_CLOEXEC; + if new != previous { + cvt(libc::fcntl(self.fd, libc::F_SETFD, new))?; + } + Ok(()) + } + } + + #[cfg(target_os = "linux")] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let v = nonblocking as c_int; + cvt(libc::ioctl(self.fd, libc::FIONBIO, &v))?; + Ok(()) + } + } + + #[cfg(not(target_os = "linux"))] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let previous = cvt(libc::fcntl(self.fd, libc::F_GETFL))?; + let new = if nonblocking { + previous | libc::O_NONBLOCK + } else { + previous & !libc::O_NONBLOCK + }; + if new != previous { + cvt(libc::fcntl(self.fd, libc::F_SETFL, new))?; + } + Ok(()) + } + } + + pub fn duplicate(&self) -> io::Result<FileDesc> { + // We want to atomically duplicate this file descriptor and set the + // CLOEXEC flag, and currently that's done via F_DUPFD_CLOEXEC. This + // is a POSIX flag that was added to Linux in 2.6.24. + let fd = cvt(unsafe { libc::fcntl(self.raw(), libc::F_DUPFD_CLOEXEC, 0) })?; + Ok(FileDesc::new(fd)) + } +} + +impl<'a> Read for &'a FileDesc { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + (**self).read(buf) + } + + #[inline] + unsafe fn initializer(&self) -> Initializer { + Initializer::nop() + } +} + +impl AsInner<c_int> for FileDesc { + fn as_inner(&self) -> &c_int { + &self.fd + } +} + +impl Drop for FileDesc { + fn drop(&mut self) { + // Note that errors are ignored when closing a file descriptor. The + // reason for this is that if an error occurs we don't actually know if + // the file descriptor was closed or not, and if we retried (for + // something like EINTR), we might close another valid file descriptor + // opened after we closed ours. 
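+        // In short, a double close is considered more dangerous than leaking
+        // the descriptor, so the return value of `close` is simply discarded.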
+ let _ = unsafe { libc::close(self.fd) }; + } +} diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs new file mode 100644 index 00000000000..acb18e6d064 --- /dev/null +++ b/library/std/src/sys/unix/fs.rs @@ -0,0 +1,1310 @@ +use crate::os::unix::prelude::*; + +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io::{self, Error, ErrorKind, IoSlice, IoSliceMut, SeekFrom}; +use crate::mem; +use crate::path::{Path, PathBuf}; +use crate::ptr; +use crate::sync::Arc; +use crate::sys::fd::FileDesc; +use crate::sys::time::SystemTime; +use crate::sys::{cvt, cvt_r}; +use crate::sys_common::{AsInner, FromInner}; + +use libc::{c_int, mode_t}; + +#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))] +use libc::dirfd; +#[cfg(any(target_os = "linux", target_os = "emscripten"))] +use libc::fstatat64; +#[cfg(not(any( + target_os = "linux", + target_os = "emscripten", + target_os = "solaris", + target_os = "illumos", + target_os = "l4re", + target_os = "fuchsia", + target_os = "redox" +)))] +use libc::readdir_r as readdir64_r; +#[cfg(target_os = "android")] +use libc::{ + dirent as dirent64, fstat as fstat64, fstatat as fstatat64, lseek64, lstat as lstat64, + open as open64, stat as stat64, +}; +#[cfg(not(any( + target_os = "linux", + target_os = "emscripten", + target_os = "l4re", + target_os = "android" +)))] +use libc::{ + dirent as dirent64, fstat as fstat64, ftruncate as ftruncate64, lseek as lseek64, + lstat as lstat64, off_t as off64_t, open as open64, stat as stat64, +}; +#[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "l4re"))] +use libc::{ + dirent64, fstat64, ftruncate64, lseek64, lstat64, off64_t, open64, readdir64_r, stat64, +}; + +pub use crate::sys_common::fs::remove_dir_all; + +pub struct File(FileDesc); + +// FIXME: This should be available on Linux with all `target_env`. +// But currently only glibc exposes `statx` fn and structs. +// We don't want to import unverified raw C structs here directly. +// https://github.com/rust-lang/rust/pull/67774 +macro_rules! cfg_has_statx { + ({ $($then_tt:tt)* } else { $($else_tt:tt)* }) => { + cfg_if::cfg_if! { + if #[cfg(all(target_os = "linux", target_env = "gnu"))] { + $($then_tt)* + } else { + $($else_tt)* + } + } + }; + ($($block_inner:tt)*) => { + #[cfg(all(target_os = "linux", target_env = "gnu"))] + { + $($block_inner)* + } + }; +} + +cfg_has_statx! {{ + #[derive(Clone)] + pub struct FileAttr { + stat: stat64, + statx_extra_fields: Option<StatxExtraFields>, + } + + #[derive(Clone)] + struct StatxExtraFields { + // This is needed to check if btime is supported by the filesystem. + stx_mask: u32, + stx_btime: libc::statx_timestamp, + } + + // We prefer `statx` on Linux if available, which contains file creation time. + // Default `stat64` contains no creation time. + unsafe fn try_statx( + fd: c_int, + path: *const libc::c_char, + flags: i32, + mask: u32, + ) -> Option<io::Result<FileAttr>> { + use crate::sync::atomic::{AtomicU8, Ordering}; + + // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx` + // We store the availability in global to avoid unnecessary syscalls. + // 0: Unknown + // 1: Not available + // 2: Available + static STATX_STATE: AtomicU8 = AtomicU8::new(0); + syscall! 
{ + fn statx( + fd: c_int, + pathname: *const libc::c_char, + flags: c_int, + mask: libc::c_uint, + statxbuf: *mut libc::statx + ) -> c_int + } + + match STATX_STATE.load(Ordering::Relaxed) { + 0 => { + // It is a trick to call `statx` with NULL pointers to check if the syscall + // is available. According to the manual, it is expected to fail with EFAULT. + // We do this mainly for performance, since it is nearly hundreds times + // faster than a normal successful call. + let err = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut())) + .err() + .and_then(|e| e.raw_os_error()); + // We don't check `err == Some(libc::ENOSYS)` because the syscall may be limited + // and returns `EPERM`. Listing all possible errors seems not a good idea. + // See: https://github.com/rust-lang/rust/issues/65662 + if err != Some(libc::EFAULT) { + STATX_STATE.store(1, Ordering::Relaxed); + return None; + } + STATX_STATE.store(2, Ordering::Relaxed); + } + 1 => return None, + _ => {} + } + + let mut buf: libc::statx = mem::zeroed(); + if let Err(err) = cvt(statx(fd, path, flags, mask, &mut buf)) { + return Some(Err(err)); + } + + // We cannot fill `stat64` exhaustively because of private padding fields. + let mut stat: stat64 = mem::zeroed(); + // `c_ulong` on gnu-mips, `dev_t` otherwise + stat.st_dev = libc::makedev(buf.stx_dev_major, buf.stx_dev_minor) as _; + stat.st_ino = buf.stx_ino as libc::ino64_t; + stat.st_nlink = buf.stx_nlink as libc::nlink_t; + stat.st_mode = buf.stx_mode as libc::mode_t; + stat.st_uid = buf.stx_uid as libc::uid_t; + stat.st_gid = buf.stx_gid as libc::gid_t; + stat.st_rdev = libc::makedev(buf.stx_rdev_major, buf.stx_rdev_minor) as _; + stat.st_size = buf.stx_size as off64_t; + stat.st_blksize = buf.stx_blksize as libc::blksize_t; + stat.st_blocks = buf.stx_blocks as libc::blkcnt64_t; + stat.st_atime = buf.stx_atime.tv_sec as libc::time_t; + // `i64` on gnu-x86_64-x32, `c_ulong` otherwise. + stat.st_atime_nsec = buf.stx_atime.tv_nsec as _; + stat.st_mtime = buf.stx_mtime.tv_sec as libc::time_t; + stat.st_mtime_nsec = buf.stx_mtime.tv_nsec as _; + stat.st_ctime = buf.stx_ctime.tv_sec as libc::time_t; + stat.st_ctime_nsec = buf.stx_ctime.tv_nsec as _; + + let extra = StatxExtraFields { + stx_mask: buf.stx_mask, + stx_btime: buf.stx_btime, + }; + + Some(Ok(FileAttr { stat, statx_extra_fields: Some(extra) })) + } + +} else { + #[derive(Clone)] + pub struct FileAttr { + stat: stat64, + } +}} + +// all DirEntry's will have a reference to this struct +struct InnerReadDir { + dirp: Dir, + root: PathBuf, +} + +#[derive(Clone)] +pub struct ReadDir { + inner: Arc<InnerReadDir>, + end_of_stream: bool, +} + +struct Dir(*mut libc::DIR); + +unsafe impl Send for Dir {} +unsafe impl Sync for Dir {} + +pub struct DirEntry { + entry: dirent64, + dir: ReadDir, + // We need to store an owned copy of the entry name + // on Solaris and Fuchsia because a) it uses a zero-length + // array to store the name, b) its lifetime between readdir + // calls is not guaranteed. 
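+    // On the remaining platforms the name is read directly out of `entry.d_name`.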
+ #[cfg(any( + target_os = "solaris", + target_os = "illumos", + target_os = "fuchsia", + target_os = "redox" + ))] + name: Box<[u8]>, +} + +#[derive(Clone, Debug)] +pub struct OpenOptions { + // generic + read: bool, + write: bool, + append: bool, + truncate: bool, + create: bool, + create_new: bool, + // system-specific + custom_flags: i32, + mode: mode_t, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FilePermissions { + mode: mode_t, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct FileType { + mode: mode_t, +} + +#[derive(Debug)] +pub struct DirBuilder { + mode: mode_t, +} + +cfg_has_statx! {{ + impl FileAttr { + fn from_stat64(stat: stat64) -> Self { + Self { stat, statx_extra_fields: None } + } + } +} else { + impl FileAttr { + fn from_stat64(stat: stat64) -> Self { + Self { stat } + } + } +}} + +impl FileAttr { + pub fn size(&self) -> u64 { + self.stat.st_size as u64 + } + pub fn perm(&self) -> FilePermissions { + FilePermissions { mode: (self.stat.st_mode as mode_t) } + } + + pub fn file_type(&self) -> FileType { + FileType { mode: self.stat.st_mode as mode_t } + } +} + +#[cfg(target_os = "netbsd")] +impl FileAttr { + pub fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_mtime as libc::time_t, + tv_nsec: self.stat.st_mtimensec as libc::c_long, + })) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_atime as libc::time_t, + tv_nsec: self.stat.st_atimensec as libc::c_long, + })) + } + + pub fn created(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_birthtime as libc::time_t, + tv_nsec: self.stat.st_birthtimensec as libc::c_long, + })) + } +} + +#[cfg(not(target_os = "netbsd"))] +impl FileAttr { + pub fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_mtime as libc::time_t, + tv_nsec: self.stat.st_mtime_nsec as _, + })) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_atime as libc::time_t, + tv_nsec: self.stat.st_atime_nsec as _, + })) + } + + #[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "macos", + target_os = "ios" + ))] + pub fn created(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_birthtime as libc::time_t, + tv_nsec: self.stat.st_birthtime_nsec as libc::c_long, + })) + } + + #[cfg(not(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "macos", + target_os = "ios" + )))] + pub fn created(&self) -> io::Result<SystemTime> { + cfg_has_statx! 
{ + if let Some(ext) = &self.statx_extra_fields { + return if (ext.stx_mask & libc::STATX_BTIME) != 0 { + Ok(SystemTime::from(libc::timespec { + tv_sec: ext.stx_btime.tv_sec as libc::time_t, + tv_nsec: ext.stx_btime.tv_nsec as _, + })) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "creation time is not available for the filesystem", + )) + }; + } + } + + Err(io::Error::new( + io::ErrorKind::Other, + "creation time is not available on this platform \ + currently", + )) + } +} + +impl AsInner<stat64> for FileAttr { + fn as_inner(&self) -> &stat64 { + &self.stat + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + // check if any class (owner, group, others) has write permission + self.mode & 0o222 == 0 + } + + pub fn set_readonly(&mut self, readonly: bool) { + if readonly { + // remove write permission for all classes; equivalent to `chmod a-w <file>` + self.mode &= !0o222; + } else { + // add write permission for all classes; equivalent to `chmod a+w <file>` + self.mode |= 0o222; + } + } + pub fn mode(&self) -> u32 { + self.mode as u32 + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + self.is(libc::S_IFDIR) + } + pub fn is_file(&self) -> bool { + self.is(libc::S_IFREG) + } + pub fn is_symlink(&self) -> bool { + self.is(libc::S_IFLNK) + } + + pub fn is(&self, mode: mode_t) -> bool { + self.mode & libc::S_IFMT == mode + } +} + +impl FromInner<u32> for FilePermissions { + fn from_inner(mode: u32) -> FilePermissions { + FilePermissions { mode: mode as mode_t } + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame. + // Thus the result will be e g 'ReadDir("/home")' + fmt::Debug::fmt(&*self.inner.root, f) + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + #[cfg(any( + target_os = "solaris", + target_os = "fuchsia", + target_os = "redox", + target_os = "illumos" + ))] + fn next(&mut self) -> Option<io::Result<DirEntry>> { + use crate::slice; + + unsafe { + loop { + // Although readdir_r(3) would be a correct function to use here because + // of the thread safety, on Illumos and Fuchsia the readdir(3C) function + // is safe to use in threaded applications and it is generally preferred + // over the readdir_r(3C) function. + super::os::set_errno(0); + let entry_ptr = libc::readdir(self.inner.dirp.0); + if entry_ptr.is_null() { + // NULL can mean either the end is reached or an error occurred. + // So we had to clear errno beforehand to check for an error now. + return match super::os::errno() { + 0 => None, + e => Some(Err(Error::from_raw_os_error(e))), + }; + } + + let name = (*entry_ptr).d_name.as_ptr(); + let namelen = libc::strlen(name) as usize; + + let ret = DirEntry { + entry: *entry_ptr, + name: slice::from_raw_parts(name as *const u8, namelen as usize) + .to_owned() + .into_boxed_slice(), + dir: self.clone(), + }; + if ret.name_bytes() != b"." && ret.name_bytes() != b".." 
{ + return Some(Ok(ret)); + } + } + } + } + + #[cfg(not(any( + target_os = "solaris", + target_os = "fuchsia", + target_os = "redox", + target_os = "illumos" + )))] + fn next(&mut self) -> Option<io::Result<DirEntry>> { + if self.end_of_stream { + return None; + } + + unsafe { + let mut ret = DirEntry { entry: mem::zeroed(), dir: self.clone() }; + let mut entry_ptr = ptr::null_mut(); + loop { + if readdir64_r(self.inner.dirp.0, &mut ret.entry, &mut entry_ptr) != 0 { + if entry_ptr.is_null() { + // We encountered an error (which will be returned in this iteration), but + // we also reached the end of the directory stream. The `end_of_stream` + // flag is enabled to make sure that we return `None` in the next iteration + // (instead of looping forever) + self.end_of_stream = true; + } + return Some(Err(Error::last_os_error())); + } + if entry_ptr.is_null() { + return None; + } + if ret.name_bytes() != b"." && ret.name_bytes() != b".." { + return Some(Ok(ret)); + } + } + } + } +} + +impl Drop for Dir { + fn drop(&mut self) { + let r = unsafe { libc::closedir(self.0) }; + debug_assert_eq!(r, 0); + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + self.dir.inner.root.join(OsStr::from_bytes(self.name_bytes())) + } + + pub fn file_name(&self) -> OsString { + OsStr::from_bytes(self.name_bytes()).to_os_string() + } + + #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))] + pub fn metadata(&self) -> io::Result<FileAttr> { + let fd = cvt(unsafe { dirfd(self.dir.inner.dirp.0) })?; + let name = self.entry.d_name.as_ptr(); + + cfg_has_statx! { + if let Some(ret) = unsafe { try_statx( + fd, + name, + libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } + } + + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { fstatat64(fd, name, &mut stat, libc::AT_SYMLINK_NOFOLLOW) })?; + Ok(FileAttr::from_stat64(stat)) + } + + #[cfg(not(any(target_os = "linux", target_os = "emscripten", target_os = "android")))] + pub fn metadata(&self) -> io::Result<FileAttr> { + lstat(&self.path()) + } + + #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "haiku"))] + pub fn file_type(&self) -> io::Result<FileType> { + lstat(&self.path()).map(|m| m.file_type()) + } + + #[cfg(not(any(target_os = "solaris", target_os = "illumos", target_os = "haiku")))] + pub fn file_type(&self) -> io::Result<FileType> { + match self.entry.d_type { + libc::DT_CHR => Ok(FileType { mode: libc::S_IFCHR }), + libc::DT_FIFO => Ok(FileType { mode: libc::S_IFIFO }), + libc::DT_LNK => Ok(FileType { mode: libc::S_IFLNK }), + libc::DT_REG => Ok(FileType { mode: libc::S_IFREG }), + libc::DT_SOCK => Ok(FileType { mode: libc::S_IFSOCK }), + libc::DT_DIR => Ok(FileType { mode: libc::S_IFDIR }), + libc::DT_BLK => Ok(FileType { mode: libc::S_IFBLK }), + _ => lstat(&self.path()).map(|m| m.file_type()), + } + } + + #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "linux", + target_os = "emscripten", + target_os = "android", + target_os = "solaris", + target_os = "illumos", + target_os = "haiku", + target_os = "l4re", + target_os = "fuchsia", + target_os = "redox" + ))] + pub fn ino(&self) -> u64 { + self.entry.d_ino as u64 + } + + #[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "dragonfly" + ))] + pub fn ino(&self) -> u64 { + self.entry.d_fileno as u64 + } + + #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "netbsd", + target_os = "openbsd", + target_os = 
"freebsd", + target_os = "dragonfly" + ))] + fn name_bytes(&self) -> &[u8] { + use crate::slice; + unsafe { + slice::from_raw_parts( + self.entry.d_name.as_ptr() as *const u8, + self.entry.d_namlen as usize, + ) + } + } + #[cfg(any( + target_os = "android", + target_os = "linux", + target_os = "emscripten", + target_os = "l4re", + target_os = "haiku" + ))] + fn name_bytes(&self) -> &[u8] { + unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()).to_bytes() } + } + #[cfg(any( + target_os = "solaris", + target_os = "illumos", + target_os = "fuchsia", + target_os = "redox" + ))] + fn name_bytes(&self) -> &[u8] { + &*self.name + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions { + // generic + read: false, + write: false, + append: false, + truncate: false, + create: false, + create_new: false, + // system-specific + custom_flags: 0, + mode: 0o666, + } + } + + pub fn read(&mut self, read: bool) { + self.read = read; + } + pub fn write(&mut self, write: bool) { + self.write = write; + } + pub fn append(&mut self, append: bool) { + self.append = append; + } + pub fn truncate(&mut self, truncate: bool) { + self.truncate = truncate; + } + pub fn create(&mut self, create: bool) { + self.create = create; + } + pub fn create_new(&mut self, create_new: bool) { + self.create_new = create_new; + } + + pub fn custom_flags(&mut self, flags: i32) { + self.custom_flags = flags; + } + pub fn mode(&mut self, mode: u32) { + self.mode = mode as mode_t; + } + + fn get_access_mode(&self) -> io::Result<c_int> { + match (self.read, self.write, self.append) { + (true, false, false) => Ok(libc::O_RDONLY), + (false, true, false) => Ok(libc::O_WRONLY), + (true, true, false) => Ok(libc::O_RDWR), + (false, _, true) => Ok(libc::O_WRONLY | libc::O_APPEND), + (true, _, true) => Ok(libc::O_RDWR | libc::O_APPEND), + (false, false, false) => Err(Error::from_raw_os_error(libc::EINVAL)), + } + } + + fn get_creation_mode(&self) -> io::Result<c_int> { + match (self.write, self.append) { + (true, false) => {} + (false, false) => { + if self.truncate || self.create || self.create_new { + return Err(Error::from_raw_os_error(libc::EINVAL)); + } + } + (_, true) => { + if self.truncate && !self.create_new { + return Err(Error::from_raw_os_error(libc::EINVAL)); + } + } + } + + Ok(match (self.create, self.truncate, self.create_new) { + (false, false, false) => 0, + (true, false, false) => libc::O_CREAT, + (false, true, false) => libc::O_TRUNC, + (true, true, false) => libc::O_CREAT | libc::O_TRUNC, + (_, _, true) => libc::O_CREAT | libc::O_EXCL, + }) + } +} + +impl File { + pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { + let path = cstr(path)?; + File::open_c(&path, opts) + } + + pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { + let flags = libc::O_CLOEXEC + | opts.get_access_mode()? + | opts.get_creation_mode()? + | (opts.custom_flags as c_int & !libc::O_ACCMODE); + // The third argument of `open64` is documented to have type `mode_t`. On + // some platforms (like macOS, where `open64` is actually `open`), `mode_t` is `u16`. + // However, since this is a variadic function, C integer promotion rules mean that on + // the ABI level, this still gets passed as `c_int` (aka `u32` on Unix platforms). + let fd = cvt_r(|| unsafe { open64(path.as_ptr(), flags, opts.mode as c_int) })?; + Ok(File(FileDesc::new(fd))) + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + let fd = self.0.raw(); + + cfg_has_statx! 
{ + if let Some(ret) = unsafe { try_statx( + fd, + b"\0" as *const _ as *const libc::c_char, + libc::AT_EMPTY_PATH | libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } + } + + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { fstat64(fd, &mut stat) })?; + Ok(FileAttr::from_stat64(stat)) + } + + pub fn fsync(&self) -> io::Result<()> { + cvt_r(|| unsafe { os_fsync(self.0.raw()) })?; + return Ok(()); + + #[cfg(any(target_os = "macos", target_os = "ios"))] + unsafe fn os_fsync(fd: c_int) -> c_int { + libc::fcntl(fd, libc::F_FULLFSYNC) + } + #[cfg(not(any(target_os = "macos", target_os = "ios")))] + unsafe fn os_fsync(fd: c_int) -> c_int { + libc::fsync(fd) + } + } + + pub fn datasync(&self) -> io::Result<()> { + cvt_r(|| unsafe { os_datasync(self.0.raw()) })?; + return Ok(()); + + #[cfg(any(target_os = "macos", target_os = "ios"))] + unsafe fn os_datasync(fd: c_int) -> c_int { + libc::fcntl(fd, libc::F_FULLFSYNC) + } + #[cfg(target_os = "linux")] + unsafe fn os_datasync(fd: c_int) -> c_int { + libc::fdatasync(fd) + } + #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "linux")))] + unsafe fn os_datasync(fd: c_int) -> c_int { + libc::fsync(fd) + } + } + + pub fn truncate(&self, size: u64) -> io::Result<()> { + #[cfg(target_os = "android")] + return crate::sys::android::ftruncate64(self.0.raw(), size); + + #[cfg(not(target_os = "android"))] + { + use crate::convert::TryInto; + let size: off64_t = + size.try_into().map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; + cvt_r(|| unsafe { ftruncate64(self.0.raw(), size) }).map(drop) + } + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.0.read_at(buf, offset) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.0.write_at(buf, offset) + } + + pub fn flush(&self) -> io::Result<()> { + Ok(()) + } + + pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> { + let (whence, pos) = match pos { + // Casting to `i64` is fine, too large values will end up as + // negative which will cause an error in `lseek64`. 
+ SeekFrom::Start(off) => (libc::SEEK_SET, off as i64), + SeekFrom::End(off) => (libc::SEEK_END, off), + SeekFrom::Current(off) => (libc::SEEK_CUR, off), + }; + let n = cvt(unsafe { lseek64(self.0.raw(), pos, whence) })?; + Ok(n as u64) + } + + pub fn duplicate(&self) -> io::Result<File> { + self.0.duplicate().map(File) + } + + pub fn fd(&self) -> &FileDesc { + &self.0 + } + + pub fn into_fd(self) -> FileDesc { + self.0 + } + + pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> { + cvt_r(|| unsafe { libc::fchmod(self.0.raw(), perm.mode) })?; + Ok(()) + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder { mode: 0o777 } + } + + pub fn mkdir(&self, p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?; + Ok(()) + } + + pub fn set_mode(&mut self, mode: u32) { + self.mode = mode as mode_t; + } +} + +fn cstr(path: &Path) -> io::Result<CString> { + Ok(CString::new(path.as_os_str().as_bytes())?) +} + +impl FromInner<c_int> for File { + fn from_inner(fd: c_int) -> File { + File(FileDesc::new(fd)) + } +} + +impl fmt::Debug for File { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + #[cfg(target_os = "linux")] + fn get_path(fd: c_int) -> Option<PathBuf> { + let mut p = PathBuf::from("/proc/self/fd"); + p.push(&fd.to_string()); + readlink(&p).ok() + } + + #[cfg(target_os = "macos")] + fn get_path(fd: c_int) -> Option<PathBuf> { + // FIXME: The use of PATH_MAX is generally not encouraged, but it + // is inevitable in this case because macOS defines `fcntl` with + // `F_GETPATH` in terms of `MAXPATHLEN`, and there are no + // alternatives. If a better method is invented, it should be used + // instead. + let mut buf = vec![0; libc::PATH_MAX as usize]; + let n = unsafe { libc::fcntl(fd, libc::F_GETPATH, buf.as_ptr()) }; + if n == -1 { + return None; + } + let l = buf.iter().position(|&c| c == 0).unwrap(); + buf.truncate(l as usize); + buf.shrink_to_fit(); + Some(PathBuf::from(OsString::from_vec(buf))) + } + + #[cfg(not(any(target_os = "linux", target_os = "macos")))] + fn get_path(_fd: c_int) -> Option<PathBuf> { + // FIXME(#24570): implement this for other Unix platforms + None + } + + #[cfg(any(target_os = "linux", target_os = "macos"))] + fn get_mode(fd: c_int) -> Option<(bool, bool)> { + let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if mode == -1 { + return None; + } + match mode & libc::O_ACCMODE { + libc::O_RDONLY => Some((true, false)), + libc::O_RDWR => Some((true, true)), + libc::O_WRONLY => Some((false, true)), + _ => None, + } + } + + #[cfg(not(any(target_os = "linux", target_os = "macos")))] + fn get_mode(_fd: c_int) -> Option<(bool, bool)> { + // FIXME(#24570): implement this for other Unix platforms + None + } + + let fd = self.0.raw(); + let mut b = f.debug_struct("File"); + b.field("fd", &fd); + if let Some(path) = get_path(fd) { + b.field("path", &path); + } + if let Some((read, write)) = get_mode(fd) { + b.field("read", &read).field("write", &write); + } + b.finish() + } +} + +pub fn readdir(p: &Path) -> io::Result<ReadDir> { + let root = p.to_path_buf(); + let p = cstr(p)?; + unsafe { + let ptr = libc::opendir(p.as_ptr()); + if ptr.is_null() { + Err(Error::last_os_error()) + } else { + let inner = InnerReadDir { dirp: Dir(ptr), root }; + Ok(ReadDir { inner: Arc::new(inner), end_of_stream: false }) + } + } +} + +pub fn unlink(p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::unlink(p.as_ptr()) })?; + Ok(()) +} + +pub fn rename(old: &Path, new: &Path) -> 
io::Result<()> { + let old = cstr(old)?; + let new = cstr(new)?; + cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?; + Ok(()) +} + +pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> { + let p = cstr(p)?; + cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?; + Ok(()) +} + +pub fn rmdir(p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::rmdir(p.as_ptr()) })?; + Ok(()) +} + +pub fn readlink(p: &Path) -> io::Result<PathBuf> { + let c_path = cstr(p)?; + let p = c_path.as_ptr(); + + let mut buf = Vec::with_capacity(256); + + loop { + let buf_read = + cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize; + + unsafe { + buf.set_len(buf_read); + } + + if buf_read != buf.capacity() { + buf.shrink_to_fit(); + + return Ok(PathBuf::from(OsString::from_vec(buf))); + } + + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. The length is guaranteed to be + // the same as the capacity due to the if statement above. + buf.reserve(1); + } +} + +pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { + let src = cstr(src)?; + let dst = cstr(dst)?; + cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) })?; + Ok(()) +} + +pub fn link(src: &Path, dst: &Path) -> io::Result<()> { + let src = cstr(src)?; + let dst = cstr(dst)?; + cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) })?; + Ok(()) +} + +pub fn stat(p: &Path) -> io::Result<FileAttr> { + let p = cstr(p)?; + + cfg_has_statx! { + if let Some(ret) = unsafe { try_statx( + libc::AT_FDCWD, + p.as_ptr(), + libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } + } + + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?; + Ok(FileAttr::from_stat64(stat)) +} + +pub fn lstat(p: &Path) -> io::Result<FileAttr> { + let p = cstr(p)?; + + cfg_has_statx! 
{ + if let Some(ret) = unsafe { try_statx( + libc::AT_FDCWD, + p.as_ptr(), + libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT, + libc::STATX_ALL, + ) } { + return ret; + } + } + + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?; + Ok(FileAttr::from_stat64(stat)) +} + +pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { + let path = CString::new(p.as_os_str().as_bytes())?; + let buf; + unsafe { + let r = libc::realpath(path.as_ptr(), ptr::null_mut()); + if r.is_null() { + return Err(io::Error::last_os_error()); + } + buf = CStr::from_ptr(r).to_bytes().to_vec(); + libc::free(r as *mut _); + } + Ok(PathBuf::from(OsString::from_vec(buf))) +} + +fn open_from(from: &Path) -> io::Result<(crate::fs::File, crate::fs::Metadata)> { + use crate::fs::File; + + let reader = File::open(from)?; + let metadata = reader.metadata()?; + if !metadata.is_file() { + return Err(Error::new( + ErrorKind::InvalidInput, + "the source path is not an existing regular file", + )); + } + Ok((reader, metadata)) +} + +fn open_to_and_set_permissions( + to: &Path, + reader_metadata: crate::fs::Metadata, +) -> io::Result<(crate::fs::File, crate::fs::Metadata)> { + use crate::fs::OpenOptions; + use crate::os::unix::fs::{OpenOptionsExt, PermissionsExt}; + + let perm = reader_metadata.permissions(); + let writer = OpenOptions::new() + // create the file with the correct mode right away + .mode(perm.mode()) + .write(true) + .create(true) + .truncate(true) + .open(to)?; + let writer_metadata = writer.metadata()?; + if writer_metadata.is_file() { + // Set the correct file permissions, in case the file already existed. + // Don't set the permissions on already existing non-files like + // pipes/FIFOs or device nodes. + writer.set_permissions(perm)?; + } + Ok((writer, writer_metadata)) +} + +#[cfg(not(any( + target_os = "linux", + target_os = "android", + target_os = "macos", + target_os = "ios" +)))] +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + let (mut reader, reader_metadata) = open_from(from)?; + let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?; + + io::copy(&mut reader, &mut writer) +} + +#[cfg(any(target_os = "linux", target_os = "android"))] +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + use crate::cmp; + use crate::sync::atomic::{AtomicBool, Ordering}; + + // Kernel prior to 4.5 don't have copy_file_range + // We store the availability in a global to avoid unnecessary syscalls + static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true); + + unsafe fn copy_file_range( + fd_in: libc::c_int, + off_in: *mut libc::loff_t, + fd_out: libc::c_int, + off_out: *mut libc::loff_t, + len: libc::size_t, + flags: libc::c_uint, + ) -> libc::c_long { + libc::syscall(libc::SYS_copy_file_range, fd_in, off_in, fd_out, off_out, len, flags) + } + + let (mut reader, reader_metadata) = open_from(from)?; + let len = reader_metadata.len(); + let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?; + + let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed); + let mut written = 0u64; + while written < len { + let copy_result = if has_copy_file_range { + let bytes_to_copy = cmp::min(len - written, usize::MAX as u64) as usize; + let copy_result = unsafe { + // We actually don't have to adjust the offsets, + // because copy_file_range adjusts the file offset automatically + cvt(copy_file_range( + reader.as_raw_fd(), + ptr::null_mut(), + writer.as_raw_fd(), + ptr::null_mut(), + bytes_to_copy, + 0, + )) + }; + if 
let Err(ref copy_err) = copy_result {
+ match copy_err.raw_os_error() {
+ Some(libc::ENOSYS) | Some(libc::EPERM) => {
+ HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed);
+ }
+ _ => {}
+ }
+ }
+ copy_result
+ } else {
+ Err(io::Error::from_raw_os_error(libc::ENOSYS))
+ };
+ match copy_result {
+ Ok(ret) => written += ret as u64,
+ Err(err) => {
+ match err.raw_os_error() {
+ Some(os_err)
+ if os_err == libc::ENOSYS
+ || os_err == libc::EXDEV
+ || os_err == libc::EINVAL
+ || os_err == libc::EPERM =>
+ {
+ // Try fallback io::copy if either:
+ // - Kernel version is < 4.5 (ENOSYS)
+ // - Files are mounted on different fs (EXDEV)
+ // - copy_file_range is disallowed, for example by seccomp (EPERM)
+ // - copy_file_range cannot be used with pipes or device nodes (EINVAL)
+ assert_eq!(written, 0);
+ return io::copy(&mut reader, &mut writer);
+ }
+ _ => return Err(err),
+ }
+ }
+ }
+ }
+ Ok(written)
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
+ use crate::sync::atomic::{AtomicBool, Ordering};
+
+ const COPYFILE_ACL: u32 = 1 << 0;
+ const COPYFILE_STAT: u32 = 1 << 1;
+ const COPYFILE_XATTR: u32 = 1 << 2;
+ const COPYFILE_DATA: u32 = 1 << 3;
+
+ const COPYFILE_SECURITY: u32 = COPYFILE_STAT | COPYFILE_ACL;
+ const COPYFILE_METADATA: u32 = COPYFILE_SECURITY | COPYFILE_XATTR;
+ const COPYFILE_ALL: u32 = COPYFILE_METADATA | COPYFILE_DATA;
+
+ const COPYFILE_STATE_COPIED: u32 = 8;
+
+ #[allow(non_camel_case_types)]
+ type copyfile_state_t = *mut libc::c_void;
+ #[allow(non_camel_case_types)]
+ type copyfile_flags_t = u32;
+
+ extern "C" {
+ fn fcopyfile(
+ from: libc::c_int,
+ to: libc::c_int,
+ state: copyfile_state_t,
+ flags: copyfile_flags_t,
+ ) -> libc::c_int;
+ fn copyfile_state_alloc() -> copyfile_state_t;
+ fn copyfile_state_free(state: copyfile_state_t) -> libc::c_int;
+ fn copyfile_state_get(
+ state: copyfile_state_t,
+ flag: u32,
+ dst: *mut libc::c_void,
+ ) -> libc::c_int;
+ }
+
+ struct FreeOnDrop(copyfile_state_t);
+ impl Drop for FreeOnDrop {
+ fn drop(&mut self) {
+ // The code below ensures that `FreeOnDrop` is never a null pointer
+ unsafe {
+ // `copyfile_state_free` returns -1 if the `to` or `from` files
+ // cannot be closed. However, this is not considered an
+ // error.
+ copyfile_state_free(self.0);
+ }
+ }
+ }
+
+ // macOS versions prior to 10.12 don't support `fclonefileat`.
+ // We store the availability in a global to avoid unnecessary syscalls
+ static HAS_FCLONEFILEAT: AtomicBool = AtomicBool::new(true);
+ syscall! {
+ fn fclonefileat(
+ srcfd: libc::c_int,
+ dst_dirfd: libc::c_int,
+ dst: *const libc::c_char,
+ flags: libc::c_int
+ ) -> libc::c_int
+ }
+
+ let (reader, reader_metadata) = open_from(from)?;
+
+ // Opportunistically attempt to create a copy-on-write clone of `from`
+ // using `fclonefileat`.
+ if HAS_FCLONEFILEAT.load(Ordering::Relaxed) {
+ let to = cstr(to)?;
+ let clonefile_result =
+ cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) });
+ match clonefile_result {
+ Ok(_) => return Ok(reader_metadata.len()),
+ Err(err) => match err.raw_os_error() {
+ // `fclonefileat` will fail on non-APFS volumes, if the
+ // destination already exists, or if the source and destination
+ // are on different devices. In all these cases `fcopyfile`
+ // should succeed. 
+ Some(libc::ENOTSUP) | Some(libc::EEXIST) | Some(libc::EXDEV) => (), + Some(libc::ENOSYS) => HAS_FCLONEFILEAT.store(false, Ordering::Relaxed), + _ => return Err(err), + }, + } + } + + // Fall back to using `fcopyfile` if `fclonefileat` does not succeed. + let (writer, writer_metadata) = open_to_and_set_permissions(to, reader_metadata)?; + + // We ensure that `FreeOnDrop` never contains a null pointer so it is + // always safe to call `copyfile_state_free` + let state = unsafe { + let state = copyfile_state_alloc(); + if state.is_null() { + return Err(crate::io::Error::last_os_error()); + } + FreeOnDrop(state) + }; + + let flags = if writer_metadata.is_file() { COPYFILE_ALL } else { COPYFILE_DATA }; + + cvt(unsafe { fcopyfile(reader.as_raw_fd(), writer.as_raw_fd(), state.0, flags) })?; + + let mut bytes_copied: libc::off_t = 0; + cvt(unsafe { + copyfile_state_get( + state.0, + COPYFILE_STATE_COPIED, + &mut bytes_copied as *mut libc::off_t as *mut libc::c_void, + ) + })?; + Ok(bytes_copied as u64) +} diff --git a/library/std/src/sys/unix/io.rs b/library/std/src/sys/unix/io.rs new file mode 100644 index 00000000000..deb5ee76bd0 --- /dev/null +++ b/library/std/src/sys/unix/io.rs @@ -0,0 +1,76 @@ +use crate::marker::PhantomData; +use crate::slice; + +use libc::{c_void, iovec}; + +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct IoSlice<'a> { + vec: iovec, + _p: PhantomData<&'a [u8]>, +} + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice { + vec: iovec { iov_base: buf.as_ptr() as *mut u8 as *mut c_void, iov_len: buf.len() }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.iov_len < n { + panic!("advancing IoSlice beyond its length"); + } + + unsafe { + self.vec.iov_len -= n; + self.vec.iov_base = self.vec.iov_base.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } +} + +#[repr(transparent)] +pub struct IoSliceMut<'a> { + vec: iovec, + _p: PhantomData<&'a mut [u8]>, +} + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut { + vec: iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len() }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.iov_len < n { + panic!("advancing IoSliceMut beyond its length"); + } + + unsafe { + self.vec.iov_len -= n; + self.vec.iov_base = self.vec.iov_base.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } +} diff --git a/library/std/src/sys/unix/l4re.rs b/library/std/src/sys/unix/l4re.rs new file mode 100644 index 00000000000..a2912387108 --- /dev/null +++ b/library/std/src/sys/unix/l4re.rs @@ -0,0 +1,503 @@ +macro_rules! 
unimpl { + () => { + return Err(io::Error::new(io::ErrorKind::Other, "No networking available on L4Re.")); + }; +} + +pub mod net { + #![allow(warnings)] + use crate::convert::TryFrom; + use crate::fmt; + use crate::io::{self, IoSlice, IoSliceMut}; + use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; + use crate::sys::fd::FileDesc; + use crate::sys_common::{AsInner, FromInner, IntoInner}; + use crate::time::Duration; + + #[allow(unused_extern_crates)] + pub extern crate libc as netc; + + pub struct Socket(FileDesc); + impl Socket { + pub fn new(_: &SocketAddr, _: libc::c_int) -> io::Result<Socket> { + unimpl!(); + } + + pub fn new_raw(_: libc::c_int, _: libc::c_int) -> io::Result<Socket> { + unimpl!(); + } + + pub fn new_pair(_: libc::c_int, _: libc::c_int) -> io::Result<(Socket, Socket)> { + unimpl!(); + } + + pub fn connect_timeout(&self, _: &SocketAddr, _: Duration) -> io::Result<()> { + unimpl!(); + } + + pub fn accept( + &self, + _: *mut libc::sockaddr, + _: *mut libc::socklen_t, + ) -> io::Result<Socket> { + unimpl!(); + } + + pub fn duplicate(&self) -> io::Result<Socket> { + unimpl!(); + } + + pub fn read(&self, _: &mut [u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + unimpl!(); + } + + pub fn is_read_vectored(&self) -> bool { + unimpl!(); + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unimpl!(); + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unimpl!(); + } + + pub fn write(&self, _: &[u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> { + unimpl!(); + } + + pub fn is_write_vectored(&self) -> bool { + unimpl!(); + } + + pub fn set_timeout(&self, _: Option<Duration>, _: libc::c_int) -> io::Result<()> { + unimpl!(); + } + + pub fn timeout(&self, _: libc::c_int) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + unimpl!(); + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn nodelay(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unimpl!(); + } + } + + impl AsInner<libc::c_int> for Socket { + fn as_inner(&self) -> &libc::c_int { + self.0.as_inner() + } + } + + impl FromInner<libc::c_int> for Socket { + fn from_inner(fd: libc::c_int) -> Socket { + Socket(FileDesc::new(fd)) + } + } + + impl IntoInner<libc::c_int> for Socket { + fn into_inner(self) -> libc::c_int { + self.0.into_raw() + } + } + + pub struct TcpStream { + inner: Socket, + } + + impl TcpStream { + pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + unimpl!(); + } + + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> { + unimpl!(); + } + + pub fn socket(&self) -> &Socket { + &self.inner + } + + pub fn into_socket(self) -> Socket { + self.inner + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unimpl!(); + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unimpl!(); + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn peek(&self, _: &mut [u8]) -> 
io::Result<usize> { + unimpl!(); + } + + pub fn read(&self, _: &mut [u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + unimpl!(); + } + + pub fn is_read_vectored(&self) -> bool { + unimpl!(); + } + + pub fn write(&self, _: &[u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> { + unimpl!(); + } + + pub fn is_write_vectored(&self) -> bool { + unimpl!(); + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + unimpl!(); + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unimpl!(); + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + unimpl!(); + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + unimpl!(); + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn nodelay(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn ttl(&self) -> io::Result<u32> { + unimpl!(); + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unimpl!(); + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + } + + impl FromInner<Socket> for TcpStream { + fn from_inner(socket: Socket) -> TcpStream { + TcpStream { inner: socket } + } + } + + impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "No networking support available on L4Re") + } + } + + pub struct TcpListener { + inner: Socket, + } + + impl TcpListener { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + unimpl!(); + } + + pub fn socket(&self) -> &Socket { + &self.inner + } + + pub fn into_socket(self) -> Socket { + self.inner + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unimpl!(); + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + unimpl!(); + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + unimpl!(); + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn ttl(&self) -> io::Result<u32> { + unimpl!(); + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn only_v6(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unimpl!(); + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + } + + impl FromInner<Socket> for TcpListener { + fn from_inner(socket: Socket) -> TcpListener { + TcpListener { inner: socket } + } + } + + impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "No networking support available on L4Re.") + } + } + + pub struct UdpSocket { + inner: Socket, + } + + impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + unimpl!(); + } + + pub fn socket(&self) -> &Socket { + &self.inner + } + + pub fn into_socket(self) -> Socket { + self.inner + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + unimpl!(); + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unimpl!(); + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unimpl!(); + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unimpl!(); + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + unimpl!(); + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + unimpl!(); + } + + pub fn 
set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unimpl!(); + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unimpl!(); + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + unimpl!(); + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn broadcast(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + unimpl!(); + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + unimpl!(); + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unimpl!(); + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unimpl!(); + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unimpl!(); + } + + pub fn ttl(&self) -> io::Result<u32> { + unimpl!(); + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unimpl!(); + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unimpl!(); + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + unimpl!(); + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + unimpl!(); + } + } + + impl FromInner<Socket> for UdpSocket { + fn from_inner(socket: Socket) -> UdpSocket { + UdpSocket { inner: socket } + } + } + + impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "No networking support on L4Re available.") + } + } + + pub struct LookupHost { + original: *mut libc::addrinfo, + cur: *mut libc::addrinfo, + } + + impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + None + } + } + + impl LookupHost { + pub fn port(&self) -> u16 { + unimpl!(); + } + } + + unsafe impl Sync for LookupHost {} + unsafe impl Send for LookupHost {} + + impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(_v: &str) -> io::Result<LookupHost> { + unimpl!(); + } + } + + impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> { + unimpl!(); + } + } +} diff --git a/library/std/src/sys/unix/memchr.rs b/library/std/src/sys/unix/memchr.rs new file mode 100644 index 00000000000..a9273ea676c --- /dev/null +++ b/library/std/src/sys/unix/memchr.rs @@ -0,0 +1,38 @@ +// Original implementation taken from rust-memchr. 
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch + +pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> { + let p = unsafe { + libc::memchr( + haystack.as_ptr() as *const libc::c_void, + needle as libc::c_int, + haystack.len(), + ) + }; + if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) } +} + +pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> { + #[cfg(target_os = "linux")] + fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> { + // GNU's memrchr() will - unlike memchr() - error if haystack is empty. + if haystack.is_empty() { + return None; + } + let p = unsafe { + libc::memrchr( + haystack.as_ptr() as *const libc::c_void, + needle as libc::c_int, + haystack.len(), + ) + }; + if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) } + } + + #[cfg(not(target_os = "linux"))] + fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> { + core::slice::memchr::memrchr(needle, haystack) + } + + memrchr_specific(needle, haystack) +} diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs new file mode 100644 index 00000000000..eddf00d3979 --- /dev/null +++ b/library/std/src/sys/unix/mod.rs @@ -0,0 +1,168 @@ +#![allow(missing_docs, nonstandard_style)] + +use crate::io::ErrorKind; + +#[cfg(any(doc, target_os = "linux"))] +pub use crate::os::linux as platform; + +#[cfg(all(not(doc), target_os = "android"))] +pub use crate::os::android as platform; +#[cfg(all(not(doc), target_os = "dragonfly"))] +pub use crate::os::dragonfly as platform; +#[cfg(all(not(doc), target_os = "emscripten"))] +pub use crate::os::emscripten as platform; +#[cfg(all(not(doc), target_os = "freebsd"))] +pub use crate::os::freebsd as platform; +#[cfg(all(not(doc), target_os = "fuchsia"))] +pub use crate::os::fuchsia as platform; +#[cfg(all(not(doc), target_os = "haiku"))] +pub use crate::os::haiku as platform; +#[cfg(all(not(doc), target_os = "illumos"))] +pub use crate::os::illumos as platform; +#[cfg(all(not(doc), target_os = "ios"))] +pub use crate::os::ios as platform; +#[cfg(all(not(doc), target_os = "l4re"))] +pub use crate::os::linux as platform; +#[cfg(all(not(doc), target_os = "macos"))] +pub use crate::os::macos as platform; +#[cfg(all(not(doc), target_os = "netbsd"))] +pub use crate::os::netbsd as platform; +#[cfg(all(not(doc), target_os = "openbsd"))] +pub use crate::os::openbsd as platform; +#[cfg(all(not(doc), target_os = "redox"))] +pub use crate::os::redox as platform; +#[cfg(all(not(doc), target_os = "solaris"))] +pub use crate::os::solaris as platform; + +pub use self::rand::hashmap_random_keys; +pub use libc::strlen; + +#[macro_use] +pub mod weak; + +pub mod alloc; +pub mod android; +pub mod args; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod ext; +pub mod fd; +pub mod fs; +pub mod io; +#[cfg(target_os = "l4re")] +mod l4re; +pub mod memchr; +pub mod mutex; +#[cfg(not(target_os = "l4re"))] +pub mod net; +#[cfg(target_os = "l4re")] +pub use self::l4re::net; +pub mod os; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rand; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +pub mod thread_local_dtor; +pub mod thread_local_key; +pub mod time; + +pub use crate::sys_common::os_str_bytes as os_str; + +#[cfg(not(test))] +pub fn init() { + // By default, some platforms will send a *signal* when an EPIPE error + // would otherwise be delivered. 
This runtime doesn't install a SIGPIPE + // handler, causing it to kill the program, which isn't exactly what we + // want! + // + // Hence, we set SIGPIPE to ignore when the program starts up in order + // to prevent this problem. + unsafe { + reset_sigpipe(); + } + + #[cfg(not(any(target_os = "emscripten", target_os = "fuchsia")))] + unsafe fn reset_sigpipe() { + assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR); + } + #[cfg(any(target_os = "emscripten", target_os = "fuchsia"))] + unsafe fn reset_sigpipe() {} +} + +#[cfg(target_os = "android")] +pub use crate::sys::android::signal; +#[cfg(not(target_os = "android"))] +pub use libc::signal; + +pub fn decode_error_kind(errno: i32) -> ErrorKind { + match errno as libc::c_int { + libc::ECONNREFUSED => ErrorKind::ConnectionRefused, + libc::ECONNRESET => ErrorKind::ConnectionReset, + libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied, + libc::EPIPE => ErrorKind::BrokenPipe, + libc::ENOTCONN => ErrorKind::NotConnected, + libc::ECONNABORTED => ErrorKind::ConnectionAborted, + libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, + libc::EADDRINUSE => ErrorKind::AddrInUse, + libc::ENOENT => ErrorKind::NotFound, + libc::EINTR => ErrorKind::Interrupted, + libc::EINVAL => ErrorKind::InvalidInput, + libc::ETIMEDOUT => ErrorKind::TimedOut, + libc::EEXIST => ErrorKind::AlreadyExists, + + // These two constants can have the same value on some systems, + // but different values on others, so we can't use a match + // clause + x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => ErrorKind::WouldBlock, + + _ => ErrorKind::Other, + } +} + +#[doc(hidden)] +pub trait IsMinusOne { + fn is_minus_one(&self) -> bool; +} + +macro_rules! impl_is_minus_one { + ($($t:ident)*) => ($(impl IsMinusOne for $t { + fn is_minus_one(&self) -> bool { + *self == -1 + } + })*) +} + +impl_is_minus_one! { i8 i16 i32 i64 isize } + +pub fn cvt<T: IsMinusOne>(t: T) -> crate::io::Result<T> { + if t.is_minus_one() { Err(crate::io::Error::last_os_error()) } else { Ok(t) } +} + +pub fn cvt_r<T, F>(mut f: F) -> crate::io::Result<T> +where + T: IsMinusOne, + F: FnMut() -> T, +{ + loop { + match cvt(f()) { + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + other => return other, + } + } +} + +// On Unix-like platforms, libc::abort will unregister signal handlers +// including the SIGABRT handler, preventing the abort from being blocked, and +// fclose streams, with the side effect of flushing them so libc buffered +// output will be printed. Additionally the shell will generally print a more +// understandable error message like "Abort trap" rather than "Illegal +// instruction" that intrinsics::abort would cause, as intrinsics::abort is +// implemented as an illegal instruction. +pub fn abort_internal() -> ! { + unsafe { libc::abort() } +} diff --git a/library/std/src/sys/unix/mutex.rs b/library/std/src/sys/unix/mutex.rs new file mode 100644 index 00000000000..45c600f75f5 --- /dev/null +++ b/library/std/src/sys/unix/mutex.rs @@ -0,0 +1,137 @@ +use crate::cell::UnsafeCell; +use crate::mem::MaybeUninit; + +pub struct Mutex { + inner: UnsafeCell<libc::pthread_mutex_t>, +} + +#[inline] +pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t { + m.inner.get() +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[allow(dead_code)] // sys isn't exported yet +impl Mutex { + pub const fn new() -> Mutex { + // Might be moved to a different address, so it is better to avoid + // initialization of potentially opaque OS data before it landed. 
+ // Be very careful using this newly constructed `Mutex`, reentrant + // locking is undefined behavior until `init` is called! + Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } + } + #[inline] + pub unsafe fn init(&mut self) { + // Issue #33770 + // + // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have + // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you + // try to re-lock it from the same thread when you already hold a lock + // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html). + // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL + // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that + // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same + // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in + // a Mutex where re-locking is UB. + // + // In practice, glibc takes advantage of this undefined behavior to + // implement hardware lock elision, which uses hardware transactional + // memory to avoid acquiring the lock. While a transaction is in + // progress, the lock appears to be unlocked. This isn't a problem for + // other threads since the transactional memory will abort if a conflict + // is detected, however no abort is generated when re-locking from the + // same thread. + // + // Since locking the same mutex twice will result in two aliasing &mut + // references, we instead create the mutex with type + // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to + // re-lock it from the same thread, thus avoiding undefined behavior. + let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit(); + let r = libc::pthread_mutexattr_init(attr.as_mut_ptr()); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutex_init(self.inner.get(), attr.as_ptr()); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutexattr_destroy(attr.as_mut_ptr()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn lock(&self) { + let r = libc::pthread_mutex_lock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn unlock(&self) { + let r = libc::pthread_mutex_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn try_lock(&self) -> bool { + libc::pthread_mutex_trylock(self.inner.get()) == 0 + } + #[inline] + #[cfg(not(target_os = "dragonfly"))] + pub unsafe fn destroy(&self) { + let r = libc::pthread_mutex_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + #[cfg(target_os = "dragonfly")] + pub unsafe fn destroy(&self) { + let r = libc::pthread_mutex_destroy(self.inner.get()); + // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a + // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. + // Once it is used (locked/unlocked) or pthread_mutex_init() is called, + // this behaviour no longer occurs. 
+ debug_assert!(r == 0 || r == libc::EINVAL); + } +} + +pub struct ReentrantMutex { + inner: UnsafeCell<libc::pthread_mutex_t>, +} + +unsafe impl Send for ReentrantMutex {} +unsafe impl Sync for ReentrantMutex {} + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } + } + + pub unsafe fn init(&self) { + let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit(); + let result = libc::pthread_mutexattr_init(attr.as_mut_ptr()); + debug_assert_eq!(result, 0); + let result = + libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE); + debug_assert_eq!(result, 0); + let result = libc::pthread_mutex_init(self.inner.get(), attr.as_ptr()); + debug_assert_eq!(result, 0); + let result = libc::pthread_mutexattr_destroy(attr.as_mut_ptr()); + debug_assert_eq!(result, 0); + } + + pub unsafe fn lock(&self) { + let result = libc::pthread_mutex_lock(self.inner.get()); + debug_assert_eq!(result, 0); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + libc::pthread_mutex_trylock(self.inner.get()) == 0 + } + + pub unsafe fn unlock(&self) { + let result = libc::pthread_mutex_unlock(self.inner.get()); + debug_assert_eq!(result, 0); + } + + pub unsafe fn destroy(&self) { + let result = libc::pthread_mutex_destroy(self.inner.get()); + debug_assert_eq!(result, 0); + } +} diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs new file mode 100644 index 00000000000..011325fddc5 --- /dev/null +++ b/library/std/src/sys/unix/net.rs @@ -0,0 +1,382 @@ +use crate::cmp; +use crate::ffi::CStr; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::net::{Shutdown, SocketAddr}; +use crate::str; +use crate::sys::fd::FileDesc; +use crate::sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr}; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +use crate::time::{Duration, Instant}; + +use libc::{c_int, c_void, size_t, sockaddr, socklen_t, EAI_SYSTEM, MSG_PEEK}; + +pub use crate::sys::{cvt, cvt_r}; + +#[allow(unused_extern_crates)] +pub extern crate libc as netc; + +pub type wrlen_t = size_t; + +pub struct Socket(FileDesc); + +pub fn init() {} + +pub fn cvt_gai(err: c_int) -> io::Result<()> { + if err == 0 { + return Ok(()); + } + + // We may need to trigger a glibc workaround. See on_resolver_failure() for details. + on_resolver_failure(); + + if err == EAI_SYSTEM { + return Err(io::Error::last_os_error()); + } + + let detail = unsafe { + str::from_utf8(CStr::from_ptr(libc::gai_strerror(err)).to_bytes()).unwrap().to_owned() + }; + Err(io::Error::new( + io::ErrorKind::Other, + &format!("failed to lookup address information: {}", detail)[..], + )) +} + +impl Socket { + pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> { + let fam = match *addr { + SocketAddr::V4(..) => libc::AF_INET, + SocketAddr::V6(..) => libc::AF_INET6, + }; + Socket::new_raw(fam, ty) + } + + pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> { + unsafe { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + // On Linux we pass the SOCK_CLOEXEC flag to atomically create + // the socket and set it as CLOEXEC, added in 2.6.27. 
+ let fd = cvt(libc::socket(fam, ty | libc::SOCK_CLOEXEC, 0))?; + Ok(Socket(FileDesc::new(fd))) + } else { + let fd = cvt(libc::socket(fam, ty, 0))?; + let fd = FileDesc::new(fd); + fd.set_cloexec()?; + let socket = Socket(fd); + + // macOS and iOS use `SO_NOSIGPIPE` as a `setsockopt` + // flag to disable `SIGPIPE` emission on socket. + #[cfg(target_vendor = "apple")] + setsockopt(&socket, libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1)?; + + Ok(socket) + } + } + } + } + + pub fn new_pair(fam: c_int, ty: c_int) -> io::Result<(Socket, Socket)> { + unsafe { + let mut fds = [0, 0]; + + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + // Like above, set cloexec atomically + cvt(libc::socketpair(fam, ty | libc::SOCK_CLOEXEC, 0, fds.as_mut_ptr()))?; + Ok((Socket(FileDesc::new(fds[0])), Socket(FileDesc::new(fds[1])))) + } else { + cvt(libc::socketpair(fam, ty, 0, fds.as_mut_ptr()))?; + let a = FileDesc::new(fds[0]); + let b = FileDesc::new(fds[1]); + a.set_cloexec()?; + b.set_cloexec()?; + Ok((Socket(a), Socket(b))) + } + } + } + } + + pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = unsafe { + let (addrp, len) = addr.into_inner(); + cvt(libc::connect(self.0.raw(), addrp, len)) + }; + self.set_nonblocking(false)?; + + match r { + Ok(_) => return Ok(()), + // there's no ErrorKind for EINPROGRESS :( + Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + let mut pollfd = libc::pollfd { fd: self.0.raw(), events: libc::POLLOUT, revents: 0 }; + + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let start = Instant::now(); + + loop { + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")); + } + + let timeout = timeout - elapsed; + let mut timeout = timeout + .as_secs() + .saturating_mul(1_000) + .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000); + if timeout == 0 { + timeout = 1; + } + + let timeout = cmp::min(timeout, c_int::MAX as u64) as c_int; + + match unsafe { libc::poll(&mut pollfd, 1, timeout) } { + -1 => { + let err = io::Error::last_os_error(); + if err.kind() != io::ErrorKind::Interrupted { + return Err(err); + } + } + 0 => {} + _ => { + // linux returns POLLOUT|POLLERR|POLLHUP for refused connections (!), so look + // for POLLHUP rather than read readiness + if pollfd.revents & libc::POLLHUP != 0 { + let e = self.take_error()?.unwrap_or_else(|| { + io::Error::new(io::ErrorKind::Other, "no error set after POLLHUP") + }); + return Err(e); + } + + return Ok(()); + } + } + } + } + + pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> { + // Unfortunately the only known way right now to accept a socket and + // atomically set the CLOEXEC flag is to use the `accept4` syscall on + // Linux. This was added in 2.6.28, glibc 2.10 and musl 0.9.5. + cfg_if::cfg_if! 
{ + if #[cfg(target_os = "linux")] { + let fd = cvt_r(|| unsafe { + libc::accept4(self.0.raw(), storage, len, libc::SOCK_CLOEXEC) + })?; + Ok(Socket(FileDesc::new(fd))) + } else { + let fd = cvt_r(|| unsafe { libc::accept(self.0.raw(), storage, len) })?; + let fd = FileDesc::new(fd); + fd.set_cloexec()?; + Ok(Socket(fd)) + } + } + } + + pub fn duplicate(&self) -> io::Result<Socket> { + self.0.duplicate().map(Socket) + } + + fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags) + })?; + Ok(ret as usize) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, 0) + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, MSG_PEEK) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + fn recv_from_with_flags( + &self, + buf: &mut [u8], + flags: c_int, + ) -> io::Result<(usize, SocketAddr)> { + let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; + let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t; + + let n = cvt(unsafe { + libc::recvfrom( + self.0.raw(), + buf.as_mut_ptr() as *mut c_void, + buf.len(), + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen, + ) + })?; + Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?)) + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, 0) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, MSG_PEEK) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn set_timeout(&self, dur: Option<Duration>, kind: libc::c_int) -> io::Result<()> { + let timeout = match dur { + Some(dur) => { + if dur.as_secs() == 0 && dur.subsec_nanos() == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let secs = if dur.as_secs() > libc::time_t::MAX as u64 { + libc::time_t::MAX + } else { + dur.as_secs() as libc::time_t + }; + let mut timeout = libc::timeval { + tv_sec: secs, + tv_usec: dur.subsec_micros() as libc::suseconds_t, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + timeout + } + None => libc::timeval { tv_sec: 0, tv_usec: 0 }, + }; + setsockopt(self, libc::SOL_SOCKET, kind, timeout) + } + + pub fn timeout(&self, kind: libc::c_int) -> io::Result<Option<Duration>> { + let raw: libc::timeval = getsockopt(self, libc::SOL_SOCKET, kind)?; + if raw.tv_sec == 0 && raw.tv_usec == 0 { + Ok(None) + } else { + let sec = raw.tv_sec as u64; + let nsec = (raw.tv_usec as u32) * 1000; + Ok(Some(Duration::new(sec, nsec))) + } + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => libc::SHUT_WR, + Shutdown::Read => libc::SHUT_RD, + Shutdown::Both => libc::SHUT_RDWR, + }; + cvt(unsafe { libc::shutdown(self.0.raw(), how) })?; + Ok(()) + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + setsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY, nodelay as 
c_int) + } + + pub fn nodelay(&self) -> io::Result<bool> { + let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY)?; + Ok(raw != 0) + } + + #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + let mut nonblocking = nonblocking as libc::c_int; + cvt(unsafe { libc::ioctl(*self.as_inner(), libc::FIONBIO, &mut nonblocking) }).map(drop) + } + + #[cfg(any(target_os = "solaris", target_os = "illumos"))] + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + // FIONBIO is inadequate for sockets on illumos/Solaris, so use the + // fcntl(F_[GS]ETFL)-based method provided by FileDesc instead. + self.0.set_nonblocking(nonblocking) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?; + if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) } + } +} + +impl AsInner<c_int> for Socket { + fn as_inner(&self) -> &c_int { + self.0.as_inner() + } +} + +impl FromInner<c_int> for Socket { + fn from_inner(fd: c_int) -> Socket { + Socket(FileDesc::new(fd)) + } +} + +impl IntoInner<c_int> for Socket { + fn into_inner(self) -> c_int { + self.0.into_raw() + } +} + +// In versions of glibc prior to 2.26, there's a bug where the DNS resolver +// will cache the contents of /etc/resolv.conf, so changes to that file on disk +// can be ignored by a long-running program. That can break DNS lookups on e.g. +// laptops where the network comes and goes. See +// https://sourceware.org/bugzilla/show_bug.cgi?id=984. Note however that some +// distros including Debian have patched glibc to fix this for a long time. +// +// A workaround for this bug is to call the res_init libc function, to clear +// the cached configs. Unfortunately, while we believe glibc's implementation +// of res_init is thread-safe, we know that other implementations are not +// (https://github.com/rust-lang/rust/issues/43592). Code here in libstd could +// try to synchronize its res_init calls with a Mutex, but that wouldn't +// protect programs that call into libc in other ways. So instead of calling +// res_init unconditionally, we call it only when we detect we're linking +// against glibc version < 2.26. (That is, when we both know its needed and +// believe it's thread-safe). +#[cfg(target_env = "gnu")] +fn on_resolver_failure() { + use crate::sys; + + // If the version fails to parse, we treat it the same as "not glibc". + if let Some(version) = sys::os::glibc_version() { + if version < (2, 26) { + unsafe { libc::res_init() }; + } + } +} + +#[cfg(not(target_env = "gnu"))] +fn on_resolver_failure() {} diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs new file mode 100644 index 00000000000..2fcb5b9c4e6 --- /dev/null +++ b/library/std/src/sys/unix/os.rs @@ -0,0 +1,674 @@ +//! 
Implementation of `std::os` functionality for unix systems + +#![allow(unused_imports)] // lots of cfg code here + +use crate::os::unix::prelude::*; + +use crate::error::Error as StdError; +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::iter; +use crate::marker::PhantomData; +use crate::mem; +use crate::memchr; +use crate::path::{self, PathBuf}; +use crate::ptr; +use crate::slice; +use crate::str; +use crate::sys::cvt; +use crate::sys::fd; +use crate::sys_common::mutex::{Mutex, MutexGuard}; +use crate::vec; + +use libc::{c_char, c_int, c_void}; + +const TMPBUF_SZ: usize = 128; + +cfg_if::cfg_if! { + if #[cfg(target_os = "redox")] { + const PATH_SEPARATOR: u8 = b';'; + } else { + const PATH_SEPARATOR: u8 = b':'; + } +} + +extern "C" { + #[cfg(not(target_os = "dragonfly"))] + #[cfg_attr( + any( + target_os = "linux", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "l4re" + ), + link_name = "__errno_location" + )] + #[cfg_attr( + any( + target_os = "netbsd", + target_os = "openbsd", + target_os = "android", + target_os = "redox", + target_env = "newlib" + ), + link_name = "__errno" + )] + #[cfg_attr(any(target_os = "solaris", target_os = "illumos"), link_name = "___errno")] + #[cfg_attr( + any(target_os = "macos", target_os = "ios", target_os = "freebsd"), + link_name = "__error" + )] + #[cfg_attr(target_os = "haiku", link_name = "_errnop")] + fn errno_location() -> *mut c_int; +} + +/// Returns the platform-specific value of errno +#[cfg(not(target_os = "dragonfly"))] +pub fn errno() -> i32 { + unsafe { (*errno_location()) as i32 } +} + +/// Sets the platform-specific value of errno +#[cfg(all(not(target_os = "linux"), not(target_os = "dragonfly")))] // needed for readdir and syscall! +#[allow(dead_code)] // but not all target cfgs actually end up using it +pub fn set_errno(e: i32) { + unsafe { *errno_location() = e as c_int } +} + +#[cfg(target_os = "dragonfly")] +pub fn errno() -> i32 { + extern "C" { + #[thread_local] + static errno: c_int; + } + + unsafe { errno as i32 } +} + +#[cfg(target_os = "dragonfly")] +pub fn set_errno(e: i32) { + extern "C" { + #[thread_local] + static mut errno: c_int; + } + + unsafe { + errno = e; + } +} + +/// Gets a detailed string description for the given error number. +pub fn error_string(errno: i32) -> String { + extern "C" { + #[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")] + fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int; + } + + let mut buf = [0 as c_char; TMPBUF_SZ]; + + let p = buf.as_mut_ptr(); + unsafe { + if strerror_r(errno as c_int, p, buf.len()) < 0 { + panic!("strerror_r failure"); + } + + let p = p as *const _; + str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned() + } +} + +pub fn getcwd() -> io::Result<PathBuf> { + let mut buf = Vec::with_capacity(512); + loop { + unsafe { + let ptr = buf.as_mut_ptr() as *mut libc::c_char; + if !libc::getcwd(ptr, buf.capacity()).is_null() { + let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len(); + buf.set_len(len); + buf.shrink_to_fit(); + return Ok(PathBuf::from(OsString::from_vec(buf))); + } else { + let error = io::Error::last_os_error(); + if error.raw_os_error() != Some(libc::ERANGE) { + return Err(error); + } + } + + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. 
+ let cap = buf.capacity(); + buf.set_len(cap); + buf.reserve(1); + } + } +} + +pub fn chdir(p: &path::Path) -> io::Result<()> { + let p: &OsStr = p.as_ref(); + let p = CString::new(p.as_bytes())?; + unsafe { + match libc::chdir(p.as_ptr()) == (0 as c_int) { + true => Ok(()), + false => Err(io::Error::last_os_error()), + } + } +} + +pub struct SplitPaths<'a> { + iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>, +} + +pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> { + fn bytes_to_path(b: &[u8]) -> PathBuf { + PathBuf::from(<OsStr as OsStrExt>::from_bytes(b)) + } + fn is_separator(b: &u8) -> bool { + *b == PATH_SEPARATOR + } + let unparsed = unparsed.as_bytes(); + SplitPaths { + iter: unparsed + .split(is_separator as fn(&u8) -> bool) + .map(bytes_to_path as fn(&[u8]) -> PathBuf), + } +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + let mut joined = Vec::new(); + + for (i, path) in paths.enumerate() { + let path = path.as_ref().as_bytes(); + if i > 0 { + joined.push(PATH_SEPARATOR) + } + if path.contains(&PATH_SEPARATOR) { + return Err(JoinPathsError); + } + joined.extend_from_slice(path); + } + Ok(OsStringExt::from_vec(joined)) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "path segment contains separator `{}`", PATH_SEPARATOR) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "failed to join paths" + } +} + +#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] +pub fn current_exe() -> io::Result<PathBuf> { + unsafe { + let mut mib = [ + libc::CTL_KERN as c_int, + libc::KERN_PROC as c_int, + libc::KERN_PROC_PATHNAME as c_int, + -1 as c_int, + ]; + let mut sz = 0; + cvt(libc::sysctl( + mib.as_mut_ptr(), + mib.len() as libc::c_uint, + ptr::null_mut(), + &mut sz, + ptr::null_mut(), + 0, + ))?; + if sz == 0 { + return Err(io::Error::last_os_error()); + } + let mut v: Vec<u8> = Vec::with_capacity(sz); + cvt(libc::sysctl( + mib.as_mut_ptr(), + mib.len() as libc::c_uint, + v.as_mut_ptr() as *mut libc::c_void, + &mut sz, + ptr::null_mut(), + 0, + ))?; + if sz == 0 { + return Err(io::Error::last_os_error()); + } + v.set_len(sz - 1); // chop off trailing NUL + Ok(PathBuf::from(OsString::from_vec(v))) + } +} + +#[cfg(target_os = "netbsd")] +pub fn current_exe() -> io::Result<PathBuf> { + fn sysctl() -> io::Result<PathBuf> { + unsafe { + let mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, -1, libc::KERN_PROC_PATHNAME]; + let mut path_len: usize = 0; + cvt(libc::sysctl( + mib.as_ptr(), + mib.len() as libc::c_uint, + ptr::null_mut(), + &mut path_len, + ptr::null(), + 0, + ))?; + if path_len <= 1 { + return Err(io::Error::new( + io::ErrorKind::Other, + "KERN_PROC_PATHNAME sysctl returned zero-length string", + )); + } + let mut path: Vec<u8> = Vec::with_capacity(path_len); + cvt(libc::sysctl( + mib.as_ptr(), + mib.len() as libc::c_uint, + path.as_ptr() as *mut libc::c_void, + &mut path_len, + ptr::null(), + 0, + ))?; + path.set_len(path_len - 1); // chop off NUL + Ok(PathBuf::from(OsString::from_vec(path))) + } + } + fn procfs() -> io::Result<PathBuf> { + let curproc_exe = 
path::Path::new("/proc/curproc/exe"); + if curproc_exe.is_file() { + return crate::fs::read_link(curproc_exe); + } + Err(io::Error::new( + io::ErrorKind::Other, + "/proc/curproc/exe doesn't point to regular file.", + )) + } + sysctl().or_else(|_| procfs()) +} + +#[cfg(target_os = "openbsd")] +pub fn current_exe() -> io::Result<PathBuf> { + unsafe { + let mut mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, libc::getpid(), libc::KERN_PROC_ARGV]; + let mib = mib.as_mut_ptr(); + let mut argv_len = 0; + cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len, ptr::null_mut(), 0))?; + let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize); + cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _, &mut argv_len, ptr::null_mut(), 0))?; + argv.set_len(argv_len as usize); + if argv[0].is_null() { + return Err(io::Error::new(io::ErrorKind::Other, "no current exe available")); + } + let argv0 = CStr::from_ptr(argv[0]).to_bytes(); + if argv0[0] == b'.' || argv0.iter().any(|b| *b == b'/') { + crate::fs::canonicalize(OsStr::from_bytes(argv0)) + } else { + Ok(PathBuf::from(OsStr::from_bytes(argv0))) + } + } +} + +#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))] +pub fn current_exe() -> io::Result<PathBuf> { + match crate::fs::read_link("/proc/self/exe") { + Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::Error::new( + io::ErrorKind::Other, + "no /proc/self/exe available. Is /proc mounted?", + )), + other => other, + } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub fn current_exe() -> io::Result<PathBuf> { + extern "C" { + fn _NSGetExecutablePath(buf: *mut libc::c_char, bufsize: *mut u32) -> libc::c_int; + } + unsafe { + let mut sz: u32 = 0; + _NSGetExecutablePath(ptr::null_mut(), &mut sz); + if sz == 0 { + return Err(io::Error::last_os_error()); + } + let mut v: Vec<u8> = Vec::with_capacity(sz as usize); + let err = _NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz); + if err != 0 { + return Err(io::Error::last_os_error()); + } + v.set_len(sz as usize - 1); // chop off trailing NUL + Ok(PathBuf::from(OsString::from_vec(v))) + } +} + +#[cfg(any(target_os = "solaris", target_os = "illumos"))] +pub fn current_exe() -> io::Result<PathBuf> { + extern "C" { + fn getexecname() -> *const c_char; + } + unsafe { + let path = getexecname(); + if path.is_null() { + Err(io::Error::last_os_error()) + } else { + let filename = CStr::from_ptr(path).to_bytes(); + let path = PathBuf::from(<OsStr as OsStrExt>::from_bytes(filename)); + + // Prepend a current working directory to the path if + // it doesn't contain an absolute pathname. 
+ if filename[0] == b'/' { Ok(path) } else { getcwd().map(|cwd| cwd.join(path)) } + } + } +} + +#[cfg(target_os = "haiku")] +pub fn current_exe() -> io::Result<PathBuf> { + // Use Haiku's image info functions + #[repr(C)] + struct image_info { + id: i32, + type_: i32, + sequence: i32, + init_order: i32, + init_routine: *mut libc::c_void, // function pointer + term_routine: *mut libc::c_void, // function pointer + device: libc::dev_t, + node: libc::ino_t, + name: [libc::c_char; 1024], // MAXPATHLEN + text: *mut libc::c_void, + data: *mut libc::c_void, + text_size: i32, + data_size: i32, + api_version: i32, + abi: i32, + } + + unsafe { + extern "C" { + fn _get_next_image_info( + team_id: i32, + cookie: *mut i32, + info: *mut image_info, + size: i32, + ) -> i32; + } + + let mut info: image_info = mem::zeroed(); + let mut cookie: i32 = 0; + // the executable can be found at team id 0 + let result = + _get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32); + if result != 0 { + use crate::io::ErrorKind; + Err(io::Error::new(ErrorKind::Other, "Error getting executable path")) + } else { + let name = CStr::from_ptr(info.name.as_ptr()).to_bytes(); + Ok(PathBuf::from(OsStr::from_bytes(name))) + } + } +} + +#[cfg(target_os = "redox")] +pub fn current_exe() -> io::Result<PathBuf> { + crate::fs::read_to_string("sys:exe").map(PathBuf::from) +} + +#[cfg(any(target_os = "fuchsia", target_os = "l4re"))] +pub fn current_exe() -> io::Result<PathBuf> { + use crate::io::ErrorKind; + Err(io::Error::new(ErrorKind::Other, "Not yet implemented!")) +} + +pub struct Env { + iter: vec::IntoIter<(OsString, OsString)>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +#[cfg(target_os = "macos")] +pub unsafe fn environ() -> *mut *const *const c_char { + extern "C" { + fn _NSGetEnviron() -> *mut *const *const c_char; + } + _NSGetEnviron() +} + +#[cfg(not(target_os = "macos"))] +pub unsafe fn environ() -> *mut *const *const c_char { + extern "C" { + static mut environ: *const *const c_char; + } + &mut environ +} + +pub unsafe fn env_lock() -> MutexGuard<'static> { + // We never call `ENV_LOCK.init()`, so it is UB to attempt to + // acquire this mutex reentrantly! + static ENV_LOCK: Mutex = Mutex::new(); + ENV_LOCK.lock() +} + +/// Returns a vector of (variable, value) byte-vector pairs for all the +/// environment variables of the current process. +pub fn env() -> Env { + unsafe { + let _guard = env_lock(); + let mut environ = *environ(); + let mut result = Vec::new(); + if !environ.is_null() { + while !(*environ).is_null() { + if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) { + result.push(key_value); + } + environ = environ.add(1); + } + } + return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData }; + } + + fn parse(input: &[u8]) -> Option<(OsString, OsString)> { + // Strategy (copied from glibc): Variable name and value are separated + // by an ASCII equals sign '='. Since a variable name must not be + // empty, allow variable names starting with an equals sign. Skip all + // malformed lines. 
+ if input.is_empty() { + return None; + } + let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1); + pos.map(|p| { + ( + OsStringExt::from_vec(input[..p].to_vec()), + OsStringExt::from_vec(input[p + 1..].to_vec()), + ) + }) + } +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + // environment variables with a nul byte can't be set, so their value is + // always None as well + let k = CString::new(k.as_bytes())?; + unsafe { + let _guard = env_lock(); + let s = libc::getenv(k.as_ptr()) as *const libc::c_char; + let ret = if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) + }; + Ok(ret) + } +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + let k = CString::new(k.as_bytes())?; + let v = CString::new(v.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) + } +} + +pub fn unsetenv(n: &OsStr) -> io::Result<()> { + let nbuf = CString::new(n.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) + } +} + +pub fn page_size() -> usize { + unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } +} + +pub fn temp_dir() -> PathBuf { + crate::env::var_os("TMPDIR").map(PathBuf::from).unwrap_or_else(|| { + if cfg!(target_os = "android") { + PathBuf::from("/data/local/tmp") + } else { + PathBuf::from("/tmp") + } + }) +} + +pub fn home_dir() -> Option<PathBuf> { + return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from); + + #[cfg(any( + target_os = "android", + target_os = "ios", + target_os = "emscripten", + target_os = "redox" + ))] + unsafe fn fallback() -> Option<OsString> { + None + } + #[cfg(not(any( + target_os = "android", + target_os = "ios", + target_os = "emscripten", + target_os = "redox" + )))] + unsafe fn fallback() -> Option<OsString> { + let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) { + n if n < 0 => 512 as usize, + n => n as usize, + }; + let mut buf = Vec::with_capacity(amt); + let mut passwd: libc::passwd = mem::zeroed(); + let mut result = ptr::null_mut(); + match libc::getpwuid_r( + libc::getuid(), + &mut passwd, + buf.as_mut_ptr(), + buf.capacity(), + &mut result, + ) { + 0 if !result.is_null() => { + let ptr = passwd.pw_dir as *const _; + let bytes = CStr::from_ptr(ptr).to_bytes().to_vec(); + Some(OsStringExt::from_vec(bytes)) + } + _ => None, + } + } +} + +pub fn exit(code: i32) -> ! { + unsafe { libc::exit(code as c_int) } +} + +pub fn getpid() -> u32 { + unsafe { libc::getpid() as u32 } +} + +pub fn getppid() -> u32 { + unsafe { libc::getppid() as u32 } +} + +#[cfg(target_env = "gnu")] +pub fn glibc_version() -> Option<(usize, usize)> { + if let Some(Ok(version_str)) = glibc_version_cstr().map(CStr::to_str) { + parse_glibc_version(version_str) + } else { + None + } +} + +#[cfg(target_env = "gnu")] +fn glibc_version_cstr() -> Option<&'static CStr> { + weak! { + fn gnu_get_libc_version() -> *const libc::c_char + } + if let Some(f) = gnu_get_libc_version.get() { + unsafe { Some(CStr::from_ptr(f())) } + } else { + None + } +} + +// Returns Some((major, minor)) if the string is a valid "x.y" version, +// ignoring any extra dot-separated parts. Otherwise return None. 
+#[cfg(target_env = "gnu")] +fn parse_glibc_version(version: &str) -> Option<(usize, usize)> { + let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse(); + match (parsed_ints.next(), parsed_ints.next()) { + (Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)), + _ => None, + } +} + +#[cfg(all(test, target_env = "gnu"))] +mod test { + use super::*; + + #[test] + fn test_glibc_version() { + // This mostly just tests that the weak linkage doesn't panic wildly... + glibc_version(); + } + + #[test] + fn test_parse_glibc_version() { + let cases = [ + ("0.0", Some((0, 0))), + ("01.+2", Some((1, 2))), + ("3.4.5.six", Some((3, 4))), + ("1", None), + ("1.-2", None), + ("1.foo", None), + ("foo.1", None), + ]; + for &(version_str, parsed) in cases.iter() { + assert_eq!(parsed, parse_glibc_version(version_str)); + } + } +} diff --git a/library/std/src/sys/unix/path.rs b/library/std/src/sys/unix/path.rs new file mode 100644 index 00000000000..840a7ae0426 --- /dev/null +++ b/library/std/src/sys/unix/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs new file mode 100644 index 00000000000..7ae37bdda70 --- /dev/null +++ b/library/std/src/sys/unix/pipe.rs @@ -0,0 +1,122 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::sys::fd::FileDesc; +use crate::sys::{cvt, cvt_r}; + +//////////////////////////////////////////////////////////////////////////////// +// Anonymous pipes +//////////////////////////////////////////////////////////////////////////////// + +pub struct AnonPipe(FileDesc); + +pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> { + let mut fds = [0; 2]; + + // The only known way right now to create atomically set the CLOEXEC flag is + // to use the `pipe2` syscall. This was added to Linux in 2.6.27, glibc 2.9 + // and musl 0.9.3, and some other targets also have it. + cfg_if::cfg_if! 
{ + if #[cfg(any( + target_os = "dragonfly", + target_os = "freebsd", + target_os = "linux", + target_os = "netbsd", + target_os = "openbsd", + target_os = "redox" + ))] { + cvt(unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) })?; + Ok((AnonPipe(FileDesc::new(fds[0])), AnonPipe(FileDesc::new(fds[1])))) + } else { + cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?; + + let fd0 = FileDesc::new(fds[0]); + let fd1 = FileDesc::new(fds[1]); + fd0.set_cloexec()?; + fd1.set_cloexec()?; + Ok((AnonPipe(fd0), AnonPipe(fd1))) + } + } +} + +impl AnonPipe { + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn fd(&self) -> &FileDesc { + &self.0 + } + pub fn into_fd(self) -> FileDesc { + self.0 + } +} + +pub fn read2(p1: AnonPipe, v1: &mut Vec<u8>, p2: AnonPipe, v2: &mut Vec<u8>) -> io::Result<()> { + // Set both pipes into nonblocking mode as we're gonna be reading from both + // in the `select` loop below, and we wouldn't want one to block the other! + let p1 = p1.into_fd(); + let p2 = p2.into_fd(); + p1.set_nonblocking(true)?; + p2.set_nonblocking(true)?; + + let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() }; + fds[0].fd = p1.raw(); + fds[0].events = libc::POLLIN; + fds[1].fd = p2.raw(); + fds[1].events = libc::POLLIN; + loop { + // wait for either pipe to become readable using `poll` + cvt_r(|| unsafe { libc::poll(fds.as_mut_ptr(), 2, -1) })?; + + if fds[0].revents != 0 && read(&p1, v1)? { + p2.set_nonblocking(false)?; + return p2.read_to_end(v2).map(drop); + } + if fds[1].revents != 0 && read(&p2, v2)? { + p1.set_nonblocking(false)?; + return p1.read_to_end(v1).map(drop); + } + } + + // Read as much as we can from each pipe, ignoring EWOULDBLOCK or + // EAGAIN. If we hit EOF, then this will happen because the underlying + // reader will return Ok(0), in which case we'll see `Ok` ourselves. In + // this case we flip the other fd back into blocking mode and read + // whatever's leftover on that file descriptor. 
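+ //
+ // In other words, Ok(true) means this pipe has hit EOF and all of its
+ // output has been captured, while Ok(false) means the read would merely
+ // block for now and the poll loop above should keep going.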
+ fn read(fd: &FileDesc, dst: &mut Vec<u8>) -> Result<bool, io::Error> { + match fd.read_to_end(dst) { + Ok(_) => Ok(true), + Err(e) => { + if e.raw_os_error() == Some(libc::EWOULDBLOCK) + || e.raw_os_error() == Some(libc::EAGAIN) + { + Ok(false) + } else { + Err(e) + } + } + } + } +} diff --git a/library/std/src/sys/unix/process/mod.rs b/library/std/src/sys/unix/process/mod.rs new file mode 100644 index 00000000000..553e980f08e --- /dev/null +++ b/library/std/src/sys/unix/process/mod.rs @@ -0,0 +1,13 @@ +pub use self::process_common::{Command, ExitCode, Stdio, StdioPipes}; +pub use self::process_inner::{ExitStatus, Process}; +pub use crate::ffi::OsString as EnvKey; + +mod process_common; +#[cfg(not(target_os = "fuchsia"))] +#[path = "process_unix.rs"] +mod process_inner; +#[cfg(target_os = "fuchsia")] +#[path = "process_fuchsia.rs"] +mod process_inner; +#[cfg(target_os = "fuchsia")] +mod zircon; diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs new file mode 100644 index 00000000000..6e33cdd3c48 --- /dev/null +++ b/library/std/src/sys/unix/process/process_common.rs @@ -0,0 +1,469 @@ +use crate::os::unix::prelude::*; + +use crate::collections::BTreeMap; +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::ptr; +use crate::sys::fd::FileDesc; +use crate::sys::fs::File; +use crate::sys::pipe::{self, AnonPipe}; +use crate::sys_common::process::CommandEnv; + +#[cfg(not(target_os = "fuchsia"))] +use crate::sys::fs::OpenOptions; + +use libc::{c_char, c_int, gid_t, uid_t, EXIT_FAILURE, EXIT_SUCCESS}; + +cfg_if::cfg_if! { + if #[cfg(target_os = "fuchsia")] { + // fuchsia doesn't have /dev/null + } else if #[cfg(target_os = "redox")] { + const DEV_NULL: &str = "null:\0"; + } else { + const DEV_NULL: &str = "/dev/null\0"; + } +} + +// Android with api less than 21 define sig* functions inline, so it is not +// available for dynamic link. Implementing sigemptyset and sigaddset allow us +// to support older Android version (independent of libc version). +// The following implementations are based on https://git.io/vSkNf +cfg_if::cfg_if! { + if #[cfg(target_os = "android")] { + pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int { + set.write_bytes(0u8, 1); + return 0; + } + #[allow(dead_code)] + pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int { + use crate::{slice, mem}; + + let raw = slice::from_raw_parts_mut(set as *mut u8, mem::size_of::<libc::sigset_t>()); + let bit = (signum - 1) as usize; + raw[bit / 8] |= 1 << (bit % 8); + return 0; + } + } else { + pub use libc::{sigemptyset, sigaddset}; + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + // Currently we try hard to ensure that the call to `.exec()` doesn't + // actually allocate any memory. While many platforms try to ensure that + // memory allocation works after a fork in a multithreaded process, it's + // been observed to be buggy and somewhat unreliable, so we do our best to + // just not do it at all! + // + // Along those lines, the `argv` and `envp` raw pointers here are exactly + // what's gonna get passed to `execvp`. The `argv` array starts with the + // `program` and ends with a NULL, and the `envp` pointer, if present, is + // also null-terminated. 
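+ // (Both execvp(3) and posix_spawn(3) require NULL-terminated arrays,
+ // which is why the trailing null pointers are maintained eagerly below.)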
+ // + // Right now we don't support removing arguments, so there's no much fancy + // support there, but we support adding and removing environment variables, + // so a side table is used to track where in the `envp` array each key is + // located. Whenever we add a key we update it in place if it's already + // present, and whenever we remove a key we update the locations of all + // other keys. + program: CString, + args: Vec<CString>, + argv: Argv, + env: CommandEnv, + + cwd: Option<CString>, + uid: Option<uid_t>, + gid: Option<gid_t>, + saw_nul: bool, + closures: Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>>, + stdin: Option<Stdio>, + stdout: Option<Stdio>, + stderr: Option<Stdio>, +} + +// Create a new type for argv, so that we can make it `Send` and `Sync` +struct Argv(Vec<*const c_char>); + +// It is safe to make `Argv` `Send` and `Sync`, because it contains +// pointers to memory owned by `Command.args` +unsafe impl Send for Argv {} +unsafe impl Sync for Argv {} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +// passed to do_exec() with configuration of what the child stdio should look +// like +pub struct ChildPipes { + pub stdin: ChildStdio, + pub stdout: ChildStdio, + pub stderr: ChildStdio, +} + +pub enum ChildStdio { + Inherit, + Explicit(c_int), + Owned(FileDesc), + + // On Fuchsia, null stdio is the default, so we simply don't specify + // any actions at the time of spawning. + #[cfg(target_os = "fuchsia")] + Null, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, + Fd(FileDesc), +} + +impl Command { + pub fn new(program: &OsStr) -> Command { + let mut saw_nul = false; + let program = os2c(program, &mut saw_nul); + Command { + argv: Argv(vec![program.as_ptr(), ptr::null()]), + args: vec![program.clone()], + program, + env: Default::default(), + cwd: None, + uid: None, + gid: None, + saw_nul, + closures: Vec::new(), + stdin: None, + stdout: None, + stderr: None, + } + } + + pub fn set_arg_0(&mut self, arg: &OsStr) { + // Set a new arg0 + let arg = os2c(arg, &mut self.saw_nul); + debug_assert!(self.argv.0.len() > 1); + self.argv.0[0] = arg.as_ptr(); + self.args[0] = arg; + } + + pub fn arg(&mut self, arg: &OsStr) { + // Overwrite the trailing NULL pointer in `argv` and then add a new null + // pointer. + let arg = os2c(arg, &mut self.saw_nul); + self.argv.0[self.args.len()] = arg.as_ptr(); + self.argv.0.push(ptr::null()); + + // Also make sure we keep track of the owned value to schedule a + // destructor for this memory. 
+ self.args.push(arg); + } + + pub fn cwd(&mut self, dir: &OsStr) { + self.cwd = Some(os2c(dir, &mut self.saw_nul)); + } + pub fn uid(&mut self, id: uid_t) { + self.uid = Some(id); + } + pub fn gid(&mut self, id: gid_t) { + self.gid = Some(id); + } + + pub fn saw_nul(&self) -> bool { + self.saw_nul + } + pub fn get_argv(&self) -> &Vec<*const c_char> { + &self.argv.0 + } + + pub fn get_program(&self) -> &CStr { + &*self.program + } + + #[allow(dead_code)] + pub fn get_cwd(&self) -> &Option<CString> { + &self.cwd + } + #[allow(dead_code)] + pub fn get_uid(&self) -> Option<uid_t> { + self.uid + } + #[allow(dead_code)] + pub fn get_gid(&self) -> Option<gid_t> { + self.gid + } + + pub fn get_closures(&mut self) -> &mut Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>> { + &mut self.closures + } + + pub unsafe fn pre_exec(&mut self, f: Box<dyn FnMut() -> io::Result<()> + Send + Sync>) { + self.closures.push(f); + } + + pub fn stdin(&mut self, stdin: Stdio) { + self.stdin = Some(stdin); + } + + pub fn stdout(&mut self, stdout: Stdio) { + self.stdout = Some(stdout); + } + + pub fn stderr(&mut self, stderr: Stdio) { + self.stderr = Some(stderr); + } + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn capture_env(&mut self) -> Option<CStringArray> { + let maybe_env = self.env.capture_if_changed(); + maybe_env.map(|env| construct_envp(env, &mut self.saw_nul)) + } + #[allow(dead_code)] + pub fn env_saw_path(&self) -> bool { + self.env.have_changed_path() + } + + pub fn setup_io( + &self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(StdioPipes, ChildPipes)> { + let null = Stdio::Null; + let default_stdin = if needs_stdin { &default } else { &null }; + let stdin = self.stdin.as_ref().unwrap_or(default_stdin); + let stdout = self.stdout.as_ref().unwrap_or(&default); + let stderr = self.stderr.as_ref().unwrap_or(&default); + let (their_stdin, our_stdin) = stdin.to_child_stdio(true)?; + let (their_stdout, our_stdout) = stdout.to_child_stdio(false)?; + let (their_stderr, our_stderr) = stderr.to_child_stdio(false)?; + let ours = StdioPipes { stdin: our_stdin, stdout: our_stdout, stderr: our_stderr }; + let theirs = ChildPipes { stdin: their_stdin, stdout: their_stdout, stderr: their_stderr }; + Ok((ours, theirs)) + } +} + +fn os2c(s: &OsStr, saw_nul: &mut bool) -> CString { + CString::new(s.as_bytes()).unwrap_or_else(|_e| { + *saw_nul = true; + CString::new("<string-with-nul>").unwrap() + }) +} + +// Helper type to manage ownership of the strings within a C-style array. 
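+// The `ptrs` vector always holds one pointer per item plus a trailing NULL,
+// so `as_ptr()` yields a NULL-terminated `*const *const c_char` that can be
+// handed directly to exec- or spawn-style interfaces as `envp`.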
+pub struct CStringArray { + items: Vec<CString>, + ptrs: Vec<*const c_char>, +} + +impl CStringArray { + pub fn with_capacity(capacity: usize) -> Self { + let mut result = CStringArray { + items: Vec::with_capacity(capacity), + ptrs: Vec::with_capacity(capacity + 1), + }; + result.ptrs.push(ptr::null()); + result + } + pub fn push(&mut self, item: CString) { + let l = self.ptrs.len(); + self.ptrs[l - 1] = item.as_ptr(); + self.ptrs.push(ptr::null()); + self.items.push(item); + } + pub fn as_ptr(&self) -> *const *const c_char { + self.ptrs.as_ptr() + } +} + +fn construct_envp(env: BTreeMap<OsString, OsString>, saw_nul: &mut bool) -> CStringArray { + let mut result = CStringArray::with_capacity(env.len()); + for (mut k, v) in env { + // Reserve additional space for '=' and null terminator + k.reserve_exact(v.len() + 2); + k.push("="); + k.push(&v); + + // Add the new entry into the array + if let Ok(item) = CString::new(k.into_vec()) { + result.push(item); + } else { + *saw_nul = true; + } + } + + result +} + +impl Stdio { + pub fn to_child_stdio(&self, readable: bool) -> io::Result<(ChildStdio, Option<AnonPipe>)> { + match *self { + Stdio::Inherit => Ok((ChildStdio::Inherit, None)), + + // Make sure that the source descriptors are not an stdio + // descriptor, otherwise the order which we set the child's + // descriptors may blow away a descriptor which we are hoping to + // save. For example, suppose we want the child's stderr to be the + // parent's stdout, and the child's stdout to be the parent's + // stderr. No matter which we dup first, the second will get + // overwritten prematurely. + Stdio::Fd(ref fd) => { + if fd.raw() >= 0 && fd.raw() <= libc::STDERR_FILENO { + Ok((ChildStdio::Owned(fd.duplicate()?), None)) + } else { + Ok((ChildStdio::Explicit(fd.raw()), None)) + } + } + + Stdio::MakePipe => { + let (reader, writer) = pipe::anon_pipe()?; + let (ours, theirs) = if readable { (writer, reader) } else { (reader, writer) }; + Ok((ChildStdio::Owned(theirs.into_fd()), Some(ours))) + } + + #[cfg(not(target_os = "fuchsia"))] + Stdio::Null => { + let mut opts = OpenOptions::new(); + opts.read(readable); + opts.write(!readable); + let path = unsafe { CStr::from_ptr(DEV_NULL.as_ptr() as *const _) }; + let fd = File::open_c(&path, &opts)?; + Ok((ChildStdio::Owned(fd.into_fd()), None)) + } + + #[cfg(target_os = "fuchsia")] + Stdio::Null => Ok((ChildStdio::Null, None)), + } + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + Stdio::Fd(pipe.into_fd()) + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + Stdio::Fd(file.into_fd()) + } +} + +impl ChildStdio { + pub fn fd(&self) -> Option<c_int> { + match *self { + ChildStdio::Inherit => None, + ChildStdio::Explicit(fd) => Some(fd), + ChildStdio::Owned(ref fd) => Some(fd.raw()), + + #[cfg(target_os = "fuchsia")] + ChildStdio::Null => None, + } + } +} + +impl fmt::Debug for Command { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.program != self.args[0] { + write!(f, "[{:?}] ", self.program)?; + } + write!(f, "{:?}", self.args[0])?; + + for arg in &self.args[1..] 
{ + write!(f, " {:?}", arg)?; + } + Ok(()) + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(u8); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _); + pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _); + + #[inline] + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +#[cfg(all(test, not(target_os = "emscripten")))] +mod tests { + use super::*; + + use crate::ffi::OsStr; + use crate::mem; + use crate::ptr; + use crate::sys::cvt; + + macro_rules! t { + ($e:expr) => { + match $e { + Ok(t) => t, + Err(e) => panic!("received error for `{}`: {}", stringify!($e), e), + } + }; + } + + // See #14232 for more information, but it appears that signal delivery to a + // newly spawned process may just be raced in the macOS, so to prevent this + // test from being flaky we ignore it on macOS. + #[test] + #[cfg_attr(target_os = "macos", ignore)] + // When run under our current QEMU emulation test suite this test fails, + // although the reason isn't very clear as to why. For now this test is + // ignored there. + #[cfg_attr(target_arch = "arm", ignore)] + #[cfg_attr(target_arch = "aarch64", ignore)] + #[cfg_attr(target_arch = "riscv64", ignore)] + fn test_process_mask() { + unsafe { + // Test to make sure that a signal mask does not get inherited. + let mut cmd = Command::new(OsStr::new("cat")); + + let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit(); + let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit(); + t!(cvt(sigemptyset(set.as_mut_ptr()))); + t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT))); + t!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr()))); + + cmd.stdin(Stdio::MakePipe); + cmd.stdout(Stdio::MakePipe); + + let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true)); + let stdin_write = pipes.stdin.take().unwrap(); + let stdout_read = pipes.stdout.take().unwrap(); + + t!(cvt(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut()))); + + t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT))); + // We need to wait until SIGINT is definitely delivered. The + // easiest way is to write something to cat, and try to read it + // back: if SIGINT is unmasked, it'll get delivered when cat is + // next scheduled. + let _ = stdin_write.write(b"Hello"); + drop(stdin_write); + + // Either EOF or failure (EPIPE) is okay. 
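+ // (If SIGINT was delivered, cat has exited and closed its end of the
+ // pipe, so the read below sees EOF rather than the bytes we wrote.)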
+ let mut buf = [0; 5]; + if let Ok(ret) = stdout_read.read(&mut buf) { + assert_eq!(ret, 0); + } + + t!(cat.wait()); + } + } +} diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs new file mode 100644 index 00000000000..6daf2885bae --- /dev/null +++ b/library/std/src/sys/unix/process/process_fuchsia.rs @@ -0,0 +1,260 @@ +use crate::convert::TryInto; +use crate::fmt; +use crate::io; +use crate::mem; +use crate::ptr; + +use crate::sys::process::process_common::*; +use crate::sys::process::zircon::{zx_handle_t, Handle}; + +use libc::{c_int, size_t}; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +impl Command { + pub fn spawn( + &mut self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + let envp = self.capture_env(); + + if self.saw_nul() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "nul byte found in provided data", + )); + } + + let (ours, theirs) = self.setup_io(default, needs_stdin)?; + + let process_handle = unsafe { self.do_exec(theirs, envp.as_ref())? }; + + Ok((Process { handle: Handle::new(process_handle) }, ours)) + } + + pub fn exec(&mut self, default: Stdio) -> io::Error { + if self.saw_nul() { + return io::Error::new(io::ErrorKind::InvalidInput, "nul byte found in provided data"); + } + + match self.setup_io(default, true) { + Ok((_, _)) => { + // FIXME: This is tough because we don't support the exec syscalls + unimplemented!(); + } + Err(e) => e, + } + } + + unsafe fn do_exec( + &mut self, + stdio: ChildPipes, + maybe_envp: Option<&CStringArray>, + ) -> io::Result<zx_handle_t> { + use crate::sys::process::zircon::*; + + let envp = match maybe_envp { + // None means to clone the current environment, which is done in the + // flags below. + None => ptr::null(), + Some(envp) => envp.as_ptr(), + }; + + let make_action = |local_io: &ChildStdio, target_fd| -> io::Result<fdio_spawn_action_t> { + if let Some(local_fd) = local_io.fd() { + Ok(fdio_spawn_action_t { + action: FDIO_SPAWN_ACTION_TRANSFER_FD, + local_fd, + target_fd, + ..Default::default() + }) + } else { + if let ChildStdio::Null = local_io { + // acts as no-op + return Ok(Default::default()); + } + + let mut handle = ZX_HANDLE_INVALID; + let status = fdio_fd_clone(target_fd, &mut handle); + if status == ERR_INVALID_ARGS || status == ERR_NOT_SUPPORTED { + // This descriptor is closed; skip it rather than generating an + // error. + return Ok(Default::default()); + } + zx_cvt(status)?; + + let mut cloned_fd = 0; + zx_cvt(fdio_fd_create(handle, &mut cloned_fd))?; + + Ok(fdio_spawn_action_t { + action: FDIO_SPAWN_ACTION_TRANSFER_FD, + local_fd: cloned_fd as i32, + target_fd, + ..Default::default() + }) + } + }; + + // Clone stdin, stdout, and stderr + let action1 = make_action(&stdio.stdin, 0)?; + let action2 = make_action(&stdio.stdout, 1)?; + let action3 = make_action(&stdio.stderr, 2)?; + let actions = [action1, action2, action3]; + + // We don't want FileDesc::drop to be called on any stdio. fdio_spawn_etc + // always consumes transferred file descriptors. 
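+ // Letting the `FileDesc` destructors run here would close descriptors
+ // that the spawn call below takes ownership of.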
+ mem::forget(stdio); + + for callback in self.get_closures().iter_mut() { + callback()?; + } + + let mut process_handle: zx_handle_t = 0; + zx_cvt(fdio_spawn_etc( + ZX_HANDLE_INVALID, + FDIO_SPAWN_CLONE_JOB + | FDIO_SPAWN_CLONE_LDSVC + | FDIO_SPAWN_CLONE_NAMESPACE + | FDIO_SPAWN_CLONE_ENVIRON, // this is ignored when envp is non-null + self.get_program().as_ptr(), + self.get_argv().as_ptr(), + envp, + actions.len() as size_t, + actions.as_ptr(), + &mut process_handle, + ptr::null_mut(), + ))?; + // FIXME: See if we want to do something with that err_msg + + Ok(process_handle) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Processes +//////////////////////////////////////////////////////////////////////////////// + +pub struct Process { + handle: Handle, +} + +impl Process { + pub fn id(&self) -> u32 { + self.handle.raw() as u32 + } + + pub fn kill(&mut self) -> io::Result<()> { + use crate::sys::process::zircon::*; + + unsafe { + zx_cvt(zx_task_kill(self.handle.raw()))?; + } + + Ok(()) + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + use crate::default::Default; + use crate::sys::process::zircon::*; + + let mut proc_info: zx_info_process_t = Default::default(); + let mut actual: size_t = 0; + let mut avail: size_t = 0; + + unsafe { + zx_cvt(zx_object_wait_one( + self.handle.raw(), + ZX_TASK_TERMINATED, + ZX_TIME_INFINITE, + ptr::null_mut(), + ))?; + zx_cvt(zx_object_get_info( + self.handle.raw(), + ZX_INFO_PROCESS, + &mut proc_info as *mut _ as *mut libc::c_void, + mem::size_of::<zx_info_process_t>(), + &mut actual, + &mut avail, + ))?; + } + if actual != 1 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Failed to get exit status of process", + )); + } + Ok(ExitStatus(proc_info.return_code)) + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + use crate::default::Default; + use crate::sys::process::zircon::*; + + let mut proc_info: zx_info_process_t = Default::default(); + let mut actual: size_t = 0; + let mut avail: size_t = 0; + + unsafe { + let status = + zx_object_wait_one(self.handle.raw(), ZX_TASK_TERMINATED, 0, ptr::null_mut()); + match status { + 0 => {} // Success + x if x == ERR_TIMED_OUT => { + return Ok(None); + } + _ => { + panic!("Failed to wait on process handle: {}", status); + } + } + zx_cvt(zx_object_get_info( + self.handle.raw(), + ZX_INFO_PROCESS, + &mut proc_info as *mut _ as *mut libc::c_void, + mem::size_of::<zx_info_process_t>(), + &mut actual, + &mut avail, + ))?; + } + if actual != 1 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Failed to get exit status of process", + )); + } + Ok(Some(ExitStatus(proc_info.return_code))) + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitStatus(i64); + +impl ExitStatus { + pub fn success(&self) -> bool { + self.code() == Some(0) + } + + pub fn code(&self) -> Option<i32> { + // FIXME: support extracting return code as an i64 + self.0.try_into().ok() + } + + pub fn signal(&self) -> Option<i32> { + None + } +} + +/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying. 
+impl From<c_int> for ExitStatus { + fn from(a: c_int) -> ExitStatus { + ExitStatus(a as i64) + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "exit code: {}", self.0) + } +} diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs new file mode 100644 index 00000000000..0f349dfa302 --- /dev/null +++ b/library/std/src/sys/unix/process/process_unix.rs @@ -0,0 +1,494 @@ +use crate::convert::TryInto; +use crate::fmt; +use crate::io::{self, Error, ErrorKind}; +use crate::ptr; +use crate::sys; +use crate::sys::cvt; +use crate::sys::process::process_common::*; + +use libc::{c_int, gid_t, pid_t, uid_t}; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +impl Command { + pub fn spawn( + &mut self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + const CLOEXEC_MSG_FOOTER: [u8; 4] = *b"NOEX"; + + let envp = self.capture_env(); + + if self.saw_nul() { + return Err(io::Error::new(ErrorKind::InvalidInput, "nul byte found in provided data")); + } + + let (ours, theirs) = self.setup_io(default, needs_stdin)?; + + if let Some(ret) = self.posix_spawn(&theirs, envp.as_ref())? { + return Ok((ret, ours)); + } + + let (input, output) = sys::pipe::anon_pipe()?; + + // Whatever happens after the fork is almost for sure going to touch or + // look at the environment in one way or another (PATH in `execvp` or + // accessing the `environ` pointer ourselves). Make sure no other thread + // is accessing the environment when we do the fork itself. + // + // Note that as soon as we're done with the fork there's no need to hold + // a lock any more because the parent won't do anything and the child is + // in its own process. + let result = unsafe { + let _env_lock = sys::os::env_lock(); + cvt(libc::fork())? + }; + + let pid = unsafe { + match result { + 0 => { + drop(input); + let Err(err) = self.do_exec(theirs, envp.as_ref()); + let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32; + let errno = errno.to_be_bytes(); + let bytes = [ + errno[0], + errno[1], + errno[2], + errno[3], + CLOEXEC_MSG_FOOTER[0], + CLOEXEC_MSG_FOOTER[1], + CLOEXEC_MSG_FOOTER[2], + CLOEXEC_MSG_FOOTER[3], + ]; + // pipe I/O up to PIPE_BUF bytes should be atomic, and then + // we want to be sure we *don't* run at_exit destructors as + // we're being torn down regardless + assert!(output.write(&bytes).is_ok()); + libc::_exit(1) + } + n => n, + } + }; + + let mut p = Process { pid, status: None }; + drop(output); + let mut bytes = [0; 8]; + + // loop to handle EINTR + loop { + match input.read(&mut bytes) { + Ok(0) => return Ok((p, ours)), + Ok(8) => { + let (errno, footer) = bytes.split_at(4); + assert_eq!( + CLOEXEC_MSG_FOOTER, footer, + "Validation on the CLOEXEC pipe failed: {:?}", + bytes + ); + let errno = i32::from_be_bytes(errno.try_into().unwrap()); + assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); + return Err(Error::from_raw_os_error(errno)); + } + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + Err(e) => { + assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); + panic!("the CLOEXEC pipe failed: {:?}", e) + } + Ok(..) 
=> { + // pipe I/O up to PIPE_BUF bytes should be atomic + assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); + panic!("short read on the CLOEXEC pipe") + } + } + } + } + + pub fn exec(&mut self, default: Stdio) -> io::Error { + let envp = self.capture_env(); + + if self.saw_nul() { + return io::Error::new(ErrorKind::InvalidInput, "nul byte found in provided data"); + } + + match self.setup_io(default, true) { + Ok((_, theirs)) => { + unsafe { + // Similar to when forking, we want to ensure that access to + // the environment is synchronized, so make sure to grab the + // environment lock before we try to exec. + let _lock = sys::os::env_lock(); + + let Err(e) = self.do_exec(theirs, envp.as_ref()); + e + } + } + Err(e) => e, + } + } + + // And at this point we've reached a special time in the life of the + // child. The child must now be considered hamstrung and unable to + // do anything other than syscalls really. Consider the following + // scenario: + // + // 1. Thread A of process 1 grabs the malloc() mutex + // 2. Thread B of process 1 forks(), creating thread C + // 3. Thread C of process 2 then attempts to malloc() + // 4. The memory of process 2 is the same as the memory of + // process 1, so the mutex is locked. + // + // This situation looks a lot like deadlock, right? It turns out + // that this is what pthread_atfork() takes care of, which is + // presumably implemented across platforms. The first thing that + // threads to *before* forking is to do things like grab the malloc + // mutex, and then after the fork they unlock it. + // + // Despite this information, libnative's spawn has been witnessed to + // deadlock on both macOS and FreeBSD. I'm not entirely sure why, but + // all collected backtraces point at malloc/free traffic in the + // child spawned process. + // + // For this reason, the block of code below should contain 0 + // invocations of either malloc of free (or their related friends). + // + // As an example of not having malloc/free traffic, we don't close + // this file descriptor by dropping the FileDesc (which contains an + // allocation). Instead we just close it manually. This will never + // have the drop glue anyway because this code never returns (the + // child will either exec() or invoke libc::exit) + unsafe fn do_exec( + &mut self, + stdio: ChildPipes, + maybe_envp: Option<&CStringArray>, + ) -> Result<!, io::Error> { + use crate::sys::{self, cvt_r}; + + if let Some(fd) = stdio.stdin.fd() { + cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO))?; + } + if let Some(fd) = stdio.stdout.fd() { + cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO))?; + } + if let Some(fd) = stdio.stderr.fd() { + cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO))?; + } + + #[cfg(not(target_os = "l4re"))] + { + if let Some(u) = self.get_gid() { + cvt(libc::setgid(u as gid_t))?; + } + if let Some(u) = self.get_uid() { + // When dropping privileges from root, the `setgroups` call + // will remove any extraneous groups. If we don't call this, + // then even though our uid has dropped, we may still have + // groups that enable us to do super-user things. This will + // fail if we aren't root, so don't bother checking the + // return value, this is just done as an optimistic + // privilege dropping function. 
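+ // It also has to happen before the setuid() call below: once the
+ // process has given up root it no longer has the privilege to call
+ // setgroups() at all.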
+ //FIXME: Redox kernel does not support setgroups yet + #[cfg(not(target_os = "redox"))] + let _ = libc::setgroups(0, ptr::null()); + cvt(libc::setuid(u as uid_t))?; + } + } + if let Some(ref cwd) = *self.get_cwd() { + cvt(libc::chdir(cwd.as_ptr()))?; + } + + // emscripten has no signal support. + #[cfg(not(target_os = "emscripten"))] + { + use crate::mem::MaybeUninit; + // Reset signal handling so the child process starts in a + // standardized state. libstd ignores SIGPIPE, and signal-handling + // libraries often set a mask. Child processes inherit ignored + // signals and the signal mask from their parent, but most + // UNIX programs do not reset these things on their own, so we + // need to clean things up now to avoid confusing the program + // we're about to run. + let mut set = MaybeUninit::<libc::sigset_t>::uninit(); + cvt(sigemptyset(set.as_mut_ptr()))?; + cvt(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?; + let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL); + if ret == libc::SIG_ERR { + return Err(io::Error::last_os_error()); + } + } + + for callback in self.get_closures().iter_mut() { + callback()?; + } + + // Although we're performing an exec here we may also return with an + // error from this function (without actually exec'ing) in which case we + // want to be sure to restore the global environment back to what it + // once was, ensuring that our temporary override, when free'd, doesn't + // corrupt our process's environment. + let mut _reset = None; + if let Some(envp) = maybe_envp { + struct Reset(*const *const libc::c_char); + + impl Drop for Reset { + fn drop(&mut self) { + unsafe { + *sys::os::environ() = self.0; + } + } + } + + _reset = Some(Reset(*sys::os::environ())); + *sys::os::environ() = envp.as_ptr(); + } + + libc::execvp(self.get_program().as_ptr(), self.get_argv().as_ptr()); + Err(io::Error::last_os_error()) + } + + #[cfg(not(any( + target_os = "macos", + target_os = "freebsd", + all(target_os = "linux", target_env = "gnu") + )))] + fn posix_spawn( + &mut self, + _: &ChildPipes, + _: Option<&CStringArray>, + ) -> io::Result<Option<Process>> { + Ok(None) + } + + // Only support platforms for which posix_spawn() can return ENOENT + // directly. + #[cfg(any( + target_os = "macos", + target_os = "freebsd", + all(target_os = "linux", target_env = "gnu") + ))] + fn posix_spawn( + &mut self, + stdio: &ChildPipes, + envp: Option<&CStringArray>, + ) -> io::Result<Option<Process>> { + use crate::mem::MaybeUninit; + use crate::sys; + + if self.get_gid().is_some() + || self.get_uid().is_some() + || self.env_saw_path() + || !self.get_closures().is_empty() + { + return Ok(None); + } + + // Only glibc 2.24+ posix_spawn() supports returning ENOENT directly. + #[cfg(all(target_os = "linux", target_env = "gnu"))] + { + if let Some(version) = sys::os::glibc_version() { + if version < (2, 24) { + return Ok(None); + } + } else { + return Ok(None); + } + } + + // Solaris and glibc 2.29+ can set a new working directory, and maybe + // others will gain this non-POSIX function too. We'll check for this + // weak symbol as soon as it's needed, so we can return early otherwise + // to do a manual chdir before exec. + weak! 
{ + fn posix_spawn_file_actions_addchdir_np( + *mut libc::posix_spawn_file_actions_t, + *const libc::c_char + ) -> libc::c_int + } + let addchdir = match self.get_cwd() { + Some(cwd) => match posix_spawn_file_actions_addchdir_np.get() { + Some(f) => Some((f, cwd)), + None => return Ok(None), + }, + None => None, + }; + + let mut p = Process { pid: 0, status: None }; + + struct PosixSpawnFileActions(MaybeUninit<libc::posix_spawn_file_actions_t>); + + impl Drop for PosixSpawnFileActions { + fn drop(&mut self) { + unsafe { + libc::posix_spawn_file_actions_destroy(self.0.as_mut_ptr()); + } + } + } + + struct PosixSpawnattr(MaybeUninit<libc::posix_spawnattr_t>); + + impl Drop for PosixSpawnattr { + fn drop(&mut self) { + unsafe { + libc::posix_spawnattr_destroy(self.0.as_mut_ptr()); + } + } + } + + unsafe { + let mut file_actions = PosixSpawnFileActions(MaybeUninit::uninit()); + let mut attrs = PosixSpawnattr(MaybeUninit::uninit()); + + libc::posix_spawnattr_init(attrs.0.as_mut_ptr()); + libc::posix_spawn_file_actions_init(file_actions.0.as_mut_ptr()); + + if let Some(fd) = stdio.stdin.fd() { + cvt(libc::posix_spawn_file_actions_adddup2( + file_actions.0.as_mut_ptr(), + fd, + libc::STDIN_FILENO, + ))?; + } + if let Some(fd) = stdio.stdout.fd() { + cvt(libc::posix_spawn_file_actions_adddup2( + file_actions.0.as_mut_ptr(), + fd, + libc::STDOUT_FILENO, + ))?; + } + if let Some(fd) = stdio.stderr.fd() { + cvt(libc::posix_spawn_file_actions_adddup2( + file_actions.0.as_mut_ptr(), + fd, + libc::STDERR_FILENO, + ))?; + } + if let Some((f, cwd)) = addchdir { + cvt(f(file_actions.0.as_mut_ptr(), cwd.as_ptr()))?; + } + + let mut set = MaybeUninit::<libc::sigset_t>::uninit(); + cvt(sigemptyset(set.as_mut_ptr()))?; + cvt(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?; + cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?; + cvt(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?; + + let flags = libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK; + cvt(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?; + + // Make sure we synchronize access to the global `environ` resource + let _env_lock = sys::os::env_lock(); + let envp = envp.map(|c| c.as_ptr()).unwrap_or_else(|| *sys::os::environ() as *const _); + let ret = libc::posix_spawnp( + &mut p.pid, + self.get_program().as_ptr(), + file_actions.0.as_ptr(), + attrs.0.as_ptr(), + self.get_argv().as_ptr() as *const _, + envp as *const _, + ); + if ret == 0 { Ok(Some(p)) } else { Err(io::Error::from_raw_os_error(ret)) } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Processes +//////////////////////////////////////////////////////////////////////////////// + +/// The unique ID of the process (this should never be negative). +pub struct Process { + pid: pid_t, + status: Option<ExitStatus>, +} + +impl Process { + pub fn id(&self) -> u32 { + self.pid as u32 + } + + pub fn kill(&mut self) -> io::Result<()> { + // If we've already waited on this process then the pid can be recycled + // and used for another process, and we probably shouldn't be killing + // random processes, so just return an error. 
+ if self.status.is_some() { + Err(Error::new( + ErrorKind::InvalidInput, + "invalid argument: can't kill an exited process", + )) + } else { + cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop) + } + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + use crate::sys::cvt_r; + if let Some(status) = self.status { + return Ok(status); + } + let mut status = 0 as c_int; + cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?; + self.status = Some(ExitStatus::new(status)); + Ok(ExitStatus::new(status)) + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + if let Some(status) = self.status { + return Ok(Some(status)); + } + let mut status = 0 as c_int; + let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?; + if pid == 0 { + Ok(None) + } else { + self.status = Some(ExitStatus::new(status)); + Ok(Some(ExitStatus::new(status))) + } + } +} + +/// Unix exit statuses +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitStatus(c_int); + +impl ExitStatus { + pub fn new(status: c_int) -> ExitStatus { + ExitStatus(status) + } + + fn exited(&self) -> bool { + unsafe { libc::WIFEXITED(self.0) } + } + + pub fn success(&self) -> bool { + self.code() == Some(0) + } + + pub fn code(&self) -> Option<i32> { + if self.exited() { Some(unsafe { libc::WEXITSTATUS(self.0) }) } else { None } + } + + pub fn signal(&self) -> Option<i32> { + if !self.exited() { Some(unsafe { libc::WTERMSIG(self.0) }) } else { None } + } +} + +/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying. +impl From<c_int> for ExitStatus { + fn from(a: c_int) -> ExitStatus { + ExitStatus(a) + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(code) = self.code() { + write!(f, "exit code: {}", code) + } else { + let signal = self.signal().unwrap(); + write!(f, "signal: {}", signal) + } + } +} diff --git a/library/std/src/sys/unix/process/zircon.rs b/library/std/src/sys/unix/process/zircon.rs new file mode 100644 index 00000000000..750b8f0762a --- /dev/null +++ b/library/std/src/sys/unix/process/zircon.rs @@ -0,0 +1,307 @@ +#![allow(non_camel_case_types, unused)] + +use crate::convert::TryInto; +use crate::i64; +use crate::io; +use crate::mem::MaybeUninit; +use crate::os::raw::c_char; + +use libc::{c_int, c_void, size_t}; + +pub type zx_handle_t = u32; +pub type zx_vaddr_t = usize; +pub type zx_rights_t = u32; +pub type zx_status_t = i32; + +pub const ZX_HANDLE_INVALID: zx_handle_t = 0; + +pub type zx_time_t = i64; +pub const ZX_TIME_INFINITE: zx_time_t = i64::MAX; + +pub type zx_signals_t = u32; + +pub const ZX_OBJECT_SIGNAL_3: zx_signals_t = 1 << 3; + +pub const ZX_TASK_TERMINATED: zx_signals_t = ZX_OBJECT_SIGNAL_3; + +pub const ZX_RIGHT_SAME_RIGHTS: zx_rights_t = 1 << 31; + +pub type zx_object_info_topic_t = u32; + +pub const ZX_INFO_PROCESS: zx_object_info_topic_t = 3; + +pub fn zx_cvt<T>(t: T) -> io::Result<T> +where + T: TryInto<zx_status_t> + Copy, +{ + if let Ok(status) = TryInto::try_into(t) { + if status < 0 { Err(io::Error::from_raw_os_error(status)) } else { Ok(t) } + } else { + Err(io::Error::last_os_error()) + } +} + +// Safe wrapper around zx_handle_t +pub struct Handle { + raw: zx_handle_t, +} + +impl Handle { + pub fn new(raw: zx_handle_t) -> Handle { + Handle { raw } + } + + pub fn raw(&self) -> zx_handle_t { + self.raw + } +} + +impl Drop for Handle { + fn drop(&mut self) { + unsafe { + zx_cvt(zx_handle_close(self.raw)).expect("Failed to close 
zx_handle_t"); + } + } +} + +// Returned for topic ZX_INFO_PROCESS +#[derive(Default)] +#[repr(C)] +pub struct zx_info_process_t { + pub return_code: i64, + pub started: bool, + pub exited: bool, + pub debugger_attached: bool, +} + +extern "C" { + pub fn zx_job_default() -> zx_handle_t; + + pub fn zx_task_kill(handle: zx_handle_t) -> zx_status_t; + + pub fn zx_handle_close(handle: zx_handle_t) -> zx_status_t; + + pub fn zx_handle_duplicate( + handle: zx_handle_t, + rights: zx_rights_t, + out: *const zx_handle_t, + ) -> zx_handle_t; + + pub fn zx_object_wait_one( + handle: zx_handle_t, + signals: zx_signals_t, + timeout: zx_time_t, + pending: *mut zx_signals_t, + ) -> zx_status_t; + + pub fn zx_object_get_info( + handle: zx_handle_t, + topic: u32, + buffer: *mut c_void, + buffer_size: size_t, + actual_size: *mut size_t, + avail: *mut size_t, + ) -> zx_status_t; +} + +#[derive(Default)] +#[repr(C)] +pub struct fdio_spawn_action_t { + pub action: u32, + pub reserved0: u32, + pub local_fd: i32, + pub target_fd: i32, + pub reserved1: u64, +} + +extern "C" { + pub fn fdio_spawn_etc( + job: zx_handle_t, + flags: u32, + path: *const c_char, + argv: *const *const c_char, + envp: *const *const c_char, + action_count: size_t, + actions: *const fdio_spawn_action_t, + process: *mut zx_handle_t, + err_msg: *mut c_char, + ) -> zx_status_t; + + pub fn fdio_fd_clone(fd: c_int, out_handle: *mut zx_handle_t) -> zx_status_t; + pub fn fdio_fd_create(handle: zx_handle_t, fd: *mut c_int) -> zx_status_t; +} + +// fdio_spawn_etc flags + +pub const FDIO_SPAWN_CLONE_JOB: u32 = 0x0001; +pub const FDIO_SPAWN_CLONE_LDSVC: u32 = 0x0002; +pub const FDIO_SPAWN_CLONE_NAMESPACE: u32 = 0x0004; +pub const FDIO_SPAWN_CLONE_STDIO: u32 = 0x0008; +pub const FDIO_SPAWN_CLONE_ENVIRON: u32 = 0x0010; +pub const FDIO_SPAWN_CLONE_ALL: u32 = 0xFFFF; + +// fdio_spawn_etc actions + +pub const FDIO_SPAWN_ACTION_CLONE_FD: u32 = 0x0001; +pub const FDIO_SPAWN_ACTION_TRANSFER_FD: u32 = 0x0002; + +// Errors + +#[allow(unused)] +pub const ERR_INTERNAL: zx_status_t = -1; + +// ERR_NOT_SUPPORTED: The operation is not implemented, supported, +// or enabled. +#[allow(unused)] +pub const ERR_NOT_SUPPORTED: zx_status_t = -2; + +// ERR_NO_RESOURCES: The system was not able to allocate some resource +// needed for the operation. +#[allow(unused)] +pub const ERR_NO_RESOURCES: zx_status_t = -3; + +// ERR_NO_MEMORY: The system was not able to allocate memory needed +// for the operation. +#[allow(unused)] +pub const ERR_NO_MEMORY: zx_status_t = -4; + +// ERR_CALL_FAILED: The second phase of zx_channel_call(; did not complete +// successfully. +#[allow(unused)] +pub const ERR_CALL_FAILED: zx_status_t = -5; + +// ERR_INTERRUPTED_RETRY: The system call was interrupted, but should be +// retried. This should not be seen outside of the VDSO. +#[allow(unused)] +pub const ERR_INTERRUPTED_RETRY: zx_status_t = -6; + +// ======= Parameter errors ======= +// ERR_INVALID_ARGS: an argument is invalid, ex. null pointer +#[allow(unused)] +pub const ERR_INVALID_ARGS: zx_status_t = -10; + +// ERR_BAD_HANDLE: A specified handle value does not refer to a handle. +#[allow(unused)] +pub const ERR_BAD_HANDLE: zx_status_t = -11; + +// ERR_WRONG_TYPE: The subject of the operation is the wrong type to +// perform the operation. +// Example: Attempting a message_read on a thread handle. +#[allow(unused)] +pub const ERR_WRONG_TYPE: zx_status_t = -12; + +// ERR_BAD_SYSCALL: The specified syscall number is invalid. 
+#[allow(unused)] +pub const ERR_BAD_SYSCALL: zx_status_t = -13; + +// ERR_OUT_OF_RANGE: An argument is outside the valid range for this +// operation. +#[allow(unused)] +pub const ERR_OUT_OF_RANGE: zx_status_t = -14; + +// ERR_BUFFER_TOO_SMALL: A caller provided buffer is too small for +// this operation. +#[allow(unused)] +pub const ERR_BUFFER_TOO_SMALL: zx_status_t = -15; + +// ======= Precondition or state errors ======= +// ERR_BAD_STATE: operation failed because the current state of the +// object does not allow it, or a precondition of the operation is +// not satisfied +#[allow(unused)] +pub const ERR_BAD_STATE: zx_status_t = -20; + +// ERR_TIMED_OUT: The time limit for the operation elapsed before +// the operation completed. +#[allow(unused)] +pub const ERR_TIMED_OUT: zx_status_t = -21; + +// ERR_SHOULD_WAIT: The operation cannot be performed currently but +// potentially could succeed if the caller waits for a prerequisite +// to be satisfied, for example waiting for a handle to be readable +// or writable. +// Example: Attempting to read from a message pipe that has no +// messages waiting but has an open remote will return ERR_SHOULD_WAIT. +// Attempting to read from a message pipe that has no messages waiting +// and has a closed remote end will return ERR_REMOTE_CLOSED. +#[allow(unused)] +pub const ERR_SHOULD_WAIT: zx_status_t = -22; + +// ERR_CANCELED: The in-progress operation (e.g., a wait) has been +// // canceled. +#[allow(unused)] +pub const ERR_CANCELED: zx_status_t = -23; + +// ERR_PEER_CLOSED: The operation failed because the remote end +// of the subject of the operation was closed. +#[allow(unused)] +pub const ERR_PEER_CLOSED: zx_status_t = -24; + +// ERR_NOT_FOUND: The requested entity is not found. +#[allow(unused)] +pub const ERR_NOT_FOUND: zx_status_t = -25; + +// ERR_ALREADY_EXISTS: An object with the specified identifier +// already exists. +// Example: Attempting to create a file when a file already exists +// with that name. +#[allow(unused)] +pub const ERR_ALREADY_EXISTS: zx_status_t = -26; + +// ERR_ALREADY_BOUND: The operation failed because the named entity +// is already owned or controlled by another entity. The operation +// could succeed later if the current owner releases the entity. +#[allow(unused)] +pub const ERR_ALREADY_BOUND: zx_status_t = -27; + +// ERR_UNAVAILABLE: The subject of the operation is currently unable +// to perform the operation. +// Note: This is used when there's no direct way for the caller to +// observe when the subject will be able to perform the operation +// and should thus retry. +#[allow(unused)] +pub const ERR_UNAVAILABLE: zx_status_t = -28; + +// ======= Permission check errors ======= +// ERR_ACCESS_DENIED: The caller did not have permission to perform +// the specified operation. +#[allow(unused)] +pub const ERR_ACCESS_DENIED: zx_status_t = -30; + +// ======= Input-output errors ======= +// ERR_IO: Otherwise unspecified error occurred during I/O. +#[allow(unused)] +pub const ERR_IO: zx_status_t = -40; + +// ERR_REFUSED: The entity the I/O operation is being performed on +// rejected the operation. +// Example: an I2C device NAK'ing a transaction or a disk controller +// rejecting an invalid command. +#[allow(unused)] +pub const ERR_IO_REFUSED: zx_status_t = -41; + +// ERR_IO_DATA_INTEGRITY: The data in the operation failed an integrity +// check and is possibly corrupted. +// Example: CRC or Parity error. 
+#[allow(unused)] +pub const ERR_IO_DATA_INTEGRITY: zx_status_t = -42; + +// ERR_IO_DATA_LOSS: The data in the operation is currently unavailable +// and may be permanently lost. +// Example: A disk block is irrecoverably damaged. +#[allow(unused)] +pub const ERR_IO_DATA_LOSS: zx_status_t = -43; + +// Filesystem specific errors +#[allow(unused)] +pub const ERR_BAD_PATH: zx_status_t = -50; +#[allow(unused)] +pub const ERR_NOT_DIR: zx_status_t = -51; +#[allow(unused)] +pub const ERR_NOT_FILE: zx_status_t = -52; +// ERR_FILE_BIG: A file exceeds a filesystem-specific size limit. +#[allow(unused)] +pub const ERR_FILE_BIG: zx_status_t = -53; +// ERR_NO_SPACE: Filesystem or device space is exhausted. +#[allow(unused)] +pub const ERR_NO_SPACE: zx_status_t = -54; diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs new file mode 100644 index 00000000000..eed6fbf13b7 --- /dev/null +++ b/library/std/src/sys/unix/rand.rs @@ -0,0 +1,230 @@ +use crate::mem; +use crate::slice; + +pub fn hashmap_random_keys() -> (u64, u64) { + let mut v = (0, 0); + unsafe { + let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v)); + imp::fill_bytes(view); + } + v +} + +#[cfg(all( + unix, + not(target_os = "macos"), + not(target_os = "ios"), + not(target_os = "openbsd"), + not(target_os = "freebsd"), + not(target_os = "netbsd"), + not(target_os = "fuchsia"), + not(target_os = "redox") +))] +mod imp { + use crate::fs::File; + use crate::io::Read; + + #[cfg(any(target_os = "linux", target_os = "android"))] + fn getrandom(buf: &mut [u8]) -> libc::c_long { + unsafe { + libc::syscall(libc::SYS_getrandom, buf.as_mut_ptr(), buf.len(), libc::GRND_NONBLOCK) + } + } + + #[cfg(not(any(target_os = "linux", target_os = "android")))] + fn getrandom_fill_bytes(_buf: &mut [u8]) -> bool { + false + } + + #[cfg(any(target_os = "linux", target_os = "android"))] + fn getrandom_fill_bytes(v: &mut [u8]) -> bool { + use crate::sync::atomic::{AtomicBool, Ordering}; + use crate::sys::os::errno; + + static GETRANDOM_UNAVAILABLE: AtomicBool = AtomicBool::new(false); + if GETRANDOM_UNAVAILABLE.load(Ordering::Relaxed) { + return false; + } + + let mut read = 0; + while read < v.len() { + let result = getrandom(&mut v[read..]); + if result == -1 { + let err = errno() as libc::c_int; + if err == libc::EINTR { + continue; + } else if err == libc::ENOSYS || err == libc::EPERM { + // Fall back to reading /dev/urandom if `getrandom` is not + // supported on the current kernel. + // + // Also fall back in case it is disabled by something like + // seccomp or inside of virtual machines. + GETRANDOM_UNAVAILABLE.store(true, Ordering::Relaxed); + return false; + } else if err == libc::EAGAIN { + return false; + } else { + panic!("unexpected getrandom error: {}", err); + } + } else { + read += result as usize; + } + } + true + } + + pub fn fill_bytes(v: &mut [u8]) { + // getrandom_fill_bytes here can fail if getrandom() returns EAGAIN, + // meaning it would have blocked because the non-blocking pool (urandom) + // has not initialized in the kernel yet due to a lack of entropy. The + // fallback we do here is to avoid blocking applications which could + // depend on this call without ever knowing they do and don't have a + // work around. The PRNG of /dev/urandom will still be used but over a + // possibly predictable entropy pool. + if getrandom_fill_bytes(v) { + return; + } + + // getrandom failed because it is permanently or temporarily (because + // of missing entropy) unavailable. 
Open /dev/urandom, read from it, + // and close it again. + let mut file = File::open("/dev/urandom").expect("failed to open /dev/urandom"); + file.read_exact(v).expect("failed to read /dev/urandom") + } +} + +#[cfg(target_os = "macos")] +mod imp { + use crate::fs::File; + use crate::io::Read; + use crate::sys::os::errno; + use libc::{c_int, c_void, size_t}; + + fn getentropy_fill_bytes(v: &mut [u8]) -> bool { + weak!(fn getentropy(*mut c_void, size_t) -> c_int); + + getentropy + .get() + .map(|f| { + // getentropy(2) permits a maximum buffer size of 256 bytes + for s in v.chunks_mut(256) { + let ret = unsafe { f(s.as_mut_ptr() as *mut c_void, s.len()) }; + if ret == -1 { + panic!("unexpected getentropy error: {}", errno()); + } + } + true + }) + .unwrap_or(false) + } + + pub fn fill_bytes(v: &mut [u8]) { + if getentropy_fill_bytes(v) { + return; + } + + // for older macos which doesn't support getentropy + let mut file = File::open("/dev/urandom").expect("failed to open /dev/urandom"); + file.read_exact(v).expect("failed to read /dev/urandom") + } +} + +#[cfg(target_os = "openbsd")] +mod imp { + use crate::sys::os::errno; + + pub fn fill_bytes(v: &mut [u8]) { + // getentropy(2) permits a maximum buffer size of 256 bytes + for s in v.chunks_mut(256) { + let ret = unsafe { libc::getentropy(s.as_mut_ptr() as *mut libc::c_void, s.len()) }; + if ret == -1 { + panic!("unexpected getentropy error: {}", errno()); + } + } + } +} + +// On iOS and MacOS `SecRandomCopyBytes` calls `CCRandomCopyBytes` with +// `kCCRandomDefault`. `CCRandomCopyBytes` manages a CSPRNG which is seeded +// from `/dev/random` and which runs on its own thread accessed via GCD. +// This seems needlessly heavyweight for the purposes of generating two u64s +// once per thread in `hashmap_random_keys`. Therefore `SecRandomCopyBytes` is +// only used on iOS where direct access to `/dev/urandom` is blocked by the +// sandbox. +#[cfg(target_os = "ios")] +mod imp { + use crate::io; + use crate::ptr; + use libc::{c_int, size_t}; + + enum SecRandom {} + + #[allow(non_upper_case_globals)] + const kSecRandomDefault: *const SecRandom = ptr::null(); + + extern "C" { + fn SecRandomCopyBytes(rnd: *const SecRandom, count: size_t, bytes: *mut u8) -> c_int; + } + + pub fn fill_bytes(v: &mut [u8]) { + let ret = unsafe { SecRandomCopyBytes(kSecRandomDefault, v.len(), v.as_mut_ptr()) }; + if ret == -1 { + panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); + } + } +} + +#[cfg(any(target_os = "freebsd", target_os = "netbsd"))] +mod imp { + use crate::ptr; + + pub fn fill_bytes(v: &mut [u8]) { + let mib = [libc::CTL_KERN, libc::KERN_ARND]; + // kern.arandom permits a maximum buffer size of 256 bytes + for s in v.chunks_mut(256) { + let mut s_len = s.len(); + let ret = unsafe { + libc::sysctl( + mib.as_ptr(), + mib.len() as libc::c_uint, + s.as_mut_ptr() as *mut _, + &mut s_len, + ptr::null(), + 0, + ) + }; + if ret == -1 || s_len != s.len() { + panic!( + "kern.arandom sysctl failed! (returned {}, s.len() {}, oldlenp {})", + ret, + s.len(), + s_len + ); + } + } + } +} + +#[cfg(target_os = "fuchsia")] +mod imp { + #[link(name = "zircon")] + extern "C" { + fn zx_cprng_draw(buffer: *mut u8, len: usize); + } + + pub fn fill_bytes(v: &mut [u8]) { + unsafe { zx_cprng_draw(v.as_mut_ptr(), v.len()) } + } +} + +#[cfg(target_os = "redox")] +mod imp { + use crate::fs::File; + use crate::io::Read; + + pub fn fill_bytes(v: &mut [u8]) { + // Open rand:, read from it, and close it again. 
+ let mut file = File::open("rand:").expect("failed to open rand:"); + file.read_exact(v).expect("failed to read rand:") + } +} diff --git a/library/std/src/sys/unix/rwlock.rs b/library/std/src/sys/unix/rwlock.rs new file mode 100644 index 00000000000..2b5067a34f6 --- /dev/null +++ b/library/std/src/sys/unix/rwlock.rs @@ -0,0 +1,141 @@ +use crate::cell::UnsafeCell; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +pub struct RWLock { + inner: UnsafeCell<libc::pthread_rwlock_t>, + write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock + num_readers: AtomicUsize, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { + inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), + write_locked: UnsafeCell::new(false), + num_readers: AtomicUsize::new(0), + } + } + #[inline] + pub unsafe fn read(&self) { + let r = libc::pthread_rwlock_rdlock(self.inner.get()); + + // According to POSIX, when a thread tries to acquire this read lock + // while it already holds the write lock + // (or vice versa, or tries to acquire the write lock twice), + // "the call shall either deadlock or return [EDEADLK]" + // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html, + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html). + // So, in principle, all we have to do here is check `r == 0` to be sure we properly + // got the lock. + // + // However, (at least) glibc before version 2.25 does not conform to this spec, + // and can return `r == 0` even when this thread already holds the write lock. + // We thus check for this situation ourselves and panic when detecting that a thread + // got the write lock more than once, or got a read and a write lock. + if r == libc::EAGAIN { + panic!("rwlock maximum reader count exceeded"); + } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) { + // Above, we make sure to only access `write_locked` when `r == 0` to avoid + // data races. + if r == 0 { + // `pthread_rwlock_rdlock` succeeded when it should not have. + self.raw_unlock(); + } + panic!("rwlock read lock would result in deadlock"); + } else { + // According to POSIX, for a properly initialized rwlock this can only + // return EAGAIN or EDEADLK or 0. We rely on that. + debug_assert_eq!(r, 0); + self.num_readers.fetch_add(1, Ordering::Relaxed); + } + } + #[inline] + pub unsafe fn try_read(&self) -> bool { + let r = libc::pthread_rwlock_tryrdlock(self.inner.get()); + if r == 0 { + if *self.write_locked.get() { + // `pthread_rwlock_tryrdlock` succeeded when it should not have. + self.raw_unlock(); + false + } else { + self.num_readers.fetch_add(1, Ordering::Relaxed); + true + } + } else { + false + } + } + #[inline] + pub unsafe fn write(&self) { + let r = libc::pthread_rwlock_wrlock(self.inner.get()); + // See comments above for why we check for EDEADLK and write_locked. For the same reason, + // we also need to check that there are no readers (tracked in `num_readers`). + if r == libc::EDEADLK + || (r == 0 && *self.write_locked.get()) + || self.num_readers.load(Ordering::Relaxed) != 0 + { + // Above, we make sure to only access `write_locked` when `r == 0` to avoid + // data races. + if r == 0 { + // `pthread_rwlock_wrlock` succeeded when it should not have. + self.raw_unlock(); + } + panic!("rwlock write lock would result in deadlock"); + } else { + // According to POSIX, for a properly initialized rwlock this can only + // return EDEADLK or 0. 
We rely on that. + debug_assert_eq!(r, 0); + } + *self.write_locked.get() = true; + } + #[inline] + pub unsafe fn try_write(&self) -> bool { + let r = libc::pthread_rwlock_trywrlock(self.inner.get()); + if r == 0 { + if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 { + // `pthread_rwlock_trywrlock` succeeded when it should not have. + self.raw_unlock(); + false + } else { + *self.write_locked.get() = true; + true + } + } else { + false + } + } + #[inline] + unsafe fn raw_unlock(&self) { + let r = libc::pthread_rwlock_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn read_unlock(&self) { + debug_assert!(!*self.write_locked.get()); + self.num_readers.fetch_sub(1, Ordering::Relaxed); + self.raw_unlock(); + } + #[inline] + pub unsafe fn write_unlock(&self) { + debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0); + debug_assert!(*self.write_locked.get()); + *self.write_locked.get() = false; + self.raw_unlock(); + } + #[inline] + pub unsafe fn destroy(&self) { + let r = libc::pthread_rwlock_destroy(self.inner.get()); + // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a + // rwlock that was just initialized with + // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked) + // or pthread_rwlock_init() is called, this behaviour no longer occurs. + if cfg!(target_os = "dragonfly") { + debug_assert!(r == 0 || r == libc::EINVAL); + } else { + debug_assert_eq!(r, 0); + } + } +} diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs new file mode 100644 index 00000000000..c74fc2b5903 --- /dev/null +++ b/library/std/src/sys/unix/stack_overflow.rs @@ -0,0 +1,234 @@ +#![cfg_attr(test, allow(dead_code))] + +use self::imp::{drop_handler, make_handler}; + +pub use self::imp::cleanup; +pub use self::imp::init; + +pub struct Handler { + _data: *mut libc::c_void, +} + +impl Handler { + pub unsafe fn new() -> Handler { + make_handler() + } + + fn null() -> Handler { + Handler { _data: crate::ptr::null_mut() } + } +} + +impl Drop for Handler { + fn drop(&mut self) { + unsafe { + drop_handler(self); + } + } +} + +#[cfg(any( + target_os = "linux", + target_os = "macos", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "solaris", + target_os = "illumos", + all(target_os = "netbsd", not(target_vendor = "rumprun")), + target_os = "openbsd" +))] +mod imp { + use super::Handler; + use crate::mem; + use crate::ptr; + + use libc::MAP_FAILED; + use libc::{mmap, munmap}; + use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIG_DFL}; + use libc::{sigaltstack, SIGSTKSZ, SS_DISABLE}; + use libc::{MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SIGSEGV}; + + use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; + use crate::sys::unix::os::page_size; + use crate::sys_common::thread_info; + + #[cfg(any(target_os = "linux", target_os = "android"))] + unsafe fn siginfo_si_addr(info: *mut libc::siginfo_t) -> usize { + #[repr(C)] + struct siginfo_t { + a: [libc::c_int; 3], // si_signo, si_errno, si_code + si_addr: *mut libc::c_void, + } + + (*(info as *const siginfo_t)).si_addr as usize + } + + #[cfg(not(any(target_os = "linux", target_os = "android")))] + unsafe fn siginfo_si_addr(info: *mut libc::siginfo_t) -> usize { + (*info).si_addr as usize + } + + // Signal handler for the SIGSEGV and SIGBUS handlers. 
We've got guard pages + // (unmapped pages) at the end of every thread's stack, so if a thread ends + // up running into the guard page it'll trigger this handler. We want to + // detect these cases and print out a helpful error saying that the stack + // has overflowed. All other signals, however, should go back to what they + // were originally supposed to do. + // + // This handler currently exists purely to print an informative message + // whenever a thread overflows its stack. We then abort to exit and + // indicate a crash, but to avoid a misleading SIGSEGV that might lead + // users to believe that unsafe code has accessed an invalid pointer; the + // SIGSEGV encountered when overflowing the stack is expected and + // well-defined. + // + // If this is not a stack overflow, the handler un-registers itself and + // then returns (to allow the original signal to be delivered again). + // Returning from this kind of signal handler is technically not defined + // to work when reading the POSIX spec strictly, but in practice it turns + // out many large systems and all implementations allow returning from a + // signal handler to work. For a more detailed explanation see the + // comments on #26458. + unsafe extern "C" fn signal_handler( + signum: libc::c_int, + info: *mut libc::siginfo_t, + _data: *mut libc::c_void, + ) { + use crate::sys_common::util::report_overflow; + + let guard = thread_info::stack_guard().unwrap_or(0..0); + let addr = siginfo_si_addr(info); + + // If the faulting address is within the guard page, then we print a + // message saying so and abort. + if guard.start <= addr && addr < guard.end { + report_overflow(); + rtabort!("stack overflow"); + } else { + // Unregister ourselves by reverting back to the default behavior. + let mut action: sigaction = mem::zeroed(); + action.sa_sigaction = SIG_DFL; + sigaction(signum, &action, ptr::null_mut()); + + // See comment above for why this function returns. + } + } + + static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut()); + static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false); + + pub unsafe fn init() { + let mut action: sigaction = mem::zeroed(); + for &signal in &[SIGSEGV, SIGBUS] { + sigaction(signal, ptr::null_mut(), &mut action); + // Configure our signal handler if one is not already set. 
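+            // Note: with a null `act` pointer, the sigaction(2) call above only
+            // queries the currently installed disposition into `action`; nothing
+            // has been installed yet. We only take the signal over while it is
+            // still at its default, so a host program that registered its own
+            // SIGSEGV/SIGBUS handler before std initializes keeps that handler.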
+ if action.sa_sigaction == SIG_DFL { + action.sa_flags = SA_SIGINFO | SA_ONSTACK; + action.sa_sigaction = signal_handler as sighandler_t; + sigaction(signal, &action, ptr::null_mut()); + NEED_ALTSTACK.store(true, Ordering::Relaxed); + } + } + + let handler = make_handler(); + MAIN_ALTSTACK.store(handler._data, Ordering::Relaxed); + mem::forget(handler); + } + + pub unsafe fn cleanup() { + Handler { _data: MAIN_ALTSTACK.load(Ordering::Relaxed) }; + } + + unsafe fn get_stackp() -> *mut libc::c_void { + let stackp = mmap( + ptr::null_mut(), + SIGSTKSZ + page_size(), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, + -1, + 0, + ); + if stackp == MAP_FAILED { + panic!("failed to allocate an alternative stack"); + } + let guard_result = libc::mprotect(stackp, page_size(), PROT_NONE); + if guard_result != 0 { + panic!("failed to set up alternative stack guard page"); + } + stackp.add(page_size()) + } + + #[cfg(any( + target_os = "linux", + target_os = "macos", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + target_os = "solaris", + target_os = "illumos" + ))] + unsafe fn get_stack() -> libc::stack_t { + libc::stack_t { ss_sp: get_stackp(), ss_flags: 0, ss_size: SIGSTKSZ } + } + + #[cfg(target_os = "dragonfly")] + unsafe fn get_stack() -> libc::stack_t { + libc::stack_t { ss_sp: get_stackp() as *mut i8, ss_flags: 0, ss_size: SIGSTKSZ } + } + + pub unsafe fn make_handler() -> Handler { + if !NEED_ALTSTACK.load(Ordering::Relaxed) { + return Handler::null(); + } + let mut stack = mem::zeroed(); + sigaltstack(ptr::null(), &mut stack); + // Configure alternate signal stack, if one is not already set. + if stack.ss_flags & SS_DISABLE != 0 { + stack = get_stack(); + sigaltstack(&stack, ptr::null_mut()); + Handler { _data: stack.ss_sp as *mut libc::c_void } + } else { + Handler::null() + } + } + + pub unsafe fn drop_handler(handler: &mut Handler) { + if !handler._data.is_null() { + let stack = libc::stack_t { + ss_sp: ptr::null_mut(), + ss_flags: SS_DISABLE, + // Workaround for bug in macOS implementation of sigaltstack + // UNIX2003 which returns ENOMEM when disabling a stack while + // passing ss_size smaller than MINSIGSTKSZ. According to POSIX + // both ss_sp and ss_size should be ignored in this case. + ss_size: SIGSTKSZ, + }; + sigaltstack(&stack, ptr::null_mut()); + // We know from `get_stackp` that the alternate stack we installed is part of a mapping + // that started one page earlier, so walk back a page and unmap from there. 
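+            // Rough layout of the mapping produced by `get_stackp`:
+            //
+            //   mapping start                         handler._data
+            //   v                                     v
+            //   [ guard page (PROT_NONE) ][ SIGSTKSZ bytes of signal stack ]
+            //
+            // so the region to unmap starts one page below `_data` and spans
+            // SIGSTKSZ + page_size() bytes in total.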
+ munmap(handler._data.sub(page_size()), SIGSTKSZ + page_size()); + } + } +} + +#[cfg(not(any( + target_os = "linux", + target_os = "macos", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "solaris", + target_os = "illumos", + all(target_os = "netbsd", not(target_vendor = "rumprun")), + target_os = "openbsd" +)))] +mod imp { + pub unsafe fn init() {} + + pub unsafe fn cleanup() {} + + pub unsafe fn make_handler() -> super::Handler { + super::Handler::null() + } + + pub unsafe fn drop_handler(_handler: &mut super::Handler) {} +} diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs new file mode 100644 index 00000000000..f8353214cbc --- /dev/null +++ b/library/std/src/sys/unix/stdio.rs @@ -0,0 +1,88 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem::ManuallyDrop; +use crate::sys::fd::FileDesc; + +pub struct Stdin(()); +pub struct Stdout(()); +pub struct Stderr(()); + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin(())) + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDIN_FILENO)).read(buf) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDIN_FILENO)).read_vectored(bufs) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + true + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout(())) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDOUT_FILENO)).write(buf) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDOUT_FILENO)).write_vectored(bufs) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr(())) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDERR_FILENO)).write(buf) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + ManuallyDrop::new(FileDesc::new(libc::STDERR_FILENO)).write_vectored(bufs) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(libc::EBADF as i32) +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs new file mode 100644 index 00000000000..c1bda6b430e --- /dev/null +++ b/library/std/src/sys/unix/thread.rs @@ -0,0 +1,465 @@ +use crate::cmp; +use crate::ffi::CStr; +use crate::io; +use crate::mem; +use crate::ptr; +use crate::sys::{os, stack_overflow}; +use crate::time::Duration; + +#[cfg(not(target_os = "l4re"))] +pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024; +#[cfg(target_os = "l4re")] +pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024; + +pub struct Thread { + id: libc::pthread_t, +} + +// Some platforms may have pthread_t as a pointer in which case we still want +// a thread to be Send/Sync +unsafe impl Send for Thread {} +unsafe impl Sync for Thread {} + +// The pthread_attr_setstacksize symbol doesn't exist in the emscripten libc, 
+// so we have to not link to it to satisfy emcc's ERROR_ON_UNDEFINED_SYMBOLS. +#[cfg(not(target_os = "emscripten"))] +unsafe fn pthread_attr_setstacksize( + attr: *mut libc::pthread_attr_t, + stack_size: libc::size_t, +) -> libc::c_int { + libc::pthread_attr_setstacksize(attr, stack_size) +} + +#[cfg(target_os = "emscripten")] +unsafe fn pthread_attr_setstacksize( + _attr: *mut libc::pthread_attr_t, + _stack_size: libc::size_t, +) -> libc::c_int { + panic!() +} + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + let p = Box::into_raw(box p); + let mut native: libc::pthread_t = mem::zeroed(); + let mut attr: libc::pthread_attr_t = mem::zeroed(); + assert_eq!(libc::pthread_attr_init(&mut attr), 0); + + let stack_size = cmp::max(stack, min_stack_size(&attr)); + + match pthread_attr_setstacksize(&mut attr, stack_size) { + 0 => {} + n => { + assert_eq!(n, libc::EINVAL); + // EINVAL means |stack_size| is either too small or not a + // multiple of the system page size. Because it's definitely + // >= PTHREAD_STACK_MIN, it must be an alignment issue. + // Round up to the nearest page and try again. + let page_size = os::page_size(); + let stack_size = + (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1); + assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0); + } + }; + + let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _); + // Note: if the thread creation fails and this assert fails, then p will + // be leaked. However, an alternative design could cause double-free + // which is clearly worse. + assert_eq!(libc::pthread_attr_destroy(&mut attr), 0); + + return if ret != 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + drop(Box::from_raw(p)); + Err(io::Error::from_raw_os_error(ret)) + } else { + Ok(Thread { id: native }) + }; + + extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { + unsafe { + // Next, set up our stack overflow handler which may get triggered if we run + // out of stack. + let _handler = stack_overflow::Handler::new(); + // Finally, let's run some code. + Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + } + ptr::null_mut() + } + } + + pub fn yield_now() { + let ret = unsafe { libc::sched_yield() }; + debug_assert_eq!(ret, 0); + } + + #[cfg(any(target_os = "linux", target_os = "android"))] + pub fn set_name(name: &CStr) { + const PR_SET_NAME: libc::c_int = 15; + // pthread wrapper only appeared in glibc 2.12, so we use syscall + // directly. + unsafe { + libc::prctl(PR_SET_NAME, name.as_ptr() as libc::c_ulong, 0, 0, 0); + } + } + + #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))] + pub fn set_name(name: &CStr) { + unsafe { + libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr()); + } + } + + #[cfg(any(target_os = "macos", target_os = "ios"))] + pub fn set_name(name: &CStr) { + unsafe { + libc::pthread_setname_np(name.as_ptr()); + } + } + + #[cfg(target_os = "netbsd")] + pub fn set_name(name: &CStr) { + use crate::ffi::CString; + let cname = CString::new(&b"%s"[..]).unwrap(); + unsafe { + libc::pthread_setname_np( + libc::pthread_self(), + cname.as_ptr(), + name.as_ptr() as *mut libc::c_void, + ); + } + } + + #[cfg(any(target_os = "solaris", target_os = "illumos"))] + pub fn set_name(name: &CStr) { + weak! 
{ + fn pthread_setname_np( + libc::pthread_t, *const libc::c_char + ) -> libc::c_int + } + + if let Some(f) = pthread_setname_np.get() { + unsafe { + f(libc::pthread_self(), name.as_ptr()); + } + } + } + + #[cfg(any( + target_env = "newlib", + target_os = "haiku", + target_os = "l4re", + target_os = "emscripten", + target_os = "redox" + ))] + pub fn set_name(_name: &CStr) { + // Newlib, Haiku, and Emscripten have no way to set a thread name. + } + #[cfg(target_os = "fuchsia")] + pub fn set_name(_name: &CStr) { + // FIXME: determine whether Fuchsia has a way to set a thread name. + } + + pub fn sleep(dur: Duration) { + let mut secs = dur.as_secs(); + let mut nsecs = dur.subsec_nanos() as _; + + // If we're awoken with a signal then the return value will be -1 and + // nanosleep will fill in `ts` with the remaining time. + unsafe { + while secs > 0 || nsecs > 0 { + let mut ts = libc::timespec { + tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t, + tv_nsec: nsecs, + }; + secs -= ts.tv_sec as u64; + if libc::nanosleep(&ts, &mut ts) == -1 { + assert_eq!(os::errno(), libc::EINTR); + secs += ts.tv_sec as u64; + nsecs = ts.tv_nsec; + } else { + nsecs = 0; + } + } + } + } + + pub fn join(self) { + unsafe { + let ret = libc::pthread_join(self.id, ptr::null_mut()); + mem::forget(self); + assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret)); + } + } + + pub fn id(&self) -> libc::pthread_t { + self.id + } + + pub fn into_id(self) -> libc::pthread_t { + let id = self.id; + mem::forget(self); + id + } +} + +impl Drop for Thread { + fn drop(&mut self) { + let ret = unsafe { libc::pthread_detach(self.id) }; + debug_assert_eq!(ret, 0); + } +} + +#[cfg(all( + not(all(target_os = "linux", not(target_env = "musl"))), + not(target_os = "freebsd"), + not(target_os = "macos"), + not(all(target_os = "netbsd", not(target_vendor = "rumprun"))), + not(target_os = "openbsd"), + not(target_os = "solaris") +))] +#[cfg_attr(test, allow(dead_code))] +pub mod guard { + use crate::ops::Range; + pub type Guard = Range<usize>; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} + +#[cfg(any( + all(target_os = "linux", not(target_env = "musl")), + target_os = "freebsd", + target_os = "macos", + all(target_os = "netbsd", not(target_vendor = "rumprun")), + target_os = "openbsd", + target_os = "solaris" +))] +#[cfg_attr(test, allow(dead_code))] +pub mod guard { + use libc::{mmap, mprotect}; + use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE}; + + use crate::ops::Range; + use crate::sync::atomic::{AtomicUsize, Ordering}; + use crate::sys::os; + + // This is initialized in init() and only read from after + static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); + + pub type Guard = Range<usize>; + + #[cfg(target_os = "solaris")] + unsafe fn get_stack_start() -> Option<*mut libc::c_void> { + let mut current_stack: libc::stack_t = crate::mem::zeroed(); + assert_eq!(libc::stack_getbounds(&mut current_stack), 0); + Some(current_stack.ss_sp) + } + + #[cfg(target_os = "macos")] + unsafe fn get_stack_start() -> Option<*mut libc::c_void> { + let th = libc::pthread_self(); + let stackaddr = + libc::pthread_get_stackaddr_np(th) as usize - libc::pthread_get_stacksize_np(th); + Some(stackaddr as *mut libc::c_void) + } + + #[cfg(target_os = "openbsd")] + unsafe fn get_stack_start() -> Option<*mut libc::c_void> { + let mut current_stack: libc::stack_t = crate::mem::zeroed(); + 
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0); + + let stackaddr = if libc::pthread_main_np() == 1 { + // main thread + current_stack.ss_sp as usize - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed) + } else { + // new thread + current_stack.ss_sp as usize - current_stack.ss_size + }; + Some(stackaddr as *mut libc::c_void) + } + + #[cfg(any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_os = "netbsd", + target_os = "l4re" + ))] + unsafe fn get_stack_start() -> Option<*mut libc::c_void> { + let mut ret = None; + let mut attr: libc::pthread_attr_t = crate::mem::zeroed(); + assert_eq!(libc::pthread_attr_init(&mut attr), 0); + #[cfg(target_os = "freebsd")] + let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr); + #[cfg(not(target_os = "freebsd"))] + let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr); + if e == 0 { + let mut stackaddr = crate::ptr::null_mut(); + let mut stacksize = 0; + assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0); + ret = Some(stackaddr); + } + assert_eq!(libc::pthread_attr_destroy(&mut attr), 0); + ret + } + + // Precondition: PAGE_SIZE is initialized. + unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> { + let page_size = PAGE_SIZE.load(Ordering::Relaxed); + assert!(page_size != 0); + let stackaddr = get_stack_start()?; + + // Ensure stackaddr is page aligned! A parent process might + // have reset RLIMIT_STACK to be non-page aligned. The + // pthread_attr_getstack() reports the usable stack area + // stackaddr < stackaddr + stacksize, so if stackaddr is not + // page-aligned, calculate the fix such that stackaddr < + // new_page_aligned_stackaddr < stackaddr + stacksize + let remainder = (stackaddr as usize) % page_size; + Some(if remainder == 0 { + stackaddr + } else { + ((stackaddr as usize) + page_size - remainder) as *mut libc::c_void + }) + } + + pub unsafe fn init() -> Option<Guard> { + let page_size = os::page_size(); + PAGE_SIZE.store(page_size, Ordering::Relaxed); + + let stackaddr = get_stack_start_aligned()?; + + if cfg!(target_os = "linux") { + // Linux doesn't allocate the whole stack right away, and + // the kernel has its own stack-guard mechanism to fault + // when growing too close to an existing mapping. If we map + // our own guard, then the kernel starts enforcing a rather + // large gap above that, rendering much of the possible + // stack space useless. See #43052. + // + // Instead, we'll just note where we expect rlimit to start + // faulting, so our handler can report "stack overflow", and + // trust that the kernel's own stack guard will work. + let stackaddr = stackaddr as usize; + Some(stackaddr - page_size..stackaddr) + } else { + // Reallocate the last page of the stack. + // This ensures SIGBUS will be raised on + // stack overflow. + // Systems which enforce strict PAX MPROTECT do not allow + // to mprotect() a mapping with less restrictive permissions + // than the initial mmap() used, so we mmap() here with + // read/write permissions and only then mprotect() it to + // no permissions at all. See issue #50313. 
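+            // MAP_FIXED makes this remap the existing last page of the stack in
+            // place instead of choosing a new address, which is why the result
+            // is required to be exactly `stackaddr` below.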
+ let result = mmap( + stackaddr, + page_size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, + -1, + 0, + ); + if result != stackaddr || result == MAP_FAILED { + panic!("failed to allocate a guard page"); + } + + let result = mprotect(stackaddr, page_size, PROT_NONE); + if result != 0 { + panic!("failed to protect the guard page"); + } + + let guardaddr = stackaddr as usize; + let offset = if cfg!(target_os = "freebsd") { 2 } else { 1 }; + + Some(guardaddr..guardaddr + offset * page_size) + } + } + + #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))] + pub unsafe fn current() -> Option<Guard> { + let stackaddr = get_stack_start()? as usize; + Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr) + } + + #[cfg(any( + target_os = "android", + target_os = "freebsd", + target_os = "linux", + target_os = "netbsd", + target_os = "l4re" + ))] + pub unsafe fn current() -> Option<Guard> { + let mut ret = None; + let mut attr: libc::pthread_attr_t = crate::mem::zeroed(); + assert_eq!(libc::pthread_attr_init(&mut attr), 0); + #[cfg(target_os = "freebsd")] + let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr); + #[cfg(not(target_os = "freebsd"))] + let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr); + if e == 0 { + let mut guardsize = 0; + assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0); + if guardsize == 0 { + panic!("there is no guard page"); + } + let mut stackaddr = crate::ptr::null_mut(); + let mut size = 0; + assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0); + + let stackaddr = stackaddr as usize; + ret = if cfg!(target_os = "freebsd") { + // FIXME does freebsd really fault *below* the guard addr? + let guardaddr = stackaddr - guardsize; + Some(guardaddr - PAGE_SIZE.load(Ordering::Relaxed)..guardaddr) + } else if cfg!(target_os = "netbsd") { + Some(stackaddr - guardsize..stackaddr) + } else if cfg!(all(target_os = "linux", target_env = "gnu")) { + // glibc used to include the guard area within the stack, as noted in the BUGS + // section of `man pthread_attr_getguardsize`. This has been corrected starting + // with glibc 2.27, and in some distro backports, so the guard is now placed at the + // end (below) the stack. There's no easy way for us to know which we have at + // runtime, so we'll just match any fault in the range right above or below the + // stack base to call that fault a stack overflow. + Some(stackaddr - guardsize..stackaddr + guardsize) + } else { + Some(stackaddr..stackaddr + guardsize) + }; + } + assert_eq!(libc::pthread_attr_destroy(&mut attr), 0); + ret + } +} + +// glibc >= 2.15 has a __pthread_get_minstack() function that returns +// PTHREAD_STACK_MIN plus bytes needed for thread-local storage. +// We need that information to avoid blowing up when a small stack +// is created in an application with big thread-local storage requirements. +// See #6233 for rationale and details. +#[cfg(target_os = "linux")] +#[allow(deprecated)] +fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize { + weak!(fn __pthread_get_minstack(*const libc::pthread_attr_t) -> libc::size_t); + + match __pthread_get_minstack.get() { + None => libc::PTHREAD_STACK_MIN, + Some(f) => unsafe { f(attr) }, + } +} + +// No point in looking up __pthread_get_minstack() on non-glibc +// platforms. 
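+// (Note that on Linux the lookup above still goes through dlsym at runtime, so
+// a musl-based target simply gets `None` back and falls back to
+// PTHREAD_STACK_MIN.)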
+#[cfg(all(not(target_os = "linux"), not(target_os = "netbsd")))] +fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { + libc::PTHREAD_STACK_MIN +} + +#[cfg(target_os = "netbsd")] +fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { + 2048 // just a guess +} diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs new file mode 100644 index 00000000000..c3275eb6f0e --- /dev/null +++ b/library/std/src/sys/unix/thread_local_dtor.rs @@ -0,0 +1,94 @@ +#![cfg(target_thread_local)] +#![unstable(feature = "thread_local_internals", issue = "none")] + +//! Provides thread-local destructors without an associated "key", which +//! can be more efficient. + +// Since what appears to be glibc 2.18 this symbol has been shipped which +// GCC and clang both use to invoke destructors in thread_local globals, so +// let's do the same! +// +// Note, however, that we run on lots older linuxes, as well as cross +// compiling from a newer linux to an older linux, so we also have a +// fallback implementation to use as well. +#[cfg(any( + target_os = "linux", + target_os = "fuchsia", + target_os = "redox", + target_os = "emscripten" +))] +pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { + use crate::mem; + use crate::sys_common::thread_local_dtor::register_dtor_fallback; + + extern "C" { + #[linkage = "extern_weak"] + static __dso_handle: *mut u8; + #[linkage = "extern_weak"] + static __cxa_thread_atexit_impl: *const libc::c_void; + } + if !__cxa_thread_atexit_impl.is_null() { + type F = unsafe extern "C" fn( + dtor: unsafe extern "C" fn(*mut u8), + arg: *mut u8, + dso_handle: *mut u8, + ) -> libc::c_int; + mem::transmute::<*const libc::c_void, F>(__cxa_thread_atexit_impl)( + dtor, + t, + &__dso_handle as *const _ as *mut _, + ); + return; + } + register_dtor_fallback(t, dtor); +} + +// This implementation is very similar to register_dtor_fallback in +// sys_common/thread_local.rs. The main difference is that we want to hook into +// macOS's analog of the above linux function, _tlv_atexit. OSX will run the +// registered dtors before any TLS slots get freed, and when the main thread +// exits. +// +// Unfortunately, calling _tlv_atexit while tls dtors are running is UB. The +// workaround below is to register, via _tlv_atexit, a custom DTOR list once per +// thread. thread_local dtors are pushed to the DTOR list without calling +// _tlv_atexit. 
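+//
+// Concretely: the first register_dtor call on a given thread registers
+// run_dtors with _tlv_atexit exactly once, and every call (including that
+// first one) merely pushes `(t, dtor)` onto the thread-local DTORS list.
+// run_dtors later drains the list in a loop, so destructors that register
+// further destructors while running are still picked up.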
+#[cfg(target_os = "macos")] +pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { + use crate::cell::Cell; + use crate::ptr; + + #[thread_local] + static REGISTERED: Cell<bool> = Cell::new(false); + if !REGISTERED.get() { + _tlv_atexit(run_dtors, ptr::null_mut()); + REGISTERED.set(true); + } + + type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>; + + #[thread_local] + static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut()); + if DTORS.get().is_null() { + let v: Box<List> = box Vec::new(); + DTORS.set(Box::into_raw(v)); + } + + extern "C" { + fn _tlv_atexit(dtor: unsafe extern "C" fn(*mut u8), arg: *mut u8); + } + + let list: &mut List = &mut *DTORS.get(); + list.push((t, dtor)); + + unsafe extern "C" fn run_dtors(_: *mut u8) { + let mut ptr = DTORS.replace(ptr::null_mut()); + while !ptr.is_null() { + let list = Box::from_raw(ptr); + for (ptr, dtor) in list.into_iter() { + dtor(ptr); + } + ptr = DTORS.replace(ptr::null_mut()); + } + } +} diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs new file mode 100644 index 00000000000..2c5b94b1e61 --- /dev/null +++ b/library/std/src/sys/unix/thread_local_key.rs @@ -0,0 +1,34 @@ +#![allow(dead_code)] // not used on all platforms + +use crate::mem; + +pub type Key = libc::pthread_key_t; + +#[inline] +pub unsafe fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + let mut key = 0; + assert_eq!(libc::pthread_key_create(&mut key, mem::transmute(dtor)), 0); + key +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + let r = libc::pthread_setspecific(key, value as *mut _); + debug_assert_eq!(r, 0); +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + libc::pthread_getspecific(key) as *mut u8 +} + +#[inline] +pub unsafe fn destroy(key: Key) { + let r = libc::pthread_key_delete(key); + debug_assert_eq!(r, 0); +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + false +} diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs new file mode 100644 index 00000000000..6707f790cab --- /dev/null +++ b/library/std/src/sys/unix/time.rs @@ -0,0 +1,352 @@ +use crate::cmp::Ordering; +use crate::time::Duration; + +use core::hash::{Hash, Hasher}; + +pub use self::inner::{Instant, SystemTime, UNIX_EPOCH}; +use crate::convert::TryInto; + +const NSEC_PER_SEC: u64 = 1_000_000_000; + +#[derive(Copy, Clone)] +struct Timespec { + t: libc::timespec, +} + +impl Timespec { + const fn zero() -> Timespec { + Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } } + } + + fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> { + if self >= other { + Ok(if self.t.tv_nsec >= other.t.tv_nsec { + Duration::new( + (self.t.tv_sec - other.t.tv_sec) as u64, + (self.t.tv_nsec - other.t.tv_nsec) as u32, + ) + } else { + Duration::new( + (self.t.tv_sec - 1 - other.t.tv_sec) as u64, + self.t.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.t.tv_nsec as u32, + ) + }) + } else { + match other.sub_timespec(self) { + Ok(d) => Err(d), + Err(d) => Ok(d), + } + } + } + + fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_add(secs))?; + + // Nano calculations can't overflow because nanos are <1B which fit + // in a u32. 
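+        // (Each operand is below 1_000_000_000, so the sum stays below
+        // 2_000_000_000 and fits in a u32. For example, adding a 0.9s duration
+        // to a timespec at .8s gives nsec = 1_700_000_000; the branch below
+        // subtracts NSEC_PER_SEC and carries 1 into `secs`.)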
+ let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32; + if nsec >= NSEC_PER_SEC as u32 { + nsec -= NSEC_PER_SEC as u32; + secs = secs.checked_add(1)?; + } + Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } + + fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_sub(secs))?; + + // Similar to above, nanos can't overflow. + let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32; + if nsec < 0 { + nsec += NSEC_PER_SEC as i32; + secs = secs.checked_sub(1)?; + } + Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } +} + +impl PartialEq for Timespec { + fn eq(&self, other: &Timespec) -> bool { + self.t.tv_sec == other.t.tv_sec && self.t.tv_nsec == other.t.tv_nsec + } +} + +impl Eq for Timespec {} + +impl PartialOrd for Timespec { + fn partial_cmp(&self, other: &Timespec) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for Timespec { + fn cmp(&self, other: &Timespec) -> Ordering { + let me = (self.t.tv_sec, self.t.tv_nsec); + let other = (other.t.tv_sec, other.t.tv_nsec); + me.cmp(&other) + } +} + +impl Hash for Timespec { + fn hash<H: Hasher>(&self, state: &mut H) { + self.t.tv_sec.hash(state); + self.t.tv_nsec.hash(state); + } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +mod inner { + use crate::fmt; + use crate::mem; + use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst}; + use crate::sys::cvt; + use crate::sys_common::mul_div_u64; + use crate::time::Duration; + + use super::Timespec; + use super::NSEC_PER_SEC; + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] + pub struct Instant { + t: u64, + } + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct SystemTime { + t: Timespec, + } + + pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; + + #[repr(C)] + #[derive(Copy, Clone)] + struct mach_timebase_info { + numer: u32, + denom: u32, + } + type mach_timebase_info_t = *mut mach_timebase_info; + type kern_return_t = libc::c_int; + + impl Instant { + pub fn now() -> Instant { + extern "C" { + fn mach_absolute_time() -> u64; + } + Instant { t: unsafe { mach_absolute_time() } } + } + + pub const fn zero() -> Instant { + Instant { t: 0 } + } + + pub fn actually_monotonic() -> bool { + true + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + let diff = self.t.checked_sub(other.t)?; + let info = info(); + let nanos = mul_div_u64(diff, info.numer as u64, info.denom as u64); + Some(Duration::new(nanos / NSEC_PER_SEC, (nanos % NSEC_PER_SEC) as u32)) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add(checked_dur2intervals(other)?)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub(checked_dur2intervals(other)?)? }) + } + } + + impl SystemTime { + pub fn now() -> SystemTime { + use crate::ptr; + + let mut s = libc::timeval { tv_sec: 0, tv_usec: 0 }; + cvt(unsafe { libc::gettimeofday(&mut s, ptr::null_mut()) }).unwrap(); + return SystemTime::from(s); + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.t.sub_timespec(&other.t) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add_duration(other)? 
}) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + } + } + + impl From<libc::timeval> for SystemTime { + fn from(t: libc::timeval) -> SystemTime { + SystemTime::from(libc::timespec { + tv_sec: t.tv_sec, + tv_nsec: (t.tv_usec * 1000) as libc::c_long, + }) + } + } + + impl From<libc::timespec> for SystemTime { + fn from(t: libc::timespec) -> SystemTime { + SystemTime { t: Timespec { t } } + } + } + + impl fmt::Debug for SystemTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SystemTime") + .field("tv_sec", &self.t.t.tv_sec) + .field("tv_nsec", &self.t.t.tv_nsec) + .finish() + } + } + + fn checked_dur2intervals(dur: &Duration) -> Option<u64> { + let nanos = + dur.as_secs().checked_mul(NSEC_PER_SEC)?.checked_add(dur.subsec_nanos() as u64)?; + let info = info(); + Some(mul_div_u64(nanos, info.denom as u64, info.numer as u64)) + } + + fn info() -> mach_timebase_info { + static mut INFO: mach_timebase_info = mach_timebase_info { numer: 0, denom: 0 }; + static STATE: AtomicUsize = AtomicUsize::new(0); + + unsafe { + // If a previous thread has filled in this global state, use that. + if STATE.load(SeqCst) == 2 { + return INFO; + } + + // ... otherwise learn for ourselves ... + let mut info = mem::zeroed(); + extern "C" { + fn mach_timebase_info(info: mach_timebase_info_t) -> kern_return_t; + } + + mach_timebase_info(&mut info); + + // ... and attempt to be the one thread that stores it globally for + // all other threads + if STATE.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() { + INFO = info; + STATE.store(2, SeqCst); + } + return info; + } + } +} + +#[cfg(not(any(target_os = "macos", target_os = "ios")))] +mod inner { + use crate::fmt; + use crate::sys::cvt; + use crate::time::Duration; + + use super::Timespec; + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct Instant { + t: Timespec, + } + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct SystemTime { + t: Timespec, + } + + pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; + + impl Instant { + pub fn now() -> Instant { + Instant { t: now(libc::CLOCK_MONOTONIC) } + } + + pub const fn zero() -> Instant { + Instant { t: Timespec::zero() } + } + + pub fn actually_monotonic() -> bool { + (cfg!(target_os = "linux") && cfg!(target_arch = "x86_64")) + || (cfg!(target_os = "linux") && cfg!(target_arch = "x86")) + || cfg!(target_os = "fuchsia") + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.t.sub_timespec(&other.t).ok() + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub_duration(other)? }) + } + } + + impl fmt::Debug for Instant { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Instant") + .field("tv_sec", &self.t.t.tv_sec) + .field("tv_nsec", &self.t.t.tv_nsec) + .finish() + } + } + + impl SystemTime { + pub fn now() -> SystemTime { + SystemTime { t: now(libc::CLOCK_REALTIME) } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.t.sub_timespec(&other.t) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add_duration(other)? 
}) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + } + } + + impl From<libc::timespec> for SystemTime { + fn from(t: libc::timespec) -> SystemTime { + SystemTime { t: Timespec { t } } + } + } + + impl fmt::Debug for SystemTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SystemTime") + .field("tv_sec", &self.t.t.tv_sec) + .field("tv_nsec", &self.t.t.tv_nsec) + .finish() + } + } + + #[cfg(not(target_os = "dragonfly"))] + pub type clock_t = libc::c_int; + #[cfg(target_os = "dragonfly")] + pub type clock_t = libc::c_ulong; + + fn now(clock: clock_t) -> Timespec { + let mut t = Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } }; + cvt(unsafe { libc::clock_gettime(clock, &mut t.t) }).unwrap(); + t + } +} diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs new file mode 100644 index 00000000000..f4b33a00f7c --- /dev/null +++ b/library/std/src/sys/unix/weak.rs @@ -0,0 +1,101 @@ +//! Support for "weak linkage" to symbols on Unix +//! +//! Some I/O operations we do in libstd require newer versions of OSes but we +//! need to maintain binary compatibility with older releases for now. In order +//! to use the new functionality when available we use this module for +//! detection. +//! +//! One option to use here is weak linkage, but that is unfortunately only +//! really workable on Linux. Hence, use dlsym to get the symbol value at +//! runtime. This is also done for compatibility with older versions of glibc, +//! and to avoid creating dependencies on GLIBC_PRIVATE symbols. It assumes that +//! we've been dynamically linked to the library the symbol comes from, but that +//! is currently always the case for things like libpthread/libc. +//! +//! A long time ago this used weak linkage for the __pthread_get_minstack +//! symbol, but that caused Debian to detect an unnecessarily strict versioned +//! dependency on libc6 (#23628). + +// There are a variety of `#[cfg]`s controlling which targets are involved in +// each instance of `weak!` and `syscall!`. Rather than trying to unify all of +// that, we'll just allow that some unix targets don't use this module at all. +#![allow(dead_code, unused_macros)] + +use crate::ffi::CStr; +use crate::marker; +use crate::mem; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +macro_rules! weak { + (fn $name:ident($($t:ty),*) -> $ret:ty) => ( + static $name: crate::sys::weak::Weak<unsafe extern fn($($t),*) -> $ret> = + crate::sys::weak::Weak::new(concat!(stringify!($name), '\0')); + ) +} + +pub struct Weak<F> { + name: &'static str, + addr: AtomicUsize, + _marker: marker::PhantomData<F>, +} + +impl<F> Weak<F> { + pub const fn new(name: &'static str) -> Weak<F> { + Weak { name, addr: AtomicUsize::new(1), _marker: marker::PhantomData } + } + + pub fn get(&self) -> Option<F> { + assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>()); + unsafe { + if self.addr.load(Ordering::SeqCst) == 1 { + self.addr.store(fetch(self.name), Ordering::SeqCst); + } + match self.addr.load(Ordering::SeqCst) { + 0 => None, + addr => Some(mem::transmute_copy::<usize, F>(&addr)), + } + } + } +} + +unsafe fn fetch(name: &str) -> usize { + let name = match CStr::from_bytes_with_nul(name.as_bytes()) { + Ok(cstr) => cstr, + Err(..) => return 0, + }; + libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize +} + +#[cfg(not(target_os = "linux"))] +macro_rules! 
syscall { + (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => ( + unsafe fn $name($($arg_name: $t),*) -> $ret { + use super::os; + + weak! { fn $name($($t),*) -> $ret } + + if let Some(fun) = $name.get() { + fun($($arg_name),*) + } else { + os::set_errno(libc::ENOSYS); + -1 + } + } + ) +} + +#[cfg(target_os = "linux")] +macro_rules! syscall { + (fn $name:ident($($arg_name:ident: $t:ty),*) -> $ret:ty) => ( + unsafe fn $name($($arg_name:$t),*) -> $ret { + // This looks like a hack, but concat_idents only accepts idents + // (not paths). + use libc::*; + + syscall( + concat_idents!(SYS_, $name), + $($arg_name as c_long),* + ) as $ret + } + ) +} diff --git a/library/std/src/sys/unsupported/alloc.rs b/library/std/src/sys/unsupported/alloc.rs new file mode 100644 index 00000000000..8d5d0a2f5cc --- /dev/null +++ b/library/std/src/sys/unsupported/alloc.rs @@ -0,0 +1,22 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { + 0 as *mut u8 + } + + #[inline] + unsafe fn alloc_zeroed(&self, _layout: Layout) -> *mut u8 { + 0 as *mut u8 + } + + #[inline] + unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {} + + #[inline] + unsafe fn realloc(&self, _ptr: *mut u8, _layout: Layout, _new_size: usize) -> *mut u8 { + 0 as *mut u8 + } +} diff --git a/library/std/src/sys/unsupported/args.rs b/library/std/src/sys/unsupported/args.rs new file mode 100644 index 00000000000..71d0c5fa13e --- /dev/null +++ b/library/std/src/sys/unsupported/args.rs @@ -0,0 +1,38 @@ +use crate::ffi::OsString; + +pub unsafe fn init(_argc: isize, _argv: *const *const u8) {} +pub unsafe fn cleanup() {} + +pub struct Args {} + +pub fn args() -> Args { + Args {} +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + &[] + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + None + } + fn size_hint(&self) -> (usize, Option<usize>) { + (0, Some(0)) + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + 0 + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + None + } +} diff --git a/library/std/src/sys/unsupported/cmath.rs b/library/std/src/sys/unsupported/cmath.rs new file mode 100644 index 00000000000..304cf906b2a --- /dev/null +++ b/library/std/src/sys/unsupported/cmath.rs @@ -0,0 +1,29 @@ +// These symbols are all defined in `compiler-builtins` +extern "C" { + pub fn acos(n: f64) -> f64; + pub fn acosf(n: f32) -> f32; + pub fn asin(n: f64) -> f64; + pub fn asinf(n: f32) -> f32; + pub fn atan(n: f64) -> f64; + pub fn atan2(a: f64, b: f64) -> f64; + pub fn atan2f(a: f32, b: f32) -> f32; + pub fn atanf(n: f32) -> f32; + pub fn cbrt(n: f64) -> f64; + pub fn cbrtf(n: f32) -> f32; + pub fn cosh(n: f64) -> f64; + pub fn coshf(n: f32) -> f32; + pub fn expm1(n: f64) -> f64; + pub fn expm1f(n: f32) -> f32; + pub fn fdim(a: f64, b: f64) -> f64; + pub fn fdimf(a: f32, b: f32) -> f32; + pub fn hypot(x: f64, y: f64) -> f64; + pub fn hypotf(x: f32, y: f32) -> f32; + pub fn log1p(n: f64) -> f64; + pub fn log1pf(n: f32) -> f32; + pub fn sinh(n: f64) -> f64; + pub fn sinhf(n: f32) -> f32; + pub fn tan(n: f64) -> f64; + pub fn tanf(n: f32) -> f32; + pub fn tanh(n: f64) -> f64; + pub fn tanhf(n: f32) -> f32; +} diff --git a/library/std/src/sys/unsupported/common.rs b/library/std/src/sys/unsupported/common.rs new file mode 100644 index 00000000000..80311d26819 --- 
/dev/null +++ b/library/std/src/sys/unsupported/common.rs @@ -0,0 +1,48 @@ +use crate::io as std_io; + +pub mod memchr { + pub use core::slice::memchr::{memchr, memrchr}; +} + +pub use crate::sys_common::os_str_bytes as os_str; + +// This is not necessarily correct. May want to consider making it part of the +// spec definition? +use crate::os::raw::c_char; + +#[cfg(not(test))] +pub fn init() {} + +pub fn unsupported<T>() -> std_io::Result<T> { + Err(unsupported_err()) +} + +pub fn unsupported_err() -> std_io::Error { + std_io::Error::new(std_io::ErrorKind::Other, "operation not supported on this platform") +} + +pub fn decode_error_kind(_code: i32) -> crate::io::ErrorKind { + crate::io::ErrorKind::Other +} + +pub fn abort_internal() -> ! { + core::intrinsics::abort(); +} + +pub fn hashmap_random_keys() -> (u64, u64) { + (1, 2) +} + +// This enum is used as the storage for a bunch of types which can't actually +// exist. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub enum Void {} + +pub unsafe fn strlen(mut s: *const c_char) -> usize { + let mut n = 0; + while *s != 0 { + n += 1; + s = s.offset(1); + } + return n; +} diff --git a/library/std/src/sys/unsupported/condvar.rs b/library/std/src/sys/unsupported/condvar.rs new file mode 100644 index 00000000000..a578eee8ccc --- /dev/null +++ b/library/std/src/sys/unsupported/condvar.rs @@ -0,0 +1,30 @@ +use crate::sys::mutex::Mutex; +use crate::time::Duration; + +pub struct Condvar {} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar {} + } + + #[inline] + pub unsafe fn init(&mut self) {} + + #[inline] + pub unsafe fn notify_one(&self) {} + + #[inline] + pub unsafe fn notify_all(&self) {} + + pub unsafe fn wait(&self, _mutex: &Mutex) { + panic!("condvar wait not supported") + } + + pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool { + panic!("condvar wait not supported"); + } + + #[inline] + pub unsafe fn destroy(&self) {} +} diff --git a/library/std/src/sys/unsupported/env.rs b/library/std/src/sys/unsupported/env.rs new file mode 100644 index 00000000000..d2efec506c5 --- /dev/null +++ b/library/std/src/sys/unsupported/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = ""; + pub const OS: &str = ""; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ""; + pub const DLL_EXTENSION: &str = ""; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} diff --git a/library/std/src/sys/unsupported/fs.rs b/library/std/src/sys/unsupported/fs.rs new file mode 100644 index 00000000000..ecb5b51cccd --- /dev/null +++ b/library/std/src/sys/unsupported/fs.rs @@ -0,0 +1,308 @@ +use crate::ffi::OsString; +use crate::fmt; +use crate::hash::{Hash, Hasher}; +use crate::io::{self, IoSlice, IoSliceMut, SeekFrom}; +use crate::path::{Path, PathBuf}; +use crate::sys::time::SystemTime; +use crate::sys::{unsupported, Void}; + +pub struct File(Void); + +pub struct FileAttr(Void); + +pub struct ReadDir(Void); + +pub struct DirEntry(Void); + +#[derive(Clone, Debug)] +pub struct OpenOptions {} + +pub struct FilePermissions(Void); + +pub struct FileType(Void); + +#[derive(Debug)] +pub struct DirBuilder {} + +impl FileAttr { + pub fn size(&self) -> u64 { + match self.0 {} + } + + pub fn perm(&self) -> FilePermissions { + match self.0 {} + } + + pub fn file_type(&self) -> FileType { + match self.0 {} + } + + pub fn modified(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + match self.0 {} + } + + pub fn 
created(&self) -> io::Result<SystemTime> { + match self.0 {} + } +} + +impl Clone for FileAttr { + fn clone(&self) -> FileAttr { + match self.0 {} + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + match self.0 {} + } + + pub fn set_readonly(&mut self, _readonly: bool) { + match self.0 {} + } +} + +impl Clone for FilePermissions { + fn clone(&self) -> FilePermissions { + match self.0 {} + } +} + +impl PartialEq for FilePermissions { + fn eq(&self, _other: &FilePermissions) -> bool { + match self.0 {} + } +} + +impl Eq for FilePermissions {} + +impl fmt::Debug for FilePermissions { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + match self.0 {} + } + + pub fn is_file(&self) -> bool { + match self.0 {} + } + + pub fn is_symlink(&self) -> bool { + match self.0 {} + } +} + +impl Clone for FileType { + fn clone(&self) -> FileType { + match self.0 {} + } +} + +impl Copy for FileType {} + +impl PartialEq for FileType { + fn eq(&self, _other: &FileType) -> bool { + match self.0 {} + } +} + +impl Eq for FileType {} + +impl Hash for FileType { + fn hash<H: Hasher>(&self, _h: &mut H) { + match self.0 {} + } +} + +impl fmt::Debug for FileType { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + fn next(&mut self) -> Option<io::Result<DirEntry>> { + match self.0 {} + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + match self.0 {} + } + + pub fn file_name(&self) -> OsString { + match self.0 {} + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn file_type(&self) -> io::Result<FileType> { + match self.0 {} + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions {} + } + + pub fn read(&mut self, _read: bool) {} + pub fn write(&mut self, _write: bool) {} + pub fn append(&mut self, _append: bool) {} + pub fn truncate(&mut self, _truncate: bool) {} + pub fn create(&mut self, _create: bool) {} + pub fn create_new(&mut self, _create_new: bool) {} +} + +impl File { + pub fn open(_path: &Path, _opts: &OpenOptions) -> io::Result<File> { + unsupported() + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + match self.0 {} + } + + pub fn fsync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn datasync(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn truncate(&self, _size: u64) -> io::Result<()> { + match self.0 {} + } + + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn flush(&self) -> io::Result<()> { + match self.0 {} + } + + pub fn seek(&self, _pos: SeekFrom) -> io::Result<u64> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<File> { + match self.0 {} + } + + pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> { + match self.0 {} + } + + pub fn diverge(&self) -> ! 
{ + match self.0 {} + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder {} + } + + pub fn mkdir(&self, _p: &Path) -> io::Result<()> { + unsupported() + } +} + +impl fmt::Debug for File { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub fn readdir(_p: &Path) -> io::Result<ReadDir> { + unsupported() +} + +pub fn unlink(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> { + unsupported() +} + +pub fn set_perm(_p: &Path, perm: FilePermissions) -> io::Result<()> { + match perm.0 {} +} + +pub fn rmdir(_p: &Path) -> io::Result<()> { + unsupported() +} + +pub fn remove_dir_all(_path: &Path) -> io::Result<()> { + unsupported() +} + +pub fn readlink(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn symlink(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { + unsupported() +} + +pub fn stat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn lstat(_p: &Path) -> io::Result<FileAttr> { + unsupported() +} + +pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> { + unsupported() +} + +pub fn copy(_from: &Path, _to: &Path) -> io::Result<u64> { + unsupported() +} diff --git a/library/std/src/sys/unsupported/io.rs b/library/std/src/sys/unsupported/io.rs new file mode 100644 index 00000000000..d5f475b4310 --- /dev/null +++ b/library/std/src/sys/unsupported/io.rs @@ -0,0 +1,47 @@ +use crate::mem; + +#[derive(Copy, Clone)] +pub struct IoSlice<'a>(&'a [u8]); + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + self.0 = &self.0[n..] + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } +} + +pub struct IoSliceMut<'a>(&'a mut [u8]); + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut(buf) + } + + #[inline] + pub fn advance(&mut self, n: usize) { + let slice = mem::replace(&mut self.0, &mut []); + let (_, remaining) = slice.split_at_mut(n); + self.0 = remaining; + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + self.0 + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + self.0 + } +} diff --git a/library/std/src/sys/unsupported/mod.rs b/library/std/src/sys/unsupported/mod.rs new file mode 100644 index 00000000000..87f655eecd5 --- /dev/null +++ b/library/std/src/sys/unsupported/mod.rs @@ -0,0 +1,24 @@ +pub mod alloc; +pub mod args; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod fs; +pub mod io; +pub mod mutex; +pub mod net; +pub mod os; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +#[cfg(target_thread_local)] +pub mod thread_local_dtor; +pub mod thread_local_key; +pub mod time; + +mod common; +pub use common::*; diff --git a/library/std/src/sys/unsupported/mutex.rs b/library/std/src/sys/unsupported/mutex.rs new file mode 100644 index 00000000000..9ef8af52eb5 --- /dev/null +++ b/library/std/src/sys/unsupported/mutex.rs @@ -0,0 +1,67 @@ +use crate::cell::UnsafeCell; + +pub struct Mutex { + locked: UnsafeCell<bool>, +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} // no threads on this platform + +impl Mutex { + #[rustc_const_stable(feature = "const_sys_mutex_new", since = "1.0.0")] + pub const fn new() -> Mutex { + Mutex { locked: UnsafeCell::new(false) } + } + + #[inline] + pub unsafe fn init(&mut 
self) {} + + #[inline] + pub unsafe fn lock(&self) { + let locked = self.locked.get(); + assert!(!*locked, "cannot recursively acquire mutex"); + *locked = true; + } + + #[inline] + pub unsafe fn unlock(&self) { + *self.locked.get() = false; + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + let locked = self.locked.get(); + if *locked { + false + } else { + *locked = true; + true + } + } + + #[inline] + pub unsafe fn destroy(&self) {} +} + +// All empty stubs because this platform does not yet support threads, so lock +// acquisition always succeeds. +pub struct ReentrantMutex {} + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex {} + } + + pub unsafe fn init(&self) {} + + pub unsafe fn lock(&self) {} + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + true + } + + pub unsafe fn unlock(&self) {} + + pub unsafe fn destroy(&self) {} +} diff --git a/library/std/src/sys/unsupported/net.rs b/library/std/src/sys/unsupported/net.rs new file mode 100644 index 00000000000..5c9f1098f9b --- /dev/null +++ b/library/std/src/sys/unsupported/net.rs @@ -0,0 +1,361 @@ +use crate::convert::TryFrom; +use crate::fmt; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; +use crate::sys::{unsupported, Void}; +use crate::time::Duration; + +pub struct TcpStream(Void); + +impl TcpStream { + pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + unsupported() + } + + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> { + unsupported() + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + match self.0 {} + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn nodelay(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct TcpListener(Void); + +impl TcpListener { + pub fn bind(_: io::Result<&SocketAddr>) -> 
io::Result<TcpListener> { + unsupported() + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn only_v6(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct UdpSocket(Void); + +impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + unsupported() + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + match self.0 {} + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + match self.0 {} + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + match self.0 {} + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + match self.0 {} + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + match self.0 {} + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + match self.0 {} + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn broadcast(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + match self.0 {} + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + match self.0 {} + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + match self.0 {} + } + + pub fn ttl(&self) -> io::Result<u32> { + match self.0 {} + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + match self.0 {} + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + match self.0 {} + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + match self.0 
{} + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + match self.0 {} + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +pub struct LookupHost(Void); + +impl LookupHost { + pub fn port(&self) -> u16 { + match self.0 {} + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + match self.0 {} + } +} + +impl TryFrom<&str> for LookupHost { + type Error = io::Error; + + fn try_from(_v: &str) -> io::Result<LookupHost> { + unsupported() + } +} + +impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> { + unsupported() + } +} + +#[allow(nonstandard_style)] +pub mod netc { + pub const AF_INET: u8 = 0; + pub const AF_INET6: u8 = 1; + pub type sa_family_t = u8; + + #[derive(Copy, Clone)] + pub struct in_addr { + pub s_addr: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: u16, + pub sin_addr: in_addr, + } + + #[derive(Copy, Clone)] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: u16, + pub sin6_addr: in6_addr, + pub sin6_flowinfo: u32, + pub sin6_scope_id: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr {} + + pub type socklen_t = usize; +} diff --git a/library/std/src/sys/unsupported/os.rs b/library/std/src/sys/unsupported/os.rs new file mode 100644 index 00000000000..0615780c242 --- /dev/null +++ b/library/std/src/sys/unsupported/os.rs @@ -0,0 +1,104 @@ +use super::{unsupported, Void}; +use crate::error::Error as StdError; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::path::{self, PathBuf}; + +pub fn errno() -> i32 { + 0 +} + +pub fn error_string(_errno: i32) -> String { + "operation successful".to_string() +} + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(&'a Void); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + match *self.0 {} + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on this platform yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on this platform yet" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} + +pub struct Env(Void); + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + match self.0 {} + } +} + +pub fn env() -> Env { + panic!("not supported on this platform") +} + +pub fn getenv(_: &OsStr) -> io::Result<Option<OsString>> { + Ok(None) +} + +pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "cannot set env vars on this platform")) +} + +pub fn unsetenv(_: &OsStr) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "cannot unset env vars on this platform")) +} + 
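The environment stubs above share one pattern: reads report an empty environment and writes fail with an `Other` error. A minimal sketch of how that surfaces through the public `std::env` wrappers on a target built with this shim (the variable name is illustrative only):

use std::env;

fn main() {
    // `getenv` always answers Ok(None), so every lookup misses.
    assert!(env::var_os("EXAMPLE_VAR").is_none());

    // `env()` panics, so enumerating variables is not possible here:
    // env::vars_os(); // would panic: "not supported on this platform"

    // `setenv`/`unsetenv` return an io::Error at the sys layer; the public
    // std::env::set_var / remove_var wrappers treat that as a hard failure.
}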
+pub fn temp_dir() -> PathBuf { + panic!("no filesystem on this platform") +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn exit(_code: i32) -> ! { + crate::intrinsics::abort() +} + +pub fn getpid() -> u32 { + panic!("no pids on this platform") +} diff --git a/library/std/src/sys/unsupported/path.rs b/library/std/src/sys/unsupported/path.rs new file mode 100644 index 00000000000..840a7ae0426 --- /dev/null +++ b/library/std/src/sys/unsupported/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/unsupported/pipe.rs b/library/std/src/sys/unsupported/pipe.rs new file mode 100644 index 00000000000..10d0925823e --- /dev/null +++ b/library/std/src/sys/unsupported/pipe.rs @@ -0,0 +1,38 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::sys::Void; + +pub struct AnonPipe(Void); + +impl AnonPipe { + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn diverge(&self) -> ! { + match self.0 {} + } +} + +pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> { + match p1.0 {} +} diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs new file mode 100644 index 00000000000..4702e5c5492 --- /dev/null +++ b/library/std/src/sys/unsupported/process.rs @@ -0,0 +1,149 @@ +use crate::ffi::OsStr; +use crate::fmt; +use crate::io; +use crate::sys::fs::File; +use crate::sys::pipe::AnonPipe; +use crate::sys::{unsupported, Void}; +use crate::sys_common::process::CommandEnv; + +pub use crate::ffi::OsString as EnvKey; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + env: CommandEnv, +} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, +} + +impl Command { + pub fn new(_program: &OsStr) -> Command { + Command { env: Default::default() } + } + + pub fn arg(&mut self, _arg: &OsStr) {} + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn cwd(&mut self, _dir: &OsStr) {} + + pub fn stdin(&mut self, _stdin: Stdio) {} + + pub fn stdout(&mut self, _stdout: Stdio) {} + + pub fn stderr(&mut self, _stderr: Stdio) {} + + pub fn spawn( + &mut self, + _default: Stdio, + _needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + unsupported() + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + pipe.diverge() + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + file.diverge() + } +} + +impl 
fmt::Debug for Command { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct ExitStatus(Void); + +impl ExitStatus { + pub fn success(&self) -> bool { + match self.0 {} + } + + pub fn code(&self) -> Option<i32> { + match self.0 {} + } +} + +impl Clone for ExitStatus { + fn clone(&self) -> ExitStatus { + match self.0 {} + } +} + +impl Copy for ExitStatus {} + +impl PartialEq for ExitStatus { + fn eq(&self, _other: &ExitStatus) -> bool { + match self.0 {} + } +} + +impl Eq for ExitStatus {} + +impl fmt::Debug for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(bool); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(false); + pub const FAILURE: ExitCode = ExitCode(true); + + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +pub struct Process(Void); + +impl Process { + pub fn id(&self) -> u32 { + match self.0 {} + } + + pub fn kill(&mut self) -> io::Result<()> { + match self.0 {} + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + match self.0 {} + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + match self.0 {} + } +} diff --git a/library/std/src/sys/unsupported/rwlock.rs b/library/std/src/sys/unsupported/rwlock.rs new file mode 100644 index 00000000000..d37f34ac935 --- /dev/null +++ b/library/std/src/sys/unsupported/rwlock.rs @@ -0,0 +1,69 @@ +use crate::cell::UnsafeCell; + +pub struct RWLock { + mode: UnsafeCell<isize>, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} // no threads on this platform + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { mode: UnsafeCell::new(0) } + } + + #[inline] + pub unsafe fn read(&self) { + let mode = self.mode.get(); + if *mode >= 0 { + *mode += 1; + } else { + rtabort!("rwlock locked for writing"); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + let mode = self.mode.get(); + if *mode >= 0 { + *mode += 1; + true + } else { + false + } + } + + #[inline] + pub unsafe fn write(&self) { + let mode = self.mode.get(); + if *mode == 0 { + *mode = -1; + } else { + rtabort!("rwlock locked for reading") + } + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + let mode = self.mode.get(); + if *mode == 0 { + *mode = -1; + true + } else { + false + } + } + + #[inline] + pub unsafe fn read_unlock(&self) { + *self.mode.get() -= 1; + } + + #[inline] + pub unsafe fn write_unlock(&self) { + *self.mode.get() += 1; + } + + #[inline] + pub unsafe fn destroy(&self) {} +} diff --git a/library/std/src/sys/unsupported/stack_overflow.rs b/library/std/src/sys/unsupported/stack_overflow.rs new file mode 100644 index 00000000000..32555394cd5 --- /dev/null +++ b/library/std/src/sys/unsupported/stack_overflow.rs @@ -0,0 +1,3 @@ +pub unsafe fn init() {} + +pub unsafe fn cleanup() {} diff --git a/library/std/src/sys/unsupported/stdio.rs b/library/std/src/sys/unsupported/stdio.rs new file mode 100644 index 00000000000..5a4e4505e93 --- /dev/null +++ b/library/std/src/sys/unsupported/stdio.rs @@ -0,0 +1,59 @@ +use crate::io; + +pub struct Stdin; +pub struct Stdout; +pub struct Stderr; + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin) + } +} + +impl io::Read for Stdin { + fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { + Ok(0) + } +} + +impl Stdout { + pub fn new() -> 
io::Result<Stdout> { + Ok(Stdout) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub const STDIN_BUF_SIZE: usize = 0; + +pub fn is_ebadf(_err: &io::Error) -> bool { + true +} + +pub fn panic_output() -> Option<Vec<u8>> { + None +} diff --git a/library/std/src/sys/unsupported/thread.rs b/library/std/src/sys/unsupported/thread.rs new file mode 100644 index 00000000000..20ae309db30 --- /dev/null +++ b/library/std/src/sys/unsupported/thread.rs @@ -0,0 +1,41 @@ +use super::{unsupported, Void}; +use crate::ffi::CStr; +use crate::io; +use crate::time::Duration; + +pub struct Thread(Void); + +pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { + unsupported() + } + + pub fn yield_now() { + // do nothing + } + + pub fn set_name(_name: &CStr) { + // nope + } + + pub fn sleep(_dur: Duration) { + panic!("can't sleep"); + } + + pub fn join(self) { + match self.0 {} + } +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/unsupported/thread_local_dtor.rs b/library/std/src/sys/unsupported/thread_local_dtor.rs new file mode 100644 index 00000000000..85d66098302 --- /dev/null +++ b/library/std/src/sys/unsupported/thread_local_dtor.rs @@ -0,0 +1,9 @@ +#![unstable(feature = "thread_local_internals", issue = "none")] + +pub unsafe fn register_dtor(_t: *mut u8, _dtor: unsafe extern "C" fn(*mut u8)) { + // FIXME: right now there is no concept of "thread exit", but this is likely + // going to show up at some point in the form of an exported symbol that the + // wasm runtime is going to be expected to call. 
For now we basically just + // ignore the arguments, but if such a function starts to exist it will + // likely look like the OSX implementation in `unix/fast_thread_local.rs` +} diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs new file mode 100644 index 00000000000..c31b61cbf56 --- /dev/null +++ b/library/std/src/sys/unsupported/thread_local_key.rs @@ -0,0 +1,26 @@ +pub type Key = usize; + +#[inline] +pub unsafe fn create(_dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + panic!("should not be used on this target"); +} + +#[inline] +pub unsafe fn set(_key: Key, _value: *mut u8) { + panic!("should not be used on this target"); +} + +#[inline] +pub unsafe fn get(_key: Key) -> *mut u8 { + panic!("should not be used on this target"); +} + +#[inline] +pub unsafe fn destroy(_key: Key) { + panic!("should not be used on this target"); +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + panic!("should not be used on this target"); +} diff --git a/library/std/src/sys/unsupported/time.rs b/library/std/src/sys/unsupported/time.rs new file mode 100644 index 00000000000..8aaf1777f24 --- /dev/null +++ b/library/std/src/sys/unsupported/time.rs @@ -0,0 +1,53 @@ +use crate::time::Duration; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant(Duration); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime(Duration); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0)); + +impl Instant { + pub fn now() -> Instant { + panic!("time not implemented on this platform") + } + + pub const fn zero() -> Instant { + Instant(Duration::from_secs(0)) + } + + pub fn actually_monotonic() -> bool { + false + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.0.checked_sub(other.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_sub(*other)?)) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + panic!("time not implemented on this platform") + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_sub(*other)?)) + } +} diff --git a/library/std/src/sys/vxworks/alloc.rs b/library/std/src/sys/vxworks/alloc.rs new file mode 100644 index 00000000000..97a191d7232 --- /dev/null +++ b/library/std/src/sys/vxworks/alloc.rs @@ -0,0 +1,49 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr; +use crate::sys_common::alloc::{realloc_fallback, MIN_ALIGN}; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::malloc(layout.size()) as *mut u8 + } else { + aligned_malloc(&layout) + } + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::calloc(layout.size(), 1) as *mut u8 + } else { + let 
ptr = self.alloc(layout.clone()); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + libc::free(ptr as *mut libc::c_void) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 + } else { + realloc_fallback(self, ptr, layout, new_size) + } + } +} + +#[inline] +unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + let mut out = ptr::null_mut(); + let ret = libc::posix_memalign(&mut out, layout.align(), layout.size()); + if ret != 0 { ptr::null_mut() } else { out as *mut u8 } +} diff --git a/library/std/src/sys/vxworks/args.rs b/library/std/src/sys/vxworks/args.rs new file mode 100644 index 00000000000..adff6c489bb --- /dev/null +++ b/library/std/src/sys/vxworks/args.rs @@ -0,0 +1,95 @@ +#![allow(dead_code)] // runtime init functions not used during testing +use crate::ffi::OsString; +use crate::marker::PhantomData; +use crate::vec; + +/// One-time global initialization. +pub unsafe fn init(argc: isize, argv: *const *const u8) { + imp::init(argc, argv) +} + +/// One-time global cleanup. +pub unsafe fn cleanup() { + imp::cleanup() +} + +/// Returns the command line arguments +pub fn args() -> Args { + imp::args() +} + +pub struct Args { + iter: vec::IntoIter<OsString>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.iter.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.iter.next_back() + } +} + +mod imp { + use super::Args; + use crate::ffi::{CStr, OsString}; + use crate::marker::PhantomData; + use crate::ptr; + + use crate::sys_common::mutex::Mutex; + + static mut ARGC: isize = 0; + static mut ARGV: *const *const u8 = ptr::null(); + static LOCK: Mutex = Mutex::new(); + + pub unsafe fn init(argc: isize, argv: *const *const u8) { + let _guard = LOCK.lock(); + ARGC = argc; + ARGV = argv; + } + + pub unsafe fn cleanup() { + let _guard = LOCK.lock(); + ARGC = 0; + ARGV = ptr::null(); + } + + pub fn args() -> Args { + Args { iter: clone().into_iter(), _dont_send_or_sync_me: PhantomData } + } + + fn clone() -> Vec<OsString> { + unsafe { + let _guard = LOCK.lock(); + let ret = (0..ARGC) + .map(|i| { + let cstr = CStr::from_ptr(*ARGV.offset(i) as *const libc::c_char); + use crate::sys::vxworks::ext::ffi::OsStringExt; + OsStringExt::from_vec(cstr.to_bytes().to_vec()) + }) + .collect(); + return ret; + } + } +} diff --git a/library/std/src/sys/vxworks/cmath.rs b/library/std/src/sys/vxworks/cmath.rs new file mode 100644 index 00000000000..f327b69fc75 --- /dev/null +++ b/library/std/src/sys/vxworks/cmath.rs @@ -0,0 +1,32 @@ +#![cfg(not(test))] + +use libc::{c_double, c_float}; + +extern "C" { + pub fn acos(n: c_double) -> c_double; + pub fn acosf(n: c_float) -> c_float; + pub fn asin(n: c_double) -> c_double; + pub fn asinf(n: c_float) -> c_float; + pub fn atan(n: c_double) -> c_double; + pub fn atan2(a: c_double, b: c_double) -> c_double; + pub fn atan2f(a: c_float, b: c_float) -> c_float; + pub fn atanf(n: 
c_float) -> c_float; + pub fn cbrt(n: c_double) -> c_double; + pub fn cbrtf(n: c_float) -> c_float; + pub fn cosh(n: c_double) -> c_double; + pub fn coshf(n: c_float) -> c_float; + pub fn expm1(n: c_double) -> c_double; + pub fn expm1f(n: c_float) -> c_float; + pub fn fdim(a: c_double, b: c_double) -> c_double; + pub fn fdimf(a: c_float, b: c_float) -> c_float; + pub fn hypot(x: c_double, y: c_double) -> c_double; + pub fn hypotf(x: c_float, y: c_float) -> c_float; + pub fn log1p(n: c_double) -> c_double; + pub fn log1pf(n: c_float) -> c_float; + pub fn sinh(n: c_double) -> c_double; + pub fn sinhf(n: c_float) -> c_float; + pub fn tan(n: c_double) -> c_double; + pub fn tanf(n: c_float) -> c_float; + pub fn tanh(n: c_double) -> c_double; + pub fn tanhf(n: c_float) -> c_float; +} diff --git a/library/std/src/sys/vxworks/condvar.rs b/library/std/src/sys/vxworks/condvar.rs new file mode 100644 index 00000000000..5a77966d974 --- /dev/null +++ b/library/std/src/sys/vxworks/condvar.rs @@ -0,0 +1,89 @@ +use crate::cell::UnsafeCell; +use crate::sys::mutex::{self, Mutex}; +use crate::time::Duration; + +pub struct Condvar { + inner: UnsafeCell<libc::pthread_cond_t>, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +const TIMESPEC_MAX: libc::timespec = + libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 }; + +fn saturating_cast_to_time_t(value: u64) -> libc::time_t { + if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t } +} + +impl Condvar { + pub const fn new() -> Condvar { + // Might be moved and address is changing it is better to avoid + // initialization of potentially opaque OS data before it landed + Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) } + } + + pub unsafe fn init(&mut self) { + use crate::mem::MaybeUninit; + let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit(); + let r = libc::pthread_condattr_init(attr.as_mut_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); + assert_eq!(r, 0); + let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); + assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn notify_one(&self) { + let r = libc::pthread_cond_signal(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn notify_all(&self) { + let r = libc::pthread_cond_broadcast(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = libc::pthread_cond_wait(self.inner.get(), mutex::raw(mutex)); + debug_assert_eq!(r, 0); + } + + // This implementation is used on systems that support pthread_condattr_setclock + // where we configure condition variable to use monotonic clock (instead of + // default system clock). This approach avoids all problems that result + // from changes made to the system time. + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::mem; + + let mut now: libc::timespec = mem::zeroed(); + let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now); + assert_eq!(r, 0); + + // Nanosecond calculations can't overflow because both values are below 1e9. 
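// Worked example with assumed values: for dur = 2.7 s and
// now.tv_nsec = 600_000_000, the computation below gives
// nsec = 700_000_000 + 600_000_000 = 1_300_000_000; one whole second is
// carried into `sec` through the checked_add chain, and
// nsec % 1_000_000_000 leaves 300_000_000 as the tv_nsec of the final
// timespec.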
+ let nsec = dur.subsec_nanos() + now.tv_nsec as u32; + + let sec = saturating_cast_to_time_t(dur.as_secs()) + .checked_add((nsec / 1_000_000_000) as libc::time_t) + .and_then(|s| s.checked_add(now.tv_sec)); + let nsec = nsec % 1_000_000_000; + + let timeout = + sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX); + + let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), &timeout); + assert!(r == libc::ETIMEDOUT || r == 0); + r == 0 + } + + #[inline] + pub unsafe fn destroy(&self) { + let r = libc::pthread_cond_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } +} diff --git a/library/std/src/sys/vxworks/env.rs b/library/std/src/sys/vxworks/env.rs new file mode 100644 index 00000000000..fe1aedd5859 --- /dev/null +++ b/library/std/src/sys/vxworks/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = "vxworks"; + pub const OS: &str = "vxworks"; + pub const DLL_PREFIX: &str = "lib"; + pub const DLL_SUFFIX: &str = ".so"; + pub const DLL_EXTENSION: &str = "so"; + pub const EXE_SUFFIX: &str = ""; + pub const EXE_EXTENSION: &str = ""; +} diff --git a/library/std/src/sys/vxworks/ext/ffi.rs b/library/std/src/sys/vxworks/ext/ffi.rs new file mode 100644 index 00000000000..76b34a6b5d8 --- /dev/null +++ b/library/std/src/sys/vxworks/ext/ffi.rs @@ -0,0 +1,38 @@ +//! Unix-specific extension to the primitives in the `std::ffi` module +//! +//! # Examples +//! +//! ``` +//! use std::ffi::OsString; +//! use std::os::unix::ffi::OsStringExt; +//! +//! let bytes = b"foo".to_vec(); +//! +//! // OsStringExt::from_vec +//! let os_string = OsString::from_vec(bytes); +//! assert_eq!(os_string.to_str(), Some("foo")); +//! +//! // OsStringExt::into_vec +//! let bytes = os_string.into_vec(); +//! assert_eq!(bytes, b"foo"); +//! ``` +//! +//! ``` +//! use std::ffi::OsStr; +//! use std::os::unix::ffi::OsStrExt; +//! +//! let bytes = b"foo"; +//! +//! // OsStrExt::from_bytes +//! let os_str = OsStr::from_bytes(bytes); +//! assert_eq!(os_str.to_str(), Some("foo")); +//! +//! // OsStrExt::as_bytes +//! let bytes = os_str.as_bytes(); +//! assert_eq!(bytes, b"foo"); +//! ``` + +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use crate::sys_common::os_str_bytes::*; diff --git a/library/std/src/sys/vxworks/ext/fs.rs b/library/std/src/sys/vxworks/ext/fs.rs new file mode 100644 index 00000000000..b479fbaf346 --- /dev/null +++ b/library/std/src/sys/vxworks/ext/fs.rs @@ -0,0 +1,840 @@ +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs::{self, Permissions}; +use crate::io; +use crate::path::Path; +use crate::sys; +use crate::sys::platform::fs::MetadataExt as UnixMetadataExt; +use crate::sys_common::{AsInner, AsInnerMut, FromInner}; + +/// Unix-specific extensions to [`File`]. +/// +/// [`File`]: ../../../../std/fs/struct.File.html +#[stable(feature = "file_offset", since = "1.15.0")] +pub trait FileExt { + /// Reads a number of bytes starting from a given offset. + /// + /// Returns the number of bytes read. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Note that similar to [`File::read`], it is not an error to return with a + /// short read. 
+ /// + /// [`File::read`]: ../../../../std/fs/struct.File.html#method.read + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs::File; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let mut buf = [0u8; 8]; + /// let file = File::open("foo.txt")?; + /// + /// // We now read 8 bytes from the offset 10. + /// let num_bytes_read = file.read_at(&mut buf, 10)?; + /// println!("read {} bytes: {:?}", num_bytes_read, buf); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>; + + /// Reads the exact number of byte required to fill `buf` from the given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Similar to [`Read::read_exact`] but uses [`read_at`] instead of `read`. + /// + /// [`Read::read_exact`]: ../../../../std/io/trait.Read.html#method.read_exact + /// [`read_at`]: #tymethod.read_at + /// + /// # Errors + /// + /// If this function encounters an error of the kind + /// [`ErrorKind::Interrupted`] then the error is ignored and the operation + /// will continue. + /// + /// If this function encounters an "end of file" before completely filling + /// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`]. + /// The contents of `buf` are unspecified in this case. + /// + /// If any other read error is encountered then this function immediately + /// returns. The contents of `buf` are unspecified in this case. + /// + /// If this function returns an error, it is unspecified how many bytes it + /// has read, but it will never read more than would be necessary to + /// completely fill the buffer. + /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`ErrorKind::UnexpectedEof`]: ../../../../std/io/enum.ErrorKind.html#variant.UnexpectedEof + /// + /// # Examples + /// + /// ```no_run + /// #![feature(rw_exact_all_at)] + /// use std::io; + /// use std::fs::File; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let mut buf = [0u8; 8]; + /// let file = File::open("foo.txt")?; + /// + /// // We now read exactly 8 bytes from the offset 10. + /// file.read_exact_at(&mut buf, 10)?; + /// println!("read {} bytes: {:?}", buf.len(), buf); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.read_at(buf, offset) { + Ok(0) => break, + Ok(n) => { + let tmp = buf; + buf = &mut tmp[n..]; + offset += n as u64; + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + if !buf.is_empty() { + Err(io::Error::new(io::ErrorKind::UnexpectedEof, "failed to fill whole buffer")) + } else { + Ok(()) + } + } + + /// Writes a number of bytes starting from a given offset. + /// + /// Returns the number of bytes written. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// When writing beyond the end of the file, the file is appropriately + /// extended and the intermediate bytes are initialized with the value 0. 
+ /// + /// Note that similar to [`File::write`], it is not an error to return a + /// short write. + /// + /// [`File::write`]: ../../../../std/fs/struct.File.html#method.write + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::io; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let file = File::open("foo.txt")?; + /// + /// // We now write at the offset 10. + /// file.write_at(b"sushi", 10)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>; + + /// Attempts to write an entire buffer starting from a given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// This method will continuously call [`write_at`] until there is no more data + /// to be written or an error of non-[`ErrorKind::Interrupted`] kind is + /// returned. This method will not return until the entire buffer has been + /// successfully written or such an error occurs. The first error that is + /// not of [`ErrorKind::Interrupted`] kind generated from this method will be + /// returned. + /// + /// # Errors + /// + /// This function will return the first error of + /// non-[`ErrorKind::Interrupted`] kind that [`write_at`] returns. + /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`write_at`]: #tymethod.write_at + /// + /// # Examples + /// + /// ```no_run + /// #![feature(rw_exact_all_at)] + /// use std::fs::File; + /// use std::io; + /// use std::os::unix::prelude::FileExt; + /// + /// fn main() -> io::Result<()> { + /// let file = File::open("foo.txt")?; + /// + /// // We now write at the offset 10. + /// file.write_all_at(b"sushi", 10)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.write_at(buf, offset) { + Ok(0) => { + return Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to write whole buffer", + )); + } + Ok(n) => { + buf = &buf[n..]; + offset += n as u64 + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + Ok(()) + } +} + +#[stable(feature = "file_offset", since = "1.15.0")] +impl FileExt for fs::File { + fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.as_inner().read_at(buf, offset) + } + fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.as_inner().write_at(buf, offset) + } +} + +/// Unix-specific extensions to [`fs::Permissions`]. +/// +/// [`fs::Permissions`]: ../../../../std/fs/struct.Permissions.html +#[stable(feature = "fs_ext", since = "1.1.0")] +pub trait PermissionsExt { + /// Returns the underlying raw `st_mode` bits that contain the standard + /// Unix permissions for this file. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::os::unix::fs::PermissionsExt; + /// + /// fn main() -> std::io::Result<()> { + /// let f = File::create("foo.txt")?; + /// let metadata = f.metadata()?; + /// let permissions = metadata.permissions(); + /// + /// println!("permissions: {}", permissions.mode()); + /// Ok(()) } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn mode(&self) -> u32; + + /// Sets the underlying raw bits for this set of permissions. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::os::unix::fs::PermissionsExt; + /// + /// fn main() -> std::io::Result<()> { + /// let f = File::create("foo.txt")?; + /// let metadata = f.metadata()?; + /// let mut permissions = metadata.permissions(); + /// + /// permissions.set_mode(0o644); // Read/write for owner and read for others. + /// assert_eq!(permissions.mode(), 0o644); + /// Ok(()) } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn set_mode(&mut self, mode: u32); + + /// Creates a new instance of `Permissions` from the given set of Unix + /// permission bits. + /// + /// # Examples + /// + /// ``` + /// use std::fs::Permissions; + /// use std::os::unix::fs::PermissionsExt; + /// + /// // Read/write for owner and read for others. + /// let permissions = Permissions::from_mode(0o644); + /// assert_eq!(permissions.mode(), 0o644); + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn from_mode(mode: u32) -> Self; +} + +#[stable(feature = "fs_ext", since = "1.1.0")] +impl PermissionsExt for Permissions { + fn mode(&self) -> u32 { + self.as_inner().mode() + } + + fn set_mode(&mut self, mode: u32) { + *self = Permissions::from_inner(FromInner::from_inner(mode)); + } + + fn from_mode(mode: u32) -> Permissions { + Permissions::from_inner(FromInner::from_inner(mode)) + } +} + +/// Unix-specific extensions to [`fs::OpenOptions`]. +/// +/// [`fs::OpenOptions`]: ../../../../std/fs/struct.OpenOptions.html +#[stable(feature = "fs_ext", since = "1.1.0")] +pub trait OpenOptionsExt { + /// Sets the mode bits that a new file will be created with. + /// + /// If a new file is created as part of an `OpenOptions::open` call then this + /// specified `mode` will be used as the permission bits for the new file. + /// If no `mode` is set, the default of `0o666` will be used. + /// The operating system masks out bits with the system's `umask`, to produce + /// the final permissions. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::OpenOptionsExt; + /// + /// # fn main() { + /// let mut options = OpenOptions::new(); + /// options.mode(0o644); // Give read/write for owner and read for others. + /// let file = options.open("foo.txt"); + /// # } + /// ``` + #[stable(feature = "fs_ext", since = "1.1.0")] + fn mode(&mut self, mode: u32) -> &mut Self; + + /// Pass custom flags to the `flags` argument of `open`. + /// + /// The bits that define the access mode are masked out with `O_ACCMODE`, to + /// ensure they do not interfere with the access mode set by Rusts options. + /// + /// Custom flags can only set flags, not remove flags set by Rusts options. + /// This options overwrites any previously set custom flags. 
+ /// + /// # Examples + /// + /// ```no_run + /// # #![feature(libc)] + /// extern crate libc; + /// use std::fs::OpenOptions; + /// use std::os::unix::fs::OpenOptionsExt; + /// + /// # fn main() { + /// let mut options = OpenOptions::new(); + /// options.write(true); + /// if cfg!(unix) { + /// options.custom_flags(libc::O_NOFOLLOW); + /// } + /// let file = options.open("foo.txt"); + /// # } + /// ``` + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn custom_flags(&mut self, flags: i32) -> &mut Self; +} + +/*#[stable(feature = "fs_ext", since = "1.1.0")] +impl OpenOptionsExt for OpenOptions { + fn mode(&mut self, mode: u32) -> &mut OpenOptions { + self.as_inner_mut().mode(mode); self + } + + fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions { + self.as_inner_mut().custom_flags(flags); self + } +} +*/ + +/// Unix-specific extensions to [`fs::Metadata`]. +/// +/// [`fs::Metadata`]: ../../../../std/fs/struct.Metadata.html +#[stable(feature = "metadata_ext", since = "1.1.0")] +pub trait MetadataExt { + /// Returns the ID of the device containing the file. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let dev_id = meta.dev(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn dev(&self) -> u64; + /// Returns the inode number. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let inode = meta.ino(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn ino(&self) -> u64; + /// Returns the rights applied to this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let mode = meta.mode(); + /// let user_has_write_access = mode & 0o200; + /// let user_has_read_write_access = mode & 0o600; + /// let group_has_read_access = mode & 0o040; + /// let others_have_exec_access = mode & 0o001; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn mode(&self) -> u32; + /// Returns the number of hard links pointing to this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nb_hard_links = meta.nlink(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn nlink(&self) -> u64; + /// Returns the user ID of the owner of this file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let user_id = meta.uid(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn uid(&self) -> u32; + /// Returns the group ID of the owner of this file. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let group_id = meta.gid(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn gid(&self) -> u32; + /// Returns the device ID of this file (if it is a special one). + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let device_id = meta.rdev(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn rdev(&self) -> u64; + /// Returns the total size of this file in bytes. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let file_size = meta.size(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn size(&self) -> u64; + /// Returns the time of the last access to the file. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let last_access_time = meta.atime(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn atime(&self) -> i64; + /// Returns the time of the last access to the file in nanoseconds. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_access_time = meta.atime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn mtime(&self) -> i64; + /// Returns the time of the last modification of the file in nanoseconds. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_modification_time = meta.mtime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn ctime(&self) -> i64; + /// Returns the time of the last status change of the file in nanoseconds. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let nano_last_status_change_time = meta.ctime_nsec(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn blksize(&self) -> u64; + /// Returns the number of blocks allocated to the file, in 512-byte units. + /// + /// Please note that this may be smaller than `st_size / 512` when the file has holes. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::MetadataExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("some_file")?; + /// let blocks = meta.blocks(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn blocks(&self) -> u64; + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn attrib(&self) -> u8; +} + +#[stable(feature = "metadata_ext", since = "1.1.0")] +impl MetadataExt for fs::Metadata { + fn dev(&self) -> u64 { + self.st_dev() + } + fn ino(&self) -> u64 { + self.st_ino() + } + fn mode(&self) -> u32 { + self.st_mode() + } + fn nlink(&self) -> u64 { + self.st_nlink() + } + fn uid(&self) -> u32 { + self.st_uid() + } + fn gid(&self) -> u32 { + self.st_gid() + } + fn rdev(&self) -> u64 { + self.st_rdev() + } + fn size(&self) -> u64 { + self.st_size() + } + fn atime(&self) -> i64 { + self.st_atime() + } + fn mtime(&self) -> i64 { + self.st_mtime() + } + fn ctime(&self) -> i64 { + self.st_ctime() + } + fn blksize(&self) -> u64 { + self.st_blksize() + } + fn blocks(&self) -> u64 { + self.st_blocks() + } + fn attrib(&self) -> u8 { + self.st_attrib() + } +} + +/// Unix-specific extensions for [`FileType`]. +/// +/// Adds support for special Unix file types such as block/character devices, +/// pipes, and sockets. +/// +/// [`FileType`]: ../../../../std/fs/struct.FileType.html +#[stable(feature = "file_type_ext", since = "1.5.0")] +pub trait FileTypeExt { + /// Returns whether this file type is a block device. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("block_device_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_block_device()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_block_device(&self) -> bool; + /// Returns whether this file type is a char device. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("char_device_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_char_device()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_char_device(&self) -> bool; + /// Returns whether this file type is a fifo. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("fifo_file")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_fifo()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_fifo(&self) -> bool; + /// Returns whether this file type is a socket. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs; + /// use std::os::unix::fs::FileTypeExt; + /// use std::io; + /// + /// fn main() -> io::Result<()> { + /// let meta = fs::metadata("unix.socket")?; + /// let file_type = meta.file_type(); + /// assert!(file_type.is_socket()); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_type_ext", since = "1.5.0")] + fn is_socket(&self) -> bool; +} + +#[stable(feature = "file_type_ext", since = "1.5.0")] +impl FileTypeExt for fs::FileType { + fn is_block_device(&self) -> bool { + self.as_inner().is(libc::S_IFBLK) + } + fn is_char_device(&self) -> bool { + self.as_inner().is(libc::S_IFCHR) + } + fn is_fifo(&self) -> bool { + self.as_inner().is(libc::S_IFIFO) + } + fn is_socket(&self) -> bool { + self.as_inner().is(libc::S_IFSOCK) + } +} + +/// Unix-specific extension methods for [`fs::DirEntry`]. +/// +/// [`fs::DirEntry`]: ../../../../std/fs/struct.DirEntry.html +#[stable(feature = "dir_entry_ext", since = "1.1.0")] +pub trait DirEntryExt { + /// Returns the underlying `d_ino` field in the contained `dirent` + /// structure. + /// + /// # Examples + /// + /// ``` + /// use std::fs; + /// use std::os::unix::fs::DirEntryExt; + /// + /// if let Ok(entries) = fs::read_dir(".") { + /// for entry in entries { + /// if let Ok(entry) = entry { + /// // Here, `entry` is a `DirEntry`. + /// println!("{:?}: {}", entry.file_name(), entry.ino()); + /// } + /// } + /// } + /// ``` + #[stable(feature = "dir_entry_ext", since = "1.1.0")] + fn ino(&self) -> u64; +} + +#[stable(feature = "dir_entry_ext", since = "1.1.0")] +impl DirEntryExt for fs::DirEntry { + fn ino(&self) -> u64 { + self.as_inner().ino() + } +} + +/// Creates a new symbolic link on the filesystem. +/// +/// The `dst` path will be a symbolic link pointing to the `src` path. +/// +/// # Note +/// +/// On Windows, you must specify whether a symbolic link points to a file +/// or directory. Use `os::windows::fs::symlink_file` to create a +/// symbolic link to a file, or `os::windows::fs::symlink_dir` to create a +/// symbolic link to a directory. Additionally, the process must have +/// `SeCreateSymbolicLinkPrivilege` in order to be able to create a +/// symbolic link. +/// +/// # Examples +/// +/// ```no_run +/// use std::os::unix::fs; +/// +/// fn main() -> std::io::Result<()> { +/// fs::symlink("a.txt", "b.txt")?; +/// Ok(()) +/// } +/// ``` +#[stable(feature = "symlink", since = "1.1.0")] +pub fn symlink<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()> { + sys::fs::symlink(src.as_ref(), dst.as_ref()) +} + +/// Unix-specific extensions to [`fs::DirBuilder`]. +/// +/// [`fs::DirBuilder`]: ../../../../std/fs/struct.DirBuilder.html +#[stable(feature = "dir_builder", since = "1.6.0")] +pub trait DirBuilderExt { + /// Sets the mode to create new directories with. This option defaults to + /// 0o777. 
+ /// + /// # Examples + /// + /// ```no_run + /// use std::fs::DirBuilder; + /// use std::os::unix::fs::DirBuilderExt; + /// + /// let mut builder = DirBuilder::new(); + /// builder.mode(0o755); + /// ``` + #[stable(feature = "dir_builder", since = "1.6.0")] + fn mode(&mut self, mode: u32) -> &mut Self; +} + +#[stable(feature = "dir_builder", since = "1.6.0")] +impl DirBuilderExt for fs::DirBuilder { + fn mode(&mut self, mode: u32) -> &mut fs::DirBuilder { + self.as_inner_mut().set_mode(mode); + self + } +} diff --git a/library/std/src/sys/vxworks/ext/io.rs b/library/std/src/sys/vxworks/ext/io.rs new file mode 100644 index 00000000000..25c6e26d96e --- /dev/null +++ b/library/std/src/sys/vxworks/ext/io.rs @@ -0,0 +1,189 @@ +//! Unix-specific extensions to general I/O primitives + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs; +use crate::io; +use crate::net; +use crate::os::raw; +use crate::sys; +use crate::sys_common::{self, AsInner, FromInner, IntoInner}; + +/// Raw file descriptors. +#[stable(feature = "rust1", since = "1.0.0")] +pub type RawFd = raw::c_int; + +/// A trait to extract the raw unix file descriptor from an underlying +/// object. +/// +/// This is only available on unix platforms and must be imported in order +/// to call the method. Windows platforms have a corresponding `AsRawHandle` +/// and `AsRawSocket` set of traits. +#[stable(feature = "rust1", since = "1.0.0")] +pub trait AsRawFd { + /// Extracts the raw file descriptor. + /// + /// This method does **not** pass ownership of the raw file descriptor + /// to the caller. The descriptor is only guaranteed to be valid while + /// the original object has not yet been destroyed. + #[stable(feature = "rust1", since = "1.0.0")] + fn as_raw_fd(&self) -> RawFd; +} + +/// A trait to express the ability to construct an object from a raw file +/// descriptor. +#[stable(feature = "from_raw_os", since = "1.1.0")] +pub trait FromRawFd { + /// Constructs a new instance of `Self` from the given raw file + /// descriptor. + /// + /// This function **consumes ownership** of the specified file + /// descriptor. The returned object will take responsibility for closing + /// it when the object goes out of scope. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + #[stable(feature = "from_raw_os", since = "1.1.0")] + unsafe fn from_raw_fd(fd: RawFd) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw file descriptor. +#[stable(feature = "into_raw_os", since = "1.4.0")] +pub trait IntoRawFd { + /// Consumes this object, returning the raw underlying file descriptor. + /// + /// This function **transfers ownership** of the underlying file descriptor + /// to the caller. Callers are then the unique owners of the file descriptor + /// and must close the descriptor once it's no longer needed. 
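Taken together, the three traits above encode descriptor ownership: `as_raw_fd` borrows, `into_raw_fd` hands the descriptor to the caller, and `from_raw_fd` gives it back to an owning object. A small, hedged round-trip sketch, assuming a unix-style target where these traits are re-exported from `std::os::unix::io` (the path "foo.txt" is a placeholder):

use std::fs::File;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};

fn main() -> io::Result<()> {
    let file = File::open("foo.txt")?;
    let fd = file.as_raw_fd();    // borrowed: `file` still owns and will close it
    let raw = file.into_raw_fd(); // ownership transferred: nothing closes it now
    assert_eq!(fd, raw);
    // Wrapping it again makes the new File responsible for closing the descriptor.
    let _file = unsafe { File::from_raw_fd(raw) };
    Ok(())
}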
+ #[stable(feature = "into_raw_os", since = "1.4.0")] + fn into_raw_fd(self) -> RawFd; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for fs::File { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for fs::File { + unsafe fn from_raw_fd(fd: RawFd) -> fs::File { + fs::File::from_inner(sys::fs::File::from_inner(fd)) + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for fs::File { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { + libc::STDIN_FILENO + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { + libc::STDOUT_FILENO + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { + libc::STDERR_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StdinLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDIN_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StdoutLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDOUT_FILENO + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawFd for io::StderrLock<'a> { + fn as_raw_fd(&self) -> RawFd { + libc::STDERR_FILENO + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::TcpStream { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::TcpListener { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawFd for net::UdpSocket { + fn as_raw_fd(&self) -> RawFd { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::TcpStream { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { + let socket = sys::net::Socket::from_inner(fd); + net::TcpStream::from_inner(sys_common::net::TcpStream::from_inner(socket)) + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::TcpListener { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpListener { + let socket = sys::net::Socket::from_inner(fd); + net::TcpListener::from_inner(sys_common::net::TcpListener::from_inner(socket)) + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawFd for net::UdpSocket { + unsafe fn from_raw_fd(fd: RawFd) -> net::UdpSocket { + let socket = sys::net::Socket::from_inner(fd); + net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(socket)) + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::TcpStream { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::TcpListener { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for net::UdpSocket { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner() + } +} diff --git a/library/std/src/sys/vxworks/ext/mod.rs b/library/std/src/sys/vxworks/ext/mod.rs new file mode 100644 index 
00000000000..8fa9bd9d1e2 --- /dev/null +++ b/library/std/src/sys/vxworks/ext/mod.rs @@ -0,0 +1,24 @@ +#![stable(feature = "rust1", since = "1.0.0")] +#![allow(missing_docs)] + +pub mod ffi; +pub mod fs; +pub mod io; +pub mod process; +pub mod raw; + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::ffi::{OsStrExt, OsStringExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::fs::{FileTypeExt, MetadataExt, OpenOptionsExt, PermissionsExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::process::ExitStatusExt; +} diff --git a/library/std/src/sys/vxworks/ext/process.rs b/library/std/src/sys/vxworks/ext/process.rs new file mode 100644 index 00000000000..c3710f4b912 --- /dev/null +++ b/library/std/src/sys/vxworks/ext/process.rs @@ -0,0 +1,234 @@ +//! Unix-specific extensions to primitives in the `std::process` module. + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::ffi::OsStr; +use crate::io; +use crate::process; +use crate::sys; +use crate::sys::vxworks::ext::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; + +/// Unix-specific extensions to the [`process::Command`] builder. +/// +/// [`process::Command`]: ../../../../std/process/struct.Command.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait CommandExt { + /// Sets the child process's user ID. This translates to a + /// `setuid` call in the child process. Failure in the `setuid` + /// call will cause the spawn to fail. + #[stable(feature = "rust1", since = "1.0.0")] + fn uid(&mut self, id: u16) -> &mut process::Command; + + /// Similar to `uid`, but sets the group ID of the child process. This has + /// the same semantics as the `uid` field. + #[stable(feature = "rust1", since = "1.0.0")] + fn gid(&mut self, id: u16) -> &mut process::Command; + + /// Schedules a closure to be run just before the `exec` function is + /// invoked. + /// + /// The closure is allowed to return an I/O error whose OS error code will + /// be communicated back to the parent and returned as an error from when + /// the spawn was requested. + /// + /// Multiple closures can be registered and they will be called in order of + /// their registration. If a closure returns `Err` then no further closures + /// will be called and the spawn operation will immediately return with a + /// failure. + /// + /// # Notes and Safety + /// + /// This closure will be run in the context of the child process after a + /// `fork`. This primarily means that any modifications made to memory on + /// behalf of this closure will **not** be visible to the parent process. + /// This is often a very constrained environment where normal operations + /// like `malloc` or acquiring a mutex are not guaranteed to work (due to + /// other threads perhaps still running when the `fork` was run). + /// + /// This also means that all resources such as file descriptors and + /// memory-mapped regions got duplicated. It is your responsibility to make + /// sure that the closure does not violate library invariants by making + /// invalid use of these duplicates. 
+ /// + /// When this closure is run, aspects such as the stdio file descriptors and + /// working directory have successfully been changed, so output to these + /// locations may not appear where intended. + #[stable(feature = "process_pre_exec", since = "1.34.0")] + unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command + where + F: FnMut() -> io::Result<()> + Send + Sync + 'static; + + /// Schedules a closure to be run just before the `exec` function is + /// invoked. + /// + /// This method is stable and usable, but it should be unsafe. To fix + /// that, it got deprecated in favor of the unsafe [`pre_exec`]. + /// + /// [`pre_exec`]: #tymethod.pre_exec + #[stable(feature = "process_exec", since = "1.15.0")] + #[rustc_deprecated(since = "1.37.0", reason = "should be unsafe, use `pre_exec` instead")] + fn before_exec<F>(&mut self, f: F) -> &mut process::Command + where + F: FnMut() -> io::Result<()> + Send + Sync + 'static, + { + unsafe { self.pre_exec(f) } + } + + /// Performs all the required setup by this `Command`, followed by calling + /// the `execvp` syscall. + /// + /// On success this function will not return, and otherwise it will return + /// an error indicating why the exec (or another part of the setup of the + /// `Command`) failed. + /// + /// `exec` not returning has the same implications as calling + /// [`process::exit`] – no destructors on the current stack or any other + /// thread’s stack will be run. Therefore, it is recommended to only call + /// `exec` at a point where it is fine to not run any destructors. Note, + /// that the `execvp` syscall independently guarantees that all memory is + /// freed and all file descriptors with the `CLOEXEC` option (set by default + /// on all file descriptors opened by the standard library) are closed. + /// + /// This function, unlike `spawn`, will **not** `fork` the process to create + /// a new child. Like spawn, however, the default behavior for the stdio + /// descriptors will be to inherited from the current process. + /// + /// [`process::exit`]: ../../../process/fn.exit.html + /// + /// # Notes + /// + /// The process may be in a "broken state" if this function returns in + /// error. For example the working directory, environment variables, signal + /// handling settings, various user/group information, or aspects of stdio + /// file descriptors may have changed. If a "transactional spawn" is + /// required to gracefully handle errors it is recommended to use the + /// cross-platform `spawn` instead. + #[stable(feature = "process_exec2", since = "1.9.0")] + fn exec(&mut self) -> io::Error; + + /// Set executable argument + /// + /// Set the first process argument, `argv[0]`, to something other than the + /// default executable path. 
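+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (added here, not part of the original docs); it assumes a
+    /// `/bin/sh` binary is present on the target:
+    ///
+    /// ```no_run
+    /// use std::process::Command;
+    /// use std::os::unix::process::CommandExt;
+    ///
+    /// Command::new("/bin/sh")
+    ///     .arg0("sh")
+    ///     .spawn()
+    ///     .expect("sh command failed to spawn");
+    /// ```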
+ #[stable(feature = "process_set_argv0", since = "1.45.0")] + fn arg0<S>(&mut self, arg: S) -> &mut process::Command + where + S: AsRef<OsStr>; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl CommandExt for process::Command { + fn uid(&mut self, id: u16) -> &mut process::Command { + self.as_inner_mut().uid(id); + self + } + + fn gid(&mut self, id: u16) -> &mut process::Command { + self.as_inner_mut().gid(id); + self + } + + unsafe fn pre_exec<F>(&mut self, f: F) -> &mut process::Command + where + F: FnMut() -> io::Result<()> + Send + Sync + 'static, + { + self.as_inner_mut().pre_exec(Box::new(f)); + self + } + + fn exec(&mut self) -> io::Error { + self.as_inner_mut().exec(sys::process::Stdio::Inherit) + } + + fn arg0<S>(&mut self, arg: S) -> &mut process::Command + where + S: AsRef<OsStr>, + { + self.as_inner_mut().set_arg_0(arg.as_ref()); + self + } +} + +/// Unix-specific extensions to [`process::ExitStatus`]. +/// +/// [`process::ExitStatus`]: ../../../../std/process/struct.ExitStatus.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait ExitStatusExt { + /// Creates a new `ExitStatus` from the raw underlying `i32` return value of + /// a process. + #[stable(feature = "exit_status_from", since = "1.12.0")] + fn from_raw(raw: i32) -> Self; + + /// If the process was terminated by a signal, returns that signal. + #[stable(feature = "rust1", since = "1.0.0")] + fn signal(&self) -> Option<i32>; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExitStatusExt for process::ExitStatus { + fn from_raw(raw: i32) -> Self { + process::ExitStatus::from_inner(From::from(raw)) + } + + fn signal(&self) -> Option<i32> { + self.as_inner().signal() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl FromRawFd for process::Stdio { + unsafe fn from_raw_fd(fd: RawFd) -> process::Stdio { + let fd = sys::fd::FileDesc::new(fd); + let io = sys::process::Stdio::Fd(fd); + process::Stdio::from_inner(io) + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStdin { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStdout { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawFd for process::ChildStderr { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStdin { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStdout { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawFd for process::ChildStderr { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +/// Returns the OS-assigned process identifier associated with this process's parent. 
+#[stable(feature = "unix_ppid", since = "1.27.0")] +pub fn parent_id() -> u32 { + crate::sys::os::getppid() +} diff --git a/library/std/src/sys/vxworks/ext/raw.rs b/library/std/src/sys/vxworks/ext/raw.rs new file mode 100644 index 00000000000..1f134f4e2d1 --- /dev/null +++ b/library/std/src/sys/vxworks/ext/raw.rs @@ -0,0 +1,5 @@ +#![stable(feature = "raw_ext", since = "1.1.0")] + +#[doc(inline)] +#[stable(feature = "pthread_t", since = "1.8.0")] +pub use crate::sys::platform::raw::pthread_t; diff --git a/library/std/src/sys/vxworks/fd.rs b/library/std/src/sys/vxworks/fd.rs new file mode 100644 index 00000000000..ea186846929 --- /dev/null +++ b/library/std/src/sys/vxworks/fd.rs @@ -0,0 +1,201 @@ +#![unstable(reason = "not public", issue = "none", feature = "fd")] + +use crate::cmp; +use crate::io::{self, Initializer, IoSlice, IoSliceMut, Read}; +use crate::mem; +use crate::sys::cvt; +use crate::sys_common::AsInner; + +use libc::{self, c_int, c_void, ssize_t}; + +#[derive(Debug)] +pub struct FileDesc { + fd: c_int, +} + +// The maximum read limit on most POSIX-like systems is `SSIZE_MAX`, +// with the man page quoting that if the count of bytes to read is +// greater than `SSIZE_MAX` the result is "unspecified". +const READ_LIMIT: usize = ssize_t::MAX as usize; + +impl FileDesc { + pub fn new(fd: c_int) -> FileDesc { + FileDesc { fd: fd } + } + + pub fn raw(&self) -> c_int { + self.fd + } + + /// Extracts the actual file descriptor without closing it. + pub fn into_raw(self) -> c_int { + let fd = self.fd; + mem::forget(self); + fd + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT)) + })?; + Ok(ret as usize) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::readv( + self.fd, + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), c_int::MAX as usize) as c_int, + ) + })?; + Ok(ret as usize) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + true + } + + pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> { + let mut me = self; + (&mut me).read_to_end(buf) + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + unsafe fn cvt_pread( + fd: c_int, + buf: *mut c_void, + count: usize, + offset: i64, + ) -> io::Result<isize> { + use libc::pread; + cvt(pread(fd, buf, count, offset)) + } + + unsafe { + cvt_pread( + self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), READ_LIMIT), + offset as i64, + ) + .map(|n| n as usize) + } + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT)) + })?; + Ok(ret as usize) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::writev( + self.fd, + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), c_int::MAX as usize) as c_int, + ) + })?; + Ok(ret as usize) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + unsafe fn cvt_pwrite( + fd: c_int, + buf: *const c_void, + count: usize, + offset: i64, + ) -> io::Result<isize> { + use libc::pwrite; + cvt(pwrite(fd, buf, count, offset)) + } + + unsafe { + cvt_pwrite( + self.fd, + buf.as_ptr() as *const c_void, + cmp::min(buf.len(), READ_LIMIT), + offset as i64, + ) + .map(|n| n as 
usize) + } + } + + pub fn get_cloexec(&self) -> io::Result<bool> { + unsafe { Ok((cvt(libc::fcntl(self.fd, libc::F_GETFD))? & libc::FD_CLOEXEC) != 0) } + } + + pub fn set_cloexec(&self) -> io::Result<()> { + unsafe { + let previous = cvt(libc::fcntl(self.fd, libc::F_GETFD))?; + let new = previous | libc::FD_CLOEXEC; + if new != previous { + cvt(libc::fcntl(self.fd, libc::F_SETFD, new))?; + } + Ok(()) + } + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let v = nonblocking as c_int; + cvt(libc::ioctl(self.fd, libc::FIONBIO, &v))?; + Ok(()) + } + } + + // refer to pxPipeDrv library documentation. + // VxWorks uses fcntl to set O_NONBLOCK to the pipes + pub fn set_nonblocking_pipe(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let mut flags = cvt(libc::fcntl(self.fd, libc::F_GETFL, 0))?; + flags = if nonblocking { flags | libc::O_NONBLOCK } else { flags & !libc::O_NONBLOCK }; + cvt(libc::fcntl(self.fd, libc::F_SETFL, flags))?; + Ok(()) + } + } + + pub fn duplicate(&self) -> io::Result<FileDesc> { + let fd = self.raw(); + match cvt(unsafe { libc::fcntl(fd, libc::F_DUPFD_CLOEXEC, 0) }) { + Ok(newfd) => Ok(FileDesc::new(newfd)), + Err(e) => return Err(e), + } + } +} + +impl<'a> Read for &'a FileDesc { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + (**self).read(buf) + } + + #[inline] + unsafe fn initializer(&self) -> Initializer { + Initializer::nop() + } +} + +impl AsInner<c_int> for FileDesc { + fn as_inner(&self) -> &c_int { + &self.fd + } +} + +impl Drop for FileDesc { + fn drop(&mut self) { + // Note that errors are ignored when closing a file descriptor. The + // reason for this is that if an error occurs we don't actually know if + // the file descriptor was closed or not, and if we retried (for + // something like EINTR), we might close another valid file descriptor + // (opened after we closed ours. 
+ let _ = unsafe { libc::close(self.fd) }; + } +} diff --git a/library/std/src/sys/vxworks/fs.rs b/library/std/src/sys/vxworks/fs.rs new file mode 100644 index 00000000000..557e65ca01b --- /dev/null +++ b/library/std/src/sys/vxworks/fs.rs @@ -0,0 +1,625 @@ +// copies from linuxx +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io::{self, Error, ErrorKind, IoSlice, IoSliceMut, SeekFrom}; +use crate::mem; +use crate::path::{Path, PathBuf}; +use crate::ptr; +use crate::sync::Arc; +use crate::sys::fd::FileDesc; +use crate::sys::time::SystemTime; +use crate::sys::vxworks::ext::ffi::OsStrExt; +use crate::sys::vxworks::ext::ffi::OsStringExt; +use crate::sys::{cvt, cvt_r}; +use crate::sys_common::{AsInner, FromInner}; +use libc::{self, c_int, mode_t, off_t, stat64}; +use libc::{dirent, ftruncate, lseek, open, readdir_r as readdir64_r}; +pub struct File(FileDesc); + +#[derive(Clone)] +pub struct FileAttr { + stat: stat64, +} + +// all DirEntry's will have a reference to this struct +struct InnerReadDir { + dirp: Dir, + root: PathBuf, +} + +#[derive(Clone)] +pub struct ReadDir { + inner: Arc<InnerReadDir>, + end_of_stream: bool, +} + +struct Dir(*mut libc::DIR); + +unsafe impl Send for Dir {} +unsafe impl Sync for Dir {} + +pub struct DirEntry { + entry: dirent, + dir: ReadDir, +} + +#[derive(Clone, Debug)] +pub struct OpenOptions { + // generic + read: bool, + write: bool, + append: bool, + truncate: bool, + create: bool, + create_new: bool, + // system-specific + custom_flags: i32, + mode: mode_t, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FilePermissions { + mode: mode_t, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct FileType { + mode: mode_t, +} + +#[derive(Debug)] +pub struct DirBuilder { + mode: mode_t, +} + +impl FileAttr { + pub fn size(&self) -> u64 { + self.stat.st_size as u64 + } + pub fn perm(&self) -> FilePermissions { + FilePermissions { mode: (self.stat.st_mode as mode_t) } + } + + pub fn file_type(&self) -> FileType { + FileType { mode: self.stat.st_mode as mode_t } + } + + pub fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_mtime as libc::time_t, + tv_nsec: 0, // hack 2.0; + })) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(libc::timespec { + tv_sec: self.stat.st_atime as libc::time_t, + tv_nsec: 0, // hack - a proper fix would be better + })) + } + + pub fn created(&self) -> io::Result<SystemTime> { + Err(io::Error::new( + io::ErrorKind::Other, + "creation time is not available on this platform currently", + )) + } +} + +impl AsInner<stat64> for FileAttr { + fn as_inner(&self) -> &stat64 { + &self.stat + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + // check if any class (owner, group, others) has write permission + self.mode & 0o222 == 0 + } + + pub fn set_readonly(&mut self, readonly: bool) { + if readonly { + // remove write permission for all classes; equivalent to `chmod a-w <file>` + self.mode &= !0o222; + } else { + // add write permission for all classes; equivalent to `chmod a+w <file>` + self.mode |= 0o222; + } + } + pub fn mode(&self) -> u32 { + self.mode as u32 + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + self.is(libc::S_IFDIR) + } + pub fn is_file(&self) -> bool { + self.is(libc::S_IFREG) + } + pub fn is_symlink(&self) -> bool { + self.is(libc::S_IFLNK) + } + + pub fn is(&self, mode: mode_t) -> bool { + self.mode & libc::S_IFMT == mode + } +} + +impl FromInner<u32> for 
FilePermissions { + fn from_inner(mode: u32) -> FilePermissions { + FilePermissions { mode: mode as mode_t } + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame. + // Thus the result will be e g 'ReadDir("/home")' + fmt::Debug::fmt(&*self.inner.root, f) + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + fn next(&mut self) -> Option<io::Result<DirEntry>> { + if self.end_of_stream { + return None; + } + + unsafe { + let mut ret = DirEntry { entry: mem::zeroed(), dir: self.clone() }; + let mut entry_ptr = ptr::null_mut(); + loop { + if readdir64_r(self.inner.dirp.0, &mut ret.entry, &mut entry_ptr) != 0 { + if entry_ptr.is_null() { + // We encountered an error (which will be returned in this iteration), but + // we also reached the end of the directory stream. The `end_of_stream` + // flag is enabled to make sure that we return `None` in the next iteration + // (instead of looping forever) + self.end_of_stream = true; + } + return Some(Err(Error::last_os_error())); + } + if entry_ptr.is_null() { + return None; + } + if ret.name_bytes() != b"." && ret.name_bytes() != b".." { + return Some(Ok(ret)); + } + } + } + } +} + +impl Drop for Dir { + fn drop(&mut self) { + let r = unsafe { libc::closedir(self.0) }; + debug_assert_eq!(r, 0); + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + use crate::sys::vxworks::ext::ffi::OsStrExt; + self.dir.inner.root.join(OsStr::from_bytes(self.name_bytes())) + } + + pub fn file_name(&self) -> OsString { + OsStr::from_bytes(self.name_bytes()).to_os_string() + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + lstat(&self.path()) + } + + pub fn file_type(&self) -> io::Result<FileType> { + lstat(&self.path()).map(|m| m.file_type()) + } + + pub fn ino(&self) -> u64 { + self.entry.d_ino as u64 + } + + fn name_bytes(&self) -> &[u8] { + unsafe { + //&*self.name + CStr::from_ptr(self.entry.d_name.as_ptr()).to_bytes() + } + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions { + // generic + read: false, + write: false, + append: false, + truncate: false, + create: false, + create_new: false, + // system-specific + custom_flags: 0, + mode: 0o666, + } + } + + pub fn read(&mut self, read: bool) { + self.read = read; + } + pub fn write(&mut self, write: bool) { + self.write = write; + } + pub fn append(&mut self, append: bool) { + self.append = append; + } + pub fn truncate(&mut self, truncate: bool) { + self.truncate = truncate; + } + pub fn create(&mut self, create: bool) { + self.create = create; + } + pub fn create_new(&mut self, create_new: bool) { + self.create_new = create_new; + } + pub fn mode(&mut self, mode: u32) { + self.mode = mode as mode_t; + } + + fn get_access_mode(&self) -> io::Result<c_int> { + match (self.read, self.write, self.append) { + (true, false, false) => Ok(libc::O_RDONLY), + (false, true, false) => Ok(libc::O_WRONLY), + (true, true, false) => Ok(libc::O_RDWR), + (false, _, true) => Ok(libc::O_WRONLY | libc::O_APPEND), + (true, _, true) => Ok(libc::O_RDWR | libc::O_APPEND), + (false, false, false) => Err(Error::from_raw_os_error(libc::EINVAL)), + } + } + + fn get_creation_mode(&self) -> io::Result<c_int> { + match (self.write, self.append) { + (true, false) => {} + (false, false) => { + if self.truncate || self.create || self.create_new { + return Err(Error::from_raw_os_error(libc::EINVAL)); + } + } + (_, true) => { + if self.truncate && 
!self.create_new { + return Err(Error::from_raw_os_error(libc::EINVAL)); + } + } + } + + Ok(match (self.create, self.truncate, self.create_new) { + (false, false, false) => 0, + (true, false, false) => libc::O_CREAT, + (false, true, false) => libc::O_TRUNC, + (true, true, false) => libc::O_CREAT | libc::O_TRUNC, + (_, _, true) => libc::O_CREAT | libc::O_EXCL, + }) + } +} + +impl File { + pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { + let path = cstr(path)?; + File::open_c(&path, opts) + } + + pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> { + let flags = libc::O_CLOEXEC + | opts.get_access_mode()? + | opts.get_creation_mode()? + | (opts.custom_flags as c_int & !libc::O_ACCMODE); + let fd = cvt_r(|| unsafe { open(path.as_ptr(), flags, opts.mode as c_int) })?; + Ok(File(FileDesc::new(fd))) + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { ::libc::fstat(self.0.raw(), &mut stat) })?; + Ok(FileAttr { stat: stat }) + } + + pub fn fsync(&self) -> io::Result<()> { + cvt_r(|| unsafe { libc::fsync(self.0.raw()) })?; + Ok(()) + } + + pub fn datasync(&self) -> io::Result<()> { + cvt_r(|| unsafe { os_datasync(self.0.raw()) })?; + return Ok(()); + unsafe fn os_datasync(fd: c_int) -> c_int { + libc::fsync(fd) + } //not supported + } + + pub fn truncate(&self, size: u64) -> io::Result<()> { + return cvt_r(|| unsafe { ftruncate(self.0.raw(), size as off_t) }).map(drop); + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.0.read_at(buf, offset) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.0.write_at(buf, offset) + } + + pub fn flush(&self) -> io::Result<()> { + Ok(()) + } + + pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> { + let (whence, pos) = match pos { + // Casting to `i64` is fine, too large values will end up as + // negative which will cause an error in `"lseek64"`. + SeekFrom::Start(off) => (libc::SEEK_SET, off as i64), + SeekFrom::End(off) => (libc::SEEK_END, off), + SeekFrom::Current(off) => (libc::SEEK_CUR, off), + }; + let n = cvt(unsafe { lseek(self.0.raw(), pos, whence) })?; + Ok(n as u64) + } + + pub fn duplicate(&self) -> io::Result<File> { + self.0.duplicate().map(File) + } + + pub fn fd(&self) -> &FileDesc { + &self.0 + } + + pub fn into_fd(self) -> FileDesc { + self.0 + } + + pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> { + cvt_r(|| unsafe { libc::fchmod(self.0.raw(), perm.mode) })?; + Ok(()) + } + + pub fn diverge(&self) -> ! 
{ + panic!() + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder { mode: 0o777 } + } + + pub fn mkdir(&self, p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?; + Ok(()) + } + + pub fn set_mode(&mut self, mode: u32) { + self.mode = mode as mode_t; + } +} + +fn cstr(path: &Path) -> io::Result<CString> { + use crate::sys::vxworks::ext::ffi::OsStrExt; + Ok(CString::new(path.as_os_str().as_bytes())?) +} + +impl FromInner<c_int> for File { + fn from_inner(fd: c_int) -> File { + File(FileDesc::new(fd)) + } +} + +impl fmt::Debug for File { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn get_path(fd: c_int) -> Option<PathBuf> { + let mut buf = vec![0; libc::PATH_MAX as usize]; + let n = unsafe { libc::ioctl(fd, libc::FIOGETNAME, buf.as_ptr()) }; + if n == -1 { + return None; + } + let l = buf.iter().position(|&c| c == 0).unwrap(); + buf.truncate(l as usize); + Some(PathBuf::from(OsString::from_vec(buf))) + } + fn get_mode(fd: c_int) -> Option<(bool, bool)> { + let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if mode == -1 { + return None; + } + match mode & libc::O_ACCMODE { + libc::O_RDONLY => Some((true, false)), + libc::O_RDWR => Some((true, true)), + libc::O_WRONLY => Some((false, true)), + _ => None, + } + } + + let fd = self.0.raw(); + let mut b = f.debug_struct("File"); + b.field("fd", &fd); + if let Some(path) = get_path(fd) { + b.field("path", &path); + } + if let Some((read, write)) = get_mode(fd) { + b.field("read", &read).field("write", &write); + } + b.finish() + } +} + +pub fn readdir(p: &Path) -> io::Result<ReadDir> { + let root = p.to_path_buf(); + let p = cstr(p)?; + unsafe { + let ptr = libc::opendir(p.as_ptr()); + if ptr.is_null() { + Err(Error::last_os_error()) + } else { + let inner = InnerReadDir { dirp: Dir(ptr), root }; + Ok(ReadDir { inner: Arc::new(inner), end_of_stream: false }) + } + } +} + +pub fn unlink(p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::unlink(p.as_ptr()) })?; + Ok(()) +} + +pub fn rename(old: &Path, new: &Path) -> io::Result<()> { + let old = cstr(old)?; + let new = cstr(new)?; + cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?; + Ok(()) +} + +pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> { + let p = cstr(p)?; + cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?; + Ok(()) +} + +pub fn rmdir(p: &Path) -> io::Result<()> { + let p = cstr(p)?; + cvt(unsafe { libc::rmdir(p.as_ptr()) })?; + Ok(()) +} + +pub fn remove_dir_all(path: &Path) -> io::Result<()> { + let filetype = lstat(path)?.file_type(); + if filetype.is_symlink() { unlink(path) } else { remove_dir_all_recursive(path) } +} + +fn remove_dir_all_recursive(path: &Path) -> io::Result<()> { + for child in readdir(path)? { + let child = child?; + if child.file_type()?.is_dir() { + remove_dir_all_recursive(&child.path())?; + } else { + unlink(&child.path())?; + } + } + rmdir(path) +} + +pub fn readlink(p: &Path) -> io::Result<PathBuf> { + let c_path = cstr(p)?; + let p = c_path.as_ptr(); + + let mut buf = Vec::with_capacity(256); + + loop { + let buf_read = + cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize; + + unsafe { + buf.set_len(buf_read); + } + + if buf_read != buf.capacity() { + buf.shrink_to_fit(); + + return Ok(PathBuf::from(OsString::from_vec(buf))); + } + + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. 
The length is guaranteed to be + // the same as the capacity due to the if statement above. + buf.reserve(1); + } +} + +pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { + let src = cstr(src)?; + let dst = cstr(dst)?; + cvt(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) })?; + Ok(()) +} + +pub fn link(src: &Path, dst: &Path) -> io::Result<()> { + let src = cstr(src)?; + let dst = cstr(dst)?; + cvt(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) })?; + Ok(()) +} + +pub fn stat(p: &Path) -> io::Result<FileAttr> { + let p = cstr(p)?; + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { libc::stat(p.as_ptr(), &mut stat as *mut _ as *mut _) })?; + Ok(FileAttr { stat }) +} + +pub fn lstat(p: &Path) -> io::Result<FileAttr> { + let p = cstr(p)?; + let mut stat: stat64 = unsafe { mem::zeroed() }; + cvt(unsafe { ::libc::lstat(p.as_ptr(), &mut stat as *mut _ as *mut _) })?; + Ok(FileAttr { stat }) +} + +pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { + use crate::sys::vxworks::ext::ffi::OsStrExt; + let path = CString::new(p.as_os_str().as_bytes())?; + let buf; + unsafe { + let r = libc::realpath(path.as_ptr(), ptr::null_mut()); + if r.is_null() { + return Err(io::Error::last_os_error()); + } + buf = CStr::from_ptr(r).to_bytes().to_vec(); + libc::free(r as *mut _); + } + Ok(PathBuf::from(OsString::from_vec(buf))) +} + +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + use crate::fs::File; + if !from.is_file() { + return Err(Error::new( + ErrorKind::InvalidInput, + "the source path is not an existing regular file", + )); + } + + let mut reader = File::open(from)?; + let mut writer = File::create(to)?; + let perm = reader.metadata()?.permissions(); + + let ret = io::copy(&mut reader, &mut writer)?; + writer.set_permissions(perm)?; + Ok(ret) +} diff --git a/library/std/src/sys/vxworks/io.rs b/library/std/src/sys/vxworks/io.rs new file mode 100644 index 00000000000..0f68ebf8da9 --- /dev/null +++ b/library/std/src/sys/vxworks/io.rs @@ -0,0 +1,75 @@ +use crate::marker::PhantomData; +use crate::slice; + +use libc::{c_void, iovec}; + +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct IoSlice<'a> { + vec: iovec, + _p: PhantomData<&'a [u8]>, +} + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice { + vec: iovec { iov_base: buf.as_ptr() as *mut u8 as *mut c_void, iov_len: buf.len() }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.iov_len < n { + panic!("advancing IoSlice beyond its length"); + } + + unsafe { + self.vec.iov_len -= n; + self.vec.iov_base = self.vec.iov_base.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } +} + +pub struct IoSliceMut<'a> { + vec: iovec, + _p: PhantomData<&'a mut [u8]>, +} + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut { + vec: iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len() }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.iov_len < n { + panic!("advancing IoSliceMut beyond its length"); + } + + unsafe { + self.vec.iov_len -= n; + self.vec.iov_base = self.vec.iov_base.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe { 
slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) } + } +} diff --git a/library/std/src/sys/vxworks/memchr.rs b/library/std/src/sys/vxworks/memchr.rs new file mode 100644 index 00000000000..928100c92ff --- /dev/null +++ b/library/std/src/sys/vxworks/memchr.rs @@ -0,0 +1,21 @@ +// Original implementation taken from rust-memchr. +// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch + +pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> { + let p = unsafe { + libc::memchr( + haystack.as_ptr() as *const libc::c_void, + needle as libc::c_int, + haystack.len(), + ) + }; + if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) } +} + +pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> { + fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> { + core::slice::memchr::memrchr(needle, haystack) + } + + memrchr_specific(needle, haystack) +} diff --git a/library/std/src/sys/vxworks/mod.rs b/library/std/src/sys/vxworks/mod.rs new file mode 100644 index 00000000000..1132a849e2f --- /dev/null +++ b/library/std/src/sys/vxworks/mod.rs @@ -0,0 +1,113 @@ +#![allow(dead_code)] +#![allow(missing_docs, nonstandard_style)] + +use crate::io::ErrorKind; + +pub use self::rand::hashmap_random_keys; +pub use crate::os::vxworks as platform; +pub use libc::strlen; + +pub mod alloc; +pub mod args; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod ext; +pub mod fd; +pub mod fs; +pub mod io; +pub mod memchr; +pub mod mutex; +pub mod net; +pub mod os; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rand; +pub mod rwlock; +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +pub mod thread_local_dtor; +pub mod thread_local_key; +pub mod time; + +pub use crate::sys_common::os_str_bytes as os_str; + +#[cfg(not(test))] +pub fn init() { + // ignore SIGPIPE + unsafe { + assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR); + } +} + +pub use libc::signal; + +pub fn decode_error_kind(errno: i32) -> ErrorKind { + match errno as libc::c_int { + libc::ECONNREFUSED => ErrorKind::ConnectionRefused, + libc::ECONNRESET => ErrorKind::ConnectionReset, + libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied, + libc::EPIPE => ErrorKind::BrokenPipe, + libc::ENOTCONN => ErrorKind::NotConnected, + libc::ECONNABORTED => ErrorKind::ConnectionAborted, + libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, + libc::EADDRINUSE => ErrorKind::AddrInUse, + libc::ENOENT => ErrorKind::NotFound, + libc::EINTR => ErrorKind::Interrupted, + libc::EINVAL => ErrorKind::InvalidInput, + libc::ETIMEDOUT => ErrorKind::TimedOut, + libc::EEXIST => ErrorKind::AlreadyExists, + + // These two constants can have the same value on some systems, + // but different values on others, so we can't use a match + // clause + x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => ErrorKind::WouldBlock, + + _ => ErrorKind::Other, + } +} + +#[doc(hidden)] +pub trait IsMinusOne { + fn is_minus_one(&self) -> bool; +} + +macro_rules! impl_is_minus_one { + ($($t:ident)*) => ($(impl IsMinusOne for $t { + fn is_minus_one(&self) -> bool { + *self == -1 + } + })*) +} + +impl_is_minus_one! 
{ i8 i16 i32 i64 isize } + +pub fn cvt<T: IsMinusOne>(t: T) -> crate::io::Result<T> { + if t.is_minus_one() { Err(crate::io::Error::last_os_error()) } else { Ok(t) } +} + +pub fn cvt_r<T, F>(mut f: F) -> crate::io::Result<T> +where + T: IsMinusOne, + F: FnMut() -> T, +{ + loop { + match cvt(f()) { + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + other => return other, + } + } +} + +// On Unix-like platforms, libc::abort will unregister signal handlers +// including the SIGABRT handler, preventing the abort from being blocked, and +// fclose streams, with the side effect of flushing them so libc buffered +// output will be printed. Additionally the shell will generally print a more +// understandable error message like "Abort trap" rather than "Illegal +// instruction" that intrinsics::abort would cause, as intrinsics::abort is +// implemented as an illegal instruction. +pub fn abort_internal() -> ! { + unsafe { libc::abort() } +} diff --git a/library/std/src/sys/vxworks/mutex.rs b/library/std/src/sys/vxworks/mutex.rs new file mode 100644 index 00000000000..103d87e3d2f --- /dev/null +++ b/library/std/src/sys/vxworks/mutex.rs @@ -0,0 +1,131 @@ +use crate::cell::UnsafeCell; +use crate::mem::MaybeUninit; + +pub struct Mutex { + inner: UnsafeCell<libc::pthread_mutex_t>, +} + +#[inline] +pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t { + m.inner.get() +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[allow(dead_code)] // sys isn't exported yet +impl Mutex { + pub const fn new() -> Mutex { + // Might be moved to a different address, so it is better to avoid + // initialization of potentially opaque OS data before it landed. + // Be very careful using this newly constructed `Mutex`, reentrant + // locking is undefined behavior until `init` is called! + Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } + } + #[inline] + pub unsafe fn init(&mut self) { + // Issue #33770 + // + // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have + // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you + // try to re-lock it from the same thread when you already hold a lock. + // + // In practice, glibc takes advantage of this undefined behavior to + // implement hardware lock elision, which uses hardware transactional + // memory to avoid acquiring the lock. While a transaction is in + // progress, the lock appears to be unlocked. This isn't a problem for + // other threads since the transactional memory will abort if a conflict + // is detected, however no abort is generated if re-locking from the + // same thread. + // + // Since locking the same mutex twice will result in two aliasing &mut + // references, we instead create the mutex with type + // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to + // re-lock it from the same thread, thus avoiding undefined behavior. 
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit(); + let r = libc::pthread_mutexattr_init(attr.as_mut_ptr()); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutex_init(self.inner.get(), attr.as_ptr()); + debug_assert_eq!(r, 0); + let r = libc::pthread_mutexattr_destroy(attr.as_mut_ptr()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn lock(&self) { + let r = libc::pthread_mutex_lock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn unlock(&self) { + let r = libc::pthread_mutex_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + pub unsafe fn try_lock(&self) -> bool { + libc::pthread_mutex_trylock(self.inner.get()) == 0 + } + #[inline] + #[cfg(not(target_os = "dragonfly"))] + pub unsafe fn destroy(&self) { + let r = libc::pthread_mutex_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } + #[inline] + #[cfg(target_os = "dragonfly")] + pub unsafe fn destroy(&self) { + let r = libc::pthread_mutex_destroy(self.inner.get()); + // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a + // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER. + // Once it is used (locked/unlocked) or pthread_mutex_init() is called, + // this behaviour no longer occurs. + debug_assert!(r == 0 || r == libc::EINVAL); + } +} + +pub struct ReentrantMutex { + inner: UnsafeCell<libc::pthread_mutex_t>, +} + +unsafe impl Send for ReentrantMutex {} +unsafe impl Sync for ReentrantMutex {} + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) } + } + + pub unsafe fn init(&self) { + let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit(); + let result = libc::pthread_mutexattr_init(attr.as_mut_ptr()); + debug_assert_eq!(result, 0); + let result = + libc::pthread_mutexattr_settype(attr.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE); + debug_assert_eq!(result, 0); + let result = libc::pthread_mutex_init(self.inner.get(), attr.as_ptr()); + debug_assert_eq!(result, 0); + let result = libc::pthread_mutexattr_destroy(attr.as_mut_ptr()); + debug_assert_eq!(result, 0); + } + + pub unsafe fn lock(&self) { + let result = libc::pthread_mutex_lock(self.inner.get()); + debug_assert_eq!(result, 0); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + libc::pthread_mutex_trylock(self.inner.get()) == 0 + } + + pub unsafe fn unlock(&self) { + let result = libc::pthread_mutex_unlock(self.inner.get()); + debug_assert_eq!(result, 0); + } + + pub unsafe fn destroy(&self) { + let result = libc::pthread_mutex_destroy(self.inner.get()); + debug_assert_eq!(result, 0); + } +} diff --git a/library/std/src/sys/vxworks/net.rs b/library/std/src/sys/vxworks/net.rs new file mode 100644 index 00000000000..32c27ab6e9e --- /dev/null +++ b/library/std/src/sys/vxworks/net.rs @@ -0,0 +1,359 @@ +use crate::cmp; +use crate::ffi::CStr; +use crate::io; +use crate::io::{IoSlice, IoSliceMut}; +use crate::mem; +use crate::net::{Shutdown, SocketAddr}; +use crate::str; +use crate::sys::fd::FileDesc; +use crate::sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr}; +use crate::sys_common::{AsInner, FromInner, IntoInner}; +use crate::time::{Duration, Instant}; +use libc::{self, c_int, c_void, size_t, sockaddr, socklen_t, EAI_SYSTEM, MSG_PEEK}; + +pub use crate::sys::{cvt, cvt_r}; + +#[allow(unused_extern_crates)] +pub extern 
crate libc as netc; + +pub type wrlen_t = size_t; + +pub struct Socket(FileDesc); + +pub fn init() {} + +pub fn cvt_gai(err: c_int) -> io::Result<()> { + if err == 0 { + return Ok(()); + } + + // We may need to trigger a glibc workaround. See on_resolver_failure() for details. + on_resolver_failure(); + + if err == EAI_SYSTEM { + return Err(io::Error::last_os_error()); + } + + let detail = unsafe { + str::from_utf8(CStr::from_ptr(libc::gai_strerror(err)).to_bytes()).unwrap().to_owned() + }; + Err(io::Error::new( + io::ErrorKind::Other, + &format!("failed to lookup address information: {}", detail)[..], + )) +} + +impl Socket { + pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> { + let fam = match *addr { + SocketAddr::V4(..) => libc::AF_INET, + SocketAddr::V6(..) => libc::AF_INET6, + }; + Socket::new_raw(fam, ty) + } + + pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> { + unsafe { + let fd = cvt(libc::socket(fam, ty, 0))?; + let fd = FileDesc::new(fd); + fd.set_cloexec()?; + let socket = Socket(fd); + Ok(socket) + } + } + + pub fn new_pair(_fam: c_int, _ty: c_int) -> io::Result<(Socket, Socket)> { + unimplemented!(); + } + + pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = unsafe { + let (addrp, len) = addr.into_inner(); + cvt(libc::connect(self.0.raw(), addrp, len)) + }; + self.set_nonblocking(false)?; + + match r { + Ok(_) => return Ok(()), + // there's no ErrorKind for EINPROGRESS :( + Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + let mut pollfd = libc::pollfd { fd: self.0.raw(), events: libc::POLLOUT, revents: 0 }; + + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let start = Instant::now(); + + loop { + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")); + } + + let timeout = timeout - elapsed; + let mut timeout = timeout + .as_secs() + .saturating_mul(1_000) + .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000); + if timeout == 0 { + timeout = 1; + } + + let timeout = cmp::min(timeout, c_int::MAX as u64) as c_int; + + match unsafe { libc::poll(&mut pollfd, 1, timeout) } { + -1 => { + let err = io::Error::last_os_error(); + if err.kind() != io::ErrorKind::Interrupted { + return Err(err); + } + } + 0 => {} + _ => { + // linux returns POLLOUT|POLLERR|POLLHUP for refused connections (!), so look + // for POLLHUP rather than read readiness + if pollfd.revents & libc::POLLHUP != 0 { + let e = self.take_error()?.unwrap_or_else(|| { + io::Error::new(io::ErrorKind::Other, "no error set after POLLHUP") + }); + return Err(e); + } + + return Ok(()); + } + } + } + } + + pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> { + let fd = cvt_r(|| unsafe { libc::accept(self.0.raw(), storage, len) })?; + let fd = FileDesc::new(fd); + fd.set_cloexec()?; + Ok(Socket(fd)) + } + + pub fn duplicate(&self) -> io::Result<Socket> { + self.0.duplicate().map(Socket) + } + + fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags) + })?; + Ok(ret as usize) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, 0) + } + + pub fn peek(&self, buf: &mut 
[u8]) -> io::Result<usize> { + self.recv_with_flags(buf, MSG_PEEK) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + fn recv_from_with_flags( + &self, + buf: &mut [u8], + flags: c_int, + ) -> io::Result<(usize, SocketAddr)> { + let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() }; + let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t; + + let n = cvt(unsafe { + libc::recvfrom( + self.0.raw(), + buf.as_mut_ptr() as *mut c_void, + buf.len(), + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen, + ) + })?; + Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?)) + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, 0) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, MSG_PEEK) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn set_timeout(&self, dur: Option<Duration>, kind: libc::c_int) -> io::Result<()> { + let timeout = match dur { + Some(dur) => { + if dur.as_secs() == 0 && dur.subsec_nanos() == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let secs = if dur.as_secs() > libc::time_t::MAX as u64 { + libc::time_t::MAX + } else { + dur.as_secs() as libc::time_t + }; + let mut timeout = libc::timeval { + tv_sec: secs, + tv_usec: (dur.subsec_nanos() / 1000) as libc::suseconds_t, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + timeout + } + None => libc::timeval { tv_sec: 0, tv_usec: 0 }, + }; + setsockopt(self, libc::SOL_SOCKET, kind, timeout) + } + + pub fn timeout(&self, kind: libc::c_int) -> io::Result<Option<Duration>> { + let raw: libc::timeval = getsockopt(self, libc::SOL_SOCKET, kind)?; + if raw.tv_sec == 0 && raw.tv_usec == 0 { + Ok(None) + } else { + let sec = raw.tv_sec as u64; + let nsec = (raw.tv_usec as u32) * 1000; + Ok(Some(Duration::new(sec, nsec))) + } + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => libc::SHUT_WR, + Shutdown::Read => libc::SHUT_RD, + Shutdown::Both => libc::SHUT_RDWR, + }; + cvt(unsafe { libc::shutdown(self.0.raw(), how) })?; + Ok(()) + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + setsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY, nodelay as c_int) + } + + pub fn nodelay(&self) -> io::Result<bool> { + let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_NODELAY)?; + Ok(raw != 0) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + let mut nonblocking = nonblocking as libc::c_int; + cvt(unsafe { libc::ioctl(*self.as_inner(), libc::FIONBIO, &mut nonblocking) }).map(drop) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?; + if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) } + } +} + +impl AsInner<c_int> for Socket { + fn as_inner(&self) -> &c_int { + self.0.as_inner() + } +} + +impl FromInner<c_int> for Socket { + fn from_inner(fd: c_int) 
-> Socket { + Socket(FileDesc::new(fd)) + } +} + +impl IntoInner<c_int> for Socket { + fn into_inner(self) -> c_int { + self.0.into_raw() + } +} + +// In versions of glibc prior to 2.26, there's a bug where the DNS resolver +// will cache the contents of /etc/resolv.conf, so changes to that file on disk +// can be ignored by a long-running program. That can break DNS lookups on e.g. +// laptops where the network comes and goes. See +// https://sourceware.org/bugzilla/show_bug.cgi?id=984. Note however that some +// distros including Debian have patched glibc to fix this for a long time. +// +// A workaround for this bug is to call the res_init libc function, to clear +// the cached configs. Unfortunately, while we believe glibc's implementation +// of res_init is thread-safe, we know that other implementations are not +// (https://github.com/rust-lang/rust/issues/43592). Code here in libstd could +// try to synchronize its res_init calls with a Mutex, but that wouldn't +// protect programs that call into libc in other ways. So instead of calling +// res_init unconditionally, we call it only when we detect we're linking +// against glibc version < 2.26. (That is, when we both know its needed and +// believe it's thread-safe). +#[cfg(target_env = "gnu")] +fn on_resolver_failure() { + /* + use crate::sys; + + // If the version fails to parse, we treat it the same as "not glibc". + if let Some(version) = sys::os::glibc_version() { + if version < (2, 26) { + unsafe { libc::res_init() }; + } + } + */ +} + +#[cfg(not(target_env = "gnu"))] +fn on_resolver_failure() {} + +#[cfg(all(test, taget_env = "gnu"))] +mod test { + use super::*; + + #[test] + fn test_res_init() { + // This mostly just tests that the weak linkage doesn't panic wildly... + res_init_if_glibc_before_2_26().unwrap(); + } + + #[test] + fn test_parse_glibc_version() { + let cases = [ + ("0.0", Some((0, 0))), + ("01.+2", Some((1, 2))), + ("3.4.5.six", Some((3, 4))), + ("1", None), + ("1.-2", None), + ("1.foo", None), + ("foo.1", None), + ]; + for &(version_str, parsed) in cases.iter() { + assert_eq!(parsed, parse_glibc_version(version_str)); + } + } +} diff --git a/library/std/src/sys/vxworks/os.rs b/library/std/src/sys/vxworks/os.rs new file mode 100644 index 00000000000..1fadf716135 --- /dev/null +++ b/library/std/src/sys/vxworks/os.rs @@ -0,0 +1,316 @@ +use crate::error::Error as StdError; +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::iter; +use crate::marker::PhantomData; +use crate::mem; +use crate::memchr; +use crate::path::{self, Path, PathBuf}; +use crate::slice; +use crate::str; +use crate::sys::cvt; +use crate::sys_common::mutex::{Mutex, MutexGuard}; +use libc::{self, c_char /*,c_void */, c_int}; +/*use sys::fd; this one is probably important */ +use crate::vec; + +const TMPBUF_SZ: usize = 128; + +// This is a terrible fix +use crate::sys::os_str::Buf; +use crate::sys_common::{AsInner, FromInner, IntoInner}; + +pub trait OsStringExt { + fn from_vec(vec: Vec<u8>) -> Self; + fn into_vec(self) -> Vec<u8>; +} + +impl OsStringExt for OsString { + fn from_vec(vec: Vec<u8>) -> OsString { + FromInner::from_inner(Buf { inner: vec }) + } + fn into_vec(self) -> Vec<u8> { + self.into_inner().inner + } +} + +pub trait OsStrExt { + fn from_bytes(slice: &[u8]) -> &Self; + fn as_bytes(&self) -> &[u8]; +} + +impl OsStrExt for OsStr { + fn from_bytes(slice: &[u8]) -> &OsStr { + unsafe { mem::transmute(slice) } + } + fn as_bytes(&self) -> &[u8] { + &self.as_inner().inner + } +} + +pub fn 
errno() -> i32 { + unsafe { libc::errnoGet() } +} + +pub fn set_errno(e: i32) { + unsafe { + libc::errnoSet(e as c_int); + } +} + +/// Gets a detailed string description for the given error number. +pub fn error_string(errno: i32) -> String { + let mut buf = [0 as c_char; TMPBUF_SZ]; + extern "C" { + fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int; + } + + let p = buf.as_mut_ptr(); + unsafe { + if strerror_r(errno as c_int, p, buf.len()) < 0 { + panic!("strerror_r failure"); + } + let p = p as *const _; + str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned() + } +} + +pub fn getcwd() -> io::Result<PathBuf> { + let mut buf = Vec::with_capacity(512); + loop { + unsafe { + let ptr = buf.as_mut_ptr() as *mut libc::c_char; + if !libc::getcwd(ptr, buf.capacity() as libc::size_t).is_null() { + let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len(); + buf.set_len(len); + buf.shrink_to_fit(); + return Ok(PathBuf::from(OsString::from_vec(buf))); + } else { + let error = io::Error::last_os_error(); + if error.raw_os_error() != Some(libc::ERANGE) { + return Err(error); + } + } + // Trigger the internal buffer resizing logic of `Vec` by requiring + // more space than the current capacity. + let cap = buf.capacity(); + buf.set_len(cap); + buf.reserve(1); + } + } +} + +pub fn chdir(p: &path::Path) -> io::Result<()> { + let p: &OsStr = p.as_ref(); + let p = CString::new(p.as_bytes())?; + unsafe { + match libc::chdir(p.as_ptr()) == (0 as c_int) { + true => Ok(()), + false => Err(io::Error::last_os_error()), + } + } +} + +pub struct SplitPaths<'a> { + iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>, +} + +pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> { + fn bytes_to_path(b: &[u8]) -> PathBuf { + PathBuf::from(<OsStr as OsStrExt>::from_bytes(b)) + } + fn is_colon(b: &u8) -> bool { + *b == b':' + } + let unparsed = unparsed.as_bytes(); + SplitPaths { + iter: unparsed + .split(is_colon as fn(&u8) -> bool) + .map(bytes_to_path as fn(&[u8]) -> PathBuf), + } +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + let mut joined = Vec::new(); + let sep = b':'; + + for (i, path) in paths.enumerate() { + let path = path.as_ref().as_bytes(); + if i > 0 { + joined.push(sep) + } + if path.contains(&sep) { + return Err(JoinPathsError); + } + joined.extend_from_slice(path); + } + Ok(OsStringExt::from_vec(joined)) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "path segment contains separator `:`".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "failed to join paths" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + #[cfg(test)] + use realstd::env; + + #[cfg(not(test))] + use crate::env; + + let exe_path = env::args().next().unwrap(); + let path = Path::new(&exe_path); + path.canonicalize() +} + +pub struct Env { + iter: vec::IntoIter<(OsString, OsString)>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.iter.next() + } + fn 
size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +pub unsafe fn environ() -> *mut *const *const c_char { + extern "C" { + static mut environ: *const *const c_char; + } + &mut environ +} + +pub unsafe fn env_lock() -> MutexGuard<'static> { + // We never call `ENV_LOCK.init()`, so it is UB to attempt to + // acquire this mutex reentrantly! + static ENV_LOCK: Mutex = Mutex::new(); + ENV_LOCK.lock() +} + +/// Returns a vector of (variable, value) byte-vector pairs for all the +/// environment variables of the current process. +pub fn env() -> Env { + unsafe { + let _guard = env_lock(); + let mut environ = *environ(); + if environ.is_null() { + panic!("os::env() failure getting env string from OS: {}", io::Error::last_os_error()); + } + let mut result = Vec::new(); + while !(*environ).is_null() { + if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) { + result.push(key_value); + } + environ = environ.add(1); + } + return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData }; + } + + fn parse(input: &[u8]) -> Option<(OsString, OsString)> { + // Strategy (copied from glibc): Variable name and value are separated + // by an ASCII equals sign '='. Since a variable name must not be + // empty, allow variable names starting with an equals sign. Skip all + // malformed lines. + if input.is_empty() { + return None; + } + let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1); + pos.map(|p| { + ( + OsStringExt::from_vec(input[..p].to_vec()), + OsStringExt::from_vec(input[p + 1..].to_vec()), + ) + }) + } +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + // environment variables with a nul byte can't be set, so their value is + // always None as well + let k = CString::new(k.as_bytes())?; + unsafe { + let _guard = env_lock(); + let s = libc::getenv(k.as_ptr()) as *const libc::c_char; + let ret = if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) + }; + Ok(ret) + } +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + let k = CString::new(k.as_bytes())?; + let v = CString::new(v.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) + } +} + +pub fn unsetenv(n: &OsStr) -> io::Result<()> { + let nbuf = CString::new(n.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) + } +} + +pub fn page_size() -> usize { + unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } +} + +pub fn temp_dir() -> PathBuf { + crate::env::var_os("TMPDIR").map(PathBuf::from).unwrap_or_else(|| PathBuf::from("/tmp")) +} + +pub fn home_dir() -> Option<PathBuf> { + crate::env::var_os("HOME").or_else(|| None).map(PathBuf::from) +} + +pub fn exit(code: i32) -> ! 
{ + unsafe { libc::exit(code as c_int) } +} + +pub fn getpid() -> u32 { + unsafe { libc::getpid() as u32 } +} + +pub fn getppid() -> u32 { + unsafe { libc::getppid() as u32 } +} diff --git a/library/std/src/sys/vxworks/path.rs b/library/std/src/sys/vxworks/path.rs new file mode 100644 index 00000000000..840a7ae0426 --- /dev/null +++ b/library/std/src/sys/vxworks/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/vxworks/pipe.rs b/library/std/src/sys/vxworks/pipe.rs new file mode 100644 index 00000000000..a18376212af --- /dev/null +++ b/library/std/src/sys/vxworks/pipe.rs @@ -0,0 +1,107 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::sync::atomic::AtomicBool; +use crate::sys::fd::FileDesc; +use crate::sys::{cvt, cvt_r}; +use libc::{self /*, c_int apparently not used? */}; + +pub struct AnonPipe(FileDesc); + +pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> { + static INVALID: AtomicBool = AtomicBool::new(false); + + let mut fds = [0; 2]; + cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?; + + let fd0 = FileDesc::new(fds[0]); + let fd1 = FileDesc::new(fds[1]); + fd0.set_cloexec()?; + fd1.set_cloexec()?; + Ok((AnonPipe(fd0), AnonPipe(fd1))) +} + +impl AnonPipe { + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.0.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.0.is_read_vectored() + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.0.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.0.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.0.is_write_vectored() + } + + pub fn fd(&self) -> &FileDesc { + &self.0 + } + pub fn into_fd(self) -> FileDesc { + self.0 + } + pub fn diverge(&self) -> ! { + panic!() + } +} + +pub fn read2(p1: AnonPipe, v1: &mut Vec<u8>, p2: AnonPipe, v2: &mut Vec<u8>) -> io::Result<()> { + // Set both pipes into nonblocking mode as we're gonna be reading from both + // in the `select` loop below, and we wouldn't want one to block the other! + let p1 = p1.into_fd(); + let p2 = p2.into_fd(); + p1.set_nonblocking_pipe(true)?; + p2.set_nonblocking_pipe(true)?; + + let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() }; + fds[0].fd = p1.raw(); + fds[0].events = libc::POLLIN; + fds[1].fd = p2.raw(); + fds[1].events = libc::POLLIN; + loop { + // wait for either pipe to become readable using `poll` + cvt_r(|| unsafe { libc::poll(fds.as_mut_ptr(), 2, -1) })?; + + if fds[0].revents != 0 && read(&p1, v1)? { + p2.set_nonblocking_pipe(false)?; + return p2.read_to_end(v2).map(drop); + } + if fds[1].revents != 0 && read(&p2, v2)? { + p1.set_nonblocking_pipe(false)?; + return p1.read_to_end(v1).map(drop); + } + } + + // Read as much as we can from each pipe, ignoring EWOULDBLOCK or + // EAGAIN. If we hit EOF, then this will happen because the underlying + // reader will return Ok(0), in which case we'll see `Ok` ourselves. In + // this case we flip the other fd back into blocking mode and read + // whatever's leftover on that file descriptor. 
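+    // In other words, the `read` helper below returns Ok(true) exactly when
+    // its pipe has reached EOF, Ok(false) when the read would merely block,
+    // and forwards any other error to the caller of `read2`.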
+ fn read(fd: &FileDesc, dst: &mut Vec<u8>) -> Result<bool, io::Error> { + match fd.read_to_end(dst) { + Ok(_) => Ok(true), + Err(e) => { + if e.raw_os_error() == Some(libc::EWOULDBLOCK) + || e.raw_os_error() == Some(libc::EAGAIN) + { + Ok(false) + } else { + Err(e) + } + } + } + } +} diff --git a/library/std/src/sys/vxworks/process/mod.rs b/library/std/src/sys/vxworks/process/mod.rs new file mode 100644 index 00000000000..c59782ff44b --- /dev/null +++ b/library/std/src/sys/vxworks/process/mod.rs @@ -0,0 +1,7 @@ +pub use self::process_common::{Command, ExitCode, ExitStatus, Stdio, StdioPipes}; +pub use self::process_inner::Process; +pub use crate::ffi::OsString as EnvKey; + +mod process_common; +#[path = "process_vxworks.rs"] +mod process_inner; diff --git a/library/std/src/sys/vxworks/process/process_common.rs b/library/std/src/sys/vxworks/process/process_common.rs new file mode 100644 index 00000000000..bbbd5eda773 --- /dev/null +++ b/library/std/src/sys/vxworks/process/process_common.rs @@ -0,0 +1,408 @@ +use crate::os::unix::prelude::*; + +use crate::collections::BTreeMap; +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::ptr; +use crate::sys::fd::FileDesc; +use crate::sys::fs::{File, OpenOptions}; +use crate::sys::pipe::{self, AnonPipe}; +use crate::sys_common::process::CommandEnv; + +use libc::{c_char, c_int, gid_t, uid_t, EXIT_FAILURE, EXIT_SUCCESS}; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + // Currently we try hard to ensure that the call to `.exec()` doesn't + // actually allocate any memory. While many platforms try to ensure that + // memory allocation works after a fork in a multithreaded process, it's + // been observed to be buggy and somewhat unreliable, so we do our best to + // just not do it at all! + // + // Along those lines, the `argv` and `envp` raw pointers here are exactly + // what's gonna get passed to `execvp`. The `argv` array starts with the + // `program` and ends with a NULL, and the `envp` pointer, if present, is + // also null-terminated. + // + // Right now we don't support removing arguments, so there's not much fancy + // support there, but we support adding and removing environment variables, + // so a side table is used to track where in the `envp` array each key is + // located. Whenever we add a key we update it in place if it's already + // present, and whenever we remove a key we update the locations of all + // other keys.
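+    //
+    // Concretely, `argv` always holds `args.len() + 1` pointers: one for each
+    // owned `CString` in `args` plus the trailing null that the spawn call
+    // expects, so the two fields are kept in sync by `new`, `set_arg_0` and
+    // `arg` below.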
+ program: CString, + args: Vec<CString>, + argv: Argv, + env: CommandEnv, + + cwd: Option<CString>, + uid: Option<uid_t>, + gid: Option<gid_t>, + saw_nul: bool, + closures: Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>>, + stdin: Option<Stdio>, + stdout: Option<Stdio>, + stderr: Option<Stdio>, +} + +// Create a new type for `Argv`, so that we can make it `Send` and `Sync` +struct Argv(Vec<*const c_char>); + +// It is safe to make `Argv` `Send` and `Sync`, because it contains +// pointers to memory owned by `Command.args` +unsafe impl Send for Argv {} +unsafe impl Sync for Argv {} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +// passed to do_exec() with configuration of what the child stdio should look +// like +pub struct ChildPipes { + pub stdin: ChildStdio, + pub stdout: ChildStdio, + pub stderr: ChildStdio, +} + +pub enum ChildStdio { + Inherit, + Explicit(c_int), + Owned(FileDesc), +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, + Fd(FileDesc), +} + +impl Command { + pub fn new(program: &OsStr) -> Command { + let mut saw_nul = false; + let program = os2c(program, &mut saw_nul); + Command { + argv: Argv(vec![program.as_ptr(), ptr::null()]), + args: vec![program.clone()], + program, + env: Default::default(), + cwd: None, + uid: None, + gid: None, + saw_nul, + closures: Vec::new(), + stdin: None, + stdout: None, + stderr: None, + } + } + + pub fn set_arg_0(&mut self, arg: &OsStr) { + // Set a new arg0 + let arg = os2c(arg, &mut self.saw_nul); + debug_assert!(self.argv.0.len() > 1); + self.argv.0[0] = arg.as_ptr(); + self.args[0] = arg; + } + + pub fn arg(&mut self, arg: &OsStr) { + // Overwrite the trailing NULL pointer in `argv` and then add a new null + // pointer. + let arg = os2c(arg, &mut self.saw_nul); + self.argv.0[self.args.len()] = arg.as_ptr(); + self.argv.0.push(ptr::null()); + + // Also make sure we keep track of the owned value to schedule a + // destructor for this memory. + self.args.push(arg); + } + + pub fn cwd(&mut self, dir: &OsStr) { + self.cwd = Some(os2c(dir, &mut self.saw_nul)); + } + pub fn uid(&mut self, id: uid_t) { + self.uid = Some(id); + } + pub fn gid(&mut self, id: gid_t) { + self.gid = Some(id); + } + + pub fn saw_nul(&self) -> bool { + self.saw_nul + } + pub fn get_argv(&self) -> &Vec<*const c_char> { + &self.argv.0 + } + + pub fn get_program(&self) -> &CStr { + &*self.program + } + + #[allow(dead_code)] + pub fn get_cwd(&self) -> &Option<CString> { + &self.cwd + } + #[allow(dead_code)] + pub fn get_uid(&self) -> Option<uid_t> { + self.uid + } + #[allow(dead_code)] + pub fn get_gid(&self) -> Option<gid_t> { + self.gid + } + + pub fn get_closures(&mut self) -> &mut Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>> { + &mut self.closures + } + + pub unsafe fn pre_exec(&mut self, _f: Box<dyn FnMut() -> io::Result<()> + Send + Sync>) { + // Fork() is not supported in VxWorks so there is no way to run the closure in the new process.
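+        // Any closure registered through `pre_exec` therefore cannot be honored
+        // and this panics instead; spawning on VxWorks goes through `rtpSpawn`
+        // (see process_vxworks.rs) rather than fork/exec.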
+ unimplemented!(); + } + + pub fn stdin(&mut self, stdin: Stdio) { + self.stdin = Some(stdin); + } + + pub fn stdout(&mut self, stdout: Stdio) { + self.stdout = Some(stdout); + } + + pub fn stderr(&mut self, stderr: Stdio) { + self.stderr = Some(stderr); + } + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn capture_env(&mut self) -> Option<CStringArray> { + let maybe_env = self.env.capture_if_changed(); + maybe_env.map(|env| construct_envp(env, &mut self.saw_nul)) + } + #[allow(dead_code)] + pub fn env_saw_path(&self) -> bool { + self.env.have_changed_path() + } + + pub fn setup_io( + &self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(StdioPipes, ChildPipes)> { + let null = Stdio::Null; + let default_stdin = if needs_stdin { &default } else { &null }; + let stdin = self.stdin.as_ref().unwrap_or(default_stdin); + let stdout = self.stdout.as_ref().unwrap_or(&default); + let stderr = self.stderr.as_ref().unwrap_or(&default); + let (their_stdin, our_stdin) = stdin.to_child_stdio(true)?; + let (their_stdout, our_stdout) = stdout.to_child_stdio(false)?; + let (their_stderr, our_stderr) = stderr.to_child_stdio(false)?; + let ours = StdioPipes { stdin: our_stdin, stdout: our_stdout, stderr: our_stderr }; + let theirs = ChildPipes { stdin: their_stdin, stdout: their_stdout, stderr: their_stderr }; + Ok((ours, theirs)) + } +} + +fn os2c(s: &OsStr, saw_nul: &mut bool) -> CString { + CString::new(s.as_bytes()).unwrap_or_else(|_e| { + *saw_nul = true; + CString::new("<string-with-nul>").unwrap() + }) +} + +// Helper type to manage ownership of the strings within a C-style array. +pub struct CStringArray { + items: Vec<CString>, + ptrs: Vec<*const c_char>, +} + +impl CStringArray { + pub fn with_capacity(capacity: usize) -> Self { + let mut result = CStringArray { + items: Vec::with_capacity(capacity), + ptrs: Vec::with_capacity(capacity + 1), + }; + result.ptrs.push(ptr::null()); + result + } + pub fn push(&mut self, item: CString) { + let l = self.ptrs.len(); + self.ptrs[l - 1] = item.as_ptr(); + self.ptrs.push(ptr::null()); + self.items.push(item); + } + pub fn as_ptr(&self) -> *const *const c_char { + self.ptrs.as_ptr() + } +} + +fn construct_envp(env: BTreeMap<OsString, OsString>, saw_nul: &mut bool) -> CStringArray { + let mut result = CStringArray::with_capacity(env.len()); + for (k, v) in env { + let mut k: OsString = k.into(); + + // Reserve additional space for '=' and null terminator + k.reserve_exact(v.len() + 2); + k.push("="); + k.push(&v); + + // Add the new entry into the array + if let Ok(item) = CString::new(k.into_vec()) { + result.push(item); + } else { + *saw_nul = true; + } + } + + result +} + +impl Stdio { + pub fn to_child_stdio(&self, readable: bool) -> io::Result<(ChildStdio, Option<AnonPipe>)> { + match *self { + Stdio::Inherit => Ok((ChildStdio::Inherit, None)), + + // Make sure that the source descriptors are not an stdio + // descriptor, otherwise the order which we set the child's + // descriptors may blow away a descriptor which we are hoping to + // save. For example, suppose we want the child's stderr to be the + // parent's stdout, and the child's stdout to be the parent's + // stderr. No matter which we dup first, the second will get + // overwritten prematurely. 
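+            // For example, if the child's stdout should be the parent's fd 2 and
+            // the child's stderr should be the parent's fd 1, then doing dup2(2, 1)
+            // first clobbers the original fd 1 before dup2(1, 2) can copy it,
+            // leaving the child with the same stream on both. Duplicating any
+            // source fd in the 0..=2 range to a fresh descriptor first avoids that.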
+ Stdio::Fd(ref fd) => { + if fd.raw() >= 0 && fd.raw() <= libc::STDERR_FILENO { + Ok((ChildStdio::Owned(fd.duplicate()?), None)) + } else { + Ok((ChildStdio::Explicit(fd.raw()), None)) + } + } + + Stdio::MakePipe => { + let (reader, writer) = pipe::anon_pipe()?; + let (ours, theirs) = if readable { (writer, reader) } else { (reader, writer) }; + Ok((ChildStdio::Owned(theirs.into_fd()), Some(ours))) + } + + Stdio::Null => { + let mut opts = OpenOptions::new(); + opts.read(readable); + opts.write(!readable); + let path = unsafe { CStr::from_ptr("/null\0".as_ptr() as *const _) }; + let fd = File::open_c(&path, &opts)?; + Ok((ChildStdio::Owned(fd.into_fd()), None)) + } + } + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + Stdio::Fd(pipe.into_fd()) + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + Stdio::Fd(file.into_fd()) + } +} + +impl ChildStdio { + pub fn fd(&self) -> Option<c_int> { + match *self { + ChildStdio::Inherit => None, + ChildStdio::Explicit(fd) => Some(fd), + ChildStdio::Owned(ref fd) => Some(fd.raw()), + } + } +} + +impl fmt::Debug for Command { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.program != self.args[0] { + write!(f, "[{:?}] ", self.program)?; + } + write!(f, "{:?}", self.args[0])?; + + for arg in &self.args[1..] { + write!(f, " {:?}", arg)?; + } + Ok(()) + } +} + +/// Unix exit statuses +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitStatus(c_int); + +impl ExitStatus { + pub fn new(status: c_int) -> ExitStatus { + ExitStatus(status) + } + + fn exited(&self) -> bool { + /*unsafe*/ + { libc::WIFEXITED(self.0) } + } + + pub fn success(&self) -> bool { + self.code() == Some(0) + } + + pub fn code(&self) -> Option<i32> { + if self.exited() { + Some(/*unsafe*/ { libc::WEXITSTATUS(self.0) }) + } else { + None + } + } + + pub fn signal(&self) -> Option<i32> { + if !self.exited() { + Some(/*unsafe*/ { libc::WTERMSIG(self.0) }) + } else { + None + } + } +} + +/// Converts a raw `c_int` to a type-safe `ExitStatus` by wrapping it without copying. 
+impl From<c_int> for ExitStatus { + fn from(a: c_int) -> ExitStatus { + ExitStatus(a) + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(code) = self.code() { + write!(f, "exit code: {}", code) + } else { + let signal = self.signal().unwrap(); + write!(f, "signal: {}", signal) + } + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(u8); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _); + pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _); + + #[inline] + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} diff --git a/library/std/src/sys/vxworks/process/process_vxworks.rs b/library/std/src/sys/vxworks/process/process_vxworks.rs new file mode 100644 index 00000000000..f7e84ae3de9 --- /dev/null +++ b/library/std/src/sys/vxworks/process/process_vxworks.rs @@ -0,0 +1,169 @@ +use crate::io::{self, Error, ErrorKind}; +use crate::sys; +use crate::sys::cvt; +use crate::sys::process::process_common::*; +use crate::sys_common::thread; +use libc::RTP_ID; +use libc::{self, c_char, c_int}; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +impl Command { + pub fn spawn( + &mut self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + use crate::sys::cvt_r; + const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX"; + let envp = self.capture_env(); + + if self.saw_nul() { + return Err(io::Error::new(ErrorKind::InvalidInput, "nul byte found in provided data")); + } + let (ours, theirs) = self.setup_io(default, needs_stdin)?; + let mut p = Process { pid: 0, status: None }; + + unsafe { + macro_rules! t { + ($e:expr) => { + match $e { + Ok(e) => e, + Err(e) => return Err(e.into()), + } + }; + } + + let mut orig_stdin = libc::STDIN_FILENO; + let mut orig_stdout = libc::STDOUT_FILENO; + let mut orig_stderr = libc::STDERR_FILENO; + + if let Some(fd) = theirs.stdin.fd() { + orig_stdin = t!(cvt_r(|| libc::dup(libc::STDIN_FILENO))); + t!(cvt_r(|| libc::dup2(fd, libc::STDIN_FILENO))); + } + if let Some(fd) = theirs.stdout.fd() { + orig_stdout = t!(cvt_r(|| libc::dup(libc::STDOUT_FILENO))); + t!(cvt_r(|| libc::dup2(fd, libc::STDOUT_FILENO))); + } + if let Some(fd) = theirs.stderr.fd() { + orig_stderr = t!(cvt_r(|| libc::dup(libc::STDERR_FILENO))); + t!(cvt_r(|| libc::dup2(fd, libc::STDERR_FILENO))); + } + + if let Some(ref cwd) = *self.get_cwd() { + t!(cvt(libc::chdir(cwd.as_ptr()))); + } + + let c_envp = envp + .as_ref() + .map(|c| c.as_ptr()) + .unwrap_or_else(|| *sys::os::environ() as *const _); + let stack_size = thread::min_stack(); + + // ensure that access to the environment is synchronized + let _lock = sys::os::env_lock(); + + let ret = libc::rtpSpawn( + self.get_program().as_ptr(), + self.get_argv().as_ptr() as *mut *const c_char, // argv + c_envp as *mut *const c_char, + 100 as c_int, // initial priority + stack_size, // initial stack size. 
+ 0, // options + 0, // task options + ); + + // Because FileDesc was not used, each duplicated file descriptor + // needs to be closed manually + if orig_stdin != libc::STDIN_FILENO { + t!(cvt_r(|| libc::dup2(orig_stdin, libc::STDIN_FILENO))); + libc::close(orig_stdin); + } + if orig_stdout != libc::STDOUT_FILENO { + t!(cvt_r(|| libc::dup2(orig_stdout, libc::STDOUT_FILENO))); + libc::close(orig_stdout); + } + if orig_stderr != libc::STDERR_FILENO { + t!(cvt_r(|| libc::dup2(orig_stderr, libc::STDERR_FILENO))); + libc::close(orig_stderr); + } + + if ret != libc::RTP_ID_ERROR { + p.pid = ret; + Ok((p, ours)) + } else { + Err(io::Error::last_os_error()) + } + } + } + + pub fn exec(&mut self, default: Stdio) -> io::Error { + let ret = Command::spawn(self, default, false); + match ret { + Ok(t) => unsafe { + let mut status = 0 as c_int; + libc::waitpid(t.0.pid, &mut status, 0); + libc::exit(0); + }, + Err(e) => e, + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Processes +//////////////////////////////////////////////////////////////////////////////// + +/// The unique id of the process (this should never be negative). +pub struct Process { + pid: RTP_ID, + status: Option<ExitStatus>, +} + +impl Process { + pub fn id(&self) -> u32 { + self.pid as u32 + } + + pub fn kill(&mut self) -> io::Result<()> { + // If we've already waited on this process then the pid can be recycled + // and used for another process, and we probably shouldn't be killing + // random processes, so just return an error. + if self.status.is_some() { + Err(Error::new( + ErrorKind::InvalidInput, + "invalid argument: can't kill an exited process", + )) + } else { + cvt(unsafe { libc::kill(self.pid, libc::SIGKILL) }).map(drop) + } + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + use crate::sys::cvt_r; + if let Some(status) = self.status { + return Ok(status); + } + let mut status = 0 as c_int; + cvt_r(|| unsafe { libc::waitpid(self.pid, &mut status, 0) })?; + self.status = Some(ExitStatus::new(status)); + Ok(ExitStatus::new(status)) + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + if let Some(status) = self.status { + return Ok(Some(status)); + } + let mut status = 0 as c_int; + let pid = cvt(unsafe { libc::waitpid(self.pid, &mut status, libc::WNOHANG) })?; + if pid == 0 { + Ok(None) + } else { + self.status = Some(ExitStatus::new(status)); + Ok(Some(ExitStatus::new(status))) + } + } +} diff --git a/library/std/src/sys/vxworks/rand.rs b/library/std/src/sys/vxworks/rand.rs new file mode 100644 index 00000000000..3a1ff5fd3b9 --- /dev/null +++ b/library/std/src/sys/vxworks/rand.rs @@ -0,0 +1,36 @@ +use crate::mem; +use crate::slice; + +pub fn hashmap_random_keys() -> (u64, u64) { + let mut v = (0, 0); + unsafe { + let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v)); + imp::fill_bytes(view); + } + return v; +} + +mod imp { + use crate::io; + use core::sync::atomic::{AtomicBool, Ordering::Relaxed}; + + pub fn fill_bytes(v: &mut [u8]) { + static RNG_INIT: AtomicBool = AtomicBool::new(false); + while !RNG_INIT.load(Relaxed) { + let ret = unsafe { libc::randSecure() }; + if ret < 0 { + panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); + } else if ret > 0 { + RNG_INIT.store(true, Relaxed); + break; + } + unsafe { libc::usleep(10) }; + } + let ret = unsafe { + libc::randABytes(v.as_mut_ptr() as *mut libc::c_uchar, v.len() as libc::c_int) + }; + if ret < 0 { + panic!("couldn't generate random 
bytes: {}", io::Error::last_os_error()); + } + } +} diff --git a/library/std/src/sys/vxworks/rwlock.rs b/library/std/src/sys/vxworks/rwlock.rs new file mode 100644 index 00000000000..c90304c2b4a --- /dev/null +++ b/library/std/src/sys/vxworks/rwlock.rs @@ -0,0 +1,114 @@ +use crate::cell::UnsafeCell; +use crate::sync::atomic::{AtomicUsize, Ordering}; + +pub struct RWLock { + inner: UnsafeCell<libc::pthread_rwlock_t>, + write_locked: UnsafeCell<bool>, + num_readers: AtomicUsize, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { + inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), + write_locked: UnsafeCell::new(false), + num_readers: AtomicUsize::new(0), + } + } + + #[inline] + pub unsafe fn read(&self) { + let r = libc::pthread_rwlock_rdlock(self.inner.get()); + if r == libc::EAGAIN { + panic!("rwlock maximum reader count exceeded"); + } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) { + if r == 0 { + self.raw_unlock(); + } + panic!("rwlock read lock would result in deadlock"); + } else { + debug_assert_eq!(r, 0); + self.num_readers.fetch_add(1, Ordering::Relaxed); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + let r = libc::pthread_rwlock_tryrdlock(self.inner.get()); + if r == 0 { + if *self.write_locked.get() { + self.raw_unlock(); + false + } else { + self.num_readers.fetch_add(1, Ordering::Relaxed); + true + } + } else { + false + } + } + + #[inline] + pub unsafe fn write(&self) { + let r = libc::pthread_rwlock_wrlock(self.inner.get()); + // See comments above for why we check for EDEADLK and write_locked. We + // also need to check that num_readers is 0. + if r == libc::EDEADLK + || *self.write_locked.get() + || self.num_readers.load(Ordering::Relaxed) != 0 + { + if r == 0 { + self.raw_unlock(); + } + panic!("rwlock write lock would result in deadlock"); + } else { + debug_assert_eq!(r, 0); + } + *self.write_locked.get() = true; + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + let r = libc::pthread_rwlock_trywrlock(self.inner.get()); + if r == 0 { + if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 { + self.raw_unlock(); + false + } else { + *self.write_locked.get() = true; + true + } + } else { + false + } + } + + #[inline] + unsafe fn raw_unlock(&self) { + let r = libc::pthread_rwlock_unlock(self.inner.get()); + debug_assert_eq!(r, 0); + } + + #[inline] + pub unsafe fn read_unlock(&self) { + debug_assert!(!*self.write_locked.get()); + self.num_readers.fetch_sub(1, Ordering::Relaxed); + self.raw_unlock(); + } + + #[inline] + pub unsafe fn write_unlock(&self) { + debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0); + debug_assert!(*self.write_locked.get()); + *self.write_locked.get() = false; + self.raw_unlock(); + } + #[inline] + pub unsafe fn destroy(&self) { + let r = libc::pthread_rwlock_destroy(self.inner.get()); + debug_assert_eq!(r, 0); + } +} diff --git a/library/std/src/sys/vxworks/stack_overflow.rs b/library/std/src/sys/vxworks/stack_overflow.rs new file mode 100644 index 00000000000..7b58c83193b --- /dev/null +++ b/library/std/src/sys/vxworks/stack_overflow.rs @@ -0,0 +1,38 @@ +#![cfg_attr(test, allow(dead_code))] + +use self::imp::{drop_handler, make_handler}; + +pub use self::imp::cleanup; +pub use self::imp::init; + +pub struct Handler { + _data: *mut libc::c_void, +} + +impl Handler { + pub unsafe fn new() -> Handler { + make_handler() + } +} + +impl Drop for Handler { + fn drop(&mut self) { + 
unsafe { + drop_handler(self); + } + } +} + +mod imp { + use crate::ptr; + + pub unsafe fn init() {} + + pub unsafe fn cleanup() {} + + pub unsafe fn make_handler() -> super::Handler { + super::Handler { _data: ptr::null_mut() } + } + + pub unsafe fn drop_handler(_handler: &mut super::Handler) {} +} diff --git a/library/std/src/sys/vxworks/stdio.rs b/library/std/src/sys/vxworks/stdio.rs new file mode 100644 index 00000000000..622444ccafd --- /dev/null +++ b/library/std/src/sys/vxworks/stdio.rs @@ -0,0 +1,69 @@ +use crate::io; +use crate::sys::fd::FileDesc; + +pub struct Stdin(()); +pub struct Stdout(()); +pub struct Stderr(()); + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin(())) + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let fd = FileDesc::new(libc::STDIN_FILENO); + let ret = fd.read(buf); + fd.into_raw(); // do not close this FD + ret + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout(())) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + let fd = FileDesc::new(libc::STDOUT_FILENO); + let ret = fd.write(buf); + fd.into_raw(); // do not close this FD + ret + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr(())) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + let fd = FileDesc::new(libc::STDERR_FILENO); + let ret = fd.write(buf); + fd.into_raw(); // do not close this FD + ret + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(libc::EBADF as i32) +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/vxworks/thread.rs b/library/std/src/sys/vxworks/thread.rs new file mode 100644 index 00000000000..24a2e0f965d --- /dev/null +++ b/library/std/src/sys/vxworks/thread.rs @@ -0,0 +1,155 @@ +use crate::cmp; +use crate::ffi::CStr; +use crate::io; +use crate::mem; +use crate::ptr; +use crate::sys::{os, stack_overflow}; +use crate::time::Duration; + +pub const DEFAULT_MIN_STACK_SIZE: usize = 0x40000; // 256K + +pub struct Thread { + id: libc::pthread_t, +} + +// Some platforms may have pthread_t as a pointer in which case we still want +// a thread to be Send/Sync +unsafe impl Send for Thread {} +unsafe impl Sync for Thread {} + +// The pthread_attr_setstacksize symbol doesn't exist in the emscripten libc, +// so we have to not link to it to satisfy emcc's ERROR_ON_UNDEFINED_SYMBOLS. +unsafe fn pthread_attr_setstacksize( + attr: *mut libc::pthread_attr_t, + stack_size: libc::size_t, +) -> libc::c_int { + libc::pthread_attr_setstacksize(attr, stack_size) +} + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + let p = Box::into_raw(box p); + let mut native: libc::pthread_t = mem::zeroed(); + let mut attr: libc::pthread_attr_t = mem::zeroed(); + assert_eq!(libc::pthread_attr_init(&mut attr), 0); + + let stack_size = cmp::max(stack, min_stack_size(&attr)); + + match pthread_attr_setstacksize(&mut attr, stack_size) { + 0 => {} + n => { + assert_eq!(n, libc::EINVAL); + // EINVAL means |stack_size| is either too small or not a + // multiple of the system page size. 
Because it's definitely + // >= PTHREAD_STACK_MIN, it must be an alignment issue. + // Round up to the nearest page and try again. + let page_size = os::page_size(); + let stack_size = + (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1); + assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0); + } + }; + + let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _); + // Note: if the thread creation fails and this assert fails, then p will + // be leaked. However, an alternative design could cause double-free + // which is clearly worse. + assert_eq!(libc::pthread_attr_destroy(&mut attr), 0); + + return if ret != 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + drop(Box::from_raw(p)); + Err(io::Error::from_raw_os_error(ret)) + } else { + Ok(Thread { id: native }) + }; + + extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void { + unsafe { + // Next, set up our stack overflow handler which may get triggered if we run + // out of stack. + let _handler = stack_overflow::Handler::new(); + // Finally, let's run some code. + Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + } + ptr::null_mut() + } + } + + pub fn yield_now() { + let ret = unsafe { libc::sched_yield() }; + debug_assert_eq!(ret, 0); + } + + pub fn set_name(_name: &CStr) { + // VxWorks does not provide a way to set the task name except at creation time + } + + pub fn sleep(dur: Duration) { + let mut secs = dur.as_secs(); + let mut nsecs = dur.subsec_nanos() as _; + + // If we're awoken with a signal then the return value will be -1 and + // nanosleep will fill in `ts` with the remaining time. + unsafe { + while secs > 0 || nsecs > 0 { + let mut ts = libc::timespec { + tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t, + tv_nsec: nsecs, + }; + secs -= ts.tv_sec as u64; + if libc::nanosleep(&ts, &mut ts) == -1 { + assert_eq!(os::errno(), libc::EINTR); + secs += ts.tv_sec as u64; + nsecs = ts.tv_nsec; + } else { + nsecs = 0; + } + } + } + } + + pub fn join(self) { + unsafe { + let ret = libc::pthread_join(self.id, ptr::null_mut()); + mem::forget(self); + assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret)); + } + } + + pub fn id(&self) -> libc::pthread_t { + self.id + } + + pub fn into_id(self) -> libc::pthread_t { + let id = self.id; + mem::forget(self); + id + } +} + +impl Drop for Thread { + fn drop(&mut self) { + let ret = unsafe { libc::pthread_detach(self.id) }; + debug_assert_eq!(ret, 0); + } +} + +#[cfg_attr(test, allow(dead_code))] +pub mod guard { + use crate::ops::Range; + pub type Guard = Range<usize>; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } + pub unsafe fn deinit() {} +} + +fn min_stack_size(_: *const libc::pthread_attr_t) -> usize { + libc::PTHREAD_STACK_MIN +} diff --git a/library/std/src/sys/vxworks/thread_local_dtor.rs b/library/std/src/sys/vxworks/thread_local_dtor.rs new file mode 100644 index 00000000000..3f73f6c4903 --- /dev/null +++ b/library/std/src/sys/vxworks/thread_local_dtor.rs @@ -0,0 +1,7 @@ +#![cfg(target_thread_local)] +#![unstable(feature = "thread_local_internals", issue = "none")] + +pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { + use crate::sys_common::thread_local::register_dtor_fallback; + register_dtor_fallback(t, dtor); +} diff --git 
a/library/std/src/sys/vxworks/thread_local_key.rs b/library/std/src/sys/vxworks/thread_local_key.rs new file mode 100644 index 00000000000..2c5b94b1e61 --- /dev/null +++ b/library/std/src/sys/vxworks/thread_local_key.rs @@ -0,0 +1,34 @@ +#![allow(dead_code)] // not used on all platforms + +use crate::mem; + +pub type Key = libc::pthread_key_t; + +#[inline] +pub unsafe fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key { + let mut key = 0; + assert_eq!(libc::pthread_key_create(&mut key, mem::transmute(dtor)), 0); + key +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + let r = libc::pthread_setspecific(key, value as *mut _); + debug_assert_eq!(r, 0); +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + libc::pthread_getspecific(key) as *mut u8 +} + +#[inline] +pub unsafe fn destroy(key: Key) { + let r = libc::pthread_key_delete(key); + debug_assert_eq!(r, 0); +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + false +} diff --git a/library/std/src/sys/vxworks/time.rs b/library/std/src/sys/vxworks/time.rs new file mode 100644 index 00000000000..8f46f4d284f --- /dev/null +++ b/library/std/src/sys/vxworks/time.rs @@ -0,0 +1,197 @@ +use crate::cmp::Ordering; +use crate::time::Duration; +use core::hash::{Hash, Hasher}; + +pub use self::inner::{Instant, SystemTime, UNIX_EPOCH}; +use crate::convert::TryInto; + +const NSEC_PER_SEC: u64 = 1_000_000_000; + +#[derive(Copy, Clone)] +struct Timespec { + t: libc::timespec, +} + +impl Timespec { + const fn zero() -> Timespec { + Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } } + } + fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> { + if self >= other { + Ok(if self.t.tv_nsec >= other.t.tv_nsec { + Duration::new( + (self.t.tv_sec - other.t.tv_sec) as u64, + (self.t.tv_nsec - other.t.tv_nsec) as u32, + ) + } else { + Duration::new( + (self.t.tv_sec - 1 - other.t.tv_sec) as u64, + self.t.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.t.tv_nsec as u32, + ) + }) + } else { + match other.sub_timespec(self) { + Ok(d) => Err(d), + Err(d) => Ok(d), + } + } + } + + fn checked_add_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_add(secs))?; + + // Nano calculations can't overflow because nanos are <1B which fit + // in a u32. + let mut nsec = other.subsec_nanos() + self.t.tv_nsec as u32; + if nsec >= NSEC_PER_SEC as u32 { + nsec -= NSEC_PER_SEC as u32; + secs = secs.checked_add(1)?; + } + Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } + + fn checked_sub_duration(&self, other: &Duration) -> Option<Timespec> { + let mut secs = other + .as_secs() + .try_into() // <- target type would be `libc::time_t` + .ok() + .and_then(|secs| self.t.tv_sec.checked_sub(secs))?; + + // Similar to above, nanos can't overflow. 
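+        // For example, subtracting 0.7s from 5.2s: 200_000_000 - 700_000_000 is
+        // -500_000_000, so NSEC_PER_SEC is added back (giving 500_000_000) and
+        // one second is borrowed, yielding 4s + 500_000_000ns = 4.5s.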
+ let mut nsec = self.t.tv_nsec as i32 - other.subsec_nanos() as i32; + if nsec < 0 { + nsec += NSEC_PER_SEC as i32; + secs = secs.checked_sub(1)?; + } + Some(Timespec { t: libc::timespec { tv_sec: secs, tv_nsec: nsec as _ } }) + } +} + +impl PartialEq for Timespec { + fn eq(&self, other: &Timespec) -> bool { + self.t.tv_sec == other.t.tv_sec && self.t.tv_nsec == other.t.tv_nsec + } +} + +impl Eq for Timespec {} + +impl PartialOrd for Timespec { + fn partial_cmp(&self, other: &Timespec) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for Timespec { + fn cmp(&self, other: &Timespec) -> Ordering { + let me = (self.t.tv_sec, self.t.tv_nsec); + let other = (other.t.tv_sec, other.t.tv_nsec); + me.cmp(&other) + } +} + +impl Hash for Timespec { + fn hash<H: Hasher>(&self, state: &mut H) { + self.t.tv_sec.hash(state); + self.t.tv_nsec.hash(state); + } +} +mod inner { + use crate::fmt; + use crate::sys::cvt; + use crate::time::Duration; + + use super::Timespec; + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct Instant { + t: Timespec, + } + + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct SystemTime { + t: Timespec, + } + + pub const UNIX_EPOCH: SystemTime = + SystemTime { t: Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } } }; + + impl Instant { + pub fn now() -> Instant { + Instant { t: now(libc::CLOCK_MONOTONIC) } + } + + pub const fn zero() -> Instant { + Instant { t: Timespec::zero() } + } + + pub fn actually_monotonic() -> bool { + true + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.t.sub_timespec(&other.t).ok() + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub_duration(other)? }) + } + } + + impl fmt::Debug for Instant { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Instant") + .field("tv_sec", &self.t.t.tv_sec) + .field("tv_nsec", &self.t.t.tv_nsec) + .finish() + } + } + + impl SystemTime { + pub fn now() -> SystemTime { + SystemTime { t: now(libc::CLOCK_REALTIME) } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.t.sub_timespec(&other.t) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_add_duration(other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime { t: self.t.checked_sub_duration(other)? 
}) + } + } + + impl From<libc::timespec> for SystemTime { + fn from(t: libc::timespec) -> SystemTime { + SystemTime { t: Timespec { t: t } } + } + } + + impl fmt::Debug for SystemTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SystemTime") + .field("tv_sec", &self.t.t.tv_sec) + .field("tv_nsec", &self.t.t.tv_nsec) + .finish() + } + } + + pub type clock_t = libc::c_int; + + fn now(clock: clock_t) -> Timespec { + let mut t = Timespec { t: libc::timespec { tv_sec: 0, tv_nsec: 0 } }; + cvt(unsafe { libc::clock_gettime(clock, &mut t.t) }).unwrap(); + t + } +} diff --git a/library/std/src/sys/wasi/alloc.rs b/library/std/src/sys/wasi/alloc.rs new file mode 100644 index 00000000000..57187851a14 --- /dev/null +++ b/library/std/src/sys/wasi/alloc.rs @@ -0,0 +1,42 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::ptr; +use crate::sys_common::alloc::{realloc_fallback, MIN_ALIGN}; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::malloc(layout.size()) as *mut u8 + } else { + libc::aligned_alloc(layout.align(), layout.size()) as *mut u8 + } + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::calloc(layout.size(), 1) as *mut u8 + } else { + let ptr = self.alloc(layout.clone()); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + libc::free(ptr as *mut libc::c_void) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 + } else { + realloc_fallback(self, ptr, layout, new_size) + } + } +} diff --git a/library/std/src/sys/wasi/args.rs b/library/std/src/sys/wasi/args.rs new file mode 100644 index 00000000000..02aa68d6f3a --- /dev/null +++ b/library/std/src/sys/wasi/args.rs @@ -0,0 +1,65 @@ +use crate::ffi::{CStr, OsStr, OsString}; +use crate::marker::PhantomData; +use crate::os::wasi::ffi::OsStrExt; +use crate::vec; + +pub unsafe fn init(_argc: isize, _argv: *const *const u8) {} + +pub unsafe fn cleanup() {} + +pub struct Args { + iter: vec::IntoIter<OsString>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +/// Returns the command line arguments +pub fn args() -> Args { + Args { + iter: maybe_args().unwrap_or(Vec::new()).into_iter(), + _dont_send_or_sync_me: PhantomData, + } +} + +fn maybe_args() -> Option<Vec<OsString>> { + unsafe { + let (argc, buf_size) = wasi::args_sizes_get().ok()?; + let mut argv = Vec::with_capacity(argc); + let mut buf = Vec::with_capacity(buf_size); + wasi::args_get(argv.as_mut_ptr(), buf.as_mut_ptr()).ok()?; + argv.set_len(argc); + let mut ret = Vec::with_capacity(argc); + for ptr in argv { + let s = CStr::from_ptr(ptr.cast()); + ret.push(OsStr::from_bytes(s.to_bytes()).to_owned()); + } + Some(ret) + } +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.iter.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize 
{ + self.iter.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.iter.next_back() + } +} diff --git a/library/std/src/sys/wasi/env.rs b/library/std/src/sys/wasi/env.rs new file mode 100644 index 00000000000..730e356d7fe --- /dev/null +++ b/library/std/src/sys/wasi/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = ""; + pub const OS: &str = ""; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ".wasm"; + pub const DLL_EXTENSION: &str = "wasm"; + pub const EXE_SUFFIX: &str = ".wasm"; + pub const EXE_EXTENSION: &str = "wasm"; +} diff --git a/library/std/src/sys/wasi/ext/ffi.rs b/library/std/src/sys/wasi/ext/ffi.rs new file mode 100644 index 00000000000..f71f316d1ba --- /dev/null +++ b/library/std/src/sys/wasi/ext/ffi.rs @@ -0,0 +1,6 @@ +//! WASI-specific extension to the primitives in the `std::ffi` module + +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use crate::sys_common::os_str_bytes::*; diff --git a/library/std/src/sys/wasi/ext/fs.rs b/library/std/src/sys/wasi/ext/fs.rs new file mode 100644 index 00000000000..f41c6626ccf --- /dev/null +++ b/library/std/src/sys/wasi/ext/fs.rs @@ -0,0 +1,538 @@ +//! WASI-specific extensions to primitives in the `std::fs` module. + +#![unstable(feature = "wasi_ext", issue = "none")] + +use crate::fs::{self, File, Metadata, OpenOptions}; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::path::{Path, PathBuf}; +use crate::sys::fs::osstr2str; +use crate::sys_common::{AsInner, AsInnerMut, FromInner}; + +/// WASI-specific extensions to [`File`]. +/// +/// [`File`]: ../../../../std/fs/struct.File.html +pub trait FileExt { + /// Reads a number of bytes starting from a given offset. + /// + /// Returns the number of bytes read. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Note that similar to [`File::read`], it is not an error to return with a + /// short read. + /// + /// [`File::read`]: ../../../../std/fs/struct.File.html#method.read + fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + let bufs = &mut [IoSliceMut::new(buf)]; + self.read_vectored_at(bufs, offset) + } + + /// Reads a number of bytes starting from a given offset. + /// + /// Returns the number of bytes read. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Note that similar to [`File::read_vectored`], it is not an error to + /// return with a short read. + /// + /// [`File::read`]: ../../../../std/fs/struct.File.html#method.read_vectored + fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize>; + + /// Reads the exact number of byte required to fill `buf` from the given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// Similar to [`Read::read_exact`] but uses [`read_at`] instead of `read`. 
+ /// + /// [`Read::read_exact`]: ../../../../std/io/trait.Read.html#method.read_exact + /// [`read_at`]: #tymethod.read_at + /// + /// # Errors + /// + /// If this function encounters an error of the kind + /// [`ErrorKind::Interrupted`] then the error is ignored and the operation + /// will continue. + /// + /// If this function encounters an "end of file" before completely filling + /// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`]. + /// The contents of `buf` are unspecified in this case. + /// + /// If any other read error is encountered then this function immediately + /// returns. The contents of `buf` are unspecified in this case. + /// + /// If this function returns an error, it is unspecified how many bytes it + /// has read, but it will never read more than would be necessary to + /// completely fill the buffer. + /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`ErrorKind::UnexpectedEof`]: ../../../../std/io/enum.ErrorKind.html#variant.UnexpectedEof + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn read_exact_at(&self, mut buf: &mut [u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.read_at(buf, offset) { + Ok(0) => break, + Ok(n) => { + let tmp = buf; + buf = &mut tmp[n..]; + offset += n as u64; + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + if !buf.is_empty() { + Err(io::Error::new(io::ErrorKind::UnexpectedEof, "failed to fill whole buffer")) + } else { + Ok(()) + } + } + + /// Writes a number of bytes starting from a given offset. + /// + /// Returns the number of bytes written. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// When writing beyond the end of the file, the file is appropriately + /// extended and the intermediate bytes are initialized with the value 0. + /// + /// Note that similar to [`File::write`], it is not an error to return a + /// short write. + /// + /// [`File::write`]: ../../../../std/fs/struct.File.html#write.v + fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + let bufs = &[IoSlice::new(buf)]; + self.write_vectored_at(bufs, offset) + } + + /// Writes a number of bytes starting from a given offset. + /// + /// Returns the number of bytes written. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// When writing beyond the end of the file, the file is appropriately + /// extended and the intermediate bytes are initialized with the value 0. + /// + /// Note that similar to [`File::write_vectored`], it is not an error to return a + /// short write. + /// + /// [`File::write`]: ../../../../std/fs/struct.File.html#method.write_vectored + fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize>; + + /// Attempts to write an entire buffer starting from a given offset. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. + /// + /// The current file cursor is not affected by this function. + /// + /// This method will continuously call [`write_at`] until there is no more data + /// to be written or an error of non-[`ErrorKind::Interrupted`] kind is + /// returned. 
This method will not return until the entire buffer has been + /// successfully written or such an error occurs. The first error that is + /// not of [`ErrorKind::Interrupted`] kind generated from this method will be + /// returned. + /// + /// # Errors + /// + /// This function will return the first error of + /// non-[`ErrorKind::Interrupted`] kind that [`write_at`] returns. + /// + /// [`ErrorKind::Interrupted`]: ../../../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`write_at`]: #tymethod.write_at + #[stable(feature = "rw_exact_all_at", since = "1.33.0")] + fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> io::Result<()> { + while !buf.is_empty() { + match self.write_at(buf, offset) { + Ok(0) => { + return Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to write whole buffer", + )); + } + Ok(n) => { + buf = &buf[n..]; + offset += n as u64 + } + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + Ok(()) + } + + /// Returns the current position within the file. + /// + /// This corresponds to the `fd_tell` syscall and is similar to + /// `seek` where you offset 0 bytes from the current position. + fn tell(&self) -> io::Result<u64>; + + /// Adjust the flags associated with this file. + /// + /// This corresponds to the `fd_fdstat_set_flags` syscall. + fn fdstat_set_flags(&self, flags: u16) -> io::Result<()>; + + /// Adjust the rights associated with this file. + /// + /// This corresponds to the `fd_fdstat_set_rights` syscall. + fn fdstat_set_rights(&self, rights: u64, inheriting: u64) -> io::Result<()>; + + /// Provide file advisory information on a file descriptor. + /// + /// This corresponds to the `fd_advise` syscall. + fn advise(&self, offset: u64, len: u64, advice: u8) -> io::Result<()>; + + /// Force the allocation of space in a file. + /// + /// This corresponds to the `fd_allocate` syscall. + fn allocate(&self, offset: u64, len: u64) -> io::Result<()>; + + /// Create a directory. + /// + /// This corresponds to the `path_create_directory` syscall. + fn create_directory<P: AsRef<Path>>(&self, dir: P) -> io::Result<()>; + + /// Read the contents of a symbolic link. + /// + /// This corresponds to the `path_readlink` syscall. + fn read_link<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf>; + + /// Return the attributes of a file or directory. + /// + /// This corresponds to the `path_filestat_get` syscall. + fn metadata_at<P: AsRef<Path>>(&self, lookup_flags: u32, path: P) -> io::Result<Metadata>; + + /// Unlink a file. + /// + /// This corresponds to the `path_unlink_file` syscall. + fn remove_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()>; + + /// Remove a directory. + /// + /// This corresponds to the `path_remove_directory` syscall. + fn remove_directory<P: AsRef<Path>>(&self, path: P) -> io::Result<()>; +} + +// FIXME: bind fd_fdstat_get - need to define a custom return type +// FIXME: bind fd_readdir - can't return `ReadDir` since we only have entry name +// FIXME: bind fd_filestat_set_times maybe? - on crates.io for unix +// FIXME: bind path_filestat_set_times maybe? - on crates.io for unix +// FIXME: bind poll_oneoff maybe? - probably should wait for I/O to settle +// FIXME: bind random_get maybe? 
- on crates.io for unix + +impl FileExt for fs::File { + fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + self.as_inner().fd().pread(bufs, offset) + } + + fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + self.as_inner().fd().pwrite(bufs, offset) + } + + fn tell(&self) -> io::Result<u64> { + self.as_inner().fd().tell() + } + + fn fdstat_set_flags(&self, flags: u16) -> io::Result<()> { + self.as_inner().fd().set_flags(flags) + } + + fn fdstat_set_rights(&self, rights: u64, inheriting: u64) -> io::Result<()> { + self.as_inner().fd().set_rights(rights, inheriting) + } + + fn advise(&self, offset: u64, len: u64, advice: u8) -> io::Result<()> { + self.as_inner().fd().advise(offset, len, advice) + } + + fn allocate(&self, offset: u64, len: u64) -> io::Result<()> { + self.as_inner().fd().allocate(offset, len) + } + + fn create_directory<P: AsRef<Path>>(&self, dir: P) -> io::Result<()> { + self.as_inner().fd().create_directory(osstr2str(dir.as_ref().as_ref())?) + } + + fn read_link<P: AsRef<Path>>(&self, path: P) -> io::Result<PathBuf> { + self.as_inner().read_link(path.as_ref()) + } + + fn metadata_at<P: AsRef<Path>>(&self, lookup_flags: u32, path: P) -> io::Result<Metadata> { + let m = self.as_inner().metadata_at(lookup_flags, path.as_ref())?; + Ok(FromInner::from_inner(m)) + } + + fn remove_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { + self.as_inner().fd().unlink_file(osstr2str(path.as_ref().as_ref())?) + } + + fn remove_directory<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { + self.as_inner().fd().remove_directory(osstr2str(path.as_ref().as_ref())?) + } +} + +/// WASI-specific extensions to [`fs::OpenOptions`]. +/// +/// [`fs::OpenOptions`]: ../../../../std/fs/struct.OpenOptions.html +pub trait OpenOptionsExt { + /// Pass custom `dirflags` argument to `path_open`. + /// + /// This option configures the `dirflags` argument to the + /// `path_open` syscall which `OpenOptions` will eventually call. The + /// `dirflags` argument configures how the file is looked up, currently + /// primarily affecting whether symlinks are followed or not. + /// + /// By default this value is `__WASI_LOOKUP_SYMLINK_FOLLOW`, or symlinks are + /// followed. You can call this method with 0 to disable following symlinks + fn lookup_flags(&mut self, flags: u32) -> &mut Self; + + /// Indicates whether `OpenOptions` must open a directory or not. + /// + /// This method will configure whether the `__WASI_O_DIRECTORY` flag is + /// passed when opening a file. When passed it will require that the opened + /// path is a directory. + /// + /// This option is by default `false` + fn directory(&mut self, dir: bool) -> &mut Self; + + /// Indicates whether `__WASI_FDFLAG_DSYNC` is passed in the `fs_flags` + /// field of `path_open`. + /// + /// This option is by default `false` + fn dsync(&mut self, dsync: bool) -> &mut Self; + + /// Indicates whether `__WASI_FDFLAG_NONBLOCK` is passed in the `fs_flags` + /// field of `path_open`. + /// + /// This option is by default `false` + fn nonblock(&mut self, nonblock: bool) -> &mut Self; + + /// Indicates whether `__WASI_FDFLAG_RSYNC` is passed in the `fs_flags` + /// field of `path_open`. + /// + /// This option is by default `false` + fn rsync(&mut self, rsync: bool) -> &mut Self; + + /// Indicates whether `__WASI_FDFLAG_SYNC` is passed in the `fs_flags` + /// field of `path_open`. 
+ /// + /// This option is by default `false` + fn sync(&mut self, sync: bool) -> &mut Self; + + /// Indicates the value that should be passed in for the `fs_rights_base` + /// parameter of `path_open`. + /// + /// This option defaults based on the `read` and `write` configuration of + /// this `OpenOptions` builder. If this method is called, however, the + /// exact mask passed in will be used instead. + fn fs_rights_base(&mut self, rights: u64) -> &mut Self; + + /// Indicates the value that should be passed in for the + /// `fs_rights_inheriting` parameter of `path_open`. + /// + /// The default for this option is the same value as what will be passed + /// for the `fs_rights_base` parameter but if this method is called then + /// the specified value will be used instead. + fn fs_rights_inheriting(&mut self, rights: u64) -> &mut Self; + + /// Open a file or directory. + /// + /// This corresponds to the `path_open` syscall. + fn open_at<P: AsRef<Path>>(&self, file: &File, path: P) -> io::Result<File>; +} + +impl OpenOptionsExt for OpenOptions { + fn lookup_flags(&mut self, flags: u32) -> &mut OpenOptions { + self.as_inner_mut().lookup_flags(flags); + self + } + + fn directory(&mut self, dir: bool) -> &mut OpenOptions { + self.as_inner_mut().directory(dir); + self + } + + fn dsync(&mut self, enabled: bool) -> &mut OpenOptions { + self.as_inner_mut().dsync(enabled); + self + } + + fn nonblock(&mut self, enabled: bool) -> &mut OpenOptions { + self.as_inner_mut().nonblock(enabled); + self + } + + fn rsync(&mut self, enabled: bool) -> &mut OpenOptions { + self.as_inner_mut().rsync(enabled); + self + } + + fn sync(&mut self, enabled: bool) -> &mut OpenOptions { + self.as_inner_mut().sync(enabled); + self + } + + fn fs_rights_base(&mut self, rights: u64) -> &mut OpenOptions { + self.as_inner_mut().fs_rights_base(rights); + self + } + + fn fs_rights_inheriting(&mut self, rights: u64) -> &mut OpenOptions { + self.as_inner_mut().fs_rights_inheriting(rights); + self + } + + fn open_at<P: AsRef<Path>>(&self, file: &File, path: P) -> io::Result<File> { + let inner = file.as_inner().open_at(path.as_ref(), self.as_inner())?; + Ok(File::from_inner(inner)) + } +} + +/// WASI-specific extensions to [`fs::Metadata`]. +/// +/// [`fs::Metadata`]: ../../../../std/fs/struct.Metadata.html +pub trait MetadataExt { + /// Returns the `st_dev` field of the internal `filestat_t` + fn dev(&self) -> u64; + /// Returns the `st_ino` field of the internal `filestat_t` + fn ino(&self) -> u64; + /// Returns the `st_nlink` field of the internal `filestat_t` + fn nlink(&self) -> u64; + /// Returns the `st_atim` field of the internal `filestat_t` + fn atim(&self) -> u64; + /// Returns the `st_mtim` field of the internal `filestat_t` + fn mtim(&self) -> u64; + /// Returns the `st_ctim` field of the internal `filestat_t` + fn ctim(&self) -> u64; +} + +impl MetadataExt for fs::Metadata { + fn dev(&self) -> u64 { + self.as_inner().as_wasi().dev + } + fn ino(&self) -> u64 { + self.as_inner().as_wasi().ino + } + fn nlink(&self) -> u64 { + self.as_inner().as_wasi().nlink + } + fn atim(&self) -> u64 { + self.as_inner().as_wasi().atim + } + fn mtim(&self) -> u64 { + self.as_inner().as_wasi().mtim + } + fn ctim(&self) -> u64 { + self.as_inner().as_wasi().ctim + } +} + +/// WASI-specific extensions for [`FileType`]. +/// +/// Adds support for special WASI file types such as block/character devices, +/// pipes, and sockets. 
+/// +/// [`FileType`]: ../../../../std/fs/struct.FileType.html +pub trait FileTypeExt { + /// Returns `true` if this file type is a block device. + fn is_block_device(&self) -> bool; + /// Returns `true` if this file type is a character device. + fn is_character_device(&self) -> bool; + /// Returns `true` if this file type is a socket datagram. + fn is_socket_dgram(&self) -> bool; + /// Returns `true` if this file type is a socket stream. + fn is_socket_stream(&self) -> bool; +} + +impl FileTypeExt for fs::FileType { + fn is_block_device(&self) -> bool { + self.as_inner().bits() == wasi::FILETYPE_BLOCK_DEVICE + } + fn is_character_device(&self) -> bool { + self.as_inner().bits() == wasi::FILETYPE_CHARACTER_DEVICE + } + fn is_socket_dgram(&self) -> bool { + self.as_inner().bits() == wasi::FILETYPE_SOCKET_DGRAM + } + fn is_socket_stream(&self) -> bool { + self.as_inner().bits() == wasi::FILETYPE_SOCKET_STREAM + } +} + +/// WASI-specific extension methods for [`fs::DirEntry`]. +/// +/// [`fs::DirEntry`]: ../../../../std/fs/struct.DirEntry.html +pub trait DirEntryExt { + /// Returns the underlying `d_ino` field of the `dirent_t` + fn ino(&self) -> u64; +} + +impl DirEntryExt for fs::DirEntry { + fn ino(&self) -> u64 { + self.as_inner().ino() + } +} + +/// Create a hard link. +/// +/// This corresponds to the `path_link` syscall. +pub fn link<P: AsRef<Path>, U: AsRef<Path>>( + old_fd: &File, + old_flags: u32, + old_path: P, + new_fd: &File, + new_path: U, +) -> io::Result<()> { + old_fd.as_inner().fd().link( + old_flags, + osstr2str(old_path.as_ref().as_ref())?, + new_fd.as_inner().fd(), + osstr2str(new_path.as_ref().as_ref())?, + ) +} + +/// Rename a file or directory. +/// +/// This corresponds to the `path_rename` syscall. +pub fn rename<P: AsRef<Path>, U: AsRef<Path>>( + old_fd: &File, + old_path: P, + new_fd: &File, + new_path: U, +) -> io::Result<()> { + old_fd.as_inner().fd().rename( + osstr2str(old_path.as_ref().as_ref())?, + new_fd.as_inner().fd(), + osstr2str(new_path.as_ref().as_ref())?, + ) +} + +/// Create a symbolic link. +/// +/// This corresponds to the `path_symlink` syscall. +pub fn symlink<P: AsRef<Path>, U: AsRef<Path>>( + old_path: P, + fd: &File, + new_path: U, +) -> io::Result<()> { + fd.as_inner() + .fd() + .symlink(osstr2str(old_path.as_ref().as_ref())?, osstr2str(new_path.as_ref().as_ref())?) +} diff --git a/library/std/src/sys/wasi/ext/io.rs b/library/std/src/sys/wasi/ext/io.rs new file mode 100644 index 00000000000..e849400d67e --- /dev/null +++ b/library/std/src/sys/wasi/ext/io.rs @@ -0,0 +1,142 @@ +//! WASI-specific extensions to general I/O primitives + +#![unstable(feature = "wasi_ext", issue = "none")] + +use crate::fs; +use crate::io; +use crate::net; +use crate::sys; +use crate::sys_common::{AsInner, FromInner, IntoInner}; + +/// Raw file descriptors. +pub type RawFd = u32; + +/// A trait to extract the raw WASI file descriptor from an underlying +/// object. +pub trait AsRawFd { + /// Extracts the raw file descriptor. + /// + /// This method does **not** pass ownership of the raw file descriptor + /// to the caller. The descriptor is only guaranteed to be valid while + /// the original object has not yet been destroyed. + fn as_raw_fd(&self) -> RawFd; +} + +/// A trait to express the ability to construct an object from a raw file +/// descriptor. +pub trait FromRawFd { + /// Constructs a new instance of `Self` from the given raw file + /// descriptor. + /// + /// This function **consumes ownership** of the specified file + /// descriptor. 
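// Rough usage sketch for the directory-relative helpers defined above.
// Assumptions: nightly plus the unstable `wasi_ext` feature, the functions
// re-exported under `std::os::wasi::fs`, `dir` being an open directory
// handle, and illustrative file names.
use std::fs::File;
use std::io;
use std::os::wasi::fs::{rename, symlink};

fn rotate_log(dir: &File) -> io::Result<()> {
    // Both paths are resolved relative to `dir`, mirroring `path_rename`.
    rename(dir, "current.log", dir, "previous.log")?;
    // `path_symlink`: make "current.log" a link pointing at "previous.log".
    symlink("previous.log", dir, "current.log")
}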
The returned object will take responsibility for closing + /// it when the object goes out of scope. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + unsafe fn from_raw_fd(fd: RawFd) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw file descriptor. +pub trait IntoRawFd { + /// Consumes this object, returning the raw underlying file descriptor. + /// + /// This function **transfers ownership** of the underlying file descriptor + /// to the caller. Callers are then the unique owners of the file descriptor + /// and must close the descriptor once it's no longer needed. + fn into_raw_fd(self) -> RawFd; +} + +impl AsRawFd for net::TcpStream { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().as_raw() + } +} + +impl FromRawFd for net::TcpStream { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpStream { + net::TcpStream::from_inner(sys::net::TcpStream::from_inner(fd)) + } +} + +impl IntoRawFd for net::TcpStream { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +impl AsRawFd for net::TcpListener { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().as_raw() + } +} + +impl FromRawFd for net::TcpListener { + unsafe fn from_raw_fd(fd: RawFd) -> net::TcpListener { + net::TcpListener::from_inner(sys::net::TcpListener::from_inner(fd)) + } +} + +impl IntoRawFd for net::TcpListener { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +impl AsRawFd for net::UdpSocket { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().as_raw() + } +} + +impl FromRawFd for net::UdpSocket { + unsafe fn from_raw_fd(fd: RawFd) -> net::UdpSocket { + net::UdpSocket::from_inner(sys::net::UdpSocket::from_inner(fd)) + } +} + +impl IntoRawFd for net::UdpSocket { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +impl AsRawFd for fs::File { + fn as_raw_fd(&self) -> RawFd { + self.as_inner().fd().as_raw() + } +} + +impl FromRawFd for fs::File { + unsafe fn from_raw_fd(fd: RawFd) -> fs::File { + fs::File::from_inner(sys::fs::File::from_inner(fd)) + } +} + +impl IntoRawFd for fs::File { + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_fd().into_raw() + } +} + +impl AsRawFd for io::Stdin { + fn as_raw_fd(&self) -> RawFd { + sys::stdio::Stdin.as_raw_fd() + } +} + +impl AsRawFd for io::Stdout { + fn as_raw_fd(&self) -> RawFd { + sys::stdio::Stdout.as_raw_fd() + } +} + +impl AsRawFd for io::Stderr { + fn as_raw_fd(&self) -> RawFd { + sys::stdio::Stderr.as_raw_fd() + } +} diff --git a/library/std/src/sys/wasi/ext/mod.rs b/library/std/src/sys/wasi/ext/mod.rs new file mode 100644 index 00000000000..58c8c46c969 --- /dev/null +++ b/library/std/src/sys/wasi/ext/mod.rs @@ -0,0 +1,22 @@ +pub mod ffi; +pub mod fs; +pub mod io; + +/// A prelude for conveniently writing platform-specific code. +/// +/// Includes all extension traits, and some important type definitions. 
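// Sketch of the ownership contract the raw-descriptor traits above spell out.
// Assumptions: nightly plus the unstable `wasi_ext` feature and the traits
// re-exported under `std::os::wasi::io`; the same pattern applies to the
// network types as to `File`.
use std::fs::File;
use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};

fn stash_and_restore(file: File) -> File {
    let peeked: RawFd = file.as_raw_fd(); // borrow only; `file` still owns it
    let owned: RawFd = file.into_raw_fd(); // ownership moves to the caller
    debug_assert_eq!(peeked, owned);
    // SAFETY: `owned` is a valid descriptor and nothing else owns it anymore.
    unsafe { File::from_raw_fd(owned) }
}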
+#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use crate::sys::ext::ffi::{OsStrExt, OsStringExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use crate::sys::ext::fs::FileTypeExt; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use crate::sys::ext::fs::{DirEntryExt, FileExt, MetadataExt, OpenOptionsExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use crate::sys::ext::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +} diff --git a/library/std/src/sys/wasi/fd.rs b/library/std/src/sys/wasi/fd.rs new file mode 100644 index 00000000000..8458ded5db0 --- /dev/null +++ b/library/std/src/sys/wasi/fd.rs @@ -0,0 +1,228 @@ +#![allow(dead_code)] + +use super::err2io; +use crate::io::{self, IoSlice, IoSliceMut, SeekFrom}; +use crate::mem; +use crate::net::Shutdown; + +#[derive(Debug)] +pub struct WasiFd { + fd: wasi::Fd, +} + +fn iovec<'a>(a: &'a mut [IoSliceMut<'_>]) -> &'a [wasi::Iovec] { + assert_eq!(mem::size_of::<IoSliceMut<'_>>(), mem::size_of::<wasi::Iovec>()); + assert_eq!(mem::align_of::<IoSliceMut<'_>>(), mem::align_of::<wasi::Iovec>()); + // SAFETY: `IoSliceMut` and `IoVec` have exactly the same memory layout + unsafe { mem::transmute(a) } +} + +fn ciovec<'a>(a: &'a [IoSlice<'_>]) -> &'a [wasi::Ciovec] { + assert_eq!(mem::size_of::<IoSlice<'_>>(), mem::size_of::<wasi::Ciovec>()); + assert_eq!(mem::align_of::<IoSlice<'_>>(), mem::align_of::<wasi::Ciovec>()); + // SAFETY: `IoSlice` and `CIoVec` have exactly the same memory layout + unsafe { mem::transmute(a) } +} + +impl WasiFd { + pub unsafe fn from_raw(fd: wasi::Fd) -> WasiFd { + WasiFd { fd } + } + + pub fn into_raw(self) -> wasi::Fd { + let ret = self.fd; + mem::forget(self); + ret + } + + pub fn as_raw(&self) -> wasi::Fd { + self.fd + } + + pub fn datasync(&self) -> io::Result<()> { + unsafe { wasi::fd_datasync(self.fd).map_err(err2io) } + } + + pub fn pread(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + unsafe { wasi::fd_pread(self.fd, iovec(bufs), offset).map_err(err2io) } + } + + pub fn pwrite(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + unsafe { wasi::fd_pwrite(self.fd, ciovec(bufs), offset).map_err(err2io) } + } + + pub fn read(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + unsafe { wasi::fd_read(self.fd, iovec(bufs)).map_err(err2io) } + } + + pub fn write(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + unsafe { wasi::fd_write(self.fd, ciovec(bufs)).map_err(err2io) } + } + + pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> { + let (whence, offset) = match pos { + SeekFrom::Start(pos) => (wasi::WHENCE_SET, pos as i64), + SeekFrom::End(pos) => (wasi::WHENCE_END, pos), + SeekFrom::Current(pos) => (wasi::WHENCE_CUR, pos), + }; + unsafe { wasi::fd_seek(self.fd, offset, whence).map_err(err2io) } + } + + pub fn tell(&self) -> io::Result<u64> { + unsafe { wasi::fd_tell(self.fd).map_err(err2io) } + } + + // FIXME: __wasi_fd_fdstat_get + + pub fn set_flags(&self, flags: wasi::Fdflags) -> io::Result<()> { + unsafe { wasi::fd_fdstat_set_flags(self.fd, flags).map_err(err2io) } + } + + pub fn set_rights(&self, base: wasi::Rights, inheriting: wasi::Rights) -> io::Result<()> { + unsafe { wasi::fd_fdstat_set_rights(self.fd, base, inheriting).map_err(err2io) } + } + + pub fn sync(&self) -> io::Result<()> { + unsafe { wasi::fd_sync(self.fd).map_err(err2io) } + } + + pub fn advise(&self, offset: u64, len: u64, 
advice: wasi::Advice) -> io::Result<()> { + unsafe { wasi::fd_advise(self.fd, offset, len, advice).map_err(err2io) } + } + + pub fn allocate(&self, offset: u64, len: u64) -> io::Result<()> { + unsafe { wasi::fd_allocate(self.fd, offset, len).map_err(err2io) } + } + + pub fn create_directory(&self, path: &str) -> io::Result<()> { + unsafe { wasi::path_create_directory(self.fd, path).map_err(err2io) } + } + + pub fn link( + &self, + old_flags: wasi::Lookupflags, + old_path: &str, + new_fd: &WasiFd, + new_path: &str, + ) -> io::Result<()> { + unsafe { + wasi::path_link(self.fd, old_flags, old_path, new_fd.fd, new_path).map_err(err2io) + } + } + + pub fn open( + &self, + dirflags: wasi::Lookupflags, + path: &str, + oflags: wasi::Oflags, + fs_rights_base: wasi::Rights, + fs_rights_inheriting: wasi::Rights, + fs_flags: wasi::Fdflags, + ) -> io::Result<WasiFd> { + unsafe { + wasi::path_open( + self.fd, + dirflags, + path, + oflags, + fs_rights_base, + fs_rights_inheriting, + fs_flags, + ) + .map(|fd| WasiFd::from_raw(fd)) + .map_err(err2io) + } + } + + pub fn readdir(&self, buf: &mut [u8], cookie: wasi::Dircookie) -> io::Result<usize> { + unsafe { wasi::fd_readdir(self.fd, buf.as_mut_ptr(), buf.len(), cookie).map_err(err2io) } + } + + pub fn readlink(&self, path: &str, buf: &mut [u8]) -> io::Result<usize> { + unsafe { wasi::path_readlink(self.fd, path, buf.as_mut_ptr(), buf.len()).map_err(err2io) } + } + + pub fn rename(&self, old_path: &str, new_fd: &WasiFd, new_path: &str) -> io::Result<()> { + unsafe { wasi::path_rename(self.fd, old_path, new_fd.fd, new_path).map_err(err2io) } + } + + pub fn filestat_get(&self) -> io::Result<wasi::Filestat> { + unsafe { wasi::fd_filestat_get(self.fd).map_err(err2io) } + } + + pub fn filestat_set_times( + &self, + atim: wasi::Timestamp, + mtim: wasi::Timestamp, + fstflags: wasi::Fstflags, + ) -> io::Result<()> { + unsafe { wasi::fd_filestat_set_times(self.fd, atim, mtim, fstflags).map_err(err2io) } + } + + pub fn filestat_set_size(&self, size: u64) -> io::Result<()> { + unsafe { wasi::fd_filestat_set_size(self.fd, size).map_err(err2io) } + } + + pub fn path_filestat_get( + &self, + flags: wasi::Lookupflags, + path: &str, + ) -> io::Result<wasi::Filestat> { + unsafe { wasi::path_filestat_get(self.fd, flags, path).map_err(err2io) } + } + + pub fn path_filestat_set_times( + &self, + flags: wasi::Lookupflags, + path: &str, + atim: wasi::Timestamp, + mtim: wasi::Timestamp, + fstflags: wasi::Fstflags, + ) -> io::Result<()> { + unsafe { + wasi::path_filestat_set_times(self.fd, flags, path, atim, mtim, fstflags) + .map_err(err2io) + } + } + + pub fn symlink(&self, old_path: &str, new_path: &str) -> io::Result<()> { + unsafe { wasi::path_symlink(old_path, self.fd, new_path).map_err(err2io) } + } + + pub fn unlink_file(&self, path: &str) -> io::Result<()> { + unsafe { wasi::path_unlink_file(self.fd, path).map_err(err2io) } + } + + pub fn remove_directory(&self, path: &str) -> io::Result<()> { + unsafe { wasi::path_remove_directory(self.fd, path).map_err(err2io) } + } + + pub fn sock_recv( + &self, + ri_data: &mut [IoSliceMut<'_>], + ri_flags: wasi::Riflags, + ) -> io::Result<(usize, wasi::Roflags)> { + unsafe { wasi::sock_recv(self.fd, iovec(ri_data), ri_flags).map_err(err2io) } + } + + pub fn sock_send(&self, si_data: &[IoSlice<'_>], si_flags: wasi::Siflags) -> io::Result<usize> { + unsafe { wasi::sock_send(self.fd, ciovec(si_data), si_flags).map_err(err2io) } + } + + pub fn sock_shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Read 
=> wasi::SDFLAGS_RD, + Shutdown::Write => wasi::SDFLAGS_WR, + Shutdown::Both => wasi::SDFLAGS_WR | wasi::SDFLAGS_RD, + }; + unsafe { wasi::sock_shutdown(self.fd, how).map_err(err2io) } + } +} + +impl Drop for WasiFd { + fn drop(&mut self) { + // FIXME: can we handle the return code here even though we can't on + // unix? + let _ = unsafe { wasi::fd_close(self.fd) }; + } +} diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs new file mode 100644 index 00000000000..2eed9e436a9 --- /dev/null +++ b/library/std/src/sys/wasi/fs.rs @@ -0,0 +1,667 @@ +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io::{self, IoSlice, IoSliceMut, SeekFrom}; +use crate::iter; +use crate::mem::{self, ManuallyDrop}; +use crate::os::wasi::ffi::{OsStrExt, OsStringExt}; +use crate::path::{Path, PathBuf}; +use crate::ptr; +use crate::sync::Arc; +use crate::sys::fd::WasiFd; +use crate::sys::time::SystemTime; +use crate::sys::unsupported; +use crate::sys_common::FromInner; + +pub use crate::sys_common::fs::remove_dir_all; + +pub struct File { + fd: WasiFd, +} + +#[derive(Clone)] +pub struct FileAttr { + meta: wasi::Filestat, +} + +pub struct ReadDir { + inner: Arc<ReadDirInner>, + cookie: Option<wasi::Dircookie>, + buf: Vec<u8>, + offset: usize, + cap: usize, +} + +struct ReadDirInner { + root: PathBuf, + dir: File, +} + +pub struct DirEntry { + meta: wasi::Dirent, + name: Vec<u8>, + inner: Arc<ReadDirInner>, +} + +#[derive(Clone, Debug, Default)] +pub struct OpenOptions { + read: bool, + write: bool, + dirflags: wasi::Lookupflags, + fdflags: wasi::Fdflags, + oflags: wasi::Oflags, + rights_base: Option<wasi::Rights>, + rights_inheriting: Option<wasi::Rights>, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FilePermissions { + readonly: bool, +} + +#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)] +pub struct FileType { + bits: wasi::Filetype, +} + +#[derive(Debug)] +pub struct DirBuilder {} + +impl FileAttr { + pub fn size(&self) -> u64 { + self.meta.size + } + + pub fn perm(&self) -> FilePermissions { + // not currently implemented in wasi yet + FilePermissions { readonly: false } + } + + pub fn file_type(&self) -> FileType { + FileType { bits: self.meta.filetype } + } + + pub fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from_wasi_timestamp(self.meta.mtim)) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from_wasi_timestamp(self.meta.atim)) + } + + pub fn created(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from_wasi_timestamp(self.meta.ctim)) + } + + pub fn as_wasi(&self) -> &wasi::Filestat { + &self.meta + } +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + self.readonly + } + + pub fn set_readonly(&mut self, readonly: bool) { + self.readonly = readonly; + } +} + +impl FileType { + pub fn is_dir(&self) -> bool { + self.bits == wasi::FILETYPE_DIRECTORY + } + + pub fn is_file(&self) -> bool { + self.bits == wasi::FILETYPE_REGULAR_FILE + } + + pub fn is_symlink(&self) -> bool { + self.bits == wasi::FILETYPE_SYMBOLIC_LINK + } + + pub fn bits(&self) -> wasi::Filetype { + self.bits + } +} + +impl fmt::Debug for ReadDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ReadDir").finish() + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + + fn next(&mut self) -> Option<io::Result<DirEntry>> { + loop { + // If we've reached the capacity of our buffer then we need to read + // some more from the OS, otherwise we pick up at our old offset. 
+ let offset = if self.offset == self.cap { + let cookie = self.cookie.take()?; + match self.inner.dir.fd.readdir(&mut self.buf, cookie) { + Ok(bytes) => self.cap = bytes, + Err(e) => return Some(Err(e)), + } + self.offset = 0; + self.cookie = Some(cookie); + + // If we didn't actually read anything, this is in theory the + // end of the directory. + if self.cap == 0 { + self.cookie = None; + return None; + } + + 0 + } else { + self.offset + }; + let data = &self.buf[offset..self.cap]; + + // If we're not able to read a directory entry then that means it + // must have been truncated at the end of the buffer, so reset our + // offset so we can go back and reread into the buffer, picking up + // where we last left off. + let dirent_size = mem::size_of::<wasi::Dirent>(); + if data.len() < dirent_size { + assert!(self.cookie.is_some()); + assert!(self.buf.len() >= dirent_size); + self.offset = self.cap; + continue; + } + let (dirent, data) = data.split_at(dirent_size); + let dirent = unsafe { ptr::read_unaligned(dirent.as_ptr() as *const wasi::Dirent) }; + + // If the file name was truncated, then we need to reinvoke + // `readdir` so we truncate our buffer to start over and reread this + // descriptor. Note that if our offset is 0 that means the file name + // is massive and we need a bigger buffer. + if data.len() < dirent.d_namlen as usize { + if offset == 0 { + let amt_to_add = self.buf.capacity(); + self.buf.extend(iter::repeat(0).take(amt_to_add)); + } + assert!(self.cookie.is_some()); + self.offset = self.cap; + continue; + } + self.cookie = Some(dirent.d_next); + self.offset = offset + dirent_size + dirent.d_namlen as usize; + + let name = &data[..(dirent.d_namlen as usize)]; + + // These names are skipped on all other platforms, so let's skip + // them here too + if name == b"." || name == b".." 
{ + continue; + } + + return Some(Ok(DirEntry { + meta: dirent, + name: name.to_vec(), + inner: self.inner.clone(), + })); + } + } +} + +impl DirEntry { + pub fn path(&self) -> PathBuf { + let name = OsStr::from_bytes(&self.name); + self.inner.root.join(name) + } + + pub fn file_name(&self) -> OsString { + OsString::from_vec(self.name.clone()) + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + metadata_at(&self.inner.dir.fd, 0, OsStr::from_bytes(&self.name).as_ref()) + } + + pub fn file_type(&self) -> io::Result<FileType> { + Ok(FileType { bits: self.meta.d_type }) + } + + pub fn ino(&self) -> wasi::Inode { + self.meta.d_ino + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + let mut base = OpenOptions::default(); + base.dirflags = wasi::LOOKUPFLAGS_SYMLINK_FOLLOW; + return base; + } + + pub fn read(&mut self, read: bool) { + self.read = read; + } + + pub fn write(&mut self, write: bool) { + self.write = write; + } + + pub fn truncate(&mut self, truncate: bool) { + self.oflag(wasi::OFLAGS_TRUNC, truncate); + } + + pub fn create(&mut self, create: bool) { + self.oflag(wasi::OFLAGS_CREAT, create); + } + + pub fn create_new(&mut self, create_new: bool) { + self.oflag(wasi::OFLAGS_EXCL, create_new); + self.oflag(wasi::OFLAGS_CREAT, create_new); + } + + pub fn directory(&mut self, directory: bool) { + self.oflag(wasi::OFLAGS_DIRECTORY, directory); + } + + fn oflag(&mut self, bit: wasi::Oflags, set: bool) { + if set { + self.oflags |= bit; + } else { + self.oflags &= !bit; + } + } + + pub fn append(&mut self, set: bool) { + self.fdflag(wasi::FDFLAGS_APPEND, set); + } + + pub fn dsync(&mut self, set: bool) { + self.fdflag(wasi::FDFLAGS_DSYNC, set); + } + + pub fn nonblock(&mut self, set: bool) { + self.fdflag(wasi::FDFLAGS_NONBLOCK, set); + } + + pub fn rsync(&mut self, set: bool) { + self.fdflag(wasi::FDFLAGS_RSYNC, set); + } + + pub fn sync(&mut self, set: bool) { + self.fdflag(wasi::FDFLAGS_SYNC, set); + } + + fn fdflag(&mut self, bit: wasi::Fdflags, set: bool) { + if set { + self.fdflags |= bit; + } else { + self.fdflags &= !bit; + } + } + + pub fn fs_rights_base(&mut self, rights: wasi::Rights) { + self.rights_base = Some(rights); + } + + pub fn fs_rights_inheriting(&mut self, rights: wasi::Rights) { + self.rights_inheriting = Some(rights); + } + + fn rights_base(&self) -> wasi::Rights { + if let Some(rights) = self.rights_base { + return rights; + } + + // If rights haven't otherwise been specified try to pick a reasonable + // set. This can always be overridden by users via extension traits, and + // implementations may give us fewer rights silently than we ask for. So + // given that, just look at `read` and `write` and bucket permissions + // based on that. + let mut base = 0; + if self.read { + base |= wasi::RIGHTS_FD_READ; + base |= wasi::RIGHTS_FD_READDIR; + } + if self.write { + base |= wasi::RIGHTS_FD_WRITE; + base |= wasi::RIGHTS_FD_DATASYNC; + base |= wasi::RIGHTS_FD_ALLOCATE; + base |= wasi::RIGHTS_FD_FILESTAT_SET_SIZE; + } + + // FIXME: some of these should probably be read-only or write-only... 
+ base |= wasi::RIGHTS_FD_ADVISE; + base |= wasi::RIGHTS_FD_FDSTAT_SET_FLAGS; + base |= wasi::RIGHTS_FD_FILESTAT_SET_TIMES; + base |= wasi::RIGHTS_FD_SEEK; + base |= wasi::RIGHTS_FD_SYNC; + base |= wasi::RIGHTS_FD_TELL; + base |= wasi::RIGHTS_PATH_CREATE_DIRECTORY; + base |= wasi::RIGHTS_PATH_CREATE_FILE; + base |= wasi::RIGHTS_PATH_FILESTAT_GET; + base |= wasi::RIGHTS_PATH_LINK_SOURCE; + base |= wasi::RIGHTS_PATH_LINK_TARGET; + base |= wasi::RIGHTS_PATH_OPEN; + base |= wasi::RIGHTS_PATH_READLINK; + base |= wasi::RIGHTS_PATH_REMOVE_DIRECTORY; + base |= wasi::RIGHTS_PATH_RENAME_SOURCE; + base |= wasi::RIGHTS_PATH_RENAME_TARGET; + base |= wasi::RIGHTS_PATH_SYMLINK; + base |= wasi::RIGHTS_PATH_UNLINK_FILE; + base |= wasi::RIGHTS_POLL_FD_READWRITE; + + return base; + } + + fn rights_inheriting(&self) -> wasi::Rights { + self.rights_inheriting.unwrap_or_else(|| self.rights_base()) + } + + pub fn lookup_flags(&mut self, flags: wasi::Lookupflags) { + self.dirflags = flags; + } +} + +impl File { + pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { + let (dir, file) = open_parent(path)?; + open_at(&dir, &file, opts) + } + + pub fn open_at(&self, path: &Path, opts: &OpenOptions) -> io::Result<File> { + open_at(&self.fd, path, opts) + } + + pub fn file_attr(&self) -> io::Result<FileAttr> { + self.fd.filestat_get().map(|meta| FileAttr { meta }) + } + + pub fn metadata_at(&self, flags: wasi::Lookupflags, path: &Path) -> io::Result<FileAttr> { + metadata_at(&self.fd, flags, path) + } + + pub fn fsync(&self) -> io::Result<()> { + self.fd.sync() + } + + pub fn datasync(&self) -> io::Result<()> { + self.fd.datasync() + } + + pub fn truncate(&self, size: u64) -> io::Result<()> { + self.fd.filestat_set_size(size) + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.read_vectored(&mut [IoSliceMut::new(buf)]) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.fd.read(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.write_vectored(&[IoSlice::new(buf)]) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.fd.write(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn flush(&self) -> io::Result<()> { + Ok(()) + } + + pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> { + self.fd.seek(pos) + } + + pub fn duplicate(&self) -> io::Result<File> { + // https://github.com/CraneStation/wasmtime/blob/master/docs/WASI-rationale.md#why-no-dup + unsupported() + } + + pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> { + // Permissions haven't been fully figured out in wasi yet, so this is + // likely temporary + unsupported() + } + + pub fn fd(&self) -> &WasiFd { + &self.fd + } + + pub fn into_fd(self) -> WasiFd { + self.fd + } + + pub fn read_link(&self, file: &Path) -> io::Result<PathBuf> { + read_link(&self.fd, file) + } +} + +impl FromInner<u32> for File { + fn from_inner(fd: u32) -> File { + unsafe { File { fd: WasiFd::from_raw(fd) } } + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder {} + } + + pub fn mkdir(&self, p: &Path) -> io::Result<()> { + let (dir, file) = open_parent(p)?; + dir.create_directory(osstr2str(file.as_ref())?) 
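// Usage note, as a small sketch: the portable call below bottoms out in
// `DirBuilder::mkdir` above, which asks the C library for a pre-opened parent
// descriptor and then issues `path_create_directory` with the relative name.
// Assumption: the WASI runtime was started with a preopen covering "data".
fn make_cache_dir() -> std::io::Result<()> {
    std::fs::create_dir("data/cache")
}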
+ } +} + +impl fmt::Debug for File { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("File").field("fd", &self.fd.as_raw()).finish() + } +} + +pub fn readdir(p: &Path) -> io::Result<ReadDir> { + let mut opts = OpenOptions::new(); + opts.directory(true); + opts.read(true); + let dir = File::open(p, &opts)?; + Ok(ReadDir { + cookie: Some(0), + buf: vec![0; 128], + offset: 0, + cap: 0, + inner: Arc::new(ReadDirInner { dir, root: p.to_path_buf() }), + }) +} + +pub fn unlink(p: &Path) -> io::Result<()> { + let (dir, file) = open_parent(p)?; + dir.unlink_file(osstr2str(file.as_ref())?) +} + +pub fn rename(old: &Path, new: &Path) -> io::Result<()> { + let (old, old_file) = open_parent(old)?; + let (new, new_file) = open_parent(new)?; + old.rename(osstr2str(old_file.as_ref())?, &new, osstr2str(new_file.as_ref())?) +} + +pub fn set_perm(_p: &Path, _perm: FilePermissions) -> io::Result<()> { + // Permissions haven't been fully figured out in wasi yet, so this is + // likely temporary + unsupported() +} + +pub fn rmdir(p: &Path) -> io::Result<()> { + let (dir, file) = open_parent(p)?; + dir.remove_directory(osstr2str(file.as_ref())?) +} + +pub fn readlink(p: &Path) -> io::Result<PathBuf> { + let (dir, file) = open_parent(p)?; + read_link(&dir, &file) +} + +fn read_link(fd: &WasiFd, file: &Path) -> io::Result<PathBuf> { + // Try to get a best effort initial capacity for the vector we're going to + // fill. Note that if it's not a symlink we don't use a file to avoid + // allocating gigabytes if you read_link a huge movie file by accident. + // Additionally we add 1 to the initial size so if it doesn't change until + // when we call `readlink` the returned length will be less than the + // capacity, guaranteeing that we got all the data. + let meta = metadata_at(fd, 0, file)?; + let initial_size = if meta.file_type().is_symlink() { + (meta.size() as usize).saturating_add(1) + } else { + 1 // this'll fail in just a moment + }; + + // Now that we have an initial guess of how big to make our buffer, call + // `readlink` in a loop until it fails or reports it filled fewer bytes than + // we asked for, indicating we got everything. + let file = osstr2str(file.as_ref())?; + let mut destination = vec![0u8; initial_size]; + loop { + let len = fd.readlink(file, &mut destination)?; + if len < destination.len() { + destination.truncate(len); + destination.shrink_to_fit(); + return Ok(PathBuf::from(OsString::from_vec(destination))); + } + let amt_to_add = destination.len(); + destination.extend(iter::repeat(0).take(amt_to_add)); + } +} + +pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { + let (dst, dst_file) = open_parent(dst)?; + dst.symlink(osstr2str(src.as_ref())?, osstr2str(dst_file.as_ref())?) 
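// Small sketch of the `stat`/`lstat` split defined just below: both resolve a
// pre-opened parent plus a relative name; the only difference is whether
// `LOOKUPFLAGS_SYMLINK_FOLLOW` is passed, i.e. whether a trailing symlink is
// followed (`stat`) or described as-is (`lstat`). Assumption: `link` names a
// symlink reachable through a preopen.
fn is_dangling(link: &Path) -> io::Result<bool> {
    let meta = lstat(link)?; // reports the link itself, even if its target is gone
    debug_assert!(meta.file_type().is_symlink());
    Ok(stat(link).is_err()) // following the link fails once the target is missing
}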
+} + +pub fn link(src: &Path, dst: &Path) -> io::Result<()> { + let (src, src_file) = open_parent(src)?; + let (dst, dst_file) = open_parent(dst)?; + src.link( + wasi::LOOKUPFLAGS_SYMLINK_FOLLOW, + osstr2str(src_file.as_ref())?, + &dst, + osstr2str(dst_file.as_ref())?, + ) +} + +pub fn stat(p: &Path) -> io::Result<FileAttr> { + let (dir, file) = open_parent(p)?; + metadata_at(&dir, wasi::LOOKUPFLAGS_SYMLINK_FOLLOW, &file) +} + +pub fn lstat(p: &Path) -> io::Result<FileAttr> { + let (dir, file) = open_parent(p)?; + metadata_at(&dir, 0, &file) +} + +fn metadata_at(fd: &WasiFd, flags: wasi::Lookupflags, path: &Path) -> io::Result<FileAttr> { + let meta = fd.path_filestat_get(flags, osstr2str(path.as_ref())?)?; + Ok(FileAttr { meta }) +} + +pub fn canonicalize(_p: &Path) -> io::Result<PathBuf> { + // This seems to not be in wasi's API yet, and we may need to end up + // emulating it ourselves. For now just return an error. + unsupported() +} + +fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> { + let fd = fd.open( + opts.dirflags, + osstr2str(path.as_ref())?, + opts.oflags, + opts.rights_base(), + opts.rights_inheriting(), + opts.fdflags, + )?; + Ok(File { fd }) +} + +/// Attempts to open a bare path `p`. +/// +/// WASI has no fundamental capability to do this. All syscalls and operations +/// are relative to already-open file descriptors. The C library, however, +/// manages a map of pre-opened file descriptors to their path, and then the C +/// library provides an API to look at this. In other words, when you want to +/// open a path `p`, you have to find a previously opened file descriptor in a +/// global table and then see if `p` is relative to that file descriptor. +/// +/// This function, if successful, will return two items: +/// +/// * The first is a `ManuallyDrop<WasiFd>`. This represents a pre-opened file +/// descriptor which we don't have ownership of, but we can use. You shouldn't +/// actually drop the `fd`. +/// +/// * The second is a path that should be a part of `p` and represents a +/// relative traversal from the file descriptor specified to the desired +/// location `p`. +/// +/// If successful you can use the returned file descriptor to perform +/// file-descriptor-relative operations on the path returned as well. The +/// `rights` argument indicates what operations are desired on the returned file +/// descriptor, and if successful the returned file descriptor should have the +/// appropriate rights for performing `rights` actions. +/// +/// Note that this can fail if `p` doesn't look like it can be opened relative +/// to any pre-opened file descriptor. +fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> { + let p = CString::new(p.as_os_str().as_bytes())?; + unsafe { + let mut ret = ptr::null(); + let fd = __wasilibc_find_relpath(p.as_ptr(), &mut ret); + if fd == -1 { + let msg = format!( + "failed to find a pre-opened file descriptor \ + through which {:?} could be opened", + p + ); + return Err(io::Error::new(io::ErrorKind::Other, msg)); + } + let path = Path::new(OsStr::from_bytes(CStr::from_ptr(ret).to_bytes())); + + // FIXME: right now `path` is a pointer into `p`, the `CString` above. + // When we return `p` is deallocated and we can't use it, so we need to + // currently separately allocate `path`. If this becomes an issue though + // we should probably turn this into a closure-taking interface or take + // `&CString` and then pass off `&Path` tied to the same lifetime. 
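// One possible shape for the closure-taking interface the FIXME above hints
// at (a sketch only, not the current implementation): lend the borrowed
// descriptor and relative path to a callback, so the extra `PathBuf` copy
// could eventually be dropped.
fn with_open_parent<R>(
    p: &Path,
    f: impl FnOnce(&WasiFd, &Path) -> io::Result<R>,
) -> io::Result<R> {
    let (fd, relative) = open_parent(p)?; // still allocates a PathBuf today
    f(&fd, &relative)
}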
+ let path = path.to_path_buf(); + + return Ok((ManuallyDrop::new(WasiFd::from_raw(fd as u32)), path)); + } + + extern "C" { + pub fn __wasilibc_find_relpath( + path: *const libc::c_char, + relative_path: *mut *const libc::c_char, + ) -> libc::c_int; + } +} + +pub fn osstr2str(f: &OsStr) -> io::Result<&str> { + f.to_str().ok_or_else(|| io::Error::new(io::ErrorKind::Other, "input must be utf-8")) +} + +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + use crate::fs::File; + + let mut reader = File::open(from)?; + let mut writer = File::create(to)?; + + io::copy(&mut reader, &mut writer) +} diff --git a/library/std/src/sys/wasi/io.rs b/library/std/src/sys/wasi/io.rs new file mode 100644 index 00000000000..0ad2e152855 --- /dev/null +++ b/library/std/src/sys/wasi/io.rs @@ -0,0 +1,71 @@ +use crate::marker::PhantomData; +use crate::slice; + +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct IoSlice<'a> { + vec: wasi::Ciovec, + _p: PhantomData<&'a [u8]>, +} + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + IoSlice { vec: wasi::Ciovec { buf: buf.as_ptr(), buf_len: buf.len() }, _p: PhantomData } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.buf_len < n { + panic!("advancing IoSlice beyond its length"); + } + + unsafe { + self.vec.buf_len -= n; + self.vec.buf = self.vec.buf.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.buf as *const u8, self.vec.buf_len) } + } +} + +#[repr(transparent)] +pub struct IoSliceMut<'a> { + vec: wasi::Iovec, + _p: PhantomData<&'a mut [u8]>, +} + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut { + vec: wasi::Iovec { buf: buf.as_mut_ptr(), buf_len: buf.len() }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if self.vec.buf_len < n { + panic!("advancing IoSlice beyond its length"); + } + + unsafe { + self.vec.buf_len -= n; + self.vec.buf = self.vec.buf.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.buf as *const u8, self.vec.buf_len) } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.buf_len) } + } +} diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs new file mode 100644 index 00000000000..2704ff484f9 --- /dev/null +++ b/library/std/src/sys/wasi/mod.rs @@ -0,0 +1,96 @@ +//! System bindings for the wasm/web platform +//! +//! This module contains the facade (aka platform-specific) implementations of +//! OS level functionality for wasm. Note that this wasm is *not* the emscripten +//! wasm, so we have no runtime here. +//! +//! This is all super highly experimental and not actually intended for +//! wide/production use yet, it's still all in the experimental category. This +//! will likely change over time. +//! +//! Currently all functions here are basically stubs that immediately return +//! errors. The hope is that with a portability lint we can turn actually just +//! remove all this and just omit parts of the standard library if we're +//! compiling for wasm. That way it's a compile time error for something that's +//! guaranteed to be a runtime error! 
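// Quick sanity-check sketch for the errno mapping implemented further down in
// this module. Assumption: the `wasi` crate's ERRNO_* constants are the plain
// integer values they are matched against below.
#[allow(dead_code)]
fn demo_error_mapping() {
    use crate::io::ErrorKind;
    assert_eq!(decode_error_kind(wasi::ERRNO_NOENT as i32), ErrorKind::NotFound);
    assert_eq!(decode_error_kind(wasi::ERRNO_AGAIN as i32), ErrorKind::WouldBlock);
    // Anything negative or out of range falls back to `Other`.
    assert_eq!(decode_error_kind(-1), ErrorKind::Other);
}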
+ +use crate::io as std_io; +use crate::mem; + +pub mod alloc; +pub mod args; +#[path = "../unsupported/cmath.rs"] +pub mod cmath; +#[path = "../unsupported/condvar.rs"] +pub mod condvar; +pub mod env; +pub mod fd; +pub mod fs; +pub mod io; +#[path = "../unsupported/mutex.rs"] +pub mod mutex; +pub mod net; +pub mod os; +pub use crate::sys_common::os_str_bytes as os_str; +pub mod ext; +pub mod path; +pub mod pipe; +pub mod process; +#[path = "../unsupported/rwlock.rs"] +pub mod rwlock; +#[path = "../unsupported/stack_overflow.rs"] +pub mod stack_overflow; +pub mod stdio; +pub mod thread; +#[path = "../unsupported/thread_local_dtor.rs"] +pub mod thread_local_dtor; +#[path = "../unsupported/thread_local_key.rs"] +pub mod thread_local_key; +pub mod time; + +#[path = "../unsupported/common.rs"] +#[allow(unused)] +mod common; +pub use common::*; + +pub fn decode_error_kind(errno: i32) -> std_io::ErrorKind { + use std_io::ErrorKind::*; + if errno > u16::MAX as i32 || errno < 0 { + return Other; + } + match errno as u16 { + wasi::ERRNO_CONNREFUSED => ConnectionRefused, + wasi::ERRNO_CONNRESET => ConnectionReset, + wasi::ERRNO_PERM | wasi::ERRNO_ACCES => PermissionDenied, + wasi::ERRNO_PIPE => BrokenPipe, + wasi::ERRNO_NOTCONN => NotConnected, + wasi::ERRNO_CONNABORTED => ConnectionAborted, + wasi::ERRNO_ADDRNOTAVAIL => AddrNotAvailable, + wasi::ERRNO_ADDRINUSE => AddrInUse, + wasi::ERRNO_NOENT => NotFound, + wasi::ERRNO_INTR => Interrupted, + wasi::ERRNO_INVAL => InvalidInput, + wasi::ERRNO_TIMEDOUT => TimedOut, + wasi::ERRNO_EXIST => AlreadyExists, + wasi::ERRNO_AGAIN => WouldBlock, + _ => Other, + } +} + +pub fn abort_internal() -> ! { + unsafe { libc::abort() } +} + +pub fn hashmap_random_keys() -> (u64, u64) { + let mut ret = (0u64, 0u64); + unsafe { + let base = &mut ret as *mut (u64, u64) as *mut u8; + let len = mem::size_of_val(&ret); + wasi::random_get(base, len).expect("random_get failure"); + } + return ret; +} + +fn err2io(err: wasi::Error) -> std_io::Error { + std_io::Error::from_raw_os_error(err.raw_error().into()) +} diff --git a/library/std/src/sys/wasi/net.rs b/library/std/src/sys/wasi/net.rs new file mode 100644 index 00000000000..e186453588d --- /dev/null +++ b/library/std/src/sys/wasi/net.rs @@ -0,0 +1,411 @@ +use crate::convert::TryFrom; +use crate::fmt; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; +use crate::sys::fd::WasiFd; +use crate::sys::{unsupported, Void}; +use crate::sys_common::FromInner; +use crate::time::Duration; + +pub struct TcpStream { + fd: WasiFd, +} + +impl TcpStream { + pub fn connect(_: io::Result<&SocketAddr>) -> io::Result<TcpStream> { + unsupported() + } + + pub fn connect_timeout(_: &SocketAddr, _: Duration) -> io::Result<TcpStream> { + unsupported() + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unsupported() + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unsupported() + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + unsupported() + } + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + unsupported() + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + unsupported() + } + + pub fn read(&self, _: &mut [u8]) -> io::Result<usize> { + unsupported() + } + + pub fn read_vectored(&self, _: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + unsupported() + } + + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn write(&self, _: &[u8]) -> io::Result<usize> { + unsupported() 
+ } + + pub fn write_vectored(&self, _: &[IoSlice<'_>]) -> io::Result<usize> { + unsupported() + } + + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + unsupported() + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unsupported() + } + + pub fn shutdown(&self, _: Shutdown) -> io::Result<()> { + unsupported() + } + + pub fn duplicate(&self) -> io::Result<TcpStream> { + unsupported() + } + + pub fn set_nodelay(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn nodelay(&self) -> io::Result<bool> { + unsupported() + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn ttl(&self) -> io::Result<u32> { + unsupported() + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unsupported() + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn fd(&self) -> &WasiFd { + &self.fd + } + + pub fn into_fd(self) -> WasiFd { + self.fd + } +} + +impl FromInner<u32> for TcpStream { + fn from_inner(fd: u32) -> TcpStream { + unsafe { TcpStream { fd: WasiFd::from_raw(fd) } } + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TcpStream").field("fd", &self.fd.as_raw()).finish() + } +} + +pub struct TcpListener { + fd: WasiFd, +} + +impl TcpListener { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<TcpListener> { + unsupported() + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unsupported() + } + + pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { + unsupported() + } + + pub fn duplicate(&self) -> io::Result<TcpListener> { + unsupported() + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn ttl(&self) -> io::Result<u32> { + unsupported() + } + + pub fn set_only_v6(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn only_v6(&self) -> io::Result<bool> { + unsupported() + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unsupported() + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn fd(&self) -> &WasiFd { + &self.fd + } + + pub fn into_fd(self) -> WasiFd { + self.fd + } +} + +impl FromInner<u32> for TcpListener { + fn from_inner(fd: u32) -> TcpListener { + unsafe { TcpListener { fd: WasiFd::from_raw(fd) } } + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TcpListener").field("fd", &self.fd.as_raw()).finish() + } +} + +pub struct UdpSocket { + fd: WasiFd, +} + +impl UdpSocket { + pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { + unsupported() + } + + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + unsupported() + } + + pub fn socket_addr(&self) -> io::Result<SocketAddr> { + unsupported() + } + + pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unsupported() + } + + pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + unsupported() + } + + pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { + unsupported() + } + + pub fn duplicate(&self) -> io::Result<UdpSocket> { + unsupported() + } + + pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unsupported() + } + + pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { + unsupported() + } + + pub fn read_timeout(&self) -> io::Result<Option<Duration>> { + unsupported() + 
} + + pub fn write_timeout(&self) -> io::Result<Option<Duration>> { + unsupported() + } + + pub fn set_broadcast(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn broadcast(&self) -> io::Result<bool> { + unsupported() + } + + pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + unsupported() + } + + pub fn set_multicast_ttl_v4(&self, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + unsupported() + } + + pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + unsupported() + } + + pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unsupported() + } + + pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { + unsupported() + } + + pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn set_ttl(&self, _: u32) -> io::Result<()> { + unsupported() + } + + pub fn ttl(&self) -> io::Result<u32> { + unsupported() + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + unsupported() + } + + pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { + unsupported() + } + + pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { + unsupported() + } + + pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { + unsupported() + } + + pub fn send(&self, _: &[u8]) -> io::Result<usize> { + unsupported() + } + + pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> { + unsupported() + } + + pub fn fd(&self) -> &WasiFd { + &self.fd + } + + pub fn into_fd(self) -> WasiFd { + self.fd + } +} + +impl FromInner<u32> for UdpSocket { + fn from_inner(fd: u32) -> UdpSocket { + unsafe { UdpSocket { fd: WasiFd::from_raw(fd) } } + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UdpSocket").field("fd", &self.fd.as_raw()).finish() + } +} + +pub struct LookupHost(Void); + +impl LookupHost { + pub fn port(&self) -> u16 { + match self.0 {} + } +} + +impl Iterator for LookupHost { + type Item = SocketAddr; + fn next(&mut self) -> Option<SocketAddr> { + match self.0 {} + } +} + +impl<'a> TryFrom<&'a str> for LookupHost { + type Error = io::Error; + + fn try_from(_v: &'a str) -> io::Result<LookupHost> { + unsupported() + } +} + +impl<'a> TryFrom<(&'a str, u16)> for LookupHost { + type Error = io::Error; + + fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> { + unsupported() + } +} + +#[allow(nonstandard_style)] +pub mod netc { + pub const AF_INET: u8 = 0; + pub const AF_INET6: u8 = 1; + pub type sa_family_t = u8; + + #[derive(Copy, Clone)] + pub struct in_addr { + pub s_addr: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in { + pub sin_family: sa_family_t, + pub sin_port: u16, + pub sin_addr: in_addr, + } + + #[derive(Copy, Clone)] + pub struct in6_addr { + pub s6_addr: [u8; 16], + } + + #[derive(Copy, Clone)] + pub struct sockaddr_in6 { + pub sin6_family: sa_family_t, + pub sin6_port: u16, + pub sin6_addr: in6_addr, + pub sin6_flowinfo: u32, + pub sin6_scope_id: u32, + } + + #[derive(Copy, Clone)] + pub struct sockaddr {} + + pub type socklen_t = usize; +} diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs new file mode 100644 index 
00000000000..8052c0aa8a8 --- /dev/null +++ b/library/std/src/sys/wasi/os.rs @@ -0,0 +1,201 @@ +use crate::any::Any; +use crate::error::Error as StdError; +use crate::ffi::{CStr, CString, OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::marker::PhantomData; +use crate::os::wasi::prelude::*; +use crate::path::{self, PathBuf}; +use crate::str; +use crate::sys::memchr; +use crate::sys::{unsupported, Void}; +use crate::vec; + +#[cfg(not(target_feature = "atomics"))] +pub unsafe fn env_lock() -> impl Any { + // No need for a lock if we're single-threaded, but this function will need + // to get implemented for multi-threaded scenarios +} + +pub fn errno() -> i32 { + extern "C" { + #[thread_local] + static errno: libc::c_int; + } + + unsafe { errno as i32 } +} + +pub fn error_string(errno: i32) -> String { + let mut buf = [0 as libc::c_char; 1024]; + + let p = buf.as_mut_ptr(); + unsafe { + if libc::strerror_r(errno as libc::c_int, p, buf.len()) < 0 { + panic!("strerror_r failure"); + } + str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned() + } +} + +pub fn getcwd() -> io::Result<PathBuf> { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(&'a Void); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + match *self.0 {} + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(_paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on wasm yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on wasm yet" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + unsupported() +} +pub struct Env { + iter: vec::IntoIter<(OsString, OsString)>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +pub fn env() -> Env { + unsafe { + let _guard = env_lock(); + let mut environ = libc::environ; + let mut result = Vec::new(); + if !environ.is_null() { + while !(*environ).is_null() { + if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) { + result.push(key_value); + } + environ = environ.add(1); + } + } + return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData }; + } + + // See src/libstd/sys/unix/os.rs, same as that + fn parse(input: &[u8]) -> Option<(OsString, OsString)> { + if input.is_empty() { + return None; + } + let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1); + pos.map(|p| { + ( + OsStringExt::from_vec(input[..p].to_vec()), + OsStringExt::from_vec(input[p + 1..].to_vec()), + ) + }) + } +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + let k = CString::new(k.as_bytes())?; + unsafe { + let _guard = env_lock(); + let s = libc::getenv(k.as_ptr()) as *const libc::c_char; + let ret = if s.is_null() { + None + } else { + Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) + }; + Ok(ret) + } +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + let k = CString::new(k.as_bytes())?; + 
let v = CString::new(v.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) + } +} + +pub fn unsetenv(n: &OsStr) -> io::Result<()> { + let nbuf = CString::new(n.as_bytes())?; + + unsafe { + let _guard = env_lock(); + cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) + } +} + +pub fn temp_dir() -> PathBuf { + panic!("no filesystem on wasm") +} + +pub fn home_dir() -> Option<PathBuf> { + None +} + +pub fn exit(code: i32) -> ! { + unsafe { libc::exit(code) } +} + +pub fn getpid() -> u32 { + panic!("unsupported"); +} + +#[doc(hidden)] +pub trait IsMinusOne { + fn is_minus_one(&self) -> bool; +} + +macro_rules! impl_is_minus_one { + ($($t:ident)*) => ($(impl IsMinusOne for $t { + fn is_minus_one(&self) -> bool { + *self == -1 + } + })*) +} + +impl_is_minus_one! { i8 i16 i32 i64 isize } + +fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> { + if t.is_minus_one() { Err(io::Error::last_os_error()) } else { Ok(t) } +} diff --git a/library/std/src/sys/wasi/path.rs b/library/std/src/sys/wasi/path.rs new file mode 100644 index 00000000000..840a7ae0426 --- /dev/null +++ b/library/std/src/sys/wasi/path.rs @@ -0,0 +1,19 @@ +use crate::ffi::OsStr; +use crate::path::Prefix; + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'/' +} + +pub fn parse_prefix(_: &OsStr) -> Option<Prefix<'_>> { + None +} + +pub const MAIN_SEP_STR: &str = "/"; +pub const MAIN_SEP: char = '/'; diff --git a/library/std/src/sys/wasi/pipe.rs b/library/std/src/sys/wasi/pipe.rs new file mode 100644 index 00000000000..10d0925823e --- /dev/null +++ b/library/std/src/sys/wasi/pipe.rs @@ -0,0 +1,38 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::sys::Void; + +pub struct AnonPipe(Void); + +impl AnonPipe { + pub fn read(&self, _buf: &mut [u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn read_vectored(&self, _bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_read_vectored(&self) -> bool { + match self.0 {} + } + + pub fn write(&self, _buf: &[u8]) -> io::Result<usize> { + match self.0 {} + } + + pub fn write_vectored(&self, _bufs: &[IoSlice<'_>]) -> io::Result<usize> { + match self.0 {} + } + + pub fn is_write_vectored(&self) -> bool { + match self.0 {} + } + + pub fn diverge(&self) -> ! 
{ + match self.0 {} + } +} + +pub fn read2(p1: AnonPipe, _v1: &mut Vec<u8>, _p2: AnonPipe, _v2: &mut Vec<u8>) -> io::Result<()> { + match p1.0 {} +} diff --git a/library/std/src/sys/wasi/process.rs b/library/std/src/sys/wasi/process.rs new file mode 100644 index 00000000000..7156c9ab92f --- /dev/null +++ b/library/std/src/sys/wasi/process.rs @@ -0,0 +1,149 @@ +use crate::ffi::OsStr; +use crate::fmt; +use crate::io; +use crate::sys::fs::File; +use crate::sys::pipe::AnonPipe; +use crate::sys::{unsupported, Void}; +use crate::sys_common::process::CommandEnv; + +pub use crate::ffi::OsString as EnvKey; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +pub struct Command { + env: CommandEnv, +} + +// passed back to std::process with the pipes connected to the child, if any +// were requested +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, +} + +impl Command { + pub fn new(_program: &OsStr) -> Command { + Command { env: Default::default() } + } + + pub fn arg(&mut self, _arg: &OsStr) {} + + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + + pub fn cwd(&mut self, _dir: &OsStr) {} + + pub fn stdin(&mut self, _stdin: Stdio) {} + + pub fn stdout(&mut self, _stdout: Stdio) {} + + pub fn stderr(&mut self, _stderr: Stdio) {} + + pub fn spawn( + &mut self, + _default: Stdio, + _needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + unsupported() + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + pipe.diverge() + } +} + +impl From<File> for Stdio { + fn from(_file: File) -> Stdio { + panic!("unsupported") + } +} + +impl fmt::Debug for Command { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +pub struct ExitStatus(Void); + +impl ExitStatus { + pub fn success(&self) -> bool { + match self.0 {} + } + + pub fn code(&self) -> Option<i32> { + match self.0 {} + } +} + +impl Clone for ExitStatus { + fn clone(&self) -> ExitStatus { + match self.0 {} + } +} + +impl Copy for ExitStatus {} + +impl PartialEq for ExitStatus { + fn eq(&self, _other: &ExitStatus) -> bool { + match self.0 {} + } +} + +impl Eq for ExitStatus {} + +impl fmt::Debug for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 {} + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(bool); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(false); + pub const FAILURE: ExitCode = ExitCode(true); + + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +pub struct Process(Void); + +impl Process { + pub fn id(&self) -> u32 { + match self.0 {} + } + + pub fn kill(&mut self) -> io::Result<()> { + match self.0 {} + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + match self.0 {} + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + match self.0 {} + } +} diff --git a/library/std/src/sys/wasi/stdio.rs b/library/std/src/sys/wasi/stdio.rs new file mode 100644 index 00000000000..78e3911dc4e --- /dev/null +++ b/library/std/src/sys/wasi/stdio.rs @@ -0,0 +1,102 @@ +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem::ManuallyDrop; +use crate::sys::fd::WasiFd; + +pub struct Stdin; +pub 
struct Stdout; +pub struct Stderr; + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin) + } + + #[inline] + pub fn as_raw_fd(&self) -> u32 { + 0 + } +} + +impl io::Read for Stdin { + fn read(&mut self, data: &mut [u8]) -> io::Result<usize> { + self.read_vectored(&mut [IoSliceMut::new(data)]) + } + + fn read_vectored(&mut self, data: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + ManuallyDrop::new(unsafe { WasiFd::from_raw(self.as_raw_fd()) }).read(data) + } + + #[inline] + fn is_read_vectored(&self) -> bool { + true + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout) + } + + #[inline] + pub fn as_raw_fd(&self) -> u32 { + 1 + } +} + +impl io::Write for Stdout { + fn write(&mut self, data: &[u8]) -> io::Result<usize> { + self.write_vectored(&[IoSlice::new(data)]) + } + + fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> { + ManuallyDrop::new(unsafe { WasiFd::from_raw(self.as_raw_fd()) }).write(data) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr) + } + + #[inline] + pub fn as_raw_fd(&self) -> u32 { + 2 + } +} + +impl io::Write for Stderr { + fn write(&mut self, data: &[u8]) -> io::Result<usize> { + self.write_vectored(&[IoSlice::new(data)]) + } + + fn write_vectored(&mut self, data: &[IoSlice<'_>]) -> io::Result<usize> { + ManuallyDrop::new(unsafe { WasiFd::from_raw(self.as_raw_fd()) }).write(data) + } + + #[inline] + fn is_write_vectored(&self) -> bool { + true + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE; + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(wasi::ERRNO_BADF.into()) +} + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/wasi/thread.rs b/library/std/src/sys/wasi/thread.rs new file mode 100644 index 00000000000..0d39b1cec32 --- /dev/null +++ b/library/std/src/sys/wasi/thread.rs @@ -0,0 +1,72 @@ +use crate::ffi::CStr; +use crate::io; +use crate::mem; +use crate::sys::{unsupported, Void}; +use crate::time::Duration; + +pub struct Thread(Void); + +pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { + unsupported() + } + + pub fn yield_now() { + let ret = unsafe { wasi::sched_yield() }; + debug_assert_eq!(ret, Ok(())); + } + + pub fn set_name(_name: &CStr) { + // nope + } + + pub fn sleep(dur: Duration) { + let nanos = dur.as_nanos(); + assert!(nanos <= u64::MAX as u128); + + const USERDATA: wasi::Userdata = 0x0123_45678; + + let clock = wasi::SubscriptionClock { + id: wasi::CLOCKID_MONOTONIC, + timeout: nanos as u64, + precision: 0, + flags: 0, + }; + + let in_ = wasi::Subscription { + userdata: USERDATA, + r#type: wasi::EVENTTYPE_CLOCK, + u: wasi::SubscriptionU { clock }, + }; + unsafe { + let mut event: wasi::Event = mem::zeroed(); + let res = wasi::poll_oneoff(&in_, &mut event, 1); + match (res, event) { + ( + Ok(1), + wasi::Event { + userdata: USERDATA, error: 0, r#type: wasi::EVENTTYPE_CLOCK, .. 
+ }, + ) => {} + _ => panic!("thread::sleep(): unexpected result of poll_oneoff"), + } + } + } + + pub fn join(self) { + match self.0 {} + } +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/wasi/time.rs b/library/std/src/sys/wasi/time.rs new file mode 100644 index 00000000000..80ec317b5a2 --- /dev/null +++ b/library/std/src/sys/wasi/time.rs @@ -0,0 +1,67 @@ +use crate::time::Duration; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant(Duration); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime(Duration); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0)); + +fn current_time(clock: u32) -> Duration { + let ts = unsafe { + wasi::clock_time_get( + clock, 1, // precision... seems ignored though? + ) + .unwrap() + }; + Duration::new((ts / 1_000_000_000) as u64, (ts % 1_000_000_000) as u32) +} + +impl Instant { + pub fn now() -> Instant { + Instant(current_time(wasi::CLOCKID_MONOTONIC)) + } + + pub const fn zero() -> Instant { + Instant(Duration::from_secs(0)) + } + + pub fn actually_monotonic() -> bool { + true + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + self.0.checked_sub(other.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant(self.0.checked_sub(*other)?)) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + SystemTime(current_time(wasi::CLOCKID_REALTIME)) + } + + pub fn from_wasi_timestamp(ts: wasi::Timestamp) -> SystemTime { + SystemTime(Duration::from_nanos(ts)) + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + Some(SystemTime(self.0.checked_sub(*other)?)) + } +} diff --git a/library/std/src/sys/wasm/alloc.rs b/library/std/src/sys/wasm/alloc.rs new file mode 100644 index 00000000000..32b8b5bdaea --- /dev/null +++ b/library/std/src/sys/wasm/alloc.rs @@ -0,0 +1,158 @@ +//! This is an implementation of a global allocator on the wasm32 platform when +//! emscripten is not in use. In that situation there's no actual runtime for us +//! to lean on for allocation, so instead we provide our own! +//! +//! The wasm32 instruction set has two instructions for getting the current +//! amount of memory and growing the amount of memory. These instructions are the +//! foundation on which we're able to build an allocator, so we do so! Note that +//! the instructions are also pretty "global" and this is the "global" allocator +//! after all! +//! +//! The current allocator here is the `dlmalloc` crate which we've got included +//! in the rust-lang/rust repository as a submodule. The crate is a port of +//! dlmalloc.c from C to Rust and is basically just so we can have "pure Rust" +//! for now which is currently technically required (can't link with C yet). +//! +//! The crate itself provides a global allocator which on wasm has no +//! synchronization as there are no threads! 
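+//!
+//! Note (descriptive, from the code below): when the `atomics` target feature
+//! is enabled, threads may exist, so every allocator entry point additionally
+//! grabs the small spin lock defined at the bottom of this file; without that
+//! feature the lock helper is a no-op, since there is only one thread.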
+ +use crate::alloc::{GlobalAlloc, Layout, System}; + +static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT; + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let _lock = lock::lock(); + DLMALLOC.malloc(layout.size(), layout.align()) + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let _lock = lock::lock(); + DLMALLOC.calloc(layout.size(), layout.align()) + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + let _lock = lock::lock(); + DLMALLOC.free(ptr, layout.size(), layout.align()) + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let _lock = lock::lock(); + DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) + } +} + +#[cfg(target_feature = "atomics")] +mod lock { + use crate::sync::atomic::{AtomicI32, Ordering::SeqCst}; + + static LOCKED: AtomicI32 = AtomicI32::new(0); + + pub struct DropLock; + + pub fn lock() -> DropLock { + loop { + if LOCKED.swap(1, SeqCst) == 0 { + return DropLock; + } + // Ok so here's where things get a little depressing. At this point + // in time we need to synchronously acquire a lock, but we're + // contending with some other thread. Typically we'd execute some + // form of `i32.atomic.wait` like so: + // + // unsafe { + // let r = core::arch::wasm32::i32_atomic_wait( + // LOCKED.as_mut_ptr(), + // 1, // expected value + // -1, // timeout + // ); + // debug_assert!(r == 0 || r == 1); + // } + // + // Unfortunately though in doing so we would cause issues for the + // main thread. The main thread in a web browser *cannot ever + // block*, no exceptions. This means that the main thread can't + // actually execute the `i32.atomic.wait` instruction. + // + // As a result if we want to work within the context of browsers we + // need to figure out some sort of allocation scheme for the main + // thread where when there's contention on the global malloc lock we + // do... something. + // + // Possible ideas include: + // + // 1. Attempt to acquire the global lock. If it fails, fall back to + // memory allocation via `memory.grow`. Later just ... somehow + // ... inject this raw page back into the main allocator as it + // gets sliced up over time. This strategy has the downside of + // forcing allocation of a page to happen whenever the main + // thread contents with other threads, which is unfortunate. + // + // 2. Maintain a form of "two level" allocator scheme where the main + // thread has its own allocator. Somehow this allocator would + // also be balanced with a global allocator, not only to have + // allocations cross between threads but also to ensure that the + // two allocators stay "balanced" in terms of free'd memory and + // such. This, however, seems significantly complicated. + // + // Out of a lack of other ideas, the current strategy implemented + // here is to simply spin. Typical spin loop algorithms have some + // form of "hint" here to the CPU that it's what we're doing to + // ensure that the CPU doesn't get too hot, but wasm doesn't have + // such an instruction. + // + // To be clear, spinning here is not a great solution. + // Another thread with the lock may take quite a long time to wake + // up. For example it could be in `memory.grow` or it could be + // evicted from the CPU for a timeslice like 10ms. 
For these periods + // of time our thread will "helpfully" sit here and eat CPU time + // until it itself is evicted or the lock holder finishes. This + // means we're just burning and wasting CPU time to no one's + // benefit. + // + // Spinning does have the nice properties, though, of being + // semantically correct, being fair to all threads for memory + // allocation, and being simple enough to implement. + // + // This will surely (hopefully) be replaced in the future with a + // real memory allocator that can handle the restriction of the main + // thread. + // + // + // FIXME: We can also possibly add an optimization here to detect + // when a thread is the main thread or not and block on all + // non-main-thread threads. Currently, however, we have no way + // of knowing which wasm thread is on the browser main thread, but + // if we could figure out we could at least somewhat mitigate the + // cost of this spinning. + } + } + + impl Drop for DropLock { + fn drop(&mut self) { + let r = LOCKED.swap(0, SeqCst); + debug_assert_eq!(r, 1); + + // Note that due to the above logic we don't actually need to wake + // anyone up, but if we did it'd likely look something like this: + // + // unsafe { + // core::arch::wasm32::atomic_notify( + // LOCKED.as_mut_ptr(), + // 1, // only one thread + // ); + // } + } + } +} + +#[cfg(not(target_feature = "atomics"))] +mod lock { + #[inline] + pub fn lock() {} // no atomics, no threads, that's easy! +} diff --git a/library/std/src/sys/wasm/args.rs b/library/std/src/sys/wasm/args.rs new file mode 100644 index 00000000000..3b6557ae325 --- /dev/null +++ b/library/std/src/sys/wasm/args.rs @@ -0,0 +1,46 @@ +use crate::ffi::OsString; +use crate::marker::PhantomData; +use crate::vec; + +pub unsafe fn init(_argc: isize, _argv: *const *const u8) { + // On wasm these should always be null, so there's nothing for us to do here +} + +pub unsafe fn cleanup() {} + +pub fn args() -> Args { + Args { iter: Vec::new().into_iter(), _dont_send_or_sync_me: PhantomData } +} + +pub struct Args { + iter: vec::IntoIter<OsString>, + _dont_send_or_sync_me: PhantomData<*mut ()>, +} + +impl Args { + pub fn inner_debug(&self) -> &[OsString] { + self.iter.as_slice() + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.iter.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.iter.next_back() + } +} diff --git a/library/std/src/sys/wasm/condvar_atomics.rs b/library/std/src/sys/wasm/condvar_atomics.rs new file mode 100644 index 00000000000..1859cdd5a0e --- /dev/null +++ b/library/std/src/sys/wasm/condvar_atomics.rs @@ -0,0 +1,94 @@ +use crate::arch::wasm32; +use crate::cmp; +use crate::mem; +use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst}; +use crate::sys::mutex::Mutex; +use crate::time::Duration; + +pub struct Condvar { + cnt: AtomicUsize, +} + +// Condition variables are implemented with a simple counter internally that is +// likely to cause spurious wakeups. Blocking on a condition variable will first +// read the value of the internal counter, unlock the given mutex, and then +// block if and only if the counter's value is still the same. Notifying a +// condition variable will modify the counter (add one for now) and then wake up +// a thread waiting on the address of the counter. 
+// +// A thread waiting on the condition variable will as a result avoid going to +// sleep if it's notified after the lock is unlocked but before it fully goes to +// sleep. A sleeping thread is guaranteed to be woken up at some point as it can +// only be woken up with a call to `wake`. +// +// Note that it's possible for 2 or more threads to be woken up by a call to +// `notify_one` with this implementation. That can happen where the modification +// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep, +// and the subsequent `wake` may wake up a thread that's actually blocking. We +// consider this a spurious wakeup, though, which all users of condition +// variables must already be prepared to handle. As a result, this source of +// spurious wakeups is currently though to be ok, although it may be problematic +// later on if it causes too many spurious wakeups. + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { cnt: AtomicUsize::new(0) } + } + + #[inline] + pub unsafe fn init(&mut self) { + // nothing to do + } + + pub unsafe fn notify_one(&self) { + self.cnt.fetch_add(1, SeqCst); + wasm32::atomic_notify(self.ptr(), 1); + } + + #[inline] + pub unsafe fn notify_all(&self) { + self.cnt.fetch_add(1, SeqCst); + wasm32::atomic_notify(self.ptr(), u32::MAX); // -1 == "wake everyone" + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + // "atomically block and unlock" implemented by loading our current + // counter's value, unlocking the mutex, and blocking if the counter + // still has the same value. + // + // Notifications happen by incrementing the counter and then waking a + // thread. Incrementing the counter after we unlock the mutex will + // prevent us from sleeping and otherwise the call to `wake` will + // wake us up once we're asleep. + let ticket = self.cnt.load(SeqCst) as i32; + mutex.unlock(); + let val = wasm32::i32_atomic_wait(self.ptr(), ticket, -1); + // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen) + debug_assert!(val == 0 || val == 1); + mutex.lock(); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let ticket = self.cnt.load(SeqCst) as i32; + mutex.unlock(); + let nanos = dur.as_nanos(); + let nanos = cmp::min(i64::MAX as u128, nanos); + + // If the return value is 2 then a timeout happened, so we return + // `false` as we weren't actually notified. + let ret = wasm32::i32_atomic_wait(self.ptr(), ticket, nanos as i64) != 2; + mutex.lock(); + return ret; + } + + #[inline] + pub unsafe fn destroy(&self) { + // nothing to do + } + + #[inline] + fn ptr(&self) -> *mut i32 { + assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>()); + self.cnt.as_mut_ptr() as *mut i32 + } +} diff --git a/library/std/src/sys/wasm/env.rs b/library/std/src/sys/wasm/env.rs new file mode 100644 index 00000000000..730e356d7fe --- /dev/null +++ b/library/std/src/sys/wasm/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = ""; + pub const OS: &str = ""; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ".wasm"; + pub const DLL_EXTENSION: &str = "wasm"; + pub const EXE_SUFFIX: &str = ".wasm"; + pub const EXE_EXTENSION: &str = "wasm"; +} diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs new file mode 100644 index 00000000000..3de58904043 --- /dev/null +++ b/library/std/src/sys/wasm/mod.rs @@ -0,0 +1,70 @@ +//! System bindings for the wasm/web platform +//! +//! This module contains the facade (aka platform-specific) implementations of +//! 
OS level functionality for wasm. Note that this wasm is *not* the emscripten +//! wasm, so we have no runtime here. +//! +//! This is all super highly experimental and not actually intended for +//! wide/production use yet, it's still all in the experimental category. This +//! will likely change over time. +//! +//! Currently all functions here are basically stubs that immediately return +//! errors. The hope is that with a portability lint we can turn actually just +//! remove all this and just omit parts of the standard library if we're +//! compiling for wasm. That way it's a compile time error for something that's +//! guaranteed to be a runtime error! + +pub mod alloc; +pub mod args; +#[path = "../unsupported/cmath.rs"] +pub mod cmath; +pub mod env; +#[path = "../unsupported/fs.rs"] +pub mod fs; +#[path = "../unsupported/io.rs"] +pub mod io; +#[path = "../unsupported/net.rs"] +pub mod net; +#[path = "../unsupported/os.rs"] +pub mod os; +#[path = "../unsupported/path.rs"] +pub mod path; +#[path = "../unsupported/pipe.rs"] +pub mod pipe; +#[path = "../unsupported/process.rs"] +pub mod process; +#[path = "../unsupported/stack_overflow.rs"] +pub mod stack_overflow; +#[path = "../unsupported/stdio.rs"] +pub mod stdio; +pub mod thread; +#[path = "../unsupported/thread_local_dtor.rs"] +pub mod thread_local_dtor; +#[path = "../unsupported/thread_local_key.rs"] +pub mod thread_local_key; +#[path = "../unsupported/time.rs"] +pub mod time; + +pub use crate::sys_common::os_str_bytes as os_str; + +cfg_if::cfg_if! { + if #[cfg(target_feature = "atomics")] { + #[path = "condvar_atomics.rs"] + pub mod condvar; + #[path = "mutex_atomics.rs"] + pub mod mutex; + #[path = "rwlock_atomics.rs"] + pub mod rwlock; + } else { + #[path = "../unsupported/condvar.rs"] + pub mod condvar; + #[path = "../unsupported/mutex.rs"] + pub mod mutex; + #[path = "../unsupported/rwlock.rs"] + pub mod rwlock; + } +} + +#[path = "../unsupported/common.rs"] +mod common; +pub use common::*; diff --git a/library/std/src/sys/wasm/mutex_atomics.rs b/library/std/src/sys/wasm/mutex_atomics.rs new file mode 100644 index 00000000000..268a53bb564 --- /dev/null +++ b/library/std/src/sys/wasm/mutex_atomics.rs @@ -0,0 +1,147 @@ +use crate::arch::wasm32; +use crate::cell::UnsafeCell; +use crate::mem; +use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst}; +use crate::sys::thread; + +pub struct Mutex { + locked: AtomicUsize, +} + +// Mutexes have a pretty simple implementation where they contain an `i32` +// internally that is 0 when unlocked and 1 when the mutex is locked. +// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and +// if it fails it then waits for a notification. Releasing a lock is then done +// by swapping in 0 and then notifying any waiters, if present. + +impl Mutex { + pub const fn new() -> Mutex { + Mutex { locked: AtomicUsize::new(0) } + } + + #[inline] + pub unsafe fn init(&mut self) { + // nothing to do + } + + pub unsafe fn lock(&self) { + while !self.try_lock() { + let val = wasm32::i32_atomic_wait( + self.ptr(), + 1, // we expect our mutex is locked + -1, // wait infinitely + ); + // we should have either woke up (0) or got a not-equal due to a + // race (1). 
We should never time out (2) + debug_assert!(val == 0 || val == 1); + } + } + + pub unsafe fn unlock(&self) { + let prev = self.locked.swap(0, SeqCst); + debug_assert_eq!(prev, 1); + wasm32::atomic_notify(self.ptr(), 1); // wake up one waiter, if any + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() + } + + #[inline] + pub unsafe fn destroy(&self) { + // nothing to do + } + + #[inline] + fn ptr(&self) -> *mut i32 { + assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>()); + self.locked.as_mut_ptr() as *mut i32 + } +} + +pub struct ReentrantMutex { + owner: AtomicU32, + recursions: UnsafeCell<u32>, +} + +unsafe impl Send for ReentrantMutex {} +unsafe impl Sync for ReentrantMutex {} + +// Reentrant mutexes are similarly implemented to mutexs above except that +// instead of "1" meaning unlocked we use the id of a thread to represent +// whether it has locked a mutex. That way we have an atomic counter which +// always holds the id of the thread that currently holds the lock (or 0 if the +// lock is unlocked). +// +// Once a thread acquires a lock recursively, which it detects by looking at +// the value that's already there, it will update a local `recursions` counter +// in a nonatomic fashion (as we hold the lock). The lock is then fully +// released when this recursion counter reaches 0. + +impl ReentrantMutex { + pub const unsafe fn uninitialized() -> ReentrantMutex { + ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) } + } + + pub unsafe fn init(&self) { + // nothing to do... + } + + pub unsafe fn lock(&self) { + let me = thread::my_id(); + while let Err(owner) = self._try_lock(me) { + let val = wasm32::i32_atomic_wait(self.ptr(), owner as i32, -1); + debug_assert!(val == 0 || val == 1); + } + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + self._try_lock(thread::my_id()).is_ok() + } + + #[inline] + unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> { + let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0 + match self.owner.compare_exchange(0, id, SeqCst, SeqCst) { + // we transitioned from unlocked to locked + Ok(_) => { + debug_assert_eq!(*self.recursions.get(), 0); + Ok(()) + } + + // we currently own this lock, so let's update our count and return + // true. + Err(n) if n == id => { + *self.recursions.get() += 1; + Ok(()) + } + + // Someone else owns the lock, let our caller take care of it + Err(other) => Err(other), + } + } + + pub unsafe fn unlock(&self) { + // If we didn't ever recursively lock the lock then we fully unlock the + // mutex and wake up a waiter, if any. Otherwise we decrement our + // recursive counter and let some one else take care of the zero. + match *self.recursions.get() { + 0 => { + self.owner.swap(0, SeqCst); + wasm32::atomic_notify(self.ptr() as *mut i32, 1); // wake up one waiter, if any + } + ref mut n => *n -= 1, + } + } + + pub unsafe fn destroy(&self) { + // nothing to do... 
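+        // (both the owner word and the recursion counter are plain in-memory
+        // state, so there is no OS resource to release here)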
+ } + + #[inline] + fn ptr(&self) -> *mut i32 { + self.owner.as_mut_ptr() as *mut i32 + } +} diff --git a/library/std/src/sys/wasm/rwlock_atomics.rs b/library/std/src/sys/wasm/rwlock_atomics.rs new file mode 100644 index 00000000000..06442e925f4 --- /dev/null +++ b/library/std/src/sys/wasm/rwlock_atomics.rs @@ -0,0 +1,144 @@ +use crate::cell::UnsafeCell; +use crate::sys::condvar::Condvar; +use crate::sys::mutex::Mutex; + +pub struct RWLock { + lock: Mutex, + cond: Condvar, + state: UnsafeCell<State>, +} + +enum State { + Unlocked, + Reading(usize), + Writing, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +// This rwlock implementation is a relatively simple implementation which has a +// condition variable for readers/writers as well as a mutex protecting the +// internal state of the lock. A current downside of the implementation is that +// unlocking the lock will notify *all* waiters rather than just readers or just +// writers. This can cause lots of "thundering stampede" problems. While +// hopefully correct this implementation is very likely to want to be changed in +// the future. + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } + } + + #[inline] + pub unsafe fn read(&self) { + self.lock.lock(); + while !(*self.state.get()).inc_readers() { + self.cond.wait(&self.lock); + } + self.lock.unlock(); + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + self.lock.lock(); + let ok = (*self.state.get()).inc_readers(); + self.lock.unlock(); + return ok; + } + + #[inline] + pub unsafe fn write(&self) { + self.lock.lock(); + while !(*self.state.get()).inc_writers() { + self.cond.wait(&self.lock); + } + self.lock.unlock(); + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + self.lock.lock(); + let ok = (*self.state.get()).inc_writers(); + self.lock.unlock(); + return ok; + } + + #[inline] + pub unsafe fn read_unlock(&self) { + self.lock.lock(); + let notify = (*self.state.get()).dec_readers(); + self.lock.unlock(); + if notify { + // FIXME: should only wake up one of these some of the time + self.cond.notify_all(); + } + } + + #[inline] + pub unsafe fn write_unlock(&self) { + self.lock.lock(); + (*self.state.get()).dec_writers(); + self.lock.unlock(); + // FIXME: should only wake up one of these some of the time + self.cond.notify_all(); + } + + #[inline] + pub unsafe fn destroy(&self) { + self.lock.destroy(); + self.cond.destroy(); + } +} + +impl State { + fn inc_readers(&mut self) -> bool { + match *self { + State::Unlocked => { + *self = State::Reading(1); + true + } + State::Reading(ref mut cnt) => { + *cnt += 1; + true + } + State::Writing => false, + } + } + + fn inc_writers(&mut self) -> bool { + match *self { + State::Unlocked => { + *self = State::Writing; + true + } + State::Reading(_) | State::Writing => false, + } + } + + fn dec_readers(&mut self) -> bool { + let zero = match *self { + State::Reading(ref mut cnt) => { + *cnt -= 1; + *cnt == 0 + } + State::Unlocked | State::Writing => invalid(), + }; + if zero { + *self = State::Unlocked; + } + zero + } + + fn dec_writers(&mut self) { + match *self { + State::Writing => {} + State::Unlocked | State::Reading(_) => invalid(), + } + *self = State::Unlocked; + } +} + +fn invalid() -> ! 
{ + panic!("inconsistent rwlock"); +} diff --git a/library/std/src/sys/wasm/thread.rs b/library/std/src/sys/wasm/thread.rs new file mode 100644 index 00000000000..0a11896a004 --- /dev/null +++ b/library/std/src/sys/wasm/thread.rs @@ -0,0 +1,98 @@ +use crate::ffi::CStr; +use crate::io; +use crate::sys::{unsupported, Void}; +use crate::time::Duration; + +pub struct Thread(Void); + +pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { + unsupported() + } + + pub fn yield_now() { + // do nothing + } + + pub fn set_name(_name: &CStr) { + // nope + } + + #[cfg(not(target_feature = "atomics"))] + pub fn sleep(_dur: Duration) { + panic!("can't sleep"); + } + + #[cfg(target_feature = "atomics")] + pub fn sleep(dur: Duration) { + use crate::arch::wasm32; + use crate::cmp; + + // Use an atomic wait to block the current thread artificially with a + // timeout listed. Note that we should never be notified (return value + // of 0) or our comparison should never fail (return value of 1) so we + // should always only resume execution through a timeout (return value + // 2). + let mut nanos = dur.as_nanos(); + while nanos > 0 { + let amt = cmp::min(i64::MAX as u128, nanos); + let mut x = 0; + let val = unsafe { wasm32::i32_atomic_wait(&mut x, 0, amt as i64) }; + debug_assert_eq!(val, 2); + nanos -= amt; + } + } + + pub fn join(self) { + match self.0 {} + } +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} + +// This is only used by atomics primitives when the `atomics` feature is +// enabled. In that mode we currently just use our own thread-local to store our +// current thread's ID, and then we lazily initialize it to something allocated +// from a global counter. +#[cfg(target_feature = "atomics")] +pub fn my_id() -> u32 { + use crate::sync::atomic::{AtomicU32, Ordering::SeqCst}; + + static NEXT_ID: AtomicU32 = AtomicU32::new(0); + + #[thread_local] + static mut MY_ID: u32 = 0; + + unsafe { + // If our thread ID isn't set yet then we need to allocate one. Do so + // with with a simple "atomically add to a global counter" strategy. + // This strategy doesn't handled what happens when the counter + // overflows, however, so just abort everything once the counter + // overflows and eventually we could have some sort of recycling scheme + // (or maybe this is all totally irrelevant by that point!). In any case + // though we're using a CAS loop instead of a `fetch_add` to ensure that + // the global counter never overflows. 
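+        // Because `NEXT_ID` starts at 0 and we always publish `cur + 1`, the
+        // first ID handed out is 1; that lets `MY_ID == 0` double as the
+        // "not yet initialized" sentinel checked below.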
+ if MY_ID == 0 { + let mut cur = NEXT_ID.load(SeqCst); + MY_ID = loop { + let next = cur.checked_add(1).unwrap_or_else(|| crate::arch::wasm32::unreachable()); + match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) { + Ok(_) => break next, + Err(i) => cur = i, + } + }; + } + MY_ID + } +} diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs new file mode 100644 index 00000000000..99b4d6c72a0 --- /dev/null +++ b/library/std/src/sys/windows/alloc.rs @@ -0,0 +1,61 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; +use crate::sys::c; +use crate::sys_common::alloc::{realloc_fallback, MIN_ALIGN}; + +#[repr(C)] +struct Header(*mut u8); + +unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { + &mut *(ptr as *mut Header).offset(-1) +} + +unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 { + let aligned = ptr.add(align - (ptr as usize & (align - 1))); + *get_header(aligned) = Header(ptr); + aligned +} + +#[inline] +unsafe fn allocate_with_flags(layout: Layout, flags: c::DWORD) -> *mut u8 { + if layout.align() <= MIN_ALIGN { + return c::HeapAlloc(c::GetProcessHeap(), flags, layout.size()) as *mut u8; + } + + let size = layout.size() + layout.align(); + let ptr = c::HeapAlloc(c::GetProcessHeap(), flags, size); + if ptr.is_null() { ptr as *mut u8 } else { align_ptr(ptr as *mut u8, layout.align()) } +} + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, 0) + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, c::HEAP_ZERO_MEMORY) + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + if layout.align() <= MIN_ALIGN { + let err = c::HeapFree(c::GetProcessHeap(), 0, ptr as c::LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", c::GetLastError()); + } else { + let header = get_header(ptr); + let err = c::HeapFree(c::GetProcessHeap(), 0, header.0 as c::LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", c::GetLastError()); + } + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN { + c::HeapReAlloc(c::GetProcessHeap(), 0, ptr as c::LPVOID, new_size) as *mut u8 + } else { + realloc_fallback(self, ptr, layout, new_size) + } + } +} diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs new file mode 100644 index 00000000000..5fbea2a2910 --- /dev/null +++ b/library/std/src/sys/windows/args.rs @@ -0,0 +1,266 @@ +#![allow(dead_code)] // runtime init functions not used during testing + +use crate::ffi::OsString; +use crate::fmt; +use crate::os::windows::prelude::*; +use crate::path::PathBuf; +use crate::slice; +use crate::sys::c; +use crate::sys::windows::os::current_exe; +use crate::vec; + +use core::iter; + +pub unsafe fn init(_argc: isize, _argv: *const *const u8) {} + +pub unsafe fn cleanup() {} + +pub fn args() -> Args { + unsafe { + let lp_cmd_line = c::GetCommandLineW(); + let parsed_args_list = parse_lp_cmd_line(lp_cmd_line as *const u16, || { + current_exe().map(PathBuf::into_os_string).unwrap_or_else(|_| OsString::new()) + }); + + Args { parsed_args_list: parsed_args_list.into_iter() } + } +} + +/// Implements the Windows command-line argument parsing algorithm. 
+/// +/// Microsoft's documentation for the Windows CLI argument format can be found at +/// <https://docs.microsoft.com/en-us/previous-versions//17w5ykft(v=vs.85)>. +/// +/// Windows includes a function to do this in shell32.dll, +/// but linking with that DLL causes the process to be registered as a GUI application. +/// GUI applications add a bunch of overhead, even if no windows are drawn. See +/// <https://randomascii.wordpress.com/2018/12/03/a-not-called-function-can-cause-a-5x-slowdown/>. +/// +/// This function was tested for equivalence to the shell32.dll implementation in +/// Windows 10 Pro v1803, using an exhaustive test suite available at +/// <https://gist.github.com/notriddle/dde431930c392e428055b2dc22e638f5> or +/// <https://paste.gg/p/anonymous/47d6ed5f5bd549168b1c69c799825223>. +unsafe fn parse_lp_cmd_line<F: Fn() -> OsString>( + lp_cmd_line: *const u16, + exe_name: F, +) -> Vec<OsString> { + const BACKSLASH: u16 = '\\' as u16; + const QUOTE: u16 = '"' as u16; + const TAB: u16 = '\t' as u16; + const SPACE: u16 = ' ' as u16; + let mut ret_val = Vec::new(); + if lp_cmd_line.is_null() || *lp_cmd_line == 0 { + ret_val.push(exe_name()); + return ret_val; + } + let mut cmd_line = { + let mut end = 0; + while *lp_cmd_line.offset(end) != 0 { + end += 1; + } + slice::from_raw_parts(lp_cmd_line, end as usize) + }; + // The executable name at the beginning is special. + cmd_line = match cmd_line[0] { + // The executable name ends at the next quote mark, + // no matter what. + QUOTE => { + let args = { + let mut cut = cmd_line[1..].splitn(2, |&c| c == QUOTE); + if let Some(exe) = cut.next() { + ret_val.push(OsString::from_wide(exe)); + } + cut.next() + }; + if let Some(args) = args { + args + } else { + return ret_val; + } + } + // Implement quirk: when they say whitespace here, + // they include the entire ASCII control plane: + // "However, if lpCmdLine starts with any amount of whitespace, CommandLineToArgvW + // will consider the first argument to be an empty string. Excess whitespace at the + // end of lpCmdLine is ignored." + 0..=SPACE => { + ret_val.push(OsString::new()); + &cmd_line[1..] + } + // The executable name ends at the next whitespace, + // no matter what. 
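+        // ("whitespace" here again means any code unit in the range 1..=0x20,
+        // matching the control-plane quirk handled above)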
+ _ => { + let args = { + let mut cut = cmd_line.splitn(2, |&c| c > 0 && c <= SPACE); + if let Some(exe) = cut.next() { + ret_val.push(OsString::from_wide(exe)); + } + cut.next() + }; + if let Some(args) = args { + args + } else { + return ret_val; + } + } + }; + let mut cur = Vec::new(); + let mut in_quotes = false; + let mut was_in_quotes = false; + let mut backslash_count: usize = 0; + for &c in cmd_line { + match c { + // backslash + BACKSLASH => { + backslash_count += 1; + was_in_quotes = false; + } + QUOTE if backslash_count % 2 == 0 => { + cur.extend(iter::repeat(b'\\' as u16).take(backslash_count / 2)); + backslash_count = 0; + if was_in_quotes { + cur.push('"' as u16); + was_in_quotes = false; + } else { + was_in_quotes = in_quotes; + in_quotes = !in_quotes; + } + } + QUOTE if backslash_count % 2 != 0 => { + cur.extend(iter::repeat(b'\\' as u16).take(backslash_count / 2)); + backslash_count = 0; + was_in_quotes = false; + cur.push(b'"' as u16); + } + SPACE | TAB if !in_quotes => { + cur.extend(iter::repeat(b'\\' as u16).take(backslash_count)); + if !cur.is_empty() || was_in_quotes { + ret_val.push(OsString::from_wide(&cur[..])); + cur.truncate(0); + } + backslash_count = 0; + was_in_quotes = false; + } + _ => { + cur.extend(iter::repeat(b'\\' as u16).take(backslash_count)); + backslash_count = 0; + was_in_quotes = false; + cur.push(c); + } + } + } + cur.extend(iter::repeat(b'\\' as u16).take(backslash_count)); + // include empty quoted strings at the end of the arguments list + if !cur.is_empty() || was_in_quotes || in_quotes { + ret_val.push(OsString::from_wide(&cur[..])); + } + ret_val +} + +pub struct Args { + parsed_args_list: vec::IntoIter<OsString>, +} + +pub struct ArgsInnerDebug<'a> { + args: &'a Args, +} + +impl<'a> fmt::Debug for ArgsInnerDebug<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.args.parsed_args_list.as_slice().fmt(f) + } +} + +impl Args { + pub fn inner_debug(&self) -> ArgsInnerDebug<'_> { + ArgsInnerDebug { args: self } + } +} + +impl Iterator for Args { + type Item = OsString; + fn next(&mut self) -> Option<OsString> { + self.parsed_args_list.next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + self.parsed_args_list.size_hint() + } +} + +impl DoubleEndedIterator for Args { + fn next_back(&mut self) -> Option<OsString> { + self.parsed_args_list.next_back() + } +} + +impl ExactSizeIterator for Args { + fn len(&self) -> usize { + self.parsed_args_list.len() + } +} + +#[cfg(test)] +mod tests { + use crate::ffi::OsString; + use crate::sys::windows::args::*; + + fn chk(string: &str, parts: &[&str]) { + let mut wide: Vec<u16> = OsString::from(string).encode_wide().collect(); + wide.push(0); + let parsed = unsafe { + parse_lp_cmd_line(wide.as_ptr() as *const u16, || OsString::from("TEST.EXE")) + }; + let expected: Vec<OsString> = parts.iter().map(|k| OsString::from(k)).collect(); + assert_eq!(parsed.as_slice(), expected.as_slice()); + } + + #[test] + fn empty() { + chk("", &["TEST.EXE"]); + chk("\0", &["TEST.EXE"]); + } + + #[test] + fn single_words() { + chk("EXE one_word", &["EXE", "one_word"]); + chk("EXE a", &["EXE", "a"]); + chk("EXE 😅", &["EXE", "😅"]); + chk("EXE 😅🤦", &["EXE", "😅🤦"]); + } + + #[test] + fn official_examples() { + chk(r#"EXE "abc" d e"#, &["EXE", "abc", "d", "e"]); + chk(r#"EXE a\\\b d"e f"g h"#, &["EXE", r#"a\\\b"#, "de fg", "h"]); + chk(r#"EXE a\\\"b c d"#, &["EXE", r#"a\"b"#, "c", "d"]); + chk(r#"EXE a\\\\"b c" d e"#, &["EXE", r#"a\\b c"#, "d", "e"]); + } + + #[test] + fn whitespace_behavior() { + 
chk(r#" test"#, &["", "test"]); + chk(r#" test"#, &["", "test"]); + chk(r#" test test2"#, &["", "test", "test2"]); + chk(r#" test test2"#, &["", "test", "test2"]); + chk(r#"test test2 "#, &["test", "test2"]); + chk(r#"test test2 "#, &["test", "test2"]); + chk(r#"test "#, &["test"]); + } + + #[test] + fn genius_quotes() { + chk(r#"EXE "" """#, &["EXE", "", ""]); + chk(r#"EXE "" """"#, &["EXE", "", "\""]); + chk( + r#"EXE "this is """all""" in the same argument""#, + &["EXE", "this is \"all\" in the same argument"], + ); + chk(r#"EXE "a"""#, &["EXE", "a\""]); + chk(r#"EXE "a"" a"#, &["EXE", "a\"", "a"]); + // quotes cannot be escaped in command names + chk(r#""EXE" check"#, &["EXE", "check"]); + chk(r#""EXE check""#, &["EXE check"]); + chk(r#""EXE """for""" check"#, &["EXE ", r#"for""#, "check"]); + chk(r#""EXE \"for\" check"#, &[r#"EXE \"#, r#"for""#, "check"]); + } +} diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs new file mode 100644 index 00000000000..f440442ca30 --- /dev/null +++ b/library/std/src/sys/windows/c.rs @@ -0,0 +1,1098 @@ +//! C definitions used by libnative that don't belong in liblibc + +#![allow(nonstandard_style)] +#![cfg_attr(test, allow(dead_code))] +#![unstable(issue = "none", feature = "windows_c")] + +use crate::os::raw::{c_char, c_int, c_long, c_longlong, c_uint, c_ulong, c_ushort}; +use crate::ptr; + +use libc::{c_void, size_t, wchar_t}; + +pub use self::EXCEPTION_DISPOSITION::*; +pub use self::FILE_INFO_BY_HANDLE_CLASS::*; + +pub type DWORD = c_ulong; +pub type HANDLE = LPVOID; +pub type HINSTANCE = HANDLE; +pub type HMODULE = HINSTANCE; +pub type HRESULT = LONG; +pub type BOOL = c_int; +pub type BYTE = u8; +pub type BOOLEAN = BYTE; +pub type GROUP = c_uint; +pub type LARGE_INTEGER = c_longlong; +pub type LONG = c_long; +pub type UINT = c_uint; +pub type WCHAR = u16; +pub type USHORT = c_ushort; +pub type SIZE_T = usize; +pub type WORD = u16; +pub type CHAR = c_char; +pub type ULONG_PTR = usize; +pub type ULONG = c_ulong; + +pub type LPBOOL = *mut BOOL; +pub type LPBYTE = *mut BYTE; +pub type LPCSTR = *const CHAR; +pub type LPCWSTR = *const WCHAR; +pub type LPDWORD = *mut DWORD; +pub type LPHANDLE = *mut HANDLE; +pub type LPOVERLAPPED = *mut OVERLAPPED; +pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION; +pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES; +pub type LPSTARTUPINFO = *mut STARTUPINFO; +pub type LPVOID = *mut c_void; +pub type LPWCH = *mut WCHAR; +pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW; +pub type LPWSADATA = *mut WSADATA; +pub type LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO; +pub type LPSTR = *mut CHAR; +pub type LPWSTR = *mut WCHAR; +pub type LPFILETIME = *mut FILETIME; +pub type LPWSABUF = *mut WSABUF; +pub type LPWSAOVERLAPPED = *mut c_void; +pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void; + +pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE; +pub type PLARGE_INTEGER = *mut c_longlong; +pub type PSRWLOCK = *mut SRWLOCK; + +pub type SOCKET = crate::os::windows::raw::SOCKET; +pub type socklen_t = c_int; +pub type ADDRESS_FAMILY = USHORT; + +pub const TRUE: BOOL = 1; +pub const FALSE: BOOL = 0; + +pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x1; +pub const FILE_ATTRIBUTE_DIRECTORY: DWORD = 0x10; +pub const FILE_ATTRIBUTE_REPARSE_POINT: DWORD = 0x400; + +pub const FILE_SHARE_DELETE: DWORD = 0x4; +pub const FILE_SHARE_READ: DWORD = 0x1; +pub const FILE_SHARE_WRITE: DWORD = 0x2; + +pub const CREATE_ALWAYS: DWORD = 2; +pub const CREATE_NEW: DWORD = 1; +pub const OPEN_ALWAYS: 
DWORD = 4; +pub const OPEN_EXISTING: DWORD = 3; +pub const TRUNCATE_EXISTING: DWORD = 5; + +pub const FILE_WRITE_DATA: DWORD = 0x00000002; +pub const FILE_APPEND_DATA: DWORD = 0x00000004; +pub const FILE_WRITE_EA: DWORD = 0x00000010; +pub const FILE_WRITE_ATTRIBUTES: DWORD = 0x00000100; +pub const READ_CONTROL: DWORD = 0x00020000; +pub const SYNCHRONIZE: DWORD = 0x00100000; +pub const GENERIC_READ: DWORD = 0x80000000; +pub const GENERIC_WRITE: DWORD = 0x40000000; +pub const STANDARD_RIGHTS_WRITE: DWORD = READ_CONTROL; +pub const FILE_GENERIC_WRITE: DWORD = STANDARD_RIGHTS_WRITE + | FILE_WRITE_DATA + | FILE_WRITE_ATTRIBUTES + | FILE_WRITE_EA + | FILE_APPEND_DATA + | SYNCHRONIZE; + +pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000; +pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000; +pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000; + +pub const FIONBIO: c_ulong = 0x8004667e; + +#[repr(C)] +#[derive(Copy)] +pub struct WIN32_FIND_DATAW { + pub dwFileAttributes: DWORD, + pub ftCreationTime: FILETIME, + pub ftLastAccessTime: FILETIME, + pub ftLastWriteTime: FILETIME, + pub nFileSizeHigh: DWORD, + pub nFileSizeLow: DWORD, + pub dwReserved0: DWORD, + pub dwReserved1: DWORD, + pub cFileName: [wchar_t; 260], // #define MAX_PATH 260 + pub cAlternateFileName: [wchar_t; 14], +} +impl Clone for WIN32_FIND_DATAW { + fn clone(&self) -> Self { + *self + } +} + +pub const WSA_FLAG_OVERLAPPED: DWORD = 0x01; +pub const WSA_FLAG_NO_HANDLE_INHERIT: DWORD = 0x80; + +pub const WSADESCRIPTION_LEN: usize = 256; +pub const WSASYS_STATUS_LEN: usize = 128; +pub const WSAPROTOCOL_LEN: DWORD = 255; +pub const INVALID_SOCKET: SOCKET = !0; + +pub const WSAEACCES: c_int = 10013; +pub const WSAEINVAL: c_int = 10022; +pub const WSAEWOULDBLOCK: c_int = 10035; +pub const WSAEPROTOTYPE: c_int = 10041; +pub const WSAEADDRINUSE: c_int = 10048; +pub const WSAEADDRNOTAVAIL: c_int = 10049; +pub const WSAECONNABORTED: c_int = 10053; +pub const WSAECONNRESET: c_int = 10054; +pub const WSAENOTCONN: c_int = 10057; +pub const WSAESHUTDOWN: c_int = 10058; +pub const WSAETIMEDOUT: c_int = 10060; +pub const WSAECONNREFUSED: c_int = 10061; + +pub const MAX_PROTOCOL_CHAIN: DWORD = 7; + +pub const MAXIMUM_REPARSE_DATA_BUFFER_SIZE: usize = 16 * 1024; +pub const FSCTL_GET_REPARSE_POINT: DWORD = 0x900a8; +pub const IO_REPARSE_TAG_SYMLINK: DWORD = 0xa000000c; +pub const IO_REPARSE_TAG_MOUNT_POINT: DWORD = 0xa0000003; +pub const SYMLINK_FLAG_RELATIVE: DWORD = 0x00000001; +pub const FSCTL_SET_REPARSE_POINT: DWORD = 0x900a4; + +pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1; +pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2; + +// Note that these are not actually HANDLEs, just values to pass to GetStdHandle +pub const STD_INPUT_HANDLE: DWORD = -10i32 as DWORD; +pub const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD; +pub const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD; + +pub const PROGRESS_CONTINUE: DWORD = 0; + +// List of Windows system error codes with descriptions: +// https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes#system-error-codes +pub const ERROR_FILE_NOT_FOUND: DWORD = 2; +pub const ERROR_PATH_NOT_FOUND: DWORD = 3; +pub const ERROR_ACCESS_DENIED: DWORD = 5; +pub const ERROR_INVALID_HANDLE: DWORD = 6; +pub const ERROR_NO_MORE_FILES: DWORD = 18; +pub const ERROR_HANDLE_EOF: DWORD = 38; +pub const ERROR_FILE_EXISTS: DWORD = 80; +pub const ERROR_INVALID_PARAMETER: DWORD = 87; +pub const ERROR_BROKEN_PIPE: DWORD = 109; +pub const ERROR_CALL_NOT_IMPLEMENTED: DWORD = 120; +pub 
const ERROR_SEM_TIMEOUT: DWORD = 121; +pub const ERROR_INSUFFICIENT_BUFFER: DWORD = 122; +pub const ERROR_ALREADY_EXISTS: DWORD = 183; +pub const ERROR_ENVVAR_NOT_FOUND: DWORD = 203; +pub const ERROR_NO_DATA: DWORD = 232; +pub const ERROR_DRIVER_CANCEL_TIMEOUT: DWORD = 594; +pub const ERROR_OPERATION_ABORTED: DWORD = 995; +pub const ERROR_IO_PENDING: DWORD = 997; +pub const ERROR_SERVICE_REQUEST_TIMEOUT: DWORD = 1053; +pub const ERROR_COUNTER_TIMEOUT: DWORD = 1121; +pub const ERROR_TIMEOUT: DWORD = 1460; +pub const ERROR_RESOURCE_CALL_TIMED_OUT: DWORD = 5910; +pub const ERROR_CTX_MODEM_RESPONSE_TIMEOUT: DWORD = 7012; +pub const ERROR_CTX_CLIENT_QUERY_TIMEOUT: DWORD = 7040; +pub const FRS_ERR_SYSVOL_POPULATE_TIMEOUT: DWORD = 8014; +pub const ERROR_DS_TIMELIMIT_EXCEEDED: DWORD = 8226; +pub const DNS_ERROR_RECORD_TIMED_OUT: DWORD = 9705; +pub const ERROR_IPSEC_IKE_TIMED_OUT: DWORD = 13805; +pub const ERROR_RUNLEVEL_SWITCH_TIMEOUT: DWORD = 15402; +pub const ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT: DWORD = 15403; + +pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT; + +pub const INVALID_HANDLE_VALUE: HANDLE = !0 as HANDLE; + +pub const FACILITY_NT_BIT: DWORD = 0x1000_0000; + +pub const FORMAT_MESSAGE_FROM_SYSTEM: DWORD = 0x00001000; +pub const FORMAT_MESSAGE_FROM_HMODULE: DWORD = 0x00000800; +pub const FORMAT_MESSAGE_IGNORE_INSERTS: DWORD = 0x00000200; + +pub const TLS_OUT_OF_INDEXES: DWORD = 0xFFFFFFFF; + +pub const DLL_THREAD_DETACH: DWORD = 3; +pub const DLL_PROCESS_DETACH: DWORD = 0; + +pub const INFINITE: DWORD = !0; + +pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002; + +pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() }; +pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() }; + +pub const DETACHED_PROCESS: DWORD = 0x00000008; +pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200; +pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400; +pub const STARTF_USESTDHANDLES: DWORD = 0x00000100; + +pub const AF_INET: c_int = 2; +pub const AF_INET6: c_int = 23; +pub const SD_BOTH: c_int = 2; +pub const SD_RECEIVE: c_int = 0; +pub const SD_SEND: c_int = 1; +pub const SOCK_DGRAM: c_int = 2; +pub const SOCK_STREAM: c_int = 1; +pub const SOL_SOCKET: c_int = 0xffff; +pub const SO_RCVTIMEO: c_int = 0x1006; +pub const SO_SNDTIMEO: c_int = 0x1005; +pub const IPPROTO_IP: c_int = 0; +pub const IPPROTO_TCP: c_int = 6; +pub const IPPROTO_IPV6: c_int = 41; +pub const TCP_NODELAY: c_int = 0x0001; +pub const IP_TTL: c_int = 4; +pub const IPV6_V6ONLY: c_int = 27; +pub const SO_ERROR: c_int = 0x1007; +pub const SO_BROADCAST: c_int = 0x0020; +pub const IP_MULTICAST_LOOP: c_int = 11; +pub const IPV6_MULTICAST_LOOP: c_int = 11; +pub const IP_MULTICAST_TTL: c_int = 10; +pub const IP_ADD_MEMBERSHIP: c_int = 12; +pub const IP_DROP_MEMBERSHIP: c_int = 13; +pub const IPV6_ADD_MEMBERSHIP: c_int = 12; +pub const IPV6_DROP_MEMBERSHIP: c_int = 13; +pub const MSG_PEEK: c_int = 0x2; + +#[repr(C)] +pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, +} + +#[repr(C)] +pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: c_uint, +} + +pub const VOLUME_NAME_DOS: DWORD = 0x0; +pub const MOVEFILE_REPLACE_EXISTING: DWORD = 1; + +pub const FILE_BEGIN: DWORD = 0; +pub const FILE_CURRENT: DWORD = 1; +pub const FILE_END: DWORD = 2; + +pub const WAIT_OBJECT_0: DWORD = 0x00000000; +pub const WAIT_TIMEOUT: DWORD = 258; +pub const WAIT_FAILED: DWORD = 0xFFFFFFFF; + +pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001; 
+pub const PIPE_ACCESS_OUTBOUND: DWORD = 0x00000002; +pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000; +pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000; +pub const PIPE_WAIT: DWORD = 0x00000000; +pub const PIPE_TYPE_BYTE: DWORD = 0x00000000; +pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008; +pub const PIPE_READMODE_BYTE: DWORD = 0x00000000; + +pub const FD_SETSIZE: usize = 64; + +pub const STACK_SIZE_PARAM_IS_A_RESERVATION: DWORD = 0x00010000; + +pub const HEAP_ZERO_MEMORY: DWORD = 0x00000008; + +#[repr(C)] +#[cfg(not(target_pointer_width = "64"))] +pub struct WSADATA { + pub wVersion: WORD, + pub wHighVersion: WORD, + pub szDescription: [u8; WSADESCRIPTION_LEN + 1], + pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1], + pub iMaxSockets: u16, + pub iMaxUdpDg: u16, + pub lpVendorInfo: *mut u8, +} +#[repr(C)] +#[cfg(target_pointer_width = "64")] +pub struct WSADATA { + pub wVersion: WORD, + pub wHighVersion: WORD, + pub iMaxSockets: u16, + pub iMaxUdpDg: u16, + pub lpVendorInfo: *mut u8, + pub szDescription: [u8; WSADESCRIPTION_LEN + 1], + pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1], +} + +#[derive(Copy, Clone)] +#[repr(C)] +pub struct WSABUF { + pub len: ULONG, + pub buf: *mut CHAR, +} + +#[repr(C)] +pub struct WSAPROTOCOL_INFO { + pub dwServiceFlags1: DWORD, + pub dwServiceFlags2: DWORD, + pub dwServiceFlags3: DWORD, + pub dwServiceFlags4: DWORD, + pub dwProviderFlags: DWORD, + pub ProviderId: GUID, + pub dwCatalogEntryId: DWORD, + pub ProtocolChain: WSAPROTOCOLCHAIN, + pub iVersion: c_int, + pub iAddressFamily: c_int, + pub iMaxSockAddr: c_int, + pub iMinSockAddr: c_int, + pub iSocketType: c_int, + pub iProtocol: c_int, + pub iProtocolMaxOffset: c_int, + pub iNetworkByteOrder: c_int, + pub iSecurityScheme: c_int, + pub dwMessageSize: DWORD, + pub dwProviderReserved: DWORD, + pub szProtocol: [u16; (WSAPROTOCOL_LEN as usize) + 1], +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct WIN32_FILE_ATTRIBUTE_DATA { + pub dwFileAttributes: DWORD, + pub ftCreationTime: FILETIME, + pub ftLastAccessTime: FILETIME, + pub ftLastWriteTime: FILETIME, + pub nFileSizeHigh: DWORD, + pub nFileSizeLow: DWORD, +} + +#[repr(C)] +#[allow(dead_code)] // we only use some variants +pub enum FILE_INFO_BY_HANDLE_CLASS { + FileBasicInfo = 0, + FileStandardInfo = 1, + FileNameInfo = 2, + FileRenameInfo = 3, + FileDispositionInfo = 4, + FileAllocationInfo = 5, + FileEndOfFileInfo = 6, + FileStreamInfo = 7, + FileCompressionInfo = 8, + FileAttributeTagInfo = 9, + FileIdBothDirectoryInfo = 10, // 0xA + FileIdBothDirectoryRestartInfo = 11, // 0xB + FileIoPriorityHintInfo = 12, // 0xC + FileRemoteProtocolInfo = 13, // 0xD + FileFullDirectoryInfo = 14, // 0xE + FileFullDirectoryRestartInfo = 15, // 0xF + FileStorageInfo = 16, // 0x10 + FileAlignmentInfo = 17, // 0x11 + FileIdInfo = 18, // 0x12 + FileIdExtdDirectoryInfo = 19, // 0x13 + FileIdExtdDirectoryRestartInfo = 20, // 0x14 + MaximumFileInfoByHandlesClass, +} + +#[repr(C)] +pub struct FILE_BASIC_INFO { + pub CreationTime: LARGE_INTEGER, + pub LastAccessTime: LARGE_INTEGER, + pub LastWriteTime: LARGE_INTEGER, + pub ChangeTime: LARGE_INTEGER, + pub FileAttributes: DWORD, +} + +#[repr(C)] +pub struct FILE_END_OF_FILE_INFO { + pub EndOfFile: LARGE_INTEGER, +} + +#[repr(C)] +pub struct REPARSE_DATA_BUFFER { + pub ReparseTag: c_uint, + pub ReparseDataLength: c_ushort, + pub Reserved: c_ushort, + pub rest: (), +} + +#[repr(C)] +pub struct SYMBOLIC_LINK_REPARSE_BUFFER { + pub SubstituteNameOffset: c_ushort, + pub SubstituteNameLength: c_ushort, + pub 
PrintNameOffset: c_ushort, + pub PrintNameLength: c_ushort, + pub Flags: c_ulong, + pub PathBuffer: WCHAR, +} + +#[repr(C)] +pub struct MOUNT_POINT_REPARSE_BUFFER { + pub SubstituteNameOffset: c_ushort, + pub SubstituteNameLength: c_ushort, + pub PrintNameOffset: c_ushort, + pub PrintNameLength: c_ushort, + pub PathBuffer: WCHAR, +} + +pub type LPPROGRESS_ROUTINE = crate::option::Option< + unsafe extern "system" fn( + TotalFileSize: LARGE_INTEGER, + TotalBytesTransferred: LARGE_INTEGER, + StreamSize: LARGE_INTEGER, + StreamBytesTransferred: LARGE_INTEGER, + dwStreamNumber: DWORD, + dwCallbackReason: DWORD, + hSourceFile: HANDLE, + hDestinationFile: HANDLE, + lpData: LPVOID, + ) -> DWORD, +>; + +#[repr(C)] +pub struct CONDITION_VARIABLE { + pub ptr: LPVOID, +} +#[repr(C)] +pub struct SRWLOCK { + pub ptr: LPVOID, +} +#[repr(C)] +pub struct CRITICAL_SECTION { + CriticalSectionDebug: LPVOID, + LockCount: LONG, + RecursionCount: LONG, + OwningThread: HANDLE, + LockSemaphore: HANDLE, + SpinCount: ULONG_PTR, +} + +#[repr(C)] +pub struct REPARSE_MOUNTPOINT_DATA_BUFFER { + pub ReparseTag: DWORD, + pub ReparseDataLength: DWORD, + pub Reserved: WORD, + pub ReparseTargetLength: WORD, + pub ReparseTargetMaximumLength: WORD, + pub Reserved1: WORD, + pub ReparseTarget: WCHAR, +} + +#[repr(C)] +pub struct GUID { + pub Data1: DWORD, + pub Data2: WORD, + pub Data3: WORD, + pub Data4: [BYTE; 8], +} + +#[repr(C)] +pub struct WSAPROTOCOLCHAIN { + pub ChainLen: c_int, + pub ChainEntries: [DWORD; MAX_PROTOCOL_CHAIN as usize], +} + +#[repr(C)] +pub struct SECURITY_ATTRIBUTES { + pub nLength: DWORD, + pub lpSecurityDescriptor: LPVOID, + pub bInheritHandle: BOOL, +} + +#[repr(C)] +pub struct PROCESS_INFORMATION { + pub hProcess: HANDLE, + pub hThread: HANDLE, + pub dwProcessId: DWORD, + pub dwThreadId: DWORD, +} + +#[repr(C)] +pub struct STARTUPINFO { + pub cb: DWORD, + pub lpReserved: LPWSTR, + pub lpDesktop: LPWSTR, + pub lpTitle: LPWSTR, + pub dwX: DWORD, + pub dwY: DWORD, + pub dwXSize: DWORD, + pub dwYSize: DWORD, + pub dwXCountChars: DWORD, + pub dwYCountCharts: DWORD, + pub dwFillAttribute: DWORD, + pub dwFlags: DWORD, + pub wShowWindow: WORD, + pub cbReserved2: WORD, + pub lpReserved2: LPBYTE, + pub hStdInput: HANDLE, + pub hStdOutput: HANDLE, + pub hStdError: HANDLE, +} + +#[repr(C)] +pub struct SOCKADDR { + pub sa_family: ADDRESS_FAMILY, + pub sa_data: [CHAR; 14], +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct FILETIME { + pub dwLowDateTime: DWORD, + pub dwHighDateTime: DWORD, +} + +#[repr(C)] +pub struct OVERLAPPED { + pub Internal: *mut c_ulong, + pub InternalHigh: *mut c_ulong, + pub Offset: DWORD, + pub OffsetHigh: DWORD, + pub hEvent: HANDLE, +} + +#[repr(C)] +#[allow(dead_code)] // we only use some variants +pub enum ADDRESS_MODE { + AddrMode1616, + AddrMode1632, + AddrModeReal, + AddrModeFlat, +} + +#[repr(C)] +pub struct SOCKADDR_STORAGE_LH { + pub ss_family: ADDRESS_FAMILY, + pub __ss_pad1: [CHAR; 6], + pub __ss_align: i64, + pub __ss_pad2: [CHAR; 112], +} + +#[repr(C)] +pub struct ADDRINFOA { + pub ai_flags: c_int, + pub ai_family: c_int, + pub ai_socktype: c_int, + pub ai_protocol: c_int, + pub ai_addrlen: size_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut SOCKADDR, + pub ai_next: *mut ADDRINFOA, +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sockaddr_in { + pub sin_family: ADDRESS_FAMILY, + pub sin_port: USHORT, + pub sin_addr: in_addr, + pub sin_zero: [CHAR; 8], +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct sockaddr_in6 { + pub sin6_family: ADDRESS_FAMILY, + pub 
sin6_port: USHORT, + pub sin6_flowinfo: c_ulong, + pub sin6_addr: in6_addr, + pub sin6_scope_id: c_ulong, +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct in_addr { + pub s_addr: u32, +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct in6_addr { + pub s6_addr: [u8; 16], +} + +#[repr(C)] +#[derive(Copy, Clone)] +#[allow(dead_code)] // we only use some variants +pub enum EXCEPTION_DISPOSITION { + ExceptionContinueExecution, + ExceptionContinueSearch, + ExceptionNestedException, + ExceptionCollidedUnwind, +} + +#[repr(C)] +#[derive(Copy)] +pub struct fd_set { + pub fd_count: c_uint, + pub fd_array: [SOCKET; FD_SETSIZE], +} + +impl Clone for fd_set { + fn clone(&self) -> fd_set { + *self + } +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct timeval { + pub tv_sec: c_long, + pub tv_usec: c_long, +} + +// Functions forbidden when targeting UWP +cfg_if::cfg_if! { +if #[cfg(not(target_vendor = "uwp"))] { + pub const EXCEPTION_CONTINUE_SEARCH: LONG = 0; + pub const EXCEPTION_STACK_OVERFLOW: DWORD = 0xc00000fd; + pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15; + + #[repr(C)] + pub struct EXCEPTION_RECORD { + pub ExceptionCode: DWORD, + pub ExceptionFlags: DWORD, + pub ExceptionRecord: *mut EXCEPTION_RECORD, + pub ExceptionAddress: LPVOID, + pub NumberParameters: DWORD, + pub ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS] + } + + pub enum CONTEXT {} + + #[repr(C)] + pub struct EXCEPTION_POINTERS { + pub ExceptionRecord: *mut EXCEPTION_RECORD, + pub ContextRecord: *mut CONTEXT, + } + + pub type PVECTORED_EXCEPTION_HANDLER = extern "system" + fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG; + + #[repr(C)] + #[derive(Copy, Clone)] + pub struct CONSOLE_READCONSOLE_CONTROL { + pub nLength: ULONG, + pub nInitialChars: ULONG, + pub dwCtrlWakeupMask: ULONG, + pub dwControlKeyState: ULONG, + } + + pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL; + + #[repr(C)] + pub struct BY_HANDLE_FILE_INFORMATION { + pub dwFileAttributes: DWORD, + pub ftCreationTime: FILETIME, + pub ftLastAccessTime: FILETIME, + pub ftLastWriteTime: FILETIME, + pub dwVolumeSerialNumber: DWORD, + pub nFileSizeHigh: DWORD, + pub nFileSizeLow: DWORD, + pub nNumberOfLinks: DWORD, + pub nFileIndexHigh: DWORD, + pub nFileIndexLow: DWORD, + } + + pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION; + pub type LPCVOID = *const c_void; + + pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001; + + pub const TOKEN_READ: DWORD = 0x20008; + + extern "system" { + #[link_name = "SystemFunction036"] + pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN; + + pub fn ReadConsoleW(hConsoleInput: HANDLE, + lpBuffer: LPVOID, + nNumberOfCharsToRead: DWORD, + lpNumberOfCharsRead: LPDWORD, + pInputControl: PCONSOLE_READCONSOLE_CONTROL) -> BOOL; + + pub fn WriteConsoleW(hConsoleOutput: HANDLE, + lpBuffer: LPCVOID, + nNumberOfCharsToWrite: DWORD, + lpNumberOfCharsWritten: LPDWORD, + lpReserved: LPVOID) -> BOOL; + + pub fn GetConsoleMode(hConsoleHandle: HANDLE, + lpMode: LPDWORD) -> BOOL; + // Allowed but unused by UWP + pub fn OpenProcessToken(ProcessHandle: HANDLE, + DesiredAccess: DWORD, + TokenHandle: *mut HANDLE) -> BOOL; + pub fn GetUserProfileDirectoryW(hToken: HANDLE, + lpProfileDir: LPWSTR, + lpcchSize: *mut DWORD) -> BOOL; + pub fn GetFileInformationByHandle(hFile: HANDLE, + lpFileInformation: LPBY_HANDLE_FILE_INFORMATION) + -> BOOL; + pub fn SetHandleInformation(hObject: HANDLE, + dwMask: DWORD, + dwFlags: DWORD) -> BOOL; + pub fn 
AddVectoredExceptionHandler(FirstHandler: ULONG, + VectoredHandler: PVECTORED_EXCEPTION_HANDLER) + -> LPVOID; + pub fn CreateHardLinkW(lpSymlinkFileName: LPCWSTR, + lpTargetFileName: LPCWSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES) + -> BOOL; + } +} +} + +// UWP specific functions & types +cfg_if::cfg_if! { +if #[cfg(target_vendor = "uwp")] { + pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002; + + #[repr(C)] + pub struct FILE_STANDARD_INFO { + pub AllocationSize: LARGE_INTEGER, + pub EndOfFile: LARGE_INTEGER, + pub NumberOfLinks: DWORD, + pub DeletePending: BOOLEAN, + pub Directory: BOOLEAN, + } + + extern "system" { + pub fn GetFileInformationByHandleEx(hFile: HANDLE, + fileInfoClass: FILE_INFO_BY_HANDLE_CLASS, + lpFileInformation: LPVOID, + dwBufferSize: DWORD) -> BOOL; + pub fn BCryptGenRandom(hAlgorithm: LPVOID, pBuffer: *mut u8, + cbBuffer: ULONG, dwFlags: ULONG) -> LONG; + } +} +} + +// Shared between Desktop & UWP +extern "system" { + pub fn WSAStartup(wVersionRequested: WORD, lpWSAData: LPWSADATA) -> c_int; + pub fn WSACleanup() -> c_int; + pub fn WSAGetLastError() -> c_int; + pub fn WSADuplicateSocketW( + s: SOCKET, + dwProcessId: DWORD, + lpProtocolInfo: LPWSAPROTOCOL_INFO, + ) -> c_int; + pub fn WSASend( + s: SOCKET, + lpBuffers: LPWSABUF, + dwBufferCount: DWORD, + lpNumberOfBytesSent: LPDWORD, + dwFlags: DWORD, + lpOverlapped: LPWSAOVERLAPPED, + lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE, + ) -> c_int; + pub fn WSARecv( + s: SOCKET, + lpBuffers: LPWSABUF, + dwBufferCount: DWORD, + lpNumberOfBytesRecvd: LPDWORD, + lpFlags: LPDWORD, + lpOverlapped: LPWSAOVERLAPPED, + lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE, + ) -> c_int; + pub fn GetCurrentProcessId() -> DWORD; + pub fn WSASocketW( + af: c_int, + kind: c_int, + protocol: c_int, + lpProtocolInfo: LPWSAPROTOCOL_INFO, + g: GROUP, + dwFlags: DWORD, + ) -> SOCKET; + pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int; + pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION); + pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION); + pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOL; + pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION); + pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION); + + pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL; + pub fn SetFileAttributesW(lpFileName: LPCWSTR, dwFileAttributes: DWORD) -> BOOL; + pub fn SetLastError(dwErrCode: DWORD); + pub fn GetCommandLineW() -> *mut LPCWSTR; + pub fn GetTempPathW(nBufferLength: DWORD, lpBuffer: LPCWSTR) -> DWORD; + pub fn GetCurrentProcess() -> HANDLE; + pub fn GetCurrentThread() -> HANDLE; + pub fn GetStdHandle(which: DWORD) -> HANDLE; + pub fn ExitProcess(uExitCode: c_uint) -> !; + pub fn DeviceIoControl( + hDevice: HANDLE, + dwIoControlCode: DWORD, + lpInBuffer: LPVOID, + nInBufferSize: DWORD, + lpOutBuffer: LPVOID, + nOutBufferSize: DWORD, + lpBytesReturned: LPDWORD, + lpOverlapped: LPOVERLAPPED, + ) -> BOOL; + pub fn CreateThread( + lpThreadAttributes: LPSECURITY_ATTRIBUTES, + dwStackSize: SIZE_T, + lpStartAddress: extern "system" fn(*mut c_void) -> DWORD, + lpParameter: LPVOID, + dwCreationFlags: DWORD, + lpThreadId: LPDWORD, + ) -> HANDLE; + pub fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD; + pub fn SwitchToThread() -> BOOL; + pub fn Sleep(dwMilliseconds: DWORD); + pub fn GetProcessId(handle: HANDLE) -> DWORD; + pub fn CopyFileExW( + lpExistingFileName: LPCWSTR, + 
lpNewFileName: LPCWSTR, + lpProgressRoutine: LPPROGRESS_ROUTINE, + lpData: LPVOID, + pbCancel: LPBOOL, + dwCopyFlags: DWORD, + ) -> BOOL; + pub fn FormatMessageW( + flags: DWORD, + lpSrc: LPVOID, + msgId: DWORD, + langId: DWORD, + buf: LPWSTR, + nsize: DWORD, + args: *const c_void, + ) -> DWORD; + pub fn TlsAlloc() -> DWORD; + pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID; + pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL; + pub fn GetLastError() -> DWORD; + pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL; + pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL; + pub fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: LPDWORD) -> BOOL; + pub fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) -> BOOL; + pub fn CreateProcessW( + lpApplicationName: LPCWSTR, + lpCommandLine: LPWSTR, + lpProcessAttributes: LPSECURITY_ATTRIBUTES, + lpThreadAttributes: LPSECURITY_ATTRIBUTES, + bInheritHandles: BOOL, + dwCreationFlags: DWORD, + lpEnvironment: LPVOID, + lpCurrentDirectory: LPCWSTR, + lpStartupInfo: LPSTARTUPINFO, + lpProcessInformation: LPPROCESS_INFORMATION, + ) -> BOOL; + pub fn GetEnvironmentVariableW(n: LPCWSTR, v: LPWSTR, nsize: DWORD) -> DWORD; + pub fn SetEnvironmentVariableW(n: LPCWSTR, v: LPCWSTR) -> BOOL; + pub fn GetEnvironmentStringsW() -> LPWCH; + pub fn FreeEnvironmentStringsW(env_ptr: LPWCH) -> BOOL; + pub fn GetModuleFileNameW(hModule: HMODULE, lpFilename: LPWSTR, nSize: DWORD) -> DWORD; + pub fn CreateDirectoryW( + lpPathName: LPCWSTR, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + ) -> BOOL; + pub fn DeleteFileW(lpPathName: LPCWSTR) -> BOOL; + pub fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: LPWSTR) -> DWORD; + pub fn SetCurrentDirectoryW(lpPathName: LPCWSTR) -> BOOL; + pub fn WideCharToMultiByte( + CodePage: UINT, + dwFlags: DWORD, + lpWideCharStr: LPCWSTR, + cchWideChar: c_int, + lpMultiByteStr: LPSTR, + cbMultiByte: c_int, + lpDefaultChar: LPCSTR, + lpUsedDefaultChar: LPBOOL, + ) -> c_int; + + pub fn closesocket(socket: SOCKET) -> c_int; + pub fn recv(socket: SOCKET, buf: *mut c_void, len: c_int, flags: c_int) -> c_int; + pub fn send(socket: SOCKET, buf: *const c_void, len: c_int, flags: c_int) -> c_int; + pub fn recvfrom( + socket: SOCKET, + buf: *mut c_void, + len: c_int, + flags: c_int, + addr: *mut SOCKADDR, + addrlen: *mut c_int, + ) -> c_int; + pub fn sendto( + socket: SOCKET, + buf: *const c_void, + len: c_int, + flags: c_int, + addr: *const SOCKADDR, + addrlen: c_int, + ) -> c_int; + pub fn shutdown(socket: SOCKET, how: c_int) -> c_int; + pub fn accept(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> SOCKET; + pub fn DuplicateHandle( + hSourceProcessHandle: HANDLE, + hSourceHandle: HANDLE, + hTargetProcessHandle: HANDLE, + lpTargetHandle: LPHANDLE, + dwDesiredAccess: DWORD, + bInheritHandle: BOOL, + dwOptions: DWORD, + ) -> BOOL; + pub fn ReadFile( + hFile: HANDLE, + lpBuffer: LPVOID, + nNumberOfBytesToRead: DWORD, + lpNumberOfBytesRead: LPDWORD, + lpOverlapped: LPOVERLAPPED, + ) -> BOOL; + pub fn WriteFile( + hFile: HANDLE, + lpBuffer: LPVOID, + nNumberOfBytesToWrite: DWORD, + lpNumberOfBytesWritten: LPDWORD, + lpOverlapped: LPOVERLAPPED, + ) -> BOOL; + pub fn CloseHandle(hObject: HANDLE) -> BOOL; + pub fn MoveFileExW(lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, dwFlags: DWORD) + -> BOOL; + pub fn SetFilePointerEx( + hFile: HANDLE, + liDistanceToMove: LARGE_INTEGER, + lpNewFilePointer: PLARGE_INTEGER, + dwMoveMethod: DWORD, + ) -> BOOL; + pub fn 
FlushFileBuffers(hFile: HANDLE) -> BOOL; + pub fn CreateFileW( + lpFileName: LPCWSTR, + dwDesiredAccess: DWORD, + dwShareMode: DWORD, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + dwCreationDisposition: DWORD, + dwFlagsAndAttributes: DWORD, + hTemplateFile: HANDLE, + ) -> HANDLE; + + pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW) -> HANDLE; + pub fn FindNextFileW(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW) -> BOOL; + pub fn FindClose(findFile: HANDLE) -> BOOL; + pub fn getsockopt( + s: SOCKET, + level: c_int, + optname: c_int, + optval: *mut c_char, + optlen: *mut c_int, + ) -> c_int; + pub fn setsockopt( + s: SOCKET, + level: c_int, + optname: c_int, + optval: *const c_void, + optlen: c_int, + ) -> c_int; + pub fn getsockname(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int; + pub fn getpeername(socket: SOCKET, address: *mut SOCKADDR, address_len: *mut c_int) -> c_int; + pub fn bind(socket: SOCKET, address: *const SOCKADDR, address_len: socklen_t) -> c_int; + pub fn listen(socket: SOCKET, backlog: c_int) -> c_int; + pub fn connect(socket: SOCKET, address: *const SOCKADDR, len: c_int) -> c_int; + pub fn getaddrinfo( + node: *const c_char, + service: *const c_char, + hints: *const ADDRINFOA, + res: *mut *mut ADDRINFOA, + ) -> c_int; + pub fn freeaddrinfo(res: *mut ADDRINFOA); + + pub fn GetProcAddress(handle: HMODULE, name: LPCSTR) -> *mut c_void; + pub fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE; + + pub fn GetSystemTimeAsFileTime(lpSystemTimeAsFileTime: LPFILETIME); + + pub fn CreateEventW( + lpEventAttributes: LPSECURITY_ATTRIBUTES, + bManualReset: BOOL, + bInitialState: BOOL, + lpName: LPCWSTR, + ) -> HANDLE; + pub fn WaitForMultipleObjects( + nCount: DWORD, + lpHandles: *const HANDLE, + bWaitAll: BOOL, + dwMilliseconds: DWORD, + ) -> DWORD; + pub fn CreateNamedPipeW( + lpName: LPCWSTR, + dwOpenMode: DWORD, + dwPipeMode: DWORD, + nMaxInstances: DWORD, + nOutBufferSize: DWORD, + nInBufferSize: DWORD, + nDefaultTimeOut: DWORD, + lpSecurityAttributes: LPSECURITY_ATTRIBUTES, + ) -> HANDLE; + pub fn CancelIo(handle: HANDLE) -> BOOL; + pub fn GetOverlappedResult( + hFile: HANDLE, + lpOverlapped: LPOVERLAPPED, + lpNumberOfBytesTransferred: LPDWORD, + bWait: BOOL, + ) -> BOOL; + pub fn select( + nfds: c_int, + readfds: *mut fd_set, + writefds: *mut fd_set, + exceptfds: *mut fd_set, + timeout: *const timeval, + ) -> c_int; + + pub fn GetProcessHeap() -> HANDLE; + pub fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; + pub fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; + pub fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; +} + +// Functions that aren't available on every version of Windows that we support, +// but we still use them and just provide some form of a fallback implementation. +compat_fn! 
{ + kernel32: + + pub fn CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR, + _lpTargetFileName: LPCWSTR, + _dwFlags: DWORD) -> BOOLEAN { + SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 + } + pub fn GetFinalPathNameByHandleW(_hFile: HANDLE, + _lpszFilePath: LPCWSTR, + _cchFilePath: DWORD, + _dwFlags: DWORD) -> DWORD { + SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 + } + #[cfg(not(target_vendor = "uwp"))] + pub fn SetThreadStackGuarantee(_size: *mut c_ulong) -> BOOL { + SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 + } + pub fn SetThreadDescription(hThread: HANDLE, + lpThreadDescription: LPCWSTR) -> HRESULT { + SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); E_NOTIMPL + } + pub fn SetFileInformationByHandle(_hFile: HANDLE, + _FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, + _lpFileInformation: LPVOID, + _dwBufferSize: DWORD) -> BOOL { + SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 + } + pub fn GetSystemTimePreciseAsFileTime(lpSystemTimeAsFileTime: LPFILETIME) + -> () { + GetSystemTimeAsFileTime(lpSystemTimeAsFileTime) + } + pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE, + SRWLock: PSRWLOCK, + dwMilliseconds: DWORD, + Flags: ULONG) -> BOOL { + panic!("condition variables not available") + } + pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE) + -> () { + panic!("condition variables not available") + } + pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE) + -> () { + panic!("condition variables not available") + } + pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> () { + panic!("rwlocks not available") + } + pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK) -> () { + panic!("rwlocks not available") + } + pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK) -> () { + panic!("rwlocks not available") + } + pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK) -> () { + panic!("rwlocks not available") + } + pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN { + panic!("rwlocks not available") + } + pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN { + panic!("rwlocks not available") + } +} diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs new file mode 100644 index 00000000000..1a5421facd0 --- /dev/null +++ b/library/std/src/sys/windows/cmath.rs @@ -0,0 +1,92 @@ +#![cfg(not(test))] + +use libc::{c_double, c_float}; + +extern "C" { + pub fn acos(n: c_double) -> c_double; + pub fn asin(n: c_double) -> c_double; + pub fn atan(n: c_double) -> c_double; + pub fn atan2(a: c_double, b: c_double) -> c_double; + pub fn cbrt(n: c_double) -> c_double; + pub fn cbrtf(n: c_float) -> c_float; + pub fn cosh(n: c_double) -> c_double; + pub fn expm1(n: c_double) -> c_double; + pub fn expm1f(n: c_float) -> c_float; + pub fn fdim(a: c_double, b: c_double) -> c_double; + pub fn fdimf(a: c_float, b: c_float) -> c_float; + #[cfg_attr(target_env = "msvc", link_name = "_hypot")] + pub fn hypot(x: c_double, y: c_double) -> c_double; + #[cfg_attr(target_env = "msvc", link_name = "_hypotf")] + pub fn hypotf(x: c_float, y: c_float) -> c_float; + pub fn log1p(n: c_double) -> c_double; + pub fn log1pf(n: c_float) -> c_float; + pub fn sinh(n: c_double) -> c_double; + pub fn tan(n: c_double) -> c_double; + pub fn tanh(n: c_double) -> c_double; +} + +pub use self::shims::*; + +#[cfg(not(all(target_env = "msvc", target_arch = "x86")))] +mod shims { + use libc::c_float; + + extern "C" { + pub fn acosf(n: c_float) -> c_float; + pub fn asinf(n: c_float) -> c_float; + pub fn atan2f(a: 
c_float, b: c_float) -> c_float; + pub fn atanf(n: c_float) -> c_float; + pub fn coshf(n: c_float) -> c_float; + pub fn sinhf(n: c_float) -> c_float; + pub fn tanf(n: c_float) -> c_float; + pub fn tanhf(n: c_float) -> c_float; + } +} + +// On 32-bit x86 MSVC these functions aren't defined, so we just define shims +// which promote everything fo f64, perform the calculation, and then demote +// back to f32. While not precisely correct should be "correct enough" for now. +#[cfg(all(target_env = "msvc", target_arch = "x86"))] +mod shims { + use libc::c_float; + + #[inline] + pub unsafe fn acosf(n: c_float) -> c_float { + f64::acos(n as f64) as c_float + } + + #[inline] + pub unsafe fn asinf(n: c_float) -> c_float { + f64::asin(n as f64) as c_float + } + + #[inline] + pub unsafe fn atan2f(n: c_float, b: c_float) -> c_float { + f64::atan2(n as f64, b as f64) as c_float + } + + #[inline] + pub unsafe fn atanf(n: c_float) -> c_float { + f64::atan(n as f64) as c_float + } + + #[inline] + pub unsafe fn coshf(n: c_float) -> c_float { + f64::cosh(n as f64) as c_float + } + + #[inline] + pub unsafe fn sinhf(n: c_float) -> c_float { + f64::sinh(n as f64) as c_float + } + + #[inline] + pub unsafe fn tanf(n: c_float) -> c_float { + f64::tan(n as f64) as c_float + } + + #[inline] + pub unsafe fn tanhf(n: c_float) -> c_float { + f64::tanh(n as f64) as c_float + } +} diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs new file mode 100644 index 00000000000..d6d433f9d08 --- /dev/null +++ b/library/std/src/sys/windows/compat.rs @@ -0,0 +1,72 @@ +//! A "compatibility layer" for spanning XP and Windows 7 +//! +//! The standard library currently binds many functions that are not available +//! on Windows XP, but we would also like to support building executables that +//! run on XP. To do this we specify all non-XP APIs as having a fallback +//! implementation to do something reasonable. +//! +//! This dynamic runtime detection of whether a function is available is +//! implemented with `GetModuleHandle` and `GetProcAddress` paired with a +//! static-per-function which caches the result of the first check. In this +//! manner we pay a semi-large one-time cost up front for detecting whether a +//! function is available but afterwards it's just a load and a jump. + +use crate::ffi::CString; +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sys::c; + +pub fn lookup(module: &str, symbol: &str) -> Option<usize> { + let mut module: Vec<u16> = module.encode_utf16().collect(); + module.push(0); + let symbol = CString::new(symbol).unwrap(); + unsafe { + let handle = c::GetModuleHandleW(module.as_ptr()); + match c::GetProcAddress(handle, symbol.as_ptr()) as usize { + 0 => None, + n => Some(n), + } + } +} + +pub fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str, fallback: usize) -> usize { + let value = lookup(module, symbol).unwrap_or(fallback); + ptr.store(value, Ordering::SeqCst); + value +} + +macro_rules! 
compat_fn { + ($module:ident: $( + $(#[$meta:meta])* + pub fn $symbol:ident($($argname:ident: $argtype:ty),*) + -> $rettype:ty { + $($body:expr);* + } + )*) => ($( + #[allow(unused_variables)] + $(#[$meta])* + pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype { + use crate::sync::atomic::{AtomicUsize, Ordering}; + use crate::mem; + type F = unsafe extern "system" fn($($argtype),*) -> $rettype; + + static PTR: AtomicUsize = AtomicUsize::new(0); + + fn load() -> usize { + crate::sys::compat::store_func(&PTR, + stringify!($module), + stringify!($symbol), + fallback as usize) + } + unsafe extern "system" fn fallback($($argname: $argtype),*) + -> $rettype { + $($body);* + } + + let addr = match PTR.load(Ordering::SeqCst) { + 0 => load(), + n => n, + }; + mem::transmute::<usize, F>(addr)($($argname),*) + } + )*) +} diff --git a/library/std/src/sys/windows/condvar.rs b/library/std/src/sys/windows/condvar.rs new file mode 100644 index 00000000000..8f7f6854cc2 --- /dev/null +++ b/library/std/src/sys/windows/condvar.rs @@ -0,0 +1,56 @@ +use crate::cell::UnsafeCell; +use crate::sys::c; +use crate::sys::mutex::{self, Mutex}; +use crate::sys::os; +use crate::time::Duration; + +pub struct Condvar { + inner: UnsafeCell<c::CONDITION_VARIABLE>, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + pub const fn new() -> Condvar { + Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) } + } + + #[inline] + pub unsafe fn init(&mut self) {} + + #[inline] + pub unsafe fn wait(&self, mutex: &Mutex) { + let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0); + debug_assert!(r != 0); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let r = c::SleepConditionVariableSRW( + self.inner.get(), + mutex::raw(mutex), + super::dur2timeout(dur), + 0, + ); + if r == 0 { + debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize); + false + } else { + true + } + } + + #[inline] + pub unsafe fn notify_one(&self) { + c::WakeConditionVariable(self.inner.get()) + } + + #[inline] + pub unsafe fn notify_all(&self) { + c::WakeAllConditionVariable(self.inner.get()) + } + + pub unsafe fn destroy(&self) { + // ... + } +} diff --git a/library/std/src/sys/windows/env.rs b/library/std/src/sys/windows/env.rs new file mode 100644 index 00000000000..f0a99d6200c --- /dev/null +++ b/library/std/src/sys/windows/env.rs @@ -0,0 +1,9 @@ +pub mod os { + pub const FAMILY: &str = "windows"; + pub const OS: &str = "windows"; + pub const DLL_PREFIX: &str = ""; + pub const DLL_SUFFIX: &str = ".dll"; + pub const DLL_EXTENSION: &str = "dll"; + pub const EXE_SUFFIX: &str = ".exe"; + pub const EXE_EXTENSION: &str = "exe"; +} diff --git a/library/std/src/sys/windows/ext/ffi.rs b/library/std/src/sys/windows/ext/ffi.rs new file mode 100644 index 00000000000..6e78119383f --- /dev/null +++ b/library/std/src/sys/windows/ext/ffi.rs @@ -0,0 +1,142 @@ +//! Windows-specific extensions to the primitives in the `std::ffi` module. +//! +//! # Overview +//! +//! For historical reasons, the Windows API uses a form of potentially +//! ill-formed UTF-16 encoding for strings. Specifically, the 16-bit +//! code units in Windows strings may contain [isolated surrogate code +//! points which are not paired together][ill-formed-utf-16]. The +//! Unicode standard requires that surrogate code points (those in the +//! range U+D800 to U+DFFF) always be *paired*, because in the UTF-16 +//! 
encoding a *surrogate code unit pair* is used to encode a single +//! character. For compatibility with code that does not enforce +//! these pairings, Windows does not enforce them, either. +//! +//! While it is not always possible to convert such a string losslessly into +//! a valid UTF-16 string (or even UTF-8), it is often desirable to be +//! able to round-trip such a string from and to Windows APIs +//! losslessly. For example, some Rust code may be "bridging" some +//! Windows APIs together, just passing `WCHAR` strings among those +//! APIs without ever really looking into the strings. +//! +//! If Rust code *does* need to look into those strings, it can +//! convert them to valid UTF-8, possibly lossily, by substituting +//! invalid sequences with [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], as is +//! conventionally done in other Rust APIs that deal with string +//! encodings. +//! +//! # `OsStringExt` and `OsStrExt` +//! +//! [`OsString`] is the Rust wrapper for owned strings in the +//! preferred representation of the operating system. On Windows, +//! this struct gets augmented with an implementation of the +//! [`OsStringExt`] trait, which has a [`from_wide`] method. This +//! lets you create an [`OsString`] from a `&[u16]` slice; presumably +//! you get such a slice out of a `WCHAR` Windows API. +//! +//! Similarly, [`OsStr`] is the Rust wrapper for borrowed strings from +//! preferred representation of the operating system. On Windows, the +//! [`OsStrExt`] trait provides the [`encode_wide`] method, which +//! outputs an [`EncodeWide`] iterator. You can [`collect`] this +//! iterator, for example, to obtain a `Vec<u16>`; you can later get a +//! pointer to this vector's contents and feed it to Windows APIs. +//! +//! These traits, along with [`OsString`] and [`OsStr`], work in +//! conjunction so that it is possible to **round-trip** strings from +//! Windows and back, with no loss of data, even if the strings are +//! ill-formed UTF-16. +//! +//! [ill-formed-utf-16]: https://simonsapin.github.io/wtf-8/#ill-formed-utf-16 +//! [`OsString`]: ../../../ffi/struct.OsString.html +//! [`OsStr`]: ../../../ffi/struct.OsStr.html +//! [`OsStringExt`]: trait.OsStringExt.html +//! [`OsStrExt`]: trait.OsStrExt.html +//! [`EncodeWide`]: struct.EncodeWide.html +//! [`from_wide`]: trait.OsStringExt.html#tymethod.from_wide +//! [`encode_wide`]: trait.OsStrExt.html#tymethod.encode_wide +//! [`collect`]: ../../../iter/trait.Iterator.html#method.collect +//! [U+FFFD]: ../../../char/constant.REPLACEMENT_CHARACTER.html + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::ffi::{OsStr, OsString}; +use crate::sys::os_str::Buf; +use crate::sys_common::wtf8::Wtf8Buf; +use crate::sys_common::{AsInner, FromInner}; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use crate::sys_common::wtf8::EncodeWide; + +/// Windows-specific extensions to [`OsString`]. +/// +/// [`OsString`]: ../../../../std/ffi/struct.OsString.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait OsStringExt { + /// Creates an `OsString` from a potentially ill-formed UTF-16 slice of + /// 16-bit code units. + /// + /// This is lossless: calling [`encode_wide`] on the resulting string + /// will always return the original code units. + /// + /// # Examples + /// + /// ``` + /// use std::ffi::OsString; + /// use std::os::windows::prelude::*; + /// + /// // UTF-16 encoding for "Unicode". 
+ /// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065]; + /// + /// let string = OsString::from_wide(&source[..]); + /// ``` + /// + /// [`encode_wide`]: ./trait.OsStrExt.html#tymethod.encode_wide + #[stable(feature = "rust1", since = "1.0.0")] + fn from_wide(wide: &[u16]) -> Self; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl OsStringExt for OsString { + fn from_wide(wide: &[u16]) -> OsString { + FromInner::from_inner(Buf { inner: Wtf8Buf::from_wide(wide) }) + } +} + +/// Windows-specific extensions to [`OsStr`]. +/// +/// [`OsStr`]: ../../../../std/ffi/struct.OsStr.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait OsStrExt { + /// Re-encodes an `OsStr` as a wide character sequence, i.e., potentially + /// ill-formed UTF-16. + /// + /// This is lossless: calling [`OsString::from_wide`] and then + /// `encode_wide` on the result will yield the original code units. + /// Note that the encoding does not add a final null terminator. + /// + /// # Examples + /// + /// ``` + /// use std::ffi::OsString; + /// use std::os::windows::prelude::*; + /// + /// // UTF-16 encoding for "Unicode". + /// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065]; + /// + /// let string = OsString::from_wide(&source[..]); + /// + /// let result: Vec<u16> = string.encode_wide().collect(); + /// assert_eq!(&source[..], &result[..]); + /// ``` + /// + /// [`OsString::from_wide`]: ./trait.OsStringExt.html#tymethod.from_wide + #[stable(feature = "rust1", since = "1.0.0")] + fn encode_wide(&self) -> EncodeWide<'_>; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl OsStrExt for OsStr { + fn encode_wide(&self) -> EncodeWide<'_> { + self.as_inner().inner.encode_wide() + } +} diff --git a/library/std/src/sys/windows/ext/fs.rs b/library/std/src/sys/windows/ext/fs.rs new file mode 100644 index 00000000000..81b2bf99872 --- /dev/null +++ b/library/std/src/sys/windows/ext/fs.rs @@ -0,0 +1,565 @@ +//! Windows-specific extensions for the primitives in the `std::fs` module. + +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs::{self, Metadata, OpenOptions}; +use crate::io; +use crate::path::Path; +use crate::sys; +use crate::sys_common::{AsInner, AsInnerMut}; + +/// Windows-specific extensions to [`File`]. +/// +/// [`File`]: ../../../fs/struct.File.html +#[stable(feature = "file_offset", since = "1.15.0")] +pub trait FileExt { + /// Seeks to a given position and reads a number of bytes. + /// + /// Returns the number of bytes read. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. The current cursor **is** affected by this + /// function, it is set to the end of the read. + /// + /// Reading beyond the end of the file will always return with a length of + /// 0\. + /// + /// Note that similar to `File::read`, it is not an error to return with a + /// short read. When returning from such a short read, the file pointer is + /// still updated. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs::File; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let mut file = File::open("foo.txt")?; + /// let mut buffer = [0; 10]; + /// + /// // Read 10 bytes, starting 72 bytes from the + /// // start of the file. 
+ /// file.seek_read(&mut buffer[..], 72)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn seek_read(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>; + + /// Seeks to a given position and writes a number of bytes. + /// + /// Returns the number of bytes written. + /// + /// The offset is relative to the start of the file and thus independent + /// from the current cursor. The current cursor **is** affected by this + /// function, it is set to the end of the write. + /// + /// When writing beyond the end of the file, the file is appropriately + /// extended and the intermediate bytes are left uninitialized. + /// + /// Note that similar to `File::write`, it is not an error to return a + /// short write. When returning from such a short write, the file pointer + /// is still updated. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::File; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> std::io::Result<()> { + /// let mut buffer = File::create("foo.txt")?; + /// + /// // Write a byte string starting 72 bytes from + /// // the start of the file. + /// buffer.seek_write(b"some bytes", 72)?; + /// Ok(()) + /// } + /// ``` + #[stable(feature = "file_offset", since = "1.15.0")] + fn seek_write(&self, buf: &[u8], offset: u64) -> io::Result<usize>; +} + +#[stable(feature = "file_offset", since = "1.15.0")] +impl FileExt for fs::File { + fn seek_read(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.as_inner().read_at(buf, offset) + } + + fn seek_write(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.as_inner().write_at(buf, offset) + } +} + +/// Windows-specific extensions to [`fs::OpenOptions`]. +/// +/// [`fs::OpenOptions`]: ../../../../std/fs/struct.OpenOptions.html +#[stable(feature = "open_options_ext", since = "1.10.0")] +pub trait OpenOptionsExt { + /// Overrides the `dwDesiredAccess` argument to the call to [`CreateFile`] + /// with the specified value. + /// + /// This will override the `read`, `write`, and `append` flags on the + /// `OpenOptions` structure. This method provides fine-grained control over + /// the permissions to read, write and append data, attributes (like hidden + /// and system), and extended attributes. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::OpenOptions; + /// use std::os::windows::prelude::*; + /// + /// // Open without read and write permission, for example if you only need + /// // to call `stat` on the file + /// let file = OpenOptions::new().access_mode(0).open("foo.txt"); + /// ``` + /// + /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn access_mode(&mut self, access: u32) -> &mut Self; + + /// Overrides the `dwShareMode` argument to the call to [`CreateFile`] with + /// the specified value. + /// + /// By default `share_mode` is set to + /// `FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`. This allows + /// other processes to read, write, and delete/rename the same file + /// while it is open. Removing any of the flags will prevent other + /// processes from performing the corresponding operation until the file + /// handle is closed. + /// + /// # Examples + /// + /// ```no_run + /// use std::fs::OpenOptions; + /// use std::os::windows::prelude::*; + /// + /// // Do not allow others to read or modify this file while we have it open + /// // for writing. 
+ /// let file = OpenOptions::new() + /// .write(true) + /// .share_mode(0) + /// .open("foo.txt"); + /// ``` + /// + /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn share_mode(&mut self, val: u32) -> &mut Self; + + /// Sets extra flags for the `dwFileFlags` argument to the call to + /// [`CreateFile2`] to the specified value (or combines it with + /// `attributes` and `security_qos_flags` to set the `dwFlagsAndAttributes` + /// for [`CreateFile`]). + /// + /// Custom flags can only set flags, not remove flags set by Rust's options. + /// This option overwrites any previously set custom flags. + /// + /// # Examples + /// + /// ```no_run + /// # #[cfg(for_demonstration_only)] + /// extern crate winapi; + /// # mod winapi { pub const FILE_FLAG_DELETE_ON_CLOSE: u32 = 0x04000000; } + /// + /// use std::fs::OpenOptions; + /// use std::os::windows::prelude::*; + /// + /// let file = OpenOptions::new() + /// .create(true) + /// .write(true) + /// .custom_flags(winapi::FILE_FLAG_DELETE_ON_CLOSE) + /// .open("foo.txt"); + /// ``` + /// + /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn custom_flags(&mut self, flags: u32) -> &mut Self; + + /// Sets the `dwFileAttributes` argument to the call to [`CreateFile2`] to + /// the specified value (or combines it with `custom_flags` and + /// `security_qos_flags` to set the `dwFlagsAndAttributes` for + /// [`CreateFile`]). + /// + /// If a _new_ file is created because it does not yet exist and + /// `.create(true)` or `.create_new(true)` are specified, the new file is + /// given the attributes declared with `.attributes()`. + /// + /// If an _existing_ file is opened with `.create(true).truncate(true)`, its + /// existing attributes are preserved and combined with the ones declared + /// with `.attributes()`. + /// + /// In all other cases the attributes get ignored. + /// + /// # Examples + /// + /// ```no_run + /// # #[cfg(for_demonstration_only)] + /// extern crate winapi; + /// # mod winapi { pub const FILE_ATTRIBUTE_HIDDEN: u32 = 2; } + /// + /// use std::fs::OpenOptions; + /// use std::os::windows::prelude::*; + /// + /// let file = OpenOptions::new() + /// .write(true) + /// .create(true) + /// .attributes(winapi::FILE_ATTRIBUTE_HIDDEN) + /// .open("foo.txt"); + /// ``` + /// + /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn attributes(&mut self, val: u32) -> &mut Self; + + /// Sets the `dwSecurityQosFlags` argument to the call to [`CreateFile2`] to + /// the specified value (or combines it with `custom_flags` and `attributes` + /// to set the `dwFlagsAndAttributes` for [`CreateFile`]). + /// + /// By default `security_qos_flags` is not set. It should be specified when + /// opening a named pipe, to control to which degree a server process can + /// act on behalf of a client process (security impersonation level). 
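
// An illustrative aside on `custom_flags` (documented above), not part of the patch itself:
// besides extra attributes, `custom_flags` is how std users can ask CreateFileW for a handle
// to a *directory*, which requires FILE_FLAG_BACKUP_SEMANTICS. A minimal sketch assuming a
// Windows target; the constant (0x0200_0000 in the Windows headers) is defined locally so the
// sketch stays self-contained.
#[cfg(windows)]
fn open_directory_handle(dir: &std::path::Path) -> std::io::Result<std::fs::File> {
    use std::fs::OpenOptions;
    use std::os::windows::fs::OpenOptionsExt;

    const FILE_FLAG_BACKUP_SEMANTICS: u32 = 0x0200_0000;

    OpenOptions::new()
        .read(true)
        .custom_flags(FILE_FLAG_BACKUP_SEMANTICS)
        .open(dir)
}
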
+ /// + /// When `security_qos_flags` is not set, a malicious program can gain the + /// elevated privileges of a privileged Rust process when it allows opening + /// user-specified paths, by tricking it into opening a named pipe. So + /// arguably `security_qos_flags` should also be set when opening arbitrary + /// paths. However the bits can then conflict with other flags, specifically + /// `FILE_FLAG_OPEN_NO_RECALL`. + /// + /// For information about possible values, see [Impersonation Levels] on the + /// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set + /// automatically when using this method. + + /// # Examples + /// + /// ```no_run + /// # #[cfg(for_demonstration_only)] + /// extern crate winapi; + /// # mod winapi { pub const SECURITY_IDENTIFICATION: u32 = 0; } + /// use std::fs::OpenOptions; + /// use std::os::windows::prelude::*; + /// + /// let file = OpenOptions::new() + /// .write(true) + /// .create(true) + /// + /// // Sets the flag value to `SecurityIdentification`. + /// .security_qos_flags(winapi::SECURITY_IDENTIFICATION) + /// + /// .open(r"\\.\pipe\MyPipe"); + /// ``` + /// + /// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + /// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2 + /// [Impersonation Levels]: + /// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level + #[stable(feature = "open_options_ext", since = "1.10.0")] + fn security_qos_flags(&mut self, flags: u32) -> &mut Self; +} + +#[stable(feature = "open_options_ext", since = "1.10.0")] +impl OpenOptionsExt for OpenOptions { + fn access_mode(&mut self, access: u32) -> &mut OpenOptions { + self.as_inner_mut().access_mode(access); + self + } + + fn share_mode(&mut self, share: u32) -> &mut OpenOptions { + self.as_inner_mut().share_mode(share); + self + } + + fn custom_flags(&mut self, flags: u32) -> &mut OpenOptions { + self.as_inner_mut().custom_flags(flags); + self + } + + fn attributes(&mut self, attributes: u32) -> &mut OpenOptions { + self.as_inner_mut().attributes(attributes); + self + } + + fn security_qos_flags(&mut self, flags: u32) -> &mut OpenOptions { + self.as_inner_mut().security_qos_flags(flags); + self + } +} + +/// Windows-specific extensions to [`fs::Metadata`]. +/// +/// The data members that this trait exposes correspond to the members +/// of the [`BY_HANDLE_FILE_INFORMATION`] structure. +/// +/// [`fs::Metadata`]: ../../../../std/fs/struct.Metadata.html +/// [`BY_HANDLE_FILE_INFORMATION`]: +/// https://docs.microsoft.com/en-us/windows/win32/api/fileapi/ns-fileapi-by_handle_file_information +#[stable(feature = "metadata_ext", since = "1.1.0")] +pub trait MetadataExt { + /// Returns the value of the `dwFileAttributes` field of this metadata. + /// + /// This field contains the file system attribute information for a file + /// or directory. For possible values and their descriptions, see + /// [File Attribute Constants] in the Windows Dev Center. 
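
// A brief sketch, not from the patch, of testing an attribute bit returned by
// `file_attributes()`; assumes a Windows target. FILE_ATTRIBUTE_READONLY is 0x1 in the
// Windows headers and is defined locally here to keep the example self-contained.
#[cfg(windows)]
fn is_readonly(path: &std::path::Path) -> std::io::Result<bool> {
    use std::os::windows::fs::MetadataExt;

    const FILE_ATTRIBUTE_READONLY: u32 = 0x0000_0001;

    let metadata = std::fs::metadata(path)?;
    Ok(metadata.file_attributes() & FILE_ATTRIBUTE_READONLY != 0)
}
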
+ /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let metadata = fs::metadata("foo.txt")?; + /// let attributes = metadata.file_attributes(); + /// Ok(()) + /// } + /// ``` + /// + /// [File Attribute Constants]: + /// https://docs.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn file_attributes(&self) -> u32; + + /// Returns the value of the `ftCreationTime` field of this metadata. + /// + /// The returned 64-bit value is equivalent to a [`FILETIME`] struct, + /// which represents the number of 100-nanosecond intervals since + /// January 1, 1601 (UTC). The struct is automatically + /// converted to a `u64` value, as that is the recommended way + /// to use it. + /// + /// If the underlying filesystem does not support creation time, the + /// returned value is 0. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let metadata = fs::metadata("foo.txt")?; + /// let creation_time = metadata.creation_time(); + /// Ok(()) + /// } + /// ``` + /// + /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn creation_time(&self) -> u64; + + /// Returns the value of the `ftLastAccessTime` field of this metadata. + /// + /// The returned 64-bit value is equivalent to a [`FILETIME`] struct, + /// which represents the number of 100-nanosecond intervals since + /// January 1, 1601 (UTC). The struct is automatically + /// converted to a `u64` value, as that is the recommended way + /// to use it. + /// + /// For a file, the value specifies the last time that a file was read + /// from or written to. For a directory, the value specifies when + /// the directory was created. For both files and directories, the + /// specified date is correct, but the time of day is always set to + /// midnight. + /// + /// If the underlying filesystem does not support last access time, the + /// returned value is 0. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let metadata = fs::metadata("foo.txt")?; + /// let last_access_time = metadata.last_access_time(); + /// Ok(()) + /// } + /// ``` + /// + /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn last_access_time(&self) -> u64; + + /// Returns the value of the `ftLastWriteTime` field of this metadata. + /// + /// The returned 64-bit value is equivalent to a [`FILETIME`] struct, + /// which represents the number of 100-nanosecond intervals since + /// January 1, 1601 (UTC). The struct is automatically + /// converted to a `u64` value, as that is the recommended way + /// to use it. + /// + /// For a file, the value specifies the last time that a file was written + /// to. For a directory, the structure specifies when the directory was + /// created. + /// + /// If the underlying filesystem does not support the last write time, + /// the returned value is 0. 
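
// An illustrative sketch, not from the patch: converting one of the FILETIME-style u64 values
// returned by `creation_time()`, `last_access_time()`, or `last_write_time()` into a
// `std::time::SystemTime`. Assumes a Windows target; the constant is the number of seconds
// between 1601-01-01 (the FILETIME epoch) and 1970-01-01 (the Unix epoch).
#[cfg(windows)]
fn filetime_u64_to_system_time(intervals_100ns: u64) -> std::time::SystemTime {
    use std::time::{Duration, UNIX_EPOCH};

    const EPOCH_DIFFERENCE_SECS: u64 = 11_644_473_600;

    // The raw value counts 100-nanosecond intervals since 1601-01-01 (UTC).
    let secs = intervals_100ns / 10_000_000;
    let nanos = (intervals_100ns % 10_000_000) * 100;
    UNIX_EPOCH - Duration::from_secs(EPOCH_DIFFERENCE_SECS)
        + Duration::from_secs(secs)
        + Duration::from_nanos(nanos)
}
// e.g. `let modified = filetime_u64_to_system_time(metadata.last_write_time());`
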
+ /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let metadata = fs::metadata("foo.txt")?; + /// let last_write_time = metadata.last_write_time(); + /// Ok(()) + /// } + /// ``` + /// + /// [`FILETIME`]: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn last_write_time(&self) -> u64; + + /// Returns the value of the `nFileSize{High,Low}` fields of this + /// metadata. + /// + /// The returned value does not have meaning for directories. + /// + /// # Examples + /// + /// ```no_run + /// use std::io; + /// use std::fs; + /// use std::os::windows::prelude::*; + /// + /// fn main() -> io::Result<()> { + /// let metadata = fs::metadata("foo.txt")?; + /// let file_size = metadata.file_size(); + /// Ok(()) + /// } + /// ``` + #[stable(feature = "metadata_ext", since = "1.1.0")] + fn file_size(&self) -> u64; + + /// Returns the value of the `dwVolumeSerialNumber` field of this + /// metadata. + /// + /// This will return `None` if the `Metadata` instance was created from a + /// call to `DirEntry::metadata`. If this `Metadata` was created by using + /// `fs::metadata` or `File::metadata`, then this will return `Some`. + #[unstable(feature = "windows_by_handle", issue = "63010")] + fn volume_serial_number(&self) -> Option<u32>; + + /// Returns the value of the `nNumberOfLinks` field of this + /// metadata. + /// + /// This will return `None` if the `Metadata` instance was created from a + /// call to `DirEntry::metadata`. If this `Metadata` was created by using + /// `fs::metadata` or `File::metadata`, then this will return `Some`. + #[unstable(feature = "windows_by_handle", issue = "63010")] + fn number_of_links(&self) -> Option<u32>; + + /// Returns the value of the `nFileIndex{Low,High}` fields of this + /// metadata. + /// + /// This will return `None` if the `Metadata` instance was created from a + /// call to `DirEntry::metadata`. If this `Metadata` was created by using + /// `fs::metadata` or `File::metadata`, then this will return `Some`. + #[unstable(feature = "windows_by_handle", issue = "63010")] + fn file_index(&self) -> Option<u64>; +} + +#[stable(feature = "metadata_ext", since = "1.1.0")] +impl MetadataExt for Metadata { + fn file_attributes(&self) -> u32 { + self.as_inner().attrs() + } + fn creation_time(&self) -> u64 { + self.as_inner().created_u64() + } + fn last_access_time(&self) -> u64 { + self.as_inner().accessed_u64() + } + fn last_write_time(&self) -> u64 { + self.as_inner().modified_u64() + } + fn file_size(&self) -> u64 { + self.as_inner().size() + } + fn volume_serial_number(&self) -> Option<u32> { + self.as_inner().volume_serial_number() + } + fn number_of_links(&self) -> Option<u32> { + self.as_inner().number_of_links() + } + fn file_index(&self) -> Option<u64> { + self.as_inner().file_index() + } +} + +/// Windows-specific extensions to [`FileType`]. +/// +/// On Windows, a symbolic link knows whether it is a file or directory. +/// +/// [`FileType`]: ../../../../std/fs/struct.FileType.html +#[unstable(feature = "windows_file_type_ext", issue = "none")] +pub trait FileTypeExt { + /// Returns `true` if this file type is a symbolic link that is also a directory. + #[unstable(feature = "windows_file_type_ext", issue = "none")] + fn is_symlink_dir(&self) -> bool; + /// Returns `true` if this file type is a symbolic link that is also a file. 
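
// A minimal related sketch, not from the patch: the file/directory distinction above needs
// the unstable `windows_file_type_ext` feature, so this example sticks to stable API and only
// answers "is this path a symlink at all?". Assumes a Windows target.
#[cfg(windows)]
fn is_any_symlink(path: &std::path::Path) -> std::io::Result<bool> {
    // `symlink_metadata` queries the link itself instead of following it.
    let metadata = std::fs::symlink_metadata(path)?;
    Ok(metadata.file_type().is_symlink())
}
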
+ #[unstable(feature = "windows_file_type_ext", issue = "none")] + fn is_symlink_file(&self) -> bool; +} + +#[unstable(feature = "windows_file_type_ext", issue = "none")] +impl FileTypeExt for fs::FileType { + fn is_symlink_dir(&self) -> bool { + self.as_inner().is_symlink_dir() + } + fn is_symlink_file(&self) -> bool { + self.as_inner().is_symlink_file() + } +} + +/// Creates a new file symbolic link on the filesystem. +/// +/// The `dst` path will be a file symbolic link pointing to the `src` +/// path. +/// +/// # Examples +/// +/// ```no_run +/// use std::os::windows::fs; +/// +/// fn main() -> std::io::Result<()> { +/// fs::symlink_file("a.txt", "b.txt")?; +/// Ok(()) +/// } +/// ``` +#[stable(feature = "symlink", since = "1.1.0")] +pub fn symlink_file<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()> { + sys::fs::symlink_inner(src.as_ref(), dst.as_ref(), false) +} + +/// Creates a new directory symlink on the filesystem. +/// +/// The `dst` path will be a directory symbolic link pointing to the `src` +/// path. +/// +/// # Examples +/// +/// ```no_run +/// use std::os::windows::fs; +/// +/// fn main() -> std::io::Result<()> { +/// fs::symlink_dir("a", "b")?; +/// Ok(()) +/// } +/// ``` +#[stable(feature = "symlink", since = "1.1.0")] +pub fn symlink_dir<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()> { + sys::fs::symlink_inner(src.as_ref(), dst.as_ref(), true) +} diff --git a/library/std/src/sys/windows/ext/io.rs b/library/std/src/sys/windows/ext/io.rs new file mode 100644 index 00000000000..4573ee58932 --- /dev/null +++ b/library/std/src/sys/windows/ext/io.rs @@ -0,0 +1,220 @@ +#![stable(feature = "rust1", since = "1.0.0")] + +use crate::fs; +use crate::io; +use crate::net; +use crate::os::windows::raw; +use crate::sys; +use crate::sys::c; +use crate::sys_common::{self, AsInner, FromInner, IntoInner}; + +/// Raw HANDLEs. +#[stable(feature = "rust1", since = "1.0.0")] +pub type RawHandle = raw::HANDLE; + +/// Raw SOCKETs. +#[stable(feature = "rust1", since = "1.0.0")] +pub type RawSocket = raw::SOCKET; + +/// Extracts raw handles. +#[stable(feature = "rust1", since = "1.0.0")] +pub trait AsRawHandle { + /// Extracts the raw handle, without taking any ownership. + #[stable(feature = "rust1", since = "1.0.0")] + fn as_raw_handle(&self) -> RawHandle; +} + +/// Construct I/O objects from raw handles. +#[stable(feature = "from_raw_os", since = "1.1.0")] +pub trait FromRawHandle { + /// Constructs a new I/O object from the specified raw handle. + /// + /// This function will **consume ownership** of the handle given, + /// passing responsibility for closing the handle to the returned + /// object. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + #[stable(feature = "from_raw_os", since = "1.1.0")] + unsafe fn from_raw_handle(handle: RawHandle) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw `HANDLE`. +#[stable(feature = "into_raw_os", since = "1.4.0")] +pub trait IntoRawHandle { + /// Consumes this object, returning the raw underlying handle. + /// + /// This function **transfers ownership** of the underlying handle to the + /// caller. 
Callers are then the unique owners of the handle and must close + /// it once it's no longer needed. + #[stable(feature = "into_raw_os", since = "1.4.0")] + fn into_raw_handle(self) -> RawHandle; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawHandle for fs::File { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as RawHandle + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdin { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stdout { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio", since = "1.21.0")] +impl AsRawHandle for io::Stderr { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawHandle for io::StdinLock<'a> { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawHandle for io::StdoutLock<'a> { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle } + } +} + +#[stable(feature = "asraw_stdio_locks", since = "1.35.0")] +impl<'a> AsRawHandle for io::StderrLock<'a> { + fn as_raw_handle(&self) -> RawHandle { + unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle } + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawHandle for fs::File { + unsafe fn from_raw_handle(handle: RawHandle) -> fs::File { + let handle = handle as c::HANDLE; + fs::File::from_inner(sys::fs::File::from_inner(handle)) + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawHandle for fs::File { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} + +/// Extracts raw sockets. +#[stable(feature = "rust1", since = "1.0.0")] +pub trait AsRawSocket { + /// Extracts the underlying raw socket from this object. + #[stable(feature = "rust1", since = "1.0.0")] + fn as_raw_socket(&self) -> RawSocket; +} + +/// Creates I/O objects from raw sockets. +#[stable(feature = "from_raw_os", since = "1.1.0")] +pub trait FromRawSocket { + /// Creates a new I/O object from the given raw socket. + /// + /// This function will **consume ownership** of the socket provided and + /// it will be closed when the returned object goes out of scope. + /// + /// This function is also unsafe as the primitives currently returned + /// have the contract that they are the sole owner of the file + /// descriptor they are wrapping. Usage of this function could + /// accidentally allow violating this contract which can cause memory + /// unsafety in code that relies on it being true. + #[stable(feature = "from_raw_os", since = "1.1.0")] + unsafe fn from_raw_socket(sock: RawSocket) -> Self; +} + +/// A trait to express the ability to consume an object and acquire ownership of +/// its raw `SOCKET`. +#[stable(feature = "into_raw_os", since = "1.4.0")] +pub trait IntoRawSocket { + /// Consumes this object, returning the raw underlying socket. + /// + /// This function **transfers ownership** of the underlying socket to the + /// caller. 
Callers are then the unique owners of the socket and must close + /// it once it's no longer needed. + #[stable(feature = "into_raw_os", since = "1.4.0")] + fn into_raw_socket(self) -> RawSocket; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawSocket for net::TcpStream { + fn as_raw_socket(&self) -> RawSocket { + *self.as_inner().socket().as_inner() + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawSocket for net::TcpListener { + fn as_raw_socket(&self) -> RawSocket { + *self.as_inner().socket().as_inner() + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl AsRawSocket for net::UdpSocket { + fn as_raw_socket(&self) -> RawSocket { + *self.as_inner().socket().as_inner() + } +} + +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawSocket for net::TcpStream { + unsafe fn from_raw_socket(sock: RawSocket) -> net::TcpStream { + let sock = sys::net::Socket::from_inner(sock); + net::TcpStream::from_inner(sys_common::net::TcpStream::from_inner(sock)) + } +} +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawSocket for net::TcpListener { + unsafe fn from_raw_socket(sock: RawSocket) -> net::TcpListener { + let sock = sys::net::Socket::from_inner(sock); + net::TcpListener::from_inner(sys_common::net::TcpListener::from_inner(sock)) + } +} +#[stable(feature = "from_raw_os", since = "1.1.0")] +impl FromRawSocket for net::UdpSocket { + unsafe fn from_raw_socket(sock: RawSocket) -> net::UdpSocket { + let sock = sys::net::Socket::from_inner(sock); + net::UdpSocket::from_inner(sys_common::net::UdpSocket::from_inner(sock)) + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawSocket for net::TcpStream { + fn into_raw_socket(self) -> RawSocket { + self.into_inner().into_socket().into_inner() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawSocket for net::TcpListener { + fn into_raw_socket(self) -> RawSocket { + self.into_inner().into_socket().into_inner() + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawSocket for net::UdpSocket { + fn into_raw_socket(self) -> RawSocket { + self.into_inner().into_socket().into_inner() + } +} diff --git a/library/std/src/sys/windows/ext/mod.rs b/library/std/src/sys/windows/ext/mod.rs new file mode 100644 index 00000000000..613d3dc189a --- /dev/null +++ b/library/std/src/sys/windows/ext/mod.rs @@ -0,0 +1,40 @@ +//! Platform-specific extensions to `std` for Windows. +//! +//! Provides access to platform-level information for Windows, and exposes +//! Windows-specific idioms that would otherwise be inappropriate as part +//! the core `std` library. These extensions allow developers to use +//! `std` types and idioms with Windows in a way that the normal +//! platform-agnostic idioms would not normally support. + +#![stable(feature = "rust1", since = "1.0.0")] +#![doc(cfg(windows))] +#![allow(missing_docs)] + +pub mod ffi; +pub mod fs; +pub mod io; +pub mod process; +pub mod raw; +pub mod thread; + +/// A prelude for conveniently writing platform-specific code. +/// +/// Includes all extension traits, and some important type definitions. 
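
// A short usage sketch, not part of the patch, showing the prelude in action on a Windows
// target: importing `std::os::windows::prelude::*` brings `AsRawHandle`, `OsStrExt`, and the
// other extension traits re-exported below into scope. "foo.txt" is just a placeholder path.
#[cfg(windows)]
fn example_prelude_usage() -> std::io::Result<()> {
    use std::fs::File;
    use std::os::windows::prelude::*;

    let file = File::open("foo.txt")?;
    // `as_raw_handle` comes from the `AsRawHandle` extension trait; the handle is only
    // borrowed and remains owned (and eventually closed) by `file`.
    let handle: RawHandle = file.as_raw_handle();
    let _ = handle;
    Ok(())
}
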
+#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::ffi::{OsStrExt, OsStringExt}; + #[doc(no_inline)] + #[stable(feature = "file_offset", since = "1.15.0")] + pub use super::fs::FileExt; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::fs::{MetadataExt, OpenOptionsExt}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::io::{AsRawHandle, AsRawSocket, RawHandle, RawSocket}; + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::io::{FromRawHandle, FromRawSocket, IntoRawHandle, IntoRawSocket}; +} diff --git a/library/std/src/sys/windows/ext/process.rs b/library/std/src/sys/windows/ext/process.rs new file mode 100644 index 00000000000..8c34a9faf1d --- /dev/null +++ b/library/std/src/sys/windows/ext/process.rs @@ -0,0 +1,113 @@ +//! Extensions to `std::process` for Windows. + +#![stable(feature = "process_extensions", since = "1.2.0")] + +use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle}; +use crate::process; +use crate::sys; +use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl FromRawHandle for process::Stdio { + unsafe fn from_raw_handle(handle: RawHandle) -> process::Stdio { + let handle = sys::handle::Handle::new(handle as *mut _); + let io = sys::process::Stdio::Handle(handle); + process::Stdio::from_inner(io) + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawHandle for process::Child { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as *mut _ + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawHandle for process::Child { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawHandle for process::ChildStdin { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as *mut _ + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawHandle for process::ChildStdout { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as *mut _ + } +} + +#[stable(feature = "process_extensions", since = "1.2.0")] +impl AsRawHandle for process::ChildStderr { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as *mut _ + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawHandle for process::ChildStdin { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawHandle for process::ChildStdout { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} + +#[stable(feature = "into_raw_os", since = "1.4.0")] +impl IntoRawHandle for process::ChildStderr { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} + +/// Windows-specific extensions to [`process::ExitStatus`]. +/// +/// [`process::ExitStatus`]: ../../../../std/process/struct.ExitStatus.html +#[stable(feature = "exit_status_from", since = "1.12.0")] +pub trait ExitStatusExt { + /// Creates a new `ExitStatus` from the raw underlying `u32` return value of + /// a process. 
+ #[stable(feature = "exit_status_from", since = "1.12.0")] + fn from_raw(raw: u32) -> Self; +} + +#[stable(feature = "exit_status_from", since = "1.12.0")] +impl ExitStatusExt for process::ExitStatus { + fn from_raw(raw: u32) -> Self { + process::ExitStatus::from_inner(From::from(raw)) + } +} + +/// Windows-specific extensions to the [`process::Command`] builder. +/// +/// [`process::Command`]: ../../../../std/process/struct.Command.html +#[stable(feature = "windows_process_extensions", since = "1.16.0")] +pub trait CommandExt { + /// Sets the [process creation flags][1] to be passed to `CreateProcess`. + /// + /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`. + /// + /// [1]: https://docs.microsoft.com/en-us/windows/win32/procthread/process-creation-flags + #[stable(feature = "windows_process_extensions", since = "1.16.0")] + fn creation_flags(&mut self, flags: u32) -> &mut process::Command; +} + +#[stable(feature = "windows_process_extensions", since = "1.16.0")] +impl CommandExt for process::Command { + fn creation_flags(&mut self, flags: u32) -> &mut process::Command { + self.as_inner_mut().creation_flags(flags); + self + } +} diff --git a/library/std/src/sys/windows/ext/raw.rs b/library/std/src/sys/windows/ext/raw.rs new file mode 100644 index 00000000000..7f2a2877828 --- /dev/null +++ b/library/std/src/sys/windows/ext/raw.rs @@ -0,0 +1,14 @@ +//! Windows-specific primitives + +#![stable(feature = "raw_ext", since = "1.1.0")] + +use crate::os::raw::c_void; + +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type HANDLE = *mut c_void; +#[cfg(target_pointer_width = "32")] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type SOCKET = u32; +#[cfg(target_pointer_width = "64")] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type SOCKET = u64; diff --git a/library/std/src/sys/windows/ext/thread.rs b/library/std/src/sys/windows/ext/thread.rs new file mode 100644 index 00000000000..41c29f5b950 --- /dev/null +++ b/library/std/src/sys/windows/ext/thread.rs @@ -0,0 +1,21 @@ +//! Extensions to `std::thread` for Windows. 
+ +#![stable(feature = "thread_extensions", since = "1.9.0")] + +use crate::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle}; +use crate::sys_common::{AsInner, IntoInner}; +use crate::thread; + +#[stable(feature = "thread_extensions", since = "1.9.0")] +impl<T> AsRawHandle for thread::JoinHandle<T> { + fn as_raw_handle(&self) -> RawHandle { + self.as_inner().handle().raw() as *mut _ + } +} + +#[stable(feature = "thread_extensions", since = "1.9.0")] +impl<T> IntoRawHandle for thread::JoinHandle<T> { + fn into_raw_handle(self) -> RawHandle { + self.into_inner().into_handle().into_raw() as *mut _ + } +} diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs new file mode 100644 index 00000000000..cdbfac267b9 --- /dev/null +++ b/library/std/src/sys/windows/fs.rs @@ -0,0 +1,938 @@ +use crate::os::windows::prelude::*; + +use crate::ffi::OsString; +use crate::fmt; +use crate::io::{self, Error, IoSlice, IoSliceMut, SeekFrom}; +use crate::mem; +use crate::path::{Path, PathBuf}; +use crate::ptr; +use crate::slice; +use crate::sync::Arc; +use crate::sys::handle::Handle; +use crate::sys::time::SystemTime; +use crate::sys::{c, cvt}; +use crate::sys_common::FromInner; + +use super::to_u16s; + +pub struct File { + handle: Handle, +} + +#[derive(Clone)] +pub struct FileAttr { + attributes: c::DWORD, + creation_time: c::FILETIME, + last_access_time: c::FILETIME, + last_write_time: c::FILETIME, + file_size: u64, + reparse_tag: c::DWORD, + volume_serial_number: Option<u32>, + number_of_links: Option<u32>, + file_index: Option<u64>, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct FileType { + attributes: c::DWORD, + reparse_tag: c::DWORD, +} + +pub struct ReadDir { + handle: FindNextFileHandle, + root: Arc<PathBuf>, + first: Option<c::WIN32_FIND_DATAW>, +} + +struct FindNextFileHandle(c::HANDLE); + +unsafe impl Send for FindNextFileHandle {} +unsafe impl Sync for FindNextFileHandle {} + +pub struct DirEntry { + root: Arc<PathBuf>, + data: c::WIN32_FIND_DATAW, +} + +#[derive(Clone, Debug)] +pub struct OpenOptions { + // generic + read: bool, + write: bool, + append: bool, + truncate: bool, + create: bool, + create_new: bool, + // system-specific + custom_flags: u32, + access_mode: Option<c::DWORD>, + attributes: c::DWORD, + share_mode: c::DWORD, + security_qos_flags: c::DWORD, + security_attributes: usize, // FIXME: should be a reference +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FilePermissions { + attrs: c::DWORD, +} + +#[derive(Debug)] +pub struct DirBuilder; + +impl fmt::Debug for ReadDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // This will only be called from std::fs::ReadDir, which will add a "ReadDir()" frame. 
+ // Thus the result will be e g 'ReadDir("C:\")' + fmt::Debug::fmt(&*self.root, f) + } +} + +impl Iterator for ReadDir { + type Item = io::Result<DirEntry>; + fn next(&mut self) -> Option<io::Result<DirEntry>> { + if let Some(first) = self.first.take() { + if let Some(e) = DirEntry::new(&self.root, &first) { + return Some(Ok(e)); + } + } + unsafe { + let mut wfd = mem::zeroed(); + loop { + if c::FindNextFileW(self.handle.0, &mut wfd) == 0 { + if c::GetLastError() == c::ERROR_NO_MORE_FILES { + return None; + } else { + return Some(Err(Error::last_os_error())); + } + } + if let Some(e) = DirEntry::new(&self.root, &wfd) { + return Some(Ok(e)); + } + } + } + } +} + +impl Drop for FindNextFileHandle { + fn drop(&mut self) { + let r = unsafe { c::FindClose(self.0) }; + debug_assert!(r != 0); + } +} + +impl DirEntry { + fn new(root: &Arc<PathBuf>, wfd: &c::WIN32_FIND_DATAW) -> Option<DirEntry> { + match &wfd.cFileName[0..3] { + // check for '.' and '..' + &[46, 0, ..] | &[46, 46, 0, ..] => return None, + _ => {} + } + + Some(DirEntry { root: root.clone(), data: *wfd }) + } + + pub fn path(&self) -> PathBuf { + self.root.join(&self.file_name()) + } + + pub fn file_name(&self) -> OsString { + let filename = super::truncate_utf16_at_nul(&self.data.cFileName); + OsString::from_wide(filename) + } + + pub fn file_type(&self) -> io::Result<FileType> { + Ok(FileType::new( + self.data.dwFileAttributes, + /* reparse_tag = */ self.data.dwReserved0, + )) + } + + pub fn metadata(&self) -> io::Result<FileAttr> { + Ok(FileAttr { + attributes: self.data.dwFileAttributes, + creation_time: self.data.ftCreationTime, + last_access_time: self.data.ftLastAccessTime, + last_write_time: self.data.ftLastWriteTime, + file_size: ((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64), + reparse_tag: if self.data.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // reserved unless this is a reparse point + self.data.dwReserved0 + } else { + 0 + }, + volume_serial_number: None, + number_of_links: None, + file_index: None, + }) + } +} + +impl OpenOptions { + pub fn new() -> OpenOptions { + OpenOptions { + // generic + read: false, + write: false, + append: false, + truncate: false, + create: false, + create_new: false, + // system-specific + custom_flags: 0, + access_mode: None, + share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE, + attributes: 0, + security_qos_flags: 0, + security_attributes: 0, + } + } + + pub fn read(&mut self, read: bool) { + self.read = read; + } + pub fn write(&mut self, write: bool) { + self.write = write; + } + pub fn append(&mut self, append: bool) { + self.append = append; + } + pub fn truncate(&mut self, truncate: bool) { + self.truncate = truncate; + } + pub fn create(&mut self, create: bool) { + self.create = create; + } + pub fn create_new(&mut self, create_new: bool) { + self.create_new = create_new; + } + + pub fn custom_flags(&mut self, flags: u32) { + self.custom_flags = flags; + } + pub fn access_mode(&mut self, access_mode: u32) { + self.access_mode = Some(access_mode); + } + pub fn share_mode(&mut self, share_mode: u32) { + self.share_mode = share_mode; + } + pub fn attributes(&mut self, attrs: u32) { + self.attributes = attrs; + } + pub fn security_qos_flags(&mut self, flags: u32) { + // We have to set `SECURITY_SQOS_PRESENT` here, because one of the valid flags we can + // receive is `SECURITY_ANONYMOUS = 0x0`, which we can't check for later on. 
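The builder above is surfaced to users through the stable `std::os::windows::fs::OpenOptionsExt` trait; a small sketch of how the system-specific setters are reached from ordinary code. `share_mode(0)` overrides the permissive default (`FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`) shown above and requests exclusive access.

use std::fs::{File, OpenOptions};
use std::io;
use std::os::windows::fs::OpenOptionsExt;

fn open_exclusive(path: &str) -> io::Result<File> {
    OpenOptions::new()
        .read(true)
        .write(true)
        // Deny concurrent readers, writers and deleters for the lifetime of the handle.
        .share_mode(0)
        .open(path)
}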
+ self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT; + } + pub fn security_attributes(&mut self, attrs: c::LPSECURITY_ATTRIBUTES) { + self.security_attributes = attrs as usize; + } + + fn get_access_mode(&self) -> io::Result<c::DWORD> { + const ERROR_INVALID_PARAMETER: i32 = 87; + + match (self.read, self.write, self.append, self.access_mode) { + (.., Some(mode)) => Ok(mode), + (true, false, false, None) => Ok(c::GENERIC_READ), + (false, true, false, None) => Ok(c::GENERIC_WRITE), + (true, true, false, None) => Ok(c::GENERIC_READ | c::GENERIC_WRITE), + (false, _, true, None) => Ok(c::FILE_GENERIC_WRITE & !c::FILE_WRITE_DATA), + (true, _, true, None) => { + Ok(c::GENERIC_READ | (c::FILE_GENERIC_WRITE & !c::FILE_WRITE_DATA)) + } + (false, false, false, None) => Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER)), + } + } + + fn get_creation_mode(&self) -> io::Result<c::DWORD> { + const ERROR_INVALID_PARAMETER: i32 = 87; + + match (self.write, self.append) { + (true, false) => {} + (false, false) => { + if self.truncate || self.create || self.create_new { + return Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER)); + } + } + (_, true) => { + if self.truncate && !self.create_new { + return Err(Error::from_raw_os_error(ERROR_INVALID_PARAMETER)); + } + } + } + + Ok(match (self.create, self.truncate, self.create_new) { + (false, false, false) => c::OPEN_EXISTING, + (true, false, false) => c::OPEN_ALWAYS, + (false, true, false) => c::TRUNCATE_EXISTING, + (true, true, false) => c::CREATE_ALWAYS, + (_, _, true) => c::CREATE_NEW, + }) + } + + fn get_flags_and_attributes(&self) -> c::DWORD { + self.custom_flags + | self.attributes + | self.security_qos_flags + | if self.create_new { c::FILE_FLAG_OPEN_REPARSE_POINT } else { 0 } + } +} + +impl File { + pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> { + let path = to_u16s(path)?; + let handle = unsafe { + c::CreateFileW( + path.as_ptr(), + opts.get_access_mode()?, + opts.share_mode, + opts.security_attributes as *mut _, + opts.get_creation_mode()?, + opts.get_flags_and_attributes(), + ptr::null_mut(), + ) + }; + if handle == c::INVALID_HANDLE_VALUE { + Err(Error::last_os_error()) + } else { + Ok(File { handle: Handle::new(handle) }) + } + } + + pub fn fsync(&self) -> io::Result<()> { + cvt(unsafe { c::FlushFileBuffers(self.handle.raw()) })?; + Ok(()) + } + + pub fn datasync(&self) -> io::Result<()> { + self.fsync() + } + + pub fn truncate(&self, size: u64) -> io::Result<()> { + let mut info = c::FILE_END_OF_FILE_INFO { EndOfFile: size as c::LARGE_INTEGER }; + let size = mem::size_of_val(&info); + cvt(unsafe { + c::SetFileInformationByHandle( + self.handle.raw(), + c::FileEndOfFileInfo, + &mut info as *mut _ as *mut _, + size as c::DWORD, + ) + })?; + Ok(()) + } + + #[cfg(not(target_vendor = "uwp"))] + pub fn file_attr(&self) -> io::Result<FileAttr> { + unsafe { + let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed(); + cvt(c::GetFileInformationByHandle(self.handle.raw(), &mut info))?; + let mut reparse_tag = 0; + if info.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 { + let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + if let Ok((_, buf)) = self.reparse_point(&mut b) { + reparse_tag = buf.ReparseTag; + } + } + Ok(FileAttr { + attributes: info.dwFileAttributes, + creation_time: info.ftCreationTime, + last_access_time: info.ftLastAccessTime, + last_write_time: info.ftLastWriteTime, + file_size: (info.nFileSizeLow as u64) | ((info.nFileSizeHigh as u64) << 32), + reparse_tag, + volume_serial_number: 
Some(info.dwVolumeSerialNumber), + number_of_links: Some(info.nNumberOfLinks), + file_index: Some( + (info.nFileIndexLow as u64) | ((info.nFileIndexHigh as u64) << 32), + ), + }) + } + } + + #[cfg(target_vendor = "uwp")] + pub fn file_attr(&self) -> io::Result<FileAttr> { + unsafe { + let mut info: c::FILE_BASIC_INFO = mem::zeroed(); + let size = mem::size_of_val(&info); + cvt(c::GetFileInformationByHandleEx( + self.handle.raw(), + c::FileBasicInfo, + &mut info as *mut _ as *mut libc::c_void, + size as c::DWORD, + ))?; + let mut attr = FileAttr { + attributes: info.FileAttributes, + creation_time: c::FILETIME { + dwLowDateTime: info.CreationTime as c::DWORD, + dwHighDateTime: (info.CreationTime >> 32) as c::DWORD, + }, + last_access_time: c::FILETIME { + dwLowDateTime: info.LastAccessTime as c::DWORD, + dwHighDateTime: (info.LastAccessTime >> 32) as c::DWORD, + }, + last_write_time: c::FILETIME { + dwLowDateTime: info.LastWriteTime as c::DWORD, + dwHighDateTime: (info.LastWriteTime >> 32) as c::DWORD, + }, + file_size: 0, + reparse_tag: 0, + volume_serial_number: None, + number_of_links: None, + file_index: None, + }; + let mut info: c::FILE_STANDARD_INFO = mem::zeroed(); + let size = mem::size_of_val(&info); + cvt(c::GetFileInformationByHandleEx( + self.handle.raw(), + c::FileStandardInfo, + &mut info as *mut _ as *mut libc::c_void, + size as c::DWORD, + ))?; + attr.file_size = info.AllocationSize as u64; + attr.number_of_links = Some(info.NumberOfLinks); + if attr.file_type().is_reparse_point() { + let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + if let Ok((_, buf)) = self.reparse_point(&mut b) { + attr.reparse_tag = buf.ReparseTag; + } + } + Ok(attr) + } + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.handle.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.handle.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.handle.is_read_vectored() + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + self.handle.read_at(buf, offset) + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.handle.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.handle.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.handle.is_write_vectored() + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + self.handle.write_at(buf, offset) + } + + pub fn flush(&self) -> io::Result<()> { + Ok(()) + } + + pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> { + let (whence, pos) = match pos { + // Casting to `i64` is fine, `SetFilePointerEx` reinterprets this + // integer as `u64`. + SeekFrom::Start(n) => (c::FILE_BEGIN, n as i64), + SeekFrom::End(n) => (c::FILE_END, n), + SeekFrom::Current(n) => (c::FILE_CURRENT, n), + }; + let pos = pos as c::LARGE_INTEGER; + let mut newpos = 0; + cvt(unsafe { c::SetFilePointerEx(self.handle.raw(), pos, &mut newpos, whence) })?; + Ok(newpos as u64) + } + + pub fn duplicate(&self) -> io::Result<File> { + Ok(File { handle: self.handle.duplicate(0, false, c::DUPLICATE_SAME_ACCESS)? 
}) + } + + pub fn handle(&self) -> &Handle { + &self.handle + } + + pub fn into_handle(self) -> Handle { + self.handle + } + + fn reparse_point<'a>( + &self, + space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE], + ) -> io::Result<(c::DWORD, &'a c::REPARSE_DATA_BUFFER)> { + unsafe { + let mut bytes = 0; + cvt({ + c::DeviceIoControl( + self.handle.raw(), + c::FSCTL_GET_REPARSE_POINT, + ptr::null_mut(), + 0, + space.as_mut_ptr() as *mut _, + space.len() as c::DWORD, + &mut bytes, + ptr::null_mut(), + ) + })?; + Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER))) + } + } + + fn readlink(&self) -> io::Result<PathBuf> { + let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + let (_bytes, buf) = self.reparse_point(&mut space)?; + unsafe { + let (path_buffer, subst_off, subst_len, relative) = match buf.ReparseTag { + c::IO_REPARSE_TAG_SYMLINK => { + let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER = + &buf.rest as *const _ as *const _; + ( + &(*info).PathBuffer as *const _ as *const u16, + (*info).SubstituteNameOffset / 2, + (*info).SubstituteNameLength / 2, + (*info).Flags & c::SYMLINK_FLAG_RELATIVE != 0, + ) + } + c::IO_REPARSE_TAG_MOUNT_POINT => { + let info: *const c::MOUNT_POINT_REPARSE_BUFFER = + &buf.rest as *const _ as *const _; + ( + &(*info).PathBuffer as *const _ as *const u16, + (*info).SubstituteNameOffset / 2, + (*info).SubstituteNameLength / 2, + false, + ) + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Unsupported reparse point type", + )); + } + }; + let subst_ptr = path_buffer.offset(subst_off as isize); + let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize); + // Absolute paths start with an NT internal namespace prefix `\??\` + // We should not let it leak through. + if !relative && subst.starts_with(&[92u16, 63u16, 63u16, 92u16]) { + subst = &subst[4..]; + } + Ok(PathBuf::from(OsString::from_wide(subst))) + } + } + + pub fn set_permissions(&self, perm: FilePermissions) -> io::Result<()> { + let mut info = c::FILE_BASIC_INFO { + CreationTime: 0, + LastAccessTime: 0, + LastWriteTime: 0, + ChangeTime: 0, + FileAttributes: perm.attrs, + }; + let size = mem::size_of_val(&info); + cvt(unsafe { + c::SetFileInformationByHandle( + self.handle.raw(), + c::FileBasicInfo, + &mut info as *mut _ as *mut _, + size as c::DWORD, + ) + })?; + Ok(()) + } +} + +impl FromInner<c::HANDLE> for File { + fn from_inner(handle: c::HANDLE) -> File { + File { handle: Handle::new(handle) } + } +} + +impl fmt::Debug for File { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // FIXME(#24570): add more info here (e.g., mode) + let mut b = f.debug_struct("File"); + b.field("handle", &self.handle.raw()); + if let Ok(path) = get_path(&self) { + b.field("path", &path); + } + b.finish() + } +} + +impl FileAttr { + pub fn size(&self) -> u64 { + self.file_size + } + + pub fn perm(&self) -> FilePermissions { + FilePermissions { attrs: self.attributes } + } + + pub fn attrs(&self) -> u32 { + self.attributes + } + + pub fn file_type(&self) -> FileType { + FileType::new(self.attributes, self.reparse_tag) + } + + pub fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(self.last_write_time)) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(self.last_access_time)) + } + + pub fn created(&self) -> io::Result<SystemTime> { + Ok(SystemTime::from(self.creation_time)) + } + + pub fn modified_u64(&self) -> u64 { + to_u64(&self.last_write_time) + } + + pub fn accessed_u64(&self) -> u64 { + 
to_u64(&self.last_access_time) + } + + pub fn created_u64(&self) -> u64 { + to_u64(&self.creation_time) + } + + pub fn volume_serial_number(&self) -> Option<u32> { + self.volume_serial_number + } + + pub fn number_of_links(&self) -> Option<u32> { + self.number_of_links + } + + pub fn file_index(&self) -> Option<u64> { + self.file_index + } +} + +fn to_u64(ft: &c::FILETIME) -> u64 { + (ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32) +} + +impl FilePermissions { + pub fn readonly(&self) -> bool { + self.attrs & c::FILE_ATTRIBUTE_READONLY != 0 + } + + pub fn set_readonly(&mut self, readonly: bool) { + if readonly { + self.attrs |= c::FILE_ATTRIBUTE_READONLY; + } else { + self.attrs &= !c::FILE_ATTRIBUTE_READONLY; + } + } +} + +impl FileType { + fn new(attrs: c::DWORD, reparse_tag: c::DWORD) -> FileType { + FileType { attributes: attrs, reparse_tag: reparse_tag } + } + pub fn is_dir(&self) -> bool { + !self.is_symlink() && self.is_directory() + } + pub fn is_file(&self) -> bool { + !self.is_symlink() && !self.is_directory() + } + pub fn is_symlink(&self) -> bool { + self.is_reparse_point() && self.is_reparse_tag_name_surrogate() + } + pub fn is_symlink_dir(&self) -> bool { + self.is_symlink() && self.is_directory() + } + pub fn is_symlink_file(&self) -> bool { + self.is_symlink() && !self.is_directory() + } + fn is_directory(&self) -> bool { + self.attributes & c::FILE_ATTRIBUTE_DIRECTORY != 0 + } + fn is_reparse_point(&self) -> bool { + self.attributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 + } + fn is_reparse_tag_name_surrogate(&self) -> bool { + self.reparse_tag & 0x20000000 != 0 + } +} + +impl DirBuilder { + pub fn new() -> DirBuilder { + DirBuilder + } + + pub fn mkdir(&self, p: &Path) -> io::Result<()> { + let p = to_u16s(p)?; + cvt(unsafe { c::CreateDirectoryW(p.as_ptr(), ptr::null_mut()) })?; + Ok(()) + } +} + +pub fn readdir(p: &Path) -> io::Result<ReadDir> { + let root = p.to_path_buf(); + let star = p.join("*"); + let path = to_u16s(&star)?; + + unsafe { + let mut wfd = mem::zeroed(); + let find_handle = c::FindFirstFileW(path.as_ptr(), &mut wfd); + if find_handle != c::INVALID_HANDLE_VALUE { + Ok(ReadDir { + handle: FindNextFileHandle(find_handle), + root: Arc::new(root), + first: Some(wfd), + }) + } else { + Err(Error::last_os_error()) + } + } +} + +pub fn unlink(p: &Path) -> io::Result<()> { + let p_u16s = to_u16s(p)?; + cvt(unsafe { c::DeleteFileW(p_u16s.as_ptr()) })?; + Ok(()) +} + +pub fn rename(old: &Path, new: &Path) -> io::Result<()> { + let old = to_u16s(old)?; + let new = to_u16s(new)?; + cvt(unsafe { c::MoveFileExW(old.as_ptr(), new.as_ptr(), c::MOVEFILE_REPLACE_EXISTING) })?; + Ok(()) +} + +pub fn rmdir(p: &Path) -> io::Result<()> { + let p = to_u16s(p)?; + cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) })?; + Ok(()) +} + +pub fn remove_dir_all(path: &Path) -> io::Result<()> { + let filetype = lstat(path)?.file_type(); + if filetype.is_symlink() { + // On Windows symlinks to files and directories are removed differently. + // rmdir only deletes dir symlinks and junctions, not file symlinks. + rmdir(path) + } else { + remove_dir_all_recursive(path) + } +} + +fn remove_dir_all_recursive(path: &Path) -> io::Result<()> { + for child in readdir(path)? 
{ + let child = child?; + let child_type = child.file_type()?; + if child_type.is_dir() { + remove_dir_all_recursive(&child.path())?; + } else if child_type.is_symlink_dir() { + rmdir(&child.path())?; + } else { + unlink(&child.path())?; + } + } + rmdir(path) +} + +pub fn readlink(path: &Path) -> io::Result<PathBuf> { + // Open the link with no access mode, instead of generic read. + // By default FILE_LIST_DIRECTORY is denied for the junction "C:\Documents and Settings", so + // this is needed for a common case. + let mut opts = OpenOptions::new(); + opts.access_mode(0); + opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS); + let file = File::open(&path, &opts)?; + file.readlink() +} + +pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { + symlink_inner(src, dst, false) +} + +pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> { + let src = to_u16s(src)?; + let dst = to_u16s(dst)?; + let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 }; + // Formerly, symlink creation required the SeCreateSymbolicLink privilege. For the Windows 10 + // Creators Update, Microsoft loosened this to allow unprivileged symlink creation if the + // computer is in Developer Mode, but SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE must be + // added to dwFlags to opt into this behaviour. + let result = cvt(unsafe { + c::CreateSymbolicLinkW( + dst.as_ptr(), + src.as_ptr(), + flags | c::SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE, + ) as c::BOOL + }); + if let Err(err) = result { + if err.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) { + // Older Windows objects to SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE, + // so if we encounter ERROR_INVALID_PARAMETER, retry without that flag. + cvt(unsafe { c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as c::BOOL })?; + } else { + return Err(err); + } + } + Ok(()) +} + +#[cfg(not(target_vendor = "uwp"))] +pub fn link(src: &Path, dst: &Path) -> io::Result<()> { + let src = to_u16s(src)?; + let dst = to_u16s(dst)?; + cvt(unsafe { c::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut()) })?; + Ok(()) +} + +#[cfg(target_vendor = "uwp")] +pub fn link(_src: &Path, _dst: &Path) -> io::Result<()> { + return Err(io::Error::new(io::ErrorKind::Other, "hard link are not supported on UWP")); +} + +pub fn stat(path: &Path) -> io::Result<FileAttr> { + let mut opts = OpenOptions::new(); + // No read or write permissions are necessary + opts.access_mode(0); + // This flag is so we can open directories too + opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS); + let file = File::open(path, &opts)?; + file.file_attr() +} + +pub fn lstat(path: &Path) -> io::Result<FileAttr> { + let mut opts = OpenOptions::new(); + // No read or write permissions are necessary + opts.access_mode(0); + opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS | c::FILE_FLAG_OPEN_REPARSE_POINT); + let file = File::open(path, &opts)?; + file.file_attr() +} + +pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> { + let p = to_u16s(p)?; + unsafe { + cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs))?; + Ok(()) + } +} + +fn get_path(f: &File) -> io::Result<PathBuf> { + super::fill_utf16_buf( + |buf, sz| unsafe { + c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz, c::VOLUME_NAME_DOS) + }, + |buf| PathBuf::from(OsString::from_wide(buf)), + ) +} + +pub fn canonicalize(p: &Path) -> io::Result<PathBuf> { + let mut opts = OpenOptions::new(); + // No read or write permissions are necessary + opts.access_mode(0); + // 
This flag is so we can open directories too + opts.custom_flags(c::FILE_FLAG_BACKUP_SEMANTICS); + let f = File::open(p, &opts)?; + get_path(&f) +} + +pub fn copy(from: &Path, to: &Path) -> io::Result<u64> { + unsafe extern "system" fn callback( + _TotalFileSize: c::LARGE_INTEGER, + _TotalBytesTransferred: c::LARGE_INTEGER, + _StreamSize: c::LARGE_INTEGER, + StreamBytesTransferred: c::LARGE_INTEGER, + dwStreamNumber: c::DWORD, + _dwCallbackReason: c::DWORD, + _hSourceFile: c::HANDLE, + _hDestinationFile: c::HANDLE, + lpData: c::LPVOID, + ) -> c::DWORD { + if dwStreamNumber == 1 { + *(lpData as *mut i64) = StreamBytesTransferred; + } + c::PROGRESS_CONTINUE + } + let pfrom = to_u16s(from)?; + let pto = to_u16s(to)?; + let mut size = 0i64; + cvt(unsafe { + c::CopyFileExW( + pfrom.as_ptr(), + pto.as_ptr(), + Some(callback), + &mut size as *mut _ as *mut _, + ptr::null_mut(), + 0, + ) + })?; + Ok(size as u64) +} + +#[allow(dead_code)] +pub fn symlink_junction<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> io::Result<()> { + symlink_junction_inner(src.as_ref(), dst.as_ref()) +} + +// Creating a directory junction on windows involves dealing with reparse +// points and the DeviceIoControl function, and this code is a skeleton of +// what can be found here: +// +// http://www.flexhex.com/docs/articles/hard-links.phtml +#[allow(dead_code)] +fn symlink_junction_inner(target: &Path, junction: &Path) -> io::Result<()> { + let d = DirBuilder::new(); + d.mkdir(&junction)?; + + let mut opts = OpenOptions::new(); + opts.write(true); + opts.custom_flags(c::FILE_FLAG_OPEN_REPARSE_POINT | c::FILE_FLAG_BACKUP_SEMANTICS); + let f = File::open(junction, &opts)?; + let h = f.handle().raw(); + + unsafe { + let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + let db = data.as_mut_ptr() as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER; + let buf = &mut (*db).ReparseTarget as *mut c::WCHAR; + let mut i = 0; + // FIXME: this conversion is very hacky + let v = br"\??\"; + let v = v.iter().map(|x| *x as u16); + for c in v.chain(target.as_os_str().encode_wide()) { + *buf.offset(i) = c; + i += 1; + } + *buf.offset(i) = 0; + i += 1; + (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT; + (*db).ReparseTargetMaximumLength = (i * 2) as c::WORD; + (*db).ReparseTargetLength = ((i - 1) * 2) as c::WORD; + (*db).ReparseDataLength = (*db).ReparseTargetLength as c::DWORD + 12; + + let mut ret = 0; + cvt(c::DeviceIoControl( + h as *mut _, + c::FSCTL_SET_REPARSE_POINT, + data.as_ptr() as *mut _, + (*db).ReparseDataLength + 8, + ptr::null_mut(), + 0, + &mut ret, + ptr::null_mut(), + )) + .map(drop) + } +} diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs new file mode 100644 index 00000000000..0d4baa3b340 --- /dev/null +++ b/library/std/src/sys/windows/handle.rs @@ -0,0 +1,233 @@ +#![unstable(issue = "none", feature = "windows_handle")] + +use crate::cmp; +use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, Read}; +use crate::mem; +use crate::ops::Deref; +use crate::ptr; +use crate::sys::c; +use crate::sys::cvt; + +/// An owned container for `HANDLE` object, closing them on Drop. +/// +/// All methods are inherited through a `Deref` impl to `RawHandle` +pub struct Handle(RawHandle); + +/// A wrapper type for `HANDLE` objects to give them proper Send/Sync inference +/// as well as Rust-y methods. +/// +/// This does **not** drop the handle when it goes out of scope, use `Handle` +/// instead for that. 
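A minimal sketch (not the std implementation) of the owned/borrowed split described above: an owning wrapper closes the `HANDLE` when dropped, while a plain copyable wrapper does not. `CloseHandle` is the real kernel32 function, declared here directly for the sketch.

use std::ffi::c_void;

#[link(name = "kernel32")]
extern "system" {
    fn CloseHandle(handle: *mut c_void) -> i32;
}

/// Owns a HANDLE and closes it on drop, analogous to `Handle` above.
struct OwnedHandle(*mut c_void);

impl Drop for OwnedHandle {
    fn drop(&mut self) {
        // The result of the close is ignored, as in the std wrapper's Drop impl.
        unsafe {
            let _ = CloseHandle(self.0);
        }
    }
}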
+#[derive(Copy, Clone)] +pub struct RawHandle(c::HANDLE); + +unsafe impl Send for RawHandle {} +unsafe impl Sync for RawHandle {} + +impl Handle { + pub fn new(handle: c::HANDLE) -> Handle { + Handle(RawHandle::new(handle)) + } + + pub fn new_event(manual: bool, init: bool) -> io::Result<Handle> { + unsafe { + let event = + c::CreateEventW(ptr::null_mut(), manual as c::BOOL, init as c::BOOL, ptr::null()); + if event.is_null() { Err(io::Error::last_os_error()) } else { Ok(Handle::new(event)) } + } + } + + pub fn into_raw(self) -> c::HANDLE { + let ret = self.raw(); + mem::forget(self); + ret + } +} + +impl Deref for Handle { + type Target = RawHandle; + fn deref(&self) -> &RawHandle { + &self.0 + } +} + +impl Drop for Handle { + fn drop(&mut self) { + unsafe { + let _ = c::CloseHandle(self.raw()); + } + } +} + +impl RawHandle { + pub fn new(handle: c::HANDLE) -> RawHandle { + RawHandle(handle) + } + + pub fn raw(&self) -> c::HANDLE { + self.0 + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + let mut read = 0; + let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD; + let res = cvt(unsafe { + c::ReadFile(self.0, buf.as_mut_ptr() as c::LPVOID, len, &mut read, ptr::null_mut()) + }); + + match res { + Ok(_) => Ok(read as usize), + + // The special treatment of BrokenPipe is to deal with Windows + // pipe semantics, which yields this error when *reading* from + // a pipe after the other end has closed; we interpret that as + // EOF on the pipe. + Err(ref e) if e.kind() == ErrorKind::BrokenPipe => Ok(0), + + Err(e) => Err(e), + } + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + crate::io::default_read_vectored(|buf| self.read(buf), bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + false + } + + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { + let mut read = 0; + let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD; + let res = unsafe { + let mut overlapped: c::OVERLAPPED = mem::zeroed(); + overlapped.Offset = offset as u32; + overlapped.OffsetHigh = (offset >> 32) as u32; + cvt(c::ReadFile(self.0, buf.as_mut_ptr() as c::LPVOID, len, &mut read, &mut overlapped)) + }; + match res { + Ok(_) => Ok(read as usize), + Err(ref e) if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32) => Ok(0), + Err(e) => Err(e), + } + } + + pub unsafe fn read_overlapped( + &self, + buf: &mut [u8], + overlapped: *mut c::OVERLAPPED, + ) -> io::Result<Option<usize>> { + let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD; + let mut amt = 0; + let res = cvt(c::ReadFile(self.0, buf.as_ptr() as c::LPVOID, len, &mut amt, overlapped)); + match res { + Ok(_) => Ok(Some(amt as usize)), + Err(e) => { + if e.raw_os_error() == Some(c::ERROR_IO_PENDING as i32) { + Ok(None) + } else if e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32) { + Ok(Some(0)) + } else { + Err(e) + } + } + } + } + + pub fn overlapped_result( + &self, + overlapped: *mut c::OVERLAPPED, + wait: bool, + ) -> io::Result<usize> { + unsafe { + let mut bytes = 0; + let wait = if wait { c::TRUE } else { c::FALSE }; + let res = cvt(c::GetOverlappedResult(self.raw(), overlapped, &mut bytes, wait)); + match res { + Ok(_) => Ok(bytes as usize), + Err(e) => { + if e.raw_os_error() == Some(c::ERROR_HANDLE_EOF as i32) + || e.raw_os_error() == Some(c::ERROR_BROKEN_PIPE as i32) + { + Ok(0) + } else { + Err(e) + } + } + } + } + } + + pub fn cancel_io(&self) -> io::Result<()> { + unsafe { cvt(c::CancelIo(self.raw())).map(drop) } 
+ } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let mut amt = 0; + let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD; + cvt(unsafe { + c::WriteFile(self.0, buf.as_ptr() as c::LPVOID, len, &mut amt, ptr::null_mut()) + })?; + Ok(amt as usize) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + crate::io::default_write_vectored(|buf| self.write(buf), bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + false + } + + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { + let mut written = 0; + let len = cmp::min(buf.len(), <c::DWORD>::MAX as usize) as c::DWORD; + unsafe { + let mut overlapped: c::OVERLAPPED = mem::zeroed(); + overlapped.Offset = offset as u32; + overlapped.OffsetHigh = (offset >> 32) as u32; + cvt(c::WriteFile( + self.0, + buf.as_ptr() as c::LPVOID, + len, + &mut written, + &mut overlapped, + ))?; + } + Ok(written as usize) + } + + pub fn duplicate( + &self, + access: c::DWORD, + inherit: bool, + options: c::DWORD, + ) -> io::Result<Handle> { + let mut ret = 0 as c::HANDLE; + cvt(unsafe { + let cur_proc = c::GetCurrentProcess(); + c::DuplicateHandle( + cur_proc, + self.0, + cur_proc, + &mut ret, + access, + inherit as c::BOOL, + options, + ) + })?; + Ok(Handle::new(ret)) + } +} + +impl<'a> Read for &'a RawHandle { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + (**self).read(buf) + } + + fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + (**self).read_vectored(bufs) + } +} diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs new file mode 100644 index 00000000000..fb06df1f80c --- /dev/null +++ b/library/std/src/sys/windows/io.rs @@ -0,0 +1,80 @@ +use crate::marker::PhantomData; +use crate::slice; +use crate::sys::c; + +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct IoSlice<'a> { + vec: c::WSABUF, + _p: PhantomData<&'a [u8]>, +} + +impl<'a> IoSlice<'a> { + #[inline] + pub fn new(buf: &'a [u8]) -> IoSlice<'a> { + assert!(buf.len() <= c::ULONG::MAX as usize); + IoSlice { + vec: c::WSABUF { + len: buf.len() as c::ULONG, + buf: buf.as_ptr() as *mut u8 as *mut c::CHAR, + }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if (self.vec.len as usize) < n { + panic!("advancing IoSlice beyond its length"); + } + + unsafe { + self.vec.len -= n as c::ULONG; + self.vec.buf = self.vec.buf.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) } + } +} + +#[repr(transparent)] +pub struct IoSliceMut<'a> { + vec: c::WSABUF, + _p: PhantomData<&'a mut [u8]>, +} + +impl<'a> IoSliceMut<'a> { + #[inline] + pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> { + assert!(buf.len() <= c::ULONG::MAX as usize); + IoSliceMut { + vec: c::WSABUF { len: buf.len() as c::ULONG, buf: buf.as_mut_ptr() as *mut c::CHAR }, + _p: PhantomData, + } + } + + #[inline] + pub fn advance(&mut self, n: usize) { + if (self.vec.len as usize) < n { + panic!("advancing IoSliceMut beyond its length"); + } + + unsafe { + self.vec.len -= n as c::ULONG; + self.vec.buf = self.vec.buf.add(n); + } + } + + #[inline] + pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.vec.buf as *mut u8, self.vec.len as usize) } + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) } + } +} diff --git 
a/library/std/src/sys/windows/memchr.rs b/library/std/src/sys/windows/memchr.rs new file mode 100644 index 00000000000..b9e5bcc1b4b --- /dev/null +++ b/library/std/src/sys/windows/memchr.rs @@ -0,0 +1,5 @@ +// Original implementation taken from rust-memchr. +// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch + +// Fallback memchr is fastest on Windows. +pub use core::slice::memchr::{memchr, memrchr}; diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs new file mode 100644 index 00000000000..9a52371280e --- /dev/null +++ b/library/std/src/sys/windows/mod.rs @@ -0,0 +1,334 @@ +#![allow(missing_docs, nonstandard_style)] + +use crate::ffi::{OsStr, OsString}; +use crate::io::ErrorKind; +use crate::os::windows::ffi::{OsStrExt, OsStringExt}; +use crate::path::PathBuf; +use crate::ptr; +use crate::time::Duration; + +pub use self::rand::hashmap_random_keys; +pub use libc::strlen; + +#[macro_use] +pub mod compat; + +pub mod alloc; +pub mod args; +pub mod c; +pub mod cmath; +pub mod condvar; +pub mod env; +pub mod ext; +pub mod fs; +pub mod handle; +pub mod io; +pub mod memchr; +pub mod mutex; +pub mod net; +pub mod os; +pub mod os_str; +pub mod path; +pub mod pipe; +pub mod process; +pub mod rand; +pub mod rwlock; +pub mod thread; +pub mod thread_local_dtor; +pub mod thread_local_key; +pub mod time; +cfg_if::cfg_if! { + if #[cfg(not(target_vendor = "uwp"))] { + pub mod stdio; + pub mod stack_overflow; + } else { + pub mod stdio_uwp; + pub mod stack_overflow_uwp; + pub use self::stdio_uwp as stdio; + pub use self::stack_overflow_uwp as stack_overflow; + } +} + +#[cfg(not(test))] +pub fn init() {} + +pub fn decode_error_kind(errno: i32) -> ErrorKind { + match errno as c::DWORD { + c::ERROR_ACCESS_DENIED => return ErrorKind::PermissionDenied, + c::ERROR_ALREADY_EXISTS => return ErrorKind::AlreadyExists, + c::ERROR_FILE_EXISTS => return ErrorKind::AlreadyExists, + c::ERROR_BROKEN_PIPE => return ErrorKind::BrokenPipe, + c::ERROR_FILE_NOT_FOUND => return ErrorKind::NotFound, + c::ERROR_PATH_NOT_FOUND => return ErrorKind::NotFound, + c::ERROR_NO_DATA => return ErrorKind::BrokenPipe, + c::ERROR_INVALID_PARAMETER => return ErrorKind::InvalidInput, + c::ERROR_SEM_TIMEOUT + | c::WAIT_TIMEOUT + | c::ERROR_DRIVER_CANCEL_TIMEOUT + | c::ERROR_OPERATION_ABORTED + | c::ERROR_SERVICE_REQUEST_TIMEOUT + | c::ERROR_COUNTER_TIMEOUT + | c::ERROR_TIMEOUT + | c::ERROR_RESOURCE_CALL_TIMED_OUT + | c::ERROR_CTX_MODEM_RESPONSE_TIMEOUT + | c::ERROR_CTX_CLIENT_QUERY_TIMEOUT + | c::FRS_ERR_SYSVOL_POPULATE_TIMEOUT + | c::ERROR_DS_TIMELIMIT_EXCEEDED + | c::DNS_ERROR_RECORD_TIMED_OUT + | c::ERROR_IPSEC_IKE_TIMED_OUT + | c::ERROR_RUNLEVEL_SWITCH_TIMEOUT + | c::ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT => return ErrorKind::TimedOut, + _ => {} + } + + match errno { + c::WSAEACCES => ErrorKind::PermissionDenied, + c::WSAEADDRINUSE => ErrorKind::AddrInUse, + c::WSAEADDRNOTAVAIL => ErrorKind::AddrNotAvailable, + c::WSAECONNABORTED => ErrorKind::ConnectionAborted, + c::WSAECONNREFUSED => ErrorKind::ConnectionRefused, + c::WSAECONNRESET => ErrorKind::ConnectionReset, + c::WSAEINVAL => ErrorKind::InvalidInput, + c::WSAENOTCONN => ErrorKind::NotConnected, + c::WSAEWOULDBLOCK => ErrorKind::WouldBlock, + c::WSAETIMEDOUT => ErrorKind::TimedOut, + + _ => ErrorKind::Other, + } +} + +pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> { + let ptr = haystack.as_ptr(); + let mut len = haystack.len(); + let mut start = &haystack[..]; + + // For performance reasons unfold the loop eight times. 
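Semantically this function is an ordinary position search over `u16`s; the eight-way manual unrolling below exists purely for speed. A straightforward equivalent, for reference (a sketch, not the std code):

fn find_u16(needle: u16, haystack: &[u16]) -> Option<usize> {
    // Same result as unrolled_find_u16s, without the unrolling.
    haystack.iter().position(|&c| c == needle)
}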
+ while len >= 8 { + if start[0] == needle { + return Some((start.as_ptr() as usize - ptr as usize) / 2); + } + if start[1] == needle { + return Some((start[1..].as_ptr() as usize - ptr as usize) / 2); + } + if start[2] == needle { + return Some((start[2..].as_ptr() as usize - ptr as usize) / 2); + } + if start[3] == needle { + return Some((start[3..].as_ptr() as usize - ptr as usize) / 2); + } + if start[4] == needle { + return Some((start[4..].as_ptr() as usize - ptr as usize) / 2); + } + if start[5] == needle { + return Some((start[5..].as_ptr() as usize - ptr as usize) / 2); + } + if start[6] == needle { + return Some((start[6..].as_ptr() as usize - ptr as usize) / 2); + } + if start[7] == needle { + return Some((start[7..].as_ptr() as usize - ptr as usize) / 2); + } + + start = &start[8..]; + len -= 8; + } + + for (i, c) in start.iter().enumerate() { + if *c == needle { + return Some((start.as_ptr() as usize - ptr as usize) / 2 + i); + } + } + None +} + +pub fn to_u16s<S: AsRef<OsStr>>(s: S) -> crate::io::Result<Vec<u16>> { + fn inner(s: &OsStr) -> crate::io::Result<Vec<u16>> { + let mut maybe_result: Vec<u16> = s.encode_wide().collect(); + if unrolled_find_u16s(0, &maybe_result).is_some() { + return Err(crate::io::Error::new( + ErrorKind::InvalidInput, + "strings passed to WinAPI cannot contain NULs", + )); + } + maybe_result.push(0); + Ok(maybe_result) + } + inner(s.as_ref()) +} + +// Many Windows APIs follow a pattern of where we hand a buffer and then they +// will report back to us how large the buffer should be or how many bytes +// currently reside in the buffer. This function is an abstraction over these +// functions by making them easier to call. +// +// The first callback, `f1`, is yielded a (pointer, len) pair which can be +// passed to a syscall. The `ptr` is valid for `len` items (u16 in this case). +// The closure is expected to return what the syscall returns which will be +// interpreted by this function to determine if the syscall needs to be invoked +// again (with more buffer space). +// +// Once the syscall has completed (errors bail out early) the second closure is +// yielded the data which has been read from the syscall. The return value +// from this closure is then the return value of the function. +fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T> +where + F1: FnMut(*mut u16, c::DWORD) -> c::DWORD, + F2: FnOnce(&[u16]) -> T, +{ + // Start off with a stack buf but then spill over to the heap if we end up + // needing more space. + let mut stack_buf = [0u16; 512]; + let mut heap_buf = Vec::new(); + unsafe { + let mut n = stack_buf.len(); + loop { + let buf = if n <= stack_buf.len() { + &mut stack_buf[..] + } else { + let extra = n - heap_buf.len(); + heap_buf.reserve(extra); + heap_buf.set_len(n); + &mut heap_buf[..] + }; + + // This function is typically called on windows API functions which + // will return the correct length of the string, but these functions + // also return the `0` on error. In some cases, however, the + // returned "correct length" may actually be 0! + // + // To handle this case we call `SetLastError` to reset it to 0 and + // then check it again if we get the "0 error value". If the "last + // error" is still 0 then we interpret it as a 0 length buffer and + // not an actual error. 
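For reference, the contract of `to_u16s` above expressed with the public `std::os::windows::ffi::OsStrExt` API: UTF-16 code units, interior NULs rejected, and a terminating NUL appended (a sketch of the same behaviour, not the internal helper itself).

use std::ffi::OsStr;
use std::io;
use std::os::windows::ffi::OsStrExt;

fn to_wide(s: &OsStr) -> io::Result<Vec<u16>> {
    let mut wide: Vec<u16> = s.encode_wide().collect();
    if wide.contains(&0) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "strings passed to WinAPI cannot contain NULs",
        ));
    }
    wide.push(0); // NUL-terminate for the wide-string Win32 APIs
    Ok(wide)
}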
+ c::SetLastError(0); + let k = match f1(buf.as_mut_ptr(), n as c::DWORD) { + 0 if c::GetLastError() == 0 => 0, + 0 => return Err(crate::io::Error::last_os_error()), + n => n, + } as usize; + if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER { + n *= 2; + } else if k >= n { + n = k; + } else { + return Ok(f2(&buf[..k])); + } + } + } +} + +fn os2path(s: &[u16]) -> PathBuf { + PathBuf::from(OsString::from_wide(s)) +} + +#[allow(dead_code)] // Only used in backtrace::gnu::get_executable_filename() +fn wide_char_to_multi_byte( + code_page: u32, + flags: u32, + s: &[u16], + no_default_char: bool, +) -> crate::io::Result<Vec<i8>> { + unsafe { + let mut size = c::WideCharToMultiByte( + code_page, + flags, + s.as_ptr(), + s.len() as i32, + ptr::null_mut(), + 0, + ptr::null(), + ptr::null_mut(), + ); + if size == 0 { + return Err(crate::io::Error::last_os_error()); + } + + let mut buf = Vec::with_capacity(size as usize); + buf.set_len(size as usize); + + let mut used_default_char = c::FALSE; + size = c::WideCharToMultiByte( + code_page, + flags, + s.as_ptr(), + s.len() as i32, + buf.as_mut_ptr(), + buf.len() as i32, + ptr::null(), + if no_default_char { &mut used_default_char } else { ptr::null_mut() }, + ); + if size == 0 { + return Err(crate::io::Error::last_os_error()); + } + if no_default_char && used_default_char == c::TRUE { + return Err(crate::io::Error::new( + crate::io::ErrorKind::InvalidData, + "string cannot be converted to requested code page", + )); + } + + buf.set_len(size as usize); + + Ok(buf) + } +} + +pub fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] { + match unrolled_find_u16s(0, v) { + // don't include the 0 + Some(i) => &v[..i], + None => v, + } +} + +pub trait IsZero { + fn is_zero(&self) -> bool; +} + +macro_rules! impl_is_zero { + ($($t:ident)*) => ($(impl IsZero for $t { + fn is_zero(&self) -> bool { + *self == 0 + } + })*) +} + +impl_is_zero! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } + +pub fn cvt<I: IsZero>(i: I) -> crate::io::Result<I> { + if i.is_zero() { Err(crate::io::Error::last_os_error()) } else { Ok(i) } +} + +pub fn dur2timeout(dur: Duration) -> c::DWORD { + // Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the + // timeouts in windows APIs are typically u32 milliseconds. To translate, we + // have two pieces to take care of: + // + // * Nanosecond precision is rounded up + // * Greater than u32::MAX milliseconds (50 days) is rounded up to INFINITE + // (never time out). + dur.as_secs() + .checked_mul(1000) + .and_then(|ms| ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000)) + .and_then(|ms| ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 { 1 } else { 0 })) + .map(|ms| if ms > <c::DWORD>::MAX as u64 { c::INFINITE } else { ms as c::DWORD }) + .unwrap_or(c::INFINITE) +} + +// On Windows, use the processor-specific __fastfail mechanism. In Windows 8 +// and later, this will terminate the process immediately without running any +// in-process exception handlers. In earlier versions of Windows, this +// sequence of instructions will be treated as an access violation, +// terminating the process but without necessarily bypassing all exception +// handlers. +// +// https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail +#[allow(unreachable_code)] +pub fn abort_internal() -> ! 
{ + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + unsafe { + llvm_asm!("int $$0x29" :: "{ecx}"(7) ::: volatile); // 7 is FAST_FAIL_FATAL_APP_EXIT + crate::intrinsics::unreachable(); + } + crate::intrinsics::abort(); +} diff --git a/library/std/src/sys/windows/mutex.rs b/library/std/src/sys/windows/mutex.rs new file mode 100644 index 00000000000..63dfc640908 --- /dev/null +++ b/library/std/src/sys/windows/mutex.rs @@ -0,0 +1,184 @@ +//! System Mutexes +//! +//! The Windows implementation of mutexes is a little odd and it may not be +//! immediately obvious what's going on. The primary oddness is that SRWLock is +//! used instead of CriticalSection, and this is done because: +//! +//! 1. SRWLock is several times faster than CriticalSection according to +//! benchmarks performed on both Windows 8 and Windows 7. +//! +//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The +//! Unix implementation deadlocks so consistency is preferred. See #19962 for +//! more details. +//! +//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy +//! is that there are no guarantees of fairness. +//! +//! The downside of this approach, however, is that SRWLock is not available on +//! Windows XP, so we continue to have a fallback implementation where +//! CriticalSection is used and we keep track of who's holding the mutex to +//! detect recursive locks. + +use crate::cell::UnsafeCell; +use crate::mem::{self, MaybeUninit}; +use crate::sync::atomic::{AtomicUsize, Ordering}; +use crate::sys::c; +use crate::sys::compat; + +pub struct Mutex { + lock: AtomicUsize, + held: UnsafeCell<bool>, +} + +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +#[derive(Clone, Copy)] +enum Kind { + SRWLock = 1, + CriticalSection = 2, +} + +#[inline] +pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK { + debug_assert!(mem::size_of::<c::SRWLOCK>() <= mem::size_of_val(&m.lock)); + &m.lock as *const _ as *mut _ +} + +impl Mutex { + pub const fn new() -> Mutex { + Mutex { + // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly + // initializing an SRWLOCK here. 
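A sketch of the layout invariant that comment relies on: `SRWLOCK` is a single pointer-sized word whose initial value (`SRWLOCK_INIT`) is all zero bits, so `AtomicUsize::new(0)` doubles as an initialized lock. `SrwLock` below is a hypothetical local mirror of the C type; the real code uses `c::SRWLOCK`.

use std::ffi::c_void;
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;

#[repr(C)]
#[allow(dead_code)]
struct SrwLock {
    ptr: *mut c_void, // mirrors the single pointer field of the Win32 SRWLOCK
}

fn check_layout() {
    // The AtomicUsize used as storage must be at least as large as the lock word.
    assert!(size_of::<SrwLock>() <= size_of::<AtomicUsize>());
}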
+ lock: AtomicUsize::new(0), + held: UnsafeCell::new(false), + } + } + #[inline] + pub unsafe fn init(&mut self) {} + pub unsafe fn lock(&self) { + match kind() { + Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)), + Kind::CriticalSection => { + let re = self.remutex(); + (*re).lock(); + if !self.flag_locked() { + (*re).unlock(); + panic!("cannot recursively lock a mutex"); + } + } + } + } + pub unsafe fn try_lock(&self) -> bool { + match kind() { + Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0, + Kind::CriticalSection => { + let re = self.remutex(); + if !(*re).try_lock() { + false + } else if self.flag_locked() { + true + } else { + (*re).unlock(); + false + } + } + } + } + pub unsafe fn unlock(&self) { + *self.held.get() = false; + match kind() { + Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)), + Kind::CriticalSection => (*self.remutex()).unlock(), + } + } + pub unsafe fn destroy(&self) { + match kind() { + Kind::SRWLock => {} + Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) { + 0 => {} + n => { + Box::from_raw(n as *mut ReentrantMutex).destroy(); + } + }, + } + } + + unsafe fn remutex(&self) -> *mut ReentrantMutex { + match self.lock.load(Ordering::SeqCst) { + 0 => {} + n => return n as *mut _, + } + let re = box ReentrantMutex::uninitialized(); + re.init(); + let re = Box::into_raw(re); + match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) { + 0 => re, + n => { + Box::from_raw(re).destroy(); + n as *mut _ + } + } + } + + unsafe fn flag_locked(&self) -> bool { + if *self.held.get() { + false + } else { + *self.held.get() = true; + true + } + } +} + +fn kind() -> Kind { + static KIND: AtomicUsize = AtomicUsize::new(0); + + let val = KIND.load(Ordering::SeqCst); + if val == Kind::SRWLock as usize { + return Kind::SRWLock; + } else if val == Kind::CriticalSection as usize { + return Kind::CriticalSection; + } + + let ret = match compat::lookup("kernel32", "AcquireSRWLockExclusive") { + None => Kind::CriticalSection, + Some(..) 
=> Kind::SRWLock, + }; + KIND.store(ret as usize, Ordering::SeqCst); + ret +} + +pub struct ReentrantMutex { + inner: UnsafeCell<MaybeUninit<c::CRITICAL_SECTION>>, +} + +unsafe impl Send for ReentrantMutex {} +unsafe impl Sync for ReentrantMutex {} + +impl ReentrantMutex { + pub const fn uninitialized() -> ReentrantMutex { + ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) } + } + + pub unsafe fn init(&self) { + c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr()); + } + + pub unsafe fn lock(&self) { + c::EnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + c::TryEnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()) != 0 + } + + pub unsafe fn unlock(&self) { + c::LeaveCriticalSection((&mut *self.inner.get()).as_mut_ptr()); + } + + pub unsafe fn destroy(&self) { + c::DeleteCriticalSection((&mut *self.inner.get()).as_mut_ptr()); + } +} diff --git a/library/std/src/sys/windows/net.rs b/library/std/src/sys/windows/net.rs new file mode 100644 index 00000000000..9e74454bc23 --- /dev/null +++ b/library/std/src/sys/windows/net.rs @@ -0,0 +1,438 @@ +#![unstable(issue = "none", feature = "windows_net")] + +use crate::cmp; +use crate::io::{self, IoSlice, IoSliceMut, Read}; +use crate::mem; +use crate::net::{Shutdown, SocketAddr}; +use crate::ptr; +use crate::sync::Once; +use crate::sys; +use crate::sys::c; +use crate::sys_common::net; +use crate::sys_common::{self, AsInner, FromInner, IntoInner}; +use crate::time::Duration; + +use libc::{c_int, c_long, c_ulong, c_void}; + +pub type wrlen_t = i32; + +pub mod netc { + pub use crate::sys::c::ADDRESS_FAMILY as sa_family_t; + pub use crate::sys::c::ADDRINFOA as addrinfo; + pub use crate::sys::c::SOCKADDR as sockaddr; + pub use crate::sys::c::SOCKADDR_STORAGE_LH as sockaddr_storage; + pub use crate::sys::c::*; +} + +pub struct Socket(c::SOCKET); + +/// Checks whether the Windows socket interface has been started already, and +/// if not, starts it. +pub fn init() { + static START: Once = Once::new(); + + START.call_once(|| unsafe { + let mut data: c::WSADATA = mem::zeroed(); + let ret = c::WSAStartup( + 0x202, // version 2.2 + &mut data, + ); + assert_eq!(ret, 0); + + let _ = sys_common::at_exit(|| { + c::WSACleanup(); + }); + }); +} + +/// Returns the last error from the Windows socket interface. +fn last_error() -> io::Error { + io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() }) +} + +#[doc(hidden)] +pub trait IsMinusOne { + fn is_minus_one(&self) -> bool; +} + +macro_rules! impl_is_minus_one { + ($($t:ident)*) => ($(impl IsMinusOne for $t { + fn is_minus_one(&self) -> bool { + *self == -1 + } + })*) +} + +impl_is_minus_one! { i8 i16 i32 i64 isize } + +/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1) +/// and if so, returns the last error from the Windows socket interface. This +/// function must be called before another call to the socket API is made. +pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> { + if t.is_minus_one() { Err(last_error()) } else { Ok(t) } +} + +/// A variant of `cvt` for `getaddrinfo` which return 0 for a success. 
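The `init` function above uses the standard one-shot initialization pattern; a minimal standalone sketch of that pattern, with a print standing in for the real `WSAStartup`/`at_exit(WSACleanup)` work:

use std::sync::Once;

static START: Once = Once::new();

fn ensure_initialized() {
    START.call_once(|| {
        // Runs exactly once, even if many threads race to create the first socket.
        println!("winsock initialized");
    });
}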
+pub fn cvt_gai(err: c_int) -> io::Result<()> { + if err == 0 { Ok(()) } else { Err(last_error()) } +} + +/// Just to provide the same interface as sys/unix/net.rs +pub fn cvt_r<T, F>(mut f: F) -> io::Result<T> +where + T: IsMinusOne, + F: FnMut() -> T, +{ + cvt(f()) +} + +impl Socket { + pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> { + let fam = match *addr { + SocketAddr::V4(..) => c::AF_INET, + SocketAddr::V6(..) => c::AF_INET6, + }; + let socket = unsafe { + match c::WSASocketW( + fam, + ty, + 0, + ptr::null_mut(), + 0, + c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT, + ) { + c::INVALID_SOCKET => match c::WSAGetLastError() { + c::WSAEPROTOTYPE | c::WSAEINVAL => { + match c::WSASocketW(fam, ty, 0, ptr::null_mut(), 0, c::WSA_FLAG_OVERLAPPED) + { + c::INVALID_SOCKET => Err(last_error()), + n => { + let s = Socket(n); + s.set_no_inherit()?; + Ok(s) + } + } + } + n => Err(io::Error::from_raw_os_error(n)), + }, + n => Ok(Socket(n)), + } + }?; + Ok(socket) + } + + pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = unsafe { + let (addrp, len) = addr.into_inner(); + cvt(c::connect(self.0, addrp, len)) + }; + self.set_nonblocking(false)?; + + match r { + Ok(_) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} + Err(e) => return Err(e), + } + + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let mut timeout = c::timeval { + tv_sec: timeout.as_secs() as c_long, + tv_usec: (timeout.subsec_nanos() / 1000) as c_long, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + + let fds = unsafe { + let mut fds = mem::zeroed::<c::fd_set>(); + fds.fd_count = 1; + fds.fd_array[0] = self.0; + fds + }; + + let mut writefds = fds; + let mut errorfds = fds; + + let n = + unsafe { cvt(c::select(1, ptr::null_mut(), &mut writefds, &mut errorfds, &timeout))? }; + + match n { + 0 => Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")), + _ => { + if writefds.fd_count != 1 { + if let Some(e) = self.take_error()? 
{ + return Err(e); + } + } + Ok(()) + } + } + } + + pub fn accept(&self, storage: *mut c::SOCKADDR, len: *mut c_int) -> io::Result<Socket> { + let socket = unsafe { + match c::accept(self.0, storage, len) { + c::INVALID_SOCKET => Err(last_error()), + n => Ok(Socket(n)), + } + }?; + Ok(socket) + } + + pub fn duplicate(&self) -> io::Result<Socket> { + let socket = unsafe { + let mut info: c::WSAPROTOCOL_INFO = mem::zeroed(); + cvt(c::WSADuplicateSocketW(self.0, c::GetCurrentProcessId(), &mut info))?; + + match c::WSASocketW( + info.iAddressFamily, + info.iSocketType, + info.iProtocol, + &mut info, + 0, + c::WSA_FLAG_OVERLAPPED | c::WSA_FLAG_NO_HANDLE_INHERIT, + ) { + c::INVALID_SOCKET => match c::WSAGetLastError() { + c::WSAEPROTOTYPE | c::WSAEINVAL => { + match c::WSASocketW( + info.iAddressFamily, + info.iSocketType, + info.iProtocol, + &mut info, + 0, + c::WSA_FLAG_OVERLAPPED, + ) { + c::INVALID_SOCKET => Err(last_error()), + n => { + let s = Socket(n); + s.set_no_inherit()?; + Ok(s) + } + } + } + n => Err(io::Error::from_raw_os_error(n)), + }, + n => Ok(Socket(n)), + } + }?; + Ok(socket) + } + + fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> { + // On unix when a socket is shut down all further reads return 0, so we + // do the same on windows to map a shut down socket to returning EOF. + let len = cmp::min(buf.len(), i32::MAX as usize) as i32; + unsafe { + match c::recv(self.0, buf.as_mut_ptr() as *mut c_void, len, flags) { + -1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0), + -1 => Err(last_error()), + n => Ok(n as usize), + } + } + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, 0) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + // On unix when a socket is shut down all further reads return 0, so we + // do the same on windows to map a shut down socket to returning EOF. + let len = cmp::min(bufs.len(), c::DWORD::MAX as usize) as c::DWORD; + let mut nread = 0; + let mut flags = 0; + unsafe { + let ret = c::WSARecv( + self.0, + bufs.as_mut_ptr() as *mut c::WSABUF, + len, + &mut nread, + &mut flags, + ptr::null_mut(), + ptr::null_mut(), + ); + match ret { + 0 => Ok(nread as usize), + _ if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0), + _ => Err(last_error()), + } + } + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + true + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, c::MSG_PEEK) + } + + fn recv_from_with_flags( + &self, + buf: &mut [u8], + flags: c_int, + ) -> io::Result<(usize, SocketAddr)> { + let mut storage: c::SOCKADDR_STORAGE_LH = unsafe { mem::zeroed() }; + let mut addrlen = mem::size_of_val(&storage) as c::socklen_t; + let len = cmp::min(buf.len(), <wrlen_t>::MAX as usize) as wrlen_t; + + // On unix when a socket is shut down all further reads return 0, so we + // do the same on windows to map a shut down socket to returning EOF. 
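The `MSG_PEEK` path above (`recv_with_flags`/`peek`) is what backs the stable `TcpStream::peek` API; a short caller-side example, assuming a connected stream is already at hand:

use std::io::Read;
use std::net::TcpStream;

fn peek_then_read(stream: &mut TcpStream) -> std::io::Result<()> {
    let mut peeked = [0u8; 4];
    let n = stream.peek(&mut peeked)?; // data stays queued in the socket
    let mut buf = [0u8; 4];
    let m = stream.read(&mut buf)?; // the same bytes are consumed here
    println!("peeked {} bytes, then read {} bytes", n, m);
    Ok(())
}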
+ unsafe { + match c::recvfrom( + self.0, + buf.as_mut_ptr() as *mut c_void, + len, + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen, + ) { + -1 if c::WSAGetLastError() == c::WSAESHUTDOWN => { + Ok((0, net::sockaddr_to_addr(&storage, addrlen as usize)?)) + } + -1 => Err(last_error()), + n => Ok((n as usize, net::sockaddr_to_addr(&storage, addrlen as usize)?)), + } + } + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, 0) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, c::MSG_PEEK) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + let len = cmp::min(bufs.len(), c::DWORD::MAX as usize) as c::DWORD; + let mut nwritten = 0; + unsafe { + cvt(c::WSASend( + self.0, + bufs.as_ptr() as *const c::WSABUF as *mut c::WSABUF, + len, + &mut nwritten, + 0, + ptr::null_mut(), + ptr::null_mut(), + ))?; + } + Ok(nwritten as usize) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + true + } + + pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> { + let timeout = match dur { + Some(dur) => { + let timeout = sys::dur2timeout(dur); + if timeout == 0 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + timeout + } + None => 0, + }; + net::setsockopt(self, c::SOL_SOCKET, kind, timeout) + } + + pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> { + let raw: c::DWORD = net::getsockopt(self, c::SOL_SOCKET, kind)?; + if raw == 0 { + Ok(None) + } else { + let secs = raw / 1000; + let nsec = (raw % 1000) * 1000000; + Ok(Some(Duration::new(secs as u64, nsec as u32))) + } + } + + #[cfg(not(target_vendor = "uwp"))] + fn set_no_inherit(&self) -> io::Result<()> { + sys::cvt(unsafe { c::SetHandleInformation(self.0 as c::HANDLE, c::HANDLE_FLAG_INHERIT, 0) }) + .map(drop) + } + + #[cfg(target_vendor = "uwp")] + fn set_no_inherit(&self) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Unavailable on UWP")) + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => c::SD_SEND, + Shutdown::Read => c::SD_RECEIVE, + Shutdown::Both => c::SD_BOTH, + }; + cvt(unsafe { c::shutdown(self.0, how) })?; + Ok(()) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + let mut nonblocking = nonblocking as c_ulong; + let r = unsafe { c::ioctlsocket(self.0, c::FIONBIO as c_int, &mut nonblocking) }; + if r == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + net::setsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY, nodelay as c::BYTE) + } + + pub fn nodelay(&self) -> io::Result<bool> { + let raw: c::BYTE = net::getsockopt(self, c::IPPROTO_TCP, c::TCP_NODELAY)?; + Ok(raw != 0) + } + + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + let raw: c_int = net::getsockopt(self, c::SOL_SOCKET, c::SO_ERROR)?; + if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) } + } +} + +#[unstable(reason = "not public", issue = "none", feature = "fd_read")] +impl<'a> Read for &'a Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + (**self).read(buf) + } +} + +impl Drop for Socket { + fn drop(&mut self) { + let _ = unsafe { c::closesocket(self.0) }; + } +} + +impl AsInner<c::SOCKET> for Socket { + fn as_inner(&self) -> &c::SOCKET { + &self.0 + } +} + +impl 
FromInner<c::SOCKET> for Socket { + fn from_inner(sock: c::SOCKET) -> Socket { + Socket(sock) + } +} + +impl IntoInner<c::SOCKET> for Socket { + fn into_inner(self) -> c::SOCKET { + let ret = self.0; + mem::forget(self); + ret + } +} diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs new file mode 100644 index 00000000000..a0da2498bb7 --- /dev/null +++ b/library/std/src/sys/windows/os.rs @@ -0,0 +1,347 @@ +//! Implementation of `std::os` functionality for Windows. + +#![allow(nonstandard_style)] + +use crate::os::windows::prelude::*; + +use crate::error::Error as StdError; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::os::windows::ffi::EncodeWide; +use crate::path::{self, PathBuf}; +use crate::ptr; +use crate::slice; +use crate::sys::{c, cvt}; + +use super::to_u16s; + +pub fn errno() -> i32 { + unsafe { c::GetLastError() as i32 } +} + +/// Gets a detailed string description for the given error number. +pub fn error_string(mut errnum: i32) -> String { + // This value is calculated from the macro + // MAKELANGID(LANG_SYSTEM_DEFAULT, SUBLANG_SYS_DEFAULT) + let langId = 0x0800 as c::DWORD; + + let mut buf = [0 as c::WCHAR; 2048]; + + unsafe { + let mut module = ptr::null_mut(); + let mut flags = 0; + + // NTSTATUS errors may be encoded as HRESULT, which may returned from + // GetLastError. For more information about Windows error codes, see + // `[MS-ERREF]`: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/0642cb2f-2075-4469-918c-4441e69c548a + if (errnum & c::FACILITY_NT_BIT as i32) != 0 { + // format according to https://support.microsoft.com/en-us/help/259693 + const NTDLL_DLL: &[u16] = &[ + 'N' as _, 'T' as _, 'D' as _, 'L' as _, 'L' as _, '.' as _, 'D' as _, 'L' as _, + 'L' as _, 0, + ]; + module = c::GetModuleHandleW(NTDLL_DLL.as_ptr()); + + if !module.is_null() { + errnum ^= c::FACILITY_NT_BIT as i32; + flags = c::FORMAT_MESSAGE_FROM_HMODULE; + } + } + + let res = c::FormatMessageW( + flags | c::FORMAT_MESSAGE_FROM_SYSTEM | c::FORMAT_MESSAGE_IGNORE_INSERTS, + module, + errnum as c::DWORD, + langId, + buf.as_mut_ptr(), + buf.len() as c::DWORD, + ptr::null(), + ) as usize; + if res == 0 { + // Sometimes FormatMessageW can fail e.g., system doesn't like langId, + let fm_err = errno(); + return format!("OS Error {} (FormatMessageW() returned error {})", errnum, fm_err); + } + + match String::from_utf16(&buf[..res]) { + Ok(mut msg) => { + // Trim trailing CRLF inserted by FormatMessageW + let len = msg.trim_end().len(); + msg.truncate(len); + msg + } + Err(..) => format!( + "OS Error {} (FormatMessageW() returned \ + invalid UTF-16)", + errnum + ), + } + } +} + +pub struct Env { + base: c::LPWCH, + cur: c::LPWCH, +} + +impl Iterator for Env { + type Item = (OsString, OsString); + + fn next(&mut self) -> Option<(OsString, OsString)> { + loop { + unsafe { + if *self.cur == 0 { + return None; + } + let p = self.cur as *const u16; + let mut len = 0; + while *p.offset(len) != 0 { + len += 1; + } + let s = slice::from_raw_parts(p, len as usize); + self.cur = self.cur.offset(len + 1); + + // Windows allows environment variables to start with an equals + // symbol (in any other position, this is the separator between + // variable name and value). Since`s` has at least length 1 at + // this point (because the empty string terminates the array of + // environment variables), we can safely slice. 
+ let pos = match s[1..].iter().position(|&u| u == b'=' as u16).map(|p| p + 1) { + Some(p) => p, + None => continue, + }; + return Some(( + OsStringExt::from_wide(&s[..pos]), + OsStringExt::from_wide(&s[pos + 1..]), + )); + } + } + } +} + +impl Drop for Env { + fn drop(&mut self) { + unsafe { + c::FreeEnvironmentStringsW(self.base); + } + } +} + +pub fn env() -> Env { + unsafe { + let ch = c::GetEnvironmentStringsW(); + if ch as usize == 0 { + panic!("failure getting env string from OS: {}", io::Error::last_os_error()); + } + Env { base: ch, cur: ch } + } +} + +pub struct SplitPaths<'a> { + data: EncodeWide<'a>, + must_yield: bool, +} + +pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> { + SplitPaths { data: unparsed.encode_wide(), must_yield: true } +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option<PathBuf> { + // On Windows, the PATH environment variable is semicolon separated. + // Double quotes are used as a way of introducing literal semicolons + // (since c:\some;dir is a valid Windows path). Double quotes are not + // themselves permitted in path names, so there is no way to escape a + // double quote. Quoted regions can appear in arbitrary locations, so + // + // c:\foo;c:\som"e;di"r;c:\bar + // + // Should parse as [c:\foo, c:\some;dir, c:\bar]. + // + // (The above is based on testing; there is no clear reference available + // for the grammar.) + + let must_yield = self.must_yield; + self.must_yield = false; + + let mut in_progress = Vec::new(); + let mut in_quote = false; + for b in self.data.by_ref() { + if b == '"' as u16 { + in_quote = !in_quote; + } else if b == ';' as u16 && !in_quote { + self.must_yield = true; + break; + } else { + in_progress.push(b) + } + } + + if !must_yield && in_progress.is_empty() { + None + } else { + Some(super::os2path(&in_progress)) + } + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError> +where + I: Iterator<Item = T>, + T: AsRef<OsStr>, +{ + let mut joined = Vec::new(); + let sep = b';' as u16; + + for (i, path) in paths.enumerate() { + let path = path.as_ref(); + if i > 0 { + joined.push(sep) + } + let v = path.encode_wide().collect::<Vec<u16>>(); + if v.contains(&(b'"' as u16)) { + return Err(JoinPathsError); + } else if v.contains(&sep) { + joined.push(b'"' as u16); + joined.extend_from_slice(&v[..]); + joined.push(b'"' as u16); + } else { + joined.extend_from_slice(&v[..]); + } + } + + Ok(OsStringExt::from_wide(&joined[..])) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "path segment contains `\"`".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "failed to join paths" + } +} + +pub fn current_exe() -> io::Result<PathBuf> { + super::fill_utf16_buf( + |buf, sz| unsafe { c::GetModuleFileNameW(ptr::null_mut(), buf, sz) }, + super::os2path, + ) +} + +pub fn getcwd() -> io::Result<PathBuf> { + super::fill_utf16_buf(|buf, sz| unsafe { c::GetCurrentDirectoryW(sz, buf) }, super::os2path) +} + +pub fn chdir(p: &path::Path) -> io::Result<()> { + let p: &OsStr = p.as_ref(); + let mut p = p.encode_wide().collect::<Vec<_>>(); + p.push(0); + + cvt(unsafe { c::SetCurrentDirectoryW(p.as_ptr()) }).map(drop) +} + +pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { + let k = to_u16s(k)?; + let res = super::fill_utf16_buf( + |buf, sz| unsafe { c::GetEnvironmentVariableW(k.as_ptr(), buf, sz) }, + |buf| 
OsStringExt::from_wide(buf), + ); + match res { + Ok(value) => Ok(Some(value)), + Err(e) => { + if e.raw_os_error() == Some(c::ERROR_ENVVAR_NOT_FOUND as i32) { + Ok(None) + } else { + Err(e) + } + } + } +} + +pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { + let k = to_u16s(k)?; + let v = to_u16s(v)?; + + cvt(unsafe { c::SetEnvironmentVariableW(k.as_ptr(), v.as_ptr()) }).map(drop) +} + +pub fn unsetenv(n: &OsStr) -> io::Result<()> { + let v = to_u16s(n)?; + cvt(unsafe { c::SetEnvironmentVariableW(v.as_ptr(), ptr::null()) }).map(drop) +} + +pub fn temp_dir() -> PathBuf { + super::fill_utf16_buf(|buf, sz| unsafe { c::GetTempPathW(sz, buf) }, super::os2path).unwrap() +} + +#[cfg(not(target_vendor = "uwp"))] +fn home_dir_crt() -> Option<PathBuf> { + unsafe { + use crate::sys::handle::Handle; + + let me = c::GetCurrentProcess(); + let mut token = ptr::null_mut(); + if c::OpenProcessToken(me, c::TOKEN_READ, &mut token) == 0 { + return None; + } + let _handle = Handle::new(token); + super::fill_utf16_buf( + |buf, mut sz| { + match c::GetUserProfileDirectoryW(token, buf, &mut sz) { + 0 if c::GetLastError() != c::ERROR_INSUFFICIENT_BUFFER => 0, + 0 => sz, + _ => sz - 1, // sz includes the null terminator + } + }, + super::os2path, + ) + .ok() + } +} + +#[cfg(target_vendor = "uwp")] +fn home_dir_crt() -> Option<PathBuf> { + None +} + +pub fn home_dir() -> Option<PathBuf> { + crate::env::var_os("HOME") + .or_else(|| crate::env::var_os("USERPROFILE")) + .map(PathBuf::from) + .or_else(|| home_dir_crt()) +} + +pub fn exit(code: i32) -> ! { + unsafe { c::ExitProcess(code as c::UINT) } +} + +pub fn getpid() -> u32 { + unsafe { c::GetCurrentProcessId() as u32 } +} + +#[cfg(test)] +mod tests { + use crate::io::Error; + use crate::sys::c; + + // tests `error_string` above + #[test] + fn ntstatus_error() { + const STATUS_UNSUCCESSFUL: u32 = 0xc000_0001; + assert!( + !Error::from_raw_os_error((STATUS_UNSUCCESSFUL | c::FACILITY_NT_BIT) as _) + .to_string() + .contains("FormatMessageW() returned error") + ); + } +} diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs new file mode 100644 index 00000000000..2f5fc72ab44 --- /dev/null +++ b/library/std/src/sys/windows/os_str.rs @@ -0,0 +1,216 @@ +/// The underlying OsString/OsStr implementation on Windows is a +/// wrapper around the "WTF-8" encoding; see the `wtf8` module for more. 
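For illustration, a minimal sketch of what this wrapper provides in practice, using only the stable `std::os::windows::ffi` extension traits (Windows-only; the code units below are arbitrary example data, not taken from this patch):

use std::ffi::OsString;
use std::os::windows::ffi::{OsStrExt, OsStringExt};

fn main() {
    // 'h', 'i', then an unpaired high surrogate: not valid UTF-16, so it has
    // no `String` representation, but an `OsString` stores it losslessly in
    // its internal WTF-8 form.
    let wide = [0x0068u16, 0x0069, 0xD800];
    let os = OsString::from_wide(&wide);
    assert!(os.to_str().is_none()); // not valid Unicode, so no &str view
    // Converting back to UTF-16 code units round-trips exactly.
    let back: Vec<u16> = os.encode_wide().collect();
    assert_eq!(back, wide);
}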
+use crate::borrow::Cow; +use crate::fmt; +use crate::mem; +use crate::rc::Rc; +use crate::sync::Arc; +use crate::sys_common::wtf8::{Wtf8, Wtf8Buf}; +use crate::sys_common::{AsInner, FromInner, IntoInner}; + +#[derive(Clone, Hash)] +pub struct Buf { + pub inner: Wtf8Buf, +} + +impl IntoInner<Wtf8Buf> for Buf { + fn into_inner(self) -> Wtf8Buf { + self.inner + } +} + +impl FromInner<Wtf8Buf> for Buf { + fn from_inner(inner: Wtf8Buf) -> Self { + Buf { inner } + } +} + +impl AsInner<Wtf8> for Buf { + fn as_inner(&self) -> &Wtf8 { + &self.inner + } +} + +impl fmt::Debug for Buf { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(self.as_slice(), formatter) + } +} + +impl fmt::Display for Buf { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_slice(), formatter) + } +} + +pub struct Slice { + pub inner: Wtf8, +} + +impl fmt::Debug for Slice { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.inner, formatter) + } +} + +impl fmt::Display for Slice { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.inner, formatter) + } +} + +impl Buf { + pub fn with_capacity(capacity: usize) -> Buf { + Buf { inner: Wtf8Buf::with_capacity(capacity) } + } + + pub fn clear(&mut self) { + self.inner.clear() + } + + pub fn capacity(&self) -> usize { + self.inner.capacity() + } + + pub fn from_string(s: String) -> Buf { + Buf { inner: Wtf8Buf::from_string(s) } + } + + pub fn as_slice(&self) -> &Slice { + // Safety: Slice is just a wrapper for Wtf8, + // and self.inner.as_slice() returns &Wtf8. + // Therefore, transmuting &Wtf8 to &Slice is safe. + unsafe { mem::transmute(self.inner.as_slice()) } + } + + pub fn as_mut_slice(&mut self) -> &mut Slice { + // Safety: Slice is just a wrapper for Wtf8, + // and self.inner.as_mut_slice() returns &mut Wtf8. + // Therefore, transmuting &mut Wtf8 to &mut Slice is safe. + // Additionally, care should be taken to ensure the slice + // is always valid Wtf8. 
+ unsafe { mem::transmute(self.inner.as_mut_slice()) } + } + + pub fn into_string(self) -> Result<String, Buf> { + self.inner.into_string().map_err(|buf| Buf { inner: buf }) + } + + pub fn push_slice(&mut self, s: &Slice) { + self.inner.push_wtf8(&s.inner) + } + + pub fn reserve(&mut self, additional: usize) { + self.inner.reserve(additional) + } + + pub fn reserve_exact(&mut self, additional: usize) { + self.inner.reserve_exact(additional) + } + + pub fn shrink_to_fit(&mut self) { + self.inner.shrink_to_fit() + } + + #[inline] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.inner.shrink_to(min_capacity) + } + + #[inline] + pub fn into_box(self) -> Box<Slice> { + unsafe { mem::transmute(self.inner.into_box()) } + } + + #[inline] + pub fn from_box(boxed: Box<Slice>) -> Buf { + let inner: Box<Wtf8> = unsafe { mem::transmute(boxed) }; + Buf { inner: Wtf8Buf::from_box(inner) } + } + + #[inline] + pub fn into_arc(&self) -> Arc<Slice> { + self.as_slice().into_arc() + } + + #[inline] + pub fn into_rc(&self) -> Rc<Slice> { + self.as_slice().into_rc() + } +} + +impl Slice { + #[inline] + pub fn from_str(s: &str) -> &Slice { + unsafe { mem::transmute(Wtf8::from_str(s)) } + } + + pub fn to_str(&self) -> Option<&str> { + self.inner.as_str() + } + + pub fn to_string_lossy(&self) -> Cow<'_, str> { + self.inner.to_string_lossy() + } + + pub fn to_owned(&self) -> Buf { + let mut buf = Wtf8Buf::with_capacity(self.inner.len()); + buf.push_wtf8(&self.inner); + Buf { inner: buf } + } + + pub fn clone_into(&self, buf: &mut Buf) { + self.inner.clone_into(&mut buf.inner) + } + + #[inline] + pub fn into_box(&self) -> Box<Slice> { + unsafe { mem::transmute(self.inner.into_box()) } + } + + pub fn empty_box() -> Box<Slice> { + unsafe { mem::transmute(Wtf8::empty_box()) } + } + + #[inline] + pub fn into_arc(&self) -> Arc<Slice> { + let arc = self.inner.into_arc(); + unsafe { Arc::from_raw(Arc::into_raw(arc) as *const Slice) } + } + + #[inline] + pub fn into_rc(&self) -> Rc<Slice> { + let rc = self.inner.into_rc(); + unsafe { Rc::from_raw(Rc::into_raw(rc) as *const Slice) } + } + + #[inline] + pub fn make_ascii_lowercase(&mut self) { + self.inner.make_ascii_lowercase() + } + + #[inline] + pub fn make_ascii_uppercase(&mut self) { + self.inner.make_ascii_uppercase() + } + + #[inline] + pub fn to_ascii_lowercase(&self) -> Buf { + Buf { inner: self.inner.to_ascii_lowercase() } + } + + #[inline] + pub fn to_ascii_uppercase(&self) -> Buf { + Buf { inner: self.inner.to_ascii_uppercase() } + } + + #[inline] + pub fn is_ascii(&self) -> bool { + self.inner.is_ascii() + } + + #[inline] + pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool { + self.inner.eq_ignore_ascii_case(&other.inner) + } +} diff --git a/library/std/src/sys/windows/path.rs b/library/std/src/sys/windows/path.rs new file mode 100644 index 00000000000..dda3ed68cfc --- /dev/null +++ b/library/std/src/sys/windows/path.rs @@ -0,0 +1,107 @@ +use crate::ffi::OsStr; +use crate::mem; +use crate::path::Prefix; + +#[cfg(test)] +mod tests; + +pub const MAIN_SEP_STR: &str = "\\"; +pub const MAIN_SEP: char = '\\'; + +// The unsafety here stems from converting between `&OsStr` and `&[u8]` +// and back. This is safe to do because (1) we only look at ASCII +// contents of the encoding and (2) new &OsStr values are produced +// only from ASCII-bounded slices of existing &OsStr values. 
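For context, the prefixes parsed below surface through the public `std::path` API; a small sketch (Windows targets only; the paths are arbitrary examples) of what the prefix parser is expected to report:

use std::path::{Component, Path, Prefix};

fn main() {
    // Verbatim disk prefix: `\\?\C:\...` parses as `Prefix::VerbatimDisk(b'C')`.
    let verbatim = Path::new(r"\\?\C:\Users\rust");
    match verbatim.components().next() {
        Some(Component::Prefix(p)) => assert_eq!(p.kind(), Prefix::VerbatimDisk(b'C')),
        other => panic!("expected a prefix component, got {:?}", other),
    }

    // Plain drive prefix: `C:\...` parses as `Prefix::Disk(b'C')`.
    let disk = Path::new(r"C:\Windows");
    match disk.components().next() {
        Some(Component::Prefix(p)) => assert_eq!(p.kind(), Prefix::Disk(b'C')),
        other => panic!("expected a prefix component, got {:?}", other),
    }
}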
+fn os_str_as_u8_slice(s: &OsStr) -> &[u8] { + unsafe { mem::transmute(s) } +} +unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr { + mem::transmute(s) +} + +#[inline] +pub fn is_sep_byte(b: u8) -> bool { + b == b'/' || b == b'\\' +} + +#[inline] +pub fn is_verbatim_sep(b: u8) -> bool { + b == b'\\' +} + +// In most DOS systems, it is not possible to have more than 26 drive letters. +// See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>. +pub fn is_valid_drive_letter(disk: u8) -> bool { + disk.is_ascii_alphabetic() +} + +pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> { + use Prefix::{DeviceNS, Disk, Verbatim, VerbatimDisk, VerbatimUNC, UNC}; + + let path = os_str_as_u8_slice(path); + + // \\ + if let Some(path) = path.strip_prefix(br"\\") { + // \\?\ + if let Some(path) = path.strip_prefix(br"?\") { + // \\?\UNC\server\share + if let Some(path) = path.strip_prefix(br"UNC\") { + let (server, share) = match get_first_two_components(path, is_verbatim_sep) { + Some((server, share)) => unsafe { + (u8_slice_as_os_str(server), u8_slice_as_os_str(share)) + }, + None => (unsafe { u8_slice_as_os_str(path) }, OsStr::new("")), + }; + return Some(VerbatimUNC(server, share)); + } else { + // \\?\path + match path { + // \\?\C:\path + [c, b':', b'\\', ..] if is_valid_drive_letter(*c) => { + return Some(VerbatimDisk(c.to_ascii_uppercase())); + } + // \\?\cat_pics + _ => { + let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len()); + let slice = &path[..idx]; + return Some(Verbatim(unsafe { u8_slice_as_os_str(slice) })); + } + } + } + } else if let Some(path) = path.strip_prefix(b".\\") { + // \\.\COM42 + let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len()); + let slice = &path[..idx]; + return Some(DeviceNS(unsafe { u8_slice_as_os_str(slice) })); + } + match get_first_two_components(path, is_sep_byte) { + Some((server, share)) if !server.is_empty() && !share.is_empty() => { + // \\server\share + return Some(unsafe { UNC(u8_slice_as_os_str(server), u8_slice_as_os_str(share)) }); + } + _ => {} + } + } else if let [c, b':', ..] = path { + // C: + if is_valid_drive_letter(*c) { + return Some(Disk(c.to_ascii_uppercase())); + } + } + None +} + +/// Returns the first two path components with predicate `f`. +/// +/// The two components returned will be use by caller +/// to construct `VerbatimUNC` or `UNC` Windows path prefix. +/// +/// Returns [`None`] if there are no separators in path. +fn get_first_two_components(path: &[u8], f: fn(u8) -> bool) -> Option<(&[u8], &[u8])> { + let idx = path.iter().position(|&x| f(x))?; + // Panic safe + // The max `idx+1` is `path.len()` and `path[path.len()..]` is a valid index. 
+ let (first, path) = (&path[..idx], &path[idx + 1..]); + let idx = path.iter().position(|&x| f(x)).unwrap_or(path.len()); + let second = &path[..idx]; + Some((first, second)) +} diff --git a/library/std/src/sys/windows/path/tests.rs b/library/std/src/sys/windows/path/tests.rs new file mode 100644 index 00000000000..fbac1dc1ca1 --- /dev/null +++ b/library/std/src/sys/windows/path/tests.rs @@ -0,0 +1,21 @@ +use super::*; + +#[test] +fn test_get_first_two_components() { + assert_eq!( + get_first_two_components(br"server\share", is_verbatim_sep), + Some((&b"server"[..], &b"share"[..])), + ); + + assert_eq!( + get_first_two_components(br"server\", is_verbatim_sep), + Some((&b"server"[..], &b""[..])) + ); + + assert_eq!( + get_first_two_components(br"\server\", is_verbatim_sep), + Some((&b""[..], &b"server"[..])) + ); + + assert_eq!(get_first_two_components(br"there are no separators here", is_verbatim_sep), None,); +} diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs new file mode 100644 index 00000000000..104a8db4659 --- /dev/null +++ b/library/std/src/sys/windows/pipe.rs @@ -0,0 +1,368 @@ +use crate::os::windows::prelude::*; + +use crate::ffi::OsStr; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::path::Path; +use crate::ptr; +use crate::slice; +use crate::sync::atomic::AtomicUsize; +use crate::sync::atomic::Ordering::SeqCst; +use crate::sys::c; +use crate::sys::fs::{File, OpenOptions}; +use crate::sys::handle::Handle; +use crate::sys::hashmap_random_keys; + +//////////////////////////////////////////////////////////////////////////////// +// Anonymous pipes +//////////////////////////////////////////////////////////////////////////////// + +pub struct AnonPipe { + inner: Handle, +} + +pub struct Pipes { + pub ours: AnonPipe, + pub theirs: AnonPipe, +} + +/// Although this looks similar to `anon_pipe` in the Unix module it's actually +/// subtly different. Here we'll return two pipes in the `Pipes` return value, +/// but one is intended for "us" where as the other is intended for "someone +/// else". +/// +/// Currently the only use case for this function is pipes for stdio on +/// processes in the standard library, so "ours" is the one that'll stay in our +/// process whereas "theirs" will be inherited to a child. +/// +/// The ours/theirs pipes are *not* specifically readable or writable. Each +/// one only supports a read or a write, but which is which depends on the +/// boolean flag given. If `ours_readable` is `true`, then `ours` is readable and +/// `theirs` is writable. Conversely, if `ours_readable` is `false`, then `ours` +/// is writable and `theirs` is readable. +/// +/// Also note that the `ours` pipe is always a handle opened up in overlapped +/// mode. This means that technically speaking it should only ever be used +/// with `OVERLAPPED` instances, but also works out ok if it's only ever used +/// once at a time (which we do indeed guarantee). +pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Result<Pipes> { + // Note that we specifically do *not* use `CreatePipe` here because + // unfortunately the anonymous pipes returned do not support overlapped + // operations. Instead, we create a "hopefully unique" name and create a + // named pipe which has overlapped operations enabled. + // + // Once we do this, we connect do it as usual via `CreateFileW`, and then + // we return those reader/writer halves. 
Note that the `ours` pipe return + // value is always the named pipe, whereas `theirs` is just the normal file. + // This should hopefully shield us from child processes which assume their + // stdout is a named pipe, which would indeed be odd! + unsafe { + let ours; + let mut name; + let mut tries = 0; + let mut reject_remote_clients_flag = c::PIPE_REJECT_REMOTE_CLIENTS; + loop { + tries += 1; + name = format!( + r"\\.\pipe\__rust_anonymous_pipe1__.{}.{}", + c::GetCurrentProcessId(), + random_number() + ); + let wide_name = OsStr::new(&name).encode_wide().chain(Some(0)).collect::<Vec<_>>(); + let mut flags = c::FILE_FLAG_FIRST_PIPE_INSTANCE | c::FILE_FLAG_OVERLAPPED; + if ours_readable { + flags |= c::PIPE_ACCESS_INBOUND; + } else { + flags |= c::PIPE_ACCESS_OUTBOUND; + } + + let handle = c::CreateNamedPipeW( + wide_name.as_ptr(), + flags, + c::PIPE_TYPE_BYTE + | c::PIPE_READMODE_BYTE + | c::PIPE_WAIT + | reject_remote_clients_flag, + 1, + 4096, + 4096, + 0, + ptr::null_mut(), + ); + + // We pass the `FILE_FLAG_FIRST_PIPE_INSTANCE` flag above, and we're + // also just doing a best effort at selecting a unique name. If + // `ERROR_ACCESS_DENIED` is returned then it could mean that we + // accidentally conflicted with an already existing pipe, so we try + // again. + // + // Don't try again too much though as this could also perhaps be a + // legit error. + // If `ERROR_INVALID_PARAMETER` is returned, this probably means we're + // running on pre-Vista version where `PIPE_REJECT_REMOTE_CLIENTS` is + // not supported, so we continue retrying without it. This implies + // reduced security on Windows versions older than Vista by allowing + // connections to this pipe from remote machines. + // Proper fix would increase the number of FFI imports and introduce + // significant amount of Windows XP specific code with no clean + // testing strategy + // For more info, see https://github.com/rust-lang/rust/pull/37677. + if handle == c::INVALID_HANDLE_VALUE { + let err = io::Error::last_os_error(); + let raw_os_err = err.raw_os_error(); + if tries < 10 { + if raw_os_err == Some(c::ERROR_ACCESS_DENIED as i32) { + continue; + } else if reject_remote_clients_flag != 0 + && raw_os_err == Some(c::ERROR_INVALID_PARAMETER as i32) + { + reject_remote_clients_flag = 0; + tries -= 1; + continue; + } + } + return Err(err); + } + ours = Handle::new(handle); + break; + } + + // Connect to the named pipe we just created. This handle is going to be + // returned in `theirs`, so if `ours` is readable we want this to be + // writable, otherwise if `ours` is writable we want this to be + // readable. + // + // Additionally we don't enable overlapped mode on this because most + // client processes aren't enabled to work with that. 
+ let mut opts = OpenOptions::new(); + opts.write(ours_readable); + opts.read(!ours_readable); + opts.share_mode(0); + let size = mem::size_of::<c::SECURITY_ATTRIBUTES>(); + let mut sa = c::SECURITY_ATTRIBUTES { + nLength: size as c::DWORD, + lpSecurityDescriptor: ptr::null_mut(), + bInheritHandle: their_handle_inheritable as i32, + }; + opts.security_attributes(&mut sa); + let theirs = File::open(Path::new(&name), &opts)?; + let theirs = AnonPipe { inner: theirs.into_handle() }; + + Ok(Pipes { + ours: AnonPipe { inner: ours }, + theirs: AnonPipe { inner: theirs.into_handle() }, + }) + } +} + +fn random_number() -> usize { + static N: AtomicUsize = AtomicUsize::new(0); + loop { + if N.load(SeqCst) != 0 { + return N.fetch_add(1, SeqCst); + } + + N.store(hashmap_random_keys().0 as usize, SeqCst); + } +} + +impl AnonPipe { + pub fn handle(&self) -> &Handle { + &self.inner + } + pub fn into_handle(self) -> Handle { + self.inner + } + + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.inner.read(buf) + } + + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + self.inner.read_vectored(bufs) + } + + #[inline] + pub fn is_read_vectored(&self) -> bool { + self.inner.is_read_vectored() + } + + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + self.inner.write(buf) + } + + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + self.inner.write_vectored(bufs) + } + + #[inline] + pub fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} + +pub fn read2(p1: AnonPipe, v1: &mut Vec<u8>, p2: AnonPipe, v2: &mut Vec<u8>) -> io::Result<()> { + let p1 = p1.into_handle(); + let p2 = p2.into_handle(); + + let mut p1 = AsyncPipe::new(p1, v1)?; + let mut p2 = AsyncPipe::new(p2, v2)?; + let objs = [p1.event.raw(), p2.event.raw()]; + + // In a loop we wait for either pipe's scheduled read operation to complete. + // If the operation completes with 0 bytes, that means EOF was reached, in + // which case we just finish out the other pipe entirely. + // + // Note that overlapped I/O is in general super unsafe because we have to + // be careful to ensure that all pointers in play are valid for the entire + // duration of the I/O operation (where tons of operations can also fail). + // The destructor for `AsyncPipe` ends up taking care of most of this. + loop { + let res = unsafe { c::WaitForMultipleObjects(2, objs.as_ptr(), c::FALSE, c::INFINITE) }; + if res == c::WAIT_OBJECT_0 { + if !p1.result()? || !p1.schedule_read()? { + return p2.finish(); + } + } else if res == c::WAIT_OBJECT_0 + 1 { + if !p2.result()? || !p2.schedule_read()? { + return p1.finish(); + } + } else { + return Err(io::Error::last_os_error()); + } + } +} + +struct AsyncPipe<'a> { + pipe: Handle, + event: Handle, + overlapped: Box<c::OVERLAPPED>, // needs a stable address + dst: &'a mut Vec<u8>, + state: State, +} + +#[derive(PartialEq, Debug)] +enum State { + NotReading, + Reading, + Read(usize), +} + +impl<'a> AsyncPipe<'a> { + fn new(pipe: Handle, dst: &'a mut Vec<u8>) -> io::Result<AsyncPipe<'a>> { + // Create an event which we'll use to coordinate our overlapped + // operations, this event will be used in WaitForMultipleObjects + // and passed as part of the OVERLAPPED handle. + // + // Note that we do a somewhat clever thing here by flagging the + // event as being manually reset and setting it initially to the + // signaled state. 
This means that we'll naturally fall through the + // WaitForMultipleObjects call above for pipes created initially, + // and the only time an even will go back to "unset" will be once an + // I/O operation is successfully scheduled (what we want). + let event = Handle::new_event(true, true)?; + let mut overlapped: Box<c::OVERLAPPED> = unsafe { Box::new(mem::zeroed()) }; + overlapped.hEvent = event.raw(); + Ok(AsyncPipe { pipe, overlapped, event, dst, state: State::NotReading }) + } + + /// Executes an overlapped read operation. + /// + /// Must not currently be reading, and returns whether the pipe is currently + /// at EOF or not. If the pipe is not at EOF then `result()` must be called + /// to complete the read later on (may block), but if the pipe is at EOF + /// then `result()` should not be called as it will just block forever. + fn schedule_read(&mut self) -> io::Result<bool> { + assert_eq!(self.state, State::NotReading); + let amt = unsafe { + let slice = slice_to_end(self.dst); + self.pipe.read_overlapped(slice, &mut *self.overlapped)? + }; + + // If this read finished immediately then our overlapped event will + // remain signaled (it was signaled coming in here) and we'll progress + // down to the method below. + // + // Otherwise the I/O operation is scheduled and the system set our event + // to not signaled, so we flag ourselves into the reading state and move + // on. + self.state = match amt { + Some(0) => return Ok(false), + Some(amt) => State::Read(amt), + None => State::Reading, + }; + Ok(true) + } + + /// Wait for the result of the overlapped operation previously executed. + /// + /// Takes a parameter `wait` which indicates if this pipe is currently being + /// read whether the function should block waiting for the read to complete. + /// + /// Returns values: + /// + /// * `true` - finished any pending read and the pipe is not at EOF (keep + /// going) + /// * `false` - finished any pending read and pipe is at EOF (stop issuing + /// reads) + fn result(&mut self) -> io::Result<bool> { + let amt = match self.state { + State::NotReading => return Ok(true), + State::Reading => self.pipe.overlapped_result(&mut *self.overlapped, true)?, + State::Read(amt) => amt, + }; + self.state = State::NotReading; + unsafe { + let len = self.dst.len(); + self.dst.set_len(len + amt); + } + Ok(amt != 0) + } + + /// Finishes out reading this pipe entirely. + /// + /// Waits for any pending and schedule read, and then calls `read_to_end` + /// if necessary to read all the remaining information. + fn finish(&mut self) -> io::Result<()> { + while self.result()? && self.schedule_read()? { + // ... + } + Ok(()) + } +} + +impl<'a> Drop for AsyncPipe<'a> { + fn drop(&mut self) { + match self.state { + State::Reading => {} + _ => return, + } + + // If we have a pending read operation, then we have to make sure that + // it's *done* before we actually drop this type. The kernel requires + // that the `OVERLAPPED` and buffer pointers are valid for the entire + // I/O operation. + // + // To do that, we call `CancelIo` to cancel any pending operation, and + // if that succeeds we wait for the overlapped result. + // + // If anything here fails, there's not really much we can do, so we leak + // the buffer/OVERLAPPED pointers to ensure we're at least memory safe. 
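The leak-on-failure trade-off described above can be shown in isolation; a minimal sketch (the buffer and function are invented for the example, not part of this patch) of giving up an allocation to keep a raw pointer valid:

use std::mem;

// Hypothetical: some in-flight operation still holds a raw pointer into `buf`.
// If we cannot prove it has completed, forgetting the buffer leaks it, which
// keeps the pointer valid for the rest of the process (memory safe, if
// wasteful); freeing it instead would risk a use-after-free.
fn abandon_buffer(buf: Vec<u8>) -> *const u8 {
    let ptr = buf.as_ptr();
    mem::forget(buf); // destructor never runs; the allocation is never freed
    ptr
}

fn main() {
    let leaked = abandon_buffer(vec![0u8; 4096]);
    println!("buffer intentionally leaked at {:?}", leaked);
}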
+ if self.pipe.cancel_io().is_err() || self.result().is_err() { + let buf = mem::take(self.dst); + let overlapped = Box::new(unsafe { mem::zeroed() }); + let overlapped = mem::replace(&mut self.overlapped, overlapped); + mem::forget((buf, overlapped)); + } + } +} + +unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] { + if v.capacity() == 0 { + v.reserve(16); + } + if v.capacity() == v.len() { + v.reserve(1); + } + slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()), v.capacity() - v.len()) +} diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs new file mode 100644 index 00000000000..7d6d4775eec --- /dev/null +++ b/library/std/src/sys/windows/process.rs @@ -0,0 +1,566 @@ +#![unstable(feature = "process_internals", issue = "none")] + +use crate::borrow::Borrow; +use crate::collections::BTreeMap; +use crate::env; +use crate::env::split_paths; +use crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::fs; +use crate::io::{self, Error, ErrorKind}; +use crate::mem; +use crate::os::windows::ffi::OsStrExt; +use crate::path::Path; +use crate::ptr; +use crate::sys::c; +use crate::sys::cvt; +use crate::sys::fs::{File, OpenOptions}; +use crate::sys::handle::Handle; +use crate::sys::mutex::Mutex; +use crate::sys::pipe::{self, AnonPipe}; +use crate::sys::stdio; +use crate::sys_common::process::CommandEnv; +use crate::sys_common::AsInner; + +use libc::{c_void, EXIT_FAILURE, EXIT_SUCCESS}; + +//////////////////////////////////////////////////////////////////////////////// +// Command +//////////////////////////////////////////////////////////////////////////////// + +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] +#[doc(hidden)] +pub struct EnvKey(OsString); + +impl From<OsString> for EnvKey { + fn from(mut k: OsString) -> Self { + k.make_ascii_uppercase(); + EnvKey(k) + } +} + +impl From<EnvKey> for OsString { + fn from(k: EnvKey) -> Self { + k.0 + } +} + +impl Borrow<OsStr> for EnvKey { + fn borrow(&self) -> &OsStr { + &self.0 + } +} + +impl AsRef<OsStr> for EnvKey { + fn as_ref(&self) -> &OsStr { + &self.0 + } +} + +fn ensure_no_nuls<T: AsRef<OsStr>>(str: T) -> io::Result<T> { + if str.as_ref().encode_wide().any(|b| b == 0) { + Err(io::Error::new(ErrorKind::InvalidInput, "nul byte found in provided data")) + } else { + Ok(str) + } +} + +pub struct Command { + program: OsString, + args: Vec<OsString>, + env: CommandEnv, + cwd: Option<OsString>, + flags: u32, + detach: bool, // not currently exposed in std::process + stdin: Option<Stdio>, + stdout: Option<Stdio>, + stderr: Option<Stdio>, +} + +pub enum Stdio { + Inherit, + Null, + MakePipe, + Handle(Handle), +} + +pub struct StdioPipes { + pub stdin: Option<AnonPipe>, + pub stdout: Option<AnonPipe>, + pub stderr: Option<AnonPipe>, +} + +struct DropGuard<'a> { + lock: &'a Mutex, +} + +impl Command { + pub fn new(program: &OsStr) -> Command { + Command { + program: program.to_os_string(), + args: Vec::new(), + env: Default::default(), + cwd: None, + flags: 0, + detach: false, + stdin: None, + stdout: None, + stderr: None, + } + } + + pub fn arg(&mut self, arg: &OsStr) { + self.args.push(arg.to_os_string()) + } + pub fn env_mut(&mut self) -> &mut CommandEnv { + &mut self.env + } + pub fn cwd(&mut self, dir: &OsStr) { + self.cwd = Some(dir.to_os_string()) + } + pub fn stdin(&mut self, stdin: Stdio) { + self.stdin = Some(stdin); + } + pub fn stdout(&mut self, stdout: Stdio) { + self.stdout = Some(stdout); + } + pub fn stderr(&mut self, stderr: Stdio) { + self.stderr = Some(stderr); + } + pub fn 
creation_flags(&mut self, flags: u32) { + self.flags = flags; + } + + pub fn spawn( + &mut self, + default: Stdio, + needs_stdin: bool, + ) -> io::Result<(Process, StdioPipes)> { + let maybe_env = self.env.capture_if_changed(); + // To have the spawning semantics of unix/windows stay the same, we need + // to read the *child's* PATH if one is provided. See #15149 for more + // details. + let program = maybe_env.as_ref().and_then(|env| { + if let Some(v) = env.get(OsStr::new("PATH")) { + // Split the value and test each path to see if the + // program exists. + for path in split_paths(&v) { + let path = path + .join(self.program.to_str().unwrap()) + .with_extension(env::consts::EXE_EXTENSION); + if fs::metadata(&path).is_ok() { + return Some(path.into_os_string()); + } + } + } + None + }); + + let mut si = zeroed_startupinfo(); + si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD; + si.dwFlags = c::STARTF_USESTDHANDLES; + + let program = program.as_ref().unwrap_or(&self.program); + let mut cmd_str = make_command_line(program, &self.args)?; + cmd_str.push(0); // add null terminator + + // stolen from the libuv code. + let mut flags = self.flags | c::CREATE_UNICODE_ENVIRONMENT; + if self.detach { + flags |= c::DETACHED_PROCESS | c::CREATE_NEW_PROCESS_GROUP; + } + + let (envp, _data) = make_envp(maybe_env)?; + let (dirp, _data) = make_dirp(self.cwd.as_ref())?; + let mut pi = zeroed_process_information(); + + // Prepare all stdio handles to be inherited by the child. This + // currently involves duplicating any existing ones with the ability to + // be inherited by child processes. Note, however, that once an + // inheritable handle is created, *any* spawned child will inherit that + // handle. We only want our own child to inherit this handle, so we wrap + // the remaining portion of this spawn in a mutex. + // + // For more information, msdn also has an article about this race: + // http://support.microsoft.com/kb/315939 + static CREATE_PROCESS_LOCK: Mutex = Mutex::new(); + let _guard = DropGuard::new(&CREATE_PROCESS_LOCK); + + let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None }; + let null = Stdio::Null; + let default_stdin = if needs_stdin { &default } else { &null }; + let stdin = self.stdin.as_ref().unwrap_or(default_stdin); + let stdout = self.stdout.as_ref().unwrap_or(&default); + let stderr = self.stderr.as_ref().unwrap_or(&default); + let stdin = stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin)?; + let stdout = stdout.to_handle(c::STD_OUTPUT_HANDLE, &mut pipes.stdout)?; + let stderr = stderr.to_handle(c::STD_ERROR_HANDLE, &mut pipes.stderr)?; + si.hStdInput = stdin.raw(); + si.hStdOutput = stdout.raw(); + si.hStdError = stderr.raw(); + + unsafe { + cvt(c::CreateProcessW( + ptr::null(), + cmd_str.as_mut_ptr(), + ptr::null_mut(), + ptr::null_mut(), + c::TRUE, + flags, + envp, + dirp, + &mut si, + &mut pi, + )) + }?; + + // We close the thread handle because we don't care about keeping + // the thread id valid, and we aren't keeping the thread handle + // around to be able to close it later. 
+ drop(Handle::new(pi.hThread)); + + Ok((Process { handle: Handle::new(pi.hProcess) }, pipes)) + } +} + +impl fmt::Debug for Command { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.program)?; + for arg in &self.args { + write!(f, " {:?}", arg)?; + } + Ok(()) + } +} + +impl<'a> DropGuard<'a> { + fn new(lock: &'a Mutex) -> DropGuard<'a> { + unsafe { + lock.lock(); + DropGuard { lock } + } + } +} + +impl<'a> Drop for DropGuard<'a> { + fn drop(&mut self) { + unsafe { + self.lock.unlock(); + } + } +} + +impl Stdio { + fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Result<Handle> { + match *self { + // If no stdio handle is available, then inherit means that it + // should still be unavailable so propagate the + // INVALID_HANDLE_VALUE. + Stdio::Inherit => match stdio::get_handle(stdio_id) { + Ok(io) => { + let io = Handle::new(io); + let ret = io.duplicate(0, true, c::DUPLICATE_SAME_ACCESS); + io.into_raw(); + ret + } + Err(..) => Ok(Handle::new(c::INVALID_HANDLE_VALUE)), + }, + + Stdio::MakePipe => { + let ours_readable = stdio_id != c::STD_INPUT_HANDLE; + let pipes = pipe::anon_pipe(ours_readable, true)?; + *pipe = Some(pipes.ours); + Ok(pipes.theirs.into_handle()) + } + + Stdio::Handle(ref handle) => handle.duplicate(0, true, c::DUPLICATE_SAME_ACCESS), + + // Open up a reference to NUL with appropriate read/write + // permissions as well as the ability to be inherited to child + // processes (as this is about to be inherited). + Stdio::Null => { + let size = mem::size_of::<c::SECURITY_ATTRIBUTES>(); + let mut sa = c::SECURITY_ATTRIBUTES { + nLength: size as c::DWORD, + lpSecurityDescriptor: ptr::null_mut(), + bInheritHandle: 1, + }; + let mut opts = OpenOptions::new(); + opts.read(stdio_id == c::STD_INPUT_HANDLE); + opts.write(stdio_id != c::STD_INPUT_HANDLE); + opts.security_attributes(&mut sa); + File::open(Path::new("NUL"), &opts).map(|file| file.into_handle()) + } + } + } +} + +impl From<AnonPipe> for Stdio { + fn from(pipe: AnonPipe) -> Stdio { + Stdio::Handle(pipe.into_handle()) + } +} + +impl From<File> for Stdio { + fn from(file: File) -> Stdio { + Stdio::Handle(file.into_handle()) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Processes +//////////////////////////////////////////////////////////////////////////////// + +/// A value representing a child process. +/// +/// The lifetime of this value is linked to the lifetime of the actual +/// process - the Process destructor calls self.finish() which waits +/// for the process to terminate. 
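From the user's side, this machinery is reached through `std::process`; a minimal sketch of the spawn/wait flow these internals back (the command and exit code are arbitrary):

use std::process::Command;

fn main() -> std::io::Result<()> {
    // `status` spawns via `Command::spawn` above (CreateProcessW) and then
    // waits, which maps to WaitForSingleObject + GetExitCodeProcess below.
    let status = Command::new("cmd").args(&["/C", "exit 3"]).status()?;
    assert_eq!(status.code(), Some(3));
    assert!(!status.success());
    Ok(())
}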
+pub struct Process { + handle: Handle, +} + +impl Process { + pub fn kill(&mut self) -> io::Result<()> { + cvt(unsafe { c::TerminateProcess(self.handle.raw(), 1) })?; + Ok(()) + } + + pub fn id(&self) -> u32 { + unsafe { c::GetProcessId(self.handle.raw()) as u32 } + } + + pub fn wait(&mut self) -> io::Result<ExitStatus> { + unsafe { + let res = c::WaitForSingleObject(self.handle.raw(), c::INFINITE); + if res != c::WAIT_OBJECT_0 { + return Err(Error::last_os_error()); + } + let mut status = 0; + cvt(c::GetExitCodeProcess(self.handle.raw(), &mut status))?; + Ok(ExitStatus(status)) + } + } + + pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + unsafe { + match c::WaitForSingleObject(self.handle.raw(), 0) { + c::WAIT_OBJECT_0 => {} + c::WAIT_TIMEOUT => { + return Ok(None); + } + _ => return Err(io::Error::last_os_error()), + } + let mut status = 0; + cvt(c::GetExitCodeProcess(self.handle.raw(), &mut status))?; + Ok(Some(ExitStatus(status))) + } + } + + pub fn handle(&self) -> &Handle { + &self.handle + } + + pub fn into_handle(self) -> Handle { + self.handle + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitStatus(c::DWORD); + +impl ExitStatus { + pub fn success(&self) -> bool { + self.0 == 0 + } + pub fn code(&self) -> Option<i32> { + Some(self.0 as i32) + } +} + +/// Converts a raw `c::DWORD` to a type-safe `ExitStatus` by wrapping it without copying. +impl From<c::DWORD> for ExitStatus { + fn from(u: c::DWORD) -> ExitStatus { + ExitStatus(u) + } +} + +impl fmt::Display for ExitStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Windows exit codes with the high bit set typically mean some form of + // unhandled exception or warning. In this scenario printing the exit + // code in decimal doesn't always make sense because it's a very large + // and somewhat gibberish number. The hex code is a bit more + // recognizable and easier to search for, so print that. + if self.0 & 0x80000000 != 0 { + write!(f, "exit code: {:#x}", self.0) + } else { + write!(f, "exit code: {}", self.0) + } + } +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub struct ExitCode(c::DWORD); + +impl ExitCode { + pub const SUCCESS: ExitCode = ExitCode(EXIT_SUCCESS as _); + pub const FAILURE: ExitCode = ExitCode(EXIT_FAILURE as _); + + #[inline] + pub fn as_i32(&self) -> i32 { + self.0 as i32 + } +} + +fn zeroed_startupinfo() -> c::STARTUPINFO { + c::STARTUPINFO { + cb: 0, + lpReserved: ptr::null_mut(), + lpDesktop: ptr::null_mut(), + lpTitle: ptr::null_mut(), + dwX: 0, + dwY: 0, + dwXSize: 0, + dwYSize: 0, + dwXCountChars: 0, + dwYCountCharts: 0, + dwFillAttribute: 0, + dwFlags: 0, + wShowWindow: 0, + cbReserved2: 0, + lpReserved2: ptr::null_mut(), + hStdInput: c::INVALID_HANDLE_VALUE, + hStdOutput: c::INVALID_HANDLE_VALUE, + hStdError: c::INVALID_HANDLE_VALUE, + } +} + +fn zeroed_process_information() -> c::PROCESS_INFORMATION { + c::PROCESS_INFORMATION { + hProcess: ptr::null_mut(), + hThread: ptr::null_mut(), + dwProcessId: 0, + dwThreadId: 0, + } +} + +// Produces a wide string *without terminating null*; returns an error if +// `prog` or any of the `args` contain a nul. +fn make_command_line(prog: &OsStr, args: &[OsString]) -> io::Result<Vec<u16>> { + // Encode the command and arguments in a command line string such + // that the spawned process may recover them using CommandLineToArgvW. 
+ let mut cmd: Vec<u16> = Vec::new(); + // Always quote the program name so CreateProcess doesn't interpret args as + // part of the name if the binary wasn't found first time. + append_arg(&mut cmd, prog, true)?; + for arg in args { + cmd.push(' ' as u16); + append_arg(&mut cmd, arg, false)?; + } + return Ok(cmd); + + fn append_arg(cmd: &mut Vec<u16>, arg: &OsStr, force_quotes: bool) -> io::Result<()> { + // If an argument has 0 characters then we need to quote it to ensure + // that it actually gets passed through on the command line or otherwise + // it will be dropped entirely when parsed on the other end. + ensure_no_nuls(arg)?; + let arg_bytes = &arg.as_inner().inner.as_inner(); + let quote = force_quotes + || arg_bytes.iter().any(|c| *c == b' ' || *c == b'\t') + || arg_bytes.is_empty(); + if quote { + cmd.push('"' as u16); + } + + let mut backslashes: usize = 0; + for x in arg.encode_wide() { + if x == '\\' as u16 { + backslashes += 1; + } else { + if x == '"' as u16 { + // Add n+1 backslashes to total 2n+1 before internal '"'. + cmd.extend((0..=backslashes).map(|_| '\\' as u16)); + } + backslashes = 0; + } + cmd.push(x); + } + + if quote { + // Add n backslashes to total 2n before ending '"'. + cmd.extend((0..backslashes).map(|_| '\\' as u16)); + cmd.push('"' as u16); + } + Ok(()) + } +} + +fn make_envp(maybe_env: Option<BTreeMap<EnvKey, OsString>>) -> io::Result<(*mut c_void, Vec<u16>)> { + // On Windows we pass an "environment block" which is not a char**, but + // rather a concatenation of null-terminated k=v\0 sequences, with a final + // \0 to terminate. + if let Some(env) = maybe_env { + let mut blk = Vec::new(); + + for (k, v) in env { + blk.extend(ensure_no_nuls(k.0)?.encode_wide()); + blk.push('=' as u16); + blk.extend(ensure_no_nuls(v)?.encode_wide()); + blk.push(0); + } + blk.push(0); + Ok((blk.as_mut_ptr() as *mut c_void, blk)) + } else { + Ok((ptr::null_mut(), Vec::new())) + } +} + +fn make_dirp(d: Option<&OsString>) -> io::Result<(*const u16, Vec<u16>)> { + match d { + Some(dir) => { + let mut dir_str: Vec<u16> = ensure_no_nuls(dir)?.encode_wide().collect(); + dir_str.push(0); + Ok((dir_str.as_ptr(), dir_str)) + } + None => Ok((ptr::null(), Vec::new())), + } +} + +#[cfg(test)] +mod tests { + use super::make_command_line; + use crate::ffi::{OsStr, OsString}; + + #[test] + fn test_make_command_line() { + fn test_wrapper(prog: &str, args: &[&str]) -> String { + let command_line = &make_command_line( + OsStr::new(prog), + &args.iter().map(|a| OsString::from(a)).collect::<Vec<OsString>>(), + ) + .unwrap(); + String::from_utf16(command_line).unwrap() + } + + assert_eq!(test_wrapper("prog", &["aaa", "bbb", "ccc"]), "\"prog\" aaa bbb ccc"); + + assert_eq!( + test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa"]), + "\"C:\\Program Files\\blah\\blah.exe\" aaa" + ); + assert_eq!( + test_wrapper("C:\\Program Files\\test", &["aa\"bb"]), + "\"C:\\Program Files\\test\" aa\\\"bb" + ); + assert_eq!(test_wrapper("echo", &["a b c"]), "\"echo\" \"a b c\""); + assert_eq!( + test_wrapper("echo", &["\" \\\" \\", "\\"]), + "\"echo\" \"\\\" \\\\\\\" \\\\\" \\" + ); + assert_eq!( + test_wrapper("\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}", &[]), + "\"\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}\"" + ); + } +} diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs new file mode 100644 index 00000000000..87ea416bf67 --- /dev/null +++ b/library/std/src/sys/windows/rand.rs @@ -0,0 +1,33 @@ +use crate::io; +use crate::mem; +use crate::sys::c; + 
+#[cfg(not(target_vendor = "uwp"))] +pub fn hashmap_random_keys() -> (u64, u64) { + let mut v = (0, 0); + let ret = + unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) }; + if ret == 0 { + panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); + } + v +} + +#[cfg(target_vendor = "uwp")] +pub fn hashmap_random_keys() -> (u64, u64) { + use crate::ptr; + + let mut v = (0, 0); + let ret = unsafe { + c::BCryptGenRandom( + ptr::null_mut(), + &mut v as *mut _ as *mut u8, + mem::size_of_val(&v) as c::ULONG, + c::BCRYPT_USE_SYSTEM_PREFERRED_RNG, + ) + }; + if ret != 0 { + panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); + } + return v; +} diff --git a/library/std/src/sys/windows/rwlock.rs b/library/std/src/sys/windows/rwlock.rs new file mode 100644 index 00000000000..a769326352c --- /dev/null +++ b/library/std/src/sys/windows/rwlock.rs @@ -0,0 +1,44 @@ +use crate::cell::UnsafeCell; +use crate::sys::c; + +pub struct RWLock { + inner: UnsafeCell<c::SRWLOCK>, +} + +unsafe impl Send for RWLock {} +unsafe impl Sync for RWLock {} + +impl RWLock { + pub const fn new() -> RWLock { + RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } + } + #[inline] + pub unsafe fn read(&self) { + c::AcquireSRWLockShared(self.inner.get()) + } + #[inline] + pub unsafe fn try_read(&self) -> bool { + c::TryAcquireSRWLockShared(self.inner.get()) != 0 + } + #[inline] + pub unsafe fn write(&self) { + c::AcquireSRWLockExclusive(self.inner.get()) + } + #[inline] + pub unsafe fn try_write(&self) -> bool { + c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 + } + #[inline] + pub unsafe fn read_unlock(&self) { + c::ReleaseSRWLockShared(self.inner.get()) + } + #[inline] + pub unsafe fn write_unlock(&self) { + c::ReleaseSRWLockExclusive(self.inner.get()) + } + + #[inline] + pub unsafe fn destroy(&self) { + // ... + } +} diff --git a/library/std/src/sys/windows/stack_overflow.rs b/library/std/src/sys/windows/stack_overflow.rs new file mode 100644 index 00000000000..187ad4e66c3 --- /dev/null +++ b/library/std/src/sys/windows/stack_overflow.rs @@ -0,0 +1,41 @@ +#![cfg_attr(test, allow(dead_code))] + +use crate::sys::c; +use crate::sys_common::util::report_overflow; + +pub struct Handler; + +impl Handler { + pub unsafe fn new() -> Handler { + // This API isn't available on XP, so don't panic in that case and just + // pray it works out ok. + if c::SetThreadStackGuarantee(&mut 0x5000) == 0 { + if c::GetLastError() as u32 != c::ERROR_CALL_NOT_IMPLEMENTED as u32 { + panic!("failed to reserve stack space for exception handling"); + } + } + Handler + } +} + +extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> c::LONG { + unsafe { + let rec = &(*(*ExceptionInfo).ExceptionRecord); + let code = rec.ExceptionCode; + + if code == c::EXCEPTION_STACK_OVERFLOW { + report_overflow(); + } + c::EXCEPTION_CONTINUE_SEARCH + } +} + +pub unsafe fn init() { + if c::AddVectoredExceptionHandler(0, vectored_handler).is_null() { + panic!("failed to install exception handler"); + } + // Set the thread stack guarantee for the main thread. 
+ let _h = Handler::new(); +} + +pub unsafe fn cleanup() {} diff --git a/library/std/src/sys/windows/stack_overflow_uwp.rs b/library/std/src/sys/windows/stack_overflow_uwp.rs new file mode 100644 index 00000000000..e7236cf359c --- /dev/null +++ b/library/std/src/sys/windows/stack_overflow_uwp.rs @@ -0,0 +1,13 @@ +#![cfg_attr(test, allow(dead_code))] + +pub struct Handler; + +impl Handler { + pub fn new() -> Handler { + Handler + } +} + +pub unsafe fn init() {} + +pub unsafe fn cleanup() {} diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs new file mode 100644 index 00000000000..c84896296ec --- /dev/null +++ b/library/std/src/sys/windows/stdio.rs @@ -0,0 +1,295 @@ +#![unstable(issue = "none", feature = "windows_stdio")] + +use crate::char::decode_utf16; +use crate::cmp; +use crate::io; +use crate::ptr; +use crate::str; +use crate::sys::c; +use crate::sys::cvt; +use crate::sys::handle::Handle; + +// Don't cache handles but get them fresh for every read/write. This allows us to track changes to +// the value over time (such as if a process calls `SetStdHandle` while it's running). See #40490. +pub struct Stdin { + surrogate: u16, +} +pub struct Stdout; +pub struct Stderr; + +// Apparently Windows doesn't handle large reads on stdin or writes to stdout/stderr well (see +// #13304 for details). +// +// From MSDN (2011): "The storage for this buffer is allocated from a shared heap for the +// process that is 64 KB in size. The maximum size of the buffer will depend on heap usage." +// +// We choose the cap at 8 KiB because libuv does the same, and it seems to be acceptable so far. +const MAX_BUFFER_SIZE: usize = 8192; + +// The standard buffer size of BufReader for Stdin should be able to hold 3x more bytes than there +// are `u16`'s in MAX_BUFFER_SIZE. This ensures the read data can always be completely decoded from +// UTF-16 to UTF-8. +pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3; + +pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> { + let handle = unsafe { c::GetStdHandle(handle_id) }; + if handle == c::INVALID_HANDLE_VALUE { + Err(io::Error::last_os_error()) + } else if handle.is_null() { + Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32)) + } else { + Ok(handle) + } +} + +fn is_console(handle: c::HANDLE) -> bool { + // `GetConsoleMode` will return false (0) if this is a pipe (we don't care about the reported + // mode). This will only detect Windows Console, not other terminals connected to a pipe like + // MSYS. Which is exactly what we need, as only Windows Console needs a conversion to UTF-16. + let mut mode = 0; + unsafe { c::GetConsoleMode(handle, &mut mode) != 0 } +} + +fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> { + let handle = get_handle(handle_id)?; + if !is_console(handle) { + let handle = Handle::new(handle); + let ret = handle.write(data); + handle.into_raw(); // Don't close the handle + return ret; + } + + // As the console is meant for presenting text, we assume bytes of `data` come from a string + // and are encoded as UTF-8, which needs to be encoded as UTF-16. + // + // If the data is not valid UTF-8 we write out as many bytes as are valid. + // Only when there are no valid bytes (which will happen on the next call), return an error. 
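The "write out as many bytes as are valid" step leans on `Utf8Error::valid_up_to`; a standalone sketch of that behaviour (the byte string is just example data):

fn main() {
    let data: &[u8] = b"abc\xFFdef"; // valid prefix "abc", then an invalid byte
    match std::str::from_utf8(data) {
        Ok(s) => println!("fully valid: {}", s),
        Err(e) => {
            // Only the leading valid prefix would be written; the caller sees
            // a short write, and the next call starts at the bad byte, which
            // then produces the error case described above.
            assert_eq!(e.valid_up_to(), 3);
            assert_eq!(std::str::from_utf8(&data[..e.valid_up_to()]).unwrap(), "abc");
        }
    }
}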
+ let len = cmp::min(data.len(), MAX_BUFFER_SIZE / 2); + let utf8 = match str::from_utf8(&data[..len]) { + Ok(s) => s, + Err(ref e) if e.valid_up_to() == 0 => { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Windows stdio in console mode does not support writing non-UTF-8 byte sequences", + )); + } + Err(e) => str::from_utf8(&data[..e.valid_up_to()]).unwrap(), + }; + let mut utf16 = [0u16; MAX_BUFFER_SIZE / 2]; + let mut len_utf16 = 0; + for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) { + *dest = chr; + len_utf16 += 1; + } + let utf16 = &utf16[..len_utf16]; + + let mut written = write_u16s(handle, &utf16)?; + + // Figure out how many bytes of as UTF-8 were written away as UTF-16. + if written == utf16.len() { + Ok(utf8.len()) + } else { + // Make sure we didn't end up writing only half of a surrogate pair (even though the chance + // is tiny). Because it is not possible for user code to re-slice `data` in such a way that + // a missing surrogate can be produced (and also because of the UTF-8 validation above), + // write the missing surrogate out now. + // Buffering it would mean we have to lie about the number of bytes written. + let first_char_remaining = utf16[written]; + if first_char_remaining >= 0xDCEE && first_char_remaining <= 0xDFFF { + // low surrogate + // We just hope this works, and give up otherwise + let _ = write_u16s(handle, &utf16[written..written + 1]); + written += 1; + } + // Calculate the number of bytes of `utf8` that were actually written. + let mut count = 0; + for ch in utf16[..written].iter() { + count += match ch { + 0x0000..=0x007F => 1, + 0x0080..=0x07FF => 2, + 0xDCEE..=0xDFFF => 1, // Low surrogate. We already counted 3 bytes for the other. + _ => 3, + }; + } + debug_assert!(String::from_utf16(&utf16[..written]).unwrap() == utf8[..count]); + Ok(count) + } +} + +fn write_u16s(handle: c::HANDLE, data: &[u16]) -> io::Result<usize> { + let mut written = 0; + cvt(unsafe { + c::WriteConsoleW( + handle, + data.as_ptr() as c::LPCVOID, + data.len() as u32, + &mut written, + ptr::null_mut(), + ) + })?; + Ok(written as usize) +} + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin { surrogate: 0 }) + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let handle = get_handle(c::STD_INPUT_HANDLE)?; + if !is_console(handle) { + let handle = Handle::new(handle); + let ret = handle.read(buf); + handle.into_raw(); // Don't close the handle + return ret; + } + + if buf.len() == 0 { + return Ok(0); + } else if buf.len() < 4 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Windows stdin in console mode does not support a buffer too small to \ + guarantee holding one arbitrary UTF-8 character (4 bytes)", + )); + } + + let mut utf16_buf = [0u16; MAX_BUFFER_SIZE / 2]; + // In the worst case, an UTF-8 string can take 3 bytes for every `u16` of an UTF-16. So + // we can read at most a third of `buf.len()` chars and uphold the guarantee no data gets + // lost. + let amount = cmp::min(buf.len() / 3, utf16_buf.len()); + let read = read_u16s_fixup_surrogates(handle, &mut utf16_buf, amount, &mut self.surrogate)?; + + utf16_to_utf8(&utf16_buf[..read], buf) + } +} + +// We assume that if the last `u16` is an unpaired surrogate they got sliced apart by our +// buffer size, and keep it around for the next read hoping to put them together. +// This is a best effort, and may not work if we are not the only reader on Stdin. 
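A small sketch of the surrogate bookkeeping itself (example code units only): a trailing high surrogate means the second half of a pair was cut off at the buffer boundary, and the pair decodes cleanly once rejoined on the next read:

use std::char::decode_utf16;

fn is_high_surrogate(u: u16) -> bool {
    (0xD800..=0xDBFF).contains(&u)
}

fn main() {
    // U+10000 is encoded in UTF-16 as the surrogate pair D800 DC00. A read
    // that ends right after the high half must hold it back for the next read.
    let truncated = [0x0061u16, 0xD800]; // "a" plus a dangling high surrogate
    assert!(is_high_surrogate(*truncated.last().unwrap()));

    // Prepending the held-back unit to the next chunk makes the data decodable.
    let rejoined = [0x0061u16, 0xD800, 0xDC00];
    let s = decode_utf16(rejoined.iter().copied())
        .collect::<Result<String, _>>()
        .unwrap();
    assert_eq!(s, "a\u{10000}");
}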
+fn read_u16s_fixup_surrogates( + handle: c::HANDLE, + buf: &mut [u16], + mut amount: usize, + surrogate: &mut u16, +) -> io::Result<usize> { + // Insert possibly remaining unpaired surrogate from last read. + let mut start = 0; + if *surrogate != 0 { + buf[0] = *surrogate; + *surrogate = 0; + start = 1; + if amount == 1 { + // Special case: `Stdin::read` guarantees we can always read at least one new `u16` + // and combine it with an unpaired surrogate, because the UTF-8 buffer is at least + // 4 bytes. + amount = 2; + } + } + let mut amount = read_u16s(handle, &mut buf[start..amount])? + start; + + if amount > 0 { + let last_char = buf[amount - 1]; + if last_char >= 0xD800 && last_char <= 0xDBFF { + // high surrogate + *surrogate = last_char; + amount -= 1; + } + } + Ok(amount) +} + +fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> { + // Configure the `pInputControl` parameter to not only return on `\r\n` but also Ctrl-Z, the + // traditional DOS method to indicate end of character stream / user input (SUB). + // See #38274 and https://stackoverflow.com/questions/43836040/win-api-readconsole. + const CTRL_Z: u16 = 0x1A; + const CTRL_Z_MASK: c::ULONG = 1 << CTRL_Z; + let mut input_control = c::CONSOLE_READCONSOLE_CONTROL { + nLength: crate::mem::size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as c::ULONG, + nInitialChars: 0, + dwCtrlWakeupMask: CTRL_Z_MASK, + dwControlKeyState: 0, + }; + + let mut amount = 0; + cvt(unsafe { + c::ReadConsoleW( + handle, + buf.as_mut_ptr() as c::LPVOID, + buf.len() as u32, + &mut amount, + &mut input_control as c::PCONSOLE_READCONSOLE_CONTROL, + ) + })?; + + if amount > 0 && buf[amount as usize - 1] == CTRL_Z { + amount -= 1; + } + Ok(amount as usize) +} + +#[allow(unused)] +fn utf16_to_utf8(utf16: &[u16], utf8: &mut [u8]) -> io::Result<usize> { + let mut written = 0; + for chr in decode_utf16(utf16.iter().cloned()) { + match chr { + Ok(chr) => { + chr.encode_utf8(&mut utf8[written..]); + written += chr.len_utf8(); + } + Err(_) => { + // We can't really do any better than forget all data and return an error. 
+ return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Windows stdin in console mode does not support non-UTF-16 input; \ + encountered unpaired surrogate", + )); + } + } + } + Ok(written) +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + write(c::STD_OUTPUT_HANDLE, buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + write(c::STD_ERROR_HANDLE, buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32) +} + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/windows/stdio_uwp.rs b/library/std/src/sys/windows/stdio_uwp.rs new file mode 100644 index 00000000000..5bdabf6d4b7 --- /dev/null +++ b/library/std/src/sys/windows/stdio_uwp.rs @@ -0,0 +1,84 @@ +#![unstable(issue = "none", feature = "windows_stdio")] + +use crate::io; +use crate::mem::ManuallyDrop; +use crate::sys::c; +use crate::sys::handle::Handle; + +pub struct Stdin {} +pub struct Stdout; +pub struct Stderr; + +const MAX_BUFFER_SIZE: usize = 8192; +pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3; + +pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> { + let handle = unsafe { c::GetStdHandle(handle_id) }; + if handle == c::INVALID_HANDLE_VALUE { + Err(io::Error::last_os_error()) + } else if handle.is_null() { + Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32)) + } else { + Ok(handle) + } +} + +fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> { + let handle = get_handle(handle_id)?; + let handle = Handle::new(handle); + ManuallyDrop::new(handle).write(data) +} + +impl Stdin { + pub fn new() -> io::Result<Stdin> { + Ok(Stdin {}) + } +} + +impl io::Read for Stdin { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let handle = get_handle(c::STD_INPUT_HANDLE)?; + let handle = Handle::new(handle); + ManuallyDrop::new(handle).read(buf) + } +} + +impl Stdout { + pub fn new() -> io::Result<Stdout> { + Ok(Stdout) + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + write(c::STD_OUTPUT_HANDLE, buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub fn new() -> io::Result<Stderr> { + Ok(Stderr) + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result<usize> { + write(c::STD_ERROR_HANDLE, buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn is_ebadf(err: &io::Error) -> bool { + err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32) +} + +pub fn panic_output() -> Option<impl io::Write> { + Stderr::new().ok() +} diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs new file mode 100644 index 00000000000..38839ea5e90 --- /dev/null +++ b/library/std/src/sys/windows/thread.rs @@ -0,0 +1,110 @@ +use crate::ffi::CStr; +use crate::io; +use crate::ptr; +use crate::sys::c; +use crate::sys::handle::Handle; +use crate::sys::stack_overflow; +use crate::time::Duration; + +use libc::c_void; + +use super::to_u16s; + +pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024; + +pub struct Thread { + handle: Handle, +} + +impl Thread { + // unsafe: see 
thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { + let p = Box::into_raw(box p); + + // FIXME On UNIX, we guard against stack sizes that are too small but + // that's because pthreads enforces that stacks are at least + // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's + // just that below a certain threshold you can't do anything useful. + // That threshold is application and architecture-specific, however. + // Round up to the next 64 kB because that's what the NT kernel does, + // might as well make it explicit. + let stack_size = (stack + 0xfffe) & (!0xfffe); + let ret = c::CreateThread( + ptr::null_mut(), + stack_size, + thread_start, + p as *mut _, + c::STACK_SIZE_PARAM_IS_A_RESERVATION, + ptr::null_mut(), + ); + + return if ret as usize == 0 { + // The thread failed to start and as a result p was not consumed. Therefore, it is + // safe to reconstruct the box so that it gets deallocated. + drop(Box::from_raw(p)); + Err(io::Error::last_os_error()) + } else { + Ok(Thread { handle: Handle::new(ret) }) + }; + + extern "system" fn thread_start(main: *mut c_void) -> c::DWORD { + unsafe { + // Next, set up our stack overflow handler which may get triggered if we run + // out of stack. + let _handler = stack_overflow::Handler::new(); + // Finally, let's run some code. + Box::from_raw(main as *mut Box<dyn FnOnce()>)(); + } + 0 + } + } + + pub fn set_name(name: &CStr) { + if let Ok(utf8) = name.to_str() { + if let Ok(utf16) = to_u16s(utf8) { + unsafe { + c::SetThreadDescription(c::GetCurrentThread(), utf16.as_ptr()); + }; + }; + }; + } + + pub fn join(self) { + let rc = unsafe { c::WaitForSingleObject(self.handle.raw(), c::INFINITE) }; + if rc == c::WAIT_FAILED { + panic!("failed to join on thread: {}", io::Error::last_os_error()); + } + } + + pub fn yield_now() { + // This function will return 0 if there are no other threads to execute, + // but this also means that the yield was useless so this isn't really a + // case that needs to be worried about. 
+ unsafe { + c::SwitchToThread(); + } + } + + pub fn sleep(dur: Duration) { + unsafe { c::Sleep(super::dur2timeout(dur)) } + } + + pub fn handle(&self) -> &Handle { + &self.handle + } + + pub fn into_handle(self) -> Handle { + self.handle + } +} + +#[cfg_attr(test, allow(dead_code))] +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option<Guard> { + None + } + pub unsafe fn init() -> Option<Guard> { + None + } +} diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs new file mode 100644 index 00000000000..7be13bc4b2b --- /dev/null +++ b/library/std/src/sys/windows/thread_local_dtor.rs @@ -0,0 +1,4 @@ +#![unstable(feature = "thread_local_internals", issue = "none")] +#![cfg(target_thread_local)] + +pub use crate::sys_common::thread_local_dtor::register_dtor_fallback as register_dtor; diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs new file mode 100644 index 00000000000..82901871e78 --- /dev/null +++ b/library/std/src/sys/windows/thread_local_key.rs @@ -0,0 +1,252 @@ +use crate::mem; +use crate::ptr; +use crate::sync::atomic::AtomicPtr; +use crate::sync::atomic::Ordering::SeqCst; +use crate::sys::c; + +pub type Key = c::DWORD; +pub type Dtor = unsafe extern "C" fn(*mut u8); + +// Turns out, like pretty much everything, Windows is pretty close the +// functionality that Unix provides, but slightly different! In the case of +// TLS, Windows does not provide an API to provide a destructor for a TLS +// variable. This ends up being pretty crucial to this implementation, so we +// need a way around this. +// +// The solution here ended up being a little obscure, but fear not, the +// internet has informed me [1][2] that this solution is not unique (no way +// I could have thought of it as well!). The key idea is to insert some hook +// somewhere to run arbitrary code on thread termination. With this in place +// we'll be able to run anything we like, including all TLS destructors! +// +// To accomplish this feat, we perform a number of threads, all contained +// within this module: +// +// * All TLS destructors are tracked by *us*, not the windows runtime. This +// means that we have a global list of destructors for each TLS key that +// we know about. +// * When a thread exits, we run over the entire list and run dtors for all +// non-null keys. This attempts to match Unix semantics in this regard. +// +// This ends up having the overhead of using a global list, having some +// locks here and there, and in general just adding some more code bloat. We +// attempt to optimize runtime by forgetting keys that don't have +// destructors, but this only gets us so far. +// +// For more details and nitty-gritty, see the code sections below! +// +// [1]: http://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way +// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base +// /threading/thread_local_storage_win.cc#L42 + +// ------------------------------------------------------------------------- +// Native bindings +// +// This section is just raw bindings to the native functions that Windows +// provides, There's a few extra calls to deal with destructors. 
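// Illustrative sketch, not part of the diff: one plausible way a caller could drive the
// raw `create`/`set`/`get` bindings defined just below. `Key`, `Dtor`, `create`, `set`
// and `get` are the items from this module; `free_u32` and `example_usage` are
// hypothetical names used only for this example. In std proper these calls are wrapped
// by the cross-platform `StaticKey` type in `sys_common`; this is only to make the
// calling convention concrete.
unsafe extern "C" fn free_u32(p: *mut u8) {
    // Destructor run from the `.CRT$XLB` callback (see further below) when a thread exits.
    unsafe { drop(Box::from_raw(p as *mut u32)) };
}

unsafe fn example_usage() {
    unsafe {
        let key = create(Some(free_u32)); // TlsAlloc + destructor registration
        set(key, Box::into_raw(Box::new(42u32)) as *mut u8); // TlsSetValue
        let value = *(get(key) as *mut u32); // TlsGetValue
        assert_eq!(value, 42);
        // `free_u32` is invoked for the stored pointer when this thread terminates.
    }
}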
+ +#[inline] +pub unsafe fn create(dtor: Option<Dtor>) -> Key { + let key = c::TlsAlloc(); + assert!(key != c::TLS_OUT_OF_INDEXES); + if let Some(f) = dtor { + register_dtor(key, f); + } + key +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + let r = c::TlsSetValue(key, value as c::LPVOID); + debug_assert!(r != 0); +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + c::TlsGetValue(key) as *mut u8 +} + +#[inline] +pub unsafe fn destroy(_key: Key) { + rtabort!("can't destroy tls keys on windows") +} + +#[inline] +pub fn requires_synchronized_create() -> bool { + true +} + +// ------------------------------------------------------------------------- +// Dtor registration +// +// Windows has no native support for running destructors so we manage our own +// list of destructors to keep track of how to destroy keys. We then install a +// callback later to get invoked whenever a thread exits, running all +// appropriate destructors. +// +// Currently unregistration from this list is not supported. A destructor can be +// registered but cannot be unregistered. There's various simplifying reasons +// for doing this, the big ones being: +// +// 1. Currently we don't even support deallocating TLS keys, so normal operation +// doesn't need to deallocate a destructor. +// 2. There is no point in time where we know we can unregister a destructor +// because it could always be getting run by some remote thread. +// +// Typically processes have a statically known set of TLS keys which is pretty +// small, and we'd want to keep this memory alive for the whole process anyway +// really. +// +// Perhaps one day we can fold the `Box` here into a static allocation, +// expanding the `StaticKey` structure to contain not only a slot for the TLS +// key but also a slot for the destructor queue on windows. An optimization for +// another day! + +static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut()); + +struct Node { + dtor: Dtor, + key: Key, + next: *mut Node, +} + +#[cfg(miri)] +extern "Rust" { + /// Miri-provided extern function to mark the block `ptr` points to as a "root" + /// for some static memory. This memory and everything reachable by it is not + /// considered leaking even if it still exists when the program terminates. + /// + /// `ptr` has to point to the beginning of an allocated block. + fn miri_static_root(ptr: *const u8); +} + +unsafe fn register_dtor(key: Key, dtor: Dtor) { + let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() }); + + let mut head = DTORS.load(SeqCst); + loop { + node.next = head; + match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) { + Ok(_) => { + #[cfg(miri)] + miri_static_root(&*node as *const _ as *const u8); + + mem::forget(node); + return; + } + Err(cur) => head = cur, + } + } +} + +// ------------------------------------------------------------------------- +// Where the Magic (TM) Happens +// +// If you're looking at this code, and wondering "what is this doing?", +// you're not alone! I'll try to break this down step by step: +// +// # What's up with CRT$XLB? +// +// For anything about TLS destructors to work on Windows, we have to be able +// to run *something* when a thread exits. To do so, we place a very special +// static in a very special location. If this is encoded in just the right +// way, the kernel's loader is apparently nice enough to run some function +// of ours whenever a thread exits! How nice of the kernel! 
+// +// Lots of detailed information can be found in source [1] above, but the +// gist of it is that this is leveraging a feature of Microsoft's PE format +// (executable format) which is not actually used by any compilers today. +// This apparently translates to any callbacks in the ".CRT$XLB" section +// being run on certain events. +// +// So after all that, we use the compiler's #[link_section] feature to place +// a callback pointer into the magic section so it ends up being called. +// +// # What's up with this callback? +// +// The callback specified receives a number of parameters from... someone! +// (the kernel? the runtime? I'm not quite sure!) There are a few events that +// this gets invoked for, but we're currently only interested on when a +// thread or a process "detaches" (exits). The process part happens for the +// last thread and the thread part happens for any normal thread. +// +// # Ok, what's up with running all these destructors? +// +// This will likely need to be improved over time, but this function +// attempts a "poor man's" destructor callback system. Once we've got a list +// of what to run, we iterate over all keys, check their values, and then run +// destructors if the values turn out to be non null (setting them to null just +// beforehand). We do this a few times in a loop to basically match Unix +// semantics. If we don't reach a fixed point after a short while then we just +// inevitably leak something most likely. +// +// # The article mentions weird stuff about "/INCLUDE"? +// +// It sure does! Specifically we're talking about this quote: +// +// The Microsoft run-time library facilitates this process by defining a +// memory image of the TLS Directory and giving it the special name +// “__tls_used” (Intel x86 platforms) or “_tls_used” (other platforms). The +// linker looks for this memory image and uses the data there to create the +// TLS Directory. Other compilers that support TLS and work with the +// Microsoft linker must use this same technique. +// +// Basically what this means is that if we want support for our TLS +// destructors/our hook being called then we need to make sure the linker does +// not omit this symbol. Otherwise it will omit it and our callback won't be +// wired up. +// +// We don't actually use the `/INCLUDE` linker flag here like the article +// mentions because the Rust compiler doesn't propagate linker flags, but +// instead we use a shim function which performs a volatile 1-byte load from +// the address of the symbol to ensure it sticks around. + +#[link_section = ".CRT$XLB"] +#[allow(dead_code, unused_variables)] +#[used] // we don't want LLVM eliminating this symbol for any reason, and +// when the symbol makes it to the linker the linker will take over +pub static p_thread_callback: unsafe extern "system" fn(c::LPVOID, c::DWORD, c::LPVOID) = + on_tls_callback; + +#[allow(dead_code, unused_variables)] +unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID) { + if dwReason == c::DLL_THREAD_DETACH || dwReason == c::DLL_PROCESS_DETACH { + run_dtors(); + } + + // See comments above for what this is doing. Note that we don't need this + // trickery on GNU windows, just on MSVC. 
+ reference_tls_used(); + #[cfg(target_env = "msvc")] + unsafe fn reference_tls_used() { + extern "C" { + static _tls_used: u8; + } + crate::intrinsics::volatile_load(&_tls_used); + } + #[cfg(not(target_env = "msvc"))] + unsafe fn reference_tls_used() {} +} + +#[allow(dead_code)] // actually called above +unsafe fn run_dtors() { + let mut any_run = true; + for _ in 0..5 { + if !any_run { + break; + } + any_run = false; + let mut cur = DTORS.load(SeqCst); + while !cur.is_null() { + let ptr = c::TlsGetValue((*cur).key); + + if !ptr.is_null() { + c::TlsSetValue((*cur).key, ptr::null_mut()); + ((*cur).dtor)(ptr as *mut _); + any_run = true; + } + + cur = (*cur).next; + } + } +} diff --git a/library/std/src/sys/windows/time.rs b/library/std/src/sys/windows/time.rs new file mode 100644 index 00000000000..900260169c7 --- /dev/null +++ b/library/std/src/sys/windows/time.rs @@ -0,0 +1,228 @@ +use crate::cmp::Ordering; +use crate::convert::TryInto; +use crate::fmt; +use crate::mem; +use crate::sys::c; +use crate::time::Duration; + +use core::hash::{Hash, Hasher}; + +const NANOS_PER_SEC: u64 = 1_000_000_000; +const INTERVALS_PER_SEC: u64 = NANOS_PER_SEC / 100; + +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] +pub struct Instant { + // This duration is relative to an arbitrary microsecond epoch + // from the winapi QueryPerformanceCounter function. + t: Duration, +} + +#[derive(Copy, Clone)] +pub struct SystemTime { + t: c::FILETIME, +} + +const INTERVALS_TO_UNIX_EPOCH: u64 = 11_644_473_600 * INTERVALS_PER_SEC; + +pub const UNIX_EPOCH: SystemTime = SystemTime { + t: c::FILETIME { + dwLowDateTime: INTERVALS_TO_UNIX_EPOCH as u32, + dwHighDateTime: (INTERVALS_TO_UNIX_EPOCH >> 32) as u32, + }, +}; + +impl Instant { + pub fn now() -> Instant { + // High precision timing on windows operates in "Performance Counter" + // units, as returned by the WINAPI QueryPerformanceCounter function. + // These relate to seconds by a factor of QueryPerformanceFrequency. + // In order to keep unit conversions out of normal interval math, we + // measure in QPC units and immediately convert to nanoseconds. + perf_counter::PerformanceCounterInstant::now().into() + } + + pub fn actually_monotonic() -> bool { + false + } + + pub const fn zero() -> Instant { + Instant { t: Duration::from_secs(0) } + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> { + // On windows there's a threshold below which we consider two timestamps + // equivalent due to measurement error. For more details + doc link, + // check the docs on epsilon. + let epsilon = perf_counter::PerformanceCounterInstant::epsilon(); + if other.t > self.t && other.t - self.t <= epsilon { + Some(Duration::new(0, 0)) + } else { + self.t.checked_sub(other.t) + } + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_add(*other)? }) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> { + Some(Instant { t: self.t.checked_sub(*other)? 
}) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + unsafe { + let mut t: SystemTime = mem::zeroed(); + c::GetSystemTimePreciseAsFileTime(&mut t.t); + t + } + } + + fn from_intervals(intervals: i64) -> SystemTime { + SystemTime { + t: c::FILETIME { + dwLowDateTime: intervals as c::DWORD, + dwHighDateTime: (intervals >> 32) as c::DWORD, + }, + } + } + + fn intervals(&self) -> i64 { + (self.t.dwLowDateTime as i64) | ((self.t.dwHighDateTime as i64) << 32) + } + + pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { + let me = self.intervals(); + let other = other.intervals(); + if me >= other { + Ok(intervals2dur((me - other) as u64)) + } else { + Err(intervals2dur((other - me) as u64)) + } + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { + let intervals = self.intervals().checked_add(checked_dur2intervals(other)?)?; + Some(SystemTime::from_intervals(intervals)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { + let intervals = self.intervals().checked_sub(checked_dur2intervals(other)?)?; + Some(SystemTime::from_intervals(intervals)) + } +} + +impl PartialEq for SystemTime { + fn eq(&self, other: &SystemTime) -> bool { + self.intervals() == other.intervals() + } +} + +impl Eq for SystemTime {} + +impl PartialOrd for SystemTime { + fn partial_cmp(&self, other: &SystemTime) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for SystemTime { + fn cmp(&self, other: &SystemTime) -> Ordering { + self.intervals().cmp(&other.intervals()) + } +} + +impl fmt::Debug for SystemTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SystemTime").field("intervals", &self.intervals()).finish() + } +} + +impl From<c::FILETIME> for SystemTime { + fn from(t: c::FILETIME) -> SystemTime { + SystemTime { t } + } +} + +impl Hash for SystemTime { + fn hash<H: Hasher>(&self, state: &mut H) { + self.intervals().hash(state) + } +} + +fn checked_dur2intervals(dur: &Duration) -> Option<i64> { + dur.as_secs() + .checked_mul(INTERVALS_PER_SEC)? + .checked_add(dur.subsec_nanos() as u64 / 100)? + .try_into() + .ok() +} + +fn intervals2dur(intervals: u64) -> Duration { + Duration::new(intervals / INTERVALS_PER_SEC, ((intervals % INTERVALS_PER_SEC) * 100) as u32) +} + +mod perf_counter { + use super::NANOS_PER_SEC; + use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst}; + use crate::sys::c; + use crate::sys::cvt; + use crate::sys_common::mul_div_u64; + use crate::time::Duration; + + pub struct PerformanceCounterInstant { + ts: c::LARGE_INTEGER, + } + impl PerformanceCounterInstant { + pub fn now() -> Self { + Self { ts: query() } + } + + // Per microsoft docs, the margin of error for cross-thread time comparisons + // using QueryPerformanceCounter is 1 "tick" -- defined as 1/frequency(). 
+ // Reference: https://docs.microsoft.com/en-us/windows/desktop/SysInfo + // /acquiring-high-resolution-time-stamps + pub fn epsilon() -> Duration { + let epsilon = NANOS_PER_SEC / (frequency() as u64); + Duration::from_nanos(epsilon) + } + } + impl From<PerformanceCounterInstant> for super::Instant { + fn from(other: PerformanceCounterInstant) -> Self { + let freq = frequency() as u64; + let instant_nsec = mul_div_u64(other.ts as u64, NANOS_PER_SEC, freq); + Self { t: Duration::from_nanos(instant_nsec) } + } + } + + fn frequency() -> c::LARGE_INTEGER { + static mut FREQUENCY: c::LARGE_INTEGER = 0; + static STATE: AtomicUsize = AtomicUsize::new(0); + + unsafe { + // If a previous thread has filled in this global state, use that. + if STATE.load(SeqCst) == 2 { + return FREQUENCY; + } + + // ... otherwise learn for ourselves ... + let mut frequency = 0; + cvt(c::QueryPerformanceFrequency(&mut frequency)).unwrap(); + + // ... and attempt to be the one thread that stores it globally for + // all other threads + if STATE.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() { + FREQUENCY = frequency; + STATE.store(2, SeqCst); + } + frequency + } + } + + fn query() -> c::LARGE_INTEGER { + let mut qpc_value: c::LARGE_INTEGER = 0; + cvt(unsafe { c::QueryPerformanceCounter(&mut qpc_value) }).unwrap(); + qpc_value + } +} |
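The `SystemTime` half of time.rs above stores raw `FILETIME` values and does all arithmetic in 100-nanosecond "intervals". As a minimal standalone sketch (not part of the diff, using only the constants shown above), this is the conversion it performs between an interval count since 1601-01-01 and a `Duration` relative to the Unix epoch:

use std::time::Duration;

const INTERVALS_PER_SEC: u64 = 10_000_000; // one interval = 100 ns
const INTERVALS_TO_UNIX_EPOCH: u64 = 11_644_473_600 * INTERVALS_PER_SEC; // 1601 -> 1970

// Mirrors `intervals2dur` combined with the UNIX_EPOCH offset used by `sub_time`.
fn filetime_intervals_to_unix(intervals_since_1601: u64) -> Option<Duration> {
    let unix = intervals_since_1601.checked_sub(INTERVALS_TO_UNIX_EPOCH)?;
    Some(Duration::new(unix / INTERVALS_PER_SEC, ((unix % INTERVALS_PER_SEC) * 100) as u32))
}

fn main() {
    // 2020-01-01T00:00:00Z is 1_577_836_800 seconds after the Unix epoch.
    let intervals = INTERVALS_TO_UNIX_EPOCH + 1_577_836_800 * INTERVALS_PER_SEC;
    assert_eq!(filetime_intervals_to_unix(intervals), Some(Duration::from_secs(1_577_836_800)));
}

Keeping everything as integer interval counts avoids floating-point rounding and matches how `checked_add_duration` and `checked_sub_duration` above operate directly on the interval value.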

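Back in windows/stdio.rs earlier in this diff, the console `write` path converts a UTF-8 prefix to UTF-16, and when `WriteConsoleW` performs a short write it must report how many bytes of the original UTF-8 input the written `u16`s correspond to. A small self-contained sketch of that accounting follows (not part of the diff; note that the UTF-16 low-surrogate range is 0xDC00..=0xDFFF, and a low surrogate counts as one byte here because the paired high surrogate already accounted for three of the supplementary character's four UTF-8 bytes):

fn utf8_len_of_written_utf16(written: &[u16]) -> usize {
    written
        .iter()
        .map(|&ch| match ch {
            0x0000..=0x007F => 1,
            0x0080..=0x07FF => 2,
            0xDC00..=0xDFFF => 1, // low surrogate: the matching high surrogate counted as 3
            _ => 3,
        })
        .sum()
}

fn main() {
    // "a", U+00E9, U+20AC and U+1F600 take 1 + 2 + 3 + 4 bytes of UTF-8 respectively.
    let s = "a\u{e9}\u{20ac}\u{1f600}";
    let utf16: Vec<u16> = s.encode_utf16().collect();
    assert_eq!(utf8_len_of_written_utf16(&utf16), s.len());
}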