| field | value | date |
|---|---|---|
| author | Simonas Kazlauskas <git@kazlauskas.me> | 2015-04-04 00:46:54 +0300 |
| committer | Simonas Kazlauskas <git@kazlauskas.me> | 2015-04-08 19:42:16 +0300 |
| commit | 45aa6c8d1bc2f7863c92da6643de4642bb2d83bf (patch) | |
| tree | 53f8648a696dc49072ceef53c36974f6fc599515 /src/libstd/sys | |
| parent | 80def6c2447d23a624e611417f24cf0ab2a5a676 (diff) | |
| download | rust-45aa6c8d1bc2f7863c92da6643de4642bb2d83bf.tar.gz, rust-45aa6c8d1bc2f7863c92da6643de4642bb2d83bf.zip | |
Implement reentrant mutexes and make stdio use them
write_fmt calls write once for each formatted field. The stdio handles previously used the default implementation of write_fmt, which calls write on a not-yet-locked stdout (each write acquiring the lock on its own), so print! in a multithreaded environment could still interleave the contents of two separate prints. This patch implements reentrant mutexes, changes the stdio handles to use these mutexes, and overrides write_fmt to lock the stdio handle for the whole duration of the call.
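The failure mode is visible with nothing but the `std::fmt::Write` trait. A minimal sketch (illustration only, not part of the patch; `CountingWriter` is a made-up type for the example): one `write!` with several formatted fields reaches the sink as several separate `write_str` calls, and a thread switch between any two of them is what lets two concurrent `print!`s interleave.

```rust
use std::fmt::{self, Write};

// Records each `write_str` call separately so we can see how many discrete
// writes a single formatted print turns into.
struct CountingWriter { chunks: Vec<String> }

impl Write for CountingWriter {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.chunks.push(s.to_string());
        Ok(())
    }
}

fn main() {
    let mut w = CountingWriter { chunks: Vec::new() };
    write!(w, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
    // The single `write!` reached the writer as several separate calls --
    // one per literal piece and per formatted argument, e.g. roughly
    // ["1", " + ", "2", " = ", "3"] -- not as one atomic write.
    println!("{:?}", w.chunks);
    assert!(w.chunks.len() > 1);
}
```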
Diffstat (limited to 'src/libstd/sys')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | src/libstd/sys/common/mod.rs | 2 |
| -rw-r--r-- | src/libstd/sys/common/poison.rs | 187 |
| -rw-r--r-- | src/libstd/sys/common/remutex.rs | 233 |
| -rw-r--r-- | src/libstd/sys/unix/mutex.rs | 48 |
| -rw-r--r-- | src/libstd/sys/unix/sync.rs | 29 |
| -rw-r--r-- | src/libstd/sys/windows/mutex.rs | 31 |
| -rw-r--r-- | src/libstd/sys/windows/sync.rs | 18 |
7 files changed, 546 insertions, 2 deletions
diff --git a/src/libstd/sys/common/mod.rs b/src/libstd/sys/common/mod.rs
index d2e2f1044d6..8a01eace889 100644
--- a/src/libstd/sys/common/mod.rs
+++ b/src/libstd/sys/common/mod.rs
@@ -29,6 +29,8 @@ pub mod condvar;
 pub mod mutex;
 pub mod net;
 pub mod net2;
+pub mod poison;
+pub mod remutex;
 pub mod rwlock;
 pub mod stack;
 pub mod thread;
diff --git a/src/libstd/sys/common/poison.rs b/src/libstd/sys/common/poison.rs
new file mode 100644
index 00000000000..347cd0b464e
--- /dev/null
+++ b/src/libstd/sys/common/poison.rs
@@ -0,0 +1,187 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use prelude::v1::*;
+
+use cell::UnsafeCell;
+use error::{Error};
+use fmt;
+use thread;
+
+pub struct Flag { failed: UnsafeCell<bool> }
+
+// This flag is only ever accessed with a lock previously held. Note that this
+// a totally private structure.
+unsafe impl Send for Flag {}
+unsafe impl Sync for Flag {}
+
+pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
+
+impl Flag {
+    #[inline]
+    pub fn borrow(&self) -> LockResult<Guard> {
+        let ret = Guard { panicking: thread::panicking() };
+        if unsafe { *self.failed.get() } {
+            Err(PoisonError::new(ret))
+        } else {
+            Ok(ret)
+        }
+    }
+
+    #[inline]
+    pub fn done(&self, guard: &Guard) {
+        if !guard.panicking && thread::panicking() {
+            unsafe { *self.failed.get() = true; }
+        }
+    }
+
+    #[inline]
+    pub fn get(&self) -> bool {
+        unsafe { *self.failed.get() }
+    }
+}
+
+pub struct Guard {
+    panicking: bool,
+}
+
+/// A type of error which can be returned whenever a lock is acquired.
+///
+/// Both Mutexes and RwLocks are poisoned whenever a task fails while the lock
+/// is held. The precise semantics for when a lock is poisoned is documented on
+/// each lock, but once a lock is poisoned then all future acquisitions will
+/// return this error.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct PoisonError<T> {
+    guard: T,
+}
+
+/// An enumeration of possible errors which can occur while calling the
+/// `try_lock` method.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum TryLockError<T> {
+    /// The lock could not be acquired because another task failed while holding
+    /// the lock.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Poisoned(PoisonError<T>),
+    /// The lock could not be acquired at this time because the operation would
+    /// otherwise block.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    WouldBlock,
+}
+
+/// A type alias for the result of a lock method which can be poisoned.
+///
+/// The `Ok` variant of this result indicates that the primitive was not
+/// poisoned, and the `Guard` is contained within. The `Err` variant indicates
+/// that the primitive was poisoned. Note that the `Err` variant *also* carries
+/// the associated guard, and it can be acquired through the `into_inner`
+/// method.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
+
+/// A type alias for the result of a nonblocking locking method.
+///
+/// For more information, see `LockResult`. A `TryLockResult` doesn't
+/// necessarily hold the associated guard in the `Err` type as the lock may not
+/// have been acquired for other reasons.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for PoisonError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        "PoisonError { inner: .. }".fmt(f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Display for PoisonError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        "poisoned lock: another task failed inside".fmt(f)
+    }
+}
+
+impl<T: Send> Error for PoisonError<T> {
+    fn description(&self) -> &str {
+        "poisoned lock: another task failed inside"
+    }
+}
+
+impl<T> PoisonError<T> {
+    /// Create a `PoisonError`.
+    #[unstable(feature = "std_misc")]
+    pub fn new(guard: T) -> PoisonError<T> {
+        PoisonError { guard: guard }
+    }
+
+    /// Consumes this error indicating that a lock is poisoned, returning the
+    /// underlying guard to allow access regardless.
+    #[unstable(feature = "std_misc")]
+    pub fn into_inner(self) -> T { self.guard }
+
+    /// Reaches into this error indicating that a lock is poisoned, returning a
+    /// reference to the underlying guard to allow access regardless.
+    #[unstable(feature = "std_misc")]
+    pub fn get_ref(&self) -> &T { &self.guard }
+
+    /// Reaches into this error indicating that a lock is poisoned, returning a
+    /// mutable reference to the underlying guard to allow access regardless.
+    #[unstable(feature = "std_misc")]
+    pub fn get_mut(&mut self) -> &mut T { &mut self.guard }
+}
+
+impl<T> From<PoisonError<T>> for TryLockError<T> {
+    fn from(err: PoisonError<T>) -> TryLockError<T> {
+        TryLockError::Poisoned(err)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> fmt::Debug for TryLockError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
+            TryLockError::WouldBlock => "WouldBlock".fmt(f)
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Send> fmt::Display for TryLockError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.description().fmt(f)
+    }
+}
+
+impl<T: Send> Error for TryLockError<T> {
+    fn description(&self) -> &str {
+        match *self {
+            TryLockError::Poisoned(ref p) => p.description(),
+            TryLockError::WouldBlock => "try_lock failed because the operation would block"
+        }
+    }
+
+    fn cause(&self) -> Option<&Error> {
+        match *self {
+            TryLockError::Poisoned(ref p) => Some(p),
+            _ => None
+        }
+    }
+}
+
+pub fn map_result<T, U, F>(result: LockResult<T>, f: F)
+                           -> LockResult<U>
+                           where F: FnOnce(T) -> U {
+    match result {
+        Ok(t) => Ok(f(t)),
+        Err(PoisonError { guard }) => Err(PoisonError::new(f(guard)))
+    }
+}
diff --git a/src/libstd/sys/common/remutex.rs b/src/libstd/sys/common/remutex.rs
new file mode 100644
index 00000000000..b35063c0e23
--- /dev/null
+++ b/src/libstd/sys/common/remutex.rs
@@ -0,0 +1,233 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![unstable(feature = "reentrant_mutex", reason = "new API")]
+
+use prelude::v1::*;
+
+use fmt;
+use marker;
+use ops::Deref;
+use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
+use sys::mutex as sys;
+
+/// A re-entrant mutual exclusion
+///
+/// This mutex will block *other* threads waiting for the lock to become available. The thread
+/// which has already locked the mutex can lock it multiple times without blocking, preventing a
+/// common source of deadlocks.
+pub struct ReentrantMutex<T> {
+    inner: Box<sys::ReentrantMutex>,
+    poison: poison::Flag,
+    data: T,
+}
+
+unsafe impl<T: Send> Send for ReentrantMutex<T> {}
+unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
+
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// Deref and DerefMut implementations
+#[must_use]
+pub struct ReentrantMutexGuard<'a, T: 'a> {
+    // funny underscores due to how Deref/DerefMut currently work (they
+    // disregard field privacy).
+    __lock: &'a ReentrantMutex<T>,
+    __poison: poison::Guard,
+}
+
+impl<'a, T> !marker::Send for ReentrantMutexGuard<'a, T> {}
+
+
+impl<T> ReentrantMutex<T> {
+    /// Creates a new reentrant mutex in an unlocked state.
+    pub fn new(t: T) -> ReentrantMutex<T> {
+        ReentrantMutex {
+            inner: box unsafe { sys::ReentrantMutex::new() },
+            poison: poison::FLAG_INIT,
+            data: t,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the caller until it is available to acquire the mutex.
+    /// Upon returning, the thread is the only thread with the mutex held. When the thread
+    /// calling this method already holds the lock, the call shall succeed without
+    /// blocking.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return failure if the mutex would otherwise be
+    /// acquired.
+    pub fn lock(&self) -> LockResult<ReentrantMutexGuard<T>> {
+        unsafe { self.inner.lock() }
+        ReentrantMutexGuard::new(&self)
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `Err` is returned.
+    /// Otherwise, an RAII guard is returned.
+    ///
+    /// This function does not block.
+    ///
+    /// # Failure
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return failure if the mutex would otherwise be
+    /// acquired.
+    pub fn try_lock(&self) -> TryLockResult<ReentrantMutexGuard<T>> {
+        if unsafe { self.inner.try_lock() } {
+            Ok(try!(ReentrantMutexGuard::new(&self)))
+        } else {
+            Err(TryLockError::WouldBlock)
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for ReentrantMutex<T> {
+    fn drop(&mut self) {
+        // This is actually safe b/c we know that there is no further usage of
+        // this mutex (it's up to the user to arrange for a mutex to get
+        // dropped, that's not our job)
+        unsafe { self.inner.destroy() }
+    }
+}
+
+impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Ok(guard) => write!(f, "ReentrantMutex {{ data: {:?} }}", &*guard),
+            Err(TryLockError::Poisoned(err)) => {
+                write!(f, "ReentrantMutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
+            },
+            Err(TryLockError::WouldBlock) => write!(f, "ReentrantMutex {{ <locked> }}")
+        }
+    }
+}
+
+impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
+    fn new(lock: &'mutex ReentrantMutex<T>)
+           -> LockResult<ReentrantMutexGuard<'mutex, T>> {
+        poison::map_result(lock.poison.borrow(), |guard| {
+            ReentrantMutexGuard {
+                __lock: lock,
+                __poison: guard,
+            }
+        })
+    }
+}
+
+impl<'mutex, T> Deref for ReentrantMutexGuard<'mutex, T> {
+    type Target = T;
+
+    fn deref<'a>(&'a self) -> &'a T {
+        &self.__lock.data
+    }
+}
+
+#[unsafe_destructor]
+impl<'a, T> Drop for ReentrantMutexGuard<'a, T> {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            self.__lock.poison.done(&self.__poison);
+            self.__lock.inner.unlock();
+        }
+    }
+}
+
+
+#[cfg(test)]
+mod test {
+    use prelude::v1::*;
+    use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
+    use cell::RefCell;
+    use sync::Arc;
+    use boxed;
+    use thread;
+
+    #[test]
+    fn smoke() {
+        let m = ReentrantMutex::new(());
+        {
+            let a = m.lock().unwrap();
+            {
+                let b = m.lock().unwrap();
+                {
+                    let c = m.lock().unwrap();
+                    assert_eq!(*c, ());
+                }
+                assert_eq!(*b, ());
+            }
+            assert_eq!(*a, ());
+        }
+    }
+
+    #[test]
+    fn is_mutex() {
+        let m = ReentrantMutex::new(RefCell::new(0));
+        let lock = m.lock().unwrap();
+        let handle = thread::scoped(|| {
+            let lock = m.lock().unwrap();
+            assert_eq!(*lock.borrow(), 4950);
+        });
+        for i in 0..100 {
+            let mut lock = m.lock().unwrap();
+            *lock.borrow_mut() += i;
+        }
+        drop(lock);
+        drop(handle);
+    }
+
+    #[test]
+    fn trylock_works() {
+        let m = ReentrantMutex::new(());
+        let lock = m.try_lock().unwrap();
+        let lock2 = m.try_lock().unwrap();
+        {
+            thread::scoped(|| {
+                let lock = m.try_lock();
+                assert!(lock.is_err());
+            });
+        }
+        let lock3 = m.try_lock().unwrap();
+    }
+
+    pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
+    impl<'a> Drop for Answer<'a> {
+        fn drop(&mut self) {
+            *self.0.borrow_mut() = 42;
+        }
+    }
+
+    #[test]
+    fn poison_works() {
+        let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+        let mc = m.clone();
+        let result = thread::spawn(move || {
+            let lock = mc.lock().unwrap();
+            *lock.borrow_mut() = 1;
+            let lock2 = mc.lock().unwrap();
+            *lock.borrow_mut() = 2;
+            let answer = Answer(lock2);
+            panic!("What the answer to my lifetimes dilemma is?");
+            drop(answer);
+        }).join();
+        assert!(result.is_err());
+        let r = m.lock().err().unwrap().into_inner();
+        assert_eq!(*r.borrow(), 42);
+    }
+}
diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs
index 1c0ce293804..af814653c14 100644
--- a/src/libstd/sys/unix/mutex.rs
+++ b/src/libstd/sys/unix/mutex.rs
@@ -12,6 +12,7 @@ use prelude::v1::*;
 
 use cell::UnsafeCell;
 use sys::sync as ffi;
+use mem;
 
 pub struct Mutex { inner: UnsafeCell<ffi::pthread_mutex_t> }
 
@@ -67,3 +68,50 @@ impl Mutex {
         debug_assert!(r == 0 || r == libc::EINVAL);
     }
 }
+
+// FIXME: remove the box, because box happens twice now, once at the common layer and once here.
+// Box is necessary here, because mutex may not change address after it is intialised on some
+// platforms. Regular Mutex above handles this by offloading intialisation to the OS on first lock.
+// Sadly, as far as reentrant mutexes go, this scheme is not quite portable and we must initialise
+// when we create the mutex, in the `new`.
+pub struct ReentrantMutex { inner: Box<UnsafeCell<ffi::pthread_mutex_t>> }
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+    pub unsafe fn new() -> ReentrantMutex {
+        let mutex = ReentrantMutex { inner: box mem::uninitialized() };
+        let mut attr: ffi::pthread_mutexattr_t = mem::uninitialized();
+        let result = ffi::pthread_mutexattr_init(&mut attr as *mut _);
+        debug_assert_eq!(result, 0);
+        let result = ffi::pthread_mutexattr_settype(&mut attr as *mut _,
+                                                    ffi::PTHREAD_MUTEX_RECURSIVE);
+        debug_assert_eq!(result, 0);
+        let result = ffi::pthread_mutex_init(mutex.inner.get(), &attr as *const _);
+        debug_assert_eq!(result, 0);
+        let result = ffi::pthread_mutexattr_destroy(&mut attr as *mut _);
+        debug_assert_eq!(result, 0);
+        mutex
+    }
+
+    pub unsafe fn lock(&self) {
+        let result = ffi::pthread_mutex_lock(self.inner.get());
+        debug_assert_eq!(result, 0);
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        ffi::pthread_mutex_trylock(self.inner.get()) == 0
+    }
+
+    pub unsafe fn unlock(&self) {
+        let result = ffi::pthread_mutex_unlock(self.inner.get());
+        debug_assert_eq!(result, 0);
+    }
+
+    pub unsafe fn destroy(&self) {
+        let result = ffi::pthread_mutex_destroy(self.inner.get());
+        debug_assert_eq!(result, 0);
+    }
+}
diff --git a/src/libstd/sys/unix/sync.rs b/src/libstd/sys/unix/sync.rs
index 3c05fd602be..41e1e206a42 100644
--- a/src/libstd/sys/unix/sync.rs
+++ b/src/libstd/sys/unix/sync.rs
@@ -12,17 +12,25 @@
 
 use libc;
 
-pub use self::os::{PTHREAD_MUTEX_INITIALIZER, pthread_mutex_t};
+pub use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_RECURSIVE, pthread_mutex_t,
+                   pthread_mutexattr_t};
 pub use self::os::{PTHREAD_COND_INITIALIZER, pthread_cond_t};
 pub use self::os::{PTHREAD_RWLOCK_INITIALIZER, pthread_rwlock_t};
 
 extern {
     // mutexes
+    pub fn pthread_mutex_init(lock: *mut pthread_mutex_t, attr: *const pthread_mutexattr_t)
+                              -> libc::c_int;
     pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int;
     pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int;
     pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int;
     pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int;
+    pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> libc::c_int;
+    pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> libc::c_int;
+    pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: libc::c_int)
+                                     -> libc::c_int;
+
     // cvars
     pub fn pthread_cond_wait(cond: *mut pthread_cond_t,
                              lock: *mut pthread_mutex_t) -> libc::c_int;
@@ -52,12 +60,14 @@ mod os {
     use libc;
 
     pub type pthread_mutex_t = *mut libc::c_void;
+    pub type pthread_mutexattr_t = *mut libc::c_void;
     pub type pthread_cond_t = *mut libc::c_void;
     pub type pthread_rwlock_t = *mut libc::c_void;
 
     pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _;
     pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _;
     pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _;
+    pub const PTHREAD_MUTEX_RECURSIVE: libc::c_int = 2;
 }
 
 #[cfg(any(target_os = "macos", target_os = "ios"))]
@@ -95,6 +105,12 @@ mod os {
         __opaque: [u8; __PTHREAD_MUTEX_SIZE__],
     }
     #[repr(C)]
+    pub struct pthread_mutexattr_t {
+        __sig: libc::c_long,
+        // note, that this is 16 bytes just to be safe, the actual struct might be smaller.
+        __opaque: [u8; 16],
+    }
+    #[repr(C)]
     pub struct pthread_cond_t {
         __sig: libc::c_long,
         __opaque: [u8; __PTHREAD_COND_SIZE__],
@@ -117,6 +133,8 @@ mod os {
         __sig: _PTHREAD_RWLOCK_SIG_INIT,
         __opaque: [0; __PTHREAD_RWLOCK_SIZE__],
     };
+
+    pub const PTHREAD_MUTEX_RECURSIVE: libc::c_int = 2;
 }
 
 #[cfg(target_os = "linux")]
@@ -161,6 +179,12 @@ mod os {
         size: [u8; __SIZEOF_PTHREAD_MUTEX_T],
     }
     #[repr(C)]
+    pub struct pthread_mutexattr_t {
+        __align: libc::c_longlong,
+        // note, that this is 16 bytes just to be safe, the actual struct might be smaller.
+        size: [u8; 16],
+    }
+    #[repr(C)]
     pub struct pthread_cond_t {
         __align: libc::c_longlong,
         size: [u8; __SIZEOF_PTHREAD_COND_T],
@@ -183,6 +207,7 @@ mod os {
         __align: 0,
         size: [0; __SIZEOF_PTHREAD_RWLOCK_T],
     };
+    pub const PTHREAD_MUTEX_RECURSIVE: libc::c_int = 1;
 }
 
 #[cfg(target_os = "android")]
 mod os {
@@ -190,6 +215,7 @@ mod os {
 
     #[repr(C)]
     pub struct pthread_mutex_t { value: libc::c_int }
+    pub type pthread_mutexattr_t = libc::c_long;
     #[repr(C)]
     pub struct pthread_cond_t { value: libc::c_int }
     #[repr(C)]
@@ -218,4 +244,5 @@ mod os {
         pendingWriters: 0,
         reserved: [0 as *mut _; 4],
     };
+    pub const PTHREAD_MUTEX_RECURSIVE: libc::c_int = 1;
 }
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index 0847f3b52bf..ca20858bb5b 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -12,6 +12,7 @@ use prelude::v1::*;
 
 use cell::UnsafeCell;
 use sys::sync as ffi;
+use mem;
 
 pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
 
@@ -57,3 +58,33 @@ impl Mutex {
         // ...
     }
 }
+
+pub struct ReentrantMutex { inner: Box<UnsafeCell<ffi::CRITICAL_SECTION>> }
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+    pub unsafe fn new() -> ReentrantMutex {
+        let mutex = ReentrantMutex { inner: box mem::uninitialized() };
+        ffi::InitializeCriticalSection(mutex.inner.get());
+        mutex
+    }
+
+    pub unsafe fn lock(&self) {
+        ffi::EnterCriticalSection(self.inner.get());
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        ffi::TryEnterCriticalSection(self.inner.get()) != 0
+    }
+
+    pub unsafe fn unlock(&self) {
+        ffi::LeaveCriticalSection(self.inner.get());
+    }
+
+    pub unsafe fn destroy(&self) {
+        ffi::DeleteCriticalSection(self.inner.get());
+    }
+}
diff --git a/src/libstd/sys/windows/sync.rs b/src/libstd/sys/windows/sync.rs
index 7614104c98b..5410259540e 100644
--- a/src/libstd/sys/windows/sync.rs
+++ b/src/libstd/sys/windows/sync.rs
@@ -8,17 +8,27 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use libc::{BOOL, DWORD, LPVOID, c_ulong};
+use libc::{BOOL, DWORD, LPVOID, LONG, HANDLE, c_ulong};
 use libc::types::os::arch::extra::BOOLEAN;
 
 pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
 pub type PSRWLOCK = *mut SRWLOCK;
 pub type ULONG = c_ulong;
+pub type ULONG_PTR = c_ulong;
 
 #[repr(C)]
 pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
 #[repr(C)]
 pub struct SRWLOCK { pub ptr: LPVOID }
+#[repr(C)]
+pub struct CRITICAL_SECTION {
+    CriticalSectionDebug: LPVOID,
+    LockCount: LONG,
+    RecursionCount: LONG,
+    OwningThread: HANDLE,
+    LockSemaphore: HANDLE,
+    SpinCount: ULONG_PTR
+}
 
 pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
     ptr: 0 as *mut _,
@@ -41,4 +51,10 @@ extern "system" {
     pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
     pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
     pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
+
+    pub fn InitializeCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+    pub fn EnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+    pub fn TryEnterCriticalSection(CriticalSection: *mut CRITICAL_SECTION) -> BOOLEAN;
+    pub fn LeaveCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
+    pub fn DeleteCriticalSection(CriticalSection: *mut CRITICAL_SECTION);
 }
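The mutex is reentrant rather than plain because the overridden write_fmt holds the stdio lock while running user formatting code, and a `Display` impl may itself print to the same handle on the same thread. That behaviour is still observable on current stable Rust; a usage sketch (not code from this commit):

```rust
use std::io::{self, Write};

fn main() {
    let stdout = io::stdout();
    // Take the stdout lock explicitly and hold it across further prints.
    let mut outer = stdout.lock();
    writeln!(outer, "outer lock held").unwrap();
    // With a non-reentrant mutex this println! would deadlock: it locks the
    // same stdout handle on the same thread while `outer` is still alive.
    println!("inner print re-enters the stdout lock");
    writeln!(outer, "still holding the outer lock").unwrap();
}
```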
