Diffstat (limited to 'src/libstd/sys')
-rw-r--r--  src/libstd/sys/common/thread_local.rs |  6
-rw-r--r--  src/libstd/sys/unix/thread.rs         | 14
-rw-r--r--  src/libstd/sys/unix/timer.rs          |  2
-rw-r--r--  src/libstd/sys/windows/condvar.rs     | 14
-rw-r--r--  src/libstd/sys/windows/mutex.rs       | 72
-rw-r--r--  src/libstd/sys/windows/os.rs          |  2
-rw-r--r--  src/libstd/sys/windows/sync.rs        | 46
7 files changed, 67 insertions, 89 deletions
diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs
index e9af796c674..edd16e0c062 100644
--- a/src/libstd/sys/common/thread_local.rs
+++ b/src/libstd/sys/common/thread_local.rs
@@ -58,7 +58,7 @@
 
 use prelude::v1::*;
 
-use sync::atomic::{self, AtomicUint, Ordering};
+use sync::atomic::{self, AtomicUsize, Ordering};
 use sync::{Mutex, Once, ONCE_INIT};
 
 use sys::thread_local as imp;
@@ -97,7 +97,7 @@ pub struct StaticKey {
 
 /// Inner contents of `StaticKey`, created by the `INIT_INNER` constant.
 pub struct StaticKeyInner {
-    key: AtomicUint,
+    key: AtomicUsize,
 }
 
 /// A type for a safely managed OS-based TLS slot.
@@ -137,7 +137,7 @@ pub const INIT: StaticKey = StaticKey {
 ///
 /// This value allows specific configuration of the destructor for a TLS key.
 pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
-    key: atomic::ATOMIC_UINT_INIT,
+    key: atomic::ATOMIC_USIZE_INIT,
 };
 
 static INIT_KEYS: Once = ONCE_INIT;
diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs
index 2416b64f98f..ac51b68795f 100644
--- a/src/libstd/sys/unix/thread.rs
+++ b/src/libstd/sys/unix/thread.rs
@@ -93,7 +93,19 @@ pub mod guard {
 
         PAGE_SIZE = psize as uint;
 
-        let stackaddr = get_stack_start();
+        let mut stackaddr = get_stack_start();
+
+        // Ensure stackaddr is page-aligned! A parent process might
+        // have reset RLIMIT_STACK to a non-page-aligned value. The
+        // pthread_attr_getstack() call reports the usable stack area
+        // as stackaddr < stackaddr + stacksize, so if stackaddr is
+        // not page-aligned, round it up so that stackaddr <
+        // new_page_aligned_stackaddr < stackaddr + stacksize.
+        let remainder = (stackaddr as usize) % (PAGE_SIZE as usize);
+        if remainder != 0 {
+            stackaddr = ((stackaddr as usize) + (PAGE_SIZE as usize) - remainder)
+                as *mut libc::c_void;
+        }
 
         // Reallocate the last page of the stack.
         // This ensures SIGBUS will be raised on
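A standalone sketch of the alignment arithmetic used above, for reference; the align_up helper is purely illustrative and is not part of this patch:

// Round `addr` up to the next multiple of `page_size`, exactly as the hunk
// above adjusts `stackaddr` when RLIMIT_STACK left it unaligned.
fn align_up(addr: usize, page_size: usize) -> usize {
    let remainder = addr % page_size;
    if remainder == 0 {
        addr
    } else {
        addr + page_size - remainder
    }
}

fn main() {
    assert_eq!(align_up(0x3000, 0x1000), 0x3000); // already page-aligned
    assert_eq!(align_up(0x3001, 0x1000), 0x4000); // rounded up to the next page
}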
diff --git a/src/libstd/sys/unix/timer.rs b/src/libstd/sys/unix/timer.rs
index 62f3242a206..c0c231a9e73 100644
--- a/src/libstd/sys/unix/timer.rs
+++ b/src/libstd/sys/unix/timer.rs
@@ -211,7 +211,7 @@ impl Timer {
         // instead of ()
         HELPER.boot(|| {}, helper);
 
-        static ID: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
+        static ID: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
         let id = ID.fetch_add(1, Ordering::Relaxed);
         Ok(Timer {
             id: id,
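The ID counter touched by this hunk is the usual lock-free way of handing out unique identifiers. A minimal standalone sketch of the same pattern, written against today's AtomicUsize::new constructor rather than the ATOMIC_USIZE_INIT constant the patch switches to:

use std::sync::atomic::{AtomicUsize, Ordering};

// Relaxed ordering is sufficient here: only the counter itself must be
// consistent, and no other memory is synchronized through it.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

fn next_id() -> usize {
    NEXT_ID.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    assert_eq!(next_id(), 0);
    assert_eq!(next_id(), 1);
}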
diff --git a/src/libstd/sys/windows/condvar.rs b/src/libstd/sys/windows/condvar.rs
index 291a8024cfc..db8038006fd 100644
--- a/src/libstd/sys/windows/condvar.rs
+++ b/src/libstd/sys/windows/condvar.rs
@@ -27,16 +27,18 @@ impl Condvar {
 
     #[inline]
     pub unsafe fn wait(&self, mutex: &Mutex) {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              libc::INFINITE);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               libc::INFINITE,
+                                               0);
         debug_assert!(r != 0);
     }
 
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let r = ffi::SleepConditionVariableCS(self.inner.get(),
-                                              mutex::raw(mutex),
-                                              dur.num_milliseconds() as DWORD);
+        let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+                                               mutex::raw(mutex),
+                                               dur.num_milliseconds() as DWORD,
+                                               0);
         if r == 0 {
             const ERROR_TIMEOUT: DWORD = 0x5B4;
             debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint);
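These wrappers back the public Condvar type, and the calling convention is unchanged by the switch to SRW locks: callers must hold the mutex and re-check their predicate in a loop, because wakeups can be spurious. A minimal sketch of that pattern against the public std::sync API (not the private sys layer shown here):

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    // Wait in a loop: the condition variable may wake up spuriously.
    while !*ready {
        ready = cvar.wait(ready).unwrap();
    }
}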
diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index 1def99a3741..828ad795ed3 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -8,73 +8,51 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use prelude::v1::*;
-
-use sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Ordering};
-use alloc::{self, heap};
-
-use libc::DWORD;
+use marker::Sync;
+use cell::UnsafeCell;
 use sys::sync as ffi;
 
-const SPIN_COUNT: DWORD = 4000;
+pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
 
-pub struct Mutex { inner: AtomicUint }
-
-pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_UINT_INIT };
+pub const MUTEX_INIT: Mutex = Mutex {
+    inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
+};
 
 unsafe impl Sync for Mutex {}
 
 #[inline]
-pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
-    m.get()
+pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
+    m.inner.get()
 }
 
+// So you might be asking: why are we using SRWLock instead of CriticalSection?
+//
+// 1. SRWLock is several times faster than CriticalSection according to benchmarks performed on both
+// Windows 8 and Windows 7.
+//
+// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix implementation
+// deadlocks so consistency is preferred. See #19962 for more details.
+//
+// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is that there are
+// no guarantees of fairness.
+
 impl Mutex {
     #[inline]
-    pub unsafe fn new() -> Mutex {
-        Mutex { inner: AtomicUint::new(init_lock() as uint) }
-    }
+    pub unsafe fn new() -> Mutex { MUTEX_INIT }
     #[inline]
     pub unsafe fn lock(&self) {
-        ffi::EnterCriticalSection(self.get())
+        ffi::AcquireSRWLockExclusive(self.inner.get())
     }
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        ffi::TryEnterCriticalSection(self.get()) != 0
+        ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
     }
     #[inline]
     pub unsafe fn unlock(&self) {
-        ffi::LeaveCriticalSection(self.get())
+        ffi::ReleaseSRWLockExclusive(self.inner.get())
     }
+    #[inline]
     pub unsafe fn destroy(&self) {
-        let lock = self.inner.swap(0, Ordering::SeqCst);
-        if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
-    }
-
-    unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
-        match self.inner.load(Ordering::SeqCst) {
-            0 => {}
-            n => return n as ffi::LPCRITICAL_SECTION
-        }
-        let lock = init_lock();
-        match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) {
-            0 => return lock as ffi::LPCRITICAL_SECTION,
-            _ => {}
-        }
-        free_lock(lock);
-        return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION;
+        // SRW locks need no cleanup, so `destroy` is a no-op.
     }
 }
-
-unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
-    let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
-                        as ffi::LPCRITICAL_SECTION;
-    if block.is_null() { alloc::oom() }
-    ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
-    return block;
-}
-
-unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
-    ffi::DeleteCriticalSection(h);
-    heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
-}
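Point 2 of the rationale comment is the user-visible consequence of this rewrite: the Windows mutex is no longer re-entrant, matching the Unix behaviour. A small illustrative sketch against the public std::sync::Mutex (hypothetical usage, not code from this patch):

use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0u32);
    let _guard = m.lock().unwrap();
    // Locking the same mutex again from this thread will not return:
    // with SRWLock on Windows (as with pthread mutexes on Unix) the
    // second acquisition deadlocks, so it is left commented out.
    // let _second = m.lock().unwrap();
}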
diff --git a/src/libstd/sys/windows/os.rs b/src/libstd/sys/windows/os.rs
index 064633f321c..4540068133b 100644
--- a/src/libstd/sys/windows/os.rs
+++ b/src/libstd/sys/windows/os.rs
@@ -36,7 +36,7 @@ const BUF_BYTES : uint = 2048u;
 pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
     match v.iter().position(|c| *c == 0) {
         // don't include the 0
-        Some(i) => &v[0..i],
+        Some(i) => &v[..i],
         None => v
     }
 }
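The slicing change above is purely cosmetic: &v[..i] and &v[0..i] are equivalent. For reference, the function's behaviour can be exercised with a standalone copy, since the original is private to libstd's Windows module:

fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] {
    match v.iter().position(|c| *c == 0) {
        // don't include the 0
        Some(i) => &v[..i],
        None => v,
    }
}

fn main() {
    let buf = [b'H' as u16, b'i' as u16, 0, 0xDEAD, 0xBEEF];
    assert_eq!(truncate_utf16_at_nul(&buf), &buf[..2]); // stops before the nul
    let no_nul = [1u16, 2, 3];
    assert_eq!(truncate_utf16_at_nul(&no_nul), &no_nul[..]); // returned unchanged
}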
diff --git a/src/libstd/sys/windows/sync.rs b/src/libstd/sys/windows/sync.rs
index cbca47912b5..d60646b7db9 100644
--- a/src/libstd/sys/windows/sync.rs
+++ b/src/libstd/sys/windows/sync.rs
@@ -8,17 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use libc::{BOOL, DWORD, c_void, LPVOID};
+use libc::{BOOL, DWORD, c_void, LPVOID, c_ulong};
 use libc::types::os::arch::extra::BOOLEAN;
 
-pub type LPCRITICAL_SECTION = *mut c_void;
-pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
-pub type LPSRWLOCK = *mut SRWLOCK;
-
-#[cfg(target_arch = "x86")]
-pub const CRITICAL_SECTION_SIZE: uint = 24;
-#[cfg(target_arch = "x86_64")]
-pub const CRITICAL_SECTION_SIZE: uint = 40;
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PSRWLOCK = *mut SRWLOCK;
+pub type ULONG = c_ulong;
 
 #[repr(C)]
 pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
@@ -31,28 +26,19 @@ pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE {
 pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
 
 extern "system" {
-    // critical sections
-    pub fn InitializeCriticalSectionAndSpinCount(
-                    lpCriticalSection: LPCRITICAL_SECTION,
-                    dwSpinCount: DWORD) -> BOOL;
-    pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
-    pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
-
     // condition variables
-    pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE,
-                                    CriticalSection: LPCRITICAL_SECTION,
-                                    dwMilliseconds: DWORD) -> BOOL;
-    pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
-    pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
+    pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
+                                     SRWLock: PSRWLOCK,
+                                     dwMilliseconds: DWORD,
+                                     Flags: ULONG) -> BOOL;
+    pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+    pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
 
     // slim rwlocks
-    pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK);
-    pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK);
-    pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN;
-    pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN;
+    pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
+    pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
+    pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
+    pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
 }
-
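One detail the condvar.rs hunk above relies on: the Flags argument to SleepConditionVariableSRW tells Windows how the SRW lock was held when the wait began. Passing 0 means it was held exclusively, which matches Mutex::lock; waiting while holding the lock in shared (read) mode would use the constant sketched below, which I believe is 0x1 in the Windows headers but is not declared by this patch:

// Assumed value from the Windows API (not part of this patch): pass this
// as Flags to SleepConditionVariableSRW when the SRW lock was acquired
// with AcquireSRWLockShared rather than AcquireSRWLockExclusive.
pub const CONDITION_VARIABLE_LOCKMODE_SHARED: ULONG = 0x1;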