author    Eduard Burtescu <edy.burt@gmail.com>  2015-05-27 11:18:36 +0300
committer Eduard Burtescu <edy.burt@gmail.com>  2015-05-27 11:19:03 +0300
commit    377b0900aede976b2d37a499bbd7b62c2e39b358 (patch)
tree      b4a5a4431d36ed1a4e0a39c7d2ef2563ecac9bf4 /src/libstd/rt
parent    6e8e4f847c2ea02fec021ea15dfb2de6beac797a (diff)
Use `const fn` to abstract away the contents of UnsafeCell & friends.
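Every change below follows the same pattern: the promoted `*_INIT` constants (MUTEX_INIT, ATOMIC_USIZE_INIT, ...) are replaced by `const fn` constructors (Mutex::new(), AtomicUsize::new(0), ...) that can be called directly in a static initializer without exposing the type's UnsafeCell internals. A rough sketch of why this works, using a hypothetical `MyAtomic` type rather than the real libstd definitions:

// A minimal sketch of the idea (not the actual std code): `MyAtomic` is a
// hypothetical stand-in for types like AtomicUsize or StaticMutex.
use std::cell::UnsafeCell;

struct MyAtomic {
    // The field stays private; callers never need to name it.
    v: UnsafeCell<usize>,
}

// UnsafeCell is not Sync, so a wrapper meant for shared statics must assert
// it manually, just as the real atomics do.
unsafe impl Sync for MyAtomic {}

impl MyAtomic {
    // Because this constructor is a `const fn`, the call below is a constant
    // expression and can initialize a static directly.
    const fn new(v: usize) -> MyAtomic {
        MyAtomic { v: UnsafeCell::new(v) }
    }

    fn load(&self) -> usize {
        // Illustration only: a real atomic would go through atomic operations.
        unsafe { *self.v.get() }
    }
}

// Previously a static like this required a separate promoted constant such as
// `MY_ATOMIC_INIT`; with a `const fn` constructor it is just a call.
static COUNTER: MyAtomic = MyAtomic::new(0);

fn main() {
    println!("counter = {}", COUNTER.load());
}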
Diffstat (limited to 'src/libstd/rt')
-rw-r--r--  src/libstd/rt/args.rs          4
-rw-r--r--  src/libstd/rt/at_exit_imp.rs   4
-rw-r--r--  src/libstd/rt/backtrace.rs     2
-rw-r--r--  src/libstd/rt/unwind/mod.rs   22
-rw-r--r--  src/libstd/rt/util.rs          2
5 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs
index 2329861f29b..d23a124a6ec 100644
--- a/src/libstd/rt/args.rs
+++ b/src/libstd/rt/args.rs
@@ -52,10 +52,10 @@ mod imp {
     use mem;
     use ffi::CStr;
 
-    use sync::{StaticMutex, MUTEX_INIT};
+    use sync::StaticMutex;
 
     static mut GLOBAL_ARGS_PTR: usize = 0;
-    static LOCK: StaticMutex = MUTEX_INIT;
+    static LOCK: StaticMutex = StaticMutex::new();
 
     pub unsafe fn init(argc: isize, argv: *const *const u8) {
         let args = load_argc_and_argv(argc, argv);
diff --git a/src/libstd/rt/at_exit_imp.rs b/src/libstd/rt/at_exit_imp.rs
index beb2870807a..19a17be4ccf 100644
--- a/src/libstd/rt/at_exit_imp.rs
+++ b/src/libstd/rt/at_exit_imp.rs
@@ -20,7 +20,7 @@ use boxed;
 use boxed::Box;
 use vec::Vec;
 use thunk::Thunk;
-use sys_common::mutex::{Mutex, MUTEX_INIT};
+use sys_common::mutex::Mutex;
 
 type Queue = Vec<Thunk<'static>>;
 
@@ -28,7 +28,7 @@ type Queue = Vec<Thunk<'static>>;
 // on poisoning and this module needs to operate at a lower level than requiring
 // the thread infrastructure to be in place (useful on the borders of
 // initialization/destruction).
-static LOCK: Mutex = MUTEX_INIT;
+static LOCK: Mutex = Mutex::new();
 static mut QUEUE: *mut Queue = 0 as *mut Queue;
 
 // The maximum number of times the cleanup routines will be run. While running
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index 72cbe2b533b..2eadf36a6b4 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -22,7 +22,7 @@ pub use sys::backtrace::write;
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
 pub fn log_enabled() -> bool {
-    static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
+    static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0);
     match ENABLED.load(Ordering::SeqCst) {
         1 => return false,
         2 => return true,
diff --git a/src/libstd/rt/unwind/mod.rs b/src/libstd/rt/unwind/mod.rs
index 576035ffe9a..c403976745a 100644
--- a/src/libstd/rt/unwind/mod.rs
+++ b/src/libstd/rt/unwind/mod.rs
@@ -72,7 +72,7 @@ use intrinsics;
 use libc::c_void;
 use mem;
 use sync::atomic::{self, Ordering};
-use sys_common::mutex::{Mutex, MUTEX_INIT};
+use sys_common::mutex::Mutex;
 
 // The actual unwinding implementation is cfg'd here, and we've got two current
 // implementations. One goes through SEH on Windows and the other goes through
@@ -89,15 +89,15 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: u32);
 // For more information, see below.
 const MAX_CALLBACKS: usize = 16;
 static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
-        [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
-         atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
-static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+        [atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+         atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0)];
+static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
 
 thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
 
@@ -243,7 +243,7 @@ fn begin_unwind_inner(msg: Box<Any + Send>,
     // `std::sync` one as accessing TLS can cause weird recursive problems (and
     // we don't need poison checking).
     unsafe {
-        static LOCK: Mutex = MUTEX_INIT;
+        static LOCK: Mutex = Mutex::new();
         static mut INIT: bool = false;
         LOCK.lock();
         if !INIT {
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index 31e970a9550..b53219db245 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -42,7 +42,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
 }
 
 pub fn min_stack() -> usize {
-    static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+    static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
     match MIN.load(Ordering::SeqCst) {
         0 => {}
         n => return n - 1,