diff options
| author | Alex Crichton <alex@alexcrichton.com> | 2014-05-09 10:34:51 -0700 |
|---|---|---|
| committer | Alex Crichton <alex@alexcrichton.com> | 2014-05-11 01:13:02 -0700 |
| commit | f94d671bfae5d8e9a4a4add310b1c40af0ab62a6 (patch) | |
| tree | 97bea161eb7fff71a0e9a484aa9f190dbe037f58 /src/libstd/sync | |
| parent | adb8b0b230d5e5c79b4f873825b3d3cff8d1bc8f (diff) | |
| download | rust-f94d671bfae5d8e9a4a4add310b1c40af0ab62a6.tar.gz rust-f94d671bfae5d8e9a4a4add310b1c40af0ab62a6.zip | |
core: Remove the cast module
This commit revisits the `cast` module in libcore and libstd, and scrutinizes
all functions inside of it. The result was to remove the `cast` module entirely,
folding all functionality into the `mem` module. Specifically, this is the fate
of each function in the `cast` module.
* transmute - This function was moved to `mem`, but it is now marked as
#[unstable]. This is due to planned changes to the `transmute`
function and how it can be invoked (see the #[unstable] comment).
For more information, see RFC 5 and #12898
* transmute_copy - This function was moved to `mem`, with clarification that it
is not an error to invoke it with T/U that are different
sizes, but rather that it is strongly discouraged. This
function is now #[stable]
* forget - This function was moved to `mem` and marked #[stable]
* bump_box_refcount - This function was removed due to the deprecation of
managed boxes as well as its questionable utility.
* transmute_mut - This function was previously deprecated, and removed as part
of this commit.
* transmute_mut_unsafe - This function doesn't serve much of a purpose when it
can be achieved with an `as` in safe code, so it was
removed.
* transmute_lifetime - This function was removed because it is likely a strong
indication that code is incorrect in the first place.
* transmute_mut_lifetime - This function was removed for the same reasons as
`transmute_lifetime`
* copy_lifetime - This function was moved to `mem`, but it is marked
`#[unstable]` now due to the likelihood of being removed in
the future if it is found to not be very useful.
* copy_mut_lifetime - This function was also moved to `mem`, but had the same
treatment as `copy_lifetime`.
* copy_lifetime_vec - This function was removed because it is not used today,
and its existence is not necessary with DST
(copy_lifetime will suffice).
In summary, the cast module was stripped down to these functions, and then the
functions were moved to the `mem` module.
transmute - #[unstable]
transmute_copy - #[stable]
forget - #[stable]
copy_lifetime - #[unstable]
copy_mut_lifetime - #[unstable]
[breaking-change]
Diffstat (limited to 'src/libstd/sync')
| -rw-r--r-- | src/libstd/sync/arc.rs | 6 | ||||
| -rw-r--r-- | src/libstd/sync/atomics.rs | 20 | ||||
| -rw-r--r-- | src/libstd/sync/deque.rs | 23 | ||||
| -rw-r--r-- | src/libstd/sync/mpsc_queue.rs | 8 | ||||
| -rw-r--r-- | src/libstd/sync/spsc_queue.rs | 8 |
5 files changed, 32 insertions, 33 deletions
diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs index 676c836c459..7dcfe62ffb8 100644 --- a/src/libstd/sync/arc.rs +++ b/src/libstd/sync/arc.rs @@ -21,10 +21,10 @@ //! the underlying data will remain valid (not free'd) so long as the reference //! count is greater than one. -use cast; use clone::Clone; use iter::Iterator; use kinds::Send; +use mem; use ops::Drop; use owned::Box; use ptr::RawPtr; @@ -50,7 +50,7 @@ unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> { count: AtomicUint::new(refcount), data: Unsafe::new(data) }; - cast::transmute(data) + mem::transmute(data) } impl<T: Send> UnsafeArc<T> { @@ -158,7 +158,7 @@ impl<T> Drop for UnsafeArc<T>{ // happened before), and an "acquire" operation before deleting the object. // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) fence(Acquire); - let _: Box<ArcData<T>> = cast::transmute(self.data); + let _: Box<ArcData<T>> = mem::transmute(self.data); } } } diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index 2fba59c3233..6ddae97e901 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -108,11 +108,11 @@ #![allow(missing_doc)] use intrinsics; -use cast; -use std::kinds::marker; -use option::{Option,Some,None}; +use mem; use ops::Drop; +use option::{Option,Some,None}; use owned::Box; +use std::kinds::marker; use ty::Unsafe; /// An atomic boolean type. 
@@ -665,7 +665,7 @@ impl<T> AtomicPtr<T> { impl<T> AtomicOption<T> { /// Create a new `AtomicOption` pub fn new(p: Box<T>) -> AtomicOption<T> { - unsafe { AtomicOption { p: Unsafe::new(cast::transmute(p)) } } + unsafe { AtomicOption { p: Unsafe::new(mem::transmute(p)) } } } /// Create a new `AtomicOption` that doesn't contain a value @@ -675,13 +675,13 @@ impl<T> AtomicOption<T> { #[inline] pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> { unsafe { - let val = cast::transmute(val); + let val = mem::transmute(val); let p = atomic_swap(self.p.get(), val, order); if p as uint == 0 { None } else { - Some(cast::transmute(p)) + Some(mem::transmute(p)) } } } @@ -689,7 +689,7 @@ impl<T> AtomicOption<T> { /// Remove the value, leaving the `AtomicOption` empty. #[inline] pub fn take(&self, order: Ordering) -> Option<Box<T>> { - unsafe { self.swap(cast::transmute(0), order) } + unsafe { self.swap(mem::transmute(0), order) } } /// Replace an empty value with a non-empty value. @@ -700,13 +700,13 @@ impl<T> AtomicOption<T> { #[inline] pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> { unsafe { - let val = cast::transmute(val); - let expected = cast::transmute(0); + let val = mem::transmute(val); + let expected = mem::transmute(0); let oldval = atomic_compare_and_swap(self.p.get(), expected, val, order); if oldval == expected { None } else { - Some(cast::transmute(val)) + Some(mem::transmute(val)) } } } diff --git a/src/libstd/sync/deque.rs b/src/libstd/sync/deque.rs index 8dfd691e6ff..175bb03d262 100644 --- a/src/libstd/sync/deque.rs +++ b/src/libstd/sync/deque.rs @@ -48,7 +48,6 @@ // FIXME: all atomic operations in this module use a SeqCst ordering. 
That is // probably overkill -use cast; use clone::Clone; use iter::{range, Iterator}; use kinds::Send; @@ -57,12 +56,12 @@ use mem; use ops::Drop; use option::{Option, Some, None}; use owned::Box; -use ptr; use ptr::RawPtr; +use ptr; +use slice::ImmutableVector; use sync::arc::UnsafeArc; use sync::atomics::{AtomicInt, AtomicPtr, SeqCst}; use unstable::sync::Exclusive; -use slice::ImmutableVector; use vec::Vec; // Once the queue is less than 1/K full, then it will be downsized. Note that @@ -230,7 +229,7 @@ impl<T: Send> Deque<T> { Deque { bottom: AtomicInt::new(0), top: AtomicInt::new(0), - array: AtomicPtr::new(unsafe { cast::transmute(buf) }), + array: AtomicPtr::new(unsafe { mem::transmute(buf) }), pool: pool, } } @@ -272,7 +271,7 @@ impl<T: Send> Deque<T> { return Some(data); } else { self.bottom.store(t + 1, SeqCst); - cast::forget(data); // someone else stole this value + mem::forget(data); // someone else stole this value return None; } } @@ -294,7 +293,7 @@ impl<T: Send> Deque<T> { if self.top.compare_and_swap(t, t + 1, SeqCst) == t { Data(data) } else { - cast::forget(data); // someone else stole this value + mem::forget(data); // someone else stole this value Abort } } @@ -315,7 +314,7 @@ impl<T: Send> Deque<T> { // continue to be read after we flag this buffer for reclamation. 
unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>, buf: Buffer<T>) -> *mut Buffer<T> { - let newbuf: *mut Buffer<T> = cast::transmute(box buf); + let newbuf: *mut Buffer<T> = mem::transmute(box buf); self.array.store(newbuf, SeqCst); let ss = (*newbuf).size(); self.bottom.store(b + ss, SeqCst); @@ -323,7 +322,7 @@ impl<T: Send> Deque<T> { if self.top.compare_and_swap(t, t + ss, SeqCst) != t { self.bottom.store(b, SeqCst); } - self.pool.free(cast::transmute(old)); + self.pool.free(mem::transmute(old)); return newbuf; } } @@ -340,7 +339,7 @@ impl<T: Send> Drop for Deque<T> { for i in range(t, b) { let _: T = unsafe { (*a).get(i) }; } - self.pool.free(unsafe { cast::transmute(a) }); + self.pool.free(unsafe { mem::transmute(a) }); } } @@ -373,7 +372,7 @@ impl<T: Send> Buffer<T> { unsafe fn put(&mut self, i: int, t: T) { let ptr = self.storage.offset(i & self.mask()); ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1); - cast::forget(t); + mem::forget(t); } // Again, unsafe because this has incredibly dubious ownership violations. 
@@ -400,7 +399,7 @@ mod tests { use prelude::*; use super::{Data, BufferPool, Abort, Empty, Worker, Stealer}; - use cast; + use mem; use owned::Box; use rt::thread::Thread; use rand; @@ -607,7 +606,7 @@ mod tests { let s = s.clone(); let unique_box = box AtomicUint::new(0); let thread_box = unsafe { - *cast::transmute::<&Box<AtomicUint>, + *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box) }; (Thread::start(proc() { diff --git a/src/libstd/sync/mpsc_queue.rs b/src/libstd/sync/mpsc_queue.rs index e05959e2591..4cdcd05e9b4 100644 --- a/src/libstd/sync/mpsc_queue.rs +++ b/src/libstd/sync/mpsc_queue.rs @@ -38,8 +38,8 @@ // http://www.1024cores.net/home/lock-free-algorithms // /queues/non-intrusive-mpsc-node-based-queue -use cast; use kinds::Send; +use mem; use ops::Drop; use option::{Option, None, Some}; use owned::Box; @@ -74,7 +74,7 @@ pub struct Queue<T> { impl<T> Node<T> { unsafe fn new(v: Option<T>) -> *mut Node<T> { - cast::transmute(box Node { + mem::transmute(box Node { next: AtomicPtr::new(0 as *mut Node<T>), value: v, }) @@ -121,7 +121,7 @@ impl<T: Send> Queue<T> { assert!((*tail).value.is_none()); assert!((*next).value.is_some()); let ret = (*next).value.take_unwrap(); - let _: Box<Node<T>> = cast::transmute(tail); + let _: Box<Node<T>> = mem::transmute(tail); return Data(ret); } @@ -146,7 +146,7 @@ impl<T: Send> Drop for Queue<T> { let mut cur = self.tail; while !cur.is_null() { let next = (*cur).next.load(Relaxed); - let _: Box<Node<T>> = cast::transmute(cur); + let _: Box<Node<T>> = mem::transmute(cur); cur = next; } } diff --git a/src/libstd/sync/spsc_queue.rs b/src/libstd/sync/spsc_queue.rs index 7854a0e168e..ed6d690def0 100644 --- a/src/libstd/sync/spsc_queue.rs +++ b/src/libstd/sync/spsc_queue.rs @@ -33,8 +33,8 @@ //! concurrently between two tasks. This data structure is safe to use and //! enforces the semantics that there is one pusher and one popper. 
-use cast; use kinds::Send; +use mem; use ops::Drop; use option::{Some, None, Option}; use owned::Box; @@ -74,7 +74,7 @@ pub struct Queue<T> { impl<T: Send> Node<T> { fn new() -> *mut Node<T> { unsafe { - cast::transmute(box Node { + mem::transmute(box Node { value: None, next: AtomicPtr::new(0 as *mut Node<T>), }) @@ -188,7 +188,7 @@ impl<T: Send> Queue<T> { (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed); // We have successfully erased all references to 'tail', so // now we can safely drop it. - let _: Box<Node<T>> = cast::transmute(tail); + let _: Box<Node<T>> = mem::transmute(tail); } } return ret; @@ -216,7 +216,7 @@ impl<T: Send> Drop for Queue<T> { let mut cur = self.first; while !cur.is_null() { let next = (*cur).next.load(Relaxed); - let _n: Box<Node<T>> = cast::transmute(cur); + let _n: Box<Node<T>> = mem::transmute(cur); cur = next; } } |
