// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomically reference counted data
//!
//! This module contains the implementation of an atomically reference counted
//! pointer for the purpose of sharing data between tasks. This is obviously a
//! very unsafe primitive to use, but it has its use cases when implementing
//! concurrent data structures and similar low-level machinery.
//!
//! Great care must be taken to ensure that data races do not arise through the
//! usage of `UnsafeArc`, and this often requires some form of external
//! synchronization. The only guarantee provided to you by this type is that
//! the underlying data will remain valid (not freed) so long as the reference
//! count is greater than zero.
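//!
//! A minimal sketch of the intended pattern (assuming the `spawn`/`proc`
//! task-spawning API from the prelude); the shared value is only read, so no
//! extra synchronization is needed:
//!
//! ```rust
//! let (a, b) = UnsafeArc::new2(5);
//! spawn(proc() {
//!     // Reading through a raw pointer is fine here because nothing writes.
//!     unsafe { assert_eq!(*b.get_immut(), 5); }
//! });
//! unsafe { assert_eq!(*a.get_immut(), 5); }
//! ```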
use cast;
use clone::Clone;
use iter::Iterator;
use kinds::Send;
use ops::Drop;
use ptr::RawPtr;
use sync::atomics::{fence, AtomicUint, Relaxed, Acquire, Release};
use ty::Unsafe;
use vec::Vec;
/// An atomically reference counted pointer.
///
/// This type enforces no shared-memory safety; callers must provide their
/// own synchronization.
#[unsafe_no_drop_flag]
pub struct UnsafeArc<T> {
data: *mut ArcData<T>,
}
struct ArcData<T> {
count: AtomicUint,
data: Unsafe<T>,
}
unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
let data = ~ArcData {
count: AtomicUint::new(refcount),
data: Unsafe::new(data)
};
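    // Leak the owned box as a raw pointer; the final `Drop` transmutes it
    // back into a `~ArcData` so that it is freed.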
cast::transmute(data)
}
impl<T: Send> UnsafeArc<T> {
/// Creates a new `UnsafeArc` which wraps the given data.
pub fn new(data: T) -> UnsafeArc<T> {
unsafe { UnsafeArc { data: new_inner(data, 1) } }
}
/// As new(), but returns an extra pre-cloned handle.
pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
unsafe {
let ptr = new_inner(data, 2);
(UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
}
}
/// As new(), but returns a vector of as many pre-cloned handles as
/// requested.
pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
unsafe {
if num_handles == 0 {
                ~[] // `data` was never moved into an ArcData, so it is simply dropped here
} else {
let ptr = new_inner(data, num_handles);
let v = Vec::from_fn(num_handles, |_| UnsafeArc { data: ptr });
v.move_iter().collect()
}
}
}
    /// Gets a pointer to the inner shared data. Note that care must be taken to
    /// ensure that the outer `UnsafeArc` does not fall out of scope while this
    /// pointer is in use, otherwise dereferencing it becomes a use-after-free.
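    ///
    /// A minimal sketch of correct usage, keeping the handle alive for every
    /// use of the raw pointer:
    ///
    /// ```rust
    /// let arc = UnsafeArc::new(10);
    /// let p = arc.get();
    /// // `arc` is still in scope, so the pointee is guaranteed valid.
    /// unsafe { assert_eq!(*p, 10); }
    /// ```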
#[inline]
pub fn get(&self) -> *mut T {
unsafe {
debug_assert!((*self.data).count.load(Relaxed) > 0);
return (*self.data).data.get();
}
}
/// Gets an immutable pointer to the inner shared data. This has the same
/// caveats as the `get` method.
#[inline]
pub fn get_immut(&self) -> *T {
unsafe {
debug_assert!((*self.data).count.load(Relaxed) > 0);
return (*self.data).data.get() as *T;
}
}
    /// Checks whether this is the only remaining reference to the shared data.
#[inline]
pub fn is_owned(&self) -> bool {
unsafe {
(*self.data).count.load(Relaxed) == 1
}
}
}
impl<T: Send> Clone for UnsafeArc<T> {
fn clone(&self) -> UnsafeArc<T> {
unsafe {
// Using a relaxed ordering is alright here, as knowledge of the original reference
// prevents other threads from erroneously deleting the object.
//
// As explained in the [Boost documentation][1],
// Increasing the reference counter can always be done with memory_order_relaxed: New
// references to an object can only be formed from an existing reference, and passing
// an existing reference from one thread to another must already provide any required
// synchronization.
            // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
let old_count = (*self.data).count.fetch_add(1, Relaxed);
debug_assert!(old_count >= 1);
return UnsafeArc { data: self.data };
}
}
}
#[unsafe_destructor]
impl<T> Drop for UnsafeArc<T> {
fn drop(&mut self) {
unsafe {
            // The pointer is null when this handle's contents were already
            // moved out (e.g. by an unwrapper); `#[unsafe_no_drop_flag]` means
            // the destructor still runs on such a zeroed-out struct.
if self.data.is_null() {
return
}
// Because `fetch_sub` is already atomic, we do not need to synchronize with other
// threads unless we are going to delete the object.
let old_count = (*self.data).count.fetch_sub(1, Release);
debug_assert!(old_count >= 1);
if old_count == 1 {
                // This fence is needed to prevent reordering of uses of the
                // data and deletion of the data. Because the decrement above is
                // marked `Release`, it synchronizes with this `Acquire` fence.
                // This means that any use of the data happens before decreasing
                // the reference count, which happens before this fence, which
                // happens before the deletion of the data.
//
// As explained in the [Boost documentation][1],
// It is important to enforce any possible access to the object in one thread
// (through an existing reference) to *happen before* deleting the object in a
// different thread. This is achieved by a "release" operation after dropping a
// reference (any access to the object through this reference must obviously
// happened before), and an "acquire" operation before deleting the object.
                // [1]: http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
fence(Acquire);
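                // Transmute the raw pointer back into the owned box so that
                // dropping it frees the count and the data.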
let _: ~ArcData<T> = cast::transmute(self.data);
}
}
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::UnsafeArc;
use mem::size_of;
#[test]
fn test_size() {
assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
}
#[test]
fn arclike_newN() {
// Tests that the many-refcounts-at-once constructors don't leak.
        let _ = UnsafeArc::new2("hello".to_owned());
        let x = UnsafeArc::newN("hello".to_owned(), 0);
        assert_eq!(x.len(), 0);
        let x = UnsafeArc::newN("hello".to_owned(), 1);
        assert_eq!(x.len(), 1);
        let x = UnsafeArc::newN("hello".to_owned(), 10);
        assert_eq!(x.len(), 10);
}
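    #[test]
    fn test_clone_and_is_owned() {
        // A small sketch of the refcounting behaviour: cloning shares the
        // data, and dropping the clone restores sole ownership.
        let x = UnsafeArc::new(5);
        assert!(x.is_owned());
        {
            let y = x.clone();
            assert!(!x.is_owned());
            unsafe { assert_eq!(*y.get_immut(), 5); }
        } // `y` is dropped here, bringing the count back down to one.
        assert!(x.is_owned());
    }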
}