summary refs log tree commit diff
path: root/src/libstd/sys/redox/condvar.rs
blob: a6365cac23ea740f5ef7f74a61b05068f5bdffa4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
use crate::cell::UnsafeCell;
use crate::intrinsics::{atomic_cxchg, atomic_load, atomic_xadd, atomic_xchg};
use crate::ptr;
use crate::time::Duration;

use crate::sys::mutex::{mutex_unlock, Mutex};
use crate::sys::syscall::{futex, TimeSpec, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE};

/// Condition variable for the Redox target, built on the `futex` syscall.
///
/// Both fields use `UnsafeCell` and are only ever mutated through the raw
/// atomic intrinsics (or in `init`/`destroy`, which callers must serialize);
/// see the `Send`/`Sync` impls at the bottom of the file.
pub struct Condvar {
    // Pointer to the lock word of the single `Mutex` this condvar is bound
    // to. Starts null; set on first `wait` and used by `notify_all` as the
    // requeue target. A condvar may only ever be used with one mutex.
    lock: UnsafeCell<*mut i32>,
    // Wakeup sequence counter: bumped on every notify, futex-waited on by
    // waiters to detect notifications.
    seq: UnsafeCell<i32>
}

impl Condvar {
    /// Creates a condvar not yet bound to any mutex (null lock pointer,
    /// sequence counter zero).
    pub const fn new() -> Condvar {
        Condvar {
            lock: UnsafeCell::new(ptr::null_mut()),
            seq: UnsafeCell::new(0)
        }
    }

    /// Resets the condvar to its freshly-constructed state.
    ///
    /// # Safety
    /// Must not be called concurrently with any other operation on this
    /// condvar: the writes below are plain (non-atomic) stores.
    #[inline]
    pub unsafe fn init(&self) {
        *self.lock.get() = ptr::null_mut();
        *self.seq.get() = 0;
    }

    /// Wakes at most one thread blocked in `wait`/`wait_timeout`.
    #[inline]
    pub fn notify_one(&self) {
        unsafe {
            let seq = self.seq.get();

            // Bump the sequence so a waiter that re-checks `seq` (or races
            // with the FUTEX_WAIT below) observes that a notification
            // happened.
            atomic_xadd(seq, 1);

            // Wake one thread futex-waiting on `seq`. Errors (e.g. nobody
            // waiting) are deliberately ignored.
            let _ = futex(seq, FUTEX_WAKE, 1, 0, ptr::null_mut());
        }
    }

    /// Wakes every thread blocked on this condvar.
    #[inline]
    pub fn notify_all(&self) {
        unsafe {
            let lock = self.lock.get();
            let seq = self.seq.get();

            // No mutex has ever been bound via `wait`, so no thread can be
            // waiting; nothing to do.
            // NOTE(review): this reads `*lock` non-atomically while
            // `wait_inner` may be cxchg-ing it concurrently — presumably
            // benign on this platform, but worth confirming.
            if *lock == ptr::null_mut() {
                return;
            }

            atomic_xadd(seq, 1);

            // Wake one waiter and requeue the rest (up to usize::MAX) onto
            // the mutex's futex word instead of waking them all, avoiding a
            // thundering herd: the requeued threads wake later, one at a
            // time, as the mutex is unlocked.
            let _ = futex(seq, FUTEX_REQUEUE, 1, crate::usize::MAX, *lock);
        }
    }

    /// Common implementation of `wait` and `wait_timeout`.
    ///
    /// Returns `true` if the sequence counter changed across the futex wait
    /// (i.e. a notification was observed), `false` otherwise — presumably
    /// the timeout path, since with a null timeout FUTEX_WAIT normally only
    /// returns once `seq` has moved.
    ///
    /// # Safety
    /// `mutex` must be locked by the calling thread, and every waiter on
    /// this condvar must pass the same mutex.
    #[inline]
    unsafe fn wait_inner(&self, mutex: &Mutex, timeout_ptr: *const TimeSpec) -> bool {
        let lock = self.lock.get();
        let seq = self.seq.get();

        // Lazily bind this condvar to its mutex on first wait; enforce the
        // one-mutex-per-condvar invariant.
        if *lock != mutex.lock.get() {
            if *lock != ptr::null_mut() {
                panic!("Condvar used with more than one Mutex");
            }

            // Publish the binding; compare-exchange from null so a racing
            // first waiter doesn't clobber an already-stored pointer.
            // NOTE(review): the result is discarded — if the cxchg loses the
            // race to a different mutex, the panic above is skipped; confirm
            // this window is acceptable.
            atomic_cxchg(lock as *mut usize, 0, mutex.lock.get() as usize);
        }

        // Release the mutex before sleeping so notifiers can make progress.
        mutex_unlock(*lock);

        let seq_before = atomic_load(seq);

        // Sleep until `seq` changes from `seq_before`, the timeout expires,
        // or a spurious wakeup occurs. A notify between the load above and
        // this call makes FUTEX_WAIT return immediately (value mismatch).
        let _ = futex(seq, FUTEX_WAIT, seq_before, timeout_ptr as usize, ptr::null_mut());

        let seq_after = atomic_load(seq);

        // Re-acquire the mutex. Setting the lock word to 2 marks it
        // contended — presumably matching the 0=unlocked/1=locked/
        // 2=contended encoding of `sys::mutex::Mutex` (not visible here) —
        // so the eventual unlocker knows to issue a FUTEX_WAKE.
        while atomic_xchg(*lock, 2) != 0 {
            let _ = futex(*lock, FUTEX_WAIT, 2, 0, ptr::null_mut());
        }

        seq_before != seq_after
    }

    /// Blocks until notified. `mutex` must be locked by the caller; it is
    /// released while blocked and re-acquired before returning.
    #[inline]
    pub fn wait(&self, mutex: &Mutex) {
        unsafe {
            // NOTE(review): this panics if wait_inner returns without `seq`
            // having changed — e.g. a spurious FUTEX_WAIT return. Verify the
            // Redox futex cannot wake spuriously, otherwise this assert can
            // fire in correct programs.
            assert!(self.wait_inner(mutex, ptr::null()));
        }
    }

    /// Like `wait`, but gives up after `dur`. Returns `true` if a
    /// notification was observed, `false` otherwise (see `wait_inner`).
    #[inline]
    pub fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
        unsafe {
            // Convert the duration to the syscall's TimeSpec. `as i64`/
            // `as i32` casts cannot overflow here in practice: nanos < 1e9,
            // and seconds beyond i64 range are not representable timeouts.
            let timeout = TimeSpec {
                tv_sec: dur.as_secs() as i64,
                tv_nsec: dur.subsec_nanos() as i32
            };

            self.wait_inner(mutex, &timeout as *const TimeSpec)
        }
    }

    /// Resets the condvar; the caller must ensure no threads are waiting.
    ///
    /// # Safety
    /// Plain stores — must not race with any concurrent use of `self`.
    #[inline]
    pub unsafe fn destroy(&self) {
        *self.lock.get() = ptr::null_mut();
        *self.seq.get() = 0;
    }
}

// SAFETY: the `UnsafeCell` fields are only mutated through atomic intrinsics,
// except in `init`/`destroy`, whose contracts require the caller to exclude
// all concurrent access — so sharing and sending across threads is sound.
unsafe impl Send for Condvar {}

unsafe impl Sync for Condvar {}