// src/tools/miri/tests/fail/weak_memory/weak_uninit.rs
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-fixed-schedule
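// -Zmiri-ignore-leaks: the test deliberately leaks a Box (see static_uninit_atomic).
// -Zmiri-fixed-schedule: keeps thread scheduling deterministic, so (presumably) only
// the weak-memory RNG decides which buffered value a load observes.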

// Tests showing weak memory behaviours are exhibited. The desired behaviour here
// is a relaxed load reading the uninitialized value that is still in the store
// buffer, which is UB. This is scheduler and pseudo-RNG dependent, so the test
// is run multiple times to make hitting that behaviour likely.
// Spuriously missing the UB is possible, if you are really unlucky with
// the RNG and always read the latest value from the store buffer.

use std::sync::atomic::*;
use std::thread::spawn;

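// Wrapper that unsafely implements Send and Sync so arbitrary data can be moved
// across threads. It is unused in this particular test (hence the allow(dead_code)).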
#[allow(dead_code)]
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}

// We can't use static items because we need a fresh, still-uninitialized
// allocation for each of the test's iterations.
fn static_uninit_atomic() -> &'static AtomicUsize {
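    // Allocate an AtomicUsize without initializing it, pretend it is initialized,
    // and leak the Box to obtain a &'static reference. The leak is why
    // -Zmiri-ignore-leaks is passed above.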
    unsafe { Box::leak(Box::new_uninit()).assume_init_ref() }
}

fn relaxed() {
    let x = static_uninit_atomic();
    let j1 = spawn(move || {
        x.store(1, Ordering::Relaxed);
    });

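    // Under Miri's store-buffer emulation, this relaxed load may return either the
    // buffered value 1 from j1 or the older, uninitialized contents of the
    // allocation; observing the latter is the UB this test expects.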
    let j2 = spawn(move || x.load(Ordering::Relaxed)); //~ERROR: using uninitialized data

    j1.join().unwrap();
    j2.join().unwrap();
}

pub fn main() {
    // If we try often enough, we should hit UB.
    for _ in 0..100 {
        relaxed();
    }
}