 src/tools/miri/src/concurrency/data_race.rs    |  9
 src/tools/miri/src/concurrency/weak_memory.rs  |  8
 src/tools/miri/tests/pass/0weak_memory/weak.rs | 33
 3 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index de60e80b9f7..16cac7e6d04 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -276,7 +276,7 @@ struct MemoryCellClocks {
     /// zero on each write operation.
     read: VClock,
 
-    /// Atomic access, acquire, release sequence tracking clocks.
+    /// Atomic access tracking clocks.
     /// For non-atomic memory this value is set to None.
     /// For atomic memory, each byte carries this information.
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
@@ -555,7 +555,8 @@ impl MemoryCellClocks {
         // The handling of release sequences was changed in C++20 and so
         // the code here is different from the paper, since now all relaxed
         // stores break release sequences. The exception for same-thread
-        // relaxed stores has been removed.
+        // relaxed stores has been removed. We always overwrite the `sync_vector`,
+        // meaning the previous release sequence is broken.
         let atomic = self.atomic_mut_unwrap();
         atomic.sync_vector.clone_from(&thread_clocks.fence_release);
         Ok(())
@@ -571,6 +572,8 @@ impl MemoryCellClocks {
     ) -> Result<(), DataRace> {
         self.atomic_write_detect(thread_clocks, index, access_size)?;
         let atomic = self.atomic_mut_unwrap();
+        // This *joining* of `sync_vector` implements release sequences: future
+        // reads of this location will acquire our clock *and* what was here before.
         atomic.sync_vector.join(&thread_clocks.clock);
         Ok(())
     }
@@ -585,6 +588,8 @@ impl MemoryCellClocks {
     ) -> Result<(), DataRace> {
         self.atomic_write_detect(thread_clocks, index, access_size)?;
         let atomic = self.atomic_mut_unwrap();
+        // This *joining* of `sync_vector` implements release sequences: future
+        // reads of this location will acquire our fence clock *and* what was here before.
         atomic.sync_vector.join(&thread_clocks.fence_release);
         Ok(())
     }
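
Context for the two different `sync_vector` updates above: under C++20, a relaxed *store* (from any thread) breaks a release sequence, which is why the plain-store path overwrites the clock, while an RMW *continues* the sequence, which is why the RMW paths join instead. A minimal runnable sketch of the guarantee the join preserves (not part of this commit; all names are illustrative):

    use std::sync::atomic::{AtomicUsize, Ordering::*};
    use std::thread;

    static DATA: AtomicUsize = AtomicUsize::new(0);
    static FLAG: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let producer = thread::spawn(|| {
            DATA.store(42, Relaxed);
            FLAG.store(1, Release); // heads a release sequence
        });
        let relayer = thread::spawn(|| {
            // A relaxed RMW continues the release sequence even from another
            // thread; a plain `FLAG.store(2, Relaxed)` here would break it.
            FLAG.fetch_add(1, Relaxed);
        });
        let consumer = thread::spawn(|| {
            // Reading 2 means we read from the RMW, which is part of the
            // release sequence headed by the release store; the load then
            // synchronizes-with that store, so DATA == 42 is guaranteed.
            if FLAG.load(Acquire) == 2 {
                assert_eq!(DATA.load(Relaxed), 42);
            }
        });
        producer.join().unwrap();
        relayer.join().unwrap();
        consumer.join().unwrap();
    }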
diff --git a/src/tools/miri/src/concurrency/weak_memory.rs b/src/tools/miri/src/concurrency/weak_memory.rs
index a752ef7e454..caa27422d2f 100644
--- a/src/tools/miri/src/concurrency/weak_memory.rs
+++ b/src/tools/miri/src/concurrency/weak_memory.rs
@@ -159,9 +159,12 @@ struct StoreElement {
 
 #[derive(Debug, Clone, PartialEq, Eq, Default)]
 struct LoadInfo {
-    /// Timestamp of first loads from this store element by each thread
+    /// Timestamp of the first load from this store element, for each thread.
     timestamps: FxHashMap<VectorIdx, VTimestamp>,
-    /// Whether this store element has been read by an SC load
+    /// Whether this store element has been read by an SC load.
+    /// This is crucial for ensuring that we respect coherence-ordered-before. Concretely,
+    /// we use this to ensure that once a store element has been seen by an SC load,
+    /// no later SC load can see an `mo`-earlier store element.
     sc_loaded: bool,
 }
 
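For intuition on the `sc_loaded` flag: SC loads must be consistent with a single total order over all SC operations. The classic litmus test is IRIW, where the forbidden outcome would require each reader's final SC load to return an `mo`-earlier store element than an SC load the other reader already performed, which no single SC order can permit. A runnable sketch (not from this commit; names are illustrative):

    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    use std::thread;

    static X: AtomicUsize = AtomicUsize::new(0);
    static Y: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let w1 = thread::spawn(|| X.store(1, SeqCst));
        let w2 = thread::spawn(|| Y.store(1, SeqCst));
        let r1 = thread::spawn(|| (X.load(SeqCst), Y.load(SeqCst)));
        let r2 = thread::spawn(|| (Y.load(SeqCst), X.load(SeqCst)));
        w1.join().unwrap();
        w2.join().unwrap();
        let (a, b) = r1.join().unwrap();
        let (c, d) = r2.join().unwrap();
        // Under SeqCst both readers must agree on one order of the two
        // stores: r1 seeing X before Y while r2 sees Y before X is forbidden.
        assert!(!(a == 1 && b == 0 && c == 1 && d == 0));
    }
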
@@ -476,6 +479,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
             let range = alloc_range(base_offset, place.layout.size);
             let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, Some(init))?;
+            // An RMW always reads from the most recent store in modification order.
             buffer.read_from_last_store(global, threads, atomic == AtomicRwOrd::SeqCst);
             buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
         }
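
The "reads from the most recent store" rule for RMWs is also exactly what makes concurrent increments lossless, even with `Relaxed`: each `fetch_add` reads the immediately preceding value in modification order. A small runnable illustration (not part of this commit):

    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
    use std::thread;

    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let handles: Vec<_> = (0..4)
            .map(|_| {
                thread::spawn(|| {
                    for _ in 0..1000 {
                        // An RMW reads the latest value in modification order,
                        // so no increment can be lost, even with Relaxed.
                        COUNTER.fetch_add(1, Relaxed);
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(COUNTER.load(Relaxed), 4000);
    }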
diff --git a/src/tools/miri/tests/pass/0weak_memory/weak.rs b/src/tools/miri/tests/pass/0weak_memory/weak.rs
index 75c6e8f1a87..44791d7cd7c 100644
--- a/src/tools/miri/tests/pass/0weak_memory/weak.rs
+++ b/src/tools/miri/tests/pass/0weak_memory/weak.rs
@@ -198,11 +198,44 @@ fn weaker_release_sequences() {
     });
 }
 
+/// Ensures that normal release sequences (continued by RMWs) still work correctly.
+fn release_sequence() {
+    check_all_outcomes([None, Some(1)], || {
+        let x = static_atomic(0);
+        let y = static_atomic(0);
+
+        let t1 = spawn(move || {
+            x.store(2, Relaxed);
+        });
+        let t2 = spawn(move || {
+            y.store(1, Relaxed);
+            x.store(1, Release);
+            x.swap(3, Relaxed);
+        });
+        let t3 = spawn(move || {
+            if x.load(Acquire) == 3 {
+                // If we read 3 here, we are seeing the result of the `x.swap` above. The swap
+                // was relaxed, but as an RMW it continues the release sequence headed by the
+                // `x.store` (we know `t1` will not be scheduled in between). We therefore
+                // synchronize with the release store and cannot see `y == 0` any more.
+                Some(y.load(Relaxed))
+            } else {
+                None
+            }
+        });
+
+        t1.join().unwrap();
+        t2.join().unwrap();
+        t3.join().unwrap()
+    });
+}
+
 pub fn main() {
     relaxed();
     seq_cst();
     initialization_write(false);
     initialization_write(true);
     faa_replaced_by_load();
+    release_sequence();
     weaker_release_sequences();
 }