author     Patrick-6 <pamu99@gmx.ch>  2025-03-14 09:58:46 +0100
committer  Patrick-6 <pamu99@gmx.ch>  2025-05-02 14:49:17 +0200
commit     d940d0b7584fd7753f49d897c64eccc5535d2374 (patch)
tree       40028ee5a6f15012f50dfe10922b5baf670c4b8b
parent     a8da0fd22d0e6e0547dd14551b1144b82dfb9655 (diff)
download   rust-d940d0b7584fd7753f49d897c64eccc5535d2374.tar.gz
           rust-d940d0b7584fd7753f49d897c64eccc5535d2374.zip
Implement skeleton code for adding GenMC support to Miri (not yet functional).
- Add a cargo feature to enable GenMC support (off by default)
- Add support for GenMC data structures to MiriMachine
- Adjust several functions where GenMC needs to be informed about relevant events (e.g., atomic accesses)
- Add skeleton code for parsing GenMC command line arguments
- Some cleanup
  - Finish sentences with a `.`
  - Fix some spelling errors/typos
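
Note: GenMC mode currently requires the data race detector and weak memory
emulation to stay enabled, and it (for now) disables the aliasing-model
borrow tracker; see the new flag validation in src/bin/miri.rs.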

Co-authored-by: Ralf Jung <post@ralfj.de>
-rw-r--r--  src/tools/miri/.gitignore                                 |   5
-rw-r--r--  src/tools/miri/Cargo.toml                                 |   1
-rw-r--r--  src/tools/miri/src/alloc_addresses/mod.rs                 |  12
-rw-r--r--  src/tools/miri/src/bin/miri.rs                            |  60
-rw-r--r--  src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs  |   4
-rw-r--r--  src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs     |   2
-rw-r--r--  src/tools/miri/src/concurrency/data_race.rs               | 256
-rw-r--r--  src/tools/miri/src/concurrency/data_race_handler.rs       |  91
-rw-r--r--  src/tools/miri/src/concurrency/genmc/config.rs            |  19
-rw-r--r--  src/tools/miri/src/concurrency/genmc/dummy.rs             | 239
-rw-r--r--  src/tools/miri/src/concurrency/genmc/mod.rs               | 284
-rw-r--r--  src/tools/miri/src/concurrency/init_once.rs               |   4
-rw-r--r--  src/tools/miri/src/concurrency/mod.rs                     |  16
-rw-r--r--  src/tools/miri/src/concurrency/sync.rs                    |  23
-rw-r--r--  src/tools/miri/src/concurrency/thread.rs                  | 230
-rw-r--r--  src/tools/miri/src/concurrency/vector_clock.rs            |  10
-rw-r--r--  src/tools/miri/src/concurrency/weak_memory.rs             |  34
-rw-r--r--  src/tools/miri/src/diagnostics.rs                         |   9
-rw-r--r--  src/tools/miri/src/eval.rs                                |  67
-rw-r--r--  src/tools/miri/src/intrinsics/atomic.rs                   |  31
-rw-r--r--  src/tools/miri/src/lib.rs                                 |   1
-rw-r--r--  src/tools/miri/src/machine.rs                             | 163
22 files changed, 1254 insertions, 307 deletions
diff --git a/src/tools/miri/.gitignore b/src/tools/miri/.gitignore
index 03c5591b787..ed2d0ba7ba0 100644
--- a/src/tools/miri/.gitignore
+++ b/src/tools/miri/.gitignore
@@ -9,6 +9,9 @@ tex/*/out
 *.mm_profdata
 perf.data
 perf.data.old
-flamegraph.svg
+flamegraph*.svg
+rustc-ice*.txt
 tests/native-lib/libtestlib.so
 .auto-*
+
+/genmc/
diff --git a/src/tools/miri/Cargo.toml b/src/tools/miri/Cargo.toml
index bb24e700e73..7b7be97aa51 100644
--- a/src/tools/miri/Cargo.toml
+++ b/src/tools/miri/Cargo.toml
@@ -65,6 +65,7 @@ harness = false
 
 [features]
 default = ["stack-cache"]
+genmc = []
 stack-cache = []
 stack-cache-consistency-check = ["stack-cache"]
 
diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs
index 335e8d76999..75eb0415a6f 100644
--- a/src/tools/miri/src/alloc_addresses/mod.rs
+++ b/src/tools/miri/src/alloc_addresses/mod.rs
@@ -114,8 +114,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         memory_kind: MemoryKind,
     ) -> InterpResult<'tcx, u64> {
         let this = self.eval_context_ref();
-        let mut rng = this.machine.rng.borrow_mut();
         let info = this.get_alloc_info(alloc_id);
+
+        // Miri's address assignment leaks state across thread boundaries, which is incompatible
+        // with GenMC execution. So we instead let GenMC assign addresses to allocations.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            let addr = genmc_ctx.handle_alloc(&this.machine, info.size, info.align, memory_kind)?;
+            return interp_ok(addr);
+        }
+
+        let mut rng = this.machine.rng.borrow_mut();
         // This is either called immediately after allocation (and then cached), or when
         // adjusting `tcx` pointers (which never get freed). So assert that we are looking
         // at a live allocation. This also ensures that we never re-assign an address to an
@@ -490,7 +498,7 @@ impl<'tcx> MiriMachine<'tcx> {
         // Also remember this address for future reuse.
         let thread = self.threads.active_thread();
         global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
-            if let Some(data_race) = &self.data_race {
+            if let Some(data_race) = self.data_race.as_vclocks_ref() {
                 data_race.release_clock(&self.threads, |clock| clock.clone())
             } else {
                 VClock::default()
diff --git a/src/tools/miri/src/bin/miri.rs b/src/tools/miri/src/bin/miri.rs
index dce2ac77c2a..411f9191bda 100644
--- a/src/tools/miri/src/bin/miri.rs
+++ b/src/tools/miri/src/bin/miri.rs
@@ -28,13 +28,14 @@ use std::env::{self, VarError};
 use std::num::NonZero;
 use std::ops::Range;
 use std::path::PathBuf;
+use std::rc::Rc;
 use std::str::FromStr;
 use std::sync::atomic::{AtomicI32, AtomicU32, Ordering};
 use std::sync::{Arc, Once};
 
 use miri::{
-    BacktraceStyle, BorrowTrackerMethod, MiriConfig, MiriEntryFnType, ProvenanceMode, RetagFields,
-    ValidationMode,
+    BacktraceStyle, BorrowTrackerMethod, GenmcConfig, GenmcCtx, MiriConfig, MiriEntryFnType,
+    ProvenanceMode, RetagFields, ValidationMode,
 };
 use rustc_abi::ExternAbi;
 use rustc_data_structures::sync;
@@ -60,6 +61,8 @@ use tracing::debug;
 struct MiriCompilerCalls {
     miri_config: Option<MiriConfig>,
     many_seeds: Option<ManySeedsConfig>,
+    /// Settings for using GenMC with Miri.
+    genmc_config: Option<GenmcConfig>,
 }
 
 struct ManySeedsConfig {
@@ -68,8 +71,12 @@ struct ManySeedsConfig {
 }
 
 impl MiriCompilerCalls {
-    fn new(miri_config: MiriConfig, many_seeds: Option<ManySeedsConfig>) -> Self {
-        Self { miri_config: Some(miri_config), many_seeds }
+    fn new(
+        miri_config: MiriConfig,
+        many_seeds: Option<ManySeedsConfig>,
+        genmc_config: Option<GenmcConfig>,
+    ) -> Self {
+        Self { miri_config: Some(miri_config), many_seeds, genmc_config }
     }
 }
 
@@ -179,6 +186,12 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
                     optimizations is usually marginal at best.");
         }
 
+        if let Some(genmc_config) = &self.genmc_config {
+            let _genmc_ctx = Rc::new(GenmcCtx::new(&config, genmc_config));
+
+            todo!("GenMC mode not yet implemented");
+        };
+
         if let Some(many_seeds) = self.many_seeds.take() {
             assert!(config.seed.is_none());
             let exit_code = sync::IntoDynSyncSend(AtomicI32::new(rustc_driver::EXIT_SUCCESS));
@@ -187,8 +200,14 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
                 let mut config = config.clone();
                 config.seed = Some((*seed).into());
                 eprintln!("Trying seed: {seed}");
-                let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, config)
-                    .unwrap_or(rustc_driver::EXIT_FAILURE);
+                let return_code = miri::eval_entry(
+                    tcx,
+                    entry_def_id,
+                    entry_type,
+                    &config,
+                    /* genmc_ctx */ None,
+                )
+                .unwrap_or(rustc_driver::EXIT_FAILURE);
                 if return_code != rustc_driver::EXIT_SUCCESS {
                     eprintln!("FAILING SEED: {seed}");
                     if !many_seeds.keep_going {
@@ -206,11 +225,12 @@ impl rustc_driver::Callbacks for MiriCompilerCalls {
             }
             std::process::exit(exit_code.0.into_inner());
         } else {
-            let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, config)
+            let return_code = miri::eval_entry(tcx, entry_def_id, entry_type, &config, None)
                 .unwrap_or_else(|| {
                     tcx.dcx().abort_if_errors();
                     rustc_driver::EXIT_FAILURE
                 });
+
             std::process::exit(return_code);
         }
 
@@ -506,6 +526,7 @@ fn main() {
     let mut many_seeds_keep_going = false;
     let mut miri_config = MiriConfig::default();
     miri_config.env = env_snapshot;
+    let mut genmc_config = None;
 
     let mut rustc_args = vec![];
     let mut after_dashdash = false;
@@ -603,6 +624,10 @@ fn main() {
             many_seeds = Some(0..64);
         } else if arg == "-Zmiri-many-seeds-keep-going" {
             many_seeds_keep_going = true;
+        } else if let Some(trimmed_arg) = arg.strip_prefix("-Zmiri-genmc") {
+            // FIXME(GenMC): Currently, GenMC mode is incompatible with aliasing model checking.
+            miri_config.borrow_tracker = None;
+            GenmcConfig::parse_arg(&mut genmc_config, trimmed_arg);
         } else if let Some(param) = arg.strip_prefix("-Zmiri-env-forward=") {
             miri_config.forwarded_env_vars.push(param.to_owned());
         } else if let Some(param) = arg.strip_prefix("-Zmiri-env-set=") {
@@ -697,7 +722,7 @@ fn main() {
             rustc_args.push(arg);
         }
     }
-    // `-Zmiri-unique-is-unique` should only be used with `-Zmiri-tree-borrows`
+    // `-Zmiri-unique-is-unique` should only be used with `-Zmiri-tree-borrows`.
     if miri_config.unique_is_unique
         && !matches!(miri_config.borrow_tracker, Some(BorrowTrackerMethod::TreeBorrows))
     {
@@ -734,7 +759,24 @@ fn main() {
     let many_seeds =
         many_seeds.map(|seeds| ManySeedsConfig { seeds, keep_going: many_seeds_keep_going });
 
+    // Validate settings for data race detection and GenMC mode.
+    assert_eq!(genmc_config.is_some(), miri_config.genmc_mode);
+    if genmc_config.is_some() {
+        if !miri_config.data_race_detector {
+            show_error!("Cannot disable data race detection in GenMC mode (currently)");
+        } else if !miri_config.weak_memory_emulation {
+            show_error!("Cannot disable weak memory emulation in GenMC mode");
+        }
+    } else if miri_config.weak_memory_emulation && !miri_config.data_race_detector {
+        show_error!(
+            "Weak memory emulation cannot be enabled when the data race detector is disabled"
+        );
+    };
+
     debug!("rustc arguments: {:?}", rustc_args);
     debug!("crate arguments: {:?}", miri_config.args);
-    run_compiler_and_exit(&rustc_args, &mut MiriCompilerCalls::new(miri_config, many_seeds))
+    run_compiler_and_exit(
+        &rustc_args,
+        &mut MiriCompilerCalls::new(miri_config, many_seeds, genmc_config),
+    )
 }
diff --git a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
index 18a5a0612bb..45863fee872 100644
--- a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
+++ b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs
@@ -740,7 +740,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
                 if let Some(access) = access {
                     assert_eq!(access, AccessKind::Write);
                     // Make sure the data race model also knows about this.
-                    if let Some(data_race) = alloc_extra.data_race.as_mut() {
+                    if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                         data_race.write(
                             alloc_id,
                             range,
@@ -789,7 +789,7 @@ trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
                     if let Some(access) = access {
                         assert_eq!(access, AccessKind::Read);
                         // Make sure the data race model also knows about this.
-                        if let Some(data_race) = alloc_extra.data_race.as_ref() {
+                        if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                             data_race.read(
                                 alloc_id,
                                 range,
diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
index ff09821394a..ce7d7ab25d7 100644
--- a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
+++ b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs
@@ -317,7 +317,7 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         // Also inform the data race model (but only if any bytes are actually affected).
         if range.size.bytes() > 0 && new_perm.initial_read {
-            if let Some(data_race) = alloc_extra.data_race.as_ref() {
+            if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                 data_race.read(
                     alloc_id,
                     range,
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index 847c6823475..714eb1fba91 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -1,4 +1,4 @@
-//! Implementation of a data-race detector using Lamport Timestamps / Vector-clocks
+//! Implementation of a data-race detector using Lamport Timestamps / Vector clocks
 //! based on the Dynamic Race Detection for C++:
 //! <https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf>
 //! which does not report false-positives when fences are used, and gives better
@@ -54,6 +54,7 @@ use rustc_span::Span;
 
 use super::vector_clock::{VClock, VTimestamp, VectorIdx};
 use super::weak_memory::EvalContextExt as _;
+use crate::concurrency::GlobalDataRaceHandler;
 use crate::diagnostics::RacingOp;
 use crate::*;
 
@@ -259,7 +260,7 @@ enum AccessType {
 /// Per-byte vector clock metadata for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
 struct MemoryCellClocks {
-    /// The vector-clock timestamp and the thread that did the last non-atomic write. We don't need
+    /// The vector clock timestamp and the thread that did the last non-atomic write. We don't need
     /// a full `VClock` here, it's always a single thread and nothing synchronizes, so the effective
     /// clock is all-0 except for the thread that did the write.
     write: (VectorIdx, VTimestamp),
@@ -269,7 +270,7 @@ struct MemoryCellClocks {
     /// a deallocation of memory.
     write_type: NaWriteType,
 
-    /// The vector-clock of all non-atomic reads that happened since the last non-atomic write
+    /// The vector clock of all non-atomic reads that happened since the last non-atomic write
     /// (i.e., we join together the "singleton" clocks corresponding to each read). It is reset to
     /// zero on each write operation.
     read: VClock,
@@ -298,7 +299,7 @@ struct ThreadExtraState {
 }
 
 /// Global data-race detection state, contains the currently
-/// executing thread as well as the vector-clocks associated
+/// executing thread as well as the vector clocks associated
 /// with each of the threads.
 // FIXME: it is probably better to have one large RefCell, than to have so many small ones.
 #[derive(Debug, Clone)]
@@ -335,7 +336,7 @@ pub struct GlobalState {
     /// for use as the index for a new thread.
     /// Elements in this set may still require the vector index to
     /// report data-races, and can only be re-used after all
-    /// active vector-clocks catch up with the threads timestamp.
+    /// active vector clocks catch up with the threads timestamp.
     reuse_candidates: RefCell<FxHashSet<VectorIdx>>,
 
     /// We make SC fences act like RMWs on a global location.
@@ -348,6 +349,9 @@ pub struct GlobalState {
 
     /// Track when an outdated (weak memory) load happens.
     pub track_outdated_loads: bool,
+
+    /// Whether weak memory emulation is enabled.
+    pub weak_memory: bool,
 }
 
 impl VisitProvenance for GlobalState {
@@ -680,6 +684,23 @@ impl MemoryCellClocks {
     }
 }
 
+impl GlobalDataRaceHandler {
+    /// Select whether data race checking is disabled. This is solely an
+    /// implementation detail of `allow_data_races_*` and must not be used anywhere else!
+    fn set_ongoing_action_data_race_free(&self, enable: bool) {
+        match self {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Vclocks(data_race) => {
+                let old = data_race.ongoing_action_data_race_free.replace(enable);
+                assert_ne!(old, enable, "cannot nest allow_data_races");
+            }
+            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
+                genmc_ctx.set_ongoing_action_data_race_free(enable);
+            }
+        }
+    }
+}
+
 /// Evaluation context extensions.
 impl<'tcx> EvalContextExt<'tcx> for MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
@@ -696,6 +717,19 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         // This is fine with StackedBorrow and race checks because they don't concern metadata on
         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
         // Only metadata on the location itself is used.
+
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let old_val = None;
+            return genmc_ctx.atomic_load(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                old_val,
+            );
+        }
+
         let scalar = this.allow_data_races_ref(move |this| this.read_scalar(place))?;
         let buffered_scalar = this.buffered_atomic_read(place, atomic, scalar, || {
             this.validate_atomic_load(place, atomic)
@@ -718,6 +752,12 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         // This is also a very special exception where we just ignore an error -- if this read
         // was UB e.g. because the memory is uninitialized, we don't want to know!
         let old_val = this.run_for_validation_mut(|this| this.read_scalar(dest)).discard_err();
+        // Inform GenMC about the atomic store.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            genmc_ctx.atomic_store(this, dest.ptr().addr(), dest.layout.size, val, atomic)?;
+            return interp_ok(());
+        }
         this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
         this.validate_atomic_store(dest, atomic)?;
         this.buffered_atomic_write(val, dest, atomic, old_val)
@@ -737,6 +777,21 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
 
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
 
+        // Inform GenMC about the atomic rmw operation.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, new_val) = genmc_ctx.atomic_rmw_op(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                (op, not),
+                rhs.to_scalar(),
+            )?;
+            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
+            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
+        }
+
         let val = this.binary_op(op, &old, rhs)?;
         let val = if not { this.unary_op(mir::UnOp::Not, &val)? } else { val };
         this.allow_data_races_mut(|this| this.write_immediate(*val, place))?;
@@ -761,6 +816,19 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         let old = this.allow_data_races_mut(|this| this.read_scalar(place))?;
         this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
 
+        // Inform GenMC about the atomic exchange.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, _is_success) = genmc_ctx.atomic_exchange(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                new,
+                atomic,
+            )?;
+            return interp_ok(old_val);
+        }
+
         this.validate_atomic_rmw(place, atomic)?;
 
         this.buffered_atomic_rmw(new, place, atomic, old)?;
@@ -780,6 +848,23 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         this.atomic_access_check(place, AtomicAccessType::Rmw)?;
 
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
+
+        // Inform GenMC about the atomic min/max operation.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics
+            let (old_val, new_val) = genmc_ctx.atomic_min_max_op(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                atomic,
+                min,
+                old.layout.backend_repr.is_signed(),
+                rhs.to_scalar(),
+            )?;
+            this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?;
+            return interp_ok(ImmTy::from_scalar(old_val, old.layout));
+        }
+
         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
 
         #[rustfmt::skip] // rustfmt makes this unreadable
@@ -823,6 +908,25 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         // read ordering and write in the success case.
         // Read as immediate for the sake of `binary_op()`
         let old = this.allow_data_races_mut(|this| this.read_immediate(place))?;
+
+        // Inform GenMC about the atomic compare exchange.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            let (old, cmpxchg_success) = genmc_ctx.atomic_compare_exchange(
+                this,
+                place.ptr().addr(),
+                place.layout.size,
+                this.read_scalar(expect_old)?,
+                new,
+                success,
+                fail,
+                can_fail_spuriously,
+            )?;
+            if cmpxchg_success {
+                this.allow_data_races_mut(|this| this.write_scalar(new, place))?;
+            }
+            return interp_ok(Immediate::ScalarPair(old, Scalar::from_bool(cmpxchg_success)));
+        }
+
         // `binary_op` will bail if either of them is not a scalar.
         let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
         // If the operation would succeed, but is "weak", fail some portion
@@ -859,49 +963,11 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
     /// Update the data-race detector for an atomic fence on the current thread.
     fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let current_span = this.machine.current_span();
-        if let Some(data_race) = &mut this.machine.data_race {
-            data_race.maybe_perform_sync_operation(
-                &this.machine.threads,
-                current_span,
-                |index, mut clocks| {
-                    trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
-
-                    // Apply data-race detection for the current fences
-                    // this treats AcqRel and SeqCst as the same as an acquire
-                    // and release fence applied in the same timestamp.
-                    if atomic != AtomicFenceOrd::Release {
-                        // Either Acquire | AcqRel | SeqCst
-                        clocks.apply_acquire_fence();
-                    }
-                    if atomic == AtomicFenceOrd::SeqCst {
-                        // Behave like an RMW on the global fence location. This takes full care of
-                        // all the SC fence requirements, including C++17 §32.4 [atomics.order]
-                        // paragraph 6 (which would limit what future reads can see). It also rules
-                        // out many legal behaviors, but we don't currently have a model that would
-                        // be more precise.
-                        // Also see the second bullet on page 10 of
-                        // <https://www.cs.tau.ac.il/~orilahav/papers/popl21_robustness.pdf>.
-                        let mut sc_fence_clock = data_race.last_sc_fence.borrow_mut();
-                        sc_fence_clock.join(&clocks.clock);
-                        clocks.clock.join(&sc_fence_clock);
-                        // Also establish some sort of order with the last SC write that happened, globally
-                        // (but this is only respected by future reads).
-                        clocks.write_seqcst.join(&data_race.last_sc_write_per_thread.borrow());
-                    }
-                    // The release fence is last, since both of the above could alter our clock,
-                    // which should be part of what is being released.
-                    if atomic != AtomicFenceOrd::Acquire {
-                        // Either Release | AcqRel | SeqCst
-                        clocks.apply_release_fence();
-                    }
-
-                    // Increment timestamp in case of release semantics.
-                    interp_ok(atomic != AtomicFenceOrd::Acquire)
-                },
-            )
-        } else {
-            interp_ok(())
+        let machine = &this.machine;
+        match &this.machine.data_race {
+            GlobalDataRaceHandler::None => interp_ok(()),
+            GlobalDataRaceHandler::Vclocks(data_race) => data_race.atomic_fence(machine, atomic),
+            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.atomic_fence(machine, atomic),
         }
     }
 
@@ -910,10 +976,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
     fn allow_data_races_all_threads_done(&mut self) {
         let this = self.eval_context_ref();
         assert!(this.have_all_terminated());
-        if let Some(data_race) = &this.machine.data_race {
-            let old = data_race.ongoing_action_data_race_free.replace(true);
-            assert!(!old, "cannot nest allow_data_races");
-        }
+        this.machine.data_race.set_ongoing_action_data_race_free(true);
     }
 
     /// Calls the callback with the "release" clock of the current thread.
@@ -923,14 +986,16 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
     /// The closure will only be invoked if data race handling is on.
     fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
         let this = self.eval_context_ref();
-        Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback))
+        Some(
+            this.machine.data_race.as_vclocks_ref()?.release_clock(&this.machine.threads, callback),
+        )
     }
 
     /// Acquire the given clock into the current thread, establishing synchronization with
     /// the moment when that clock snapshot was taken via `release_clock`.
     fn acquire_clock(&self, clock: &VClock) {
         let this = self.eval_context_ref();
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.acquire_clock(clock, &this.machine.threads);
         }
     }
@@ -1132,7 +1197,7 @@ impl VClockAlloc {
         machine: &MiriMachine<'_>,
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
-        let global = machine.data_race.as_ref().unwrap();
+        let global = machine.data_race.as_vclocks_ref().unwrap();
         if !global.race_detecting() {
             return interp_ok(());
         }
@@ -1174,7 +1239,7 @@ impl VClockAlloc {
         machine: &mut MiriMachine<'_>,
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
-        let global = machine.data_race.as_mut().unwrap();
+        let global = machine.data_race.as_vclocks_mut().unwrap();
         if !global.race_detecting() {
             return interp_ok(());
         }
@@ -1228,7 +1293,7 @@ impl Default for LocalClocks {
 impl FrameState {
     pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
-        let global = machine.data_race.as_ref().unwrap();
+        let global = machine.data_race.as_vclocks_ref().unwrap();
         if !global.race_detecting() {
             return;
         }
@@ -1258,7 +1323,7 @@ impl FrameState {
 
     pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
         let current_span = machine.current_span();
-        let global = machine.data_race.as_ref().unwrap();
+        let global = machine.data_race.as_vclocks_ref().unwrap();
         if !global.race_detecting() {
             return;
         }
@@ -1281,7 +1346,7 @@ impl FrameState {
         alloc: &mut VClockAlloc,
         machine: &MiriMachine<'_>,
     ) {
-        let global = machine.data_race.as_ref().unwrap();
+        let global = machine.data_race.as_vclocks_ref().unwrap();
         if !global.race_detecting() {
             return;
         }
@@ -1314,14 +1379,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     #[inline]
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
-        if let Some(data_race) = &this.machine.data_race {
-            let old = data_race.ongoing_action_data_race_free.replace(true);
-            assert!(!old, "cannot nest allow_data_races");
-        }
+        this.machine.data_race.set_ongoing_action_data_race_free(true);
         let result = op(this);
-        if let Some(data_race) = &this.machine.data_race {
-            data_race.ongoing_action_data_race_free.set(false);
-        }
+        this.machine.data_race.set_ongoing_action_data_race_free(false);
         result
     }
 
@@ -1331,14 +1391,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     #[inline]
     fn allow_data_races_mut<R>(&mut self, op: impl FnOnce(&mut MiriInterpCx<'tcx>) -> R) -> R {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &this.machine.data_race {
-            let old = data_race.ongoing_action_data_race_free.replace(true);
-            assert!(!old, "cannot nest allow_data_races");
-        }
+        this.machine.data_race.set_ongoing_action_data_race_free(true);
         let result = op(this);
-        if let Some(data_race) = &this.machine.data_race {
-            data_race.ongoing_action_data_race_free.set(false);
-        }
+        this.machine.data_race.set_ongoing_action_data_race_free(false);
         result
     }
 
@@ -1355,7 +1410,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
         this.check_ptr_align(place.ptr(), align)?;
         // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable
-        // memory on many targets (i.e., they segfault if taht memory is mapped read-only), and
+        // memory on many targets (i.e., they segfault if that memory is mapped read-only), and
         // atomic loads can be implemented via compare_exchange on some targets. There could
         // possibly be some very specific exceptions to this, see
         // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
@@ -1486,7 +1541,9 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         assert!(access.is_atomic());
-        let Some(data_race) = &this.machine.data_race else { return interp_ok(()) };
+        let Some(data_race) = this.machine.data_race.as_vclocks_ref() else {
+            return interp_ok(());
+        };
         if !data_race.race_detecting() {
             return interp_ok(());
         }
@@ -1494,7 +1551,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
         let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
         // Load and log the atomic operation.
         // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_vclocks_ref().unwrap();
         trace!(
             "Atomic op({}) with ordering {:?} on {:?} (size={})",
             access.description(None, None),
@@ -1565,6 +1622,7 @@ impl GlobalState {
             last_sc_fence: RefCell::new(VClock::default()),
             last_sc_write_per_thread: RefCell::new(VClock::default()),
             track_outdated_loads: config.track_outdated_loads,
+            weak_memory: config.weak_memory_emulation,
         };
 
         // Setup the main-thread since it is not explicitly created:
@@ -1728,7 +1786,7 @@ impl GlobalState {
         }
     }
 
-    /// On thread termination, the vector-clock may re-used
+    /// On thread termination, the vector clock may be re-used
     /// in the future once all remaining thread-clocks catch
     /// up with the time index of the terminated thread.
     /// This assigns thread termination with a unique index
@@ -1750,6 +1808,50 @@ impl GlobalState {
         reuse.insert(current_index);
     }
 
+    /// Update the data-race detector for an atomic fence on the current thread.
+    fn atomic_fence<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        atomic: AtomicFenceOrd,
+    ) -> InterpResult<'tcx> {
+        let current_span = machine.current_span();
+        self.maybe_perform_sync_operation(&machine.threads, current_span, |index, mut clocks| {
+            trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
+
+            // Apply data-race detection for the current fences
+            // this treats AcqRel and SeqCst as the same as an acquire
+            // and release fence applied in the same timestamp.
+            if atomic != AtomicFenceOrd::Release {
+                // Either Acquire | AcqRel | SeqCst
+                clocks.apply_acquire_fence();
+            }
+            if atomic == AtomicFenceOrd::SeqCst {
+                // Behave like an RMW on the global fence location. This takes full care of
+                // all the SC fence requirements, including C++17 §32.4 [atomics.order]
+                // paragraph 6 (which would limit what future reads can see). It also rules
+                // out many legal behaviors, but we don't currently have a model that would
+                // be more precise.
+                // Also see the second bullet on page 10 of
+                // <https://www.cs.tau.ac.il/~orilahav/papers/popl21_robustness.pdf>.
+                let mut sc_fence_clock = self.last_sc_fence.borrow_mut();
+                sc_fence_clock.join(&clocks.clock);
+                clocks.clock.join(&sc_fence_clock);
+                // Also establish some sort of order with the last SC write that happened, globally
+                // (but this is only respected by future reads).
+                clocks.write_seqcst.join(&self.last_sc_write_per_thread.borrow());
+            }
+            // The release fence is last, since both of the above could alter our clock,
+            // which should be part of what is being released.
+            if atomic != AtomicFenceOrd::Acquire {
+                // Either Release | AcqRel | SeqCst
+                clocks.apply_release_fence();
+            }
+
+            // Increment timestamp in case of release semantics.
+            interp_ok(atomic != AtomicFenceOrd::Acquire)
+        })
+    }
+
     /// Attempt to perform a synchronized operation, this
     /// will perform no operation if multi-threading is
     /// not currently enabled.
diff --git a/src/tools/miri/src/concurrency/data_race_handler.rs b/src/tools/miri/src/concurrency/data_race_handler.rs
new file mode 100644
index 00000000000..047c37e56b8
--- /dev/null
+++ b/src/tools/miri/src/concurrency/data_race_handler.rs
@@ -0,0 +1,91 @@
+use std::rc::Rc;
+
+use super::{data_race, weak_memory};
+use crate::concurrency::GenmcCtx;
+use crate::{VisitProvenance, VisitWith};
+
+pub enum GlobalDataRaceHandler {
+    /// No data race detection will be done.
+    None,
+    /// State required to run in GenMC mode.
+    /// In this mode, the program will be executed repeatedly to explore different concurrent executions.
+    /// The `GenmcCtx` must persist across multiple executions, so it is behind an `Rc`.
+    ///
+    /// The `GenmcCtx` has several methods with which to inform it about events like atomic memory accesses.
+    /// In GenMC mode, some functionality is taken over by GenMC:
+    /// - Memory Allocation:    Allocated addresses need to be consistent across executions, which Miri's allocator doesn't guarantee
+    /// - Scheduling:           To influence which concurrent execution we will explore next, GenMC takes over scheduling
+    /// - Atomic operations:    GenMC will ensure that we explore all possible values that the memory model allows
+    ///   an atomic operation to see at any specific point of the program.
+    Genmc(Rc<GenmcCtx>),
+    /// The default data race detector for Miri using vector clocks.
+    Vclocks(Box<data_race::GlobalState>),
+}
+
+#[derive(Debug)]
+pub enum AllocDataRaceHandler {
+    None,
+    Genmc,
+    /// Data race detection via the use of vector clocks.
+    /// Weak memory emulation via the use of store buffers (if enabled).
+    Vclocks(data_race::AllocState, Option<weak_memory::AllocState>),
+}
+
+impl GlobalDataRaceHandler {
+    pub fn is_none(&self) -> bool {
+        matches!(self, GlobalDataRaceHandler::None)
+    }
+
+    pub fn as_vclocks_ref(&self) -> Option<&data_race::GlobalState> {
+        if let Self::Vclocks(data_race) = self { Some(data_race) } else { None }
+    }
+
+    pub fn as_vclocks_mut(&mut self) -> Option<&mut data_race::GlobalState> {
+        if let Self::Vclocks(data_race) = self { Some(data_race) } else { None }
+    }
+
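+    /// Returns the `GenmcCtx` if Miri is running in GenMC mode.
+    /// Typical use at a hook point: `if let Some(genmc_ctx) = machine.data_race.as_genmc_ref() { ... }`.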
+    pub fn as_genmc_ref(&self) -> Option<&GenmcCtx> {
+        if let Self::Genmc(genmc_ctx) = self { Some(genmc_ctx) } else { None }
+    }
+}
+
+impl AllocDataRaceHandler {
+    pub fn as_vclocks_ref(&self) -> Option<&data_race::AllocState> {
+        if let Self::Vclocks(data_race, _weak_memory) = self { Some(data_race) } else { None }
+    }
+
+    pub fn as_vclocks_mut(&mut self) -> Option<&mut data_race::AllocState> {
+        if let Self::Vclocks(data_race, _weak_memory) = self { Some(data_race) } else { None }
+    }
+
+    pub fn as_weak_memory_ref(&self) -> Option<&weak_memory::AllocState> {
+        if let Self::Vclocks(_data_race, weak_memory) = self { weak_memory.as_ref() } else { None }
+    }
+
+    pub fn as_weak_memory_mut(&mut self) -> Option<&mut weak_memory::AllocState> {
+        if let Self::Vclocks(_data_race, weak_memory) = self { weak_memory.as_mut() } else { None }
+    }
+}
+
+impl VisitProvenance for GlobalDataRaceHandler {
+    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
+        match self {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Vclocks(data_race) => data_race.visit_provenance(visit),
+            GlobalDataRaceHandler::Genmc(genmc_ctx) => genmc_ctx.visit_provenance(visit),
+        }
+    }
+}
+
+impl VisitProvenance for AllocDataRaceHandler {
+    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
+        match self {
+            AllocDataRaceHandler::None => {}
+            AllocDataRaceHandler::Genmc => {}
+            AllocDataRaceHandler::Vclocks(data_race, weak_memory) => {
+                data_race.visit_provenance(visit);
+                weak_memory.visit_provenance(visit);
+            }
+        }
+    }
+}
diff --git a/src/tools/miri/src/concurrency/genmc/config.rs b/src/tools/miri/src/concurrency/genmc/config.rs
new file mode 100644
index 00000000000..f91211a670f
--- /dev/null
+++ b/src/tools/miri/src/concurrency/genmc/config.rs
@@ -0,0 +1,19 @@
+use crate::MiriConfig;
+
+#[derive(Debug, Default, Clone)]
+pub struct GenmcConfig {
+    // TODO: add fields
+}
+
+impl GenmcConfig {
+    /// Function for parsing command line options for GenMC mode.
+    /// All GenMC arguments start with the string "-Zmiri-genmc".
+    ///
+    /// `trimmed_arg` should be the argument to be parsed, with the prefix "-Zmiri-genmc" removed.
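+    /// (E.g., for the bare flag "-Zmiri-genmc", `trimmed_arg` is the empty string.)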
+    pub fn parse_arg(genmc_config: &mut Option<GenmcConfig>, trimmed_arg: &str) {
+        if genmc_config.is_none() {
+            *genmc_config = Some(Default::default());
+        }
+        todo!("implement parsing of GenMC options")
+    }
+}
diff --git a/src/tools/miri/src/concurrency/genmc/dummy.rs b/src/tools/miri/src/concurrency/genmc/dummy.rs
new file mode 100644
index 00000000000..3d0558fb685
--- /dev/null
+++ b/src/tools/miri/src/concurrency/genmc/dummy.rs
@@ -0,0 +1,239 @@
+#![allow(unused)]
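+//! Dummy implementations of the GenMC interface, used when Miri is built without the `genmc`
+//! feature (see the `cfg_match!` in `src/concurrency/mod.rs`). GenMC mode can then never be
+//! enabled, so all of these methods are unreachable.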
+
+use rustc_abi::{Align, Size};
+use rustc_const_eval::interpret::{InterpCx, InterpResult};
+use rustc_middle::mir;
+
+use crate::{
+    AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig,
+    MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith,
+};
+
+#[derive(Debug)]
+pub struct GenmcCtx {}
+
+#[derive(Debug, Default, Clone)]
+pub struct GenmcConfig {}
+
+impl GenmcCtx {
+    pub fn new(_miri_config: &MiriConfig, _genmc_config: &GenmcConfig) -> Self {
+        unreachable!()
+    }
+
+    pub fn get_stuck_execution_count(&self) -> usize {
+        unreachable!()
+    }
+
+    pub fn print_genmc_graph(&self) {
+        unreachable!()
+    }
+
+    pub fn is_exploration_done(&self) -> bool {
+        unreachable!()
+    }
+
+    /**** Memory access handling ****/
+
+    pub(crate) fn handle_execution_start(&self) {
+        unreachable!()
+    }
+
+    pub(crate) fn handle_execution_end<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+    ) -> Result<(), String> {
+        unreachable!()
+    }
+
+    pub(super) fn set_ongoing_action_data_race_free(&self, _enable: bool) {
+        unreachable!()
+    }
+
+    /// Might fail if there's a race; the load might also not read anything (returns `None`).
+    pub(crate) fn atomic_load<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _ordering: AtomicReadOrd,
+        _old_val: Option<Scalar>,
+    ) -> InterpResult<'tcx, Scalar> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_store<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _value: Scalar,
+        _ordering: AtomicWriteOrd,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_fence<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _ordering: AtomicFenceOrd,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_rmw_op<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _ordering: AtomicRwOrd,
+        (rmw_op, not): (mir::BinOp, bool),
+        _rhs_scalar: Scalar,
+    ) -> InterpResult<'tcx, (Scalar, Scalar)> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_min_max_op<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        ordering: AtomicRwOrd,
+        min: bool,
+        is_signed: bool,
+        rhs_scalar: Scalar,
+    ) -> InterpResult<'tcx, (Scalar, Scalar)> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_exchange<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _rhs_scalar: Scalar,
+        _ordering: AtomicRwOrd,
+    ) -> InterpResult<'tcx, (Scalar, bool)> {
+        unreachable!()
+    }
+
+    pub(crate) fn atomic_compare_exchange<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _expected_old_value: Scalar,
+        _new_value: Scalar,
+        _success: AtomicRwOrd,
+        _fail: AtomicReadOrd,
+        _can_fail_spuriously: bool,
+    ) -> InterpResult<'tcx, (Scalar, bool)> {
+        unreachable!()
+    }
+
+    pub(crate) fn memory_load<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _address: Size,
+        _size: Size,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    pub(crate) fn memory_store<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _address: Size,
+        _size: Size,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    /**** Memory (de)allocation ****/
+
+    pub(crate) fn handle_alloc<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _size: Size,
+        _alignment: Align,
+        _memory_kind: MemoryKind,
+    ) -> InterpResult<'tcx, u64> {
+        unreachable!()
+    }
+
+    pub(crate) fn handle_dealloc<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _address: Size,
+        _size: Size,
+        _align: Align,
+        _kind: MemoryKind,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    /**** Thread management ****/
+
+    pub(crate) fn handle_thread_create<'tcx>(
+        &self,
+        _threads: &ThreadManager<'tcx>,
+        _new_thread_id: ThreadId,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    pub(crate) fn handle_thread_join<'tcx>(
+        &self,
+        _active_thread_id: ThreadId,
+        _child_thread_id: ThreadId,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    pub(crate) fn handle_thread_stack_empty(&self, _thread_id: ThreadId) {
+        unreachable!()
+    }
+
+    pub(crate) fn handle_thread_finish<'tcx>(
+        &self,
+        _threads: &ThreadManager<'tcx>,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+
+    /**** Scheduling functionality ****/
+
+    pub(crate) fn schedule_thread<'tcx>(
+        &self,
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+    ) -> InterpResult<'tcx, ThreadId> {
+        unreachable!()
+    }
+
+    /**** Blocking instructions ****/
+
+    pub(crate) fn handle_verifier_assume<'tcx>(
+        &self,
+        _machine: &MiriMachine<'tcx>,
+        _condition: bool,
+    ) -> InterpResult<'tcx, ()> {
+        unreachable!()
+    }
+}
+
+impl VisitProvenance for GenmcCtx {
+    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
+        unreachable!()
+    }
+}
+
+impl GenmcConfig {
+    pub fn parse_arg(_genmc_config: &mut Option<GenmcConfig>, trimmed_arg: &str) {
+        unimplemented!(
+            "GenMC feature im Miri is disabled, cannot handle argument: \"-Zmiri-genmc{trimmed_arg}\""
+        );
+    }
+
+    pub fn should_print_graph(&self, _rep: usize) -> bool {
+        unreachable!()
+    }
+}
diff --git a/src/tools/miri/src/concurrency/genmc/mod.rs b/src/tools/miri/src/concurrency/genmc/mod.rs
new file mode 100644
index 00000000000..0dfd4b9b80f
--- /dev/null
+++ b/src/tools/miri/src/concurrency/genmc/mod.rs
@@ -0,0 +1,284 @@
+#![allow(unused)] // FIXME(GenMC): remove this
+
+use std::cell::Cell;
+
+use rustc_abi::{Align, Size};
+use rustc_const_eval::interpret::{InterpCx, InterpResult, interp_ok};
+use rustc_middle::mir;
+
+use crate::{
+    AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig,
+    MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith,
+};
+
+mod config;
+
+pub use self::config::GenmcConfig;
+
+// FIXME(GenMC): add fields
+pub struct GenmcCtx {
+    /// Some actions Miri does are allowed to cause data races.
+    /// GenMC will not be informed about certain actions (e.g. non-atomic loads) when this flag is set.
+    allow_data_races: Cell<bool>,
+}
+
+impl GenmcCtx {
+    /// Create a new `GenmcCtx` from a given config.
+    pub fn new(miri_config: &MiriConfig, genmc_config: &GenmcConfig) -> Self {
+        assert!(miri_config.genmc_mode);
+        todo!()
+    }
+
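+    /// Returns how many executions GenMC considered stuck (blocked) so far.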
+    pub fn get_stuck_execution_count(&self) -> usize {
+        todo!()
+    }
+
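+    /// Print the execution graph that GenMC built for the current program.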
+    pub fn print_genmc_graph(&self) {
+        todo!()
+    }
+
+    /// This function determines if we should continue exploring executions or if we are done.
+    ///
+    /// In GenMC mode, the input program should be repeatedly executed until this function returns `true` or an error is found.
+    pub fn is_exploration_done(&self) -> bool {
+        todo!()
+    }
+
+    /// Inform GenMC that a new program execution has started.
+    /// This function should be called at the start of every execution.
+    pub(crate) fn handle_execution_start(&self) {
+        todo!()
+    }
+
+    /// Inform GenMC that the program's execution has ended.
+    ///
+    /// This function must be called even when the execution got stuck (i.e., it returned an `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`).
+    pub(crate) fn handle_execution_end<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+    ) -> Result<(), String> {
+        todo!()
+    }
+
+    /**** Memory access handling ****/
+
+    /// Select whether the ongoing action is treated as data race free. This function should be used carefully!
+    ///
+    /// If `true` is passed, allow for data races to happen without triggering an error, until this function is called again with argument `false`.
+    /// This allows for racy non-atomic memory accesses to be ignored (GenMC is not informed about them at all).
+    ///
+    /// Certain operations are not permitted in GenMC mode with data races disabled and will cause a panic, e.g., atomic accesses or asking for scheduling decisions.
+    ///
+    /// # Panics
+    /// If the flag is set to the same value twice in a row (i.e., nesting is not allowed).
+    pub(super) fn set_ongoing_action_data_race_free(&self, enable: bool) {
+        let old = self.allow_data_races.replace(enable);
+        assert_ne!(old, enable, "cannot nest allow_data_races");
+    }
+
+    pub(crate) fn atomic_load<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        ordering: AtomicReadOrd,
+        old_val: Option<Scalar>,
+    ) -> InterpResult<'tcx, Scalar> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    pub(crate) fn atomic_store<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        value: Scalar,
+        ordering: AtomicWriteOrd,
+    ) -> InterpResult<'tcx, ()> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    pub(crate) fn atomic_fence<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        ordering: AtomicFenceOrd,
+    ) -> InterpResult<'tcx, ()> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    /// Inform GenMC about an atomic read-modify-write operation.
+    ///
+    /// Returns `(old_val, new_val)`.
+    pub(crate) fn atomic_rmw_op<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        ordering: AtomicRwOrd,
+        (rmw_op, not): (mir::BinOp, bool),
+        rhs_scalar: Scalar,
+    ) -> InterpResult<'tcx, (Scalar, Scalar)> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    /// Inform GenMC about an atomic `min` or `max` operation.
+    ///
+    /// Returns `(old_val, new_val)`.
+    pub(crate) fn atomic_min_max_op<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        ordering: AtomicRwOrd,
+        min: bool,
+        is_signed: bool,
+        rhs_scalar: Scalar,
+    ) -> InterpResult<'tcx, (Scalar, Scalar)> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    pub(crate) fn atomic_exchange<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        rhs_scalar: Scalar,
+        ordering: AtomicRwOrd,
+    ) -> InterpResult<'tcx, (Scalar, bool)> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    pub(crate) fn atomic_compare_exchange<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        address: Size,
+        size: Size,
+        expected_old_value: Scalar,
+        new_value: Scalar,
+        success: AtomicRwOrd,
+        fail: AtomicReadOrd,
+        can_fail_spuriously: bool,
+    ) -> InterpResult<'tcx, (Scalar, bool)> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    /// Inform GenMC about a non-atomic memory load.
+    ///
+    /// NOTE: Unlike for *atomic* loads, we don't return a value here. Non-atomic values are still handled by Miri.
+    pub(crate) fn memory_load<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        address: Size,
+        size: Size,
+    ) -> InterpResult<'tcx, ()> {
+        todo!()
+    }
+
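+    /// Inform GenMC about a non-atomic memory store.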
+    pub(crate) fn memory_store<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        address: Size,
+        size: Size,
+    ) -> InterpResult<'tcx, ()> {
+        todo!()
+    }
+
+    /**** Memory (de)allocation ****/
+
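+    /// Inform GenMC about a new memory allocation. In GenMC mode, GenMC (not Miri) assigns the
+    /// addresses of allocations, so this returns the base address chosen for the allocation.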
+    pub(crate) fn handle_alloc<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        size: Size,
+        alignment: Align,
+        memory_kind: MemoryKind,
+    ) -> InterpResult<'tcx, u64> {
+        todo!()
+    }
+
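+    /// Inform GenMC about the deallocation of a memory region.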
+    pub(crate) fn handle_dealloc<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        address: Size,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+    ) -> InterpResult<'tcx, ()> {
+        todo!()
+    }
+
+    /**** Thread management ****/
+
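+    /// Inform GenMC that a new thread was created.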
+    pub(crate) fn handle_thread_create<'tcx>(
+        &self,
+        threads: &ThreadManager<'tcx>,
+        new_thread_id: ThreadId,
+    ) -> InterpResult<'tcx, ()> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
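+    /// Inform GenMC that the active thread joined on the given child thread.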
+    pub(crate) fn handle_thread_join<'tcx>(
+        &self,
+        active_thread_id: ThreadId,
+        child_thread_id: ThreadId,
+    ) -> InterpResult<'tcx, ()> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
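+    /// Inform GenMC that a thread's stack is empty, i.e., it has no more code to run.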
+    pub(crate) fn handle_thread_stack_empty(&self, thread_id: ThreadId) {
+        todo!()
+    }
+
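+    /// Inform GenMC that a thread has finished execution.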
+    pub(crate) fn handle_thread_finish<'tcx>(
+        &self,
+        threads: &ThreadManager<'tcx>,
+    ) -> InterpResult<'tcx, ()> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    /**** Scheduling functionality ****/
+
+    /// Ask for a scheduling decision. This should be called before every MIR instruction.
+    ///
+    /// GenMC may realize that the execution got stuck; this function then returns an `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`.
+    ///
+    /// This is **not** an error by itself! Treat this as if the program ended normally: `handle_execution_end` should be called next, which will determine whether there were any actual errors.
+    pub(crate) fn schedule_thread<'tcx>(
+        &self,
+        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+    ) -> InterpResult<'tcx, ThreadId> {
+        assert!(!self.allow_data_races.get());
+        todo!()
+    }
+
+    /**** Blocking instructions ****/
+
+    pub(crate) fn handle_verifier_assume<'tcx>(
+        &self,
+        machine: &MiriMachine<'tcx>,
+        condition: bool,
+    ) -> InterpResult<'tcx, ()> {
+        if condition { interp_ok(()) } else { self.handle_user_block(machine) }
+    }
+}
+
+impl VisitProvenance for GenmcCtx {
+    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
+        // We don't have any tags.
+    }
+}
+
+impl GenmcCtx {
+    fn handle_user_block<'tcx>(&self, machine: &MiriMachine<'tcx>) -> InterpResult<'tcx, ()> {
+        todo!()
+    }
+}
diff --git a/src/tools/miri/src/concurrency/init_once.rs b/src/tools/miri/src/concurrency/init_once.rs
index 534f02545bd..c26384f65f6 100644
--- a/src/tools/miri/src/concurrency/init_once.rs
+++ b/src/tools/miri/src/concurrency/init_once.rs
@@ -72,7 +72,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         init_once.status = InitOnceStatus::Complete;
 
         // Each complete happens-before the end of the wait
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race
                 .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
         }
@@ -99,7 +99,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         init_once.status = InitOnceStatus::Uninitialized;
 
         // Each complete happens-before the end of the wait
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race
                 .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
         }
diff --git a/src/tools/miri/src/concurrency/mod.rs b/src/tools/miri/src/concurrency/mod.rs
index c5082b4e40b..dd33f90f153 100644
--- a/src/tools/miri/src/concurrency/mod.rs
+++ b/src/tools/miri/src/concurrency/mod.rs
@@ -1,5 +1,6 @@
 pub mod cpu_affinity;
 pub mod data_race;
+mod data_race_handler;
 pub mod init_once;
 mod range_object_map;
 pub mod sync;
@@ -7,4 +8,19 @@ pub mod thread;
 mod vector_clock;
 pub mod weak_memory;
 
+// Import either the real genmc adapter or a dummy module.
+cfg_match! {
+    feature = "genmc" => {
+        mod genmc;
+        pub use self::genmc::{GenmcCtx, GenmcConfig};
+    }
+    _ => {
+        #[path = "genmc/dummy.rs"]
+        mod genmc_dummy;
+        use self::genmc_dummy as genmc;
+        pub use self::genmc::{GenmcCtx, GenmcConfig};
+    }
+}
+
+pub use self::data_race_handler::{AllocDataRaceHandler, GlobalDataRaceHandler};
 pub use self::vector_clock::VClock;
diff --git a/src/tools/miri/src/concurrency/sync.rs b/src/tools/miri/src/concurrency/sync.rs
index 268268848ed..64f34d3e21c 100644
--- a/src/tools/miri/src/concurrency/sync.rs
+++ b/src/tools/miri/src/concurrency/sync.rs
@@ -361,7 +361,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             mutex.owner = Some(thread);
         }
         mutex.lock_count = mutex.lock_count.strict_add(1);
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.acquire_clock(&mutex.clock, &this.machine.threads);
         }
     }
@@ -385,7 +385,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 mutex.owner = None;
                 // The mutex is completely unlocked. Try transferring ownership
                 // to another thread.
-                if let Some(data_race) = &this.machine.data_race {
+                if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                     data_race.release_clock(&this.machine.threads, |clock| {
                         mutex.clock.clone_from(clock)
                     });
@@ -477,7 +478,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let rwlock = &mut this.machine.sync.rwlocks[id];
         let count = rwlock.readers.entry(thread).or_insert(0);
         *count = count.strict_add(1);
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
         }
     }
@@ -502,7 +503,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
             Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
         }
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             // Add this to the shared-release clock of all concurrent readers.
             data_race.release_clock(&this.machine.threads, |clock| {
                 rwlock.clock_current_readers.join(clock)
@@ -565,7 +566,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         trace!("rwlock_writer_lock: {:?} now held by {:?}", id, thread);
         let rwlock = &mut this.machine.sync.rwlocks[id];
         rwlock.writer = Some(thread);
-        if let Some(data_race) = &this.machine.data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
         }
     }
@@ -585,7 +586,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             rwlock.writer = None;
             trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
             // Record release clock for next lock holder.
-            if let Some(data_race) = &this.machine.data_race {
+            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                 data_race.release_clock(&this.machine.threads, |clock| {
                     rwlock.clock_unlocked.clone_from(clock)
                 });
@@ -691,7 +692,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                     match unblock {
                         UnblockKind::Ready => {
                             // The condvar was signaled. Make sure we get the clock for that.
-                            if let Some(data_race) = &this.machine.data_race {
+                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                 data_race.acquire_clock(
                                     &this.machine.sync.condvars[condvar].clock,
                                     &this.machine.threads,
@@ -721,10 +722,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn condvar_signal(&mut self, id: CondvarId) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
         let condvar = &mut this.machine.sync.condvars[id];
-        let data_race = &this.machine.data_race;
 
         // Each condvar signal happens-before the end of the condvar wake
-        if let Some(data_race) = data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
         }
         let Some(waiter) = condvar.waiters.pop_front() else {
@@ -764,7 +764,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                         UnblockKind::Ready => {
                             let futex = futex_ref.0.borrow();
                             // Acquire the clock of the futex.
-                            if let Some(data_race) = &this.machine.data_race {
+                            if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
                                 data_race.acquire_clock(&futex.clock, &this.machine.threads);
                             }
                         },
@@ -792,10 +792,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx, usize> {
         let this = self.eval_context_mut();
         let mut futex = futex_ref.0.borrow_mut();
-        let data_race = &this.machine.data_race;
 
         // Each futex-wake happens-before the end of the futex wait
-        if let Some(data_race) = data_race {
+        if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
             data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
         }
 
diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs
index f8ff47e7113..8aa65e6cb61 100644
--- a/src/tools/miri/src/concurrency/thread.rs
+++ b/src/tools/miri/src/concurrency/thread.rs
@@ -6,7 +6,6 @@ use std::task::Poll;
 use std::time::{Duration, SystemTime};
 
 use either::Either;
-use rand::rngs::StdRng;
 use rand::seq::IteratorRandom;
 use rustc_abi::ExternAbi;
 use rustc_const_eval::CTRL_C_RECEIVED;
@@ -17,7 +16,7 @@ use rustc_middle::mir::Mutability;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::Span;
 
-use crate::concurrency::data_race;
+use crate::concurrency::GlobalDataRaceHandler;
 use crate::shims::tls;
 use crate::*;
 
@@ -584,13 +583,28 @@ impl<'tcx> ThreadManager<'tcx> {
     fn join_thread(
         &mut self,
         joined_thread_id: ThreadId,
-        data_race: Option<&mut data_race::GlobalState>,
+        data_race_handler: &mut GlobalDataRaceHandler,
     ) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status == ThreadJoinStatus::Detached {
             // On Windows this corresponds to joining on a closed handle.
             throw_ub_format!("trying to join a detached thread");
         }
 
+        fn after_join<'tcx>(
+            threads: &mut ThreadManager<'_>,
+            joined_thread_id: ThreadId,
+            data_race_handler: &mut GlobalDataRaceHandler,
+        ) -> InterpResult<'tcx> {
+            match data_race_handler {
+                GlobalDataRaceHandler::None => {}
+                GlobalDataRaceHandler::Vclocks(data_race) =>
+                    data_race.thread_joined(threads, joined_thread_id),
+                GlobalDataRaceHandler::Genmc(genmc_ctx) =>
+                    genmc_ctx.handle_thread_join(threads.active_thread, joined_thread_id)?,
+            }
+            interp_ok(())
+        }
+
         // Mark the joined thread as being joined so that we detect if other
         // threads try to join it.
         self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
@@ -610,18 +624,13 @@ impl<'tcx> ThreadManager<'tcx> {
                     }
                     |this, unblock: UnblockKind| {
                         assert_eq!(unblock, UnblockKind::Ready);
-                        if let Some(data_race) = &mut this.machine.data_race {
-                            data_race.thread_joined(&this.machine.threads, joined_thread_id);
-                        }
-                        interp_ok(())
+                        after_join(&mut this.machine.threads, joined_thread_id, &mut this.machine.data_race)
                     }
                 ),
             );
         } else {
             // The thread has already terminated - establish happens-before
-            if let Some(data_race) = data_race {
-                data_race.thread_joined(self, joined_thread_id);
-            }
+            after_join(self, joined_thread_id, data_race_handler)?;
         }
         interp_ok(())
     }
@@ -631,7 +640,7 @@ impl<'tcx> ThreadManager<'tcx> {
     fn join_thread_exclusive(
         &mut self,
         joined_thread_id: ThreadId,
-        data_race: Option<&mut data_race::GlobalState>,
+        data_race_handler: &mut GlobalDataRaceHandler,
     ) -> InterpResult<'tcx> {
         if self.threads[joined_thread_id].join_status == ThreadJoinStatus::Joined {
             throw_ub_format!("trying to join an already joined thread");
@@ -649,7 +658,7 @@ impl<'tcx> ThreadManager<'tcx> {
             "this thread already has threads waiting for its termination"
         );
 
-        self.join_thread(joined_thread_id, data_race)
+        self.join_thread(joined_thread_id, data_race_handler)
     }
 
     /// Set the name of the given thread.
@@ -699,77 +708,6 @@ impl<'tcx> ThreadManager<'tcx> {
             })
             .min()
     }
-
-    /// Decide which action to take next and on which thread.
-    ///
-    /// The currently implemented scheduling policy is the one that is commonly
-    /// used in stateless model checkers such as Loom: run the active thread as
-    /// long as we can and switch only when we have to (the active thread was
-    /// blocked, terminated, or has explicitly asked to be preempted).
-    fn schedule(
-        &mut self,
-        clock: &MonotonicClock,
-        rng: &mut StdRng,
-    ) -> InterpResult<'tcx, SchedulingAction> {
-        // This thread and the program can keep going.
-        if self.threads[self.active_thread].state.is_enabled() && !self.yield_active_thread {
-            // The currently active thread is still enabled, just continue with it.
-            return interp_ok(SchedulingAction::ExecuteStep);
-        }
-        // The active thread yielded or got terminated. Let's see if there are any timeouts to take
-        // care of. We do this *before* running any other thread, to ensure that timeouts "in the
-        // past" fire before any other thread can take an action. This ensures that for
-        // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
-        // abstime has already been passed at the time of the call".
-        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
-        let potential_sleep_time = self.next_callback_wait_time(clock);
-        if potential_sleep_time == Some(Duration::ZERO) {
-            return interp_ok(SchedulingAction::ExecuteTimeoutCallback);
-        }
-        // No callbacks immediately scheduled, pick a regular thread to execute.
-        // The active thread blocked or yielded. So we go search for another enabled thread.
-        // We build the list of threads by starting with the threads after the current one, followed by
-        // the threads before the current one and then the current thread itself (i.e., this iterator acts
-        // like `threads.rotate_left(self.active_thread.index() + 1)`. This ensures that if we pick the first
-        // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step.
-        let mut threads_iter = self
-            .threads
-            .iter_enumerated()
-            .skip(self.active_thread.index() + 1)
-            .chain(self.threads.iter_enumerated().take(self.active_thread.index() + 1))
-            .filter(|(_id, thread)| thread.state.is_enabled());
-        // Pick a new thread, and switch to it.
-        let new_thread =
-            if self.fixed_scheduling { threads_iter.next() } else { threads_iter.choose(rng) };
-
-        if let Some((id, _thread)) = new_thread {
-            if self.active_thread != id {
-                info!(
-                    "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
-                    self.get_thread_display_name(id),
-                    self.get_thread_display_name(self.active_thread)
-                );
-                self.active_thread = id;
-            }
-        }
-        // This completes the `yield`, if any was requested.
-        self.yield_active_thread = false;
-
-        if self.threads[self.active_thread].state.is_enabled() {
-            return interp_ok(SchedulingAction::ExecuteStep);
-        }
-        // We have not found a thread to execute.
-        if self.threads.iter().all(|thread| thread.state.is_terminated()) {
-            unreachable!("all threads terminated without the main thread terminating?!");
-        } else if let Some(sleep_time) = potential_sleep_time {
-            // All threads are currently blocked, but we have unexecuted
-            // timeout_callbacks, which may unblock some of the threads. Hence,
-            // sleep until the first callback.
-            interp_ok(SchedulingAction::Sleep(sleep_time))
-        } else {
-            throw_machine_stop!(TerminationInfo::Deadlock);
-        }
-    }
 }
 
 impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
@@ -817,6 +755,11 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     #[inline]
     fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
         let this = self.eval_context_mut();
+        // Inform GenMC that a thread has finished all user code. GenMC needs to know this for scheduling.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            let thread_id = this.active_thread();
+            genmc_ctx.handle_thread_stack_empty(thread_id);
+        }
         let mut callback = this
             .active_thread_mut()
             .on_stack_empty
@@ -826,6 +769,102 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
         this.active_thread_mut().on_stack_empty = Some(callback);
         interp_ok(res)
     }
+
+    /// Decide which action to take next and on which thread.
+    ///
+    /// The currently implemented scheduling policy is the one that is commonly
+    /// used in stateless model checkers such as Loom: run the active thread as
+    /// long as we can and switch only when we have to (the active thread was
+    /// blocked, terminated, or has explicitly asked to be preempted).
+    ///
+    /// If GenMC mode is active, the scheduling is instead handled by GenMC.
+    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
+        let this = self.eval_context_mut();
+        // In GenMC mode, we let GenMC do the scheduling.
+        if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
+            let next_thread_id = genmc_ctx.schedule_thread(this)?;
+
+            let thread_manager = &mut this.machine.threads;
+            thread_manager.active_thread = next_thread_id;
+            thread_manager.yield_active_thread = false;
+
+            assert!(thread_manager.threads[thread_manager.active_thread].state.is_enabled());
+            return interp_ok(SchedulingAction::ExecuteStep);
+        }
+
+        // We are not in GenMC mode, so we control the schedule.
+        let thread_manager = &mut this.machine.threads;
+        let clock = &this.machine.monotonic_clock;
+        let rng = this.machine.rng.get_mut();
+        // This thread and the program can keep going.
+        if thread_manager.threads[thread_manager.active_thread].state.is_enabled()
+            && !thread_manager.yield_active_thread
+        {
+            // The currently active thread is still enabled, just continue with it.
+            return interp_ok(SchedulingAction::ExecuteStep);
+        }
+        // The active thread yielded or got terminated. Let's see if there are any timeouts to take
+        // care of. We do this *before* running any other thread, to ensure that timeouts "in the
+        // past" fire before any other thread can take an action. This ensures that for
+        // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
+        // abstime has already been passed at the time of the call".
+        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
+        let potential_sleep_time = thread_manager.next_callback_wait_time(clock);
+        if potential_sleep_time == Some(Duration::ZERO) {
+            return interp_ok(SchedulingAction::ExecuteTimeoutCallback);
+        }
+        // No callbacks immediately scheduled, pick a regular thread to execute.
+        // The active thread blocked or yielded. So we go search for another enabled thread.
+        // We build the list of threads by starting with the threads after the current one, followed by
+        // the threads before the current one and then the current thread itself (i.e., this iterator acts
+        // like `threads.rotate_left(thread_manager.active_thread.index() + 1)`). This ensures that if we pick the first
+        // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step.
+        let mut threads_iter = thread_manager
+            .threads
+            .iter_enumerated()
+            .skip(thread_manager.active_thread.index() + 1)
+            .chain(
+                thread_manager
+                    .threads
+                    .iter_enumerated()
+                    .take(thread_manager.active_thread.index() + 1),
+            )
+            .filter(|(_id, thread)| thread.state.is_enabled());
+        // Pick a new thread, and switch to it.
+        let new_thread = if thread_manager.fixed_scheduling {
+            threads_iter.next()
+        } else {
+            threads_iter.choose(rng)
+        };
+
+        if let Some((id, _thread)) = new_thread {
+            if thread_manager.active_thread != id {
+                info!(
+                    "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
+                    thread_manager.get_thread_display_name(id),
+                    thread_manager.get_thread_display_name(thread_manager.active_thread)
+                );
+                thread_manager.active_thread = id;
+            }
+        }
+        // This completes the `yield`, if any was requested.
+        thread_manager.yield_active_thread = false;
+
+        if thread_manager.threads[thread_manager.active_thread].state.is_enabled() {
+            return interp_ok(SchedulingAction::ExecuteStep);
+        }
+        // We have not found a thread to execute.
+        if thread_manager.threads.iter().all(|thread| thread.state.is_terminated()) {
+            unreachable!("all threads terminated without the main thread terminating?!");
+        } else if let Some(sleep_time) = potential_sleep_time {
+            // All threads are currently blocked, but we have unexecuted
+            // timeout_callbacks, which may unblock some of the threads. Hence,
+            // sleep until the first callback.
+            interp_ok(SchedulingAction::Sleep(sleep_time))
+        } else {
+            throw_machine_stop!(TerminationInfo::Deadlock);
+        }
+    }
 }
 
 // Public interface to thread management.
@@ -891,10 +930,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             Box::new(move |m| state.on_stack_empty(m))
         });
         let current_span = this.machine.current_span();
-        if let Some(data_race) = &mut this.machine.data_race {
-            data_race.thread_created(&this.machine.threads, new_thread_id, current_span);
+        match &mut this.machine.data_race {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Vclocks(data_race) =>
+                data_race.thread_created(&this.machine.threads, new_thread_id, current_span),
+            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
+                genmc_ctx.handle_thread_create(&this.machine.threads, new_thread_id)?,
         }
-
         // Write the current thread-id, switch to the next thread later
         // to treat this write operation as occurring on the current thread.
         if let Some(thread_info_place) = thread {
@@ -941,12 +983,17 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
     fn terminate_active_thread(&mut self, tls_alloc_action: TlsAllocAction) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
+
         // Mark thread as terminated.
         let thread = this.active_thread_mut();
         assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
         thread.state = ThreadState::Terminated;
-        if let Some(ref mut data_race) = this.machine.data_race {
-            data_race.thread_terminated(&this.machine.threads);
+        match &mut this.machine.data_race {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Vclocks(data_race) =>
+                data_race.thread_terminated(&this.machine.threads),
+            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
+                genmc_ctx.handle_thread_finish(&this.machine.threads)?,
         }
         // Deallocate TLS.
         let gone_thread = this.active_thread();
@@ -1062,7 +1109,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     #[inline]
     fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        this.machine.threads.join_thread(joined_thread_id, this.machine.data_race.as_mut())?;
+        this.machine.threads.join_thread(joined_thread_id, &mut this.machine.data_race)?;
         interp_ok(())
     }
 
@@ -1071,7 +1118,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
         this.machine
             .threads
-            .join_thread_exclusive(joined_thread_id, this.machine.data_race.as_mut())?;
+            .join_thread_exclusive(joined_thread_id, &mut this.machine.data_race)?;
         interp_ok(())
     }
 
@@ -1165,8 +1212,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 this.machine.handle_abnormal_termination();
                 throw_machine_stop!(TerminationInfo::Interrupted);
             }
-            let rng = this.machine.rng.get_mut();
-            match this.machine.threads.schedule(&this.machine.monotonic_clock, rng)? {
+            match this.schedule()? {
                 SchedulingAction::ExecuteStep => {
                     if !this.step()? {
                         // See if this thread can do something else.
diff --git a/src/tools/miri/src/concurrency/vector_clock.rs b/src/tools/miri/src/concurrency/vector_clock.rs
index 34572663429..78858fcedae 100644
--- a/src/tools/miri/src/concurrency/vector_clock.rs
+++ b/src/tools/miri/src/concurrency/vector_clock.rs
@@ -40,8 +40,8 @@ impl From<u32> for VectorIdx {
     }
 }
 
-/// The size of the vector-clock to store inline
-/// clock vectors larger than this will be stored on the heap
+/// The size of the vector clock to store inline.
+/// Clock vectors larger than this will be stored on the heap.
 const SMALL_VECTOR: usize = 4;
 
 /// The time-stamps recorded in the data-race detector consist of both
@@ -136,7 +136,7 @@ impl Ord for VTimestamp {
 pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);
 
 impl VClock {
-    /// Create a new vector-clock containing all zeros except
+    /// Create a new vector clock containing all zeros except
     /// for a value at the given index
     pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
         if timestamp.time() == 0 {
@@ -185,8 +185,8 @@ impl VClock {
         }
     }
 
-    // Join the two vector-clocks together, this
-    // sets each vector-element to the maximum value
+    // Join the two vector clocks together, this
+    // sets each vector element to the maximum value
     // of that element in either of the two source elements.
     pub fn join(&mut self, other: &Self) {
         let rhs_slice = other.as_slice();
diff --git a/src/tools/miri/src/concurrency/weak_memory.rs b/src/tools/miri/src/concurrency/weak_memory.rs
index 1a3e9614f8a..95c010be2fd 100644
--- a/src/tools/miri/src/concurrency/weak_memory.rs
+++ b/src/tools/miri/src/concurrency/weak_memory.rs
@@ -77,7 +77,7 @@
 // (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.cc#L160-L167)
 // and here.
 //
-// 4. W_SC ; R_SC case requires the SC load to ignore all but last store maked SC (stores not marked SC are not
+// 4. W_SC ; R_SC case requires the SC load to ignore all but last store marked SC (stores not marked SC are not
 // affected). But this rule is applied to all loads in ReadsFromSet from the paper (last two lines of code), not just SC load.
 // This is implemented correctly in tsan11
 // (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.cc#L295)
@@ -88,9 +88,11 @@ use std::collections::VecDeque;
 
 use rustc_data_structures::fx::FxHashMap;
 
+use super::AllocDataRaceHandler;
 use super::data_race::{GlobalState as DataRaceState, ThreadClockSet};
 use super::range_object_map::{AccessType, RangeObjectMap};
 use super::vector_clock::{VClock, VTimestamp, VectorIdx};
+use crate::concurrency::GlobalDataRaceHandler;
 use crate::*;
 
 pub type AllocState = StoreBufferAlloc;
@@ -459,8 +461,13 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?;
         if let (
-            crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::MiriMachine { data_race: Some(global), threads, .. },
+            crate::AllocExtra {
+                data_race: AllocDataRaceHandler::Vclocks(_, Some(alloc_buffers)),
+                ..
+            },
+            crate::MiriMachine {
+                data_race: GlobalDataRaceHandler::Vclocks(global), threads, ..
+            },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
             if atomic == AtomicRwOrd::SeqCst {
@@ -484,9 +491,11 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx, Option<Scalar>> {
         let this = self.eval_context_ref();
         'fallback: {
-            if let Some(global) = &this.machine.data_race {
+            if let Some(global) = this.machine.data_race.as_vclocks_ref() {
                 let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-                if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
+                if let Some(alloc_buffers) =
+                    this.get_alloc_extra(alloc_id)?.data_race.as_weak_memory_ref()
+                {
                     if atomic == AtomicReadOrd::SeqCst {
                         global.sc_read(&this.machine.threads);
                     }
@@ -534,8 +543,13 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr(), 0)?;
         if let (
-            crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::MiriMachine { data_race: Some(global), threads, .. },
+            crate::AllocExtra {
+                data_race: AllocDataRaceHandler::Vclocks(_, Some(alloc_buffers)),
+                ..
+            },
+            crate::MiriMachine {
+                data_race: GlobalDataRaceHandler::Vclocks(global), threads, ..
+            },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
             if atomic == AtomicWriteOrd::SeqCst {
@@ -561,13 +575,15 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
 
-        if let Some(global) = &this.machine.data_race {
+        if let Some(global) = this.machine.data_race.as_vclocks_ref() {
             if atomic == AtomicReadOrd::SeqCst {
                 global.sc_read(&this.machine.threads);
             }
             let size = place.layout.size;
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-            if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
+            if let Some(alloc_buffers) =
+                this.get_alloc_extra(alloc_id)?.data_race.as_weak_memory_ref()
+            {
                 let Some(buffer) =
                     alloc_buffers.get_store_buffer(alloc_range(base_offset, size))?
                 else {
diff --git a/src/tools/miri/src/diagnostics.rs b/src/tools/miri/src/diagnostics.rs
index 014b1299f2d..89768077d87 100644
--- a/src/tools/miri/src/diagnostics.rs
+++ b/src/tools/miri/src/diagnostics.rs
@@ -31,6 +31,8 @@ pub enum TerminationInfo {
     },
     Int2PtrWithStrictProvenance,
     Deadlock,
+    /// In GenMC mode, an execution can get stuck in certain cases. This is not an error.
+    GenmcStuckExecution,
     MultipleSymbolDefinitions {
         link_name: Symbol,
         first: SpanData,
@@ -75,6 +77,7 @@ impl fmt::Display for TerminationInfo {
             StackedBorrowsUb { msg, .. } => write!(f, "{msg}"),
             TreeBorrowsUb { title, .. } => write!(f, "{title}"),
             Deadlock => write!(f, "the evaluated program deadlocked"),
+            GenmcStuckExecution => write!(f, "GenMC determined that the execution got stuck"),
             MultipleSymbolDefinitions { link_name, .. } =>
                 write!(f, "multiple definitions of symbol `{link_name}`"),
             SymbolShimClashing { link_name, .. } =>
@@ -235,6 +238,12 @@ pub fn report_error<'tcx>(
             StackedBorrowsUb { .. } | TreeBorrowsUb { .. } | DataRace { .. } =>
                 Some("Undefined Behavior"),
             Deadlock => Some("deadlock"),
+            GenmcStuckExecution => {
+                // This case should only happen in GenMC mode. We treat it like a normal program exit.
+                assert!(ecx.machine.data_race.as_genmc_ref().is_some());
+                tracing::info!("GenMC: found stuck execution");
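+                // Return exit code 0 and still run the leak check.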
+                return Some((0, true));
+            }
             MultipleSymbolDefinitions { .. } | SymbolShimClashing { .. } => None,
         };
         #[rustfmt::skip]
diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs
index ef8d9dab15a..f41062dae45 100644
--- a/src/tools/miri/src/eval.rs
+++ b/src/tools/miri/src/eval.rs
@@ -3,6 +3,7 @@
 use std::ffi::{OsStr, OsString};
 use std::panic::{self, AssertUnwindSafe};
 use std::path::PathBuf;
+use std::rc::Rc;
 use std::task::Poll;
 use std::{iter, thread};
 
@@ -14,6 +15,7 @@ use rustc_middle::ty::layout::{LayoutCx, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_session::config::EntryFnType;
 
+use crate::concurrency::GenmcCtx;
 use crate::concurrency::thread::TlsAllocAction;
 use crate::diagnostics::report_leaks;
 use crate::shims::tls;
@@ -117,16 +119,18 @@ pub struct MiriConfig {
     pub args: Vec<String>,
     /// The seed to use when non-determinism or randomness are required (e.g. ptr-to-int cast, `getrandom()`).
     pub seed: Option<u64>,
-    /// The stacked borrows pointer ids to report about
+    /// The stacked borrows pointer ids to report about.
     pub tracked_pointer_tags: FxHashSet<BorTag>,
     /// The allocation ids to report about.
     pub tracked_alloc_ids: FxHashSet<AllocId>,
     /// For the tracked alloc ids, also report read/write accesses.
     pub track_alloc_accesses: bool,
-    /// Determine if data race detection should be enabled
+    /// Determine if data race detection should be enabled.
     pub data_race_detector: bool,
-    /// Determine if weak memory emulation should be enabled. Requires data race detection to be enabled
+    /// Determine if weak memory emulation should be enabled. Requires data race detection to be enabled.
     pub weak_memory_emulation: bool,
+    /// Determine if we are running in GenMC mode. In this mode, Miri will explore multiple concurrent executions of the given program.
+    pub genmc_mode: bool,
     /// Track when an outdated (weak memory) load happens.
     pub track_outdated_loads: bool,
     /// Rate of spurious failures for compare_exchange_weak atomic operations,
@@ -137,7 +141,7 @@ pub struct MiriConfig {
     pub measureme_out: Option<String>,
     /// Which style to use for printing backtraces.
     pub backtrace_style: BacktraceStyle,
-    /// Which provenance to use for int2ptr casts
+    /// Which provenance to use for int2ptr casts.
     pub provenance_mode: ProvenanceMode,
     /// Whether to ignore any output by the program. This is helpful when debugging miri
     /// as its messages don't get intermingled with the program messages.
@@ -155,7 +159,7 @@ pub struct MiriConfig {
     pub gc_interval: u32,
     /// The number of CPUs to be reported by miri.
     pub num_cpus: u32,
-    /// Requires Miri to emulate pages of a certain size
+    /// Requires Miri to emulate pages of a certain size.
     pub page_size: Option<u64>,
     /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
     pub collect_leak_backtraces: bool,
@@ -186,6 +190,7 @@ impl Default for MiriConfig {
             track_alloc_accesses: false,
             data_race_detector: true,
             weak_memory_emulation: true,
+            genmc_mode: false,
             track_outdated_loads: false,
             cmpxchg_weak_failure_rate: 0.8, // 80%
             measureme_out: None,
@@ -233,16 +238,22 @@ impl<'tcx> MainThreadState<'tcx> {
                 match state.on_stack_empty(this)? {
                     Poll::Pending => {} // just keep going
                     Poll::Ready(()) => {
-                        // Give background threads a chance to finish by yielding the main thread a
-                        // couple of times -- but only if we would also preempt threads randomly.
-                        if this.machine.preemption_rate > 0.0 {
-                            // There is a non-zero chance they will yield back to us often enough to
-                            // make Miri terminate eventually.
-                            *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN };
-                        } else {
-                            // The other threads did not get preempted, so no need to yield back to
-                            // them.
+                        if this.machine.data_race.as_genmc_ref().is_some() {
+                            // In GenMC mode, we don't yield at the end of the main thread.
+                            // Instead, the `GenmcCtx` will ensure that unfinished threads get a chance to run at this point.
                             *self = Done;
+                        } else {
+                            // Give background threads a chance to finish by yielding the main thread a
+                            // couple of times -- but only if we would also preempt threads randomly.
+                            if this.machine.preemption_rate > 0.0 {
+                                // There is a non-zero chance they will yield back to us often enough to
+                                // make Miri terminate eventually.
+                                *self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN };
+                            } else {
+                                // The other threads did not get preempted, so no need to yield back to
+                                // them.
+                                *self = Done;
+                            }
                         }
                     }
                 },
@@ -294,11 +305,16 @@ pub fn create_ecx<'tcx>(
     entry_id: DefId,
     entry_type: MiriEntryFnType,
     config: &MiriConfig,
+    genmc_ctx: Option<Rc<GenmcCtx>>,
 ) -> InterpResult<'tcx, InterpCx<'tcx, MiriMachine<'tcx>>> {
     let typing_env = ty::TypingEnv::fully_monomorphized();
     let layout_cx = LayoutCx::new(tcx, typing_env);
-    let mut ecx =
-        InterpCx::new(tcx, rustc_span::DUMMY_SP, typing_env, MiriMachine::new(config, layout_cx));
+    let mut ecx = InterpCx::new(
+        tcx,
+        rustc_span::DUMMY_SP,
+        typing_env,
+        MiriMachine::new(config, layout_cx, genmc_ctx),
+    );
 
     // Some parts of initialization require a full `InterpCx`.
     MiriMachine::late_init(&mut ecx, config, {
@@ -452,12 +468,17 @@ pub fn eval_entry<'tcx>(
     tcx: TyCtxt<'tcx>,
     entry_id: DefId,
     entry_type: MiriEntryFnType,
-    config: MiriConfig,
+    config: &MiriConfig,
+    genmc_ctx: Option<Rc<GenmcCtx>>,
 ) -> Option<i32> {
     // Copy setting before we move `config`.
     let ignore_leaks = config.ignore_leaks;
 
-    let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config).report_err() {
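+    // Inform GenMC that a new execution of the program is about to start.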
+    if let Some(genmc_ctx) = &genmc_ctx {
+        genmc_ctx.handle_execution_start();
+    }
+
+    let mut ecx = match create_ecx(tcx, entry_id, entry_type, config, genmc_ctx).report_err() {
         Ok(v) => v,
         Err(err) => {
             let (kind, backtrace) = err.into_parts();
@@ -476,9 +497,19 @@ pub fn eval_entry<'tcx>(
     // `Ok` can never happen; the interpreter loop always exits with an "error"
     // (but that "error" might be just "regular program termination").
     let Err(err) = res.report_err();
+
     // Show diagnostic, if any.
     let (return_code, leak_check) = report_error(&ecx, err)?;
 
+    // We inform GenMC that the execution is complete.
+    if let Some(genmc_ctx) = ecx.machine.data_race.as_genmc_ref()
+        && let Err(error) = genmc_ctx.handle_execution_end(&ecx)
+    {
+        // FIXME(GenMC): Improve error reporting.
+        tcx.dcx().err(format!("GenMC returned an error: \"{error}\""));
+        return None;
+    }
+
     // If we get here there was no fatal error.
 
     // Possibly check for memory leaks.
diff --git a/src/tools/miri/src/intrinsics/atomic.rs b/src/tools/miri/src/intrinsics/atomic.rs
index dcafa7b6cab..2eb8086f578 100644
--- a/src/tools/miri/src/intrinsics/atomic.rs
+++ b/src/tools/miri/src/intrinsics/atomic.rs
@@ -149,7 +149,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
 
         // Perform regular load.
         let val = this.read_scalar(val)?;
-        // Perform atomic store
+        // Perform atomic store.
         this.write_scalar_atomic(val, &place, atomic)?;
         interp_ok(())
     }
@@ -161,7 +161,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let [] = check_intrinsic_arg_count(args)?;
         let _ = atomic;
-        //FIXME: compiler fences are currently ignored
+        // FIXME: compiler fences are currently ignored. FIXME(GenMC): they are also ignored in GenMC mode.
         interp_ok(())
     }
 
@@ -199,23 +199,16 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
             span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
         }
 
-        match atomic_op {
-            AtomicOp::Min => {
-                let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
-                this.write_immediate(*old, dest)?; // old value is returned
-                interp_ok(())
-            }
-            AtomicOp::Max => {
-                let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
-                this.write_immediate(*old, dest)?; // old value is returned
-                interp_ok(())
-            }
-            AtomicOp::MirOp(op, not) => {
-                let old = this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?;
-                this.write_immediate(*old, dest)?; // old value is returned
-                interp_ok(())
-            }
-        }
+        let old = match atomic_op {
+            AtomicOp::Min =>
+                this.atomic_min_max_scalar(&place, rhs, /* min */ true, atomic)?,
+            AtomicOp::Max =>
+                this.atomic_min_max_scalar(&place, rhs, /* min */ false, atomic)?,
+            AtomicOp::MirOp(op, not) =>
+                this.atomic_rmw_op_immediate(&place, &rhs, op, not, atomic)?,
+        };
+        this.write_immediate(*old, dest)?; // old value is returned
+        interp_ok(())
     }
 
     fn atomic_exchange(
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index e03611e9b50..58b93ae82a1 100644
--- a/src/tools/miri/src/lib.rs
+++ b/src/tools/miri/src/lib.rs
@@ -133,6 +133,7 @@ pub use crate::concurrency::thread::{
     BlockReason, DynUnblockCallback, EvalContextExt as _, StackEmptyCallback, ThreadId,
     ThreadManager, TimeoutAnchor, TimeoutClock, UnblockKind,
 };
+pub use crate::concurrency::{GenmcConfig, GenmcCtx};
 pub use crate::diagnostics::{
     EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo, report_error,
 };
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 201b950e29c..6060d41dac5 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -6,6 +6,7 @@ use std::borrow::Cow;
 use std::cell::{Cell, RefCell};
 use std::collections::hash_map::Entry;
 use std::path::Path;
+use std::rc::Rc;
 use std::{fmt, process};
 
 use rand::rngs::StdRng;
@@ -27,9 +28,10 @@ use rustc_span::def_id::{CrateNum, DefId};
 use rustc_span::{Span, SpanData, Symbol};
 use rustc_target::callconv::FnAbi;
 
+use crate::alloc_addresses::EvalContextExt;
 use crate::concurrency::cpu_affinity::{self, CpuAffinityMask};
 use crate::concurrency::data_race::{self, NaReadType, NaWriteType};
-use crate::concurrency::weak_memory;
+use crate::concurrency::{AllocDataRaceHandler, GenmcCtx, GlobalDataRaceHandler, weak_memory};
 use crate::*;
 
 /// First real-time signal.
@@ -332,12 +334,10 @@ impl ProvenanceExtra {
 pub struct AllocExtra<'tcx> {
     /// Global state of the borrow tracker, if enabled.
     pub borrow_tracker: Option<borrow_tracker::AllocState>,
-    /// Data race detection via the use of a vector-clock.
-    /// This is only added if it is enabled.
-    pub data_race: Option<data_race::AllocState>,
-    /// Weak memory emulation via the use of store buffers.
-    /// This is only added if it is enabled.
-    pub weak_memory: Option<weak_memory::AllocState>,
+    /// Extra state for data race detection.
+    ///
+    /// Invariant: The enum variant must match the enum variant in the `data_race` field on `MiriMachine`.
+    pub data_race: AllocDataRaceHandler,
     /// A backtrace to where this allocation was allocated.
     /// As this is recorded for leak reports, it only exists
     /// if this allocation is leakable. The backtrace is not
@@ -360,11 +360,10 @@ impl<'tcx> Clone for AllocExtra<'tcx> {
 
 impl VisitProvenance for AllocExtra<'_> {
     fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
-        let AllocExtra { borrow_tracker, data_race, weak_memory, backtrace: _, sync: _ } = self;
+        let AllocExtra { borrow_tracker, data_race, backtrace: _, sync: _ } = self;
 
         borrow_tracker.visit_provenance(visit);
         data_race.visit_provenance(visit);
-        weak_memory.visit_provenance(visit);
     }
 }
 
@@ -447,8 +446,12 @@ pub struct MiriMachine<'tcx> {
     /// Global data for borrow tracking.
     pub borrow_tracker: Option<borrow_tracker::GlobalState>,
 
-    /// Data race detector global data.
-    pub data_race: Option<data_race::GlobalState>,
+    /// Depending on settings, this will be `None`,
+    /// global data for a data race detector,
+    /// or the context required for running in GenMC mode.
+    ///
+    /// Invariant: The enum variant must match the enum variant of `AllocDataRaceHandler` in the `data_race` field of all `AllocExtra`.
+    pub data_race: GlobalDataRaceHandler,
 
     /// Ptr-int-cast module global data.
     pub alloc_addresses: alloc_addresses::GlobalState,
@@ -544,9 +547,6 @@ pub struct MiriMachine<'tcx> {
     /// Corresponds to -Zmiri-mute-stdout-stderr and doesn't write the output but acts as if it succeeded.
     pub(crate) mute_stdout_stderr: bool,
 
-    /// Whether weak memory emulation is enabled
-    pub(crate) weak_memory: bool,
-
     /// The probability of the active thread being preempted at the end of each basic block.
     pub(crate) preemption_rate: f64,
 
@@ -617,7 +617,11 @@ pub struct MiriMachine<'tcx> {
 }
 
 impl<'tcx> MiriMachine<'tcx> {
-    pub(crate) fn new(config: &MiriConfig, layout_cx: LayoutCx<'tcx>) -> Self {
+    pub(crate) fn new(
+        config: &MiriConfig,
+        layout_cx: LayoutCx<'tcx>,
+        genmc_ctx: Option<Rc<GenmcCtx>>,
+    ) -> Self {
         let tcx = layout_cx.tcx();
         let local_crates = helpers::get_local_crates(tcx);
         let layouts =
@@ -636,7 +640,14 @@ impl<'tcx> MiriMachine<'tcx> {
         });
         let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
         let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
-        let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
+        let data_race = if config.genmc_mode {
+            // `genmc_ctx` persists across executions, so we don't create a new one here.
+            GlobalDataRaceHandler::Genmc(genmc_ctx.unwrap())
+        } else if config.data_race_detector {
+            GlobalDataRaceHandler::Vclocks(Box::new(data_race::GlobalState::new(config)))
+        } else {
+            GlobalDataRaceHandler::None
+        };
         // Determine page size, stack address, and stack size.
         // These values are mostly meaningless, but the stack address is also where we start
         // allocating physical integer addresses for all allocations.
@@ -709,7 +720,6 @@ impl<'tcx> MiriMachine<'tcx> {
             check_alignment: config.check_alignment,
             cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
             mute_stdout_stderr: config.mute_stdout_stderr,
-            weak_memory: config.weak_memory_emulation,
             preemption_rate: config.preemption_rate,
             report_progress: config.report_progress,
             basic_block_count: 0,
@@ -835,16 +845,25 @@ impl<'tcx> MiriMachine<'tcx> {
             .as_ref()
             .map(|bt| bt.borrow_mut().new_allocation(id, size, kind, &ecx.machine));
 
-        let data_race = ecx.machine.data_race.as_ref().map(|data_race| {
-            data_race::AllocState::new_allocation(
-                data_race,
-                &ecx.machine.threads,
-                size,
-                kind,
-                ecx.machine.current_span(),
-            )
-        });
-        let weak_memory = ecx.machine.weak_memory.then(weak_memory::AllocState::new_allocation);
+        let data_race = match &ecx.machine.data_race {
+            GlobalDataRaceHandler::None => AllocDataRaceHandler::None,
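+            // Vector-clock race detection also carries the optional weak-memory store buffers.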
+            GlobalDataRaceHandler::Vclocks(data_race) =>
+                AllocDataRaceHandler::Vclocks(
+                    data_race::AllocState::new_allocation(
+                        data_race,
+                        &ecx.machine.threads,
+                        size,
+                        kind,
+                        ecx.machine.current_span(),
+                    ),
+                    data_race.weak_memory.then(weak_memory::AllocState::new_allocation),
+                ),
+            GlobalDataRaceHandler::Genmc(_genmc_ctx) => {
+                // GenMC learns about new allocations directly from the alloc_addresses module,
+                // since it has to be able to control the address at which they are placed.
+                AllocDataRaceHandler::Genmc
+            }
+        };
 
         // If an allocation is leaked, we want to report a backtrace to indicate where it was
         // allocated. We don't need to record a backtrace for allocations which are allowed to
@@ -862,13 +881,7 @@ impl<'tcx> MiriMachine<'tcx> {
                 .insert(id, (ecx.machine.current_span(), None));
         }
 
-        interp_ok(AllocExtra {
-            borrow_tracker,
-            data_race,
-            weak_memory,
-            backtrace,
-            sync: FxHashMap::default(),
-        })
+        interp_ok(AllocExtra { borrow_tracker, data_race, backtrace, sync: FxHashMap::default() })
     }
 }
 
@@ -909,7 +922,6 @@ impl VisitProvenance for MiriMachine<'_> {
             check_alignment: _,
             cmpxchg_weak_failure_rate: _,
             mute_stdout_stderr: _,
-            weak_memory: _,
             preemption_rate: _,
             report_progress: _,
             basic_block_count: _,
@@ -1378,7 +1390,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         _tcx: TyCtxtAt<'tcx>,
         machine: &Self,
         alloc_extra: &AllocExtra<'tcx>,
-        _ptr: Pointer,
+        ptr: Pointer,
         (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
         range: AllocRange,
     ) -> InterpResult<'tcx> {
@@ -1386,15 +1398,25 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
             machine
                 .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Read));
         }
-        if let Some(data_race) = &alloc_extra.data_race {
-            data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
+        // The order of checks is deliberate, to prefer reporting a data race over a borrow tracker error.
+        match &machine.data_race {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
+                genmc_ctx.memory_load(machine, ptr.addr(), range.size)?,
+            GlobalDataRaceHandler::Vclocks(_data_race) => {
+                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &alloc_extra.data_race
+                else {
+                    unreachable!();
+                };
+                data_race.read(alloc_id, range, NaReadType::Read, None, machine)?;
+                if let Some(weak_memory) = weak_memory {
+                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
+                }
+            }
         }
         if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
             borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
         }
-        if let Some(weak_memory) = &alloc_extra.weak_memory {
-            weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
-        }
         interp_ok(())
     }
 
@@ -1403,7 +1425,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         _tcx: TyCtxtAt<'tcx>,
         machine: &mut Self,
         alloc_extra: &mut AllocExtra<'tcx>,
-        _ptr: Pointer,
+        ptr: Pointer,
         (alloc_id, prov_extra): (AllocId, Self::ProvenanceExtra),
         range: AllocRange,
     ) -> InterpResult<'tcx> {
@@ -1411,15 +1433,26 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
             machine
                 .emit_diagnostic(NonHaltingDiagnostic::AccessedAlloc(alloc_id, AccessKind::Write));
         }
-        if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
+        match &machine.data_race {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
+                genmc_ctx.memory_store(machine, ptr.addr(), range.size)?;
+            }
+            GlobalDataRaceHandler::Vclocks(_global_state) => {
+                let AllocDataRaceHandler::Vclocks(data_race, weak_memory) =
+                    &mut alloc_extra.data_race
+                else {
+                    unreachable!()
+                };
+                data_race.write(alloc_id, range, NaWriteType::Write, None, machine)?;
+                if let Some(weak_memory) = weak_memory {
+                    weak_memory.memory_accessed(range, machine.data_race.as_vclocks_ref().unwrap());
+                }
+            }
         }
         if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
             borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
         }
-        if let Some(weak_memory) = &alloc_extra.weak_memory {
-            weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
-        }
         interp_ok(())
     }
 
@@ -1428,7 +1461,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         _tcx: TyCtxtAt<'tcx>,
         machine: &mut Self,
         alloc_extra: &mut AllocExtra<'tcx>,
-        _ptr: Pointer,
+        ptr: Pointer,
         (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
         size: Size,
         align: Align,
@@ -1437,14 +1470,20 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         if machine.tracked_alloc_ids.contains(&alloc_id) {
             machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
         }
-        if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.write(
-                alloc_id,
-                alloc_range(Size::ZERO, size),
-                NaWriteType::Deallocate,
-                None,
-                machine,
-            )?;
+        match &machine.data_race {
+            GlobalDataRaceHandler::None => {}
+            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
+                genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?,
+            GlobalDataRaceHandler::Vclocks(_global_state) => {
+                let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap();
+                data_race.write(
+                    alloc_id,
+                    alloc_range(Size::ZERO, size),
+                    NaWriteType::Deallocate,
+                    None,
+                    machine,
+                )?;
+            }
         }
         if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
             borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, size, machine)?;
@@ -1531,7 +1570,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
             timing,
             is_user_relevant: ecx.machine.is_user_relevant(&frame),
             salt: ecx.machine.rng.borrow_mut().random_range(0..ADDRS_PER_ANON_GLOBAL),
-            data_race: ecx.machine.data_race.as_ref().map(|_| data_race::FrameState::default()),
+            data_race: ecx
+                .machine
+                .data_race
+                .as_vclocks_ref()
+                .map(|_| data_race::FrameState::default()),
         };
 
         interp_ok(frame.with_extra(extra))
@@ -1677,7 +1720,11 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         if let Some(data_race) =
             &machine.threads.active_thread_stack().last().unwrap().extra.data_race
         {
-            data_race.local_moved_to_memory(local, alloc_info.data_race.as_mut().unwrap(), machine);
+            data_race.local_moved_to_memory(
+                local,
+                alloc_info.data_race.as_vclocks_mut().unwrap(),
+                machine,
+            );
         }
         interp_ok(())
     }