about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--src/tools/miri/Cargo.toml2
-rw-r--r--src/tools/miri/README.md2
-rw-r--r--src/tools/miri/rust-version2
-rw-r--r--src/tools/miri/src/alloc_addresses/mod.rs213
-rw-r--r--src/tools/miri/src/concurrency/init_once.rs10
-rw-r--r--src/tools/miri/src/concurrency/sync.rs119
-rw-r--r--src/tools/miri/src/concurrency/thread.rs9
-rw-r--r--src/tools/miri/src/lib.rs5
-rw-r--r--src/tools/miri/src/machine.rs16
-rw-r--r--src/tools/miri/src/shims/foreign_items.rs2
-rw-r--r--src/tools/miri/src/shims/mod.rs2
-rw-r--r--src/tools/miri/src/shims/native_lib.rs7
-rw-r--r--src/tools/miri/src/shims/unix/macos/sync.rs2
-rw-r--r--src/tools/miri/src/shims/unix/sync.rs222
-rw-r--r--src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.init.stderr20
-rw-r--r--src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.rs28
-rw-r--r--src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.static_initializer.stderr20
-rw-r--r--src/tools/miri/tests/native-lib/fail/function_not_in_so.rs4
-rw-r--r--src/tools/miri/tests/native-lib/fail/private_function.rs15
-rw-r--r--src/tools/miri/tests/native-lib/fail/private_function.stderr15
-rw-r--r--src/tools/miri/tests/native-lib/native-lib.map20
-rw-r--r--src/tools/miri/tests/native-lib/pass/ptr_read_access.rs16
-rw-r--r--src/tools/miri/tests/native-lib/pass/scalar_arguments.rs4
-rw-r--r--src/tools/miri/tests/native-lib/ptr_read_access.c11
-rw-r--r--src/tools/miri/tests/native-lib/scalar_arguments.c20
-rw-r--r--src/tools/miri/tests/ui.rs19
26 files changed, 521 insertions, 284 deletions
diff --git a/src/tools/miri/Cargo.toml b/src/tools/miri/Cargo.toml
index 4b7f3483ff7..d8cfa5b886d 100644
--- a/src/tools/miri/Cargo.toml
+++ b/src/tools/miri/Cargo.toml
@@ -37,8 +37,6 @@ features = ['unprefixed_malloc_on_supported_platforms']
 
 [target.'cfg(unix)'.dependencies]
 libc = "0.2"
-
-[target.'cfg(target_os = "linux")'.dependencies]
 libffi = "3.2.0"
 libloading = "0.8"
 
diff --git a/src/tools/miri/README.md b/src/tools/miri/README.md
index 5821adb96ce..dbb0e8a2925 100644
--- a/src/tools/miri/README.md
+++ b/src/tools/miri/README.md
@@ -383,7 +383,7 @@ to Miri failing to detect cases of undefined behavior in a program.
   file descriptors will be mixed up.
   This is **work in progress**; currently, only integer arguments and return values are
   supported (and no, pointer/integer casts to work around this limitation will not work;
-  they will fail horribly). It also only works on Linux hosts for now.
+  they will fail horribly). It also only works on Unix hosts for now.
 * `-Zmiri-measureme=<name>` enables `measureme` profiling for the interpreted program.
    This can be used to find which parts of your program are executing slowly under Miri.
    The profile is written out to a file inside a directory called `<name>`, and can be processed
diff --git a/src/tools/miri/rust-version b/src/tools/miri/rust-version
index c33d9ad8614..3fdad2a91e9 100644
--- a/src/tools/miri/rust-version
+++ b/src/tools/miri/rust-version
@@ -1 +1 @@
-0d634185dfddefe09047881175f35c65d68dcff1
+54fdef7799d9ff9470bb5cabd29fde9471a99eaa
diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs
index 76c68add8cd..c19a962e652 100644
--- a/src/tools/miri/src/alloc_addresses/mod.rs
+++ b/src/tools/miri/src/alloc_addresses/mod.rs
@@ -5,7 +5,6 @@ mod reuse_pool;
 
 use std::cell::RefCell;
 use std::cmp::max;
-use std::collections::hash_map::Entry;
 
 use rand::Rng;
 
@@ -151,6 +150,95 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         }
     }
 
+    fn addr_from_alloc_id_uncached(
+        &self,
+        global_state: &mut GlobalStateInner,
+        alloc_id: AllocId,
+        memory_kind: MemoryKind,
+    ) -> InterpResult<'tcx, u64> {
+        let ecx = self.eval_context_ref();
+        let mut rng = ecx.machine.rng.borrow_mut();
+        let (size, align, kind) = ecx.get_alloc_info(alloc_id);
+        // This is either called immediately after allocation (and then cached), or when
+        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
+        // at a live allocation. This also ensures that we never re-assign an address to an
+        // allocation that previously had an address, but then was freed and the address
+        // information was removed.
+        assert!(!matches!(kind, AllocKind::Dead));
+
+        // This allocation does not have a base address yet, pick or reuse one.
+        if ecx.machine.native_lib.is_some() {
+            // In native lib mode, we use the "real" address of the bytes for this allocation.
+            // This ensures the interpreted program and native code have the same view of memory.
+            let base_ptr = match kind {
+                AllocKind::LiveData => {
+                    if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
+                        // For new global allocations, we always pre-allocate the memory to be able to use the machine address directly.
+                        let prepared_bytes = MiriAllocBytes::zeroed(size, align)
+                            .unwrap_or_else(|| {
+                                panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes")
+                            });
+                        let ptr = prepared_bytes.as_ptr();
+                        // Store prepared allocation space to be picked up for use later.
+                        global_state
+                            .prepared_alloc_bytes
+                            .try_insert(alloc_id, prepared_bytes)
+                            .unwrap();
+                        ptr
+                    } else {
+                        ecx.get_alloc_bytes_unchecked_raw(alloc_id)?
+                    }
+                }
+                AllocKind::Function | AllocKind::VTable => {
+                    // Allocate some dummy memory to get a unique address for this function/vtable.
+                    let alloc_bytes =
+                        MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap());
+                    let ptr = alloc_bytes.as_ptr();
+                    // Leak the underlying memory to ensure it remains unique.
+                    std::mem::forget(alloc_bytes);
+                    ptr
+                }
+                AllocKind::Dead => unreachable!(),
+            };
+            // Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
+            return Ok(base_ptr.expose_provenance().try_into().unwrap());
+        }
+        // We are not in native lib mode, so we control the addresses ourselves.
+        if let Some((reuse_addr, clock)) =
+            global_state.reuse.take_addr(&mut *rng, size, align, memory_kind, ecx.active_thread())
+        {
+            if let Some(clock) = clock {
+                ecx.acquire_clock(&clock);
+            }
+            Ok(reuse_addr)
+        } else {
+            // We have to pick a fresh address.
+            // Leave some space to the previous allocation, to give it some chance to be less aligned.
+            // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
+            let slack = rng.gen_range(0..16);
+            // From next_base_addr + slack, round up to adjust for alignment.
+            let base_addr = global_state
+                .next_base_addr
+                .checked_add(slack)
+                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+            let base_addr = align_addr(base_addr, align.bytes());
+
+            // Remember next base address.  If this allocation is zero-sized, leave a gap of at
+            // least 1 to avoid two allocations having the same base address. (The logic in
+            // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
+            // need to be distinguishable!)
+            global_state.next_base_addr = base_addr
+                .checked_add(max(size.bytes(), 1))
+                .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+            // Even if `Size` didn't overflow, we might still have filled up the address space.
+            if global_state.next_base_addr > ecx.target_usize_max() {
+                throw_exhaust!(AddressSpaceFull);
+            }
+
+            Ok(base_addr)
+        }
+    }
+
     fn addr_from_alloc_id(
         &self,
         alloc_id: AllocId,
@@ -160,98 +248,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
         let global_state = &mut *global_state;
 
-        Ok(match global_state.base_addr.entry(alloc_id) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let mut rng = ecx.machine.rng.borrow_mut();
-                let (size, align, kind) = ecx.get_alloc_info(alloc_id);
-                // This is either called immediately after allocation (and then cached), or when
-                // adjusting `tcx` pointers (which never get freed). So assert that we are looking
-                // at a live allocation. This also ensures that we never re-assign an address to an
-                // allocation that previously had an address, but then was freed and the address
-                // information was removed.
-                assert!(!matches!(kind, AllocKind::Dead));
-
-                // This allocation does not have a base address yet, pick or reuse one.
-                let base_addr = if ecx.machine.native_lib.is_some() {
-                    // In native lib mode, we use the "real" address of the bytes for this allocation.
-                    // This ensures the interpreted program and native code have the same view of memory.
-                    match kind {
-                        AllocKind::LiveData => {
-                            let ptr = if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
-                                // For new global allocations, we always pre-allocate the memory to be able use the machine address directly.
-                                let prepared_bytes = MiriAllocBytes::zeroed(size, align)
-                                    .unwrap_or_else(|| {
-                                        panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes")
-                                    });
-                                let ptr = prepared_bytes.as_ptr();
-                                    // Store prepared allocation space to be picked up for use later.
-                                    global_state.prepared_alloc_bytes.try_insert(alloc_id, prepared_bytes).unwrap();
-                                ptr
-                            } else {
-                                ecx.get_alloc_bytes_unchecked_raw(alloc_id)?
-                            };
-                            // Ensure this pointer's provenance is exposed, so that it can be used by FFI code.
-                            ptr.expose_provenance().try_into().unwrap()
-                        }
-                        AllocKind::Function | AllocKind::VTable => {
-                            // Allocate some dummy memory to get a unique address for this function/vtable.
-                            let alloc_bytes = MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap());
-                            // We don't need to expose these bytes as nobody is allowed to access them.
-                            let addr = alloc_bytes.as_ptr().addr().try_into().unwrap();
-                            // Leak the underlying memory to ensure it remains unique.
-                            std::mem::forget(alloc_bytes);
-                            addr
-                        }
-                        AllocKind::Dead => unreachable!()
-                    }
-                } else if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(                    
-                    &mut *rng,
-                    size,
-                    align,
-                    memory_kind,
-                    ecx.active_thread(),
-                ) {
-                    if let Some(clock) = clock {
-                        ecx.acquire_clock(&clock);
-                    }
-                    reuse_addr
-                } else {
-                    // We have to pick a fresh address.
-                    // Leave some space to the previous allocation, to give it some chance to be less aligned.
-                    // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
-                    let slack = rng.gen_range(0..16);
-                    // From next_base_addr + slack, round up to adjust for alignment.
-                    let base_addr = global_state
-                        .next_base_addr
-                        .checked_add(slack)
-                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                    let base_addr = align_addr(base_addr, align.bytes());
-
-                    // Remember next base address.  If this allocation is zero-sized, leave a gap
-                    // of at least 1 to avoid two allocations having the same base address.
-                    // (The logic in `alloc_id_from_addr` assumes unique addresses, and different
-                    // function/vtable pointers need to be distinguishable!)
-                    global_state.next_base_addr = base_addr
-                        .checked_add(max(size.bytes(), 1))
-                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                    // Even if `Size` didn't overflow, we might still have filled up the address space.
-                    if global_state.next_base_addr > ecx.target_usize_max() {
-                        throw_exhaust!(AddressSpaceFull);
-                    }
-
-                    base_addr
-                };
-                trace!(
-                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {})",
-                    base_addr,
-                    alloc_id,
-                    size.bytes(),
-                    align.bytes(),
-                );
+        match global_state.base_addr.get(&alloc_id) {
+            Some(&addr) => Ok(addr),
+            None => {
+                // First time we're looking for the absolute address of this allocation.
+                let base_addr =
+                    self.addr_from_alloc_id_uncached(global_state, alloc_id, memory_kind)?;
+                trace!("Assigning base address {:#x} to allocation {:?}", base_addr, alloc_id);
 
                 // Store address in cache.
-                entry.insert(base_addr);
+                global_state.base_addr.try_insert(alloc_id, base_addr).unwrap();
 
                 // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it sorted.
                 // We have a fast-path for the common case that this address is bigger than all previous ones.
@@ -269,9 +275,9 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 };
                 global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
 
-                base_addr
+                Ok(base_addr)
             }
-        })
+        }
     }
 }
 
@@ -359,16 +365,23 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
     // This returns some prepared `MiriAllocBytes`, either because `addr_from_alloc_id` reserved
     // memory space in the past, or by doing the pre-allocation right upon being called.
-    fn get_global_alloc_bytes(&self, id: AllocId, kind: MemoryKind, bytes: &[u8], align: Align) -> InterpResult<'tcx, MiriAllocBytes> {
+    fn get_global_alloc_bytes(
+        &self,
+        id: AllocId,
+        kind: MemoryKind,
+        bytes: &[u8],
+        align: Align,
+    ) -> InterpResult<'tcx, MiriAllocBytes> {
         let ecx = self.eval_context_ref();
-        Ok(if ecx.machine.native_lib.is_some() {
+        if ecx.machine.native_lib.is_some() {
             // In native lib mode, MiriAllocBytes for global allocations are handled via `prepared_alloc_bytes`.
-            // This additional call ensures that some `MiriAllocBytes` are always prepared.
+            // This additional call ensures that some `MiriAllocBytes` are always prepared, just in case
+            // this function gets called before the first time `addr_from_alloc_id` gets called.
             ecx.addr_from_alloc_id(id, kind)?;
-            let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
             // The memory we need here will have already been allocated during an earlier call to
             // `addr_from_alloc_id` for this allocation. So don't create a new `MiriAllocBytes` here, instead
             // fetch the previously prepared bytes from `prepared_alloc_bytes`.
+            let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
             let mut prepared_alloc_bytes = global_state
                 .prepared_alloc_bytes
                 .remove(&id)
@@ -378,10 +391,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             assert_eq!(prepared_alloc_bytes.len(), bytes.len());
             // Copy allocation contents into prepared memory.
             prepared_alloc_bytes.copy_from_slice(bytes);
-            prepared_alloc_bytes
+            Ok(prepared_alloc_bytes)
         } else {
-            MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(&*bytes), align)
-        })
+            Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
+        }
     }
 
     /// When a pointer is used for a memory access, this computes where in which allocation the
diff --git a/src/tools/miri/src/concurrency/init_once.rs b/src/tools/miri/src/concurrency/init_once.rs
index c23e5737280..d709e4e6eae 100644
--- a/src/tools/miri/src/concurrency/init_once.rs
+++ b/src/tools/miri/src/concurrency/init_once.rs
@@ -35,8 +35,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         offset: u64,
     ) -> InterpResult<'tcx, InitOnceId> {
         let this = self.eval_context_mut();
-        this.get_or_create_id(lock_op, lock_layout, offset, |ecx| &mut ecx.machine.sync.init_onces)?
-            .ok_or_else(|| err_ub_format!("init_once has invalid ID").into())
+        this.get_or_create_id(
+            lock_op,
+            lock_layout,
+            offset,
+            |ecx| &mut ecx.machine.sync.init_onces,
+            |_| Ok(Default::default()),
+        )?
+        .ok_or_else(|| err_ub_format!("init_once has invalid ID").into())
     }
 
     #[inline]
diff --git a/src/tools/miri/src/concurrency/sync.rs b/src/tools/miri/src/concurrency/sync.rs
index d972831c768..97e910df6a2 100644
--- a/src/tools/miri/src/concurrency/sync.rs
+++ b/src/tools/miri/src/concurrency/sync.rs
@@ -66,6 +66,27 @@ pub(super) use declare_id;
 
 declare_id!(MutexId);
 
+/// The mutex kind.
+#[derive(Debug, Clone, Copy)]
+#[non_exhaustive]
+pub enum MutexKind {
+    Invalid,
+    Normal,
+    Default,
+    Recursive,
+    ErrorCheck,
+}
+
+#[derive(Debug)]
+/// Additional data that may be used by shim implementations.
+pub struct AdditionalMutexData {
+    /// The mutex kind, used by some mutex implementations like pthreads mutexes.
+    pub kind: MutexKind,
+
+    /// The address of the mutex.
+    pub address: u64,
+}
+
 /// The mutex state.
 #[derive(Default, Debug)]
 struct Mutex {
@@ -77,6 +98,9 @@ struct Mutex {
     queue: VecDeque<ThreadId>,
     /// Mutex clock. This tracks the moment of the last unlock.
     clock: VClock,
+
+    /// Additional data that can be set by shim implementations.
+    data: Option<AdditionalMutexData>,
 }
 
 declare_id!(RwLockId);
@@ -168,13 +192,15 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// Returns `None` if memory stores a non-zero invalid ID.
     ///
     /// `get_objs` must return the `IndexVec` that stores all the objects of this type.
+    /// `create_obj` must create the new object if initialization is needed.
     #[inline]
-    fn get_or_create_id<Id: SyncId + Idx, T: Default>(
+    fn get_or_create_id<Id: SyncId + Idx, T>(
         &mut self,
         lock_op: &OpTy<'tcx>,
         lock_layout: TyAndLayout<'tcx>,
         offset: u64,
         get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
+        create_obj: impl for<'a> FnOnce(&'a mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
     ) -> InterpResult<'tcx, Option<Id>> {
         let this = self.eval_context_mut();
         let value_place =
@@ -196,7 +222,8 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
             // We set the in-memory ID to `next_index`, now also create this object in the machine
             // state.
-            let new_index = get_objs(this).push(T::default());
+            let obj = create_obj(this)?;
+            let new_index = get_objs(this).push(obj);
             assert_eq!(next_index, new_index);
             Some(new_index)
         } else {
@@ -210,6 +237,32 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         })
     }
 
+    /// Eagerly creates a Miri sync structure.
+    ///
+    /// `create_id` will store the index of the sync structure in the memory pointed to by
+    /// `lock_op`, so that future calls to `get_or_create_id` will see it as initialized.
+    /// - `lock_op` must hold a pointer to the sync structure.
+    /// - `lock_layout` must be the memory layout of the sync structure.
+    /// - `offset` must be the offset inside the sync structure where its miri id will be stored.
+    /// - `get_objs` is described in `get_or_create_id`.
+    /// - `obj` must be the new sync object.
+    fn create_id<Id: SyncId + Idx, T>(
+        &mut self,
+        lock_op: &OpTy<'tcx>,
+        lock_layout: TyAndLayout<'tcx>,
+        offset: u64,
+        get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
+        obj: T,
+    ) -> InterpResult<'tcx, Id> {
+        let this = self.eval_context_mut();
+        let value_place =
+            this.deref_pointer_and_offset(lock_op, offset, lock_layout, this.machine.layouts.u32)?;
+
+        let new_index = get_objs(this).push(obj);
+        this.write_scalar(Scalar::from_u32(new_index.to_u32()), &value_place)?;
+        Ok(new_index)
+    }
+
     fn condvar_reacquire_mutex(
         &mut self,
         mutex: MutexId,
@@ -236,15 +289,53 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
 // situations.
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
+    /// Eagerly create and initialize a new mutex.
+    fn mutex_create(
+        &mut self,
+        lock_op: &OpTy<'tcx>,
+        lock_layout: TyAndLayout<'tcx>,
+        offset: u64,
+        data: Option<AdditionalMutexData>,
+    ) -> InterpResult<'tcx, MutexId> {
+        let this = self.eval_context_mut();
+        this.create_id(
+            lock_op,
+            lock_layout,
+            offset,
+            |ecx| &mut ecx.machine.sync.mutexes,
+            Mutex { data, ..Default::default() },
+        )
+    }
+
+    /// Lazily create a new mutex.
+    /// `initialize_data` must return any additional data that a user wants to associate with the mutex.
     fn mutex_get_or_create_id(
         &mut self,
         lock_op: &OpTy<'tcx>,
         lock_layout: TyAndLayout<'tcx>,
         offset: u64,
+        initialize_data: impl for<'a> FnOnce(
+            &'a mut MiriInterpCx<'tcx>,
+        ) -> InterpResult<'tcx, Option<AdditionalMutexData>>,
     ) -> InterpResult<'tcx, MutexId> {
         let this = self.eval_context_mut();
-        this.get_or_create_id(lock_op, lock_layout, offset, |ecx| &mut ecx.machine.sync.mutexes)?
-            .ok_or_else(|| err_ub_format!("mutex has invalid ID").into())
+        this.get_or_create_id(
+            lock_op,
+            lock_layout,
+            offset,
+            |ecx| &mut ecx.machine.sync.mutexes,
+            |ecx| initialize_data(ecx).map(|data| Mutex { data, ..Default::default() }),
+        )?
+        .ok_or_else(|| err_ub_format!("mutex has invalid ID").into())
+    }
+
+    /// Retrieve the additional data stored for a mutex.
+    fn mutex_get_data<'a>(&'a mut self, id: MutexId) -> Option<&'a AdditionalMutexData>
+    where
+        'tcx: 'a,
+    {
+        let this = self.eval_context_ref();
+        this.machine.sync.mutexes[id].data.as_ref()
     }
 
     fn rwlock_get_or_create_id(
@@ -254,8 +345,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         offset: u64,
     ) -> InterpResult<'tcx, RwLockId> {
         let this = self.eval_context_mut();
-        this.get_or_create_id(lock_op, lock_layout, offset, |ecx| &mut ecx.machine.sync.rwlocks)?
-            .ok_or_else(|| err_ub_format!("rwlock has invalid ID").into())
+        this.get_or_create_id(
+            lock_op,
+            lock_layout,
+            offset,
+            |ecx| &mut ecx.machine.sync.rwlocks,
+            |_| Ok(Default::default()),
+        )?
+        .ok_or_else(|| err_ub_format!("rwlock has invalid ID").into())
     }
 
     fn condvar_get_or_create_id(
@@ -265,8 +362,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         offset: u64,
     ) -> InterpResult<'tcx, CondvarId> {
         let this = self.eval_context_mut();
-        this.get_or_create_id(lock_op, lock_layout, offset, |ecx| &mut ecx.machine.sync.condvars)?
-            .ok_or_else(|| err_ub_format!("condvar has invalid ID").into())
+        this.get_or_create_id(
+            lock_op,
+            lock_layout,
+            offset,
+            |ecx| &mut ecx.machine.sync.condvars,
+            |_| Ok(Default::default()),
+        )?
+        .ok_or_else(|| err_ub_format!("condvar has invalid ID").into())
     }
 
     #[inline]
diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs
index 79f292f6965..306245a843b 100644
--- a/src/tools/miri/src/concurrency/thread.rs
+++ b/src/tools/miri/src/concurrency/thread.rs
@@ -888,8 +888,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
             let alloc = this.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
             // We make a full copy of this allocation.
-            let mut alloc =
-                alloc.inner().adjust_from_tcx(&this.tcx, |bytes, align| Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align)), |ptr| this.global_root_pointer(ptr))?;
+            let mut alloc = alloc.inner().adjust_from_tcx(
+                &this.tcx,
+                |bytes, align| {
+                    Ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align))
+                },
+                |ptr| this.global_root_pointer(ptr),
+            )?;
             // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
             alloc.mutability = Mutability::Mut;
             // Create a fresh allocation with this content.
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index 8a59206943d..85b4b02ff5f 100644
--- a/src/tools/miri/src/lib.rs
+++ b/src/tools/miri/src/lib.rs
@@ -133,7 +133,10 @@ pub use crate::concurrency::{
     cpu_affinity::MAX_CPUS,
     data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
     init_once::{EvalContextExt as _, InitOnceId},
-    sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SynchronizationObjects},
+    sync::{
+        AdditionalMutexData, CondvarId, EvalContextExt as _, MutexId, MutexKind, RwLockId,
+        SynchronizationObjects,
+    },
     thread::{
         BlockReason, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager,
         TimeoutAnchor, TimeoutClock, UnblockCallback,
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 66c6966d1a6..2cd57e72871 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -535,9 +535,9 @@ pub struct MiriMachine<'tcx> {
     pub(crate) basic_block_count: u64,
 
     /// Handle of the optional shared object file for native functions.
-    #[cfg(target_os = "linux")]
+    #[cfg(unix)]
     pub native_lib: Option<(libloading::Library, std::path::PathBuf)>,
-    #[cfg(not(target_os = "linux"))]
+    #[cfg(not(unix))]
     pub native_lib: Option<!>,
 
     /// Run a garbage collector for BorTags every N basic blocks.
@@ -678,7 +678,7 @@ impl<'tcx> MiriMachine<'tcx> {
             report_progress: config.report_progress,
             basic_block_count: 0,
             clock: Clock::new(config.isolated_op == IsolatedOp::Allow),
-            #[cfg(target_os = "linux")]
+            #[cfg(unix)]
             native_lib: config.native_lib.as_ref().map(|lib_file_path| {
                 let target_triple = layout_cx.tcx.sess.opts.target_triple.triple();
                 // Check if host target == the session target.
@@ -700,9 +700,9 @@ impl<'tcx> MiriMachine<'tcx> {
                     lib_file_path.clone(),
                 )
             }),
-            #[cfg(not(target_os = "linux"))]
+            #[cfg(not(unix))]
             native_lib: config.native_lib.as_ref().map(|_| {
-                panic!("loading external .so files is only supported on Linux")
+                panic!("calling functions from native libraries via FFI is only supported on Unix")
             }),
             gc_interval: config.gc_interval,
             since_gc: 0,
@@ -1277,12 +1277,12 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
     ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra, Self::Bytes>>>
     {
         let kind = Self::GLOBAL_KIND.unwrap().into();
-        let alloc = alloc.adjust_from_tcx(&ecx.tcx,
+        let alloc = alloc.adjust_from_tcx(
+            &ecx.tcx,
             |bytes, align| ecx.get_global_alloc_bytes(id, kind, bytes, align),
             |ptr| ecx.global_root_pointer(ptr),
         )?;
-        let extra =
-            Self::init_alloc_extra(ecx, id, kind, alloc.size(), alloc.align)?;
+        let extra = Self::init_alloc_extra(ecx, id, kind, alloc.size(), alloc.align)?;
         Ok(Cow::Owned(alloc.with_extra(extra)))
     }
 
diff --git a/src/tools/miri/src/shims/foreign_items.rs b/src/tools/miri/src/shims/foreign_items.rs
index d40dbdba80f..1b45bc22038 100644
--- a/src/tools/miri/src/shims/foreign_items.rs
+++ b/src/tools/miri/src/shims/foreign_items.rs
@@ -226,7 +226,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let this = self.eval_context_mut();
 
         // First deal with any external C functions in linked .so file.
-        #[cfg(target_os = "linux")]
+        #[cfg(unix)]
         if this.machine.native_lib.as_ref().is_some() {
             use crate::shims::native_lib::EvalContextExt as _;
             // An Ok(false) here means that the function being called was not exported
diff --git a/src/tools/miri/src/shims/mod.rs b/src/tools/miri/src/shims/mod.rs
index 7d5349f26b1..618cf8cf200 100644
--- a/src/tools/miri/src/shims/mod.rs
+++ b/src/tools/miri/src/shims/mod.rs
@@ -2,7 +2,7 @@
 
 mod alloc;
 mod backtrace;
-#[cfg(target_os = "linux")]
+#[cfg(unix)]
 mod native_lib;
 mod unix;
 mod wasi;
diff --git a/src/tools/miri/src/shims/native_lib.rs b/src/tools/miri/src/shims/native_lib.rs
index 25d96fbd733..e4998c37f3f 100644
--- a/src/tools/miri/src/shims/native_lib.rs
+++ b/src/tools/miri/src/shims/native_lib.rs
@@ -240,13 +240,16 @@ fn imm_to_carg<'tcx>(v: ImmTy<'tcx>, cx: &impl HasDataLayout) -> InterpResult<'t
         ty::RawPtr(_, mutability) => {
             // Arbitrary mutable pointer accesses are not currently supported in Miri.
             if mutability.is_mut() {
-                throw_unsup_format!("unsupported mutable pointer type for native call: {}", v.layout.ty);
+                throw_unsup_format!(
+                    "unsupported mutable pointer type for native call: {}",
+                    v.layout.ty
+                );
             } else {
                 let s = v.to_scalar().to_pointer(cx)?.addr();
                 // This relies on the `expose_provenance` in `addr_from_alloc_id`.
                 CArg::RawPtr(std::ptr::with_exposed_provenance_mut(s.bytes_usize()))
             }
-        },
+        }
         _ => throw_unsup_format!("unsupported argument type for native call: {}", v.layout.ty),
     })
 }
diff --git a/src/tools/miri/src/shims/unix/macos/sync.rs b/src/tools/miri/src/shims/unix/macos/sync.rs
index 5e5fccb587b..882c08cca15 100644
--- a/src/tools/miri/src/shims/unix/macos/sync.rs
+++ b/src/tools/miri/src/shims/unix/macos/sync.rs
@@ -19,7 +19,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         // os_unfair_lock holds a 32-bit value, is initialized with zero and
         // must be assumed to be opaque. Therefore, we can just store our
         // internal mutex ID in the structure without anyone noticing.
-        this.mutex_get_or_create_id(lock_op, this.libc_ty_layout("os_unfair_lock"), 0)
+        this.mutex_get_or_create_id(lock_op, this.libc_ty_layout("os_unfair_lock"), 0, |_| Ok(None))
     }
 }
 
diff --git a/src/tools/miri/src/shims/unix/sync.rs b/src/tools/miri/src/shims/unix/sync.rs
index 0b889b11822..5da5cab8fe4 100644
--- a/src/tools/miri/src/shims/unix/sync.rs
+++ b/src/tools/miri/src/shims/unix/sync.rs
@@ -74,6 +74,8 @@ fn mutex_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
 
     // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
     // the id must start out as 0.
+    // FIXME on some platforms (e.g. Linux) there are more static initializers for
+    // recursive or error-checking mutexes. We should also add them to this sanity check.
     static SANITY: AtomicBool = AtomicBool::new(false);
     if !SANITY.swap(true, Ordering::Relaxed) {
         let static_initializer = ecx.eval_path(&["libc", "PTHREAD_MUTEX_INITIALIZER"]);
@@ -90,79 +92,92 @@ fn mutex_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
     Ok(offset)
 }
 
-fn mutex_kind_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> u64 {
-    // These offsets are picked for compatibility with Linux's static initializer
-    // macros, e.g. PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.)
-    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
-
-    // Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
-    // the kind must start out as PTHREAD_MUTEX_DEFAULT.
-    static SANITY: AtomicBool = AtomicBool::new(false);
-    if !SANITY.swap(true, Ordering::Relaxed) {
-        let static_initializer = ecx.eval_path(&["libc", "PTHREAD_MUTEX_INITIALIZER"]);
-        let kind_field = static_initializer
-            .offset(Size::from_bytes(mutex_kind_offset(ecx)), ecx.machine.layouts.i32, ecx)
-            .unwrap();
-        let kind = ecx.read_scalar(&kind_field).unwrap().to_i32().unwrap();
-        assert_eq!(
-            kind,
-            ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT"),
-            "PTHREAD_MUTEX_INITIALIZER is incompatible with our pthread_mutex layout: kind is not PTHREAD_MUTEX_DEFAULT"
-        );
-    }
-
-    offset
+/// Eagerly create and initialize a new mutex.
+fn mutex_create<'tcx>(
+    ecx: &mut MiriInterpCx<'tcx>,
+    mutex_op: &OpTy<'tcx>,
+    kind: i32,
+) -> InterpResult<'tcx> {
+    // FIXME: might be worth changing mutex_create to take the mplace
+    // rather than the `OpTy`.
+    let address = ecx.read_pointer(mutex_op)?.addr().bytes();
+    let kind = translate_kind(ecx, kind)?;
+    let data = Some(AdditionalMutexData { address, kind });
+    ecx.mutex_create(mutex_op, ecx.libc_ty_layout("pthread_mutex_t"), mutex_id_offset(ecx)?, data)?;
+    Ok(())
 }
 
+/// Returns the `MutexId` of the mutex stored at `mutex_op`.
+///
+/// `mutex_get_id` will also check if the mutex has been moved since its first use and
+/// return an error if it has.
 fn mutex_get_id<'tcx>(
     ecx: &mut MiriInterpCx<'tcx>,
     mutex_op: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, MutexId> {
-    ecx.mutex_get_or_create_id(
+    let address = ecx.read_pointer(mutex_op)?.addr().bytes();
+
+    // FIXME: might be worth changing mutex_get_or_create_id to take the mplace
+    // rather than the `OpTy`.
+    let id = ecx.mutex_get_or_create_id(
         mutex_op,
         ecx.libc_ty_layout("pthread_mutex_t"),
         mutex_id_offset(ecx)?,
-    )
-}
+        |ecx| {
+            // This is called if a static initializer was used and the lock has not been assigned
+            // an ID yet. We have to determine the mutex kind from the static initializer.
+            let kind = kind_from_static_initializer(ecx, mutex_op)?;
+
+            Ok(Some(AdditionalMutexData { kind, address }))
+        },
+    )?;
+
+    // Check that the mutex has not been moved since last use.
+    let data = ecx.mutex_get_data(id).expect("data should always exist for pthread mutexes");
+    if data.address != address {
+        throw_ub_format!("pthread_mutex_t can't be moved after first use")
+    }
 
-fn mutex_reset_id<'tcx>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    mutex_op: &OpTy<'tcx>,
-) -> InterpResult<'tcx, ()> {
-    ecx.deref_pointer_and_write(
-        mutex_op,
-        mutex_id_offset(ecx)?,
-        Scalar::from_u32(0),
-        ecx.libc_ty_layout("pthread_mutex_t"),
-        ecx.machine.layouts.u32,
-    )
+    Ok(id)
 }
 
-fn mutex_get_kind<'tcx>(
+/// Returns the kind of a static initializer.
+fn kind_from_static_initializer<'tcx>(
     ecx: &MiriInterpCx<'tcx>,
     mutex_op: &OpTy<'tcx>,
-) -> InterpResult<'tcx, i32> {
-    ecx.deref_pointer_and_read(
-        mutex_op,
-        mutex_kind_offset(ecx),
-        ecx.libc_ty_layout("pthread_mutex_t"),
-        ecx.machine.layouts.i32,
-    )?
-    .to_i32()
+) -> InterpResult<'tcx, MutexKind> {
+    // Only Linux has static initializers other than PTHREAD_MUTEX_DEFAULT.
+    let kind = match &*ecx.tcx.sess.target.os {
+        "linux" => {
+            let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+
+            ecx.deref_pointer_and_read(
+                mutex_op,
+                offset,
+                ecx.libc_ty_layout("pthread_mutex_t"),
+                ecx.machine.layouts.i32,
+            )?
+            .to_i32()?
+        }
+        | "illumos" | "solaris" | "macos" => ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT"),
+        os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
+    };
+
+    translate_kind(ecx, kind)
 }
 
-fn mutex_set_kind<'tcx>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    mutex_op: &OpTy<'tcx>,
-    kind: i32,
-) -> InterpResult<'tcx, ()> {
-    ecx.deref_pointer_and_write(
-        mutex_op,
-        mutex_kind_offset(ecx),
-        Scalar::from_i32(kind),
-        ecx.libc_ty_layout("pthread_mutex_t"),
-        ecx.machine.layouts.i32,
-    )
+fn translate_kind<'tcx>(ecx: &MiriInterpCx<'tcx>, kind: i32) -> InterpResult<'tcx, MutexKind> {
+    Ok(if is_mutex_kind_default(ecx, kind)? {
+        MutexKind::Default
+    } else if is_mutex_kind_normal(ecx, kind)? {
+        MutexKind::Normal
+    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
+        MutexKind::ErrorCheck
+    } else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
+        MutexKind::Recursive
+    } else {
+        throw_unsup_format!("unsupported type of mutex: {kind}");
+    })
 }
 
 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -452,10 +467,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             mutexattr_get_kind(this, attr_op)?
         };
 
-        // Write 0 to use the same code path as the static initializers.
-        mutex_reset_id(this, mutex_op)?;
-
-        mutex_set_kind(this, mutex_op, kind)?;
+        mutex_create(this, mutex_op, kind)?;
 
         Ok(())
     }
@@ -467,8 +479,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let kind = mutex_get_kind(this, mutex_op)?;
         let id = mutex_get_id(this, mutex_op)?;
+        let kind =
+            this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
 
         let ret = if this.mutex_is_locked(id) {
             let owner_thread = this.mutex_get_owner(id);
@@ -477,19 +490,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 return Ok(());
             } else {
                 // Trying to acquire the same mutex again.
-                if is_mutex_kind_default(this, kind)? {
-                    throw_ub_format!("trying to acquire already locked default mutex");
-                } else if is_mutex_kind_normal(this, kind)? {
-                    throw_machine_stop!(TerminationInfo::Deadlock);
-                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
-                    this.eval_libc_i32("EDEADLK")
-                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
-                    this.mutex_lock(id);
-                    0
-                } else {
-                    throw_unsup_format!(
-                        "called pthread_mutex_lock on an unsupported type of mutex"
-                    );
+                match kind {
+                    MutexKind::Default =>
+                        throw_ub_format!("trying to acquire already locked default mutex"),
+                    MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
+                    MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
+                    MutexKind::Recursive => {
+                        this.mutex_lock(id);
+                        0
+                    }
+                    _ =>
+                        throw_unsup_format!(
+                            "called pthread_mutex_lock on an unsupported type of mutex"
+                        ),
                 }
             }
         } else {
@@ -504,26 +517,26 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
         let this = self.eval_context_mut();
 
-        let kind = mutex_get_kind(this, mutex_op)?;
         let id = mutex_get_id(this, mutex_op)?;
+        let kind =
+            this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
 
         Ok(Scalar::from_i32(if this.mutex_is_locked(id) {
             let owner_thread = this.mutex_get_owner(id);
             if owner_thread != this.active_thread() {
                 this.eval_libc_i32("EBUSY")
             } else {
-                if is_mutex_kind_default(this, kind)?
-                    || is_mutex_kind_normal(this, kind)?
-                    || kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
-                {
-                    this.eval_libc_i32("EBUSY")
-                } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
-                    this.mutex_lock(id);
-                    0
-                } else {
-                    throw_unsup_format!(
-                        "called pthread_mutex_trylock on an unsupported type of mutex"
-                    );
+                match kind {
+                    MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
+                        this.eval_libc_i32("EBUSY"),
+                    MutexKind::Recursive => {
+                        this.mutex_lock(id);
+                        0
+                    }
+                    _ =>
+                        throw_unsup_format!(
+                            "called pthread_mutex_trylock on an unsupported type of mutex"
+                        ),
                 }
             }
         } else {
@@ -536,8 +549,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
         let this = self.eval_context_mut();
 
-        let kind = mutex_get_kind(this, mutex_op)?;
         let id = mutex_get_id(this, mutex_op)?;
+        let kind =
+            this.mutex_get_data(id).expect("data should always exist for pthread mutexes").kind;
 
         if let Some(_old_locked_count) = this.mutex_unlock(id)? {
             // The mutex was locked by the current thread.
@@ -546,20 +560,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             // The mutex was locked by another thread or not locked at all. See
             // the “Unlock When Not Owner” column in
             // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
-            if is_mutex_kind_default(this, kind)? {
-                throw_ub_format!(
-                    "unlocked a default mutex that was not locked by the current thread"
-                );
-            } else if is_mutex_kind_normal(this, kind)? {
-                throw_ub_format!(
-                    "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
-                );
-            } else if kind == this.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK")
-                || kind == this.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE")
-            {
-                Ok(Scalar::from_i32(this.eval_libc_i32("EPERM")))
-            } else {
-                throw_unsup_format!("called pthread_mutex_unlock on an unsupported type of mutex");
+            match kind {
+                MutexKind::Default =>
+                    throw_ub_format!(
+                        "unlocked a default mutex that was not locked by the current thread"
+                    ),
+                MutexKind::Normal =>
+                    throw_ub_format!(
+                        "unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked by the current thread"
+                    ),
+                MutexKind::ErrorCheck | MutexKind::Recursive =>
+                    Ok(Scalar::from_i32(this.eval_libc_i32("EPERM"))),
+                _ =>
+                    throw_unsup_format!(
+                        "called pthread_mutex_unlock on an unsupported type of mutex"
+                    ),
             }
         }
     }
@@ -574,7 +589,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         }
 
         // Destroying an uninit pthread_mutex is UB, so check to make sure it's not uninit.
-        mutex_get_kind(this, mutex_op)?;
         mutex_get_id(this, mutex_op)?;
 
         // This might lead to false positives, see comment in pthread_mutexattr_destroy
diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.init.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.init.stderr
new file mode 100644
index 00000000000..5ca6acc3fbe
--- /dev/null
+++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.init.stderr
@@ -0,0 +1,20 @@
+error: Undefined Behavior: pthread_mutex_t can't be moved after first use
+  --> $DIR/libc_pthread_mutex_move.rs:LL:CC
+   |
+LL |         libc::pthread_mutex_lock(&mut m2 as *mut _);
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_mutex_t can't be moved after first use
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `check` at $DIR/libc_pthread_mutex_move.rs:LL:CC
+note: inside `main`
+  --> $DIR/libc_pthread_mutex_move.rs:LL:CC
+   |
+LL |     check();
+   |     ^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.rs b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.rs
new file mode 100644
index 00000000000..229335c97ce
--- /dev/null
+++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.rs
@@ -0,0 +1,28 @@
+//@ignore-target-windows: No pthreads on Windows
+//@revisions: static_initializer init
+
+fn main() {
+    check();
+}
+
+#[cfg(init)]
+fn check() {
+    unsafe {
+        let mut m: libc::pthread_mutex_t = std::mem::zeroed();
+        assert_eq!(libc::pthread_mutex_init(&mut m as *mut _, std::ptr::null()), 0);
+
+        let mut m2 = m; // move the mutex
+        libc::pthread_mutex_lock(&mut m2 as *mut _); //~[init] ERROR: pthread_mutex_t can't be moved after first use
+    }
+}
+
+#[cfg(static_initializer)]
+fn check() {
+    unsafe {
+        let mut m: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
+        libc::pthread_mutex_lock(&mut m as *mut _);
+
+        let mut m2 = m; // move the mutex
+        libc::pthread_mutex_unlock(&mut m2 as *mut _); //~[static_initializer] ERROR: pthread_mutex_t can't be moved after first use
+    }
+}
diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.static_initializer.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.static_initializer.stderr
new file mode 100644
index 00000000000..c3632eca43f
--- /dev/null
+++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_move.static_initializer.stderr
@@ -0,0 +1,20 @@
+error: Undefined Behavior: pthread_mutex_t can't be moved after first use
+  --> $DIR/libc_pthread_mutex_move.rs:LL:CC
+   |
+LL |         libc::pthread_mutex_unlock(&mut m2 as *mut _);
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_mutex_t can't be moved after first use
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `check` at $DIR/libc_pthread_mutex_move.rs:LL:CC
+note: inside `main`
+  --> $DIR/libc_pthread_mutex_move.rs:LL:CC
+   |
+LL |     check();
+   |     ^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/native-lib/fail/function_not_in_so.rs b/src/tools/miri/tests/native-lib/fail/function_not_in_so.rs
index 3540c75b73a..c532d052245 100644
--- a/src/tools/miri/tests/native-lib/fail/function_not_in_so.rs
+++ b/src/tools/miri/tests/native-lib/fail/function_not_in_so.rs
@@ -1,4 +1,6 @@
-//@only-target-linux
+// Only works on Unix targets
+//@ignore-target-windows
+//@ignore-target-wasm
 //@only-on-host
 //@normalize-stderr-test: "OS `.*`" -> "$$OS"
 
diff --git a/src/tools/miri/tests/native-lib/fail/private_function.rs b/src/tools/miri/tests/native-lib/fail/private_function.rs
new file mode 100644
index 00000000000..3c6fda741dd
--- /dev/null
+++ b/src/tools/miri/tests/native-lib/fail/private_function.rs
@@ -0,0 +1,15 @@
+// Only works on Unix targets
+//@ignore-target-windows
+//@ignore-target-wasm
+//@only-on-host
+//@normalize-stderr-test: "OS `.*`" -> "$$OS"
+
+extern "C" {
+    fn not_exported();
+}
+
+fn main() {
+    unsafe {
+        not_exported(); //~ ERROR: unsupported operation: can't call foreign function `not_exported`
+    }
+}
diff --git a/src/tools/miri/tests/native-lib/fail/private_function.stderr b/src/tools/miri/tests/native-lib/fail/private_function.stderr
new file mode 100644
index 00000000000..e27a501ebb9
--- /dev/null
+++ b/src/tools/miri/tests/native-lib/fail/private_function.stderr
@@ -0,0 +1,15 @@
+error: unsupported operation: can't call foreign function `not_exported` on $OS
+  --> $DIR/private_function.rs:LL:CC
+   |
+LL |         not_exported();
+   |         ^^^^^^^^^^^^^^ can't call foreign function `not_exported` on $OS
+   |
+   = help: if this is a basic API commonly used on this target, please report an issue with Miri
+   = help: however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/private_function.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/native-lib/native-lib.map b/src/tools/miri/tests/native-lib/native-lib.map
deleted file mode 100644
index 7e3bd19622a..00000000000
--- a/src/tools/miri/tests/native-lib/native-lib.map
+++ /dev/null
@@ -1,20 +0,0 @@
-CODEABI_1.0 {
-    # Define which symbols to export.
-    global:
-        # scalar_arguments.c
-        add_one_int;
-        printer;
-        test_stack_spill;
-        get_unsigned_int;
-        add_int16;
-        add_short_to_long;
-
-        # ptr_read_access.c
-        print_pointer;
-        access_simple;
-        access_nested;
-        access_static;
-
-    # The rest remains private.
-    local: *;
-};
diff --git a/src/tools/miri/tests/native-lib/pass/ptr_read_access.rs b/src/tools/miri/tests/native-lib/pass/ptr_read_access.rs
index d8e6209839e..2990dfa8975 100644
--- a/src/tools/miri/tests/native-lib/pass/ptr_read_access.rs
+++ b/src/tools/miri/tests/native-lib/pass/ptr_read_access.rs
@@ -1,4 +1,6 @@
-//@only-target-linux
+// Only works on Unix targets
+//@ignore-target-windows
+//@ignore-target-wasm
 //@only-on-host
 
 fn main() {
@@ -26,7 +28,7 @@ fn test_pointer() {
 fn test_simple() {
     #[repr(C)]
     struct Simple {
-        field: i32
+        field: i32,
     }
 
     extern "C" {
@@ -41,7 +43,7 @@ fn test_simple() {
 // Test function that dereferences nested struct pointers and accesses fields.
 fn test_nested() {
     use std::ptr::NonNull;
-    
+
     #[derive(Debug, PartialEq, Eq)]
     #[repr(C)]
     struct Nested {
@@ -62,7 +64,6 @@ fn test_nested() {
 
 // Test function that dereferences static struct pointers and accesses fields.
 fn test_static() {
-
     #[repr(C)]
     struct Static {
         value: i32,
@@ -72,11 +73,8 @@ fn test_static() {
     extern "C" {
         fn access_static(n_ptr: *const Static) -> i32;
     }
-    
-    static STATIC: Static = Static {
-        value: 9001,
-        recurse: &STATIC,
-    };
+
+    static STATIC: Static = Static { value: 9001, recurse: &STATIC };
 
     assert_eq!(unsafe { access_static(&STATIC) }, 9001);
 }
diff --git a/src/tools/miri/tests/native-lib/pass/scalar_arguments.rs b/src/tools/miri/tests/native-lib/pass/scalar_arguments.rs
index 1e1d0b11e99..378baa7ce98 100644
--- a/src/tools/miri/tests/native-lib/pass/scalar_arguments.rs
+++ b/src/tools/miri/tests/native-lib/pass/scalar_arguments.rs
@@ -1,4 +1,6 @@
-//@only-target-linux
+// Only works on Unix targets
+//@ignore-target-windows
+//@ignore-target-wasm
 //@only-on-host
 
 extern "C" {
diff --git a/src/tools/miri/tests/native-lib/ptr_read_access.c b/src/tools/miri/tests/native-lib/ptr_read_access.c
index 03b9189e2e8..540845d53a7 100644
--- a/src/tools/miri/tests/native-lib/ptr_read_access.c
+++ b/src/tools/miri/tests/native-lib/ptr_read_access.c
@@ -1,8 +1,11 @@
 #include <stdio.h>
 
+// See comments in build_native_lib()
+#define EXPORT __attribute__((visibility("default")))
+
 /* Test: test_pointer */
 
-void print_pointer(const int *ptr) {
+EXPORT void print_pointer(const int *ptr) {
   printf("printing pointer dereference from C: %d\n", *ptr);
 }
 
@@ -12,7 +15,7 @@ typedef struct Simple {
   int field;
 } Simple;
 
-int access_simple(const Simple *s_ptr) {
+EXPORT int access_simple(const Simple *s_ptr) {
   return s_ptr->field;
 }
 
@@ -24,7 +27,7 @@ typedef struct Nested {
 } Nested;
 
 // Returns the innermost/last value of a Nested pointer chain.
-int access_nested(const Nested *n_ptr) {
+EXPORT int access_nested(const Nested *n_ptr) {
   // Edge case: `n_ptr == NULL` (i.e. first Nested is None).
   if (!n_ptr) { return 0; }
 
@@ -42,6 +45,6 @@ typedef struct Static {
     struct Static *recurse;
 } Static;
 
-int access_static(const Static *s_ptr) {
+EXPORT int access_static(const Static *s_ptr) {
   return s_ptr->recurse->recurse->value;
 }
diff --git a/src/tools/miri/tests/native-lib/scalar_arguments.c b/src/tools/miri/tests/native-lib/scalar_arguments.c
index 68714f1743b..6da730a4987 100644
--- a/src/tools/miri/tests/native-lib/scalar_arguments.c
+++ b/src/tools/miri/tests/native-lib/scalar_arguments.c
@@ -1,27 +1,35 @@
 #include <stdio.h>
 
-int add_one_int(int x) {
+// See comments in build_native_lib()
+#define EXPORT __attribute__((visibility("default")))
+
+EXPORT int add_one_int(int x) {
   return 2 + x;
 }
 
-void printer() {
+EXPORT void printer(void) {
   printf("printing from C\n");
 }
 
 // function with many arguments, to test functionality when some args are stored
 // on the stack
-int test_stack_spill(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l) {
+EXPORT int test_stack_spill(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l) {
   return a+b+c+d+e+f+g+h+i+j+k+l;
 }
 
-unsigned int get_unsigned_int() {
+EXPORT unsigned int get_unsigned_int(void) {
   return -10;
 }
 
-short add_int16(short x) {
+EXPORT short add_int16(short x) {
   return x + 3;
 }
 
-long add_short_to_long(short x, long y) {
+EXPORT long add_short_to_long(short x, long y) {
   return x + y;
 }
+
+// To test that functions not marked with EXPORT cannot be called by Miri.
+int not_exported(void) {
+  return 0;
+}
diff --git a/src/tools/miri/tests/ui.rs b/src/tools/miri/tests/ui.rs
index c510ef95c30..e62394bc499 100644
--- a/src/tools/miri/tests/ui.rs
+++ b/src/tools/miri/tests/ui.rs
@@ -36,20 +36,21 @@ fn build_native_lib() -> PathBuf {
     // Create the directory if it does not already exist.
     std::fs::create_dir_all(&so_target_dir)
         .expect("Failed to create directory for shared object file");
-    let so_file_path = so_target_dir.join("native-lib.so");
+    // We use a platform-neutral file extension to avoid having to hard-code alternatives.
+    let native_lib_path = so_target_dir.join("native-lib.module");
     let cc_output = Command::new(cc)
         .args([
             "-shared",
+            "-fPIC",
+            // We hide all symbols by default and export just the ones we need
+            // This is to future-proof against a more complex shared object which eg defines its own malloc
+            // (but we wouldn't want miri to call that, as it would if it was exported).
+            "-fvisibility=hidden",
             "-o",
-            so_file_path.to_str().unwrap(),
+            native_lib_path.to_str().unwrap(),
             // FIXME: Automate gathering of all relevant C source files in the directory.
             "tests/native-lib/scalar_arguments.c",
             "tests/native-lib/ptr_read_access.c",
-            // Only add the functions specified in libcode.version to the shared object file.
-            // This is to avoid automatically adding `malloc`, etc.
-            // Source: https://anadoxin.org/blog/control-over-symbol-exports-in-gcc.html/
-            "-fPIC",
-            "-Wl,--version-script=tests/native-lib/native-lib.map",
             // Ensure we notice serious problems in the C code.
             "-Wall",
             "-Wextra",
@@ -64,7 +65,7 @@ fn build_native_lib() -> PathBuf {
             String::from_utf8_lossy(&cc_output.stderr),
         );
     }
-    so_file_path
+    native_lib_path
 }
 
 /// Does *not* set any args or env vars, since it is shared between the test runner and
@@ -300,7 +301,7 @@ fn main() -> Result<()> {
         WithDependencies,
         tmpdir.path(),
     )?;
-    if cfg!(target_os = "linux") {
+    if cfg!(unix) {
         ui(Mode::Pass, "tests/native-lib/pass", &target, WithoutDependencies, tmpdir.path())?;
         ui(
             Mode::Fail { require_patterns: true, rustfix: RustfixMode::Disabled },