about summary refs log tree commit diff
path: root/compiler/rustc_data_structures/src
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--compiler/rustc_data_structures/src/marker.rs2
-rw-r--r--compiler/rustc_data_structures/src/sharded.rs25
-rw-r--r--compiler/rustc_data_structures/src/sync.rs316
-rw-r--r--compiler/rustc_data_structures/src/sync/lock.rs276
-rw-r--r--compiler/rustc_data_structures/src/sync/worker_local.rs6
5 files changed, 392 insertions, 233 deletions
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index f8c06f9a814..b067f9d4502 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -92,7 +92,6 @@ cfg_if!(
             [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
             [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
             [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
-            [crate::sync::Lock<T> where T: DynSend]
             [crate::sync::RwLock<T> where T: DynSend]
             [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
             [rustc_arena::TypedArena<T> where T: DynSend]
@@ -171,7 +170,6 @@ cfg_if!(
             [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
             [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
             [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
-            [crate::sync::Lock<T> where T: DynSend]
             [crate::sync::RwLock<T> where T: DynSend + DynSync]
             [crate::sync::OneThread<T> where T]
             [crate::sync::WorkerLocal<T> where T: DynSend]
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 52ab5a7fb14..0f769c1f3bf 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -2,9 +2,12 @@ use crate::fx::{FxHashMap, FxHasher};
 #[cfg(parallel_compiler)]
 use crate::sync::{is_dyn_thread_safe, CacheAligned};
 use crate::sync::{Lock, LockGuard};
+#[cfg(parallel_compiler)]
+use itertools::Either;
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
+use std::iter;
 use std::mem;
 
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
@@ -70,19 +73,27 @@ impl<T> Sharded<T> {
         }
     }
 
-    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
+    #[inline]
+    pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
         match self {
-            Self::Single(single) => vec![single.lock()],
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.lock()),
+            #[cfg(parallel_compiler)]
+            Self::Single(single) => Either::Left(iter::once(single.lock())),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.lock()).collect(),
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
         }
     }
 
-    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
+    #[inline]
+    pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
         match self {
-            Self::Single(single) => Some(vec![single.try_lock()?]),
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.try_lock()),
+            #[cfg(parallel_compiler)]
+            Self::Single(single) => Either::Left(iter::once(single.try_lock())),
             #[cfg(parallel_compiler)]
-            Self::Shards(shards) => shards.iter().map(|shard| shard.0.try_lock()).collect(),
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
         }
     }
 }
@@ -101,7 +112,7 @@ pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
     pub fn len(&self) -> usize {
-        self.lock_shards().iter().map(|shard| shard.len()).sum()
+        self.lock_shards().map(|shard| shard.len()).sum()
     }
 }
 
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 25a08237346..2ec509b9118 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,7 +26,8 @@
 //! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
 //! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
 //! |                         |                     |                                 |
-//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
+//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
+//! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
 //! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
@@ -40,11 +41,16 @@
 //! [^2] `MTLockRef` is a typedef.
 
 pub use crate::marker::*;
+use parking_lot::Mutex;
+use std::any::Any;
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 use std::ops::{Deref, DerefMut};
 use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 
+mod lock;
+pub use lock::{Lock, LockGuard};
+
 mod worker_local;
 pub use worker_local::{Registry, WorkerLocal};
 
@@ -75,6 +81,13 @@ mod mode {
         }
     }
 
+    // Whether thread safety might be enabled.
+    #[inline]
+    #[cfg(parallel_compiler)]
+    pub fn might_be_dyn_thread_safe() -> bool {
+        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
+    }
+
     // Only set by the `-Z threads` compile option
     pub fn set_dyn_thread_safe_mode(mode: bool) {
         let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@@ -92,16 +105,48 @@ mod mode {
 
 pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
 
+/// A guard used to hold panics that occur during a parallel section to later be unwound.
+/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
+/// hiding errors by ensuring that everything in the section has completed executing before
+/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
+/// output matches the parallel compiler for testing purposes.
+pub struct ParallelGuard {
+    panic: Mutex<Option<Box<dyn Any + std::marker::Send + 'static>>>,
+}
+
+impl ParallelGuard {
+    pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
+        catch_unwind(AssertUnwindSafe(f))
+            .map_err(|err| {
+                *self.panic.lock() = Some(err);
+            })
+            .ok()
+    }
+}
+
+/// This gives access to a fresh parallel guard in the closure and will unwind any panics
+/// caught in it after the closure returns.
+#[inline]
+pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
+    let guard = ParallelGuard { panic: Mutex::new(None) };
+    let ret = f(&guard);
+    if let Some(panic) = guard.panic.into_inner() {
+        resume_unwind(panic);
+    }
+    ret
+}
+
 cfg_if! {
     if #[cfg(not(parallel_compiler))] {
+        use std::ops::Add;
+        use std::cell::Cell;
+
         pub unsafe auto trait Send {}
         pub unsafe auto trait Sync {}
 
         unsafe impl<T> Send for T {}
         unsafe impl<T> Sync for T {}
 
-        use std::ops::Add;
-
         /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
         /// It has explicit ordering arguments and is only intended for use with
         /// the native atomic types.
@@ -186,67 +231,38 @@ cfg_if! {
             where A: FnOnce() -> RA,
                   B: FnOnce() -> RB
         {
-            (oper_a(), oper_b())
+            let (a, b) = parallel_guard(|guard| {
+                let a = guard.run(oper_a);
+                let b = guard.run(oper_b);
+                (a, b)
+            });
+            (a.unwrap(), b.unwrap())
         }
 
         #[macro_export]
         macro_rules! parallel {
-            ($($blocks:block),*) => {
-                // We catch panics here ensuring that all the blocks execute.
-                // This makes behavior consistent with the parallel compiler.
-                let mut panic = None;
-                $(
-                    if let Err(p) = ::std::panic::catch_unwind(
-                        ::std::panic::AssertUnwindSafe(|| $blocks)
-                    ) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                )*
-                if let Some(panic) = panic {
-                    ::std::panic::resume_unwind(panic);
-                }
-            }
+            ($($blocks:block),*) => {{
+                $crate::sync::parallel_guard(|guard| {
+                    $(guard.run(|| $blocks);)*
+                });
+            }}
         }
 
         pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
-            // We catch panics here ensuring that all the loop iterations execute.
-            // This makes behavior consistent with the parallel compiler.
-            let mut panic = None;
-            t.into_iter().for_each(|i| {
-                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                    if panic.is_none() {
-                        panic = Some(p);
-                    }
-                }
-            });
-            if let Some(panic) = panic {
-                resume_unwind(panic);
-            }
+            parallel_guard(|guard| {
+                t.into_iter().for_each(|i| {
+                    guard.run(|| for_each(i));
+                });
+            })
         }
 
         pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
             t: T,
             mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
         ) -> C {
-            // We catch panics here ensuring that all the loop iterations execute.
-            let mut panic = None;
-            let r = t.into_iter().filter_map(|i| {
-                match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                    Ok(r) => Some(r),
-                    Err(p) => {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                        None
-                    }
-                }
-            }).collect();
-            if let Some(panic) = panic {
-                resume_unwind(panic);
-            }
-            r
+            parallel_guard(|guard| {
+                t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
+            })
         }
 
         pub use std::rc::Rc as Lrc;
@@ -255,15 +271,11 @@ cfg_if! {
         pub use std::cell::Ref as MappedReadGuard;
         pub use std::cell::RefMut as WriteGuard;
         pub use std::cell::RefMut as MappedWriteGuard;
-        pub use std::cell::RefMut as LockGuard;
         pub use std::cell::RefMut as MappedLockGuard;
 
         pub use std::cell::OnceCell;
 
         use std::cell::RefCell as InnerRwLock;
-        use std::cell::RefCell as InnerLock;
-
-        use std::cell::Cell;
 
         pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
 
@@ -313,7 +325,6 @@ cfg_if! {
         pub use parking_lot::RwLockWriteGuard as WriteGuard;
         pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
 
-        pub use parking_lot::MutexGuard as LockGuard;
         pub use parking_lot::MappedMutexGuard as MappedLockGuard;
 
         pub use std::sync::OnceLock as OnceCell;
@@ -355,7 +366,6 @@ cfg_if! {
             }
         }
 
-        use parking_lot::Mutex as InnerLock;
         use parking_lot::RwLock as InnerRwLock;
 
         use std::thread;
@@ -372,7 +382,12 @@ cfg_if! {
                 let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
                 (a.into_inner(), b.into_inner())
             } else {
-                (oper_a(), oper_b())
+                let (a, b) = parallel_guard(|guard| {
+                    let a = guard.run(oper_a);
+                    let b = guard.run(oper_b);
+                    (a, b)
+                });
+                (a.unwrap(), b.unwrap())
             }
         }
 
@@ -407,28 +422,10 @@ cfg_if! {
                     // of a single threaded rustc.
                     parallel!(impl $fblock [] [$($blocks),*]);
                 } else {
-                    // We catch panics here ensuring that all the blocks execute.
-                    // This makes behavior consistent with the parallel compiler.
-                    let mut panic = None;
-                    if let Err(p) = ::std::panic::catch_unwind(
-                        ::std::panic::AssertUnwindSafe(|| $fblock)
-                    ) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                    $(
-                        if let Err(p) = ::std::panic::catch_unwind(
-                            ::std::panic::AssertUnwindSafe(|| $blocks)
-                        ) {
-                            if panic.is_none() {
-                                panic = Some(p);
-                            }
-                        }
-                    )*
-                    if let Some(panic) = panic {
-                        ::std::panic::resume_unwind(panic);
-                    }
+                    $crate::sync::parallel_guard(|guard| {
+                        guard.run(|| $fblock);
+                        $(guard.run(|| $blocks);)*
+                    });
                 }
             };
         }
@@ -439,34 +436,18 @@ cfg_if! {
             t: T,
             for_each: impl Fn(I) + DynSync + DynSend
         ) {
-            if mode::is_dyn_thread_safe() {
-                let for_each = FromDyn::from(for_each);
-                let panic: Lock<Option<_>> = Lock::new(None);
-                t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                    let mut l = panic.lock();
-                    if l.is_none() {
-                        *l = Some(p)
-                    }
-                });
-
-                if let Some(panic) = panic.into_inner() {
-                    resume_unwind(panic);
-                }
-            } else {
-                // We catch panics here ensuring that all the loop iterations execute.
-                // This makes behavior consistent with the parallel compiler.
-                let mut panic = None;
-                t.into_iter().for_each(|i| {
-                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                });
-                if let Some(panic) = panic {
-                    resume_unwind(panic);
+            parallel_guard(|guard| {
+                if mode::is_dyn_thread_safe() {
+                    let for_each = FromDyn::from(for_each);
+                    t.into_par_iter().for_each(|i| {
+                        guard.run(|| for_each(i));
+                    });
+                } else {
+                    t.into_iter().for_each(|i| {
+                        guard.run(|| for_each(i));
+                    });
                 }
-            }
+            });
         }
 
         pub fn par_map<
@@ -478,46 +459,14 @@ cfg_if! {
             t: T,
             map: impl Fn(I) -> R + DynSync + DynSend
         ) -> C {
-            if mode::is_dyn_thread_safe() {
-                let panic: Lock<Option<_>> = Lock::new(None);
-                let map = FromDyn::from(map);
-                // We catch panics here ensuring that all the loop iterations execute.
-                let r = t.into_par_iter().filter_map(|i| {
-                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                        Ok(r) => Some(r),
-                        Err(p) => {
-                            let mut l = panic.lock();
-                            if l.is_none() {
-                                *l = Some(p);
-                            }
-                            None
-                        },
-                    }
-                }).collect();
-
-                if let Some(panic) = panic.into_inner() {
-                    resume_unwind(panic);
-                }
-                r
-            } else {
-                // We catch panics here ensuring that all the loop iterations execute.
-                let mut panic = None;
-                let r = t.into_iter().filter_map(|i| {
-                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                        Ok(r) => Some(r),
-                        Err(p) => {
-                            if panic.is_none() {
-                                panic = Some(p);
-                            }
-                            None
-                        }
-                    }
-                }).collect();
-                if let Some(panic) = panic {
-                    resume_unwind(panic);
+            parallel_guard(|guard| {
+                if mode::is_dyn_thread_safe() {
+                    let map = FromDyn::from(map);
+                    t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
+                } else {
+                    t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
                 }
-                r
-            }
+            })
         }
 
         /// This makes locks panic if they are already held.
@@ -542,81 +491,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
     }
 }
 
-#[derive(Debug)]
-pub struct Lock<T>(InnerLock<T>);
-
-impl<T> Lock<T> {
-    #[inline(always)]
-    pub fn new(inner: T) -> Self {
-        Lock(InnerLock::new(inner))
-    }
-
-    #[inline(always)]
-    pub fn into_inner(self) -> T {
-        self.0.into_inner()
-    }
-
-    #[inline(always)]
-    pub fn get_mut(&mut self) -> &mut T {
-        self.0.get_mut()
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-        self.0.try_lock()
-    }
-
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-        self.0.try_borrow_mut().ok()
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    #[track_caller]
-    pub fn lock(&self) -> LockGuard<'_, T> {
-        if ERROR_CHECKING {
-            self.0.try_lock().expect("lock was already held")
-        } else {
-            self.0.lock()
-        }
-    }
-
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    #[track_caller]
-    pub fn lock(&self) -> LockGuard<'_, T> {
-        self.0.borrow_mut()
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
-        f(&mut *self.lock())
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn borrow(&self) -> LockGuard<'_, T> {
-        self.lock()
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
-        self.lock()
-    }
-}
-
-impl<T: Default> Default for Lock<T> {
-    #[inline]
-    fn default() -> Self {
-        Lock::new(T::default())
-    }
-}
-
 #[derive(Debug, Default)]
 pub struct RwLock<T>(InnerRwLock<T>);
 
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
new file mode 100644
index 00000000000..62cd1b993de
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -0,0 +1,276 @@
+//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
+//!
+//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
+
+#[cfg(not(parallel_compiler))]
+use std::cell::RefCell;
+#[cfg(parallel_compiler)]
+use {
+    crate::cold_path,
+    crate::sync::DynSend,
+    crate::sync::DynSync,
+    parking_lot::lock_api::RawMutex,
+    std::cell::Cell,
+    std::cell::UnsafeCell,
+    std::fmt,
+    std::intrinsics::{likely, unlikely},
+    std::marker::PhantomData,
+    std::mem::ManuallyDrop,
+    std::ops::{Deref, DerefMut},
+};
+
+#[cfg(not(parallel_compiler))]
+pub use std::cell::RefMut as LockGuard;
+
+#[cfg(not(parallel_compiler))]
+#[derive(Debug)]
+pub struct Lock<T>(RefCell<T>);
+
+#[cfg(not(parallel_compiler))]
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        Lock(RefCell::new(inner))
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.0.get_mut()
+    }
+
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        self.0.try_borrow_mut().ok()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.0.borrow_mut()
+    }
+}
+
+/// A guard holding mutable access to a `Lock` which is in a locked state.
+#[cfg(parallel_compiler)]
+#[must_use = "if unused the Lock will immediately unlock"]
+pub struct LockGuard<'a, T> {
+    lock: &'a Lock<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        // SAFETY: We have shared access to the mutable access owned by this type,
+        // so we can give out a shared reference.
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+#[cfg(parallel_compiler)]
+impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: We know that the lock is in a locked
+        // state because it is an invariant of this type.
+        unsafe { self.lock.raw.unlock() };
+    }
+}
+
+#[cfg(parallel_compiler)]
+union LockRawUnion {
+    /// Indicates if the cell is locked. Only used if `LockRaw.sync` is false.
+    cell: ManuallyDrop<Cell<bool>>,
+
+    /// A lock implementation that's only used if `LockRaw.sync` is true.
+    lock: ManuallyDrop<parking_lot::RawMutex>,
+}
+
+/// A raw lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+/// It contains no associated data and is used in the implementation of `Lock` which does have such data.
+///
+/// A manual implementation of a tagged union is used with the `sync` field and the `LockRawUnion` instead
+/// of using enums as it results in better code generation.
+#[cfg(parallel_compiler)]
+struct LockRaw {
+    /// Indicates if synchronization is used via `opt.lock` if true,
+    /// or if a non-thread safe cell is used via `opt.cell`. This is set on initialization and never changed.
+    sync: bool,
+    opt: LockRawUnion,
+}
+
+#[cfg(parallel_compiler)]
+impl LockRaw {
+    fn new() -> Self {
+        if unlikely(super::mode::might_be_dyn_thread_safe()) {
+            // Create the lock with synchronization enabled using the `RawMutex` type.
+            LockRaw {
+                sync: true,
+                opt: LockRawUnion { lock: ManuallyDrop::new(parking_lot::RawMutex::INIT) },
+            }
+        } else {
+            // Create the lock with synchronization disabled.
+            LockRaw { sync: false, opt: LockRawUnion { cell: ManuallyDrop::new(Cell::new(false)) } }
+        }
+    }
+
+    #[inline(always)]
+    fn try_lock(&self) -> bool {
+        // SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
+        unsafe {
+            if likely(!self.sync) {
+                if self.opt.cell.get() {
+                    false
+                } else {
+                    self.opt.cell.set(true);
+                    true
+                }
+            } else {
+                self.opt.lock.try_lock()
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn lock(&self) {
+        if super::ERROR_CHECKING {
+            // We're in the debugging mode, so assert that the lock is not held so we
+            // get a panic instead of waiting for the lock.
+            assert_eq!(self.try_lock(), true, "lock must not be hold");
+        } else {
+            // SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
+            unsafe {
+                if likely(!self.sync) {
+                    if unlikely(self.opt.cell.replace(true)) {
+                        cold_path(|| panic!("lock was already held"))
+                    }
+                } else {
+                    self.opt.lock.lock();
+                }
+            }
+        }
+    }
+
+    /// This unlocks the lock.
+    ///
+    /// # Safety
+    /// This method may only be called if the lock is currently held.
+    #[inline(always)]
+    unsafe fn unlock(&self) {
+        // SAFETY: The union use is safe since the union fields are used in accordance with
+        // `self.sync` and the `unlock` method precondition is upheld by the caller.
+        unsafe {
+            if likely(!self.sync) {
+                debug_assert_eq!(self.opt.cell.get(), true);
+                self.opt.cell.set(false);
+            } else {
+                self.opt.lock.unlock();
+            }
+        }
+    }
+}
+
+/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+#[cfg(parallel_compiler)]
+pub struct Lock<T> {
+    raw: LockRaw,
+    data: UnsafeCell<T>,
+}
+
+#[cfg(parallel_compiler)]
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        Lock { raw: LockRaw::new(), data: UnsafeCell::new(inner) }
+    }
+
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
+
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        if self.raw.try_lock() { Some(LockGuard { lock: self, marker: PhantomData }) } else { None }
+    }
+
+    #[inline(always)]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.raw.lock();
+        LockGuard { lock: self, marker: PhantomData }
+    }
+}
+
+impl<T> Lock<T> {
+    #[inline(always)]
+    #[track_caller]
+    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        f(&mut *self.lock())
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSend> DynSend for Lock<T> {}
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSend> DynSync for Lock<T> {}
+
+#[cfg(parallel_compiler)]
+impl<T: fmt::Debug> fmt::Debug for Lock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
+            }
+        }
+    }
+}
+
+impl<T: Default> Default for Lock<T> {
+    #[inline]
+    fn default() -> Self {
+        Lock::new(T::default())
+    }
+}
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 27d687c3cfe..1f838cc4648 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -1,4 +1,4 @@
-use crate::sync::Lock;
+use parking_lot::Mutex;
 use std::cell::Cell;
 use std::cell::OnceCell;
 use std::ops::Deref;
@@ -35,7 +35,7 @@ impl RegistryId {
 
 struct RegistryData {
     thread_limit: usize,
-    threads: Lock<usize>,
+    threads: Mutex<usize>,
 }
 
 /// Represents a list of threads which can access worker locals.
@@ -65,7 +65,7 @@ thread_local! {
 impl Registry {
     /// Creates a registry which can hold up to `thread_limit` threads.
     pub fn new(thread_limit: usize) -> Self {
-        Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+        Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
     }
 
     /// Gets the registry associated with the current thread. Panics if there's no such registry.