Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--  compiler/rustc_data_structures/Cargo.toml                |   9
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs                |   1
-rw-r--r--  compiler/rustc_data_structures/src/marker.rs             | 341
-rw-r--r--  compiler/rustc_data_structures/src/owned_slice.rs        |   2
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs            |  21
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs               | 282
-rw-r--r--  compiler/rustc_data_structures/src/sync/freeze.rs        |   5
-rw-r--r--  compiler/rustc_data_structures/src/sync/lock.rs          | 309
-rw-r--r--  compiler/rustc_data_structures/src/sync/parallel.rs      | 236
-rw-r--r--  compiler/rustc_data_structures/src/sync/vec.rs           |  21
-rw-r--r--  compiler/rustc_data_structures/src/sync/worker_local.rs  |  44
11 files changed, 430 insertions, 841 deletions
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 5a477143a62..c8ecddb046c 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -10,11 +10,11 @@ bitflags = "2.4.1"
 either = "1.0"
 elsa = "=1.7.1"
 ena = "0.14.3"
-indexmap = { version = "2.4.0" }
+indexmap = { version = "2.4.0", features = ["rustc-rayon"] }
 jobserver_crate = { version = "0.1.28", package = "jobserver" }
 measureme = "11"
 rustc-hash = "2.0.0"
-rustc-rayon = { version = "0.5.0", optional = true }
+rustc-rayon = "0.5.0"
 rustc-stable-hash = { version = "0.1.0", features = ["nightly"] }
 rustc_arena = { path = "../rustc_arena" }
 rustc_graphviz = { path = "../rustc_graphviz" }
@@ -53,8 +53,3 @@ memmap2 = "0.2.1"
 
 [target.'cfg(not(target_has_atomic = "64"))'.dependencies]
 portable-atomic = "1.5.1"
-
-[features]
-# tidy-alphabetical-start
-rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "dep:rustc-rayon"]
-# tidy-alphabetical-end
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index afac08ae6f8..bede4c49703 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -10,7 +10,6 @@
 #![allow(internal_features)]
 #![allow(rustc::default_hash_types)]
 #![allow(rustc::potential_query_instability)]
-#![cfg_attr(not(parallel_compiler), feature(cell_leak))]
 #![deny(unsafe_op_in_unsafe_fn)]
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![doc(rust_logo)]
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index 83fdaff515b..2b629024bfe 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -1,194 +1,162 @@
-cfg_match! {
-    cfg(not(parallel_compiler)) => {
-        pub auto trait DynSend {}
-        pub auto trait DynSync {}
-
-        impl<T> DynSend for T {}
-        impl<T> DynSync for T {}
-    }
-    _ => {
-        #[rustc_on_unimplemented(
-            message = "`{Self}` doesn't implement `DynSend`. \
-            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`"
-        )]
-        // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
-        // is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a
-        // `Send` type in `IntoDynSyncSend` will create a `DynSend` type.
-        pub unsafe auto trait DynSend {}
-
-        #[rustc_on_unimplemented(
-            message = "`{Self}` doesn't implement `DynSync`. \
-            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`"
-        )]
-        // This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()`
-        // is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a
-        // `Sync` type in `IntoDynSyncSend` will create a `DynSync` type.
-        pub unsafe auto trait DynSync {}
-
-        // Same with `Sync` and `Send`.
-        unsafe impl<T: DynSync + ?Sized> DynSend for &T {}
-
-        macro_rules! impls_dyn_send_neg {
-            ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
-                $(impl$(<$($generics1)*>)? !DynSend for $t1 {})*
-            };
-        }
-
-        // Consistent with `std`
-        impls_dyn_send_neg!(
-            [std::env::Args]
-            [std::env::ArgsOs]
-            [*const T where T: ?Sized]
-            [*mut T where T: ?Sized]
-            [std::ptr::NonNull<T> where T: ?Sized]
-            [std::rc::Rc<T> where T: ?Sized]
-            [std::rc::Weak<T> where T: ?Sized]
-            [std::sync::MutexGuard<'_, T> where T: ?Sized]
-            [std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
-            [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
-            [std::io::StdoutLock<'_>]
-            [std::io::StderrLock<'_>]
-        );
-
-        #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
-        // Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
-        impl !DynSend for std::env::VarsOs {}
-
-        macro_rules! already_send {
-            ($([$ty: ty])*) => {
-                $(unsafe impl DynSend for $ty where $ty: Send {})*
-            };
-        }
-
-        // These structures are already `Send`.
-        already_send!(
-            [std::backtrace::Backtrace]
-            [std::io::Stdout]
-            [std::io::Stderr]
-            [std::io::Error]
-            [std::fs::File]
-            [rustc_arena::DroplessArena]
-            [crate::memmap::Mmap]
-            [crate::profiling::SelfProfiler]
-            [crate::owned_slice::OwnedSlice]
-        );
-
-        macro_rules! impl_dyn_send {
-            ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
-                $(unsafe impl<$($generics2)*> DynSend for $ty {})*
-            };
-        }
-
-        impl_dyn_send!(
-            [std::sync::atomic::AtomicPtr<T> where T]
-            [std::sync::Mutex<T> where T: ?Sized+ DynSend]
-            [std::sync::mpsc::Sender<T> where T: DynSend]
-            [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
-            [std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
-            [std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
-            [std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
-            [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
-            [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
-            [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
-            [crate::sync::RwLock<T> where T: DynSend]
-            [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
-            [rustc_arena::TypedArena<T> where T: DynSend]
-            [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
-            [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
-            [thin_vec::ThinVec<T> where T: DynSend]
-            [smallvec::SmallVec<A> where A: smallvec::Array + DynSend]
-        );
-
-        macro_rules! impls_dyn_sync_neg {
-            ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
-                $(impl$(<$($generics1)*>)? !DynSync for $t1 {})*
-            };
-        }
-
-        // Consistent with `std`
-        impls_dyn_sync_neg!(
-            [std::env::Args]
-            [std::env::ArgsOs]
-            [*const T where T: ?Sized]
-            [*mut T where T: ?Sized]
-            [std::cell::Cell<T> where T: ?Sized]
-            [std::cell::RefCell<T> where T: ?Sized]
-            [std::cell::UnsafeCell<T> where T: ?Sized]
-            [std::ptr::NonNull<T> where T: ?Sized]
-            [std::rc::Rc<T> where T: ?Sized]
-            [std::rc::Weak<T> where T: ?Sized]
-            [std::cell::OnceCell<T> where T]
-            [std::sync::mpsc::Receiver<T> where T]
-            [std::sync::mpsc::Sender<T> where T]
-        );
-
-        #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
-        // Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
-        impl !DynSync for std::env::VarsOs {}
-
-        macro_rules! already_sync {
-            ($([$ty: ty])*) => {
-                $(unsafe impl DynSync for $ty where $ty: Sync {})*
-            };
-        }
+#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. \
+            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")]
+// This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()`
+// is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a
+// `Send` type in `IntoDynSyncSend` will create a `DynSend` type.
+pub unsafe auto trait DynSend {}
+
+#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSync`. \
+            Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`")]
+// This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()`
+// is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a
+// `Sync` type in `IntoDynSyncSend` will create a `DynSync` type.
+pub unsafe auto trait DynSync {}
+
+// Same with `Sync` and `Send`.
+unsafe impl<T: DynSync + ?Sized> DynSend for &T {}
+
+macro_rules! impls_dyn_send_neg {
+    ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
+        $(impl$(<$($generics1)*>)? !DynSend for $t1 {})*
+    };
+}
 
-        // These structures are already `Sync`.
-        already_sync!(
-            [std::sync::atomic::AtomicBool]
-            [std::sync::atomic::AtomicUsize]
-            [std::sync::atomic::AtomicU8]
-            [std::sync::atomic::AtomicU32]
-            [std::backtrace::Backtrace]
-            [std::io::Error]
-            [std::fs::File]
-            [jobserver_crate::Client]
-            [crate::memmap::Mmap]
-            [crate::profiling::SelfProfiler]
-            [crate::owned_slice::OwnedSlice]
-        );
+// Consistent with `std`
+impls_dyn_send_neg!(
+    [std::env::Args]
+    [std::env::ArgsOs]
+    [*const T where T: ?Sized]
+    [*mut T where T: ?Sized]
+    [std::ptr::NonNull<T> where T: ?Sized]
+    [std::rc::Rc<T> where T: ?Sized]
+    [std::rc::Weak<T> where T: ?Sized]
+    [std::sync::MutexGuard<'_, T> where T: ?Sized]
+    [std::sync::RwLockReadGuard<'_, T> where T: ?Sized]
+    [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized]
+    [std::io::StdoutLock<'_>]
+    [std::io::StderrLock<'_>]
+);
+
+#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
+// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
+impl !DynSend for std::env::VarsOs {}
+
+macro_rules! already_send {
+    ($([$ty: ty])*) => {
+        $(unsafe impl DynSend for $ty where $ty: Send {})*
+    };
+}
 
-        // Use portable AtomicU64 for targets without native 64-bit atomics
-        #[cfg(target_has_atomic = "64")]
-        already_sync!(
-            [std::sync::atomic::AtomicU64]
-        );
+// These structures are already `Send`.
+already_send!(
+    [std::backtrace::Backtrace][std::io::Stdout][std::io::Stderr][std::io::Error][std::fs::File]
+        [rustc_arena::DroplessArena][crate::memmap::Mmap][crate::profiling::SelfProfiler]
+        [crate::owned_slice::OwnedSlice]
+);
+
+macro_rules! impl_dyn_send {
+    ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
+        $(unsafe impl<$($generics2)*> DynSend for $ty {})*
+    };
+}
 
-        #[cfg(not(target_has_atomic = "64"))]
-        already_sync!(
-            [portable_atomic::AtomicU64]
-        );
+impl_dyn_send!(
+    [std::sync::atomic::AtomicPtr<T> where T]
+    [std::sync::Mutex<T> where T: ?Sized+ DynSend]
+    [std::sync::mpsc::Sender<T> where T: DynSend]
+    [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
+    [std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
+    [std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
+    [std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
+    [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
+    [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
+    [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
+    [crate::sync::RwLock<T> where T: DynSend]
+    [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
+    [rustc_arena::TypedArena<T> where T: DynSend]
+    [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend]
+    [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
+    [thin_vec::ThinVec<T> where T: DynSend]
+    [smallvec::SmallVec<A> where A: smallvec::Array + DynSend]
+);
+
+macro_rules! impls_dyn_sync_neg {
+    ($([$t1: ty $(where $($generics1: tt)*)?])*) => {
+        $(impl$(<$($generics1)*>)? !DynSync for $t1 {})*
+    };
+}
 
-        macro_rules! impl_dyn_sync {
-            ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
-                $(unsafe impl<$($generics2)*> DynSync for $ty {})*
-            };
-        }
+// Consistent with `std`
+impls_dyn_sync_neg!(
+    [std::env::Args]
+    [std::env::ArgsOs]
+    [*const T where T: ?Sized]
+    [*mut T where T: ?Sized]
+    [std::cell::Cell<T> where T: ?Sized]
+    [std::cell::RefCell<T> where T: ?Sized]
+    [std::cell::UnsafeCell<T> where T: ?Sized]
+    [std::ptr::NonNull<T> where T: ?Sized]
+    [std::rc::Rc<T> where T: ?Sized]
+    [std::rc::Weak<T> where T: ?Sized]
+    [std::cell::OnceCell<T> where T]
+    [std::sync::mpsc::Receiver<T> where T]
+    [std::sync::mpsc::Sender<T> where T]
+);
+
+#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
+// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms
+impl !DynSync for std::env::VarsOs {}
+
+macro_rules! already_sync {
+    ($([$ty: ty])*) => {
+        $(unsafe impl DynSync for $ty where $ty: Sync {})*
+    };
+}
 
-        impl_dyn_sync!(
-            [std::sync::atomic::AtomicPtr<T> where T]
-            [std::sync::OnceLock<T> where T: DynSend + DynSync]
-            [std::sync::Mutex<T> where T: ?Sized + DynSend]
-            [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
-            [std::sync::LazyLock<T, F> where T: DynSend + DynSync, F: DynSend]
-            [std::collections::HashSet<K, S> where K: DynSync, S: DynSync]
-            [std::collections::HashMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
-            [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
-            [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
-            [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
-            [crate::sync::RwLock<T> where T: DynSend + DynSync]
-            [crate::sync::WorkerLocal<T> where T: DynSend]
-            [crate::intern::Interned<'a, T> where 'a, T: DynSync]
-            [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool]
-            [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
-            [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
-            [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
-            [indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
-            [smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
-            [thin_vec::ThinVec<T> where T: DynSync]
-        );
-    }
+// These structures are already `Sync`.
+already_sync!(
+    [std::sync::atomic::AtomicBool][std::sync::atomic::AtomicUsize][std::sync::atomic::AtomicU8]
+        [std::sync::atomic::AtomicU32][std::backtrace::Backtrace][std::io::Error][std::fs::File]
+        [jobserver_crate::Client][crate::memmap::Mmap][crate::profiling::SelfProfiler]
+        [crate::owned_slice::OwnedSlice]
+);
+
+// Use portable AtomicU64 for targets without native 64-bit atomics
+#[cfg(target_has_atomic = "64")]
+already_sync!([std::sync::atomic::AtomicU64]);
+
+#[cfg(not(target_has_atomic = "64"))]
+already_sync!([portable_atomic::AtomicU64]);
+
+macro_rules! impl_dyn_sync {
+    ($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
+        $(unsafe impl<$($generics2)*> DynSync for $ty {})*
+    };
 }
 
+impl_dyn_sync!(
+    [std::sync::atomic::AtomicPtr<T> where T]
+    [std::sync::OnceLock<T> where T: DynSend + DynSync]
+    [std::sync::Mutex<T> where T: ?Sized + DynSend]
+    [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
+    [std::sync::LazyLock<T, F> where T: DynSend + DynSync, F: DynSend]
+    [std::collections::HashSet<K, S> where K: DynSync, S: DynSync]
+    [std::collections::HashMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
+    [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
+    [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
+    [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
+    [crate::sync::RwLock<T> where T: DynSend + DynSync]
+    [crate::sync::WorkerLocal<T> where T: DynSend]
+    [crate::intern::Interned<'a, T> where 'a, T: DynSync]
+    [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool]
+    [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend]
+    [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync]
+    [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync]
+    [indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync]
+    [smallvec::SmallVec<A> where A: smallvec::Array + DynSync]
+    [thin_vec::ThinVec<T> where T: DynSync]
+);
+
 pub fn assert_dyn_sync<T: ?Sized + DynSync>() {}
 pub fn assert_dyn_send<T: ?Sized + DynSend>() {}
 pub fn assert_dyn_send_val<T: ?Sized + DynSend>(_t: &T) {}
@@ -203,7 +171,6 @@ impl<T> FromDyn<T> {
         // Check that `sync::is_dyn_thread_safe()` is true on creation so we can
         // implement `Send` and `Sync` for this structure when `T`
         // implements `DynSend` and `DynSync` respectively.
-        #[cfg(parallel_compiler)]
         assert!(crate::sync::is_dyn_thread_safe());
         FromDyn(val)
     }
@@ -215,11 +182,9 @@ impl<T> FromDyn<T> {
 }
 
 // `FromDyn` is `Send` if `T` is `DynSend`, since it ensures that sync::is_dyn_thread_safe() is true.
-#[cfg(parallel_compiler)]
 unsafe impl<T: DynSend> Send for FromDyn<T> {}
 
 // `FromDyn` is `Sync` if `T` is `DynSync`, since it ensures that sync::is_dyn_thread_safe() is true.
-#[cfg(parallel_compiler)]
 unsafe impl<T: DynSync> Sync for FromDyn<T> {}
 
 impl<T> std::ops::Deref for FromDyn<T> {
@@ -237,9 +202,7 @@ impl<T> std::ops::Deref for FromDyn<T> {
 #[derive(Copy, Clone)]
 pub struct IntoDynSyncSend<T: ?Sized>(pub T);
 
-#[cfg(parallel_compiler)]
 unsafe impl<T: ?Sized + Send> DynSend for IntoDynSyncSend<T> {}
-#[cfg(parallel_compiler)]
 unsafe impl<T: ?Sized + Sync> DynSync for IntoDynSyncSend<T> {}
 
 impl<T> std::ops::Deref for IntoDynSyncSend<T> {
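The markers above pair with the `FromDyn` and `IntoDynSyncSend` wrappers defined
later in this file. A minimal usage sketch (illustrative only, not part of this
patch; it assumes the usual `rustc_data_structures::sync` re-exports and a
session where `is_dyn_thread_safe()` is true, since `FromDyn::from` asserts
that at runtime):

use rustc_data_structures::sync::{DynSend, FromDyn, IntoDynSyncSend};

// `FromDyn<T>` is `Send` when `T: DynSend`, so it can cross a real thread
// boundary; the runtime assertion in `FromDyn::from` is what makes the
// `unsafe impl Send` sound.
fn send_to_worker<T: DynSend + 'static>(value: T) -> std::thread::JoinHandle<()> {
    let value = FromDyn::from(value);
    std::thread::spawn(move || drop(value.into_inner()))
}

// Conversely, a type already known to be `Send + Sync` can be declared
// `DynSend + DynSync` by wrapping it.
fn mark<T: Send + Sync>(value: T) -> IntoDynSyncSend<T> {
    IntoDynSyncSend(value)
}
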
diff --git a/compiler/rustc_data_structures/src/owned_slice.rs b/compiler/rustc_data_structures/src/owned_slice.rs
index bbe6691e548..c8be0ab52e9 100644
--- a/compiler/rustc_data_structures/src/owned_slice.rs
+++ b/compiler/rustc_data_structures/src/owned_slice.rs
@@ -139,11 +139,9 @@ impl Borrow<[u8]> for OwnedSlice {
 }
 
 // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Send`
-#[cfg(parallel_compiler)]
 unsafe impl sync::Send for OwnedSlice {}
 
 // Safety: `OwnedSlice` is conceptually `(&'self.1 [u8], Arc<dyn Send + Sync>)`, which is `Sync`
-#[cfg(parallel_compiler)]
 unsafe impl sync::Sync for OwnedSlice {}
 
 #[cfg(test)]
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index d0b6fe2bc6f..65488c73d3c 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -3,27 +3,22 @@ use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
 use std::{iter, mem};
 
-#[cfg(parallel_compiler)]
 use either::Either;
 
 use crate::fx::{FxHashMap, FxHasher};
-#[cfg(parallel_compiler)]
-use crate::sync::{CacheAligned, is_dyn_thread_safe};
-use crate::sync::{Lock, LockGuard, Mode};
+use crate::sync::{CacheAligned, Lock, LockGuard, Mode, is_dyn_thread_safe};
 
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
 // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
 // may also affect the ideal number of shards.
 const SHARD_BITS: usize = 5;
 
-#[cfg(parallel_compiler)]
 const SHARDS: usize = 1 << SHARD_BITS;
 
 /// An array of cache-line aligned inner locked structures with convenience methods.
 /// A single field is used when the compiler uses only one thread.
 pub enum Sharded<T> {
     Single(Lock<T>),
-    #[cfg(parallel_compiler)]
     Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>),
 }
 
@@ -37,7 +32,6 @@ impl<T: Default> Default for Sharded<T> {
 impl<T> Sharded<T> {
     #[inline]
     pub fn new(mut value: impl FnMut() -> T) -> Self {
-        #[cfg(parallel_compiler)]
         if is_dyn_thread_safe() {
             return Sharded::Shards(Box::new(
                 [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
@@ -52,7 +46,6 @@ impl<T> Sharded<T> {
     pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            #[cfg(parallel_compiler)]
             Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
         }
     }
@@ -66,7 +59,6 @@ impl<T> Sharded<T> {
     pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            #[cfg(parallel_compiler)]
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
                 unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
@@ -87,7 +79,6 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            #[cfg(parallel_compiler)]
             Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
         }
     }
@@ -110,7 +101,6 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            #[cfg(parallel_compiler)]
             Self::Shards(shards) => {
                 // Synchronization is enabled so use the `lock_assume_sync` method optimized
                 // for that case.
@@ -127,11 +117,7 @@ impl<T> Sharded<T> {
     #[inline]
     pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
         match self {
-            #[cfg(not(parallel_compiler))]
-            Self::Single(single) => iter::once(single.lock()),
-            #[cfg(parallel_compiler)]
             Self::Single(single) => Either::Left(iter::once(single.lock())),
-            #[cfg(parallel_compiler)]
             Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
         }
     }
@@ -139,11 +125,7 @@ impl<T> Sharded<T> {
     #[inline]
     pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
         match self {
-            #[cfg(not(parallel_compiler))]
-            Self::Single(single) => iter::once(single.try_lock()),
-            #[cfg(parallel_compiler)]
             Self::Single(single) => Either::Left(iter::once(single.try_lock())),
-            #[cfg(parallel_compiler)]
             Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
         }
     }
@@ -151,7 +133,6 @@ impl<T> Sharded<T> {
 
 #[inline]
 pub fn shards() -> usize {
-    #[cfg(parallel_compiler)]
     if is_dyn_thread_safe() {
         return SHARDS;
     }
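A sketch of how `Sharded` is typically used as a locked cache (illustrative
only, not from this patch; `lock_shard_by_value` is the accessor whose body the
hunk above shows):

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded;

fn cache_insert(cache: &Sharded<FxHashMap<u32, &'static str>>) {
    // With one thread this locks the single inner `Lock`; otherwise the
    // key is hashed to pick one of the 32 cache-line-aligned shards.
    let mut shard = cache.lock_shard_by_value(&42u32);
    shard.insert(42, "answer");
}
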
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index a3491dbfec7..7a9533031f4 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -54,9 +54,7 @@ mod worker_local;
 pub use worker_local::{Registry, WorkerLocal};
 
 mod parallel;
-#[cfg(parallel_compiler)]
-pub use parallel::scope;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, try_par_for_each_in};
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
 pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
 
 mod vec;
@@ -104,226 +102,66 @@ mod mode {
     }
 }
 
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-
-cfg_match! {
-    cfg(not(parallel_compiler)) => {
-        use std::ops::Add;
-        use std::cell::Cell;
-        use std::sync::atomic::Ordering;
-
-        pub unsafe auto trait Send {}
-        pub unsafe auto trait Sync {}
-
-        unsafe impl<T> Send for T {}
-        unsafe impl<T> Sync for T {}
-
-        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
-        /// It has explicit ordering arguments and is only intended for use with
-        /// the native atomic types.
-        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
-        /// as it's not intended to be used separately.
-        #[derive(Debug, Default)]
-        pub struct Atomic<T: Copy>(Cell<T>);
-
-        impl<T: Copy> Atomic<T> {
-            #[inline]
-            pub fn new(v: T) -> Self {
-                Atomic(Cell::new(v))
-            }
-
-            #[inline]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline]
-            pub fn load(&self, _: Ordering) -> T {
-                self.0.get()
-            }
-
-            #[inline]
-            pub fn store(&self, val: T, _: Ordering) {
-                self.0.set(val)
-            }
-
-            #[inline]
-            pub fn swap(&self, val: T, _: Ordering) -> T {
-                self.0.replace(val)
-            }
-        }
+// FIXME(parallel_compiler): Get rid of these aliases across the compiler.
 
-        impl Atomic<bool> {
-            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
-                let old = self.0.get();
-                self.0.set(val | old);
-                old
-            }
-            pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
-                let old = self.0.get();
-                self.0.set(val & old);
-                old
-            }
-        }
+pub use std::marker::{Send, Sync};
+// Use portable AtomicU64 for targets without native 64-bit atomics
+#[cfg(target_has_atomic = "64")]
+pub use std::sync::atomic::AtomicU64;
+pub use std::sync::atomic::{AtomicBool, AtomicU32, AtomicUsize};
+pub use std::sync::{Arc as Lrc, OnceLock, Weak};
 
-        impl<T: Copy + PartialEq> Atomic<T> {
-            #[inline]
-            pub fn compare_exchange(&self,
-                                    current: T,
-                                    new: T,
-                                    _: Ordering,
-                                    _: Ordering)
-                                    -> Result<T, T> {
-                let read = self.0.get();
-                if read == current {
-                    self.0.set(new);
-                    Ok(read)
-                } else {
-                    Err(read)
-                }
-            }
-        }
+pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use parking_lot::{
+    MappedMutexGuard as MappedLockGuard, MappedRwLockReadGuard as MappedReadGuard,
+    MappedRwLockWriteGuard as MappedWriteGuard, RwLockReadGuard as ReadGuard,
+    RwLockWriteGuard as WriteGuard,
+};
+#[cfg(not(target_has_atomic = "64"))]
+pub use portable_atomic::AtomicU64;
 
-        impl<T: Add<Output=T> + Copy> Atomic<T> {
-            #[inline]
-            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
-                let old = self.0.get();
-                self.0.set(old + val);
-                old
-            }
-        }
+pub type LRef<'a, T> = &'a T;
 
-        pub type AtomicUsize = Atomic<usize>;
-        pub type AtomicBool = Atomic<bool>;
-        pub type AtomicU32 = Atomic<u32>;
-        pub type AtomicU64 = Atomic<u64>;
-
-        pub use std::rc::Rc as Lrc;
-        pub use std::rc::Weak as Weak;
-        #[doc(no_inline)]
-        pub use std::cell::Ref as ReadGuard;
-        #[doc(no_inline)]
-        pub use std::cell::Ref as MappedReadGuard;
-        #[doc(no_inline)]
-        pub use std::cell::RefMut as WriteGuard;
-        #[doc(no_inline)]
-        pub use std::cell::RefMut as MappedWriteGuard;
-        #[doc(no_inline)]
-        pub use std::cell::RefMut as MappedLockGuard;
-
-        pub use std::cell::OnceCell as OnceLock;
-
-        use std::cell::RefCell as InnerRwLock;
-
-        pub type LRef<'a, T> = &'a mut T;
-
-        #[derive(Debug, Default)]
-        pub struct MTLock<T>(T);
-
-        impl<T> MTLock<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                MTLock(inner)
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> &mut T {
-                &mut self.0
-            }
-
-            #[inline(always)]
-            pub fn lock(&self) -> &T {
-                &self.0
-            }
-
-            #[inline(always)]
-            pub fn lock_mut(&mut self) -> &mut T {
-                &mut self.0
-            }
-        }
+#[derive(Debug, Default)]
+pub struct MTLock<T>(Lock<T>);
 
-        // FIXME: Probably a bad idea (in the threaded case)
-        impl<T: Clone> Clone for MTLock<T> {
-            #[inline]
-            fn clone(&self) -> Self {
-                MTLock(self.0.clone())
-            }
-        }
+impl<T> MTLock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        MTLock(Lock::new(inner))
     }
-    _ => {
-        pub use std::marker::Send as Send;
-        pub use std::marker::Sync as Sync;
-
-        pub use parking_lot::RwLockReadGuard as ReadGuard;
-        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
-        pub use parking_lot::RwLockWriteGuard as WriteGuard;
-        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
-
-        pub use parking_lot::MappedMutexGuard as MappedLockGuard;
 
-        pub use std::sync::OnceLock;
-
-        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32};
-
-        // Use portable AtomicU64 for targets without native 64-bit atomics
-        #[cfg(target_has_atomic = "64")]
-        pub use std::sync::atomic::AtomicU64;
-
-        #[cfg(not(target_has_atomic = "64"))]
-        pub use portable_atomic::AtomicU64;
-
-        pub use std::sync::Arc as Lrc;
-        pub use std::sync::Weak as Weak;
-
-        pub type LRef<'a, T> = &'a T;
-
-        #[derive(Debug, Default)]
-        pub struct MTLock<T>(Lock<T>);
-
-        impl<T> MTLock<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                MTLock(Lock::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> &mut T {
-                self.0.get_mut()
-            }
-
-            #[inline(always)]
-            pub fn lock(&self) -> LockGuard<'_, T> {
-                self.0.lock()
-            }
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner()
+    }
 
-            #[inline(always)]
-            pub fn lock_mut(&self) -> LockGuard<'_, T> {
-                self.lock()
-            }
-        }
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.0.get_mut()
+    }
 
-        use parking_lot::RwLock as InnerRwLock;
+    #[inline(always)]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        self.0.lock()
+    }
 
-        /// This makes locks panic if they are already held.
-        /// It is only useful when you are running in a single thread
-        const ERROR_CHECKING: bool = false;
+    #[inline(always)]
+    pub fn lock_mut(&self) -> LockGuard<'_, T> {
+        self.lock()
     }
 }
 
+use parking_lot::RwLock as InnerRwLock;
+
+/// This makes locks panic if they are already held.
+/// It is only useful when you are running in a single thread
+const ERROR_CHECKING: bool = false;
+
 pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
 
 #[derive(Default)]
-#[cfg_attr(parallel_compiler, repr(align(64)))]
+#[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
 
 pub trait HashMapExt<K, V> {
@@ -357,14 +195,6 @@ impl<T> RwLock<T> {
         self.0.get_mut()
     }
 
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    #[track_caller]
-    pub fn read(&self) -> ReadGuard<'_, T> {
-        self.0.borrow()
-    }
-
-    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn read(&self) -> ReadGuard<'_, T> {
         if ERROR_CHECKING {
@@ -380,26 +210,11 @@ impl<T> RwLock<T> {
         f(&*self.read())
     }
 
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
-        self.0.try_borrow_mut().map_err(|_| ())
-    }
-
-    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
         self.0.try_write().ok_or(())
     }
 
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    #[track_caller]
-    pub fn write(&self) -> WriteGuard<'_, T> {
-        self.0.borrow_mut()
-    }
-
-    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn write(&self) -> WriteGuard<'_, T> {
         if ERROR_CHECKING {
@@ -427,13 +242,6 @@ impl<T> RwLock<T> {
         self.write()
     }
 
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    pub fn leak(&self) -> &T {
-        ReadGuard::leak(self.read())
-    }
-
-    #[cfg(parallel_compiler)]
     #[inline(always)]
     pub fn leak(&self) -> &T {
         let guard = self.read();
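With the `cfg` gone, `MTLock` is always `Lock`-backed and `RwLock` always wraps
`parking_lot::RwLock`. A small sketch (illustrative only, not part of this
patch):

use rustc_data_structures::sync::{MTLock, RwLock};

fn demo() {
    let counter = MTLock::new(0usize);
    *counter.lock() += 1;
    assert_eq!(counter.into_inner(), 1);

    // `leak` (right above) reads, forgets the guard, and hands back `&T`;
    // the lock stays read-locked for its remaining lifetime, which is why
    // the `cell_leak` feature gate removed from lib.rs is no longer needed.
    let numbers = RwLock::new(vec![1, 2, 3]);
    let slice: &Vec<i32> = numbers.leak();
    assert_eq!(slice.len(), 3);
}
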
diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs
index fad5f583d1c..5236c9fe156 100644
--- a/compiler/rustc_data_structures/src/sync/freeze.rs
+++ b/compiler/rustc_data_structures/src/sync/freeze.rs
@@ -5,9 +5,7 @@ use std::ops::{Deref, DerefMut};
 use std::ptr::NonNull;
 use std::sync::atomic::Ordering;
 
-use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard};
-#[cfg(parallel_compiler)]
-use crate::sync::{DynSend, DynSync};
+use crate::sync::{AtomicBool, DynSend, DynSync, ReadGuard, RwLock, WriteGuard};
 
 /// A type which allows mutation using a lock until
 /// the value is frozen and can be accessed lock-free.
@@ -22,7 +20,6 @@ pub struct FreezeLock<T> {
     lock: RwLock<()>,
 }
 
-#[cfg(parallel_compiler)]
 unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {}
 
 impl<T> FreezeLock<T> {
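A usage sketch for `FreezeLock` (illustrative only; the `write` and `freeze`
methods are assumed from the rest of this file, which the hunk does not show):

use rustc_data_structures::sync::FreezeLock;

fn demo() {
    let lock = FreezeLock::new(Vec::new());
    lock.write().push(1u32);                // mutation goes through the lock
    let frozen: &Vec<u32> = lock.freeze();  // afterwards access is lock-free
    assert_eq!(frozen[0], 1);
}
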
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
index 012ee7f900e..2ccf06ccd4f 100644
--- a/compiler/rustc_data_structures/src/sync/lock.rs
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -1,236 +1,177 @@
 //! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
 //! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
-//!
-//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
 
 #![allow(dead_code)]
 
 use std::fmt;
 
-#[cfg(parallel_compiler)]
-pub use maybe_sync::*;
-#[cfg(not(parallel_compiler))]
-pub use no_sync::*;
-
 #[derive(Clone, Copy, PartialEq)]
 pub enum Mode {
     NoSync,
     Sync,
 }
 
-mod maybe_sync {
-    use std::cell::{Cell, UnsafeCell};
-    use std::intrinsics::unlikely;
-    use std::marker::PhantomData;
-    use std::mem::ManuallyDrop;
-    use std::ops::{Deref, DerefMut};
+use std::cell::{Cell, UnsafeCell};
+use std::intrinsics::unlikely;
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ops::{Deref, DerefMut};
 
-    use parking_lot::RawMutex;
-    use parking_lot::lock_api::RawMutex as _;
+use parking_lot::RawMutex;
+use parking_lot::lock_api::RawMutex as _;
 
-    use super::Mode;
-    use crate::sync::mode;
-    #[cfg(parallel_compiler)]
-    use crate::sync::{DynSend, DynSync};
+use crate::sync::{DynSend, DynSync, mode};
 
-    /// A guard holding mutable access to a `Lock` which is in a locked state.
-    #[must_use = "if unused the Lock will immediately unlock"]
-    pub struct LockGuard<'a, T> {
-        lock: &'a Lock<T>,
-        marker: PhantomData<&'a mut T>,
+/// A guard holding mutable access to a `Lock` which is in a locked state.
+#[must_use = "if unused the Lock will immediately unlock"]
+pub struct LockGuard<'a, T> {
+    lock: &'a Lock<T>,
+    marker: PhantomData<&'a mut T>,
 
-        /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it
-        /// to the original lock operation.
-        mode: Mode,
-    }
+    /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it
+    /// to the original lock operation.
+    mode: Mode,
+}
 
-    impl<'a, T: 'a> Deref for LockGuard<'a, T> {
-        type Target = T;
-        #[inline]
-        fn deref(&self) -> &T {
-            // SAFETY: We have shared access to the mutable access owned by this type,
-            // so we can give out a shared reference.
-            unsafe { &*self.lock.data.get() }
-        }
+impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        // SAFETY: We have shared access to the mutable access owned by this type,
+        // so we can give out a shared reference.
+        unsafe { &*self.lock.data.get() }
     }
+}
 
-    impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
-        #[inline]
-        fn deref_mut(&mut self) -> &mut T {
-            // SAFETY: We have mutable access to the data so we can give out a mutable reference.
-            unsafe { &mut *self.lock.data.get() }
-        }
+impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+        unsafe { &mut *self.lock.data.get() }
     }
+}
 
-    impl<'a, T: 'a> Drop for LockGuard<'a, T> {
-        #[inline]
-        fn drop(&mut self) {
-            // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
-            // with the `lock.mode` state. This means we access the right union fields.
-            match self.mode {
-                Mode::NoSync => {
-                    let cell = unsafe { &self.lock.mode_union.no_sync };
-                    debug_assert!(cell.get());
-                    cell.set(false);
-                }
-                // SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
-                Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
+impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
+        // with the `lock.mode` state. This means we access the right union fields.
+        match self.mode {
+            Mode::NoSync => {
+                let cell = unsafe { &self.lock.mode_union.no_sync };
+                debug_assert!(cell.get());
+                cell.set(false);
             }
+            // SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
+            Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
         }
     }
+}
 
-    union ModeUnion {
-        /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
-        no_sync: ManuallyDrop<Cell<bool>>,
+union ModeUnion {
+    /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
+    no_sync: ManuallyDrop<Cell<bool>>,
 
-        /// A lock implementation that's only used if `Lock.mode` is `Sync`.
-        sync: ManuallyDrop<RawMutex>,
-    }
+    /// A lock implementation that's only used if `Lock.mode` is `Sync`.
+    sync: ManuallyDrop<RawMutex>,
+}
 
-    /// The value representing a locked state for the `Cell`.
-    const LOCKED: bool = true;
+/// The value representing a locked state for the `Cell`.
+const LOCKED: bool = true;
 
-    /// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
-    /// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
-    pub struct Lock<T> {
-        /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
-        /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
-        /// This is set on initialization and never changed.
-        mode: Mode,
+/// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+/// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+pub struct Lock<T> {
+    /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
+    /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
+    /// This is set on initialization and never changed.
+    mode: Mode,
 
-        mode_union: ModeUnion,
-        data: UnsafeCell<T>,
-    }
+    mode_union: ModeUnion,
+    data: UnsafeCell<T>,
+}
 
-    impl<T> Lock<T> {
-        #[inline(always)]
-        pub fn new(inner: T) -> Self {
-            let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
-                // Create the lock with synchronization enabled using the `RawMutex` type.
-                (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
-            } else {
-                // Create the lock with synchronization disabled.
-                (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
-            };
-            Lock { mode, mode_union, data: UnsafeCell::new(inner) }
-        }
+impl<T> Lock<T> {
+    #[inline(always)]
+    pub fn new(inner: T) -> Self {
+        let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
+            // Create the lock with synchronization enabled using the `RawMutex` type.
+            (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
+        } else {
+            // Create the lock with synchronization disabled.
+            (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
+        };
+        Lock { mode, mode_union, data: UnsafeCell::new(inner) }
+    }
 
-        #[inline(always)]
-        pub fn into_inner(self) -> T {
-            self.data.into_inner()
-        }
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
 
-        #[inline(always)]
-        pub fn get_mut(&mut self) -> &mut T {
-            self.data.get_mut()
-        }
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
 
-        #[inline(always)]
-        pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-            let mode = self.mode;
-            // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
-            match mode {
-                Mode::NoSync => {
-                    let cell = unsafe { &self.mode_union.no_sync };
-                    let was_unlocked = cell.get() != LOCKED;
-                    if was_unlocked {
-                        cell.set(LOCKED);
-                    }
-                    was_unlocked
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        let mode = self.mode;
+        // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
+        match mode {
+            Mode::NoSync => {
+                let cell = unsafe { &self.mode_union.no_sync };
+                let was_unlocked = cell.get() != LOCKED;
+                if was_unlocked {
+                    cell.set(LOCKED);
                 }
-                Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
+                was_unlocked
             }
-            .then(|| LockGuard { lock: self, marker: PhantomData, mode })
+            Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
         }
+        .then(|| LockGuard { lock: self, marker: PhantomData, mode })
+    }
 
-        /// This acquires the lock assuming synchronization is in a specific mode.
-        ///
-        /// Safety
-        /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
-        /// true on lock creation.
-        #[inline(always)]
+    /// This acquires the lock assuming synchronization is in a specific mode.
+    ///
+    /// Safety
+    /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
+    /// true on lock creation.
+    #[inline(always)]
+    #[track_caller]
+    pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
+        #[inline(never)]
         #[track_caller]
-        pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
-            #[inline(never)]
-            #[track_caller]
-            #[cold]
-            fn lock_held() -> ! {
-                panic!("lock was already held")
-            }
+        #[cold]
+        fn lock_held() -> ! {
+            panic!("lock was already held")
+        }
 
-            // SAFETY: This is safe since the union fields are used in accordance with `mode`
-            // which also must match `self.mode` due to the safety precondition.
-            unsafe {
-                match mode {
-                    Mode::NoSync => {
-                        if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
-                            lock_held()
-                        }
+        // SAFETY: This is safe since the union fields are used in accordance with `mode`
+        // which also must match `self.mode` due to the safety precondition.
+        unsafe {
+            match mode {
+                Mode::NoSync => {
+                    if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
+                        lock_held()
                     }
-                    Mode::Sync => self.mode_union.sync.lock(),
                 }
+                Mode::Sync => self.mode_union.sync.lock(),
             }
-            LockGuard { lock: self, marker: PhantomData, mode }
-        }
-
-        #[inline(always)]
-        #[track_caller]
-        pub fn lock(&self) -> LockGuard<'_, T> {
-            unsafe { self.lock_assume(self.mode) }
         }
+        LockGuard { lock: self, marker: PhantomData, mode }
     }
 
-    #[cfg(parallel_compiler)]
-    unsafe impl<T: DynSend> DynSend for Lock<T> {}
-    #[cfg(parallel_compiler)]
-    unsafe impl<T: DynSend> DynSync for Lock<T> {}
-}
-
-mod no_sync {
-    use std::cell::RefCell;
-    #[doc(no_inline)]
-    pub use std::cell::RefMut as LockGuard;
-
-    use super::Mode;
-
-    pub struct Lock<T>(RefCell<T>);
-
-    impl<T> Lock<T> {
-        #[inline(always)]
-        pub fn new(inner: T) -> Self {
-            Lock(RefCell::new(inner))
-        }
-
-        #[inline(always)]
-        pub fn into_inner(self) -> T {
-            self.0.into_inner()
-        }
-
-        #[inline(always)]
-        pub fn get_mut(&mut self) -> &mut T {
-            self.0.get_mut()
-        }
-
-        #[inline(always)]
-        pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-            self.0.try_borrow_mut().ok()
-        }
-
-        #[inline(always)]
-        #[track_caller]
-        // This is unsafe to match the API for the `parallel_compiler` case.
-        pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> {
-            self.0.borrow_mut()
-        }
-
-        #[inline(always)]
-        #[track_caller]
-        pub fn lock(&self) -> LockGuard<'_, T> {
-            self.0.borrow_mut()
-        }
+    #[inline(always)]
+    #[track_caller]
+    pub fn lock(&self) -> LockGuard<'_, T> {
+        unsafe { self.lock_assume(self.mode) }
     }
 }
 
+unsafe impl<T: DynSend> DynSend for Lock<T> {}
+unsafe impl<T: DynSend> DynSync for Lock<T> {}
+
 impl<T> Lock<T> {
     #[inline(always)]
     #[track_caller]
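The mode is fixed once, in `new`: a plain `Cell<bool>` flag when the session
cannot be multi-threaded, a `parking_lot::RawMutex` otherwise. A small sketch
(illustrative only, not part of this patch):

use rustc_data_structures::sync::Lock;

fn demo() {
    let lock = Lock::new(String::from("shared"));
    {
        let mut guard = lock.lock(); // dispatches on the stored `Mode`
        guard.push_str(" state");
    }
    // The guard's `Drop` released the lock, so another acquire succeeds.
    assert!(lock.try_lock().is_some());
}
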
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
index c7df19842d6..1ba631b8623 100644
--- a/compiler/rustc_data_structures/src/sync/parallel.rs
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -6,14 +6,11 @@
 use std::any::Any;
 use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind};
 
-#[cfg(not(parallel_compiler))]
-pub use disabled::*;
-#[cfg(parallel_compiler)]
-pub use enabled::*;
 use parking_lot::Mutex;
+use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
 
 use crate::FatalErrorMarker;
-use crate::sync::IntoDynSyncSend;
+use crate::sync::{DynSend, DynSync, FromDyn, IntoDynSyncSend, mode};
 
 /// A guard used to hold panics that occur during a parallel section to later be unwound.
 /// This is used for the parallel compiler to prevent fatal errors from non-deterministically
@@ -49,65 +46,23 @@ pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
     ret
 }
 
-mod disabled {
-    use crate::sync::parallel_guard;
-
-    #[macro_export]
-    #[cfg(not(parallel_compiler))]
-    macro_rules! parallel {
-        ($($blocks:block),*) => {{
-            $crate::sync::parallel_guard(|guard| {
-                $(guard.run(|| $blocks);)*
-            });
-        }}
-    }
-
-    pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
-    where
-        A: FnOnce() -> RA,
-        B: FnOnce() -> RB,
-    {
-        let (a, b) = parallel_guard(|guard| {
-            let a = guard.run(oper_a);
-            let b = guard.run(oper_b);
-            (a, b)
-        });
-        (a.unwrap(), b.unwrap())
-    }
-
-    pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item)) {
-        parallel_guard(|guard| {
-            t.into_iter().for_each(|i| {
-                guard.run(|| for_each(i));
-            });
-        })
-    }
-
-    pub fn try_par_for_each_in<T: IntoIterator, E>(
-        t: T,
-        mut for_each: impl FnMut(T::Item) -> Result<(), E>,
-    ) -> Result<(), E> {
-        parallel_guard(|guard| {
-            t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
-        })
-    }
-
-    pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
-        t: T,
-        mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
-    ) -> C {
-        parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
-    }
+pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+where
+    A: FnOnce() -> RA,
+    B: FnOnce() -> RB,
+{
+    let (a, b) = parallel_guard(|guard| {
+        let a = guard.run(oper_a);
+        let b = guard.run(oper_b);
+        (a, b)
+    });
+    (a.unwrap(), b.unwrap())
 }
 
-#[cfg(parallel_compiler)]
-mod enabled {
-    use crate::sync::{DynSend, DynSync, FromDyn, mode, parallel_guard};
-
-    /// Runs a list of blocks in parallel. The first block is executed immediately on
-    /// the current thread. Use that for the longest running block.
-    #[macro_export]
-    macro_rules! parallel {
+/// Runs a list of blocks in parallel. The first block is executed immediately on
+/// the current thread. Use that for the longest running block.
+#[macro_export]
+macro_rules! parallel {
         (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
             parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
         };
@@ -139,92 +94,89 @@ mod enabled {
         };
     }
 
-    // This function only works when `mode::is_dyn_thread_safe()`.
-    pub fn scope<'scope, OP, R>(op: OP) -> R
-    where
-        OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
-        R: DynSend,
-    {
-        let op = FromDyn::from(op);
-        rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+// This function only works when `mode::is_dyn_thread_safe()`.
+pub fn scope<'scope, OP, R>(op: OP) -> R
+where
+    OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
+    R: DynSend,
+{
+    let op = FromDyn::from(op);
+    rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+}
+
+#[inline]
+pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
+where
+    A: FnOnce() -> RA + DynSend,
+    B: FnOnce() -> RB + DynSend,
+{
+    if mode::is_dyn_thread_safe() {
+        let oper_a = FromDyn::from(oper_a);
+        let oper_b = FromDyn::from(oper_b);
+        let (a, b) = parallel_guard(|guard| {
+            rayon::join(
+                move || guard.run(move || FromDyn::from(oper_a.into_inner()())),
+                move || guard.run(move || FromDyn::from(oper_b.into_inner()())),
+            )
+        });
+        (a.unwrap().into_inner(), b.unwrap().into_inner())
+    } else {
+        serial_join(oper_a, oper_b)
     }
+}
 
-    #[inline]
-    pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
-    where
-        A: FnOnce() -> RA + DynSend,
-        B: FnOnce() -> RB + DynSend,
-    {
+pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
+    t: T,
+    for_each: impl Fn(I) + DynSync + DynSend,
+) {
+    parallel_guard(|guard| {
         if mode::is_dyn_thread_safe() {
-            let oper_a = FromDyn::from(oper_a);
-            let oper_b = FromDyn::from(oper_b);
-            let (a, b) = parallel_guard(|guard| {
-                rayon::join(
-                    move || guard.run(move || FromDyn::from(oper_a.into_inner()())),
-                    move || guard.run(move || FromDyn::from(oper_b.into_inner()())),
-                )
+            let for_each = FromDyn::from(for_each);
+            t.into_par_iter().for_each(|i| {
+                guard.run(|| for_each(i));
             });
-            (a.unwrap().into_inner(), b.unwrap().into_inner())
         } else {
-            super::disabled::join(oper_a, oper_b)
+            t.into_iter().for_each(|i| {
+                guard.run(|| for_each(i));
+            });
         }
-    }
-
-    use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
-
-    pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
-        t: T,
-        for_each: impl Fn(I) + DynSync + DynSend,
-    ) {
-        parallel_guard(|guard| {
-            if mode::is_dyn_thread_safe() {
-                let for_each = FromDyn::from(for_each);
-                t.into_par_iter().for_each(|i| {
-                    guard.run(|| for_each(i));
-                });
-            } else {
-                t.into_iter().for_each(|i| {
-                    guard.run(|| for_each(i));
-                });
-            }
-        });
-    }
+    });
+}
 
-    pub fn try_par_for_each_in<
-        T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
-        E: Send,
-    >(
-        t: T,
-        for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
-    ) -> Result<(), E> {
-        parallel_guard(|guard| {
-            if mode::is_dyn_thread_safe() {
-                let for_each = FromDyn::from(for_each);
-                t.into_par_iter()
-                    .filter_map(|i| guard.run(|| for_each(i)))
-                    .reduce(|| Ok(()), Result::and)
-            } else {
-                t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
-            }
-        })
-    }
+pub fn try_par_for_each_in<
+    T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
+    E: Send,
+>(
+    t: T,
+    for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
+) -> Result<(), E> {
+    parallel_guard(|guard| {
+        if mode::is_dyn_thread_safe() {
+            let for_each = FromDyn::from(for_each);
+            t.into_par_iter()
+                .filter_map(|i| guard.run(|| for_each(i)))
+                .reduce(|| Ok(()), Result::and)
+        } else {
+            t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
+        }
+    })
+}
 
-    pub fn par_map<
-        I,
-        T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
-        R: std::marker::Send,
-        C: FromIterator<R> + FromParallelIterator<R>,
-    >(
-        t: T,
-        map: impl Fn(I) -> R + DynSync + DynSend,
-    ) -> C {
-        parallel_guard(|guard| {
-            if mode::is_dyn_thread_safe() {
-                let map = FromDyn::from(map);
-                t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
-            } else {
-                t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
-            }
-        })
-    }
+pub fn par_map<
+    I,
+    T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
+    R: std::marker::Send,
+    C: FromIterator<R> + FromParallelIterator<R>,
+>(
+    t: T,
+    map: impl Fn(I) -> R + DynSync + DynSend,
+) -> C {
+    parallel_guard(|guard| {
+        if mode::is_dyn_thread_safe() {
+            let map = FromDyn::from(map);
+            t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
+        } else {
+            t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
+        }
+    })
 }
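All of these entry points now exist on every build and choose serial or rayon
execution at runtime. A quick sketch (illustrative only, not part of this
patch):

use rustc_data_structures::sync::{join, par_map};

fn demo() {
    // Falls back to `serial_join` unless `is_dyn_thread_safe()` is true.
    let (a, b) = join(|| 1 + 1, || 2 + 2);
    assert_eq!((a, b), (2, 4));

    let doubled: Vec<u32> = par_map(vec![1u32, 2, 3], |x| x * 2);
    assert_eq!(doubled, vec![2, 4, 6]);
}
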
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
index 314496ce9f0..21ec5cf6c13 100644
--- a/compiler/rustc_data_structures/src/sync/vec.rs
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -4,40 +4,23 @@ use rustc_index::Idx;
 
 #[derive(Default)]
 pub struct AppendOnlyIndexVec<I: Idx, T: Copy> {
-    #[cfg(not(parallel_compiler))]
-    vec: elsa::vec::FrozenVec<T>,
-    #[cfg(parallel_compiler)]
     vec: elsa::sync::LockFreeFrozenVec<T>,
     _marker: PhantomData<fn(&I)>,
 }
 
 impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
     pub fn new() -> Self {
-        Self {
-            #[cfg(not(parallel_compiler))]
-            vec: elsa::vec::FrozenVec::new(),
-            #[cfg(parallel_compiler)]
-            vec: elsa::sync::LockFreeFrozenVec::new(),
-            _marker: PhantomData,
-        }
+        Self { vec: elsa::sync::LockFreeFrozenVec::new(), _marker: PhantomData }
     }
 
     pub fn push(&self, val: T) -> I {
-        #[cfg(not(parallel_compiler))]
-        let i = self.vec.len();
-        #[cfg(not(parallel_compiler))]
-        self.vec.push(val);
-        #[cfg(parallel_compiler)]
         let i = self.vec.push(val);
         I::new(i)
     }
 
     pub fn get(&self, i: I) -> Option<T> {
         let i = i.index();
-        #[cfg(not(parallel_compiler))]
-        return self.vec.get_copy(i);
-        #[cfg(parallel_compiler)]
-        return self.vec.get(i);
+        self.vec.get(i)
     }
 }
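A sketch of the now-unconditional lock-free path (illustrative only; it relies
on `usize` implementing `rustc_index::Idx`, and on the struct's `T: Copy`
bound):

use rustc_data_structures::sync::AppendOnlyIndexVec;

fn demo() {
    let vec: AppendOnlyIndexVec<usize, u32> = AppendOnlyIndexVec::new();
    let i = vec.push(7);
    assert_eq!(vec.get(i), Some(7));
}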
 
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index b6efcada10b..d75af009850 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -5,8 +5,9 @@ use std::ptr;
 use std::sync::Arc;
 
 use parking_lot::Mutex;
-#[cfg(parallel_compiler)]
-use {crate::outline, crate::sync::CacheAligned};
+
+use crate::outline;
+use crate::sync::CacheAligned;
 
 /// A pointer to the `RegistryData` which uniquely identifies a registry.
 /// This identifier can be reused if the registry gets freed.
@@ -21,7 +22,6 @@ impl RegistryId {
     ///
     /// Note that there's a race possible where the identifier in `THREAD_DATA` could be reused
     /// so this can succeed from a different registry.
-    #[cfg(parallel_compiler)]
     fn verify(self) -> usize {
         let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
 
@@ -102,11 +102,7 @@ impl Registry {
 /// worker local value through the `Deref` impl on the registry associated with the thread it was
 /// created on. It will panic otherwise.
 pub struct WorkerLocal<T> {
-    #[cfg(not(parallel_compiler))]
-    local: T,
-    #[cfg(parallel_compiler)]
     locals: Box<[CacheAligned<T>]>,
-    #[cfg(parallel_compiler)]
     registry: Registry,
 }
 
@@ -114,7 +110,6 @@ pub struct WorkerLocal<T> {
 // or it will panic for threads without an associated local. So there isn't a need for `T` to do
 // its own synchronization. The `verify` method on `RegistryId` has an issue where the id
 // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
-#[cfg(parallel_compiler)]
 unsafe impl<T: Send> Sync for WorkerLocal<T> {}
 
 impl<T> WorkerLocal<T> {
@@ -122,33 +117,17 @@ impl<T> WorkerLocal<T> {
     /// value this worker local should take for each thread in the registry.
     #[inline]
     pub fn new<F: FnMut(usize) -> T>(mut initial: F) -> WorkerLocal<T> {
-        #[cfg(parallel_compiler)]
-        {
-            let registry = Registry::current();
-            WorkerLocal {
-                locals: (0..registry.0.thread_limit.get())
-                    .map(|i| CacheAligned(initial(i)))
-                    .collect(),
-                registry,
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            WorkerLocal { local: initial(0) }
+        let registry = Registry::current();
+        WorkerLocal {
+            locals: (0..registry.0.thread_limit.get()).map(|i| CacheAligned(initial(i))).collect(),
+            registry,
         }
     }
 
     /// Returns the worker-local values for each thread
     #[inline]
     pub fn into_inner(self) -> impl Iterator<Item = T> {
-        #[cfg(parallel_compiler)]
-        {
-            self.locals.into_vec().into_iter().map(|local| local.0)
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            std::iter::once(self.local)
-        }
+        self.locals.into_vec().into_iter().map(|local| local.0)
     }
 }
 
@@ -156,13 +135,6 @@ impl<T> Deref for WorkerLocal<T> {
     type Target = T;
 
     #[inline(always)]
-    #[cfg(not(parallel_compiler))]
-    fn deref(&self) -> &T {
-        &self.local
-    }
-
-    #[inline(always)]
-    #[cfg(parallel_compiler)]
     fn deref(&self) -> &T {
         // This is safe because `verify` will only return values less than
         // `self.registry.thread_limit` which is the size of the `self.locals` array.
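
A closing sketch of `WorkerLocal` (illustrative only, not part of this patch;
it assumes the calling thread was registered with a `Registry`, e.g. by the
driver's thread-pool setup, since `new` and `deref` panic otherwise):

use std::cell::Cell;

use rustc_data_structures::sync::WorkerLocal;

fn demo() -> usize {
    // One cache-aligned slot per worker; `Deref` indexes by the current
    // thread's registry id, so access needs no locking.
    let counts: WorkerLocal<Cell<usize>> = WorkerLocal::new(|_| Cell::new(0));
    counts.set(counts.get() + 1);
    counts.into_inner().map(Cell::into_inner).sum()
}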