author     Noratrieb <48135649+Noratrieb@users.noreply.github.com>   2024-10-28 18:51:12 +0100
committer  clubby789 <jamie@hill-daniel.co.uk>                       2024-11-12 13:38:58 +0000
commit     505b8e133282a5ced49d8b9c6c5678b8030123a4 (patch)
tree       0b78d0a358d7ca2325cb3f0edb0d6a6884397fa0 /compiler/rustc_query_system/src
parent     00ed73cdc09a6452cb58202d56a9211fb3c73031 (diff)
Delete the `cfg(not(parallel_compiler))` serial compiler
Since its inception a long time ago, the parallel compiler and its cfgs
have been a maintenance burden. This was a necessary evil to allow
iteration without degrading performance through synchronization
overhead.

But that time is over. Thanks to the amazing work by the parallel
working group (and the dyn sync crimes), the parallel compiler has been
fast enough to ship by default in nightly for quite a while now.
Stable and beta have still been on the serial compiler, because they
can't use `-Zthreads` anyway.
But this is quite suboptimal:
- the maintenance burden still sucks
- we're not testing the serial compiler in nightly

For these reasons, it's time to end it. The serial compiler has served
us well in the years since it was split from the parallel one, but it's
over now.

Let the knight slay one head of the two-headed dragon!
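
To make "its cfgs" concrete, here is a minimal sketch of the gating pattern this
commit deletes (illustrative names, not the actual rustc items): a field that
only exists in parallel builds forces a matching cfg on every constructor and
most use sites, which is the maintenance burden described above.

    struct Job {
        id: u64,
        // Only present when built with --cfg parallel_compiler, so every
        // construction site below needs its own matching cfg.
        #[cfg(parallel_compiler)]
        latch: Option<()>,
    }

    impl Job {
        fn new(id: u64) -> Self {
            Job {
                id,
                #[cfg(parallel_compiler)]
                latch: None,
            }
        }
    }

After this commit the field and its uses are simply unconditional, and whether
the compiler actually runs queries in parallel becomes a pure runtime decision
(`-Zthreads`).
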
Diffstat (limited to 'compiler/rustc_query_system/src')
-rw-r--r--   compiler/rustc_query_system/src/dep_graph/graph.rs    13
-rw-r--r--   compiler/rustc_query_system/src/query/job.rs          49
-rw-r--r--   compiler/rustc_query_system/src/query/mod.rs          19
-rw-r--r--   compiler/rustc_query_system/src/query/plumbing.rs      9
4 files changed, 15 insertions, 75 deletions
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 5e30f17d626..d806e995d1b 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -837,12 +837,6 @@ impl<D: Deps> DepGraphData<D> {
     ) -> Option<DepNodeIndex> {
         let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
 
-        #[cfg(not(parallel_compiler))]
-        {
-            debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
-        }
-
         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
@@ -871,13 +865,6 @@ impl<D: Deps> DepGraphData<D> {
         // Maybe store a list on disk and encode this fact in the DepNodeState
         let side_effects = qcx.load_side_effects(prev_dep_node_index);
 
-        #[cfg(not(parallel_compiler))]
-        debug_assert!(
-            self.colors.get(prev_dep_node_index).is_none(),
-            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
-                      insertion for {dep_node:?}"
-        );
-
         if side_effects.maybe_any() {
             qcx.dep_context().dep_graph().with_query_deserialization(|| {
                 self.emit_side_effects(qcx, dep_node_index, side_effects)
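
The two hunks above drop debug assertions that were only compiled into the
serial build, where a single thread marks nodes green and "this node has no
color yet" is a real invariant. A rough sketch of why that invariant cannot be
asserted once marking can race (simplified types, not rustc's actual DepGraph):

    use std::collections::HashMap;
    use std::sync::Mutex;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Color {
        Green,
    }

    struct Colors {
        map: Mutex<HashMap<u64, Color>>,
    }

    impl Colors {
        // Two threads may race to mark the same previous dep node. Whichever
        // gets there first inserts the color; the other simply observes it,
        // so asserting that no color is present yet would be wrong here.
        fn mark_green(&self, index: u64) -> Color {
            *self.map.lock().unwrap().entry(index).or_insert(Color::Green)
        }
    }
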
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 5af41b9e687..2a7d759ab35 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,21 +1,16 @@
 use std::hash::Hash;
 use std::io::Write;
+use std::iter;
 use std::num::NonZero;
+use std::sync::Arc;
 
-use rustc_data_structures::fx::FxHashMap;
+use parking_lot::{Condvar, Mutex};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::jobserver;
 use rustc_errors::{Diag, DiagCtxtHandle};
 use rustc_hir::def::DefKind;
 use rustc_session::Session;
-use rustc_span::Span;
-#[cfg(parallel_compiler)]
-use {
-    parking_lot::{Condvar, Mutex},
-    rustc_data_structures::fx::FxHashSet,
-    rustc_data_structures::jobserver,
-    rustc_span::DUMMY_SP,
-    std::iter,
-    std::sync::Arc,
-};
+use rustc_span::{DUMMY_SP, Span};
 
 use crate::dep_graph::DepContext;
 use crate::error::CycleStack;
@@ -41,17 +36,14 @@ impl QueryJobId {
         map.get(&self).unwrap().query.clone()
     }
 
-    #[cfg(parallel_compiler)]
     fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
-    #[cfg(parallel_compiler)]
     fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
-    #[cfg(parallel_compiler)]
     fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
@@ -75,7 +67,6 @@ pub struct QueryJob {
     pub parent: Option<QueryJobId>,
 
     /// The latch that is used to wait on this job.
-    #[cfg(parallel_compiler)]
     latch: Option<QueryLatch>,
 }
 
@@ -83,16 +74,9 @@ impl QueryJob {
     /// Creates a new query job.
     #[inline]
     pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
-        QueryJob {
-            id,
-            span,
-            parent,
-            #[cfg(parallel_compiler)]
-            latch: None,
-        }
+        QueryJob { id, span, parent, latch: None }
     }
 
-    #[cfg(parallel_compiler)]
     pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
@@ -106,11 +90,8 @@ impl QueryJob {
     /// as there are no concurrent jobs which could be waiting on us
     #[inline]
     pub fn signal_complete(self) {
-        #[cfg(parallel_compiler)]
-        {
-            if let Some(latch) = self.latch {
-                latch.set();
-            }
+        if let Some(latch) = self.latch {
+            latch.set();
         }
     }
 }
@@ -176,7 +157,6 @@ impl QueryJobId {
     }
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Debug)]
 struct QueryWaiter {
     query: Option<QueryJobId>,
@@ -185,7 +165,6 @@ struct QueryWaiter {
     cycle: Mutex<Option<CycleError>>,
 }
 
-#[cfg(parallel_compiler)]
 impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
@@ -193,20 +172,17 @@ impl QueryWaiter {
     }
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Debug)]
 struct QueryLatchInfo {
     complete: bool,
     waiters: Vec<Arc<QueryWaiter>>,
 }
 
-#[cfg(parallel_compiler)]
 #[derive(Clone, Debug)]
 pub(super) struct QueryLatch {
     info: Arc<Mutex<QueryLatchInfo>>,
 }
 
-#[cfg(parallel_compiler)]
 impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
@@ -273,7 +249,6 @@ impl QueryLatch {
 }
 
 /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
-#[cfg(parallel_compiler)]
 type Waiter = (QueryJobId, usize);
 
 /// Visits all the non-resumable and resumable waiters of a query.
@@ -285,7 +260,6 @@ type Waiter = (QueryJobId, usize);
 /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
-#[cfg(parallel_compiler)]
 fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
     F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
@@ -316,7 +290,6 @@ where
 /// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
-#[cfg(parallel_compiler)]
 fn cycle_check(
     query_map: &QueryMap,
     query: QueryJobId,
@@ -357,7 +330,6 @@ fn cycle_check(
 /// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
-#[cfg(parallel_compiler)]
 fn connected_to_root(
     query_map: &QueryMap,
     query: QueryJobId,
@@ -380,7 +352,6 @@ fn connected_to_root(
 }
 
 // Deterministically pick an query from a list
-#[cfg(parallel_compiler)]
 fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
     F: Fn(&T) -> (Span, QueryJobId),
@@ -406,7 +377,6 @@ where
 /// the function return true.
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
-#[cfg(parallel_compiler)]
 fn remove_cycle(
     query_map: &QueryMap,
     jobs: &mut Vec<QueryJobId>,
@@ -511,7 +481,6 @@ fn remove_cycle(
 /// uses a query latch and then resuming that waiter.
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
-#[cfg(parallel_compiler)]
 pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) {
     let mut wakelist = Vec::new();
     let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
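
Most of what job.rs makes unconditional is the latch/waiter machinery. A
condensed sketch of the idea behind `QueryLatch`, using std primitives instead
of the parking_lot types and the rayon registry the real code relies on:

    use std::sync::{Arc, Condvar, Mutex};

    #[derive(Clone)]
    struct Latch {
        // (completed flag, wakeup signal); the real QueryLatch also records a
        // list of waiters so deadlock handling can resume a specific one.
        info: Arc<(Mutex<bool>, Condvar)>,
    }

    impl Latch {
        fn new() -> Self {
            Latch { info: Arc::new((Mutex::new(false), Condvar::new())) }
        }

        // A thread that finds the query already running parks here until the
        // executing thread signals completion.
        fn wait(&self) {
            let (lock, cvar) = &*self.info;
            let mut complete = lock.lock().unwrap();
            while !*complete {
                complete = cvar.wait(complete).unwrap();
            }
        }

        // Corresponds to QueryJob::signal_complete: the result is cached, so
        // wake everyone who blocked on this job.
        fn set(&self) {
            let (lock, cvar) = &*self.info;
            *lock.lock().unwrap() = true;
            cvar.notify_all();
        }
    }

The real implementation additionally tells rayon when a worker unblocks (see
`QueryWaiter::notify` above calling `rayon_core::mark_unblocked`), so parking a
thread on a latch does not starve the worker pool.
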
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 3e35fdb77b3..b81386f06ec 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -2,10 +2,9 @@ mod plumbing;
 pub use self::plumbing::*;
 
 mod job;
-#[cfg(parallel_compiler)]
-pub use self::job::break_query_cycles;
 pub use self::job::{
-    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, print_query_stack, report_cycle,
+    QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap, break_query_cycles, print_query_stack,
+    report_cycle,
 };
 
 mod caches;
@@ -38,7 +37,6 @@ pub struct QueryStackFrame {
     pub dep_kind: DepKind,
     /// This hash is used to deterministically pick
     /// a query to remove cycles in the parallel compiler.
-    #[cfg(parallel_compiler)]
     hash: Hash64,
 }
 
@@ -51,18 +49,9 @@ impl QueryStackFrame {
         def_kind: Option<DefKind>,
         dep_kind: DepKind,
         ty_def_id: Option<DefId>,
-        _hash: impl FnOnce() -> Hash64,
+        hash: impl FnOnce() -> Hash64,
     ) -> Self {
-        Self {
-            description,
-            span,
-            def_id,
-            def_kind,
-            ty_def_id,
-            dep_kind,
-            #[cfg(parallel_compiler)]
-            hash: _hash(),
-        }
+        Self { description, span, def_id, def_kind, ty_def_id, dep_kind, hash: hash() }
     }
 
     // FIXME(eddyb) Get more valid `Span`s on queries.
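
With the cfg gone, `QueryStackFrame` always carries the hash that is used to
deterministically pick a query when breaking cycles. A small, self-contained
sketch of that kind of hash-based tie-breaking (hypothetical `Frame` type;
picking the minimum hash is an illustrative policy, not necessarily the exact
one rustc uses):

    #[derive(Debug)]
    struct Frame {
        description: String,
        hash: u64, // stands in for the Hash64 the real QueryStackFrame carries
    }

    // Given the frames involved in a query cycle, pick the same one on every
    // run, independent of which thread happened to discover the cycle first.
    fn pick(cycle: &[Frame]) -> &Frame {
        cycle.iter().min_by_key(|f| f.hash).expect("a cycle has at least one query")
    }

    fn main() {
        let cycle = vec![
            Frame { description: "type_of(Foo)".into(), hash: 42 },
            Frame { description: "layout_of(Foo)".into(), hash: 7 },
        ];
        println!("breaking the cycle at {}", pick(&cycle).description);
    }
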
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 17486be04dc..aac8ab87c64 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -13,7 +13,6 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
 use rustc_data_structures::{outline, sync};
 use rustc_errors::{Diag, FatalError, StashKey};
 use rustc_span::{DUMMY_SP, Span};
@@ -25,9 +24,7 @@ use crate::HandleCycleError;
 use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
-#[cfg(parallel_compiler)]
-use crate::query::job::QueryLatch;
-use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, report_cycle};
+use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
 use crate::query::{
     QueryContext, QueryMap, QuerySideEffects, QueryStackFrame, SerializedDepNodeIndex,
 };
@@ -263,7 +260,6 @@ where
 }
 
 #[inline(always)]
-#[cfg(parallel_compiler)]
 fn wait_for_query<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -334,7 +330,7 @@ where
     // re-executing the query since `try_start` only checks that the query is not currently
     // executing, but another thread may have already completed the query and stores it result
     // in the query cache.
-    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
+    if qcx.dep_context().sess().threads() > 1 {
         if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
@@ -359,7 +355,6 @@ where
         Entry::Occupied(mut entry) => {
             match entry.get_mut() {
                 QueryResult::Started(job) => {
-                    #[cfg(parallel_compiler)]
                     if sync::is_dyn_thread_safe() {
                         // Get the latch out
                         let latch = job.latch();
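
For context on the hunks above: the extra cache lookup now depends only on the
runtime thread count, and the `is_dyn_thread_safe()` branch no longer needs a
cfg. A minimal sketch of that extra lookup, with a hypothetical cache type
standing in for the real query cache:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Cache {
        results: Mutex<HashMap<String, u64>>,
    }

    impl Cache {
        fn lookup(&self, key: &str) -> Option<u64> {
            self.results.lock().unwrap().get(key).copied()
        }
    }

    // Check the cache one more time before claiming or waiting on the query:
    // with more than one thread, another worker may have completed it since
    // the caller's first lookup.
    fn recheck_cache(cache: &Cache, key: &str, threads: usize) -> Option<u64> {
        if threads > 1 {
            if let Some(value) = cache.lookup(key) {
                // A hit produced by another worker; nothing to execute or wait on.
                return Some(value);
            }
        }
        // Fall through: claim the query, or wait on the running job's latch.
        None
    }
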