diff options
| author | root <root@Opensource.localdomain> | 2025-03-05 11:24:58 +0800 |
|---|---|---|
| committer | root <root@Opensource.localdomain> | 2025-03-05 11:24:58 +0800 |
| commit | cc1e4ede9388d87750c3751f41e8c6c4f6cae995 (patch) | |
| tree | 2feb15e1633c40e90d3a91d5b389c4c5284ba4dd | |
| parent | ce36a966c79e109dabeef7a47fe68e5294c6d71e (diff) | |
| download | rust-cc1e4ede9388d87750c3751f41e8c6c4f6cae995.tar.gz rust-cc1e4ede9388d87750c3751f41e8c6c4f6cae995.zip | |
resume one waiter per call
| -rw-r--r-- | compiler/rustc_query_system/src/query/job.rs | 17 |
1 file changed, 15 insertions, 2 deletions
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index a8c2aa98cd0..37b305d0a8b 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -477,8 +477,8 @@ fn remove_cycle( /// Detects query cycles by using depth first search over all active query jobs. /// If a query cycle is found it will break the cycle by finding an edge which /// uses a query latch and then resuming that waiter. -/// There may be multiple cycles involved in a deadlock, so this searches -/// all active queries for cycles before finally resuming all the waiters at once. +/// There may be multiple cycles involved in a deadlock, but we only search +/// one cycle per call and resume one waiter at a time. See `FIXME` below. pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) { let mut wakelist = Vec::new(); let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect(); @@ -488,6 +488,19 @@ pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) while jobs.len() > 0 { if remove_cycle(&query_map, &mut jobs, &mut wakelist) { found_cycle = true; + + // FIXME(#137731): Resuming all the waiters at once may cause deadlocks, + // so we resume one waiter per call for now. It's still unclear whether + // it's due to possible issues in rustc-rayon or instead in the handling + // of query cycles. + // This seems to only appear when multiple query cycle errors + // are involved, so this reduction in parallelism, while suboptimal, is not + // universal and only the deadlock handler will encounter these cases. + // The workaround shows loss of potential gains, but there still are big + // improvements in the common case, and no regressions compared to the + // single-threaded case. More investigation is still needed, and once fixed, + // we can wake all the waiters up. + break; } } |
