author     bors <bors@rust-lang.org>  2023-10-23 09:59:40 +0000
committer  bors <bors@rust-lang.org>  2023-10-23 09:59:40 +0000
commit     a56bd2b944aed6b4a0507cc1870fb4c86d08e48e (patch)
tree       93fef0ff5217a7dc9b4c2d90affc5e1510c5427c /compiler/rustc_data_structures
parent     6bb4ad6dfb7d373c2f19b6cc8c0f05bd73f6d3cc (diff)
parent     fe8ebb1890574a713bc8ee7cd5cc6cde54989b55 (diff)
Auto merge of #116849 - oli-obk:error_shenanigans, r=cjgillot
Avoid a `track_errors` by bubbling up most errors from `check_well_formed`

I believe `track_errors` is mostly papering over issues that a sufficiently convoluted query graph can hit. I made this change as a first step; the actual change I want to make is to stop bailing out early on errors and instead use the new `ErrorGuaranteed` to invoke `check_well_formed` for individual items before doing all the `typeck` logic on them.

This works towards resolving https://github.com/rust-lang/rust/issues/97477 and various other ICEs, and it also lets us use parallel rustc more (parallelism is currently rather limited/bottlenecked by the very sequential way in which we run `rustc_hir_analysis::check_crate`).

cc `@SparrowLii` `@Zoxc` for the new `try_par_for_each_in` function
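
To make the contract of the new helper concrete, here is a minimal, purely serial sketch of its semantics (an illustration only, not the rustc implementation: `WfError`, `check_item`, and `try_for_each_in` are hypothetical stand-ins): every item is still visited after a failure, and a single `Err` is bubbled up at the end instead of aborting on the first error.

```rust
// Hypothetical error type standing in for something like `ErrorGuaranteed`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct WfError;

// Hypothetical per-item check standing in for `check_well_formed`.
fn check_item(item: &str) -> Result<(), WfError> {
    if item.starts_with("bad") { Err(WfError) } else { Ok(()) }
}

// Serial analogue of `try_par_for_each_in`: fold over every item and keep
// going after a failure, returning some error at the end if any call failed
// (as in the real helper, a later error may overwrite an earlier one).
fn try_for_each_in<T: IntoIterator, E>(
    t: T,
    mut for_each: impl FnMut(T::Item) -> Result<(), E>,
) -> Result<(), E> {
    t.into_iter().fold(Ok(()), |ret, i| for_each(i).and(ret))
}

fn main() {
    let items = ["fn_a", "bad_struct", "fn_b"];
    // All three items are checked; the final result carries the error
    // produced for `bad_struct`.
    assert_eq!(try_for_each_in(items, check_item), Err(WfError));
}
```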
Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs            2
-rw-r--r--  compiler/rustc_data_structures/src/sync/parallel.rs  29
2 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 62fff604f81..f957734b04d 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -54,7 +54,7 @@ pub use worker_local::{Registry, WorkerLocal};
 mod parallel;
 #[cfg(parallel_compiler)]
 pub use parallel::scope;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard};
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard, try_par_for_each_in};
 
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
index 1944ddfb710..39dddb59569 100644
--- a/compiler/rustc_data_structures/src/sync/parallel.rs
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -77,6 +77,15 @@ mod disabled {
         })
     }
 
+    pub fn try_par_for_each_in<T: IntoIterator, E: Copy>(
+        t: T,
+        mut for_each: impl FnMut(T::Item) -> Result<(), E>,
+    ) -> Result<(), E> {
+        parallel_guard(|guard| {
+            t.into_iter().fold(Ok(()), |ret, i| guard.run(|| for_each(i)).unwrap_or(ret).and(ret))
+        })
+    }
+
     pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
         t: T,
         mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
@@ -167,6 +176,26 @@ mod enabled {
         });
     }
 
+    pub fn try_par_for_each_in<
+        T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
+        E: Copy + Send,
+    >(
+        t: T,
+        for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
+    ) -> Result<(), E> {
+        parallel_guard(|guard| {
+            if mode::is_dyn_thread_safe() {
+                let for_each = FromDyn::from(for_each);
+                t.into_par_iter()
+                    .fold_with(Ok(()), |ret, i| guard.run(|| for_each(i)).unwrap_or(ret).and(ret))
+                    .reduce(|| Ok(()), |a, b| a.and(b))
+            } else {
+                t.into_iter()
+                    .fold(Ok(()), |ret, i| guard.run(|| for_each(i)).unwrap_or(ret).and(ret))
+            }
+        })
+    }
+
     pub fn par_map<
         I,
         T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,