Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--   compiler/rustc_data_structures/src/graph/scc/mod.rs           | 152
-rw-r--r--   compiler/rustc_data_structures/src/graph/scc/tests.rs         | 184
-rw-r--r--   compiler/rustc_data_structures/src/jobserver.rs               |  94
-rw-r--r--   compiler/rustc_data_structures/src/marker.rs                  |  47
-rw-r--r--   compiler/rustc_data_structures/src/obligation_forest/mod.rs   |   2
-rw-r--r--   compiler/rustc_data_structures/src/sync.rs                    |   2
-rw-r--r--   compiler/rustc_data_structures/src/sync/freeze.rs             |   2
-rw-r--r--   compiler/rustc_data_structures/src/sync/parallel.rs           | 115
-rw-r--r--   compiler/rustc_data_structures/src/unord.rs                   |  10
9 files changed, 411 insertions, 197 deletions
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs index e7c4ea3daae..518817ea0f5 100644 --- a/compiler/rustc_data_structures/src/graph/scc/mod.rs +++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs @@ -10,10 +10,11 @@ use std::assert_matches::debug_assert_matches; use std::fmt::Debug; +use std::marker::PhantomData; use std::ops::Range; use rustc_index::{Idx, IndexSlice, IndexVec}; -use tracing::{debug, instrument}; +use tracing::{debug, instrument, trace}; use crate::fx::FxHashSet; use crate::graph::vec_graph::VecGraph; @@ -48,6 +49,25 @@ pub trait Annotation: Debug + Copy { } } +/// An accumulator for annotations. +pub trait Annotations<N: Idx> { + type Ann: Annotation; + type SccIdx: Idx + Ord; + + fn new(&self, element: N) -> Self::Ann; + fn annotate_scc(&mut self, scc: Self::SccIdx, annotation: Self::Ann); +} + +/// The nil annotation accumulator, which does nothing. +struct NoAnnotations<S: Idx + Ord>(PhantomData<S>); + +impl<N: Idx, S: Idx + Ord> Annotations<N> for NoAnnotations<S> { + type SccIdx = S; + type Ann = (); + fn new(&self, _element: N) {} + fn annotate_scc(&mut self, _scc: S, _annotation: ()) {} +} + /// The empty annotation, which does nothing. impl Annotation for () { fn merge_reached(self, _other: Self) -> Self { @@ -62,23 +82,20 @@ impl Annotation for () { /// the index type for the graph nodes and `S` is the index type for /// the SCCs. We can map from each node to the SCC that it /// participates in, and we also have the successors of each SCC. -pub struct Sccs<N: Idx, S: Idx, A: Annotation = ()> { +pub struct Sccs<N: Idx, S: Idx> { /// For each node, what is the SCC index of the SCC to which it /// belongs. scc_indices: IndexVec<N, S>, /// Data about all the SCCs. - scc_data: SccData<S, A>, + scc_data: SccData<S>, } /// Information about an invidividual SCC node. -struct SccDetails<A: Annotation> { +struct SccDetails { /// For this SCC, the range of `all_successors` where its /// successors can be found. range: Range<usize>, - - /// User-specified metadata about the SCC. - annotation: A, } // The name of this struct should discourage you from making it public and leaking @@ -87,10 +104,10 @@ struct SccDetails<A: Annotation> { // is difficult when it's publicly inspectable. // // Obey the law of Demeter! -struct SccData<S: Idx, A: Annotation> { +struct SccData<S: Idx> { /// Maps SCC indices to their metadata, including /// offsets into `all_successors`. - scc_details: IndexVec<S, SccDetails<A>>, + scc_details: IndexVec<S, SccDetails>, /// Contains the successors for all the Sccs, concatenated. The /// range of indices corresponding to a given SCC is found in its @@ -98,24 +115,18 @@ struct SccData<S: Idx, A: Annotation> { all_successors: Vec<S>, } -impl<N: Idx, S: Idx + Ord> Sccs<N, S, ()> { +impl<N: Idx, S: Idx + Ord> Sccs<N, S> { /// Compute SCCs without annotations. 
pub fn new(graph: &impl Successors<Node = N>) -> Self { - Self::new_with_annotation(graph, |_| ()) + Self::new_with_annotation(graph, &mut NoAnnotations(PhantomData::<S>)) } -} -impl<N: Idx, S: Idx + Ord, A: Annotation> Sccs<N, S, A> { /// Compute SCCs and annotate them with a user-supplied annotation - pub fn new_with_annotation<F: Fn(N) -> A>( + pub fn new_with_annotation<A: Annotations<N, SccIdx = S>>( graph: &impl Successors<Node = N>, - to_annotation: F, + annotations: &mut A, ) -> Self { - SccsConstruction::construct(graph, to_annotation) - } - - pub fn annotation(&self, scc: S) -> A { - self.scc_data.annotation(scc) + SccsConstruction::construct(graph, annotations) } pub fn scc_indices(&self) -> &IndexSlice<N, S> { @@ -160,7 +171,7 @@ impl<N: Idx, S: Idx + Ord, A: Annotation> Sccs<N, S, A> { } } -impl<N: Idx, S: Idx + Ord, A: Annotation> DirectedGraph for Sccs<N, S, A> { +impl<N: Idx, S: Idx + Ord> DirectedGraph for Sccs<N, S> { type Node = S; fn num_nodes(&self) -> usize { @@ -168,19 +179,19 @@ impl<N: Idx, S: Idx + Ord, A: Annotation> DirectedGraph for Sccs<N, S, A> { } } -impl<N: Idx, S: Idx + Ord, A: Annotation> NumEdges for Sccs<N, S, A> { +impl<N: Idx, S: Idx + Ord> NumEdges for Sccs<N, S> { fn num_edges(&self) -> usize { self.scc_data.all_successors.len() } } -impl<N: Idx, S: Idx + Ord, A: Annotation> Successors for Sccs<N, S, A> { +impl<N: Idx, S: Idx + Ord> Successors for Sccs<N, S> { fn successors(&self, node: S) -> impl Iterator<Item = Self::Node> { self.successors(node).iter().cloned() } } -impl<S: Idx, A: Annotation> SccData<S, A> { +impl<S: Idx> SccData<S> { /// Number of SCCs, fn len(&self) -> usize { self.scc_details.len() @@ -192,9 +203,8 @@ impl<S: Idx, A: Annotation> SccData<S, A> { } /// Creates a new SCC with `successors` as its successors and - /// the maximum weight of its internal nodes `scc_max_weight` and /// returns the resulting index. - fn create_scc(&mut self, successors: impl IntoIterator<Item = S>, annotation: A) -> S { + fn create_scc(&mut self, successors: impl IntoIterator<Item = S>) -> S { // Store the successors on `scc_successors_vec`, remembering // the range of indices. let all_successors_start = self.all_successors.len(); @@ -202,35 +212,28 @@ impl<S: Idx, A: Annotation> SccData<S, A> { let all_successors_end = self.all_successors.len(); debug!( - "create_scc({:?}) successors={:?}, annotation={:?}", + "create_scc({:?}) successors={:?}", self.len(), &self.all_successors[all_successors_start..all_successors_end], - annotation ); let range = all_successors_start..all_successors_end; - let metadata = SccDetails { range, annotation }; + let metadata = SccDetails { range }; self.scc_details.push(metadata) } - - fn annotation(&self, scc: S) -> A { - self.scc_details[scc].annotation - } } -struct SccsConstruction<'c, G, S, A, F> +struct SccsConstruction<'c, 'a, G, A> where G: DirectedGraph + Successors, - S: Idx, - A: Annotation, - F: Fn(G::Node) -> A, + A: Annotations<G::Node>, { graph: &'c G, /// The state of each node; used during walk to record the stack /// and after walk to record what cycle each node ended up being /// in. - node_states: IndexVec<G::Node, NodeState<G::Node, S, A>>, + node_states: IndexVec<G::Node, NodeState<G::Node, A::SccIdx, A::Ann>>, /// The stack of nodes that we are visiting as part of the DFS. node_stack: Vec<G::Node>, @@ -239,23 +242,21 @@ where /// position in this stack, and when we encounter a successor SCC, /// we push it on the stack. 
When we complete an SCC, we can pop /// everything off the stack that was found along the way. - successors_stack: Vec<S>, + successors_stack: Vec<A::SccIdx>, /// A set used to strip duplicates. As we accumulate successors /// into the successors_stack, we sometimes get duplicate entries. /// We use this set to remove those -- we also keep its storage /// around between successors to amortize memory allocation costs. - duplicate_set: FxHashSet<S>, + duplicate_set: FxHashSet<A::SccIdx>, - scc_data: SccData<S, A>, + scc_data: SccData<A::SccIdx>, - /// A function that constructs an initial SCC annotation - /// out of a single node. - to_annotation: F, + annotations: &'a mut A, } #[derive(Copy, Clone, Debug)] -enum NodeState<N, S, A> { +enum NodeState<N, S, A: Annotation> { /// This node has not yet been visited as part of the DFS. /// /// After SCC construction is complete, this state ought to be @@ -286,7 +287,7 @@ enum NodeState<N, S, A> { /// The state of walking a given node. #[derive(Copy, Clone, Debug)] -enum WalkReturn<S, A> { +enum WalkReturn<S, A: Annotation> { /// The walk found a cycle, but the entire component is not known to have /// been fully walked yet. We only know the minimum depth of this /// component in a minimum spanning tree of the graph. This component @@ -299,12 +300,10 @@ enum WalkReturn<S, A> { Complete { scc_index: S, annotation: A }, } -impl<'c, G, S, A, F> SccsConstruction<'c, G, S, A, F> +impl<'c, 'a, G, A> SccsConstruction<'c, 'a, G, A> where G: DirectedGraph + Successors, - S: Idx, - F: Fn(G::Node) -> A, - A: Annotation, + A: Annotations<G::Node>, { /// Identifies SCCs in the graph `G` and computes the resulting /// DAG. This uses a variant of [Tarjan's @@ -320,7 +319,7 @@ where /// Additionally, we keep track of a current annotation of the SCC. /// /// [wikipedia]: https://bit.ly/2EZIx84 - fn construct(graph: &'c G, to_annotation: F) -> Sccs<G::Node, S, A> { + fn construct(graph: &'c G, annotations: &'a mut A) -> Sccs<G::Node, A::SccIdx> { let num_nodes = graph.num_nodes(); let mut this = Self { @@ -330,7 +329,7 @@ where successors_stack: Vec::new(), scc_data: SccData { scc_details: IndexVec::new(), all_successors: Vec::new() }, duplicate_set: FxHashSet::default(), - to_annotation, + annotations, }; let scc_indices = graph @@ -346,7 +345,7 @@ where Sccs { scc_indices, scc_data: this.scc_data } } - fn start_walk_from(&mut self, node: G::Node) -> WalkReturn<S, A> { + fn start_walk_from(&mut self, node: G::Node) -> WalkReturn<A::SccIdx, A::Ann> { self.inspect_node(node).unwrap_or_else(|| self.walk_unvisited_node(node)) } @@ -362,7 +361,7 @@ where /// Otherwise, we are looking at a node that has already been /// completely visited. We therefore return `WalkReturn::Complete` /// with its associated SCC index. - fn inspect_node(&mut self, node: G::Node) -> Option<WalkReturn<S, A>> { + fn inspect_node(&mut self, node: G::Node) -> Option<WalkReturn<A::SccIdx, A::Ann>> { Some(match self.find_state(node) { NodeState::InCycle { scc_index, annotation } => { WalkReturn::Complete { scc_index, annotation } @@ -385,7 +384,7 @@ where /// of `r2` (and updates `r` to reflect current result). This is /// basically the "find" part of a standard union-find algorithm /// (with path compression). 
- fn find_state(&mut self, mut node: G::Node) -> NodeState<G::Node, S, A> { + fn find_state(&mut self, mut node: G::Node) -> NodeState<G::Node, A::SccIdx, A::Ann> { // To avoid recursion we temporarily reuse the `parent` of each // InCycleWith link to encode a downwards link while compressing // the path. After we have found the root or deepest node being @@ -408,7 +407,7 @@ where // a potentially derived version of the root state for non-root nodes in the chain. let (root_state, assigned_state) = { loop { - debug!("find_state(r = {node:?} in state {:?})", self.node_states[node]); + trace!("find_state(r = {node:?} in state {:?})", self.node_states[node]); match self.node_states[node] { // This must have been the first and only state since it is unexplored*; // no update needed! * Unless there is a bug :') @@ -482,7 +481,7 @@ where if previous_node == node { return root_state; } - debug!("Compressing {node:?} down to {previous_node:?} with state {assigned_state:?}"); + trace!("Compressing {node:?} down to {previous_node:?} with state {assigned_state:?}"); // Update to previous node in the link. match self.node_states[previous_node] { @@ -507,9 +506,9 @@ where /// Call this method when `inspect_node` has returned `None`. Having the /// caller decide avoids mutual recursion between the two methods and allows /// us to maintain an allocated stack for nodes on the path between calls. - #[instrument(skip(self, initial), level = "debug")] - fn walk_unvisited_node(&mut self, initial: G::Node) -> WalkReturn<S, A> { - debug!("Walk unvisited node: {initial:?}"); + #[instrument(skip(self, initial), level = "trace")] + fn walk_unvisited_node(&mut self, initial: G::Node) -> WalkReturn<A::SccIdx, A::Ann> { + trace!("Walk unvisited node: {initial:?}"); struct VisitingNodeFrame<G: DirectedGraph, Successors, A> { node: G::Node, successors: Option<Successors>, @@ -537,7 +536,7 @@ where successors_len: 0, min_cycle_root: initial, successor_node: initial, - current_component_annotation: (self.to_annotation)(initial), + current_component_annotation: self.annotations.new(initial), }]; let mut return_value = None; @@ -556,11 +555,7 @@ where let node = *node; let depth = *depth; - // node is definitely in the current component, add it to the annotation. - if node != initial { - current_component_annotation.update_scc((self.to_annotation)(node)); - } - debug!( + trace!( "Visiting {node:?} at depth {depth:?}, annotation: {current_component_annotation:?}" ); @@ -568,7 +563,7 @@ where Some(successors) => successors, None => { // This None marks that we still have the initialize this node's frame. - debug!(?depth, ?node); + trace!(?depth, ?node); debug_assert_matches!(self.node_states[node], NodeState::NotVisited); @@ -598,7 +593,7 @@ where return_value.take().into_iter().map(|walk| (*successor_node, Some(walk))); let successor_walk = successors.map(|successor_node| { - debug!(?node, ?successor_node); + trace!(?node, ?successor_node); (successor_node, self.inspect_node(successor_node)) }); for (successor_node, walk) in returned_walk.chain(successor_walk) { @@ -609,13 +604,13 @@ where min_depth: successor_min_depth, annotation: successor_annotation, }) => { - debug!( + trace!( "Cycle found from {node:?}, minimum depth: {successor_min_depth:?}, annotation: {successor_annotation:?}" ); // Track the minimum depth we can reach. 
assert!(successor_min_depth <= depth); if successor_min_depth < *min_depth { - debug!(?node, ?successor_min_depth); + trace!(?node, ?successor_min_depth); *min_depth = successor_min_depth; *min_cycle_root = successor_node; } @@ -627,20 +622,20 @@ where scc_index: successor_scc_index, annotation: successor_annotation, }) => { - debug!( + trace!( "Complete; {node:?} is root of complete-visited SCC idx {successor_scc_index:?} with annotation {successor_annotation:?}" ); // Push the completed SCC indices onto // the `successors_stack` for later. - debug!(?node, ?successor_scc_index); + trace!(?node, ?successor_scc_index); successors_stack.push(successor_scc_index); current_component_annotation.update_reachable(successor_annotation); } // `node` has no more (direct) successors; search recursively. None => { let depth = depth + 1; - debug!("Recursing down into {successor_node:?} at depth {depth:?}"); - debug!(?depth, ?successor_node); + trace!("Recursing down into {successor_node:?} at depth {depth:?}"); + trace!(?depth, ?successor_node); // Remember which node the return value will come from. frame.successor_node = successor_node; // Start a new stack frame, then step into it. @@ -652,14 +647,14 @@ where min_depth: depth, min_cycle_root: successor_node, successor_node, - current_component_annotation: (self.to_annotation)(successor_node), + current_component_annotation: self.annotations.new(successor_node), }); continue 'recurse; } } } - debug!("Finished walk from {node:?} with annotation: {current_component_annotation:?}"); + trace!("Finished walk from {node:?} with annotation: {current_component_annotation:?}"); // Completed walk, remove `node` from the stack. let r = self.node_stack.pop(); @@ -691,8 +686,9 @@ where debug!("Creating SCC rooted in {node:?} with successor {:?}", frame.successor_node); - let scc_index = - self.scc_data.create_scc(deduplicated_successors, current_component_annotation); + let scc_index = self.scc_data.create_scc(deduplicated_successors); + + self.annotations.annotate_scc(scc_index, current_component_annotation); self.node_states[node] = NodeState::InCycle { scc_index, annotation: current_component_annotation }; diff --git a/compiler/rustc_data_structures/src/graph/scc/tests.rs b/compiler/rustc_data_structures/src/graph/scc/tests.rs index 373f87bfdbc..8f04baf51f3 100644 --- a/compiler/rustc_data_structures/src/graph/scc/tests.rs +++ b/compiler/rustc_data_structures/src/graph/scc/tests.rs @@ -5,8 +5,31 @@ use crate::graph::tests::TestGraph; #[derive(Copy, Clone, Debug)] struct MaxReached(usize); -type UsizeSccs = Sccs<usize, usize, ()>; -type MaxReachedSccs = Sccs<usize, usize, MaxReached>; +struct Maxes(IndexVec<usize, MaxReached>, fn(usize) -> usize); +type UsizeSccs = Sccs<usize, usize>; + +impl Annotations<usize> for Maxes { + fn new(&self, element: usize) -> MaxReached { + MaxReached(self.1(element)) + } + + fn annotate_scc(&mut self, scc: usize, annotation: MaxReached) { + let i = self.0.push(annotation); + assert!(i == scc); + } + + type Ann = MaxReached; + type SccIdx = usize; +} + +impl Maxes { + fn annotation(&self, scc: usize) -> MaxReached { + self.0[scc] + } + fn new(mapping: fn(usize) -> usize) -> Self { + Self(IndexVec::new(), mapping) + } +} impl Annotation for MaxReached { fn merge_scc(self, other: Self) -> Self { @@ -14,7 +37,7 @@ impl Annotation for MaxReached { } fn merge_reached(self, other: Self) -> Self { - self.merge_scc(other) + Self(std::cmp::max(other.0, self.0)) } } @@ -24,17 +47,32 @@ impl PartialEq<usize> for MaxReached { } } -impl 
MaxReached { - fn from_usize(nr: usize) -> Self { - Self(nr) - } -} - #[derive(Copy, Clone, Debug)] struct MinMaxIn { min: usize, max: usize, } +struct MinMaxes(IndexVec<usize, MinMaxIn>, fn(usize) -> MinMaxIn); + +impl MinMaxes { + fn annotation(&self, scc: usize) -> MinMaxIn { + self.0[scc] + } +} + +impl Annotations<usize> for MinMaxes { + fn new(&self, element: usize) -> MinMaxIn { + self.1(element) + } + + fn annotate_scc(&mut self, scc: usize, annotation: MinMaxIn) { + let i = self.0.push(annotation); + assert!(i == scc); + } + + type Ann = MinMaxIn; + type SccIdx = usize; +} impl Annotation for MinMaxIn { fn merge_scc(self, other: Self) -> Self { @@ -261,67 +299,68 @@ fn bench_sccc(b: &mut test::Bencher) { #[test] fn test_max_self_loop() { let graph = TestGraph::new(0, &[(0, 0)]); - let sccs: MaxReachedSccs = - Sccs::new_with_annotation(&graph, |n| if n == 0 { MaxReached(17) } else { MaxReached(0) }); - assert_eq!(sccs.annotation(0), 17); + let mut annotations = Maxes(IndexVec::new(), |n| if n == 0 { 17 } else { 0 }); + Sccs::new_with_annotation(&graph, &mut annotations); + assert_eq!(annotations.0[0], 17); } #[test] fn test_max_branch() { let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 4)]); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, MaxReached::from_usize); - assert_eq!(sccs.annotation(sccs.scc(0)), 4); - assert_eq!(sccs.annotation(sccs.scc(1)), 3); - assert_eq!(sccs.annotation(sccs.scc(2)), 4); + let mut annotations = Maxes(IndexVec::new(), |n| n); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + assert_eq!(annotations.0[sccs.scc(0)], 4); + assert_eq!(annotations.0[sccs.scc(1)], 3); + assert_eq!(annotations.0[sccs.scc(2)], 4); } + #[test] fn test_single_cycle_max() { let graph = TestGraph::new(0, &[(0, 2), (2, 3), (2, 4), (4, 1), (1, 2)]); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, MaxReached::from_usize); - assert_eq!(sccs.annotation(sccs.scc(2)), 4); - assert_eq!(sccs.annotation(sccs.scc(0)), 4); -} - -#[test] -fn test_simple_cycle_max() { - let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 0)]); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, MaxReached::from_usize); - assert_eq!(sccs.num_sccs(), 1); + let mut annotations = Maxes(IndexVec::new(), |n| n); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + assert_eq!(annotations.0[sccs.scc(2)], 4); + assert_eq!(annotations.0[sccs.scc(0)], 4); } #[test] fn test_double_cycle_max() { let graph = TestGraph::new(0, &[(0, 1), (1, 2), (1, 4), (2, 3), (2, 4), (3, 5), (4, 1), (5, 4)]); - let sccs: MaxReachedSccs = - Sccs::new_with_annotation(&graph, |n| if n == 5 { MaxReached(2) } else { MaxReached(1) }); + let mut annotations = Maxes(IndexVec::new(), |n| if n == 5 { 2 } else { 1 }); + + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); - assert_eq!(sccs.annotation(sccs.scc(0)).0, 2); + assert_eq!(annotations.0[sccs.scc(0)].0, 2); } #[test] fn test_bug_minimised() { let graph = TestGraph::new(0, &[(0, 3), (0, 1), (3, 2), (2, 3), (1, 4), (4, 5), (5, 4)]); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, |n| match n { - 3 => MaxReached(1), - _ => MaxReached(0), + let mut annotations = Maxes(IndexVec::new(), |n| match n { + 3 => 1, + _ => 0, }); - assert_eq!(sccs.annotation(sccs.scc(2)), 1); - assert_eq!(sccs.annotation(sccs.scc(1)), 0); - assert_eq!(sccs.annotation(sccs.scc(4)), 0); + + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + assert_eq!(annotations.annotation(sccs.scc(2)), 1); + 
assert_eq!(annotations.annotation(sccs.scc(1)), 0); + assert_eq!(annotations.annotation(sccs.scc(4)), 0); } #[test] fn test_bug_max_leak_minimised() { let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (3, 0), (3, 4), (4, 3)]); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, |w| match w { - 4 => MaxReached(1), - _ => MaxReached(0), + let mut annotations = Maxes(IndexVec::new(), |w| match w { + 4 => 1, + _ => 0, }); - assert_eq!(sccs.annotation(sccs.scc(2)), 0); - assert_eq!(sccs.annotation(sccs.scc(3)), 1); - assert_eq!(sccs.annotation(sccs.scc(0)), 1); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + + assert_eq!(annotations.annotation(sccs.scc(2)), 0); + assert_eq!(annotations.annotation(sccs.scc(3)), 1); + assert_eq!(annotations.annotation(sccs.scc(0)), 1); } #[test] @@ -369,48 +408,49 @@ fn test_bug_max_leak() { (23, 24), ], ); - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, |w| match w { - 22 => MaxReached(1), - 24 => MaxReached(2), - 27 => MaxReached(2), - _ => MaxReached(0), + let mut annotations = Maxes::new(|w| match w { + 22 => 1, + 24 => 2, + 27 => 2, + _ => 0, }); - - assert_eq!(sccs.annotation(sccs.scc(2)), 0); - assert_eq!(sccs.annotation(sccs.scc(7)), 0); - assert_eq!(sccs.annotation(sccs.scc(8)), 2); - assert_eq!(sccs.annotation(sccs.scc(23)), 2); - assert_eq!(sccs.annotation(sccs.scc(3)), 2); - assert_eq!(sccs.annotation(sccs.scc(0)), 2); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + + assert_eq!(annotations.annotation(sccs.scc(2)), 0); + assert_eq!(annotations.annotation(sccs.scc(7)), 0); + assert_eq!(annotations.annotation(sccs.scc(8)), 2); + assert_eq!(annotations.annotation(sccs.scc(23)), 2); + assert_eq!(annotations.annotation(sccs.scc(3)), 2); + assert_eq!(annotations.annotation(sccs.scc(0)), 2); } #[test] fn test_bug_max_zero_stick_shape() { let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 3), (3, 2), (3, 4)]); - - let sccs: MaxReachedSccs = Sccs::new_with_annotation(&graph, |w| match w { - 4 => MaxReached(1), - _ => MaxReached(0), + let mut annotations = Maxes::new(|w| match w { + 4 => 1, + _ => 0, }); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); - assert_eq!(sccs.annotation(sccs.scc(0)), 1); - assert_eq!(sccs.annotation(sccs.scc(1)), 1); - assert_eq!(sccs.annotation(sccs.scc(2)), 1); - assert_eq!(sccs.annotation(sccs.scc(3)), 1); - assert_eq!(sccs.annotation(sccs.scc(4)), 1); + assert_eq!(annotations.annotation(sccs.scc(0)), 1); + assert_eq!(annotations.annotation(sccs.scc(1)), 1); + assert_eq!(annotations.annotation(sccs.scc(2)), 1); + assert_eq!(annotations.annotation(sccs.scc(3)), 1); + assert_eq!(annotations.annotation(sccs.scc(4)), 1); } #[test] fn test_min_max_in() { let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (3, 0), (3, 4), (4, 3), (3, 5)]); - let sccs: Sccs<usize, usize, MinMaxIn> = - Sccs::new_with_annotation(&graph, |w| MinMaxIn { min: w, max: w }); - - assert_eq!(sccs.annotation(sccs.scc(2)).min, 2); - assert_eq!(sccs.annotation(sccs.scc(2)).max, 2); - assert_eq!(sccs.annotation(sccs.scc(0)).min, 0); - assert_eq!(sccs.annotation(sccs.scc(0)).max, 4); - assert_eq!(sccs.annotation(sccs.scc(3)).min, 0); - assert_eq!(sccs.annotation(sccs.scc(3)).max, 4); - assert_eq!(sccs.annotation(sccs.scc(5)).min, 5); + let mut annotations = MinMaxes(IndexVec::new(), |w| MinMaxIn { min: w, max: w }); + let sccs = Sccs::new_with_annotation(&graph, &mut annotations); + + assert_eq!(annotations.annotation(sccs.scc(2)).min, 2); + 
assert_eq!(annotations.annotation(sccs.scc(2)).max, 2); + assert_eq!(annotations.annotation(sccs.scc(0)).min, 0); + assert_eq!(annotations.annotation(sccs.scc(0)).max, 4); + assert_eq!(annotations.annotation(sccs.scc(3)).min, 0); + assert_eq!(annotations.annotation(sccs.scc(3)).max, 4); + assert_eq!(annotations.annotation(sccs.scc(5)).min, 5); } diff --git a/compiler/rustc_data_structures/src/jobserver.rs b/compiler/rustc_data_structures/src/jobserver.rs index 1204f2d692d..3ed1ea7543f 100644 --- a/compiler/rustc_data_structures/src/jobserver.rs +++ b/compiler/rustc_data_structures/src/jobserver.rs @@ -1,7 +1,8 @@ -use std::sync::{LazyLock, OnceLock}; +use std::sync::{Arc, LazyLock, OnceLock}; pub use jobserver_crate::{Acquired, Client, HelperThread}; use jobserver_crate::{FromEnv, FromEnvErrorKind}; +use parking_lot::{Condvar, Mutex}; // We can only call `from_env_ext` once per process @@ -71,10 +72,93 @@ pub fn client() -> Client { GLOBAL_CLIENT_CHECKED.get().expect(ACCESS_ERROR).clone() } -pub fn acquire_thread() { - GLOBAL_CLIENT_CHECKED.get().expect(ACCESS_ERROR).acquire_raw().ok(); +struct ProxyData { + /// The number of tokens assigned to threads. + /// If this is 0, a single token is still assigned to this process, but is unused. + used: u16, + + /// The number of threads requesting a token + pending: u16, +} + +/// This is a jobserver proxy used to ensure that we hold on to at least one token. +pub struct Proxy { + client: Client, + data: Mutex<ProxyData>, + + /// Threads which are waiting on a token will wait on this. + wake_pending: Condvar, + + helper: OnceLock<HelperThread>, } -pub fn release_thread() { - GLOBAL_CLIENT_CHECKED.get().expect(ACCESS_ERROR).release_raw().ok(); +impl Proxy { + pub fn new() -> Arc<Self> { + let proxy = Arc::new(Proxy { + client: client(), + data: Mutex::new(ProxyData { used: 1, pending: 0 }), + wake_pending: Condvar::new(), + helper: OnceLock::new(), + }); + let proxy_ = Arc::clone(&proxy); + let helper = proxy + .client + .clone() + .into_helper_thread(move |token| { + if let Ok(token) = token { + let mut data = proxy_.data.lock(); + if data.pending > 0 { + // Give the token to a waiting thread + token.drop_without_releasing(); + assert!(data.used > 0); + data.used += 1; + data.pending -= 1; + proxy_.wake_pending.notify_one(); + } else { + // The token is no longer needed, drop it. + drop(data); + drop(token); + } + } + }) + .expect("failed to create helper thread"); + proxy.helper.set(helper).unwrap(); + proxy + } + + pub fn acquire_thread(&self) { + let mut data = self.data.lock(); + + if data.used == 0 { + // There was a free token around. This can + // happen when all threads release their token. + assert_eq!(data.pending, 0); + data.used += 1; + } else { + // Request a token from the helper thread. We can't directly use `acquire_raw` + // as we also need to be able to wait for the final token in the process which + // does not get a corresponding `release_raw` call. 
+ self.helper.get().unwrap().request_token(); + data.pending += 1; + self.wake_pending.wait(&mut data); + } + } + + pub fn release_thread(&self) { + let mut data = self.data.lock(); + + if data.pending > 0 { + // Give the token to a waiting thread + data.pending -= 1; + self.wake_pending.notify_one(); + } else { + data.used -= 1; + + // Release the token unless it's the last one in the process + if data.used > 0 { + drop(data); + self.client.release_raw().ok(); + } + } + } } diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs index 64c64bfa3c2..e0df1b232e1 100644 --- a/compiler/rustc_data_structures/src/marker.rs +++ b/compiler/rustc_data_structures/src/marker.rs @@ -1,13 +1,13 @@ use std::alloc::Allocator; -#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. \ +#[diagnostic::on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. \ Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")] // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()` // is true. These types can be wrapped in a `FromDyn` to get a `Send` type. Wrapping a // `Send` type in `IntoDynSyncSend` will create a `DynSend` type. pub unsafe auto trait DynSend {} -#[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSync`. \ +#[diagnostic::on_unimplemented(message = "`{Self}` doesn't implement `DynSync`. \ Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Sync`")] // This is an auto trait for types which can be shared across threads if `sync::is_dyn_thread_safe()` // is true. These types can be wrapped in a `FromDyn` to get a `Sync` type. Wrapping a @@ -39,8 +39,15 @@ impls_dyn_send_neg!( [std::io::StderrLock<'_>] ); -#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] -// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms +#[cfg(any( + unix, + target_os = "hermit", + all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", + target_os = "wasi", + target_os = "xous" +))] +// Consistent with `std`, `env_imp::Env` is `!Sync` in these platforms impl !DynSend for std::env::VarsOs {} macro_rules! already_send { @@ -52,8 +59,8 @@ macro_rules! already_send { // These structures are already `Send`. already_send!( [std::backtrace::Backtrace][std::io::Stdout][std::io::Stderr][std::io::Error][std::fs::File] - [rustc_arena::DroplessArena][crate::memmap::Mmap][crate::profiling::SelfProfiler] - [crate::owned_slice::OwnedSlice] + [rustc_arena::DroplessArena][jobserver_crate::Client][jobserver_crate::HelperThread] + [crate::memmap::Mmap][crate::profiling::SelfProfiler][crate::owned_slice::OwnedSlice] ); macro_rules! impl_dyn_send { @@ -106,8 +113,15 @@ impls_dyn_sync_neg!( [std::sync::mpsc::Sender<T> where T] ); -#[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] -// Consistent with `std`, `os_imp::Env` is `!Sync` in these platforms +#[cfg(any( + unix, + target_os = "hermit", + all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", + target_os = "wasi", + target_os = "xous" +))] +// Consistent with `std`, `env_imp::Env` is `!Sync` in these platforms impl !DynSync for std::env::VarsOs {} macro_rules! already_sync { @@ -120,8 +134,8 @@ macro_rules! 
already_sync { already_sync!( [std::sync::atomic::AtomicBool][std::sync::atomic::AtomicUsize][std::sync::atomic::AtomicU8] [std::sync::atomic::AtomicU32][std::backtrace::Backtrace][std::io::Error][std::fs::File] - [jobserver_crate::Client][crate::memmap::Mmap][crate::profiling::SelfProfiler] - [crate::owned_slice::OwnedSlice] + [jobserver_crate::Client][jobserver_crate::HelperThread][crate::memmap::Mmap] + [crate::profiling::SelfProfiler][crate::owned_slice::OwnedSlice] ); // Use portable AtomicU64 for targets without native 64-bit atomics @@ -180,6 +194,12 @@ impl<T> FromDyn<T> { } #[inline(always)] + pub fn derive<O>(&self, val: O) -> FromDyn<O> { + // We already did the check for `sync::is_dyn_thread_safe()` when creating `Self` + FromDyn(val) + } + + #[inline(always)] pub fn into_inner(self) -> T { self.0 } @@ -200,6 +220,13 @@ impl<T> std::ops::Deref for FromDyn<T> { } } +impl<T> std::ops::DerefMut for FromDyn<T> { + #[inline(always)] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + // A wrapper to convert a struct that is already a `Send` or `Sync` into // an instance of `DynSend` and `DynSync`, since the compiler cannot infer // it automatically in some cases. (e.g. Box<dyn Send / Sync>) diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs index f63b201742d..2c62034c6e8 100644 --- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs +++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs @@ -315,7 +315,7 @@ mod helper { use super::*; pub(super) type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>; impl<O: ForestObligation> ObligationForest<O> { - #[cfg_attr(not(bootstrap), define_opaque(ObligationTreeIdGenerator))] + #[define_opaque(ObligationTreeIdGenerator)] pub fn new() -> ObligationForest<O> { ObligationForest { nodes: vec![], diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs index 616a18a72ab..80d49effbf8 100644 --- a/compiler/rustc_data_structures/src/sync.rs +++ b/compiler/rustc_data_structures/src/sync.rs @@ -43,7 +43,7 @@ pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard}; pub use self::lock::{Lock, LockGuard, Mode}; pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; pub use self::parallel::{ - join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in, + join, par_for_each_in, par_map, parallel_guard, scope, spawn, try_par_for_each_in, }; pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec}; pub use self::worker_local::{Registry, WorkerLocal}; diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs index 9720b22ea7d..6338afb92c3 100644 --- a/compiler/rustc_data_structures/src/sync/freeze.rs +++ b/compiler/rustc_data_structures/src/sync/freeze.rs @@ -88,7 +88,7 @@ impl<T> FreezeLock<T> { #[inline] #[track_caller] pub fn write(&self) -> FreezeWriteGuard<'_, T> { - self.try_write().expect("still mutable") + self.try_write().expect("data should not be frozen if we're still attempting to mutate it") } #[inline] diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs index 8ef8a3f3585..64db39cc4c6 100644 --- a/compiler/rustc_data_structures/src/sync/parallel.rs +++ b/compiler/rustc_data_structures/src/sync/parallel.rs @@ -7,7 +7,6 @@ use std::any::Any; use std::panic::{AssertUnwindSafe, catch_unwind, resume_unwind}; 
use parking_lot::Mutex; -use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator}; use crate::FatalErrorMarker; use crate::sync::{DynSend, DynSync, FromDyn, IntoDynSyncSend, mode}; @@ -94,14 +93,25 @@ macro_rules! parallel { }; } +pub fn spawn(func: impl FnOnce() + DynSend + 'static) { + if mode::is_dyn_thread_safe() { + let func = FromDyn::from(func); + rayon_core::spawn(|| { + (func.into_inner())(); + }); + } else { + func() + } +} + // This function only works when `mode::is_dyn_thread_safe()`. pub fn scope<'scope, OP, R>(op: OP) -> R where - OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend, + OP: FnOnce(&rayon_core::Scope<'scope>) -> R + DynSend, R: DynSend, { let op = FromDyn::from(op); - rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() + rayon_core::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner() } #[inline] @@ -114,7 +124,7 @@ where let oper_a = FromDyn::from(oper_a); let oper_b = FromDyn::from(oper_b); let (a, b) = parallel_guard(|guard| { - rayon::join( + rayon_core::join( move || guard.run(move || FromDyn::from(oper_a.into_inner()())), move || guard.run(move || FromDyn::from(oper_b.into_inner()())), ) @@ -125,56 +135,103 @@ where } } -pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>( +fn par_slice<I: DynSend>( + items: &mut [I], + guard: &ParallelGuard, + for_each: impl Fn(&mut I) + DynSync + DynSend, +) { + struct State<'a, F> { + for_each: FromDyn<F>, + guard: &'a ParallelGuard, + group: usize, + } + + fn par_rec<I: DynSend, F: Fn(&mut I) + DynSync + DynSend>( + items: &mut [I], + state: &State<'_, F>, + ) { + if items.len() <= state.group { + for item in items { + state.guard.run(|| (state.for_each)(item)); + } + } else { + let (left, right) = items.split_at_mut(items.len() / 2); + let mut left = state.for_each.derive(left); + let mut right = state.for_each.derive(right); + rayon_core::join(move || par_rec(*left, state), move || par_rec(*right, state)); + } + } + + let state = State { + for_each: FromDyn::from(for_each), + guard, + group: std::cmp::max(items.len() / 128, 1), + }; + par_rec(items, &state) +} + +pub fn par_for_each_in<I: DynSend, T: IntoIterator<Item = I>>( t: T, - for_each: impl Fn(I) + DynSync + DynSend, + for_each: impl Fn(&I) + DynSync + DynSend, ) { parallel_guard(|guard| { if mode::is_dyn_thread_safe() { - let for_each = FromDyn::from(for_each); - t.into_par_iter().for_each(|i| { - guard.run(|| for_each(i)); - }); + let mut items: Vec<_> = t.into_iter().collect(); + par_slice(&mut items, guard, |i| for_each(&*i)) } else { t.into_iter().for_each(|i| { - guard.run(|| for_each(i)); + guard.run(|| for_each(&i)); }); } }); } -pub fn try_par_for_each_in< - T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>, - E: Send, ->( +/// This runs `for_each` in parallel for each iterator item. If one or more of the +/// `for_each` calls returns `Err`, the function will also return `Err`. The error returned +/// will be non-deterministic, but this is expected to be used with `ErrorGuaranteed` which +/// are all equivalent. 
+pub fn try_par_for_each_in<T: IntoIterator, E: DynSend>( t: T, - for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend, -) -> Result<(), E> { + for_each: impl Fn(&<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend, +) -> Result<(), E> +where + <T as IntoIterator>::Item: DynSend, +{ parallel_guard(|guard| { if mode::is_dyn_thread_safe() { - let for_each = FromDyn::from(for_each); - t.into_par_iter() - .filter_map(|i| guard.run(|| for_each(i))) - .reduce(|| Ok(()), Result::and) + let mut items: Vec<_> = t.into_iter().collect(); + + let error = Mutex::new(None); + + par_slice(&mut items, guard, |i| { + if let Err(err) = for_each(&*i) { + *error.lock() = Some(err); + } + }); + + if let Some(err) = error.into_inner() { Err(err) } else { Ok(()) } } else { - t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and) + t.into_iter().filter_map(|i| guard.run(|| for_each(&i))).fold(Ok(()), Result::and) } }) } -pub fn par_map< - I, - T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>, - R: std::marker::Send, - C: FromIterator<R> + FromParallelIterator<R>, ->( +pub fn par_map<I: DynSend, T: IntoIterator<Item = I>, R: DynSend, C: FromIterator<R>>( t: T, map: impl Fn(I) -> R + DynSync + DynSend, ) -> C { parallel_guard(|guard| { if mode::is_dyn_thread_safe() { let map = FromDyn::from(map); - t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect() + + let mut items: Vec<(Option<I>, Option<R>)> = + t.into_iter().map(|i| (Some(i), None)).collect(); + + par_slice(&mut items, guard, |i| { + i.1 = Some(map(i.0.take().unwrap())); + }); + + items.into_iter().filter_map(|i| i.1).collect() } else { t.into_iter().filter_map(|i| guard.run(|| map(i))).collect() } diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs index baa66cd7c85..3d44fb1fd48 100644 --- a/compiler/rustc_data_structures/src/unord.rs +++ b/compiler/rustc_data_structures/src/unord.rs @@ -109,6 +109,16 @@ impl<T, I: Iterator<Item = T>> UnordItems<T, I> { pub fn collect<C: From<UnordItems<T, I>>>(self) -> C { self.into() } + + /// If the iterator has only one element, returns it, otherwise returns `None`. + #[track_caller] + pub fn get_only(mut self) -> Option<T> { + let item = self.0.next(); + if self.0.next().is_some() { + return None; + } + item + } } impl<T> UnordItems<T, std::iter::Empty<T>> { |
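The biggest change above is to the SCC machinery: `Sccs<N, S>` no longer carries an annotation type parameter or stores per-SCC annotations; instead `new_with_annotation` takes a mutable `Annotations` accumulator, which seeds an annotation per node (`new`) and receives one `annotate_scc` callback per completed SCC. Below is a minimal sketch mirroring the `Maxes` accumulator from the updated tests; the `use` paths are assumptions for code living outside `rustc_data_structures`, and `graph` stands in for any `Successors<Node = usize>` implementation.

use rustc_data_structures::graph::scc::{Annotation, Annotations, Sccs};
use rustc_index::IndexVec;

/// Tracks the largest node index reachable from each SCC.
#[derive(Copy, Clone, Debug)]
struct MaxReached(usize);

impl Annotation for MaxReached {
    fn merge_scc(self, other: Self) -> Self {
        Self(std::cmp::max(self.0, other.0))
    }
    fn merge_reached(self, other: Self) -> Self {
        Self(std::cmp::max(self.0, other.0))
    }
}

/// The accumulator: one annotation slot per SCC plus a per-node seed function.
struct Maxes(IndexVec<usize, MaxReached>, fn(usize) -> usize);

impl Annotations<usize> for Maxes {
    type Ann = MaxReached;
    type SccIdx = usize;

    // Seed annotation for a single graph node.
    fn new(&self, element: usize) -> MaxReached {
        MaxReached(self.1(element))
    }

    // Called once per completed SCC; the tests rely on SCCs being announced
    // in index order, so a plain `push` lines up with `scc`.
    fn annotate_scc(&mut self, scc: usize, annotation: MaxReached) {
        let i = self.0.push(annotation);
        assert_eq!(i, scc);
    }
}

// Callers now read annotations back out of the accumulator rather than out
// of the `Sccs` value:
//
//     let mut annotations = Maxes(IndexVec::new(), |n| n);
//     let sccs: Sccs<usize, usize> = Sccs::new_with_annotation(&graph, &mut annotations);
//     let max_in_scc_of_0 = annotations.0[sccs.scc(0)];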
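jobserver.rs replaces the free `acquire_thread`/`release_thread` functions with a `Proxy` that owns a jobserver helper thread, always keeps the process's implicit token, and parks threads on a condvar until a token arrives. A hypothetical usage sketch, assuming the process-global jobserver client has already been initialized and with placeholder worker bodies:

use std::sync::Arc;

use rustc_data_structures::jobserver::Proxy;

fn run_workers(num_workers: usize) {
    // One proxy per process: it hangs on to the implicit token the process
    // already owns and asks its helper thread for more as workers show up.
    let proxy: Arc<Proxy> = Proxy::new();

    std::thread::scope(|s| {
        for _ in 0..num_workers {
            let proxy = Arc::clone(&proxy);
            s.spawn(move || {
                // Blocks until a token is available; the first caller reuses
                // the process's own token without contacting the jobserver.
                proxy.acquire_thread();
                // ... CPU-bound work goes here ...
                proxy.release_thread();
            });
        }
    });
}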
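sync/parallel.rs drops the rayon parallel-iterator plumbing: `par_for_each_in`, `try_par_for_each_in` and `par_map` now collect into a `Vec` and hand it to a recursive `par_slice` helper that splits the slice in half with `rayon_core::join` until a chunk is at or below a group size of `len / 128` (but at least 1). The standalone sketch below shows just that splitting strategy; it uses plain `rayon::join` and leaves out the `FromDyn` wrappers and `ParallelGuard` panic handling the compiler version needs.

// Recursively split the slice and let rayon decide whether to run the two
// halves in parallel or inline on the current thread.
fn par_slice<I: Send, F: Fn(&mut I) + Sync>(items: &mut [I], group: usize, for_each: &F) {
    if items.len() <= group {
        // Small enough: process the chunk sequentially.
        for item in items {
            for_each(item);
        }
    } else {
        let mid = items.len() / 2;
        let (left, right) = items.split_at_mut(mid);
        rayon::join(|| par_slice(left, group, for_each), || par_slice(right, group, for_each));
    }
}

fn main() {
    let mut data: Vec<u64> = (0..1_000).collect();
    // Same group size the compiler picks: len / 128, but at least 1.
    let group = std::cmp::max(data.len() / 128, 1);
    par_slice(&mut data, group, &|x: &mut u64| *x *= 2);
    assert!(data.iter().enumerate().all(|(i, &x)| x == 2 * i as u64));
}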
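unord.rs gains `UnordItems::get_only`, which returns the element only when the iterator yields exactly one item, and `None` for zero or several. A short example, assuming the usual `UnordSet` `FromIterator` construction and its `items()` accessor:

use rustc_data_structures::unord::UnordSet;

fn demo() {
    let single: UnordSet<u32> = [42].into_iter().collect();
    assert_eq!(single.items().get_only(), Some(&42));

    let several: UnordSet<u32> = [1, 2, 3].into_iter().collect();
    assert_eq!(several.items().get_only(), None);
}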
