author     bors <bors@rust-lang.org>  2023-04-05 10:38:02 +0000
committer  bors <bors@rust-lang.org>  2023-04-05 10:38:02 +0000
commit     383c1d729ead956584a6dd83cce17c7fdeb61468 (patch)
tree       9fc21ebe17133cdb5537cfe8cdc0a716af076630 /compiler/rustc_data_structures/src
parent     90a9f69c80812c8694959c1f2d5c336e3300d1e2 (diff)
parent     457a162d008befc6baa9acde637f29780c079844 (diff)
Auto merge of #109117 - oli-obk:locks, r=michaelwoerister
Avoid a few locks

We can use atomics or data structures tuned for specific access patterns instead of locks. This may be an improvement for parallel rustc, but it is mostly a cleanup: it makes various data structures usable only in the way they are actually used right now (data is appended, never mutated), instead of guarding them with a general-purpose lock.
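As an illustration of that pattern (a minimal sketch, not part of the commit; it assumes only the `AppendOnlyVec` API introduced in the diff below):

    use rustc_data_structures::sync::AppendOnlyVec;

    fn main() {
        // Shared, append-only storage: `push` and `get` take `&self`,
        // so neither readers nor writers ever hold a lock guard.
        let values: AppendOnlyVec<u32> = AppendOnlyVec::new();
        let idx = values.push(42); // returns the index of the new element
        assert_eq!(values.get(idx), Some(42));
        assert!(values.contains(42));
    }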
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs      | 18
-rw-r--r--  compiler/rustc_data_structures/src/sync/vec.rs  | 68
2 files changed, 75 insertions(+), 11 deletions(-)
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 4e2126fff7b..e8ee4fb76cc 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -51,7 +51,7 @@ use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
 
-pub use vec::AppendOnlyVec;
+pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
 
 mod vec;
 
@@ -107,6 +107,14 @@ cfg_if! {
             }
         }
 
+        impl Atomic<bool> {
+            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
+                let old = self.0.get();
+                self.0.set(old | val);
+                old
+            }
+        }
+
         impl<T: Copy + PartialEq> Atomic<T> {
             #[inline]
             pub fn compare_exchange(&self,
@@ -481,14 +489,6 @@ impl<T: Default> Default for Lock<T> {
     }
 }
 
-// FIXME: Probably a bad idea
-impl<T: Clone> Clone for Lock<T> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Lock::new(self.borrow().clone())
-    }
-}
-
 #[derive(Debug, Default)]
 pub struct RwLock<T>(InnerRwLock<T>);
 
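For reference, the `fetch_or` added above gives the non-parallel `Atomic<bool>` shim (a `Cell` wrapper, as the `self.0.get()`/`self.0.set()` calls show) the same semantics as the std atomic used in parallel builds: store `old | val` and return the previous value. A sketch using only std, not code from this commit:

    use std::sync::atomic::{AtomicBool, Ordering};

    fn main() {
        let flag = AtomicBool::new(false);
        // Stores `old | true` and returns the previous value.
        let old = flag.fetch_or(true, Ordering::SeqCst);
        assert!(!old); // the flag was not set before
        assert!(flag.load(Ordering::SeqCst)); // it is set now
    }
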
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
index cbea4f05999..aefaa8519d5 100644
--- a/compiler/rustc_data_structures/src/sync/vec.rs
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -2,7 +2,8 @@ use std::marker::PhantomData;
 
 use rustc_index::vec::Idx;
 
-pub struct AppendOnlyVec<I: Idx, T: Copy> {
+#[derive(Default)]
+pub struct AppendOnlyIndexVec<I: Idx, T: Copy> {
     #[cfg(not(parallel_compiler))]
     vec: elsa::vec::FrozenVec<T>,
     #[cfg(parallel_compiler)]
@@ -10,7 +11,7 @@ pub struct AppendOnlyVec<I: Idx, T: Copy> {
     _marker: PhantomData<fn(&I)>,
 }
 
-impl<I: Idx, T: Copy> AppendOnlyVec<I, T> {
+impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
     pub fn new() -> Self {
         Self {
             #[cfg(not(parallel_compiler))]
@@ -39,3 +40,66 @@ impl<I: Idx, T: Copy> AppendOnlyVec<I, T> {
         return self.vec.get(i);
     }
 }
+
+#[derive(Default)]
+pub struct AppendOnlyVec<T: Copy> {
+    #[cfg(not(parallel_compiler))]
+    vec: elsa::vec::FrozenVec<T>,
+    #[cfg(parallel_compiler)]
+    vec: elsa::sync::LockFreeFrozenVec<T>,
+}
+
+impl<T: Copy> AppendOnlyVec<T> {
+    pub fn new() -> Self {
+        Self {
+            #[cfg(not(parallel_compiler))]
+            vec: elsa::vec::FrozenVec::new(),
+            #[cfg(parallel_compiler)]
+            vec: elsa::sync::LockFreeFrozenVec::new(),
+        }
+    }
+
+    pub fn push(&self, val: T) -> usize {
+        #[cfg(not(parallel_compiler))]
+        let i = self.vec.len();
+        #[cfg(not(parallel_compiler))]
+        self.vec.push(val);
+        #[cfg(parallel_compiler)]
+        let i = self.vec.push(val);
+        i
+    }
+
+    pub fn get(&self, i: usize) -> Option<T> {
+        #[cfg(not(parallel_compiler))]
+        return self.vec.get_copy(i);
+        #[cfg(parallel_compiler)]
+        return self.vec.get(i);
+    }
+
+    pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> + '_ {
+        (0..)
+            .map(|i| (i, self.get(i)))
+            .take_while(|(_, o)| o.is_some())
+            .filter_map(|(i, o)| Some((i, o?)))
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
+        (0..).map(|i| self.get(i)).take_while(|o| o.is_some()).filter_map(|o| o)
+    }
+}
+
+impl<T: Copy + PartialEq> AppendOnlyVec<T> {
+    pub fn contains(&self, val: T) -> bool {
+        self.iter_enumerated().any(|(_, v)| v == val)
+    }
+}
+
+impl<A: Copy> FromIterator<A> for AppendOnlyVec<A> {
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
+        let this = Self::new();
+        for val in iter {
+            this.push(val);
+        }
+        this
+    }
+}
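
As a usage sketch for the `FromIterator` impl (not part of the commit; it relies only on the API shown above):

    use rustc_data_structures::sync::AppendOnlyVec;

    fn main() {
        let v: AppendOnlyVec<u32> = (0u32..3).collect();
        assert_eq!(v.iter().collect::<Vec<_>>(), vec![0, 1, 2]);
        assert_eq!(v.push(3), 3); // indices continue past the collected elements
    }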