Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/flat_map_in_place.rs | 42
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs           | 19
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs        | 33
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs              | 67
4 files changed, 99 insertions, 62 deletions
diff --git a/compiler/rustc_data_structures/src/flat_map_in_place.rs b/compiler/rustc_data_structures/src/flat_map_in_place.rs
index e66b00b7557..6d718059f9c 100644
--- a/compiler/rustc_data_structures/src/flat_map_in_place.rs
+++ b/compiler/rustc_data_structures/src/flat_map_in_place.rs
@@ -1,4 +1,4 @@
-use std::ptr;
+use std::{mem, ptr};
 
 use smallvec::{Array, SmallVec};
 use thin_vec::ThinVec;
@@ -13,39 +13,44 @@ pub trait FlatMapInPlace<T>: Sized {
 // The implementation of this method is syntactically identical for all the
 // different vector types.
 macro_rules! flat_map_in_place {
-    () => {
+    ($vec:ident $( where T: $bound:path)?) => {
         fn flat_map_in_place<F, I>(&mut self, mut f: F)
         where
             F: FnMut(T) -> I,
             I: IntoIterator<Item = T>,
         {
+            struct LeakGuard<'a, T $(: $bound)?>(&'a mut $vec<T>);
+
+            impl<'a, T $(: $bound)?> Drop for LeakGuard<'a, T> {
+                fn drop(&mut self) {
+                    unsafe {
+                        self.0.set_len(0); // make sure we just leak elements in case of panic
+                    }
+                }
+            }
+
+            let this = LeakGuard(self);
+
             let mut read_i = 0;
             let mut write_i = 0;
             unsafe {
-                let mut old_len = self.len();
-                self.set_len(0); // make sure we just leak elements in case of panic
-
-                while read_i < old_len {
+                while read_i < this.0.len() {
                     // move the read_i'th item out of the vector and map it
                     // to an iterator
-                    let e = ptr::read(self.as_ptr().add(read_i));
+                    let e = ptr::read(this.0.as_ptr().add(read_i));
                     let iter = f(e).into_iter();
                     read_i += 1;
 
                     for e in iter {
                         if write_i < read_i {
-                            ptr::write(self.as_mut_ptr().add(write_i), e);
+                            ptr::write(this.0.as_mut_ptr().add(write_i), e);
                             write_i += 1;
                         } else {
                             // If this is reached we ran out of space
                             // in the middle of the vector.
                             // However, the vector is in a valid state here,
                             // so we just do a somewhat inefficient insert.
-                            self.set_len(old_len);
-                            self.insert(write_i, e);
-
-                            old_len = self.len();
-                            self.set_len(0);
+                            this.0.insert(write_i, e);
 
                             read_i += 1;
                             write_i += 1;
@@ -54,20 +59,23 @@ macro_rules! flat_map_in_place {
                 }
 
                 // write_i tracks the number of actually written new items.
-                self.set_len(write_i);
+                this.0.set_len(write_i);
+
+                // The vector is in a sane state again. Prevent the LeakGuard from leaking the data.
+                mem::forget(this);
             }
         }
     };
 }
 
 impl<T> FlatMapInPlace<T> for Vec<T> {
-    flat_map_in_place!();
+    flat_map_in_place!(Vec);
 }
 
 impl<T, A: Array<Item = T>> FlatMapInPlace<T> for SmallVec<A> {
-    flat_map_in_place!();
+    flat_map_in_place!(SmallVec where T: Array);
 }
 
 impl<T> FlatMapInPlace<T> for ThinVec<T> {
-    flat_map_in_place!();
+    flat_map_in_place!(ThinVec);
 }
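
The rewrite above replaces the manual set_len(0)/restore dance with a drop guard: if the closure `f` panics, `LeakGuard` truncates the vector so the already-moved-out elements are leaked rather than dropped twice. The following standalone sketch shows the same drop-guard idea on a plain `Vec`; `map_in_place` is a hypothetical demo function, not part of this diff.

    use std::{mem, ptr};

    // Truncate-on-drop guard: if a panic unwinds while elements have been
    // moved out of the vector, dropping the guard sets the length to zero so
    // the moved-out slots are leaked instead of being dropped a second time.
    struct LeakGuard<'a, T>(&'a mut Vec<T>);

    impl<T> Drop for LeakGuard<'_, T> {
        fn drop(&mut self) {
            // SAFETY: shrinking the length only leaks elements; it never
            // exposes uninitialized memory.
            unsafe { self.0.set_len(0) };
        }
    }

    // Hypothetical demo: transform each element in place, leaking on panic.
    fn map_in_place<T>(v: &mut Vec<T>, mut f: impl FnMut(T) -> T) {
        let guard = LeakGuard(v);
        unsafe {
            for i in 0..guard.0.len() {
                let p = guard.0.as_mut_ptr().add(i);
                // Move the element out, transform it, and write it back. If
                // `f` panics between the read and the write, the guard's Drop
                // impl prevents slot `i` from being dropped again.
                let e = ptr::read(p);
                ptr::write(p, f(e));
            }
        }
        // Every slot holds a valid element again; disarm the guard.
        mem::forget(guard);
    }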
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 65488c73d3c..e6be9c256f0 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -43,10 +43,10 @@ impl<T> Sharded<T> {
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
-            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -56,12 +56,12 @@ impl<T> Sharded<T> {
     }
 
     #[inline]
-    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
         match self {
             Self::Single(single) => single,
             Self::Shards(shards) => {
                 // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
-                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+                unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 }
             }
         }
     }
@@ -69,7 +69,7 @@ impl<T> Sharded<T> {
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -79,7 +79,7 @@ impl<T> Sharded<T> {
                 // `might_be_dyn_thread_safe` was also false.
                 unsafe { single.lock_assume(Mode::NoSync) }
             }
-            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)),
         }
     }
 
@@ -91,7 +91,7 @@ impl<T> Sharded<T> {
 
     #[inline]
     #[track_caller]
-    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+    pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> {
         match self {
             Self::Single(single) => {
                 // Synchronization is disabled so use the `lock_assume_no_sync` method optimized
@@ -109,7 +109,7 @@ impl<T> Sharded<T> {
                 // always inbounds.
                 // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
                 // the lock thus `might_be_dyn_thread_safe` was also true.
-                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+                unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
             }
         }
     }
@@ -143,6 +143,9 @@ pub fn shards() -> usize {
 pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
+    pub fn with_capacity(cap: usize) -> Self {
+        Self::new(|| FxHashMap::with_capacity_and_hasher(cap, rustc_hash::FxBuildHasher::default()))
+    }
     pub fn len(&self) -> usize {
         self.lock_shards().map(|shard| shard.len()).sum()
     }
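
For context on the sharding scheme the renamed parameters feed into, here is a dependency-free sketch of a sharded hash map: the key is hashed, the hash is masked with `SHARDS - 1` to pick a shard, and a `with_capacity` constructor pre-sizes every shard, mirroring the new `ShardedHashMap::with_capacity`. `ShardedMap`, `shard_for`, and the use of std's `DefaultHasher` in place of `FxHasher` are simplifications for illustration, not the real API.

    use std::collections::HashMap;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use std::sync::Mutex;

    // Power-of-two shard count, so a hash can be reduced to a shard index
    // with a cheap mask, as in `get_shard_by_index` above.
    const SHARDS: usize = 4;

    struct ShardedMap<K, V> {
        shards: Vec<Mutex<HashMap<K, V>>>,
    }

    impl<K: Hash + Eq, V> ShardedMap<K, V> {
        // Counterpart of the new `ShardedHashMap::with_capacity`: the
        // constructor closure runs once per shard, so every shard is pre-sized.
        fn with_capacity(cap: usize) -> Self {
            let shards = (0..SHARDS)
                .map(|_| Mutex::new(HashMap::with_capacity(cap)))
                .collect();
            ShardedMap { shards }
        }

        // Pick a shard by hashing the key, like `get_shard_by_value`
        // (DefaultHasher stands in for FxHasher to keep the sketch self-contained).
        fn shard_for<Q: Hash + ?Sized>(&self, key: &Q) -> &Mutex<HashMap<K, V>> {
            let mut hasher = DefaultHasher::new();
            key.hash(&mut hasher);
            &self.shards[(hasher.finish() as usize) & (SHARDS - 1)]
        }

        fn insert(&self, key: K, value: V) {
            self.shard_for(&key).lock().unwrap().insert(key, value);
        }
    }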
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 066ea03b4ac..a01a420dfbd 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -1,4 +1,5 @@
 use std::borrow::Borrow;
+use std::cmp::Ordering;
 use std::fmt::Debug;
 use std::mem;
 use std::ops::{Bound, Index, IndexMut, RangeBounds};
@@ -156,6 +157,38 @@ impl<K: Ord, V> SortedMap<K, V> {
         &self.data[start..end]
     }
 
+    /// `sm.range_is_empty(r)` == `sm.range(r).is_empty()`, but is faster.
+    #[inline]
+    pub fn range_is_empty<R>(&self, range: R) -> bool
+    where
+        R: RangeBounds<K>,
+    {
+        // `range` must (via `range_slice_indices`) search for the start and
+        // end separately. But here we can do a single binary search for the
+        // entire range. If a single `x` matching `range` is found then the
+        // range is *not* empty.
+        self.data
+            .binary_search_by(|(x, _)| {
+                // Is `x` below `range`?
+                match range.start_bound() {
+                    Bound::Included(start) if x < start => return Ordering::Less,
+                    Bound::Excluded(start) if x <= start => return Ordering::Less,
+                    _ => {}
+                };
+
+                // Is `x` above `range`?
+                match range.end_bound() {
+                    Bound::Included(end) if x > end => return Ordering::Greater,
+                    Bound::Excluded(end) if x >= end => return Ordering::Greater,
+                    _ => {}
+                };
+
+                // `x` must be within `range`.
+                Ordering::Equal
+            })
+            .is_err()
+    }
+
     #[inline]
     pub fn remove_range<R>(&mut self, range: R)
     where
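
The new `range_is_empty` works by running a single `binary_search_by` whose comparator classifies each key as below, inside, or above the range; if the search finds any key classified as `Equal`, the range is non-empty. A standalone sketch of the same comparator over a sorted slice of `u32` (the free function and the `main` driver are illustrative only):

    use std::cmp::Ordering;
    use std::ops::{Bound, RangeBounds};

    // Same idea as `SortedMap::range_is_empty`: one binary search whose
    // comparator treats "inside the range" as a match, so any hit proves the
    // range is non-empty. `data` must be sorted.
    fn range_is_empty<R: RangeBounds<u32>>(data: &[u32], range: R) -> bool {
        data.binary_search_by(|x| {
            // Is `x` below the range?
            match range.start_bound() {
                Bound::Included(start) if x < start => return Ordering::Less,
                Bound::Excluded(start) if x <= start => return Ordering::Less,
                _ => {}
            }
            // Is `x` above the range?
            match range.end_bound() {
                Bound::Included(end) if x > end => return Ordering::Greater,
                Bound::Excluded(end) if x >= end => return Ordering::Greater,
                _ => {}
            }
            // `x` is inside the range.
            Ordering::Equal
        })
        .is_err()
    }

    fn main() {
        let data = [1, 3, 5, 9];
        assert!(range_is_empty(&data, 6..9));   // nothing in [6, 9)
        assert!(!range_is_empty(&data, 4..=5)); // 5 lies in [4, 5]
    }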
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 37b54fe38ff..a1cc75c4985 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -18,42 +18,54 @@
 //!
 //! | Type                    | Serial version      | Parallel version                |
 //! | ----------------------- | ------------------- | ------------------------------- |
-//! | `LRef<'a, T>` [^2]      | `&'a mut T`         | `&'a T`                         |
-//! |                         |                     |                                 |
 //! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or                 |
 //! |                         |                     | `parking_lot::Mutex<T>`         |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
 //! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
-//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
 //! |                         |                     |                                 |
 //! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
 //! required.
-//!
-//! [^2]: `MTRef`, `MTLockRef` are type aliases.
 
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 
-pub use crate::marker::*;
+pub use parking_lot::{
+    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
+    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
+};
 
-mod lock;
+pub use self::atomic::AtomicU64;
+pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
 #[doc(no_inline)]
-pub use lock::{Lock, LockGuard, Mode};
-
-mod worker_local;
-pub use worker_local::{Registry, WorkerLocal};
+pub use self::lock::{Lock, LockGuard, Mode};
+pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
+pub use self::parallel::{
+    join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in,
+};
+pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec};
+pub use self::worker_local::{Registry, WorkerLocal};
+pub use crate::marker::*;
 
+mod freeze;
+mod lock;
 mod parallel;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in};
-pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
-
 mod vec;
+mod worker_local;
 
-mod freeze;
-pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+/// Keep the conditional imports together in a submodule, so that import-sorting
+/// doesn't split them up.
+mod atomic {
+    // Most hosts can just use a regular AtomicU64.
+    #[cfg(target_has_atomic = "64")]
+    pub use std::sync::atomic::AtomicU64;
+
+    // Some 32-bit hosts don't have AtomicU64, so use a fallback.
+    #[cfg(not(target_has_atomic = "64"))]
+    pub use portable_atomic::AtomicU64;
+}
 
 mod mode {
     use std::sync::atomic::{AtomicU8, Ordering};
@@ -97,21 +109,6 @@ mod mode {
 
 // FIXME(parallel_compiler): Get rid of these aliases across the compiler.
 
-pub use std::sync::OnceLock;
-// Use portable AtomicU64 for targets without native 64-bit atomics
-#[cfg(target_has_atomic = "64")]
-pub use std::sync::atomic::AtomicU64;
-
-pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-pub use parking_lot::{
-    MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard,
-    RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard,
-};
-#[cfg(not(target_has_atomic = "64"))]
-pub use portable_atomic::AtomicU64;
-
-pub type LRef<'a, T> = &'a T;
-
 #[derive(Debug, Default)]
 pub struct MTLock<T>(Lock<T>);
 
@@ -142,14 +139,10 @@ impl<T> MTLock<T> {
     }
 }
 
-use parking_lot::RwLock as InnerRwLock;
-
 /// This makes locks panic if they are already held.
 /// It is only useful when you are running in a single thread
 const ERROR_CHECKING: bool = false;
 
-pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>;
-
 #[derive(Default)]
 #[repr(align(64))]
 pub struct CacheAligned<T>(pub T);
@@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
 }
 
 #[derive(Debug, Default)]
-pub struct RwLock<T>(InnerRwLock<T>);
+pub struct RwLock<T>(parking_lot::RwLock<T>);
 
 impl<T> RwLock<T> {
     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        RwLock(InnerRwLock::new(inner))
+        RwLock(parking_lot::RwLock::new(inner))
     }
 
     #[inline(always)]
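
The diff is cut off just after `RwLock::new`, but the pattern is clear: `RwLock<T>` is now a thin newtype over `parking_lot::RwLock<T>`, with the `InnerRwLock` alias gone. A minimal sketch of such a wrapper, assuming plain delegation for the read and write guards (illustrative only; the real type exposes more methods and an `ERROR_CHECKING` mode):

    use parking_lot::{RwLockReadGuard, RwLockWriteGuard};

    // Sketch of the newtype above: `RwLock<T>` wraps `parking_lot::RwLock<T>`
    // directly. Only `new`, `read`, and `write` are shown here.
    #[derive(Debug, Default)]
    pub struct RwLock<T>(parking_lot::RwLock<T>);

    impl<T> RwLock<T> {
        #[inline(always)]
        pub fn new(inner: T) -> Self {
            RwLock(parking_lot::RwLock::new(inner))
        }

        #[inline(always)]
        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            self.0.read()
        }

        #[inline(always)]
        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            self.0.write()
        }
    }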