| author | Celina G. Val <celinval@amazon.com> | 2025-06-11 12:32:09 -0700 |
|---|---|---|
| committer | Celina G. Val <celinval@amazon.com> | 2025-06-11 16:56:01 -0700 |
| commit | f52c6eee02fb9a9cfe203ce95c4968c2835c034b (patch) | |
| tree | 93d6b6f10a357b37cd68911f6f4b049ccbec5fd8 | |
| parent | 4aa62ea9e9015621969a0f505abf7a6894e99e9e (diff) | |
| download | rust-f52c6eee02fb9a9cfe203ce95c4968c2835c034b.tar.gz rust-f52c6eee02fb9a9cfe203ce95c4968c2835c034b.zip | |
Another round of tidy / warning fixes
24 files changed, 120 insertions, 105 deletions
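Most of the churn in this diff comes from wrapping individual operations inside `unsafe fn` bodies in explicit `unsafe { ... }` blocks. That is the shape required by the `unsafe_op_in_unsafe_fn` lint (warn-by-default as of the 2024 edition), under which an `unsafe fn` no longer provides an implicit `unsafe` block for its whole body. A minimal sketch of the rule, not taken from this commit:

```rust
#![deny(unsafe_op_in_unsafe_fn)]

// With the lint active, the body of an `unsafe fn` is checked like safe
// code, so each unsafe operation needs its own explicit `unsafe` block.
unsafe fn read(ptr: *const u32) -> u32 {
    // `*ptr` on its own would be rejected here.
    unsafe { *ptr }
}

fn main() {
    let x: u32 = 22;
    // Callers still need `unsafe` to invoke an `unsafe fn` at all.
    assert_eq!(unsafe { read(&x) }, 22);
}
```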
```diff
diff --git a/Cargo.toml b/Cargo.toml
index c4d2a06f4cb..e08f14d2101 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -60,7 +60,7 @@ exclude = [
     "obj",
 ]
 
-[profile.release.package.rustc-rayon-core]
+[profile.release.package.rustc_thread_pool]
 # The rustc fork of Rayon has deadlock detection code which intermittently
 # causes overflows in the CI (see https://github.com/rust-lang/rust/issues/90227)
 # so we turn overflow checks off for now.
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 68527dde1f3..3881f3c2aa8 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -22,8 +22,6 @@
 //! | | | `parking_lot::Mutex<T>` |
 //! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` |
 //! | `MTLock<T>` [^1] | `T` | `Lock<T>` |
-//! | | | |
-//! | `ParallelIterator` | `Iterator` | `rustc_thread_pool::iter::ParallelIterator` |
 //!
 //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
 //! of a `RefCell`. This is appropriate when interior mutability is not
diff --git a/compiler/rustc_thread_pool/src/broadcast/mod.rs b/compiler/rustc_thread_pool/src/broadcast/mod.rs
index c2b0d47f829..9545c4b15d8 100644
--- a/compiler/rustc_thread_pool/src/broadcast/mod.rs
+++ b/compiler/rustc_thread_pool/src/broadcast/mod.rs
@@ -6,7 +6,7 @@ use crate::job::{ArcJob, StackJob};
 use crate::latch::{CountLatch, LatchRef};
 use crate::registry::{Registry, WorkerThread};
 
-mod test;
+mod tests;
 
 /// Executes `op` within every thread in the current threadpool. If this is
 /// called from a non-Rayon thread, it will execute in the global threadpool.
@@ -103,18 +103,18 @@ where
     };
 
     let n_threads = registry.num_threads();
-    let current_thread = WorkerThread::current().as_ref();
+    let current_thread = unsafe { WorkerThread::current().as_ref() };
     let tlv = crate::tlv::get();
     let latch = CountLatch::with_count(n_threads, current_thread);
     let jobs: Vec<_> =
         (0..n_threads).map(|_| StackJob::new(tlv, &f, LatchRef::new(&latch))).collect();
-    let job_refs = jobs.iter().map(|job| job.as_job_ref());
+    let job_refs = jobs.iter().map(|job| unsafe { job.as_job_ref() });
 
     registry.inject_broadcast(job_refs);
 
     // Wait for all jobs to complete, then collect the results, maybe propagating a panic.
     latch.wait(current_thread);
-    jobs.into_iter().map(|job| job.into_result()).collect()
+    jobs.into_iter().map(|job| unsafe { job.into_result() }).collect()
 }
 
 /// Execute `op` on every thread in the pool. It will be executed on each
diff --git a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race1.rs b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race1.rs
index 1f7a7b0b429..f6dbc769699 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race1.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race1.rs
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (lo, _hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
diff --git a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race2.rs b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race2.rs
index 71b695dd855..ccd737a700d 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race2.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race2.rs
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (lo, _hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
diff --git a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race3.rs b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race3.rs
index 8484cebaae8..6acdf084433 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/quicksort_race3.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/quicksort_race3.rs
@@ -7,7 +7,7 @@ fn quick_sort<T:PartialOrd+Send>(v: &mut [T]) {
 
     let mid = partition(v);
     let (_lo, hi) = v.split_at_mut(mid);
-    rustc_thred_pool::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR
+    rustc_thread_pool::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR
 }
 
 fn partition<T:PartialOrd+Send>(v: &mut [T]) -> usize {
diff --git a/compiler/rustc_thread_pool/src/compile_fail/rc_return.rs b/compiler/rustc_thread_pool/src/compile_fail/rc_return.rs
index 509c8d62ad1..165c685aba1 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/rc_return.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/rc_return.rs
@@ -2,7 +2,7 @@
 
 use std::rc::Rc;
 
-rustc_thred_pool::join(|| Rc::new(22), || ()); //~ ERROR
+rustc_thread_pool::join(|| Rc::new(22), || ()); //~ ERROR
 ``` */
 mod left {}
 
@@ -11,7 +11,7 @@ mod left {}
 
 use std::rc::Rc;
 
-rustc_thred_pool::join(|| (), || Rc::new(23)); //~ ERROR
+rustc_thread_pool::join(|| (), || Rc::new(23)); //~ ERROR
 ``` */
 mod right {}
diff --git a/compiler/rustc_thread_pool/src/compile_fail/rc_upvar.rs b/compiler/rustc_thread_pool/src/compile_fail/rc_upvar.rs
index a27b3c8c39f..6dc9ead48a0 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/rc_upvar.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/rc_upvar.rs
@@ -3,7 +3,7 @@
 use std::rc::Rc;
 
 let r = Rc::new(22);
-rustc_thred_pool::join(|| r.clone(), || r.clone());
+rustc_thread_pool::join(|| r.clone(), || r.clone());
 //~^ ERROR
 
 ``` */
diff --git a/compiler/rustc_thread_pool/src/compile_fail/scope_join_bad.rs b/compiler/rustc_thread_pool/src/compile_fail/scope_join_bad.rs
index 6e700a483b1..e65abfc3c1e 100644
--- a/compiler/rustc_thread_pool/src/compile_fail/scope_join_bad.rs
+++ b/compiler/rustc_thread_pool/src/compile_fail/scope_join_bad.rs
@@ -3,7 +3,7 @@ fn bad_scope<F>(f: F)
 where
     F: FnOnce(&i32) + Send,
 {
-    rustc_thred_pool::scope(|s| {
+    rustc_thread_pool::scope(|s| {
         let x = 22;
         s.spawn(|_| f(&x)); //~ ERROR `x` does not live long enough
     });
@@ -13,7 +13,7 @@ fn good_scope<F>(f: F)
 where
     F: FnOnce(&i32) + Send,
 {
     let x = 22;
-    rustc_thred_pool::scope(|s| {
+    rustc_thread_pool::scope(|s| {
         s.spawn(|_| f(&x));
     });
 }
diff --git a/compiler/rustc_thread_pool/src/job.rs b/compiler/rustc_thread_pool/src/job.rs
index 3241914ba81..e6e84ac2320 100644
--- a/compiler/rustc_thread_pool/src/job.rs
+++ b/compiler/rustc_thread_pool/src/job.rs
@@ -61,7 +61,7 @@ impl JobRef {
 
     #[inline]
     pub(super) unsafe fn execute(self) {
-        (self.execute_fn)(self.pointer)
+        unsafe { (self.execute_fn)(self.pointer) }
     }
 }
 
@@ -97,7 +97,7 @@ where
     }
 
     pub(super) unsafe fn as_job_ref(&self) -> JobRef {
-        JobRef::new(self)
+        unsafe { JobRef::new(self) }
     }
 
     pub(super) unsafe fn run_inline(self, stolen: bool) -> R {
@@ -116,12 +116,16 @@ where
     R: Send,
 {
     unsafe fn execute(this: *const ()) {
-        let this = &*(this as *const Self);
+        let this = unsafe { &*(this as *const Self) };
         tlv::set(this.tlv);
         let abort = unwind::AbortIfPanic;
-        let func = (*this.func.get()).take().unwrap();
-        (*this.result.get()) = JobResult::call(func);
-        Latch::set(&this.latch);
+        let func = unsafe { (*this.func.get()).take().unwrap() };
+        unsafe {
+            (*this.result.get()) = JobResult::call(func);
+        }
+        unsafe {
+            Latch::set(&this.latch);
+        }
         mem::forget(abort);
     }
 }
@@ -152,7 +156,7 @@ where
     /// lifetimes, so it is up to you to ensure that this JobRef
     /// doesn't outlive any data that it closes over.
     pub(super) unsafe fn into_job_ref(self: Box<Self>) -> JobRef {
-        JobRef::new(Box::into_raw(self))
+        unsafe { JobRef::new(Box::into_raw(self)) }
     }
 
     /// Creates a static `JobRef` from this job.
@@ -169,7 +173,7 @@ where
     BODY: FnOnce() + Send,
 {
     unsafe fn execute(this: *const ()) {
-        let this = Box::from_raw(this as *mut Self);
+        let this = unsafe { Box::from_raw(this as *mut Self) };
         tlv::set(this.tlv);
         (this.job)();
     }
@@ -196,7 +200,7 @@ where
     /// lifetimes, so it is up to you to ensure that this JobRef
     /// doesn't outlive any data that it closes over.
     pub(super) unsafe fn as_job_ref(this: &Arc<Self>) -> JobRef {
-        JobRef::new(Arc::into_raw(Arc::clone(this)))
+        unsafe { JobRef::new(Arc::into_raw(Arc::clone(this))) }
     }
 
     /// Creates a static `JobRef` from this job.
@@ -213,7 +217,7 @@ where
     BODY: Fn() + Send + Sync,
 {
     unsafe fn execute(this: *const ()) {
-        let this = Arc::from_raw(this as *mut Self);
+        let this = unsafe { Arc::from_raw(this as *mut Self) };
         (this.job)();
     }
 }
@@ -254,17 +258,17 @@ impl JobFifo {
         // jobs in a thread's deque may be popped from the back (LIFO) or stolen from the front
        // (FIFO), but either way they will end up popping from the front of this queue.
         self.inner.push(job_ref);
-        JobRef::new(self)
+        unsafe { JobRef::new(self) }
     }
 }
 
 impl Job for JobFifo {
     unsafe fn execute(this: *const ()) {
         // We "execute" a queue by executing its first job, FIFO.
-        let this = &*(this as *const Self);
+        let this = unsafe { &*(this as *const Self) };
         loop {
             match this.inner.steal() {
-                Steal::Success(job_ref) => break job_ref.execute(),
+                Steal::Success(job_ref) => break unsafe { job_ref.execute() },
                 Steal::Empty => panic!("FIFO is empty"),
                 Steal::Retry => {}
             }
diff --git a/compiler/rustc_thread_pool/src/join/mod.rs b/compiler/rustc_thread_pool/src/join/mod.rs
index e48d17f16a3..f285362c19b 100644
--- a/compiler/rustc_thread_pool/src/join/mod.rs
+++ b/compiler/rustc_thread_pool/src/join/mod.rs
@@ -7,7 +7,7 @@ use crate::tlv::{self, Tlv};
 use crate::{FnContext, unwind};
 
 #[cfg(test)]
-mod test;
+mod tests;
 
 /// Takes two closures and *potentially* runs them in parallel. It
 /// returns a pair of the results from those closures.
@@ -41,7 +41,7 @@ mod test;
 /// [the `par_sort` method]: ../rayon/slice/trait.ParallelSliceMut.html#method.par_sort
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let mut v = vec![5, 1, 8, 22, 0, 44];
 /// quick_sort(&mut v);
 /// assert_eq!(v, vec![0, 1, 5, 8, 22, 44]);
@@ -192,7 +192,7 @@ unsafe fn join_recover_from_panic(
     err: Box<dyn Any + Send>,
     tlv: Tlv,
 ) -> ! {
-    worker_thread.wait_until(job_b_latch);
+    unsafe { worker_thread.wait_until(job_b_latch) };
 
     // Restore the TLV since we might have run some jobs overwriting it when waiting for job b.
     tlv::set(tlv);
diff --git a/compiler/rustc_thread_pool/src/latch.rs b/compiler/rustc_thread_pool/src/latch.rs
index f2f806e0184..49ba62d3bea 100644
--- a/compiler/rustc_thread_pool/src/latch.rs
+++ b/compiler/rustc_thread_pool/src/latch.rs
@@ -116,7 +116,7 @@ impl CoreLatch {
     /// latch code.
     #[inline]
     unsafe fn set(this: *const Self) -> bool {
-        let old_state = (*this).state.swap(SET, Ordering::AcqRel);
+        let old_state = unsafe { (*this).state.swap(SET, Ordering::AcqRel) };
         old_state == SLEEPING
     }
 
@@ -185,13 +185,13 @@ impl<'r> Latch for SpinLatch<'r> {
     unsafe fn set(this: *const Self) {
         let cross_registry;
 
-        let registry: &Registry = if (*this).cross {
+        let registry: &Registry = if unsafe { (*this).cross } {
             // Ensure the registry stays alive while we notify it.
             // Otherwise, it would be possible that we set the spin
             // latch and the other thread sees it and exits, causing
             // the registry to be deallocated, all before we get a
             // chance to invoke `registry.notify_worker_latch_is_set`.
-            cross_registry = Arc::clone((*this).registry);
+            cross_registry = Arc::clone(unsafe { (*this).registry });
             &cross_registry
         } else {
             // If this is not a "cross-registry" spin-latch, then the
@@ -199,12 +199,12 @@ impl<'r> Latch for SpinLatch<'r> {
             // that the registry stays alive. However, that doesn't
             // include this *particular* `Arc` handle if the waiting
             // thread then exits, so we must completely dereference it.
-            (*this).registry
+            unsafe { (*this).registry }
         };
-        let target_worker_index = (*this).target_worker_index;
+        let target_worker_index = unsafe { (*this).target_worker_index };
 
         // NOTE: Once we `set`, the target may proceed and invalidate `this`!
-        if CoreLatch::set(&(*this).core_latch) {
+        if unsafe { CoreLatch::set(&(*this).core_latch) } {
             // Subtle: at this point, we can no longer read from
             // `self`, because the thread owning this spin latch may
             // have awoken and deallocated the latch. Therefore, we
@@ -249,9 +249,9 @@ impl LockLatch {
 
 impl Latch for LockLatch {
     #[inline]
     unsafe fn set(this: *const Self) {
-        let mut guard = (*this).m.lock().unwrap();
+        let mut guard = unsafe { (*this).m.lock().unwrap() };
         *guard = true;
-        (*this).v.notify_all();
+        unsafe { (*this).v.notify_all() };
     }
 }
@@ -286,7 +286,7 @@ impl OnceLatch {
         registry: &Registry,
         target_worker_index: usize,
     ) {
-        if CoreLatch::set(&(*this).core_latch) {
+        if unsafe { CoreLatch::set(&(*this).core_latch) } {
             registry.notify_worker_latch_is_set(target_worker_index);
         }
     }
@@ -384,17 +384,17 @@ impl CountLatch {
 impl Latch for CountLatch {
     #[inline]
     unsafe fn set(this: *const Self) {
-        if (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 {
+        if unsafe { (*this).counter.fetch_sub(1, Ordering::SeqCst) == 1 } {
             // NOTE: Once we call `set` on the internal `latch`,
             // the target may proceed and invalidate `this`!
-            match (*this).kind {
-                CountLatchKind::Stealing { ref latch, ref registry, worker_index } => {
+            match unsafe { &(*this).kind } {
+                CountLatchKind::Stealing { latch, registry, worker_index } => {
                     let registry = Arc::clone(registry);
-                    if CoreLatch::set(latch) {
-                        registry.notify_worker_latch_is_set(worker_index);
+                    if unsafe { CoreLatch::set(latch) } {
+                        registry.notify_worker_latch_is_set(*worker_index);
                     }
                 }
-                CountLatchKind::Blocking { ref latch } => LockLatch::set(latch),
+                CountLatchKind::Blocking { latch } => unsafe { LockLatch::set(latch) },
             }
         }
     }
@@ -426,6 +426,6 @@ impl<L> Deref for LatchRef<'_, L> {
 impl<L: Latch> Latch for LatchRef<'_, L> {
     #[inline]
     unsafe fn set(this: *const Self) {
-        L::set((*this).inner);
+        unsafe { L::set((*this).inner) };
     }
 }
diff --git a/compiler/rustc_thread_pool/src/lib.rs b/compiler/rustc_thread_pool/src/lib.rs
index f1d466b4948..34252d919e3 100644
--- a/compiler/rustc_thread_pool/src/lib.rs
+++ b/compiler/rustc_thread_pool/src/lib.rs
@@ -61,6 +61,7 @@
 //! conflicting requirements will need to be resolved before the build will
 //! succeed.
 
+#![cfg_attr(test, allow(unused_crate_dependencies))]
 #![warn(rust_2018_idioms)]
 
 use std::any::Any;
@@ -85,7 +86,7 @@ mod unwind;
 mod worker_local;
 
 mod compile_fail;
-mod test;
+mod tests;
 
 pub mod tlv;
 
@@ -152,14 +153,14 @@ enum ErrorKind {
 /// The following creates a thread pool with 22 threads.
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let pool = rayon::ThreadPoolBuilder::new().num_threads(22).build().unwrap();
 /// ```
 ///
 /// To instead configure the global thread pool, use [`build_global()`]:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// rayon::ThreadPoolBuilder::new().num_threads(22).build_global().unwrap();
 /// ```
 ///
@@ -315,7 +316,7 @@ impl ThreadPoolBuilder {
     /// A scoped pool may be useful in combination with scoped thread-local variables.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     ///
     /// scoped_tls::scoped_thread_local!(static POOL_DATA: Vec<i32>);
     ///
@@ -382,7 +383,7 @@ impl<S> ThreadPoolBuilder<S> {
     /// A minimal spawn handler just needs to call `run()` from an independent thread.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     let pool = rayon::ThreadPoolBuilder::new()
     ///         .spawn_handler(|thread| {
@@ -400,7 +401,7 @@ impl<S> ThreadPoolBuilder<S> {
     /// any errors from the thread builder.
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     let pool = rayon::ThreadPoolBuilder::new()
     ///         .spawn_handler(|thread| {
@@ -429,7 +430,7 @@ impl<S> ThreadPoolBuilder<S> {
     /// [`std::thread::scope`]: https://doc.rust-lang.org/std/thread/fn.scope.html
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
     ///     std::thread::scope(|scope| {
     ///         let pool = rayon::ThreadPoolBuilder::new()
diff --git a/compiler/rustc_thread_pool/src/registry.rs b/compiler/rustc_thread_pool/src/registry.rs
index 2848556aab6..03a01aa29d2 100644
--- a/compiler/rustc_thread_pool/src/registry.rs
+++ b/compiler/rustc_thread_pool/src/registry.rs
@@ -533,16 +533,16 @@ impl Registry {
                 |injected| {
                     let worker_thread = WorkerThread::current();
                     assert!(injected && !worker_thread.is_null());
-                    op(&*worker_thread, true)
+                    op(unsafe { &*worker_thread }, true)
                 },
                 LatchRef::new(l),
             );
-            self.inject(job.as_job_ref());
+            self.inject(unsafe { job.as_job_ref() });
             self.release_thread();
             job.latch.wait_and_reset(); // Make sure we can use the same latch again next time.
             self.acquire_thread();
-            job.into_result()
+            unsafe { job.into_result() }
         })
     }
 
@@ -561,13 +561,13 @@ impl Registry {
             |injected| {
                 let worker_thread = WorkerThread::current();
                 assert!(injected && !worker_thread.is_null());
-                op(&*worker_thread, true)
+                op(unsafe { &*worker_thread }, true)
             },
             latch,
         );
-        self.inject(job.as_job_ref());
-        current_thread.wait_until(&job.latch);
-        job.into_result()
+        self.inject(unsafe { job.as_job_ref() });
+        unsafe { current_thread.wait_until(&job.latch) };
+        unsafe { job.into_result() }
     }
 
     /// Increments the terminate counter. This increment should be
@@ -759,7 +759,7 @@ impl WorkerThread {
 
     #[inline]
     pub(super) unsafe fn push_fifo(&self, job: JobRef) {
-        self.push(self.fifo.push(job));
+        unsafe { self.push(self.fifo.push(job)) };
     }
 
     #[inline]
@@ -798,7 +798,7 @@ impl WorkerThread {
     pub(super) unsafe fn wait_until<L: AsCoreLatch + ?Sized>(&self, latch: &L) {
         let latch = latch.as_core_latch();
         if !latch.probe() {
-            self.wait_until_cold(latch);
+            unsafe { self.wait_until_cold(latch) };
         }
     }
 
@@ -815,7 +815,7 @@ impl WorkerThread {
             // Check for local work *before* we start marking ourself idle,
             // especially to avoid modifying shared sleep state.
             if let Some(job) = self.take_local_job() {
-                self.execute(job);
+                unsafe { self.execute(job) };
                 continue;
             }
 
@@ -823,7 +823,7 @@ impl WorkerThread {
             while !latch.probe() {
                 if let Some(job) = self.find_work() {
                     self.registry.sleep.work_found();
-                    self.execute(job);
+                    unsafe { self.execute(job) };
                     // The job might have injected local work, so go back to the outer loop.
                     continue 'outer;
                 } else {
@@ -846,13 +846,13 @@ impl WorkerThread {
         let index = self.index;
 
         registry.acquire_thread();
-        self.wait_until(&registry.thread_infos[index].terminate);
+        unsafe { self.wait_until(&registry.thread_infos[index].terminate) };
 
         // Should not be any work left in our queue.
         debug_assert!(self.take_local_job().is_none());
 
         // Let registry know we are done
-        Latch::set(&registry.thread_infos[index].stopped);
+        unsafe { Latch::set(&registry.thread_infos[index].stopped) };
     }
 
     fn find_work(&self) -> Option<JobRef> {
@@ -886,7 +886,7 @@ impl WorkerThread {
 
     #[inline]
     pub(super) unsafe fn execute(&self, job: JobRef) {
-        job.execute();
+        unsafe { job.execute() };
     }
 
     /// Try to steal a single job and return it.
@@ -932,12 +932,12 @@ impl WorkerThread {
 
 unsafe fn main_loop(thread: ThreadBuilder) {
     let worker_thread = &WorkerThread::from(thread);
-    WorkerThread::set_current(worker_thread);
+    unsafe { WorkerThread::set_current(worker_thread) };
     let registry = &*worker_thread.registry;
     let index = worker_thread.index;
 
     // let registry know we are ready to do work
-    Latch::set(&registry.thread_infos[index].primed);
+    unsafe { Latch::set(&registry.thread_infos[index].primed) };
 
     // Worker threads should not panic. If they do, just abort, as the
     // internal state of the threadpool is corrupted. Note that if
@@ -949,7 +949,7 @@ unsafe fn main_loop(thread: ThreadBuilder) {
         registry.catch_unwind(|| handler(index));
     }
 
-    worker_thread.wait_until_out_of_work();
+    unsafe { worker_thread.wait_until_out_of_work() };
 
     // Normal termination, do not abort.
     mem::forget(abort_guard);
diff --git a/compiler/rustc_thread_pool/src/scope/mod.rs b/compiler/rustc_thread_pool/src/scope/mod.rs
index 82b3d053dcb..55e58b3509d 100644
--- a/compiler/rustc_thread_pool/src/scope/mod.rs
+++ b/compiler/rustc_thread_pool/src/scope/mod.rs
@@ -20,7 +20,7 @@ use crate::tlv::{self, Tlv};
 use crate::unwind;
 
 #[cfg(test)]
-mod test;
+mod tests;
 
 /// Represents a fork-join scope which can be used to spawn any number of tasks.
 /// See [`scope()`] for more information.
@@ -84,7 +84,7 @@ struct ScopeBase<'scope> {
 /// it would be less efficient than the real implementation:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// pub fn join<A,B,RA,RB>(oper_a: A, oper_b: B) -> (RA, RB)
 ///     where A: FnOnce() -> RA + Send,
 ///           B: FnOnce() -> RB + Send,
@@ -125,7 +125,7 @@ struct ScopeBase<'scope> {
 /// To see how and when tasks are joined, consider this example:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// // point start
 /// rayon::scope(|s| {
 ///     s.spawn(|s| { // task s.1
@@ -193,7 +193,7 @@ struct ScopeBase<'scope> {
 /// spawned task.
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let ok: Vec<i32> = vec![1, 2, 3];
 /// rayon::scope(|s| {
 ///     let bad: Vec<i32> = vec![4, 5, 6];
@@ -217,7 +217,7 @@ struct ScopeBase<'scope> {
 /// in this case including both `ok` *and* `bad`:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let ok: Vec<i32> = vec![1, 2, 3];
 /// rayon::scope(|s| {
 ///     let bad: Vec<i32> = vec![4, 5, 6];
@@ -238,7 +238,7 @@ struct ScopeBase<'scope> {
 /// is a borrow of `ok` and capture *that*:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let ok: Vec<i32> = vec![1, 2, 3];
 /// rayon::scope(|s| {
 ///     let bad: Vec<i32> = vec![4, 5, 6];
@@ -260,7 +260,7 @@ struct ScopeBase<'scope> {
 /// of individual variables:
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let ok: Vec<i32> = vec![1, 2, 3];
 /// rayon::scope(|s| {
 ///     let bad: Vec<i32> = vec![4, 5, 6];
@@ -312,7 +312,7 @@ where
 /// [`scope()`]: fn.scope.html
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// // point start
 /// rayon::scope_fifo(|s| {
 ///     s.spawn_fifo(|s| { // task s.1
@@ -487,7 +487,7 @@ impl<'scope> Scope<'scope> {
     /// # Examples
     ///
     /// ```rust
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// let mut value_a = None;
     /// let mut value_b = None;
     /// let mut value_c = None;
@@ -686,7 +686,7 @@ impl<'scope> ScopeBase<'scope> {
     where
         FUNC: FnOnce(),
     {
-        let _: Option<()> = Self::execute_job_closure(this, func);
+        let _: Option<()> = unsafe { Self::execute_job_closure(this, func) };
     }
 
     /// Executes `func` as a job in scope. Adjusts the "job completed"
@@ -699,11 +699,11 @@ impl<'scope> ScopeBase<'scope> {
         let result = match unwind::halt_unwinding(func) {
             Ok(r) => Some(r),
             Err(err) => {
-                (*this).job_panicked(err);
+                unsafe { (*this).job_panicked(err) };
                 None
             }
         };
-        Latch::set(&(*this).job_completed_latch);
+        unsafe { Latch::set(&(*this).job_completed_latch) };
         result
     }
 
@@ -778,6 +778,6 @@ unsafe impl<T: Sync> Sync for ScopePtr<T> {}
 impl<T> ScopePtr<T> {
     // Helper to avoid disjoint captures of `scope_ptr.0`
     unsafe fn as_ref(&self) -> &T {
-        &*self.0
+        unsafe { &*self.0 }
     }
 }
diff --git a/compiler/rustc_thread_pool/src/sleep/mod.rs b/compiler/rustc_thread_pool/src/sleep/mod.rs
index bee7c82c450..a9cdf68cc7e 100644
--- a/compiler/rustc_thread_pool/src/sleep/mod.rs
+++ b/compiler/rustc_thread_pool/src/sleep/mod.rs
@@ -31,7 +31,7 @@ struct SleepData {
 impl SleepData {
     /// Checks if the conditions for a deadlock holds and if so calls the deadlock handler
     #[inline]
-    pub fn deadlock_check(&self, deadlock_handler: &Option<Box<DeadlockHandler>>) {
+    pub(super) fn deadlock_check(&self, deadlock_handler: &Option<Box<DeadlockHandler>>) {
         if self.active_threads == 0 && self.blocked_threads > 0 {
             (deadlock_handler.as_ref().unwrap())();
         }
@@ -102,7 +102,7 @@ impl Sleep {
     /// Mark a Rayon worker thread as blocked. This triggers the deadlock handler
     /// if no other worker thread is active
     #[inline]
-    pub fn mark_blocked(&self, deadlock_handler: &Option<Box<DeadlockHandler>>) {
+    pub(super) fn mark_blocked(&self, deadlock_handler: &Option<Box<DeadlockHandler>>) {
         let mut data = self.data.lock().unwrap();
         debug_assert!(data.active_threads > 0);
         debug_assert!(data.blocked_threads < data.worker_count);
@@ -115,7 +115,7 @@ impl Sleep {
 
     /// Mark a previously blocked Rayon worker thread as unblocked
     #[inline]
-    pub fn mark_unblocked(&self) {
+    pub(super) fn mark_unblocked(&self) {
         let mut data = self.data.lock().unwrap();
         debug_assert!(data.active_threads < data.worker_count);
         debug_assert!(data.blocked_threads > 0);
diff --git a/compiler/rustc_thread_pool/src/spawn/mod.rs b/compiler/rustc_thread_pool/src/spawn/mod.rs
index 92b89ed5948..040a02bfa67 100644
--- a/compiler/rustc_thread_pool/src/spawn/mod.rs
+++ b/compiler/rustc_thread_pool/src/spawn/mod.rs
@@ -50,7 +50,7 @@ use crate::unwind;
 /// This code creates a Rayon task that increments a global counter.
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 ///
 /// static GLOBAL_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
@@ -80,7 +80,7 @@ where
     // be able to panic, and hence the data won't leak but will be
     // enqueued into some deque for later execution.
     let abort_guard = unwind::AbortIfPanic; // just in case we are wrong, and code CAN panic
-    let job_ref = spawn_job(func, registry);
+    let job_ref = unsafe { spawn_job(func, registry) };
     registry.inject_or_push(job_ref);
     mem::forget(abort_guard);
 }
@@ -150,16 +150,16 @@ where
     // be able to panic, and hence the data won't leak but will be
     // enqueued into some deque for later execution.
     let abort_guard = unwind::AbortIfPanic; // just in case we are wrong, and code CAN panic
-    let job_ref = spawn_job(func, registry);
+    let job_ref = unsafe { spawn_job(func, registry) };
 
     // If we're in the pool, use our thread's private fifo for this thread to execute
     // in a locally-FIFO order. Otherwise, just use the pool's global injector.
     match registry.current_thread() {
-        Some(worker) => worker.push_fifo(job_ref),
+        Some(worker) => unsafe { worker.push_fifo(job_ref) },
         None => registry.inject(job_ref),
     }
     mem::forget(abort_guard);
 }
 
 #[cfg(test)]
-mod test;
+mod tests;
diff --git a/compiler/rustc_thread_pool/src/thread_pool/mod.rs b/compiler/rustc_thread_pool/src/thread_pool/mod.rs
index c8644ecf9a9..3294e2a77cb 100644
--- a/compiler/rustc_thread_pool/src/thread_pool/mod.rs
+++ b/compiler/rustc_thread_pool/src/thread_pool/mod.rs
@@ -14,7 +14,7 @@ use crate::{
     Scope, ScopeFifo, ThreadPoolBuildError, ThreadPoolBuilder, join, scope, scope_fifo, spawn,
 };
 
-mod test;
+mod tests;
 
 /// Represents a user created [thread-pool].
 ///
@@ -28,7 +28,7 @@ mod test;
 /// ## Creating a ThreadPool
 ///
 /// ```rust
-/// # use rustc_thred_pool as rayon;
+/// # use rustc_thread_pool as rayon;
 /// let pool = rayon::ThreadPoolBuilder::new().num_threads(8).build().unwrap();
 /// ```
 ///
@@ -88,10 +88,10 @@ impl ThreadPool {
     /// meantime. For example
     ///
     /// ```rust
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() {
     ///     rayon::ThreadPoolBuilder::new().num_threads(1).build_global().unwrap();
-    ///     let pool = rustc_thred_pool::ThreadPoolBuilder::default().build().unwrap();
+    ///     let pool = rustc_thread_pool::ThreadPoolBuilder::default().build().unwrap();
     ///     let do_it = || {
     ///         print!("one ");
     ///         pool.install(||{});
@@ -123,7 +123,7 @@ impl ThreadPool {
     /// ## Using `install()`
     ///
     /// ```rust
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// fn main() {
     ///     let pool = rayon::ThreadPoolBuilder::new().num_threads(8).build().unwrap();
     ///     let n = pool.install(|| fib(20));
@@ -172,7 +172,7 @@ impl ThreadPool {
     /// # Examples
     ///
     /// ```
-    /// # use rustc_thred_pool as rayon;
+    /// # use rustc_thread_pool as rayon;
     /// use std::sync::atomic::{AtomicUsize, Ordering};
     ///
     /// fn main() {
@@ -401,7 +401,7 @@ impl ThreadPool {
     }
 
     pub(crate) fn wait_until_stopped(self) {
-        let registry = self.registry.clone();
+        let registry = Arc::clone(&self.registry);
         drop(self);
         registry.wait_until_stopped();
     }
diff --git a/compiler/rustc_thread_pool/tests/double_init_fail.rs b/compiler/rustc_thread_pool/tests/double_init_fail.rs
index ef190099293..85e509518d4 100644
--- a/compiler/rustc_thread_pool/tests/double_init_fail.rs
+++ b/compiler/rustc_thread_pool/tests/double_init_fail.rs
@@ -1,6 +1,8 @@
+#![allow(unused_crate_dependencies)]
+
 use std::error::Error;
 
-use rustc_thred_pool::ThreadPoolBuilder;
+use rustc_thread_pool::ThreadPoolBuilder;
 
 #[test]
 #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
diff --git a/compiler/rustc_thread_pool/tests/init_zero_threads.rs b/compiler/rustc_thread_pool/tests/init_zero_threads.rs
index 1f7e299e3e9..261493fcb7b 100644
--- a/compiler/rustc_thread_pool/tests/init_zero_threads.rs
+++ b/compiler/rustc_thread_pool/tests/init_zero_threads.rs
@@ -1,4 +1,6 @@
-use rustc_thred_pool::ThreadPoolBuilder;
+#![allow(unused_crate_dependencies)]
+
+use rustc_thread_pool::ThreadPoolBuilder;
 
 #[test]
 #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
diff --git a/compiler/rustc_thread_pool/tests/scope_join.rs b/compiler/rustc_thread_pool/tests/scope_join.rs
index 0bd33d086cf..83468da81c0 100644
--- a/compiler/rustc_thread_pool/tests/scope_join.rs
+++ b/compiler/rustc_thread_pool/tests/scope_join.rs
@@ -1,10 +1,12 @@
+#![allow(unused_crate_dependencies)]
+
 /// Test that one can emulate join with `scope`:
 fn pseudo_join<F, G>(f: F, g: G)
 where
     F: FnOnce() + Send,
     G: FnOnce() + Send,
 {
-    rustc_thred_pool::scope(|s| {
+    rustc_thread_pool::scope(|s| {
         s.spawn(|_| g());
         f();
     });
diff --git a/compiler/rustc_thread_pool/tests/scoped_threadpool.rs b/compiler/rustc_thread_pool/tests/scoped_threadpool.rs
index e4b0f6c41e1..295da650e88 100644
--- a/compiler/rustc_thread_pool/tests/scoped_threadpool.rs
+++ b/compiler/rustc_thread_pool/tests/scoped_threadpool.rs
@@ -1,5 +1,7 @@
+#![allow(unused_crate_dependencies)]
+
 use crossbeam_utils::thread;
-use rustc_thred_pool::ThreadPoolBuilder;
+use rustc_thread_pool::ThreadPoolBuilder;
 
 #[derive(PartialEq, Eq, Debug)]
 struct Local(i32);
diff --git a/compiler/rustc_thread_pool/tests/simple_panic.rs b/compiler/rustc_thread_pool/tests/simple_panic.rs
index 16896e36fa0..b35b4d632d2 100644
--- a/compiler/rustc_thread_pool/tests/simple_panic.rs
+++ b/compiler/rustc_thread_pool/tests/simple_panic.rs
@@ -1,4 +1,6 @@
-use rustc_thred_pool::join;
+#![allow(unused_crate_dependencies)]
+
+use rustc_thread_pool::join;
 
 #[test]
 #[should_panic(expected = "should panic")]
diff --git a/compiler/rustc_thread_pool/tests/stack_overflow_crash.rs b/compiler/rustc_thread_pool/tests/stack_overflow_crash.rs
index 49c9ca1d75e..805b6d8ee3f 100644
--- a/compiler/rustc_thread_pool/tests/stack_overflow_crash.rs
+++ b/compiler/rustc_thread_pool/tests/stack_overflow_crash.rs
@@ -1,9 +1,11 @@
+#![allow(unused_crate_dependencies)]
+
 use std::env;
 #[cfg(target_os = "linux")]
 use std::os::unix::process::ExitStatusExt;
 use std::process::{Command, ExitStatus, Stdio};
 
-use rustc_thred_pool::ThreadPoolBuilder;
+use rustc_thread_pool::ThreadPoolBuilder;
 
 fn force_stack_overflow(depth: u32) {
     let mut buffer = [0u8; 1024 * 1024];
```
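One non-mechanical change worth noting: `wait_until_stopped` in `thread_pool/mod.rs` now uses `Arc::clone(&self.registry)` instead of `self.registry.clone()`. Both perform the same cheap reference-count increment; the explicit form cannot be misread as a deep copy of the registry, which is presumably why the tidy/warning pass flagged it. A standalone sketch of the idiom (this `Registry` is a stand-in, not the real type):

```rust
use std::sync::Arc;

// Stand-in for the thread pool's shared registry state.
struct Registry {
    num_threads: usize,
}

fn main() {
    let registry = Arc::new(Registry { num_threads: 8 });

    // `registry.clone()` would also compile via auto-deref, but it reads
    // like a deep copy. `Arc::clone` says: clone the handle, share the data.
    let handle = Arc::clone(&registry);

    assert_eq!(handle.num_threads, 8);
    assert_eq!(Arc::strong_count(&registry), 2);
}
```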

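Each integration test under `tests/` also gains `#![allow(unused_crate_dependencies)]`, mirroring the `#![cfg_attr(test, allow(unused_crate_dependencies))]` added to `lib.rs`. That lint fires per crate, and every file in `tests/` is compiled as its own crate, so a dev-dependency used by only some test targets would otherwise be flagged as unused in the rest. An illustrative test file, not from this commit:

```rust
// tests/smoke.rs (hypothetical): this file is compiled as its own crate.
// If the package lists a dev-dependency that this particular crate never
// uses, `unused_crate_dependencies` would flag it here, hence the allow.
#![allow(unused_crate_dependencies)]

#[test]
fn smoke() {
    assert_eq!(2 + 2, 4);
}
```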