
Commit 249198c

Rollup merge of #109758 - nnethercote:parallel-cleanups, r=cjgillot

Parallel compiler cleanups

A few small improvements I found while looking closely at this code.

r? `@cjgillot`
cc `@Zoxc` `@SparrowLii`

2 parents 7cd96ae + 08dec89 · commit 249198c

4 files changed (+45 -36)

compiler/rustc_data_structures/src/sharded.rs (+1 -2)

@@ -5,7 +5,7 @@ use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
 use std::mem;
 
-#[derive(Clone, Default)]
+#[derive(Default)]
 #[cfg_attr(parallel_compiler, repr(align(64)))]
 struct CacheAligned<T>(T);
 
@@ -21,7 +21,6 @@ const SHARD_BITS: usize = 0;
 pub const SHARDS: usize = 1 << SHARD_BITS;
 
 /// An array of cache-line aligned inner locked structures with convenience methods.
-#[derive(Clone)]
 pub struct Sharded<T> {
     shards: [CacheAligned<Lock<T>>; SHARDS],
 }
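For readers unfamiliar with `Sharded`: it splits one lock-protected map into several cache-line aligned shards, so threads working on different keys take different locks and touch different cache lines. The following is a minimal, self-contained sketch of that pattern using only `std` types; the names `ShardedMap`, `shard_for`, and `get`, and the shard count, are invented for illustration and are not rustc's API.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARD_BITS: usize = 4;
const SHARDS: usize = 1 << SHARD_BITS;

// Pad each shard to a cache line so two shards never share one, mirroring
// the `CacheAligned` wrapper in the diff above.
#[repr(align(64))]
struct CacheAligned<T>(T);

struct ShardedMap<K, V> {
    shards: Vec<CacheAligned<Mutex<HashMap<K, V>>>>,
}

impl<K: Hash + Eq, V> ShardedMap<K, V> {
    fn new() -> Self {
        ShardedMap {
            shards: (0..SHARDS)
                .map(|_| CacheAligned(Mutex::new(HashMap::new())))
                .collect(),
        }
    }

    // Hash the key once to pick a shard, then lock only that shard.
    fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        &self.shards[hasher.finish() as usize & (SHARDS - 1)].0
    }

    fn insert(&self, key: K, value: V) {
        self.shard_for(&key).lock().unwrap().insert(key, value);
    }

    fn get(&self, key: &K) -> Option<V>
    where
        V: Clone,
    {
        self.shard_for(key).lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let map = ShardedMap::new();
    map.insert("answer", 42);
    assert_eq!(map.get(&"answer"), Some(42));
    assert_eq!(map.get(&"missing"), None);
}

The `#[repr(align(64))]` wrapper plays the same role as `CacheAligned` in the diff: each shard starts on its own cache line, so contention on one shard's lock does not invalidate the line holding its neighbor.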

compiler/rustc_data_structures/src/sync.rs (+39 -26)

@@ -1,21 +1,46 @@
-//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
+//! This module defines various operations and types that are implemented in
+//! one way for the serial compiler, and another way for the parallel compiler.
 //!
-//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
+//! Operations
+//! ----------
+//! The parallel versions of operations use Rayon to execute code in parallel,
+//! while the serial versions degenerate straightforwardly to serial execution.
+//! The operations include `join`, `parallel`, `par_iter`, and `par_for_each`.
 //!
-//! `Lock` is a mutex.
-//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
-//! `RefCell` otherwise.
+//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` for the
+//! serial version and `Erased + Send + Sync` for the parallel version.
 //!
-//! `RwLock` is a read-write lock.
-//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
-//! `RefCell` otherwise.
+//! Types
+//! -----
+//! The parallel versions of types provide various kinds of synchronization,
+//! while the serial compiler versions do not.
 //!
-//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
+//! The following table shows how the types are implemented internally. Except
+//! where noted otherwise, the type in column one is defined as a
+//! newtype around the type from column two or three.
 //!
-//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
+//! | Type                    | Serial version      | Parallel version                |
+//! | ----------------------- | ------------------- | ------------------------------- |
+//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
+//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
+//! |                         |                     |                                 |
+//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
+//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
+//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
+//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
+//! |                         |                     |                                 |
+//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
+//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
+//! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
+//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
+//! |                         |                     |                                 |
+//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
 //!
-//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
-//! depending on the value of cfg!(parallel_compiler).
+//! [^1] `MTLock` is similar to `Lock`, but the serial version avoids the cost
+//! of a `RefCell`. This is appropriate when interior mutability is not
+//! required.
+//!
+//! [^2] `MTLockRef` is a typedef.
 
 use crate::owning_ref::{Erased, OwningRef};
 use std::collections::HashMap;

@@ -209,7 +234,7 @@ cfg_if! {
             }
         }
 
-        pub type MTRef<'a, T> = &'a mut T;
+        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
 
         #[derive(Debug, Default)]
         pub struct MTLock<T>(T);

@@ -267,7 +292,7 @@ cfg_if! {
         pub use std::sync::Arc as Lrc;
         pub use std::sync::Weak as Weak;
 
-        pub type MTRef<'a, T> = &'a T;
+        pub type MTLockRef<'a, T> = &'a MTLock<T>;
 
         #[derive(Debug, Default)]
         pub struct MTLock<T>(Lock<T>);

@@ -553,18 +578,6 @@ impl<T> RwLock<T> {
         self.write()
     }
 
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
-        ReadGuard::clone(rg)
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
-        ReadGuard::rwlock(&rg).read()
-    }
-
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
     pub fn leak(&self) -> &T {
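The new module docs describe a pattern where the same names resolve to cheap single-threaded types in the serial build and to real synchronization primitives in the parallel build. Below is a minimal sketch of that idea, assuming a `parallel_compiler` cfg flag and using plain `#[cfg]` blocks and `std` types instead of rustc's `cfg_if!` and `parking_lot`; the module name `sync_sketch` and the `with_lock` helper are invented for illustration.

// Serial configuration: no real synchronization, just cheap single-threaded types.
#[cfg(not(parallel_compiler))]
mod sync_sketch {
    use std::cell::RefCell;
    pub use std::rc::Rc as Lrc;

    pub struct Lock<T>(RefCell<T>);

    impl<T> Lock<T> {
        pub fn new(inner: T) -> Self {
            Lock(RefCell::new(inner))
        }
        pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
            f(&mut *self.0.borrow_mut())
        }
    }

    // Serial version of the reference alias: a mutable borrow is enough.
    #[allow(dead_code)]
    pub type MTLockRef<'a, T> = &'a mut Lock<T>;
}

// Parallel configuration: the same names, now backed by Arc and a mutex.
#[cfg(parallel_compiler)]
mod sync_sketch {
    use std::sync::Mutex;
    pub use std::sync::Arc as Lrc;

    pub struct Lock<T>(Mutex<T>);

    impl<T> Lock<T> {
        pub fn new(inner: T) -> Self {
            Lock(Mutex::new(inner))
        }
        pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
            f(&mut *self.0.lock().unwrap())
        }
    }

    // Parallel version: a shared borrow, since the lock provides exclusion.
    #[allow(dead_code)]
    pub type MTLockRef<'a, T> = &'a Lock<T>;
}

fn main() {
    use sync_sketch::{Lock, Lrc};
    // Code written against these names compiles the same way in both configurations.
    let counter = Lrc::new(Lock::new(0u32));
    counter.with_lock(|n| *n += 1);
    counter.with_lock(|n| assert_eq!(*n, 1));
}

Built without `--cfg parallel_compiler`, only the serial module is compiled, so `Lrc` is `Rc` and the "lock" is just a `RefCell`; that is the zero-synchronization behavior the table above describes for the serial compiler.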

compiler/rustc_monomorphize/src/collector.rs (+5 -5)

@@ -174,7 +174,7 @@
 //! regardless of whether it is actually needed or not.
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::sync::{par_for_each_in, MTLock, MTRef};
+use rustc_data_structures::sync::{par_for_each_in, MTLock, MTLockRef};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};

@@ -341,8 +341,8 @@ pub fn collect_crate_mono_items(
     let recursion_limit = tcx.recursion_limit();
 
     {
-        let visited: MTRef<'_, _> = &mut visited;
-        let inlining_map: MTRef<'_, _> = &mut inlining_map;
+        let visited: MTLockRef<'_, _> = &mut visited;
+        let inlining_map: MTLockRef<'_, _> = &mut inlining_map;
 
         tcx.sess.time("monomorphization_collector_graph_walk", || {
             par_for_each_in(roots, |root| {

@@ -407,10 +407,10 @@ fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionMode) -> Vec<MonoItem<
 fn collect_items_rec<'tcx>(
     tcx: TyCtxt<'tcx>,
     starting_point: Spanned<MonoItem<'tcx>>,
-    visited: MTRef<'_, MTLock<FxHashSet<MonoItem<'tcx>>>>,
+    visited: MTLockRef<'_, FxHashSet<MonoItem<'tcx>>>,
     recursion_depths: &mut DefIdMap<usize>,
     recursion_limit: Limit,
-    inlining_map: MTRef<'_, MTLock<InliningMap<'tcx>>>,
+    inlining_map: MTLockRef<'_, InliningMap<'tcx>>,
 ) {
     if !visited.lock_mut().insert(starting_point.node) {
         // We've been here already, no need to search again.
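To see what the `MTRef` to `MTLockRef` rename buys at call sites, here is a standalone sketch of the serial configuration, where `MTLock<T>` is a plain newtype and `MTLockRef<'a, T>` is `&'a mut MTLock<T>`. The `walk_item` function and the string-based "items" are invented stand-ins for `collect_items_rec` and `MonoItem`, not rustc code.

use std::collections::HashSet;

// Serial stand-ins for MTLock and MTLockRef.
struct MTLock<T>(T);

impl<T> MTLock<T> {
    fn new(inner: T) -> Self {
        MTLock(inner)
    }
    // In the serial build, "locking" is just handing out the inner value.
    fn lock_mut(&mut self) -> &mut T {
        &mut self.0
    }
}

// The new alias folds the MTLock into the reference type...
type MTLockRef<'a, T> = &'a mut MTLock<T>;

// ...so this parameter reads `MTLockRef<'_, HashSet<String>>` rather than the
// doubly nested `MTRef<'_, MTLock<HashSet<String>>>` of the old spelling.
fn walk_item(item: &str, visited: MTLockRef<'_, HashSet<String>>) {
    if !visited.lock_mut().insert(item.to_string()) {
        // Already visited: stop, exactly as collect_items_rec does.
        return;
    }
    // A real collector would recurse into the item's neighbors here.
}

fn main() {
    let mut visited = MTLock::new(HashSet::new());
    {
        // Mirrors the shadowing in collect_crate_mono_items.
        let visited: MTLockRef<'_, _> = &mut visited;
        walk_item("main", visited);
    }
    walk_item("main", &mut visited);
    assert_eq!(visited.lock_mut().len(), 1);
}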

compiler/rustc_query_system/src/query/caches.rs (-3)

@@ -21,9 +21,6 @@ pub trait QueryCache: Sized {
     type Value: Copy + Debug;
 
     /// Checks if the query is already computed and in the cache.
-    /// It returns the shard index and a lock guard to the shard,
-    /// which will be used if the query is not in the cache and we need
-    /// to compute it.
     fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, DepNodeIndex)>;
 
     fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex);
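The removed sentences described a return value that `lookup` no longer has (a shard index and lock guard), so the remaining doc line now matches the signature. As a rough illustration of the two-method shape, here is a toy cache with the same `lookup`/`complete` split; `SimpleCache` and the `u32` stand-in for `DepNodeIndex` are inventions, and rustc's real caches are sharded and generic over key/value storage in ways this does not model.

use std::collections::HashMap;
use std::sync::Mutex;

// `DepNodeIndex` is stood in for by a plain u32; rustc's type is an index
// into the dependency graph.
type DepNodeIndex = u32;

struct SimpleCache<K, V> {
    map: Mutex<HashMap<K, (V, DepNodeIndex)>>,
}

impl<K: std::hash::Hash + Eq, V: Copy> SimpleCache<K, V> {
    fn new() -> Self {
        SimpleCache { map: Mutex::new(HashMap::new()) }
    }

    // Checks whether the query result is already computed and cached.
    fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
        self.map.lock().unwrap().get(key).copied()
    }

    // Records a computed value together with its dep-graph index.
    fn complete(&self, key: K, value: V, index: DepNodeIndex) {
        self.map.lock().unwrap().insert(key, (value, index));
    }
}

fn main() {
    let cache = SimpleCache::new();
    assert_eq!(cache.lookup(&"type_of"), None);
    cache.complete("type_of", 7usize, 0);
    assert_eq!(cache.lookup(&"type_of"), Some((7, 0)));
}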
