Auto merge of #115388 - Zoxc:sharded-lock, r=SparrowLii

Add optimized lock methods for `Sharded` and refactor `Lock`

This adds methods to `Sharded` that pick a shard and lock it in a single call. These branch on parallelism just once instead of twice, improving performance.
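For context, here is a minimal sketch of the combined helpers, using a plain `std::sync::Mutex` and a fixed shard count in place of rustc's `Lock` and its `cfg(parallel_compiler)` branching; the struct layout, shard count, and hashing details are illustrative assumptions, and only the `lock_shard_by_value`/`lock_shard_by_hash` entry points mirror the methods this PR adds.

```rust
// Illustrative sketch only — not the rustc_data_structures implementation.
// The real `Sharded` uses rustc's `Lock` and effectively a single shard when
// the parallel compiler is disabled; a std Mutex and fixed shard count stand
// in for that here.
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};
use std::sync::{Mutex, MutexGuard};

const SHARDS: usize = 32; // assumed shard count, for illustration

pub struct Sharded<T> {
    shards: Vec<Mutex<T>>,
    hash_builder: RandomState,
}

impl<T: Default> Sharded<T> {
    pub fn new() -> Self {
        Sharded {
            shards: (0..SHARDS).map(|_| Mutex::new(T::default())).collect(),
            hash_builder: RandomState::new(),
        }
    }

    // Old two-step pattern: callers wrote `sharded.get_shard_by_hash(h).lock()`,
    // which in rustc checks for parallelism once here and once in `lock()`.
    pub fn get_shard_by_hash(&self, hash: u64) -> &Mutex<T> {
        &self.shards[hash as usize % SHARDS]
    }

    // New one-step pattern: pick the shard and return its guard in one call,
    // so the parallelism branch happens only once.
    pub fn lock_shard_by_hash(&self, hash: u64) -> MutexGuard<'_, T> {
        self.shards[hash as usize % SHARDS].lock().unwrap()
    }

    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, value: &K) -> MutexGuard<'_, T> {
        let mut hasher = self.hash_builder.build_hasher();
        value.hash(&mut hasher);
        self.lock_shard_by_hash(hasher.finish())
    }
}
```

Call sites then go from `map.get_shard_by_value(&key).lock()` to `map.lock_shard_by_value(&key)`, which is exactly the pattern applied in the diffs below.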

Benchmark for `cfg(parallel_compiler)` and 1 thread:
| Benchmark | Before | After | % |
|:---|---:|---:|---:|
| 🟣 **clap**:check | 1.6461s | 1.6345s | -0.70% |
| 🟣 **hyper**:check | 0.2414s | 0.2394s | -0.83% |
| 🟣 **regex**:check | 0.9205s | 0.9143s | -0.67% |
| 🟣 **syn**:check | 1.4981s | 1.4869s | -0.75% |
| 🟣 **syntex_syntax**:check | 5.7629s | 5.7256s | -0.65% |
| Total | 10.0690s | 10.0008s | -0.68% |
| Summary | 1.0000s | 0.9928s | -0.72% |

cc `@SparrowLii`
Merged by bors as commit 9b72cc9abf on 2023-09-11 01:43:29 +00:00.
6 changed files with 267 additions and 225 deletions.


@@ -629,12 +629,7 @@ impl<K: DepKind> DepGraphData<K> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.current.prev_index_to_index.lock()[prev_index]
} else {
-self.current
-    .new_node_to_index
-    .get_shard_by_value(dep_node)
-    .lock()
-    .get(dep_node)
-    .copied()
+self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
}
}
@@ -1201,8 +1196,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
edges: EdgesVec,
current_fingerprint: Fingerprint,
) -> DepNodeIndex {
-let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
-{
+let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index =
@@ -1328,7 +1322,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
) {
let node = &prev_graph.index_to_node(prev_index);
debug_assert!(
-!self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
+!self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
"node from previous graph present in new node collection"
);
}


@@ -55,7 +55,7 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
let key_hash = sharded::make_hash(key);
-let lock = self.cache.get_shard_by_hash(key_hash).lock();
+let lock = self.cache.lock_shard_by_hash(key_hash);
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
if let Some((_, value)) = result { Some(*value) } else { None }
@@ -63,7 +63,7 @@ where
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-let mut lock = self.cache.get_shard_by_value(&key).lock();
+let mut lock = self.cache.lock_shard_by_value(&key);
// We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, (value, index));
@@ -148,13 +148,13 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+let lock = self.cache.lock_shard_by_hash(key.index() as u64);
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
}
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
lock.insert(key, (value, index));
}


@@ -158,7 +158,7 @@ where
cache.complete(key, result, dep_node_index);
let job = {
-let mut lock = state.active.get_shard_by_value(&key).lock();
+let mut lock = state.active.lock_shard_by_value(&key);
match lock.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -180,7 +180,7 @@ where
// Poison the query so jobs waiting on it panic.
let state = self.state;
let job = {
-let mut shard = state.active.get_shard_by_value(&self.key).lock();
+let mut shard = state.active.lock_shard_by_value(&self.key);
let job = match shard.remove(&self.key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -303,7 +303,7 @@ where
Qcx: QueryContext,
{
let state = query.query_state(qcx);
-let mut state_lock = state.active.get_shard_by_value(&key).lock();
+let mut state_lock = state.active.lock_shard_by_value(&key);
// For the parallel compiler we need to check both the query cache and query state structures
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the