Convert `ShardedHashMap` to use `hashbrown::HashTable`
The `hash_raw_entry` feature has finished its close FCP, so the compiler should stop using it to allow its removal. Several `Sharded` maps were using raw entries to avoid re-hashing between the shard lookup and the map lookup; `hashbrown::HashTable` lets us do the same thing without the raw-entry API.
parent 9fb94b32df
commit 3b0c2585c8
10 changed files with 109 additions and 66 deletions
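
The `ShardedHashMap` type itself lives in `rustc_data_structures::sharded` and is not shown in this excerpt. As a rough illustration of the idea in the commit message — hash the key once and let that single hash both pick the shard and probe the table — here is a minimal standalone sketch of a sharded map over `hashbrown::HashTable`. The name `ShardedMap`, the fixed shard count, and the method signatures are assumptions for illustration, not rustc's implementation.

// A minimal sketch (not rustc's ShardedHashMap): a sharded map on top of
// hashbrown::HashTable. The key is hashed exactly once; the same hash picks
// the shard and probes the table, which is the double hashing the raw-entry
// API used to avoid.
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};
use std::sync::Mutex;

use hashbrown::hash_table::{Entry, HashTable};

const SHARDS: usize = 32;

pub struct ShardedMap<K, V> {
    hasher: RandomState,
    shards: Vec<Mutex<HashTable<(K, V)>>>,
}

impl<K: Hash + Eq, V: Copy> ShardedMap<K, V> {
    pub fn with_capacity(cap: usize) -> Self {
        ShardedMap {
            hasher: RandomState::new(),
            shards: (0..SHARDS)
                .map(|_| Mutex::new(HashTable::with_capacity(cap / SHARDS)))
                .collect(),
        }
    }

    // Pick a shard from the top bits; HashTable probes mostly with the low bits.
    fn shard_for(&self, hash: u64) -> &Mutex<HashTable<(K, V)>> {
        &self.shards[(hash >> 59) as usize % SHARDS]
    }

    pub fn get(&self, key: &K) -> Option<V> {
        let hash = self.hasher.hash_one(key);
        let shard = self.shard_for(hash).lock().unwrap();
        // find() takes the precomputed hash; the key is not hashed a second time.
        shard.find(hash, |(k, _)| k == key).map(|(_, v)| *v)
    }

    pub fn insert(&self, key: K, value: V) {
        let hash = self.hasher.hash_one(&key);
        let mut shard = self.shard_for(hash).lock().unwrap();
        // entry() also reuses the hash; the third closure only re-hashes stored
        // entries when the table grows.
        match shard.entry(hash, |(k, _)| *k == key, |(k, _)| self.hasher.hash_one(k)) {
            Entry::Occupied(mut e) => e.get_mut().1 = value,
            Entry::Vacant(e) => {
                e.insert((key, value));
            }
        }
    }
}
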
@@ -1,5 +1,4 @@
 use std::assert_matches::assert_matches;
-use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
@@ -9,7 +8,7 @@ use std::sync::atomic::{AtomicU32, Ordering};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef};
-use rustc_data_structures::sharded::{self, Sharded};
+use rustc_data_structures::sharded::{self, ShardedHashMap};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::sync::{AtomicU64, Lock};
 use rustc_data_structures::unord::UnordMap;
@@ -619,7 +618,7 @@ impl<D: Deps> DepGraphData<D> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.current.prev_index_to_index.lock()[prev_index]
         } else {
-            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
+            self.current.new_node_to_index.get(dep_node)
         }
     }
 
@@ -1048,7 +1047,7 @@ rustc_index::newtype_index! {
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<D: Deps> {
     encoder: GraphEncoder<D>,
-    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
+    new_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
 
     /// This is used to verify that fingerprints do not change between the creation of a node
@@ -1117,12 +1116,9 @@ impl<D: Deps> CurrentDepGraph<D> {
                 profiler,
                 previous,
             ),
-            new_node_to_index: Sharded::new(|| {
-                FxHashMap::with_capacity_and_hasher(
-                    new_node_count_estimate / sharded::shards(),
-                    Default::default(),
-                )
-            }),
+            new_node_to_index: ShardedHashMap::with_capacity(
+                new_node_count_estimate / sharded::shards(),
+            ),
             prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
             anon_id_seed,
             #[cfg(debug_assertions)]
@@ -1152,14 +1148,9 @@ impl<D: Deps> CurrentDepGraph<D> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };
+        let dep_node_index = self
+            .new_node_to_index
+            .get_or_insert_with(key, || self.encoder.send(key, current_fingerprint, edges));
 
         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
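
The `get_or_insert_with` call above replaces the old `match` on a raw entry. Continuing the illustrative sketch from above (again an assumed API shape, not rustc's code), such a method falls out of `HashTable::entry` directly; note that the value-producing closure runs while the shard lock is held, just as `encoder.send` ran inside the locked shard's `Vacant` arm before.

impl<K: Hash + Eq, V: Copy> ShardedMap<K, V> {
    /// Sketch of a get_or_insert_with-style helper: hash once, lock one shard,
    /// and run `make` only if the key is absent. `make` executes under the
    /// shard lock, mirroring the old code above.
    pub fn get_or_insert_with(&self, key: K, make: impl FnOnce() -> V) -> V {
        let hash = self.hasher.hash_one(&key);
        let mut shard = self.shard_for(hash).lock().unwrap();
        match shard.entry(hash, |(k, _)| *k == key, |(k, _)| self.hasher.hash_one(k)) {
            Entry::Occupied(e) => e.get().1,
            Entry::Vacant(e) => {
                let value = make();
                e.insert((key, value));
                value
            }
        }
    }
}
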
@@ -1257,7 +1248,7 @@ impl<D: Deps> CurrentDepGraph<D> {
     ) {
         let node = &prev_graph.index_to_node(prev_index);
         debug_assert!(
-            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
+            !self.new_node_to_index.get(node).is_some(),
             "node from previous graph present in new node collection"
         );
     }
@@ -1382,7 +1373,7 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN
     if dep_node.is_none() {
         // Try to find it among the new nodes
         for shard in data.current.new_node_to_index.lock_shards() {
-            if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
+            if let Some((node, _)) = shard.iter().find(|(_, index)| *index == dep_node_index) {
                 dep_node = Some(*node);
                 break;
             }
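
A side effect of storing `(K, V)` tuples in a `HashTable` is visible in the hunk above: iterating a shard now yields `&(DepNode, DepNodeIndex)` tuples rather than `(&DepNode, &DepNodeIndex)` pairs, so the `find` closure needs one less dereference (`*index` instead of `**index`). A `lock_shards`-style helper on the sketch type (again an assumption, not rustc's API) would look like:

use std::sync::MutexGuard;

impl<K: Hash + Eq, V: Copy> ShardedMap<K, V> {
    /// Lock and yield each shard in turn, like the lock_shards() call above.
    /// Iterating a HashTable<(K, V)> yields &(K, V) tuples, not (&K, &V) pairs.
    pub fn lock_shards<'a>(&'a self) -> impl Iterator<Item = MutexGuard<'a, HashTable<(K, V)>>> + 'a {
        self.shards.iter().map(|s| s.lock().unwrap())
    }
}
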
@@ -3,7 +3,6 @@
 #![feature(assert_matches)]
 #![feature(core_intrinsics)]
 #![feature(dropck_eyepatch)]
-#![feature(hash_raw_entry)]
 #![feature(let_chains)]
 #![feature(min_specialization)]
 #![warn(unreachable_pub)]
@@ -2,8 +2,7 @@ use std::fmt::Debug;
 use std::hash::Hash;
 use std::sync::OnceLock;
 
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::{self, Sharded};
+use rustc_data_structures::sharded::ShardedHashMap;
 pub use rustc_data_structures::vec_cache::VecCache;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_index::Idx;
@@ -36,7 +35,7 @@ pub trait QueryCache: Sized {
 /// In-memory cache for queries whose keys aren't suitable for any of the
 /// more specialized kinds of cache. Backed by a sharded hashmap.
 pub struct DefaultCache<K, V> {
-    cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
+    cache: ShardedHashMap<K, (V, DepNodeIndex)>,
 }
 
 impl<K, V> Default for DefaultCache<K, V> {
@@ -55,19 +54,14 @@ where
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        let key_hash = sharded::make_hash(key);
-        let lock = self.cache.lock_shard_by_hash(key_hash);
-        let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
-
-        if let Some((_, value)) = result { Some(*value) } else { None }
+        self.cache.get(key)
     }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.lock_shard_by_value(&key);
         // We may be overwriting another value. This is all right, since the dep-graph
         // will check that the fingerprint matches.
-        lock.insert(key, (value, index));
+        self.cache.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
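
Finally, a small usage example of the sketch type, shaped like the `DefaultCache` methods above: `lookup` and `complete` each reduce to a single call (`ShardedMap` is still the illustrative type from the sketch, not rustc's `ShardedHashMap`).

fn main() {
    // Keys and values stand in for (query key, (result, DepNodeIndex)).
    let cache: ShardedMap<u32, (u64, u32)> = ShardedMap::with_capacity(1024);

    // complete(): may overwrite an existing value; the sketch allows that too.
    cache.insert(7, (0xdead_beef, 42));

    // lookup(): one hash, one shard lock, one probe.
    assert_eq!(cache.get(&7), Some((0xdead_beef, 42)));
    assert_eq!(cache.get(&8), None);

    // get_or_insert_with(): the closure runs only for a missing key.
    let v = cache.get_or_insert_with(9, || (1, 2));
    assert_eq!(v, (1, 2));
}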