
Allow the QueryCache to specify storage.

Camille GILLOT 2020-03-27 18:41:13 +01:00
parent fb5615a477
commit e8ef41e83f
5 changed files with 62 additions and 39 deletions


@@ -328,6 +328,10 @@ macro_rules! define_queries_inner {
 $(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
     type Key = $($K)*;
     type Value = $V;
+    type Stored = <
+        query_storage!([$($modifiers)*][$($K)*, $V])
+        as QueryStorage
+    >::Stored;
     const NAME: &'static str = stringify!($name);
     const CATEGORY: ProfileCategory = $category;
 }
@@ -426,8 +430,10 @@ macro_rules! define_queries_inner {

     $($(#[$attr])*
     #[inline(always)]
-    pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V {
-        self.at(DUMMY_SP).$name(key)
+    pub fn $name(self, key: query_helper_param_ty!($($K)*))
+        -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+    {
+        self.at(DUMMY_SP).$name(key.into_query_param())
     })*

     /// All self-profiling events generated by the query engine use
@@ -463,7 +469,9 @@ macro_rules! define_queries_inner {
 impl TyCtxtAt<$tcx> {
     $($(#[$attr])*
     #[inline(always)]
-    pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> $V {
+    pub fn $name(self, key: query_helper_param_ty!($($K)*))
+        -> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
+    {
         get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
     })*
 }


@@ -8,13 +8,21 @@ use std::default::Default;
 use std::hash::Hash;
 use std::marker::PhantomData;

-pub trait CacheSelector<K: Hash, V> {
-    type Cache: QueryCache<Key = K, Value = V>;
+pub trait CacheSelector<K, V> {
+    type Cache;
 }

-pub trait QueryCache: Default {
-    type Key: Hash;
+pub trait QueryStorage: Default {
     type Value;
+    type Stored: Clone;
+
+    /// Store a value without putting it in the cache.
+    /// This is meant to be used with cycle errors.
+    fn store_nocache(&self, value: Self::Value) -> Self::Stored;
+}
+
+pub trait QueryCache: QueryStorage {
+    type Key: Hash;
     type Sharded: Default;

     /// Checks if the query is already computed and in the cache.
@@ -30,7 +38,7 @@ pub trait QueryCache: Default {
         on_miss: OnMiss,
     ) -> R
     where
-        OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R,
+        OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
         OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R;

     fn complete<CTX: QueryContext>(
@@ -40,7 +48,7 @@ pub trait QueryCache: Default {
         key: Self::Key,
         value: Self::Value,
         index: DepNodeIndex,
-    );
+    ) -> Self::Stored;

     fn iter<R, L>(
         &self,
@@ -66,9 +74,18 @@ impl<K, V> Default for DefaultCache<K, V> {
     }
 }

+impl<K: Eq + Hash, V: Clone> QueryStorage for DefaultCache<K, V> {
+    type Value = V;
+    type Stored = V;
+
+    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+        // We have no dedicated storage
+        value
+    }
+}
+
 impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
     type Key = K;
-    type Value = V;
     type Sharded = FxHashMap<K, (V, DepNodeIndex)>;

     #[inline(always)]
@@ -99,8 +116,9 @@ impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
         key: K,
         value: V,
         index: DepNodeIndex,
-    ) {
-        lock_sharded_storage.insert(key, (value, index));
+    ) -> Self::Stored {
+        lock_sharded_storage.insert(key, (value.clone(), index));
+        value
     }

     fn iter<R, L>(

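The hunks above split the old QueryCache trait in two: QueryStorage decides how results are stored and which handle type (Stored) callers get back, while QueryCache keeps the key and sharding machinery and now returns Stored from complete. The standalone sketch below illustrates that split with made-up types (SharedCache, an Rc<V> handle, and &mut self instead of the sharded interior mutability the compiler uses); it is a simplified illustration, not rustc's actual API.

// Standalone sketch (not rustc code) of the QueryStorage/QueryCache split.
// `SharedCache` is a hypothetical storage whose `Stored` type (`Rc<V>`)
// differs from its `Value` type (`V`).
use std::collections::HashMap;
use std::hash::Hash;
use std::rc::Rc;

trait QueryStorage: Default {
    type Value;
    type Stored: Clone;

    /// Store a value without putting it in the cache (e.g. for cycle errors).
    fn store_nocache(&mut self, value: Self::Value) -> Self::Stored;
}

trait QueryCache: QueryStorage {
    type Key: Eq + Hash;

    /// Cache a computed value and return the handle callers get back.
    fn complete(&mut self, key: Self::Key, value: Self::Value) -> Self::Stored;

    fn lookup(&self, key: &Self::Key) -> Option<Self::Stored>;
}

struct SharedCache<K, V> {
    map: HashMap<K, Rc<V>>,
}

// Manual impl, like DefaultCache above, so Default holds for any K and V.
impl<K, V> Default for SharedCache<K, V> {
    fn default() -> Self {
        SharedCache { map: HashMap::new() }
    }
}

impl<K: Eq + Hash, V> QueryStorage for SharedCache<K, V> {
    type Value = V;
    // Callers receive a cheap, clonable handle; `V` itself needs no `Clone`.
    type Stored = Rc<V>;

    fn store_nocache(&mut self, value: V) -> Rc<V> {
        // No dedicated storage: wrap the value without caching it.
        Rc::new(value)
    }
}

impl<K: Eq + Hash, V> QueryCache for SharedCache<K, V> {
    type Key = K;

    fn complete(&mut self, key: K, value: V) -> Rc<V> {
        let stored = Rc::new(value);
        self.map.insert(key, stored.clone());
        stored
    }

    fn lookup(&self, key: &K) -> Option<Rc<V>> {
        self.map.get(key).cloned()
    }
}

fn main() {
    let mut cache = SharedCache::<u32, String>::default();
    let stored = cache.complete(1, "computed once".to_string());
    assert_eq!(*stored, "computed once");
    assert!(cache.lookup(&1).is_some());
    assert!(cache.lookup(&2).is_none());
}

Because callers only ever clone the Stored handle, the generic plumbing no longer needs C::Value: Clone; only Stored must be Clone. For DefaultCache the two types coincide, so its V: Clone bound stays.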

@@ -20,7 +20,8 @@ pub trait QueryConfig<CTX> {
     const CATEGORY: ProfileCategory;

     type Key: Eq + Hash + Clone + Debug;
-    type Value: Clone;
+    type Value;
+    type Stored: Clone;
 }

 pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
@@ -28,7 +29,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
     const EVAL_ALWAYS: bool;
     const DEP_KIND: CTX::DepKind;

-    type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
+    type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;

     // Don't use this method to access query results, instead use the methods on TyCtxt
     fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>;


@@ -7,7 +7,7 @@ pub use self::job::deadlock;
 pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};

 mod caches;
-pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache};
+pub use self::caches::{CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage};

 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};


@@ -148,7 +148,6 @@ struct JobOwner<'tcx, CTX: QueryContext, C>
 where
     C: QueryCache,
     C::Key: Eq + Hash + Clone + Debug,
-    C::Value: Clone,
 {
     state: &'tcx QueryState<CTX, C>,
     key: C::Key,
@@ -159,7 +158,6 @@ impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C>
 where
     C: QueryCache,
     C::Key: Eq + Hash + Clone + Debug,
-    C::Value: Clone,
 {
     /// Either gets a `JobOwner` corresponding the query, allowing us to
     /// start executing the query, or returns with the result of the query.
@@ -177,7 +175,7 @@ where
         mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>,
     ) -> TryGetJob<'b, CTX, C>
     where
-        Q: QueryDescription<CTX, Key = C::Key, Value = C::Value, Cache = C>,
+        Q: QueryDescription<CTX, Key = C::Key, Stored = C::Stored, Value = C::Value, Cache = C>,
         CTX: QueryContext,
     {
         let lock = &mut *lookup.lock;
@@ -229,7 +227,8 @@ where
             // so we just return the error.
             #[cfg(not(parallel_compiler))]
             return TryGetJob::Cycle(cold_path(|| {
-                Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
+                let value = Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span));
+                Q::query_state(tcx).cache.store_nocache(value)
             }));

             // With parallel queries we might just have to wait on some other
@@ -239,7 +238,9 @@ where
             let result = latch.wait_on(tcx, span);

             if let Err(cycle) = result {
-                return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
+                let value = Q::handle_cycle_error(tcx, cycle);
+                let value = Q::query_state(tcx).cache.store_nocache(value);
+                return TryGetJob::Cycle(value);
             }

             let cached = try_get_cached(
@@ -261,7 +262,7 @@ where
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
     #[inline(always)]
-    fn complete(self, tcx: CTX, result: &C::Value, dep_node_index: DepNodeIndex) {
+    fn complete(self, tcx: CTX, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
         // We can move out of `self` here because we `mem::forget` it below
         let key = unsafe { ptr::read(&self.key) };
         let state = self.state;
@@ -269,18 +270,18 @@ where
         // Forget ourself so our destructor won't poison the query
         mem::forget(self);

-        let job = {
-            let result = result.clone();
+        let (job, result) = {
             let mut lock = state.shards.get_shard_by_value(&key).lock();
             let job = match lock.active.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
             };
-            state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
-            job
+            let result = state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
+            (job, result)
         };

         job.signal_complete();
+        result
     }
 }
@@ -297,7 +298,6 @@ where
 impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C>
 where
     C::Key: Eq + Hash + Clone + Debug,
-    C::Value: Clone,
 {
     #[inline(never)]
     #[cold]
@@ -331,7 +331,6 @@ pub struct CycleError<Q> {
 enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache>
 where
     C::Key: Eq + Hash + Clone + Debug,
-    C::Value: Clone,
 {
     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
     NotYetStarted(JobOwner<'tcx, CTX, C>),
@@ -340,10 +339,10 @@ where
     /// Returns the result of the query and its dep-node index
     /// if it succeeded or a cycle error if it failed.
     #[cfg(parallel_compiler)]
-    JobCompleted((C::Value, DepNodeIndex)),
+    JobCompleted((C::Stored, DepNodeIndex)),

     /// Trying to execute the query resulted in a cycle.
-    Cycle(C::Value),
+    Cycle(C::Stored),
 }

 /// Checks if the query is already computed and in the cache.
@@ -362,7 +361,7 @@ fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
 where
     C: QueryCache,
     CTX: QueryContext,
-    OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
+    OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
     OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R,
 {
     state.cache.lookup(
@@ -388,7 +387,7 @@ fn try_execute_query<Q, CTX>(
     span: Span,
     key: Q::Key,
     lookup: QueryLookup<'_, CTX, Q::Key, <Q::Cache as QueryCache>::Sharded>,
-) -> Q::Value
+) -> Q::Stored
 where
     Q: QueryDescription<CTX>,
     CTX: QueryContext,
@@ -427,9 +426,7 @@ where
             tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
         }

-        job.complete(tcx, &result, dep_node_index);
-
-        return result;
+        return job.complete(tcx, result, dep_node_index);
     }

     let dep_node = Q::to_dep_node(tcx, &key);
@@ -454,8 +451,7 @@ where
             })
         });
         if let Some((result, dep_node_index)) = loaded {
-            job.complete(tcx, &result, dep_node_index);
-            return result;
+            return job.complete(tcx, result, dep_node_index);
         }
     }

@@ -558,7 +554,7 @@ fn force_query_with_job<Q, CTX>(
     key: Q::Key,
     job: JobOwner<'_, CTX, Q::Cache>,
     dep_node: DepNode<CTX::DepKind>,
-) -> (Q::Value, DepNodeIndex)
+) -> (Q::Stored, DepNodeIndex)
 where
     Q: QueryDescription<CTX>,
     CTX: QueryContext,
@@ -603,13 +599,13 @@ where
         }
     }

-    job.complete(tcx, &result, dep_node_index);
+    let result = job.complete(tcx, result, dep_node_index);

     (result, dep_node_index)
 }

 #[inline(never)]
-pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Value
+pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key) -> Q::Stored
 where
     Q: QueryDescription<CTX>,
     CTX: QueryContext,
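
In the plumbing above, JobOwner::complete now takes the result by value, caches it, and returns the C::Stored handle, while both cycle-error paths pass the recovered value through store_nocache so it comes back in the same Stored form without ever entering the cache. The short, self-contained sketch below shows that control flow with illustrative names (Storage, VecStorage, finish_query) that are not rustc APIs; the real code also threads the dep-node index, sharded locks, and job signalling.

// Standalone sketch, not rustc code: successful results go through
// `complete` (cached, returned as `Stored`), while values recovered from a
// cycle error go through `store_nocache` (returned as `Stored`, never cached).
trait Storage {
    type Value;
    type Stored: Clone;

    /// Return the value in its `Stored` form without caching it.
    fn store_nocache(&mut self, value: Self::Value) -> Self::Stored;
    /// Cache the value and return its `Stored` form.
    fn complete(&mut self, value: Self::Value) -> Self::Stored;
}

/// Toy storage where `Stored` and `Value` happen to be the same type.
#[derive(Default)]
struct VecStorage {
    cached: Vec<String>,
}

impl Storage for VecStorage {
    type Value = String;
    type Stored = String;

    fn store_nocache(&mut self, value: String) -> String {
        value // nothing is recorded
    }

    fn complete(&mut self, value: String) -> String {
        self.cached.push(value.clone());
        value
    }
}

/// `Ok` carries a computed result, `Err` a value recovered from a cycle error.
fn finish_query<S: Storage>(storage: &mut S, outcome: Result<S::Value, S::Value>) -> S::Stored {
    match outcome {
        Ok(value) => storage.complete(value),
        Err(recovered) => storage.store_nocache(recovered),
    }
}

fn main() {
    let mut storage = VecStorage::default();
    let ok = finish_query(&mut storage, Ok("result".to_string()));
    let err = finish_query(&mut storage, Err("cycle error".to_string()));
    assert_eq!((ok.as_str(), err.as_str()), ("result", "cycle error"));
    // Only the successful result made it into the cache.
    assert_eq!(storage.cached, vec!["result".to_string()]);
}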