
Make rustc_query_system take QueryConfig by instance.

John Kåre Alsaker 2023-02-07 08:32:30 +01:00
parent d962ea5789
commit 3fd7c4a17d
6 changed files with 184 additions and 125 deletions
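In outline, `QueryConfig` moves from associated constants and static methods to methods that take `self` on a `Copy` instance, so the plumbing in `rustc_query_system` can be handed a `query` value instead of being parameterized only by a type. A minimal, self-contained sketch of that shape (the names, the `u32`/`u64` stand-ins for `TyCtxt` and keys, and `ToyQuery` are illustrative, not the actual rustc items):

// Before: behavior hangs off the type alone, so callers write `Q::ANON` and
// `Q::compute(tcx, key)` and must thread `Q` through as a type parameter.
#[allow(dead_code)]
trait QueryConfigStatic {
    const ANON: bool;
    fn compute(tcx: u32, key: u64) -> u64;
}

// After: the same information is exposed through methods taking `self` on a
// cheap `Copy` value, so the plumbing can accept a `query` argument.
trait QueryConfigByInstance: Copy {
    fn anon(self) -> bool;
    fn compute(self, tcx: u32, key: u64) -> u64;
}

// Stand-in for the query plumbing: it now receives the query as a value.
fn run_query<Q: QueryConfigByInstance>(query: Q, tcx: u32, key: u64) -> u64 {
    let _anon = query.anon(); // was `Q::ANON`
    query.compute(tcx, key) // was `Q::compute(tcx, key)`
}

#[derive(Clone, Copy)]
struct ToyQuery;

impl QueryConfigByInstance for ToyQuery {
    fn anon(self) -> bool {
        false
    }
    fn compute(self, tcx: u32, key: u64) -> u64 {
        u64::from(tcx) + key
    }
}

fn main() {
    assert_eq!(run_query(ToyQuery, 1, 2), 3);
}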

compiler/rustc_query_system/src/query/caches.rs

@@ -16,12 +16,9 @@ pub trait CacheSelector<'tcx, V> {
         V: Copy;
 }
 
-pub trait QueryStorage {
-    type Value: Copy;
-}
-
-pub trait QueryCache: QueryStorage + Sized {
+pub trait QueryCache: Sized {
     type Key: Hash + Eq + Copy + Debug;
+    type Value: Copy + Debug;
 
     /// Checks if the query is already computed and in the cache.
     /// It returns the shard index and a lock guard to the shard,
@@ -55,16 +52,13 @@ impl<K, V> Default for DefaultCache<K, V> {
     }
 }
 
-impl<K: Eq + Hash, V: Copy + Debug> QueryStorage for DefaultCache<K, V> {
-    type Value = V;
-}
-
 impl<K, V> QueryCache for DefaultCache<K, V>
 where
     K: Eq + Hash + Copy + Debug,
     V: Copy + Debug,
 {
     type Key = K;
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
@@ -127,15 +121,12 @@ impl<V> Default for SingleCache<V> {
     }
 }
 
-impl<V: Copy + Debug> QueryStorage for SingleCache<V> {
-    type Value = V;
-}
-
 impl<V> QueryCache for SingleCache<V>
 where
     V: Copy + Debug,
 {
     type Key = ();
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
@@ -173,16 +164,13 @@ impl<K: Idx, V> Default for VecCache<K, V> {
     }
 }
 
-impl<K: Eq + Idx, V: Copy + Debug> QueryStorage for VecCache<K, V> {
-    type Value = V;
-}
-
 impl<K, V> QueryCache for VecCache<K, V>
 where
     K: Eq + Idx + Copy + Debug,
     V: Copy + Debug,
 {
     type Key = K;
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
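
The effect of the caches.rs hunks above is that the separate `QueryStorage` supertrait disappears and its `Value` associated type moves onto `QueryCache` itself, so each cache type implements a single trait. A rough, simplified sketch of the resulting shape (hypothetical `QueryCacheSketch`/`MapCache` names; the real trait's `lookup` also returns a `DepNodeIndex` and the real caches use interior mutability):

use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;

// Sketch: `Key` and `Value` both live on the cache trait; there is no
// separate storage trait to implement per cache type.
trait QueryCacheSketch: Sized {
    type Key: Hash + Eq + Copy + Debug;
    type Value: Copy + Debug;

    fn lookup(&self, key: &Self::Key) -> Option<Self::Value>;
    fn complete(&mut self, key: Self::Key, value: Self::Value);
}

#[derive(Default)]
struct MapCache<K, V> {
    map: HashMap<K, V>,
}

impl<K, V> QueryCacheSketch for MapCache<K, V>
where
    K: Hash + Eq + Copy + Debug,
    V: Copy + Debug,
{
    type Key = K;
    type Value = V;

    fn lookup(&self, key: &K) -> Option<V> {
        self.map.get(key).copied()
    }

    fn complete(&mut self, key: K, value: V) {
        self.map.insert(key, value);
    }
}

fn main() {
    let mut cache = MapCache::<u32, u64>::default();
    cache.complete(1, 10);
    assert_eq!(cache.lookup(&1), Some(10));
}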

compiler/rustc_query_system/src/query/config.rs

@@ -10,14 +10,12 @@ use rustc_data_structures::fingerprint::Fingerprint;
 use std::fmt::Debug;
 use std::hash::Hash;
 
-pub type HashResult<Qcx, Q> =
-    Option<fn(&mut StableHashingContext<'_>, &<Q as QueryConfig<Qcx>>::Value) -> Fingerprint>;
+pub type HashResult<V> = Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>;
 
-pub type TryLoadFromDisk<Qcx, Q> =
-    Option<fn(Qcx, SerializedDepNodeIndex) -> Option<<Q as QueryConfig<Qcx>>::Value>>;
+pub type TryLoadFromDisk<Qcx, V> = Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>;
 
-pub trait QueryConfig<Qcx: QueryContext> {
-    const NAME: &'static str;
+pub trait QueryConfig<Qcx: QueryContext>: Copy + Debug {
+    fn name(self) -> &'static str;
 
     // `Key` and `Value` are `Copy` instead of `Clone` to ensure copying them stays cheap,
     // but it isn't necessary.
@@ -27,36 +25,35 @@ pub trait QueryConfig<Qcx: QueryContext> {
     type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
     where
         Qcx: 'a;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache
+    fn query_cache<'a>(self, tcx: Qcx) -> &'a Self::Cache
     where
         Qcx: 'a;
 
-    fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
+    fn cache_on_disk(self, tcx: Qcx::DepContext, key: &Self::Key) -> bool;
 
     // Don't use this method to compute query results, instead use the methods on TyCtxt
-    fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
+    fn execute_query(self, tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
 
-    fn compute(tcx: Qcx, key: Self::Key) -> Self::Value;
+    fn compute(self, tcx: Qcx, key: Self::Key) -> Self::Value;
 
-    fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self>;
+    fn try_load_from_disk(self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self::Value>;
 
-    const ANON: bool;
-    const EVAL_ALWAYS: bool;
-    const DEPTH_LIMIT: bool;
-    const FEEDABLE: bool;
+    fn anon(self) -> bool;
+    fn eval_always(self) -> bool;
+    fn depth_limit(self) -> bool;
+    fn feedable(self) -> bool;
 
-    const DEP_KIND: Qcx::DepKind;
-    const HANDLE_CYCLE_ERROR: HandleCycleError;
-    const HASH_RESULT: HashResult<Qcx, Self>;
+    fn dep_kind(self) -> Qcx::DepKind;
+    fn handle_cycle_error(self) -> HandleCycleError;
+    fn hash_result(self) -> HashResult<Self::Value>;
 
     // Just here for convernience and checking that the key matches the kind, don't override this.
-    fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
-        DepNode::construct(tcx, Self::DEP_KIND, key)
+    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+        DepNode::construct(tcx, self.dep_kind(), key)
     }
 }
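
Taking `self` does not by itself add runtime cost: for a zero-sized `Copy` config type the instance occupies no bytes and the new methods simply return the constants the old associated consts held. A small illustrative sketch (hypothetical `ToyConfig`, not a generated rustc query config):

// Hypothetical stand-in for a query config type: zero-sized and Copy.
#[derive(Clone, Copy, Debug)]
struct ToyConfig;

impl ToyConfig {
    #[inline(always)]
    fn anon(self) -> bool {
        false // was `const ANON: bool = false;`
    }
    #[inline(always)]
    fn depth_limit(self) -> bool {
        true // was `const DEPTH_LIMIT: bool = true;`
    }
}

fn main() {
    // A zero-sized instance carries no data; the type alone picks the behavior.
    assert_eq!(std::mem::size_of::<ToyConfig>(), 0);
    let query = ToyConfig;
    assert!(!query.anon());
    assert!(query.depth_limit());
}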

compiler/rustc_query_system/src/query/mod.rs

@@ -8,8 +8,7 @@ pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJob
 
 mod caches;
 pub use self::caches::{
-    CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, SingleCacheSelector,
-    VecCacheSelector,
+    CacheSelector, DefaultCacheSelector, QueryCache, SingleCacheSelector, VecCacheSelector,
 };
 
 mod config;

compiler/rustc_query_system/src/query/plumbing.rs

@@ -2,6 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
+use crate::dep_graph::HasDepContext;
 use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
@@ -127,7 +128,7 @@ fn mk_cycle<Qcx, R, D: DepKind>(
     handler: HandleCycleError,
 ) -> R
 where
-    Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
+    Qcx: QueryContext + HasDepContext<DepKind = D>,
     R: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>,
 {
     let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
@@ -182,7 +183,7 @@ where
         key: K,
     ) -> TryGetJob<'b, K, D>
     where
-        Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
+        Qcx: QueryContext + HasDepContext<DepKind = D>,
     {
         #[cfg(parallel_compiler)]
         let mut state_lock = state.active.get_shard_by_value(&key).lock();
@@ -349,6 +350,7 @@ where
 
 #[inline(never)]
 fn try_execute_query<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
     span: Span,
     key: Q::Key,
@@ -358,12 +360,12 @@ where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
-    let state = Q::query_state(qcx);
+    let state = query.query_state(qcx);
     match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
         TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
-            let cache = Q::query_cache(qcx);
-            if Q::FEEDABLE {
+            let (result, dep_node_index) = execute_job(query, qcx, key.clone(), dep_node, job.id);
+            let cache = query.query_cache(qcx);
+            if query.feedable() {
                 // We should not compute queries that also got a value via feeding.
                 // This can't happen, as query feeding adds the very dependencies to the fed query
                 // as its feeding query had. So if the fed query is red, so is its feeder, which will
@@ -378,12 +380,12 @@ where
             (result, Some(dep_node_index))
         }
         TryGetJob::Cycle(error) => {
-            let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR);
+            let result = mk_cycle(qcx, error, query.handle_cycle_error());
             (result, None)
         }
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted(query_blocked_prof_timer) => {
-            let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
+            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                 panic!("value must be in cache after waiting")
             };
@@ -397,6 +399,7 @@ where
 
 #[inline(always)]
 fn execute_job<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
     key: Q::Key,
     mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
@@ -417,14 +420,14 @@ where
         }
 
         let prof_timer = qcx.dep_context().profiler().query_provider();
-        let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || Q::compute(qcx, key));
+        let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
         // Similarly, fingerprint the result to assert that
         // it doesn't have anything not considered hashable.
         if cfg!(debug_assertions)
-            && let Some(hash_result) = Q::HASH_RESULT
+            && let Some(hash_result) = query.hash_result()
         {
             qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                 hash_result(&mut hcx, &result);
@@ -434,15 +437,15 @@ where
         return (result, dep_node_index);
     }
 
-    if !Q::ANON && !Q::EVAL_ALWAYS {
+    if !query.anon() && !query.eval_always() {
         // `to_dep_node` is expensive for some `DepKind`s.
         let dep_node =
-            dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key));
+            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));
 
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
         if let Some(ret) = qcx.start_query(job_id, false, None, || {
-            try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node)
+            try_load_from_disk_and_cache_in_memory(query, qcx, &key, &dep_node)
         }) {
             return ret;
         }
@@ -452,17 +455,24 @@ where
     let diagnostics = Lock::new(ThinVec::new());
 
     let (result, dep_node_index) =
-        qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || {
-            if Q::ANON {
-                return dep_graph
-                    .with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || Q::compute(qcx, key));
+        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
+            if query.anon() {
+                return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
+                    query.compute(qcx, key)
+                });
             }
 
             // `to_dep_node` is expensive for some `DepKind`s.
             let dep_node =
-                dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key));
+                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
 
-            dep_graph.with_task(dep_node, qcx, key, Q::compute, Q::HASH_RESULT)
+            dep_graph.with_task(
+                dep_node,
+                qcx,
+                (key, query),
+                |qcx, (key, query)| query.compute(qcx, key),
+                query.hash_result(),
+            )
         });
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -471,7 +481,7 @@ where
     let side_effects = QuerySideEffects { diagnostics };
 
     if std::intrinsics::unlikely(!side_effects.is_empty()) {
-        if Q::ANON {
+        if query.anon() {
             qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
         } else {
             qcx.store_side_effects(dep_node_index, side_effects);
@@ -483,6 +493,7 @@ where
 
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
     key: &Q::Key,
     dep_node: &DepNode<Qcx::DepKind>,
@@ -501,7 +512,7 @@ where
 
     // First we try to load the result from the on-disk cache.
     // Some things are never cached on disk.
-    if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) {
+    if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
         let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
 
         // The call to `with_query_deserialization` enforces that no new `DepNodes`
@@ -535,7 +546,7 @@ where
             if std::intrinsics::unlikely(
                 try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
             ) {
-                incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+                incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
             }
 
             return Some((result, dep_node_index));
@@ -554,7 +565,7 @@ where
     let prof_timer = qcx.dep_context().profiler().query_provider();
 
     // The dep-graph for this computation is already in-place.
-    let result = dep_graph.with_ignore(|| Q::compute(qcx, *key));
+    let result = dep_graph.with_ignore(|| query.compute(qcx, *key));
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -567,7 +578,7 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
     // an ICE by this check
-    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
 
     Some((result, dep_node_index))
 }
@@ -688,19 +699,23 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>)
+fn ensure_must_run<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    key: &Q::Key,
+) -> (bool, Option<DepNode<Qcx::DepKind>>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
-    if Q::EVAL_ALWAYS {
+    if query.eval_always() {
        return (true, None);
     }
 
     // Ensuring an anonymous query makes no sense
-    assert!(!Q::ANON);
+    assert!(!query.anon());
 
-    let dep_node = Q::construct_dep_node(*qcx.dep_context(), key);
+    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
 
     let dep_graph = qcx.dep_context().dep_graph();
     match dep_graph.try_mark_green(qcx, &dep_node) {
@@ -728,15 +743,19 @@ pub enum QueryMode {
 }
 
 #[inline(always)]
-pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
+pub fn get_query<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    span: Span,
+    key: Q::Key,
+    mode: QueryMode,
+) -> Option<Q::Value>
 where
-    D: DepKind,
     Q: QueryConfig<Qcx>,
-    Q::Value: Value<Qcx::DepContext, D>,
     Qcx: QueryContext,
 {
     let dep_node = if let QueryMode::Ensure = mode {
-        let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key);
+        let (must_run, dep_node) = ensure_must_run(query, qcx, &key);
         if !must_run {
             return None;
         }
@@ -746,28 +765,30 @@ where
     };
 
     let (result, dep_node_index) =
-        ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
+        ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
     if let Some(dep_node_index) = dep_node_index {
         qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
     Some(result)
 }
 
-pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
-where
-    D: DepKind,
+pub fn force_query<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    key: Q::Key,
+    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
+) where
     Q: QueryConfig<Qcx>,
-    Q::Value: Value<Qcx::DepContext, D>,
     Qcx: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
+    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }
 
-    debug_assert!(!Q::ANON);
+    debug_assert!(!query.anon());
 
-    ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
+    ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
 }
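
One detail from the plumbing changes above: `DepGraph::with_task` takes its task as a plain `fn` pointer rather than an arbitrary closure, so the query instance cannot simply be captured by the task. The call therefore packs `(key, query)` into the task argument and a non-capturing closure unpacks it. A small sketch of that pattern with a hypothetical fixed-signature `with_task` helper (toy types, not the real dep-graph API):

// Hypothetical helper whose callback is a plain fn pointer: one context value,
// one argument value. Any extra state has to travel inside `arg` itself.
fn with_task<Ctx: Copy, A, R>(ctx: Ctx, arg: A, task: fn(Ctx, A) -> R) -> R {
    // (the real DepGraph::with_task also records dependencies and hashes the result)
    task(ctx, arg)
}

#[derive(Clone, Copy)]
struct ToyQuery {
    bonus: u64, // stands in for per-instance query state
}

impl ToyQuery {
    fn compute(self, _tcx: u32, key: u64) -> u64 {
        key + self.bonus
    }
}

fn main() {
    let query = ToyQuery { bonus: 10 };
    let key = 32_u64;

    // Before the change, `with_task(ctx, key, Q::compute, ...)` worked because
    // `compute` was an associated function needing only `(ctx, key)`. Now the
    // instance is needed too, so key and query ride together in a tuple and a
    // non-capturing closure (coercible to a fn pointer) unpacks them:
    let result: u64 = with_task(7_u32, (key, query), |tcx, (key, query)| {
        query.compute(tcx, key)
    });
    assert_eq!(result, 42);
}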