1
Fork 0

Auto merge of #77871 - Julian-Wollersberger:less-query-context, r=oli-obk

Make fewer types generic over QueryContext

While trying to refactor `rustc_query_system::query::QueryContext` to make it dyn-safe, I noticed some smaller things:
* QueryConfig doesn't need to be generic over QueryContext
* ~~The `kind` field on QueryJobId is unused~~
* Some unnecessary where clauses
* Many types in `job.rs` were generic over `QueryContext` but only needed `QueryContext::Query`.
  If handle_cycle_error() could be refactored to not take `error: CycleError<CTX::Query>`, all those bounds could be removed as well.

Changing `find_cycle_in_stack()` in job.rs to not take a `tcx` argument is the only functional change here. Everything else is just updating type signatures. (aka compile-error driven development ^^)

~~Currently there is a weird bug where memory usage suddenly skyrockets when running UI tests. I'll investigate that tomorrow.
A perf run probably won't make sense before that is fixed.~~

EDIT: `kind` actually is used by `Eq`, and re-adding it fixed the memory issue.
This commit is contained in:
bors 2020-10-22 12:24:55 +00:00
commit 500ddc5efd
8 changed files with 216 additions and 174 deletions

View file

@ -40,7 +40,8 @@ impl QueryContext for TyCtxt<'tcx> {
fn try_collect_active_jobs( fn try_collect_active_jobs(
&self, &self,
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> { ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
{
self.queries.try_collect_active_jobs() self.queries.try_collect_active_jobs()
} }
@ -353,7 +354,7 @@ macro_rules! define_queries_inner {
$(pub type $name<$tcx> = $V;)* $(pub type $name<$tcx> = $V;)*
} }
$(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> { $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
type Key = $($K)*; type Key = $($K)*;
type Value = $V; type Value = $V;
type Stored = < type Stored = <
@ -372,7 +373,7 @@ macro_rules! define_queries_inner {
type Cache = query_storage!([$($modifiers)*][$($K)*, $V]); type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
#[inline(always)] #[inline(always)]
fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> { fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query, Self::Cache> {
&tcx.queries.$name &tcx.queries.$name
} }
@ -454,7 +455,7 @@ macro_rules! define_queries_inner {
#[inline(always)] #[inline(always)]
#[must_use] #[must_use]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored -> <queries::$name<$tcx> as QueryConfig>::Stored
{ {
self.at(DUMMY_SP).$name(key.into_query_param()) self.at(DUMMY_SP).$name(key.into_query_param())
})* })*
@ -493,7 +494,7 @@ macro_rules! define_queries_inner {
$($(#[$attr])* $($(#[$attr])*
#[inline(always)] #[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored -> <queries::$name<$tcx> as QueryConfig>::Stored
{ {
get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param()) get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
})* })*
@ -527,7 +528,8 @@ macro_rules! define_queries_struct {
fallback_extern_providers: Box<Providers>, fallback_extern_providers: Box<Providers>,
$($(#[$attr])* $name: QueryState< $($(#[$attr])* $name: QueryState<
TyCtxt<$tcx>, crate::dep_graph::DepKind,
<TyCtxt<$tcx> as QueryContext>::Query,
<queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache, <queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
>,)* >,)*
} }
@ -548,7 +550,7 @@ macro_rules! define_queries_struct {
pub(crate) fn try_collect_active_jobs( pub(crate) fn try_collect_active_jobs(
&self &self
) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> { ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, <TyCtxt<$tcx> as QueryContext>::Query>>> {
let mut jobs = FxHashMap::default(); let mut jobs = FxHashMap::default();
$( $(

View file

@ -5,8 +5,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler; use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData; use rustc_hir::definitions::DefPathData;
use rustc_query_system::query::QueryCache; use rustc_query_system::query::{QueryCache, QueryContext, QueryState};
use rustc_query_system::query::QueryState;
use std::fmt::Debug; use std::fmt::Debug;
use std::io::Write; use std::io::Write;
@ -231,7 +230,7 @@ where
pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
query_name: &'static str, query_name: &'static str,
query_state: &QueryState<TyCtxt<'tcx>, C>, query_state: &QueryState<crate::dep_graph::DepKind, <TyCtxt<'tcx> as QueryContext>::Query, C>,
string_cache: &mut QueryKeyStringCache, string_cache: &mut QueryKeyStringCache,
) where ) where
C: QueryCache, C: QueryCache,

View file

@ -1,11 +1,10 @@
use crate::ty::query::queries; use crate::ty::query::queries;
use crate::ty::TyCtxt; use crate::ty::TyCtxt;
use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_query_system::query::QueryCache; use rustc_query_system::query::{QueryAccessors, QueryCache, QueryContext, QueryState};
use rustc_query_system::query::QueryState;
use rustc_query_system::query::{QueryAccessors, QueryContext};
use std::any::type_name; use std::any::type_name;
use std::hash::Hash;
use std::mem; use std::mem;
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
@ -38,10 +37,12 @@ struct QueryStats {
local_def_id_keys: Option<usize>, local_def_id_keys: Option<usize>,
} }
fn stats<CTX: QueryContext, C: QueryCache>( fn stats<D, Q, C>(name: &'static str, map: &QueryState<D, Q, C>) -> QueryStats
name: &'static str, where
map: &QueryState<CTX, C>, D: Copy + Clone + Eq + Hash,
) -> QueryStats { Q: Clone,
C: QueryCache,
{
let mut stats = QueryStats { let mut stats = QueryStats {
name, name,
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@ -127,7 +128,8 @@ macro_rules! print_stats {
$($( $($(
queries.push(stats::< queries.push(stats::<
TyCtxt<'_>, crate::dep_graph::DepKind,
<TyCtxt<'_> as QueryContext>::Query,
<queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache, <queries::$name<'_> as QueryAccessors<TyCtxt<'_>>>::Cache,
>( >(
stringify!($name), stringify!($name),

View file

@ -1,12 +1,12 @@
use crate::dep_graph::DepNodeIndex; use crate::dep_graph::DepNodeIndex;
use crate::query::plumbing::{QueryLookup, QueryState}; use crate::query::plumbing::{QueryLookup, QueryState};
use crate::query::QueryContext;
use rustc_arena::TypedArena; use rustc_arena::TypedArena;
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::WorkerLocal; use rustc_data_structures::sync::WorkerLocal;
use std::default::Default; use std::default::Default;
use std::fmt::Debug;
use std::hash::Hash; use std::hash::Hash;
use std::marker::PhantomData; use std::marker::PhantomData;
@ -24,16 +24,16 @@ pub trait QueryStorage: Default {
} }
pub trait QueryCache: QueryStorage { pub trait QueryCache: QueryStorage {
type Key: Hash; type Key: Hash + Eq + Clone + Debug;
type Sharded: Default; type Sharded: Default;
/// Checks if the query is already computed and in the cache. /// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard, /// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need /// which will be used if the query is not in the cache and we need
/// to compute it. /// to compute it.
fn lookup<CTX: QueryContext, R, OnHit, OnMiss>( fn lookup<D, Q, R, OnHit, OnMiss>(
&self, &self,
state: &QueryState<CTX, Self>, state: &QueryState<D, Q, Self>,
key: Self::Key, key: Self::Key,
// `on_hit` can be called while holding a lock to the query state shard. // `on_hit` can be called while holding a lock to the query state shard.
on_hit: OnHit, on_hit: OnHit,
@ -41,7 +41,7 @@ pub trait QueryCache: QueryStorage {
) -> R ) -> R
where where
OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R, OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R,
OnMiss: FnOnce(Self::Key, QueryLookup<'_, CTX, Self::Key, Self::Sharded>) -> R; OnMiss: FnOnce(Self::Key, QueryLookup<'_, D, Q, Self::Key, Self::Sharded>) -> R;
fn complete( fn complete(
&self, &self,
@ -86,21 +86,25 @@ impl<K: Eq + Hash, V: Clone> QueryStorage for DefaultCache<K, V> {
} }
} }
impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> { impl<K, V> QueryCache for DefaultCache<K, V>
where
K: Eq + Hash + Clone + Debug,
V: Clone,
{
type Key = K; type Key = K;
type Sharded = FxHashMap<K, (V, DepNodeIndex)>; type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)] #[inline(always)]
fn lookup<CTX: QueryContext, R, OnHit, OnMiss>( fn lookup<D, Q, R, OnHit, OnMiss>(
&self, &self,
state: &QueryState<CTX, Self>, state: &QueryState<D, Q, Self>,
key: K, key: K,
on_hit: OnHit, on_hit: OnHit,
on_miss: OnMiss, on_miss: OnMiss,
) -> R ) -> R
where where
OnHit: FnOnce(&V, DepNodeIndex) -> R, OnHit: FnOnce(&V, DepNodeIndex) -> R,
OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R, OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{ {
let mut lookup = state.get_lookup(&key); let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock; let lock = &mut *lookup.lock;
@ -164,21 +168,24 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
} }
} }
impl<'tcx, K: Eq + Hash, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V> { impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
where
K: Eq + Hash + Clone + Debug,
{
type Key = K; type Key = K;
type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>; type Sharded = FxHashMap<K, &'tcx (V, DepNodeIndex)>;
#[inline(always)] #[inline(always)]
fn lookup<CTX: QueryContext, R, OnHit, OnMiss>( fn lookup<D, Q, R, OnHit, OnMiss>(
&self, &self,
state: &QueryState<CTX, Self>, state: &QueryState<D, Q, Self>,
key: K, key: K,
on_hit: OnHit, on_hit: OnHit,
on_miss: OnMiss, on_miss: OnMiss,
) -> R ) -> R
where where
OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R, OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
OnMiss: FnOnce(K, QueryLookup<'_, CTX, K, Self::Sharded>) -> R, OnMiss: FnOnce(K, QueryLookup<'_, D, Q, K, Self::Sharded>) -> R,
{ {
let mut lookup = state.get_lookup(&key); let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock; let lock = &mut *lookup.lock;

View file

@ -12,9 +12,7 @@ use std::borrow::Cow;
use std::fmt::Debug; use std::fmt::Debug;
use std::hash::Hash; use std::hash::Hash;
// The parameter `CTX` is required in librustc_middle: pub trait QueryConfig {
// implementations may need to access the `'tcx` lifetime in `CTX = TyCtxt<'tcx>`.
pub trait QueryConfig<CTX> {
const NAME: &'static str; const NAME: &'static str;
const CATEGORY: ProfileCategory; const CATEGORY: ProfileCategory;
@ -70,7 +68,7 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
} }
} }
pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> { pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
const ANON: bool; const ANON: bool;
const EVAL_ALWAYS: bool; const EVAL_ALWAYS: bool;
const DEP_KIND: CTX::DepKind; const DEP_KIND: CTX::DepKind;
@ -78,7 +76,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig<CTX> {
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>; type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt // Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX, Self::Cache>; fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Cache>;
fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind> fn to_dep_node(tcx: CTX, key: &Self::Key) -> DepNode<CTX::DepKind>
where where

View file

@ -1,16 +1,16 @@
use crate::dep_graph::{DepContext, DepKind};
use crate::query::plumbing::CycleError; use crate::query::plumbing::CycleError;
use crate::query::QueryContext;
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use rustc_span::Span; use rustc_span::Span;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::hash::Hash;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::num::NonZeroU32; use std::num::NonZeroU32;
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
use { use {
super::QueryContext,
parking_lot::{Condvar, Mutex}, parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet, rustc_data_structures::fx::FxHashSet,
rustc_data_structures::stable_hasher::{HashStable, StableHasher}, rustc_data_structures::stable_hasher::{HashStable, StableHasher},
@ -31,7 +31,7 @@ pub struct QueryInfo<Q> {
pub query: Q, pub query: Q,
} }
type QueryMap<CTX> = FxHashMap<QueryJobId<<CTX as DepContext>::DepKind>, QueryJobInfo<CTX>>; pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
/// A value uniquely identifiying an active query job within a shard in the query cache. /// A value uniquely identifiying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)] #[derive(Copy, Clone, Eq, PartialEq, Hash)]
@ -39,71 +39,75 @@ pub struct QueryShardJobId(pub NonZeroU32);
/// A value uniquely identifiying an active query job. /// A value uniquely identifiying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)] #[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<K> { pub struct QueryJobId<D> {
/// Which job within a shard is this /// Which job within a shard is this
pub job: QueryShardJobId, pub job: QueryShardJobId,
/// In which shard is this job /// In which shard is this job
pub shard: u16, pub shard: u16,
/// What kind of query this job is /// What kind of query this job is.
pub kind: K, pub kind: D,
} }
impl<K: DepKind> QueryJobId<K> { impl<D> QueryJobId<D>
pub fn new(job: QueryShardJobId, shard: usize, kind: K) -> Self { where
D: Copy + Clone + Eq + Hash,
{
pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind } QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
} }
fn query<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> CTX::Query { fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
map.get(&self).unwrap().info.query.clone() map.get(&self).unwrap().info.query.clone()
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn span<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Span { fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
map.get(&self).unwrap().job.span map.get(&self).unwrap().job.span
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn parent<CTX: QueryContext<DepKind = K>>(self, map: &QueryMap<CTX>) -> Option<QueryJobId<K>> { fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
map.get(&self).unwrap().job.parent map.get(&self).unwrap().job.parent
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn latch<'a, CTX: QueryContext<DepKind = K>>( fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
self,
map: &'a QueryMap<CTX>,
) -> Option<&'a QueryLatch<CTX>> {
map.get(&self).unwrap().job.latch.as_ref() map.get(&self).unwrap().job.latch.as_ref()
} }
} }
pub struct QueryJobInfo<CTX: QueryContext> { pub struct QueryJobInfo<D, Q> {
pub info: QueryInfo<CTX::Query>, pub info: QueryInfo<Q>,
pub job: QueryJob<CTX>, pub job: QueryJob<D, Q>,
} }
/// Represents an active query job. /// Represents an active query job.
#[derive(Clone)] #[derive(Clone)]
pub struct QueryJob<CTX: QueryContext> { pub struct QueryJob<D, Q> {
pub id: QueryShardJobId, pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required. /// The span corresponding to the reason for which this query was required.
pub span: Span, pub span: Span,
/// The parent query job which created this job and is implicitly waiting on it. /// The parent query job which created this job and is implicitly waiting on it.
pub parent: Option<QueryJobId<CTX::DepKind>>, pub parent: Option<QueryJobId<D>>,
/// The latch that is used to wait on this job. /// The latch that is used to wait on this job.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
latch: Option<QueryLatch<CTX>>, latch: Option<QueryLatch<D, Q>>,
dummy: PhantomData<QueryLatch<CTX>>, dummy: PhantomData<QueryLatch<D, Q>>,
} }
impl<CTX: QueryContext> QueryJob<CTX> { impl<D, Q> QueryJob<D, Q>
where
D: Copy + Clone + Eq + Hash,
Q: Clone,
{
/// Creates a new query job. /// Creates a new query job.
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<CTX::DepKind>>) -> Self { pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
QueryJob { QueryJob {
id, id,
span, span,
@ -115,7 +119,7 @@ impl<CTX: QueryContext> QueryJob<CTX> {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
pub(super) fn latch(&mut self, _id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> { pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
if self.latch.is_none() { if self.latch.is_none() {
self.latch = Some(QueryLatch::new()); self.latch = Some(QueryLatch::new());
} }
@ -123,7 +127,7 @@ impl<CTX: QueryContext> QueryJob<CTX> {
} }
#[cfg(not(parallel_compiler))] #[cfg(not(parallel_compiler))]
pub(super) fn latch(&mut self, id: QueryJobId<CTX::DepKind>) -> QueryLatch<CTX> { pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
QueryLatch { id, dummy: PhantomData } QueryLatch { id, dummy: PhantomData }
} }
@ -143,19 +147,26 @@ impl<CTX: QueryContext> QueryJob<CTX> {
#[cfg(not(parallel_compiler))] #[cfg(not(parallel_compiler))]
#[derive(Clone)] #[derive(Clone)]
pub(super) struct QueryLatch<CTX: QueryContext> { pub(super) struct QueryLatch<D, Q> {
id: QueryJobId<CTX::DepKind>, id: QueryJobId<D>,
dummy: PhantomData<CTX>, dummy: PhantomData<Q>,
} }
#[cfg(not(parallel_compiler))] #[cfg(not(parallel_compiler))]
impl<CTX: QueryContext> QueryLatch<CTX> { impl<D, Q> QueryLatch<D, Q>
pub(super) fn find_cycle_in_stack(&self, tcx: CTX, span: Span) -> CycleError<CTX::Query> { where
let query_map = tcx.try_collect_active_jobs().unwrap(); D: Copy + Clone + Eq + Hash,
Q: Clone,
// Get the current executing query (waiter) and find the waitee amongst its parents {
let mut current_job = tcx.current_query_job(); pub(super) fn find_cycle_in_stack(
&self,
query_map: QueryMap<D, Q>,
current_job: &Option<QueryJobId<D>>,
span: Span,
) -> CycleError<Q> {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new(); let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
while let Some(job) = current_job { while let Some(job) = current_job {
let info = query_map.get(&job).unwrap(); let info = query_map.get(&job).unwrap();
@ -186,15 +197,15 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
struct QueryWaiter<CTX: QueryContext> { struct QueryWaiter<D, Q> {
query: Option<QueryJobId<CTX::DepKind>>, query: Option<QueryJobId<D>>,
condvar: Condvar, condvar: Condvar,
span: Span, span: Span,
cycle: Lock<Option<CycleError<CTX::Query>>>, cycle: Lock<Option<CycleError<Q>>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryWaiter<CTX> { impl<D, Q> QueryWaiter<D, Q> {
fn notify(&self, registry: &rayon_core::Registry) { fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry); rayon_core::mark_unblocked(registry);
self.condvar.notify_one(); self.condvar.notify_one();
@ -202,19 +213,19 @@ impl<CTX: QueryContext> QueryWaiter<CTX> {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
struct QueryLatchInfo<CTX: QueryContext> { struct QueryLatchInfo<D, Q> {
complete: bool, complete: bool,
waiters: Vec<Lrc<QueryWaiter<CTX>>>, waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
#[derive(Clone)] #[derive(Clone)]
pub(super) struct QueryLatch<CTX: QueryContext> { pub(super) struct QueryLatch<D, Q> {
info: Lrc<Mutex<QueryLatchInfo<CTX>>>, info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> { impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
fn new() -> Self { fn new() -> Self {
QueryLatch { QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })), info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@ -223,10 +234,13 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
} }
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> { impl<D, Q> QueryLatch<D, Q> {
/// Awaits for the query job to complete. /// Awaits for the query job to complete.
pub(super) fn wait_on(&self, tcx: CTX, span: Span) -> Result<(), CycleError<CTX::Query>> { pub(super) fn wait_on(
let query = tcx.current_query_job(); &self,
query: Option<QueryJobId<D>>,
span: Span,
) -> Result<(), CycleError<Q>> {
let waiter = let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() }); Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter); self.wait_on_inner(&waiter);
@ -239,12 +253,9 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
Some(cycle) => Err(cycle), Some(cycle) => Err(cycle),
} }
} }
}
#[cfg(parallel_compiler)]
impl<CTX: QueryContext> QueryLatch<CTX> {
/// Awaits the caller on this latch by blocking the current thread. /// Awaits the caller on this latch by blocking the current thread.
fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<CTX>>) { fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
let mut info = self.info.lock(); let mut info = self.info.lock();
if !info.complete { if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside // We push the waiter on to the `waiters` list. It can be accessed inside
@ -278,7 +289,7 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
/// Removes a single waiter from the list of waiters. /// Removes a single waiter from the list of waiters.
/// This is used to break query cycles. /// This is used to break query cycles.
fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<CTX>> { fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
let mut info = self.info.lock(); let mut info = self.info.lock();
debug_assert!(!info.complete); debug_assert!(!info.complete);
// Remove the waiter from the list of waiters // Remove the waiter from the list of waiters
@ -288,7 +299,7 @@ impl<CTX: QueryContext> QueryLatch<CTX> {
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch /// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
type Waiter<K> = (QueryJobId<K>, usize); type Waiter<D> = (QueryJobId<D>, usize);
/// Visits all the non-resumable and resumable waiters of a query. /// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited. /// Only waiters in a query are visited.
@ -300,13 +311,15 @@ type Waiter<K> = (QueryJobId<K>, usize);
/// required information to resume the waiter. /// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None. /// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn visit_waiters<CTX: QueryContext, F>( fn visit_waiters<D, Q, F>(
query_map: &QueryMap<CTX>, query_map: &QueryMap<D, Q>,
query: QueryJobId<CTX::DepKind>, query: QueryJobId<D>,
mut visit: F, mut visit: F,
) -> Option<Option<Waiter<CTX::DepKind>>> ) -> Option<Option<Waiter<D>>>
where where
F: FnMut(Span, QueryJobId<CTX::DepKind>) -> Option<Option<Waiter<CTX::DepKind>>>, D: Copy + Clone + Eq + Hash,
Q: Clone,
F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{ {
// Visit the parent query which is a non-resumable waiter since it's on the same stack // Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) { if let Some(parent) = query.parent(query_map) {
@ -335,13 +348,17 @@ where
/// If a cycle is detected, this initial value is replaced with the span causing /// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle. /// the cycle.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn cycle_check<CTX: QueryContext>( fn cycle_check<D, Q>(
query_map: &QueryMap<CTX>, query_map: &QueryMap<D, Q>,
query: QueryJobId<CTX::DepKind>, query: QueryJobId<D>,
span: Span, span: Span,
stack: &mut Vec<(Span, QueryJobId<CTX::DepKind>)>, stack: &mut Vec<(Span, QueryJobId<D>)>,
visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>, visited: &mut FxHashSet<QueryJobId<D>>,
) -> Option<Option<Waiter<CTX::DepKind>>> { ) -> Option<Option<Waiter<D>>>
where
D: Copy + Clone + Eq + Hash,
Q: Clone,
{
if !visited.insert(query) { if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) { return if let Some(p) = stack.iter().position(|q| q.1 == query) {
// We detected a query cycle, fix up the initial span and return Some // We detected a query cycle, fix up the initial span and return Some
@ -376,11 +393,15 @@ fn cycle_check<CTX: QueryContext>(
/// from `query` without going through any of the queries in `visited`. /// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search. /// This is achieved with a depth first search.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn connected_to_root<CTX: QueryContext>( fn connected_to_root<D, Q>(
query_map: &QueryMap<CTX>, query_map: &QueryMap<D, Q>,
query: QueryJobId<CTX::DepKind>, query: QueryJobId<D>,
visited: &mut FxHashSet<QueryJobId<CTX::DepKind>>, visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool { ) -> bool
where
D: Copy + Clone + Eq + Hash,
Q: Clone,
{
// We already visited this or we're deliberately ignoring it // We already visited this or we're deliberately ignoring it
if !visited.insert(query) { if !visited.insert(query) {
return false; return false;
@ -399,7 +420,12 @@ fn connected_to_root<CTX: QueryContext>(
// Deterministically pick an query from a list // Deterministically pick an query from a list
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn pick_query<'a, CTX, T, F>(query_map: &QueryMap<CTX>, tcx: CTX, queries: &'a [T], f: F) -> &'a T fn pick_query<'a, CTX, T, F>(
query_map: &QueryMap<CTX::DepKind, CTX::Query>,
tcx: CTX,
queries: &'a [T],
f: F,
) -> &'a T
where where
CTX: QueryContext, CTX: QueryContext,
F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>), F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
@ -429,9 +455,9 @@ where
/// the function returns false. /// the function returns false.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
fn remove_cycle<CTX: QueryContext>( fn remove_cycle<CTX: QueryContext>(
query_map: &QueryMap<CTX>, query_map: &QueryMap<CTX::DepKind, CTX::Query>,
jobs: &mut Vec<QueryJobId<CTX::DepKind>>, jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
wakelist: &mut Vec<Lrc<QueryWaiter<CTX>>>, wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
tcx: CTX, tcx: CTX,
) -> bool { ) -> bool {
let mut visited = FxHashSet::default(); let mut visited = FxHashSet::default();

View file

@ -15,8 +15,8 @@ mod config;
pub use self::config::{QueryAccessors, QueryConfig, QueryDescription}; pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
use crate::dep_graph::{DepContext, DepGraph}; use crate::dep_graph::{DepContext, DepGraph};
use crate::query::job::QueryMap;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::stable_hasher::HashStable;
use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec; use rustc_data_structures::thin_vec::ThinVec;
@ -38,9 +38,7 @@ pub trait QueryContext: DepContext {
/// Get the query information from the TLS context. /// Get the query information from the TLS context.
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>; fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
fn try_collect_active_jobs( fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
&self,
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>>;
/// Executes a job by changing the `ImplicitCtxt` to point to the /// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics /// new query job while it executes. It returns the diagnostics

View file

@ -7,7 +7,7 @@ use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
use crate::query::caches::QueryCache; use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt}; use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId}; use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::query::QueryContext; use crate::query::{QueryContext, QueryMap};
#[cfg(not(parallel_compiler))] #[cfg(not(parallel_compiler))]
use rustc_data_structures::cold_path; use rustc_data_structures::cold_path;
@ -20,8 +20,6 @@ use rustc_errors::{Diagnostic, FatalError};
use rustc_span::source_map::DUMMY_SP; use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span; use rustc_span::Span;
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::mem; use std::mem;
use std::num::NonZeroU32; use std::num::NonZeroU32;
@ -29,33 +27,33 @@ use std::ptr;
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
pub(super) struct QueryStateShard<CTX: QueryContext, K, C> { pub(super) struct QueryStateShard<D, Q, K, C> {
pub(super) cache: C, pub(super) cache: C,
active: FxHashMap<K, QueryResult<CTX>>, active: FxHashMap<K, QueryResult<D, Q>>,
/// Used to generate unique ids for active jobs. /// Used to generate unique ids for active jobs.
jobs: u32, jobs: u32,
} }
impl<CTX: QueryContext, K, C: Default> Default for QueryStateShard<CTX, K, C> { impl<D, Q, K, C: Default> Default for QueryStateShard<D, Q, K, C> {
fn default() -> QueryStateShard<CTX, K, C> { fn default() -> QueryStateShard<D, Q, K, C> {
QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 } QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
} }
} }
pub struct QueryState<CTX: QueryContext, C: QueryCache> { pub struct QueryState<D, Q, C: QueryCache> {
cache: C, cache: C,
shards: Sharded<QueryStateShard<CTX, C::Key, C::Sharded>>, shards: Sharded<QueryStateShard<D, Q, C::Key, C::Sharded>>,
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
pub cache_hits: AtomicUsize, pub cache_hits: AtomicUsize,
} }
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> { impl<D, Q, C: QueryCache> QueryState<D, Q, C> {
#[inline] #[inline]
pub(super) fn get_lookup<'tcx>( pub(super) fn get_lookup<'tcx>(
&'tcx self, &'tcx self,
key: &C::Key, key: &C::Key,
) -> QueryLookup<'tcx, CTX, C::Key, C::Sharded> { ) -> QueryLookup<'tcx, D, Q, C::Key, C::Sharded> {
// We compute the key's hash once and then use it for both the // We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact // shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`. // that both of them use `FxHasher`.
@ -70,16 +68,21 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
} }
/// Indicates the state of a query for a given key in a query map. /// Indicates the state of a query for a given key in a query map.
enum QueryResult<CTX: QueryContext> { enum QueryResult<D, Q> {
/// An already executing query. The query job can be used to await for its completion. /// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<CTX>), Started(QueryJob<D, Q>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will /// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic. /// silently panic.
Poisoned, Poisoned,
} }
impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> { impl<D, Q, C> QueryState<D, Q, C>
where
D: Copy + Clone + Eq + Hash,
Q: Clone,
C: QueryCache,
{
#[inline(always)] #[inline(always)]
pub fn iter_results<R>( pub fn iter_results<R>(
&self, &self,
@ -98,13 +101,10 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
pub fn try_collect_active_jobs( pub fn try_collect_active_jobs(
&self, &self,
kind: CTX::DepKind, kind: D,
make_query: fn(C::Key) -> CTX::Query, make_query: fn(C::Key) -> Q,
jobs: &mut FxHashMap<QueryJobId<CTX::DepKind>, QueryJobInfo<CTX>>, jobs: &mut QueryMap<D, Q>,
) -> Option<()> ) -> Option<()> {
where
C::Key: Clone,
{
// We use try_lock_shards here since we are called from the // We use try_lock_shards here since we are called from the
// deadlock handler, and this shouldn't be locked. // deadlock handler, and this shouldn't be locked.
let shards = self.shards.try_lock_shards()?; let shards = self.shards.try_lock_shards()?;
@ -112,8 +112,7 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
jobs.extend(shards.flat_map(|(shard_id, shard)| { jobs.extend(shards.flat_map(|(shard_id, shard)| {
shard.active.iter().filter_map(move |(k, v)| { shard.active.iter().filter_map(move |(k, v)| {
if let QueryResult::Started(ref job) = *v { if let QueryResult::Started(ref job) = *v {
let id = let id = QueryJobId::new(job.id, shard_id, kind);
QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
let info = QueryInfo { span: job.span, query: make_query(k.clone()) }; let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
Some((id, QueryJobInfo { info, job: job.clone() })) Some((id, QueryJobInfo { info, job: job.clone() }))
} else { } else {
@ -126,8 +125,8 @@ impl<CTX: QueryContext, C: QueryCache> QueryState<CTX, C> {
} }
} }
impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> { impl<D, Q, C: QueryCache> Default for QueryState<D, Q, C> {
fn default() -> QueryState<CTX, C> { fn default() -> QueryState<D, Q, C> {
QueryState { QueryState {
cache: C::default(), cache: C::default(),
shards: Default::default(), shards: Default::default(),
@ -138,28 +137,30 @@ impl<CTX: QueryContext, C: QueryCache> Default for QueryState<CTX, C> {
} }
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query. /// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub struct QueryLookup<'tcx, CTX: QueryContext, K, C> { pub struct QueryLookup<'tcx, D, Q, K, C> {
pub(super) key_hash: u64, pub(super) key_hash: u64,
shard: usize, shard: usize,
pub(super) lock: LockGuard<'tcx, QueryStateShard<CTX, K, C>>, pub(super) lock: LockGuard<'tcx, QueryStateShard<D, Q, K, C>>,
} }
/// A type representing the responsibility to execute the job in the `job` field. /// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped. /// This will poison the relevant query if dropped.
struct JobOwner<'tcx, CTX: QueryContext, C> struct JobOwner<'tcx, D, Q, C>
where where
D: Copy + Clone + Eq + Hash,
Q: Clone,
C: QueryCache, C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
{ {
state: &'tcx QueryState<CTX, C>, state: &'tcx QueryState<D, Q, C>,
key: C::Key, key: C::Key,
id: QueryJobId<CTX::DepKind>, id: QueryJobId<D>,
} }
impl<'tcx, CTX: QueryContext, C> JobOwner<'tcx, CTX, C> impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
where where
D: Copy + Clone + Eq + Hash,
Q: Clone,
C: QueryCache, C: QueryCache,
C::Key: Eq + Hash + Clone + Debug,
{ {
/// Either gets a `JobOwner` corresponding the query, allowing us to /// Either gets a `JobOwner` corresponding the query, allowing us to
/// start executing the query, or returns with the result of the query. /// start executing the query, or returns with the result of the query.
@ -170,14 +171,14 @@ where
/// This function is inlined because that results in a noticeable speed-up /// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks. /// for some compile-time benchmarks.
#[inline(always)] #[inline(always)]
fn try_start<'a, 'b>( fn try_start<'a, 'b, CTX>(
tcx: CTX, tcx: CTX,
state: &'b QueryState<CTX, C>, state: &'b QueryState<CTX::DepKind, CTX::Query, C>,
span: Span, span: Span,
key: &C::Key, key: &C::Key,
mut lookup: QueryLookup<'a, CTX, C::Key, C::Sharded>, mut lookup: QueryLookup<'a, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
) -> TryGetJob<'b, CTX, C> ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
where where
CTX: QueryContext, CTX: QueryContext,
{ {
@ -229,7 +230,12 @@ where
// so we just return the error. // so we just return the error.
#[cfg(not(parallel_compiler))] #[cfg(not(parallel_compiler))]
return TryGetJob::Cycle(cold_path(|| { return TryGetJob::Cycle(cold_path(|| {
let value = query.handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span)); let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
tcx.try_collect_active_jobs().unwrap(),
&tcx.current_query_job(),
span,
);
let value = query.handle_cycle_error(tcx, error);
state.cache.store_nocache(value) state.cache.store_nocache(value)
})); }));
@ -237,7 +243,7 @@ where
// thread. // thread.
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
{ {
let result = latch.wait_on(tcx, span); let result = latch.wait_on(tcx.current_query_job(), span);
if let Err(cycle) = result { if let Err(cycle) = result {
let value = query.handle_cycle_error(tcx, cycle); let value = query.handle_cycle_error(tcx, cycle);
@ -297,9 +303,11 @@ where
(result, diagnostics.into_inner()) (result, diagnostics.into_inner())
} }
impl<'tcx, CTX: QueryContext, C: QueryCache> Drop for JobOwner<'tcx, CTX, C> impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
where where
C::Key: Eq + Hash + Clone + Debug, D: Copy + Clone + Eq + Hash,
Q: Clone,
C: QueryCache,
{ {
#[inline(never)] #[inline(never)]
#[cold] #[cold]
@ -330,12 +338,14 @@ pub struct CycleError<Q> {
} }
/// The result of `try_start`. /// The result of `try_start`.
enum TryGetJob<'tcx, CTX: QueryContext, C: QueryCache> enum TryGetJob<'tcx, D, Q, C>
where where
C::Key: Eq + Hash + Clone + Debug, D: Copy + Clone + Eq + Hash,
Q: Clone,
C: QueryCache,
{ {
/// The query is not yet started. Contains a guard to the cache eventually used to start it. /// The query is not yet started. Contains a guard to the cache eventually used to start it.
NotYetStarted(JobOwner<'tcx, CTX, C>), NotYetStarted(JobOwner<'tcx, D, Q, C>),
/// The query was already completed. /// The query was already completed.
/// Returns the result of the query and its dep-node index /// Returns the result of the query and its dep-node index
@ -354,7 +364,7 @@ where
#[inline(always)] #[inline(always)]
fn try_get_cached<CTX, C, R, OnHit, OnMiss>( fn try_get_cached<CTX, C, R, OnHit, OnMiss>(
tcx: CTX, tcx: CTX,
state: &QueryState<CTX, C>, state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key, key: C::Key,
// `on_hit` can be called while holding a lock to the query cache // `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit, on_hit: OnHit,
@ -364,7 +374,7 @@ where
C: QueryCache, C: QueryCache,
CTX: QueryContext, CTX: QueryContext,
OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R, OnHit: FnOnce(&C::Stored, DepNodeIndex) -> R,
OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX, C::Key, C::Sharded>) -> R, OnMiss: FnOnce(C::Key, QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>) -> R,
{ {
state.cache.lookup( state.cache.lookup(
state, state,
@ -386,19 +396,20 @@ where
#[inline(always)] #[inline(always)]
fn try_execute_query<CTX, C>( fn try_execute_query<CTX, C>(
tcx: CTX, tcx: CTX,
state: &QueryState<CTX, C>, state: &QueryState<CTX::DepKind, CTX::Query, C>,
span: Span, span: Span,
key: C::Key, key: C::Key,
lookup: QueryLookup<'_, CTX, C::Key, C::Sharded>, lookup: QueryLookup<'_, CTX::DepKind, CTX::Query, C::Key, C::Sharded>,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
) -> C::Stored ) -> C::Stored
where where
C: QueryCache, C: QueryCache,
C::Key: Eq + Clone + Debug + crate::dep_graph::DepNodeParams<CTX>, C::Key: crate::dep_graph::DepNodeParams<CTX>,
C::Stored: Clone,
CTX: QueryContext, CTX: QueryContext,
{ {
let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) { let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
tcx, state, span, &key, lookup, query,
) {
TryGetJob::NotYetStarted(job) => job, TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result, TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]
@ -559,14 +570,12 @@ fn incremental_verify_ich<CTX, K, V>(
fn force_query_with_job<C, CTX>( fn force_query_with_job<C, CTX>(
tcx: CTX, tcx: CTX,
key: C::Key, key: C::Key,
job: JobOwner<'_, CTX, C>, job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
dep_node: DepNode<CTX::DepKind>, dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
) -> (C::Stored, DepNodeIndex) ) -> (C::Stored, DepNodeIndex)
where where
C: QueryCache, C: QueryCache,
C::Key: Eq + Clone + Debug,
C::Stored: Clone,
CTX: QueryContext, CTX: QueryContext,
{ {
// If the following assertion triggers, it can have two reasons: // If the following assertion triggers, it can have two reasons:
@ -617,7 +626,7 @@ where
#[inline(never)] #[inline(never)]
fn get_query_impl<CTX, C>( fn get_query_impl<CTX, C>(
tcx: CTX, tcx: CTX,
state: &QueryState<CTX, C>, state: &QueryState<CTX::DepKind, CTX::Query, C>,
span: Span, span: Span,
key: C::Key, key: C::Key,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
@ -625,8 +634,7 @@ fn get_query_impl<CTX, C>(
where where
CTX: QueryContext, CTX: QueryContext,
C: QueryCache, C: QueryCache,
C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>, C::Key: crate::dep_graph::DepNodeParams<CTX>,
C::Stored: Clone,
{ {
try_get_cached( try_get_cached(
tcx, tcx,
@ -650,12 +658,12 @@ where
#[inline(never)] #[inline(never)]
fn ensure_query_impl<CTX, C>( fn ensure_query_impl<CTX, C>(
tcx: CTX, tcx: CTX,
state: &QueryState<CTX, C>, state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key, key: C::Key,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
) where ) where
C: QueryCache, C: QueryCache,
C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>, C::Key: crate::dep_graph::DepNodeParams<CTX>,
CTX: QueryContext, CTX: QueryContext,
{ {
if query.eval_always { if query.eval_always {
@ -687,14 +695,14 @@ fn ensure_query_impl<CTX, C>(
#[inline(never)] #[inline(never)]
fn force_query_impl<CTX, C>( fn force_query_impl<CTX, C>(
tcx: CTX, tcx: CTX,
state: &QueryState<CTX, C>, state: &QueryState<CTX::DepKind, CTX::Query, C>,
key: C::Key, key: C::Key,
span: Span, span: Span,
dep_node: DepNode<CTX::DepKind>, dep_node: DepNode<CTX::DepKind>,
query: &QueryVtable<CTX, C::Key, C::Value>, query: &QueryVtable<CTX, C::Key, C::Value>,
) where ) where
C: QueryCache, C: QueryCache,
C::Key: Eq + Clone + crate::dep_graph::DepNodeParams<CTX>, C::Key: crate::dep_graph::DepNodeParams<CTX>,
CTX: QueryContext, CTX: QueryContext,
{ {
// We may be concurrently trying both execute and force a query. // We may be concurrently trying both execute and force a query.
@ -708,7 +716,9 @@ fn force_query_impl<CTX, C>(
// Cache hit, do nothing // Cache hit, do nothing
}, },
|key, lookup| { |key, lookup| {
let job = match JobOwner::try_start(tcx, state, span, &key, lookup, query) { let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
tcx, state, span, &key, lookup, query,
) {
TryGetJob::NotYetStarted(job) => job, TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return, TryGetJob::Cycle(_) => return,
#[cfg(parallel_compiler)] #[cfg(parallel_compiler)]