
Move Query to rustc_query_system.

Rename it to QueryStackFrame and document a bit.
Author: Camille GILLOT
Date:   2020-11-28 22:48:05 +01:00
parent  0144d6a3b7
commit  3897395787
6 changed files with 140 additions and 138 deletions
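
For orientation, this is roughly the shape of the renamed type after the move, condensed from the rustc_query_system additions in the hunks below (a sketch only; derives, inline attributes, and the HashStable impl are abridged):

    use rustc_data_structures::fingerprint::Fingerprint;
    use rustc_span::Span;

    /// Description of a frame in the query stack.
    ///
    /// This is mostly used in case of cycles for error reporting.
    pub struct QueryStackFrame {
        pub name: &'static str,
        pub description: String,
        span: Option<Span>,
        /// Used to deterministically pick a query when breaking cycles
        /// in the parallel compiler.
        hash: Fingerprint,
    }

    impl QueryStackFrame {
        pub fn new(name: &'static str, description: String, span: Option<Span>, hash: Fingerprint) -> Self {
            Self { name, hash, description, span }
        }

        /// Falls back to the frame's own span when the caller passes a dummy span.
        pub fn default_span(&self, span: Span) -> Span {
            if !span.is_dummy() {
                return span;
            }
            self.span.unwrap_or(span)
        }
    }

With this change queries no longer carry a `Query` associated type; each query instead gets an eponymous constructor in `mod make_query` that builds a `QueryStackFrame`, and active-job tracking goes through the `QueryMap<D>` alias now re-exported from `rustc_query_system::query`.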

File 1 of 6

@@ -17,7 +17,6 @@ extern crate rustc_middle;
 extern crate tracing;
 
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_errors::{Diagnostic, Handler, Level};
 use rustc_hir::def_id::CrateNum;

File 2 of 6

@@ -2,16 +2,15 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use super::{queries, Query};
+use super::queries;
 
 use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::query::on_disk_cache;
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
-use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
-use rustc_query_system::query::{QueryContext, QueryDescription};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_query_system::query::{CycleError, QueryJobId};
+use rustc_query_system::query::{QueryContext, QueryDescription, QueryMap, QueryStackFrame};
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder};
@@ -45,8 +44,6 @@ impl HasDepContext for QueryCtxt<'tcx> {
 }
 
 impl QueryContext for QueryCtxt<'tcx> {
-    type Query = Query;
-
     fn def_path_str(&self, def_id: DefId) -> String {
         self.tcx.def_path_str(def_id)
     }
@@ -55,10 +52,7 @@ impl QueryContext for QueryCtxt<'tcx> {
         tls::with_related_context(**self, |icx| icx.query)
     }
 
-    fn try_collect_active_jobs(
-        &self,
-    ) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self::DepKind, Self::Query>>>
-    {
+    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>> {
         self.queries.try_collect_active_jobs(**self)
     }
 
@@ -185,11 +179,11 @@ impl<'tcx> QueryCtxt<'tcx> {
     #[cold]
     pub(super) fn report_cycle(
         self,
-        CycleError { usage, cycle: stack }: CycleError<Query>,
+        CycleError { usage, cycle: stack }: CycleError,
     ) -> DiagnosticBuilder<'tcx> {
         assert!(!stack.is_empty());
 
-        let fix_span = |span: Span, query: &Query| {
+        let fix_span = |span: Span, query: &QueryStackFrame| {
             self.sess.source_map().guess_head_span(query.default_span(span))
         };
 
@@ -371,17 +365,12 @@ macro_rules! define_queries {
             input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
         }
 
-        #[derive(Clone, Debug)]
-        pub struct Query {
-            pub name: &'static str,
-            hash: Fingerprint,
-            description: String,
-            span: Option<Span>,
-        }
-
-        impl Query {
+        mod make_query {
+            use super::*;
+
+            // Create an eponymous constructor for each query.
             $(#[allow(nonstandard_style)] $(#[$attr])*
-            pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> Self {
+            pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> QueryStackFrame {
                 let kind = dep_graph::DepKind::$name;
                 let name = stringify!($name);
                 let description = ty::print::with_forced_impl_filename_line(
@@ -408,22 +397,8 @@ macro_rules! define_queries {
                     hasher.finish()
                 };
 
-                Self { name, description, span, hash }
+                QueryStackFrame::new(name, description, span, hash)
             })*
-
-            // FIXME(eddyb) Get more valid `Span`s on queries.
-            pub fn default_span(&self, span: Span) -> Span {
-                if !span.is_dummy() {
-                    return span;
-                }
-                self.span.unwrap_or(span)
-            }
-        }
-
-        impl<'a> HashStable<StableHashingContext<'a>> for Query {
-            fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
-                self.hash.hash_stable(hcx, hasher)
-            }
-        }
         }
 
         #[allow(nonstandard_style)]
@@ -450,7 +425,7 @@ macro_rules! define_queries {
             type Cache = query_storage::$name<$tcx>;
 
             #[inline(always)]
-            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Query, Self::Key>
+            fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<crate::dep_graph::DepKind, Self::Key>
                 where QueryCtxt<$tcx>: 'a
             {
                 &tcx.queries.$name
@@ -484,7 +459,7 @@ macro_rules! define_queries {
 
             fn handle_cycle_error(
                 tcx: QueryCtxt<'tcx>,
-                error: CycleError<Query>
+                error: CycleError,
             ) -> Self::Value {
                 handle_cycle_error!([$($modifiers)*][tcx, error])
             }
@@ -587,7 +562,6 @@ macro_rules! define_queries_struct {
 
         $($(#[$attr])* $name: QueryState<
             crate::dep_graph::DepKind,
-            Query,
             query_keys::$name<$tcx>,
         >,)*
     }
@@ -607,15 +581,15 @@ macro_rules! define_queries_struct {
         pub(crate) fn try_collect_active_jobs(
             &$tcx self,
             tcx: TyCtxt<$tcx>,
-        ) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<crate::dep_graph::DepKind, Query>>> {
+        ) -> Option<QueryMap<crate::dep_graph::DepKind>> {
             let tcx = QueryCtxt { tcx, queries: self };
-            let mut jobs = FxHashMap::default();
+            let mut jobs = QueryMap::default();
 
             $(
                 self.$name.try_collect_active_jobs(
                     tcx,
                     dep_graph::DepKind::$name,
-                    Query::$name,
+                    make_query::$name,
                     &mut jobs,
                 )?;
             )*

File 3 of 6

@@ -27,7 +27,7 @@ pub(crate) struct QueryVtable<CTX: QueryContext, K, V> {
     pub compute: fn(CTX, K) -> V,
 
     pub hash_result: fn(&mut CTX::StableHashingContext, &V) -> Option<Fingerprint>,
-    pub handle_cycle_error: fn(CTX, CycleError<CTX::Query>) -> V,
+    pub handle_cycle_error: fn(CTX, CycleError) -> V,
     pub cache_on_disk: fn(CTX, &K, Option<&V>) -> bool,
     pub try_load_from_disk: fn(CTX, SerializedDepNodeIndex) -> Option<V>,
 }
@@ -52,7 +52,7 @@ impl<CTX: QueryContext, K, V> QueryVtable<CTX, K, V> {
         (self.hash_result)(hcx, value)
     }
 
-    pub(crate) fn handle_cycle_error(&self, tcx: CTX, error: CycleError<CTX::Query>) -> V {
+    pub(crate) fn handle_cycle_error(&self, tcx: CTX, error: CycleError) -> V {
         (self.handle_cycle_error)(tcx, error)
     }
 
@@ -73,7 +73,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
     type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, CTX::Query, Self::Key>
+    fn query_state<'a>(tcx: CTX) -> &'a QueryState<CTX::DepKind, Self::Key>
     where
         CTX: 'a;
 
@@ -90,7 +90,7 @@ pub trait QueryAccessors<CTX: QueryContext>: QueryConfig {
         result: &Self::Value,
     ) -> Option<Fingerprint>;
 
-    fn handle_cycle_error(tcx: CTX, error: CycleError<CTX::Query>) -> Self::Value;
+    fn handle_cycle_error(tcx: CTX, error: CycleError) -> Self::Value;
 }
 
 pub trait QueryDescription<CTX: QueryContext>: QueryAccessors<CTX> {

File 4 of 6

@@ -1,4 +1,5 @@
 use crate::query::plumbing::CycleError;
+use crate::query::QueryStackFrame;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_span::Span;
@@ -26,13 +27,13 @@ use {
 /// Represents a span and a query key.
 #[derive(Clone, Debug)]
-pub struct QueryInfo<Q> {
+pub struct QueryInfo {
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
-    pub query: Q,
+    pub query: QueryStackFrame,
 }
 
-pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
+pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
 
 /// A value uniquely identifying an active query job within a shard in the query cache.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
@@ -59,34 +60,34 @@ where
         QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
     }
 
-    fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
+    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
         map.get(&self).unwrap().info.query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
+    fn span(self, map: &QueryMap<D>) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
+    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
+    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
-pub struct QueryJobInfo<D, Q> {
-    pub info: QueryInfo<Q>,
-    pub job: QueryJob<D, Q>,
+pub struct QueryJobInfo<D> {
+    pub info: QueryInfo,
+    pub job: QueryJob<D>,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D, Q> {
+pub struct QueryJob<D> {
     pub id: QueryShardJobId,
 
     /// The span corresponding to the reason for which this query was required.
@@ -97,15 +98,14 @@ pub struct QueryJob<D, Q> {
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D, Q>>,
+    latch: Option<QueryLatch<D>>,
 
-    dummy: PhantomData<QueryLatch<D, Q>>,
+    dummy: PhantomData<QueryLatch<D>>,
 }
 
-impl<D, Q> QueryJob<D, Q>
+impl<D> QueryJob<D>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     /// Creates a new query job.
     pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
@@ -120,7 +120,7 @@ where
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
+    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -128,8 +128,8 @@ where
     }
 
     #[cfg(not(parallel_compiler))]
-    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
-        QueryLatch { id, dummy: PhantomData }
+    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
+        QueryLatch { id }
     }
 
     /// Signals to waiters that the query is complete.
@@ -148,23 +148,21 @@ where
 #[cfg(not(parallel_compiler))]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
+pub(super) struct QueryLatch<D> {
     id: QueryJobId<D>,
-    dummy: PhantomData<Q>,
 }
 
 #[cfg(not(parallel_compiler))]
-impl<D, Q> QueryLatch<D, Q>
+impl<D> QueryLatch<D>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D, Q>,
+        query_map: QueryMap<D>,
         current_job: &Option<QueryJobId<D>>,
         span: Span,
-    ) -> CycleError<Q> {
+    ) -> CycleError {
         // Find the waitee amongst `current_job` parents
         let mut cycle = Vec::new();
         let mut current_job = Option::clone(current_job);
@@ -198,15 +196,15 @@ where
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D, Q> {
+struct QueryWaiter<D> {
     query: Option<QueryJobId<D>>,
     condvar: Condvar,
     span: Span,
-    cycle: Lock<Option<CycleError<Q>>>,
+    cycle: Lock<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D, Q> QueryWaiter<D, Q> {
+impl<D> QueryWaiter<D> {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -214,19 +212,19 @@ impl<D, Q> QueryWaiter<D, Q> {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D, Q> {
+struct QueryLatchInfo<D> {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
+    waiters: Vec<Lrc<QueryWaiter<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D, Q> {
-    info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
+pub(super) struct QueryLatch<D> {
+    info: Lrc<Mutex<QueryLatchInfo<D>>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
+impl<D: Eq + Hash> QueryLatch<D> {
     fn new() -> Self {
         QueryLatch {
             info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -235,13 +233,13 @@ impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
 }
 
 #[cfg(parallel_compiler)]
-impl<D, Q> QueryLatch<D, Q> {
+impl<D> QueryLatch<D> {
     /// Awaits for the query job to complete.
     pub(super) fn wait_on(
         &self,
         query: Option<QueryJobId<D>>,
         span: Span,
-    ) -> Result<(), CycleError<Q>> {
+    ) -> Result<(), CycleError> {
         let waiter =
             Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -256,7 +254,7 @@ impl<D, Q> QueryLatch<D, Q> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
+    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -290,7 +288,7 @@ impl<D, Q> QueryLatch<D, Q> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
+    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -312,14 +310,13 @@ type Waiter<D> = (QueryJobId<D>, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<D, Q, F>(
-    query_map: &QueryMap<D, Q>,
+fn visit_waiters<D, F>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     mut visit: F,
 ) -> Option<Option<Waiter<D>>>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
@@ -349,8 +346,8 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D, Q>(
-    query_map: &QueryMap<D, Q>,
+fn cycle_check<D>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     span: Span,
     stack: &mut Vec<(Span, QueryJobId<D>)>,
@@ -358,7 +355,6 @@ fn cycle_check<D, Q>(
 ) -> Option<Option<Waiter<D>>>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     if !visited.insert(query) {
         return if let Some(p) = stack.iter().position(|q| q.1 == query) {
@@ -394,14 +390,13 @@ where
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D, Q>(
-    query_map: &QueryMap<D, Q>,
+fn connected_to_root<D>(
+    query_map: &QueryMap<D>,
     query: QueryJobId<D>,
     visited: &mut FxHashSet<QueryJobId<D>>,
 ) -> bool
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
 {
     // We already visited this or we're deliberately ignoring it
     if !visited.insert(query) {
@@ -422,7 +417,7 @@ where
 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
 fn pick_query<'a, CTX, T, F>(
-    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
+    query_map: &QueryMap<CTX::DepKind>,
     tcx: CTX,
     queries: &'a [T],
     f: F,
@@ -456,9 +451,9 @@ where
 /// the function returns false.
 #[cfg(parallel_compiler)]
 fn remove_cycle<CTX: QueryContext>(
-    query_map: &QueryMap<CTX::DepKind, CTX::Query>,
+    query_map: &QueryMap<CTX::DepKind>,
     jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
+    wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind>>>,
     tcx: CTX,
 ) -> bool {
     let mut visited = FxHashSet::default();

File 5 of 6

@@ -4,7 +4,7 @@ pub use self::plumbing::*;
 mod job;
 #[cfg(parallel_compiler)]
 pub use self::job::deadlock;
-pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+pub use self::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap};
 
 mod caches;
 pub use self::caches::{
@@ -15,24 +15,63 @@ mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
 use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
-use crate::query::job::QueryMap;
 
-use rustc_data_structures::stable_hasher::HashStable;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
 use rustc_errors::Diagnostic;
 use rustc_span::def_id::DefId;
+use rustc_span::Span;
 
+/// Description of a frame in the query stack.
+///
+/// This is mostly used in case of cycles for error reporting.
+#[derive(Clone, Debug)]
+pub struct QueryStackFrame {
+    pub name: &'static str,
+    pub description: String,
+    span: Option<Span>,
+    /// This hash is used to deterministically pick
+    /// a query to remove cycles in the parallel compiler.
+    hash: Fingerprint,
+}
+
+impl QueryStackFrame {
+    #[inline]
+    pub fn new(
+        name: &'static str,
+        description: String,
+        span: Option<Span>,
+        hash: Fingerprint,
+    ) -> Self {
+        Self { name, hash, description, span }
+    }
+
+    // FIXME(eddyb) Get more valid `Span`s on queries.
+    #[inline]
+    pub fn default_span(&self, span: Span) -> Span {
+        if !span.is_dummy() {
+            return span;
+        }
+        self.span.unwrap_or(span)
+    }
+}
+
+impl<CTX> HashStable<CTX> for QueryStackFrame {
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        self.hash.hash_stable(hcx, hasher)
+    }
+}
+
 pub trait QueryContext: HasDepContext {
-    type Query: Clone + HashStable<Self::StableHashingContext>;
-
     /// Get string representation from DefPath.
     fn def_path_str(&self, def_id: DefId) -> String;
 
     /// Get the query information from the TLS context.
     fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>>;
 
-    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind, Self::Query>>;
+    fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
 
     /// Load data from the on-disk cache.
     fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);

File 6 of 6

@@ -7,7 +7,7 @@ use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
 use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
-use crate::query::{QueryContext, QueryMap};
+use crate::query::{QueryContext, QueryMap, QueryStackFrame};
 
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::cold_path;
@@ -81,37 +81,36 @@ impl<C: QueryCache> QueryCacheStore<C> {
     }
 }
 
-struct QueryStateShard<D, Q, K> {
-    active: FxHashMap<K, QueryResult<D, Q>>,
+struct QueryStateShard<D, K> {
+    active: FxHashMap<K, QueryResult<D>>,
 
     /// Used to generate unique ids for active jobs.
     jobs: u32,
 }
 
-impl<D, Q, K> Default for QueryStateShard<D, Q, K> {
-    fn default() -> QueryStateShard<D, Q, K> {
+impl<D, K> Default for QueryStateShard<D, K> {
+    fn default() -> QueryStateShard<D, K> {
         QueryStateShard { active: Default::default(), jobs: 0 }
     }
 }
 
-pub struct QueryState<D, Q, K> {
-    shards: Sharded<QueryStateShard<D, Q, K>>,
+pub struct QueryState<D, K> {
+    shards: Sharded<QueryStateShard<D, K>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D, Q> {
+enum QueryResult<D> {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D, Q>),
+    Started(QueryJob<D>),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<D, Q, K> QueryState<D, Q, K>
+impl<D, K> QueryState<D, K>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     K: Eq + Hash + Clone + Debug,
 {
     pub fn all_inactive(&self) -> bool {
@@ -123,8 +122,8 @@ where
         &self,
         tcx: CTX,
         kind: D,
-        make_query: fn(CTX, K) -> Q,
-        jobs: &mut QueryMap<D, Q>,
+        make_query: fn(CTX, K) -> QueryStackFrame,
+        jobs: &mut QueryMap<D>,
     ) -> Option<()> {
         // We use try_lock_shards here since we are called from the
         // deadlock handler, and this shouldn't be locked.
@@ -146,30 +145,28 @@ where
     }
 }
 
-impl<D, Q, K> Default for QueryState<D, Q, K> {
-    fn default() -> QueryState<D, Q, K> {
+impl<D, K> Default for QueryState<D, K> {
+    fn default() -> QueryState<D, K> {
         QueryState { shards: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, D, Q, C>
+struct JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
-    state: &'tcx QueryState<D, Q, C::Key>,
+    state: &'tcx QueryState<D, C::Key>,
     cache: &'tcx QueryCacheStore<C>,
     key: C::Key,
    id: QueryJobId<D>,
 }
 
-impl<'tcx, D, Q, C> JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     /// Either gets a `JobOwner` corresponding the query, allowing us to
@@ -183,13 +180,13 @@ where
     #[inline(always)]
     fn try_start<'b, CTX>(
         tcx: CTX,
-        state: &'b QueryState<CTX::DepKind, CTX::Query, C::Key>,
+        state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
         key: &C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
-    ) -> TryGetJob<'b, CTX::DepKind, CTX::Query, C>
+    ) -> TryGetJob<'b, CTX::DepKind, C>
     where
         CTX: QueryContext,
     {
@@ -243,7 +240,7 @@ where
             // so we just return the error.
             #[cfg(not(parallel_compiler))]
             return TryGetJob::Cycle(cold_path(|| {
-                let error: CycleError<CTX::Query> = latch.find_cycle_in_stack(
+                let error: CycleError = latch.find_cycle_in_stack(
                     tcx.try_collect_active_jobs().unwrap(),
                     &tcx.current_query_job(),
                     span,
@@ -328,10 +325,9 @@ where
     (result, diagnostics.into_inner())
 }
 
-impl<'tcx, D, Q, C> Drop for JobOwner<'tcx, D, Q, C>
+impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     #[inline(never)]
@@ -356,21 +352,20 @@ where
 }
 
 #[derive(Clone)]
-pub struct CycleError<Q> {
+pub struct CycleError {
     /// The query and related span that uses the cycle.
-    pub usage: Option<(Span, Q)>,
-    pub cycle: Vec<QueryInfo<Q>>,
+    pub usage: Option<(Span, QueryStackFrame)>,
+    pub cycle: Vec<QueryInfo>,
 }
 
 /// The result of `try_start`.
-enum TryGetJob<'tcx, D, Q, C>
+enum TryGetJob<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
-    Q: Clone,
     C: QueryCache,
 {
     /// The query is not yet started. Contains a guard to the cache eventually used to start it.
-    NotYetStarted(JobOwner<'tcx, D, Q, C>),
+    NotYetStarted(JobOwner<'tcx, D, C>),
 
     /// The query was already completed.
     /// Returns the result of the query and its dep-node index
@@ -414,7 +409,7 @@ where
 
 fn try_execute_query<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
@@ -426,7 +421,7 @@ where
     C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
-    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
         tcx, state, cache, span, &key, lookup, query,
    ) {
         TryGetJob::NotYetStarted(job) => job,
@@ -590,7 +585,7 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
 fn force_query_with_job<C, CTX>(
     tcx: CTX,
     key: C::Key,
-    job: JobOwner<'_, CTX::DepKind, CTX::Query, C>,
+    job: JobOwner<'_, CTX::DepKind, C>,
     dep_node: DepNode<CTX::DepKind>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
 ) -> (C::Stored, DepNodeIndex)
@@ -650,7 +645,7 @@ where
 #[inline(never)]
 fn get_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     span: Span,
     key: C::Key,
@@ -708,7 +703,7 @@ where
 #[inline(never)]
 fn force_query_impl<CTX, C>(
     tcx: CTX,
-    state: &QueryState<CTX::DepKind, CTX::Query, C::Key>,
+    state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
     key: C::Key,
     span: Span,
@@ -736,7 +731,7 @@ fn force_query_impl<CTX, C>(
         Err(lookup) => lookup,
     };
 
-    let job = match JobOwner::<'_, CTX::DepKind, CTX::Query, C>::try_start(
+    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
         tcx, state, cache, span, &key, lookup, query,
     ) {
         TryGetJob::NotYetStarted(job) => job,