Remove force_query_with_job.
parent ef4becdce4
commit f2c8707abb

1 changed file with 57 additions and 92 deletions
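This folds the bodies of force_query_with_job and the with_diagnostics helper directly into execute_job: the load from the incremental cache is attempted once, up front, only for queries that are neither anon nor eval_always, and a single fall-through path then runs the dep-graph task, collects diagnostics in an inline Lock<ThinVec<_>>, and stores the query's side effects. A minimal, self-contained sketch of that control flow follows; QueryFlags and the closures are hypothetical stand-ins, not the real query-system API.

    // Sketch only: stand-in types, not rustc's query machinery.
    struct QueryFlags {
        anon: bool,
        eval_always: bool,
    }

    // Mirrors the shape of the new `execute_job`: try the cache first when allowed,
    // otherwise fall through to a single task-execution path.
    fn execute_job_sketch(
        flags: &QueryFlags,
        try_load_from_cache: impl Fn() -> Option<(String, u32)>, // cached (result, dep_node_index)
        run_task: impl Fn() -> (String, u32),                    // fresh computation
    ) -> (String, u32) {
        if !flags.anon && !flags.eval_always {
            if let Some(cached) = try_load_from_cache() {
                return cached;
            }
        }
        // Cache miss, anonymous query, or eval_always: run the task and return its result.
        run_task()
    }

    fn main() {
        let flags = QueryFlags { anon: false, eval_always: false };
        let (value, index) = execute_job_sketch(&flags, || None, || ("computed".to_owned(), 1));
        assert_eq!((value.as_str(), index), ("computed", 1));
    }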
@@ -17,7 +17,7 @@ use rustc_data_structures::profiling::TimingGuard;
 use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::{Diagnostic, DiagnosticBuilder, FatalError};
+use rustc_errors::{DiagnosticBuilder, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
@@ -304,15 +304,6 @@ where
     }
 }
-
-fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
-where
-    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
-{
-    let diagnostics = Lock::new(ThinVec::new());
-    let result = f(Some(&diagnostics));
-    (result, diagnostics.into_inner())
-}
 
 impl<'tcx, D, K> Drop for JobOwner<'tcx, D, K>
 where
     D: Copy + Clone + Eq + Hash,
@@ -452,7 +443,7 @@ where
 fn execute_job<CTX, K, V>(
     tcx: CTX,
     key: K,
-    dep_node: Option<DepNode<CTX::DepKind>>,
+    mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
     query: &QueryVtable<CTX, K, V>,
     job_id: QueryJobId<CTX::DepKind>,
     compute: fn(CTX::DepContext, K) -> V,
@@ -473,45 +464,66 @@ where
         return (result, dep_node_index);
     }
 
-    if query.anon {
-        let prof_timer = tcx.dep_context().profiler().query_provider();
-
-        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-            tcx.start_query(job_id, diagnostics, || {
-                dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
-                    compute(*tcx.dep_context(), key)
-                })
-            })
-        });
-
-        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
-
-        let side_effects = QuerySideEffects { diagnostics };
-
-        if unlikely!(!side_effects.is_empty()) {
-            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
-        }
-
-        (result, dep_node_index)
-    } else if query.eval_always {
+    if !query.anon && !query.eval_always {
         // `to_dep_node` is expensive for some `DepKind`s.
-        let dep_node = dep_node.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
-        force_query_with_job(tcx, key, job_id, dep_node, query, compute)
-    } else {
-        // `to_dep_node` is expensive for some `DepKind`s.
-        let dep_node = dep_node.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
-        // The diagnostics for this query will be
-        // promoted to the current session during
+        let dep_node =
+            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+        // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        let loaded = tcx.start_query(job_id, None, || {
+        if let Some(ret) = tcx.start_query(job_id, None, || {
             try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query, compute)
-        });
-        if let Some((result, dep_node_index)) = loaded {
-            (result, dep_node_index)
-        } else {
-            force_query_with_job(tcx, key, job_id, dep_node, query, compute)
+        }) {
+            return ret;
         }
     }
+
+    let prof_timer = tcx.dep_context().profiler().query_provider();
+    let diagnostics = Lock::new(ThinVec::new());
+
+    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+        if query.anon {
+            return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+                compute(*tcx.dep_context(), key)
+            });
+        }
+
+        // `to_dep_node` is expensive for some `DepKind`s.
+        let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+        if query.eval_always {
+            tcx.dep_context().dep_graph().with_eval_always_task(
+                dep_node,
+                *tcx.dep_context(),
+                key,
+                compute,
+                query.hash_result,
+            )
+        } else {
+            tcx.dep_context().dep_graph().with_task(
+                dep_node,
+                *tcx.dep_context(),
+                key,
+                compute,
+                query.hash_result,
+            )
+        }
+    });
+
+    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+    let diagnostics = diagnostics.into_inner();
+    let side_effects = QuerySideEffects { diagnostics };
+
+    if unlikely!(!side_effects.is_empty()) {
+        if query.anon {
+            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
+        } else {
+            tcx.store_side_effects(dep_node_index, side_effects);
+        }
+    }
+
+    (result, dep_node_index)
 }
 
 fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
@@ -641,53 +653,6 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
     }
 }
-
-fn force_query_with_job<CTX, K, V>(
-    tcx: CTX,
-    key: K,
-    job_id: QueryJobId<CTX::DepKind>,
-    dep_node: DepNode<CTX::DepKind>,
-    query: &QueryVtable<CTX, K, V>,
-    compute: fn(CTX::DepContext, K) -> V,
-) -> (V, DepNodeIndex)
-where
-    CTX: QueryContext,
-    K: Debug,
-{
-    let prof_timer = tcx.dep_context().profiler().query_provider();
-
-    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
-        tcx.start_query(job_id, diagnostics, || {
-            if query.eval_always {
-                tcx.dep_context().dep_graph().with_eval_always_task(
-                    dep_node,
-                    *tcx.dep_context(),
-                    key,
-                    compute,
-                    query.hash_result,
-                )
-            } else {
-                tcx.dep_context().dep_graph().with_task(
-                    dep_node,
-                    *tcx.dep_context(),
-                    key,
-                    compute,
-                    query.hash_result,
-                )
-            }
-        })
-    });
-
-    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
-
-    let side_effects = QuerySideEffects { diagnostics };
-
-    if unlikely!(!side_effects.is_empty()) && dep_node.kind != DepKind::NULL {
-        tcx.store_side_effects(dep_node_index, side_effects);
-    }
-
-    (result, dep_node_index)
-}
 
 /// Ensure that either this query has all green inputs or been executed.
 /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
 /// Returns true if the query should still run.