Use a QueryContext for try_mark_green.
parent 3bd14c7bbe
commit b27266fdb2
10 changed files with 133 additions and 143 deletions
@@ -135,7 +135,7 @@ pub struct DepKindStruct {
     /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
     /// add it to the "We don't have enough information to reconstruct..." group in
     /// the match below.
-    pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
+    pub(crate) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
 
     /// Invoke a query to put the on-disk cached value in memory.
     pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
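The two function pointers documented in the hunk above form a per-`DepKind` dispatch table: one entry says how to re-run the query behind a dep node, the other how to pull its cached value back into memory. Below is a minimal, self-contained sketch of that table idea. Every name in it (`Ctxt`, `Node`, `KindVTable`, the two sample kinds) is an invented stand-in for illustration, not rustc's real API.

// Sketch of a per-kind dispatch table in the spirit of `DepKindStruct`.

struct Ctxt; // stand-in for TyCtxt / QueryCtxt

#[derive(Debug)]
struct Node {
    kind: usize, // index into the table below
}

struct KindVTable {
    // Re-run the query behind a node; returns false when the query
    // key cannot be reconstructed from the node alone.
    force_from_dep_node: fn(&Ctxt, &Node) -> bool,
    // Put the on-disk cached value for this node in memory.
    try_load_from_on_disk_cache: fn(&Ctxt, &Node),
}

fn force_reconstructible(_cx: &Ctxt, node: &Node) -> bool {
    println!("re-ran query for {:?}", node);
    true
}

fn force_opaque(_cx: &Ctxt, _node: &Node) -> bool {
    // The "We don't have enough information to reconstruct..." group:
    // forcing simply fails for these kinds.
    false
}

fn load_noop(_cx: &Ctxt, _node: &Node) {}

// rustc generates one entry per DepKind with a macro; two suffice here.
static VTABLES: [KindVTable; 2] = [
    KindVTable { force_from_dep_node: force_reconstructible, try_load_from_on_disk_cache: load_noop },
    KindVTable { force_from_dep_node: force_opaque, try_load_from_on_disk_cache: load_noop },
];

fn main() {
    let cx = Ctxt;
    let node = Node { kind: 0 };
    // Dispatch through the table, as in
    // `(dep_node.kind.force_from_dep_node)(tcx, dep_node)`.
    assert!((VTABLES[node.kind].force_from_dep_node)(&cx, &node));
}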
@@ -2,9 +2,6 @@ use crate::ich::StableHashingContext;
 use crate::ty::{self, TyCtxt};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
@@ -116,99 +113,7 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
         &self.dep_graph
     }
 
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        // FIXME: This match is just a workaround for incremental bugs and should
-        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
-        // bug that must be fixed before removing this.
-        match dep_node.kind {
-            DepKind::hir_owner | DepKind::hir_owner_nodes => {
-                if let Some(def_id) = dep_node.extract_def_id(*self) {
-                    if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
-                        // This `DefPath` does not have a
-                        // corresponding `DepNode` (e.g. a
-                        // struct field), and the `DefPath`
-                        // collided with the `DefPath` of a
-                        // proper item that existed in the
-                        // previous compilation session.
-                        //
-                        // Since the given `DefPath` does not
-                        // denote the item that previously
-                        // existed, we just fail to mark green.
-                        return false;
-                    }
-                } else {
-                    // If the node does not exist anymore, we
-                    // just fail to mark green.
-                    return false;
-                }
-            }
-            _ => {
-                // For other kinds of nodes it's OK to be
-                // forced.
-            }
-        }
-
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        (dep_node.kind.force_from_dep_node)(*self, dep_node)
-    }
-
-    fn has_errors_or_delayed_span_bugs(&self) -> bool {
-        self.sess.has_errors_or_delayed_span_bugs()
-    }
-
-    fn diagnostic(&self) -> &rustc_errors::Handler {
-        self.sess.diagnostic()
-    }
-
-    // Interactions with on_disk_cache
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
-        self.on_disk_cache
-            .as_ref()
-            .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
-            .unwrap_or_default()
-    }
-
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics(dep_node_index, diagnostics)
-        }
-    }
-
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    ) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
-        }
-    }
-
     fn profiler(&self) -> &SelfProfilerRef {
         &self.prof
     }
 }
-
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-    def_id == hir_id.owner
-}
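The hunk above removes `try_force_from_dep_node` (and the diagnostics plumbing) from `TyCtxt`'s `DepContext` impl, moving that responsibility behind a QueryContext per the commit title. The comments it deletes explain the role forcing plays in green-marking: when a node from the previous session cannot be marked green directly, the engine re-runs (forces) the query behind it, which only works if the query key is reconstructible from the dep node. The toy model below illustrates that fallback; `Color`, `Dep`, and this `try_mark_green` are simplified inventions, not the real rustc_query_system API.

// Toy model of the green-marking fallback to forcing.

#[derive(Clone, Copy, PartialEq, Debug)]
enum Color {
    Red,     // known changed since the previous session
    Green,   // known unchanged
    Unknown, // not yet decided; forcing may settle it
}

struct Dep {
    color: Color,
    // Some dep kinds (e.g. codegen_unit in the removed comment) cannot
    // reconstruct their query key, so they cannot be forced.
    forcible: bool,
}

fn try_mark_green(dep: &mut Dep) -> bool {
    match dep.color {
        Color::Green => true,
        Color::Red => false,
        Color::Unknown => {
            if dep.forcible {
                // Re-run the query; pretend the result was unchanged,
                // so the node can be promoted to green.
                dep.color = Color::Green;
                true
            } else {
                // No way to re-run it: conservatively fail to mark green.
                false
            }
        }
    }
}

fn main() {
    let mut reconstructible = Dep { color: Color::Unknown, forcible: true };
    let mut opaque = Dep { color: Color::Unknown, forcible: false };
    assert!(try_mark_green(&mut reconstructible));
    assert!(!try_mark_green(&mut opaque));
}

The deleted `debug_assert!` guards the non-forcible case for `codegen_unit` specifically: by calling the `codegen_unit` query for each CGU right after partitioning, the compiler ensures `try_mark_green` always hits the cache and never reaches the forcing path for that kind.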