rustc_query_system: explicitly register reused dep nodes
Register nodes that we've reused from the previous session explicitly with `OnDiskCache`. Previously, we relied on this happening as a side effect of accessing the nodes in the `PreviousDepGraph`. For the sake of performance and avoiding unintended side effects, register explicitly.
This commit is contained in:
parent
eb4fc71dc9
commit
7795801902
6 changed files with 51 additions and 62 deletions
|
@ -60,9 +60,8 @@ pub struct DepNode<K> {
|
|||
// * When a `DepNode::construct` is called, `arg.to_fingerprint()`
|
||||
// is responsible for calling `OnDiskCache::store_foreign_def_id_hash`
|
||||
// if needed
|
||||
// * When a `DepNode` is loaded from the `PreviousDepGraph`,
|
||||
// then `PreviousDepGraph::index_to_node` is responsible for calling
|
||||
// `tcx.register_reused_dep_path_hash`
|
||||
// * When we serialize the on-disk cache, `OnDiskCache::serialize` is
|
||||
// responsible for calling `DepGraph::register_reused_dep_nodes`.
|
||||
//
|
||||
// FIXME: Enforce this by preventing manual construction of `DepNode`
|
||||
// (e.g. add a `_priv: ()` field)
|
||||
|
|
|
@ -554,7 +554,7 @@ impl<K: DepKind> DepGraph<K> {
|
|||
// We never try to mark eval_always nodes as green
|
||||
debug_assert!(!dep_node.kind.is_eval_always());
|
||||
|
||||
data.previous.debug_assert_eq(prev_dep_node_index, *dep_node);
|
||||
debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
|
||||
|
||||
let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
|
||||
|
||||
|
@ -572,7 +572,7 @@ impl<K: DepKind> DepGraph<K> {
|
|||
"try_mark_previous_green({:?}) --- found dependency {:?} to \
|
||||
be immediately green",
|
||||
dep_node,
|
||||
data.previous.debug_dep_node(dep_dep_node_index),
|
||||
data.previous.index_to_node(dep_dep_node_index)
|
||||
);
|
||||
current_deps.push(node_index);
|
||||
}
|
||||
|
@ -585,12 +585,12 @@ impl<K: DepKind> DepGraph<K> {
|
|||
"try_mark_previous_green({:?}) - END - dependency {:?} was \
|
||||
immediately red",
|
||||
dep_node,
|
||||
data.previous.debug_dep_node(dep_dep_node_index)
|
||||
data.previous.index_to_node(dep_dep_node_index)
|
||||
);
|
||||
return None;
|
||||
}
|
||||
None => {
|
||||
let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index, tcx);
|
||||
let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);
|
||||
|
||||
// We don't know the state of this dependency. If it isn't
|
||||
// an eval_always node, let's try to mark it green recursively.
|
||||
|
@ -801,7 +801,7 @@ impl<K: DepKind> DepGraph<K> {
|
|||
for prev_index in data.colors.values.indices() {
|
||||
match data.colors.get(prev_index) {
|
||||
Some(DepNodeColor::Green(_)) => {
|
||||
let dep_node = data.previous.index_to_node(prev_index, tcx);
|
||||
let dep_node = data.previous.index_to_node(prev_index);
|
||||
tcx.try_load_from_on_disk_cache(&dep_node);
|
||||
}
|
||||
None | Some(DepNodeColor::Red) => {
|
||||
|
@ -813,6 +813,20 @@ impl<K: DepKind> DepGraph<K> {
|
|||
}
|
||||
}
|
||||
|
||||
// Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
|
||||
pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
|
||||
let data = self.data.as_ref().unwrap();
|
||||
for prev_index in data.colors.values.indices() {
|
||||
match data.colors.get(prev_index) {
|
||||
Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
|
||||
let dep_node = data.previous.index_to_node(prev_index);
|
||||
tcx.register_reused_dep_node(&dep_node);
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn next_virtual_depnode_index(&self) -> DepNodeIndex {
|
||||
let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
|
||||
DepNodeIndex::from_u32(index)
|
||||
|
|
|
@ -15,7 +15,6 @@ use rustc_data_structures::profiling::SelfProfilerRef;
|
|||
use rustc_data_structures::sync::Lock;
|
||||
use rustc_data_structures::thin_vec::ThinVec;
|
||||
use rustc_errors::Diagnostic;
|
||||
use rustc_span::def_id::DefPathHash;
|
||||
|
||||
use std::fmt;
|
||||
use std::hash::Hash;
|
||||
|
@ -33,7 +32,7 @@ pub trait DepContext: Copy {
|
|||
/// Try to force a dep node to execute and see if it's green.
|
||||
fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
|
||||
|
||||
fn register_reused_dep_path_hash(&self, hash: DefPathHash);
|
||||
fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
|
||||
|
||||
/// Return whether the current session is tainted by errors.
|
||||
fn has_errors_or_delayed_span_bugs(&self) -> bool;
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
|
||||
use super::{DepKind, DepNode};
|
||||
use crate::dep_graph::DepContext;
|
||||
use rustc_data_structures::fingerprint::Fingerprint;
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_span::def_id::DefPathHash;
|
||||
|
||||
#[derive(Debug, Encodable, Decodable)]
|
||||
pub struct PreviousDepGraph<K: DepKind> {
|
||||
|
@ -33,44 +31,7 @@ impl<K: DepKind> PreviousDepGraph<K> {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
pub fn index_to_node<CTX: DepContext<DepKind = K>>(
|
||||
&self,
|
||||
dep_node_index: SerializedDepNodeIndex,
|
||||
tcx: CTX,
|
||||
) -> DepNode<K> {
|
||||
let dep_node = self.data.nodes[dep_node_index];
|
||||
// We have just loaded a deserialized `DepNode` from the previous
|
||||
// compilation session into the current one. If this was a foreign `DefId`,
|
||||
// then we stored additional information in the incr comp cache when we
|
||||
// initially created its fingerprint (see `DepNodeParams::to_fingerprint`)
|
||||
// We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
|
||||
// have the original value), so we need to copy over this additional information
|
||||
// from the old incremental cache into the new cache that we serialize
|
||||
// at the end of this compilation session.
|
||||
if dep_node.kind.can_reconstruct_query_key() {
|
||||
tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash.into()));
|
||||
}
|
||||
dep_node
|
||||
}
|
||||
|
||||
/// When debug assertions are enabled, asserts that the dep node at `dep_node_index` is equal to `dep_node`.
|
||||
/// This method should be preferred over manually calling `index_to_node`.
|
||||
/// Calls to `index_to_node` may affect global state, so gating a call
|
||||
/// to `index_to_node` on debug assertions could cause behavior changes when debug assertions
|
||||
/// are enabled.
|
||||
#[inline]
|
||||
pub fn debug_assert_eq(&self, dep_node_index: SerializedDepNodeIndex, dep_node: DepNode<K>) {
|
||||
debug_assert_eq!(self.data.nodes[dep_node_index], dep_node);
|
||||
}
|
||||
|
||||
/// Obtains a debug-printable version of the `DepNode`.
|
||||
/// See `debug_assert_eq` for why this should be preferred over manually
|
||||
/// calling `index_to_node`
|
||||
pub fn debug_dep_node(&self, dep_node_index: SerializedDepNodeIndex) -> impl std::fmt::Debug {
|
||||
// We're returning the `DepNode` without calling `register_reused_dep_path_hash`,
|
||||
// but `impl Debug` return type means that it can only be used for debug printing.
|
||||
// So, there's no risk of calls trying to create new dep nodes that have this
|
||||
// node as a dependency
|
||||
pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
|
||||
self.data.nodes[dep_node_index]
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue