Avoid creating anonymous nodes with zero or one dependency.
parent 7f9ab0300c
commit 3a6d5c2beb
2 changed files with 49 additions and 26 deletions
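The shape of the change, before reading the diff: anonymous tasks used to intern a fresh dep node regardless of how many dependencies they recorded. After this commit, a task that records no dependencies maps to a shared, always-green dummy node (`DepNodeIndex::DUMMY_ANON`), a task with exactly one dependency reuses that dependency's index directly, and only tasks with two or more dependencies intern a new node whose hash mixes the dependency indices with the per-session `anon_id_seed`. Below is a minimal, self-contained sketch of that dispatch; `Graph`, `anon_node_for`, and the XOR used to mix in the seed are illustrative stand-ins, not rustc's actual types or hashing.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in for rustc's `DepNodeIndex` (a u32 newtype in the real code).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DepNodeIndex(u32);

impl DepNodeIndex {
    /// Mirrors `DUMMY_ANON` from the diff: the always-green node at index 0.
    const DUMMY_ANON: DepNodeIndex = DepNodeIndex(0);
}

/// Toy dep graph: interning a node just appends it and returns its index.
struct Graph {
    nodes: Vec<u64>,
    /// Per-session random value, like `anon_id_seed` in the diff.
    anon_id_seed: u64,
}

impl Graph {
    fn intern_new_node(&mut self, hash: u64) -> DepNodeIndex {
        self.nodes.push(hash);
        DepNodeIndex((self.nodes.len() - 1) as u32)
    }

    /// The dispatch this commit introduces: only intern a fresh anonymous
    /// node when the task recorded two or more dependencies.
    fn anon_node_for(&mut self, deps: &[DepNodeIndex]) -> DepNodeIndex {
        match deps.len() {
            // No dependencies: every such task shares the dummy node.
            0 => DepNodeIndex::DUMMY_ANON,
            // One dependency: reuse that dependency's index, create nothing.
            1 => deps[0],
            // Two or more: hash the dependency *indices* and mix in the
            // per-session seed, then intern a node for the result.
            _ => {
                let mut hasher = DefaultHasher::new();
                deps.hash(&mut hasher);
                let hash = self.anon_id_seed ^ hasher.finish();
                self.intern_new_node(hash)
            }
        }
    }
}

fn main() {
    // Index 0 is pre-populated with the dummy node, as DepGraph::new does below.
    let mut g = Graph { nodes: vec![0], anon_id_seed: 0x1234 };
    let a = g.intern_new_node(1);
    let b = g.intern_new_node(2);

    assert_eq!(g.anon_node_for(&[]), DepNodeIndex::DUMMY_ANON);
    assert_eq!(g.anon_node_for(&[a]), a);
    assert_ne!(g.anon_node_for(&[a, b]), DepNodeIndex::DUMMY_ANON);
    println!("nodes after three anonymous tasks: {}", g.nodes.len()); // 4, not 6
}
```

The first two arms are the point of the commit: neither of them creates a node, so the dep graph no longer grows with the number of trivial anonymous tasks.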
@@ -229,6 +229,7 @@ pub fn build_dep_graph(
     }

     Some(DepGraph::new(
+        &sess.prof,
         prev_graph,
         prev_work_products,
         encoder,
@@ -44,6 +44,7 @@ rustc_index::newtype_index! {

 impl DepNodeIndex {
     pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
+    pub const DUMMY_ANON: DepNodeIndex = DepNodeIndex::from_u32(0);
 }

 impl std::convert::From<DepNodeIndex> for QueryInvocationId {
@@ -108,6 +109,7 @@ where

 impl<K: DepKind> DepGraph<K> {
     pub fn new(
+        profiler: &SelfProfilerRef,
         prev_graph: SerializedDepGraph<K>,
         prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
         encoder: FileEncoder,
@@ -116,16 +118,23 @@ impl<K: DepKind> DepGraph<K> {
     ) -> DepGraph<K> {
         let prev_graph_node_count = prev_graph.node_count();

+        let current =
+            CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);
+
+        // Instantiate an *always green* node for dependency-less anonymous queries.
+        let _green_node_index = current.intern_new_node(
+            profiler,
+            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
+            smallvec![],
+            Fingerprint::ZERO,
+        );
+        debug_assert_eq!(_green_node_index, DepNodeIndex::DUMMY_ANON);
+
         DepGraph {
             data: Some(Lrc::new(DepGraphData {
                 previous_work_products: prev_work_products,
                 dep_node_debug: Default::default(),
-                current: CurrentDepGraph::new(
-                    prev_graph_node_count,
-                    encoder,
-                    record_graph,
-                    record_stats,
-                ),
+                current,
                 emitting_diagnostics: Default::default(),
                 emitting_diagnostics_cond_var: Condvar::new(),
                 previous: prev_graph,
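The `debug_assert_eq!` above relies on a simple invariant: a freshly created `CurrentDepGraph` hands out node indices in insertion order, so interning the always-green dummy node before anything else guarantees it lands at index 0, which is exactly how `DUMMY_ANON` is defined in the earlier hunk. A toy sketch of that invariant, with an illustrative `Interner` in place of rustc's internals:

```rust
/// Toy interner: indices are handed out in insertion order, so the first node
/// interned into a fresh graph always receives index 0.
struct Interner {
    nodes: Vec<&'static str>,
}

impl Interner {
    fn intern(&mut self, node: &'static str) -> u32 {
        self.nodes.push(node);
        (self.nodes.len() - 1) as u32
    }
}

fn main() {
    let mut interner = Interner { nodes: Vec::new() };
    // Mirror the diff: the dummy anonymous node is interned before any other
    // node, so its index is guaranteed to be 0 (the DUMMY_ANON slot).
    let green_node_index = interner.intern("dummy-anon");
    assert_eq!(green_node_index, 0);
    // Every node interned afterwards gets a strictly larger index.
    assert_eq!(interner.intern("some-real-node"), 1);
}
```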
@@ -287,30 +296,43 @@ impl<K: DepKind> DepGraph<K> {
             let task_deps = Lock::new(TaskDeps::default());
             let result = K::with_deps(Some(&task_deps), op);
             let task_deps = task_deps.into_inner();
+            let task_deps = task_deps.reads;

-            // The dep node indices are hashed here instead of hashing the dep nodes of the
-            // dependencies. These indices may refer to different nodes per session, but this isn't
-            // a problem here because we that ensure the final dep node hash is per session only by
-            // combining it with the per session random number `anon_id_seed`. This hash only need
-            // to map the dependencies to a single value on a per session basis.
-            let mut hasher = StableHasher::new();
-            task_deps.reads.hash(&mut hasher);
-
-            let target_dep_node = DepNode {
-                kind: dep_kind,
-                // Fingerprint::combine() is faster than sending Fingerprint
-                // through the StableHasher (at least as long as StableHasher
-                // is so slow).
-                hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
-            };
+            let dep_node_index = match task_deps.len() {
+                0 => {
+                    // Dependency-less anonymous nodes can safely be replaced by a dummy node.
+                    DepNodeIndex::DUMMY_ANON
+                }
+                1 => {
+                    // When there is only one dependency, don't bother creating a node.
+                    task_deps[0]
+                }
+                _ => {
+                    // The dep node indices are hashed here instead of hashing the dep nodes of the
+                    // dependencies. These indices may refer to different nodes per session, but this isn't
+                    // a problem here because we that ensure the final dep node hash is per session only by
+                    // combining it with the per session random number `anon_id_seed`. This hash only need
+                    // to map the dependencies to a single value on a per session basis.
+                    let mut hasher = StableHasher::new();
+                    task_deps.hash(&mut hasher);

-            let dep_node_index = data.current.intern_new_node(
-                cx.profiler(),
-                target_dep_node,
-                task_deps.reads,
-                Fingerprint::ZERO,
-            );
+                    let target_dep_node = DepNode {
+                        kind: dep_kind,
+                        // Fingerprint::combine() is faster than sending Fingerprint
+                        // through the StableHasher (at least as long as StableHasher
+                        // is so slow).
+                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
+                    };
+
+                    data.current.intern_new_node(
+                        cx.profiler(),
+                        target_dep_node,
+                        task_deps,
+                        Fingerprint::ZERO,
+                    )
+                }
+            };

             (result, dep_node_index)
         } else {
             (op(), self.next_virtual_depnode_index())