
Auto merge of #56983 - ljedrz:parallel_query_tweaks, r=Zoxc

Parallel query tweaks

- faster stack reversal in `remove_cycle`
- insert visited queries more eagerly
- simplify stack trimming in `cycle_check`
- minor refactoring in 2 spots
bors 2018-12-25 10:52:13 +00:00
commit 27c4335287
2 changed files with 10 additions and 22 deletions
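
The recurring pattern behind "insert visited queries more eagerly" is that `HashSet::insert` (and therefore `FxHashSet::insert`) returns `false` when the value was already present, so a single call can both record a visit and test for it. A minimal sketch of the idiom, using a plain `std::collections::HashSet<usize>` in place of the compiler's `FxHashSet<*const QueryJob>` and a hypothetical `visit` helper:

use std::collections::HashSet;

// Hypothetical helper: `insert` returns `false` when the value was already
// in the set, so one call replaces a separate `contains` check followed by
// an `insert`.
fn visit(visited: &mut HashSet<usize>, node: usize) -> bool {
    if !visited.insert(node) {
        return false; // already visited
    }
    true // first visit, now marked
}

fn main() {
    let mut visited = HashSet::new();
    assert!(visit(&mut visited, 1));  // first time: marked and accepted
    assert!(!visit(&mut visited, 1)); // second time: rejected, no extra insert
}

Because the check at the top of `cycle_check` and `connected_to_root` now performs the insertion itself, the later standalone `visited.insert(...)` calls become redundant and are dropped in the hunks below.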


@@ -303,12 +303,12 @@ fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                      stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
                      visited: &mut FxHashSet<*const QueryJob<'tcx>>
 ) -> Option<Option<Waiter<'tcx>>> {
-    if visited.contains(&query.as_ptr()) {
+    if !visited.insert(query.as_ptr()) {
         return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
             // We detected a query cycle, fix up the initial span and return Some
             // Remove previous stack entries
-            stack.splice(0..p, iter::empty());
+            stack.drain(0..p);
             // Replace the span for the first query with the cycle cause
             stack[0].0 = span;
             Some(None)
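
For the "simplify stack trimming" change above, both calls remove the first `p` entries of the stack in place; `drain` just avoids routing an empty replacement iterator through `splice`. A small, self-contained comparison (the element values are made up):

use std::iter;

fn main() {
    let p = 2;

    // Old shape: splice the prefix away with an empty replacement iterator.
    let mut a = vec!["q0", "q1", "q2", "q3"];
    a.splice(0..p, iter::empty());

    // New shape: drain the prefix directly.
    let mut b = vec!["q0", "q1", "q2", "q3"];
    b.drain(0..p);

    assert_eq!(a, b);
    assert_eq!(b, ["q2", "q3"]);
}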
@@ -317,8 +317,7 @@ fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
         }
     }
-    // Mark this query is visited and add it to the stack
-    visited.insert(query.as_ptr());
+    // Query marked as visited is added it to the stack
     stack.push((span, query.clone()));
     // Visit all the waiters
@@ -343,7 +342,7 @@ fn connected_to_root<'tcx>(
     visited: &mut FxHashSet<*const QueryJob<'tcx>>
 ) -> bool {
     // We already visited this or we're deliberately ignoring it
-    if visited.contains(&query.as_ptr()) {
+    if !visited.insert(query.as_ptr()) {
         return false;
     }
@@ -352,8 +351,6 @@ fn connected_to_root<'tcx>(
         return true;
     }
-    visited.insert(query.as_ptr());
     visit_waiters(query, |_, successor| {
         if connected_to_root(successor, visited) {
             Some(None)
@@ -403,11 +400,9 @@ fn remove_cycle<'tcx>(
                                      DUMMY_SP,
                                      &mut stack,
                                      &mut visited) {
-        // Reverse the stack so earlier entries require later entries
-        stack.reverse();
-        // The stack is a vector of pairs of spans and queries
-        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().unzip();
+        // The stack is a vector of pairs of spans and queries; reverse it so that
+        // the earlier entries require later entries
+        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
         // Shift the spans so that queries are matched with the span for their waitee
         spans.rotate_right(1);
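
The "faster stack reversal" hunk above folds the in-place `reverse` into the consuming iterator. A sketch with made-up span/query placeholders, showing that `into_iter().rev().unzip()` yields the same vectors, plus the `rotate_right(1)` shift that follows:

// Illustrative only: (&str, &str) pairs stand in for the real
// (Span, Lrc<QueryJob>) entries on the cycle stack.
fn main() {
    let stack = vec![("s_a", "q_a"), ("s_b", "q_b"), ("s_c", "q_c")];

    // Old shape: reverse in place, then unzip.
    let mut old = stack.clone();
    old.reverse();
    let (spans_old, queries_old): (Vec<_>, Vec<_>) = old.into_iter().unzip();

    // New shape: reverse lazily while consuming the stack.
    let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
    assert_eq!(spans, spans_old);
    assert_eq!(queries, queries_old);

    // Shift the spans so each query is paired with the span of its waitee.
    spans.rotate_right(1);
    assert_eq!(spans, ["s_a", "s_c", "s_b"]);
}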
@@ -424,7 +419,7 @@ fn remove_cycle<'tcx>(
         // Find the queries in the cycle which are
         // connected to queries outside the cycle
-        let entry_points: Vec<_> = stack.iter().filter_map(|(span, query)| {
+        let entry_points = stack.iter().filter_map(|(span, query)| {
             if query.parent.is_none() {
                 // This query is connected to the root (it has no query parent)
                 Some((*span, query.clone(), None))
@@ -449,10 +444,7 @@ fn remove_cycle<'tcx>(
                     Some((*span, query.clone(), Some(waiter)))
                 }
             }
-        }).collect();
-        let entry_points: Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>
-            = entry_points;
+        }).collect::<Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>>();
         // Deterministically pick an entry point
         let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));
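
The two `entry_points` hunks above are one of the "minor refactoring" spots: instead of collecting into an inferred `Vec<_>` and then re-binding with a full type annotation, the target type is stated once via a turbofish on `collect`. A toy illustration of the two equivalent shapes (items simplified to `u32`):

fn main() {
    let items = [1u32, 2, 3];

    // Before: collect into an inferred Vec, then re-bind with a full annotation.
    let doubled: Vec<_> = items.iter().map(|x| x * 2).collect();
    let doubled: Vec<u32> = doubled;

    // After: state the target type once, directly on `collect`.
    let doubled_turbofish = items.iter().map(|x| x * 2).collect::<Vec<u32>>();

    assert_eq!(doubled, doubled_turbofish);
}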


@@ -398,11 +398,7 @@ impl<'sess> OnDiskCache<'sess> {
                                  -> Option<T>
         where T: Decodable
     {
-        let pos = if let Some(&pos) = index.get(&dep_node_index) {
-            pos
-        } else {
-            return None
-        };
+        let pos = index.get(&dep_node_index).cloned()?;
         // Initialize the cnum_map using the value from the thread which finishes the closure first
         self.cnum_map.init_nonlocking_same(|| {
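
The second "minor refactoring" spot is the hunk above: the `if let ... else { return None }` dance is replaced by `Option::cloned` plus the `?` operator. A self-contained sketch with a hypothetical `lookup` helper standing in for `load_indexed`:

use std::collections::HashMap;

// `get` returns Option<&u64>; `cloned()` turns it into an owned Option<u64>,
// and `?` returns None early when the key is absent.
fn lookup(index: &HashMap<u32, u64>, key: u32) -> Option<u64> {
    // Before:
    // let pos = if let Some(&pos) = index.get(&key) { pos } else { return None };

    // After:
    let pos = index.get(&key).cloned()?;
    Some(pos)
}

fn main() {
    let mut index = HashMap::new();
    index.insert(1u32, 100u64);
    assert_eq!(lookup(&index, 1), Some(100));
    assert_eq!(lookup(&index, 2), None);
}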