Error if we try to read dep during deserialization
parent 489296d825
commit 75181dc22f
4 changed files with 24 additions and 5 deletions
@@ -251,6 +251,7 @@ impl<K: DepKind> DepGraph<K> {
                     reads: SmallVec::new(),
                     read_set: Default::default(),
                     phantom_data: PhantomData,
+                    read_allowed: true,
                 }))
             };
             let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
@@ -362,6 +363,11 @@ impl<K: DepKind> DepGraph<K> {
         if let Some(task_deps) = task_deps {
             let mut task_deps = task_deps.lock();
             let task_deps = &mut *task_deps;
+
+            if !task_deps.read_allowed {
+                panic!("Illegal read of: {:?}", dep_node_index);
+            }
+
             if cfg!(debug_assertions) {
                 data.current.total_read_count.fetch_add(1, Relaxed);
             }
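The hunk above is the core of the change: a task whose TaskDeps has read_allowed set to false may not record any dependency reads. The following is only a minimal standalone sketch of that guard with simplified, assumed types (the real TaskDeps also carries a read_set, is generic over the dep kind, and sits behind a lock):

    // Sketch only: DepNodeIndex reduced to a plain newtype, no locking.
    #[derive(Debug, Clone, Copy)]
    struct DepNodeIndex(u32);

    struct TaskDeps {
        reads: Vec<DepNodeIndex>,
        read_allowed: bool,
    }

    impl TaskDeps {
        fn read_index(&mut self, dep_node_index: DepNodeIndex) {
            // Mirrors the new guard: reject reads while the flag is cleared,
            // e.g. while a cached query result is being deserialized.
            if !self.read_allowed {
                panic!("Illegal read of: {:?}", dep_node_index);
            }
            self.reads.push(dep_node_index);
        }
    }

    fn main() {
        let mut deps = TaskDeps { reads: Vec::new(), read_allowed: true };
        deps.read_index(DepNodeIndex(0)); // recorded as usual

        deps.read_allowed = false;
        // deps.read_index(DepNodeIndex(1)); // would panic: "Illegal read of: DepNodeIndex(1)"
        println!("recorded {} read(s)", deps.reads.len());
    }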
@@ -1115,6 +1121,7 @@ pub struct TaskDeps<K> {
     reads: EdgesVec,
     read_set: FxHashSet<DepNodeIndex>,
     phantom_data: PhantomData<DepNode<K>>,
+    pub read_allowed: bool,
 }

 impl<K> Default for TaskDeps<K> {
@@ -1125,6 +1132,7 @@ impl<K> Default for TaskDeps<K> {
             reads: EdgesVec::new(),
             read_set: FxHashSet::default(),
             phantom_data: PhantomData,
+            read_allowed: true,
         }
     }
 }
@@ -142,6 +142,7 @@ pub trait QueryContext: HasDepContext {
         &self,
         token: QueryJobId<Self::DepKind>,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R;
 }
@@ -440,7 +440,7 @@ where
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+        let result = tcx.start_query(job_id, None, true, || query.compute(*tcx.dep_context(), key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -453,7 +453,7 @@ where

     // The diagnostics for this query will be promoted to the current session during
     // `try_mark_green()`, so we can ignore them here.
-    if let Some(ret) = tcx.start_query(job_id, None, || {
+    if let Some(ret) = tcx.start_query(job_id, None, false, || {
         try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
     }) {
         return ret;
@@ -463,7 +463,7 @@ where
     let prof_timer = tcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());

-    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), true, || {
         if query.anon {
             return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                 query.compute(*tcx.dep_context(), key)
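Only the deserialization path (try_load_from_disk_and_cache_in_memory) passes read_allowed = false; the other two call sites keep reads enabled. How the flag travels from start_query into TaskDeps lives in the QueryContext implementation, which is one of the four changed files but not shown in these hunks, so what follows is only a hedged sketch of that wiring under assumed names: thread-local state and every helper besides TaskDeps, read_allowed, and start_query are illustrative, not the compiler's actual API.

    use std::cell::RefCell;

    struct TaskDeps {
        reads: Vec<u32>,
        read_allowed: bool,
    }

    thread_local! {
        // Stand-in for the per-task dependency state the real compiler tracks.
        static CURRENT_DEPS: RefCell<Option<TaskDeps>> = RefCell::new(None);
    }

    // Stand-in for QueryContext::start_query: install TaskDeps with the requested
    // read_allowed value for the duration of `compute`, then tear it down.
    fn start_query<R>(read_allowed: bool, compute: impl FnOnce() -> R) -> R {
        CURRENT_DEPS.with(|deps| {
            *deps.borrow_mut() = Some(TaskDeps { reads: Vec::new(), read_allowed });
        });
        let result = compute();
        CURRENT_DEPS.with(|deps| *deps.borrow_mut() = None);
        result
    }

    // Stand-in for DepGraph::read_index: panic if the current task may not read.
    fn read_index(dep_node_index: u32) {
        CURRENT_DEPS.with(|deps| {
            if let Some(task_deps) = deps.borrow_mut().as_mut() {
                if !task_deps.read_allowed {
                    panic!("Illegal read of: {:?}", dep_node_index);
                }
                task_deps.reads.push(dep_node_index);
            }
        });
    }

    fn main() {
        // Normal execution (start_query(.., true, ..) in the diff): reads are recorded.
        start_query(true, || read_index(1));

        // Deserialization (start_query(.., false, ..)): any read would panic.
        start_query(false, || {
            // read_index(2); // would panic: "Illegal read of: 2"
        });
    }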