Auto merge of #104940 - cjgillot:query-feed-simple, r=oli-obk
Allow feeding a value into another query's cache

Restricted version of https://github.com/rust-lang/rust/pull/96840.

A query can create new definitions. If those definitions are created after HIR lowering, they do not appear in the initial HIR map, and information about them cannot be provided in the normal pull-based way. In order to make those definitions useful, we allow feeding values as query results for the newly created definitions. The API is as follows:

```rust
let feed = tcx.create_def(<parent def id>, <DefPathData>);
// `feed` is a `TyCtxtFeed<'tcx>`.
// Access the created definition.
let def_id: LocalDefId = feed.def_id();
// Assign `my_query(def_id) := my_value`.
feed.my_query(my_value);
```

This PR keeps the consistency checks introduced by https://github.com/rust-lang/rust/pull/96840, even though they are not reachable yet. This allows extending the behaviour later without forgetting them.

cc `@oli-obk` `@spastorino`
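For context, a query opts into this mechanism with the new `feedable` modifier in the `rustc_queries!` declarations (parsed by the `rustc_macros` changes below). The following sketch is purely illustrative: the query name and body are hypothetical, and, per the assertions added in this PR, a `feedable` query must not also be `anon`, `eval_always`, or `no_hash`:

```rust
rustc_queries! {
    /// Hypothetical query that can also be fed via `TyCtxtFeed`.
    query my_query(key: LocalDefId) -> Ty<'tcx> {
        desc { |tcx| "computing `my_query` for `{}`", tcx.def_path_str(key.to_def_id()) }
        feedable
    }
}
```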
Commit c97b539e40: 11 changed files with 298 additions and 53 deletions
@@ -497,7 +497,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
             self.tcx.hir().def_key(self.local_def_id(node_id)),
         );

-        let def_id = self.tcx.create_def(parent, data);
+        let def_id = self.tcx.create_def(parent, data).def_id();

         debug!("create_def: def_id_to_node_id[{:?}] <-> {:?}", def_id, node_id);
         self.resolver.node_id_to_def_id.insert(node_id, def_id);
@@ -368,10 +368,6 @@ impl Definitions {
         LocalDefId { local_def_index: self.table.allocate(key, def_path_hash) }
     }

-    pub fn iter_local_def_id(&self) -> impl Iterator<Item = LocalDefId> + '_ {
-        self.table.def_path_hashes.indices().map(|local_def_index| LocalDefId { local_def_index })
-    }
-
     #[inline(always)]
     pub fn local_def_path_hash_to_def_id(
         &self,
@@ -389,6 +385,10 @@ impl Definitions {
     pub fn def_path_hash_to_def_index_map(&self) -> &DefPathHashMap {
         &self.table.def_path_hash_to_index
     }
+
+    pub fn num_definitions(&self) -> usize {
+        self.table.def_path_hashes.len()
+    }
 }

 #[derive(Copy, Clone, PartialEq, Debug)]
@@ -114,6 +114,9 @@ struct QueryModifiers {

     /// Always remap the ParamEnv's constness before hashing.
     remap_env_constness: Option<Ident>,
+
+    /// Generate a `feed` method to set the query's value from another query.
+    feedable: Option<Ident>,
 }

 fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
@@ -128,6 +131,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
     let mut depth_limit = None;
     let mut separate_provide_extern = None;
     let mut remap_env_constness = None;
+    let mut feedable = None;

     while !input.is_empty() {
         let modifier: Ident = input.parse()?;
@@ -187,6 +191,8 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
             try_insert!(separate_provide_extern = modifier);
         } else if modifier == "remap_env_constness" {
             try_insert!(remap_env_constness = modifier);
+        } else if modifier == "feedable" {
+            try_insert!(feedable = modifier);
         } else {
             return Err(Error::new(modifier.span(), "unknown query modifier"));
         }
@@ -206,6 +212,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
         depth_limit,
         separate_provide_extern,
         remap_env_constness,
+        feedable,
     })
 }
@@ -296,6 +303,7 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
     let mut query_stream = quote! {};
     let mut query_description_stream = quote! {};
     let mut query_cached_stream = quote! {};
+    let mut feedable_queries = quote! {};

     for query in queries.0 {
         let Query { name, arg, modifiers, .. } = &query;
@@ -350,6 +358,22 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
             [#attribute_stream] fn #name(#arg) #result,
         });

+        if modifiers.feedable.is_some() {
+            assert!(modifiers.anon.is_none(), "Query {name} cannot be both `feedable` and `anon`.");
+            assert!(
+                modifiers.eval_always.is_none(),
+                "Query {name} cannot be both `feedable` and `eval_always`."
+            );
+            assert!(
+                modifiers.no_hash.is_none(),
+                "Query {name} cannot be both `feedable` and `no_hash`."
+            );
+            feedable_queries.extend(quote! {
+                #(#doc_comments)*
+                [#attribute_stream] fn #name(#arg) #result,
+            });
+        }
+
         add_query_desc_cached_impl(&query, &mut query_description_stream, &mut query_cached_stream);
     }
@@ -363,7 +387,11 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
             }
         }
     }
+    macro_rules! rustc_feedable_queries {
+        ( $macro:ident! ) => {
+            $macro!(#feedable_queries);
+        }
+    }
     pub mod descs {
         use super::*;
         #query_description_stream
@@ -30,8 +30,10 @@
 #![feature(core_intrinsics)]
 #![feature(discriminant_kind)]
 #![feature(exhaustive_patterns)]
+#![feature(generators)]
 #![feature(get_mut_unchecked)]
 #![feature(if_let_guard)]
+#![feature(iter_from_generator)]
 #![feature(negative_impls)]
 #![feature(never_type)]
 #![feature(extern_types)]
@@ -53,6 +53,7 @@ use rustc_hir::{
 use rustc_index::vec::{Idx, IndexVec};
 use rustc_macros::HashStable;
 use rustc_middle::mir::FakeReadCause;
+use rustc_query_system::dep_graph::DepNodeIndex;
 use rustc_query_system::ich::StableHashingContext;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use rustc_session::config::{CrateType, OutputFilenames};
@@ -1031,6 +1032,21 @@ pub struct FreeRegionInfo {
     pub is_impl_item: bool,
 }

+/// This struct should only be created by `create_def`.
+#[derive(Copy, Clone)]
+pub struct TyCtxtFeed<'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+    // Do not allow direct access, as downstream code must not mutate this field.
+    def_id: LocalDefId,
+}
+
+impl<'tcx> TyCtxtFeed<'tcx> {
+    #[inline(always)]
+    pub fn def_id(&self) -> LocalDefId {
+        self.def_id
+    }
+}
+
 /// The central data structure of the compiler. It stores references
 /// to the various **arenas** and also houses the results of the
 /// various **compiler queries** that have been performed. See the
@@ -1493,12 +1509,15 @@ impl<'tcx> TyCtxt<'tcx> {
     }

     /// Create a new definition within the incr. comp. engine.
-    pub fn create_def(self, parent: LocalDefId, data: hir::definitions::DefPathData) -> LocalDefId {
+    pub fn create_def(
+        self,
+        parent: LocalDefId,
+        data: hir::definitions::DefPathData,
+    ) -> TyCtxtFeed<'tcx> {
         // This function modifies `self.definitions` using a side-effect.
         // We need to ensure that these side effects are re-run by the incr. comp. engine.
         // Depending on the forever-red node will tell the graph that the calling query
         // needs to be re-evaluated.
-        use rustc_query_system::dep_graph::DepNodeIndex;
         self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);

         // The following call has the side effect of modifying the tables inside `definitions`.
@@ -1515,23 +1534,38 @@ impl<'tcx> TyCtxt<'tcx> {
         // This is fine because:
         // - those queries are `eval_always` so we won't miss their result changing;
         // - this write will have happened before these queries are called.
-        self.definitions.write().create_def(parent, data)
+        let def_id = self.definitions.write().create_def(parent, data);
+
+        TyCtxtFeed { tcx: self, def_id }
     }

     pub fn iter_local_def_id(self) -> impl Iterator<Item = LocalDefId> + 'tcx {
-        // Create a dependency to the crate to be sure we re-execute this when the amount of
+        // Create a dependency to the red node to be sure we re-execute this when the amount of
         // definitions change.
-        self.ensure().hir_crate(());
-        // Leak a read lock once we start iterating on definitions, to prevent adding new ones
-        // while iterating. If some query needs to add definitions, it should be `ensure`d above.
-        let definitions = self.definitions.leak();
-        definitions.iter_local_def_id()
+        self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);
+
+        let definitions = &self.definitions;
+        std::iter::from_generator(|| {
+            let mut i = 0;
+
+            // Recompute the number of definitions each time, because our caller may be creating
+            // new ones.
+            while i < { definitions.read().num_definitions() } {
+                let local_def_index = rustc_span::def_id::DefIndex::from_usize(i);
+                yield LocalDefId { local_def_index };
+                i += 1;
+            }
+
+            // Leak a read lock once we finish iterating on definitions, to prevent adding new ones.
+            definitions.leak();
+        })
     }

     pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable {
         // Create a dependency to the crate to be sure we re-execute this when the amount of
         // definitions change.
-        self.ensure().hir_crate(());
+        self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);

         // Leak a read lock once we start iterating on definitions, to prevent adding new ones
         // while iterating. If some query needs to add definitions, it should be `ensure`d above.
         let definitions = self.definitions.leak();
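The rewritten `iter_local_def_id` above re-reads `num_definitions()` on every loop step, so definitions created while the iterator is being consumed are still yielded; only once iteration finishes is the read lock leaked to freeze the table. A minimal sketch of that pattern on stable Rust, with `std::iter::from_fn` standing in for the unstable `iter_from_generator` and a plain `RefCell<Vec<_>>` standing in for the definitions table (hypothetical data, not rustc code):

```rust
use std::cell::RefCell;

fn main() {
    let defs = RefCell::new(vec!["root"]);
    let mut i = 0;
    let iter = std::iter::from_fn(|| {
        // Re-check the length on every call: new items may have been pushed
        // since the previous step.
        if i < defs.borrow().len() {
            let item = defs.borrow()[i];
            i += 1;
            Some(item)
        } else {
            None
        }
    });

    for (n, item) in iter.enumerate() {
        println!("{n}: {item}");
        if n == 0 {
            // Create a new "definition" while iterating; it is still visited.
            defs.borrow_mut().push("created-during-iteration");
        }
    }
}
```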
@@ -28,6 +28,7 @@ use crate::traits::query::{
 };
 use crate::traits::specialization_graph;
 use crate::traits::{self, ImplSource};
+use crate::ty::context::TyCtxtFeed;
 use crate::ty::fast_reject::SimplifiedType;
 use crate::ty::layout::TyAndLayout;
 use crate::ty::subst::{GenericArg, SubstsRef};
@@ -327,6 +328,46 @@ macro_rules! define_callbacks {
     };
 }

+macro_rules! define_feedable {
+    ($($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+        impl<'tcx> TyCtxtFeed<'tcx> {
+            $($(#[$attr])*
+            #[inline(always)]
+            pub fn $name(self, value: $V) -> query_stored::$name<'tcx> {
+                let key = self.def_id().into_query_param();
+                opt_remap_env_constness!([$($modifiers)*][key]);
+
+                let tcx = self.tcx;
+                let cache = &tcx.query_caches.$name;
+
+                let cached = try_get_cached(tcx, cache, &key, copy);
+
+                match cached {
+                    Ok(old) => {
+                        assert_eq!(
+                            value, old,
+                            "Trying to feed an already recorded value for query {} key={key:?}",
+                            stringify!($name),
+                        );
+                        return old;
+                    }
+                    Err(()) => (),
+                }
+
+                let dep_node = dep_graph::DepNode::construct(tcx, dep_graph::DepKind::$name, &key);
+                let dep_node_index = tcx.dep_graph.with_feed_task(
+                    dep_node,
+                    tcx,
+                    key,
+                    &value,
+                    dep_graph::hash_result,
+                );
+                cache.complete(key, value, dep_node_index)
+            })*
+        }
+    }
+}
+
 // Each of these queries corresponds to a function pointer field in the
 // `Providers` struct for requesting a value of that type, and a method
 // on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
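The feed methods generated by `define_feedable` follow a simple contract: if the cache already holds a value for this key, the fed value must match it (the `assert_eq!` above); otherwise the value is recorded together with a freshly created dep-graph node. A self-contained sketch of that contract, with a plain `HashMap` playing the role of the query cache (hypothetical types, not rustc's API):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

struct FeedableCache {
    map: HashMap<u32, String>,
}

impl FeedableCache {
    /// Record `value` for `key`. Feeding the same key twice is only
    /// allowed when the value is identical.
    fn feed(&mut self, key: u32, value: String) -> &String {
        match self.map.entry(key) {
            Entry::Occupied(old) => {
                assert_eq!(
                    old.get(),
                    &value,
                    "Trying to feed an already recorded value for key={key:?}",
                );
                old.into_mut()
            }
            Entry::Vacant(slot) => slot.insert(value),
        }
    }
}

fn main() {
    let mut cache = FeedableCache { map: HashMap::new() };
    cache.feed(1, "Ty(i32)".to_string());
    // Re-feeding the same value is a no-op; a different value would panic.
    cache.feed(1, "Ty(i32)".to_string());
}
```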
@@ -340,6 +381,7 @@ macro_rules! define_callbacks {
 // as they will raise an fatal error on query cycles instead.

 rustc_query_append! { define_callbacks! }
+rustc_feedable_queries! { define_feedable! }

 mod sealed {
     use super::{DefId, LocalDefId, OwnerId};
@@ -252,6 +252,18 @@ macro_rules! depth_limit {
     };
 }

+macro_rules! feedable {
+    ([]) => {{
+        false
+    }};
+    ([(feedable) $($rest:tt)*]) => {{
+        true
+    }};
+    ([$other:tt $($modifiers:tt)*]) => {
+        feedable!([$($modifiers)*])
+    };
+}
+
 macro_rules! hash_result {
     ([]) => {{
         Some(dep_graph::hash_result)
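The `feedable!` helper added above is a standard token-tree muncher: it inspects the bracketed modifier list one token tree at a time and expands to `true` exactly when `(feedable)` occurs. A standalone sketch of the same pattern, with made-up modifier names:

```rust
macro_rules! is_feedable {
    ([]) => { false };
    ([(feedable) $($rest:tt)*]) => { true };
    ([$other:tt $($modifiers:tt)*]) => { is_feedable!([$($modifiers)*]) };
}

fn main() {
    // The recursive arm strips one modifier per step until `(feedable)`
    // is at the front or the list is empty.
    assert!(is_feedable!([(anon) (feedable) (cache_on_disk)]));
    assert!(!is_feedable!([(anon) (eval_always)]));
}
```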
@@ -309,7 +321,7 @@ pub(crate) fn create_query_frame<
         ty::print::with_forced_impl_filename_line!(do_describe(tcx.tcx, key))
     );
     let description =
-        if tcx.sess.verbose() { format!("{} [{}]", description, name) } else { description };
+        if tcx.sess.verbose() { format!("{} [{:?}]", description, name) } else { description };
     let span = if kind == dep_graph::DepKind::def_span {
         // The `def_span` query is used to calculate `default_span`,
         // so exit to avoid infinite recursion.
@@ -491,6 +503,7 @@ macro_rules! define_queries {
             anon: is_anon!([$($modifiers)*]),
             eval_always: is_eval_always!([$($modifiers)*]),
             depth_limit: depth_limit!([$($modifiers)*]),
+            feedable: feedable!([$($modifiers)*]),
             dep_kind: dep_graph::DepKind::$name,
             hash_result: hash_result!([$($modifiers)*]),
             handle_cycle_error: handle_cycle_error!([$($modifiers)*]),
@@ -489,6 +489,91 @@ impl<K: DepKind> DepGraph<K> {
         }
     }

+    /// Create a node when we force-feed a value into the query cache.
+    /// This is used to remove cycles during type-checking const generic parameters.
+    ///
+    /// As usual in the query system, we consider the current state of the calling query
+    /// only depends on the list of dependencies up to now. As a consequence, the value
+    /// that this query gives us can only depend on those dependencies too. Therefore,
+    /// it is sound to use the current dependency set for the created node.
+    ///
+    /// During replay, the order of the nodes is relevant in the dependency graph.
+    /// So the unchanged replay will mark the caller query before trying to mark this one.
+    /// If there is a change to report, the caller query will be re-executed before this one.
+    ///
+    /// FIXME: If the code is changed enough for this node to be marked before requiring the
+    /// caller's node, we suppose that those changes will be enough to mark this node red and
+    /// force a recomputation using the "normal" way.
+    pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
+        &self,
+        node: DepNode<K>,
+        cx: Ctxt,
+        key: A,
+        result: &R,
+        hash_result: fn(&mut StableHashingContext<'_>, &R) -> Fingerprint,
+    ) -> DepNodeIndex {
+        if let Some(data) = self.data.as_ref() {
+            // The caller query has more dependencies than the node we are creating. We may
+            // encounter a case where this created node is marked as green, but the caller query is
+            // subsequently marked as red or recomputed. In this case, we will end up feeding a
+            // value to an existing node.
+            //
+            // For sanity, we still check that the loaded stable hash and the new one match.
+            if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
+                let _current_fingerprint =
+                    crate::query::incremental_verify_ich(cx, result, &node, Some(hash_result));
+
+                #[cfg(debug_assertions)]
+                data.current.record_edge(dep_node_index, node, _current_fingerprint);
+
+                return dep_node_index;
+            }
+
+            let mut edges = SmallVec::new();
+            K::read_deps(|task_deps| match task_deps {
+                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
+                TaskDepsRef::Ignore | TaskDepsRef::Forbid => {
+                    panic!("Cannot summarize when dependencies are not recorded.")
+                }
+            });
+
+            let hashing_timer = cx.profiler().incr_result_hashing();
+            let current_fingerprint =
+                cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result));
+
+            let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks;
+
+            // Intern the new `DepNode` with the dependencies up-to-now.
+            let (dep_node_index, prev_and_color) = data.current.intern_node(
+                cx.profiler(),
+                &data.previous,
+                node,
+                edges,
+                Some(current_fingerprint),
+                print_status,
+            );
+
+            hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+            if let Some((prev_index, color)) = prev_and_color {
+                debug_assert!(
+                    data.colors.get(prev_index).is_none(),
+                    "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}",
+                );
+
+                data.colors.insert(prev_index, color);
+            }
+
+            dep_node_index
+        } else {
+            // Incremental compilation is turned off. We just execute the task
+            // without tracking. We still provide a dep-node index that uniquely
+            // identifies the task so that we have a cheap way of referring to
+            // the query for self-profiling.
+            self.next_virtual_depnode_index()
+        }
+    }
+
     #[inline]
     pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
         self.dep_node_index_of_opt(dep_node).unwrap()
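The doc comment on `with_feed_task` argues that the fed node may soundly reuse the dependencies the caller has recorded so far, since the fed value could only have depended on those. A toy illustration of that snapshot idea (plain strings instead of real dep nodes; not rustc code):

```rust
fn main() {
    // Dependencies the calling query records as it executes.
    let mut caller_deps: Vec<&str> = Vec::new();

    caller_deps.push("hir(parent)");         // read before feeding
    caller_deps.push("generics_of(parent)"); // read before feeding

    // Feeding snapshots the dependency set accumulated up to this point.
    let fed_node_deps = caller_deps.clone();

    caller_deps.push("type_of(sibling)");    // read after feeding; not a dep of the fed node

    assert_eq!(fed_node_deps, ["hir(parent)", "generics_of(parent)"]);
    println!("fed node deps: {fed_node_deps:?}");
}
```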
@@ -916,6 +1001,11 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
     new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

+    /// This is used to verify that fingerprints do not change between the creation of a node
+    /// and its recomputation.
+    #[cfg(debug_assertions)]
+    fingerprints: Lock<FxHashMap<DepNode<K>, Fingerprint>>,
+
     /// Used to trap when a specific edge is added to the graph.
     /// This is used for debug purposes and is only active with `debug_assertions`.
     #[cfg(debug_assertions)]
@@ -999,6 +1089,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
             anon_id_seed,
             #[cfg(debug_assertions)]
             forbidden_edge,
+            #[cfg(debug_assertions)]
+            fingerprints: Lock::new(Default::default()),
             total_read_count: AtomicU64::new(0),
             total_duplicate_read_count: AtomicU64::new(0),
             node_intern_event_id,
@@ -1006,10 +1098,18 @@ impl<K: DepKind> CurrentDepGraph<K> {
     }

     #[cfg(debug_assertions)]
-    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
+    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
         if let Some(forbidden_edge) = &self.forbidden_edge {
             forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
         }
+        match self.fingerprints.lock().entry(key) {
+            Entry::Vacant(v) => {
+                v.insert(fingerprint);
+            }
+            Entry::Occupied(o) => {
+                assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key);
+            }
+        }
     }

     /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
|
|||
edges: EdgesVec,
|
||||
current_fingerprint: Fingerprint,
|
||||
) -> DepNodeIndex {
|
||||
match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
|
||||
let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
|
||||
{
|
||||
Entry::Occupied(entry) => *entry.get(),
|
||||
Entry::Vacant(entry) => {
|
||||
let dep_node_index =
|
||||
self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
|
||||
entry.insert(dep_node_index);
|
||||
#[cfg(debug_assertions)]
|
||||
self.record_edge(dep_node_index, key);
|
||||
dep_node_index
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
self.record_edge(dep_node_index, key, current_fingerprint);
|
||||
|
||||
dep_node_index
|
||||
}
|
||||
|
||||
fn intern_node(
|
||||
|
@@ -1072,7 +1176,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
             };

             #[cfg(debug_assertions)]
-            self.record_edge(dep_node_index, key);
+            self.record_edge(dep_node_index, key, fingerprint);
             (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
         } else {
             if print_status {
@@ -1094,7 +1198,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
             };

             #[cfg(debug_assertions)]
-            self.record_edge(dep_node_index, key);
+            self.record_edge(dep_node_index, key, fingerprint);
             (dep_node_index, Some((prev_index, DepNodeColor::Red)))
         }
     } else {
@@ -1119,7 +1223,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
             };

             #[cfg(debug_assertions)]
-            self.record_edge(dep_node_index, key);
+            self.record_edge(dep_node_index, key, Fingerprint::ZERO);
             (dep_node_index, Some((prev_index, DepNodeColor::Red)))
         }
     } else {
@@ -1150,19 +1254,16 @@ impl<K: DepKind> CurrentDepGraph<K> {
             Some(dep_node_index) => dep_node_index,
             None => {
                 let key = prev_graph.index_to_node(prev_index);
-                let dep_node_index = self.encoder.borrow().send(
-                    profiler,
-                    key,
-                    prev_graph.fingerprint_by_index(prev_index),
-                    prev_graph
-                        .edge_targets_from(prev_index)
-                        .iter()
-                        .map(|i| prev_index_to_index[*i].unwrap())
-                        .collect(),
-                );
+                let edges = prev_graph
+                    .edge_targets_from(prev_index)
+                    .iter()
+                    .map(|i| prev_index_to_index[*i].unwrap())
+                    .collect();
+                let fingerprint = prev_graph.fingerprint_by_index(prev_index);
+                let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
                 prev_index_to_index[prev_index] = Some(dep_node_index);
                 #[cfg(debug_assertions)]
-                self.record_edge(dep_node_index, key);
+                self.record_edge(dep_node_index, key, fingerprint);
                 dep_node_index
             }
         }
@@ -117,6 +117,8 @@ where
         let mut lock = self.cache.get_shard_by_value(&key).lock();
         #[cfg(not(parallel_compiler))]
         let mut lock = self.cache.lock();
+        // We may be overwriting another value. This is all right, since the dep-graph
+        // will check that the fingerprint matches.
         lock.insert(key, (value.clone(), index));
         value
     }
@@ -202,6 +204,8 @@ where
         let mut lock = self.cache.get_shard_by_value(&key).lock();
         #[cfg(not(parallel_compiler))]
         let mut lock = self.cache.lock();
+        // We may be overwriting another value. This is all right, since the dep-graph
+        // will check that the fingerprint matches.
         lock.insert(key, value);
         &value.0
     }
@@ -15,8 +15,8 @@ pub trait QueryConfig<Qcx: QueryContext> {
     const NAME: &'static str;

     type Key: Eq + Hash + Clone + Debug;
-    type Value;
-    type Stored: Clone;
+    type Value: Debug;
+    type Stored: Debug + Clone + std::borrow::Borrow<Self::Value>;

     type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
@@ -45,6 +45,7 @@ pub struct QueryVTable<Qcx: QueryContext, K, V> {
     pub dep_kind: Qcx::DepKind,
     pub eval_always: bool,
     pub depth_limit: bool,
+    pub feedable: bool,

     pub compute: fn(Qcx::DepContext, K) -> V,
     pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -3,6 +3,7 @@
 //! manage the caches, and so forth.

 use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
+use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 use crate::query::config::QueryVTable;
 use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
@@ -19,6 +20,7 @@ use rustc_data_structures::sync::Lock;
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_session::Session;
 use rustc_span::{Span, DUMMY_SP};
+use std::borrow::Borrow;
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
@@ -369,11 +371,26 @@ where
     C: QueryCache,
     C::Key: Clone + DepNodeParams<Qcx::DepContext>,
     C::Value: Value<Qcx::DepContext>,
+    C::Stored: Debug + std::borrow::Borrow<C::Value>,
     Qcx: QueryContext,
 {
     match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) {
         TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job(qcx, key, dep_node, query, job.id);
+            let (result, dep_node_index) = execute_job(qcx, key.clone(), dep_node, query, job.id);
+            if query.feedable {
+                // We may have put a value inside the cache from inside the execution.
+                // Verify that it has the same hash as what we have now, to ensure consistency.
+                let _ = cache.lookup(&key, |cached_result, _| {
+                    let hasher = query.hash_result.expect("feedable forbids no_hash");
+                    let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow()));
+                    let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result));
+                    debug_assert_eq!(
+                        old_hash, new_hash,
+                        "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}",
+                        query.dep_kind, key, result, cached_result,
+                    );
+                });
+            }
             let result = job.complete(cache, result, dep_node_index);
             (result, Some(dep_node_index))
         }
|
|||
if std::intrinsics::unlikely(
|
||||
try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
|
||||
) {
|
||||
incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query);
|
||||
incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result);
|
||||
}
|
||||
|
||||
return Some((result, dep_node_index));
|
||||
|
@@ -558,39 +575,42 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
     // an ICE by this check
-    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query);
+    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result);

     Some((result, dep_node_index))
 }

-#[instrument(skip(qcx, result, query), level = "debug")]
-fn incremental_verify_ich<Qcx, K, V: Debug>(
-    qcx: Qcx::DepContext,
+#[instrument(skip(tcx, result, hash_result), level = "debug")]
+pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
+    tcx: Tcx,
     result: &V,
-    dep_node: &DepNode<Qcx::DepKind>,
-    query: &QueryVTable<Qcx, K, V>,
-) where
-    Qcx: QueryContext,
+    dep_node: &DepNode<Tcx::DepKind>,
+    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
+) -> Fingerprint
+where
+    Tcx: DepContext,
 {
     assert!(
-        qcx.dep_graph().is_green(dep_node),
+        tcx.dep_graph().is_green(dep_node),
         "fingerprint for green query instance not loaded from cache: {:?}",
         dep_node,
     );

-    let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
-        qcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
+    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
+        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
     });

-    let old_hash = qcx.dep_graph().prev_fingerprint_of(dep_node);
+    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);

     if Some(new_hash) != old_hash {
         incremental_verify_ich_failed(
-            qcx.sess(),
+            tcx.sess(),
             DebugArg::from(&dep_node),
             DebugArg::from(&result),
         );
     }
+
+    new_hash
 }

 // This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
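The refactored `incremental_verify_ich` now takes the `hash_result` function directly and returns the freshly computed fingerprint, so `with_feed_task` can reuse it. Its core check: recompute the stable hash of the result and compare it with the hash recorded in the previous session. A minimal sketch of that check, with `DefaultHasher` standing in for rustc's `Fingerprint` machinery (hypothetical helper names):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn fingerprint<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

/// Recompute the hash of `result` and compare it to the previously
/// recorded one; a mismatch indicates unstable fingerprints.
fn verify_ich(result: &str, old_hash: Option<u64>) -> u64 {
    let new_hash = fingerprint(&result);
    if Some(new_hash) != old_hash {
        panic!("found unstable fingerprints for {result:?}");
    }
    new_hash
}

fn main() {
    let recorded = fingerprint(&"computed value");
    assert_eq!(verify_ich("computed value", Some(recorded)), recorded);
}
```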