Auto merge of #100748 - SparrowLii:query_depth, r=cjgillot
add `depth_limit` in `QueryVTable` to avoid entering a new tcx in `layout_of`

Fixes #49735
Updates #48685

The `layout_of` query needs to check whether it overflows the depth limit, and the current implementation creates a new `ImplicitCtxt` inside `layout_of` to do so. However, `start_query` already creates a new `ImplicitCtxt`, so we can perform the depth-limit check in `start_query` instead. We could tell whether a query needs this check simply by whether its `to_debug_str` returns `layout_of`, but adding a `depth_limit` field to `QueryVTable` seems more elegant and more scalable.
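For orientation, the new control flow can be modelled by the self-contained sketch below. These are simplified stand-ins, not rustc's actual types: `RECURSION_LIMIT`, the thread-local `QUERY_DEPTH`, and the panic message are illustrative only. The point it demonstrates is that each query's vtable carries a `depth_limit` flag, and `start_query` checks and deepens a per-context counter only for flagged queries.

```rust
use std::cell::Cell;

// Simplified stand-in for rustc's `QueryVTable`: the new field records whether
// the query participates in the depth-limit check.
struct QueryVTable {
    depth_limit: bool,
}

thread_local! {
    // Stand-in for `ImplicitCtxt::query_depth`.
    static QUERY_DEPTH: Cell<usize> = Cell::new(0);
}

// Stand-in for the limit returned by `tcx.recursion_limit()`.
const RECURSION_LIMIT: usize = 128;

// Stand-in for `start_query`: check the limit before running the query and
// deepen the counter only for depth-limited queries (`depth_limit as usize`).
fn start_query<R>(vtable: &QueryVTable, compute: impl FnOnce() -> R) -> R {
    QUERY_DEPTH.with(|depth| {
        if vtable.depth_limit && depth.get() > RECURSION_LIMIT {
            // rustc calls `depth_limit_error()` here, which aborts with a fatal error.
            panic!("queries overflow the depth limit!");
        }
        depth.set(depth.get() + vtable.depth_limit as usize);
        let result = compute();
        // rustc restores the previous depth by dropping the scoped `ImplicitCtxt`;
        // this sketch undoes the increment explicitly.
        depth.set(depth.get() - vtable.depth_limit as usize);
        result
    })
}

fn main() {
    let layout_of = QueryVTable { depth_limit: true };
    // A query that re-enters `start_query` recursively would trip the check
    // once its nesting exceeds RECURSION_LIMIT.
    let layout = start_query(&layout_of, || "computed layout");
    println!("{layout}");
}
```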
commit cfb5ae26a4
8 changed files with 82 additions and 53 deletions
@@ -106,9 +106,12 @@ struct QueryModifiers {
     /// Generate a dep node based on the dependencies of the query
     anon: Option<Ident>,
 
-    // Always evaluate the query, ignoring its dependencies
+    /// Always evaluate the query, ignoring its dependencies
     eval_always: Option<Ident>,
 
+    /// Whether the query has a call depth limit
+    depth_limit: Option<Ident>,
+
     /// Use a separate query provider for local and extern crates
     separate_provide_extern: Option<Ident>,
 
@@ -126,6 +129,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
     let mut no_hash = None;
     let mut anon = None;
     let mut eval_always = None;
+    let mut depth_limit = None;
     let mut separate_provide_extern = None;
     let mut remap_env_constness = None;
 
@@ -194,6 +198,8 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
             try_insert!(anon = modifier);
         } else if modifier == "eval_always" {
             try_insert!(eval_always = modifier);
+        } else if modifier == "depth_limit" {
+            try_insert!(depth_limit = modifier);
         } else if modifier == "separate_provide_extern" {
             try_insert!(separate_provide_extern = modifier);
         } else if modifier == "remap_env_constness" {
@@ -215,6 +221,7 @@ fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
         no_hash,
         anon,
         eval_always,
+        depth_limit,
         separate_provide_extern,
         remap_env_constness,
     })
@@ -365,6 +372,10 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
         if let Some(eval_always) = &modifiers.eval_always {
             attributes.push(quote! { (#eval_always) });
         };
+        // Pass on the depth_limit modifier
+        if let Some(depth_limit) = &modifiers.depth_limit {
+            attributes.push(quote! { (#depth_limit) });
+        };
         // Pass on the separate_provide_extern modifier
         if let Some(separate_provide_extern) = &modifiers.separate_provide_extern {
             attributes.push(quote! { (#separate_provide_extern) });
@@ -1309,6 +1309,7 @@ rustc_queries! {
     query layout_of(
         key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
     ) -> Result<ty::layout::TyAndLayout<'tcx>, ty::layout::LayoutError<'tcx>> {
+        depth_limit
         desc { "computing layout of `{}`", key.value }
         remap_env_constness
     }
@@ -1857,8 +1857,8 @@ pub mod tls {
         /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
         pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
 
-        /// Used to prevent layout from recursing too deeply.
-        pub layout_depth: usize,
+        /// Used to prevent queries from calling too deeply.
+        pub query_depth: usize,
 
         /// The current dep graph task. This is used to add dependencies to queries
         /// when executing them.
@@ -1872,7 +1872,7 @@ pub mod tls {
                 tcx,
                 query: None,
                 diagnostics: None,
-                layout_depth: 0,
+                query_depth: 0,
                 task_deps: TaskDepsRef::Ignore,
             }
         }
@@ -229,18 +229,9 @@ fn layout_of<'tcx>(
     tcx: TyCtxt<'tcx>,
     query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
-    ty::tls::with_related_context(tcx, move |icx| {
     let (param_env, ty) = query.into_parts();
     debug!(?ty);
 
-    if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
-        tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
-    }
-
-    // Update the ImplicitCtxt to increase the layout_depth
-    let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
-
-    ty::tls::enter_context(&icx, |_| {
     let param_env = param_env.with_reveal_all_normalized(tcx);
     let unnormalized_ty = ty;
 
@@ -270,8 +261,6 @@ fn layout_of<'tcx>(
     sanity_check_layout(&cx, &layout);
 
     Ok(layout)
-        })
-    })
 }
 
 pub struct LayoutCx<'tcx, C> {
@@ -96,6 +96,7 @@ impl QueryContext for QueryCtxt<'_> {
     fn start_query<R>(
         &self,
         token: QueryJobId,
+        depth_limit: bool,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R {
@@ -103,12 +104,16 @@ impl QueryContext for QueryCtxt<'_> {
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
         // when accessing the `ImplicitCtxt`.
         tls::with_related_context(**self, move |current_icx| {
+            if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) {
+                self.depth_limit_error();
+            }
+
             // Update the `ImplicitCtxt` to point to our new query job.
             let new_icx = ImplicitCtxt {
                 tcx: **self,
                 query: Some(token),
                 diagnostics,
-                layout_depth: current_icx.layout_depth,
+                query_depth: current_icx.query_depth + depth_limit as usize,
                 task_deps: current_icx.task_deps,
             };
 
@@ -210,6 +215,18 @@ macro_rules! is_eval_always {
     };
 }
 
+macro_rules! depth_limit {
+    ([]) => {{
+        false
+    }};
+    ([(depth_limit) $($rest:tt)*]) => {{
+        true
+    }};
+    ([$other:tt $($modifiers:tt)*]) => {
+        depth_limit!([$($modifiers)*])
+    };
+}
+
 macro_rules! hash_result {
     ([]) => {{
         Some(dep_graph::hash_result)
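The `depth_limit!` macro added above is an incremental token muncher: it yields `true` as soon as `(depth_limit)` heads the modifier list, otherwise drops one modifier and recurses, bottoming out at `false` for the empty list. A standalone sketch of the same pattern follows; the modifier lists passed in `main` are illustrative, not taken from the real query definitions.

```rust
// Standalone copy of the muncher pattern from the hunk above; the modifier
// lists passed in `main` are illustrative.
macro_rules! depth_limit {
    ([]) => {{
        false
    }};
    ([(depth_limit) $($rest:tt)*]) => {{
        true
    }};
    ([$other:tt $($modifiers:tt)*]) => {
        depth_limit!([$($modifiers)*])
    };
}

fn main() {
    // No modifiers: bottoms out at the empty-list arm.
    assert_eq!(depth_limit!([]), false);
    // `(depth_limit)` is found after skipping `(eval_always)`.
    assert_eq!(depth_limit!([(eval_always) (depth_limit)]), true);
    println!("ok");
}
```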
@@ -349,6 +366,7 @@ macro_rules! define_queries {
                 QueryVTable {
                     anon: is_anon!([$($modifiers)*]),
                     eval_always: is_eval_always!([$($modifiers)*]),
+                    depth_limit: depth_limit!([$($modifiers)*]),
                     dep_kind: dep_graph::DepKind::$name,
                     hash_result: hash_result!([$($modifiers)*]),
                     handle_cycle_error: |tcx, mut error| handle_cycle_error!([$($modifiers)*][tcx, error]),
@@ -23,6 +23,7 @@ pub struct QueryVTable<CTX: QueryContext, K, V> {
     pub anon: bool,
     pub dep_kind: CTX::DepKind,
     pub eval_always: bool,
+    pub depth_limit: bool,
     pub cache_on_disk: bool,
 
     pub compute: fn(CTX::DepContext, K) -> V,
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryConfig, QueryDescription, QueryVTable};
 
-use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+use crate::dep_graph::{DepContext, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
 
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
@@ -119,7 +119,12 @@ pub trait QueryContext: HasDepContext {
     fn start_query<R>(
         &self,
         token: QueryJobId,
+        depth_limit: bool,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: impl FnOnce() -> R,
     ) -> R;
+
+    fn depth_limit_error(&self) {
+        self.dep_context().sess().fatal("queries overflow the depth limit!");
+    }
 }
@@ -381,7 +381,9 @@ where
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+        let result = tcx.start_query(job_id, query.depth_limit, None, || {
+            query.compute(*tcx.dep_context(), key)
+        });
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -394,7 +396,7 @@ where
 
     // The diagnostics for this query will be promoted to the current session during
     // `try_mark_green()`, so we can ignore them here.
-    if let Some(ret) = tcx.start_query(job_id, None, || {
+    if let Some(ret) = tcx.start_query(job_id, false, None, || {
         try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
     }) {
         return ret;
@@ -404,7 +406,8 @@ where
     let prof_timer = tcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());
 
-    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+    let (result, dep_node_index) =
+        tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
             if query.anon {
                 return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                     query.compute(*tcx.dep_context(), key)
@@ -412,7 +415,8 @@ where
             }
 
             // `to_dep_node` is expensive for some `DepKind`s.
-            let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+            let dep_node =
+                dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
 
             dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
         });