
Auto merge of #108375 - Zoxc:query-inline, r=cjgillot

Add inlining attributes for query system functions

These only have a single caller, but don't always get inlined.
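For background: without an inline hint, a non-generic function is normally not considered for inlining across crate boundaries (absent LTO), and even a generic function with a single caller can be skipped by the inliner's size heuristics; `#[inline(always)]` overrides those heuristics. A minimal sketch with made-up types, not code from this commit:

// Hypothetical helper crate, compiled separately from its only caller.
pub struct Counter {
    value: u64,
}

impl Counter {
    // Without `#[inline]`, a non-generic function is normally not inlined
    // across the crate boundary, even if this is its only call site.
    #[inline]
    pub fn bump(&mut self) -> u64 {
        self.value += 1;
        self.value
    }

    // `#[inline(always)]` additionally tells the inliner to ignore its size
    // heuristics; this commit uses it for hot single-caller entry points.
    #[inline(always)]
    pub fn get(&self) -> u64 {
        self.value
    }
}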
bors 2023-02-26 09:44:54 +00:00
commit 43ee4d15bf
4 changed files with 24 additions and 22 deletions


@@ -1012,6 +1012,7 @@ impl<'tcx> TyCtxt<'tcx> {
     /// Note that this is *untracked* and should only be used within the query
     /// system if the result is otherwise tracked through queries
+    #[inline]
     pub fn cstore_untracked(self) -> MappedReadGuard<'tcx, CrateStoreDyn> {
         ReadGuard::map(self.untracked.cstore.read(), |c| &**c)
     }


@@ -124,9 +124,7 @@ impl QueryContext for QueryCtxt<'_> {
             };
             // Use the `ImplicitCtxt` while we execute the query.
-            tls::enter_context(&new_icx, || {
-                rustc_data_structures::stack::ensure_sufficient_stack(compute)
-            })
+            tls::enter_context(&new_icx, compute)
         })
     }
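The stack-growth wrapper removed here is not lost: later hunks in this commit import `ensure_sufficient_stack` and apply it around the `try_execute_query` calls in `get_query` and `force_query` instead. For context, a minimal sketch of the grow-the-stack-on-demand pattern that helper follows, built directly on the `stacker` crate (the constants and the helper name below are illustrative, not the rustc implementation):

// Illustrative sketch of an `ensure_sufficient_stack`-style helper.
fn with_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
    // Assumed values; rustc uses its own red-zone and growth constants.
    const RED_ZONE: usize = 100 * 1024; // start worrying below ~100 KiB of stack
    const STACK_PER_RECURSION: usize = 1024 * 1024; // grow in 1 MiB segments

    // Runs `f` on the current stack if at least RED_ZONE bytes remain,
    // otherwise allocates a new stack segment and runs `f` there.
    stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
}

// Deeply recursive callers re-check the remaining stack at each level.
fn count_down(n: u64) -> u64 {
    with_sufficient_stack(|| if n == 0 { 0 } else { 1 + count_down(n - 1) })
}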


@@ -279,6 +279,7 @@ impl<K: DepKind> DepGraph<K> {
     /// `arg` parameter.
     ///
     /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
+    #[inline(always)]
     pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
@@ -298,6 +299,7 @@
         }
     }

+    #[inline(always)]
     fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
         &self,
         key: DepNode<K>,
@@ -598,6 +600,7 @@
         self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
     }

+    #[inline]
     pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
         self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
     }
@@ -1127,6 +1130,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
     /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
     /// Assumes that this is a node that has no equivalent in the previous dep-graph.
+    #[inline(always)]
     fn intern_new_node(
         &self,
         profiler: &SelfProfilerRef,
@@ -1365,6 +1369,7 @@ impl DepNodeColorMap {
         }
     }

+    #[inline]
     fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
         self.values[index].store(
             match color {


@@ -15,6 +15,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::TimingGuard;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_session::Session;
@@ -188,12 +189,12 @@ where
         #[cfg(not(parallel_compiler))]
         let mut state_lock = state.active.lock();
         let lock = &mut *state_lock;
+        let current_job_id = qcx.current_query_job();

         match lock.entry(key) {
             Entry::Vacant(entry) => {
                 let id = qcx.next_job_id();
-                let job = qcx.current_query_job();
-                let job = QueryJob::new(id, span, job);
+                let job = QueryJob::new(id, span, current_job_id);
                 let key = *entry.key();
                 entry.insert(QueryResult::Started(job));
@@ -212,7 +213,7 @@
                     // so we just return the error.
                     return TryGetJob::Cycle(id.find_cycle_in_stack(
                         qcx.try_collect_active_jobs().unwrap(),
-                        &qcx.current_query_job(),
+                        &current_job_id,
                         span,
                     ));
                 }
@@ -230,7 +231,7 @@
                 // With parallel queries we might just have to wait on some other
                 // thread.
-                let result = latch.wait_on(qcx.current_query_job(), span);
+                let result = latch.wait_on(current_job_id, span);

                 match result {
                     Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
@@ -346,10 +347,9 @@
     }
 }

+#[inline(never)]
 fn try_execute_query<Q, Qcx>(
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
-    cache: &Q::Cache,
     span: Span,
     key: Q::Key,
     dep_node: Option<DepNode<Qcx::DepKind>>,
@@ -358,9 +358,11 @@ where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
+    let state = Q::query_state(qcx);
     match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
         TryGetJob::NotYetStarted(job) => {
             let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
+            let cache = Q::query_cache(qcx);
             if Q::FEEDABLE {
                 // We should not compute queries that also got a value via feeding.
                 // This can't happen, as query feeding adds the very dependencies to the fed query
@@ -381,7 +383,7 @@
         }
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted(query_blocked_prof_timer) => {
-            let Some((v, index)) = cache.lookup(&key) else {
+            let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
                 panic!("value must be in cache after waiting")
             };
@@ -393,6 +395,7 @@
     }
 }

+#[inline(always)]
 fn execute_job<Q, Qcx>(
     qcx: Qcx,
     key: Q::Key,
@@ -478,6 +481,7 @@
     (result, dep_node_index)
 }

+#[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
@@ -568,6 +572,7 @@
     Some((result, dep_node_index))
 }

+#[inline]
 #[instrument(skip(tcx, result, hash_result), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
     tcx: Tcx,
@@ -722,6 +727,7 @@ pub enum QueryMode {
     Ensure,
 }

+#[inline(always)]
 pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
 where
     D: DepKind,
@@ -739,14 +745,8 @@ where
         None
     };

-    let (result, dep_node_index) = try_execute_query::<Q, Qcx>(
-        qcx,
-        Q::query_state(qcx),
-        Q::query_cache(qcx),
-        span,
-        key,
-        dep_node,
-    );
+    let (result, dep_node_index) =
+        ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
     if let Some(dep_node_index) = dep_node_index {
         qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
@@ -762,14 +762,12 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    let cache = Q::query_cache(qcx);
-    if let Some((_, index)) = cache.lookup(&key) {
+    if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }

-    let state = Q::query_state(qcx);
     debug_assert!(!Q::ANON);
-    try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node));
+    ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
 }