1
Fork 0

make the provisional cache slightly less broken

This commit is contained in:
lcnr 2023-08-10 12:35:34 +02:00
parent abc910be6f
commit d558353f28
4 changed files with 59 additions and 55 deletions

View file

@ -19,21 +19,25 @@ rustc_index::newtype_index! {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub(super) struct ProvisionalEntry<'tcx> { pub(super) struct ProvisionalEntry<'tcx> {
// In case we have a coinductive cycle, this is the /// In case we have a coinductive cycle, this is the
// currently least restrictive result of this goal. /// current provisional result of this goal.
pub(super) response: QueryResult<'tcx>, ///
// In case of a cycle, the position of deepest stack entry involved /// This starts out as `None` for all goals and gets to some
// in that cycle. This is monotonically decreasing in the stack as all /// when the goal gets popped from the stack or we rerun evaluation
// elements between the current stack element in the deepest stack entry /// for this goal to reach a fixpoint.
// involved have to also be involved in that cycle. pub(super) response: Option<QueryResult<'tcx>>,
// /// In case of a cycle, the position of deepest stack entry involved
// We can only move entries to the global cache once we're completely done /// This starts out as `None` for all goals and gets to some
// with the cycle. If this entry has not been involved in a cycle, /// elements between the current stack element in the deepest stack entry
// this is just its own depth. /// involved have to also be involved in that cycle.
///
/// We can only move entries to the global cache once we're completely done
/// with the cycle. If this entry has not been involved in a cycle,
/// this is just its own depth.
pub(super) depth: StackDepth, pub(super) depth: StackDepth,
// The goal for this entry. Should always be equal to the corresponding goal /// The goal for this entry. Should always be equal to the corresponding goal
// in the lookup table. /// in the lookup table.
pub(super) input: CanonicalInput<'tcx>, pub(super) input: CanonicalInput<'tcx>,
} }
@ -92,7 +96,7 @@ impl<'tcx> ProvisionalCache<'tcx> {
self.entries[entry_index].depth self.entries[entry_index].depth
} }
pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> QueryResult<'tcx> { pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> Option<QueryResult<'tcx>> {
self.entries[entry_index].response self.entries[entry_index].response
} }
} }

View file

@ -13,7 +13,7 @@ use rustc_middle::traits::solve::CacheData;
use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult}; use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
use rustc_middle::ty::TyCtxt; use rustc_middle::ty::TyCtxt;
use rustc_session::Limit; use rustc_session::Limit;
use std::{collections::hash_map::Entry, mem}; use std::collections::hash_map::Entry;
rustc_index::newtype_index! { rustc_index::newtype_index! {
pub struct StackDepth {} pub struct StackDepth {}
@ -216,8 +216,8 @@ impl<'tcx> SearchGraph<'tcx> {
cycle_participants: Default::default(), cycle_participants: Default::default(),
}; };
assert_eq!(self.stack.push(entry), depth); assert_eq!(self.stack.push(entry), depth);
let response = Self::response_no_constraints(tcx, input, Certainty::Yes); let entry_index =
let entry_index = cache.entries.push(ProvisionalEntry { response, depth, input }); cache.entries.push(ProvisionalEntry { response: None, depth, input });
v.insert(entry_index); v.insert(entry_index);
} }
// We have a nested goal which relies on a goal `root` deeper in the stack. // We have a nested goal which relies on a goal `root` deeper in the stack.
@ -243,23 +243,31 @@ impl<'tcx> SearchGraph<'tcx> {
root.cycle_participants.insert(e.input); root.cycle_participants.insert(e.input);
} }
// NOTE: The goals on the stack aren't the only goals involved in this cycle. // If we're in a cycle, we have to retry proving the current goal
// We can also depend on goals which aren't part of the stack but coinductively
// depend on the stack themselves. We already checked whether all the goals
// between these goals and their root on the stack. This means that as long as
// each goal in a cycle is checked for coinductivity by itself, simply checking
// the stack is enough.
if self.stack.raw[stack_depth.index()..]
.iter()
.all(|g| g.input.value.goal.predicate.is_coinductive(tcx))
{
// If we're in a coinductive cycle, we have to retry proving the current goal
// until we reach a fixpoint. // until we reach a fixpoint.
self.stack[stack_depth].has_been_used = true; self.stack[stack_depth].has_been_used = true;
return cache.provisional_result(entry_index); return if let Some(result) = cache.provisional_result(entry_index) {
result
} else { } else {
return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW); // If we don't have a provisional result yet, the goal has to
// still be on the stack.
let mut goal_on_stack = false;
let mut is_coinductive = true;
for entry in self.stack.raw[stack_depth.index()..]
.iter()
.skip_while(|entry| entry.input != input)
{
goal_on_stack = true;
is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx);
} }
debug_assert!(goal_on_stack);
if is_coinductive {
Self::response_no_constraints(tcx, input, Certainty::Yes)
} else {
Self::response_no_constraints(tcx, input, Certainty::OVERFLOW)
}
};
} }
} }
@ -288,15 +296,18 @@ impl<'tcx> SearchGraph<'tcx> {
let provisional_entry_index = let provisional_entry_index =
*cache.lookup_table.get(&stack_entry.input).unwrap(); *cache.lookup_table.get(&stack_entry.input).unwrap();
let provisional_entry = &mut cache.entries[provisional_entry_index]; let provisional_entry = &mut cache.entries[provisional_entry_index];
let prev_response = mem::replace(&mut provisional_entry.response, response); if stack_entry.has_been_used
if stack_entry.has_been_used && prev_response != response { && provisional_entry.response.map_or(true, |r| r != response)
// If so, remove all entries whose result depends on this goal {
// from the provisional cache... // If so, update the provisional result for this goal and remove
// all entries whose result depends on this goal from the provisional
// cache...
// //
// That's not completely correct, as a nested goal can also // That's not completely correct, as a nested goal can also only
// depend on a goal which is lower in the stack so it doesn't // depend on a goal which is lower in the stack so it doesn't
// actually depend on the current goal. This should be fairly // actually depend on the current goal. This should be fairly
// rare and is hopefully not relevant for performance. // rare and is hopefully not relevant for performance.
provisional_entry.response = Some(response);
#[allow(rustc::potential_query_instability)] #[allow(rustc::potential_query_instability)]
cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index); cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
cache.entries.truncate(provisional_entry_index.index() + 1); cache.entries.truncate(provisional_entry_index.index() + 1);
@ -315,8 +326,8 @@ impl<'tcx> SearchGraph<'tcx> {
}); });
// We're now done with this goal. In case this goal is involved in a larger cycle // We're now done with this goal. In case this goal is involved in a larger cycle
// do not remove it from the provisional cache and do not add it to the global // do not remove it from the provisional cache and update its provisional result.
// cache. // We only add the root of cycles to the global cache.
// //
// It is not possible for any nested goal to depend on something deeper on the // It is not possible for any nested goal to depend on something deeper on the
// stack, as this would have also updated the depth of the current goal. // stack, as this would have also updated the depth of the current goal.
@ -348,6 +359,8 @@ impl<'tcx> SearchGraph<'tcx> {
dep_node, dep_node,
result, result,
) )
} else {
provisional_entry.response = Some(result);
} }
result result

View file

@ -39,7 +39,7 @@ fn impls_ar<T: AR>() {}
fn main() { fn main() {
impls_a::<()>(); impls_a::<()>();
//~^ ERROR overflow evaluating the requirement `(): A` // FIXME(-Ztrait-solver=next): This is broken and should error.
impls_ar::<()>(); impls_ar::<()>();
//~^ ERROR overflow evaluating the requirement `(): AR` //~^ ERROR overflow evaluating the requirement `(): AR`

View file

@ -1,16 +1,3 @@
error[E0275]: overflow evaluating the requirement `(): A`
--> $DIR/inductive-not-on-stack.rs:41:5
|
LL | impls_a::<()>();
| ^^^^^^^^^^^^^
|
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`inductive_not_on_stack`)
note: required by a bound in `impls_a`
--> $DIR/inductive-not-on-stack.rs:25:15
|
LL | fn impls_a<T: A>() {}
| ^ required by this bound in `impls_a`
error[E0275]: overflow evaluating the requirement `(): AR` error[E0275]: overflow evaluating the requirement `(): AR`
--> $DIR/inductive-not-on-stack.rs:44:5 --> $DIR/inductive-not-on-stack.rs:44:5
| |
@ -24,6 +11,6 @@ note: required by a bound in `impls_ar`
LL | fn impls_ar<T: AR>() {} LL | fn impls_ar<T: AR>() {}
| ^^ required by this bound in `impls_ar` | ^^ required by this bound in `impls_ar`
error: aborting due to 2 previous errors error: aborting due to previous error
For more information about this error, try `rustc --explain E0275`. For more information about this error, try `rustc --explain E0275`.