
rework cycle handling

A cycle was previously coinductive if all steps were coinductive.
Change this to instead consider cycles to be coinductive if they
step through at least one where-bound of an impl of a coinductive
trait goal.
lcnr 2025-02-10 14:45:24 +01:00
parent cb08599451
commit 46faf4bed6
10 changed files with 315 additions and 182 deletions
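
As a minimal illustration of the new rule (not part of the diff itself): the classic
self-referential auto trait cycle. Proving `Foo: Send` requires `Box<Foo>: Send`, which
eventually requires `Foo: Send` again. Since `Send` is an auto trait and therefore
coinductive, the cycle steps through the where-bounds of a coinductive trait's impl and
is accepted, whereas a purely inductive cycle would be an error.

struct Foo(Box<Foo>);

fn assert_send<T: Send>() {}

fn main() {
    // Accepted: the `Foo: Send` cycle is treated as coinductive.
    assert_send::<Foo>();
}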


@@ -594,6 +594,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
         self.trait_is_auto(trait_def_id)
     }
 
+    fn trait_is_coinductive(self, trait_def_id: DefId) -> bool {
+        self.trait_is_coinductive(trait_def_id)
+    }
+
     fn trait_is_alias(self, trait_def_id: DefId) -> bool {
         self.trait_is_alias(trait_def_id)
     }
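
The inherent `TyCtxt::trait_is_coinductive` helper this forwards to is defined elsewhere and not shown in this hunk. Based on the `CoinductiveTrait` docs added below (coinductive traits are auto traits and `Sized`), a plausible sketch of its body is roughly the following (an assumption, not the verbatim rustc implementation):

fn trait_is_coinductive(self, trait_def_id: DefId) -> bool {
    // Assumption: coinductive traits are exactly auto traits and `Sized`.
    self.trait_is_auto(trait_def_id) || self.lang_items().sized_trait() == Some(trait_def_id)
}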


@@ -51,10 +51,6 @@ impl<'tcx> rustc_type_ir::inherent::Predicate<TyCtxt<'tcx>> for Predicate<'tcx>
         self.as_clause()
     }
 
-    fn is_coinductive(self, interner: TyCtxt<'tcx>) -> bool {
-        self.is_coinductive(interner)
-    }
-
     fn allow_normalization(self) -> bool {
         self.allow_normalization()
     }
@@ -119,6 +115,8 @@ impl<'tcx> Predicate<'tcx> {
         Some(tcx.mk_predicate(kind))
     }
 
+    /// Only used by the old solver to decide whether a predicate is accepted
+    /// in a coinductive trait solver cycle.
     #[instrument(level = "debug", skip(tcx), ret)]
     pub fn is_coinductive(self, tcx: TyCtxt<'tcx>) -> bool {
         match self.kind().skip_binder() {


@@ -21,7 +21,7 @@ use tracing::{debug, instrument, trace};
 use crate::canonicalizer::Canonicalizer;
 use crate::delegate::SolverDelegate;
 use crate::resolve::EagerResolver;
-use crate::solve::eval_ctxt::NestedGoals;
+use crate::solve::eval_ctxt::{CurrentGoalKind, NestedGoals};
 use crate::solve::{
     CanonicalInput, CanonicalResponse, Certainty, EvalCtxt, ExternalConstraintsData, Goal,
     MaybeCause, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput,
@@ -109,8 +109,10 @@ where
         //
         // As we return all ambiguous nested goals, we can ignore the certainty returned
        // by `try_evaluate_added_goals()`.
-        let (certainty, normalization_nested_goals) = if self.is_normalizes_to_goal {
-            let NestedGoals { normalizes_to_goals, goals } = std::mem::take(&mut self.nested_goals);
+        let (certainty, normalization_nested_goals) = match self.current_goal_kind {
+            CurrentGoalKind::NormalizesTo => {
+                let NestedGoals { normalizes_to_goals, goals } =
+                    std::mem::take(&mut self.nested_goals);
                 if cfg!(debug_assertions) {
                     assert!(normalizes_to_goals.is_empty());
                     if goals.is_empty() {
@@ -118,9 +120,11 @@ where
                     }
                 }
                 (certainty, NestedNormalizationGoals(goals))
-        } else {
+            }
+            CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => {
                 let certainty = certainty.unify_with(goals_certainty);
                 (certainty, NestedNormalizationGoals::empty())
+            }
         };
 
         if let Certainty::Maybe(cause @ MaybeCause::Overflow { .. }) = certainty {
@@ -163,14 +167,18 @@ where
         // ambiguous alias types which get replaced with fresh inference variables
         // during generalization. This prevents hangs caused by an exponential blowup,
         // see tests/ui/traits/next-solver/coherence-alias-hang.rs.
-        //
-        // We don't do so for `NormalizesTo` goals as we erased the expected term and
-        // bailing with overflow here would prevent us from detecting a type-mismatch,
-        // causing a coherence error in diesel, see #131969. We still bail with overflow
-        // when later returning from the parent AliasRelate goal.
-        if !self.is_normalizes_to_goal {
-            let num_non_region_vars =
-                canonical.variables.iter().filter(|c| !c.is_region() && c.is_existential()).count();
+        match self.current_goal_kind {
+            // We don't do so for `NormalizesTo` goals as we erased the expected term and
+            // bailing with overflow here would prevent us from detecting a type-mismatch,
+            // causing a coherence error in diesel, see #131969. We still bail with overflow
+            // when later returning from the parent AliasRelate goal.
+            CurrentGoalKind::NormalizesTo => {}
+            CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => {
+                let num_non_region_vars = canonical
+                    .variables
+                    .iter()
+                    .filter(|c| !c.is_region() && c.is_existential())
+                    .count();
                 if num_non_region_vars > self.cx().recursion_limit() {
                     debug!(?num_non_region_vars, "too many inference variables -> overflow");
                     return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow {
@@ -178,6 +186,7 @@ where
                     }));
                 }
             }
+        }
 
         Ok(canonical)
     }


@@ -9,6 +9,7 @@ use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
 use rustc_type_ir::inherent::*;
 use rustc_type_ir::relate::Relate;
 use rustc_type_ir::relate::solver_relating::RelateExt;
+use rustc_type_ir::search_graph::PathKind;
 use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor};
 use rustc_type_ir::{self as ty, CanonicalVarValues, InferCtxtLike, Interner, TypingMode};
 use rustc_type_ir_macros::{Lift_Generic, TypeFoldable_Generic, TypeVisitable_Generic};
@@ -20,12 +21,51 @@ use crate::solve::inspect::{self, ProofTreeBuilder};
 use crate::solve::search_graph::SearchGraph;
 use crate::solve::{
     CanonicalInput, Certainty, FIXPOINT_STEP_LIMIT, Goal, GoalEvaluationKind, GoalSource,
-    HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryResult,
+    HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput,
+    QueryResult,
 };
 
 pub(super) mod canonical;
 mod probe;
 
+/// The kind of goal we're currently proving.
+///
+/// This has effects on cycle handling and on how we compute
+/// query responses, see the variant descriptions for more info.
+#[derive(Debug, Copy, Clone)]
+enum CurrentGoalKind {
+    Misc,
+    /// We're proving a trait goal for a coinductive trait, either an auto trait or `Sized`.
+    ///
+    /// These are currently the only goals whose impl where-clauses are considered to be
+    /// productive steps.
+    CoinductiveTrait,
+    /// Unlike other goals, `NormalizesTo` goals act like functions with the expected term
+    /// always being fully unconstrained. This would weaken inference however, as the nested
+    /// goals never get the inference constraints from the actual normalized-to type.
+    ///
+    /// Because of this we return any ambiguous nested goals from `NormalizesTo` to the
+    /// caller, which then adds these to its own context. The caller is always an `AliasRelate`
+    /// goal so this never leaks out of the solver.
+    NormalizesTo,
+}
+
+impl CurrentGoalKind {
+    fn from_query_input<I: Interner>(cx: I, input: QueryInput<I, I::Predicate>) -> CurrentGoalKind {
+        match input.goal.predicate.kind().skip_binder() {
+            ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) => {
+                if cx.trait_is_coinductive(pred.trait_ref.def_id) {
+                    CurrentGoalKind::CoinductiveTrait
+                } else {
+                    CurrentGoalKind::Misc
+                }
+            }
+            ty::PredicateKind::NormalizesTo(_) => CurrentGoalKind::NormalizesTo,
+            _ => CurrentGoalKind::Misc,
+        }
+    }
+}
+
 pub struct EvalCtxt<'a, D, I = <D as SolverDelegate>::Interner>
 where
     D: SolverDelegate<Interner = I>,
@@ -51,14 +91,10 @@ where
     /// The variable info for the `var_values`, only used to make an ambiguous response
     /// with no constraints.
     variables: I::CanonicalVars,
 
-    /// Whether we're currently computing a `NormalizesTo` goal. Unlike other goals,
-    /// `NormalizesTo` goals act like functions with the expected term always being
-    /// fully unconstrained. This would weaken inference however, as the nested goals
-    /// never get the inference constraints from the actual normalized-to type. Because
-    /// of this we return any ambiguous nested goals from `NormalizesTo` to the caller
-    /// when then adds these to its own context. The caller is always an `AliasRelate`
-    /// goal so this never leaks out of the solver.
-    is_normalizes_to_goal: bool,
+    /// What kind of goal we're currently computing, see the enum definition
+    /// for more info.
+    current_goal_kind: CurrentGoalKind,
 
     pub(super) var_values: CanonicalVarValues<I>,
     predefined_opaques_in_body: I::PredefinedOpaques,
@@ -226,8 +262,11 @@ where
         self.delegate.typing_mode()
     }
 
-    pub(super) fn set_is_normalizes_to_goal(&mut self) {
-        self.is_normalizes_to_goal = true;
+    pub(super) fn step_kind_for_source(&self, source: GoalSource) -> PathKind {
+        match (self.current_goal_kind, source) {
+            (CurrentGoalKind::CoinductiveTrait, GoalSource::ImplWhereBound) => PathKind::Coinductive,
+            _ => PathKind::Inductive,
+        }
     }
 
     /// Creates a root evaluation context and search graph. This should only be
@@ -256,7 +295,7 @@ where
             max_input_universe: ty::UniverseIndex::ROOT,
             variables: Default::default(),
             var_values: CanonicalVarValues::dummy(),
-            is_normalizes_to_goal: false,
+            current_goal_kind: CurrentGoalKind::Misc,
             origin_span,
             tainted: Ok(()),
         };
@@ -294,7 +333,7 @@ where
             delegate,
             variables: canonical_input.canonical.variables,
             var_values,
-            is_normalizes_to_goal: false,
+            current_goal_kind: CurrentGoalKind::from_query_input(cx, input),
             predefined_opaques_in_body: input.predefined_opaques_in_body,
             max_input_universe: canonical_input.canonical.max_universe,
             search_graph,
@@ -340,6 +379,7 @@ where
         cx: I,
         search_graph: &'a mut SearchGraph<D>,
         canonical_input: CanonicalInput<I>,
+        step_kind_from_parent: PathKind,
         goal_evaluation: &mut ProofTreeBuilder<D>,
     ) -> QueryResult<I> {
         let mut canonical_goal_evaluation =
@@ -352,6 +392,7 @@ where
         search_graph.with_new_goal(
             cx,
             canonical_input,
+            step_kind_from_parent,
             &mut canonical_goal_evaluation,
             |search_graph, canonical_goal_evaluation| {
                 EvalCtxt::enter_canonical(
@@ -395,12 +436,10 @@ where
     /// `NormalizesTo` is only used by `AliasRelate`, all other callsites
     /// should use [`EvalCtxt::evaluate_goal`] which discards that empty
     /// storage.
-    // FIXME(-Znext-solver=coinduction): `_source` is currently unused but will
-    // be necessary once we implement the new coinduction approach.
     pub(super) fn evaluate_goal_raw(
         &mut self,
         goal_evaluation_kind: GoalEvaluationKind,
-        _source: GoalSource,
+        source: GoalSource,
        goal: Goal<I, I::Predicate>,
     ) -> Result<(NestedNormalizationGoals<I>, HasChanged, Certainty), NoSolution> {
         let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
@@ -410,6 +449,7 @@ where
             self.cx(),
             self.search_graph,
             canonical_goal,
+            self.step_kind_for_source(source),
             &mut goal_evaluation,
         );
         let response = match canonical_response {


@@ -34,7 +34,7 @@ where
             delegate,
             variables: outer_ecx.variables,
             var_values: outer_ecx.var_values,
-            is_normalizes_to_goal: outer_ecx.is_normalizes_to_goal,
+            current_goal_kind: outer_ecx.current_goal_kind,
             predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body,
             max_input_universe,
             search_graph: outer_ecx.search_graph,


@@ -28,7 +28,6 @@ where
         &mut self,
         goal: Goal<I, NormalizesTo<I>>,
     ) -> QueryResult<I> {
-        self.set_is_normalizes_to_goal();
         debug_assert!(self.term_is_fully_unconstrained(goal));
         let cx = self.cx();
         match goal.predicate.alias.kind(cx) {


@@ -2,7 +2,6 @@ use std::convert::Infallible;
 use std::marker::PhantomData;
 
 use rustc_type_ir::Interner;
-use rustc_type_ir::inherent::*;
 use rustc_type_ir::search_graph::{self, PathKind};
 use rustc_type_ir::solve::{CanonicalInput, Certainty, QueryResult};
@@ -94,10 +93,6 @@ where
         let certainty = from_result.unwrap().value.certainty;
         response_no_constraints(cx, for_input, certainty)
     }
-
-    fn step_is_coinductive(cx: I, input: CanonicalInput<I>) -> bool {
-        input.canonical.value.goal.predicate.is_coinductive(cx)
-    }
 }
 
 fn response_no_constraints<I: Interner>(


@@ -462,8 +462,6 @@ pub trait Predicate<I: Interner<Predicate = Self>>:
 {
     fn as_clause(self) -> Option<I::Clause>;
 
-    fn is_coinductive(self, interner: I) -> bool;
-
     // FIXME: Eventually uplift the impl out of rustc and make this defaulted.
     fn allow_normalization(self) -> bool;
 }


@@ -279,6 +279,8 @@ pub trait Interner:
     fn trait_is_auto(self, trait_def_id: Self::DefId) -> bool;
 
+    fn trait_is_coinductive(self, trait_def_id: Self::DefId) -> bool;
+
     fn trait_is_alias(self, trait_def_id: Self::DefId) -> bool;
 
     fn trait_is_dyn_compatible(self, trait_def_id: Self::DefId) -> bool;


@@ -12,7 +12,7 @@
 /// The global cache has to be completely unobservable, while the per-cycle cache may impact
 /// behavior as long as the resulting behavior is still correct.
 use std::cmp::Ordering;
-use std::collections::BTreeSet;
+use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
@@ -104,8 +104,6 @@ pub trait Delegate {
         for_input: <Self::Cx as Cx>::Input,
         from_result: <Self::Cx as Cx>::Result,
     ) -> <Self::Cx as Cx>::Result;
-
-    fn step_is_coinductive(cx: Self::Cx, input: <Self::Cx as Cx>::Input) -> bool;
 }
 
 /// In the initial iteration of a cycle, we do not yet have a provisional
@@ -116,15 +114,38 @@ pub enum PathKind {
     Coinductive,
     Inductive,
 }
 
+impl PathKind {
+    /// Returns the path kind when merging `self` with `rest`.
+    ///
+    /// Given an inductive path `self` and a coinductive path `rest`,
+    /// the path `self -> rest` would be coinductive.
+    fn extend(self, rest: PathKind) -> PathKind {
+        match self {
+            PathKind::Coinductive => PathKind::Coinductive,
+            PathKind::Inductive => rest,
+        }
+    }
+}
+
+/// The kinds of cycles a cycle head was involved in.
+///
+/// This is used to avoid rerunning a cycle if there's
+/// just a single usage kind and the final result matches
+/// its provisional result.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum UsageKind {
     Single(PathKind),
     Mixed,
 }
+impl From<PathKind> for UsageKind {
+    fn from(path: PathKind) -> UsageKind {
+        UsageKind::Single(path)
+    }
+}
 impl UsageKind {
-    fn merge(self, other: Self) -> Self {
-        match (self, other) {
+    #[must_use]
+    fn merge(self, other: impl Into<Self>) -> Self {
+        match (self, other.into()) {
             (UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed,
             (UsageKind::Single(lhs), UsageKind::Single(rhs)) => {
                 if lhs == rhs {
@@ -135,7 +156,42 @@ impl UsageKind {
             }
         }
     }
-    fn and_merge(&mut self, other: Self) {
+
+    fn and_merge(&mut self, other: impl Into<Self>) {
         *self = self.merge(other);
     }
 }
+
+/// For each goal we track whether the paths from this goal
+/// to its cycle heads are coinductive.
+///
+/// This is a necessary condition to rebase provisional cache
+/// entries.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum AllPathsToHeadCoinductive {
+    Yes,
+    No,
+}
+impl From<PathKind> for AllPathsToHeadCoinductive {
+    fn from(path: PathKind) -> AllPathsToHeadCoinductive {
+        match path {
+            PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
+            _ => AllPathsToHeadCoinductive::No,
+        }
+    }
+}
+impl AllPathsToHeadCoinductive {
+    #[must_use]
+    fn merge(self, other: impl Into<Self>) -> Self {
+        match (self, other.into()) {
+            (AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => {
+                AllPathsToHeadCoinductive::Yes
+            }
+            (AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => {
+                AllPathsToHeadCoinductive::No
+            }
+        }
+    }
+
+    fn and_merge(&mut self, other: impl Into<Self>) {
+        *self = self.merge(other);
+    }
+}
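
To make the cycle classification concrete: folding `PathKind::extend` over the steps of a path means the whole path is coinductive as soon as a single step is, which matches the commit message ("coinductive if they step through at least one where-bound of an impl of a coinductive trait goal"). A self-contained sketch, independent of the solver types and for illustration only:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PathKind {
    Coinductive,
    Inductive,
}

impl PathKind {
    // Mirrors the `extend` method added above: a coinductive prefix keeps the
    // whole path coinductive, an inductive prefix defers to the rest of the path.
    fn extend(self, rest: PathKind) -> PathKind {
        match self {
            PathKind::Coinductive => PathKind::Coinductive,
            PathKind::Inductive => rest,
        }
    }
}

fn main() {
    use PathKind::*;

    // A cycle A -> B -> C -> A where only the B -> C step is coinductive:
    // one coinductive step is enough to make the whole cycle coinductive.
    let steps = [Inductive, Coinductive, Inductive];
    let path = steps[1..].iter().fold(steps[0], |curr, &step| curr.extend(step));
    assert_eq!(path, Coinductive);

    // A purely inductive cycle stays inductive.
    let steps = [Inductive, Inductive];
    let path = steps[1..].iter().fold(steps[0], |curr, &step| curr.extend(step));
    assert_eq!(path, Inductive);
}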
@@ -177,10 +233,11 @@ impl AvailableDepth {
 /// All cycle heads a given goal depends on, ordered by their stack depth.
 ///
-/// We therefore pop the cycle heads from highest to lowest.
+/// We also track all paths from this goal to that head. This is necessary
+/// when rebasing provisional cache results.
 #[derive(Clone, Debug, PartialEq, Eq, Default)]
 struct CycleHeads {
-    heads: BTreeSet<StackDepth>,
+    heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>,
 }
 
 impl CycleHeads {
@@ -189,15 +246,15 @@ impl CycleHeads {
     }
 
     fn highest_cycle_head(&self) -> StackDepth {
-        *self.heads.last().unwrap()
+        self.opt_highest_cycle_head().unwrap()
     }
 
     fn opt_highest_cycle_head(&self) -> Option<StackDepth> {
-        self.heads.last().copied()
+        self.heads.last_key_value().map(|(k, _)| *k)
     }
 
     fn opt_lowest_cycle_head(&self) -> Option<StackDepth> {
-        self.heads.first().copied()
+        self.heads.first_key_value().map(|(k, _)| *k)
     }
 
     fn remove_highest_cycle_head(&mut self) {
@@ -205,28 +262,42 @@ impl CycleHeads {
         debug_assert_ne!(last, None);
     }
 
-    fn insert(&mut self, head: StackDepth) {
-        self.heads.insert(head);
+    fn insert(
+        &mut self,
+        head: StackDepth,
+        path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy,
+    ) {
+        self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry);
     }
 
     fn merge(&mut self, heads: &CycleHeads) {
-        for &head in heads.heads.iter() {
-            self.insert(head);
+        for (&head, &path_from_entry) in heads.heads.iter() {
+            self.insert(head, path_from_entry);
+            debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes));
         }
     }
 
+    fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ {
+        self.heads.iter().map(|(k, v)| (*k, *v))
+    }
+
     /// Update the cycle heads of a goal at depth `this` given the cycle heads
     /// of a nested goal. This merges the heads after filtering the parent goal
     /// itself.
-    fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
-        for &head in child.heads.iter() {
+    fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) {
+        for (&head, &path_from_entry) in child.heads.iter() {
             match head.cmp(&this) {
                 Ordering::Less => {}
                 Ordering::Equal => continue,
                 Ordering::Greater => unreachable!(),
             }
-            self.insert(head);
+
+            let path_from_entry = match step_kind {
+                PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
+                PathKind::Inductive => path_from_entry,
+            };
+            self.insert(head, path_from_entry);
         }
     }
 }
@@ -246,7 +317,7 @@ impl CycleHeads {
 /// We need to disable the global cache if using it would hide a cycle, as
 /// cycles can impact behavior. The cycle ABA may have different final
 /// results from a the cycle BAB depending on the cycle root.
-#[derive_where(Debug, Default; X: Cx)]
+#[derive_where(Debug, Default, Clone; X: Cx)]
 struct NestedGoals<X: Cx> {
     nested_goals: HashMap<X::Input, UsageKind>,
 }
@@ -259,13 +330,6 @@ impl<X: Cx> NestedGoals<X> {
         self.nested_goals.entry(input).or_insert(path_from_entry).and_merge(path_from_entry);
     }
 
-    fn merge(&mut self, nested_goals: &NestedGoals<X>) {
-        #[allow(rustc::potential_query_instability)]
-        for (input, path_from_entry) in nested_goals.iter() {
-            self.insert(input, path_from_entry);
-        }
-    }
-
     /// Adds the nested goals of a nested goal, given that the path `step_kind` from this goal
     /// to the parent goal.
     ///
@@ -276,8 +340,8 @@ impl<X: Cx> NestedGoals<X> {
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_entry) in nested_goals.iter() {
             let path_from_entry = match step_kind {
-                PathKind::Coinductive => path_from_entry,
-                PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
+                PathKind::Coinductive => UsageKind::Single(PathKind::Coinductive),
+                PathKind::Inductive => path_from_entry,
             };
             self.insert(input, path_from_entry);
         }
@@ -289,10 +353,6 @@ impl<X: Cx> NestedGoals<X> {
         self.nested_goals.iter().map(|(i, p)| (*i, *p))
     }
 
-    fn get(&self, input: X::Input) -> Option<UsageKind> {
-        self.nested_goals.get(&input).copied()
-    }
-
     fn contains(&self, input: X::Input) -> bool {
         self.nested_goals.contains_key(&input)
     }
@@ -310,6 +370,12 @@ rustc_index::newtype_index! {
 struct StackEntry<X: Cx> {
     input: X::Input,
 
+    /// Whether proving this goal is a coinductive step.
+    ///
+    /// This is used when encountering a trait solver cycle to
+    /// decide the initial provisional result of the cycle.
+    step_kind_from_parent: PathKind,
+
     /// The available depth of a given goal, immutable.
     available_depth: AvailableDepth,
@@ -346,9 +412,9 @@ struct ProvisionalCacheEntry<X: Cx> {
     encountered_overflow: bool,
     /// All cycle heads this cache entry depends on.
     heads: CycleHeads,
-    /// The path from the highest cycle head to this goal.
+    /// The path from the highest cycle head to this goal. This differs from
+    /// `heads`, which tracks the path to the cycle head *from* this goal.
     path_from_head: PathKind,
-    nested_goals: NestedGoals<X>,
     result: X::Result,
 }
@@ -367,6 +433,20 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     _marker: PhantomData<D>,
 }
 
+/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
+/// ordinary nested goals/global cache hits and provisional cache hits,
+/// using the provisional cache should not add any nested goals.
+///
+/// `nested_goals` are only used when checking whether global cache entries
+/// are applicable. This only cares about whether a goal is actually accessed.
+/// Given that the usage of the provisional cache is fully deterministic, we
+/// don't need to track the nested goals used while computing a provisional
+/// cache entry.
+enum UpdateParentGoalCtxt<'a, X: Cx> {
+    Ordinary(&'a NestedGoals<X>),
+    ProvisionalCacheHit,
+}
+
 impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     pub fn new(root_depth: usize) -> SearchGraph<D> {
         Self {
@@ -382,27 +462,32 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// and using existing global cache entries to make sure they
     /// have the same impact on the remaining evaluation.
     fn update_parent_goal(
-        cx: X,
         stack: &mut IndexVec<StackDepth, StackEntry<X>>,
+        step_kind_from_parent: PathKind,
         reached_depth: StackDepth,
         heads: &CycleHeads,
         encountered_overflow: bool,
-        nested_goals: &NestedGoals<X>,
+        context: UpdateParentGoalCtxt<'_, X>,
     ) {
         if let Some(parent_index) = stack.last_index() {
             let parent = &mut stack[parent_index];
             parent.reached_depth = parent.reached_depth.max(reached_depth);
             parent.encountered_overflow |= encountered_overflow;
 
-            parent.heads.extend_from_child(parent_index, heads);
-            let step_kind = Self::step_kind(cx, parent.input);
-            parent.nested_goals.extend_from_child(step_kind, nested_goals);
+            parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
+            let parent_depends_on_cycle = match context {
+                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
+                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
+                    !nested_goals.is_empty()
+                }
+                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
+            };
             // Once we've got goals which encountered overflow or a cycle,
             // we track all goals whose behavior may depend depend on these
             // goals as this change may cause them to now depend on additional
             // goals, resulting in new cycles. See the dev-guide for examples.
-            if !nested_goals.is_empty() {
-                parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Coinductive))
+            if parent_depends_on_cycle {
+                parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Inductive))
             }
         }
     }
@@ -422,21 +507,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         self.stack.len()
     }
 
-    fn step_kind(cx: X, input: X::Input) -> PathKind {
-        if D::step_is_coinductive(cx, input) { PathKind::Coinductive } else { PathKind::Inductive }
-    }
-
     /// Whether the path from `head` to the current stack entry is inductive or coinductive.
-    fn stack_path_kind(
-        cx: X,
+    ///
+    /// The `step_kind_to_head` is used to add a single additional path segment to the path on
+    /// the stack which completes the cycle. Given an inductive step AB which then cycles
+    /// coinductively with A, we need to treat this cycle as coinductive.
+    fn cycle_path_kind(
         stack: &IndexVec<StackDepth, StackEntry<X>>,
+        step_kind_to_head: PathKind,
         head: StackDepth,
     ) -> PathKind {
-        if stack.raw[head.index()..].iter().all(|entry| D::step_is_coinductive(cx, entry.input)) {
-            PathKind::Coinductive
-        } else {
-            PathKind::Inductive
-        }
+        stack.raw[head.index() + 1..]
+            .iter()
+            .fold(step_kind_to_head, |curr, entry| curr.extend(entry.step_kind_from_parent))
     }
 
     /// Probably the most involved method of the whole solver.
@@ -447,6 +530,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         &mut self,
         cx: X,
         input: X::Input,
+        step_kind_from_parent: PathKind,
         inspect: &mut D::ProofTreeBuilder,
         mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
     ) -> X::Result {
@@ -464,7 +548,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // - A
        //   - BA cycle
        //   - CB :x:
-        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
+        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
             return result;
         }
@@ -477,10 +561,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // global cache has been disabled as it may otherwise change the result for
             // cyclic goals. We don't care about goals which are not on the current stack
             // so it's fine to drop their scope eagerly.
-            self.lookup_global_cache_untracked(cx, input, available_depth)
+            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
                 .inspect(|expected| debug!(?expected, "validate cache entry"))
                 .map(|r| (scope, r))
-        } else if let Some(result) = self.lookup_global_cache(cx, input, available_depth) {
+        } else if let Some(result) =
+            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
+        {
             return result;
         } else {
             None
@@ -490,8 +576,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // avoid iterating over the stack in case a goal has already been computed.
         // This may not have an actual performance impact and we could reorder them
         // as it may reduce the number of `nested_goals` we need to track.
-        if let Some(result) = self.check_cycle_on_stack(cx, input) {
-            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack");
+        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
+            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
             return result;
         }
@@ -499,6 +585,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         let depth = self.stack.next_index();
         let entry = StackEntry {
             input,
+            step_kind_from_parent,
             available_depth,
             reached_depth: depth,
             heads: Default::default(),
@@ -522,12 +609,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // We've finished computing the goal and have popped it from the stack,
         // lazily update its parent goal.
         Self::update_parent_goal(
-            cx,
             &mut self.stack,
+            final_entry.step_kind_from_parent,
             final_entry.reached_depth,
             &final_entry.heads,
             final_entry.encountered_overflow,
-            &final_entry.nested_goals,
+            UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
         );
 
         // We're now done with this goal. We only add the root of cycles to the global cache.
@@ -541,19 +628,20 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 self.insert_global_cache(cx, input, final_entry, result, dep_node)
             }
         } else if D::ENABLE_PROVISIONAL_CACHE {
-            debug_assert!(validate_cache.is_none());
+            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
             let entry = self.provisional_cache.entry(input).or_default();
-            let StackEntry { heads, nested_goals, encountered_overflow, .. } = final_entry;
-            let path_from_head = Self::stack_path_kind(cx, &self.stack, heads.highest_cycle_head());
-            entry.push(ProvisionalCacheEntry {
-                encountered_overflow,
-                heads,
-                path_from_head,
-                nested_goals,
-                result,
-            });
+            let StackEntry { heads, encountered_overflow, .. } = final_entry;
+            let path_from_head = Self::cycle_path_kind(
+                &self.stack,
+                step_kind_from_parent,
+                heads.highest_cycle_head(),
+            );
+            let provisional_cache_entry =
+                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
+            debug!(?provisional_cache_entry);
+            entry.push(provisional_cache_entry);
         } else {
-            debug_assert!(validate_cache.is_none());
+            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
         }
 
         result
@@ -575,7 +663,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             //
             // We must therefore not use the global cache entry for `B` in that case.
             // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
-            last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive));
+            last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Inductive));
         }
 
         debug!("encountered stack overflow");
@@ -607,7 +695,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// This can be thought of rotating the sub-tree of this provisional result and changing
     /// its entry point while making sure that all paths through this sub-tree stay the same.
     ///
-    ///
     /// In case the popped cycle head failed to reach a fixpoint anything which depends on
     /// its provisional result is invalid. Actually discarding provisional cache entries in
     /// this case would cause hangs, so we instead change the result of dependant provisional
@@ -616,7 +703,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// to me.
     fn rebase_provisional_cache_entries(
         &mut self,
-        cx: X,
         stack_entry: &StackEntry<X>,
         mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
     ) {
@@ -628,25 +714,24 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 encountered_overflow: _,
                 heads,
                 path_from_head,
-                nested_goals,
                 result,
             } = entry;
-            if heads.highest_cycle_head() != head {
+            if heads.highest_cycle_head() == head {
+                heads.remove_highest_cycle_head()
+            } else {
                 return true;
             }
 
-            // We don't try rebasing if the path from the current head
-            // to the cache entry is not coinductive or if the path from
-            // the cache entry to the current head is not coinductive.
-            //
-            // Both of these constraints could be weakened, but by only
-            // accepting coinductive paths we don't have to worry about
-            // changing the cycle kind of the remaining cycles. We can
-            // extend this in the future once there's a known issue
-            // caused by it.
-            if *path_from_head != PathKind::Coinductive
-                || nested_goals.get(stack_entry.input).unwrap()
-                    != UsageKind::Single(PathKind::Coinductive)
+            // We only try to rebase if all paths from the cache entry
+            // to its heads are coinductive. In this case these cycle
+            // kinds won't change, no matter the goals between these
+            // heads and the provisional cache entry.
+            if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) {
+                return false;
+            }
+
+            // The same for nested goals of the cycle head.
+            if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
             {
                 return false;
             }
@@ -654,20 +739,23 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // Merge the cycle heads of the provisional cache entry and the
             // popped head. If the popped cycle head was a root, discard all
             // provisional cache entries which depend on it.
-            heads.remove_highest_cycle_head();
             heads.merge(&stack_entry.heads);
             let Some(head) = heads.opt_highest_cycle_head() else {
                 return false;
             };
 
-            // As we've made sure that the path from the new highest cycle
-            // head to the uses of the popped cycle head are fully coinductive,
-            // we can be sure that the paths to all nested goals of the popped
-            // cycle head remain the same. We can simply merge them.
-            nested_goals.merge(&stack_entry.nested_goals);
-
             // We now care about the path from the next highest cycle head to the
             // provisional cache entry.
-            *path_from_head = Self::stack_path_kind(cx, &self.stack, head);
+            match path_from_head {
+                PathKind::Coinductive => {}
+                PathKind::Inductive => {
+                    *path_from_head = Self::cycle_path_kind(
+                        &self.stack,
+                        stack_entry.step_kind_from_parent,
+                        head,
+                    )
+                }
+            }
+
             // Mutate the result of the provisional cache entry in case we did
             // not reach a fixpoint.
             *result = mutate_result(input, *result);
@@ -677,19 +765,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         });
     }
 
-    fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
+    fn lookup_provisional_cache(
+        &mut self,
+        input: X::Input,
+        step_kind_from_parent: PathKind,
+    ) -> Option<X::Result> {
         if !D::ENABLE_PROVISIONAL_CACHE {
             return None;
         }
 
         let entries = self.provisional_cache.get(&input)?;
-        for &ProvisionalCacheEntry {
-            encountered_overflow,
-            ref heads,
-            path_from_head,
-            ref nested_goals,
-            result,
-        } in entries
+        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
+            entries
         {
             let head = heads.highest_cycle_head();
             if encountered_overflow {
@@ -710,22 +797,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // A provisional cache entry is only valid if the current path from its
             // highest cycle head to the goal is the same.
-            if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
+            if path_from_head == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head) {
                 // While we don't have to track the full depth of the provisional cache entry,
                 // we do have to increment the required depth by one as we'd have already failed
                 // with overflow otherwise
                 let next_index = self.stack.next_index();
-                let last = &mut self.stack.raw.last_mut().unwrap();
-                let path_from_entry = Self::step_kind(cx, last.input);
-                last.nested_goals.insert(input, UsageKind::Single(path_from_entry));
                 Self::update_parent_goal(
-                    cx,
                     &mut self.stack,
+                    step_kind_from_parent,
                     next_index,
                     heads,
-                    false,
-                    nested_goals,
+                    encountered_overflow,
+                    UpdateParentGoalCtxt::ProvisionalCacheHit,
                 );
                 debug_assert!(self.stack[head].has_been_used.is_some());
                 debug!(?head, ?path_from_head, "provisional cache hit");
@@ -740,8 +823,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// evaluating this entry would not have ended up depending on either a goal
     /// already on the stack or a provisional cache entry.
     fn candidate_is_applicable(
-        cx: X,
         stack: &IndexVec<StackDepth, StackEntry<X>>,
+        step_kind_from_parent: PathKind,
         provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
         nested_goals: &NestedGoals<X>,
     ) -> bool {
@@ -773,7 +856,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 encountered_overflow,
                 ref heads,
                 path_from_head,
-                nested_goals: _,
                 result: _,
             } in entries.iter()
             {
@@ -786,9 +868,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 // A provisional cache entry only applies if the path from its highest head
                 // matches the path when encountering the goal.
                 let head = heads.highest_cycle_head();
-                let full_path = match Self::stack_path_kind(cx, stack, head) {
-                    PathKind::Coinductive => path_from_global_entry,
-                    PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
+                let full_path = match Self::cycle_path_kind(stack, step_kind_from_parent, head) {
+                    PathKind::Coinductive => UsageKind::Single(PathKind::Coinductive),
+                    PathKind::Inductive => path_from_global_entry,
                };
 
                 match (full_path, path_from_head) {
@@ -816,14 +898,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         &self,
         cx: X,
         input: X::Input,
+        step_kind_from_parent: PathKind,
         available_depth: AvailableDepth,
     ) -> Option<X::Result> {
         cx.with_global_cache(|cache| {
             cache
                 .get(cx, input, available_depth, |nested_goals| {
                     Self::candidate_is_applicable(
-                        cx,
                         &self.stack,
+                        step_kind_from_parent,
                         &self.provisional_cache,
                         nested_goals,
                     )
@@ -839,14 +922,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         &mut self,
         cx: X,
         input: X::Input,
+        step_kind_from_parent: PathKind,
         available_depth: AvailableDepth,
     ) -> Option<X::Result> {
         cx.with_global_cache(|cache| {
             let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache
                 .get(cx, input, available_depth, |nested_goals| {
                     Self::candidate_is_applicable(
-                        cx,
                         &self.stack,
+                        step_kind_from_parent,
                         &self.provisional_cache,
                         nested_goals,
                     )
@@ -860,12 +944,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             // cycle heads are always empty.
             let heads = Default::default();
             Self::update_parent_goal(
-                cx,
                 &mut self.stack,
+                step_kind_from_parent,
                 reached_depth,
                 &heads,
                 encountered_overflow,
-                nested_goals,
+                UpdateParentGoalCtxt::Ordinary(nested_goals),
             );
 
             debug!(?additional_depth, "global cache hit");
@@ -873,16 +957,21 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         })
     }
 
-    fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
+    fn check_cycle_on_stack(
+        &mut self,
+        cx: X,
+        input: X::Input,
+        step_kind_from_parent: PathKind,
+    ) -> Option<X::Result> {
         let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?;
-        debug!("encountered cycle with depth {head:?}");
         // We have a nested goal which directly relies on a goal deeper in the stack.
         //
         // We start by tagging all cycle participants, as that's necessary for caching.
         //
         // Finally we can return either the provisional response or the initial response
         // in case we're in the first fixpoint iteration for this goal.
-        let path_kind = Self::stack_path_kind(cx, &self.stack, head);
+        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head);
+        debug!(?path_kind, "encountered cycle with depth {head:?}");
         let usage_kind = UsageKind::Single(path_kind);
         self.stack[head].has_been_used =
             Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
@@ -894,11 +983,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         let last = &mut self.stack[last_index];
         last.reached_depth = last.reached_depth.max(next_index);
-        let path_from_entry = Self::step_kind(cx, last.input);
-        last.nested_goals.insert(input, UsageKind::Single(path_from_entry));
-        last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive));
+        last.nested_goals.insert(input, UsageKind::Single(step_kind_from_parent));
+        last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Inductive));
 
         if last_index != head {
-            last.heads.insert(head);
+            last.heads.insert(head, step_kind_from_parent);
         }
 
         // Return the provisional result or, if we're in the first iteration,
@@ -964,7 +1052,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // this was only the root of either coinductive or inductive cycles, and the
         // final result is equal to the initial response for that case.
         if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) {
-            self.rebase_provisional_cache_entries(cx, &stack_entry, |_, result| result);
+            self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
             return (stack_entry, result);
         }
@@ -981,7 +1069,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // we also taint all provisional cache entries which depend on the
         // current goal.
         if D::is_ambiguous_result(result) {
-            self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| {
+            self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                 D::propagate_ambiguity(cx, input, result)
             });
             return (stack_entry, result);
@@ -993,7 +1081,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         if i >= D::FIXPOINT_STEP_LIMIT {
             debug!("canonical cycle overflow");
             let result = D::on_fixpoint_overflow(cx, input);
-            self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| {
+            self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                 D::on_fixpoint_overflow(cx, input)
             });
             return (stack_entry, result);