Uplift the new trait solver
This commit is contained in:
parent
baf94bddf0
commit
532149eb88
36 changed files with 2014 additions and 1457 deletions
|
@ -1,13 +1,27 @@
|
|||
use std::fmt::Debug;
|
||||
|
||||
use rustc_type_ir::fold::TypeFoldable;
|
||||
use rustc_type_ir::relate::Relate;
|
||||
use rustc_type_ir::solve::{Goal, NoSolution};
|
||||
use rustc_type_ir::solve::{Goal, NoSolution, SolverMode};
|
||||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
pub trait SolverDelegate: Sized {
|
||||
type Interner: Interner;
|
||||
|
||||
fn interner(&self) -> Self::Interner;
|
||||
|
||||
fn solver_mode(&self) -> SolverMode;
|
||||
|
||||
fn build_with_canonical<V>(
|
||||
interner: Self::Interner,
|
||||
solver_mode: SolverMode,
|
||||
canonical: &ty::Canonical<Self::Interner, V>,
|
||||
) -> (Self, V, ty::CanonicalVarValues<Self::Interner>)
|
||||
where
|
||||
V: TypeFoldable<Self::Interner>;
|
||||
|
||||
fn universe(&self) -> ty::UniverseIndex;
|
||||
fn create_next_universe(&self) -> ty::UniverseIndex;
|
||||
|
||||
fn universe_of_ty(&self, ty: ty::TyVid) -> Option<ty::UniverseIndex>;
|
||||
fn universe_of_lt(&self, lt: ty::RegionVid) -> Option<ty::UniverseIndex>;
|
||||
fn universe_of_ct(&self, ct: ty::ConstVid) -> Option<ty::UniverseIndex>;
|
||||
|
@ -74,4 +88,102 @@ pub trait SolverDelegate: Sized {
|
|||
T: TypeFoldable<Self::Interner>;
|
||||
|
||||
fn probe<T>(&self, probe: impl FnOnce() -> T) -> T;
|
||||
|
||||
// FIXME: Uplift the leak check into this crate.
|
||||
fn leak_check(&self, max_input_universe: ty::UniverseIndex) -> Result<(), NoSolution>;
|
||||
|
||||
// FIXME: This is only here because elaboration lives in `rustc_infer`!
|
||||
fn elaborate_supertraits(
|
||||
interner: Self::Interner,
|
||||
trait_ref: ty::Binder<Self::Interner, ty::TraitRef<Self::Interner>>,
|
||||
) -> impl Iterator<Item = ty::Binder<Self::Interner, ty::TraitRef<Self::Interner>>>;
|
||||
|
||||
fn try_const_eval_resolve(
|
||||
&self,
|
||||
param_env: <Self::Interner as Interner>::ParamEnv,
|
||||
unevaluated: ty::UnevaluatedConst<Self::Interner>,
|
||||
) -> Option<<Self::Interner as Interner>::Const>;
|
||||
|
||||
fn sub_regions(
|
||||
&self,
|
||||
sub: <Self::Interner as Interner>::Region,
|
||||
sup: <Self::Interner as Interner>::Region,
|
||||
);
|
||||
|
||||
fn register_ty_outlives(
|
||||
&self,
|
||||
ty: <Self::Interner as Interner>::Ty,
|
||||
r: <Self::Interner as Interner>::Region,
|
||||
);
|
||||
|
||||
// FIXME: This only is here because `wf::obligations` is in `rustc_trait_selection`!
|
||||
fn well_formed_goals(
|
||||
&self,
|
||||
param_env: <Self::Interner as Interner>::ParamEnv,
|
||||
arg: <Self::Interner as Interner>::GenericArg,
|
||||
) -> Option<Vec<Goal<Self::Interner, <Self::Interner as Interner>::Predicate>>>;
|
||||
|
||||
fn clone_opaque_types_for_query_response(
|
||||
&self,
|
||||
) -> Vec<(ty::OpaqueTypeKey<Self::Interner>, <Self::Interner as Interner>::Ty)>;
|
||||
|
||||
fn make_deduplicated_outlives_constraints(
|
||||
&self,
|
||||
) -> Vec<ty::OutlivesPredicate<Self::Interner, <Self::Interner as Interner>::GenericArg>>;
|
||||
|
||||
fn instantiate_canonical<V>(
|
||||
&self,
|
||||
canonical: ty::Canonical<Self::Interner, V>,
|
||||
values: ty::CanonicalVarValues<Self::Interner>,
|
||||
) -> V
|
||||
where
|
||||
V: TypeFoldable<Self::Interner>;
|
||||
|
||||
fn instantiate_canonical_var_with_infer(
|
||||
&self,
|
||||
cv_info: ty::CanonicalVarInfo<Self::Interner>,
|
||||
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
|
||||
) -> <Self::Interner as Interner>::GenericArg;
|
||||
|
||||
// FIXME: Can we implement this in terms of `add` and `inject`?
|
||||
fn insert_hidden_type(
|
||||
&self,
|
||||
opaque_type_key: ty::OpaqueTypeKey<Self::Interner>,
|
||||
param_env: <Self::Interner as Interner>::ParamEnv,
|
||||
hidden_ty: <Self::Interner as Interner>::Ty,
|
||||
goals: &mut Vec<Goal<Self::Interner, <Self::Interner as Interner>::Predicate>>,
|
||||
) -> Result<(), NoSolution>;
|
||||
|
||||
fn add_item_bounds_for_hidden_type(
|
||||
&self,
|
||||
def_id: <Self::Interner as Interner>::DefId,
|
||||
args: <Self::Interner as Interner>::GenericArgs,
|
||||
param_env: <Self::Interner as Interner>::ParamEnv,
|
||||
hidden_ty: <Self::Interner as Interner>::Ty,
|
||||
goals: &mut Vec<Goal<Self::Interner, <Self::Interner as Interner>::Predicate>>,
|
||||
);
|
||||
|
||||
fn inject_new_hidden_type_unchecked(
|
||||
&self,
|
||||
key: ty::OpaqueTypeKey<Self::Interner>,
|
||||
hidden_ty: <Self::Interner as Interner>::Ty,
|
||||
);
|
||||
|
||||
fn reset_opaque_types(&self);
|
||||
|
||||
fn trait_ref_is_knowable<E: Debug>(
|
||||
&self,
|
||||
trait_ref: ty::TraitRef<Self::Interner>,
|
||||
lazily_normalize_ty: impl FnMut(
|
||||
<Self::Interner as Interner>::Ty,
|
||||
) -> Result<<Self::Interner as Interner>::Ty, E>,
|
||||
) -> Result<bool, E>;
|
||||
|
||||
fn fetch_eligible_assoc_item(
|
||||
&self,
|
||||
param_env: <Self::Interner as Interner>::ParamEnv,
|
||||
goal_trait_ref: ty::TraitRef<Self::Interner>,
|
||||
trait_assoc_def_id: <Self::Interner as Interner>::DefId,
|
||||
impl_def_id: <Self::Interner as Interner>::DefId,
|
||||
) -> Result<Option<<Self::Interner as Interner>::DefId>, NoSolution>;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,12 @@
|
|||
//! but were uplifted in the process of making the new trait solver generic.
|
||||
//! So if you got to this crate from the old solver, it's totally normal.
|
||||
|
||||
#![feature(let_chains)]
|
||||
|
||||
// TODO: remove this, use explicit imports.
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
pub mod canonicalizer;
|
||||
pub mod infcx;
|
||||
pub mod resolve;
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
pub use rustc_type_ir::solve::*;
|
98
compiler/rustc_next_trait_solver/src/solve/alias_relate.rs
Normal file
98
compiler/rustc_next_trait_solver/src/solve/alias_relate.rs
Normal file
|
@ -0,0 +1,98 @@
|
|||
//! Implements the `AliasRelate` goal, which is used when unifying aliases.
|
||||
//! Doing this via a separate goal is called "deferred alias relation" and part
|
||||
//! of our more general approach to "lazy normalization".
|
||||
//!
|
||||
//! This is done by first structurally normalizing both sides of the goal, ending
|
||||
//! up in either a concrete type, rigid alias, or an infer variable.
|
||||
//! These are related further according to the rules below:
|
||||
//!
|
||||
//! (1.) If we end up with two rigid aliases, then we relate them structurally.
|
||||
//!
|
||||
//! (2.) If we end up with an infer var and a rigid alias, then we instantiate
|
||||
//! the infer var with the constructor of the alias and then recursively relate
|
||||
//! the terms.
|
||||
//!
|
||||
//! (3.) Otherwise, if we end with two rigid (non-projection) or infer types,
|
||||
//! relate them structurally.
|
||||
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, QueryResult};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub(super) fn compute_alias_relate_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, (I::Term, I::Term, ty::AliasRelationDirection)>,
|
||||
) -> QueryResult<I> {
|
||||
let tcx = self.interner();
|
||||
let Goal { param_env, predicate: (lhs, rhs, direction) } = goal;
|
||||
debug_assert!(lhs.to_alias_term().is_some() || rhs.to_alias_term().is_some());
|
||||
|
||||
// Structurally normalize the lhs.
|
||||
let lhs = if let Some(alias) = lhs.to_alias_term() {
|
||||
let term = self.next_term_infer_of_kind(lhs);
|
||||
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
|
||||
term
|
||||
} else {
|
||||
lhs
|
||||
};
|
||||
|
||||
// Structurally normalize the rhs.
|
||||
let rhs = if let Some(alias) = rhs.to_alias_term() {
|
||||
let term = self.next_term_infer_of_kind(rhs);
|
||||
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
|
||||
term
|
||||
} else {
|
||||
rhs
|
||||
};
|
||||
|
||||
// Add a `make_canonical_response` probe step so that we treat this as
|
||||
// a candidate, even if `try_evaluate_added_goals` bails due to an error.
|
||||
// It's `Certainty::AMBIGUOUS` because this candidate is not "finished",
|
||||
// since equating the normalized terms will lead to additional constraints.
|
||||
self.inspect.make_canonical_response(Certainty::AMBIGUOUS);
|
||||
|
||||
// Apply the constraints.
|
||||
self.try_evaluate_added_goals()?;
|
||||
let lhs = self.resolve_vars_if_possible(lhs);
|
||||
let rhs = self.resolve_vars_if_possible(rhs);
|
||||
trace!(?lhs, ?rhs);
|
||||
|
||||
let variance = match direction {
|
||||
ty::AliasRelationDirection::Equate => ty::Invariant,
|
||||
ty::AliasRelationDirection::Subtype => ty::Covariant,
|
||||
};
|
||||
match (lhs.to_alias_term(), rhs.to_alias_term()) {
|
||||
(None, None) => {
|
||||
self.relate(param_env, lhs, variance, rhs)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
|
||||
(Some(alias), None) => {
|
||||
self.relate_rigid_alias_non_alias(param_env, alias, variance, rhs)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
(None, Some(alias)) => {
|
||||
self.relate_rigid_alias_non_alias(
|
||||
param_env,
|
||||
alias,
|
||||
variance.xform(ty::Contravariant),
|
||||
lhs,
|
||||
)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
|
||||
(Some(alias_lhs), Some(alias_rhs)) => {
|
||||
self.relate(param_env, alias_lhs, variance, alias_rhs)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
738
compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs
Normal file
738
compiler/rustc_next_trait_solver/src/solve/assembly/mod.rs
Normal file
|
@ -0,0 +1,738 @@
|
|||
//! Code shared by trait and projection goals for candidate assembly.
|
||||
|
||||
pub(super) mod structural_traits;
|
||||
|
||||
use rustc_type_ir::fold::TypeFoldable;
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::lang_items::TraitSolverLangItem;
|
||||
use rustc_type_ir::visit::TypeVisitableExt as _;
|
||||
use rustc_type_ir::{self as ty, Interner, Upcast as _};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::inspect::ProbeKind;
|
||||
use crate::solve::{
|
||||
BuiltinImplSource, CandidateSource, CanonicalResponse, Certainty, EvalCtxt, Goal, GoalSource,
|
||||
MaybeCause, NoSolution, QueryResult, SolverMode,
|
||||
};
|
||||
|
||||
/// A candidate is a possible way to prove a goal.
|
||||
///
|
||||
/// It consists of both the `source`, which describes how that goal would be proven,
|
||||
/// and the `result` when using the given `source`.
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(Debug(bound = ""), Clone(bound = ""))]
|
||||
pub(super) struct Candidate<I: Interner> {
|
||||
pub(super) source: CandidateSource<I>,
|
||||
pub(super) result: CanonicalResponse<I>,
|
||||
}
|
||||
|
||||
/// Methods used to assemble candidates for either trait or projection goals.
|
||||
pub(super) trait GoalKind<Infcx, I = <Infcx as SolverDelegate>::Interner>:
|
||||
TypeFoldable<I> + Copy + Eq + std::fmt::Display
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
fn self_ty(self) -> I::Ty;
|
||||
|
||||
fn trait_ref(self, tcx: I) -> ty::TraitRef<I>;
|
||||
|
||||
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self;
|
||||
|
||||
fn trait_def_id(self, tcx: I) -> I::DefId;
|
||||
|
||||
/// Try equating an assumption predicate against a goal's predicate. If it
|
||||
/// holds, then execute the `then` callback, which should do any additional
|
||||
/// work, then produce a response (typically by executing
|
||||
/// [`EvalCtxt::evaluate_added_goals_and_make_canonical_response`]).
|
||||
fn probe_and_match_goal_against_assumption(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
source: CandidateSource<I>,
|
||||
goal: Goal<I, Self>,
|
||||
assumption: I::Clause,
|
||||
then: impl FnOnce(&mut EvalCtxt<'_, Infcx>) -> QueryResult<I>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// Consider a clause, which consists of a "assumption" and some "requirements",
|
||||
/// to satisfy a goal. If the requirements hold, then attempt to satisfy our
|
||||
/// goal by equating it with the assumption.
|
||||
fn probe_and_consider_implied_clause(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
parent_source: CandidateSource<I>,
|
||||
goal: Goal<I, Self>,
|
||||
assumption: I::Clause,
|
||||
requirements: impl IntoIterator<Item = (GoalSource, Goal<I, I::Predicate>)>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
Self::probe_and_match_goal_against_assumption(ecx, parent_source, goal, assumption, |ecx| {
|
||||
for (nested_source, goal) in requirements {
|
||||
ecx.add_goal(nested_source, goal);
|
||||
}
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
/// Consider a clause specifically for a `dyn Trait` self type. This requires
|
||||
/// additionally checking all of the supertraits and object bounds to hold,
|
||||
/// since they're not implied by the well-formedness of the object type.
|
||||
fn probe_and_consider_object_bound_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
source: CandidateSource<I>,
|
||||
goal: Goal<I, Self>,
|
||||
assumption: I::Clause,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
Self::probe_and_match_goal_against_assumption(ecx, source, goal, assumption, |ecx| {
|
||||
let tcx = ecx.interner();
|
||||
let ty::Dynamic(bounds, _, _) = goal.predicate.self_ty().kind() else {
|
||||
panic!("expected object type in `probe_and_consider_object_bound_candidate`");
|
||||
};
|
||||
ecx.add_goals(
|
||||
GoalSource::ImplWhereBound,
|
||||
structural_traits::predicates_for_object_candidate(
|
||||
ecx,
|
||||
goal.param_env,
|
||||
goal.predicate.trait_ref(tcx),
|
||||
bounds,
|
||||
),
|
||||
);
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_impl_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
impl_def_id: I::DefId,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// If the predicate contained an error, we want to avoid emitting unnecessary trait
|
||||
/// errors but still want to emit errors for other trait goals. We have some special
|
||||
/// handling for this case.
|
||||
///
|
||||
/// Trait goals always hold while projection goals never do. This is a bit arbitrary
|
||||
/// but prevents incorrect normalization while hiding any trait errors.
|
||||
fn consider_error_guaranteed_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
guar: I::ErrorGuaranteed,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A type implements an `auto trait` if its components do as well.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_auto_trait`].
|
||||
fn consider_auto_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A trait alias holds if the RHS traits and `where` clauses hold.
|
||||
fn consider_trait_alias_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A type is `Sized` if its tail component is `Sized`.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_sized_trait`].
|
||||
fn consider_builtin_sized_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A type is `Copy` or `Clone` if its components are `Copy` or `Clone`.
|
||||
///
|
||||
/// These components are given by built-in rules from
|
||||
/// [`structural_traits::instantiate_constituent_tys_for_copy_clone_trait`].
|
||||
fn consider_builtin_copy_clone_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A type is `PointerLike` if we can compute its layout, and that layout
|
||||
/// matches the layout of `usize`.
|
||||
fn consider_builtin_pointer_like_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A type is a `FnPtr` if it is of `FnPtr` type.
|
||||
fn consider_builtin_fn_ptr_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A callable type (a closure, fn def, or fn ptr) is known to implement the `Fn<A>`
|
||||
/// family of traits where `A` is given by the signature of the type.
|
||||
fn consider_builtin_fn_trait_candidates(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
kind: ty::ClosureKind,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// An async closure is known to implement the `AsyncFn<A>` family of traits
|
||||
/// where `A` is given by the signature of the type.
|
||||
fn consider_builtin_async_fn_trait_candidates(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
kind: ty::ClosureKind,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// Compute the built-in logic of the `AsyncFnKindHelper` helper trait, which
|
||||
/// is used internally to delay computation for async closures until after
|
||||
/// upvar analysis is performed in HIR typeck.
|
||||
fn consider_builtin_async_fn_kind_helper_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// `Tuple` is implemented if the `Self` type is a tuple.
|
||||
fn consider_builtin_tuple_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// `Pointee` is always implemented.
|
||||
///
|
||||
/// See the projection implementation for the `Metadata` types for all of
|
||||
/// the built-in types. For structs, the metadata type is given by the struct
|
||||
/// tail.
|
||||
fn consider_builtin_pointee_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A coroutine (that comes from an `async` desugaring) is known to implement
|
||||
/// `Future<Output = O>`, where `O` is given by the coroutine's return type
|
||||
/// that was computed during type-checking.
|
||||
fn consider_builtin_future_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A coroutine (that comes from a `gen` desugaring) is known to implement
|
||||
/// `Iterator<Item = O>`, where `O` is given by the generator's yield type
|
||||
/// that was computed during type-checking.
|
||||
fn consider_builtin_iterator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A coroutine (that comes from a `gen` desugaring) is known to implement
|
||||
/// `FusedIterator`
|
||||
fn consider_builtin_fused_iterator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
fn consider_builtin_async_iterator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// A coroutine (that doesn't come from an `async` or `gen` desugaring) is known to
|
||||
/// implement `Coroutine<R, Yield = Y, Return = O>`, given the resume, yield,
|
||||
/// and return types of the coroutine computed during type-checking.
|
||||
fn consider_builtin_coroutine_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
fn consider_builtin_discriminant_kind_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
fn consider_builtin_async_destruct_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
fn consider_builtin_destruct_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
fn consider_builtin_transmute_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution>;
|
||||
|
||||
/// Consider (possibly several) candidates to upcast or unsize a type to another
|
||||
/// type, excluding the coercion of a sized type into a `dyn Trait`.
|
||||
///
|
||||
/// We return the `BuiltinImplSource` for each candidate as it is needed
|
||||
/// for unsize coercion in hir typeck and because it is difficult to
|
||||
/// otherwise recompute this for codegen. This is a bit of a mess but the
|
||||
/// easiest way to maintain the existing behavior for now.
|
||||
fn consider_structural_builtin_unsize_candidates(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Vec<Candidate<I>>;
|
||||
}
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
pub(super) fn assemble_and_evaluate_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
) -> Vec<Candidate<I>> {
|
||||
let Ok(normalized_self_ty) =
|
||||
self.structurally_normalize_ty(goal.param_env, goal.predicate.self_ty())
|
||||
else {
|
||||
return vec![];
|
||||
};
|
||||
|
||||
if normalized_self_ty.is_ty_var() {
|
||||
debug!("self type has been normalized to infer");
|
||||
return self.forced_ambiguity(MaybeCause::Ambiguity).into_iter().collect();
|
||||
}
|
||||
|
||||
let goal: Goal<I, G> = goal.with(
|
||||
self.interner(),
|
||||
goal.predicate.with_self_ty(self.interner(), normalized_self_ty),
|
||||
);
|
||||
// Vars that show up in the rest of the goal substs may have been constrained by
|
||||
// normalizing the self type as well, since type variables are not uniquified.
|
||||
let goal = self.resolve_vars_if_possible(goal);
|
||||
|
||||
let mut candidates = vec![];
|
||||
|
||||
self.assemble_impl_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_builtin_impl_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_alias_bound_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_object_bound_candidates(goal, &mut candidates);
|
||||
|
||||
self.assemble_param_env_candidates(goal, &mut candidates);
|
||||
|
||||
match self.solver_mode() {
|
||||
SolverMode::Normal => self.discard_impls_shadowed_by_env(goal, &mut candidates),
|
||||
SolverMode::Coherence => {
|
||||
self.assemble_coherence_unknowable_candidates(goal, &mut candidates)
|
||||
}
|
||||
}
|
||||
|
||||
candidates
|
||||
}
|
||||
|
||||
pub(super) fn forced_ambiguity(
|
||||
&mut self,
|
||||
cause: MaybeCause,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
// This may fail if `try_evaluate_added_goals` overflows because it
|
||||
// fails to reach a fixpoint but ends up getting an error after
|
||||
// running for some additional step.
|
||||
//
|
||||
// cc trait-system-refactor-initiative#105
|
||||
let source = CandidateSource::BuiltinImpl(BuiltinImplSource::Misc);
|
||||
let certainty = Certainty::Maybe(cause);
|
||||
self.probe_trait_candidate(source)
|
||||
.enter(|this| this.evaluate_added_goals_and_make_canonical_response(certainty))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_impl_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let tcx = self.interner();
|
||||
tcx.for_each_relevant_impl(
|
||||
goal.predicate.trait_def_id(tcx),
|
||||
goal.predicate.self_ty(),
|
||||
|impl_def_id| {
|
||||
// For every `default impl`, there's always a non-default `impl`
|
||||
// that will *also* apply. There's no reason to register a candidate
|
||||
// for this impl, since it is *not* proof that the trait goal holds.
|
||||
if tcx.impl_is_default(impl_def_id) {
|
||||
return;
|
||||
}
|
||||
|
||||
match G::consider_impl_candidate(self, goal, impl_def_id) {
|
||||
Ok(candidate) => candidates.push(candidate),
|
||||
Err(NoSolution) => (),
|
||||
}
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_builtin_impl_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let tcx = self.interner();
|
||||
let trait_def_id = goal.predicate.trait_def_id(tcx);
|
||||
|
||||
// N.B. When assembling built-in candidates for lang items that are also
|
||||
// `auto` traits, then the auto trait candidate that is assembled in
|
||||
// `consider_auto_trait_candidate` MUST be disqualified to remain sound.
|
||||
//
|
||||
// Instead of adding the logic here, it's a better idea to add it in
|
||||
// `EvalCtxt::disqualify_auto_trait_candidate_due_to_possible_impl` in
|
||||
// `solve::trait_goals` instead.
|
||||
let result = if let Err(guar) = goal.predicate.error_reported() {
|
||||
G::consider_error_guaranteed_candidate(self, guar)
|
||||
} else if tcx.trait_is_auto(trait_def_id) {
|
||||
G::consider_auto_trait_candidate(self, goal)
|
||||
} else if tcx.trait_is_alias(trait_def_id) {
|
||||
G::consider_trait_alias_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Sized) {
|
||||
G::consider_builtin_sized_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Copy)
|
||||
|| tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Clone)
|
||||
{
|
||||
G::consider_builtin_copy_clone_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointerLike) {
|
||||
G::consider_builtin_pointer_like_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FnPtrTrait) {
|
||||
G::consider_builtin_fn_ptr_trait_candidate(self, goal)
|
||||
} else if let Some(kind) = self.interner().fn_trait_kind_from_def_id(trait_def_id) {
|
||||
G::consider_builtin_fn_trait_candidates(self, goal, kind)
|
||||
} else if let Some(kind) = self.interner().async_fn_trait_kind_from_def_id(trait_def_id) {
|
||||
G::consider_builtin_async_fn_trait_candidates(self, goal, kind)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncFnKindHelper) {
|
||||
G::consider_builtin_async_fn_kind_helper_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Tuple) {
|
||||
G::consider_builtin_tuple_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointeeTrait) {
|
||||
G::consider_builtin_pointee_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Future) {
|
||||
G::consider_builtin_future_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Iterator) {
|
||||
G::consider_builtin_iterator_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FusedIterator) {
|
||||
G::consider_builtin_fused_iterator_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncIterator) {
|
||||
G::consider_builtin_async_iterator_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Coroutine) {
|
||||
G::consider_builtin_coroutine_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::DiscriminantKind) {
|
||||
G::consider_builtin_discriminant_kind_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncDestruct) {
|
||||
G::consider_builtin_async_destruct_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Destruct) {
|
||||
G::consider_builtin_destruct_candidate(self, goal)
|
||||
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::TransmuteTrait) {
|
||||
G::consider_builtin_transmute_candidate(self, goal)
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
};
|
||||
|
||||
candidates.extend(result);
|
||||
|
||||
// There may be multiple unsize candidates for a trait with several supertraits:
|
||||
// `trait Foo: Bar<A> + Bar<B>` and `dyn Foo: Unsize<dyn Bar<_>>`
|
||||
if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Unsize) {
|
||||
candidates.extend(G::consider_structural_builtin_unsize_candidates(self, goal));
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_param_env_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
for (i, assumption) in goal.param_env.caller_bounds().into_iter().enumerate() {
|
||||
candidates.extend(G::probe_and_consider_implied_clause(
|
||||
self,
|
||||
CandidateSource::ParamEnv(i),
|
||||
goal,
|
||||
assumption,
|
||||
[],
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_alias_bound_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let () = self.probe(|_| ProbeKind::NormalizedSelfTyAssembly).enter(|ecx| {
|
||||
ecx.assemble_alias_bound_candidates_recur(goal.predicate.self_ty(), goal, candidates);
|
||||
});
|
||||
}
|
||||
|
||||
/// For some deeply nested `<T>::A::B::C::D` rigid associated type,
|
||||
/// we should explore the item bounds for all levels, since the
|
||||
/// `associated_type_bounds` feature means that a parent associated
|
||||
/// type may carry bounds for a nested associated type.
|
||||
///
|
||||
/// If we have a projection, check that its self type is a rigid projection.
|
||||
/// If so, continue searching by recursively calling after normalization.
|
||||
// FIXME: This may recurse infinitely, but I can't seem to trigger it without
|
||||
// hitting another overflow error something. Add a depth parameter needed later.
|
||||
fn assemble_alias_bound_candidates_recur<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
self_ty: I::Ty,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let (kind, alias_ty) = match self_ty.kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Pat(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_, _)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::FnDef(_, _)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Dynamic(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Coroutine(..)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Error(_) => return,
|
||||
ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) | ty::Bound(..) => {
|
||||
panic!("unexpected self type for `{goal:?}`")
|
||||
}
|
||||
|
||||
ty::Infer(ty::TyVar(_)) => {
|
||||
// If we hit infer when normalizing the self type of an alias,
|
||||
// then bail with ambiguity. We should never encounter this on
|
||||
// the *first* iteration of this recursive function.
|
||||
if let Ok(result) =
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
{
|
||||
candidates.push(Candidate { source: CandidateSource::AliasBound, result });
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
ty::Alias(kind @ (ty::Projection | ty::Opaque), alias_ty) => (kind, alias_ty),
|
||||
ty::Alias(ty::Inherent | ty::Weak, _) => {
|
||||
self.interner().delay_bug(format!("could not normalize {self_ty:?}, it is not WF"));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
for assumption in self
|
||||
.interner()
|
||||
.item_bounds(alias_ty.def_id)
|
||||
.iter_instantiated(self.interner(), &alias_ty.args)
|
||||
{
|
||||
candidates.extend(G::probe_and_consider_implied_clause(
|
||||
self,
|
||||
CandidateSource::AliasBound,
|
||||
goal,
|
||||
assumption,
|
||||
[],
|
||||
));
|
||||
}
|
||||
|
||||
if kind != ty::Projection {
|
||||
return;
|
||||
}
|
||||
|
||||
// Recurse on the self type of the projection.
|
||||
match self.structurally_normalize_ty(goal.param_env, alias_ty.self_ty()) {
|
||||
Ok(next_self_ty) => {
|
||||
self.assemble_alias_bound_candidates_recur(next_self_ty, goal, candidates)
|
||||
}
|
||||
Err(NoSolution) => {}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_object_bound_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let tcx = self.interner();
|
||||
if !tcx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(tcx)) {
|
||||
return;
|
||||
}
|
||||
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let bounds = match self_ty.kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Pat(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_, _)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::FnDef(_, _)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Alias(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Coroutine(..)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Error(_) => return,
|
||||
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
|
||||
| ty::Bound(..) => panic!("unexpected self type for `{goal:?}`"),
|
||||
ty::Dynamic(bounds, ..) => bounds,
|
||||
};
|
||||
|
||||
// Do not consider built-in object impls for non-object-safe types.
|
||||
if bounds.principal_def_id().is_some_and(|def_id| !tcx.trait_is_object_safe(def_id)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Consider all of the auto-trait and projection bounds, which don't
|
||||
// need to be recorded as a `BuiltinImplSource::Object` since they don't
|
||||
// really have a vtable base...
|
||||
for bound in bounds {
|
||||
match bound.skip_binder() {
|
||||
ty::ExistentialPredicate::Trait(_) => {
|
||||
// Skip principal
|
||||
}
|
||||
ty::ExistentialPredicate::Projection(_)
|
||||
| ty::ExistentialPredicate::AutoTrait(_) => {
|
||||
candidates.extend(G::probe_and_consider_object_bound_candidate(
|
||||
self,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
bound.with_self_ty(tcx, self_ty),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: We only need to do *any* of this if we're considering a trait goal,
|
||||
// since we don't need to look at any supertrait or anything if we are doing
|
||||
// a projection goal.
|
||||
if let Some(principal) = bounds.principal() {
|
||||
let principal_trait_ref = principal.with_self_ty(tcx, self_ty);
|
||||
for (idx, assumption) in
|
||||
Infcx::elaborate_supertraits(tcx, principal_trait_ref).enumerate()
|
||||
{
|
||||
candidates.extend(G::probe_and_consider_object_bound_candidate(
|
||||
self,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Object(idx)),
|
||||
goal,
|
||||
assumption.upcast(tcx),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// In coherence we have to not only care about all impls we know about, but
|
||||
/// also consider impls which may get added in a downstream or sibling crate
|
||||
/// or which an upstream impl may add in a minor release.
|
||||
///
|
||||
/// To do so we add an ambiguous candidate in case such an unknown impl could
|
||||
/// apply to the current goal.
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn assemble_coherence_unknowable_candidates<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let tcx = self.interner();
|
||||
|
||||
candidates.extend(self.probe_trait_candidate(CandidateSource::CoherenceUnknowable).enter(
|
||||
|ecx| {
|
||||
let trait_ref = goal.predicate.trait_ref(tcx);
|
||||
if ecx.trait_ref_is_knowable(goal.param_env, trait_ref)? {
|
||||
Err(NoSolution)
|
||||
} else {
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
}
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
/// If there's a where-bound for the current goal, do not use any impl candidates
|
||||
/// to prove the current goal. Most importantly, if there is a where-bound which does
|
||||
/// not specify any associated types, we do not allow normalizing the associated type
|
||||
/// by using an impl, even if it would apply.
|
||||
///
|
||||
/// <https://github.com/rust-lang/trait-system-refactor-initiative/issues/76>
|
||||
// FIXME(@lcnr): The current structure here makes me unhappy and feels ugly. idk how
|
||||
// to improve this however. However, this should make it fairly straightforward to refine
|
||||
// the filtering going forward, so it seems alright-ish for now.
|
||||
#[instrument(level = "debug", skip(self, goal))]
|
||||
fn discard_impls_shadowed_by_env<G: GoalKind<Infcx>>(
|
||||
&mut self,
|
||||
goal: Goal<I, G>,
|
||||
candidates: &mut Vec<Candidate<I>>,
|
||||
) {
|
||||
let tcx = self.interner();
|
||||
let trait_goal: Goal<I, ty::TraitPredicate<I>> =
|
||||
goal.with(tcx, goal.predicate.trait_ref(tcx));
|
||||
|
||||
let mut trait_candidates_from_env = vec![];
|
||||
self.probe(|_| ProbeKind::ShadowedEnvProbing).enter(|ecx| {
|
||||
ecx.assemble_param_env_candidates(trait_goal, &mut trait_candidates_from_env);
|
||||
ecx.assemble_alias_bound_candidates(trait_goal, &mut trait_candidates_from_env);
|
||||
});
|
||||
|
||||
if !trait_candidates_from_env.is_empty() {
|
||||
let trait_env_result = self.merge_candidates(trait_candidates_from_env);
|
||||
match trait_env_result.unwrap().value.certainty {
|
||||
// If proving the trait goal succeeds by using the env,
|
||||
// we freely drop all impl candidates.
|
||||
//
|
||||
// FIXME(@lcnr): It feels like this could easily hide
|
||||
// a forced ambiguity candidate added earlier.
|
||||
// This feels dangerous.
|
||||
Certainty::Yes => {
|
||||
candidates.retain(|c| match c.source {
|
||||
CandidateSource::Impl(_) | CandidateSource::BuiltinImpl(_) => {
|
||||
debug!(?c, "discard impl candidate");
|
||||
false
|
||||
}
|
||||
CandidateSource::ParamEnv(_) | CandidateSource::AliasBound => true,
|
||||
CandidateSource::CoherenceUnknowable => panic!("uh oh"),
|
||||
});
|
||||
}
|
||||
// If it is still ambiguous we instead just force the whole goal
|
||||
// to be ambig and wait for inference constraints. See
|
||||
// tests/ui/traits/next-solver/env-shadows-impls/ambig-env-no-shadow.rs
|
||||
Certainty::Maybe(cause) => {
|
||||
debug!(?cause, "force ambiguity");
|
||||
*candidates = self.forced_ambiguity(cause).into_iter().collect();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// If there are multiple ways to prove a trait or projection goal, we have
|
||||
/// to somehow try to merge the candidates into one. If that fails, we return
|
||||
/// ambiguity.
|
||||
#[instrument(level = "debug", skip(self), ret)]
|
||||
pub(super) fn merge_candidates(&mut self, candidates: Vec<Candidate<I>>) -> QueryResult<I> {
|
||||
// First try merging all candidates. This is complete and fully sound.
|
||||
let responses = candidates.iter().map(|c| c.result).collect::<Vec<_>>();
|
||||
if let Some(result) = self.try_merge_responses(&responses) {
|
||||
return Ok(result);
|
||||
} else {
|
||||
self.flounder(&responses)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,749 @@
|
|||
//! Code which is used by built-in goals that match "structurally", such a auto
|
||||
//! traits, `Copy`/`Clone`.
|
||||
|
||||
use rustc_ast_ir::{Movability, Mutability};
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::lang_items::TraitSolverLangItem;
|
||||
use rustc_type_ir::{self as ty, Interner, Upcast as _};
|
||||
use rustc_type_ir_macros::{TypeFoldable_Generic, TypeVisitable_Generic};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{EvalCtxt, Goal, NoSolution};
|
||||
|
||||
// Calculates the constituent types of a type for `auto trait` purposes.
|
||||
#[instrument(level = "trace", skip(ecx), ret)]
|
||||
pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<Infcx, I>(
|
||||
ecx: &EvalCtxt<'_, Infcx>,
|
||||
ty: I::Ty,
|
||||
) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
let tcx = ecx.interner();
|
||||
match ty.kind() {
|
||||
ty::Uint(_)
|
||||
| ty::Int(_)
|
||||
| ty::Bool
|
||||
| ty::Float(_)
|
||||
| ty::FnDef(..)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Error(_)
|
||||
| ty::Never
|
||||
| ty::Char => Ok(vec![]),
|
||||
|
||||
// Treat `str` like it's defined as `struct str([u8]);`
|
||||
ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(tcx, Ty::new_u8(tcx)))]),
|
||||
|
||||
ty::Dynamic(..)
|
||||
| ty::Param(..)
|
||||
| ty::Foreign(..)
|
||||
| ty::Alias(ty::Projection | ty::Inherent | ty::Weak, ..)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Bound(..)
|
||||
| ty::Infer(_) => {
|
||||
panic!("unexpected type `{ty:?}`")
|
||||
}
|
||||
|
||||
ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => {
|
||||
Ok(vec![ty::Binder::dummy(element_ty)])
|
||||
}
|
||||
|
||||
ty::Pat(element_ty, _) | ty::Array(element_ty, _) | ty::Slice(element_ty) => {
|
||||
Ok(vec![ty::Binder::dummy(element_ty)])
|
||||
}
|
||||
|
||||
ty::Tuple(tys) => {
|
||||
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
|
||||
Ok(tys.into_iter().map(ty::Binder::dummy).collect())
|
||||
}
|
||||
|
||||
ty::Closure(_, args) => Ok(vec![ty::Binder::dummy(args.as_closure().tupled_upvars_ty())]),
|
||||
|
||||
ty::CoroutineClosure(_, args) => {
|
||||
Ok(vec![ty::Binder::dummy(args.as_coroutine_closure().tupled_upvars_ty())])
|
||||
}
|
||||
|
||||
ty::Coroutine(_, args) => {
|
||||
let coroutine_args = args.as_coroutine();
|
||||
Ok(vec![
|
||||
ty::Binder::dummy(coroutine_args.tupled_upvars_ty()),
|
||||
ty::Binder::dummy(coroutine_args.witness()),
|
||||
])
|
||||
}
|
||||
|
||||
ty::CoroutineWitness(def_id, args) => Ok(ecx
|
||||
.interner()
|
||||
.bound_coroutine_hidden_types(def_id)
|
||||
.into_iter()
|
||||
.map(|bty| bty.instantiate(tcx, &args))
|
||||
.collect()),
|
||||
|
||||
// For `PhantomData<T>`, we pass `T`.
|
||||
ty::Adt(def, args) if def.is_phantom_data() => Ok(vec![ty::Binder::dummy(args.type_at(0))]),
|
||||
|
||||
ty::Adt(def, args) => Ok(def
|
||||
.all_field_tys(tcx)
|
||||
.iter_instantiated(tcx, &args)
|
||||
.map(ty::Binder::dummy)
|
||||
.collect()),
|
||||
|
||||
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
|
||||
// We can resolve the `impl Trait` to its concrete type,
|
||||
// which enforces a DAG between the functions requiring
|
||||
// the auto trait bounds in question.
|
||||
Ok(vec![ty::Binder::dummy(tcx.type_of(def_id).instantiate(tcx, &args))])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(ecx), ret)]
|
||||
pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<Infcx, I>(
|
||||
ecx: &EvalCtxt<'_, Infcx>,
|
||||
ty: I::Ty,
|
||||
) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
match ty.kind() {
|
||||
// impl Sized for u*, i*, bool, f*, FnDef, FnPtr, *(const/mut) T, char, &mut? T, [T; N], dyn* Trait, !
|
||||
// impl Sized for Coroutine, CoroutineWitness, Closure, CoroutineClosure
|
||||
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Uint(_)
|
||||
| ty::Int(_)
|
||||
| ty::Bool
|
||||
| ty::Float(_)
|
||||
| ty::FnDef(..)
|
||||
| ty::FnPtr(_)
|
||||
| ty::RawPtr(..)
|
||||
| ty::Char
|
||||
| ty::Ref(..)
|
||||
| ty::Coroutine(..)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Array(..)
|
||||
| ty::Pat(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Never
|
||||
| ty::Dynamic(_, _, ty::DynStar)
|
||||
| ty::Error(_) => Ok(vec![]),
|
||||
|
||||
ty::Str
|
||||
| ty::Slice(_)
|
||||
| ty::Dynamic(..)
|
||||
| ty::Foreign(..)
|
||||
| ty::Alias(..)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..) => Err(NoSolution),
|
||||
|
||||
ty::Bound(..)
|
||||
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
|
||||
panic!("unexpected type `{ty:?}`")
|
||||
}
|
||||
|
||||
// impl Sized for ()
|
||||
// impl Sized for (T1, T2, .., Tn) where Tn: Sized if n >= 1
|
||||
ty::Tuple(tys) => Ok(tys.last().map_or_else(Vec::new, |&ty| vec![ty::Binder::dummy(ty)])),
|
||||
|
||||
// impl Sized for Adt<Args...> where sized_constraint(Adt)<Args...>: Sized
|
||||
// `sized_constraint(Adt)` is the deepest struct trail that can be determined
|
||||
// by the definition of `Adt`, independent of the generic args.
|
||||
// impl Sized for Adt<Args...> if sized_constraint(Adt) == None
|
||||
// As a performance optimization, `sized_constraint(Adt)` can return `None`
|
||||
// if the ADTs definition implies that it is sized by for all possible args.
|
||||
// In this case, the builtin impl will have no nested subgoals. This is a
|
||||
// "best effort" optimization and `sized_constraint` may return `Some`, even
|
||||
// if the ADT is sized for all possible args.
|
||||
ty::Adt(def, args) => {
|
||||
if let Some(sized_crit) = def.sized_constraint(ecx.interner()) {
|
||||
Ok(vec![ty::Binder::dummy(sized_crit.instantiate(ecx.interner(), &args))])
|
||||
} else {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(ecx), ret)]
|
||||
pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<Infcx, I>(
|
||||
ecx: &EvalCtxt<'_, Infcx>,
|
||||
ty: I::Ty,
|
||||
) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
match ty.kind() {
|
||||
// impl Copy/Clone for FnDef, FnPtr
|
||||
ty::FnDef(..) | ty::FnPtr(_) | ty::Error(_) => Ok(vec![]),
|
||||
|
||||
// Implementations are provided in core
|
||||
ty::Uint(_)
|
||||
| ty::Int(_)
|
||||
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Bool
|
||||
| ty::Float(_)
|
||||
| ty::Char
|
||||
| ty::RawPtr(..)
|
||||
| ty::Never
|
||||
| ty::Ref(_, _, Mutability::Not)
|
||||
| ty::Array(..) => Err(NoSolution),
|
||||
|
||||
// Cannot implement in core, as we can't be generic over patterns yet,
|
||||
// so we'd have to list all patterns and type combinations.
|
||||
ty::Pat(ty, ..) => Ok(vec![ty::Binder::dummy(ty)]),
|
||||
|
||||
ty::Dynamic(..)
|
||||
| ty::Str
|
||||
| ty::Slice(_)
|
||||
| ty::Foreign(..)
|
||||
| ty::Ref(_, _, Mutability::Mut)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Alias(_, _)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..) => Err(NoSolution),
|
||||
|
||||
ty::Bound(..)
|
||||
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
|
||||
panic!("unexpected type `{ty:?}`")
|
||||
}
|
||||
|
||||
// impl Copy/Clone for (T1, T2, .., Tn) where T1: Copy/Clone, T2: Copy/Clone, .. Tn: Copy/Clone
|
||||
ty::Tuple(tys) => Ok(tys.into_iter().map(ty::Binder::dummy).collect()),
|
||||
|
||||
// impl Copy/Clone for Closure where Self::TupledUpvars: Copy/Clone
|
||||
ty::Closure(_, args) => Ok(vec![ty::Binder::dummy(args.as_closure().tupled_upvars_ty())]),
|
||||
|
||||
ty::CoroutineClosure(..) => Err(NoSolution),
|
||||
|
||||
// only when `coroutine_clone` is enabled and the coroutine is movable
|
||||
// impl Copy/Clone for Coroutine where T: Copy/Clone forall T in (upvars, witnesses)
|
||||
ty::Coroutine(def_id, args) => match ecx.interner().coroutine_movability(def_id) {
|
||||
Movability::Static => Err(NoSolution),
|
||||
Movability::Movable => {
|
||||
if ecx.interner().features().coroutine_clone() {
|
||||
let coroutine = args.as_coroutine();
|
||||
Ok(vec![
|
||||
ty::Binder::dummy(coroutine.tupled_upvars_ty()),
|
||||
ty::Binder::dummy(coroutine.witness()),
|
||||
])
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// impl Copy/Clone for CoroutineWitness where T: Copy/Clone forall T in coroutine_hidden_types
|
||||
ty::CoroutineWitness(def_id, args) => Ok(ecx
|
||||
.interner()
|
||||
.bound_coroutine_hidden_types(def_id)
|
||||
.into_iter()
|
||||
.map(|bty| bty.instantiate(ecx.interner(), &args))
|
||||
.collect()),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a binder of the tupled inputs types and output type from a builtin callable type.
|
||||
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Interner>(
|
||||
tcx: I,
|
||||
self_ty: I::Ty,
|
||||
goal_kind: ty::ClosureKind,
|
||||
) -> Result<Option<ty::Binder<I, (I::Ty, I::Ty)>>, NoSolution> {
|
||||
match self_ty.kind() {
|
||||
// keep this in sync with assemble_fn_pointer_candidates until the old solver is removed.
|
||||
ty::FnDef(def_id, args) => {
|
||||
let sig = tcx.fn_sig(def_id);
|
||||
if sig.skip_binder().is_fn_trait_compatible() && !tcx.has_target_features(def_id) {
|
||||
Ok(Some(
|
||||
sig.instantiate(tcx, &args)
|
||||
.map_bound(|sig| (Ty::new_tup(tcx, &sig.inputs()), sig.output())),
|
||||
))
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
}
|
||||
// keep this in sync with assemble_fn_pointer_candidates until the old solver is removed.
|
||||
ty::FnPtr(sig) => {
|
||||
if sig.is_fn_trait_compatible() {
|
||||
Ok(Some(sig.map_bound(|sig| (Ty::new_tup(tcx, &sig.inputs()), sig.output()))))
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
}
|
||||
ty::Closure(_, args) => {
|
||||
let closure_args = args.as_closure();
|
||||
match closure_args.kind_ty().to_opt_closure_kind() {
|
||||
// If the closure's kind doesn't extend the goal kind,
|
||||
// then the closure doesn't implement the trait.
|
||||
Some(closure_kind) => {
|
||||
if !closure_kind.extends(goal_kind) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
}
|
||||
// Closure kind is not yet determined, so we return ambiguity unless
|
||||
// the expected kind is `FnOnce` as that is always implemented.
|
||||
None => {
|
||||
if goal_kind != ty::ClosureKind::FnOnce {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(closure_args.sig().map_bound(|sig| (sig.inputs()[0], sig.output()))))
|
||||
}
|
||||
|
||||
// Coroutine-closures don't implement `Fn` traits the normal way.
|
||||
// Instead, they always implement `FnOnce`, but only implement
|
||||
// `FnMut`/`Fn` if they capture no upvars, since those may borrow
|
||||
// from the closure.
|
||||
ty::CoroutineClosure(def_id, args) => {
|
||||
let args = args.as_coroutine_closure();
|
||||
let kind_ty = args.kind_ty();
|
||||
let sig = args.coroutine_closure_sig().skip_binder();
|
||||
|
||||
let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind()
|
||||
&& !args.tupled_upvars_ty().is_ty_var()
|
||||
{
|
||||
if !closure_kind.extends(goal_kind) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
// A coroutine-closure implements `FnOnce` *always*, since it may
|
||||
// always be called once. It additionally implements `Fn`/`FnMut`
|
||||
// only if it has no upvars referencing the closure-env lifetime,
|
||||
// and if the closure kind permits it.
|
||||
if closure_kind != ty::ClosureKind::FnOnce && args.has_self_borrows() {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
coroutine_closure_to_certain_coroutine(
|
||||
tcx,
|
||||
goal_kind,
|
||||
// No captures by ref, so this doesn't matter.
|
||||
Region::new_static(tcx),
|
||||
def_id,
|
||||
args,
|
||||
sig,
|
||||
)
|
||||
} else {
|
||||
// Closure kind is not yet determined, so we return ambiguity unless
|
||||
// the expected kind is `FnOnce` as that is always implemented.
|
||||
if goal_kind != ty::ClosureKind::FnOnce {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
coroutine_closure_to_ambiguous_coroutine(
|
||||
tcx,
|
||||
goal_kind, // No captures by ref, so this doesn't matter.
|
||||
Region::new_static(tcx),
|
||||
def_id,
|
||||
args,
|
||||
sig,
|
||||
)
|
||||
};
|
||||
|
||||
Ok(Some(args.coroutine_closure_sig().rebind((sig.tupled_inputs_ty, coroutine_ty))))
|
||||
}
|
||||
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_, _)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::Dynamic(_, _, _)
|
||||
| ty::Coroutine(_, _)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Pat(_, _)
|
||||
| ty::Alias(_, _)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Error(_) => Err(NoSolution),
|
||||
|
||||
ty::Bound(..)
|
||||
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
|
||||
panic!("unexpected type `{self_ty:?}`")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Relevant types for an async callable, including its inputs, output,
|
||||
/// and the return type you get from awaiting the output.
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(Clone(bound = ""), Copy(bound = ""), Debug(bound = ""))]
|
||||
#[derive(TypeVisitable_Generic, TypeFoldable_Generic)]
|
||||
pub(in crate::solve) struct AsyncCallableRelevantTypes<I: Interner> {
|
||||
pub tupled_inputs_ty: I::Ty,
|
||||
/// Type returned by calling the closure
|
||||
/// i.e. `f()`.
|
||||
pub output_coroutine_ty: I::Ty,
|
||||
/// Type returned by `await`ing the output
|
||||
/// i.e. `f().await`.
|
||||
pub coroutine_return_ty: I::Ty,
|
||||
}
|
||||
|
||||
// Returns a binder of the tupled inputs types, output type, and coroutine type
|
||||
// from a builtin coroutine-closure type. If we don't yet know the closure kind of
|
||||
// the coroutine-closure, emit an additional trait predicate for `AsyncFnKindHelper`
|
||||
// which enforces the closure is actually callable with the given trait. When we
|
||||
// know the kind already, we can short-circuit this check.
|
||||
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I: Interner>(
|
||||
tcx: I,
|
||||
self_ty: I::Ty,
|
||||
goal_kind: ty::ClosureKind,
|
||||
env_region: I::Region,
|
||||
) -> Result<(ty::Binder<I, AsyncCallableRelevantTypes<I>>, Vec<I::Predicate>), NoSolution> {
|
||||
match self_ty.kind() {
|
||||
ty::CoroutineClosure(def_id, args) => {
|
||||
let args = args.as_coroutine_closure();
|
||||
let kind_ty = args.kind_ty();
|
||||
let sig = args.coroutine_closure_sig().skip_binder();
|
||||
let mut nested = vec![];
|
||||
let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind()
|
||||
&& !args.tupled_upvars_ty().is_ty_var()
|
||||
{
|
||||
if !closure_kind.extends(goal_kind) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
coroutine_closure_to_certain_coroutine(
|
||||
tcx, goal_kind, env_region, def_id, args, sig,
|
||||
)
|
||||
} else {
|
||||
// When we don't know the closure kind (and therefore also the closure's upvars,
|
||||
// which are computed at the same time), we must delay the computation of the
|
||||
// generator's upvars. We do this using the `AsyncFnKindHelper`, which as a trait
|
||||
// goal functions similarly to the old `ClosureKind` predicate, and ensures that
|
||||
// the goal kind <= the closure kind. As a projection `AsyncFnKindHelper::Upvars`
|
||||
// will project to the right upvars for the generator, appending the inputs and
|
||||
// coroutine upvars respecting the closure kind.
|
||||
nested.push(
|
||||
ty::TraitRef::new(
|
||||
tcx,
|
||||
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper),
|
||||
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
|
||||
)
|
||||
.upcast(tcx),
|
||||
);
|
||||
|
||||
coroutine_closure_to_ambiguous_coroutine(
|
||||
tcx, goal_kind, env_region, def_id, args, sig,
|
||||
)
|
||||
};
|
||||
|
||||
Ok((
|
||||
args.coroutine_closure_sig().rebind(AsyncCallableRelevantTypes {
|
||||
tupled_inputs_ty: sig.tupled_inputs_ty,
|
||||
output_coroutine_ty: coroutine_ty,
|
||||
coroutine_return_ty: sig.return_ty,
|
||||
}),
|
||||
nested,
|
||||
))
|
||||
}
|
||||
|
||||
ty::FnDef(..) | ty::FnPtr(..) => {
|
||||
let bound_sig = self_ty.fn_sig(tcx);
|
||||
let sig = bound_sig.skip_binder();
|
||||
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
|
||||
// `FnDef` and `FnPtr` only implement `AsyncFn*` when their
|
||||
// return type implements `Future`.
|
||||
let nested = vec![
|
||||
bound_sig
|
||||
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
|
||||
.upcast(tcx),
|
||||
];
|
||||
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
|
||||
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
|
||||
Ok((
|
||||
bound_sig.rebind(AsyncCallableRelevantTypes {
|
||||
tupled_inputs_ty: Ty::new_tup(tcx, &sig.inputs()),
|
||||
output_coroutine_ty: sig.output(),
|
||||
coroutine_return_ty: future_output_ty,
|
||||
}),
|
||||
nested,
|
||||
))
|
||||
}
|
||||
ty::Closure(_, args) => {
|
||||
let args = args.as_closure();
|
||||
let bound_sig = args.sig();
|
||||
let sig = bound_sig.skip_binder();
|
||||
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
|
||||
// `Closure`s only implement `AsyncFn*` when their return type
|
||||
// implements `Future`.
|
||||
let mut nested = vec![
|
||||
bound_sig
|
||||
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
|
||||
.upcast(tcx),
|
||||
];
|
||||
|
||||
// Additionally, we need to check that the closure kind
|
||||
// is still compatible.
|
||||
let kind_ty = args.kind_ty();
|
||||
if let Some(closure_kind) = kind_ty.to_opt_closure_kind() {
|
||||
if !closure_kind.extends(goal_kind) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
} else {
|
||||
let async_fn_kind_trait_def_id =
|
||||
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper);
|
||||
// When we don't know the closure kind (and therefore also the closure's upvars,
|
||||
// which are computed at the same time), we must delay the computation of the
|
||||
// generator's upvars. We do this using the `AsyncFnKindHelper`, which as a trait
|
||||
// goal functions similarly to the old `ClosureKind` predicate, and ensures that
|
||||
// the goal kind <= the closure kind. As a projection `AsyncFnKindHelper::Upvars`
|
||||
// will project to the right upvars for the generator, appending the inputs and
|
||||
// coroutine upvars respecting the closure kind.
|
||||
nested.push(
|
||||
ty::TraitRef::new(
|
||||
tcx,
|
||||
async_fn_kind_trait_def_id,
|
||||
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
|
||||
)
|
||||
.upcast(tcx),
|
||||
);
|
||||
}
|
||||
|
||||
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
|
||||
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
|
||||
Ok((
|
||||
bound_sig.rebind(AsyncCallableRelevantTypes {
|
||||
tupled_inputs_ty: sig.inputs()[0],
|
||||
output_coroutine_ty: sig.output(),
|
||||
coroutine_return_ty: future_output_ty,
|
||||
}),
|
||||
nested,
|
||||
))
|
||||
}
|
||||
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Pat(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_, _)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::Dynamic(_, _, _)
|
||||
| ty::Coroutine(_, _)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Alias(_, _)
|
||||
| ty::Param(_)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
|
||||
| ty::Error(_) => Err(NoSolution),
|
||||
|
||||
ty::Bound(..)
|
||||
| ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
|
||||
panic!("unexpected type `{self_ty:?}`")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a coroutine-closure, project to its returned coroutine when we are *certain*
|
||||
/// that the closure's kind is compatible with the goal.
|
||||
fn coroutine_closure_to_certain_coroutine<I: Interner>(
|
||||
tcx: I,
|
||||
goal_kind: ty::ClosureKind,
|
||||
goal_region: I::Region,
|
||||
def_id: I::DefId,
|
||||
args: ty::CoroutineClosureArgs<I>,
|
||||
sig: ty::CoroutineClosureSignature<I>,
|
||||
) -> I::Ty {
|
||||
sig.to_coroutine_given_kind_and_upvars(
|
||||
tcx,
|
||||
args.parent_args(),
|
||||
tcx.coroutine_for_closure(def_id),
|
||||
goal_kind,
|
||||
goal_region,
|
||||
args.tupled_upvars_ty(),
|
||||
args.coroutine_captures_by_ref_ty(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Given a coroutine-closure, project to its returned coroutine when we are *not certain*
|
||||
/// that the closure's kind is compatible with the goal, and therefore also don't know
|
||||
/// yet what the closure's upvars are.
|
||||
///
|
||||
/// Note that we do not also push a `AsyncFnKindHelper` goal here.
|
||||
fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
|
||||
tcx: I,
|
||||
goal_kind: ty::ClosureKind,
|
||||
goal_region: I::Region,
|
||||
def_id: I::DefId,
|
||||
args: ty::CoroutineClosureArgs<I>,
|
||||
sig: ty::CoroutineClosureSignature<I>,
|
||||
) -> I::Ty {
|
||||
let upvars_projection_def_id = tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindUpvars);
|
||||
let tupled_upvars_ty = Ty::new_projection(
|
||||
tcx,
|
||||
upvars_projection_def_id,
|
||||
[
|
||||
I::GenericArg::from(args.kind_ty()),
|
||||
Ty::from_closure_kind(tcx, goal_kind).into(),
|
||||
goal_region.into(),
|
||||
sig.tupled_inputs_ty.into(),
|
||||
args.tupled_upvars_ty().into(),
|
||||
args.coroutine_captures_by_ref_ty().into(),
|
||||
],
|
||||
);
|
||||
sig.to_coroutine(
|
||||
tcx,
|
||||
args.parent_args(),
|
||||
Ty::from_closure_kind(tcx, goal_kind),
|
||||
tcx.coroutine_for_closure(def_id),
|
||||
tupled_upvars_ty,
|
||||
)
|
||||
}

/// Assemble a list of predicates that would be present on a theoretical
/// user impl for an object type. These predicates must be checked any time
/// we assemble a built-in object candidate for an object type, since they
/// are not implied by the well-formedness of the type.
///
/// For example, given the following traits:
///
/// ```rust,ignore (theoretical code)
/// trait Foo: Baz {
///     type Bar: Copy;
/// }
///
/// trait Baz {}
/// ```
///
/// For the dyn type `dyn Foo<Bar = Ty>`, we can imagine there being a
/// pair of theoretical impls:
///
/// ```rust,ignore (theoretical code)
/// impl Foo for dyn Foo<Bar = Ty>
/// where
///     Self: Baz,
///     <Self as Foo>::Bar: Copy,
/// {
///     type Bar = Ty;
/// }
///
/// impl Baz for dyn Foo<Bar = Ty> {}
/// ```
///
/// However, in order to make such impls well-formed, we need to do an
/// additional step of eagerly folding the associated types in the where
/// clauses of the impl. In this example, that means replacing
/// `<Self as Foo>::Bar` with `Ty` in the first impl.
///
// FIXME: This is only necessary as `<Self as Trait>::Assoc: ItemBound`
// bounds in impls are trivially proven using the item bound candidates.
// This is unsound in general and once that is fixed, we don't need to
// normalize eagerly here. See https://github.com/lcnr/solver-woes/issues/9
// for more details.
pub(in crate::solve) fn predicates_for_object_candidate<Infcx, I>(
    ecx: &EvalCtxt<'_, Infcx>,
    param_env: I::ParamEnv,
    trait_ref: ty::TraitRef<I>,
    object_bounds: I::BoundExistentialPredicates,
) -> Vec<Goal<I, I::Predicate>>
where
    Infcx: SolverDelegate<Interner = I>,
    I: Interner,
{
|
||||
let tcx = ecx.interner();
|
||||
let mut requirements = vec![];
|
||||
requirements
|
||||
.extend(tcx.super_predicates_of(trait_ref.def_id).iter_instantiated(tcx, &trait_ref.args));
|
||||
|
||||
// FIXME(associated_const_equality): Also add associated consts to
|
||||
// the requirements here.
|
||||
for associated_type_def_id in tcx.associated_type_def_ids(trait_ref.def_id) {
|
||||
// associated types that require `Self: Sized` do not show up in the built-in
|
||||
// implementation of `Trait for dyn Trait`, and can be dropped here.
|
||||
if tcx.generics_require_sized_self(associated_type_def_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
requirements.extend(
|
||||
tcx.item_bounds(associated_type_def_id).iter_instantiated(tcx, &trait_ref.args),
|
||||
);
|
||||
}
|
||||
|
||||
let mut replace_projection_with = FxHashMap::default();
|
||||
for bound in object_bounds {
|
||||
if let ty::ExistentialPredicate::Projection(proj) = bound.skip_binder() {
|
||||
let proj = proj.with_self_ty(tcx, trait_ref.self_ty());
|
||||
let old_ty = replace_projection_with.insert(proj.def_id(), bound.rebind(proj));
|
||||
assert_eq!(
|
||||
old_ty,
|
||||
None,
|
||||
"{:?} has two generic parameters: {:?} and {:?}",
|
||||
proj.projection_term,
|
||||
proj.term,
|
||||
old_ty.unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut folder =
|
||||
ReplaceProjectionWith { ecx, param_env, mapping: replace_projection_with, nested: vec![] };
|
||||
let folded_requirements = requirements.fold_with(&mut folder);
|
||||
|
||||
folder
|
||||
.nested
|
||||
.into_iter()
|
||||
.chain(folded_requirements.into_iter().map(|clause| Goal::new(tcx, param_env, clause)))
|
||||
.collect()
|
||||
}
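As a rough, compile-only illustration of the doc comment above (hypothetical traits, not solver code): the item bound `Bar: Copy` is not implied by the well-formedness of the object type itself, so the built-in `dyn Foo<Bar = u32>: Foo` candidate has to prove it for the concrete binding.

trait Baz {}

trait Foo: Baz {
    type Bar: Copy;
}

// Requires proving `T: Foo`; for `T = dyn Foo<Bar = u32>` this uses the built-in
// object candidate and therefore the predicates assembled above.
fn use_it<T: Foo + ?Sized>(_: &T) {}

fn check(obj: &dyn Foo<Bar = u32>) {
    // `u32: Copy` holds and the supertrait `Baz` is part of the object type.
    // With `Bar = String` this would be expected to fail, since `String: Copy` does not hold.
    use_it(obj);
}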
|
||||
|
||||
struct ReplaceProjectionWith<'a, Infcx: SolverDelegate<Interner = I>, I: Interner> {
|
||||
ecx: &'a EvalCtxt<'a, Infcx>,
|
||||
param_env: I::ParamEnv,
|
||||
mapping: FxHashMap<I::DefId, ty::Binder<I, ty::ProjectionPredicate<I>>>,
|
||||
nested: Vec<Goal<I, I::Predicate>>,
|
||||
}
|
||||
|
||||
impl<Infcx: SolverDelegate<Interner = I>, I: Interner> TypeFolder<I>
|
||||
for ReplaceProjectionWith<'_, Infcx, I>
|
||||
{
|
||||
fn interner(&self) -> I {
|
||||
self.ecx.interner()
|
||||
}
|
||||
|
||||
fn fold_ty(&mut self, ty: I::Ty) -> I::Ty {
|
||||
if let ty::Alias(ty::Projection, alias_ty) = ty.kind()
|
||||
&& let Some(replacement) = self.mapping.get(&alias_ty.def_id)
|
||||
{
|
||||
// We may have a case where our object type's projection bound is higher-ranked,
|
||||
// but the where clauses we instantiated are not. We can solve this by instantiating
|
||||
// the binder at the usage site.
|
||||
let proj = self.ecx.instantiate_binder_with_infer(*replacement);
|
||||
// FIXME: Technically this equate could be fallible...
|
||||
self.nested.extend(
|
||||
self.ecx
|
||||
.eq_and_get_goals(
|
||||
self.param_env,
|
||||
alias_ty,
|
||||
proj.projection_term.expect_ty(self.ecx.interner()),
|
||||
)
|
||||
.expect("expected to be able to unify goal projection with dyn's projection"),
|
||||
);
|
||||
proj.term.expect_ty()
|
||||
} else {
|
||||
ty.super_fold_with(self)
|
||||
}
|
||||
}
|
||||
}

@ -0,0 +1,396 @@
//! Canonicalization is used to separate some goal from its context,
//! throwing away unnecessary information in the process.
//!
//! This is necessary to cache goals containing inference variables
//! and placeholders without restricting them to the current `InferCtxt`.
//!
//! Canonicalization is fairly involved, for more details see the relevant
//! section of the [rustc-dev-guide][c].
//!
//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html

use std::iter;

use rustc_index::IndexVec;
use rustc_type_ir::fold::TypeFoldable;
use rustc_type_ir::inherent::*;
use rustc_type_ir::{self as ty, Canonical, CanonicalVarValues, Interner};

use crate::canonicalizer::{CanonicalizeMode, Canonicalizer};
use crate::infcx::SolverDelegate;
use crate::resolve::EagerResolver;
use crate::solve::eval_ctxt::NestedGoals;
use crate::solve::inspect;
use crate::solve::{
    response_no_constraints_raw, CanonicalInput, CanonicalResponse, Certainty, EvalCtxt,
    ExternalConstraintsData, Goal, MaybeCause, NestedNormalizationGoals, NoSolution,
    PredefinedOpaquesData, QueryInput, QueryResult, Response,
};

trait ResponseT<I: Interner> {
    fn var_values(&self) -> CanonicalVarValues<I>;
}
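A minimal toy sketch of what canonicalization does to a goal (invented types, not the rustc API; deduplication of repeated variables and universe tracking are omitted): each inference variable is replaced by a context-independent bound variable, and the original variable is remembered in `var_values` so the response can be mapped back later.

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Int,
    Vec(Box<Ty>),
    Infer(usize),     // inference variable `?n` of the current context
    Canonical(usize), // context-independent bound variable
}

fn canonicalize(ty: &Ty, var_values: &mut Vec<Ty>) -> Ty {
    match ty {
        Ty::Int => Ty::Int,
        Ty::Vec(inner) => Ty::Vec(Box::new(canonicalize(inner, var_values))),
        Ty::Infer(_) => {
            // Remember the original variable and hand out the next bound variable.
            let idx = var_values.len();
            var_values.push(ty.clone());
            Ty::Canonical(idx)
        }
        Ty::Canonical(_) => ty.clone(),
    }
}

fn main() {
    let goal = Ty::Vec(Box::new(Ty::Infer(7))); // roughly `Vec<?7>: Trait`
    let mut var_values = Vec::new();
    let canonical = canonicalize(&goal, &mut var_values);
    assert_eq!(canonical, Ty::Vec(Box::new(Ty::Canonical(0))));
    assert_eq!(var_values, vec![Ty::Infer(7)]);
}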
|
||||
|
||||
impl<I: Interner> ResponseT<I> for Response<I> {
|
||||
fn var_values(&self) -> CanonicalVarValues<I> {
|
||||
self.var_values
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: Interner, T> ResponseT<I> for inspect::State<I, T> {
|
||||
fn var_values(&self) -> CanonicalVarValues<I> {
|
||||
self.var_values
|
||||
}
|
||||
}
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
/// Canonicalizes the goal remembering the original values
|
||||
/// for each bound variable.
|
||||
pub(super) fn canonicalize_goal<T: TypeFoldable<I>>(
|
||||
&self,
|
||||
goal: Goal<I, T>,
|
||||
) -> (Vec<I::GenericArg>, CanonicalInput<I, T>) {
|
||||
let opaque_types = self.infcx.clone_opaque_types_for_query_response();
|
||||
let (goal, opaque_types) =
|
||||
(goal, opaque_types).fold_with(&mut EagerResolver::new(self.infcx));
|
||||
|
||||
let mut orig_values = Default::default();
|
||||
let canonical_goal = Canonicalizer::canonicalize(
|
||||
self.infcx,
|
||||
CanonicalizeMode::Input,
|
||||
&mut orig_values,
|
||||
QueryInput {
|
||||
goal,
|
||||
predefined_opaques_in_body: self
|
||||
.interner()
|
||||
.mk_predefined_opaques_in_body(PredefinedOpaquesData { opaque_types }),
|
||||
},
|
||||
);
|
||||
(orig_values, canonical_goal)
|
||||
}
|
||||
|
||||
/// To return the constraints of a canonical query to the caller, we canonicalize:
|
||||
///
|
||||
/// - `var_values`: a map from bound variables in the canonical goal to
|
||||
/// the values inferred while solving the instantiated goal.
|
||||
/// - `external_constraints`: additional constraints which aren't expressible
|
||||
/// using simple unification of inference variables.
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub(in crate::solve) fn evaluate_added_goals_and_make_canonical_response(
|
||||
&mut self,
|
||||
certainty: Certainty,
|
||||
) -> QueryResult<I> {
|
||||
self.inspect.make_canonical_response(certainty);
|
||||
|
||||
let goals_certainty = self.try_evaluate_added_goals()?;
|
||||
assert_eq!(
|
||||
self.tainted,
|
||||
Ok(()),
|
||||
"EvalCtxt is tainted -- nested goals may have been dropped in a \
|
||||
previous call to `try_evaluate_added_goals!`"
|
||||
);
|
||||
|
||||
// We only check for leaks from universes which were entered inside
|
||||
// of the query.
|
||||
self.infcx.leak_check(self.max_input_universe).map_err(|NoSolution| {
|
||||
trace!("failed the leak check");
|
||||
NoSolution
|
||||
})?;
|
||||
|
||||
// When normalizing, we've replaced the expected term with an unconstrained
|
||||
// inference variable. This means that we dropped information which could
|
||||
// have been important. We handle this by instead returning the nested goals
|
||||
// to the caller, where they are then handled.
|
||||
//
|
||||
// As we return all ambiguous nested goals, we can ignore the certainty returned
|
||||
// by `try_evaluate_added_goals()`.
|
||||
let (certainty, normalization_nested_goals) = if self.is_normalizes_to_goal {
|
||||
let NestedGoals { normalizes_to_goals, goals } = std::mem::take(&mut self.nested_goals);
|
||||
if cfg!(debug_assertions) {
|
||||
assert!(normalizes_to_goals.is_empty());
|
||||
if goals.is_empty() {
|
||||
assert!(matches!(goals_certainty, Certainty::Yes));
|
||||
}
|
||||
}
|
||||
(certainty, NestedNormalizationGoals(goals))
|
||||
} else {
|
||||
let certainty = certainty.unify_with(goals_certainty);
|
||||
(certainty, NestedNormalizationGoals::empty())
|
||||
};
|
||||
|
||||
let external_constraints =
|
||||
self.compute_external_query_constraints(certainty, normalization_nested_goals);
|
||||
let (var_values, mut external_constraints) =
|
||||
(self.var_values, external_constraints).fold_with(&mut EagerResolver::new(self.infcx));
|
||||
// Remove any trivial region constraints once we've resolved regions
|
||||
external_constraints
|
||||
.region_constraints
|
||||
.retain(|outlives| outlives.0.as_region().map_or(true, |re| re != outlives.1));
|
||||
|
||||
let canonical = Canonicalizer::canonicalize(
|
||||
self.infcx,
|
||||
CanonicalizeMode::Response { max_input_universe: self.max_input_universe },
|
||||
&mut Default::default(),
|
||||
Response {
|
||||
var_values,
|
||||
certainty,
|
||||
external_constraints: self.interner().mk_external_constraints(external_constraints),
|
||||
},
|
||||
);
|
||||
|
||||
Ok(canonical)
|
||||
}
|
||||
|
||||
/// Constructs a totally unconstrained, ambiguous response to a goal.
|
||||
///
|
||||
/// Take care when using this, since often it's useful to respond with
|
||||
/// ambiguity but return constrained variables to guide inference.
|
||||
pub(in crate::solve) fn make_ambiguous_response_no_constraints(
|
||||
&self,
|
||||
maybe_cause: MaybeCause,
|
||||
) -> CanonicalResponse<I> {
|
||||
response_no_constraints_raw(
|
||||
self.interner(),
|
||||
self.max_input_universe,
|
||||
self.variables,
|
||||
Certainty::Maybe(maybe_cause),
|
||||
)
|
||||
}
|
||||
|
||||
/// Computes the region constraints and *new* opaque types registered when
|
||||
/// proving a goal.
|
||||
///
|
||||
/// If an opaque was already constrained before proving this goal, then the
|
||||
/// external constraints do not need to record that opaque, since if it is
|
||||
/// further constrained by inference, that will be passed back in the var
|
||||
/// values.
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
fn compute_external_query_constraints(
|
||||
&self,
|
||||
certainty: Certainty,
|
||||
normalization_nested_goals: NestedNormalizationGoals<I>,
|
||||
) -> ExternalConstraintsData<I> {
|
||||
// We only return region constraints once the certainty is `Yes`. This
|
||||
// is necessary as we may drop nested goals on ambiguity, which may result
|
||||
// in unconstrained inference variables in the region constraints. It also
|
||||
// prevents us from emitting duplicate region constraints, avoiding some
|
||||
// unnecessary work. This slightly weakens the leak check in case it uses
|
||||
// region constraints from an ambiguous nested goal. This is tested in both
|
||||
// `tests/ui/higher-ranked/leak-check/leak-check-in-selection-5-ambig.rs` and
|
||||
// `tests/ui/higher-ranked/leak-check/leak-check-in-selection-6-ambig-unify.rs`.
|
||||
let region_constraints = if certainty == Certainty::Yes {
|
||||
self.infcx.make_deduplicated_outlives_constraints()
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
ExternalConstraintsData {
|
||||
region_constraints,
|
||||
opaque_types: self
|
||||
.infcx
|
||||
.clone_opaque_types_for_query_response()
|
||||
.into_iter()
|
||||
// Only return *newly defined* opaque types.
|
||||
.filter(|(a, _)| {
|
||||
self.predefined_opaques_in_body.opaque_types.iter().all(|(pa, _)| pa != a)
|
||||
})
|
||||
.collect(),
|
||||
normalization_nested_goals,
|
||||
}
|
||||
}
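A toy version (plain tuples and invented names, not the rustc types) of the "only return newly defined opaque types" filter used in `compute_external_query_constraints` above: anything already present in the predefined set is dropped from the external constraints.

fn newly_defined(
    predefined: &[(u32, &'static str)],
    all: Vec<(u32, &'static str)>,
) -> Vec<(u32, &'static str)> {
    all.into_iter()
        .filter(|(key, _)| predefined.iter().all(|(pre_key, _)| pre_key != key))
        .collect()
}

fn main() {
    let predefined = [(0, "i32")];
    let all = vec![(0, "i32"), (1, "u8")];
    // Only the opaque defined during this query is returned.
    assert_eq!(newly_defined(&predefined, all), vec![(1, "u8")]);
}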
|
||||
|
||||
/// After calling a canonical query, we apply the constraints returned
|
||||
/// by the query using this function.
|
||||
///
|
||||
/// This happens in three steps:
|
||||
/// - we instantiate the bound variables of the query response
|
||||
/// - we unify the `var_values` of the response with the `original_values`
|
||||
/// - we apply the `external_constraints` returned by the query, returning
|
||||
/// the `normalization_nested_goals`
|
||||
pub(super) fn instantiate_and_apply_query_response(
|
||||
&mut self,
|
||||
param_env: I::ParamEnv,
|
||||
original_values: Vec<I::GenericArg>,
|
||||
response: CanonicalResponse<I>,
|
||||
) -> (NestedNormalizationGoals<I>, Certainty) {
|
||||
let instantiation = Self::compute_query_response_instantiation_values(
|
||||
self.infcx,
|
||||
&original_values,
|
||||
&response,
|
||||
);
|
||||
|
||||
let Response { var_values, external_constraints, certainty } =
|
||||
self.infcx.instantiate_canonical(response, instantiation);
|
||||
|
||||
Self::unify_query_var_values(self.infcx, param_env, &original_values, var_values);
|
||||
|
||||
let ExternalConstraintsData {
|
||||
region_constraints,
|
||||
opaque_types,
|
||||
normalization_nested_goals,
|
||||
} = &*external_constraints;
|
||||
self.register_region_constraints(region_constraints);
|
||||
self.register_new_opaque_types(opaque_types);
|
||||
(normalization_nested_goals.clone(), certainty)
|
||||
}

    /// This returns the canonical variable values to instantiate the bound variables of
    /// the canonical response. This depends on the `original_values` for the
    /// bound variables.
    fn compute_query_response_instantiation_values<T: ResponseT<I>>(
        infcx: &Infcx,
        original_values: &[I::GenericArg],
        response: &Canonical<I, T>,
    ) -> CanonicalVarValues<I> {
        // FIXME: Longterm canonical queries should deal with all placeholders
        // created inside of the query directly instead of returning them to the
        // caller.
        let prev_universe = infcx.universe();
        let universes_created_in_query = response.max_universe.index();
        for _ in 0..universes_created_in_query {
            infcx.create_next_universe();
        }

        let var_values = response.value.var_values();
        assert_eq!(original_values.len(), var_values.len());

        // If the query did not make progress with constraining inference variables,
        // we would normally create new inference variables for the bound existential
        // variables and only then unify these new inference variables with the
        // inference variables from the input.
        //
        // We therefore instantiate the existential variable in the canonical response with the
        // inference variable of the input right away, which is more performant.
        let mut opt_values = IndexVec::from_elem_n(None, response.variables.len());
        for (original_value, result_value) in iter::zip(original_values, var_values.var_values) {
            match result_value.kind() {
                ty::GenericArgKind::Type(t) => {
                    if let ty::Bound(debruijn, b) = t.kind() {
                        assert_eq!(debruijn, ty::INNERMOST);
                        opt_values[b.var()] = Some(*original_value);
                    }
                }
                ty::GenericArgKind::Lifetime(r) => {
                    if let ty::ReBound(debruijn, br) = r.kind() {
                        assert_eq!(debruijn, ty::INNERMOST);
                        opt_values[br.var()] = Some(*original_value);
                    }
                }
                ty::GenericArgKind::Const(c) => {
                    if let ty::ConstKind::Bound(debruijn, bv) = c.kind() {
                        assert_eq!(debruijn, ty::INNERMOST);
                        opt_values[bv.var()] = Some(*original_value);
                    }
                }
            }
        }

        let var_values = infcx.interner().mk_args_from_iter(
            response.variables.into_iter().enumerate().map(|(index, info)| {
                if info.universe() != ty::UniverseIndex::ROOT {
                    // A variable from inside a binder of the query. While ideally these shouldn't
                    // exist at all (see the FIXME at the start of this method), we have to deal
                    // with them for now.
                    infcx.instantiate_canonical_var_with_infer(info, |idx| {
                        ty::UniverseIndex::from(prev_universe.index() + idx.index())
                    })
                } else if info.is_existential() {
                    // As an optimization we sometimes avoid creating a new inference variable here.
                    //
                    // All new inference variables we create start out in the current universe of
                    // the caller. This is conceptually wrong as these inference variables would be
                    // able to name more placeholders than they should be able to. However, the
                    // inference variables have to "come from somewhere", so by equating them with
                    // the original values of the caller later on, we pull them down into their
                    // correct universe again.
                    if let Some(v) = opt_values[ty::BoundVar::from_usize(index)] {
                        v
                    } else {
                        infcx.instantiate_canonical_var_with_infer(info, |_| prev_universe)
                    }
                } else {
                    // For placeholders which were already part of the input, we simply map this
                    // universal bound variable back to the placeholder of the input.
                    original_values[info.expect_placeholder_index()]
                }
            }),
        );

        CanonicalVarValues { var_values }
    }
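A toy sketch (invented `Arg` type, not the rustc API) of the optimization described in the comments above: when the response leaves a variable unconstrained, its bound variable is instantiated directly with the caller's original value instead of a fresh inference variable that would immediately be unified with it.

#[derive(Clone, Debug, PartialEq)]
enum Arg {
    Infer(usize), // caller-side inference variable
    Bound(usize), // bound variable of the canonical response
    Int,
}

fn instantiation(original_values: &[Arg], response_values: &[Arg], num_response_vars: usize) -> Vec<Arg> {
    assert_eq!(original_values.len(), response_values.len());
    // Remember which response variables map straight back to an original value.
    let mut opt_values = vec![None; num_response_vars];
    for (orig, resp) in original_values.iter().zip(response_values) {
        if let Arg::Bound(b) = resp {
            opt_values[*b] = Some(orig.clone());
        }
    }
    // Reuse the original value where possible; otherwise a fresh inference
    // variable would be created here (stand-in: `Infer(100 + i)`).
    (0..num_response_vars)
        .map(|i| opt_values[i].clone().unwrap_or(Arg::Infer(100 + i)))
        .collect()
}

fn main() {
    // The query constrained the second input variable to `Int` and left the
    // first one as the response's only bound variable.
    let original = [Arg::Infer(3), Arg::Infer(4)];
    let response = [Arg::Bound(0), Arg::Int];
    assert_eq!(instantiation(&original, &response, 1), vec![Arg::Infer(3)]);
}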

    /// Unify the `original_values` with the `var_values` returned by the canonical query.
    ///
    /// This assumes that this unification will always succeed. This is the case when
    /// applying a query response right away. However, calling a canonical query, doing any
    /// other kind of trait solving, and only then instantiating the result of the query
    /// can cause the instantiation to fail. This is not supported and we ICE in this case.
    ///
    /// We always structurally instantiate aliases. Relating aliases needs to be different
    /// depending on whether the alias is *rigid* or not. We're only really able to tell
    /// whether an alias is rigid by using the trait solver. When instantiating a response
    /// from the solver we assume that the solver correctly handled aliases and therefore
    /// always relate them structurally here.
    #[instrument(level = "trace", skip(infcx))]
    fn unify_query_var_values(
        infcx: &Infcx,
        param_env: I::ParamEnv,
        original_values: &[I::GenericArg],
        var_values: CanonicalVarValues<I>,
    ) {
        assert_eq!(original_values.len(), var_values.len());

        for (&orig, response) in iter::zip(original_values, var_values.var_values) {
            let goals = infcx.eq_structurally_relating_aliases(param_env, orig, response).unwrap();
            assert!(goals.is_empty());
        }
    }
|
||||
|
||||
fn register_region_constraints(
|
||||
&mut self,
|
||||
outlives: &[ty::OutlivesPredicate<I, I::GenericArg>],
|
||||
) {
|
||||
for &ty::OutlivesPredicate(lhs, rhs) in outlives {
|
||||
match lhs.kind() {
|
||||
ty::GenericArgKind::Lifetime(lhs) => self.register_region_outlives(lhs, rhs),
|
||||
ty::GenericArgKind::Type(lhs) => self.register_ty_outlives(lhs, rhs),
|
||||
ty::GenericArgKind::Const(_) => panic!("const outlives: {lhs:?}: {rhs:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn register_new_opaque_types(&mut self, opaque_types: &[(ty::OpaqueTypeKey<I>, I::Ty)]) {
|
||||
for &(key, ty) in opaque_types {
|
||||
self.infcx.inject_new_hidden_type_unchecked(key, ty);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Used by proof trees to be able to recompute intermediate actions while
|
||||
/// evaluating a goal. The `var_values` not only include the bound variables
|
||||
/// of the query input, but also contain all unconstrained inference vars
|
||||
/// created while evaluating this goal.
|
||||
pub(in crate::solve) fn make_canonical_state<Infcx, T, I>(
|
||||
infcx: &Infcx,
|
||||
var_values: &[I::GenericArg],
|
||||
max_input_universe: ty::UniverseIndex,
|
||||
data: T,
|
||||
) -> inspect::CanonicalState<I, T>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
T: TypeFoldable<I>,
|
||||
{
|
||||
let var_values = CanonicalVarValues { var_values: infcx.interner().mk_args(var_values) };
|
||||
let state = inspect::State { var_values, data };
|
||||
let state = state.fold_with(&mut EagerResolver::new(infcx));
|
||||
Canonicalizer::canonicalize(
|
||||
infcx,
|
||||
CanonicalizeMode::Response { max_input_universe },
|
||||
&mut vec![],
|
||||
state,
|
||||
)
|
||||
}
compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs (1061 lines, new file; diff suppressed because it is too large)
compiler/rustc_next_trait_solver/src/solve/eval_ctxt/probe.rs (123 lines, new file)

@ -0,0 +1,123 @@
|
|||
use std::marker::PhantomData;
|
||||
|
||||
use rustc_type_ir::Interner;
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::assembly::Candidate;
|
||||
use crate::solve::inspect;
|
||||
use crate::solve::{BuiltinImplSource, CandidateSource, EvalCtxt, NoSolution, QueryResult};
|
||||
|
||||
pub(in crate::solve) struct ProbeCtxt<'me, 'a, Infcx, I, F, T>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
ecx: &'me mut EvalCtxt<'a, Infcx, I>,
|
||||
probe_kind: F,
|
||||
_result: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<Infcx, I, F, T> ProbeCtxt<'_, '_, Infcx, I, F, T>
|
||||
where
|
||||
F: FnOnce(&T) -> inspect::ProbeKind<I>,
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
pub(in crate::solve) fn enter(self, f: impl FnOnce(&mut EvalCtxt<'_, Infcx>) -> T) -> T {
|
||||
let ProbeCtxt { ecx: outer_ecx, probe_kind, _result } = self;
|
||||
|
||||
let infcx = outer_ecx.infcx;
|
||||
let max_input_universe = outer_ecx.max_input_universe;
|
||||
let mut nested_ecx = EvalCtxt {
|
||||
infcx,
|
||||
variables: outer_ecx.variables,
|
||||
var_values: outer_ecx.var_values,
|
||||
is_normalizes_to_goal: outer_ecx.is_normalizes_to_goal,
|
||||
predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body,
|
||||
max_input_universe,
|
||||
search_graph: outer_ecx.search_graph,
|
||||
nested_goals: outer_ecx.nested_goals.clone(),
|
||||
tainted: outer_ecx.tainted,
|
||||
inspect: outer_ecx.inspect.take_and_enter_probe(),
|
||||
};
|
||||
let r = nested_ecx.infcx.probe(|| {
|
||||
let r = f(&mut nested_ecx);
|
||||
nested_ecx.inspect.probe_final_state(infcx, max_input_universe);
|
||||
r
|
||||
});
|
||||
if !nested_ecx.inspect.is_noop() {
|
||||
let probe_kind = probe_kind(&r);
|
||||
nested_ecx.inspect.probe_kind(probe_kind);
|
||||
outer_ecx.inspect = nested_ecx.inspect.finish_probe();
|
||||
}
|
||||
r
|
||||
}
|
||||
}
|
||||
|
||||
pub(in crate::solve) struct TraitProbeCtxt<'me, 'a, Infcx, I, F>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
cx: ProbeCtxt<'me, 'a, Infcx, I, F, QueryResult<I>>,
|
||||
source: CandidateSource<I>,
|
||||
}
|
||||
|
||||
impl<Infcx, I, F> TraitProbeCtxt<'_, '_, Infcx, I, F>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
F: FnOnce(&QueryResult<I>) -> inspect::ProbeKind<I>,
|
||||
{
|
||||
#[instrument(level = "debug", skip_all, fields(source = ?self.source))]
|
||||
pub(in crate::solve) fn enter(
|
||||
self,
|
||||
f: impl FnOnce(&mut EvalCtxt<'_, Infcx>) -> QueryResult<I>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
self.cx.enter(|ecx| f(ecx)).map(|result| Candidate { source: self.source, result })
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, Infcx, I> EvalCtxt<'a, Infcx, I>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
/// `probe_kind` is only called when proof tree building is enabled so it can be
|
||||
/// as expensive as necessary to output the desired information.
|
||||
pub(in crate::solve) fn probe<F, T>(
|
||||
&mut self,
|
||||
probe_kind: F,
|
||||
) -> ProbeCtxt<'_, 'a, Infcx, I, F, T>
|
||||
where
|
||||
F: FnOnce(&T) -> inspect::ProbeKind<I>,
|
||||
{
|
||||
ProbeCtxt { ecx: self, probe_kind, _result: PhantomData }
|
||||
}
|
||||
|
||||
pub(in crate::solve) fn probe_builtin_trait_candidate(
|
||||
&mut self,
|
||||
source: BuiltinImplSource,
|
||||
) -> TraitProbeCtxt<'_, 'a, Infcx, I, impl FnOnce(&QueryResult<I>) -> inspect::ProbeKind<I>>
|
||||
{
|
||||
self.probe_trait_candidate(CandidateSource::BuiltinImpl(source))
|
||||
}
|
||||
|
||||
pub(in crate::solve) fn probe_trait_candidate(
|
||||
&mut self,
|
||||
source: CandidateSource<I>,
|
||||
) -> TraitProbeCtxt<'_, 'a, Infcx, I, impl FnOnce(&QueryResult<I>) -> inspect::ProbeKind<I>>
|
||||
{
|
||||
TraitProbeCtxt {
|
||||
cx: ProbeCtxt {
|
||||
ecx: self,
|
||||
probe_kind: move |result: &QueryResult<I>| inspect::ProbeKind::TraitCandidate {
|
||||
source,
|
||||
result: *result,
|
||||
},
|
||||
_result: PhantomData,
|
||||
},
|
||||
source,
|
||||
}
|
||||
}
|
||||
}
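The probe helpers above ultimately defer to the delegate's `probe`, which runs a closure against a snapshot of the inference state and rolls it back afterwards. A self-contained sketch of that snapshot-and-rollback pattern with toy types (none of these names exist in rustc):

// Toy inference table: `probe` runs a closure against the current state and then
// undoes any bindings made inside it, returning only the closure's result.
struct InferTable {
    bindings: Vec<(usize, &'static str)>,
}

impl InferTable {
    fn probe<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
        let snapshot = self.bindings.len();
        let result = f(self);
        self.bindings.truncate(snapshot); // roll back everything the probe did
        result
    }
}

fn main() {
    let mut table = InferTable { bindings: vec![(0, "i32")] };
    let would_succeed = table.probe(|t| {
        t.bindings.push((1, "u8")); // speculative binding
        true
    });
    assert!(would_succeed);
    assert_eq!(table.bindings.len(), 1); // the speculative binding was rolled back
}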

compiler/rustc_next_trait_solver/src/solve/inspect/build.rs (575 lines, new file)
@ -0,0 +1,575 @@
//! Building proof trees incrementally during trait solving.
//!
//! This code is *a bit* of a mess and can hopefully be
//! mostly ignored. For a general overview of how it works,
//! see the comment on [ProofTreeBuilder].

use std::marker::PhantomData;
use std::mem;

use rustc_type_ir::{self as ty, Interner};

use crate::infcx::SolverDelegate;
use crate::solve::eval_ctxt::canonical;
use crate::solve::inspect;
use crate::solve::{
    CanonicalInput, Certainty, GenerateProofTree, Goal, GoalEvaluationKind, GoalSource, QueryInput,
    QueryResult,
};

/// The core data structure when building proof trees.
///
/// In case the current evaluation does not generate a proof
/// tree, `state` is simply `None` and we avoid any work.
///
/// The possible states of the solver are represented via
/// variants of [DebugSolver]. For any nested computation we call
/// `ProofTreeBuilder::new_nested_computation_kind` which
/// creates a new `ProofTreeBuilder` to temporarily replace the
/// current one. Once that nested computation is done,
/// `ProofTreeBuilder::nested_computation_kind` is called
/// to add the finished nested evaluation to the parent.
///
/// We provide additional information to the current state
/// by calling methods such as `ProofTreeBuilder::probe_kind`.
///
/// The actual structure closely mirrors the finished proof
/// trees. At the end of trait solving `ProofTreeBuilder::finalize`
/// is called to recursively convert the whole structure to a
/// finished proof tree.
pub(in crate::solve) struct ProofTreeBuilder<Infcx, I = <Infcx as SolverDelegate>::Interner>
where
    Infcx: SolverDelegate<Interner = I>,
    I: Interner,
{
    _infcx: PhantomData<Infcx>,
    state: Option<Box<DebugSolver<I>>>,
}
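A minimal sketch of the nesting protocol described in the comment above, using an invented toy builder (the real methods are e.g. `new_goal_evaluation` and `goal_evaluation`): the parent hands out a nested builder, the nested computation fills it in, and the finished result is attached back onto the parent; when the parent is not recording, the nested builder is a no-op as well.

// Toy stand-in for the proof tree builder: `None` means "not recording".
#[derive(Debug)]
struct ToyBuilder {
    state: Option<Vec<String>>,
}

impl ToyBuilder {
    fn new_nested(&self) -> ToyBuilder {
        // Only allocate a nested builder if the parent is actually recording.
        ToyBuilder { state: self.state.as_ref().map(|_| Vec::new()) }
    }

    fn record(&mut self, step: &str) {
        if let Some(steps) = &mut self.state {
            steps.push(step.to_string());
        }
    }

    fn finish_nested(&mut self, nested: ToyBuilder) {
        if let (Some(steps), Some(nested_steps)) = (&mut self.state, nested.state) {
            steps.push(format!("nested: {nested_steps:?}"));
        }
    }
}

fn main() {
    let mut root = ToyBuilder { state: Some(Vec::new()) };
    let mut nested = root.new_nested();
    nested.record("evaluated goal");
    root.finish_nested(nested);
    assert_eq!(root.state.unwrap().len(), 1);
}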
|
||||
|
||||
/// The current state of the proof tree builder, at most places
|
||||
/// in the code, only one or two variants are actually possible.
|
||||
///
|
||||
/// We simply ICE in case that assumption is broken.
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(Debug(bound = ""))]
|
||||
enum DebugSolver<I: Interner> {
|
||||
Root,
|
||||
GoalEvaluation(WipGoalEvaluation<I>),
|
||||
CanonicalGoalEvaluation(WipCanonicalGoalEvaluation<I>),
|
||||
CanonicalGoalEvaluationStep(WipCanonicalGoalEvaluationStep<I>),
|
||||
}
|
||||
|
||||
impl<I: Interner> From<WipGoalEvaluation<I>> for DebugSolver<I> {
|
||||
fn from(g: WipGoalEvaluation<I>) -> DebugSolver<I> {
|
||||
DebugSolver::GoalEvaluation(g)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: Interner> From<WipCanonicalGoalEvaluation<I>> for DebugSolver<I> {
|
||||
fn from(g: WipCanonicalGoalEvaluation<I>) -> DebugSolver<I> {
|
||||
DebugSolver::CanonicalGoalEvaluation(g)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: Interner> From<WipCanonicalGoalEvaluationStep<I>> for DebugSolver<I> {
|
||||
fn from(g: WipCanonicalGoalEvaluationStep<I>) -> DebugSolver<I> {
|
||||
DebugSolver::CanonicalGoalEvaluationStep(g)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))]
|
||||
struct WipGoalEvaluation<I: Interner> {
|
||||
pub uncanonicalized_goal: Goal<I, I::Predicate>,
|
||||
pub orig_values: Vec<I::GenericArg>,
|
||||
pub evaluation: Option<WipCanonicalGoalEvaluation<I>>,
|
||||
}
|
||||
|
||||
impl<I: Interner> WipGoalEvaluation<I> {
|
||||
fn finalize(self) -> inspect::GoalEvaluation<I> {
|
||||
inspect::GoalEvaluation {
|
||||
uncanonicalized_goal: self.uncanonicalized_goal,
|
||||
orig_values: self.orig_values,
|
||||
evaluation: self.evaluation.unwrap().finalize(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""))]
|
||||
pub(in crate::solve) enum WipCanonicalGoalEvaluationKind<I: Interner> {
|
||||
Overflow,
|
||||
CycleInStack,
|
||||
ProvisionalCacheHit,
|
||||
Interned { final_revision: I::CanonicalGoalEvaluationStepRef },
|
||||
}
|
||||
|
||||
impl<I: Interner> std::fmt::Debug for WipCanonicalGoalEvaluationKind<I> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Overflow => write!(f, "Overflow"),
|
||||
Self::CycleInStack => write!(f, "CycleInStack"),
|
||||
Self::ProvisionalCacheHit => write!(f, "ProvisionalCacheHit"),
|
||||
Self::Interned { final_revision: _ } => {
|
||||
f.debug_struct("Interned").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))]
|
||||
struct WipCanonicalGoalEvaluation<I: Interner> {
|
||||
goal: CanonicalInput<I>,
|
||||
kind: Option<WipCanonicalGoalEvaluationKind<I>>,
|
||||
/// Only used for uncached goals. After we finished evaluating
|
||||
/// the goal, this is interned and moved into `kind`.
|
||||
final_revision: Option<WipCanonicalGoalEvaluationStep<I>>,
|
||||
result: Option<QueryResult<I>>,
|
||||
}
|
||||
|
||||
impl<I: Interner> WipCanonicalGoalEvaluation<I> {
|
||||
fn finalize(self) -> inspect::CanonicalGoalEvaluation<I> {
|
||||
// We've already interned the final revision in
|
||||
// `fn finalize_canonical_goal_evaluation`.
|
||||
assert!(self.final_revision.is_none());
|
||||
let kind = match self.kind.unwrap() {
|
||||
WipCanonicalGoalEvaluationKind::Overflow => {
|
||||
inspect::CanonicalGoalEvaluationKind::Overflow
|
||||
}
|
||||
WipCanonicalGoalEvaluationKind::CycleInStack => {
|
||||
inspect::CanonicalGoalEvaluationKind::CycleInStack
|
||||
}
|
||||
WipCanonicalGoalEvaluationKind::ProvisionalCacheHit => {
|
||||
inspect::CanonicalGoalEvaluationKind::ProvisionalCacheHit
|
||||
}
|
||||
WipCanonicalGoalEvaluationKind::Interned { final_revision } => {
|
||||
inspect::CanonicalGoalEvaluationKind::Evaluation { final_revision }
|
||||
}
|
||||
};
|
||||
|
||||
inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))]
|
||||
struct WipCanonicalGoalEvaluationStep<I: Interner> {
|
||||
/// Unlike `EvalCtxt::var_values`, we append a new
|
||||
/// generic arg here whenever we create a new inference
|
||||
/// variable.
|
||||
///
|
||||
/// This is necessary as we otherwise don't unify these
|
||||
/// vars when instantiating multiple `CanonicalState`.
|
||||
var_values: Vec<I::GenericArg>,
|
||||
instantiated_goal: QueryInput<I, I::Predicate>,
|
||||
probe_depth: usize,
|
||||
evaluation: WipProbe<I>,
|
||||
}
|
||||
|
||||
impl<I: Interner> WipCanonicalGoalEvaluationStep<I> {
|
||||
fn current_evaluation_scope(&mut self) -> &mut WipProbe<I> {
|
||||
let mut current = &mut self.evaluation;
|
||||
for _ in 0..self.probe_depth {
|
||||
match current.steps.last_mut() {
|
||||
Some(WipProbeStep::NestedProbe(p)) => current = p,
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
current
|
||||
}
|
||||
|
||||
fn finalize(self) -> inspect::CanonicalGoalEvaluationStep<I> {
|
||||
let evaluation = self.evaluation.finalize();
|
||||
match evaluation.kind {
|
||||
inspect::ProbeKind::Root { .. } => (),
|
||||
_ => unreachable!("unexpected root evaluation: {evaluation:?}"),
|
||||
}
|
||||
inspect::CanonicalGoalEvaluationStep {
|
||||
instantiated_goal: self.instantiated_goal,
|
||||
evaluation,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))]
|
||||
struct WipProbe<I: Interner> {
|
||||
initial_num_var_values: usize,
|
||||
steps: Vec<WipProbeStep<I>>,
|
||||
kind: Option<inspect::ProbeKind<I>>,
|
||||
final_state: Option<inspect::CanonicalState<I, ()>>,
|
||||
}
|
||||
|
||||
impl<I: Interner> WipProbe<I> {
|
||||
fn finalize(self) -> inspect::Probe<I> {
|
||||
inspect::Probe {
|
||||
steps: self.steps.into_iter().map(WipProbeStep::finalize).collect(),
|
||||
kind: self.kind.unwrap(),
|
||||
final_state: self.final_state.unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(PartialEq(bound = ""), Eq(bound = ""), Debug(bound = ""))]
|
||||
enum WipProbeStep<I: Interner> {
|
||||
AddGoal(GoalSource, inspect::CanonicalState<I, Goal<I, I::Predicate>>),
|
||||
NestedProbe(WipProbe<I>),
|
||||
MakeCanonicalResponse { shallow_certainty: Certainty },
|
||||
RecordImplArgs { impl_args: inspect::CanonicalState<I, I::GenericArgs> },
|
||||
}
|
||||
|
||||
impl<I: Interner> WipProbeStep<I> {
|
||||
fn finalize(self) -> inspect::ProbeStep<I> {
|
||||
match self {
|
||||
WipProbeStep::AddGoal(source, goal) => inspect::ProbeStep::AddGoal(source, goal),
|
||||
WipProbeStep::NestedProbe(probe) => inspect::ProbeStep::NestedProbe(probe.finalize()),
|
||||
WipProbeStep::RecordImplArgs { impl_args } => {
|
||||
inspect::ProbeStep::RecordImplArgs { impl_args }
|
||||
}
|
||||
WipProbeStep::MakeCanonicalResponse { shallow_certainty } => {
|
||||
inspect::ProbeStep::MakeCanonicalResponse { shallow_certainty }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Infcx: SolverDelegate<Interner = I>, I: Interner> ProofTreeBuilder<Infcx> {
|
||||
fn new(state: impl Into<DebugSolver<I>>) -> ProofTreeBuilder<Infcx> {
|
||||
ProofTreeBuilder { state: Some(Box::new(state.into())), _infcx: PhantomData }
|
||||
}
|
||||
|
||||
fn opt_nested<T: Into<DebugSolver<I>>>(&self, state: impl FnOnce() -> Option<T>) -> Self {
|
||||
ProofTreeBuilder {
|
||||
state: self.state.as_ref().and_then(|_| Some(state()?.into())).map(Box::new),
|
||||
_infcx: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn nested<T: Into<DebugSolver<I>>>(&self, state: impl FnOnce() -> T) -> Self {
|
||||
ProofTreeBuilder {
|
||||
state: self.state.as_ref().map(|_| Box::new(state().into())),
|
||||
_infcx: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn as_mut(&mut self) -> Option<&mut DebugSolver<I>> {
|
||||
self.state.as_deref_mut()
|
||||
}
|
||||
|
||||
pub fn take_and_enter_probe(&mut self) -> ProofTreeBuilder<Infcx> {
|
||||
let mut nested = ProofTreeBuilder { state: self.state.take(), _infcx: PhantomData };
|
||||
nested.enter_probe();
|
||||
nested
|
||||
}
|
||||
|
||||
pub fn finalize(self) -> Option<inspect::GoalEvaluation<I>> {
|
||||
match *self.state? {
|
||||
DebugSolver::GoalEvaluation(wip_goal_evaluation) => {
|
||||
Some(wip_goal_evaluation.finalize())
|
||||
}
|
||||
root => unreachable!("unexpected proof tree builder root node: {:?}", root),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_maybe_root(generate_proof_tree: GenerateProofTree) -> ProofTreeBuilder<Infcx> {
|
||||
match generate_proof_tree {
|
||||
GenerateProofTree::No => ProofTreeBuilder::new_noop(),
|
||||
GenerateProofTree::Yes => ProofTreeBuilder::new_root(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_root() -> ProofTreeBuilder<Infcx> {
|
||||
ProofTreeBuilder::new(DebugSolver::Root)
|
||||
}
|
||||
|
||||
pub fn new_noop() -> ProofTreeBuilder<Infcx> {
|
||||
ProofTreeBuilder { state: None, _infcx: PhantomData }
|
||||
}
|
||||
|
||||
pub fn is_noop(&self) -> bool {
|
||||
self.state.is_none()
|
||||
}
|
||||
|
||||
pub(in crate::solve) fn new_goal_evaluation(
|
||||
&mut self,
|
||||
goal: Goal<I, I::Predicate>,
|
||||
orig_values: &[I::GenericArg],
|
||||
kind: GoalEvaluationKind,
|
||||
) -> ProofTreeBuilder<Infcx> {
|
||||
self.opt_nested(|| match kind {
|
||||
GoalEvaluationKind::Root => Some(WipGoalEvaluation {
|
||||
uncanonicalized_goal: goal,
|
||||
orig_values: orig_values.to_vec(),
|
||||
evaluation: None,
|
||||
}),
|
||||
GoalEvaluationKind::Nested => None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn new_canonical_goal_evaluation(
|
||||
&mut self,
|
||||
goal: CanonicalInput<I>,
|
||||
) -> ProofTreeBuilder<Infcx> {
|
||||
self.nested(|| WipCanonicalGoalEvaluation {
|
||||
goal,
|
||||
kind: None,
|
||||
final_revision: None,
|
||||
result: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn finalize_canonical_goal_evaluation(
|
||||
&mut self,
|
||||
tcx: I,
|
||||
) -> Option<I::CanonicalGoalEvaluationStepRef> {
|
||||
self.as_mut().map(|this| match this {
|
||||
DebugSolver::CanonicalGoalEvaluation(evaluation) => {
|
||||
let final_revision = mem::take(&mut evaluation.final_revision).unwrap();
|
||||
let final_revision =
|
||||
tcx.intern_canonical_goal_evaluation_step(final_revision.finalize());
|
||||
let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision };
|
||||
assert_eq!(evaluation.kind.replace(kind), None);
|
||||
final_revision
|
||||
}
|
||||
_ => unreachable!(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn canonical_goal_evaluation(
|
||||
&mut self,
|
||||
canonical_goal_evaluation: ProofTreeBuilder<Infcx>,
|
||||
) {
|
||||
if let Some(this) = self.as_mut() {
|
||||
match (this, *canonical_goal_evaluation.state.unwrap()) {
|
||||
(
|
||||
DebugSolver::GoalEvaluation(goal_evaluation),
|
||||
DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation),
|
||||
) => {
|
||||
let prev = goal_evaluation.evaluation.replace(canonical_goal_evaluation);
|
||||
assert_eq!(prev, None);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn canonical_goal_evaluation_kind(&mut self, kind: WipCanonicalGoalEvaluationKind<I>) {
|
||||
if let Some(this) = self.as_mut() {
|
||||
match this {
|
||||
DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => {
|
||||
assert_eq!(canonical_goal_evaluation.kind.replace(kind), None);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub fn goal_evaluation(&mut self, goal_evaluation: ProofTreeBuilder<Infcx>) {
|
||||
if let Some(this) = self.as_mut() {
|
||||
match this {
|
||||
DebugSolver::Root => *this = *goal_evaluation.state.unwrap(),
|
||||
DebugSolver::CanonicalGoalEvaluationStep(_) => {
|
||||
assert!(goal_evaluation.state.is_none())
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_goal_evaluation_step(
|
||||
&mut self,
|
||||
var_values: ty::CanonicalVarValues<I>,
|
||||
instantiated_goal: QueryInput<I, I::Predicate>,
|
||||
) -> ProofTreeBuilder<Infcx> {
|
||||
self.nested(|| WipCanonicalGoalEvaluationStep {
|
||||
var_values: var_values.var_values.to_vec(),
|
||||
instantiated_goal,
|
||||
evaluation: WipProbe {
|
||||
initial_num_var_values: var_values.len(),
|
||||
steps: vec![],
|
||||
kind: None,
|
||||
final_state: None,
|
||||
},
|
||||
probe_depth: 0,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn goal_evaluation_step(&mut self, goal_evaluation_step: ProofTreeBuilder<Infcx>) {
|
||||
if let Some(this) = self.as_mut() {
|
||||
match (this, *goal_evaluation_step.state.unwrap()) {
|
||||
(
|
||||
DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluations),
|
||||
DebugSolver::CanonicalGoalEvaluationStep(goal_evaluation_step),
|
||||
) => {
|
||||
canonical_goal_evaluations.final_revision = Some(goal_evaluation_step);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_var_value<T: Into<I::GenericArg>>(&mut self, arg: T) {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
state.var_values.push(arg.into());
|
||||
}
|
||||
Some(s) => panic!("tried to add var values to {s:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enter_probe(&mut self) {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
let initial_num_var_values = state.var_values.len();
|
||||
state.current_evaluation_scope().steps.push(WipProbeStep::NestedProbe(WipProbe {
|
||||
initial_num_var_values,
|
||||
steps: vec![],
|
||||
kind: None,
|
||||
final_state: None,
|
||||
}));
|
||||
state.probe_depth += 1;
|
||||
}
|
||||
Some(s) => panic!("tried to start probe to {s:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn probe_kind(&mut self, probe_kind: inspect::ProbeKind<I>) {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
let prev = state.current_evaluation_scope().kind.replace(probe_kind);
|
||||
assert_eq!(prev, None);
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn probe_final_state(&mut self, infcx: &Infcx, max_input_universe: ty::UniverseIndex) {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
let final_state = canonical::make_canonical_state(
|
||||
infcx,
|
||||
&state.var_values,
|
||||
max_input_universe,
|
||||
(),
|
||||
);
|
||||
let prev = state.current_evaluation_scope().final_state.replace(final_state);
|
||||
assert_eq!(prev, None);
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_normalizes_to_goal(
|
||||
&mut self,
|
||||
infcx: &Infcx,
|
||||
max_input_universe: ty::UniverseIndex,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
) {
|
||||
self.add_goal(
|
||||
infcx,
|
||||
max_input_universe,
|
||||
GoalSource::Misc,
|
||||
goal.with(infcx.interner(), goal.predicate),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn add_goal(
|
||||
&mut self,
|
||||
infcx: &Infcx,
|
||||
max_input_universe: ty::UniverseIndex,
|
||||
source: GoalSource,
|
||||
goal: Goal<I, I::Predicate>,
|
||||
) {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
let goal = canonical::make_canonical_state(
|
||||
infcx,
|
||||
&state.var_values,
|
||||
max_input_universe,
|
||||
goal,
|
||||
);
|
||||
state.current_evaluation_scope().steps.push(WipProbeStep::AddGoal(source, goal))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn record_impl_args(
|
||||
&mut self,
|
||||
infcx: &Infcx,
|
||||
max_input_universe: ty::UniverseIndex,
|
||||
impl_args: I::GenericArgs,
|
||||
) {
|
||||
match self.as_mut() {
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
let impl_args = canonical::make_canonical_state(
|
||||
infcx,
|
||||
&state.var_values,
|
||||
max_input_universe,
|
||||
impl_args,
|
||||
);
|
||||
state
|
||||
.current_evaluation_scope()
|
||||
.steps
|
||||
.push(WipProbeStep::RecordImplArgs { impl_args });
|
||||
}
|
||||
None => {}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn make_canonical_response(&mut self, shallow_certainty: Certainty) {
|
||||
match self.as_mut() {
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
state
|
||||
.current_evaluation_scope()
|
||||
.steps
|
||||
.push(WipProbeStep::MakeCanonicalResponse { shallow_certainty });
|
||||
}
|
||||
None => {}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn finish_probe(mut self) -> ProofTreeBuilder<Infcx> {
|
||||
match self.as_mut() {
|
||||
None => {}
|
||||
Some(DebugSolver::CanonicalGoalEvaluationStep(state)) => {
|
||||
assert_ne!(state.probe_depth, 0);
|
||||
let num_var_values = state.current_evaluation_scope().initial_num_var_values;
|
||||
state.var_values.truncate(num_var_values);
|
||||
state.probe_depth -= 1;
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
pub fn query_result(&mut self, result: QueryResult<I>) {
|
||||
if let Some(this) = self.as_mut() {
|
||||
match this {
|
||||
DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => {
|
||||
assert_eq!(canonical_goal_evaluation.result.replace(result), None);
|
||||
}
|
||||
DebugSolver::CanonicalGoalEvaluationStep(evaluation_step) => {
|
||||
assert_eq!(
|
||||
evaluation_step
|
||||
.evaluation
|
||||
.kind
|
||||
.replace(inspect::ProbeKind::Root { result }),
|
||||
None
|
||||
);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|

@ -0,0 +1,4 @@
pub use rustc_type_ir::solve::inspect::*;

mod build;
pub(in crate::solve) use build::*;
compiler/rustc_next_trait_solver/src/solve/mod.rs (305 lines, new file)

@ -0,0 +1,305 @@
//! The next-generation trait solver, currently still WIP.
//!
//! As a user of Rust, you can use `-Znext-solver` to enable the new trait solver.
//!
//! As a developer of rustc, you shouldn't be using the new trait
//! solver without asking the trait-system-refactor-initiative, but it can
//! be enabled with `InferCtxtBuilder::with_next_trait_solver`. This will
//! ensure that trait solving using that inference context will be routed
//! to the new trait solver.
//!
//! For a high-level overview of how this solver works, check out the relevant
//! section of the rustc-dev-guide.
//!
//! FIXME(@lcnr): Write that section. If you read this before then, ask me
//! about it on Zulip.

mod alias_relate;
mod assembly;
mod eval_ctxt;
pub mod inspect;
mod normalizes_to;
mod project_goals;
mod search_graph;
mod trait_goals;

pub use self::eval_ctxt::{EvalCtxt, GenerateProofTree, SolverDelegateEvalExt};
pub use rustc_type_ir::solve::*;

use rustc_type_ir::inherent::*;
use rustc_type_ir::{self as ty, Interner};

use crate::infcx::SolverDelegate;

/// How many fixpoint iterations we should attempt inside of the solver before bailing
/// with overflow.
///
/// We previously used `tcx.recursion_limit().0.checked_ilog2().unwrap_or(0)` for this.
/// However, it feels unlikely that increasing the recursion limit by a power of two
/// to get one more iteration is ever useful or desirable. We now instead use a constant
/// here. If there ever end up being use-cases where a bigger number of fixpoint iterations
/// is required, we can add a new attribute for that or revert this to be dependent on the
/// recursion limit again. However, this feels very unlikely.
const FIXPOINT_STEP_LIMIT: usize = 8;
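A quick check of the arithmetic behind that comparison, assuming the default `recursion_limit` of 128: the old formula allowed floor(log2(128)) = 7 fixpoint iterations, so the fixed constant of 8 is in the same ballpark.

fn main() {
    // Old formula applied to the default recursion limit of 128.
    assert_eq!(128u32.checked_ilog2().unwrap_or(0), 7);
}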
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
enum GoalEvaluationKind {
|
||||
Root,
|
||||
Nested,
|
||||
}
|
||||
|
||||
fn has_no_inference_or_external_constraints<I: Interner>(
|
||||
response: ty::Canonical<I, Response<I>>,
|
||||
) -> bool {
|
||||
response.value.external_constraints.region_constraints.is_empty()
|
||||
&& response.value.var_values.is_identity()
|
||||
&& response.value.external_constraints.opaque_types.is_empty()
|
||||
}
|
||||
|
||||
impl<'a, Infcx, I> EvalCtxt<'a, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_type_outlives_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::OutlivesPredicate<I, I::Ty>>,
|
||||
) -> QueryResult<I> {
|
||||
let ty::OutlivesPredicate(ty, lt) = goal.predicate;
|
||||
self.register_ty_outlives(ty, lt);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_region_outlives_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::OutlivesPredicate<I, I::Region>>,
|
||||
) -> QueryResult<I> {
|
||||
let ty::OutlivesPredicate(a, b) = goal.predicate;
|
||||
self.register_region_outlives(a, b);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_coerce_goal(&mut self, goal: Goal<I, ty::CoercePredicate<I>>) -> QueryResult<I> {
|
||||
self.compute_subtype_goal(Goal {
|
||||
param_env: goal.param_env,
|
||||
predicate: ty::SubtypePredicate {
|
||||
a_is_expected: false,
|
||||
a: goal.predicate.a,
|
||||
b: goal.predicate.b,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_subtype_goal(&mut self, goal: Goal<I, ty::SubtypePredicate<I>>) -> QueryResult<I> {
|
||||
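// If both sides are still unresolved type inference variables we can't
// make progress, so we stall this goal as ambiguous instead of relating
// the two variables against each other.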
if goal.predicate.a.is_ty_var() && goal.predicate.b.is_ty_var() {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
} else {
|
||||
self.sub(goal.param_env, goal.predicate.a, goal.predicate.b)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_object_safe_goal(&mut self, trait_def_id: I::DefId) -> QueryResult<I> {
|
||||
if self.interner().trait_is_object_safe(trait_def_id) {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_well_formed_goal(&mut self, goal: Goal<I, I::GenericArg>) -> QueryResult<I> {
|
||||
match self.well_formed_goals(goal.param_env, goal.predicate) {
|
||||
Some(goals) => {
|
||||
self.add_goals(GoalSource::Misc, goals);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
None => self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS),
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn compute_const_evaluatable_goal(
|
||||
&mut self,
|
||||
Goal { param_env, predicate: ct }: Goal<I, I::Const>,
|
||||
) -> QueryResult<I> {
|
||||
match ct.kind() {
|
||||
ty::ConstKind::Unevaluated(uv) => {
|
||||
// We never return `NoSolution` here as `try_const_eval_resolve` emits an
|
||||
// error itself when failing to evaluate, so emitting an additional fulfillment
|
||||
// error in that case is unnecessary noise. This may change in the future once
|
||||
// evaluation failures are allowed to impact selection, e.g. generic const
|
||||
// expressions in impl headers or `where`-clauses.
|
||||
|
||||
// FIXME(generic_const_exprs): Implement handling for generic
|
||||
// const expressions here.
|
||||
if let Some(_normalized) = self.try_const_eval_resolve(param_env, uv) {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
} else {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
}
|
||||
}
|
||||
ty::ConstKind::Infer(_) => {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
}
|
||||
ty::ConstKind::Placeholder(_)
|
||||
| ty::ConstKind::Value(_, _)
|
||||
| ty::ConstKind::Error(_) => {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
// We can freely ICE here as:
|
||||
// - `Param` gets replaced with a placeholder during canonicalization
|
||||
// - `Bound` cannot exist as we don't have a binder around the self type
|
||||
// - `Expr` is part of `feature(generic_const_exprs)` and is not implemented yet
|
||||
ty::ConstKind::Param(_) | ty::ConstKind::Bound(_, _) | ty::ConstKind::Expr(_) => {
|
||||
panic!("unexpect const kind: {:?}", ct)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
fn compute_const_arg_has_type_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, (I::Const, I::Ty)>,
|
||||
) -> QueryResult<I> {
|
||||
let (ct, ty) = goal.predicate;
|
||||
|
||||
let ct_ty = match ct.kind() {
|
||||
// FIXME: Ignore effect vars because canonicalization doesn't handle them correctly
|
||||
// and if we stall on the var then we wind up creating ambiguity errors in a probe
|
||||
// for this goal which contains an effect var, which then ends up ICEing.
|
||||
ty::ConstKind::Infer(ty::InferConst::EffectVar(_)) => {
|
||||
return self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
|
||||
}
|
||||
ty::ConstKind::Infer(_) => {
|
||||
return self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
|
||||
}
|
||||
ty::ConstKind::Error(_) => {
|
||||
return self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes);
|
||||
}
|
||||
ty::ConstKind::Unevaluated(uv) => {
|
||||
self.interner().type_of(uv.def).instantiate(self.interner(), &uv.args)
|
||||
}
|
||||
ty::ConstKind::Expr(_) => unimplemented!(
|
||||
"`feature(generic_const_exprs)` is not supported in the new trait solver"
|
||||
),
|
||||
ty::ConstKind::Param(_) => {
|
||||
unreachable!("`ConstKind::Param` should have been canonicalized to `Placeholder`")
|
||||
}
|
||||
ty::ConstKind::Bound(_, _) => panic!("escaping bound vars in {:?}", ct),
|
||||
ty::ConstKind::Value(ty, _) => ty,
|
||||
ty::ConstKind::Placeholder(placeholder) => {
|
||||
self.interner().find_const_ty_from_env(goal.param_env, placeholder)
|
||||
}
|
||||
};
|
||||
|
||||
self.eq(goal.param_env, ct_ty, ty)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
/// Try to merge multiple possible ways to prove a goal; if that is not possible, this returns `None`.
|
||||
///
|
||||
/// In this case we tend to flounder and return ambiguity by calling [`EvalCtxt::flounder`].
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
fn try_merge_responses(
|
||||
&mut self,
|
||||
responses: &[CanonicalResponse<I>],
|
||||
) -> Option<CanonicalResponse<I>> {
|
||||
if responses.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// FIXME(-Znext-solver): We should instead try to find a `Certainty::Yes` response with
|
||||
// a subset of the constraints that all the other responses have.
|
||||
let one = responses[0];
|
||||
if responses[1..].iter().all(|&resp| resp == one) {
|
||||
return Some(one);
|
||||
}
|
||||
|
||||
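// Otherwise, accept a response with `Certainty::Yes` that neither constrains
// inference variables nor adds external constraints, as such a response is
// compatible with all of the others.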
responses
|
||||
.iter()
|
||||
.find(|response| {
|
||||
response.value.certainty == Certainty::Yes
|
||||
&& has_no_inference_or_external_constraints(**response)
|
||||
})
|
||||
.copied()
|
||||
}
|
||||
|
||||
/// If we fail to merge responses we flounder and return overflow or ambiguity.
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
fn flounder(&mut self, responses: &[CanonicalResponse<I>]) -> QueryResult<I> {
|
||||
if responses.is_empty() {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
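// Combine the certainty of all responses, seeding the fold with `AMBIGUOUS`
// so that the merged certainty is always `Maybe`.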
let Certainty::Maybe(maybe_cause) =
|
||||
responses.iter().fold(Certainty::AMBIGUOUS, |certainty, response| {
|
||||
certainty.unify_with(response.value.certainty)
|
||||
})
|
||||
else {
|
||||
panic!("expected flounder response to be ambiguous")
|
||||
};
|
||||
|
||||
Ok(self.make_ambiguous_response_no_constraints(maybe_cause))
|
||||
}
|
||||
|
||||
/// Normalize a type for when it is structurally matched on.
|
||||
///
|
||||
/// This function is necessary in nearly all cases before matching on a type.
|
||||
/// Not doing so is likely to be incomplete and therefore unsound during
|
||||
/// coherence.
|
||||
#[instrument(level = "trace", skip(self, param_env), ret)]
|
||||
fn structurally_normalize_ty(
|
||||
&mut self,
|
||||
param_env: I::ParamEnv,
|
||||
ty: I::Ty,
|
||||
) -> Result<I::Ty, NoSolution> {
|
||||
if let ty::Alias(..) = ty.kind() {
|
||||
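// Normalize by equating the alias with a fresh inference variable via an
// `AliasRelate` goal; evaluating the added goals constrains that variable
// to the (possibly still ambiguous) normalized type.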
let normalized_ty = self.next_ty_infer();
|
||||
let alias_relate_goal = Goal::new(
|
||||
self.interner(),
|
||||
param_env,
|
||||
ty::PredicateKind::AliasRelate(
|
||||
ty.into(),
|
||||
normalized_ty.into(),
|
||||
ty::AliasRelationDirection::Equate,
|
||||
),
|
||||
);
|
||||
self.add_goal(GoalSource::Misc, alias_relate_goal);
|
||||
self.try_evaluate_added_goals()?;
|
||||
Ok(self.resolve_vars_if_possible(normalized_ty))
|
||||
} else {
|
||||
Ok(ty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn response_no_constraints_raw<I: Interner>(
|
||||
tcx: I,
|
||||
max_universe: ty::UniverseIndex,
|
||||
variables: I::CanonicalVars,
|
||||
certainty: Certainty,
|
||||
) -> CanonicalResponse<I> {
|
||||
ty::Canonical {
|
||||
max_universe,
|
||||
variables,
|
||||
value: Response {
|
||||
var_values: ty::CanonicalVarValues::make_identity(tcx, variables),
|
||||
// FIXME: maybe we should store the "no response" version in tcx, like
|
||||
// we do for tcx.types and stuff.
|
||||
external_constraints: tcx.mk_external_constraints(ExternalConstraintsData::default()),
|
||||
certainty,
|
||||
},
|
||||
defining_opaque_types: Default::default(),
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, QueryResult};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub(super) fn normalize_anon_const(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
) -> QueryResult<I> {
|
||||
if let Some(normalized_const) = self.try_const_eval_resolve(
|
||||
goal.param_env,
|
||||
ty::UnevaluatedConst::new(goal.predicate.alias.def_id, goal.predicate.alias.args),
|
||||
) {
|
||||
self.instantiate_normalizes_to_term(goal, normalized_const.into());
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
} else {
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
//! Computes a normalizes-to (projection) goal for inherent associated types,
|
||||
//! `#![feature(inherent_associated_types)]`. Since HIR ty lowering already determines
|
||||
//! which impl the IAT is being projected from, we just:
|
||||
//! 1. instantiate generic parameters,
|
||||
//! 2. equate the self type, and
|
||||
//! 3. instantiate and register where clauses.
|
||||
|
||||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, GoalSource, QueryResult};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
pub(super) fn normalize_inherent_associated_type(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
) -> QueryResult<I> {
|
||||
let tcx = self.interner();
|
||||
let inherent = goal.predicate.alias.expect_ty(tcx);
|
||||
|
||||
let impl_def_id = tcx.parent(inherent.def_id);
|
||||
let impl_args = self.fresh_args_for_item(impl_def_id);
|
||||
|
||||
// Equate impl header and add impl where clauses
|
||||
self.eq(
|
||||
goal.param_env,
|
||||
inherent.self_ty(),
|
||||
tcx.type_of(impl_def_id).instantiate(tcx, &impl_args),
|
||||
)?;
|
||||
|
||||
// Equate IAT with the RHS of the project goal
|
||||
let inherent_args = inherent.rebase_inherent_args_onto_impl(impl_args, tcx);
|
||||
|
||||
// Check both where clauses on the impl and IAT
|
||||
//
|
||||
// FIXME(-Znext-solver=coinductive): I think this should be split
|
||||
// and we tag the impl bounds with `GoalSource::ImplWhereBound`?
|
||||
// Right now this includes both the impl and the assoc item where-bounds,
|
||||
// and I don't think the assoc item where-bounds are allowed to be coinductive.
|
||||
self.add_goals(
|
||||
GoalSource::Misc,
|
||||
tcx.predicates_of(inherent.def_id)
|
||||
.iter_instantiated(tcx, &inherent_args)
|
||||
.map(|pred| goal.with(tcx, pred)),
|
||||
);
|
||||
|
||||
let normalized = tcx.type_of(inherent.def_id).instantiate(tcx, &inherent_args);
|
||||
self.instantiate_normalizes_to_term(goal, normalized.into());
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
919
compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs
Normal file
|
@ -0,0 +1,919 @@
|
|||
mod anon_const;
|
||||
mod inherent;
|
||||
mod opaque_types;
|
||||
mod weak_types;
|
||||
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::lang_items::TraitSolverLangItem;
|
||||
use rustc_type_ir::Upcast as _;
|
||||
use rustc_type_ir::{self as ty, Interner, NormalizesTo};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::assembly::structural_traits::{self, AsyncCallableRelevantTypes};
|
||||
use crate::solve::assembly::{self, Candidate};
|
||||
use crate::solve::inspect::ProbeKind;
|
||||
use crate::solve::{
|
||||
BuiltinImplSource, CandidateSource, Certainty, EvalCtxt, Goal, GoalSource, MaybeCause,
|
||||
NoSolution, QueryResult,
|
||||
};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub(super) fn compute_normalizes_to_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, NormalizesTo<I>>,
|
||||
) -> QueryResult<I> {
|
||||
self.set_is_normalizes_to_goal();
|
||||
debug_assert!(self.term_is_fully_unconstrained(goal));
|
||||
let normalize_result = self
|
||||
.probe(|&result| ProbeKind::TryNormalizeNonRigid { result })
|
||||
.enter(|this| this.normalize_at_least_one_step(goal));
|
||||
|
||||
match normalize_result {
|
||||
Ok(res) => Ok(res),
|
||||
Err(NoSolution) => {
|
||||
let Goal { param_env, predicate: NormalizesTo { alias, term } } = goal;
|
||||
self.relate_rigid_alias_non_alias(param_env, alias, ty::Invariant, term)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalize the given alias by at least one step. If the alias is rigid, this
|
||||
/// returns `NoSolution`.
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
fn normalize_at_least_one_step(&mut self, goal: Goal<I, NormalizesTo<I>>) -> QueryResult<I> {
|
||||
match goal.predicate.alias.kind(self.interner()) {
|
||||
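// Associated items go through full candidate assembly, while the
// remaining alias kinds each have a single way to normalize.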
ty::AliasTermKind::ProjectionTy | ty::AliasTermKind::ProjectionConst => {
|
||||
let candidates = self.assemble_and_evaluate_candidates(goal);
|
||||
self.merge_candidates(candidates)
|
||||
}
|
||||
ty::AliasTermKind::InherentTy => self.normalize_inherent_associated_type(goal),
|
||||
ty::AliasTermKind::OpaqueTy => self.normalize_opaque_type(goal),
|
||||
ty::AliasTermKind::WeakTy => self.normalize_weak_type(goal),
|
||||
ty::AliasTermKind::UnevaluatedConst => self.normalize_anon_const(goal),
|
||||
}
|
||||
}
|
||||
|
||||
/// When normalizing an associated item, constrain the expected term to `term`.
|
||||
///
|
||||
/// We know `term` to always be a fully unconstrained inference variable, so
|
||||
/// `eq` should never fail here. However, in case `term` contains aliases, we
|
||||
/// emit nested `AliasRelate` goals to structurally normalize the alias.
|
||||
pub fn instantiate_normalizes_to_term(
|
||||
&mut self,
|
||||
goal: Goal<I, NormalizesTo<I>>,
|
||||
term: I::Term,
|
||||
) {
|
||||
self.eq(goal.param_env, goal.predicate.term, term)
|
||||
.expect("expected goal term to be fully unconstrained");
|
||||
}
|
||||
}
|
||||
|
||||
impl<Infcx, I> assembly::GoalKind<Infcx> for NormalizesTo<I>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
fn self_ty(self) -> I::Ty {
|
||||
self.self_ty()
|
||||
}
|
||||
|
||||
fn trait_ref(self, tcx: I) -> ty::TraitRef<I> {
|
||||
self.alias.trait_ref(tcx)
|
||||
}
|
||||
|
||||
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self {
|
||||
self.with_self_ty(tcx, self_ty)
|
||||
}
|
||||
|
||||
fn trait_def_id(self, tcx: I) -> I::DefId {
|
||||
self.trait_def_id(tcx)
|
||||
}
|
||||
|
||||
fn probe_and_match_goal_against_assumption(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
source: CandidateSource<I>,
|
||||
goal: Goal<I, Self>,
|
||||
assumption: I::Clause,
|
||||
then: impl FnOnce(&mut EvalCtxt<'_, Infcx>) -> QueryResult<I>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
if let Some(projection_pred) = assumption.as_projection_clause() {
|
||||
if projection_pred.projection_def_id() == goal.predicate.def_id() {
|
||||
let tcx = ecx.interner();
|
||||
ecx.probe_trait_candidate(source).enter(|ecx| {
|
||||
let assumption_projection_pred =
|
||||
ecx.instantiate_binder_with_infer(projection_pred);
|
||||
ecx.eq(
|
||||
goal.param_env,
|
||||
goal.predicate.alias,
|
||||
assumption_projection_pred.projection_term,
|
||||
)?;
|
||||
|
||||
ecx.instantiate_normalizes_to_term(goal, assumption_projection_pred.term);
|
||||
|
||||
// Add GAT where clauses from the trait's definition
|
||||
ecx.add_goals(
|
||||
GoalSource::Misc,
|
||||
tcx.own_predicates_of(goal.predicate.def_id())
|
||||
.iter_instantiated(tcx, &goal.predicate.alias.args)
|
||||
.map(|pred| goal.with(tcx, pred)),
|
||||
);
|
||||
|
||||
then(ecx)
|
||||
})
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
} else {
|
||||
Err(NoSolution)
|
||||
}
|
||||
}
|
||||
|
||||
fn consider_impl_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, NormalizesTo<I>>,
|
||||
impl_def_id: I::DefId,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let tcx = ecx.interner();
|
||||
|
||||
let goal_trait_ref = goal.predicate.alias.trait_ref(tcx);
|
||||
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
|
||||
if !ecx.interner().args_may_unify_deep(
|
||||
goal.predicate.alias.trait_ref(tcx).args,
|
||||
impl_trait_ref.skip_binder().args,
|
||||
) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
// We have to ignore negative impls when projecting.
|
||||
let impl_polarity = tcx.impl_polarity(impl_def_id);
|
||||
match impl_polarity {
|
||||
ty::ImplPolarity::Negative => return Err(NoSolution),
|
||||
ty::ImplPolarity::Reservation => {
|
||||
unimplemented!("reservation impl for trait with assoc item: {:?}", goal)
|
||||
}
|
||||
ty::ImplPolarity::Positive => {}
|
||||
};
|
||||
|
||||
ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
|
||||
let impl_args = ecx.fresh_args_for_item(impl_def_id);
|
||||
let impl_trait_ref = impl_trait_ref.instantiate(tcx, &impl_args);
|
||||
|
||||
ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?;
|
||||
|
||||
let where_clause_bounds = tcx
|
||||
.predicates_of(impl_def_id)
|
||||
.iter_instantiated(tcx, &impl_args)
|
||||
.map(|pred| goal.with(tcx, pred));
|
||||
ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds);
|
||||
|
||||
// Add GAT where clauses from the trait's definition
|
||||
ecx.add_goals(
|
||||
GoalSource::Misc,
|
||||
tcx.own_predicates_of(goal.predicate.def_id())
|
||||
.iter_instantiated(tcx, &goal.predicate.alias.args)
|
||||
.map(|pred| goal.with(tcx, pred)),
|
||||
);
|
||||
|
||||
// In case the associated item is hidden due to specialization, we have to
|
||||
// return ambiguity as this would otherwise be incomplete, resulting in
|
||||
// unsoundness during coherence (#105782).
|
||||
let Some(target_item_def_id) = ecx.fetch_eligible_assoc_item(
|
||||
goal.param_env,
|
||||
goal_trait_ref,
|
||||
goal.predicate.def_id(),
|
||||
impl_def_id,
|
||||
)?
|
||||
else {
|
||||
return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
|
||||
};
|
||||
|
||||
let error_response = |ecx: &mut EvalCtxt<'_, Infcx>, msg: &str| {
|
||||
let guar = tcx.delay_bug(msg);
|
||||
let error_term = match goal.predicate.alias.kind(tcx) {
|
||||
ty::AliasTermKind::ProjectionTy => Ty::new_error(tcx, guar).into(),
|
||||
ty::AliasTermKind::ProjectionConst => Const::new_error(tcx, guar).into(),
|
||||
kind => panic!("expected projection, found {kind:?}"),
|
||||
};
|
||||
ecx.instantiate_normalizes_to_term(goal, error_term);
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
};
|
||||
|
||||
if !tcx.has_item_definition(target_item_def_id) {
|
||||
return error_response(ecx, "missing item");
|
||||
}
|
||||
|
||||
let target_container_def_id = tcx.parent(target_item_def_id);
|
||||
|
||||
// Getting the right args here is complex, e.g. given:
|
||||
// - a goal `<Vec<u32> as Trait<i32>>::Assoc<u64>`
|
||||
// - the applicable impl `impl<T> Trait<i32> for Vec<T>`
|
||||
// - and the impl which defines `Assoc` being `impl<T, U> Trait<U> for Vec<T>`
|
||||
//
|
||||
// We first rebase the goal args onto the impl, going from `[Vec<u32>, i32, u64]`
|
||||
// to `[u32, u64]`.
|
||||
//
|
||||
// And then map these args to the args of the defining impl of `Assoc`, going
|
||||
// from `[u32, u64]` to `[u32, i32, u64]`.
|
||||
let target_args = ecx.translate_args(
|
||||
goal,
|
||||
impl_def_id,
|
||||
impl_args,
|
||||
impl_trait_ref,
|
||||
target_container_def_id,
|
||||
)?;
|
||||
|
||||
if !tcx.check_args_compatible(target_item_def_id, target_args) {
|
||||
return error_response(ecx, "associated item has mismatched arguments");
|
||||
}
|
||||
|
||||
// Finally we construct the actual value of the associated type.
|
||||
let term = match goal.predicate.alias.kind(tcx) {
|
||||
ty::AliasTermKind::ProjectionTy => {
|
||||
tcx.type_of(target_item_def_id).map_bound(|ty| ty.into())
|
||||
}
|
||||
ty::AliasTermKind::ProjectionConst => {
|
||||
if tcx.features().associated_const_equality() {
|
||||
panic!("associated const projection is not supported yet")
|
||||
} else {
|
||||
ty::EarlyBinder::bind(
|
||||
Const::new_error_with_message(
|
||||
tcx,
|
||||
"associated const projection is not supported yet",
|
||||
)
|
||||
.into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
kind => panic!("expected projection, found {kind:?}"),
|
||||
};
|
||||
|
||||
ecx.instantiate_normalizes_to_term(goal, term.instantiate(tcx, &target_args));
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
/// Fail to normalize if the predicate contains an error; alternatively, we could normalize to `ty::Error`
|
||||
/// and succeed. Can experiment with this to figure out what results in better error messages.
|
||||
fn consider_error_guaranteed_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
_guar: I::ErrorGuaranteed,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
Err(NoSolution)
|
||||
}
|
||||
|
||||
fn consider_auto_trait_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
_goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
ecx.interner().delay_bug("associated types not allowed on auto traits");
|
||||
Err(NoSolution)
|
||||
}
|
||||
|
||||
fn consider_trait_alias_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("trait aliases do not have associated types: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_sized_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`Sized` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_copy_clone_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`Copy`/`Clone` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_pointer_like_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`PointerLike` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_fn_ptr_trait_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`FnPtr` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_fn_trait_candidates(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
goal_kind: ty::ClosureKind,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let tcx = ecx.interner();
|
||||
let tupled_inputs_and_output =
|
||||
match structural_traits::extract_tupled_inputs_and_output_from_callable(
|
||||
tcx,
|
||||
goal.predicate.self_ty(),
|
||||
goal_kind,
|
||||
)? {
|
||||
Some(tupled_inputs_and_output) => tupled_inputs_and_output,
|
||||
None => {
|
||||
return ecx.forced_ambiguity(MaybeCause::Ambiguity);
|
||||
}
|
||||
};
|
||||
let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
|
||||
ty::TraitRef::new(tcx, tcx.require_lang_item(TraitSolverLangItem::Sized), [output])
|
||||
});
|
||||
|
||||
let pred = tupled_inputs_and_output
|
||||
.map_bound(|(inputs, output)| ty::ProjectionPredicate {
|
||||
projection_term: ty::AliasTerm::new(
|
||||
tcx,
|
||||
goal.predicate.def_id(),
|
||||
[goal.predicate.self_ty(), inputs],
|
||||
),
|
||||
term: output.into(),
|
||||
})
|
||||
.upcast(tcx);
|
||||
|
||||
// A built-in `Fn` impl only holds if the output is sized.
|
||||
// (FIXME: technically we only need to check this if the type is a fn ptr...)
|
||||
Self::probe_and_consider_implied_clause(
|
||||
ecx,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
pred,
|
||||
[(GoalSource::ImplWhereBound, goal.with(tcx, output_is_sized_pred))],
|
||||
)
|
||||
}
|
||||
|
||||
fn consider_builtin_async_fn_trait_candidates(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
goal_kind: ty::ClosureKind,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let tcx = ecx.interner();
|
||||
|
||||
let env_region = match goal_kind {
|
||||
ty::ClosureKind::Fn | ty::ClosureKind::FnMut => goal.predicate.alias.args.region_at(2),
|
||||
// Doesn't matter what this region is
|
||||
ty::ClosureKind::FnOnce => Region::new_static(tcx),
|
||||
};
|
||||
let (tupled_inputs_and_output_and_coroutine, nested_preds) =
|
||||
structural_traits::extract_tupled_inputs_and_output_from_async_callable(
|
||||
tcx,
|
||||
goal.predicate.self_ty(),
|
||||
goal_kind,
|
||||
env_region,
|
||||
)?;
|
||||
let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
|
||||
|AsyncCallableRelevantTypes { output_coroutine_ty: output_ty, .. }| {
|
||||
ty::TraitRef::new(
|
||||
tcx,
|
||||
tcx.require_lang_item(TraitSolverLangItem::Sized),
|
||||
[output_ty],
|
||||
)
|
||||
},
|
||||
);
|
||||
|
||||
let pred = tupled_inputs_and_output_and_coroutine
|
||||
.map_bound(
|
||||
|AsyncCallableRelevantTypes {
|
||||
tupled_inputs_ty,
|
||||
output_coroutine_ty,
|
||||
coroutine_return_ty,
|
||||
}| {
|
||||
let (projection_term, term) = if tcx
|
||||
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallOnceFuture)
|
||||
{
|
||||
(
|
||||
ty::AliasTerm::new(
|
||||
tcx,
|
||||
goal.predicate.def_id(),
|
||||
[goal.predicate.self_ty(), tupled_inputs_ty],
|
||||
),
|
||||
output_coroutine_ty.into(),
|
||||
)
|
||||
} else if tcx
|
||||
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallRefFuture)
|
||||
{
|
||||
(
|
||||
ty::AliasTerm::new(
|
||||
tcx,
|
||||
goal.predicate.def_id(),
|
||||
[
|
||||
I::GenericArg::from(goal.predicate.self_ty()),
|
||||
tupled_inputs_ty.into(),
|
||||
env_region.into(),
|
||||
],
|
||||
),
|
||||
output_coroutine_ty.into(),
|
||||
)
|
||||
} else if tcx.is_lang_item(
|
||||
goal.predicate.def_id(),
|
||||
TraitSolverLangItem::AsyncFnOnceOutput,
|
||||
) {
|
||||
(
|
||||
ty::AliasTerm::new(
|
||||
tcx,
|
||||
goal.predicate.def_id(),
|
||||
[
|
||||
I::GenericArg::from(goal.predicate.self_ty()),
|
||||
tupled_inputs_ty.into(),
|
||||
],
|
||||
),
|
||||
coroutine_return_ty.into(),
|
||||
)
|
||||
} else {
|
||||
panic!(
|
||||
"no such associated type in `AsyncFn*`: {:?}",
|
||||
goal.predicate.def_id()
|
||||
)
|
||||
};
|
||||
ty::ProjectionPredicate { projection_term, term }
|
||||
},
|
||||
)
|
||||
.upcast(tcx);
|
||||
|
||||
// A built-in `AsyncFn` impl only holds if the output is sized.
|
||||
// (FIXME: technically we only need to check this if the type is a fn ptr...)
|
||||
Self::probe_and_consider_implied_clause(
|
||||
ecx,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
pred,
|
||||
[goal.with(tcx, output_is_sized_pred)]
|
||||
.into_iter()
|
||||
.chain(nested_preds.into_iter().map(|pred| goal.with(tcx, pred)))
|
||||
.map(|goal| (GoalSource::ImplWhereBound, goal)),
|
||||
)
|
||||
}
|
||||
|
||||
fn consider_builtin_async_fn_kind_helper_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let [
|
||||
closure_fn_kind_ty,
|
||||
goal_kind_ty,
|
||||
borrow_region,
|
||||
tupled_inputs_ty,
|
||||
tupled_upvars_ty,
|
||||
coroutine_captures_by_ref_ty,
|
||||
] = **goal.predicate.alias.args
|
||||
else {
|
||||
panic!();
|
||||
};
|
||||
|
||||
// Bail if the upvars haven't been constrained.
|
||||
if tupled_upvars_ty.expect_ty().is_ty_var() {
|
||||
return ecx.forced_ambiguity(MaybeCause::Ambiguity);
|
||||
}
|
||||
|
||||
let Some(closure_kind) = closure_fn_kind_ty.expect_ty().to_opt_closure_kind() else {
|
||||
// We don't need to worry about the self type being an infer var.
|
||||
return Err(NoSolution);
|
||||
};
|
||||
let Some(goal_kind) = goal_kind_ty.expect_ty().to_opt_closure_kind() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
if !closure_kind.extends(goal_kind) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
let upvars_ty = ty::CoroutineClosureSignature::tupled_upvars_by_closure_kind(
|
||||
ecx.interner(),
|
||||
goal_kind,
|
||||
tupled_inputs_ty.expect_ty(),
|
||||
tupled_upvars_ty.expect_ty(),
|
||||
coroutine_captures_by_ref_ty.expect_ty(),
|
||||
borrow_region.expect_region(),
|
||||
);
|
||||
|
||||
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
|
||||
ecx.instantiate_normalizes_to_term(goal, upvars_ty.into());
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_builtin_tuple_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`Tuple` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_pointee_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let tcx = ecx.interner();
|
||||
let metadata_def_id = tcx.require_lang_item(TraitSolverLangItem::Metadata);
|
||||
assert_eq!(metadata_def_id, goal.predicate.def_id());
|
||||
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
|
||||
let metadata_ty = match goal.predicate.self_ty().kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(..)
|
||||
| ty::Uint(..)
|
||||
| ty::Float(..)
|
||||
| ty::Array(..)
|
||||
| ty::Pat(..)
|
||||
| ty::RawPtr(..)
|
||||
| ty::Ref(..)
|
||||
| ty::FnDef(..)
|
||||
| ty::FnPtr(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
|
||||
| ty::Coroutine(..)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Foreign(..)
|
||||
| ty::Dynamic(_, _, ty::DynStar) => Ty::new_unit(tcx),
|
||||
|
||||
ty::Error(e) => Ty::new_error(tcx, e),
|
||||
|
||||
ty::Str | ty::Slice(_) => Ty::new_usize(tcx),
|
||||
|
||||
ty::Dynamic(_, _, ty::Dyn) => {
|
||||
let dyn_metadata = tcx.require_lang_item(TraitSolverLangItem::DynMetadata);
|
||||
tcx.type_of(dyn_metadata)
|
||||
.instantiate(tcx, &[I::GenericArg::from(goal.predicate.self_ty())])
|
||||
}
|
||||
|
||||
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
|
||||
// This is the "fallback impl" for type parameters, unnormalizable projections
|
||||
// and opaque types: If the `self_ty` is `Sized`, then the metadata is `()`.
|
||||
// FIXME(ptr_metadata): This impl overlaps with the other impls and shouldn't
|
||||
// exist. Instead, `Pointee<Metadata = ()>` should be a supertrait of `Sized`.
|
||||
let sized_predicate = ty::TraitRef::new(
|
||||
tcx,
|
||||
tcx.require_lang_item(TraitSolverLangItem::Sized),
|
||||
[I::GenericArg::from(goal.predicate.self_ty())],
|
||||
);
|
||||
// FIXME(-Znext-solver=coinductive): Should this be `GoalSource::ImplWhereBound`?
|
||||
ecx.add_goal(GoalSource::Misc, goal.with(tcx, sized_predicate));
|
||||
Ty::new_unit(tcx)
|
||||
}
|
||||
|
||||
ty::Adt(def, args) if def.is_struct() => match def.struct_tail_ty(tcx) {
|
||||
None => Ty::new_unit(tcx),
|
||||
Some(tail_ty) => {
|
||||
Ty::new_projection(tcx, metadata_def_id, [tail_ty.instantiate(tcx, &args)])
|
||||
}
|
||||
},
|
||||
ty::Adt(_, _) => Ty::new_unit(tcx),
|
||||
|
||||
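// The metadata of a tuple is the metadata of its last element,
// or `()` for the empty tuple.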
ty::Tuple(elements) => match elements.last() {
|
||||
None => Ty::new_unit(tcx),
|
||||
Some(&tail_ty) => Ty::new_projection(tcx, metadata_def_id, [tail_ty]),
|
||||
},
|
||||
|
||||
ty::Infer(
|
||||
ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_),
|
||||
)
|
||||
| ty::Bound(..) => panic!(
|
||||
"unexpected self ty `{:?}` when normalizing `<T as Pointee>::Metadata`",
|
||||
goal.predicate.self_ty()
|
||||
),
|
||||
};
|
||||
|
||||
ecx.instantiate_normalizes_to_term(goal, metadata_ty.into());
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_builtin_future_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let ty::Coroutine(def_id, args) = self_ty.kind() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
|
||||
// Coroutines are not futures unless they come from `async` desugaring
|
||||
let tcx = ecx.interner();
|
||||
if !tcx.coroutine_is_async(def_id) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
let term = args.as_coroutine().return_ty().into();
|
||||
|
||||
Self::probe_and_consider_implied_clause(
|
||||
ecx,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
ty::ProjectionPredicate {
|
||||
projection_term: ty::AliasTerm::new(
|
||||
ecx.interner(),
|
||||
goal.predicate.def_id(),
|
||||
[self_ty],
|
||||
),
|
||||
term,
|
||||
}
|
||||
.upcast(tcx),
|
||||
// Technically, we need to check that the future type is Sized,
|
||||
// but that's already proven by the coroutine being WF.
|
||||
[],
|
||||
)
|
||||
}
|
||||
|
||||
fn consider_builtin_iterator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let ty::Coroutine(def_id, args) = self_ty.kind() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
|
||||
// Coroutines are not Iterators unless they come from `gen` desugaring
|
||||
let tcx = ecx.interner();
|
||||
if !tcx.coroutine_is_gen(def_id) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
let term = args.as_coroutine().yield_ty().into();
|
||||
|
||||
Self::probe_and_consider_implied_clause(
|
||||
ecx,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
ty::ProjectionPredicate {
|
||||
projection_term: ty::AliasTerm::new(
|
||||
ecx.interner(),
|
||||
goal.predicate.def_id(),
|
||||
[self_ty],
|
||||
),
|
||||
term,
|
||||
}
|
||||
.upcast(tcx),
|
||||
// Technically, we need to check that the iterator type is Sized,
|
||||
// but that's already proven by the coroutine being WF.
|
||||
[],
|
||||
)
|
||||
}
|
||||
|
||||
fn consider_builtin_fused_iterator_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`FusedIterator` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_async_iterator_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let ty::Coroutine(def_id, args) = self_ty.kind() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
|
||||
// Coroutines are not AsyncIterators unless they come from `gen` desugaring
|
||||
let tcx = ecx.interner();
|
||||
if !tcx.coroutine_is_async_gen(def_id) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
|
||||
let expected_ty = ecx.next_ty_infer();
|
||||
// Take `AsyncIterator<Item = I>` and turn it into the corresponding
|
||||
// coroutine yield ty `Poll<Option<I>>`.
|
||||
let wrapped_expected_ty = Ty::new_adt(
|
||||
tcx,
|
||||
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Poll)),
|
||||
tcx.mk_args(&[Ty::new_adt(
|
||||
tcx,
|
||||
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Option)),
|
||||
tcx.mk_args(&[expected_ty.into()]),
|
||||
)
|
||||
.into()]),
|
||||
);
|
||||
let yield_ty = args.as_coroutine().yield_ty();
|
||||
ecx.eq(goal.param_env, wrapped_expected_ty, yield_ty)?;
|
||||
ecx.instantiate_normalizes_to_term(goal, expected_ty.into());
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_builtin_coroutine_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let ty::Coroutine(def_id, args) = self_ty.kind() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
|
||||
// `async`-desugared coroutines do not implement the coroutine trait
|
||||
let tcx = ecx.interner();
|
||||
if !tcx.is_general_coroutine(def_id) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
let coroutine = args.as_coroutine();
|
||||
|
||||
let term = if tcx
|
||||
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineReturn)
|
||||
{
|
||||
coroutine.return_ty().into()
|
||||
} else if tcx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineYield) {
|
||||
coroutine.yield_ty().into()
|
||||
} else {
|
||||
panic!("unexpected associated item `{:?}` for `{self_ty:?}`", goal.predicate.def_id())
|
||||
};
|
||||
|
||||
Self::probe_and_consider_implied_clause(
|
||||
ecx,
|
||||
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
|
||||
goal,
|
||||
ty::ProjectionPredicate {
|
||||
projection_term: ty::AliasTerm::new(
|
||||
ecx.interner(),
|
||||
goal.predicate.def_id(),
|
||||
[self_ty, coroutine.resume_ty()],
|
||||
),
|
||||
term,
|
||||
}
|
||||
.upcast(tcx),
|
||||
// Technically, we need to check that the coroutine type is Sized,
|
||||
// but that's already proven by the coroutine being WF.
|
||||
[],
|
||||
)
|
||||
}
|
||||
|
||||
fn consider_structural_builtin_unsize_candidates(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Vec<Candidate<I>> {
|
||||
panic!("`Unsize` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_discriminant_kind_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let discriminant_ty = match self_ty.kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(..)
|
||||
| ty::Uint(..)
|
||||
| ty::Float(..)
|
||||
| ty::Array(..)
|
||||
| ty::Pat(..)
|
||||
| ty::RawPtr(..)
|
||||
| ty::Ref(..)
|
||||
| ty::FnDef(..)
|
||||
| ty::FnPtr(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
|
||||
| ty::Coroutine(..)
|
||||
| ty::CoroutineWitness(..)
|
||||
| ty::Never
|
||||
| ty::Foreign(..)
|
||||
| ty::Adt(_, _)
|
||||
| ty::Str
|
||||
| ty::Slice(_)
|
||||
| ty::Dynamic(_, _, _)
|
||||
| ty::Tuple(_)
|
||||
| ty::Error(_) => self_ty.discriminant_ty(ecx.interner()),
|
||||
|
||||
// We do not call `Ty::discriminant_ty` on alias, param, or placeholder
|
||||
// types, which return `<self_ty as DiscriminantKind>::Discriminant`
|
||||
// (or ICE in the case of placeholders). Projecting a type to itself
|
||||
// is never really productive.
|
||||
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
|
||||
| ty::Bound(..) => panic!(
|
||||
"unexpected self ty `{:?}` when normalizing `<T as DiscriminantKind>::Discriminant`",
|
||||
goal.predicate.self_ty()
|
||||
),
|
||||
};
|
||||
|
||||
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
|
||||
ecx.instantiate_normalizes_to_term(goal, discriminant_ty.into());
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_builtin_async_destruct_candidate(
|
||||
ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
let self_ty = goal.predicate.self_ty();
|
||||
let async_destructor_ty = match self_ty.kind() {
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(..)
|
||||
| ty::Uint(..)
|
||||
| ty::Float(..)
|
||||
| ty::Array(..)
|
||||
| ty::RawPtr(..)
|
||||
| ty::Ref(..)
|
||||
| ty::FnDef(..)
|
||||
| ty::FnPtr(..)
|
||||
| ty::Closure(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
|
||||
| ty::Never
|
||||
| ty::Adt(_, _)
|
||||
| ty::Str
|
||||
| ty::Slice(_)
|
||||
| ty::Tuple(_)
|
||||
| ty::Error(_) => self_ty.async_destructor_ty(ecx.interner()),
|
||||
|
||||
// We do not call `Ty::async_destructor_ty` on alias, param, or placeholder
|
||||
// types, which return `<self_ty as AsyncDestruct>::AsyncDestructor`
|
||||
// (or ICE in the case of placeholders). Projecting a type to itself
|
||||
// is never really productive.
|
||||
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
|
||||
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
|
||||
| ty::Foreign(..)
|
||||
| ty::Bound(..) => panic!(
|
||||
"unexpected self ty `{:?}` when normalizing `<T as AsyncDestruct>::AsyncDestructor`",
|
||||
goal.predicate.self_ty()
|
||||
),
|
||||
|
||||
ty::Pat(..) | ty::Dynamic(..) | ty::Coroutine(..) | ty::CoroutineWitness(..) => panic!(
|
||||
"`consider_builtin_async_destruct_candidate` is not yet implemented for type: {self_ty:?}"
|
||||
),
|
||||
};
|
||||
|
||||
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
|
||||
ecx.eq(goal.param_env, goal.predicate.term, async_destructor_ty.into())
|
||||
.expect("expected goal term to be fully unconstrained");
|
||||
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
})
|
||||
}
|
||||
|
||||
fn consider_builtin_destruct_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`Destruct` does not have an associated type: {:?}", goal);
|
||||
}
|
||||
|
||||
fn consider_builtin_transmute_candidate(
|
||||
_ecx: &mut EvalCtxt<'_, Infcx>,
|
||||
goal: Goal<I, Self>,
|
||||
) -> Result<Candidate<I>, NoSolution> {
|
||||
panic!("`BikeshedIntrinsicFrom` does not have an associated type: {:?}", goal)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
fn translate_args(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
impl_def_id: I::DefId,
|
||||
impl_args: I::GenericArgs,
|
||||
impl_trait_ref: rustc_type_ir::TraitRef<I>,
|
||||
target_container_def_id: I::DefId,
|
||||
) -> Result<I::GenericArgs, NoSolution> {
|
||||
let tcx = self.interner();
|
||||
Ok(if target_container_def_id == impl_trait_ref.def_id {
|
||||
// Default value from the trait definition. No need to rebase.
|
||||
goal.predicate.alias.args
|
||||
} else if target_container_def_id == impl_def_id {
|
||||
// Same impl, no need to fully translate, just a rebase from
|
||||
// the trait is sufficient.
|
||||
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, impl_args)
|
||||
} else {
|
||||
let target_args = self.fresh_args_for_item(target_container_def_id);
|
||||
let target_trait_ref =
|
||||
tcx.impl_trait_ref(target_container_def_id).instantiate(tcx, &target_args);
|
||||
// Relate source impl to target impl by equating trait refs.
|
||||
self.eq(goal.param_env, impl_trait_ref, target_trait_ref)?;
|
||||
// Also add predicates since they may be needed to constrain the
|
||||
// target impl's params.
|
||||
self.add_goals(
|
||||
GoalSource::Misc,
|
||||
tcx.predicates_of(target_container_def_id)
|
||||
.iter_instantiated(tcx, &target_args)
|
||||
.map(|pred| goal.with(tcx, pred)),
|
||||
);
|
||||
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, target_args)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,136 @@
|
|||
//! Computes a normalizes-to (projection) goal for opaque types. This goal
|
||||
//! behaves differently depending on the param-env's reveal mode and whether
|
||||
//! the opaque is in a defining scope.
|
||||
|
||||
use rustc_index::bit_set::GrowableBitSet;
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, NoSolution, QueryResult, Reveal, SolverMode};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
pub(super) fn normalize_opaque_type(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
) -> QueryResult<I> {
|
||||
let tcx = self.interner();
|
||||
let opaque_ty = goal.predicate.alias;
|
||||
let expected = goal.predicate.term.as_type().expect("no such thing as an opaque const");
|
||||
|
||||
match (goal.param_env.reveal(), self.solver_mode()) {
|
||||
(Reveal::UserFacing, SolverMode::Normal) => {
|
||||
let Some(opaque_ty_def_id) = opaque_ty.def_id.as_local() else {
|
||||
return Err(NoSolution);
|
||||
};
|
||||
// FIXME: at some point we should call queries without defining
|
||||
// new opaque types but with the existing opaque type definitions.
|
||||
// This will require moving this below "Prefer opaques registered already".
|
||||
if !self.can_define_opaque_ty(opaque_ty_def_id) {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
// FIXME: This may have issues when the args contain aliases...
|
||||
match uses_unique_placeholders_ignoring_regions(self.interner(), opaque_ty.args) {
|
||||
Err(NotUniqueParam::NotParam(param)) if param.is_non_region_infer() => {
|
||||
return self.evaluate_added_goals_and_make_canonical_response(
|
||||
Certainty::AMBIGUOUS,
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
return Err(NoSolution);
|
||||
}
|
||||
Ok(()) => {}
|
||||
}
|
||||
// Prefer opaques registered already.
|
||||
let opaque_type_key =
|
||||
ty::OpaqueTypeKey { def_id: opaque_ty_def_id, args: opaque_ty.args };
|
||||
// FIXME: This also unifies the previous hidden type with the expected.
|
||||
//
|
||||
// If that fails, we insert `expected` as a new hidden type instead of
|
||||
// eagerly emitting an error.
|
||||
let matches =
|
||||
self.unify_existing_opaque_tys(goal.param_env, opaque_type_key, expected);
|
||||
if !matches.is_empty() {
|
||||
if let Some(response) = self.try_merge_responses(&matches) {
|
||||
return Ok(response);
|
||||
} else {
|
||||
return self.flounder(&matches);
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, define a new opaque type
|
||||
// FIXME: should we use `inject_hidden_type_unchecked` here?
|
||||
self.insert_hidden_type(opaque_type_key, goal.param_env, expected)?;
|
||||
self.add_item_bounds_for_hidden_type(
|
||||
opaque_ty.def_id,
|
||||
opaque_ty.args,
|
||||
goal.param_env,
|
||||
expected,
|
||||
);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
(Reveal::UserFacing, SolverMode::Coherence) => {
|
||||
// An impossible opaque type bound is the only way this goal will fail
|
||||
// e.g. assigning `impl Copy := NotCopy`
|
||||
self.add_item_bounds_for_hidden_type(
|
||||
opaque_ty.def_id,
|
||||
opaque_ty.args,
|
||||
goal.param_env,
|
||||
expected,
|
||||
);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS)
|
||||
}
|
||||
(Reveal::All, _) => {
|
||||
// FIXME: Add an assertion that opaque type storage is empty.
|
||||
let actual = tcx.type_of(opaque_ty.def_id).instantiate(tcx, &opaque_ty.args);
|
||||
self.eq(goal.param_env, expected, actual)?;
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks whether each generic argument is simply a unique generic placeholder.
|
||||
///
|
||||
/// FIXME: Interner argument is needed to constrain the `I` parameter.
|
||||
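///
/// For example, placeholder arguments like `[!0, !1]` are accepted, while
/// `[!0, !0]` results in `NotUniqueParam::DuplicateParam` and a non-placeholder
/// argument such as `Vec<!0>` results in `NotUniqueParam::NotParam`.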
pub fn uses_unique_placeholders_ignoring_regions<I: Interner>(
|
||||
_interner: I,
|
||||
args: I::GenericArgs,
|
||||
) -> Result<(), NotUniqueParam<I>> {
|
||||
let mut seen = GrowableBitSet::default();
|
||||
for arg in args {
|
||||
match arg.kind() {
|
||||
// Ignore regions, since we can't resolve those in a canonicalized
|
||||
// query in the trait solver.
|
||||
ty::GenericArgKind::Lifetime(_) => {}
|
||||
ty::GenericArgKind::Type(t) => match t.kind() {
|
||||
ty::Placeholder(p) => {
|
||||
if !seen.insert(p.var()) {
|
||||
return Err(NotUniqueParam::DuplicateParam(t.into()));
|
||||
}
|
||||
}
|
||||
_ => return Err(NotUniqueParam::NotParam(t.into())),
|
||||
},
|
||||
ty::GenericArgKind::Const(c) => match c.kind() {
|
||||
ty::ConstKind::Placeholder(p) => {
|
||||
if !seen.insert(p.var()) {
|
||||
return Err(NotUniqueParam::DuplicateParam(c.into()));
|
||||
}
|
||||
}
|
||||
_ => return Err(NotUniqueParam::NotParam(c.into())),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// FIXME: This should check for dupes and non-params first, then infer vars.
|
||||
pub enum NotUniqueParam<I: Interner> {
|
||||
DuplicateParam(I::GenericArg),
|
||||
NotParam(I::GenericArg),
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
//! Computes a normalizes-to (projection) goal for weak type aliases,
|
||||
//! `#![feature(lazy_type_alias)]` and `#![feature(type_alias_impl_trait)]`.
|
||||
//!
|
||||
//! Since a weak alias is never ambiguous, this just computes the `type_of` of
|
||||
//! the alias and registers the where-clauses of the type alias.
|
||||
|
||||
use rustc_type_ir::{self as ty, Interner};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, GoalSource, QueryResult};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
pub(super) fn normalize_weak_type(
|
||||
&mut self,
|
||||
goal: Goal<I, ty::NormalizesTo<I>>,
|
||||
) -> QueryResult<I> {
|
||||
let tcx = self.interner();
|
||||
let weak_ty = goal.predicate.alias;
|
||||
|
||||
// Check where clauses
|
||||
self.add_goals(
|
||||
GoalSource::Misc,
|
||||
tcx.predicates_of(weak_ty.def_id)
|
||||
.iter_instantiated(tcx, &weak_ty.args)
|
||||
.map(|pred| goal.with(tcx, pred)),
|
||||
);
|
||||
|
||||
let actual = tcx.type_of(weak_ty.def_id).instantiate(tcx, &weak_ty.args);
|
||||
self.instantiate_normalizes_to_term(goal, actual.into());
|
||||
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
29
compiler/rustc_next_trait_solver/src/solve/project_goals.rs
Normal file
|
@ -0,0 +1,29 @@
|
|||
use rustc_type_ir::{self as ty, Interner, ProjectionPredicate};
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::{Certainty, EvalCtxt, Goal, GoalSource, QueryResult};
|
||||
|
||||
impl<Infcx, I> EvalCtxt<'_, Infcx>
|
||||
where
|
||||
Infcx: SolverDelegate<Interner = I>,
|
||||
I: Interner,
|
||||
{
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub(super) fn compute_projection_goal(
|
||||
&mut self,
|
||||
goal: Goal<I, ProjectionPredicate<I>>,
|
||||
) -> QueryResult<I> {
|
||||
let tcx = self.interner();
|
||||
let projection_term = goal.predicate.projection_term.to_term(tcx);
|
||||
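// Instead of normalizing the projection directly, emit an `AliasRelate`
// goal equating the alias term with the expected term.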
let goal = goal.with(
|
||||
tcx,
|
||||
ty::PredicateKind::AliasRelate(
|
||||
projection_term,
|
||||
goal.predicate.term,
|
||||
ty::AliasRelationDirection::Equate,
|
||||
),
|
||||
);
|
||||
self.add_goal(GoalSource::Misc, goal);
|
||||
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
|
||||
}
|
||||
}
|
603
compiler/rustc_next_trait_solver/src/solve/search_graph.rs
Normal file
|
@ -0,0 +1,603 @@
|
|||
use std::mem;
|
||||
|
||||
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
|
||||
use rustc_index::{Idx, IndexVec};
|
||||
use rustc_type_ir::inherent::*;
|
||||
use rustc_type_ir::Interner;
|
||||
|
||||
use crate::infcx::SolverDelegate;
|
||||
use crate::solve::inspect::{self, ProofTreeBuilder};
|
||||
use crate::solve::{
|
||||
CacheData, CanonicalInput, Certainty, QueryResult, SolverMode, FIXPOINT_STEP_LIMIT,
|
||||
};
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Limit(usize);
|
||||
|
||||
rustc_index::newtype_index! {
|
||||
#[orderable]
|
||||
pub struct StackDepth {}
|
||||
}
|
||||
|
||||
bitflags::bitflags! {
|
||||
/// Whether and how this goal has been used as the root of a
|
||||
/// cycle. We track the kind of cycle as we're otherwise forced
|
||||
/// to always rerun at least once.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
struct HasBeenUsed: u8 {
|
||||
const INDUCTIVE_CYCLE = 1 << 0;
|
||||
const COINDUCTIVE_CYCLE = 1 << 1;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(Debug(bound = ""))]
|
||||
struct StackEntry<I: Interner> {
|
||||
input: CanonicalInput<I>,
|
||||
|
||||
available_depth: Limit,
|
||||
|
||||
/// The maximum depth reached by this stack entry, only up to date
|
||||
/// for the top of the stack and lazily updated for the rest.
|
||||
reached_depth: StackDepth,
|
||||
|
||||
/// Whether this entry is a non-root cycle participant.
|
||||
///
|
||||
/// We must not move the result of non-root cycle participants to the
|
||||
/// global cache. We store the highest stack depth of a head of a cycle
|
||||
/// this goal is involved in. This is necessary to soundly cache its
|
||||
/// provisional result.
|
||||
non_root_cycle_participant: Option<StackDepth>,
|
||||
|
||||
encountered_overflow: bool,
|
||||
|
||||
has_been_used: HasBeenUsed,
|
||||
|
||||
/// We put only the root goal of a coinductive cycle into the global cache.
|
||||
///
|
||||
/// If we were to use that result when later trying to prove another cycle
|
||||
/// participant, we can end up with unstable query results.
|
||||
///
|
||||
/// See tests/ui/next-solver/coinduction/incompleteness-unstable-result.rs for
|
||||
/// an example of where this is needed.
|
||||
///
|
||||
/// There can be multiple roots on the same stack, so we need to track
|
||||
/// cycle participants per root:
|
||||
/// ```plain
|
||||
/// A :- B
|
||||
/// B :- A, C
|
||||
/// C :- D
|
||||
/// D :- C
|
||||
/// ```
|
||||
cycle_participants: FxHashSet<CanonicalInput<I>>,
|
||||
/// Starts out as `None` and gets set when rerunning this
|
||||
/// goal in case we encounter a cycle.
|
||||
provisional_result: Option<QueryResult<I>>,
|
||||
}
|
||||
|
||||
/// The provisional result for a goal which is not on the stack.
|
||||
#[derive(Debug)]
|
||||
struct DetachedEntry<I: Interner> {
|
||||
/// The head of the smallest non-trivial cycle involving this entry.
|
||||
///
|
||||
/// Given the following rules, when proving `A` the head for
|
||||
/// the provisional entry of `C` would be `B`.
|
||||
/// ```plain
|
||||
/// A :- B
|
||||
/// B :- C
|
||||
/// C :- A + B + C
|
||||
/// ```
|
||||
head: StackDepth,
|
||||
result: QueryResult<I>,
|
||||
}
|
||||
|
||||
/// Stores the stack depth of a currently evaluated goal *and* already
|
||||
/// computed results for goals which depend on other goals still on the stack.
|
||||
///
|
||||
/// The provisional result may depend on whether the stack above it is inductive
|
||||
/// or coinductive. Because of this, we store separate provisional results for
|
||||
/// each case. If a provisional entry is not applicable, it may be the case
|
||||
/// that we already have a provisional result while computing a goal. In this case
|
||||
/// we prefer the provisional result to potentially avoid fixpoint iterations.
|
||||
/// See tests/ui/traits/next-solver/cycles/mixed-cycles-2.rs for an example.
|
||||
///
|
||||
/// The provisional cache can theoretically result in changes to the observable behavior,
|
||||
/// see tests/ui/traits/next-solver/cycles/provisional-cache-impacts-behavior.rs.
|
||||
#[derive(derivative::Derivative)]
|
||||
#[derivative(Default(bound = ""))]
|
||||
struct ProvisionalCacheEntry<I: Interner> {
|
||||
stack_depth: Option<StackDepth>,
|
||||
with_inductive_stack: Option<DetachedEntry<I>>,
|
||||
with_coinductive_stack: Option<DetachedEntry<I>>,
|
||||
}

impl<I: Interner> ProvisionalCacheEntry<I> {
    fn is_empty(&self) -> bool {
        self.stack_depth.is_none()
            && self.with_inductive_stack.is_none()
            && self.with_coinductive_stack.is_none()
    }
}

pub(super) struct SearchGraph<I: Interner> {
    mode: SolverMode,
    /// The stack of goals currently being computed.
    ///
    /// An element is *deeper* in the stack if its index is *lower*.
    stack: IndexVec<StackDepth, StackEntry<I>>,
    provisional_cache: FxHashMap<CanonicalInput<I>, ProvisionalCacheEntry<I>>,
}

impl<I: Interner> SearchGraph<I> {
    pub(super) fn new(mode: SolverMode) -> SearchGraph<I> {
        Self { mode, stack: Default::default(), provisional_cache: Default::default() }
    }

    pub(super) fn solver_mode(&self) -> SolverMode {
        self.mode
    }

    /// Pops the highest goal from the stack, lazily updating the
    /// next goal in the stack.
    ///
    /// Directly popping from the stack instead of using this method
    /// would cause us to not track overflow and recursion depth correctly.
    fn pop_stack(&mut self) -> StackEntry<I> {
        let elem = self.stack.pop().unwrap();
        if let Some(last) = self.stack.raw.last_mut() {
            last.reached_depth = last.reached_depth.max(elem.reached_depth);
            last.encountered_overflow |= elem.encountered_overflow;
        }
        elem
    }
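
    // A minimal sketch (illustrative only) of the propagation done by `pop_stack`
    // above, using a plain struct instead of `StackEntry`: the parent entry on the
    // stack inherits the maximum reached depth and any overflow of the popped child.
    fn sketch_pop_propagation() {
        struct Entry {
            reached_depth: usize,
            encountered_overflow: bool,
        }
        let mut stack = vec![
            Entry { reached_depth: 0, encountered_overflow: false },
            Entry { reached_depth: 3, encountered_overflow: true },
        ];
        let child = stack.pop().unwrap();
        if let Some(parent) = stack.last_mut() {
            parent.reached_depth = parent.reached_depth.max(child.reached_depth);
            parent.encountered_overflow |= child.encountered_overflow;
        }
        assert_eq!(stack[0].reached_depth, 3);
        assert!(stack[0].encountered_overflow);
    }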

    pub(super) fn is_empty(&self) -> bool {
        self.stack.is_empty()
    }

    /// Returns the remaining depth allowed for nested goals.
    ///
    /// This is generally simply one less than the current depth.
    /// However, if we encountered overflow, we significantly reduce
    /// the remaining depth of all nested goals to prevent hangs
    /// in case there is exponential blowup.
    fn allowed_depth_for_nested(
        tcx: I,
        stack: &IndexVec<StackDepth, StackEntry<I>>,
    ) -> Option<Limit> {
        if let Some(last) = stack.raw.last() {
            if last.available_depth.0 == 0 {
                return None;
            }

            Some(if last.encountered_overflow {
                Limit(last.available_depth.0 / 4)
            } else {
                Limit(last.available_depth.0 - 1)
            })
        } else {
            Some(Limit(tcx.recursion_limit()))
        }
    }
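
    // Minimal sketch (illustrative only) of the depth budgeting above, with plain
    // `usize` standing in for `Limit`: each nested goal gets one less unit of depth,
    // and once overflow has been encountered the budget is cut to a quarter so that
    // repeated overflow cannot cause exponential blowup.
    fn sketch_allowed_depth(parent_depth: usize, parent_overflowed: bool) -> Option<usize> {
        if parent_depth == 0 {
            return None;
        }
        Some(if parent_overflowed { parent_depth / 4 } else { parent_depth - 1 })
    }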

    fn stack_coinductive_from(
        tcx: I,
        stack: &IndexVec<StackDepth, StackEntry<I>>,
        head: StackDepth,
    ) -> bool {
        stack.raw[head.index()..]
            .iter()
            .all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx))
    }
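
    // Sketch (illustrative only) of the rule implemented above, with booleans
    // standing in for goals on the stack: a cycle is treated as coinductive only
    // if *every* goal from the cycle head up to the top of the stack is
    // coinductive; a single inductive step makes the whole cycle inductive.
    fn sketch_coinductive_from(goal_is_coinductive: &[bool], head: usize) -> bool {
        goal_is_coinductive[head..].iter().all(|&coinductive| coinductive)
    }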

    // When encountering a solver cycle, the result of the current goal
    // depends on goals lower on the stack.
    //
    // We therefore have to be careful when caching goals. Only the final result
    // of the cycle root, i.e. the lowest goal on the stack involved in this cycle,
    // is moved to the global cache while all others are stored in a provisional cache.
    //
    // We update both the head of this cycle, to rerun its evaluation until
    // we reach a fixpoint, and all other cycle participants, to make sure that
    // their result does not get moved to the global cache.
    fn tag_cycle_participants(
        stack: &mut IndexVec<StackDepth, StackEntry<I>>,
        usage_kind: HasBeenUsed,
        head: StackDepth,
    ) {
        stack[head].has_been_used |= usage_kind;
        debug_assert!(!stack[head].has_been_used.is_empty());

        // The current root of these cycles. Note that this may not be the final
        // root in case a later goal depends on a goal higher up the stack.
        let mut current_root = head;
        while let Some(parent) = stack[current_root].non_root_cycle_participant {
            current_root = parent;
            debug_assert!(!stack[current_root].has_been_used.is_empty());
        }

        let (stack, cycle_participants) = stack.raw.split_at_mut(head.index() + 1);
        let current_cycle_root = &mut stack[current_root.as_usize()];
        for entry in cycle_participants {
            entry.non_root_cycle_participant = entry.non_root_cycle_participant.max(Some(head));
            current_cycle_root.cycle_participants.insert(entry.input);
            current_cycle_root.cycle_participants.extend(mem::take(&mut entry.cycle_participants));
        }
    }
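
    // Sketch (illustrative only) of the root-walking loop above: with parent links
    // stored as a `Vec<Option<usize>>` instead of stack entries, we follow
    // `non_root_cycle_participant`-style links until a goal without a recorded
    // cycle head is found; that goal is the current root of the cycle.
    fn sketch_find_current_root(parents: &[Option<usize>], head: usize) -> usize {
        let mut current_root = head;
        while let Some(parent) = parents[current_root] {
            current_root = parent;
        }
        current_root
    }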

    fn clear_dependent_provisional_results(
        provisional_cache: &mut FxHashMap<CanonicalInput<I>, ProvisionalCacheEntry<I>>,
        head: StackDepth,
    ) {
        #[allow(rustc::potential_query_instability)]
        provisional_cache.retain(|_, entry| {
            entry.with_coinductive_stack.take_if(|p| p.head == head);
            entry.with_inductive_stack.take_if(|p| p.head == head);
            !entry.is_empty()
        });
    }
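
    // Sketch (illustrative only) of the clearing above, with a `HashMap` whose
    // values are just an optional cycle head: provisional results depending on
    // `head` are dropped via `Option::take_if`, and entries that end up empty
    // are removed from the cache entirely.
    fn sketch_clear_dependent(cache: &mut std::collections::HashMap<u32, Option<usize>>, head: usize) {
        cache.retain(|_, recorded_head| {
            recorded_head.take_if(|&mut h| h == head);
            recorded_head.is_some()
        });
    }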

    /// The trait solver behavior is different for coherence,
    /// so we use a separate cache. Alternatively we could use
    /// a single cache and share it between coherence and ordinary
    /// trait solving.
    pub(super) fn global_cache(&self, tcx: I) -> I::EvaluationCache {
        tcx.evaluation_cache(self.mode)
    }

    /// Probably the most involved method of the whole solver.
    ///
    /// Given some goal which is proven via the `prove_goal` closure, this
    /// handles caching, overflow, and coinductive cycles.
    pub(super) fn with_new_goal<Infcx: SolverDelegate<Interner = I>>(
        &mut self,
        tcx: I,
        input: CanonicalInput<I>,
        inspect: &mut ProofTreeBuilder<Infcx>,
        mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<Infcx>) -> QueryResult<I>,
    ) -> QueryResult<I> {
        self.check_invariants();
        // Check for overflow.
        let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
            if let Some(last) = self.stack.raw.last_mut() {
                last.encountered_overflow = true;
            }

            inspect
                .canonical_goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow);
            return Self::response_no_constraints(tcx, input, Certainty::overflow(true));
        };

        if let Some(result) = self.lookup_global_cache(tcx, input, available_depth, inspect) {
            debug!("global cache hit");
            return result;
        }

        // Check whether the goal is in the provisional cache.
        // The provisional result may rely on the path to its cycle roots,
        // so we have to check that the path of the current goal matches that of
        // the cache entry.
        let cache_entry = self.provisional_cache.entry(input).or_default();
        if let Some(entry) = cache_entry
            .with_coinductive_stack
            .as_ref()
            .filter(|p| Self::stack_coinductive_from(tcx, &self.stack, p.head))
            .or_else(|| {
                cache_entry
                    .with_inductive_stack
                    .as_ref()
                    .filter(|p| !Self::stack_coinductive_from(tcx, &self.stack, p.head))
            })
        {
            debug!("provisional cache hit");
            // We have a nested goal which is already in the provisional cache, use
            // its result. We do not provide any usage kind as that should have been
            // already set correctly while computing the cache entry.
            inspect.canonical_goal_evaluation_kind(
                inspect::WipCanonicalGoalEvaluationKind::ProvisionalCacheHit,
            );
            Self::tag_cycle_participants(&mut self.stack, HasBeenUsed::empty(), entry.head);
            return entry.result;
        } else if let Some(stack_depth) = cache_entry.stack_depth {
            debug!("encountered cycle with depth {stack_depth:?}");
            // We have a nested goal which directly relies on a goal deeper in the stack.
            //
            // We start by tagging all cycle participants, as that's necessary for caching.
            //
            // Finally we can return either the provisional response or the initial response
            // in case we're in the first fixpoint iteration for this goal.
            inspect.canonical_goal_evaluation_kind(
                inspect::WipCanonicalGoalEvaluationKind::CycleInStack,
            );
            let is_coinductive_cycle = Self::stack_coinductive_from(tcx, &self.stack, stack_depth);
            let usage_kind = if is_coinductive_cycle {
                HasBeenUsed::COINDUCTIVE_CYCLE
            } else {
                HasBeenUsed::INDUCTIVE_CYCLE
            };
            Self::tag_cycle_participants(&mut self.stack, usage_kind, stack_depth);

            // Return the provisional result or, if we're in the first iteration,
            // start with no constraints.
            return if let Some(result) = self.stack[stack_depth].provisional_result {
                result
            } else if is_coinductive_cycle {
                Self::response_no_constraints(tcx, input, Certainty::Yes)
            } else {
                Self::response_no_constraints(tcx, input, Certainty::overflow(false))
            };
        } else {
            // No entry, we push this goal on the stack and try to prove it.
            let depth = self.stack.next_index();
            let entry = StackEntry {
                input,
                available_depth,
                reached_depth: depth,
                non_root_cycle_participant: None,
                encountered_overflow: false,
                has_been_used: HasBeenUsed::empty(),
                cycle_participants: Default::default(),
                provisional_result: None,
            };
            assert_eq!(self.stack.push(entry), depth);
            cache_entry.stack_depth = Some(depth);
        }

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_anon_task` closure. If computing this goal depends on something
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycle participants.
        let ((final_entry, result), dep_node) = tcx.with_cached_task(|| {
            for _ in 0..FIXPOINT_STEP_LIMIT {
                match self.fixpoint_step_in_task(tcx, input, inspect, &mut prove_goal) {
                    StepResult::Done(final_entry, result) => return (final_entry, result),
                    StepResult::HasChanged => debug!("fixpoint changed provisional results"),
                }
            }

            debug!("canonical cycle overflow");
            let current_entry = self.pop_stack();
            debug_assert!(current_entry.has_been_used.is_empty());
            let result = Self::response_no_constraints(tcx, input, Certainty::overflow(false));
            (current_entry, result)
        });

        let proof_tree = inspect.finalize_canonical_goal_evaluation(tcx);

        // We're now done with this goal. In case this goal is involved in a larger cycle,
        // we do not remove it from the provisional cache but instead update its provisional
        // result. We only add the root of cycles to the global cache.
        if let Some(head) = final_entry.non_root_cycle_participant {
            let coinductive_stack = Self::stack_coinductive_from(tcx, &self.stack, head);

            let entry = self.provisional_cache.get_mut(&input).unwrap();
            entry.stack_depth = None;
            if coinductive_stack {
                entry.with_coinductive_stack = Some(DetachedEntry { head, result });
            } else {
                entry.with_inductive_stack = Some(DetachedEntry { head, result });
            }
        } else {
            self.provisional_cache.remove(&input);
            let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
            // When encountering a cycle, both inductive and coinductive, we only
            // move the root into the global cache. We also store all other cycle
            // participants involved.
            //
            // We must not use the global cache entry of a root goal if a cycle
            // participant is on the stack. This is necessary to prevent unstable
            // results. See the comment of `StackEntry::cycle_participants` for
            // more details.
            self.global_cache(tcx).insert(
                tcx,
                input,
                proof_tree,
                reached_depth,
                final_entry.encountered_overflow,
                final_entry.cycle_participants,
                dep_node,
                result,
            )
        }

        self.check_invariants();

        result
    }
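
    // Highly simplified sketch (illustrative only) of the fixpoint loop driven
    // inside `with_cached_task` above: run the step closure at most
    // `FIXPOINT_STEP_LIMIT` times (the `8` below is a stand-in value, not
    // necessarily the solver's actual constant) and stop early once a step
    // reports completion; hitting the limit corresponds to the
    // "canonical cycle overflow" case.
    fn sketch_fixpoint_loop<R>(mut step: impl FnMut() -> Option<R>) -> Option<R> {
        const FIXPOINT_STEP_LIMIT: usize = 8;
        for _ in 0..FIXPOINT_STEP_LIMIT {
            if let Some(result) = step() {
                return Some(result);
            }
        }
        None
    }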

    /// Try to fetch a previously computed result from the global cache,
    /// making sure to only do so if it would match the result of reevaluating
    /// this goal.
    fn lookup_global_cache<Infcx: SolverDelegate<Interner = I>>(
        &mut self,
        tcx: I,
        input: CanonicalInput<I>,
        available_depth: Limit,
        inspect: &mut ProofTreeBuilder<Infcx>,
    ) -> Option<QueryResult<I>> {
        let CacheData { result, proof_tree, additional_depth, encountered_overflow } = self
            .global_cache(tcx)
            // TODO: Awkward `Limit -> usize -> Limit`.
            .get(tcx, input, self.stack.iter().map(|e| e.input), available_depth.0)?;

        // If we're building a proof tree and the current cache entry does not
        // contain a proof tree, we do not use the entry but instead recompute
        // the goal. We simply overwrite the existing entry once we're done,
        // caching the proof tree.
        if !inspect.is_noop() {
            if let Some(final_revision) = proof_tree {
                let kind = inspect::WipCanonicalGoalEvaluationKind::Interned { final_revision };
                inspect.canonical_goal_evaluation_kind(kind);
            } else {
                return None;
            }
        }

        // Update the reached depth of the current goal to make sure
        // its state is the same regardless of whether we've used the
        // global cache or not.
        let reached_depth = self.stack.next_index().plus(additional_depth);
        if let Some(last) = self.stack.raw.last_mut() {
            last.reached_depth = last.reached_depth.max(reached_depth);
            last.encountered_overflow |= encountered_overflow;
        }

        Some(result)
    }
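
    // Sketch (illustrative only) of the proof-tree gate above: a cached result
    // may only be reused when either no proof tree is being built, or the cache
    // entry actually stores the final revision of one.
    fn sketch_can_use_cached_result(building_proof_tree: bool, entry_has_proof_tree: bool) -> bool {
        !building_proof_tree || entry_has_proof_tree
    }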
}

enum StepResult<I: Interner> {
    Done(StackEntry<I>, QueryResult<I>),
    HasChanged,
}

impl<I: Interner> SearchGraph<I> {
    /// When we encounter a coinductive cycle, we have to fetch the
    /// result of that cycle while we are still computing it. Because
    /// of this we continuously recompute the cycle until the result
    /// of the previous iteration is equal to the final result, at which
    /// point we are done.
    fn fixpoint_step_in_task<Infcx, F>(
        &mut self,
        tcx: I,
        input: CanonicalInput<I>,
        inspect: &mut ProofTreeBuilder<Infcx>,
        prove_goal: &mut F,
    ) -> StepResult<I>
    where
        Infcx: SolverDelegate<Interner = I>,
        F: FnMut(&mut Self, &mut ProofTreeBuilder<Infcx>) -> QueryResult<I>,
    {
        let result = prove_goal(self, inspect);
        let stack_entry = self.pop_stack();
        debug_assert_eq!(stack_entry.input, input);

        // If the current goal is not the root of a cycle, we are done.
        if stack_entry.has_been_used.is_empty() {
            return StepResult::Done(stack_entry, result);
        }

        // If it is a cycle head, we have to keep trying to prove it until
        // we reach a fixpoint. We need to do so for all cycle heads,
        // not only for the root.
        //
        // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
        // for an example.

        // Start by clearing all provisional cache entries which depend on
        // the current goal.
        Self::clear_dependent_provisional_results(
            &mut self.provisional_cache,
            self.stack.next_index(),
        );

        // Check whether we reached a fixpoint, either because the final result
        // is equal to the provisional result of the previous iteration, or because
        // this was only the root of either coinductive or inductive cycles, and the
        // final result is equal to the initial response for that case.
        let reached_fixpoint = if let Some(r) = stack_entry.provisional_result {
            r == result
        } else if stack_entry.has_been_used == HasBeenUsed::COINDUCTIVE_CYCLE {
            Self::response_no_constraints(tcx, input, Certainty::Yes) == result
        } else if stack_entry.has_been_used == HasBeenUsed::INDUCTIVE_CYCLE {
            Self::response_no_constraints(tcx, input, Certainty::overflow(false)) == result
        } else {
            false
        };

        // If we did not reach a fixpoint, update the provisional result and reevaluate.
        if reached_fixpoint {
            StepResult::Done(stack_entry, result)
        } else {
            let depth = self.stack.push(StackEntry {
                has_been_used: HasBeenUsed::empty(),
                provisional_result: Some(result),
                ..stack_entry
            });
            debug_assert_eq!(self.provisional_cache[&input].stack_depth, Some(depth));
            StepResult::HasChanged
        }
    }
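
    // Sketch (illustrative only) of the fixpoint check above, collapsing the
    // coinductive/inductive distinction into a single `initial_response` value
    // and using plain integers for query results: the step is done when the new
    // result equals the previous provisional result, or, in the very first
    // iteration, the initial response assumed for the cycle.
    fn sketch_reached_fixpoint(
        previous_provisional: Option<i32>,
        initial_response: i32,
        result: i32,
    ) -> bool {
        match previous_provisional {
            Some(previous) => previous == result,
            None => initial_response == result,
        }
    }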

    fn response_no_constraints(
        tcx: I,
        goal: CanonicalInput<I>,
        certainty: Certainty,
    ) -> QueryResult<I> {
        Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
    }

    #[allow(rustc::potential_query_instability)]
    fn check_invariants(&self) {
        if !cfg!(debug_assertions) {
            return;
        }

        let SearchGraph { mode: _, stack, provisional_cache } = self;
        if stack.is_empty() {
            assert!(provisional_cache.is_empty());
        }

        for (depth, entry) in stack.iter_enumerated() {
            let StackEntry {
                input,
                available_depth: _,
                reached_depth: _,
                non_root_cycle_participant,
                encountered_overflow: _,
                has_been_used,
                ref cycle_participants,
                provisional_result,
            } = *entry;
            let cache_entry = provisional_cache.get(&entry.input).unwrap();
            assert_eq!(cache_entry.stack_depth, Some(depth));
            if let Some(head) = non_root_cycle_participant {
                assert!(head < depth);
                assert!(cycle_participants.is_empty());
                assert_ne!(stack[head].has_been_used, HasBeenUsed::empty());

                let mut current_root = head;
                while let Some(parent) = stack[current_root].non_root_cycle_participant {
                    current_root = parent;
                }
                assert!(stack[current_root].cycle_participants.contains(&input));
            }

            if !cycle_participants.is_empty() {
                assert!(provisional_result.is_some() || !has_been_used.is_empty());
                for entry in stack.iter().take(depth.as_usize()) {
                    assert_eq!(cycle_participants.get(&entry.input), None);
                }
            }
        }

        for (&input, entry) in &self.provisional_cache {
            let ProvisionalCacheEntry { stack_depth, with_coinductive_stack, with_inductive_stack } =
                entry;
            assert!(
                stack_depth.is_some()
                    || with_coinductive_stack.is_some()
                    || with_inductive_stack.is_some()
            );

            if let &Some(stack_depth) = stack_depth {
                assert_eq!(stack[stack_depth].input, input);
            }

            let check_detached = |detached_entry: &DetachedEntry<I>| {
                let DetachedEntry { head, result: _ } = *detached_entry;
                assert_ne!(stack[head].has_been_used, HasBeenUsed::empty());
            };

            if let Some(with_coinductive_stack) = with_coinductive_stack {
                check_detached(with_coinductive_stack);
            }

            if let Some(with_inductive_stack) = with_inductive_stack {
                check_detached(with_inductive_stack);
            }
        }
    }
}

1184  compiler/rustc_next_trait_solver/src/solve/trait_goals.rs  Normal file
File diff suppressed because it is too large