1
Fork 0

Auto merge of #95291 - Dylan-DPC:rollup-vrb4wlw, r=Dylan-DPC

Rollup of 5 pull requests

Successful merges:

 - #94391 (Fix ice when error reporting recursion errors)
 - #94655 (Clarify which kinds of MIR are allowed during which phases.)
 - #95179 (Try to evaluate in try unify and postpone resolution of constants that contain inference variables)
 - #95270 (debuginfo: Fix debuginfo for Box<T> where T is unsized.)
 - #95276 (add diagnostic items for clippy's `trim_split_whitespace`)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
This commit is contained in:
bors 2022-03-25 01:20:01 +00:00
commit 661e8beec1
33 changed files with 503 additions and 358 deletions

View file

@ -166,6 +166,13 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
pointee_type: Ty<'tcx>, pointee_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>, unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> { ) -> DINodeCreationResult<'ll> {
// The debuginfo generated by this function is only valid if `ptr_type` is really just
// a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
debug_assert_eq!(
cx.size_and_align_of(ptr_type),
cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
);
let pointee_type_di_node = type_di_node(cx, pointee_type); let pointee_type_di_node = type_di_node(cx, pointee_type);
return_if_di_node_created_in_meantime!(cx, unique_type_id); return_if_di_node_created_in_meantime!(cx, unique_type_id);
@ -212,7 +219,17 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
DIFlags::FlagZero, DIFlags::FlagZero,
), ),
|cx, owner| { |cx, owner| {
let layout = cx.layout_of(ptr_type); // FIXME: If this fat pointer is a `Box` then we don't want to use its
// type layout and instead use the layout of the raw pointer inside
// of it.
// The proper way to handle this is to not treat Box as a pointer
// at all and instead emit regular struct debuginfo for it. We just
// need to make sure that we don't break existing debuginfo consumers
// by doing that (at least not without a warning period).
let layout_type =
if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
let layout = cx.layout_of(layout_type);
let addr_field = layout.field(cx, abi::FAT_PTR_ADDR); let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA); let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);

View file

@ -42,7 +42,7 @@ pub struct PromoteTemps<'tcx> {
impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> { impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
fn phase_change(&self) -> Option<MirPhase> { fn phase_change(&self) -> Option<MirPhase> {
Some(MirPhase::ConstPromotion) Some(MirPhase::ConstsPromoted)
} }
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {

View file

@ -266,22 +266,15 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
); );
} }
} }
// The deaggregator currently does not deaggreagate arrays. Rvalue::Aggregate(agg_kind, _) => {
// So for now, we ignore them here. let disallowed = match **agg_kind {
Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {} AggregateKind::Array(..) => false,
// All other aggregates must be gone after some phases. AggregateKind::Generator(..) => {
Rvalue::Aggregate(box kind, _) => { self.mir_phase >= MirPhase::GeneratorsLowered
if self.mir_phase > MirPhase::DropLowering }
&& !matches!(kind, AggregateKind::Generator(..)) _ => self.mir_phase >= MirPhase::Deaggregated,
{ };
// Generators persist until the state machine transformation, but all if disallowed {
// other aggregates must have been lowered.
self.fail(
location,
format!("{:?} have been lowered to field assignments", rvalue),
)
} else if self.mir_phase > MirPhase::GeneratorLowering {
// No more aggregates after drop and generator lowering.
self.fail( self.fail(
location, location,
format!("{:?} have been lowered to field assignments", rvalue), format!("{:?} have been lowered to field assignments", rvalue),
@ -289,7 +282,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
Rvalue::Ref(_, BorrowKind::Shallow, _) => { Rvalue::Ref(_, BorrowKind::Shallow, _) => {
if self.mir_phase > MirPhase::DropLowering { if self.mir_phase >= MirPhase::DropsLowered {
self.fail( self.fail(
location, location,
"`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase", "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
@ -300,7 +293,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
StatementKind::AscribeUserType(..) => { StatementKind::AscribeUserType(..) => {
if self.mir_phase > MirPhase::DropLowering { if self.mir_phase >= MirPhase::DropsLowered {
self.fail( self.fail(
location, location,
"`AscribeUserType` should have been removed after drop lowering phase", "`AscribeUserType` should have been removed after drop lowering phase",
@ -308,7 +301,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
StatementKind::FakeRead(..) => { StatementKind::FakeRead(..) => {
if self.mir_phase > MirPhase::DropLowering { if self.mir_phase >= MirPhase::DropsLowered {
self.fail( self.fail(
location, location,
"`FakeRead` should have been removed after drop lowering phase", "`FakeRead` should have been removed after drop lowering phase",
@ -351,10 +344,18 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty)) self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
} }
} }
StatementKind::SetDiscriminant { .. } StatementKind::SetDiscriminant { .. } => {
| StatementKind::StorageLive(..) if self.mir_phase < MirPhase::DropsLowered {
self.fail(location, "`SetDiscriminant` is not allowed until drop elaboration");
}
}
StatementKind::Retag(_, _) => {
// FIXME(JakobDegen) The validator should check that `self.mir_phase <
// DropsLowered`. However, this causes ICEs with generation of drop shims, which
// seem to fail to set their `MirPhase` correctly.
}
StatementKind::StorageLive(..)
| StatementKind::StorageDead(..) | StatementKind::StorageDead(..)
| StatementKind::Retag(_, _)
| StatementKind::Coverage(_) | StatementKind::Coverage(_)
| StatementKind::Nop => {} | StatementKind::Nop => {}
} }
@ -424,10 +425,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
TerminatorKind::DropAndReplace { target, unwind, .. } => { TerminatorKind::DropAndReplace { target, unwind, .. } => {
if self.mir_phase > MirPhase::DropLowering { if self.mir_phase >= MirPhase::DropsLowered {
self.fail( self.fail(
location, location,
"`DropAndReplace` is not permitted to exist after drop elaboration", "`DropAndReplace` should have been removed during drop elaboration",
); );
} }
self.check_edge(location, *target, EdgeKind::Normal); self.check_edge(location, *target, EdgeKind::Normal);
@ -494,7 +495,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
TerminatorKind::Yield { resume, drop, .. } => { TerminatorKind::Yield { resume, drop, .. } => {
if self.mir_phase > MirPhase::GeneratorLowering { if self.mir_phase >= MirPhase::GeneratorsLowered {
self.fail(location, "`Yield` should have been replaced by generator lowering"); self.fail(location, "`Yield` should have been replaced by generator lowering");
} }
self.check_edge(location, *resume, EdgeKind::Normal); self.check_edge(location, *resume, EdgeKind::Normal);
@ -503,10 +504,22 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
} }
} }
TerminatorKind::FalseEdge { real_target, imaginary_target } => { TerminatorKind::FalseEdge { real_target, imaginary_target } => {
if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
"`FalseEdge` should have been removed after drop elaboration",
);
}
self.check_edge(location, *real_target, EdgeKind::Normal); self.check_edge(location, *real_target, EdgeKind::Normal);
self.check_edge(location, *imaginary_target, EdgeKind::Normal); self.check_edge(location, *imaginary_target, EdgeKind::Normal);
} }
TerminatorKind::FalseUnwind { real_target, unwind } => { TerminatorKind::FalseUnwind { real_target, unwind } => {
if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
"`FalseUnwind` should have been removed after drop elaboration",
);
}
self.check_edge(location, *real_target, EdgeKind::Normal); self.check_edge(location, *real_target, EdgeKind::Normal);
if let Some(unwind) = unwind { if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind); self.check_edge(location, *unwind, EdgeKind::Unwind);
@ -520,12 +533,19 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.check_edge(location, *cleanup, EdgeKind::Unwind); self.check_edge(location, *cleanup, EdgeKind::Unwind);
} }
} }
TerminatorKind::GeneratorDrop => {
if self.mir_phase >= MirPhase::GeneratorsLowered {
self.fail(
location,
"`GeneratorDrop` should have been replaced by generator lowering",
);
}
}
// Nothing to validate for these. // Nothing to validate for these.
TerminatorKind::Resume TerminatorKind::Resume
| TerminatorKind::Abort | TerminatorKind::Abort
| TerminatorKind::Return | TerminatorKind::Return
| TerminatorKind::Unreachable | TerminatorKind::Unreachable => {}
| TerminatorKind::GeneratorDrop => {}
} }
self.super_terminator(terminator, location); self.super_terminator(terminator, location);

View file

@ -20,8 +20,7 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues}; use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue}; use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType}; use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType};
use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::mir::interpret::{ErrorHandled, EvalToConstValueResult};
use rustc_middle::mir::interpret::EvalToConstValueResult;
use rustc_middle::traits::select; use rustc_middle::traits::select;
use rustc_middle::ty::error::{ExpectedFound, TypeError}; use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder}; use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
@ -71,7 +70,6 @@ mod sub;
pub mod type_variable; pub mod type_variable;
mod undo_log; mod undo_log;
use crate::infer::canonical::OriginalQueryValues;
pub use rustc_middle::infer::unify_key; pub use rustc_middle::infer::unify_key;
#[must_use] #[must_use]
@ -687,15 +685,28 @@ pub struct CombinedSnapshot<'a, 'tcx> {
impl<'a, 'tcx> InferCtxt<'a, 'tcx> { impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// calls `tcx.try_unify_abstract_consts` after /// calls `tcx.try_unify_abstract_consts` after
/// canonicalizing the consts. /// canonicalizing the consts.
#[instrument(skip(self), level = "debug")]
pub fn try_unify_abstract_consts( pub fn try_unify_abstract_consts(
&self, &self,
a: ty::Unevaluated<'tcx, ()>, a: ty::Unevaluated<'tcx, ()>,
b: ty::Unevaluated<'tcx, ()>, b: ty::Unevaluated<'tcx, ()>,
param_env: ty::ParamEnv<'tcx>,
) -> bool { ) -> bool {
let canonical = self.canonicalize_query((a, b), &mut OriginalQueryValues::default()); // Reject any attempt to unify two unevaluated constants that contain inference
debug!("canonical consts: {:?}", &canonical.value); // variables, since inference variables in queries lead to ICEs.
if a.substs.has_infer_types_or_consts()
|| b.substs.has_infer_types_or_consts()
|| param_env.has_infer_types_or_consts()
{
debug!("a or b or param_env contain infer vars in its substs -> cannot unify");
return false;
}
self.tcx.try_unify_abstract_consts(canonical.value) let param_env_and = param_env.and((a, b));
let erased = self.tcx.erase_regions(param_env_and);
debug!("after erase_regions: {:?}", erased);
self.tcx.try_unify_abstract_consts(erased)
} }
pub fn is_in_snapshot(&self) -> bool { pub fn is_in_snapshot(&self) -> bool {
@ -1598,6 +1609,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// ///
/// This handles inferences variables within both `param_env` and `substs` by /// This handles inferences variables within both `param_env` and `substs` by
/// performing the operation on their respective canonical forms. /// performing the operation on their respective canonical forms.
#[instrument(skip(self), level = "debug")]
pub fn const_eval_resolve( pub fn const_eval_resolve(
&self, &self,
param_env: ty::ParamEnv<'tcx>, param_env: ty::ParamEnv<'tcx>,
@ -1605,15 +1617,19 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
span: Option<Span>, span: Option<Span>,
) -> EvalToConstValueResult<'tcx> { ) -> EvalToConstValueResult<'tcx> {
let substs = self.resolve_vars_if_possible(unevaluated.substs); let substs = self.resolve_vars_if_possible(unevaluated.substs);
debug!(?substs);
// Postpone the evaluation of constants whose substs depend on inference // Postpone the evaluation of constants whose substs depend on inference
// variables // variables
if substs.has_infer_types_or_consts() { if substs.has_infer_types_or_consts() {
debug!("substs have infer types or consts: {:?}", substs);
return Err(ErrorHandled::TooGeneric); return Err(ErrorHandled::TooGeneric);
} }
let param_env_erased = self.tcx.erase_regions(param_env); let param_env_erased = self.tcx.erase_regions(param_env);
let substs_erased = self.tcx.erase_regions(substs); let substs_erased = self.tcx.erase_regions(substs);
debug!(?param_env_erased);
debug!(?substs_erased);
let unevaluated = ty::Unevaluated { let unevaluated = ty::Unevaluated {
def: unevaluated.def, def: unevaluated.def,

View file

@ -1,6 +1,7 @@
use super::{ErrorHandled, EvalToConstValueResult, GlobalId}; use super::{ErrorHandled, EvalToConstValueResult, GlobalId};
use crate::mir; use crate::mir;
use crate::ty::fold::TypeFoldable;
use crate::ty::subst::InternalSubsts; use crate::ty::subst::InternalSubsts;
use crate::ty::{self, TyCtxt}; use crate::ty::{self, TyCtxt};
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
@ -38,6 +39,16 @@ impl<'tcx> TyCtxt<'tcx> {
ct: ty::Unevaluated<'tcx>, ct: ty::Unevaluated<'tcx>,
span: Option<Span>, span: Option<Span>,
) -> EvalToConstValueResult<'tcx> { ) -> EvalToConstValueResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
// variables. We reject those here since `resolve_opt_const_arg`
// would fail otherwise.
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
if ct.substs.has_infer_types_or_consts() {
bug!("did not expect inference variables here");
}
match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) { match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
Ok(Some(instance)) => { Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: ct.promoted }; let cid = GlobalId { instance, promoted: ct.promoted };

View file

@ -127,14 +127,11 @@ pub trait MirPass<'tcx> {
/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the /// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
/// dialects forbid certain variants or values in certain phases. /// dialects forbid certain variants or values in certain phases.
/// ///
/// Note: Each phase's validation checks all invariants of the *previous* phases' dialects. A phase
/// that changes the dialect documents what invariants must be upheld *after* that phase finishes.
///
/// Warning: ordering of variants is significant. /// Warning: ordering of variants is significant.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)] #[derive(HashStable)]
pub enum MirPhase { pub enum MirPhase {
Build = 0, Built = 0,
// FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query). // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
// We used to have this for pre-miri MIR based const eval. // We used to have this for pre-miri MIR based const eval.
Const = 1, Const = 1,
@ -142,17 +139,32 @@ pub enum MirPhase {
/// by creating a new MIR body per promoted element. After this phase (and thus the termination /// by creating a new MIR body per promoted element. After this phase (and thus the termination
/// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir` /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
/// query. /// query.
ConstPromotion = 2, ConstsPromoted = 2,
/// After this phase /// Beginning with this phase, the following variants are disallowed:
/// * the only `AggregateKind`s allowed are `Array` and `Generator`, /// * [`TerminatorKind::DropAndReplace`](terminator::TerminatorKind::DropAndReplace)
/// * `DropAndReplace` is gone for good /// * [`TerminatorKind::FalseUnwind`](terminator::TerminatorKind::FalseUnwind)
/// * `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop` terminator /// * [`TerminatorKind::FalseEdge`](terminator::TerminatorKind::FalseEdge)
/// means that the auto-generated drop glue will be invoked. /// * [`StatementKind::FakeRead`]
DropLowering = 3, /// * [`StatementKind::AscribeUserType`]
/// After this phase, generators are explicit state machines (no more `Yield`). /// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
/// `AggregateKind::Generator` is gone for good. ///
GeneratorLowering = 4, /// And the following variant is allowed:
Optimization = 5, /// * [`StatementKind::Retag`]
///
/// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
/// terminator means that the auto-generated drop glue will be invoked.
DropsLowered = 3,
/// Beginning with this phase, the following variant is disallowed:
/// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
///
/// And the following variant is allowed:
/// * [`StatementKind::SetDiscriminant`]
Deaggregated = 4,
/// Beginning with this phase, the following variants are disallowed:
/// * [`TerminatorKind::Yield`](terminator::TerminatorKind::Yield)
/// * [`TerminatorKind::GeneratorDrop](terminator::TerminatorKind::GeneratorDrop)
GeneratorsLowered = 5,
Optimized = 6,
} }
impl MirPhase { impl MirPhase {
@ -311,7 +323,7 @@ impl<'tcx> Body<'tcx> {
); );
let mut body = Body { let mut body = Body {
phase: MirPhase::Build, phase: MirPhase::Built,
source, source,
basic_blocks, basic_blocks,
source_scopes, source_scopes,
@ -346,7 +358,7 @@ impl<'tcx> Body<'tcx> {
/// crate. /// crate.
pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self { pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
let mut body = Body { let mut body = Body {
phase: MirPhase::Build, phase: MirPhase::Built,
source: MirSource::item(DefId::local(CRATE_DEF_INDEX)), source: MirSource::item(DefId::local(CRATE_DEF_INDEX)),
basic_blocks, basic_blocks,
source_scopes: IndexVec::new(), source_scopes: IndexVec::new(),
@ -1541,9 +1553,16 @@ impl Statement<'_> {
} }
} }
/// The various kinds of statements that can appear in MIR.
///
/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
/// causing an ICE if they are violated.
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)] #[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub enum StatementKind<'tcx> { pub enum StatementKind<'tcx> {
/// Write the RHS Rvalue to the LHS Place. /// Write the RHS Rvalue to the LHS Place.
///
/// The LHS place may not overlap with any memory accessed on the RHS.
Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>), Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
/// This represents all the reading that a pattern match may do /// This represents all the reading that a pattern match may do
@ -1761,6 +1780,19 @@ static_assert_size!(Place<'_>, 16);
pub enum ProjectionElem<V, T> { pub enum ProjectionElem<V, T> {
Deref, Deref,
Field(Field, T), Field(Field, T),
/// Index into a slice/array.
///
/// Note that this does not also dereference, and so it does not exactly correspond to slice
/// indexing in Rust. In other words, in the below Rust code:
///
/// ```rust
/// let x = &[1, 2, 3, 4];
/// let i = 2;
/// x[i];
/// ```
///
/// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
/// thing is true of the `ConstantIndex` and `Subslice` projections below.
Index(V), Index(V),
/// These indices are generated by slice patterns. Easiest to explain /// These indices are generated by slice patterns. Easiest to explain
@ -2223,6 +2255,11 @@ impl<'tcx> Operand<'tcx> {
/// Rvalues /// Rvalues
#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)] #[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
/// The various kinds of rvalues that can appear in MIR.
///
/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
/// causing an ICE if they are violated.
pub enum Rvalue<'tcx> { pub enum Rvalue<'tcx> {
/// x (either a move or copy, depending on type of x) /// x (either a move or copy, depending on type of x)
Use(Operand<'tcx>), Use(Operand<'tcx>),

View file

@ -331,12 +331,12 @@ rustc_queries! {
} }
} }
query try_unify_abstract_consts(key: ( query try_unify_abstract_consts(key:
ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()> ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
)) -> bool { )>) -> bool {
desc { desc {
|tcx| "trying to unify the generic constants {} and {}", |tcx| "trying to unify the generic constants {} and {}",
tcx.def_path_str(key.0.def.did), tcx.def_path_str(key.1.def.did) tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
} }
} }

View file

@ -491,7 +491,7 @@ pub enum SelectionError<'tcx> {
/// A given constant couldn't be evaluated. /// A given constant couldn't be evaluated.
NotConstEvaluatable(NotConstEvaluatable), NotConstEvaluatable(NotConstEvaluatable),
/// Exceeded the recursion depth during type projection. /// Exceeded the recursion depth during type projection.
Overflow, Overflow(OverflowError),
/// Signaling that an error has already been emitted, to avoid /// Signaling that an error has already been emitted, to avoid
/// multiple errors being shown. /// multiple errors being shown.
ErrorReporting, ErrorReporting,

View file

@ -5,6 +5,7 @@
use self::EvaluationResult::*; use self::EvaluationResult::*;
use super::{SelectionError, SelectionResult}; use super::{SelectionError, SelectionResult};
use rustc_errors::ErrorGuaranteed;
use crate::ty; use crate::ty;
@ -264,14 +265,26 @@ impl EvaluationResult {
/// Indicates that trait evaluation caused overflow and in which pass. /// Indicates that trait evaluation caused overflow and in which pass.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)] #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
pub enum OverflowError { pub enum OverflowError {
Error(ErrorGuaranteed),
Canonical, Canonical,
ErrorReporting, ErrorReporting,
} }
impl From<ErrorGuaranteed> for OverflowError {
fn from(e: ErrorGuaranteed) -> OverflowError {
OverflowError::Error(e)
}
}
TrivialTypeFoldableAndLiftImpls! {
OverflowError,
}
impl<'tcx> From<OverflowError> for SelectionError<'tcx> { impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(overflow_error: OverflowError) -> SelectionError<'tcx> { fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
match overflow_error { match overflow_error {
OverflowError::Canonical => SelectionError::Overflow, OverflowError::Error(e) => SelectionError::Overflow(OverflowError::Error(e)),
OverflowError::Canonical => SelectionError::Overflow(OverflowError::Canonical),
OverflowError::ErrorReporting => SelectionError::ErrorReporting, OverflowError::ErrorReporting => SelectionError::ErrorReporting,
} }
} }

View file

@ -585,7 +585,7 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
(ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
if tcx.features().generic_const_exprs => if tcx.features().generic_const_exprs =>
{ {
tcx.try_unify_abstract_consts((au.shrink(), bu.shrink())) tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink())))
} }
// While this is slightly incorrect, it shouldn't matter for `min_const_generics` // While this is slightly incorrect, it shouldn't matter for `min_const_generics`

View file

@ -6,6 +6,10 @@ use rustc_middle::ty::TyCtxt;
pub struct Deaggregator; pub struct Deaggregator;
impl<'tcx> MirPass<'tcx> for Deaggregator { impl<'tcx> MirPass<'tcx> for Deaggregator {
fn phase_change(&self) -> Option<MirPhase> {
Some(MirPhase::Deaggregated)
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut(); let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let local_decls = &*local_decls; let local_decls = &*local_decls;

View file

@ -20,7 +20,7 @@ pub struct ElaborateDrops;
impl<'tcx> MirPass<'tcx> for ElaborateDrops { impl<'tcx> MirPass<'tcx> for ElaborateDrops {
fn phase_change(&self) -> Option<MirPhase> { fn phase_change(&self) -> Option<MirPhase> {
Some(MirPhase::DropLowering) Some(MirPhase::DropsLowered)
} }
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {

View file

@ -1235,7 +1235,7 @@ fn create_cases<'tcx>(
impl<'tcx> MirPass<'tcx> for StateTransform { impl<'tcx> MirPass<'tcx> for StateTransform {
fn phase_change(&self) -> Option<MirPhase> { fn phase_change(&self) -> Option<MirPhase> {
Some(MirPhase::GeneratorLowering) Some(MirPhase::GeneratorsLowered)
} }
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {

View file

@ -342,7 +342,7 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -
pm::run_passes( pm::run_passes(
tcx, tcx,
&mut body, &mut body,
&[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimization)], &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimized)],
); );
} }
} }
@ -399,7 +399,7 @@ fn mir_drops_elaborated_and_const_checked<'tcx>(
} }
run_post_borrowck_cleanup_passes(tcx, &mut body); run_post_borrowck_cleanup_passes(tcx, &mut body);
assert!(body.phase == MirPhase::DropLowering); assert!(body.phase == MirPhase::Deaggregated);
tcx.alloc_steal_mir(body) tcx.alloc_steal_mir(body)
} }
@ -460,7 +460,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
], ],
); );
assert!(body.phase == MirPhase::GeneratorLowering); assert!(body.phase == MirPhase::GeneratorsLowered);
// The main optimizations that we do on MIR. // The main optimizations that we do on MIR.
pm::run_passes( pm::run_passes(
@ -497,7 +497,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&deduplicate_blocks::DeduplicateBlocks, &deduplicate_blocks::DeduplicateBlocks,
// Some cleanup necessary at least for LLVM and potentially other codegen backends. // Some cleanup necessary at least for LLVM and potentially other codegen backends.
&add_call_guards::CriticalCallEdges, &add_call_guards::CriticalCallEdges,
&marker::PhaseChange(MirPhase::Optimization), &marker::PhaseChange(MirPhase::Optimized),
// Dump the end result for testing and debugging purposes. // Dump the end result for testing and debugging purposes.
&dump_mir::Marker("PreCodegen"), &dump_mir::Marker("PreCodegen"),
], ],

View file

@ -114,7 +114,7 @@ pub fn run_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn
} }
} }
if validate || body.phase == MirPhase::Optimization { if validate || body.phase == MirPhase::Optimized {
validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase)); validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase));
} }
} }

View file

@ -1347,6 +1347,10 @@ symbols! {
store, store,
str, str,
str_alloc, str_alloc,
str_split_whitespace,
str_trim,
str_trim_end,
str_trim_start,
stringify, stringify,
stringify_macro, stringify_macro,
struct_field_attributes, struct_field_attributes,

View file

@ -188,6 +188,7 @@ pub fn is_const_evaluatable<'cx, 'tcx>(
} }
} }
#[instrument(skip(tcx), level = "debug")]
fn satisfied_from_param_env<'tcx>( fn satisfied_from_param_env<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
ct: AbstractConst<'tcx>, ct: AbstractConst<'tcx>,
@ -197,14 +198,17 @@ fn satisfied_from_param_env<'tcx>(
match pred.kind().skip_binder() { match pred.kind().skip_binder() {
ty::PredicateKind::ConstEvaluatable(uv) => { ty::PredicateKind::ConstEvaluatable(uv) => {
if let Some(b_ct) = AbstractConst::new(tcx, uv)? { if let Some(b_ct) = AbstractConst::new(tcx, uv)? {
let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
// Try to unify with each subtree in the AbstractConst to allow for // Try to unify with each subtree in the AbstractConst to allow for
// `N + 1` being const evaluatable even if theres only a `ConstEvaluatable` // `N + 1` being const evaluatable even if theres only a `ConstEvaluatable`
// predicate for `(N + 1) * 2` // predicate for `(N + 1) * 2`
let result = let result = walk_abstract_const(tcx, b_ct, |b_ct| {
walk_abstract_const(tcx, b_ct, |b_ct| match try_unify(tcx, ct, b_ct) { match const_unify_ctxt.try_unify(ct, b_ct) {
true => ControlFlow::BREAK, true => ControlFlow::BREAK,
false => ControlFlow::CONTINUE, false => ControlFlow::CONTINUE,
}); }
});
if let ControlFlow::Break(()) = result { if let ControlFlow::Break(()) = result {
debug!("is_const_evaluatable: abstract_const ~~> ok"); debug!("is_const_evaluatable: abstract_const ~~> ok");
@ -637,11 +641,13 @@ pub(super) fn thir_abstract_const<'tcx>(
pub(super) fn try_unify_abstract_consts<'tcx>( pub(super) fn try_unify_abstract_consts<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
(a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>), (a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>),
param_env: ty::ParamEnv<'tcx>,
) -> bool { ) -> bool {
(|| { (|| {
if let Some(a) = AbstractConst::new(tcx, a)? { if let Some(a) = AbstractConst::new(tcx, a)? {
if let Some(b) = AbstractConst::new(tcx, b)? { if let Some(b) = AbstractConst::new(tcx, b)? {
return Ok(try_unify(tcx, a, b)); let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
return Ok(const_unify_ctxt.try_unify(a, b));
} }
} }
@ -689,88 +695,115 @@ where
recurse(tcx, ct, &mut f) recurse(tcx, ct, &mut f)
} }
/// Tries to unify two abstract constants using structural equality. struct ConstUnifyCtxt<'tcx> {
pub(super) fn try_unify<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
mut a: AbstractConst<'tcx>, param_env: ty::ParamEnv<'tcx>,
mut b: AbstractConst<'tcx>, }
) -> bool {
// We substitute generics repeatedly to allow AbstractConsts to unify where a impl<'tcx> ConstUnifyCtxt<'tcx> {
// Substitutes generics repeatedly to allow AbstractConsts to unify where a
// ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g. // ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g.
// Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])] // Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])]
while let Node::Leaf(a_ct) = a.root(tcx) { #[inline]
match AbstractConst::from_const(tcx, a_ct) { #[instrument(skip(self), level = "debug")]
Ok(Some(a_act)) => a = a_act, fn try_replace_substs_in_root(
Ok(None) => break, &self,
Err(_) => return true, mut abstr_const: AbstractConst<'tcx>,
} ) -> Option<AbstractConst<'tcx>> {
} while let Node::Leaf(ct) = abstr_const.root(self.tcx) {
while let Node::Leaf(b_ct) = b.root(tcx) { match AbstractConst::from_const(self.tcx, ct) {
match AbstractConst::from_const(tcx, b_ct) { Ok(Some(act)) => abstr_const = act,
Ok(Some(b_act)) => b = b_act, Ok(None) => break,
Ok(None) => break, Err(_) => return None,
Err(_) => return true,
}
}
match (a.root(tcx), b.root(tcx)) {
(Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
if a_ct.ty() != b_ct.ty() {
return false;
}
match (a_ct.val(), b_ct.val()) {
// We can just unify errors with everything to reduce the amount of
// emitted errors here.
(ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
(ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
a_param == b_param
}
(ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
// If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
// we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
// means that we only allow inference variables if they are equal.
(ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
// We expand generic anonymous constants at the start of this function, so this
// branch should only be taken when dealing with associated constants, at
// which point directly comparing them seems like the desired behavior.
//
// FIXME(generic_const_exprs): This isn't actually the case.
// We also take this branch for concrete anonymous constants and
// expand generic anonymous constants with concrete substs.
(ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
a_uv == b_uv
}
// FIXME(generic_const_exprs): We may want to either actually try
// to evaluate `a_ct` and `b_ct` if they are fully concrete or something like
// this, for now we just return false here.
_ => false,
} }
} }
(Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
try_unify(tcx, a.subtree(al), b.subtree(bl)) Some(abstr_const)
&& try_unify(tcx, a.subtree(ar), b.subtree(br)) }
/// Tries to unify two abstract constants using structural equality.
#[instrument(skip(self), level = "debug")]
fn try_unify(&self, a: AbstractConst<'tcx>, b: AbstractConst<'tcx>) -> bool {
let a = if let Some(a) = self.try_replace_substs_in_root(a) {
a
} else {
return true;
};
let b = if let Some(b) = self.try_replace_substs_in_root(b) {
b
} else {
return true;
};
let a_root = a.root(self.tcx);
let b_root = b.root(self.tcx);
debug!(?a_root, ?b_root);
match (a_root, b_root) {
(Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
let a_ct = a_ct.eval(self.tcx, self.param_env);
debug!("a_ct evaluated: {:?}", a_ct);
let b_ct = b_ct.eval(self.tcx, self.param_env);
debug!("b_ct evaluated: {:?}", b_ct);
if a_ct.ty() != b_ct.ty() {
return false;
}
match (a_ct.val(), b_ct.val()) {
// We can just unify errors with everything to reduce the amount of
// emitted errors here.
(ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
(ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
a_param == b_param
}
(ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
// If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
// we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
// means that we only allow inference variables if they are equal.
(ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
// We expand generic anonymous constants at the start of this function, so this
// branch should only be taken when dealing with associated constants, at
// which point directly comparing them seems like the desired behavior.
//
// FIXME(generic_const_exprs): This isn't actually the case.
// We also take this branch for concrete anonymous constants and
// expand generic anonymous constants with concrete substs.
(ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
a_uv == b_uv
}
// FIXME(generic_const_exprs): We may want to either actually try
// to evaluate `a_ct` and `b_ct` if they are fully concrete or something like
// this, for now we just return false here.
_ => false,
}
}
(Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
self.try_unify(a.subtree(al), b.subtree(bl))
&& self.try_unify(a.subtree(ar), b.subtree(br))
}
(Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
self.try_unify(a.subtree(av), b.subtree(bv))
}
(Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
if a_args.len() == b_args.len() =>
{
self.try_unify(a.subtree(a_f), b.subtree(b_f))
&& iter::zip(a_args, b_args)
.all(|(&an, &bn)| self.try_unify(a.subtree(an), b.subtree(bn)))
}
(Node::Cast(a_kind, a_operand, a_ty), Node::Cast(b_kind, b_operand, b_ty))
if (a_ty == b_ty) && (a_kind == b_kind) =>
{
self.try_unify(a.subtree(a_operand), b.subtree(b_operand))
}
// use this over `_ => false` to make adding variants to `Node` less error prone
(Node::Cast(..), _)
| (Node::FunctionCall(..), _)
| (Node::UnaryOp(..), _)
| (Node::Binop(..), _)
| (Node::Leaf(..), _) => false,
} }
(Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
try_unify(tcx, a.subtree(av), b.subtree(bv))
}
(Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
if a_args.len() == b_args.len() =>
{
try_unify(tcx, a.subtree(a_f), b.subtree(b_f))
&& iter::zip(a_args, b_args)
.all(|(&an, &bn)| try_unify(tcx, a.subtree(an), b.subtree(bn)))
}
(Node::Cast(a_kind, a_operand, a_ty), Node::Cast(b_kind, b_operand, b_ty))
if (a_ty == b_ty) && (a_kind == b_kind) =>
{
try_unify(tcx, a.subtree(a_operand), b.subtree(b_operand))
}
// use this over `_ => false` to make adding variants to `Node` less error prone
(Node::Cast(..), _)
| (Node::FunctionCall(..), _)
| (Node::UnaryOp(..), _)
| (Node::Binop(..), _)
| (Node::Leaf(..), _) => false,
} }
} }

View file

@ -22,6 +22,7 @@ use rustc_hir::GenericParam;
use rustc_hir::Item; use rustc_hir::Item;
use rustc_hir::Node; use rustc_hir::Node;
use rustc_middle::thir::abstract_const::NotConstEvaluatable; use rustc_middle::thir::abstract_const::NotConstEvaluatable;
use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::error::ExpectedFound; use rustc_middle::ty::error::ExpectedFound;
use rustc_middle::ty::fold::TypeFolder; use rustc_middle::ty::fold::TypeFolder;
use rustc_middle::ty::{ use rustc_middle::ty::{
@ -928,8 +929,12 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
self.tcx.sess.delay_span_bug(span, "`ErrorGuaranteed` without an error"); self.tcx.sess.delay_span_bug(span, "`ErrorGuaranteed` without an error");
return; return;
} }
// Already reported.
Overflow => { Overflow(OverflowError::Error(_)) => {
self.tcx.sess.delay_span_bug(span, "`OverflowError` has been reported");
return;
}
Overflow(_) => {
bug!("overflow should be handled before the `report_selection_error` path"); bug!("overflow should be handled before the `report_selection_error` path");
} }
SelectionError::ErrorReporting => { SelectionError::ErrorReporting => {

View file

@ -580,7 +580,11 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) = if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
(c1.val(), c2.val()) (c1.val(), c2.val())
{ {
if infcx.try_unify_abstract_consts(a.shrink(), b.shrink()) { if infcx.try_unify_abstract_consts(
a.shrink(),
b.shrink(),
obligation.param_env,
) {
return ProcessResult::Changed(vec![]); return ProcessResult::Changed(vec![]);
} }
} }

View file

@ -862,7 +862,10 @@ pub fn provide(providers: &mut ty::query::Providers) {
ty::WithOptConstParam { did, const_param_did: Some(param_did) }, ty::WithOptConstParam { did, const_param_did: Some(param_did) },
) )
}, },
try_unify_abstract_consts: const_evaluatable::try_unify_abstract_consts, try_unify_abstract_consts: |tcx, param_env_and| {
let (param_env, (a, b)) = param_env_and.into_parts();
const_evaluatable::try_unify_abstract_consts(tcx, (a, b), param_env)
},
..*providers ..*providers
}; };
} }

View file

@ -27,6 +27,7 @@ use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem; use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::resolve::OpportunisticRegionResolver; use rustc_infer::infer::resolve::OpportunisticRegionResolver;
use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder}; use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
use rustc_middle::ty::subst::Subst; use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, Term, ToPredicate, Ty, TyCtxt}; use rustc_middle::ty::{self, Term, ToPredicate, Ty, TyCtxt};
@ -1139,7 +1140,9 @@ fn project<'cx, 'tcx>(
if !selcx.tcx().recursion_limit().value_within_limit(obligation.recursion_depth) { if !selcx.tcx().recursion_limit().value_within_limit(obligation.recursion_depth) {
// This should really be an immediate error, but some existing code // This should really be an immediate error, but some existing code
// relies on being able to recover from this. // relies on being able to recover from this.
return Err(ProjectionError::TraitSelectionError(SelectionError::Overflow)); return Err(ProjectionError::TraitSelectionError(SelectionError::Overflow(
OverflowError::Canonical,
)));
} }
if obligation.predicate.references_error() { if obligation.predicate.references_error() {

View file

@ -108,9 +108,11 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
) )
} }
OverflowError::ErrorReporting => EvaluationResult::EvaluatedToErr, OverflowError::ErrorReporting => EvaluationResult::EvaluatedToErr,
OverflowError::Error(_) => EvaluationResult::EvaluatedToErr,
}) })
} }
Err(OverflowError::ErrorReporting) => EvaluationResult::EvaluatedToErr, Err(OverflowError::ErrorReporting) => EvaluationResult::EvaluatedToErr,
Err(OverflowError::Error(_)) => EvaluationResult::EvaluatedToErr,
} }
} }
} }

View file

@ -164,8 +164,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval })) Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
} }
Ok(_) => Ok(None), Ok(_) => Ok(None),
Err(OverflowError::Canonical) => Err(Overflow), Err(OverflowError::Canonical) => Err(Overflow(OverflowError::Canonical)),
Err(OverflowError::ErrorReporting) => Err(ErrorReporting), Err(OverflowError::ErrorReporting) => Err(ErrorReporting),
Err(OverflowError::Error(e)) => Err(Overflow(OverflowError::Error(e))),
}) })
.flat_map(Result::transpose) .flat_map(Result::transpose)
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;

View file

@ -25,7 +25,7 @@ use crate::traits::project::ProjectionCacheKeyExt;
use crate::traits::ProjectionCacheKey; use crate::traits::ProjectionCacheKey;
use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::Diagnostic; use rustc_errors::{Diagnostic, ErrorGuaranteed};
use rustc_hir as hir; use rustc_hir as hir;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_infer::infer::LateBoundRegionConversionTime; use rustc_infer::infer::LateBoundRegionConversionTime;
@ -316,11 +316,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation: &TraitObligation<'tcx>, obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> { ) -> SelectionResult<'tcx, Selection<'tcx>> {
let candidate = match self.select_from_obligation(obligation) { let candidate = match self.select_from_obligation(obligation) {
Err(SelectionError::Overflow) => { Err(SelectionError::Overflow(OverflowError::Canonical)) => {
// In standard mode, overflow must have been caught and reported // In standard mode, overflow must have been caught and reported
// earlier. // earlier.
assert!(self.query_mode == TraitQueryMode::Canonical); assert!(self.query_mode == TraitQueryMode::Canonical);
return Err(SelectionError::Overflow); return Err(SelectionError::Overflow(OverflowError::Canonical));
} }
Err(SelectionError::Ambiguous(_)) => { Err(SelectionError::Ambiguous(_)) => {
return Ok(None); return Ok(None);
@ -335,9 +335,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}; };
match self.confirm_candidate(obligation, candidate) { match self.confirm_candidate(obligation, candidate) {
Err(SelectionError::Overflow) => { Err(SelectionError::Overflow(OverflowError::Canonical)) => {
assert!(self.query_mode == TraitQueryMode::Canonical); assert!(self.query_mode == TraitQueryMode::Canonical);
Err(SelectionError::Overflow) Err(SelectionError::Overflow(OverflowError::Canonical))
} }
Err(e) => Err(e), Err(e) => Err(e),
Ok(candidate) => { Ok(candidate) => {
@ -639,7 +639,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) = if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
(c1.val(), c2.val()) (c1.val(), c2.val())
{ {
if self.infcx.try_unify_abstract_consts(a.shrink(), b.shrink()) { if self.infcx.try_unify_abstract_consts(
a.shrink(),
b.shrink(),
obligation.param_env,
) {
return Ok(EvaluatedToOk); return Ok(EvaluatedToOk);
} }
} }
@ -954,7 +958,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(Some(c)) => self.evaluate_candidate(stack, &c), Ok(Some(c)) => self.evaluate_candidate(stack, &c),
Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig), Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig),
Ok(None) => Ok(EvaluatedToAmbig), Ok(None) => Ok(EvaluatedToAmbig),
Err(Overflow) => Err(OverflowError::Canonical), Err(Overflow(OverflowError::Canonical)) => Err(OverflowError::Canonical),
Err(ErrorReporting) => Err(OverflowError::ErrorReporting), Err(ErrorReporting) => Err(OverflowError::ErrorReporting),
Err(..) => Ok(EvaluatedToErr), Err(..) => Ok(EvaluatedToErr),
} }
@ -1113,7 +1117,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
match self.query_mode { match self.query_mode {
TraitQueryMode::Standard => { TraitQueryMode::Standard => {
if self.infcx.is_tainted_by_errors() { if self.infcx.is_tainted_by_errors() {
return Err(OverflowError::ErrorReporting); return Err(OverflowError::Error(
ErrorGuaranteed::unchecked_claim_error_was_emitted(),
));
} }
self.infcx.report_overflow_error(error_obligation, true); self.infcx.report_overflow_error(error_obligation, true);
} }
@ -1349,7 +1355,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
} }
if self.can_use_global_caches(param_env) { if self.can_use_global_caches(param_env) {
if let Err(Overflow) = candidate { if let Err(Overflow(OverflowError::Canonical)) = candidate {
// Don't cache overflow globally; we only produce this in certain modes. // Don't cache overflow globally; we only produce this in certain modes.
} else if !pred.needs_infer() { } else if !pred.needs_infer() {
if !candidate.needs_infer() { if !candidate.needs_infer() {

View file

@ -243,7 +243,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
( (
ty::PredicateKind::ConstEvaluatable(a), ty::PredicateKind::ConstEvaluatable(a),
ty::PredicateKind::ConstEvaluatable(b), ty::PredicateKind::ConstEvaluatable(b),
) => tcx.try_unify_abstract_consts((a, b)), ) => tcx.try_unify_abstract_consts(self_param_env.and((a, b))),
( (
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)), ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)),
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)), ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)),

View file

@ -904,6 +904,7 @@ impl str {
#[must_use = "this returns the split string as an iterator, \ #[must_use = "this returns the split string as an iterator, \
without modifying the original"] without modifying the original"]
#[stable(feature = "split_whitespace", since = "1.1.0")] #[stable(feature = "split_whitespace", since = "1.1.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "str_split_whitespace")]
#[inline] #[inline]
pub fn split_whitespace(&self) -> SplitWhitespace<'_> { pub fn split_whitespace(&self) -> SplitWhitespace<'_> {
SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) } SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) }
@ -1846,6 +1847,7 @@ impl str {
#[must_use = "this returns the trimmed string as a slice, \ #[must_use = "this returns the trimmed string as a slice, \
without modifying the original"] without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "str_trim")]
pub fn trim(&self) -> &str { pub fn trim(&self) -> &str {
self.trim_matches(|c: char| c.is_whitespace()) self.trim_matches(|c: char| c.is_whitespace())
} }
@ -1884,6 +1886,7 @@ impl str {
#[must_use = "this returns the trimmed string as a new slice, \ #[must_use = "this returns the trimmed string as a new slice, \
without modifying the original"] without modifying the original"]
#[stable(feature = "trim_direction", since = "1.30.0")] #[stable(feature = "trim_direction", since = "1.30.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_start")]
pub fn trim_start(&self) -> &str { pub fn trim_start(&self) -> &str {
self.trim_start_matches(|c: char| c.is_whitespace()) self.trim_start_matches(|c: char| c.is_whitespace())
} }
@ -1922,6 +1925,7 @@ impl str {
#[must_use = "this returns the trimmed string as a new slice, \ #[must_use = "this returns the trimmed string as a new slice, \
without modifying the original"] without modifying the original"]
#[stable(feature = "trim_direction", since = "1.30.0")] #[stable(feature = "trim_direction", since = "1.30.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_end")]
pub fn trim_end(&self) -> &str { pub fn trim_end(&self) -> &str {
self.trim_end_matches(|c: char| c.is_whitespace()) self.trim_end_matches(|c: char| c.is_whitespace())
} }

View file

@ -16,13 +16,17 @@
// gdbg-check:$3 = {pointer = [...], vtable = [...]} // gdbg-check:$3 = {pointer = [...], vtable = [...]}
// gdbr-check:$3 = &unsized::Foo<dyn core::fmt::Debug> {pointer: [...], vtable: [...]} // gdbr-check:$3 = &unsized::Foo<dyn core::fmt::Debug> {pointer: [...], vtable: [...]}
// gdb-command:print _box
// gdbg-check:$4 = {pointer = [...], vtable = [...]}
// gdbr-check:$4 = alloc::boxed::Box<unsized::Foo<dyn core::fmt::Debug>, alloc::alloc::Global> {pointer: [...], vtable: [...]}
// gdb-command:print tuple_slice // gdb-command:print tuple_slice
// gdbg-check:$4 = {data_ptr = [...], length = 2} // gdbg-check:$5 = {data_ptr = [...], length = 2}
// gdbr-check:$4 = &(i32, i32, [i32]) {data_ptr: [...], length: 2} // gdbr-check:$5 = &(i32, i32, [i32]) {data_ptr: [...], length: 2}
// gdb-command:print tuple_dyn // gdb-command:print tuple_dyn
// gdbg-check:$5 = {pointer = [...], vtable = [...]} // gdbg-check:$6 = {pointer = [...], vtable = [...]}
// gdbr-check:$5 = &(i32, i32, dyn core::fmt::Debug) {pointer: [...], vtable: [...]} // gdbr-check:$6 = &(i32, i32, dyn core::fmt::Debug) {pointer: [...], vtable: [...]}
// === CDB TESTS =================================================================================== // === CDB TESTS ===================================================================================
@ -42,6 +46,12 @@
// cdb-check: [+0x000] pointer : 0x[...] [Type: unsized::Foo<dyn$<core::fmt::Debug> > *] // cdb-check: [+0x000] pointer : 0x[...] [Type: unsized::Foo<dyn$<core::fmt::Debug> > *]
// cdb-check: [...] vtable : 0x[...] [Type: unsigned [...]int[...] (*)[3]] // cdb-check: [...] vtable : 0x[...] [Type: unsigned [...]int[...] (*)[3]]
// cdb-command:dx _box
// cdb-check:
// cdb-check:_box [Type: alloc::boxed::Box<unsized::Foo<dyn$<core::fmt::Debug> >,alloc::alloc::Global>]
// cdb-check:[+0x000] pointer : 0x[...] [Type: unsized::Foo<dyn$<core::fmt::Debug> > *]
// cdb-check:[...] vtable : 0x[...] [Type: unsigned [...]int[...] (*)[3]]
// cdb-command:dx tuple_slice // cdb-command:dx tuple_slice
// cdb-check:tuple_slice [Type: ref$<tuple$<i32,i32,slice$<i32> > >] // cdb-check:tuple_slice [Type: ref$<tuple$<i32,i32,slice$<i32> > >]
// cdb-check: [+0x000] data_ptr : 0x[...] [Type: tuple$<i32,i32,slice$<i32> > *] // cdb-check: [+0x000] data_ptr : 0x[...] [Type: tuple$<i32,i32,slice$<i32> > *]
@ -69,6 +79,7 @@ fn main() {
let a: &Foo<[u8]> = &foo.value; let a: &Foo<[u8]> = &foo.value;
let b: &Foo<Foo<[u8]>> = &foo; let b: &Foo<Foo<[u8]>> = &foo;
let c: &Foo<dyn std::fmt::Debug> = &Foo { value: 7i32 }; let c: &Foo<dyn std::fmt::Debug> = &Foo { value: 7i32 };
let _box: Box<Foo<dyn std::fmt::Debug>> = Box::new(Foo { value: 8i32 });
// Also check unsized tuples // Also check unsized tuples
let tuple_slice: &(i32, i32, [i32]) = &(0, 1, [2, 3]); let tuple_slice: &(i32, i32, [i32]) = &(0, 1, [2, 3]);

View file

@ -0,0 +1,26 @@
// build-pass
#![feature(generic_const_exprs)]
//~^ WARNING the feature `generic_const_exprs` is incomplete
trait Generic {
const ASSOC: usize;
}
impl Generic for u8 {
const ASSOC: usize = 17;
}
impl Generic for u16 {
const ASSOC: usize = 13;
}
fn uses_assoc_type<T: Generic, const N: usize>() -> [u8; N + T::ASSOC] {
[0; N + T::ASSOC]
}
fn only_generic_n<const N: usize>() -> [u8; N + 13] {
uses_assoc_type::<u16, N>()
}
fn main() {}

View file

@ -0,0 +1,11 @@
warning: the feature `generic_const_exprs` is incomplete and may not be safe to use and/or cause compiler crashes
--> $DIR/eval-try-unify.rs:3:12
|
LL | #![feature(generic_const_exprs)]
| ^^^^^^^^^^^^^^^^^^^
|
= note: `#[warn(incomplete_features)]` on by default
= note: see issue #76560 <https://github.com/rust-lang/rust/issues/76560> for more information
warning: 1 warning emitted

View file

@ -2,114 +2,115 @@
#![allow(incomplete_features)] #![allow(incomplete_features)]
trait TensorDimension { trait TensorDimension {
const DIM : usize; const DIM: usize;
const ISSCALAR : bool = Self::DIM == 0; //~^ ERROR cycle detected when resolving instance
fn is_scalar(&self) -> bool {Self::ISSCALAR} // FIXME Given the current state of the compiler it's expected that we cycle here,
} // but the cycle is still wrong.
const ISSCALAR: bool = Self::DIM == 0;
trait TensorSize : TensorDimension { fn is_scalar(&self) -> bool {
fn size(&self) -> [usize;Self::DIM]; Self::ISSCALAR
fn inbounds(&self,index : [usize;Self::DIM]) -> bool {
index.iter().zip(self.size().iter()).all(|(i,s)| i < s)
} }
} }
trait TensorSize: TensorDimension {
fn size(&self) -> [usize; Self::DIM];
fn inbounds(&self, index: [usize; Self::DIM]) -> bool {
index.iter().zip(self.size().iter()).all(|(i, s)| i < s)
}
}
trait Broadcastable: TensorSize + Sized { trait Broadcastable: TensorSize + Sized {
type Element; type Element;
fn bget(&self, index:[usize;Self::DIM]) -> Option<Self::Element>; fn bget(&self, index: [usize; Self::DIM]) -> Option<Self::Element>;
fn lazy_updim<const NEWDIM : usize>(&self, size : [usize;NEWDIM] ) -> fn lazy_updim<const NEWDIM: usize>(
LazyUpdim<Self,{Self::DIM},NEWDIM> &self,
{ size: [usize; NEWDIM],
assert!(NEWDIM >= Self::DIM, ) -> LazyUpdim<Self, { Self::DIM }, NEWDIM> {
"Updimmed tensor cannot have fewer indices than the initial one."); assert!(
LazyUpdim {size,reference:&self} NEWDIM >= Self::DIM,
"Updimmed tensor cannot have fewer indices than the initial one."
);
LazyUpdim { size, reference: &self }
} }
fn bmap<T,F :Fn(Self::Element) -> T>(&self,foo : F) -> BMap<T,Self,F,{Self::DIM}>{ fn bmap<T, F: Fn(Self::Element) -> T>(&self, foo: F) -> BMap<T, Self, F, { Self::DIM }> {
BMap {reference:self,closure : foo} BMap { reference: self, closure: foo }
} }
} }
struct LazyUpdim<'a, T: Broadcastable, const OLDDIM: usize, const DIM: usize> {
struct LazyUpdim<'a,T : Broadcastable,const OLDDIM : usize, const DIM : usize> { size: [usize; DIM],
size : [usize;DIM], reference: &'a T,
reference : &'a T
} }
impl<'a,T : Broadcastable,const DIM : usize> TensorDimension for LazyUpdim<'a,T,{T::DIM},DIM> { impl<'a, T: Broadcastable, const DIM: usize> TensorDimension for LazyUpdim<'a, T, { T::DIM }, DIM> {
const DIM : usize = DIM; const DIM: usize = DIM;
} }
impl<'a,T : Broadcastable,const DIM : usize> TensorSize for LazyUpdim<'a,T,{T::DIM},DIM> { impl<'a, T: Broadcastable, const DIM: usize> TensorSize for LazyUpdim<'a, T, { T::DIM }, DIM> {
fn size(&self) -> [usize;DIM] {self.size} fn size(&self) -> [usize; DIM] {
//~^ ERROR method not compatible with trait self.size
}
} }
impl<'a,T : Broadcastable,const DIM : usize> Broadcastable for LazyUpdim<'a,T,{T::DIM},DIM> impl<'a, T: Broadcastable, const DIM: usize> Broadcastable for LazyUpdim<'a, T, { T::DIM }, DIM> {
{
type Element = T::Element; type Element = T::Element;
fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> { fn bget(&self, index: [usize; DIM]) -> Option<Self::Element> {
//~^ ERROR method not compatible with trait
assert!(DIM >= T::DIM); assert!(DIM >= T::DIM);
if !self.inbounds(index) {return None} if !self.inbounds(index) {
//~^ ERROR unconstrained generic constant return None;
//~| ERROR mismatched types }
let size = self.size(); let size = self.size();
//~^ ERROR unconstrained generic constant let newindex: [usize; T::DIM] = Default::default();
let newindex : [usize;T::DIM] = Default::default();
//~^ ERROR the trait bound `[usize; _]: Default` is not satisfied
self.reference.bget(newindex) self.reference.bget(newindex)
} }
} }
struct BMap<'a,R, T : Broadcastable, F : Fn(T::Element) -> R , const DIM: usize> { struct BMap<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> {
reference : &'a T, reference: &'a T,
closure : F closure: F,
} }
impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R, impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> TensorDimension
const DIM: usize> TensorDimension for BMap<'a,R,T,F,DIM> { for BMap<'a, R, T, F, DIM>
{
const DIM : usize = DIM; const DIM: usize = DIM;
} }
impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R , impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> TensorSize
const DIM: usize> TensorSize for BMap<'a,R,T,F,DIM> { for BMap<'a, R, T, F, DIM>
{
fn size(&self) -> [usize;DIM] {self.reference.size()} fn size(&self) -> [usize; DIM] {
//~^ ERROR unconstrained generic constant self.reference.size()
//~| ERROR mismatched types }
//~| ERROR method not compatible with trait
} }
impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R , impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> Broadcastable
const DIM: usize> Broadcastable for BMap<'a,R,T,F,DIM> { for BMap<'a, R, T, F, DIM>
{
type Element = R; type Element = R;
fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> { fn bget(&self, index: [usize; DIM]) -> Option<Self::Element> {
//~^ ERROR method not compatible with trait
self.reference.bget(index).map(&self.closure) self.reference.bget(index).map(&self.closure)
//~^ ERROR unconstrained generic constant
//~| ERROR mismatched types
} }
} }
impl<T> TensorDimension for Vec<T> { impl<T> TensorDimension for Vec<T> {
const DIM : usize = 1; const DIM: usize = 1;
} }
impl<T> TensorSize for Vec<T> { impl<T> TensorSize for Vec<T> {
fn size(&self) -> [usize;1] {[self.len()]} fn size(&self) -> [usize; 1] {
[self.len()]
}
} }
impl<T: Clone> Broadcastable for Vec<T> { impl<T: Clone> Broadcastable for Vec<T> {
type Element = T; type Element = T;
fn bget(& self,index : [usize;1]) -> Option<T> { fn bget(&self, index: [usize; 1]) -> Option<T> {
self.get(index[0]).cloned() self.get(index[0]).cloned()
} }
} }
fn main() { fn main() {
let v = vec![1,2,3]; let v = vec![1, 2, 3];
let bv = v.lazy_updim([3,4]); let bv = v.lazy_updim([3, 4]);
let bbv = bv.bmap(|x| x*x); let bbv = bv.bmap(|x| x * x);
println!("The size of v is {:?}",bbv.bget([0,2]).expect("Out of bounds.")); println!("The size of v is {:?}", bbv.bget([0, 2]).expect("Out of bounds."));
} }

View file

@ -1,130 +1,17 @@
error[E0308]: method not compatible with trait error[E0391]: cycle detected when resolving instance `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`
--> $DIR/issue-83765.rs:44:5 --> $DIR/issue-83765.rs:5:5
| |
LL | fn size(&self) -> [usize;DIM] {self.size} LL | const DIM: usize;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM` | ^^^^^^^^^^^^^^^^^
| |
= note: expected type `Self::DIM` note: ...which requires checking if `TensorDimension` fulfills its obligations...
found type `DIM` --> $DIR/issue-83765.rs:4:1
|
LL | trait TensorDimension {
| ^^^^^^^^^^^^^^^^^^^^^
= note: ...which again requires resolving instance `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`, completing the cycle
= note: cycle used when normalizing `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`
error[E0308]: method not compatible with trait error: aborting due to previous error
--> $DIR/issue-83765.rs:51:5
|
LL | fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
|
= note: expected type `Self::DIM`
found type `DIM`
error[E0308]: method not compatible with trait For more information about this error, try `rustc --explain E0391`.
--> $DIR/issue-83765.rs:78:5
|
LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
|
= note: expected type `Self::DIM`
found type `DIM`
error[E0308]: method not compatible with trait
--> $DIR/issue-83765.rs:88:5
|
LL | fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
|
= note: expected type `Self::DIM`
found type `DIM`
error: unconstrained generic constant
--> $DIR/issue-83765.rs:54:18
|
LL | if !self.inbounds(index) {return None}
| ^^^^^^^^
|
= help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
note: required by a bound in `TensorSize::inbounds`
--> $DIR/issue-83765.rs:12:38
|
LL | fn inbounds(&self,index : [usize;Self::DIM]) -> bool {
| ^^^^^^^^^ required by this bound in `TensorSize::inbounds`
error[E0308]: mismatched types
--> $DIR/issue-83765.rs:54:27
|
LL | if !self.inbounds(index) {return None}
| ^^^^^ expected `Self::DIM`, found `DIM`
|
= note: expected type `Self::DIM`
found type `DIM`
error: unconstrained generic constant
--> $DIR/issue-83765.rs:57:25
|
LL | let size = self.size();
| ^^^^
|
= help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
note: required by a bound in `TensorSize::size`
--> $DIR/issue-83765.rs:11:30
|
LL | fn size(&self) -> [usize;Self::DIM];
| ^^^^^^^^^ required by this bound in `TensorSize::size`
error[E0277]: the trait bound `[usize; _]: Default` is not satisfied
--> $DIR/issue-83765.rs:59:41
|
LL | let newindex : [usize;T::DIM] = Default::default();
| ^^^^^^^^^^^^^^^^ the trait `Default` is not implemented for `[usize; _]`
|
help: consider introducing a `where` bound, but there might be an alternative better way to express this requirement
|
LL | impl<'a,T : Broadcastable,const DIM : usize> Broadcastable for LazyUpdim<'a,T,{T::DIM},DIM> where [usize; _]: Default
| +++++++++++++++++++++++++
error: unconstrained generic constant
--> $DIR/issue-83765.rs:78:51
|
LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
| ^^^^
|
= help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
note: required by a bound in `TensorSize::size`
--> $DIR/issue-83765.rs:11:30
|
LL | fn size(&self) -> [usize;Self::DIM];
| ^^^^^^^^^ required by this bound in `TensorSize::size`
error[E0308]: mismatched types
--> $DIR/issue-83765.rs:78:36
|
LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
| ^^^^^^^^^^^^^^^^^^^^^ expected `DIM`, found `Self::DIM`
|
= note: expected type `DIM`
found type `Self::DIM`
error: unconstrained generic constant
--> $DIR/issue-83765.rs:90:24
|
LL | self.reference.bget(index).map(&self.closure)
| ^^^^
|
= help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
note: required by a bound in `Broadcastable::bget`
--> $DIR/issue-83765.rs:20:33
|
LL | fn bget(&self, index:[usize;Self::DIM]) -> Option<Self::Element>;
| ^^^^^^^^^ required by this bound in `Broadcastable::bget`
error[E0308]: mismatched types
--> $DIR/issue-83765.rs:90:29
|
LL | self.reference.bget(index).map(&self.closure)
| ^^^^^ expected `Self::DIM`, found `DIM`
|
= note: expected type `Self::DIM`
found type `DIM`
error: aborting due to 12 previous errors
Some errors have detailed explanations: E0277, E0308.
For more information about an error, try `rustc --explain E0277`.

View file

@ -0,0 +1,17 @@
struct Wrapper<T>(T);
trait Trait {
fn method(&self) {}
}
impl<'a, T> Trait for Wrapper<&'a T> where Wrapper<T>: Trait {}
fn get<T>() -> T {
unimplemented!()
}
fn main() {
let thing = get::<Thing>();//~ERROR cannot find type `Thing` in this scope [E0412]
let wrapper = Wrapper(thing);
Trait::method(&wrapper);
}

View file

@ -0,0 +1,9 @@
error[E0412]: cannot find type `Thing` in this scope
--> $DIR/issue-90319.rs:14:23
|
LL | let thing = get::<Thing>();
| ^^^^^ not found in this scope
error: aborting due to previous error
For more information about this error, try `rustc --explain E0412`.