
Auto merge of #132079 - fmease:rollup-agrd358, r=fmease

Rollup of 9 pull requests

Successful merges:

 - #130991 (Vectorized SliceContains)
 - #131928 (rustdoc: Document `markdown` module.)
 - #131955 (Set `signext` or `zeroext` for integer arguments on RISC-V and LoongArch64)
 - #131979 (Minor tweaks to `compare_impl_item.rs`)
 - #132036 (Add a test case for #131164)
 - #132039 (Specialize `read_exact` and `read_buf_exact` for `VecDeque`; see the sketch after this list)
 - #132060 ("innermost", "outermost", "leftmost", and "rightmost" don't need hyphens)
 - #132065 (Clarify documentation of `ptr::dangling()` function)
 - #132066 (Fix a typo in documentation of `pointer::sub_ptr()`)
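
Of the changes above, #132039 is the easiest to picture from the caller's side. A minimal sketch using stable std APIs (behavior is unchanged; per the PR, `read_exact` now copies directly out of the deque's internal buffer instead of going through the generic `read` loop):

use std::collections::VecDeque;
use std::io::Read;

fn main() -> std::io::Result<()> {
    let mut dq: VecDeque<u8> = VecDeque::from(vec![1, 2, 3, 4]);
    let mut buf = [0u8; 4];
    // Same observable result as before the PR; the specialization turns
    // this into at most two slice copies from the deque's ring buffer.
    dq.read_exact(&mut buf)?;
    assert_eq!(buf, [1, 2, 3, 4]);
    Ok(())
}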

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-10-23 22:28:57 +00:00
commit b8bb2968ce
54 changed files with 738 additions and 317 deletions


@ -197,7 +197,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
/// If the type is an unsized struct, the regular layout is generated,
/// with the inner-most trailing unsized field using the "minimal unit"
/// with the innermost trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {


@ -191,7 +191,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
/// If the type is an unsized struct, the regular layout is generated,
/// with the inner-most trailing unsized field using the "minimal unit"
/// with the innermost trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {


@ -888,7 +888,7 @@ impl CrateInfo {
// below.
//
// In order to get this left-to-right dependency ordering, we use the reverse
// postorder of all crates putting the leaves at the right-most positions.
// postorder of all crates putting the leaves at the rightmost positions.
let mut compiler_builtins = None;
let mut used_crates: Vec<_> = tcx
.postorder_cnums(())


@ -58,9 +58,9 @@ impl HumanReadableErrorType {
struct Margin {
/// The available whitespace on the left that can be consumed when centering.
pub whitespace_left: usize,
/// The column of the beginning of left-most span.
/// The column of the beginning of leftmost span.
pub span_left: usize,
/// The column of the end of right-most span.
/// The column of the end of rightmost span.
pub span_right: usize,
/// The beginning of the line to be displayed.
pub computed_left: usize,
@ -128,7 +128,7 @@ impl Margin {
} else {
0
};
// We want to show as much as possible, max_line_len is the right-most boundary for the
// We want to show as much as possible, max_line_len is the rightmost boundary for the
// relevant code.
self.computed_right = max(max_line_len, self.computed_left);
@ -685,7 +685,7 @@ impl HumanEmitter {
buffer.puts(line_offset, code_offset, "...", Style::LineNumber);
}
if margin.was_cut_right(line_len) {
// We have stripped some code after the right-most span end, make it clear we did so.
// We have stripped some code after the rightmost span end, make it clear we did so.
buffer.puts(line_offset, code_offset + taken - 3, "...", Style::LineNumber);
}
buffer.puts(line_offset, 0, &self.maybe_anonymized(line_index), Style::LineNumber);


@ -25,7 +25,7 @@ expand_collapse_debuginfo_illegal =
illegal value for attribute #[collapse_debuginfo(no|external|yes)]
expand_count_repetition_misplaced =
`count` can not be placed inside the inner-most repetition
`count` can not be placed inside the innermost repetition
expand_crate_name_in_cfg_attr =
`crate_name` within an `#![cfg_attr]` attribute is forbidden


@ -23,11 +23,11 @@ pub(crate) enum MetaVarExpr {
/// Ignore a meta-variable for repetition without expansion.
Ignore(Ident),
/// The index of the repetition at a particular depth, where 0 is the inner-most
/// The index of the repetition at a particular depth, where 0 is the innermost
/// repetition. The `usize` is the depth.
Index(usize),
/// The length of the repetition at a particular depth, where 0 is the inner-most
/// The length of the repetition at a particular depth, where 0 is the innermost
/// repetition. The `usize` is the depth.
Len(usize),
}
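
To make the depth semantics concrete, a nightly-only sketch (assumes `#![feature(macro_metavar_expr)]`; the surface syntax is unstable and not part of this commit), where depth 0 is the innermost repetition:

#![feature(macro_metavar_expr)]

// For each matched identifier, record (name, innermost index, outer index,
// length of the innermost repetition it sits in).
macro_rules! depths {
    ($( $( $x:ident )* );*) => {
        [ $( $( (stringify!($x), ${index(0)}, ${index(1)}, ${len(0)}) ),* ),* ]
    };
}

fn main() {
    let v = depths!(a b; c);
    // First outer group has two elements, second has one.
    assert_eq!(v, [("a", 0, 0, 2), ("b", 1, 0, 2), ("c", 0, 1, 1)]);
}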


@ -570,7 +570,7 @@ fn lockstep_iter_size(
}
}
/// Used solely by the `count` meta-variable expression, counts the outer-most repetitions at a
/// Used solely by the `count` meta-variable expression, counts the outermost repetitions at a
/// given optional nested depth.
///
/// For example, a macro parameter of `$( { $( $foo:ident ),* } )*` called with `{ a, b } { c }`:


@ -43,14 +43,13 @@ mod refine;
/// - `impl_m`: type of the method we are checking
/// - `trait_m`: the method in the trait
/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation
#[instrument(level = "debug", skip(tcx))]
pub(super) fn compare_impl_method<'tcx>(
tcx: TyCtxt<'tcx>,
impl_m: ty::AssocItem,
trait_m: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) {
debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);
let _: Result<_, ErrorGuaranteed> = try {
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, false)?;
compare_method_predicate_entailment(tcx, impl_m, trait_m, impl_trait_ref)?;
@ -167,8 +166,6 @@ fn compare_method_predicate_entailment<'tcx>(
trait_m: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
let trait_to_impl_args = impl_trait_ref.args;
// This node-id should be used for the `body_id` field on each
// `ObligationCause` (and the `FnCtxt`).
//
@ -183,27 +180,17 @@ fn compare_method_predicate_entailment<'tcx>(
kind: impl_m.kind,
});
// Create mapping from impl to placeholder.
let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);
// Create mapping from trait to placeholder.
let trait_to_placeholder_args =
impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
debug!("compare_impl_method: trait_to_placeholder_args={:?}", trait_to_placeholder_args);
// Create mapping from trait method to impl method.
let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_m.def_id).rebase_onto(
tcx,
impl_m.container_id(tcx),
impl_trait_ref.args,
);
debug!(?trait_to_impl_args);
let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
// Create obligations for each predicate declared by the impl
// definition in the context of the trait's parameter
// environment. We can't just use `impl_env.caller_bounds`,
// however, because we want to replace all late-bound regions with
// region variables.
let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
// This is the only tricky bit of the new way we check implementation methods
// We need to build a set of predicates where only the method-level bounds
// are from the trait and we assume all other bounds from the implementation
@ -211,25 +198,25 @@ fn compare_method_predicate_entailment<'tcx>(
//
// We then register the obligations from the impl_m and check to see
// if all constraints hold.
hybrid_preds.predicates.extend(
trait_m_predicates
.instantiate_own(tcx, trait_to_placeholder_args)
.map(|(predicate, _)| predicate),
let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
hybrid_preds.extend(
trait_m_predicates.instantiate_own(tcx, trait_to_impl_args).map(|(predicate, _)| predicate),
);
// Construct trait parameter environment and then shift it into the placeholder viewpoint.
// The key step here is to update the caller_bounds's predicates to be
// the new hybrid bounds we computed.
let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_def_id);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
debug!(caller_bounds=?param_env.caller_bounds());
let infcx = &tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(infcx);
debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_args);
// Create obligations for each predicate declared by the impl
// definition in the context of the hybrid param-env. This makes
// sure that the impl's method's where clauses are not more
// restrictive than the trait's method (and the impl itself).
let impl_m_own_bounds = impl_m_predicates.instantiate_own_identity();
for (predicate, span) in impl_m_own_bounds {
let normalize_cause = traits::ObligationCause::misc(span, impl_m_def_id);
let predicate = ocx.normalize(&normalize_cause, param_env, predicate);
@ -256,7 +243,6 @@ fn compare_method_predicate_entailment<'tcx>(
// any associated types appearing in the fn arguments or return
// type.
// Compute placeholder form of impl and trait method tys.
let mut wf_tys = FxIndexSet::default();
let unnormalized_impl_sig = infcx.instantiate_binder_with_fresh_vars(
@ -267,9 +253,9 @@ fn compare_method_predicate_entailment<'tcx>(
let norm_cause = ObligationCause::misc(impl_m_span, impl_m_def_id);
let impl_sig = ocx.normalize(&norm_cause, param_env, unnormalized_impl_sig);
debug!("compare_impl_method: impl_fty={:?}", impl_sig);
debug!(?impl_sig);
let trait_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args);
let trait_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_impl_args);
let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
// Next, add all inputs and output as well-formed tys. Importantly,
@ -280,9 +266,7 @@ fn compare_method_predicate_entailment<'tcx>(
// We also have to add the normalized trait signature
// as we don't normalize during implied bounds computation.
wf_tys.extend(trait_sig.inputs_and_output.iter());
let trait_fty = Ty::new_fn_ptr(tcx, ty::Binder::dummy(trait_sig));
debug!("compare_impl_method: trait_fty={:?}", trait_fty);
debug!(?trait_sig);
// FIXME: We'd want to keep more accurate spans than "the method signature" when
// processing the comparison between the trait and impl fn, but we sadly lose them
@ -455,8 +439,6 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
// just so we don't ICE during instantiation later.
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, true)?;
let trait_to_impl_args = impl_trait_ref.args;
let impl_m_hir_id = tcx.local_def_id_to_hir_id(impl_m_def_id);
let return_span = tcx.hir().fn_decl_by_hir_id(impl_m_hir_id).unwrap().output.span();
let cause =
@ -466,18 +448,18 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
kind: impl_m.kind,
});
// Create mapping from impl to placeholder.
let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);
// Create mapping from trait to placeholder.
let trait_to_placeholder_args =
impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
// Create mapping from trait to impl (i.e. impl trait header + impl method identity args).
let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_m.def_id).rebase_onto(
tcx,
impl_m.container_id(tcx),
impl_trait_ref.args,
);
let hybrid_preds = tcx
.predicates_of(impl_m.container_id(tcx))
.instantiate_identity(tcx)
.into_iter()
.chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_to_placeholder_args))
.chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_to_impl_args))
.map(|(clause, _)| clause);
let param_env = ty::ParamEnv::new(tcx.mk_clauses_from_iter(hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(
@ -511,7 +493,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
.instantiate_binder_with_fresh_vars(
return_span,
infer::HigherRankedType,
tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args),
tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_impl_args),
)
.fold_with(&mut collector);
@ -705,7 +687,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
// Also, we only need to account for a difference in trait and impl args,
// since we previously enforce that the trait method and impl method have the
// same generics.
let num_trait_args = trait_to_impl_args.len();
let num_trait_args = impl_trait_ref.args.len();
let num_impl_args = tcx.generics_of(impl_m.container_id(tcx)).own_params.len();
let ty = match ty.try_fold_with(&mut RemapHiddenTyRegions {
tcx,
@ -1041,12 +1023,7 @@ fn check_region_bounds_on_impl_item<'tcx>(
let trait_generics = tcx.generics_of(trait_m.def_id);
let trait_params = trait_generics.own_counts().lifetimes;
debug!(
"check_region_bounds_on_impl_item: \
trait_generics={:?} \
impl_generics={:?}",
trait_generics, impl_generics
);
debug!(?trait_generics, ?impl_generics);
// Must have same number of early-bound lifetime parameters.
// Unfortunately, if the user screws up the bounds, then this
@ -1710,8 +1687,7 @@ pub(super) fn compare_impl_const_raw(
let trait_const_item = tcx.associated_item(trait_const_item_def);
let impl_trait_ref =
tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap().instantiate_identity();
debug!("compare_impl_const(impl_trait_ref={:?})", impl_trait_ref);
debug!(?impl_trait_ref);
compare_number_of_generics(tcx, impl_const_item, trait_const_item, false)?;
compare_generic_param_kinds(tcx, impl_const_item, trait_const_item, false)?;
@ -1722,6 +1698,7 @@ pub(super) fn compare_impl_const_raw(
/// The equivalent of [compare_method_predicate_entailment], but for associated constants
/// instead of associated functions.
// FIXME(generic_const_items): If possible extract the common parts of `compare_{type,const}_predicate_entailment`.
#[instrument(level = "debug", skip(tcx))]
fn compare_const_predicate_entailment<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ct: ty::AssocItem,
@ -1736,13 +1713,14 @@ fn compare_const_predicate_entailment<'tcx>(
// because we shouldn't really have to deal with lifetimes or
// predicates. In fact some of this should probably be put into
// shared functions because of DRY violations...
let impl_args = GenericArgs::identity_for_item(tcx, impl_ct.def_id);
let trait_to_impl_args =
impl_args.rebase_onto(tcx, impl_ct.container_id(tcx), impl_trait_ref.args);
let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_ct.def_id).rebase_onto(
tcx,
impl_ct.container_id(tcx),
impl_trait_ref.args,
);
// Create a parameter environment that represents the implementation's
// method.
// Compute placeholder form of impl and trait const tys.
// associated const.
let impl_ty = tcx.type_of(impl_ct_def_id).instantiate_identity();
let trait_ty = tcx.type_of(trait_ct.def_id).instantiate(tcx, trait_to_impl_args);
@ -1759,14 +1737,14 @@ fn compare_const_predicate_entailment<'tcx>(
// The predicates declared by the impl definition, the trait and the
// associated const in the trait are assumed.
let impl_predicates = tcx.predicates_of(impl_ct_predicates.parent.unwrap());
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
hybrid_preds.predicates.extend(
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
hybrid_preds.extend(
trait_ct_predicates
.instantiate_own(tcx, trait_to_impl_args)
.map(|(predicate, _)| predicate),
);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(
tcx,
param_env,
@ -1776,7 +1754,7 @@ fn compare_const_predicate_entailment<'tcx>(
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
let impl_ct_own_bounds = impl_ct_predicates.instantiate_own(tcx, impl_args);
let impl_ct_own_bounds = impl_ct_predicates.instantiate_own_identity();
for (predicate, span) in impl_ct_own_bounds {
let cause = ObligationCause::misc(span, impl_ct_def_id);
let predicate = ocx.normalize(&cause, param_env, predicate);
@ -1787,20 +1765,15 @@ fn compare_const_predicate_entailment<'tcx>(
// There is no "body" here, so just pass dummy id.
let impl_ty = ocx.normalize(&cause, param_env, impl_ty);
debug!("compare_const_impl: impl_ty={:?}", impl_ty);
debug!(?impl_ty);
let trait_ty = ocx.normalize(&cause, param_env, trait_ty);
debug!("compare_const_impl: trait_ty={:?}", trait_ty);
debug!(?trait_ty);
let err = ocx.sup(&cause, param_env, trait_ty, impl_ty);
if let Err(terr) = err {
debug!(
"checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
impl_ty, trait_ty
);
debug!(?impl_ty, ?trait_ty);
// Locate the Span containing just the type of the offending impl
let (ty, _) = tcx.hir().expect_impl_item(impl_ct_def_id).expect_const();
@ -1845,14 +1818,13 @@ fn compare_const_predicate_entailment<'tcx>(
ocx.resolve_regions_and_report_errors(impl_ct_def_id, &outlives_env)
}
#[instrument(level = "debug", skip(tcx))]
pub(super) fn compare_impl_ty<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ty: ty::AssocItem,
trait_ty: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) {
debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);
let _: Result<(), ErrorGuaranteed> = try {
compare_number_of_generics(tcx, impl_ty, trait_ty, false)?;
compare_generic_param_kinds(tcx, impl_ty, trait_ty, false)?;
@ -1864,20 +1836,23 @@ pub(super) fn compare_impl_ty<'tcx>(
/// The equivalent of [compare_method_predicate_entailment], but for associated types
/// instead of associated functions.
#[instrument(level = "debug", skip(tcx))]
fn compare_type_predicate_entailment<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ty: ty::AssocItem,
trait_ty: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
let impl_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id);
let trait_to_impl_args =
impl_args.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.args);
let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id).rebase_onto(
tcx,
impl_ty.container_id(tcx),
impl_trait_ref.args,
);
let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_args);
let impl_ty_own_bounds = impl_ty_predicates.instantiate_own_identity();
if impl_ty_own_bounds.len() == 0 {
// Nothing to check.
return Ok(());
@ -1887,29 +1862,29 @@ fn compare_type_predicate_entailment<'tcx>(
// `ObligationCause` (and the `FnCtxt`). This is what
// `regionck_item` expects.
let impl_ty_def_id = impl_ty.def_id.expect_local();
debug!("compare_type_predicate_entailment: trait_to_impl_args={:?}", trait_to_impl_args);
debug!(?trait_to_impl_args);
// The predicates declared by the impl definition, the trait and the
// associated type in the trait are assumed.
let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
hybrid_preds.predicates.extend(
let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
hybrid_preds.extend(
trait_ty_predicates
.instantiate_own(tcx, trait_to_impl_args)
.map(|(predicate, _)| predicate),
);
debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
debug!(?hybrid_preds);
let impl_ty_span = tcx.def_span(impl_ty_def_id);
let normalize_cause = ObligationCause::misc(impl_ty_span, impl_ty_def_id);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
debug!(caller_bounds=?param_env.caller_bounds());
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);
debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());
for (predicate, span) in impl_ty_own_bounds {
let cause = ObligationCause::misc(span, impl_ty_def_id);
let predicate = ocx.normalize(&cause, param_env, predicate);
@ -2009,11 +1984,11 @@ pub(super) fn check_type_bounds<'tcx>(
.explicit_item_bounds(trait_ty.def_id)
.iter_instantiated_copied(tcx, rebased_args)
.map(|(concrete_ty_bound, span)| {
debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
debug!(?concrete_ty_bound);
traits::Obligation::new(tcx, mk_cause(span), param_env, concrete_ty_bound)
})
.collect();
debug!("check_type_bounds: item_bounds={:?}", obligations);
debug!(item_bounds=?obligations);
// Normalize predicates with the assumption that the GAT may always normalize
// to its definition type. This should be the param-env we use to *prove* the
@ -2032,7 +2007,7 @@ pub(super) fn check_type_bounds<'tcx>(
} else {
ocx.normalize(&normalize_cause, normalize_param_env, obligation.predicate)
};
debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
debug!(?normalized_predicate);
obligation.predicate = normalized_predicate;
ocx.register_obligation(obligation);


@ -2457,7 +2457,7 @@ fn truncate_capture_for_optimization(
) -> (Place<'_>, ty::UpvarCapture) {
let is_shared_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not));
// Find the right-most deref (if any). All the projections that come after this
// Find the rightmost deref (if any). All the projections that come after this
// are fields or other "in-place pointer adjustments"; these refer therefore to
// data owned by whatever pointer is being dereferenced here.
let idx = place.projections.iter().rposition(|proj| ProjectionKind::Deref == proj.kind);


@ -395,7 +395,9 @@ impl<'tcx> GenericPredicates<'tcx> {
EarlyBinder::bind(self.predicates).iter_instantiated_copied(tcx, args)
}
pub fn instantiate_own_identity(self) -> impl Iterator<Item = (Clause<'tcx>, Span)> {
pub fn instantiate_own_identity(
self,
) -> impl Iterator<Item = (Clause<'tcx>, Span)> + DoubleEndedIterator + ExactSizeIterator {
EarlyBinder::bind(self.predicates).iter_identity_copied()
}
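
For context on why the extra bounds are spelled out: `impl Iterator` alone erases refinements such as `ExactSizeIterator`, and callers elsewhere in this commit (e.g. the `impl_ty_own_bounds.len() == 0` check) rely on them. A standalone sketch of the pattern, with illustrative names:

// Without `+ DoubleEndedIterator + ExactSizeIterator`, the opaque type
// exposes only `Iterator`, and `.len()`/`.rev()` below would not compile.
fn own_bounds(v: &[u32]) -> impl Iterator<Item = u32> + DoubleEndedIterator + ExactSizeIterator + '_ {
    v.iter().copied()
}

fn main() {
    let bounds = own_bounds(&[1, 2, 3]);
    assert_eq!(bounds.len(), 3); // ExactSizeIterator
    assert_eq!(bounds.rev().next(), Some(3)); // DoubleEndedIterator
}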


@ -1048,8 +1048,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// | +------------|outer_scope cache|--+ |
// +------------------------------|middle_scope cache|------+
//
// Now, a new, inner-most scope is added along with a new drop into
// both inner-most and outer-most scopes:
// Now, a new, innermost scope is added along with a new drop into
// both innermost and outermost scopes:
//
// +------------------------------------------------------------+
// | +----------------------------------+ |
@ -1061,11 +1061,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// +----=----------------|invalid middle_scope cache|-----------+
//
// If, when adding `drop(new)` we do not invalidate the cached blocks for both
// outer_scope and middle_scope, then, when building drops for the inner (right-most)
// outer_scope and middle_scope, then, when building drops for the inner (rightmost)
// scope, the old, cached blocks, without `drop(new)` will get used, producing the
// wrong results.
//
// Note that this code iterates scopes from the inner-most to the outer-most,
// Note that this code iterates scopes from the innermost to the outermost,
// invalidating caches of each scope visited. This way bare minimum of the
// caches gets invalidated. i.e., if a new drop is added into the middle scope, the
// cache of outer scope stays intact.


@ -1177,7 +1177,7 @@ struct PlaceInfo<'tcx> {
/// The projection used to go from parent to this node (only None for root).
proj_elem: Option<TrackElem>,
/// The left-most child.
/// The leftmost child.
first_child: Option<PlaceIndex>,
/// Index of the sibling to the right of this node.


@ -85,7 +85,7 @@ pub(super) fn report_suspicious_mismatch_block(
}
}
// Find the inner-most span candidate for final report
// Find the innermost span candidate for final report
let candidate_span =
matched_spans.into_iter().rev().find(|&(_, same_ident)| !same_ident).map(|(span, _)| span);


@ -1,6 +1,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
#[derive(Copy, Clone)]
enum RegPassKind {
@ -359,3 +360,30 @@ where
);
}
}
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if abi == SpecAbi::RustIntrinsic {
return;
}
let grlen = cx.data_layout().pointer_size.bits();
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
// LLVM integer types do not differentiate between signed and unsigned integers.
// Some LoongArch instructions do not have a `.w` suffix version; they use all the
// GRLEN bits. Explicitly setting the `signext` or `zeroext` attribute
// according to signedness avoids unnecessary integer-extending instructions.
//
// This is similar to the RISC-V case, see
// https://github.com/rust-lang/rust/issues/114508 for details.
extend_integer_width(arg, grlen);
}
}
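
To see what the attribute buys, a sketch not taken from the PR: compiling the functions below for a 64-bit RISC-V or LoongArch target with `--emit=llvm-ir` should now yield extension attributes roughly as shown in the comments (exact IR may differ):

// With this change, narrow integer arguments and returns carry explicit
// extension attributes in LLVM IR, roughly:
//   define signext i32 @add_i32(i32 signext %a, i32 signext %b)
//   define zeroext i8 @add_u8(i8 zeroext %a, i8 zeroext %b)
// so callees can rely on properly extended upper XLEN/GRLEN bits.
#[no_mangle]
pub fn add_i32(a: i32, b: i32) -> i32 {
    a.wrapping_add(b)
}

#[no_mangle]
pub fn add_u8(a: u8, b: u8) -> u8 {
    a.wrapping_add(b)
}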


@ -1,11 +1,14 @@
use std::fmt;
use std::str::FromStr;
use std::{fmt, iter};
pub use rustc_abi::{Reg, RegKind};
use rustc_macros::HashStable_Generic;
use rustc_span::Symbol;
use crate::abi::{self, Abi, Align, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::abi::{
self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
};
use crate::spec::abi::Abi as SpecAbi;
use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
mod aarch64;
@ -720,6 +723,118 @@ impl<'a, Ty> FnAbi<'a, Ty> {
Ok(())
}
pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
let spec = cx.target_spec();
match &spec.arch[..] {
"x86" => x86::compute_rust_abi_info(cx, self, abi),
"riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
"loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
_ => {}
};
for (arg_idx, arg) in self
.args
.iter_mut()
.enumerate()
.map(|(idx, arg)| (Some(idx), arg))
.chain(iter::once((None, &mut self.ret)))
{
if arg.is_ignore() {
continue;
}
if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
// Return values larger than 2 registers using a return area
// pointer. LLVM and Cranelift disagree about how to return
// values that don't fit in the registers designated for return
// values. LLVM will force the entire return value to be passed
// by return area pointer, while Cranelift will look at each IR level
// return value independently and decide to pass it in a
// register or not, which would result in the return value
// being passed partially in registers and partially through a
// return area pointer.
//
// While Cranelift may need to be fixed as the LLVM behavior is
// generally more correct with respect to the surface language,
// forcing this behavior in rustc itself makes it easier for
// other backends to conform to the Rust ABI; for the C ABI,
// rustc already handles this behavior anyway.
//
// In addition LLVM's decision to pass the return value in
// registers or using a return area pointer depends on how
// exactly the return type is lowered to an LLVM IR type. For
// example `Option<u128>` can be lowered as `{ i128, i128 }`
// in which case the x86_64 backend would use a return area
// pointer, or it could be passed as `{ i32, i128 }` in which
// case the x86_64 backend would pass it in registers by taking
// advantage of an LLVM ABI extension that allows using 3
// registers for the x86_64 sysv call conv rather than the
// officially specified 2 registers.
//
// FIXME: Technically we should look at the amount of available
// return registers rather than guessing that there are 2
// registers for return values. In practice only a couple of
// architectures have fewer than 2 return registers, none of
// which are supported by Cranelift.
//
// NOTE: This adjustment is only necessary for the Rust ABI, as
// for other ABIs the calling convention implementations in
// rustc_target already ensure that any return value which doesn't
// fit in the available return registers is passed in
// the right way for the current target.
arg.make_indirect();
continue;
}
match arg.layout.abi {
Abi::Aggregate { .. } => {}
// This is a fun case! The gist of what this is doing is
// that we want callers and callees to always agree on the
// ABI of how they pass SIMD arguments. If we were to *not*
// make these arguments indirect then they'd be immediates
// in LLVM, which means that they'd use whatever the
// appropriate ABI is for the callee and the caller. That
// means, for example, if the caller doesn't have AVX
// enabled but the callee does, then passing an AVX argument
// across this boundary would cause corrupt data to show up.
//
// This problem is fixed by unconditionally passing SIMD
// arguments through memory between callers and callees
// which should get them all to agree on ABI regardless of
// target feature sets. Some more information about this
// issue can be found in #44367.
//
// Note that the intrinsic ABI is exempt here as
// that's how we connect up to LLVM and it's unstable
// anyway, we control all calls to it in libstd.
Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
arg.make_indirect();
continue;
}
_ => continue,
}
// Compute `Aggregate` ABI.
let is_indirect_not_on_stack =
matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
assert!(is_indirect_not_on_stack);
let size = arg.layout.size;
if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
// We want to pass small aggregates as immediates, but using
// an LLVM aggregate type for this leads to bad optimizations,
// so we pick an appropriately sized integer type instead.
arg.cast_to(Reg { kind: RegKind::Integer, size });
}
}
}
}
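
A small illustration of the two-register cutoff described above, assuming a 64-bit target (pointer size 8, so the cutoff is 16 bytes); the comments restate the rule's outcome rather than verified codegen:

// `u128` is exactly two pointer-sized registers on a 64-bit target, so it
// is not forced indirect by this rule; anything larger gets a return-area
// pointer.
pub fn fits_in_registers() -> u128 {
    0 // size 16 == 2 * 8: may still be returned in registers
}

pub fn needs_return_area() -> (u128, u64) {
    (0, 0) // size > 16: returned via a return-area pointer
}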
impl FromStr for Conv {


@ -7,6 +7,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
#[derive(Copy, Clone)]
enum RegPassKind {
@ -365,3 +366,29 @@ where
);
}
}
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
if abi == SpecAbi::RustIntrinsic {
return;
}
let xlen = cx.data_layout().pointer_size.bits();
for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
// LLVM integer types do not differentiate between signed and unsigned integers.
// Some RISC-V instructions do not have a `.w` suffix version; they use all the
// XLEN bits. Explicitly setting the `signext` or `zeroext` attribute
// according to signedness avoids unnecessary integer-extending instructions.
//
// See https://github.com/rust-lang/rust/issues/114508 for details.
extend_integer_width(arg, xlen);
}
}


@ -1,6 +1,9 @@
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
use crate::abi::{Abi, Align, HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::abi::{
Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
};
use crate::spec::HasTargetSpec;
use crate::spec::abi::Abi as SpecAbi;
#[derive(PartialEq)]
pub(crate) enum Flavor {
@ -207,3 +210,35 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
}
}
}
pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
// Avoid returning floats in x87 registers on x86 as loading and storing from x87
// registers will quiet signalling NaNs. Also avoid using SSE registers since they
// are not always available (depending on target features).
if !fn_abi.ret.is_ignore()
// Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
&& abi != SpecAbi::RustIntrinsic
{
let has_float = match fn_abi.ret.layout.abi {
Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
Abi::ScalarPair(s1, s2) => {
matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
}
_ => false, // not passed via registers on x86 anyway
};
if has_float {
if fn_abi.ret.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
// Same size or smaller than pointer, return in a register.
fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
} else {
// Larger than a pointer, return indirectly.
fn_abi.ret.make_indirect();
}
return;
}
}
}
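
A sketch of the two branches, assuming 32-bit x86 and the Rust ABI; the comments restate what the code above decides, not verified codegen:

// An `f32` return is a scalar float no larger than a pointer, so it is
// cast to an integer register instead of going through x87/SSE; a
// `(f64, f64)` return is a scalar pair of floats larger than a pointer
// and is made indirect.
pub fn ret_f32() -> f32 {
    1.0 // cast to a pointer-sized integer register
}

pub fn ret_two_f64() -> (f64, f64) {
    (1.0, 2.0) // returned indirectly
}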


@ -1,7 +1,7 @@
use std::iter;
use rustc_abi::Primitive::{Float, Pointer};
use rustc_abi::{Abi, AddressSpace, PointerKind, Scalar, Size};
use rustc_abi::Primitive::Pointer;
use rustc_abi::{Abi, PointerKind, Scalar, Size};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::bug;
@ -13,8 +13,7 @@ use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_target::abi::call::{
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
RiscvInterruptKind,
ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
};
use rustc_target::spec::abi::Abi as SpecAbi;
use tracing::debug;
@ -678,6 +677,8 @@ fn fn_abi_adjust_for_abi<'tcx>(
let tcx = cx.tcx();
if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic {
fn_abi.adjust_for_rust_abi(cx, abi);
// Look up the deduced parameter attributes for this function, if we have its def ID and
// we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
// as appropriate.
@ -688,125 +689,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
&[]
};
let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
if arg.is_ignore() {
return;
}
// Avoid returning floats in x87 registers on x86 as loading and storing from x87
// registers will quiet signalling NaNs. Also avoid using SSE registers since they
// are not always available (depending on target features).
if tcx.sess.target.arch == "x86"
&& arg_idx.is_none()
// Intrinsics themselves are not actual "real" functions, so there's no need to
// change their ABIs.
&& abi != SpecAbi::RustIntrinsic
{
let has_float = match arg.layout.abi {
Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
Abi::ScalarPair(s1, s2) => {
matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
}
_ => false, // not passed via registers on x86 anyway
};
if has_float {
if arg.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
// Same size or smaller than pointer, return in a register.
arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
} else {
// Larger than a pointer, return indirectly.
arg.make_indirect();
}
return;
}
}
if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
// Return values larger than 2 registers using a return area
// pointer. LLVM and Cranelift disagree about how to return
// values that don't fit in the registers designated for return
// values. LLVM will force the entire return value to be passed
// by return area pointer, while Cranelift will look at each IR level
// return value independently and decide to pass it in a
// register or not, which would result in the return value
// being passed partially in registers and partially through a
// return area pointer.
//
// While Cranelift may need to be fixed as the LLVM behavior is
// generally more correct with respect to the surface language,
// forcing this behavior in rustc itself makes it easier for
// other backends to conform to the Rust ABI; for the C ABI,
// rustc already handles this behavior anyway.
//
// In addition LLVM's decision to pass the return value in
// registers or using a return area pointer depends on how
// exactly the return type is lowered to an LLVM IR type. For
// example `Option<u128>` can be lowered as `{ i128, i128 }`
// in which case the x86_64 backend would use a return area
// pointer, or it could be passed as `{ i32, i128 }` in which
// case the x86_64 backend would pass it in registers by taking
// advantage of an LLVM ABI extension that allows using 3
// registers for the x86_64 sysv call conv rather than the
// officially specified 2 registers.
//
// FIXME: Technically we should look at the amount of available
// return registers rather than guessing that there are 2
// registers for return values. In practice only a couple of
// architectures have fewer than 2 return registers, none of
// which are supported by Cranelift.
//
// NOTE: This adjustment is only necessary for the Rust ABI, as
// for other ABIs the calling convention implementations in
// rustc_target already ensure that any return value which doesn't
// fit in the available return registers is passed in
// the right way for the current target.
arg.make_indirect();
return;
}
match arg.layout.abi {
Abi::Aggregate { .. } => {}
// This is a fun case! The gist of what this is doing is
// that we want callers and callees to always agree on the
// ABI of how they pass SIMD arguments. If we were to *not*
// make these arguments indirect then they'd be immediates
// in LLVM, which means that they'd use whatever the
// appropriate ABI is for the callee and the caller. That
// means, for example, if the caller doesn't have AVX
// enabled but the callee does, then passing an AVX argument
// across this boundary would cause corrupt data to show up.
//
// This problem is fixed by unconditionally passing SIMD
// arguments through memory between callers and callees
// which should get them all to agree on ABI regardless of
// target feature sets. Some more information about this
// issue can be found in #44367.
//
// Note that the intrinsic ABI is exempt here as
// that's how we connect up to LLVM and it's unstable
// anyway, we control all calls to it in libstd.
Abi::Vector { .. }
if abi != SpecAbi::RustIntrinsic && tcx.sess.target.simd_types_indirect =>
{
arg.make_indirect();
return;
}
_ => return,
}
// Compute `Aggregate` ABI.
let is_indirect_not_on_stack =
matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
assert!(is_indirect_not_on_stack, "{:?}", arg);
let size = arg.layout.size;
if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
// We want to pass small aggregates as immediates, but using
// an LLVM aggregate type for this leads to bad optimizations,
// so we pick an appropriately sized integer type instead.
arg.cast_to(Reg { kind: RegKind::Integer, size });
continue;
}
// If we deduced that this parameter was read-only, add that to the attribute list now.
@ -814,9 +699,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
// The `readonly` parameter only applies to pointers, so we can only do this if the
// argument was passed indirectly. (If the argument is passed directly, it's an SSA
// value, so it's implicitly immutable.)
if let (Some(arg_idx), &mut PassMode::Indirect { ref mut attrs, .. }) =
(arg_idx, &mut arg.mode)
{
if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
// The `deduced_param_attrs` list could be empty if this is a type of function
// we can't deduce any parameters for, so make sure the argument index is in
// bounds.
@ -827,11 +710,6 @@ fn fn_abi_adjust_for_abi<'tcx>(
}
}
}
};
fixup(&mut fn_abi.ret, None);
for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
fixup(arg, Some(arg_idx));
}
} else {
fn_abi


@ -496,8 +496,8 @@ where
/// Similar to [`instantiate_identity`](EarlyBinder::instantiate_identity),
/// but on an iterator of values that deref to a `TypeFoldable`.
pub fn iter_identity_copied(self) -> impl Iterator<Item = <Iter::Item as Deref>::Target> {
self.value.into_iter().map(|v| *v)
pub fn iter_identity_copied(self) -> IterIdentityCopied<Iter> {
IterIdentityCopied { it: self.value.into_iter() }
}
}
@ -546,6 +546,44 @@ where
{
}
pub struct IterIdentityCopied<Iter: IntoIterator> {
it: Iter::IntoIter,
}
impl<Iter: IntoIterator> Iterator for IterIdentityCopied<Iter>
where
Iter::Item: Deref,
<Iter::Item as Deref>::Target: Copy,
{
type Item = <Iter::Item as Deref>::Target;
fn next(&mut self) -> Option<Self::Item> {
self.it.next().map(|i| *i)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
impl<Iter: IntoIterator> DoubleEndedIterator for IterIdentityCopied<Iter>
where
Iter::IntoIter: DoubleEndedIterator,
Iter::Item: Deref,
<Iter::Item as Deref>::Target: Copy,
{
fn next_back(&mut self) -> Option<Self::Item> {
self.it.next_back().map(|i| *i)
}
}
impl<Iter: IntoIterator> ExactSizeIterator for IterIdentityCopied<Iter>
where
Iter::IntoIter: ExactSizeIterator,
Iter::Item: Deref,
<Iter::Item as Deref>::Target: Copy,
{
}
pub struct EarlyBinderIter<I, T> {
t: T,
_tcx: PhantomData<I>,