
Auto merge of #120748 - Nadrieril:rollup-dj0qwv5, r=Nadrieril

Rollup of 13 pull requests

Successful merges:

 - #110482 (Add armv8r-none-eabihf target for the Cortex-R52.)
 - #119162 (Add unstable `-Z direct-access-external-data` cmdline flag for `rustc`)
 - #120302 (various const interning cleanups)
 - #120455 (Add FileCheck annotations to MIR-opt SROA tests)
 - #120470 (Mark "unused binding" suggestion as maybe incorrect)
 - #120479 (Suggest turning `if let` into irrefutable `let` if appropriate)
 - #120564 (coverage: Split out counter increment sites from BCB node/edge counters)
 - #120633 (pattern_analysis: gather up place-relevant info)
 - #120664 (Add parallel rustc ui tests)
 - #120726 (Don't use bashism in checktools.sh)
 - #120733 (MirPass: make name more const)
 - #120735 (Remove some `unchecked_claim_error_was_emitted` calls)
 - #120746 (Record coroutine kind in coroutine generics)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-02-07 19:40:25 +00:00
commit 8ace7ea1f7
53 changed files with 942 additions and 393 deletions

View file

@@ -123,6 +123,17 @@ impl CodegenCx<'_, '_> {
             return false;
         }
+        // Match clang by only supporting COFF and ELF for now.
+        if self.tcx.sess.target.is_like_osx {
+            return false;
+        }
+        // With pie relocation model calls of functions defined in the translation
+        // unit can use copy relocations.
+        if self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration {
+            return true;
+        }
         // Thread-local variables generally don't support copy relocations.
         let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
             .is_some_and(|v| llvm::LLVMIsThreadLocal(v) == llvm::True);
@@ -130,18 +141,12 @@ impl CodegenCx<'_, '_> {
             return false;
         }
-        // Match clang by only supporting COFF and ELF for now.
-        if self.tcx.sess.target.is_like_osx {
-            return false;
-        }
+        // Respect the direct-access-external-data to override default behavior if present.
+        if let Some(direct) = self.tcx.sess.direct_access_external_data() {
+            return direct;
+        }
         // Static relocation model should force copy relocations everywhere.
-        if self.tcx.sess.relocation_model() == RelocModel::Static {
-            return true;
-        }
-        // With pie relocation model calls of functions defined in the translation
-        // unit can use copy relocations.
-        self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration
+        self.tcx.sess.relocation_model() == RelocModel::Static
     }
 }
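The reordered checks above read most clearly as a single decision ladder. Here is a minimal sketch of that order using simplified stand-in types; the `Session` struct, its field names, and the function name below are illustrative assumptions, not the real `rustc_codegen_llvm` API.

```rust
#[derive(PartialEq)]
enum RelocModel { Static, Pie, Other }

// Illustrative stand-in for the session/target queries used in the real code.
struct Session {
    is_like_osx: bool,
    relocation_model: RelocModel,
    // Value of the new `-Z direct-access-external-data` flag, if the user passed it.
    direct_access_external_data: Option<bool>,
}

fn assume_direct_access(sess: &Session, is_declaration: bool, is_thread_local: bool) -> bool {
    // Copy relocations are only supported for COFF/ELF-like targets; macOS is excluded.
    if sess.is_like_osx {
        return false;
    }
    // Under the PIE relocation model, definitions in the current translation unit
    // can be accessed directly.
    if sess.relocation_model == RelocModel::Pie && !is_declaration {
        return true;
    }
    // Thread-local variables generally don't support copy relocations.
    if is_thread_local {
        return false;
    }
    // The new flag, when present, overrides the default behaviour.
    if let Some(direct) = sess.direct_access_external_data {
        return direct;
    }
    // The static relocation model forces copy relocations everywhere.
    sess.relocation_model == RelocModel::Static
}
```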

View file

@@ -1,5 +1,3 @@
-use std::mem;
 use either::{Left, Right};
 use rustc_hir::def::DefKind;
@@ -24,12 +22,13 @@ use crate::interpret::{
 };
 // Returns a pointer to where the result lives
+#[instrument(level = "trace", skip(ecx, body), ret)]
 fn eval_body_using_ecx<'mir, 'tcx>(
     ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
     cid: GlobalId<'tcx>,
     body: &'mir mir::Body<'tcx>,
 ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
-    debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+    trace!(?ecx.param_env);
     let tcx = *ecx.tcx;
     assert!(
         cid.promoted.is_some()
@@ -75,11 +74,8 @@ fn eval_body_using_ecx<'mir, 'tcx>(
             None => InternKind::Constant,
         }
     };
-    let check_alignment = mem::replace(&mut ecx.machine.check_alignment, CheckAlignment::No);
+    // interning doesn't need to respect alignment
     intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
-    ecx.machine.check_alignment = check_alignment;
-    debug!("eval_body_using_ecx done: {:?}", ret);
     Ok(ret)
 }
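The deleted `check_alignment` lines were an instance of the common save-and-restore idiom built on `mem::replace`; since interning no longer consults the alignment flag, the whole dance can go. For reference, the idiom looks roughly like this, with a toy `Machine` type rather than the real interpreter state:

```rust
use std::mem;

struct Machine {
    check_alignment: bool,
}

fn with_alignment_checks_disabled<R>(m: &mut Machine, f: impl FnOnce(&mut Machine) -> R) -> R {
    // Swap the flag out, remembering the previous value...
    let saved = mem::replace(&mut m.check_alignment, false);
    let result = f(m);
    // ...and put it back afterwards, mirroring the removed code.
    m.check_alignment = saved;
    result
}
```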

View file

@@ -41,13 +41,12 @@ pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
 /// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
 /// already mutable (as a sanity check).
 ///
-/// `recursive_alloc` is called for all recursively encountered allocations.
+/// Returns an iterator over all relocations referred to by this allocation.
 fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
     ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
     alloc_id: AllocId,
     mutability: Mutability,
-    mut recursive_alloc: impl FnMut(&InterpCx<'mir, 'tcx, M>, CtfeProvenance),
-) -> Result<(), ()> {
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> {
     trace!("intern_shallow {:?}", alloc_id);
     // remove allocation
     let Some((_kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
@@ -65,14 +64,10 @@ fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
             assert_eq!(alloc.mutability, Mutability::Mut);
         }
     }
-    // record child allocations
-    for &(_, prov) in alloc.provenance().ptrs().iter() {
-        recursive_alloc(ecx, prov);
-    }
     // link the alloc id to the actual allocation
     let alloc = ecx.tcx.mk_const_alloc(alloc);
     ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
-    Ok(())
+    Ok(alloc.0.0.provenance().ptrs().iter().map(|&(_, prov)| prov))
 }
 /// How a constant value should be interned.
@@ -128,12 +123,16 @@ pub fn intern_const_alloc_recursive<
         }
     };
-    // Initialize recursive interning.
+    // Intern the base allocation, and initialize todo list for recursive interning.
     let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id();
-    let mut todo = vec![(base_alloc_id, base_mutability)];
+    // First we intern the base allocation, as it requires a different mutability.
+    // This gives us the initial set of nested allocations, which will then all be processed
+    // recursively in the loop below.
+    let mut todo: Vec<_> =
+        intern_shallow(ecx, base_alloc_id, base_mutability).unwrap().map(|prov| prov).collect();
     // We need to distinguish "has just been interned" from "was already in `tcx`",
     // so we track this in a separate set.
-    let mut just_interned = FxHashSet::default();
+    let mut just_interned: FxHashSet<_> = std::iter::once(base_alloc_id).collect();
     // Whether we encountered a bad mutable pointer.
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
@@ -147,52 +146,56 @@ pub fn intern_const_alloc_recursive<
     // raw pointers, so we cannot rely on validation to catch them -- and since interning runs
     // before validation, and interning doesn't know the type of anything, this means we can't show
     // better errors. Maybe we should consider doing validation before interning in the future.
-    while let Some((alloc_id, mutability)) = todo.pop() {
+    while let Some(prov) = todo.pop() {
+        let alloc_id = prov.alloc_id();
+        // Crucially, we check this *before* checking whether the `alloc_id`
+        // has already been interned. The point of this check is to ensure that when
+        // there are multiple pointers to the same allocation, they are *all* immutable.
+        // Therefore it would be bad if we only checked the first pointer to any given
+        // allocation.
+        // (It is likely not possible to actually have multiple pointers to the same allocation,
+        // so alternatively we could also check that and ICE if there are multiple such pointers.)
+        if intern_kind != InternKind::Promoted
+            && inner_mutability == Mutability::Not
+            && !prov.immutable()
+        {
+            if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
+                && !just_interned.contains(&alloc_id)
+            {
+                // This is a pointer to some memory from another constant. We encounter mutable
+                // pointers to such memory since we do not always track immutability through
+                // these "global" pointers. Allowing them is harmless; the point of these checks
+                // during interning is to justify why we intern the *new* allocations immutably,
+                // so we can completely ignore existing allocations. We also don't need to add
+                // this to the todo list, since after all it is already interned.
+                continue;
+            }
+            // Found a mutable pointer inside a const where inner allocations should be
+            // immutable. We exclude promoteds from this, since things like `&mut []` and
+            // `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
+            // on the promotion analysis not screwing up to ensure that it is sound to intern
+            // promoteds as immutable.
+            found_bad_mutable_pointer = true;
+        }
         if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
             // Already interned.
             debug_assert!(!ecx.memory.alloc_map.contains_key(&alloc_id));
             continue;
         }
         just_interned.insert(alloc_id);
-        intern_shallow(ecx, alloc_id, mutability, |ecx, prov| {
-            let alloc_id = prov.alloc_id();
-            if intern_kind != InternKind::Promoted
-                && inner_mutability == Mutability::Not
-                && !prov.immutable()
-            {
-                if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
-                    && !just_interned.contains(&alloc_id)
-                {
-                    // This is a pointer to some memory from another constant. We encounter mutable
-                    // pointers to such memory since we do not always track immutability through
-                    // these "global" pointers. Allowing them is harmless; the point of these checks
-                    // during interning is to justify why we intern the *new* allocations immutably,
-                    // so we can completely ignore existing allocations. We also don't need to add
-                    // this to the todo list, since after all it is already interned.
-                    return;
-                }
-                // Found a mutable pointer inside a const where inner allocations should be
-                // immutable. We exclude promoteds from this, since things like `&mut []` and
-                // `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
-                // on the promotion analysis not screwing up to ensure that it is sound to intern
-                // promoteds as immutable.
-                found_bad_mutable_pointer = true;
-            }
-            // We always intern with `inner_mutability`, and furthermore we ensured above that if
-            // that is "immutable", then there are *no* mutable pointers anywhere in the newly
-            // interned memory -- justifying that we can indeed intern immutably. However this also
-            // means we can *not* easily intern immutably here if `prov.immutable()` is true and
-            // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
-            // we'd have to somehow check that they are *all* immutable before deciding that this
-            // allocation can be made immutable. In the future we could consider analyzing all
-            // pointers before deciding which allocations can be made immutable; but for now we are
-            // okay with losing some potential for immutability here. This can anyway only affect
-            // `static mut`.
-            todo.push((alloc_id, inner_mutability));
-        })
-        .map_err(|()| {
-            ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
-        })?;
+        // We always intern with `inner_mutability`, and furthermore we ensured above that if
+        // that is "immutable", then there are *no* mutable pointers anywhere in the newly
+        // interned memory -- justifying that we can indeed intern immutably. However this also
+        // means we can *not* easily intern immutably here if `prov.immutable()` is true and
+        // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
+        // we'd have to somehow check that they are *all* immutable before deciding that this
+        // allocation can be made immutable. In the future we could consider analyzing all
+        // pointers before deciding which allocations can be made immutable; but for now we are
+        // okay with losing some potential for immutability here. This can anyway only affect
+        // `static mut`.
+        todo.extend(intern_shallow(ecx, alloc_id, inner_mutability).map_err(|()| {
+            ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
+        })?);
     }
     if found_bad_mutable_pointer {
         return Err(ecx
@@ -220,13 +223,13 @@ pub fn intern_const_alloc_for_constprop<
         return Ok(());
     }
     // Move allocation to `tcx`.
-    intern_shallow(ecx, alloc_id, Mutability::Not, |_ecx, _| {
+    for _ in intern_shallow(ecx, alloc_id, Mutability::Not).map_err(|()| err_ub!(DeadLocal))? {
         // We are not doing recursive interning, so we don't currently support provenance.
         // (If this assertion ever triggers, we should just implement a
         // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
         panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
-    })
-    .map_err(|()| err_ub!(DeadLocal).into())
+    }
+    Ok(())
 }
 impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
@@ -247,15 +250,14 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
         let dest = self.allocate(layout, MemoryKind::Stack)?;
         f(self, &dest.clone().into())?;
         let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance
-        intern_shallow(self, alloc_id, Mutability::Not, |ecx, prov| {
+        for prov in intern_shallow(self, alloc_id, Mutability::Not).unwrap() {
             // We are not doing recursive interning, so we don't currently support provenance.
             // (If this assertion ever triggers, we should just implement a
             // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
-            if !ecx.tcx.try_get_global_alloc(prov.alloc_id()).is_some() {
+            if !self.tcx.try_get_global_alloc(prov.alloc_id()).is_some() {
                 panic!("`intern_with_temp_alloc` with nested allocations");
             }
-        })
-        .unwrap();
+        }
         Ok(alloc_id)
     }
 }
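The interface change above turns the old callback-driven recursion into a flat worklist: `intern_shallow` now returns the nested allocation IDs and the caller owns the `todo` vector. A self-contained sketch of that traversal shape, with toy types standing in for the interpreter's memory (nothing here is the real API):

```rust
use std::collections::{HashMap, HashSet};

type AllocId = u32;

// Toy memory: each live allocation just records which allocations it points to.
struct Memory {
    allocs: HashMap<AllocId, Vec<AllocId>>,
}

impl Memory {
    // Analogue of the new `intern_shallow`: intern one allocation and hand back
    // the IDs it references, leaving the recursion policy to the caller.
    fn intern_shallow(&mut self, id: AllocId) -> Result<Vec<AllocId>, ()> {
        self.allocs.remove(&id).ok_or(())
    }
}

fn intern_recursive(mem: &mut Memory, root: AllocId) -> Result<(), ()> {
    // Interning the root seeds the todo list with its nested allocations.
    let mut todo: Vec<AllocId> = mem.intern_shallow(root)?;
    let mut seen: HashSet<AllocId> = std::iter::once(root).collect();
    while let Some(id) = todo.pop() {
        if !seen.insert(id) {
            continue; // already handled
        }
        // Each shallow interning step may enqueue further nested allocations.
        todo.extend(mem.intern_shallow(id)?);
    }
    Ok(())
}
```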

View file

@@ -24,7 +24,9 @@ use rustc_data_structures::profiling::{
     get_resident_set_size, print_time_passes_entry, TimePassesFormat,
 };
 use rustc_errors::registry::Registry;
-use rustc_errors::{markdown, ColorConfig, DiagCtxt, ErrCode, ErrorGuaranteed, PResult};
+use rustc_errors::{
+    markdown, ColorConfig, DiagCtxt, ErrCode, ErrorGuaranteed, FatalError, PResult,
+};
 use rustc_feature::find_gated_cfg;
 use rustc_interface::util::{self, collect_crate_types, get_codegen_backend};
 use rustc_interface::{interface, Queries};
@@ -1231,11 +1233,10 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
 /// The compiler currently unwinds with a special sentinel value to abort
 /// compilation on fatal errors. This function catches that sentinel and turns
 /// the panic into a `Result` instead.
-pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorGuaranteed> {
+pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, FatalError> {
     catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
         if value.is::<rustc_errors::FatalErrorMarker>() {
-            #[allow(deprecated)]
-            ErrorGuaranteed::unchecked_claim_error_was_emitted()
+            FatalError
         } else {
             panic::resume_unwind(value);
         }
@@ -1245,9 +1246,9 @@ pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorGuarantee
 /// Variant of `catch_fatal_errors` for the `interface::Result` return type
 /// that also computes the exit code.
 pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 {
-    match catch_fatal_errors(f).flatten() {
-        Ok(()) => EXIT_SUCCESS,
-        Err(_) => EXIT_FAILURE,
+    match catch_fatal_errors(f) {
+        Ok(Ok(())) => EXIT_SUCCESS,
+        _ => EXIT_FAILURE,
     }
 }
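With `catch_fatal_errors` now returning `Result<R, FatalError>` while the wrapped closure itself returns a `Result`, the caller sees a nested `Result`, which is why `catch_with_exit_code` matches on `Ok(Ok(()))` instead of calling `.flatten()`. A tiny sketch with placeholder error types (not rustc's real ones):

```rust
#[allow(dead_code)]
struct Fatal;
#[allow(dead_code)]
struct Reported;

fn catch_fatal<R>(f: impl FnOnce() -> R) -> Result<R, Fatal> {
    // The real function wraps `catch_unwind` and looks for the fatal-error marker.
    Ok(f())
}

fn exit_code(f: impl FnOnce() -> Result<(), Reported>) -> i32 {
    // `catch_fatal(f)` has type `Result<Result<(), Reported>, Fatal>`,
    // so only the doubly-Ok case counts as success.
    match catch_fatal(f) {
        Ok(Ok(())) => 0,
        _ => 1,
    }
}

fn main() {
    assert_eq!(exit_code(|| Ok(())), 0);
}
```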

View file

@@ -99,16 +99,20 @@ impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> {
     }
     /// `ErrorGuaranteed::emit_producing_guarantee` uses this.
-    // FIXME(eddyb) make `ErrorGuaranteed` impossible to create outside `.emit()`.
     fn emit_producing_error_guaranteed(mut self) -> ErrorGuaranteed {
         let diag = self.take_diag();
-        // Only allow a guarantee if the `level` wasn't switched to a
-        // non-error. The field isn't `pub`, but the whole `Diagnostic` can be
-        // overwritten with a new one, thanks to `DerefMut`.
+        // The only error levels that produce `ErrorGuaranteed` are
+        // `Error` and `DelayedBug`. But `DelayedBug` should never occur here
+        // because delayed bugs have their level changed to `Bug` when they are
+        // actually printed, so they produce an ICE.
+        //
+        // (Also, even though `level` isn't `pub`, the whole `Diagnostic` could
+        // be overwritten with a new one thanks to `DerefMut`. So this assert
+        // protects against that, too.)
         assert!(
-            diag.is_error(),
-            "emitted non-error ({:?}) diagnostic from `DiagnosticBuilder<ErrorGuaranteed>`",
+            matches!(diag.level, Level::Error | Level::DelayedBug),
+            "invalid diagnostic level ({:?})",
             diag.level,
         );

View file

@@ -708,7 +708,7 @@ impl DiagCtxt {
     }
     /// Emit all stashed diagnostics.
-    pub fn emit_stashed_diagnostics(&self) -> Option<ErrorGuaranteed> {
+    pub fn emit_stashed_diagnostics(&self) {
         self.inner.borrow_mut().emit_stashed_diagnostics()
     }
@@ -931,8 +931,9 @@ impl DiagCtxt {
     /// This excludes lint errors and delayed bugs.
     pub fn has_errors(&self) -> Option<ErrorGuaranteed> {
         self.inner.borrow().has_errors().then(|| {
+            // FIXME(nnethercote) find a way to store an `ErrorGuaranteed`.
             #[allow(deprecated)]
-            ErrorGuaranteed::unchecked_claim_error_was_emitted()
+            ErrorGuaranteed::unchecked_error_guaranteed()
         })
     }
@@ -942,8 +943,9 @@ impl DiagCtxt {
         let inner = self.inner.borrow();
         let result = inner.has_errors() || inner.lint_err_count > 0;
         result.then(|| {
+            // FIXME(nnethercote) find a way to store an `ErrorGuaranteed`.
             #[allow(deprecated)]
-            ErrorGuaranteed::unchecked_claim_error_was_emitted()
+            ErrorGuaranteed::unchecked_error_guaranteed()
         })
     }
@@ -954,8 +956,9 @@
         let result =
             inner.has_errors() || inner.lint_err_count > 0 || !inner.delayed_bugs.is_empty();
         result.then(|| {
+            // FIXME(nnethercote) find a way to store an `ErrorGuaranteed`.
             #[allow(deprecated)]
-            ErrorGuaranteed::unchecked_claim_error_was_emitted()
+            ErrorGuaranteed::unchecked_error_guaranteed()
         })
     }
@@ -1216,9 +1219,8 @@ impl DiagCtxt {
 // `DiagCtxtInner::foo`.
 impl DiagCtxtInner {
     /// Emit all stashed diagnostics.
-    fn emit_stashed_diagnostics(&mut self) -> Option<ErrorGuaranteed> {
+    fn emit_stashed_diagnostics(&mut self) {
         let has_errors = self.has_errors();
-        let mut reported = None;
         for (_, diag) in std::mem::take(&mut self.stashed_diagnostics).into_iter() {
             // Decrement the count tracking the stash; emitting will increment it.
             if diag.is_error() {
@@ -1235,12 +1237,11 @@ impl DiagCtxtInner {
                     continue;
                 }
             }
-            let reported_this = self.emit_diagnostic(diag);
-            reported = reported.or(reported_this);
+            self.emit_diagnostic(diag);
         }
-        reported
     }
+    // Return value is only `Some` if the level is `Error` or `DelayedBug`.
     fn emit_diagnostic(&mut self, mut diagnostic: Diagnostic) -> Option<ErrorGuaranteed> {
         assert!(diagnostic.level.can_be_top_or_sub().0);
@@ -1285,7 +1286,7 @@ impl DiagCtxtInner {
                 let backtrace = std::backtrace::Backtrace::capture();
                 self.delayed_bugs.push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace));
                 #[allow(deprecated)]
-                return Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+                return Some(ErrorGuaranteed::unchecked_error_guaranteed());
             }
             GoodPathDelayedBug => {
                 let backtrace = std::backtrace::Backtrace::capture();
@@ -1319,6 +1320,7 @@ impl DiagCtxtInner {
             !self.emitted_diagnostics.insert(diagnostic_hash)
         };
+        let level = diagnostic.level;
         let is_error = diagnostic.is_error();
         let is_lint = diagnostic.is_lint.is_some();
@@ -1355,6 +1357,7 @@ impl DiagCtxtInner {
                 self.emitter.emit_diagnostic(diagnostic);
             }
             if is_error {
                 if is_lint {
                     self.lint_err_count += 1;
@@ -1362,11 +1365,11 @@ impl DiagCtxtInner {
                     self.err_count += 1;
                 }
                 self.panic_if_treat_err_as_bug();
+            }
             #[allow(deprecated)]
-            {
-                guaranteed = Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
-            }
+            if level == Level::Error {
+                guaranteed = Some(ErrorGuaranteed::unchecked_error_guaranteed());
             }
-            }
         });

View file

@@ -344,11 +344,18 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
         kind: hir::ExprKind::Closure(hir::Closure { kind, .. }), ..
     }) = node
     {
+        // See `ClosureArgsParts`, `CoroutineArgsParts`, and `CoroutineClosureArgsParts`
+        // for info on the usage of each of these fields.
         let dummy_args = match kind {
             ClosureKind::Closure => &["<closure_kind>", "<closure_signature>", "<upvars>"][..],
-            ClosureKind::Coroutine(_) => {
-                &["<resume_ty>", "<yield_ty>", "<return_ty>", "<witness>", "<upvars>"][..]
-            }
+            ClosureKind::Coroutine(_) => &[
+                "<coroutine_kind>",
+                "<resume_ty>",
+                "<yield_ty>",
+                "<return_ty>",
+                "<witness>",
+                "<upvars>",
+            ][..],
             ClosureKind::CoroutineClosure(_) => &[
                 "<closure_kind>",
                 "<closure_signature_parts>",

View file

@@ -1,7 +1,11 @@
 use crate::coercion::{AsCoercionSite, CoerceMany};
 use crate::{Diverges, Expectation, FnCtxt, Needs};
-use rustc_errors::Diagnostic;
-use rustc_hir::{self as hir, ExprKind};
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir::{
+    self as hir,
+    def::{CtorOf, DefKind, Res},
+    ExprKind, PatKind,
+};
 use rustc_hir_pretty::ty_to_string;
 use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
 use rustc_infer::traits::Obligation;
@@ -273,7 +277,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// Returns `true` if there was an error forcing the coercion to the `()` type.
     pub(super) fn if_fallback_coercion<T>(
         &self,
-        span: Span,
+        if_span: Span,
+        cond_expr: &'tcx hir::Expr<'tcx>,
         then_expr: &'tcx hir::Expr<'tcx>,
         coercion: &mut CoerceMany<'tcx, '_, T>,
     ) -> bool
@@ -283,29 +288,106 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // If this `if` expr is the parent's function return expr,
         // the cause of the type coercion is the return type, point at it. (#25228)
         let hir_id = self.tcx.hir().parent_id(self.tcx.hir().parent_id(then_expr.hir_id));
-        let ret_reason = self.maybe_get_coercion_reason(hir_id, span);
-        let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse);
+        let ret_reason = self.maybe_get_coercion_reason(hir_id, if_span);
+        let cause = self.cause(if_span, ObligationCauseCode::IfExpressionWithNoElse);
         let mut error = false;
         coercion.coerce_forced_unit(
             self,
             &cause,
-            |err| {
-                if let Some((span, msg)) = &ret_reason {
-                    err.span_label(*span, msg.clone());
-                } else if let ExprKind::Block(block, _) = &then_expr.kind
-                    && let Some(expr) = &block.expr
-                {
-                    err.span_label(expr.span, "found here");
-                }
-                err.note("`if` expressions without `else` evaluate to `()`");
-                err.help("consider adding an `else` block that evaluates to the expected type");
-                error = true;
-            },
+            |err| self.explain_if_expr(err, ret_reason, if_span, cond_expr, then_expr, &mut error),
             false,
         );
         error
     }
/// Explain why `if` expressions without `else` evaluate to `()` and detect likely irrefutable
/// `if let PAT = EXPR {}` expressions that could be turned into `let PAT = EXPR;`.
fn explain_if_expr(
&self,
err: &mut Diagnostic,
ret_reason: Option<(Span, String)>,
if_span: Span,
cond_expr: &'tcx hir::Expr<'tcx>,
then_expr: &'tcx hir::Expr<'tcx>,
error: &mut bool,
) {
if let Some((if_span, msg)) = ret_reason {
err.span_label(if_span, msg.clone());
} else if let ExprKind::Block(block, _) = then_expr.kind
&& let Some(expr) = block.expr
{
err.span_label(expr.span, "found here");
}
err.note("`if` expressions without `else` evaluate to `()`");
err.help("consider adding an `else` block that evaluates to the expected type");
*error = true;
if let ExprKind::Let(hir::Let { span, pat, init, .. }) = cond_expr.kind
&& let ExprKind::Block(block, _) = then_expr.kind
// Refutability checks occur on the MIR, so we approximate it here by checking
// if we have an enum with a single variant or a struct in the pattern.
&& let PatKind::TupleStruct(qpath, ..) | PatKind::Struct(qpath, ..) = pat.kind
&& let hir::QPath::Resolved(_, path) = qpath
{
match path.res {
Res::Def(DefKind::Ctor(CtorOf::Struct, _), _) => {
// Structs are always irrefutable. Their fields might not be, but we
// don't check for that here, it's only an approximation.
}
Res::Def(DefKind::Ctor(CtorOf::Variant, _), def_id)
if self
.tcx
.adt_def(self.tcx.parent(self.tcx.parent(def_id)))
.variants()
.len()
== 1 =>
{
// There's only a single variant in the `enum`, so we can suggest the
// irrefutable `let` instead of `if let`.
}
_ => return,
}
let mut sugg = vec![
// Remove the `if`
(if_span.until(*span), String::new()),
];
match (block.stmts, block.expr) {
([first, ..], Some(expr)) => {
let padding = self
.tcx
.sess
.source_map()
.indentation_before(first.span)
.unwrap_or_else(|| String::new());
sugg.extend([
(init.span.between(first.span), format!(";\n{padding}")),
(expr.span.shrink_to_hi().with_hi(block.span.hi()), String::new()),
]);
}
([], Some(expr)) => {
let padding = self
.tcx
.sess
.source_map()
.indentation_before(expr.span)
.unwrap_or_else(|| String::new());
sugg.extend([
(init.span.between(expr.span), format!(";\n{padding}")),
(expr.span.shrink_to_hi().with_hi(block.span.hi()), String::new()),
]);
}
// If there's no value in the body, then the `if` expression would already
// be of type `()`, so checking for those cases is unnecessary.
(_, None) => return,
}
err.multipart_suggestion(
"consider using an irrefutable `let` binding instead",
sugg,
Applicability::MaybeIncorrect,
);
}
}
    pub fn maybe_get_coercion_reason(
        &self,
        hir_id: hir::HirId,
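For a concrete picture of what the new `explain_if_expr` suggestion targets, here is ordinary user code (not compiler code) with a tail `if let` on an irrefutable struct pattern, and the rewrite the diagnostic proposes:

```rust
struct Point {
    x: i32,
    y: i32,
}

fn get() -> Point {
    Point { x: 1, y: 2 }
}

fn sum() -> i32 {
    // Without an `else`, a tail `if let` evaluates to `()` and fails to type-check:
    //
    //     if let Point { x, y } = get() {
    //         x + y
    //     }
    //
    // The new suggestion: use an irrefutable `let` binding instead.
    let Point { x, y } = get();
    x + y
}

fn main() {
    assert_eq!(sum(), 3);
}
```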

View file

@@ -1118,7 +1118,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             // We won't diverge unless both branches do (or the condition does).
             self.diverges.set(cond_diverges | then_diverges & else_diverges);
         } else {
-            self.if_fallback_coercion(sp, then_expr, &mut coerce);
+            self.if_fallback_coercion(sp, cond_expr, then_expr, &mut coerce);
             // If the condition is false we can't diverge.
             self.diverges.set(cond_diverges);

View file

@@ -749,6 +749,7 @@ fn test_unstable_options_tracking_hash() {
     tracked!(debug_macros, true);
     tracked!(default_hidden_visibility, Some(true));
     tracked!(dep_info_omit_d_target, true);
+    tracked!(direct_access_external_data, Some(true));
     tracked!(dual_proc_macros, true);
     tracked!(dwarf_version, Some(5));
     tracked!(emit_thin_lto, false);

View file

@@ -30,6 +30,7 @@
 #![feature(assert_matches)]
 #![feature(box_patterns)]
 #![feature(core_intrinsics)]
+#![feature(const_type_name)]
 #![feature(discriminant_kind)]
 #![feature(exhaustive_patterns)]
 #![feature(coroutines)]

View file

@@ -140,8 +140,12 @@ fn to_profiler_name(type_name: &'static str) -> &'static str {
 /// loop that goes over each available MIR and applies `run_pass`.
 pub trait MirPass<'tcx> {
     fn name(&self) -> &'static str {
-        let name = std::any::type_name::<Self>();
-        if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }
+        // FIXME Simplify the implementation once more `str` methods get const-stable.
+        // See copypaste in `MirLint`
+        const {
+            let name = std::any::type_name::<Self>();
+            crate::util::common::c_name(name)
+        }
     }
     fn profiler_name(&self) -> &'static str {

View file

@@ -765,7 +765,14 @@ fn polymorphize<'tcx>(
     let def_id = instance.def_id();
     let upvars_ty = match tcx.type_of(def_id).skip_binder().kind() {
         ty::Closure(..) => Some(args.as_closure().tupled_upvars_ty()),
-        ty::Coroutine(..) => Some(args.as_coroutine().tupled_upvars_ty()),
+        ty::Coroutine(..) => {
+            assert_eq!(
+                args.as_coroutine().kind_ty(),
+                tcx.types.unit,
+                "polymorphization does not support coroutines from async closures"
+            );
+            Some(args.as_coroutine().tupled_upvars_ty())
+        }
         _ => None,
     };
     let has_upvars = upvars_ty.is_some_and(|ty| !ty.tuple_fields().is_empty());

View file

@@ -65,3 +65,19 @@ pub fn indenter() -> Indenter {
     debug!(">>");
     Indenter { _cannot_construct_outside_of_this_module: () }
 }
+
+// const wrapper for `if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }`
+pub const fn c_name(name: &'static str) -> &'static str {
+    // FIXME Simplify the implementation once more `str` methods get const-stable.
+    // and inline into call site
+    let bytes = name.as_bytes();
+    let mut i = bytes.len();
+    while i > 0 && bytes[i - 1] != b':' {
+        i = i - 1;
+    }
+    let (_, bytes) = bytes.split_at(i);
+    match std::str::from_utf8(bytes) {
+        Ok(name) => name,
+        Err(_) => name,
+    }
+}
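As a quick check of what `c_name` computes, the helper (copied verbatim from the hunk above so this example is self-contained) keeps only the text after the last `:`, which for `std::any::type_name` output is the unqualified type name; the example path below is only illustrative:

```rust
pub const fn c_name(name: &'static str) -> &'static str {
    let bytes = name.as_bytes();
    let mut i = bytes.len();
    while i > 0 && bytes[i - 1] != b':' {
        i = i - 1;
    }
    let (_, bytes) = bytes.split_at(i);
    match std::str::from_utf8(bytes) {
        Ok(name) => name,
        Err(_) => name,
    }
}

fn main() {
    // Usable in const context, as in the `const { .. }` block of `MirPass::name`.
    const NAME: &str = c_name("rustc_mir_transform::simplify::SimplifyCfg");
    assert_eq!(NAME, "SimplifyCfg");
    // A name with no path separator is returned unchanged.
    assert_eq!(c_name("NoPath"), "NoPath");
}
```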

View file

@@ -1,4 +1,5 @@
-use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
@@ -38,19 +39,27 @@ impl Debug for BcbCounter {
     }
 }
+
+#[derive(Debug)]
+pub(super) enum CounterIncrementSite {
+    Node { bcb: BasicCoverageBlock },
+    Edge { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock },
+}
 /// Generates and stores coverage counter and coverage expression information
 /// associated with nodes/edges in the BCB graph.
 pub(super) struct CoverageCounters {
-    next_counter_id: CounterId,
+    /// List of places where a counter-increment statement should be injected
+    /// into MIR, each with its corresponding counter ID.
+    counter_increment_sites: IndexVec<CounterId, CounterIncrementSite>,
     /// Coverage counters/expressions that are associated with individual BCBs.
     bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
     /// Coverage counters/expressions that are associated with the control-flow
     /// edge between two BCBs.
     ///
-    /// The iteration order of this map can affect the precise contents of MIR,
-    /// so we use `FxIndexMap` to avoid query stability hazards.
-    bcb_edge_counters: FxIndexMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
+    /// We currently don't iterate over this map, but if we do in the future,
+    /// switch it back to `FxIndexMap` to avoid query stability hazards.
+    bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
     /// Tracks which BCBs have a counter associated with some incoming edge.
     /// Only used by assertions, to verify that BCBs with incoming edge
     /// counters do not have their own physical counters (expressions are allowed).
@@ -71,9 +80,9 @@ impl CoverageCounters {
         let num_bcbs = basic_coverage_blocks.num_nodes();
         let mut this = Self {
-            next_counter_id: CounterId::START,
+            counter_increment_sites: IndexVec::new(),
             bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
-            bcb_edge_counters: FxIndexMap::default(),
+            bcb_edge_counters: FxHashMap::default(),
             bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
             expressions: IndexVec::new(),
         };
@@ -84,8 +93,8 @@ impl CoverageCounters {
         this
     }
-    fn make_counter(&mut self) -> BcbCounter {
-        let id = self.next_counter();
+    fn make_counter(&mut self, site: CounterIncrementSite) -> BcbCounter {
+        let id = self.counter_increment_sites.push(site);
         BcbCounter::Counter { id }
     }
@@ -103,15 +112,8 @@ impl CoverageCounters {
         self.make_expression(lhs, Op::Add, rhs)
     }
-    /// Counter IDs start from one and go up.
-    fn next_counter(&mut self) -> CounterId {
-        let next = self.next_counter_id;
-        self.next_counter_id = self.next_counter_id + 1;
-        next
-    }
     pub(super) fn num_counters(&self) -> usize {
-        self.next_counter_id.as_usize()
+        self.counter_increment_sites.len()
     }
     #[cfg(test)]
@@ -171,22 +173,26 @@ impl CoverageCounters {
         self.bcb_counters[bcb]
     }
-    pub(super) fn bcb_node_counters(
-        &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, &BcbCounter)> {
-        self.bcb_counters
-            .iter_enumerated()
-            .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?)))
+    /// Returns an iterator over all the nodes/edges in the coverage graph that
+    /// should have a counter-increment statement injected into MIR, along with
+    /// each site's corresponding counter ID.
+    pub(super) fn counter_increment_sites(
+        &self,
+    ) -> impl Iterator<Item = (CounterId, &CounterIncrementSite)> {
+        self.counter_increment_sites.iter_enumerated()
     }
-    /// For each edge in the BCB graph that has an associated counter, yields
-    /// that edge's *from* and *to* nodes, and its counter.
-    pub(super) fn bcb_edge_counters(
-        &self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, BasicCoverageBlock, &BcbCounter)> {
-        self.bcb_edge_counters
-            .iter()
-            .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind))
+    /// Returns an iterator over the subset of BCB nodes that have been associated
+    /// with a counter *expression*, along with the ID of that expression.
+    pub(super) fn bcb_nodes_with_coverage_expressions(
+        &self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, ExpressionId)> + Captures<'_> {
+        self.bcb_counters.iter_enumerated().filter_map(|(bcb, &counter_kind)| match counter_kind {
+            // Yield the BCB along with its associated expression ID.
+            Some(BcbCounter::Expression { id }) => Some((bcb, id)),
+            // This BCB is associated with a counter or nothing, so skip it.
+            Some(BcbCounter::Counter { .. }) | None => None,
+        })
     }
     pub(super) fn into_expressions(self) -> IndexVec<ExpressionId, Expression> {
@@ -339,7 +345,8 @@ impl<'a> MakeBcbCounters<'a> {
         // program results in a tight infinite loop, but it should still compile.
         let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb);
         if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
-            let counter_kind = self.coverage_counters.make_counter();
+            let counter_kind =
+                self.coverage_counters.make_counter(CounterIncrementSite::Node { bcb });
             if one_path_to_target {
                 debug!("{bcb:?} gets a new counter: {counter_kind:?}");
             } else {
@@ -401,7 +408,8 @@ impl<'a> MakeBcbCounters<'a> {
         }
         // Make a new counter to count this edge.
-        let counter_kind = self.coverage_counters.make_counter();
+        let counter_kind =
+            self.coverage_counters.make_counter(CounterIncrementSite::Edge { from_bcb, to_bcb });
         debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}");
         self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
     }
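The bookkeeping change above is a common pattern: instead of a hand-rolled `next_counter_id` plus separate side tables, push each increment site into an index-keyed vector and let the returned index *be* the counter ID. A plain-`Vec` sketch of the idea (the real code uses rustc's `IndexVec` and coverage types; the names below are illustrative):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct CounterId(usize);

#[derive(Debug)]
enum IncrementSite {
    Node { bcb: usize },
    Edge { from_bcb: usize, to_bcb: usize },
}

#[derive(Default)]
struct Counters {
    // The index into this vector doubles as the counter ID,
    // so `num_counters` is simply `len()`.
    sites: Vec<IncrementSite>,
}

impl Counters {
    fn make_counter(&mut self, site: IncrementSite) -> CounterId {
        let id = CounterId(self.sites.len());
        self.sites.push(site);
        id
    }

    fn num_counters(&self) -> usize {
        self.sites.len()
    }
}

fn main() {
    let mut counters = Counters::default();
    let c0 = counters.make_counter(IncrementSite::Node { bcb: 0 });
    let c1 = counters.make_counter(IncrementSite::Edge { from_bcb: 0, to_bcb: 1 });
    assert_eq!((c0, c1), (CounterId(0), CounterId(1)));
    assert_eq!(counters.num_counters(), 2);
}
```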

View file

@@ -7,7 +7,7 @@ mod spans;
 #[cfg(test)]
 mod tests;
-use self::counters::{BcbCounter, CoverageCounters};
+use self::counters::{CounterIncrementSite, CoverageCounters};
 use self::graph::{BasicCoverageBlock, CoverageGraph};
 use self::spans::{BcbMapping, BcbMappingKind, CoverageSpans};
@@ -155,61 +155,52 @@ fn inject_coverage_statements<'tcx>(
     bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool,
     coverage_counters: &CoverageCounters,
 ) {
-    // Process the counters associated with BCB nodes.
-    for (bcb, counter_kind) in coverage_counters.bcb_node_counters() {
-        let do_inject = match counter_kind {
-            // Counter-increment statements always need to be injected.
-            BcbCounter::Counter { .. } => true,
-            // The only purpose of expression-used statements is to detect
-            // when a mapping is unreachable, so we only inject them for
-            // expressions with one or more mappings.
-            BcbCounter::Expression { .. } => bcb_has_coverage_spans(bcb),
-        };
-        if do_inject {
-            inject_statement(
-                mir_body,
-                make_mir_coverage_kind(counter_kind),
-                basic_coverage_blocks[bcb].leader_bb(),
-            );
-        }
-    }
-    // Process the counters associated with BCB edges.
-    for (from_bcb, to_bcb, counter_kind) in coverage_counters.bcb_edge_counters() {
-        let do_inject = match counter_kind {
-            // Counter-increment statements always need to be injected.
-            BcbCounter::Counter { .. } => true,
-            // BCB-edge expressions never have mappings, so they never need
-            // a corresponding statement.
-            BcbCounter::Expression { .. } => false,
-        };
-        if !do_inject {
-            continue;
-        }
-        // We need to inject a coverage statement into a new BB between the
-        // last BB of `from_bcb` and the first BB of `to_bcb`.
-        let from_bb = basic_coverage_blocks[from_bcb].last_bb();
-        let to_bb = basic_coverage_blocks[to_bcb].leader_bb();
-        let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb);
-        debug!(
-            "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
-            requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}",
-        );
-        // Inject a counter into the newly-created BB.
-        inject_statement(mir_body, make_mir_coverage_kind(counter_kind), new_bb);
-    }
-}
-fn make_mir_coverage_kind(counter_kind: &BcbCounter) -> CoverageKind {
-    match *counter_kind {
-        BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id },
-        BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id },
-    }
-}
+    // Inject counter-increment statements into MIR.
+    for (id, counter_increment_site) in coverage_counters.counter_increment_sites() {
+        // Determine the block to inject a counter-increment statement into.
+        // For BCB nodes this is just their first block, but for edges we need
+        // to create a new block between the two BCBs, and inject into that.
+        let target_bb = match *counter_increment_site {
+            CounterIncrementSite::Node { bcb } => basic_coverage_blocks[bcb].leader_bb(),
+            CounterIncrementSite::Edge { from_bcb, to_bcb } => {
+                // Create a new block between the last block of `from_bcb` and
+                // the first block of `to_bcb`.
+                let from_bb = basic_coverage_blocks[from_bcb].last_bb();
+                let to_bb = basic_coverage_blocks[to_bcb].leader_bb();
+                let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb);
+                debug!(
+                    "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \
+                    requires a new MIR BasicBlock {new_bb:?} for counter increment {id:?}",
+                );
+                new_bb
+            }
+        };
+        inject_statement(mir_body, CoverageKind::CounterIncrement { id }, target_bb);
+    }
+    // For each counter expression that is directly associated with at least one
+    // span, we inject an "expression-used" statement, so that coverage codegen
+    // can check whether the injected statement survived MIR optimization.
+    // (BCB edges can't have spans, so we only need to process BCB nodes here.)
+    //
+    // See the code in `rustc_codegen_llvm::coverageinfo::map_data` that deals
+    // with "expressions seen" and "zero terms".
+    for (bcb, expression_id) in coverage_counters
+        .bcb_nodes_with_coverage_expressions()
+        .filter(|&(bcb, _)| bcb_has_coverage_spans(bcb))
+    {
+        inject_statement(
+            mir_body,
+            CoverageKind::ExpressionUsed { id: expression_id },
+            basic_coverage_blocks[bcb].leader_bb(),
+        );
+    }
+}
+/// Given two basic blocks that have a control-flow edge between them, creates
+/// and returns a new block that sits between those blocks.
 fn inject_edge_counter_basic_block(
     mir_body: &mut mir::Body<'_>,
     from_bb: BasicBlock,

View file

@@ -8,18 +8,10 @@ use crate::{lint::lint_body, validate, MirPass};
 pub trait MirLint<'tcx> {
     fn name(&self) -> &'static str {
         // FIXME Simplify the implementation once more `str` methods get const-stable.
+        // See copypaste in `MirPass`
         const {
             let name = std::any::type_name::<Self>();
-            let bytes = name.as_bytes();
-            let mut i = bytes.len();
-            while i > 0 && bytes[i - 1] != b':' {
-                i = i - 1;
-            }
-            let (_, bytes) = bytes.split_at(i);
-            match std::str::from_utf8(bytes) {
-                Ok(name) => name,
-                Err(_) => name,
-            }
+            rustc_middle::util::common::c_name(name)
         }
     }

View file

@@ -1732,7 +1732,7 @@ pub struct UnusedVariableTryPrefix {
 #[derive(Subdiagnostic)]
 pub enum UnusedVariableSugg {
-    #[multipart_suggestion(passes_suggestion, applicability = "machine-applicable")]
+    #[multipart_suggestion(passes_suggestion, applicability = "maybe-incorrect")]
     TryPrefixSugg {
         #[suggestion_part(code = "_{name}")]
         spans: Vec<Span>,
@@ -1771,7 +1771,7 @@ pub struct UnusedVarTryIgnore {
 }
 #[derive(Subdiagnostic)]
-#[multipart_suggestion(passes_suggestion, applicability = "machine-applicable")]
+#[multipart_suggestion(passes_suggestion, applicability = "maybe-incorrect")]
 pub struct UnusedVarTryIgnoreSugg {
     #[suggestion_part(code = "{name}: _")]
     pub shorthands: Vec<Span>,

View file

@@ -767,12 +767,6 @@ impl<'a, Cx: TypeCx> PlaceCtxt<'a, Cx> {
     fn ctor_arity(&self, ctor: &Constructor<Cx>) -> usize {
         self.cx.ctor_arity(ctor, self.ty)
     }
-    fn ctor_sub_tys(
-        &'a self,
-        ctor: &'a Constructor<Cx>,
-    ) -> impl Iterator<Item = Cx::Ty> + ExactSizeIterator + Captures<'a> {
-        self.cx.ctor_sub_tys(ctor, self.ty)
-    }
     fn ctors_for_ty(&self) -> Result<ConstructorSet<Cx>, Cx::Error> {
         self.cx.ctors_for_ty(self.ty)
     }
@@ -828,6 +822,38 @@ impl fmt::Display for ValidityConstraint {
     }
 }
/// Data about a place under investigation.
struct PlaceInfo<Cx: TypeCx> {
/// The type of the place.
ty: Cx::Ty,
/// Whether the place is known to contain valid data.
validity: ValidityConstraint,
/// Whether the place is the scrutinee itself or a subplace of it.
is_scrutinee: bool,
}
impl<Cx: TypeCx> PlaceInfo<Cx> {
fn specialize<'a>(
&'a self,
cx: &'a Cx,
ctor: &'a Constructor<Cx>,
) -> impl Iterator<Item = Self> + ExactSizeIterator + Captures<'a> {
let ctor_sub_tys = cx.ctor_sub_tys(ctor, &self.ty);
let ctor_sub_validity = self.validity.specialize(ctor);
ctor_sub_tys.map(move |ty| PlaceInfo {
ty,
validity: ctor_sub_validity,
is_scrutinee: false,
})
}
}
impl<Cx: TypeCx> Clone for PlaceInfo<Cx> {
fn clone(&self) -> Self {
Self { ty: self.ty.clone(), validity: self.validity, is_scrutinee: self.is_scrutinee }
}
}
 /// Represents a pattern-tuple under investigation.
 // The three lifetimes are:
 // - 'p coming from the input
@@ -1001,10 +1027,9 @@ struct Matrix<'p, Cx: TypeCx> {
     /// each column must have the same type. Each column corresponds to a place within the
     /// scrutinee.
     rows: Vec<MatrixRow<'p, Cx>>,
-    /// Track the type of each column/place.
-    place_ty: SmallVec<[Cx::Ty; 2]>,
-    /// Track for each column/place whether it contains a known valid value.
-    place_validity: SmallVec<[ValidityConstraint; 2]>,
+    /// Track info about each place. Each place corresponds to a column in `rows`, and their types
+    /// must match.
+    place_info: SmallVec<[PlaceInfo<Cx>; 2]>,
     /// Track whether the virtual wildcard row used to compute exhaustiveness is relevant. See top
     /// of the file for details on relevancy.
     wildcard_row_is_relevant: bool,
@@ -1032,10 +1057,10 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> {
         scrut_ty: Cx::Ty,
         scrut_validity: ValidityConstraint,
     ) -> Self {
+        let place_info = PlaceInfo { ty: scrut_ty, validity: scrut_validity, is_scrutinee: true };
         let mut matrix = Matrix {
             rows: Vec::with_capacity(arms.len()),
-            place_ty: smallvec![scrut_ty],
-            place_validity: smallvec![scrut_validity],
+            place_info: smallvec![place_info],
             wildcard_row_is_relevant: true,
         };
         for (row_id, arm) in arms.iter().enumerate() {
@@ -1051,11 +1076,11 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> {
         matrix
     }
-    fn head_ty(&self) -> Option<&Cx::Ty> {
-        self.place_ty.first()
+    fn head_place(&self) -> Option<&PlaceInfo<Cx>> {
+        self.place_info.first()
     }
     fn column_count(&self) -> usize {
-        self.place_ty.len()
+        self.place_info.len()
     }
     fn rows(
@@ -1083,18 +1108,13 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> {
         ctor: &Constructor<Cx>,
         ctor_is_relevant: bool,
     ) -> Result<Matrix<'p, Cx>, Cx::Error> {
-        let ctor_sub_tys = pcx.ctor_sub_tys(ctor);
-        let arity = ctor_sub_tys.len();
-        let specialized_place_ty = ctor_sub_tys.chain(self.place_ty[1..].iter().cloned()).collect();
-        let ctor_sub_validity = self.place_validity[0].specialize(ctor);
-        let specialized_place_validity = std::iter::repeat(ctor_sub_validity)
-            .take(arity)
-            .chain(self.place_validity[1..].iter().copied())
-            .collect();
+        let subfield_place_info = self.place_info[0].specialize(pcx.cx, ctor);
+        let arity = subfield_place_info.len();
+        let specialized_place_info =
+            subfield_place_info.chain(self.place_info[1..].iter().cloned()).collect();
         let mut matrix = Matrix {
             rows: Vec::new(),
-            place_ty: specialized_place_ty,
-            place_validity: specialized_place_validity,
+            place_info: specialized_place_info,
             wildcard_row_is_relevant: self.wildcard_row_is_relevant && ctor_is_relevant,
         };
         for (i, row) in self.rows().enumerate() {
@@ -1127,11 +1147,11 @@ impl<'p, Cx: TypeCx> fmt::Debug for Matrix<'p, Cx> {
             .map(|row| row.iter().map(|pat| format!("{pat:?}")).collect())
             .collect();
         pretty_printed_matrix
-            .push(self.place_validity.iter().map(|validity| format!("{validity}")).collect());
+            .push(self.place_info.iter().map(|place| format!("{}", place.validity)).collect());
         let column_count = self.column_count();
         assert!(self.rows.iter().all(|row| row.len() == column_count));
-        assert!(self.place_validity.len() == column_count);
+        assert!(self.place_info.len() == column_count);
         let column_widths: Vec<usize> = (0..column_count)
             .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0))
             .collect();
@@ -1432,11 +1452,10 @@ fn collect_overlapping_range_endpoints<'p, Cx: TypeCx>(
 /// - unspecialization, where we lift the results from the previous step into results for this step
 ///   (using `apply_constructor` and by updating `row.useful` for each parent row).
 /// This is all explained at the top of the file.
-#[instrument(level = "debug", skip(mcx, is_top_level), ret)]
+#[instrument(level = "debug", skip(mcx), ret)]
 fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>(
     mcx: UsefulnessCtxt<'a, Cx>,
     matrix: &mut Matrix<'p, Cx>,
-    is_top_level: bool,
 ) -> Result<WitnessMatrix<Cx>, Cx::Error> {
     debug_assert!(matrix.rows().all(|r| r.len() == matrix.column_count()));
@@ -1447,7 +1466,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>(
         return Ok(WitnessMatrix::empty());
     }
-    let Some(ty) = matrix.head_ty().cloned() else {
+    let Some(place) = matrix.head_place() else {
         // The base case: there are no columns in the matrix. We are morally pattern-matching on ().
         // A row is useful iff it has no (unguarded) rows above it.
         let mut useful = true; // Whether the next row is useful.
@@ -1467,18 +1486,17 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>(
         };
     };
-    debug!("ty: {ty:?}");
-    let pcx = &PlaceCtxt { cx: mcx.tycx, ty: &ty };
+    let ty = &place.ty.clone(); // Clone it out so we can mutate `matrix` later.
+    let pcx = &PlaceCtxt { cx: mcx.tycx, ty };
+    debug!("ty: {:?}", pcx.ty);
     let ctors_for_ty = pcx.ctors_for_ty()?;
-    // Whether the place/column we are inspecting is known to contain valid data.
-    let place_validity = matrix.place_validity[0];
     // We treat match scrutinees of type `!` or `EmptyEnum` differently.
     let is_toplevel_exception =
-        is_top_level && matches!(ctors_for_ty, ConstructorSet::NoConstructors);
+        place.is_scrutinee && matches!(ctors_for_ty, ConstructorSet::NoConstructors);
     // Whether empty patterns are counted as useful or not. We only warn an empty arm unreachable if
     // it is guaranteed unreachable by the opsem (i.e. if the place is `known_valid`).
-    let empty_arms_are_unreachable = place_validity.is_known_valid()
+    let empty_arms_are_unreachable = place.validity.is_known_valid()
         && (is_toplevel_exception
             || mcx.tycx.is_exhaustive_patterns_feature_on()
             || mcx.tycx.is_min_exhaustive_patterns_feature_on());
@@ -1504,7 +1522,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>(
     // Decide what constructors to report.
     let is_integers = matches!(ctors_for_ty, ConstructorSet::Integers { .. });
-    let always_report_all = is_top_level && !is_integers;
+    let always_report_all = place.is_scrutinee && !is_integers;
     // Whether we should report "Enum::A and Enum::C are missing" or "_ is missing".
     let report_individual_missing_ctors = always_report_all || !all_missing;
     // Which constructors are considered missing. We ensure that `!missing_ctors.is_empty() =>
@@ -1525,7 +1543,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>(
         let ctor_is_relevant = matches!(ctor, Constructor::Missing) || missing_ctors.is_empty();
         let mut spec_matrix = matrix.specialize_constructor(pcx, &ctor, ctor_is_relevant)?;
         let mut witnesses = ensure_sufficient_stack(|| {
-            compute_exhaustiveness_and_usefulness(mcx, &mut spec_matrix, false)
+            compute_exhaustiveness_and_usefulness(mcx, &mut spec_matrix)
         })?;
         // Transform witnesses for `spec_matrix` into witnesses for `matrix`.
@@ -1600,8 +1618,7 @@ pub fn compute_match_usefulness<'p, Cx: TypeCx>(
 ) -> Result<UsefulnessReport<'p, Cx>, Cx::Error> {
     let cx = UsefulnessCtxt { tycx };
     let mut matrix = Matrix::new(arms, scrut_ty, scrut_validity);
-    let non_exhaustiveness_witnesses =
-        compute_exhaustiveness_and_usefulness(cx, &mut matrix, true)?;
+    let non_exhaustiveness_witnesses = compute_exhaustiveness_and_usefulness(cx, &mut matrix)?;
     let non_exhaustiveness_witnesses: Vec<_> = non_exhaustiveness_witnesses.single_column();
     let arm_usefulness: Vec<_> = arms

View file

@ -1572,6 +1572,8 @@ options! {
dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED], dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED],
"in dep-info output, omit targets for tracking dependencies of the dep-info files \ "in dep-info output, omit targets for tracking dependencies of the dep-info files \
themselves (default: no)"), themselves (default: no)"),
direct_access_external_data: Option<bool> = (None, parse_opt_bool, [TRACKED],
"Direct or use GOT indirect to reference external data symbols"),
dual_proc_macros: bool = (false, parse_bool, [TRACKED], dual_proc_macros: bool = (false, parse_bool, [TRACKED],
"load proc macros for both target and host, but only link to the target (default: no)"), "load proc macros for both target and host, but only link to the target (default: no)"),
dump_dep_graph: bool = (false, parse_bool, [UNTRACKED], dump_dep_graph: bool = (false, parse_bool, [UNTRACKED],

View file

@ -315,7 +315,7 @@ impl Session {
pub fn compile_status(&self) -> Result<(), ErrorGuaranteed> { pub fn compile_status(&self) -> Result<(), ErrorGuaranteed> {
// We must include lint errors here. // We must include lint errors here.
if let Some(reported) = self.dcx().has_errors_or_lint_errors() { if let Some(reported) = self.dcx().has_errors_or_lint_errors() {
let _ = self.dcx().emit_stashed_diagnostics(); self.dcx().emit_stashed_diagnostics();
Err(reported) Err(reported)
} else { } else {
Ok(()) Ok(())
@ -767,6 +767,13 @@ impl Session {
self.opts.unstable_opts.tls_model.unwrap_or(self.target.tls_model) self.opts.unstable_opts.tls_model.unwrap_or(self.target.tls_model)
} }
pub fn direct_access_external_data(&self) -> Option<bool> {
self.opts
.unstable_opts
.direct_access_external_data
.or(self.target.direct_access_external_data)
}
pub fn split_debuginfo(&self) -> SplitDebuginfo { pub fn split_debuginfo(&self) -> SplitDebuginfo {
self.opts.cg.split_debuginfo.unwrap_or(self.target.split_debuginfo) self.opts.cg.split_debuginfo.unwrap_or(self.target.split_debuginfo)
} }

View file

@ -2477,10 +2477,9 @@ where
pub struct ErrorGuaranteed(()); pub struct ErrorGuaranteed(());
impl ErrorGuaranteed { impl ErrorGuaranteed {
/// To be used only if you really know what you are doing... ideally, we would find a way to /// Don't use this outside of `DiagCtxtInner::emit_diagnostic`!
/// eliminate all calls to this method. #[deprecated = "should only be used in `DiagCtxtInner::emit_diagnostic`"]
#[deprecated = "`Session::span_delayed_bug` should be preferred over this function"] pub fn unchecked_error_guaranteed() -> Self {
pub fn unchecked_claim_error_was_emitted() -> Self {
ErrorGuaranteed(()) ErrorGuaranteed(())
} }
} }

View file

@ -1543,6 +1543,7 @@ supported_targets! {
("armebv7r-none-eabihf", armebv7r_none_eabihf), ("armebv7r-none-eabihf", armebv7r_none_eabihf),
("armv7r-none-eabi", armv7r_none_eabi), ("armv7r-none-eabi", armv7r_none_eabi),
("armv7r-none-eabihf", armv7r_none_eabihf), ("armv7r-none-eabihf", armv7r_none_eabihf),
("armv8r-none-eabihf", armv8r_none_eabihf),
("x86_64-pc-solaris", x86_64_pc_solaris), ("x86_64-pc-solaris", x86_64_pc_solaris),
("sparcv9-sun-solaris", sparcv9_sun_solaris), ("sparcv9-sun-solaris", sparcv9_sun_solaris),
@ -1886,6 +1887,8 @@ pub struct TargetOptions {
/// passed, and cannot be disabled even via `-C`. Corresponds to `llc /// passed, and cannot be disabled even via `-C`. Corresponds to `llc
/// -mattr=$features`. /// -mattr=$features`.
pub features: StaticCow<str>, pub features: StaticCow<str>,
/// Whether to reference external data symbols directly or indirectly via the GOT
pub direct_access_external_data: Option<bool>,
/// Whether dynamic linking is available on this target. Defaults to false. /// Whether dynamic linking is available on this target. Defaults to false.
pub dynamic_linking: bool, pub dynamic_linking: bool,
/// Whether dynamic linking can export TLS globals. Defaults to true. /// Whether dynamic linking can export TLS globals. Defaults to true.
@ -2280,6 +2283,7 @@ impl Default for TargetOptions {
asm_args: cvs![], asm_args: cvs![],
cpu: "generic".into(), cpu: "generic".into(),
features: "".into(), features: "".into(),
direct_access_external_data: None,
dynamic_linking: false, dynamic_linking: false,
dll_tls_export: true, dll_tls_export: true,
only_cdylib: false, only_cdylib: false,
@ -2579,6 +2583,12 @@ impl Target {
base.$key_name = s as u32; base.$key_name = s as u32;
} }
} ); } );
($key_name:ident, Option<bool>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove(&name).and_then(|b| b.as_bool()) {
base.$key_name = Some(s);
}
} );
($key_name:ident, Option<u64>) => ( { ($key_name:ident, Option<u64>) => ( {
let name = (stringify!($key_name)).replace("_", "-"); let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove(&name).and_then(|b| b.as_u64()) { if let Some(s) = obj.remove(&name).and_then(|b| b.as_u64()) {
@ -3007,6 +3017,7 @@ impl Target {
key!(cpu); key!(cpu);
key!(features); key!(features);
key!(dynamic_linking, bool); key!(dynamic_linking, bool);
key!(direct_access_external_data, Option<bool>);
key!(dll_tls_export, bool); key!(dll_tls_export, bool);
key!(only_cdylib, bool); key!(only_cdylib, bool);
key!(executables, bool); key!(executables, bool);
@ -3261,6 +3272,7 @@ impl ToJson for Target {
target_option_val!(cpu); target_option_val!(cpu);
target_option_val!(features); target_option_val!(features);
target_option_val!(dynamic_linking); target_option_val!(dynamic_linking);
target_option_val!(direct_access_external_data);
target_option_val!(dll_tls_export); target_option_val!(dll_tls_export);
target_option_val!(only_cdylib); target_option_val!(only_cdylib);
target_option_val!(executables); target_option_val!(executables);

View file

@ -0,0 +1,35 @@
// Targets the Little-endian Cortex-R52 processor (ARMv8-R)
use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
llvm_target: "armv8r-none-eabihf".into(),
pointer_width: 32,
data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
arch: "arm".into(),
options: TargetOptions {
abi: "eabihf".into(),
linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
// The Cortex-R52 has two variants with respect to floating-point support:
// 1. fp-armv8, SP-only, with 16 DP (32 SP) registers
// 2. neon-fp-armv8, SP+DP, with 32 DP registers
// Use the lesser of these two options as the default, as it will produce code
// compatible with either variant.
//
// Reference:
// Arm Cortex-R52 Processor Technical Reference Manual
// - Chapter 15 Advanced SIMD and floating-point support
features: "+fp-armv8,-fp64,-d32".into(),
max_atomic_width: Some(64),
emit_debug_gdb_scripts: false,
// GCC defaults to 8 for arm-none here.
c_enum_min_bits: Some(8),
..Default::default()
},
}
}

View file

@ -12,6 +12,7 @@ pub fn target() -> Target {
features: "+f,+d".into(), features: "+f,+d".into(),
llvm_abiname: "lp64d".into(), llvm_abiname: "lp64d".into(),
max_atomic_width: Some(64), max_atomic_width: Some(64),
direct_access_external_data: Some(false),
..base::linux_gnu::opts() ..base::linux_gnu::opts()
}, },
} }

View file

@ -30,7 +30,7 @@ python3 "$X_PY" test --stage 2 src/tools/rustfmt
# We set the GC interval to the shortest possible value (0 would be off) to increase the chance # We set the GC interval to the shortest possible value (0 would be off) to increase the chance
# that bugs which only surface when the GC runs at a specific time are more likely to cause CI to fail. # that bugs which only surface when the GC runs at a specific time are more likely to cause CI to fail.
# This significantly increases the runtime of our test suite, or we'd do this in PR CI too. # This significantly increases the runtime of our test suite, or we'd do this in PR CI too.
if [[ -z "${PR_CI_JOB:-}" ]]; then if [ -z "${PR_CI_JOB:-}" ]; then
MIRIFLAGS=-Zmiri-provenance-gc=1 python3 "$X_PY" test --stage 2 src/tools/miri MIRIFLAGS=-Zmiri-provenance-gc=1 python3 "$X_PY" test --stage 2 src/tools/miri
else else
python3 "$X_PY" test --stage 2 src/tools/miri python3 "$X_PY" test --stage 2 src/tools/miri

View file

@ -26,6 +26,7 @@
- [armv4t-none-eabi](platform-support/armv4t-none-eabi.md) - [armv4t-none-eabi](platform-support/armv4t-none-eabi.md)
- [armv5te-none-eabi](platform-support/armv5te-none-eabi.md) - [armv5te-none-eabi](platform-support/armv5te-none-eabi.md)
- [armv7r-none-eabi](platform-support/armv7r-none-eabi.md) - [armv7r-none-eabi](platform-support/armv7r-none-eabi.md)
- [armv8r-none-eabihf](platform-support/armv8r-none-eabihf.md)
- [armv6k-nintendo-3ds](platform-support/armv6k-nintendo-3ds.md) - [armv6k-nintendo-3ds](platform-support/armv6k-nintendo-3ds.md)
- [armv7-sony-vita-newlibeabihf](platform-support/armv7-sony-vita-newlibeabihf.md) - [armv7-sony-vita-newlibeabihf](platform-support/armv7-sony-vita-newlibeabihf.md)
- [armv7-unknown-linux-uclibceabi](platform-support/armv7-unknown-linux-uclibceabi.md) - [armv7-unknown-linux-uclibceabi](platform-support/armv7-unknown-linux-uclibceabi.md)

View file

@ -280,6 +280,7 @@ target | std | host | notes
[`armv7a-none-eabihf`](platform-support/arm-none-eabi.md) | * | | Bare ARMv7-A, hardfloat [`armv7a-none-eabihf`](platform-support/arm-none-eabi.md) | * | | Bare ARMv7-A, hardfloat
[`armv7k-apple-watchos`](platform-support/apple-watchos.md) | ✓ | | ARMv7-A Apple WatchOS [`armv7k-apple-watchos`](platform-support/apple-watchos.md) | ✓ | | ARMv7-A Apple WatchOS
`armv7s-apple-ios` | ✓ | | ARMv7-A Apple-A6 Apple iOS `armv7s-apple-ios` | ✓ | | ARMv7-A Apple-A6 Apple iOS
[`armv8r-none-eabihf`](platform-support/armv8r-none-eabihf.md) | * | | Bare ARMv8-R, hardfloat
`avr-unknown-gnu-atmega328` | * | | AVR. Requires `-Z build-std=core` `avr-unknown-gnu-atmega328` | * | | AVR. Requires `-Z build-std=core`
`bpfeb-unknown-none` | * | | BPF (big endian) `bpfeb-unknown-none` | * | | BPF (big endian)
`bpfel-unknown-none` | * | | BPF (little endian) `bpfel-unknown-none` | * | | BPF (little endian)

View file

@ -13,6 +13,7 @@
- [{arm,thumb}v4t-none-eabi](armv4t-none-eabi.md) - [{arm,thumb}v4t-none-eabi](armv4t-none-eabi.md)
- [{arm,thumb}v5te-none-eabi](armv5te-none-eabi.md) - [{arm,thumb}v5te-none-eabi](armv5te-none-eabi.md)
- armv7a-none-eabihf - armv7a-none-eabihf
- [armv8r-none-eabihf](armv8r-none-eabihf.md)
Bare-metal target for 32-bit ARM CPUs. Bare-metal target for 32-bit ARM CPUs.

View file

@ -0,0 +1,40 @@
# `armv8r-none-eabihf`
**Tier: 3**
Bare-metal target for CPUs in the ARMv8-R architecture family, supporting
dual ARM/Thumb mode, with ARM mode as the default.
Processors in this family include the Arm [Cortex-R52][cortex-r52]
and [Cortex-R52+][cortex-r52-plus].
See [`arm-none-eabi`](arm-none-eabi.md) for information applicable to all
`arm-none-eabi` targets.
[cortex-r52]: https://www.arm.com/products/silicon-ip-cpu/cortex-r/cortex-r52
[cortex-r52-plus]: https://www.arm.com/products/silicon-ip-cpu/cortex-r/cortex-r52-plus
## Target maintainers
- [Chris Copeland](https://github.com/chrisnc), `chris@chrisnc.net`
## Requirements
The Cortex-R52 family always includes a floating-point unit, so there is no
non-`hf` version of this target. The floating-point features assumed by this
target are those of the single-precision-only config of the Cortex-R52, which
has 16 double-precision registers, accessible as 32 single-precision registers.
The other variant of the Cortex-R52 adds double-precision support, 32
double-precision registers, and Advanced SIMD (Neon).
The manual refers to this as the "Full Advanced SIMD config". To compile code
for this variant, use: `-C target-feature=+fp64,+d32,+neon`. See the [Advanced
SIMD and floating-point support][fpu] section of the Cortex-R52 Processor
Technical Reference Manual for more details.
[fpu]: https://developer.arm.com/documentation/100026/0104/Advanced-SIMD-and-floating-point-support/About-the-Advanced-SIMD-and-floating-point-support
## Cross-compilation toolchains and C code
This target supports C code compiled with the `arm-none-eabi` target triple and
`-march=armv8-r` or a suitable `-mcpu` flag.
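As a rough sketch (not part of the shipped documentation), a minimal `#![no_std]` library crate can be compiled for this tier-3 target; the cargo invocation in the comment is an assumption and may need adjusting, since tier-3 targets ship no precompiled `core` and require `-Z build-std` on nightly.

```rust
// src/lib.rs -- minimal no_std library; a library crate needs no panic handler.
// Hypothetical build command (tier-3 target, so core must be built from source):
//   cargo +nightly build -Z build-std=core --target armv8r-none-eabihf
#![no_std]

/// Saturating add, just to show ordinary core-only code compiling for this target.
pub fn saturating_sum(a: u32, b: u32) -> u32 {
    a.saturating_add(b)
}
```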

View file

@ -0,0 +1,16 @@
# `direct_access_external_data`
The tracking issue for this feature is: https://github.com/rust-lang/compiler-team/issues/707
------------------------
The `-Z direct-access-external-data` option controls how external data symbols
are accessed.
Supported values for this option are:
- `yes` - Don't use GOT indirection to reference external data symbols.
- `no` - Use GOT indirection to reference external data symbols.
If the option is not explicitly specified, the default depends on the target.
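For illustration (mirroring the codegen test added in this commit), a crate that reads an external data symbol looks like the sketch below; whether `VAR` ends up marked `dso_local` (direct access) or goes through the GOT depends on this flag and the target. The `rustc` invocation in the comment is an assumption and requires a nightly toolchain.

```rust
// Hypothetical nightly invocation:
//   rustc -C relocation-model=static -Z direct-access-external-data=yes example.rs
#![crate_type = "rlib"]

extern "C" {
    // An external data symbol; its access pattern is what the flag controls.
    static VAR: i32;
}

pub fn get() -> i32 {
    // Reading an external static requires unsafe.
    unsafe { VAR }
}
```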

View file

@ -323,20 +323,20 @@ impl Options {
early_dcx: &mut EarlyDiagCtxt, early_dcx: &mut EarlyDiagCtxt,
matches: &getopts::Matches, matches: &getopts::Matches,
args: Vec<String>, args: Vec<String>,
) -> Result<(Options, RenderOptions), i32> { ) -> Option<(Options, RenderOptions)> {
// Check for unstable options. // Check for unstable options.
nightly_options::check_nightly_options(early_dcx, matches, &opts()); nightly_options::check_nightly_options(early_dcx, matches, &opts());
if args.is_empty() || matches.opt_present("h") || matches.opt_present("help") { if args.is_empty() || matches.opt_present("h") || matches.opt_present("help") {
crate::usage("rustdoc"); crate::usage("rustdoc");
return Err(0); return None;
} else if matches.opt_present("version") { } else if matches.opt_present("version") {
rustc_driver::version!(&early_dcx, "rustdoc", matches); rustc_driver::version!(&early_dcx, "rustdoc", matches);
return Err(0); return None;
} }
if rustc_driver::describe_flag_categories(early_dcx, &matches) { if rustc_driver::describe_flag_categories(early_dcx, &matches) {
return Err(0); return None;
} }
let color = config::parse_color(early_dcx, matches); let color = config::parse_color(early_dcx, matches);
@ -382,7 +382,7 @@ impl Options {
} }
} }
return Err(0); return None;
} }
let mut emit = Vec::new(); let mut emit = Vec::new();
@ -390,10 +390,7 @@ impl Options {
for kind in list.split(',') { for kind in list.split(',') {
match kind.parse() { match kind.parse() {
Ok(kind) => emit.push(kind), Ok(kind) => emit.push(kind),
Err(()) => { Err(()) => dcx.fatal(format!("unrecognized emission type: {kind}")),
dcx.err(format!("unrecognized emission type: {kind}"));
return Err(1);
}
} }
} }
} }
@ -403,7 +400,7 @@ impl Options {
&& !matches.opt_present("show-coverage") && !matches.opt_present("show-coverage")
&& !nightly_options::is_unstable_enabled(matches) && !nightly_options::is_unstable_enabled(matches)
{ {
early_dcx.early_fatal( dcx.fatal(
"the -Z unstable-options flag must be passed to enable --output-format for documentation generation (see https://github.com/rust-lang/rust/issues/76578)", "the -Z unstable-options flag must be passed to enable --output-format for documentation generation (see https://github.com/rust-lang/rust/issues/76578)",
); );
} }
@ -420,10 +417,7 @@ impl Options {
} }
let paths = match theme::load_css_paths(content) { let paths = match theme::load_css_paths(content) {
Ok(p) => p, Ok(p) => p,
Err(e) => { Err(e) => dcx.fatal(e),
dcx.err(e);
return Err(1);
}
}; };
let mut errors = 0; let mut errors = 0;
@ -442,9 +436,9 @@ impl Options {
} }
} }
if errors != 0 { if errors != 0 {
return Err(1); dcx.fatal("[check-theme] one or more tests failed");
} }
return Err(0); return None;
} }
let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(early_dcx, matches); let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(early_dcx, matches);
@ -452,11 +446,9 @@ impl Options {
let input = PathBuf::from(if describe_lints { let input = PathBuf::from(if describe_lints {
"" // dummy, this won't be used "" // dummy, this won't be used
} else if matches.free.is_empty() { } else if matches.free.is_empty() {
dcx.err("missing file operand"); dcx.fatal("missing file operand");
return Err(1);
} else if matches.free.len() > 1 { } else if matches.free.len() > 1 {
dcx.err("too many file operands"); dcx.fatal("too many file operands");
return Err(1);
} else { } else {
&matches.free[0] &matches.free[0]
}); });
@ -466,10 +458,7 @@ impl Options {
let externs = parse_externs(early_dcx, matches, &unstable_opts); let externs = parse_externs(early_dcx, matches, &unstable_opts);
let extern_html_root_urls = match parse_extern_html_roots(matches) { let extern_html_root_urls = match parse_extern_html_roots(matches) {
Ok(ex) => ex, Ok(ex) => ex,
Err(err) => { Err(err) => dcx.fatal(err),
dcx.err(err);
return Err(1);
}
}; };
let default_settings: Vec<Vec<(String, String)>> = vec![ let default_settings: Vec<Vec<(String, String)>> = vec![
@ -526,16 +515,14 @@ impl Options {
let no_run = matches.opt_present("no-run"); let no_run = matches.opt_present("no-run");
if !should_test && no_run { if !should_test && no_run {
dcx.err("the `--test` flag must be passed to enable `--no-run`"); dcx.fatal("the `--test` flag must be passed to enable `--no-run`");
return Err(1);
} }
let out_dir = matches.opt_str("out-dir").map(|s| PathBuf::from(&s)); let out_dir = matches.opt_str("out-dir").map(|s| PathBuf::from(&s));
let output = matches.opt_str("output").map(|s| PathBuf::from(&s)); let output = matches.opt_str("output").map(|s| PathBuf::from(&s));
let output = match (out_dir, output) { let output = match (out_dir, output) {
(Some(_), Some(_)) => { (Some(_), Some(_)) => {
dcx.err("cannot use both 'out-dir' and 'output' at once"); dcx.fatal("cannot use both 'out-dir' and 'output' at once");
return Err(1);
} }
(Some(out_dir), None) => out_dir, (Some(out_dir), None) => out_dir,
(None, Some(output)) => output, (None, Some(output)) => output,
@ -549,8 +536,7 @@ impl Options {
if let Some(ref p) = extension_css { if let Some(ref p) = extension_css {
if !p.is_file() { if !p.is_file() {
dcx.err("option --extend-css argument must be a file"); dcx.fatal("option --extend-css argument must be a file");
return Err(1);
} }
} }
@ -566,31 +552,25 @@ impl Options {
} }
let paths = match theme::load_css_paths(content) { let paths = match theme::load_css_paths(content) {
Ok(p) => p, Ok(p) => p,
Err(e) => { Err(e) => dcx.fatal(e),
dcx.err(e);
return Err(1);
}
}; };
for (theme_file, theme_s) in for (theme_file, theme_s) in
matches.opt_strs("theme").iter().map(|s| (PathBuf::from(&s), s.to_owned())) matches.opt_strs("theme").iter().map(|s| (PathBuf::from(&s), s.to_owned()))
{ {
if !theme_file.is_file() { if !theme_file.is_file() {
dcx.struct_err(format!("invalid argument: \"{theme_s}\"")) dcx.struct_fatal(format!("invalid argument: \"{theme_s}\""))
.with_help("arguments to --theme must be files") .with_help("arguments to --theme must be files")
.emit(); .emit();
return Err(1);
} }
if theme_file.extension() != Some(OsStr::new("css")) { if theme_file.extension() != Some(OsStr::new("css")) {
dcx.struct_err(format!("invalid argument: \"{theme_s}\"")) dcx.struct_fatal(format!("invalid argument: \"{theme_s}\""))
.with_help("arguments to --theme must have a .css extension") .with_help("arguments to --theme must have a .css extension")
.emit(); .emit();
return Err(1);
} }
let (success, ret) = theme::test_theme_against(&theme_file, &paths, &dcx); let (success, ret) = theme::test_theme_against(&theme_file, &paths, &dcx);
if !success { if !success {
dcx.err(format!("error loading theme file: \"{theme_s}\"")); dcx.fatal(format!("error loading theme file: \"{theme_s}\""));
return Err(1);
} else if !ret.is_empty() { } else if !ret.is_empty() {
dcx.struct_warn(format!( dcx.struct_warn(format!(
"theme file \"{theme_s}\" is missing CSS rules from the default theme", "theme file \"{theme_s}\" is missing CSS rules from the default theme",
@ -620,22 +600,18 @@ impl Options {
edition, edition,
&None, &None,
) else { ) else {
return Err(3); dcx.fatal("`ExternalHtml::load` failed");
}; };
match matches.opt_str("r").as_deref() { match matches.opt_str("r").as_deref() {
Some("rust") | None => {} Some("rust") | None => {}
Some(s) => { Some(s) => dcx.fatal(format!("unknown input format: {s}")),
dcx.err(format!("unknown input format: {s}"));
return Err(1);
}
} }
let index_page = matches.opt_str("index-page").map(|s| PathBuf::from(&s)); let index_page = matches.opt_str("index-page").map(|s| PathBuf::from(&s));
if let Some(ref index_page) = index_page { if let Some(ref index_page) = index_page {
if !index_page.is_file() { if !index_page.is_file() {
dcx.err("option `--index-page` argument must be a file"); dcx.fatal("option `--index-page` argument must be a file");
return Err(1);
} }
} }
@ -646,8 +622,7 @@ impl Options {
let crate_types = match parse_crate_types_from_list(matches.opt_strs("crate-type")) { let crate_types = match parse_crate_types_from_list(matches.opt_strs("crate-type")) {
Ok(types) => types, Ok(types) => types,
Err(e) => { Err(e) => {
dcx.err(format!("unknown crate type: {e}")); dcx.fatal(format!("unknown crate type: {e}"));
return Err(1);
} }
}; };
@ -655,18 +630,13 @@ impl Options {
Some(s) => match OutputFormat::try_from(s.as_str()) { Some(s) => match OutputFormat::try_from(s.as_str()) {
Ok(out_fmt) => { Ok(out_fmt) => {
if !out_fmt.is_json() && show_coverage { if !out_fmt.is_json() && show_coverage {
dcx.struct_err( dcx.fatal(
"html output format isn't supported for the --show-coverage option", "html output format isn't supported for the --show-coverage option",
) );
.emit();
return Err(1);
} }
out_fmt out_fmt
} }
Err(e) => { Err(e) => dcx.fatal(e),
dcx.err(e);
return Err(1);
}
}, },
None => OutputFormat::default(), None => OutputFormat::default(),
}; };
@ -709,16 +679,14 @@ impl Options {
let html_no_source = matches.opt_present("html-no-source"); let html_no_source = matches.opt_present("html-no-source");
if generate_link_to_definition && (show_coverage || output_format != OutputFormat::Html) { if generate_link_to_definition && (show_coverage || output_format != OutputFormat::Html) {
dcx.struct_err( dcx.fatal(
"--generate-link-to-definition option can only be used with HTML output format", "--generate-link-to-definition option can only be used with HTML output format",
) );
.emit();
return Err(1);
} }
let scrape_examples_options = ScrapeExamplesOptions::new(matches, &dcx)?; let scrape_examples_options = ScrapeExamplesOptions::new(matches, &dcx);
let with_examples = matches.opt_strs("with-examples"); let with_examples = matches.opt_strs("with-examples");
let call_locations = crate::scrape_examples::load_call_locations(with_examples, &dcx)?; let call_locations = crate::scrape_examples::load_call_locations(with_examples, &dcx);
let unstable_features = let unstable_features =
rustc_feature::UnstableFeatures::from_environment(crate_name.as_deref()); rustc_feature::UnstableFeatures::from_environment(crate_name.as_deref());
@ -793,7 +761,7 @@ impl Options {
no_emit_shared: false, no_emit_shared: false,
html_no_source, html_no_source,
}; };
Ok((options, render_options)) Some((options, render_options))
} }
/// Returns `true` if the file given as `self.input` is a Markdown file. /// Returns `true` if the file given as `self.input` is a Markdown file.

View file

@ -177,13 +177,16 @@ pub fn main() {
init_logging(&early_dcx); init_logging(&early_dcx);
rustc_driver::init_logger(&early_dcx, rustc_log::LoggerConfig::from_env("RUSTDOC_LOG")); rustc_driver::init_logger(&early_dcx, rustc_log::LoggerConfig::from_env("RUSTDOC_LOG"));
let exit_code = rustc_driver::catch_with_exit_code(|| match get_args(&early_dcx) { let exit_code = rustc_driver::catch_with_exit_code(|| {
Some(args) => main_args(&mut early_dcx, &args, using_internal_features), let args = env::args_os()
_ => .enumerate()
{ .map(|(i, arg)| {
#[allow(deprecated)] arg.into_string().unwrap_or_else(|arg| {
Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()) early_dcx.early_fatal(format!("argument {i} is not valid Unicode: {arg:?}"))
} })
})
.collect::<Vec<_>>();
main_args(&mut early_dcx, &args, using_internal_features)
}); });
process::exit(exit_code); process::exit(exit_code);
} }
@ -219,19 +222,6 @@ fn init_logging(early_dcx: &EarlyDiagCtxt) {
tracing::subscriber::set_global_default(subscriber).unwrap(); tracing::subscriber::set_global_default(subscriber).unwrap();
} }
fn get_args(early_dcx: &EarlyDiagCtxt) -> Option<Vec<String>> {
env::args_os()
.enumerate()
.map(|(i, arg)| {
arg.into_string()
.map_err(|arg| {
early_dcx.early_warn(format!("Argument {i} is not valid Unicode: {arg:?}"));
})
.ok()
})
.collect()
}
fn opts() -> Vec<RustcOptGroup> { fn opts() -> Vec<RustcOptGroup> {
let stable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::stable; let stable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::stable;
let unstable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::unstable; let unstable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::unstable;
@ -730,15 +720,8 @@ fn main_args(
// Note that we discard any distinction between different non-zero exit // Note that we discard any distinction between different non-zero exit
// codes from `from_matches` here. // codes from `from_matches` here.
let (options, render_options) = match config::Options::from_matches(early_dcx, &matches, args) { let (options, render_options) = match config::Options::from_matches(early_dcx, &matches, args) {
Ok(opts) => opts, Some(opts) => opts,
Err(code) => { None => return Ok(()),
return if code == 0 {
Ok(())
} else {
#[allow(deprecated)]
Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
};
}
}; };
let diag = let diag =

View file

@ -38,28 +38,23 @@ pub(crate) struct ScrapeExamplesOptions {
} }
impl ScrapeExamplesOptions { impl ScrapeExamplesOptions {
pub(crate) fn new( pub(crate) fn new(matches: &getopts::Matches, dcx: &rustc_errors::DiagCtxt) -> Option<Self> {
matches: &getopts::Matches,
dcx: &rustc_errors::DiagCtxt,
) -> Result<Option<Self>, i32> {
let output_path = matches.opt_str("scrape-examples-output-path"); let output_path = matches.opt_str("scrape-examples-output-path");
let target_crates = matches.opt_strs("scrape-examples-target-crate"); let target_crates = matches.opt_strs("scrape-examples-target-crate");
let scrape_tests = matches.opt_present("scrape-tests"); let scrape_tests = matches.opt_present("scrape-tests");
match (output_path, !target_crates.is_empty(), scrape_tests) { match (output_path, !target_crates.is_empty(), scrape_tests) {
(Some(output_path), true, _) => Ok(Some(ScrapeExamplesOptions { (Some(output_path), true, _) => Some(ScrapeExamplesOptions {
output_path: PathBuf::from(output_path), output_path: PathBuf::from(output_path),
target_crates, target_crates,
scrape_tests, scrape_tests,
})), }),
(Some(_), false, _) | (None, true, _) => { (Some(_), false, _) | (None, true, _) => {
dcx.err("must use --scrape-examples-output-path and --scrape-examples-target-crate together"); dcx.fatal("must use --scrape-examples-output-path and --scrape-examples-target-crate together");
Err(1)
} }
(None, false, true) => { (None, false, true) => {
dcx.err("must use --scrape-examples-output-path and --scrape-examples-target-crate with --scrape-tests"); dcx.fatal("must use --scrape-examples-output-path and --scrape-examples-target-crate with --scrape-tests");
Err(1)
} }
(None, false, false) => Ok(None), (None, false, false) => None,
} }
} }
} }
@ -342,24 +337,20 @@ pub(crate) fn run(
pub(crate) fn load_call_locations( pub(crate) fn load_call_locations(
with_examples: Vec<String>, with_examples: Vec<String>,
dcx: &rustc_errors::DiagCtxt, dcx: &rustc_errors::DiagCtxt,
) -> Result<AllCallLocations, i32> { ) -> AllCallLocations {
let inner = || { let mut all_calls: AllCallLocations = FxHashMap::default();
let mut all_calls: AllCallLocations = FxHashMap::default(); for path in with_examples {
for path in with_examples { let bytes = match fs::read(&path) {
let bytes = fs::read(&path).map_err(|e| format!("{e} (for path {path})"))?; Ok(bytes) => bytes,
let mut decoder = MemDecoder::new(&bytes, 0); Err(e) => dcx.fatal(format!("failed to load examples: {e}")),
let calls = AllCallLocations::decode(&mut decoder); };
let mut decoder = MemDecoder::new(&bytes, 0);
let calls = AllCallLocations::decode(&mut decoder);
for (function, fn_calls) in calls.into_iter() { for (function, fn_calls) in calls.into_iter() {
all_calls.entry(function).or_default().extend(fn_calls.into_iter()); all_calls.entry(function).or_default().extend(fn_calls.into_iter());
}
} }
}
Ok(all_calls) all_calls
};
inner().map_err(|e: String| {
dcx.err(format!("failed to load examples: {e}"));
1
})
} }

View file

@ -83,6 +83,7 @@ static TARGETS: &[&str] = &[
"armebv7r-none-eabihf", "armebv7r-none-eabihf",
"armv7r-none-eabi", "armv7r-none-eabi",
"armv7r-none-eabihf", "armv7r-none-eabihf",
"armv8r-none-eabihf",
"armv7s-apple-ios", "armv7s-apple-ios",
"bpfeb-unknown-none", "bpfeb-unknown-none",
"bpfel-unknown-none", "bpfel-unknown-none",

View file

@ -15,7 +15,7 @@ use std::path::{Path, PathBuf};
const ENTRY_LIMIT: usize = 900; const ENTRY_LIMIT: usize = 900;
// FIXME: The following limits should be reduced eventually. // FIXME: The following limits should be reduced eventually.
const ISSUES_ENTRY_LIMIT: usize = 1819; const ISSUES_ENTRY_LIMIT: usize = 1819;
const ROOT_ENTRY_LIMIT: usize = 870; const ROOT_ENTRY_LIMIT: usize = 871;
const EXPECTED_TEST_FILE_EXTENSIONS: &[&str] = &[ const EXPECTED_TEST_FILE_EXTENSIONS: &[&str] = &[
"rs", // test source files "rs", // test source files

View file

@ -174,6 +174,9 @@
// revisions: armv7r_none_eabihf // revisions: armv7r_none_eabihf
// [armv7r_none_eabihf] compile-flags: --target armv7r-none-eabihf // [armv7r_none_eabihf] compile-flags: --target armv7r-none-eabihf
// [armv7r_none_eabihf] needs-llvm-components: arm // [armv7r_none_eabihf] needs-llvm-components: arm
// revisions: armv8r_none_eabihf
// [armv8r_none_eabihf] compile-flags: --target armv8r-none-eabihf
// [armv8r_none_eabihf] needs-llvm-components: arm
// FIXME: disabled since it fails on CI saying the csky component is missing // FIXME: disabled since it fails on CI saying the csky component is missing
/* /*
revisions: csky_unknown_linux_gnuabiv2 revisions: csky_unknown_linux_gnuabiv2

View file

@ -0,0 +1,21 @@
// only-loongarch64-unknown-linux-gnu
// revisions: DEFAULT DIRECT INDIRECT
// [DEFAULT] compile-flags: -C relocation-model=static
// [DIRECT] compile-flags: -C relocation-model=static -Z direct-access-external-data=yes
// [INDIRECT] compile-flags: -C relocation-model=static -Z direct-access-external-data=no
#![crate_type = "rlib"]
// DEFAULT: @VAR = external {{.*}} global i32
// DIRECT: @VAR = external dso_local {{.*}} global i32
// INDIRECT: @VAR = external {{.*}} global i32
extern "C" {
static VAR: i32;
}
#[no_mangle]
pub fn get() -> i32 {
unsafe { VAR }
}

View file

@ -1,4 +1,3 @@
// skip-filecheck
// EMIT_MIR_FOR_EACH_PANIC_STRATEGY // EMIT_MIR_FOR_EACH_PANIC_STRATEGY
// unit-test: RemoveStorageMarkers // unit-test: RemoveStorageMarkers
@ -8,6 +7,10 @@
// EMIT_MIR remove_storage_markers.main.RemoveStorageMarkers.diff // EMIT_MIR remove_storage_markers.main.RemoveStorageMarkers.diff
fn main() { fn main() {
// CHECK-LABEL: fn main(
// CHECK-NOT: StorageDead
// CHECK-NOT: StorageLive
let mut sum = 0; let mut sum = 0;
for i in 0..10 { for i in 0..10 {
sum += i; sum += i;

View file

@ -1,10 +1,13 @@
// skip-filecheck
// EMIT_MIR_FOR_EACH_PANIC_STRATEGY // EMIT_MIR_FOR_EACH_PANIC_STRATEGY
#[inline(never)] #[inline(never)]
fn noop() {} fn noop() {}
// EMIT_MIR simplify_if.main.SimplifyConstCondition-after-const-prop.diff // EMIT_MIR simplify_if.main.SimplifyConstCondition-after-const-prop.diff
fn main() { fn main() {
// CHECK-LABEL: fn main(
// CHECK: bb0: {
// CHECK-NEXT: return;
if false { if false {
noop(); noop();
} }

View file

@ -1,4 +1,3 @@
// skip-filecheck
// unit-test: ScalarReplacementOfAggregates // unit-test: ScalarReplacementOfAggregates
// compile-flags: -Cpanic=abort // compile-flags: -Cpanic=abort
// no-prefer-dynamic // no-prefer-dynamic
@ -16,6 +15,10 @@ struct Foo<T: Err> {
// EMIT_MIR lifetimes.foo.ScalarReplacementOfAggregates.diff // EMIT_MIR lifetimes.foo.ScalarReplacementOfAggregates.diff
fn foo<T: Err>() { fn foo<T: Err>() {
// CHECK-LABEL: fn foo(
// CHECK-NOT: [foo:_.*]: Foo
// CHECK-NOT: Box<dyn std::fmt::Display + 'static>
let foo: Foo<T> = Foo { let foo: Foo<T> = Foo {
x: Ok(Box::new(5_u32)), x: Ok(Box::new(5_u32)),
y: 7_u32, y: 7_u32,

View file

@ -1,4 +1,3 @@
// skip-filecheck
// unit-test: ScalarReplacementOfAggregates // unit-test: ScalarReplacementOfAggregates
// compile-flags: -Cpanic=abort // compile-flags: -Cpanic=abort
// no-prefer-dynamic // no-prefer-dynamic
@ -13,28 +12,68 @@ impl Drop for Tag {
fn drop(&mut self) {} fn drop(&mut self) {}
} }
/// Check that SROA excludes structs with a `Drop` implementation.
pub fn dropping() { pub fn dropping() {
// CHECK-LABEL: fn dropping(
// CHECK: [[aggregate:_[0-9]+]]: S;
// CHECK: bb0: {
// CHECK: [[aggregate]] = S
S(Tag(0), Tag(1), Tag(2)).1; S(Tag(0), Tag(1), Tag(2)).1;
} }
/// Check that SROA excludes enums.
pub fn enums(a: usize) -> usize { pub fn enums(a: usize) -> usize {
// CHECK-LABEL: fn enums(
// CHECK: [[enum:_[0-9]+]]: std::option::Option<usize>;
// CHECK: bb0: {
// CHECK: [[enum]] = Option::<usize>::Some
// CHECK: _5 = (([[enum]] as Some).0: usize)
// CHECK: _0 = _5
if let Some(a) = Some(a) { a } else { 0 } if let Some(a) = Some(a) { a } else { 0 }
} }
/// Check that SROA destructures `U`.
pub fn structs(a: f32) -> f32 { pub fn structs(a: f32) -> f32 {
// CHECK-LABEL: fn structs(
struct U { struct U {
_foo: usize, _foo: usize,
a: f32, a: f32,
} }
// CHECK: [[ret:_0]]: f32;
// CHECK: [[struct:_[0-9]+]]: structs::U;
// CHECK: [[a_tmp:_[0-9]+]]: f32;
// CHECK: [[foo:_[0-9]+]]: usize;
// CHECK: [[a_ret:_[0-9]+]]: f32;
// CHECK: bb0: {
// CHECK-NOT: [[struct]]
// CHECK: [[a_tmp]] = _1;
// CHECK-NOT: [[struct]]
// CHECK: [[foo]] = const 0_usize;
// CHECK-NOT: [[struct]]
// CHECK: [[a_ret]] = move [[a_tmp]];
// CHECK-NOT: [[struct]]
// CHECK: _0 = [[a_ret]];
// CHECK-NOT: [[struct]]
U { _foo: 0, a }.a U { _foo: 0, a }.a
} }
/// Check that SROA excludes unions.
pub fn unions(a: f32) -> u32 { pub fn unions(a: f32) -> u32 {
// CHECK-LABEL: fn unions(
union Repr { union Repr {
f: f32, f: f32,
u: u32, u: u32,
} }
// CHECK: [[union:_[0-9]+]]: unions::Repr;
// CHECK: bb0: {
// CHECK: [[union]] = Repr {
// CHECK: _0 = ([[union]].1: u32)
unsafe { Repr { f: a }.u } unsafe { Repr { f: a }.u }
} }
@ -46,11 +85,21 @@ struct Foo {
d: Option<isize>, d: Option<isize>,
} }
fn g() -> u32 { /// Check that non-escaping uses of a struct are destructured.
3
}
pub fn flat() { pub fn flat() {
// CHECK-LABEL: fn flat(
// CHECK: [[struct:_[0-9]+]]: Foo;
// CHECK: bb0: {
// CHECK: [[init_unit:_[0-9]+]] = ();
// CHECK: [[init_opt_isize:_[0-9]+]] = Option::<isize>::Some
// CHECK: [[destr_five:_[0-9]+]] = const 5_u8;
// CHECK: [[destr_unit:_[0-9]+]] = move [[init_unit]];
// CHECK: [[destr_a:_[0-9]+]] = const "a";
// CHECK: [[destr_opt_isize:_[0-9]+]] = move [[init_opt_isize]];
let Foo { a, b, c, d } = Foo { a: 5, b: (), c: "a", d: Some(-4) }; let Foo { a, b, c, d } = Foo { a: 5, b: (), c: "a", d: Some(-4) };
let _ = a; let _ = a;
let _ = b; let _ = b;
@ -65,6 +114,10 @@ struct Escaping {
c: u32, c: u32,
} }
fn g() -> u32 {
3
}
fn f(a: *const u32) { fn f(a: *const u32) {
println!("{}", unsafe { *a.add(2) }); println!("{}", unsafe { *a.add(2) });
} }
@ -76,10 +129,38 @@ fn f(a: *const u32) {
// of them to `f`. However, this would lead to a miscompilation because `b` and `c` // of them to `f`. However, this would lead to a miscompilation because `b` and `c`
// might no longer appear right after `a` in memory. // might no longer appear right after `a` in memory.
pub fn escaping() { pub fn escaping() {
// CHECK-LABEL: fn escaping(
// CHECK: [[ptr:_[0-9]+]]: *const u32;
// CHECK: [[ref:_[0-9]+]]: &u32;
// CHECK: [[struct:_[0-9]+]]: Escaping;
// CHECK: [[a:_[0-9]+]]: u32;
// CHECK: bb0: {
// CHECK: [[struct]] = Escaping {
// CHECK: [[ref]] = &([[struct]].0
// CHECK: [[ptr]] = &raw const (*[[ref]]);
f(&Escaping { a: 1, b: 2, c: g() }.a); f(&Escaping { a: 1, b: 2, c: g() }.a);
} }
/// Check that copies from an internal struct are destructured and reassigned to
/// the original struct.
fn copies(x: Foo) { fn copies(x: Foo) {
// CHECK-LABEL: fn copies(
// CHECK: [[external:_[0-9]+]]: Foo) ->
// CHECK: [[internal:_[0-9]+]]: Foo;
// CHECK: [[byte:_[0-9]+]]: u8;
// CHECK: [[unit:_[0-9]+]]: ();
// CHECK: [[str:_[0-9]+]]: &str;
// CHECK: [[opt_isize:_[0-9]+]]: std::option::Option<isize>;
// CHECK: bb0: {
// CHECK: [[byte]] = ([[external]].0
// CHECK: [[unit]] = ([[external]].1
// CHECK: [[str]] = ([[external]].2
// CHECK: [[opt_isize]] = ([[external]].3
let y = x; let y = x;
let t = y.a; let t = y.a;
let u = y.c; let u = y.c;
@ -87,13 +168,44 @@ fn copies(x: Foo) {
let a = z.b; let a = z.b;
} }
/// Check that copies from an internal struct are destructured and reassigned to
/// the original struct.
fn ref_copies(x: &Foo) { fn ref_copies(x: &Foo) {
// CHECK-LABEL: fn ref_copies(
// CHECK: [[external:_[0-9]+]]: &Foo) ->
// CHECK: [[internal:_[0-9]+]]: Foo;
// CHECK: [[byte:_[0-9]+]]: u8;
// CHECK: [[unit:_[0-9]+]]: ();
// CHECK: [[str:_[0-9]+]]: &str;
// CHECK: [[opt_isize:_[0-9]+]]: std::option::Option<isize>;
// CHECK: bb0: {
// CHECK: [[byte]] = ((*[[external]]).0
// CHECK: [[unit]] = ((*[[external]]).1
// CHECK: [[str]] = ((*[[external]]).2
// CHECK: [[opt_isize]] = ((*[[external]]).3
let y = *x; let y = *x;
let t = y.a; let t = y.a;
let u = y.c; let u = y.c;
} }
/// Check that deaggregated assignments from constants are placed after the constant's
/// assignment. Also check that copying field accesses from the copy of the constant are
/// reassigned to copy from the constant.
fn constant() { fn constant() {
// CHECK-LABEL: constant(
// CHECK: [[constant:_[0-9]+]]: (usize, u8);
// CHECK: [[t:_[0-9]+]]: usize;
// CHECK: [[u:_[0-9]+]]: u8;
// CHECK: bb0: {
// CHECK-NOT: [[constant]]
// CHECK: [[constant]] = const
// CHECK: [[t]] = move ([[constant]].0: usize)
// CHECK: [[u]] = move ([[constant]].1: u8)
const U: (usize, u8) = (5, 9); const U: (usize, u8) = (5, 9);
let y = U; let y = U;
let t = y.0; let t = y.0;
@ -101,6 +213,7 @@ fn constant() {
} }
fn main() { fn main() {
// CHECK-LABEL: fn main(
dropping(); dropping();
enums(5); enums(5);
structs(5.); structs(5.);

View file

@ -0,0 +1,25 @@
// run-rustfix
enum Enum {
Variant(i32),
}
struct Struct(i32);
fn foo(x: Enum) -> i32 {
let Enum::Variant(value) = x;
value
}
fn bar(x: Enum) -> i32 {
let Enum::Variant(value) = x;
let x = value + 1;
x
}
fn baz(x: Struct) -> i32 {
let Struct(value) = x;
let x = value + 1;
x
}
fn main() {
let _ = foo(Enum::Variant(42));
let _ = bar(Enum::Variant(42));
let _ = baz(Struct(42));
}

View file

@ -0,0 +1,28 @@
// run-rustfix
enum Enum {
Variant(i32),
}
struct Struct(i32);
fn foo(x: Enum) -> i32 {
if let Enum::Variant(value) = x { //~ ERROR `if` may be missing an `else` clause
value
}
}
fn bar(x: Enum) -> i32 {
if let Enum::Variant(value) = x { //~ ERROR `if` may be missing an `else` clause
let x = value + 1;
x
}
}
fn baz(x: Struct) -> i32 {
if let Struct(value) = x { //~ ERROR `if` may be missing an `else` clause
let x = value + 1;
x
}
}
fn main() {
let _ = foo(Enum::Variant(42));
let _ = bar(Enum::Variant(42));
let _ = baz(Struct(42));
}

View file

@ -0,0 +1,61 @@
error[E0317]: `if` may be missing an `else` clause
--> $DIR/irrefutable-if-let-without-else.rs:8:5
|
LL | fn foo(x: Enum) -> i32 {
| --- expected `i32` because of this return type
LL | / if let Enum::Variant(value) = x {
LL | | value
LL | | }
| |_____^ expected `i32`, found `()`
|
= note: `if` expressions without `else` evaluate to `()`
= help: consider adding an `else` block that evaluates to the expected type
help: consider using an irrefutable `let` binding instead
|
LL ~ let Enum::Variant(value) = x;
LL ~ value
|
error[E0317]: `if` may be missing an `else` clause
--> $DIR/irrefutable-if-let-without-else.rs:13:5
|
LL | fn bar(x: Enum) -> i32 {
| --- expected `i32` because of this return type
LL | / if let Enum::Variant(value) = x {
LL | | let x = value + 1;
LL | | x
LL | | }
| |_____^ expected `i32`, found `()`
|
= note: `if` expressions without `else` evaluate to `()`
= help: consider adding an `else` block that evaluates to the expected type
help: consider using an irrefutable `let` binding instead
|
LL ~ let Enum::Variant(value) = x;
LL ~ let x = value + 1;
LL ~ x
|
error[E0317]: `if` may be missing an `else` clause
--> $DIR/irrefutable-if-let-without-else.rs:19:5
|
LL | fn baz(x: Struct) -> i32 {
| --- expected `i32` because of this return type
LL | / if let Struct(value) = x {
LL | | let x = value + 1;
LL | | x
LL | | }
| |_____^ expected `i32`, found `()`
|
= note: `if` expressions without `else` evaluate to `()`
= help: consider adding an `else` block that evaluates to the expected type
help: consider using an irrefutable `let` binding instead
|
LL ~ let Struct(value) = x;
LL ~ let x = value + 1;
LL ~ x
|
error: aborting due to 3 previous errors
For more information about this error, try `rustc --explain E0317`.

View file

@ -0,0 +1,17 @@
// compile-flags: -Zpolymorphize=on
// build-pass
#![feature(coroutines, coroutine_trait)]
use std::ops::Coroutine;
use std::pin::Pin;
use std::thread;
fn main() {
let mut foo = || yield;
thread::spawn(move || match Pin::new(&mut foo).resume(()) {
s => panic!("bad state: {:?}", s),
})
.join()
.unwrap();
}

View file

@ -1,4 +1,4 @@
{"$message_type":"future_incompat","future_incompat_report":[{"diagnostic":{"$message_type":"diagnostic","message":"unused variable: `x`","code":{"code":"unused_variables","explanation":null},"level":"warning","spans":[{"file_name":"$DIR/future-incompat-json-test.rs","byte_start":338,"byte_end":339,"line_start":9,"line_end":9,"column_start":9,"column_end":10,"is_primary":true,"text":[{"text":" let x = 1;","highlight_start":9,"highlight_end":10}],"label":null,"suggested_replacement":null,"suggestion_applicability":null,"expansion":null}],"children":[{"message":"`-A unused-variables` implied by `-A unused`","code":null,"level":"note","spans":[],"children":[],"rendered":null},{"message":"to override `-A unused` add `#[allow(unused_variables)]`","code":null,"level":"help","spans":[],"children":[],"rendered":null},{"message":"if this is intentional, prefix it with an underscore","code":null,"level":"help","spans":[{"file_name":"$DIR/future-incompat-json-test.rs","byte_start":338,"byte_end":339,"line_start":9,"line_end":9,"column_start":9,"column_end":10,"is_primary":true,"text":[{"text":" let x = 1;","highlight_start":9,"highlight_end":10}],"label":null,"suggested_replacement":"_x","suggestion_applicability":"MachineApplicable","expansion":null}],"children":[],"rendered":null}],"rendered":"warning: unused variable: `x` {"$message_type":"future_incompat","future_incompat_report":[{"diagnostic":{"$message_type":"diagnostic","message":"unused variable: `x`","code":{"code":"unused_variables","explanation":null},"level":"warning","spans":[{"file_name":"$DIR/future-incompat-json-test.rs","byte_start":338,"byte_end":339,"line_start":9,"line_end":9,"column_start":9,"column_end":10,"is_primary":true,"text":[{"text":" let x = 1;","highlight_start":9,"highlight_end":10}],"label":null,"suggested_replacement":null,"suggestion_applicability":null,"expansion":null}],"children":[{"message":"`-A unused-variables` implied by `-A unused`","code":null,"level":"note","spans":[],"children":[],"rendered":null},{"message":"to override `-A unused` add `#[allow(unused_variables)]`","code":null,"level":"help","spans":[],"children":[],"rendered":null},{"message":"if this is intentional, prefix it with an underscore","code":null,"level":"help","spans":[{"file_name":"$DIR/future-incompat-json-test.rs","byte_start":338,"byte_end":339,"line_start":9,"line_end":9,"column_start":9,"column_end":10,"is_primary":true,"text":[{"text":" let x = 1;","highlight_start":9,"highlight_end":10}],"label":null,"suggested_replacement":"_x","suggestion_applicability":"MaybeIncorrect","expansion":null}],"children":[],"rendered":null}],"rendered":"warning: unused variable: `x`
--> $DIR/future-incompat-json-test.rs:9:9 --> $DIR/future-incompat-json-test.rs:9:9
| |
LL | let x = 1; LL | let x = 1;

View file

@ -0,0 +1,16 @@
// compile-flags: -Z threads=16
// build-fail
#![crate_type="rlib"]
#![allow(warnings)]
#[export_name="fail"]
pub fn a() {
}
#[export_name="fail"]
pub fn b() {
//~^ Error symbol `fail` is already defined
}
fn main() {}

View file

@ -0,0 +1,8 @@
error: symbol `fail` is already defined
--> $DIR/cache-after-waiting-issue-111528.rs:12:1
|
LL | pub fn b() {
| ^^^^^^^^^^
error: aborting due to 1 previous error

View file

@ -0,0 +1,7 @@
// compile-flags:-C extra-filename=-1 -Z threads=16
// no-prefer-dynamic
// build-pass
#![crate_name = "crateresolve1"]
#![crate_type = "lib"]
pub fn f() -> isize { 10 }

View file

@ -0,0 +1,22 @@
// compile-flags: -Z threads=16
// build-pass
pub static GLOBAL: isize = 3;
static GLOBAL0: isize = 4;
pub static GLOBAL2: &'static isize = &GLOBAL0;
pub fn verify_same(a: &'static isize) {
let a = a as *const isize as usize;
let b = &GLOBAL as *const isize as usize;
assert_eq!(a, b);
}
pub fn verify_same2(a: &'static isize) {
let a = a as *const isize as usize;
let b = GLOBAL2 as *const isize as usize;
assert_eq!(a, b);
}
fn main() {}

View file

@ -0,0 +1,6 @@
// compile-flags: -Z threads=8
// run-pass
fn main() {
println!("Hello world!");
}

View file

@ -0,0 +1,18 @@
// compile-flags: -Z threads=16
// run-pass
#[repr(transparent)]
struct Sched {
i: i32,
}
impl Sched {
extern "C" fn get(self) -> i32 { self.i }
}
fn main() {
let s = Sched { i: 4 };
let f = || -> i32 {
s.get()
};
println!("f: {}", f());
}