
Improve comment formatting.

By reflowing comment lines that are too long, and a few that are very
short. Plus some other very minor formatting tweaks.
Nicholas Nethercote 2024-08-28 08:24:10 +10:00
parent c2f74c3f92
commit 8235af07d2
27 changed files with 151 additions and 111 deletions

View file

@@ -60,7 +60,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag {
let basic_blocks = body.basic_blocks.as_mut();
let local_decls = &body.local_decls;
let needs_retag = |place: &Place<'tcx>| {
- !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
+ // We're not really interested in stores to "outside" locations, they are hard to keep
+ // track of anyway.
+ !place.is_indirect_first_projection()
&& may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx)
&& !local_decls[place.local].is_deref_temp()
};
@@ -129,9 +131,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag {
StatementKind::Assign(box (ref place, ref rvalue)) => {
let add_retag = match rvalue {
// Ptr-creating operations already do their own internal retagging, no
- // need to also add a retag statement.
- // *Except* if we are deref'ing a Box, because those get desugared to directly working
- // with the inner raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it
+ // need to also add a retag statement. *Except* if we are deref'ing a
+ // Box, because those get desugared to directly working with the inner
+ // raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it
// a NOP when the original pointer is already raw.
Rvalue::RawPtr(_mutbl, place) => {
// Using `is_box_global` here is a bit sketchy: if this code is

View file

@@ -123,6 +123,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
self.super_statement(stmt, loc);
self.target_local = None;
}
+
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, loc: Location) {
if let Rvalue::Ref(_, BorrowKind::Mut { .. }, place) = rvalue {
let local = place.local;

View file

@@ -140,7 +140,8 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
if let Operand::Move(place) = *operand
- // A move out of a projection of a copy is equivalent to a copy of the original projection.
+ // A move out of a projection of a copy is equivalent to a copy of the original
+ // projection.
&& !place.is_indirect_first_projection()
&& !self.fully_moved.contains(place.local)
{

View file

@@ -279,7 +279,8 @@ fn inject_mcdc_statements<'tcx>(
basic_coverage_blocks: &CoverageGraph,
extracted_mappings: &ExtractedMappings,
) {
- // Inject test vector update first because `inject_statement` always insert new statement at head.
+ // Inject test vector update first because `inject_statement` always insert new statement at
+ // head.
for &mappings::MCDCDecision {
span: _,
ref end_bcbs,

View file

@@ -647,7 +647,8 @@ fn try_write_constant<'tcx>(
ty::FnDef(..) => {}
// Those are scalars, must be handled above.
- ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => throw_machine_stop_str!("primitive type with provenance"),
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char =>
+ throw_machine_stop_str!("primitive type with provenance"),
ty::Tuple(elem_tys) => {
for (i, elem) in elem_tys.iter().enumerate() {

View file

@@ -42,9 +42,9 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly {
}
PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => {
// Whether mutating though a `&raw const` is allowed is still undecided, so we
- // disable any sketchy `readonly` optimizations for now.
- // But we only need to do this if the pointer would point into the argument.
- // IOW: for indirect places, like `&raw (*local).field`, this surely cannot mutate `local`.
+ // disable any sketchy `readonly` optimizations for now. But we only need to do
+ // this if the pointer would point into the argument. IOW: for indirect places,
+ // like `&raw (*local).field`, this surely cannot mutate `local`.
!place.is_indirect()
}
PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => {

View file

@@ -69,8 +69,8 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
// For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes.
// Then we will see that bb2 is a duplicate of bb3,
// and insert bb2 with the replacement bb3 in the duplicates list.
- // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
- // with replacement bb3.
+ // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the
+ // duplicates list with replacement bb3.
// When the duplicates are removed, we will end up with only bb3.
for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
// Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
@@ -105,7 +105,8 @@ struct BasicBlockHashable<'tcx, 'a> {
impl Hash for BasicBlockHashable<'_, '_> {
fn hash<H: Hasher>(&self, state: &mut H) {
hash_statements(state, self.basic_block_data.statements.iter());
- // Note that since we only hash the kind, we lose span information if we deduplicate the blocks
+ // Note that since we only hash the kind, we lose span information if we deduplicate the
+ // blocks.
self.basic_block_data.terminator().kind.hash(state);
}
}
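
The comment above describes the two-level strategy in find_duplicates: bucket blocks cheaply in same_hashes, then record later-seen equals in a duplicates map that points to a single surviving block. A minimal, hypothetical sketch of that bookkeeping (plain strings stand in for basic blocks, and for brevity the map is keyed directly on content rather than on a separate hash):

use std::collections::HashMap;

// Walk in reverse so the highest-index copy becomes the replacement,
// like bb3 in the bb1/bb2/bb3 example above.
fn find_duplicates(blocks: &[String]) -> HashMap<usize, usize> {
    let mut same_hashes: HashMap<&str, usize> = HashMap::new();
    let mut duplicates: HashMap<usize, usize> = HashMap::new();
    for (idx, block) in blocks.iter().enumerate().rev() {
        match same_hashes.get(block.as_str()) {
            // Seen before: redirect this block to its surviving duplicate.
            Some(&existing) => {
                duplicates.insert(idx, existing);
            }
            // First sighting: this copy survives.
            None => {
                same_hashes.insert(block.as_str(), idx);
            }
        }
    }
    duplicates
}

fn main() {
    let blocks = vec!["a".to_string(), "a".to_string(), "a".to_string()];
    // Both earlier copies are redirected to index 2, the surviving "bb3".
    assert_eq!(find_duplicates(&blocks), HashMap::from([(0, 2), (1, 2)]));
}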

View file

@@ -261,8 +261,8 @@ fn evaluate_candidate<'tcx>(
// };
// ```
//
- // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an
- // invalid value, which is UB.
+ // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant
+ // of an invalid value, which is UB.
// In order to fix this, **we would either need to show that the discriminant computation of
// `place` is computed in all branches**.
// FIXME(#95162) For the moment, we adopt a conservative approach and

View file

@@ -20,8 +20,8 @@ use tracing::{debug, instrument};
use crate::deref_separator::deref_finder;

/// During MIR building, Drop terminators are inserted in every place where a drop may occur.
- /// However, in this phase, the presence of these terminators does not guarantee that a destructor will run,
- /// as the target of the drop may be uninitialized.
+ /// However, in this phase, the presence of these terminators does not guarantee that a destructor
+ /// will run, as the target of the drop may be uninitialized.
/// In general, the compiler cannot determine at compile time whether a destructor will run or not.
///
/// At a high level, this pass refines Drop to only run the destructor if the
@@ -30,10 +30,10 @@ use crate::deref_separator::deref_finder;
/// Once this is complete, Drop terminators in the MIR correspond to a call to the "drop glue" or
/// "drop shim" for the type of the dropped place.
///
- /// This pass relies on dropped places having an associated move path, which is then used to determine
- /// the initialization status of the place and its descendants.
- /// It's worth noting that a MIR containing a Drop without an associated move path is probably ill formed,
- /// as it would allow running a destructor on a place behind a reference:
+ /// This pass relies on dropped places having an associated move path, which is then used to
+ /// determine the initialization status of the place and its descendants.
+ /// It's worth noting that a MIR containing a Drop without an associated move path is probably ill
+ /// formed, as it would allow running a destructor on a place behind a reference:
///
/// ```text
/// fn drop_term<T>(t: &mut T) {
@@ -377,8 +377,8 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> {
);
}
// A drop and replace behind a pointer/array/whatever.
- // The borrow checker requires that these locations are initialized before the assignment,
- // so we just leave an unconditional drop.
+ // The borrow checker requires that these locations are initialized before the
+ // assignment, so we just leave an unconditional drop.
assert!(!data.is_cleanup);
}
}
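
The reflowed doc comment above says a Drop terminator alone does not guarantee the destructor runs, because the dropped place may be uninitialized. A small, hypothetical illustration of the case the pass has to cover with a runtime drop flag:

fn maybe_build(make: bool) {
    let s;
    if make {
        s = String::from("only initialized on this path");
    }
    // Whether `s` holds a value here depends on `make`, so drop elaboration
    // tracks its initialization with a runtime drop flag and only calls the
    // destructor on the path where the String was actually created.
}

fn main() {
    maybe_build(true);
    maybe_build(false);
}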

View file

@@ -60,8 +60,9 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
let fn_def_id = match ty.kind() {
ty::FnPtr(..) => None,
&ty::FnDef(def_id, _) => {
- // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust ABI).
- // So the leak of the foreign unwind into Rust can only be elsewhere, not here.
+ // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust
+ // ABI). So the leak of the foreign unwind into Rust can only be elsewhere, not
+ // here.
if !tcx.is_foreign_item(def_id) {
continue;
}

View file

@@ -92,8 +92,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
{
let mut span = self.nth_arg_span(args, arg_num);
if span.from_expansion() {
- // The operand's ctxt wouldn't display the lint since it's inside a macro so
- // we have to use the callsite's ctxt.
+ // The operand's ctxt wouldn't display the lint since it's
+ // inside a macro so we have to use the callsite's ctxt.
let callsite_ctxt = span.source_callsite().ctxt();
span = span.with_ctxt(callsite_ctxt);
}

View file

@@ -139,8 +139,8 @@ fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// Try to get some insight.
AssignedValue::Rvalue(rvalue) => {
let value = state.simplify_rvalue(rvalue, location);
- // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as
- // reusable if we have an exact type match.
+ // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark
+ // `local` as reusable if we have an exact type match.
if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) {
return;
}
@@ -480,7 +480,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
let pointer = self.evaluated[local].as_ref()?;
let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
for proj in place.projection.iter().skip(1) {
- // We have no call stack to associate a local with a value, so we cannot interpret indexing.
+ // We have no call stack to associate a local with a value, so we cannot
+ // interpret indexing.
if matches!(proj, ProjectionElem::Index(_)) {
return None;
}
@@ -1382,7 +1383,8 @@ fn op_to_prop_const<'tcx>(
return Some(ConstValue::ZeroSized);
}
- // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to avoid.
+ // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
+ // avoid.
if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
return None;
}

View file

@@ -568,7 +568,8 @@ impl<'tcx> Inliner<'tcx> {
// if the no-attribute function ends up with the same instruction set anyway.
return Err("Cannot move inline-asm across instruction sets");
} else if let TerminatorKind::TailCall { .. } = term.kind {
- // FIXME(explicit_tail_calls): figure out how exactly functions containing tail calls can be inlined (and if they even should)
+ // FIXME(explicit_tail_calls): figure out how exactly functions containing tail
+ // calls can be inlined (and if they even should)
return Err("can't inline functions with tail calls");
} else {
work_list.extend(term.successors())

View file

@@ -1,8 +1,6 @@
- //! A lint that checks for known panics like
- //! overflows, division by zero,
- //! out-of-bound access etc.
- //! Uses const propagation to determine the
- //! values of operands during checks.
+ //! A lint that checks for known panics like overflows, division by zero,
+ //! out-of-bound access etc. Uses const propagation to determine the values of
+ //! operands during checks.

use std::fmt::Debug;
@@ -562,7 +560,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
- // FIXME `Value` should properly support pairs in `Immediate`... but currently it does not.
+ // FIXME `Value` should properly support pairs in `Immediate`... but currently
+ // it does not.
let (val, overflow) = val.to_pair(&self.ecx);
Value::Aggregate {
variant: VariantIdx::ZERO,

View file

@@ -16,8 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, TagEncoding, Variants};
/// Large([u32; 1024]),
/// }
/// ```
- /// Instead of emitting moves of the large variant,
- /// Perform a memcpy instead.
+ /// Instead of emitting moves of the large variant, perform a memcpy instead.
/// Based off of [this HackMD](https://hackmd.io/@ft4bxUsFT5CEUBmRKYHr7w/rJM8BBPzD).
///
/// In summary, what this does is at runtime determine which enum variant is active,
@@ -34,6 +33,7 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt {
// https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457
sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3
}
+
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// NOTE: This pass may produce different MIR based on the alignment of the target
// platform, but it will still be valid.
@@ -116,6 +116,7 @@ impl EnumSizeOpt {
let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc));
Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc)))
}
+
fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let mut alloc_cache = FxHashMap::default();
let body_did = body.source.def_id();
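
The doc comment above (with its Small/Large example) motivates the pass: moving such an enum by value always copies the layout of the largest variant. A standalone, hypothetical snippet showing the sizes involved; the enum name and values are illustrative only:

use std::mem::size_of;

enum Example {
    Small(u8),
    Large([u32; 1024]),
}

fn main() {
    // The enum's size is dominated by `Large` (4096 bytes of payload plus the
    // tag), so an unoptimized by-value move copies that much even when the
    // active variant is `Small`. The pass instead determines the active
    // variant at runtime and copies only that variant's size.
    println!("size_of::<Example>() = {}", size_of::<Example>());
    let small = Example::Small(1);
    let moved = small; // a full-size move without the optimization
    if let Example::Small(v) = moved {
        println!("payload: {v}");
    }
}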

View file

@@ -168,8 +168,9 @@ fn remap_mir_for_const_eval_select<'tcx>(
let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
match tupled_args.node {
Operand::Constant(_) => {
- // there is no good way of extracting a tuple arg from a constant (const generic stuff)
- // so we just create a temporary and deconstruct that.
+ // There is no good way of extracting a tuple arg from a constant
+ // (const generic stuff) so we just create a temporary and deconstruct
+ // that.
let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
bb.statements.push(Statement {
source_info: SourceInfo::outermost(fn_span),
@@ -480,7 +481,8 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'
&[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges],
None,
);
- check_consts::post_drop_elaboration::check_live_drops(tcx, body); // FIXME: make this a MIR lint
+ // FIXME: make this a MIR lint
+ check_consts::post_drop_elaboration::check_live_drops(tcx, body);
}
debug!("runtime_mir_lowering({:?})", did);
@@ -509,10 +511,12 @@ fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
/// Returns the sequence of passes that lowers analysis to runtime MIR.
fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let passes: &[&dyn MirPass<'tcx>] = &[
- // These next passes must be executed together
+ // These next passes must be executed together.
&add_call_guards::CriticalCallEdges,
- &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too.
- &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types
+ // Must be done before drop elaboration because we need to drop opaque types, too.
+ &reveal_all::RevealAll,
+ // Calling this after reveal_all ensures that we don't deal with opaque types.
+ &add_subtyping_projections::Subtyper,
&elaborate_drops::ElaborateDrops,
// This will remove extraneous landing pads which are no longer
// necessary as well as forcing any call in a non-unwinding
@@ -521,8 +525,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// AddMovesForPackedDrops needs to run after drop
// elaboration.
&add_moves_for_packed_drops::AddMovesForPackedDrops,
- // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. Otherwise it should run fairly late,
- // but before optimizations begin.
+ // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`.
+ // Otherwise it should run fairly late, but before optimizations begin.
&add_retag::AddRetag,
&elaborate_box_derefs::ElaborateBoxDerefs,
&coroutine::StateTransform,
@@ -563,13 +567,15 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// Before inlining: trim down MIR with passes to reduce inlining work.
// Has to be done before inlining, otherwise actual call will be almost always inlined.
- // Also simple, so can just do first
+ // Also simple, so can just do first.
&lower_slice_len::LowerSliceLenCalls,
- // Perform instsimplify before inline to eliminate some trivial calls (like clone shims).
+ // Perform instsimplify before inline to eliminate some trivial calls (like clone
+ // shims).
&instsimplify::InstSimplify::BeforeInline,
// Perform inlining, which may add a lot of code.
&inline::Inline,
- // Code from other crates may have storage markers, so this needs to happen after inlining.
+ // Code from other crates may have storage markers, so this needs to happen after
+ // inlining.
&remove_storage_markers::RemoveStorageMarkers,
// Inlining and instantiation may introduce ZST and useless drops.
&remove_zsts::RemoveZsts,
@@ -586,7 +592,8 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
&multiple_return_terminators::MultipleReturnTerminators,
- // After simplifycfg, it allows us to discover new opportunities for peephole optimizations.
+ // After simplifycfg, it allows us to discover new opportunities for peephole
+ // optimizations.
&instsimplify::InstSimplify::AfterSimplifyCfg,
&simplify::SimplifyLocals::BeforeConstProp,
&dead_store_elimination::DeadStoreElimination::Initial,

View file

@@ -57,8 +57,9 @@ impl<'tcx> crate::MirPass<'tcx> for MatchBranchSimplification {
}

trait SimplifyMatch<'tcx> {
- /// Simplifies a match statement, returning true if the simplification succeeds, false otherwise.
- /// Generic code is written here, and we generally don't need a custom implementation.
+ /// Simplifies a match statement, returning true if the simplification succeeds, false
+ /// otherwise. Generic code is written here, and we generally don't need a custom
+ /// implementation.
fn simplify(
&mut self,
tcx: TyCtxt<'tcx>,
@@ -240,7 +241,8 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToIf {
// Same value in both blocks. Use statement as is.
patch.add_statement(parent_end, f.kind.clone());
} else {
- // Different value between blocks. Make value conditional on switch condition.
+ // Different value between blocks. Make value conditional on switch
+ // condition.
let size = tcx.layout_of(param_env.and(discr_ty)).unwrap().size;
let const_cmp = Operand::const_from_scalar(
tcx,
@@ -394,14 +396,16 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
return None;
}
- // We first compare the two branches, and then the other branches need to fulfill the same conditions.
+ // We first compare the two branches, and then the other branches need to fulfill the same
+ // conditions.
let mut expected_transform_kinds = Vec::new();
for (f, s) in iter::zip(first_stmts, second_stmts) {
let compare_type = match (&f.kind, &s.kind) {
// If two statements are exactly the same, we can optimize.
(f_s, s_s) if f_s == s_s => ExpectedTransformKind::Same(f_s),
- // If two statements are assignments with the match values to the same place, we can optimize.
+ // If two statements are assignments with the match values to the same place, we
+ // can optimize.
(
StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
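
The SimplifyToIf comments above target matches whose arms only assign different constants to the same place. A hypothetical source-level function with roughly that shape (the name is illustrative only):

// Both arms write a constant to the same destination, so the pass can drop
// the branch and compute the result directly from the switch operand
// (effectively `x == 0`).
fn is_zero(x: u8) -> bool {
    match x {
        0 => true,
        _ => false,
    }
}

fn main() {
    assert!(is_zero(0));
    assert!(!is_zero(7));
}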

View file

@@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for MentionedItemsVisitor<'_, 'tcx> {
source_ty.builtin_deref(true).map(|t| t.kind()),
target_ty.builtin_deref(true).map(|t| t.kind()),
) {
- (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, // &str/&[T] unsizing
+ // &str/&[T] unsizing
+ (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false,
_ => true,
};
if may_involve_vtable {

View file

@@ -63,7 +63,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals {
finder.visit_basic_block_data(bb, bbd);
}
- // track everything in case there are some locals that we never saw,
+ // Track everything in case there are some locals that we never saw,
// such as in non-block things like debug info or in non-uses.
for local in body.local_decls.indices() {
finder.track(local);

View file

@@ -1,16 +1,14 @@
//! A pass that promotes borrows of constant rvalues.
//!
- //! The rvalues considered constant are trees of temps,
- //! each with exactly one initialization, and holding
- //! a constant value with no interior mutability.
- //! They are placed into a new MIR constant body in
- //! `promoted` and the borrow rvalue is replaced with
- //! a `Literal::Promoted` using the index into `promoted`
- //! of that constant MIR.
+ //! The rvalues considered constant are trees of temps, each with exactly one
+ //! initialization, and holding a constant value with no interior mutability.
+ //! They are placed into a new MIR constant body in `promoted` and the borrow
+ //! rvalue is replaced with a `Literal::Promoted` using the index into
+ //! `promoted` of that constant MIR.
//!
- //! This pass assumes that every use is dominated by an
- //! initialization and can otherwise silence errors, if
- //! move analysis runs after promotion on broken MIR.
+ //! This pass assumes that every use is dominated by an initialization and can
+ //! otherwise silence errors, if move analysis runs after promotion on broken
+ //! MIR.

use std::assert_matches::assert_matches;
use std::cell::Cell;
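
The reworded module docs above describe promotion: a borrow of a constant rvalue is lifted into a static allocation that outlives the enclosing function. A tiny, hypothetical example of source code that relies on it:

// `&3` borrows a temporary holding a constant with no interior mutability,
// so the temporary is promoted and the reference gets a `'static` lifetime.
fn promoted() -> &'static i32 {
    &3
}

fn main() {
    assert_eq!(*promoted(), 3);
}
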
@@ -386,7 +384,8 @@ impl<'tcx> Validator<'_, 'tcx> {
fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
match kind {
// Reject these borrow types just to be safe.
- // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
+ // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a
+ // usecase.
BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
return Err(Unpromotable);
}
@@ -468,7 +467,8 @@ impl<'tcx> Validator<'_, 'tcx> {
let lhs_ty = lhs.ty(self.body, self.tcx);
if let ty::RawPtr(_, _) | ty::FnPtr(..) = lhs_ty.kind() {
- // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+ // Raw and fn pointer operations are not allowed inside consts and thus not
+ // promotable.
assert_matches!(
op,
BinOp::Eq
@@ -498,7 +498,8 @@ impl<'tcx> Validator<'_, 'tcx> {
Some(x) if x != 0 => {} // okay
_ => return Err(Unpromotable), // value not known or 0 -- not okay
}
- // Furthermore, for signed division, we also have to exclude `int::MIN / -1`.
+ // Furthermore, for signed division, we also have to exclude `int::MIN /
+ // -1`.
if lhs_ty.is_signed() {
match rhs_val.map(|x| x.to_int(sz)) {
Some(-1) | None => {
@@ -512,8 +513,11 @@ impl<'tcx> Validator<'_, 'tcx> {
};
let lhs_min = sz.signed_int_min();
match lhs_val.map(|x| x.to_int(sz)) {
- Some(x) if x != lhs_min => {} // okay
- _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
+ // okay
+ Some(x) if x != lhs_min => {}
+ // value not known or int::MIN -- not okay
+ _ => return Err(Unpromotable),
}
}
_ => {}
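
The checks above refuse to promote a signed division whose operands could be `int::MIN / -1`, because that quotient overflows. A short worked example of the excluded case (plain i32, purely illustrative):

fn main() {
    // i32::MIN / -1 would be i32::MAX + 1, which does not fit in i32, so the
    // division (and the corresponding remainder) overflows; checked_div and
    // checked_rem report this as None.
    assert_eq!(i32::MIN.checked_div(-1), None);
    assert_eq!(i32::MIN.checked_rem(-1), None);
    // Any other nonzero divisor is fine.
    assert_eq!(i32::MIN.checked_div(2), Some(i32::MIN / 2));
}
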
@@ -815,8 +819,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
TerminatorKind::Call {
mut func, mut args, call_source: desugar, fn_span, ..
} => {
- // This promoted involves a function call, so it may fail to evaluate.
- // Let's make sure it is added to `required_consts` so that failure cannot get lost.
+ // This promoted involves a function call, so it may fail to evaluate. Let's
+ // make sure it is added to `required_consts` so that failure cannot get lost.
self.add_to_required = true;
self.visit_operand(&mut func, loc);

View file

@@ -106,8 +106,9 @@ fn is_needs_drop_and_init<'tcx>(
// If its projection *is* present in `MoveData`, then the field may have been moved
// from separate from its parent. Recurse.
adt.variants().iter_enumerated().any(|(vid, variant)| {
- // Enums have multiple variants, which are discriminated with a `Downcast` projection.
- // Structs have a single variant, and don't use a `Downcast` projection.
+ // Enums have multiple variants, which are discriminated with a `Downcast`
+ // projection. Structs have a single variant, and don't use a `Downcast`
+ // projection.
let mpi = if adt.is_enum() {
let downcast =
move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid));

View file

@@ -35,9 +35,9 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) {
return;
}
- // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed.
- // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these
- // projections anymore.
+ // `OpaqueCast` projections are only needed if there are opaque types on which projections
+ // are performed. After the `RevealAll` pass, all opaque types are replaced with their
+ // hidden types, so we don't need these projections anymore.
place.projection = self.tcx.mk_place_elems(
&place
.projection

View file

@@ -1003,7 +1003,8 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t
let locals = local_decls_for_sig(&sig, span);
let source_info = SourceInfo::outermost(span);
- // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful provenance.
+ // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful
+ // provenance.
let rvalue = Rvalue::Cast(
CastKind::FnPtrToPtr,
Operand::Move(Place::from(Local::new(1))),

View file

@@ -73,12 +73,13 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {
_ => unreachable!(),
}
- // delete comparison statement if it the value being switched on was moved, which means it can not be user later on
+ // delete comparison statement if it the value being switched on was moved, which means
+ // it can not be user later on
if opt.can_remove_bin_op_stmt {
bb.statements[opt.bin_op_stmt_idx].make_nop();
} else {
- // if the integer being compared to a const integral is being moved into the comparison,
- // e.g `_2 = Eq(move _3, const 'x');`
+ // if the integer being compared to a const integral is being moved into the
+ // comparison, e.g `_2 = Eq(move _3, const 'x');`
// we want to avoid making a double move later on in the switchInt on _3.
// So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`,
// we convert the move in the comparison statement to a copy.
@@ -102,12 +103,15 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral {
// remove StorageDead (if it exists) being used in the assign of the comparison
for (stmt_idx, stmt) in bb.statements.iter().enumerate() {
- if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local)
- {
+ if !matches!(
+ stmt.kind,
+ StatementKind::StorageDead(local) if local == opt.to_switch_on.local
+ ) {
continue;
}
storage_deads_to_remove.push((stmt_idx, opt.bb_idx));
- // if we have StorageDeads to remove then make sure to insert them at the top of each target
+ // if we have StorageDeads to remove then make sure to insert them at the top of
+ // each target
for bb_idx in new_targets.all_targets() {
storage_deads_to_insert.push((
*bb_idx,
@@ -207,7 +211,8 @@ fn find_branch_value_info<'tcx>(
(Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
| (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
let branch_value_ty = branch_value.const_.ty();
- // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
+ // we only want to apply this optimization if we are matching on integrals (and chars),
+ // as it is not possible to switch on floats
if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
return None;
};
@@ -222,7 +227,8 @@ fn find_branch_value_info<'tcx>(
struct OptimizationInfo<'tcx> {
/// Basic block to apply the optimization
bb_idx: BasicBlock,
- /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be removed - i.e the statement is used later on
+ /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be
+ /// removed - i.e the statement is used later on
bin_op_stmt_idx: usize,
/// Can remove Eq/Ne assignment
can_remove_bin_op_stmt: bool,
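
The comments above work in terms of the MIR pattern `_2 = Eq(move _3, const 'x'); switchInt(_2)`. A hypothetical surface-level function that lowers to roughly that shape, which the pass rewrites to switch on `_3` directly (converting the move in the comparison into a copy so the value is not moved twice):

// The `c == 'x'` comparison plus the branch becomes an Eq assignment
// followed by a switchInt in MIR.
fn is_x(c: char) -> u8 {
    if c == 'x' { 1 } else { 2 }
}

fn main() {
    assert_eq!(is_x('x'), 1);
    assert_eq!(is_x('y'), 2);
}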

View file

@@ -156,9 +156,9 @@ impl<'tcx> crate::MirPass<'tcx> for UnreachableEnumBranching {
};
true
}
- // If and only if there is a variant that does not have a branch set,
- // change the current of otherwise as the variant branch and set otherwise to unreachable.
- // It transforms following code
+ // If and only if there is a variant that does not have a branch set, change the
+ // current of otherwise as the variant branch and set otherwise to unreachable. It
+ // transforms following code
// ```rust
// match c {
// Ordering::Less => 1,

View file

@@ -26,7 +26,8 @@ impl crate::MirPass<'_> for UnreachablePropagation {
let terminator = bb_data.terminator();
let is_unreachable = match &terminator.kind {
TerminatorKind::Unreachable => true,
- // This will unconditionally run into an unreachable and is therefore unreachable as well.
+ // This will unconditionally run into an unreachable and is therefore unreachable
+ // as well.
TerminatorKind::Goto { target } if unreachable_blocks.contains(target) => {
patch.patch_terminator(bb, TerminatorKind::Unreachable);
true
@@ -85,8 +86,9 @@ fn remove_successors_from_switch<'tcx>(
// }
// }
//
- // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to
- // turn it into just `x` later. Without the unreachable, such a transformation would be illegal.
+ // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or
+ // LLVM to turn it into just `x` later. Without the unreachable, such a transformation would be
+ // illegal.
//
// In order to preserve this information, we record reachable and unreachable targets as
// `Assume` statements in MIR.
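
The comment above references a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`. A hypothetical bit of source that produces that shape: matching on a `bool` covers every value, so the generated `otherwise` target can only be unreachable, and recording that fact is what later lets the match collapse to a plain conversion of `x`.

fn as_u8(x: bool) -> u8 {
    match x {
        false => 0,
        true => 1,
    }
}

fn main() {
    assert_eq!(as_u8(false), 0);
    assert_eq!(as_u8(true), 1);
}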

View file

@@ -388,10 +388,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
}
self.check_unwind_edge(location, unwind);
- // The code generation assumes that there are no critical call edges. The assumption
- // is used to simplify inserting code that should be executed along the return edge
- // from the call. FIXME(tmiasko): Since this is a strictly code generation concern,
- // the code generation should be responsible for handling it.
+ // The code generation assumes that there are no critical call edges. The
+ // assumption is used to simplify inserting code that should be executed along
+ // the return edge from the call. FIXME(tmiasko): Since this is a strictly code
+ // generation concern, the code generation should be responsible for handling
+ // it.
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized)
&& self.is_critical_call_edge(target, unwind)
{
@@ -404,8 +405,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
);
}
- // The call destination place and Operand::Move place used as an argument might be
- // passed by a reference to the callee. Consequently they cannot be packed.
+ // The call destination place and Operand::Move place used as an argument might
+ // be passed by a reference to the callee. Consequently they cannot be packed.
if is_within_packed(self.tcx, &self.body.local_decls, destination).is_some() {
// This is bad! The callee will expect the memory to be aligned.
self.fail(
@@ -953,9 +954,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
AggregateKind::RawPtr(pointee_ty, mutability) => {
if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
- // It would probably be fine to support this in earlier phases,
- // but at the time of writing it's only ever introduced from intrinsic lowering,
- // so earlier things just `bug!` on it.
+ // It would probably be fine to support this in earlier phases, but at the
+ // time of writing it's only ever introduced from intrinsic lowering, so
+ // earlier things just `bug!` on it.
self.fail(location, "RawPtr should be in runtime MIR only");
}
@@ -1109,10 +1110,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
UnOp::PtrMetadata => {
if !matches!(self.mir_phase, MirPhase::Runtime(_)) {
- // It would probably be fine to support this in earlier phases,
- // but at the time of writing it's only ever introduced from intrinsic lowering
- // or other runtime-phase optimization passes,
- // so earlier things can just `bug!` on it.
+ // It would probably be fine to support this in earlier phases, but at
+ // the time of writing it's only ever introduced from intrinsic
+ // lowering or other runtime-phase optimization passes, so earlier
+ // things can just `bug!` on it.
self.fail(location, "PtrMetadata should be in runtime MIR only");
}
@@ -1506,7 +1507,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
if let TerminatorKind::TailCall { .. } = terminator.kind {
- // FIXME(explicit_tail_calls): implement tail-call specific checks here (such as signature matching, forbidding closures, etc)
+ // FIXME(explicit_tail_calls): implement tail-call specific checks here (such
+ // as signature matching, forbidding closures, etc)
}
}
TerminatorKind::Assert { cond, .. } => {