Rollup merge of #99027 - tmiasko:basic-blocks, r=oli-obk

Replace `Body::basic_blocks()` with field access

Since the refactoring in #98930, it has been possible to borrow the basic blocks
independently of other parts of MIR by accessing the `basic_blocks` field
directly.

Replace the now-unnecessary `Body::basic_blocks()` method with direct field access,
which has the additional benefit of borrowing only the basic blocks rather than the whole `Body`.
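As a minimal, self-contained sketch of that borrowing benefit (the `MirBody`, `BasicBlockData`, and `LocalDecl` types below are stand-ins for the real rustc types, not code from this PR):

```rust
#![allow(dead_code)]

struct BasicBlockData;
struct LocalDecl;

// Stand-in for `rustc_middle::mir::Body`.
struct MirBody {
    basic_blocks: Vec<BasicBlockData>,
    local_decls: Vec<LocalDecl>,
}

impl MirBody {
    // The old accessor: calling it borrows all of `self`, not just one field.
    fn basic_blocks(&self) -> &Vec<BasicBlockData> {
        &self.basic_blocks
    }
}

fn example(body: &mut MirBody) {
    // With the method, these two borrows conflict, because `basic_blocks()`
    // keeps the whole `*body` borrowed while `local_decls` is borrowed mutably:
    //
    //     let blocks = body.basic_blocks();
    //     let decls = &mut body.local_decls; // error[E0502]
    //     let _ = (blocks.len(), decls.len());
    //
    // With direct field access the borrows are disjoint, so both compile:
    let blocks = &body.basic_blocks;
    let decls = &mut body.local_decls;
    let _ = (blocks.len(), decls.len());
}

fn main() {
    example(&mut MirBody { basic_blocks: Vec::new(), local_decls: Vec::new() });
}
```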
Matthias Krüger 2022-08-29 06:34:43 +02:00 committed by GitHub
commit d182081de1
65 changed files with 131 additions and 140 deletions

View file

@ -56,7 +56,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
// example.
let mut calls_to_terminate = Vec::new();
let mut cleanups_to_remove = Vec::new();
-for (id, block) in body.basic_blocks().iter_enumerated() {
+for (id, block) in body.basic_blocks.iter_enumerated() {
if block.is_cleanup {
continue;
}

View file

@ -45,7 +45,7 @@ impl AddCallGuards {
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
-let cur_len = body.basic_blocks().len();
+let cur_len = body.basic_blocks.len();
for block in body.basic_blocks_mut() {
match block.terminator {

View file

@ -55,7 +55,7 @@ fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>)
let mut patch = MirPatch::new(body);
let param_env = tcx.param_env(def_id);
-for (bb, data) in body.basic_blocks().iter_enumerated() {
+for (bb, data) in body.basic_blocks.iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();

View file

@ -61,14 +61,14 @@ impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
let _: Option<_> = try {
let target = terminator.kind.as_goto()?;
// We only apply this optimization if the last statement is a const assignment
-let last_statement = self.body.basic_blocks()[location.block].statements.last()?;
+let last_statement = self.body.basic_blocks[location.block].statements.last()?;
if let (place, Rvalue::Use(Operand::Constant(_const))) =
last_statement.kind.as_assign()?
{
// We found a constant being assigned to `place`.
// Now check that the target of this Goto switches on this place.
-let target_bb = &self.body.basic_blocks()[target];
+let target_bb = &self.body.basic_blocks[target];
// The `StorageDead(..)` statement does not affect the functionality of mir.
// We can move this part of the statement up to the predecessor.

View file

@ -131,7 +131,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
let dummy_body = &Body::new(
body.source,
-body.basic_blocks().clone(),
+(*body.basic_blocks).clone(),
body.source_scopes.clone(),
body.local_decls.clone(),
Default::default(),

View file

@ -105,7 +105,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
let dummy_body = &Body::new(
body.source,
-body.basic_blocks().clone(),
+(*body.basic_blocks).clone(),
body.source_scopes.clone(),
body.local_decls.clone(),
Default::default(),
@ -522,7 +522,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
fn visit_body(&mut self, body: &Body<'tcx>) {
-for (bb, data) in body.basic_blocks().iter_enumerated() {
+for (bb, data) in body.basic_blocks.iter_enumerated() {
self.visit_basic_block_data(bb, data);
}
}

View file

@ -713,7 +713,7 @@ impl<
ShortCircuitPreorder {
body,
-visited: BitSet::new_empty(body.basic_blocks().len()),
+visited: BitSet::new_empty(body.basic_blocks.len()),
worklist,
filtered_successors,
}
@ -747,7 +747,7 @@ impl<
}
fn size_hint(&self) -> (usize, Option<usize>) {
-let size = self.body.basic_blocks().len() - self.visited.count();
+let size = self.body.basic_blocks.len() - self.visited.count();
(size, Some(size))
}
}

View file

@ -80,7 +80,7 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
return;
}
-match mir_body.basic_blocks()[mir::START_BLOCK].terminator().kind {
+match mir_body.basic_blocks[mir::START_BLOCK].terminator().kind {
TerminatorKind::Unreachable => {
trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
return;

View file

@ -84,7 +84,7 @@ impl CoverageVisitor {
}
fn visit_body(&mut self, body: &Body<'_>) {
-for bb_data in body.basic_blocks().iter() {
+for bb_data in body.basic_blocks.iter() {
for statement in bb_data.statements.iter() {
if let StatementKind::Coverage(box ref coverage) = statement.kind {
if is_inlined(body, statement) {
@ -138,7 +138,7 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) ->
fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> {
let body = mir_body(tcx, def_id);
-body.basic_blocks()
+body.basic_blocks
.iter()
.flat_map(|data| {
data.statements.iter().filter_map(|statement| match statement.kind {

View file

@ -176,7 +176,7 @@ fn debug_basic_blocks<'tcx>(mir_body: &Body<'tcx>) -> String {
format!(
"{:?}",
mir_body
-.basic_blocks()
+.basic_blocks
.iter_enumerated()
.map(|(bb, data)| {
let term = &data.terminator();
@ -213,7 +213,7 @@ fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) {
"digraph {} {{\n{}\n}}",
name,
mir_body
-.basic_blocks()
+.basic_blocks
.iter_enumerated()
.map(|(bb, data)| {
format!(
@ -653,7 +653,7 @@ fn test_traverse_coverage_with_loops() {
fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
let mut some_span: Option<Span> = None;
-for (_, data) in mir_body.basic_blocks().iter_enumerated() {
+for (_, data) in mir_body.basic_blocks.iter_enumerated() {
let term_span = data.terminator().source_info.span;
if let Some(span) = some_span.as_mut() {
*span = span.to(term_span);

View file

@ -58,7 +58,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
let mut duplicates = FxHashMap::default();
let bbs_to_go_through =
-body.basic_blocks().iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
+body.basic_blocks.iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
let mut same_hashes =
FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
@ -71,8 +71,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
// When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
// with replacement bb3.
// When the duplicates are removed, we will end up with only bb3.
-for (bb, bbd) in body.basic_blocks().iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup)
-{
+for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
// Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
// that are unlikely to have duplicates, we stop early. The early bail number has been
// found experimentally by eprintln while compiling the crates in the rustc-perf suite.

View file

@ -150,7 +150,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
def_id,
body.local_decls.len(),
relevant,
-body.basic_blocks().len()
+body.basic_blocks.len()
);
if relevant > MAX_LOCALS {
warn!(
@ -159,11 +159,11 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
);
return;
}
-if body.basic_blocks().len() > MAX_BLOCKS {
+if body.basic_blocks.len() > MAX_BLOCKS {
warn!(
"too many blocks in {:?} ({}, max is {}), not optimizing",
def_id,
-body.basic_blocks().len(),
+body.basic_blocks.len(),
MAX_BLOCKS
);
return;

View file

@ -104,8 +104,8 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
let mut should_cleanup = false;
// Also consider newly generated bbs in the same pass
-for i in 0..body.basic_blocks().len() {
-let bbs = body.basic_blocks();
+for i in 0..body.basic_blocks.len() {
+let bbs = &*body.basic_blocks;
let parent = BasicBlock::from_usize(i);
let Some(opt_data) = evaluate_candidate(tcx, body, parent) else {
continue
@ -316,7 +316,7 @@ fn evaluate_candidate<'tcx>(
body: &Body<'tcx>,
parent: BasicBlock,
) -> Option<OptimizationData<'tcx>> {
-let bbs = body.basic_blocks();
+let bbs = &body.basic_blocks;
let TerminatorKind::SwitchInt {
targets,
switch_ty: parent_ty,

View file

@ -89,13 +89,13 @@ fn find_dead_unwinds<'tcx>(
debug!("find_dead_unwinds({:?})", body.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
-let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
+let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len());
let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
.into_engine(tcx, body)
.pass_name("find_dead_unwinds")
.iterate_to_fixpoint()
.into_results_cursor(body);
-for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
let place = match bb_data.terminator().kind {
TerminatorKind::Drop { ref place, unwind: Some(_), .. }
| TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => {
@ -303,7 +303,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn collect_drop_flags(&mut self) {
-for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+for (bb, data) in self.body.basic_blocks.iter_enumerated() {
let terminator = data.terminator();
let place = match terminator.kind {
TerminatorKind::Drop { ref place, .. }
@ -358,7 +358,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn elaborate_drops(&mut self) {
-for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+for (bb, data) in self.body.basic_blocks.iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
@ -515,7 +515,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn drop_flags_for_fn_rets(&mut self) {
-for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+for (bb, data) in self.body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Call {
destination, target: Some(tgt), cleanup: Some(_), ..
} = data.terminator().kind
@ -550,7 +550,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
// drop flags by themselves, to avoid the drop flags being
// clobbered before they are read.
-for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+for (bb, data) in self.body.basic_blocks.iter_enumerated() {
debug!("drop_flags_for_locs({:?})", data);
for i in 0..(data.statements.len() + 1) {
debug!("drop_flag_for_locs: stmt {}", i);

View file

@ -65,7 +65,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
let mut tainted = false;
-for block in body.basic_blocks() {
+for block in body.basic_blocks.iter() {
if block.is_cleanup {
continue;
}

View file

@ -490,12 +490,12 @@ fn locals_live_across_suspend_points<'tcx>(
.iterate_to_fixpoint()
.into_results_cursor(body_ref);
-let mut storage_liveness_map = IndexVec::from_elem(None, body.basic_blocks());
+let mut storage_liveness_map = IndexVec::from_elem(None, &body.basic_blocks);
let mut live_locals_at_suspension_points = Vec::new();
let mut source_info_at_suspension_points = Vec::new();
let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len());
-for (block, data) in body.basic_blocks().iter_enumerated() {
+for (block, data) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Yield { .. } = data.terminator().kind {
let loc = Location { block, statement_index: data.statements.len() };
@ -704,7 +704,7 @@ impl<'mir, 'tcx> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx>
impl StorageConflictVisitor<'_, '_, '_> {
fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) {
// Ignore unreachable blocks.
-if self.body.basic_blocks()[loc.block].terminator().kind == TerminatorKind::Unreachable {
+if self.body.basic_blocks[loc.block].terminator().kind == TerminatorKind::Unreachable {
return;
}
@ -886,7 +886,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };
-for (block, block_data) in body.basic_blocks().iter_enumerated() {
+for (block, block_data) in body.basic_blocks.iter_enumerated() {
let (target, unwind, source_info) = match block_data.terminator() {
Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => {
if let Some(local) = place.as_local() {
@ -991,7 +991,7 @@ fn insert_panic_block<'tcx>(
body: &mut Body<'tcx>,
message: AssertMessage<'tcx>,
) -> BasicBlock {
-let assert_block = BasicBlock::new(body.basic_blocks().len());
+let assert_block = BasicBlock::new(body.basic_blocks.len());
let term = TerminatorKind::Assert {
cond: Operand::Constant(Box::new(Constant {
span: body.span,
@ -1021,7 +1021,7 @@ fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEn
}
// If there's a return terminator the function may return.
-for block in body.basic_blocks() {
+for block in body.basic_blocks.iter() {
if let TerminatorKind::Return = block.terminator().kind {
return true;
}
@ -1038,7 +1038,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
}
// Unwinds can only start at certain terminators.
-for block in body.basic_blocks() {
+for block in body.basic_blocks.iter() {
match block.terminator().kind {
// These never unwind.
TerminatorKind::Goto { .. }

View file

@ -95,7 +95,7 @@ fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
history: Vec::new(),
changed: false,
};
-let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
+let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
this.process_blocks(body, blocks);
this.changed
}
@ -217,9 +217,9 @@ impl<'tcx> Inliner<'tcx> {
}
}
-let old_blocks = caller_body.basic_blocks().next_index();
+let old_blocks = caller_body.basic_blocks.next_index();
self.inline_call(caller_body, &callsite, callee_body);
-let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+let new_blocks = old_blocks..caller_body.basic_blocks.next_index();
Ok(new_blocks)
}
@ -409,14 +409,14 @@ impl<'tcx> Inliner<'tcx> {
// Give a bonus functions with a small number of blocks,
// We normally have two or three blocks for even
// very small functions.
-if callee_body.basic_blocks().len() <= 3 {
+if callee_body.basic_blocks.len() <= 3 {
threshold += threshold / 4;
}
debug!(" final inline threshold = {}", threshold);
// FIXME: Give a bonus to functions with only a single caller
let diverges = matches!(
-callee_body.basic_blocks()[START_BLOCK].terminator().kind,
+callee_body.basic_blocks[START_BLOCK].terminator().kind,
TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
);
if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
@ -434,13 +434,13 @@ impl<'tcx> Inliner<'tcx> {
// Traverse the MIR manually so we can account for the effects of inlining on the CFG.
let mut work_list = vec![START_BLOCK];
-let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
+let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
while let Some(bb) = work_list.pop() {
if !visited.insert(bb.index()) {
continue;
}
-let blk = &callee_body.basic_blocks()[bb];
+let blk = &callee_body.basic_blocks[bb];
checker.visit_basic_block_data(bb, blk);
let term = blk.terminator();
@ -541,7 +541,7 @@ impl<'tcx> Inliner<'tcx> {
args: &args,
new_locals: Local::new(caller_body.local_decls.len())..,
new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
-new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
+new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
destination: dest,
callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
callsite,

View file

@ -153,7 +153,7 @@ pub(crate) fn mir_inliner_callees<'tcx>(
_ => tcx.instance_mir(instance),
};
let mut calls = FxIndexSet::default();
-for bb_data in body.basic_blocks() {
+for bb_data in body.basic_blocks.iter() {
let terminator = bb_data.terminator();
if let TerminatorKind::Call { func, .. } = &terminator.kind {
let ty = func.ty(&body.local_decls, tcx);

View file

@ -15,7 +15,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// find basic blocks with no statement and a return terminator
-let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks().len());
+let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks.len());
let def_id = body.source.def_id();
let bbs = body.basic_blocks_mut();
for idx in bbs.indices() {

View file

@ -21,10 +21,10 @@ impl<'tcx> MirPass<'tcx> for NormalizeArrayLen {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// early returns for edge cases of highly unrolled functions
-if body.basic_blocks().len() > MAX_NUM_BLOCKS {
+if body.basic_blocks.len() > MAX_NUM_BLOCKS {
return;
}
-if body.local_decls().len() > MAX_NUM_LOCALS {
+if body.local_decls.len() > MAX_NUM_LOCALS {
return;
}
normalize_array_len_calls(tcx, body)

View file

@ -89,7 +89,7 @@ fn local_eligible_for_nrvo(body: &mut mir::Body<'_>) -> Option<Local> {
}
let mut copied_to_return_place = None;
-for block in body.basic_blocks().indices() {
+for block in body.basic_blocks.indices() {
// Look for blocks with a `Return` terminator.
if !matches!(body[block].terminator().kind, mir::TerminatorKind::Return) {
continue;
@ -122,7 +122,7 @@ fn find_local_assigned_to_return_place(
body: &mut mir::Body<'_>,
) -> Option<Local> {
let mut block = start;
-let mut seen = HybridBitSet::new_empty(body.basic_blocks().len());
+let mut seen = HybridBitSet::new_empty(body.basic_blocks.len());
// Iterate as long as `block` has exactly one predecessor that we have not yet visited.
while seen.insert(block) {

View file

@ -94,7 +94,7 @@ impl RemoveNoopLandingPads {
let mut jumps_folded = 0;
let mut landing_pads_removed = 0;
-let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len());
+let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len());
// This is a post-order traversal, so that if A post-dominates B
// then A will be visited before B.

View file

@ -35,7 +35,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
.into_results_cursor(body);
let mut to_remove = vec![];
-for (bb, block) in body.basic_blocks().iter_enumerated() {
+for (bb, block) in body.basic_blocks.iter_enumerated() {
let terminator = block.terminator();
let (TerminatorKind::Drop { place, .. } | TerminatorKind::DropAndReplace { place, .. })
= &terminator.kind

View file

@ -62,7 +62,7 @@ impl<'tcx> MirPass<'tcx> for SeparateConstSwitch {
pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
let mut new_blocks: SmallVec<[(BasicBlock, BasicBlock); 6]> = SmallVec::new();
let predecessors = body.basic_blocks.predecessors();
-'block_iter: for (block_id, block) in body.basic_blocks().iter_enumerated() {
+'block_iter: for (block_id, block) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::SwitchInt {
discr: Operand::Copy(switch_place) | Operand::Move(switch_place),
..
@ -90,7 +90,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
let mut predecessors_left = predecessors[block_id].len();
'predec_iter: for predecessor_id in predecessors[block_id].iter().copied() {
-let predecessor = &body.basic_blocks()[predecessor_id];
+let predecessor = &body.basic_blocks[predecessor_id];
// First we make sure the predecessor jumps
// in a reasonable way

View file

@ -74,7 +74,7 @@ pub struct CfgSimplifier<'a, 'tcx> {
impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
pub fn new(body: &'a mut Body<'tcx>) -> Self {
-let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
+let mut pred_count = IndexVec::from_elem(0u32, &body.basic_blocks);
// we can't use mir.predecessors() here because that counts
// dead blocks, which we don't want to.
@ -263,7 +263,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let reachable = traversal::reachable_as_bitset(body);
-let num_blocks = body.basic_blocks().len();
+let num_blocks = body.basic_blocks.len();
if num_blocks == reachable.count() {
return;
}

View file

@ -151,7 +151,7 @@ struct OptimizationFinder<'a, 'tcx> {
impl<'tcx> OptimizationFinder<'_, 'tcx> {
fn find_optimizations(&self) -> Vec<OptimizationInfo<'tcx>> {
self.body
-.basic_blocks()
+.basic_blocks
.iter_enumerated()
.filter_map(|(bb_idx, bb)| {
// find switch

View file

@ -596,7 +596,7 @@ struct SimplifyBranchSameOptimizationFinder<'a, 'tcx> {
impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> {
fn find(&self) -> Vec<SimplifyBranchSameOptimization> {
self.body
-.basic_blocks()
+.basic_blocks
.iter_enumerated()
.filter_map(|(bb_idx, bb)| {
let (discr_switched_on, targets_and_values) = match &bb.terminator().kind {
@ -632,7 +632,7 @@ impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> {
let mut iter_bbs_reachable = targets_and_values
.iter()
-.map(|target_and_value| (target_and_value, &self.body.basic_blocks()[target_and_value.target]))
+.map(|target_and_value| (target_and_value, &self.body.basic_blocks[target_and_value.target]))
.filter(|(_, bb)| {
// Reaching `unreachable` is UB so assume it doesn't happen.
bb.terminator().kind != TerminatorKind::Unreachable

View file

@ -79,7 +79,7 @@ fn ensure_otherwise_unreachable<'tcx>(
targets: &SwitchTargets,
) -> Option<BasicBlockData<'tcx>> {
let otherwise = targets.otherwise();
-let bb = &body.basic_blocks()[otherwise];
+let bb = &body.basic_blocks[otherwise];
if bb.terminator().kind == TerminatorKind::Unreachable
&& bb.statements.iter().all(|s| matches!(&s.kind, StatementKind::StorageDead(_)))
{
@ -102,10 +102,10 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("UninhabitedEnumBranching starting for {:?}", body.source);
-for bb in body.basic_blocks().indices() {
+for bb in body.basic_blocks.indices() {
trace!("processing block {:?}", bb);
-let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks()[bb], tcx, body) else {
+let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body) else {
continue;
};