Rollup merge of #137923 - scottmcm:fix-postorder-size-hint, r=tmiasko

Simplify `<Postorder as Iterator>::size_hint`

The current version is sometimes malformed (cc #137919); let's see if we can get away with a loose but trivially-correct one.
Jubilee, 2025-03-04 19:37:01 -08:00 (committed by GitHub)
commit 81a4349e73
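For background (an editorial sketch, not part of the commit): `Iterator::size_hint` only promises a lower bound that never exceeds the number of items actually remaining and an optional upper bound that is never below it, so a deliberately loose pair like `(0, Some(total))` can never be wrong for an iterator that yields at most `total` items. A minimal illustration with a hypothetical `Countdown` iterator:

// Hypothetical example; not taken from the rustc sources.
struct Countdown {
    remaining: usize,
}

impl Iterator for Countdown {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        // Yields `remaining, remaining - 1, ..., 1`.
        if self.remaining == 0 {
            None
        } else {
            let n = self.remaining;
            self.remaining -= 1;
            Some(n)
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Loose but trivially correct: at most `remaining` items are left,
        // and a lower bound of 0 can never overshoot.
        (0, Some(self.remaining))
    }
}

fn main() {
    let it = Countdown { remaining: 3 };
    assert_eq!(it.size_hint(), (0, Some(3)));
    assert_eq!(it.collect::<Vec<_>>(), vec![3, 2, 1]);
}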


@@ -23,19 +23,13 @@ pub struct Preorder<'a, 'tcx> {
     body: &'a Body<'tcx>,
     visited: DenseBitSet<BasicBlock>,
     worklist: Vec<BasicBlock>,
-    root_is_start_block: bool,
 }
 
 impl<'a, 'tcx> Preorder<'a, 'tcx> {
     pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
         let worklist = vec![root];
 
-        Preorder {
-            body,
-            visited: DenseBitSet::new_empty(body.basic_blocks.len()),
-            worklist,
-            root_is_start_block: root == START_BLOCK,
-        }
+        Preorder { body, visited: DenseBitSet::new_empty(body.basic_blocks.len()), worklist }
     }
 }
 
@@ -71,15 +65,11 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        // All the blocks, minus the number of blocks we've visited.
-        let upper = self.body.basic_blocks.len() - self.visited.count();
+        // The worklist might be only things already visited.
+        let lower = 0;
 
-        let lower = if self.root_is_start_block {
-            // We will visit all remaining blocks exactly once.
-            upper
-        } else {
-            self.worklist.len()
-        };
+        // This is extremely loose, but it's not worth a popcnt loop to do better.
+        let upper = self.body.basic_blocks.len();
 
         (lower, Some(upper))
     }
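To see why the new `Preorder` lower bound is 0 (an editorial sketch with a toy graph, not rustc code): in this style of worklist preorder, successors are pushed before they are checked against the visited set, so the worklist can hold entries that will yield nothing, and its length is therefore not a valid lower bound on the remaining items.

use std::collections::HashSet;

fn main() {
    // Tiny graph: 0 -> 1, 0 -> 2, 1 -> 2 (node 2 is reachable twice).
    let successors: Vec<Vec<usize>> = vec![vec![1, 2], vec![2], vec![]];

    let mut visited = HashSet::new();
    let mut worklist = vec![0];
    let mut order = Vec::new();

    while let Some(node) = worklist.pop() {
        if !visited.insert(node) {
            // Already visited via another edge: this entry yields nothing,
            // which is why the worklist length is not a valid lower bound.
            continue;
        }
        order.push(node);
        // Successors are pushed unconditionally, so node 2 ends up in the
        // worklist a second time after it has already been visited.
        worklist.extend(&successors[node]);
    }

    assert_eq!(order, vec![0, 2, 1]);
}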
@@ -108,7 +98,6 @@ pub struct Postorder<'a, 'tcx> {
     basic_blocks: &'a IndexSlice<BasicBlock, BasicBlockData<'tcx>>,
     visited: DenseBitSet<BasicBlock>,
     visit_stack: Vec<(BasicBlock, Successors<'a>)>,
-    root_is_start_block: bool,
     /// A non-empty `extra` allows for a precise calculation of the successors.
     extra: Option<(TyCtxt<'tcx>, Instance<'tcx>)>,
 }
@@ -123,7 +112,6 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
             basic_blocks,
             visited: DenseBitSet::new_empty(basic_blocks.len()),
             visit_stack: Vec::new(),
-            root_is_start_block: root == START_BLOCK,
             extra,
         };
 
@@ -211,16 +199,13 @@ impl<'tcx> Iterator for Postorder<'_, 'tcx> {
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
-        // All the blocks, minus the number of blocks we've visited.
-        let upper = self.basic_blocks.len() - self.visited.count();
-
-        let lower = if self.root_is_start_block {
-            // We will visit all remaining blocks exactly once.
-            upper
-        } else {
-            self.visit_stack.len()
-        };
+        // These bounds are not at all tight, but that's fine.
+        // It's not worth a popcnt loop in `DenseBitSet` to improve the upper,
+        // and in mono-reachable we can't be precise anyway.
+        // Leaning on amortized growth is fine.
+        let lower = self.visit_stack.len();
+        let upper = self.basic_blocks.len();
 
         (lower, Some(upper))
     }
 }
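A closing note on the new comments (again editorial, not part of the diff): "leaning on amortized growth" refers to consumers such as `collect::<Vec<_>>()`, which typically size their initial allocation from only the lower bound of `size_hint`, so a loose lower bound costs at most a few reallocations that doubling keeps amortized O(1). Standard-library adapters already report bounds this loose:

fn main() {
    // `filter` cannot know how many items will pass the predicate, so it
    // reports a lower bound of 0 and forwards the inner range's upper bound.
    let iter = (0..1000).filter(|n| n % 7 == 0);
    assert_eq!(iter.size_hint(), (0, Some(1000)));

    // Collecting still works fine: the Vec starts from the (loose) lower
    // bound and grows by amortized doubling as items actually arrive.
    let v: Vec<i32> = iter.collect();
    assert_eq!(v.len(), 143);
}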