use crate::deref_separator::deref_finder;
use crate::MirPass;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind};
use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
use rustc_mir_dataflow::on_lookup_result_bits;
use rustc_mir_dataflow::un_derefer::UnDerefer;
use rustc_mir_dataflow::MoveDataParamEnv;
use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_span::Span;
use rustc_target::abi::{FieldIdx, VariantIdx};
use std::fmt;

/// During MIR building, `Drop` terminators are inserted in every place where a drop may occur.
/// However, in this phase, the presence of these terminators does not guarantee that a destructor
/// will run, as the target of the drop may be uninitialized.
/// In general, the compiler cannot determine at compile time whether a destructor will run or not.
///
/// At a high level, this pass refines `Drop` to only run the destructor if the
/// target is initialized. The way this is achieved is by inserting drop flags for every variable
/// that may be dropped, and then using those flags to determine whether a destructor should run.
/// Once this is complete, `Drop` terminators in the MIR correspond to a call to the "drop glue" or
/// "drop shim" for the type of the dropped place.
///
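/// For example, in the conceptual sketch below (surface Rust rather than the exact MIR this
/// pass produces, with `_flag` as an illustrative name), `s` may or may not have been dropped
/// by the time its scope ends, so the scope-end drop is guarded by a flag:
///
/// ```text
/// fn maybe_drop(cond: bool, s: String) {
///     // _flag = true;       `s` is initialized on entry
///     if cond {
///         drop(s);        // _flag = false; on this path
///     }
///     // scope end: run the destructor of `s` only if `_flag` is still set
/// }
/// ```
///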
/// This pass relies on dropped places having an associated move path, which is then used to
/// determine the initialization status of the place and its descendants.
/// It's worth noting that a MIR containing a `Drop` without an associated move path is probably
/// ill-formed, as it would allow running a destructor on a place behind a reference:
///
/// ```text
/// fn drop_term<T>(t: &mut T) {
///     mir!(
///         {
///             Drop(*t, exit)
///         }
///         exit = {
///             Return()
///         }
///     )
/// }
/// ```
pub struct ElaborateDrops;

impl<'tcx> MirPass<'tcx> for ElaborateDrops {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);

        let def_id = body.source.def_id();
        let param_env = tcx.param_env_reveal_all_normalized(def_id);
        let (side_table, move_data) = match MoveData::gather_moves(body, tcx, param_env) {
            Ok(move_data) => move_data,
            Err((move_data, _)) => {
                tcx.sess.delay_span_bug(
                    body.span,
                    "No `move_errors` should be allowed in MIR borrowck",
                );
                (Default::default(), move_data)
            }
        };
        let un_derefer = UnDerefer { tcx, derefer_sidetable: side_table };
        let elaborate_patch = {
            let env = MoveDataParamEnv { move_data, param_env };
            remove_dead_unwinds(tcx, body, &env, &un_derefer);

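            // Two dataflow results drive elaboration: "maybe initialized" and "maybe
            // uninitialized" places. A move path that is both maybe-initialized and
            // maybe-uninitialized at a drop is the conditional case that needs a
            // runtime drop flag.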
            let inits = MaybeInitializedPlaces::new(tcx, body, &env)
                .into_engine(tcx, body)
                .pass_name("elaborate_drops")
                .iterate_to_fixpoint()
                .into_results_cursor(body);

            let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
                .mark_inactive_variants_as_uninit()
                .into_engine(tcx, body)
                .pass_name("elaborate_drops")
                .iterate_to_fixpoint()
                .into_results_cursor(body);

            let reachable = traversal::reachable_as_bitset(body);

            let drop_flags = IndexVec::from_elem(None, &env.move_data.move_paths);
            ElaborateDropsCtxt {
                tcx,
                body,
                env: &env,
                init_data: InitializationData { inits, uninits },
                drop_flags,
                patch: MirPatch::new(body),
                un_derefer,
                reachable,
            }
            .elaborate()
        };
        elaborate_patch.apply(body);
        deref_finder(tcx, body);
    }
}

/// Removes unwind edges which are known to be unreachable, because they are in `drop` terminators
/// that can't drop anything.
fn remove_dead_unwinds<'tcx>(
    tcx: TyCtxt<'tcx>,
    body: &mut Body<'tcx>,
    env: &MoveDataParamEnv<'tcx>,
    und: &UnDerefer<'tcx>,
) {
    debug!("remove_dead_unwinds({:?})", body.span);
    // We only need to do this pass once, because unwind edges can only
    // reach cleanup blocks, which can't have unwind edges themselves.
    let mut dead_unwinds = Vec::new();
    let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
        .into_engine(tcx, body)
        .pass_name("remove_dead_unwinds")
        .iterate_to_fixpoint()
        .into_results_cursor(body);
    for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
        let place = match bb_data.terminator().kind {
            TerminatorKind::Drop { ref place, unwind: UnwindAction::Cleanup(_), .. } => {
                und.derefer(place.as_ref(), body).unwrap_or(*place)
            }
            _ => continue,
        };

        debug!("remove_dead_unwinds @ {:?}: {:?}", bb, bb_data);

        let LookupResult::Exact(path) = env.move_data.rev_lookup.find(place.as_ref()) else {
            debug!("remove_dead_unwinds: has parent; skipping");
            continue;
        };

        flow_inits.seek_before_primary_effect(body.terminator_loc(bb));
        debug!(
            "remove_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}",
            bb,
            place,
            path,
            flow_inits.get()
        );
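
        // The unwind edge is dead only if no drop-relevant part of the place may
        // still be initialized here, i.e. the drop is a no-op and cannot unwind.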
        let mut maybe_live = false;
        on_all_drop_children_bits(tcx, body, &env, path, |child| {
            maybe_live |= flow_inits.contains(child);
        });

        debug!("remove_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
        if !maybe_live {
            dead_unwinds.push(bb);
        }
    }

    if dead_unwinds.is_empty() {
        return;
    }

    let basic_blocks = body.basic_blocks.as_mut();
    for &bb in dead_unwinds.iter() {
        if let Some(unwind) = basic_blocks[bb].terminator_mut().unwind_mut() {
            *unwind = UnwindAction::Unreachable;
        }
    }
}

struct InitializationData<'mir, 'tcx> {
    inits: ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
    uninits: ResultsCursor<'mir, 'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
}

impl InitializationData<'_, '_> {
    fn seek_before(&mut self, loc: Location) {
        self.inits.seek_before_primary_effect(loc);
        self.uninits.seek_before_primary_effect(loc);
    }

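    /// Returns `(maybe_live, maybe_dead)` for `path` at the location last passed to
    /// `seek_before`: whether the place may be initialized, and whether it may be
    /// uninitialized, at that point.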
    fn maybe_live_dead(&self, path: MovePathIndex) -> (bool, bool) {
        (self.inits.contains(path), self.uninits.contains(path))
    }
}

struct Elaborator<'a, 'b, 'tcx> {
    ctxt: &'a mut ElaborateDropsCtxt<'b, 'tcx>,
}

impl fmt::Debug for Elaborator<'_, '_, '_> {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Ok(())
    }
}

impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
    type Path = MovePathIndex;

    fn patch(&mut self) -> &mut MirPatch<'tcx> {
        &mut self.ctxt.patch
    }

    fn body(&self) -> &'a Body<'tcx> {
        self.ctxt.body
    }

    fn tcx(&self) -> TyCtxt<'tcx> {
        self.ctxt.tcx
    }

    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.ctxt.param_env()
    }

    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
        let ((maybe_live, maybe_dead), multipart) = match mode {
            DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false),
            DropFlagMode::Deep => {
                let mut some_live = false;
                let mut some_dead = false;
                let mut children_count = 0;
                on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
                    let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
                    debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
                    some_live |= live;
                    some_dead |= dead;
                    children_count += 1;
                });
                ((some_live, some_dead), children_count != 1)
            }
        };
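        // Map the combined state to a drop style: never initialized means no drop at
        // all, definitely initialized means an unconditional drop, and the mixed case
        // is guarded by a drop flag, split into an "open" drop when the value's parts
        // must be handled individually.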
        match (maybe_live, maybe_dead, multipart) {
            (false, _, _) => DropStyle::Dead,
            (true, false, _) => DropStyle::Static,
            (true, true, false) => DropStyle::Conditional,
            (true, true, true) => DropStyle::Open,
        }
    }

    fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) {
        match mode {
            DropFlagMode::Shallow => {
                self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
            }
            DropFlagMode::Deep => {
                on_all_children_bits(
                    self.tcx(),
                    self.body(),
                    self.ctxt.move_data(),
                    path,
                    |child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent),
                );
            }
        }
    }

    fn field_subpath(&self, path: Self::Path, field: FieldIdx) -> Option<Self::Path> {
        rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
            ProjectionElem::Field(idx, _) => idx == field,
            _ => false,
        })
    }

    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path> {
        rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
            ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
                debug_assert!(size == min_length, "min_length should be exact for arrays");
                assert!(!from_end, "from_end should not be used for array element ConstantIndex");
                offset == index
            }
            _ => false,
        })
    }

    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
        rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| {
            e == ProjectionElem::Deref
        })
    }

    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path> {
        rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
            ProjectionElem::Downcast(_, idx) => idx == variant,
            _ => false,
        })
    }

    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
        self.ctxt.drop_flag(path).map(Operand::Copy)
    }
}

struct ElaborateDropsCtxt<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    body: &'a Body<'tcx>,
    env: &'a MoveDataParamEnv<'tcx>,
    init_data: InitializationData<'a, 'tcx>,
    drop_flags: IndexVec<MovePathIndex, Option<Local>>,
    patch: MirPatch<'tcx>,
    un_derefer: UnDerefer<'tcx>,
    reachable: BitSet<BasicBlock>,
}

impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
    fn move_data(&self) -> &'b MoveData<'tcx> {
        &self.env.move_data
    }

    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.env.param_env
    }

    fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
        let tcx = self.tcx;
        let patch = &mut self.patch;
        debug!("create_drop_flag({:?})", self.body.span);
        self.drop_flags[index].get_or_insert_with(|| patch.new_internal(tcx.types.bool, span));
    }

    fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
        self.drop_flags[index].map(Place::from)
    }

    /// Creates a patch that elaborates all drops in the input
    /// MIR.
    fn elaborate(mut self) -> MirPatch<'tcx> {
        self.collect_drop_flags();

        self.elaborate_drops();

        self.drop_flags_on_init();
        self.drop_flags_for_fn_rets();
        self.drop_flags_for_args();
        self.drop_flags_for_locs();

        self.patch
    }

    fn collect_drop_flags(&mut self) {
        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
            if !self.reachable.contains(bb) {
                continue;
            }
            let terminator = data.terminator();
            let place = match terminator.kind {
                TerminatorKind::Drop { ref place, .. } => {
                    self.un_derefer.derefer(place.as_ref(), self.body).unwrap_or(*place)
                }
                _ => continue,
            };

            self.init_data.seek_before(self.body.terminator_loc(bb));

            let path = self.move_data().rev_lookup.find(place.as_ref());
            debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);

            let path = match path {
                LookupResult::Exact(e) => e,
                LookupResult::Parent(None) => continue,
                LookupResult::Parent(Some(parent)) => {
                    let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);

                    if self.body.local_decls[place.local].is_deref_temp() {
                        continue;
                    }

                    if maybe_dead {
                        self.tcx.sess.delay_span_bug(
                            terminator.source_info.span,
                            format!(
                                "drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
                                bb, place, path
                            ),
                        );
                    }
                    continue;
                }
            };
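
            // Only children that are *both* maybe-live and maybe-dead get a flag:
            // their initialization state is genuinely unknown at this drop, so it
            // must be tracked at runtime.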
            on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
                let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
                debug!(
                    "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
                    child,
                    place,
                    path,
                    (maybe_live, maybe_dead)
                );
                if maybe_live && maybe_dead {
                    self.create_drop_flag(child, terminator.source_info.span)
                }
            });
        }
    }

    fn elaborate_drops(&mut self) {
        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
            if !self.reachable.contains(bb) {
                continue;
            }
            let loc = Location { block: bb, statement_index: data.statements.len() };
            let terminator = data.terminator();

            match terminator.kind {
                TerminatorKind::Drop { mut place, target, unwind, replace } => {
                    if let Some(new_place) = self.un_derefer.derefer(place.as_ref(), self.body) {
                        place = new_place;
                    }

                    self.init_data.seek_before(loc);
                    match self.move_data().rev_lookup.find(place.as_ref()) {
                        LookupResult::Exact(path) => {
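                            // Translate the MIR-level `UnwindAction` into the
                            // elaborator's `Unwind`: a drop already inside a cleanup
                            // block must not unwind again.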
                            let unwind = if data.is_cleanup {
                                Unwind::InCleanup
                            } else {
                                match unwind {
                                    UnwindAction::Cleanup(cleanup) => Unwind::To(cleanup),
                                    UnwindAction::Continue => Unwind::To(self.patch.resume_block()),
                                    UnwindAction::Unreachable => {
                                        Unwind::To(self.patch.unreachable_cleanup_block())
                                    }
                                    UnwindAction::Terminate => {
                                        Unwind::To(self.patch.terminate_block())
                                    }
                                }
                            };
                            elaborate_drop(
                                &mut Elaborator { ctxt: self },
                                terminator.source_info,
                                place,
                                path,
                                target,
                                unwind,
                                bb,
                            )
                        }
                        LookupResult::Parent(..) => {
                            if !replace {
                                self.tcx.sess.delay_span_bug(
                                    terminator.source_info.span,
                                    format!("drop of untracked value {:?}", bb),
                                );
                            }
                            // A drop-and-replace behind a pointer/array/whatever.
                            // The borrow checker requires that these locations are
                            // initialized before the assignment, so we just leave an
                            // unconditional drop.
                            assert!(!data.is_cleanup);
                        }
                    }
                }
                _ => continue,
            }
        }
    }

    fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
        Rvalue::Use(Operand::Constant(Box::new(Constant {
            span,
            user_ty: None,
            literal: ConstantKind::from_bool(self.tcx, val),
        })))
    }

    fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
        if let Some(flag) = self.drop_flags[path] {
            let span = self.patch.source_info_for_location(self.body, loc).span;
            let val = self.constant_bool(span, val.value());
            self.patch.add_assign(loc, Place::from(flag), val);
        }
    }

    fn drop_flags_on_init(&mut self) {
        let loc = Location::START;
        let span = self.patch.source_info_for_location(self.body, loc).span;
        let false_ = self.constant_bool(span, false);
        for flag in self.drop_flags.iter().flatten() {
            self.patch.add_assign(loc, Place::from(*flag), false_.clone());
        }
    }

    fn drop_flags_for_fn_rets(&mut self) {
        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
            if !self.reachable.contains(bb) {
                continue;
            }
            if let TerminatorKind::Call {
                destination,
                target: Some(tgt),
                unwind: UnwindAction::Cleanup(_),
                ..
            } = data.terminator().kind
            {
                assert!(!self.patch.is_patched(bb));
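
                // The call's return place only becomes initialized on the success
                // edge, so set its drop flags at the start of the target block,
                // where the unwind path cannot observe them.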
                let loc = Location { block: tgt, statement_index: 0 };
                let path = self.move_data().rev_lookup.find(destination.as_ref());
                on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
                    self.set_drop_flag(loc, child, DropFlagState::Present)
                });
            }
        }
    }

    fn drop_flags_for_args(&mut self) {
        let loc = Location::START;
        rustc_mir_dataflow::drop_flag_effects_for_function_entry(
            self.tcx,
            self.body,
            self.env,
            |path, ds| {
                self.set_drop_flag(loc, path, ds);
            },
        )
    }

    fn drop_flags_for_locs(&mut self) {
        // We intentionally iterate only over the *old* basic blocks.
        //
        // Basic blocks created by drop elaboration update their
        // drop flags by themselves, to avoid the drop flags being
        // clobbered before they are read.

        for (bb, data) in self.body.basic_blocks.iter_enumerated() {
            if !self.reachable.contains(bb) {
                continue;
            }
            debug!("drop_flags_for_locs({:?})", data);
            for i in 0..(data.statements.len() + 1) {
                debug!("drop_flag_for_locs: stmt {}", i);
                if i == data.statements.len() {
                    match data.terminator().kind {
                        TerminatorKind::Drop { .. } => {
                            // drop elaboration should handle that by itself
                            continue;
                        }
                        TerminatorKind::Resume => {
                            // It is possible for `Resume` to be patched
                            // (in particular it can be patched to be replaced with
                            // a `Goto`; see `MirPatch::new`).
                        }
                        _ => {
                            assert!(!self.patch.is_patched(bb));
                        }
                    }
                }
                let loc = Location { block: bb, statement_index: i };
                rustc_mir_dataflow::drop_flag_effects_for_location(
                    self.tcx,
                    self.body,
                    self.env,
                    loc,
                    |path, ds| self.set_drop_flag(loc, path, ds),
                )
            }

            // There may be a critical edge after this call,
            // so mark the return as initialized *before* the
            // call.
            if let TerminatorKind::Call {
                destination,
                target: Some(_),
                unwind: UnwindAction::Continue | UnwindAction::Unreachable | UnwindAction::Terminate,
                ..
            } = data.terminator().kind
            {
                assert!(!self.patch.is_patched(bb));

                let loc = Location { block: bb, statement_index: data.statements.len() };
                let path = self.move_data().rev_lookup.find(destination.as_ref());
                on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
                    self.set_drop_flag(loc, child, DropFlagState::Present)
                });
            }
        }
    }
}