// rust/compiler/rustc_mir/src/transform/mod.rs


use crate::{shim, util};
use required_consts::RequiredConstsVisitor;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::steal::Steal;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::Visitor as _;
use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPhase, Promoted};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeFoldable};
use rustc_span::{Span, Symbol};
use std::borrow::Cow;
pub mod abort_unwinding_calls;
pub mod add_call_guards;
pub mod add_moves_for_packed_drops;
pub mod add_retag;
pub mod check_const_item_mutation;
pub mod check_consts;
pub mod check_packed_ref;
pub mod check_unsafety;
pub mod cleanup_post_borrowck;
pub mod const_debuginfo;
pub mod const_goto;
pub mod const_prop;
pub mod coverage;
pub mod deaggregator;
pub mod deduplicate_blocks;
pub mod dest_prop;
pub mod dump_mir;
pub mod early_otherwise_branch;
pub mod elaborate_drops;
pub mod function_item_references;
pub mod generator;
pub mod inline;
pub mod instcombine;
pub mod lower_intrinsics;
pub mod lower_slice_len;
pub mod match_branches;
pub mod multiple_return_terminators;
pub mod nrvo;
pub mod promote_consts;
pub mod remove_noop_landing_pads;
pub mod remove_storage_markers;
pub mod remove_unneeded_drops;
pub mod remove_zsts;
pub mod required_consts;
pub mod rustc_peek;
pub mod separate_const_switch;
pub mod simplify;
pub mod simplify_branches;
pub mod simplify_comparison_integral;
pub mod simplify_try;
pub mod uninhabited_enum_branching;
pub mod unreachable_prop;
pub mod validate;
pub use rustc_middle::mir::MirSource;
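// The MIR queries wired up below form a pipeline: `mir_built` is consumed by `mir_const`,
// which feeds `mir_promoted` (the main body plus promoted fragments), then
// `mir_drops_elaborated_and_const_checked`, and finally either `optimized_mir` (for
// codegen) or `mir_for_ctfe`/`mir_for_ctfe_of_const_arg` (for compile-time evaluation).
// Most stages steal their predecessor's `Steal<Body>` (`mir_for_ctfe` clones instead),
// so queries that need the earlier MIR, such as borrowck or `mir_const_qualif`, must be
// forced before the steal happens; see the comments in the individual queries below.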
pub(crate) fn provide(providers: &mut Providers) {
self::check_unsafety::provide(providers);
self::check_packed_ref::provide(providers);
*providers = Providers {
mir_keys,
mir_const,
mir_const_qualif: |tcx, def_id| {
let def_id = def_id.expect_local();
if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
tcx.mir_const_qualif_const_arg(def)
} else {
mir_const_qualif(tcx, ty::WithOptConstParam::unknown(def_id))
}
},
mir_const_qualif_const_arg: |tcx, (did, param_did)| {
mir_const_qualif(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
},
mir_promoted,
mir_drops_elaborated_and_const_checked,
mir_for_ctfe,
mir_for_ctfe_of_const_arg,
optimized_mir,
is_mir_available,
is_ctfe_mir_available: |tcx, did| is_mir_available(tcx, did),
promoted_mir: |tcx, def_id| {
let def_id = def_id.expect_local();
if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
tcx.promoted_mir_of_const_arg(def)
} else {
promoted_mir(tcx, ty::WithOptConstParam::unknown(def_id))
}
},
promoted_mir_of_const_arg: |tcx, (did, param_did)| {
promoted_mir(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
},
..*providers
};
coverage::query::provide(providers);
}
fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let def_id = def_id.expect_local();
tcx.mir_keys(()).contains(&def_id)
}
/// Finds the full set of `DefId`s within the current crate that have
/// MIR associated with them.
fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxHashSet<LocalDefId> {
let mut set = FxHashSet::default();
// All body-owners have MIR associated with them.
set.extend(tcx.body_owners());
// Additionally, tuple struct/variant constructors have MIR, but
// they don't have a BodyId, so we need to build them separately.
struct GatherCtors<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
set: &'a mut FxHashSet<LocalDefId>,
}
impl<'a, 'tcx> Visitor<'tcx> for GatherCtors<'a, 'tcx> {
fn visit_variant_data(
&mut self,
v: &'tcx hir::VariantData<'tcx>,
_: Symbol,
_: &'tcx hir::Generics<'tcx>,
_: hir::HirId,
_: Span,
) {
if let hir::VariantData::Tuple(_, hir_id) = *v {
self.set.insert(self.tcx.hir().local_def_id(hir_id));
}
intravisit::walk_struct_def(self, v)
}
type Map = intravisit::ErasedMap<'tcx>;
fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
NestedVisitorMap::None
}
}
tcx.hir()
.krate()
.visit_all_item_likes(&mut GatherCtors { tcx, set: &mut set }.as_deep_visitor());
set
}
/// Generates a default name for the pass based on the name of the
/// type `T`.
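///
/// For example, a pass type whose full path is `rustc_mir::transform::inline::Inline`
/// would be reported as `"Inline"`: everything up to and including the last `:` of the
/// `std::any::type_name` output is stripped.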
pub fn default_name<T: ?Sized>() -> Cow<'static, str> {
let name = std::any::type_name::<T>();
if let Some(tail) = name.rfind(':') { Cow::from(&name[tail + 1..]) } else { Cow::from(name) }
}
/// A streamlined trait that you can implement to create a pass; the
/// pass will be named after the type, and it will consist of a main
/// loop that goes over each available MIR and applies `run_pass`.
pub trait MirPass<'tcx> {
fn name(&self) -> Cow<'_, str> {
default_name::<Self>()
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
}
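// A minimal sketch of implementing the trait (the pass type `NoOpPass` is purely
// illustrative and not part of this module): only `run_pass` has to be provided, while
// `name` falls back to `default_name` above.
//
// struct NoOpPass;
//
// impl<'tcx> MirPass<'tcx> for NoOpPass {
//     fn run_pass(&self, _tcx: TyCtxt<'tcx>, _body: &mut Body<'tcx>) {
//         // A real pass would inspect or rewrite `body` here.
//     }
// }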
pub fn run_passes(
tcx: TyCtxt<'tcx>,
body: &mut Body<'tcx>,
mir_phase: MirPhase,
passes: &[&[&dyn MirPass<'tcx>]],
) {
let phase_index = mir_phase.phase_index();
let validate = tcx.sess.opts.debugging_opts.validate_mir;
if body.phase >= mir_phase {
return;
}
if validate {
validate::Validator { when: format!("input to phase {:?}", mir_phase), mir_phase }
.run_pass(tcx, body);
}
let mut index = 0;
let mut run_pass = |pass: &dyn MirPass<'tcx>| {
let run_hooks = |body: &_, index, is_after| {
dump_mir::on_mir_pass(
tcx,
&format_args!("{:03}-{:03}", phase_index, index),
&pass.name(),
body,
is_after,
);
};
run_hooks(body, index, false);
pass.run_pass(tcx, body);
run_hooks(body, index, true);
if validate {
validate::Validator {
when: format!("after {} in phase {:?}", pass.name(), mir_phase),
mir_phase,
}
.run_pass(tcx, body);
}
index += 1;
};
for pass_group in passes {
for pass in *pass_group {
run_pass(*pass);
}
}
body.phase = mir_phase;
if mir_phase == MirPhase::Optimization {
validate::Validator { when: format!("end of phase {:?}", mir_phase), mir_phase }
.run_pass(tcx, body);
2020-05-25 22:04:48 +02:00
}
}
fn mir_const_qualif(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> ConstQualifs {
let const_kind = tcx.hir().body_const_context(def.did);
// No need to const-check a non-const `fn`.
if const_kind.is_none() {
return Default::default();
}
// N.B., this `borrow()` is guaranteed to be valid (i.e., the value
// cannot yet be stolen), because `mir_promoted()`, which steals
// from `mir_const()`, forces this query to execute before
// performing the steal.
let body = &tcx.mir_const(def).borrow();
if body.return_ty().references_error() {
tcx.sess.delay_span_bug(body.span, "mir_const_qualif: MIR had errors");
return Default::default();
}
let ccx = check_consts::ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def.did) };
let mut validator = check_consts::check::Checker::new(&ccx);
validator.check_body();
// We return the qualifs in the return place for every MIR body, even though it is only used
// when deciding to promote a reference to a `const` for now.
validator.qualifs_in_return_place()
}
/// Make MIR ready for const evaluation. This is run on all MIR, not just on consts!
fn mir_const<'tcx>(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> &'tcx Steal<Body<'tcx>> {
if let Some(def) = def.try_upgrade(tcx) {
return tcx.mir_const(def);
}
// Unsafety check uses the raw mir, so make sure it is run.
if !tcx.sess.opts.debugging_opts.thir_unsafeck {
if let Some(param_did) = def.const_param_did {
tcx.ensure().unsafety_check_result_for_const_arg((def.did, param_did));
} else {
tcx.ensure().unsafety_check_result(def.did);
}
}
let mut body = tcx.mir_built(def).steal();
util::dump_mir(tcx, None, "mir_map", &0, &body, |_, _| Ok(()));
run_passes(
tcx,
&mut body,
MirPhase::Const,
&[&[
// MIR-level lints.
&check_packed_ref::CheckPackedRef,
&check_const_item_mutation::CheckConstItemMutation,
&function_item_references::FunctionItemReferences,
// What we need in order to do constant evaluation.
&simplify::SimplifyCfg::new("initial"),
&rustc_peek::SanityCheck,
]],
);
tcx.alloc_steal_mir(body)
}
/// Compute the main MIR body and the list of MIR bodies of the promoteds.
fn mir_promoted(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> (&'tcx Steal<Body<'tcx>>, &'tcx Steal<IndexVec<Promoted, Body<'tcx>>>) {
if let Some(def) = def.try_upgrade(tcx) {
return tcx.mir_promoted(def);
}
// Ensure that we compute the `mir_const_qualif` for constants at
// this point, before we steal the mir-const result.
// Also this means promotion can rely on all const checks having been done.
let _ = tcx.mir_const_qualif_opt_const_arg(def);
let _ = tcx.mir_abstract_const_opt_const_arg(def.to_global());
let mut body = tcx.mir_const(def).steal();
let mut required_consts = Vec::new();
let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts);
for (bb, bb_data) in traversal::reverse_postorder(&body) {
required_consts_visitor.visit_basic_block_data(bb, bb_data);
}
body.required_consts = required_consts;
let promote_pass = promote_consts::PromoteTemps::default();
let promote: &[&dyn MirPass<'tcx>] = &[
// What we need to run borrowck etc.
&promote_pass,
&simplify::SimplifyCfg::new("promote-consts"),
];
let opt_coverage: &[&dyn MirPass<'tcx>] =
if tcx.sess.instrument_coverage() { &[&coverage::InstrumentCoverage] } else { &[] };
run_passes(tcx, &mut body, MirPhase::ConstPromotion, &[promote, opt_coverage]);
let promoted = promote_pass.promoted_fragments.into_inner();
(tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}
/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
fn mir_for_ctfe<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx Body<'tcx> {
let did = def_id.expect_local();
if let Some(def) = ty::WithOptConstParam::try_lookup(did, tcx) {
tcx.mir_for_ctfe_of_const_arg(def)
} else {
tcx.arena.alloc(inner_mir_for_ctfe(tcx, ty::WithOptConstParam::unknown(did)))
}
}
/// Same as `mir_for_ctfe`, but used to get the MIR of a const generic parameter.
/// The docs on `WithOptConstParam` explain this a bit more, but the TLDR is that
/// we'd get cycle errors with `mir_for_ctfe`, because typeck would need to typeck
/// the const parameter while type checking the main body, which in turn would try
/// to type check the main body again.
fn mir_for_ctfe_of_const_arg<'tcx>(
tcx: TyCtxt<'tcx>,
(did, param_did): (LocalDefId, DefId),
) -> &'tcx Body<'tcx> {
tcx.arena.alloc(inner_mir_for_ctfe(
tcx,
ty::WithOptConstParam { did, const_param_did: Some(param_did) },
))
}
fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
// FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
if tcx.is_constructor(def.did.to_def_id()) {
// There's no reason to run all of the MIR passes on constructors when
// we can just output the MIR we want directly. This also saves const
// qualification and borrow checking the trouble of special casing
// constructors.
return shim::build_adt_ctor(tcx, def.did.to_def_id());
}
let context = tcx
.hir()
.body_const_context(def.did)
.expect("mir_for_ctfe should not be used for runtime functions");
let mut body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
match context {
// Do not const-prop `const fn`s here: either they get executed at runtime or exported
// to metadata, in which case const prop runs on them as part of the normal optimization
// pipeline, or they don't, in which case const evaluation walks the control flow paths
// that actually get taken and emits any errors in those paths as const-eval errors.
hir::ConstContext::ConstFn => {}
// Static items always get evaluated, so we can just let const eval see if any erroneous
// control flow paths get executed.
hir::ConstContext::Static(_) => {}
// Associated constants get const prop run so we detect common failure situations in the
// crate that defined the constant.
// Technically we want to not run on regular const items, but oli-obk doesn't know how to
// conveniently detect that at this point without looking at the HIR.
hir::ConstContext::Const => {
#[rustfmt::skip]
let optimizations: &[&dyn MirPass<'_>] = &[
&const_prop::ConstProp,
];
#[rustfmt::skip]
run_passes(
tcx,
&mut body,
MirPhase::Optimization,
&[
optimizations,
],
);
}
}
debug_assert!(!body.has_free_regions(tcx), "Free regions in MIR for CTFE");
body
}
/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked<'tcx>(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> &'tcx Steal<Body<'tcx>> {
if let Some(def) = def.try_upgrade(tcx) {
return tcx.mir_drops_elaborated_and_const_checked(def);
}
// (Mir-)Borrowck uses `mir_promoted`, so we have to force it to
// execute before we can steal.
if let Some(param_did) = def.const_param_did {
tcx.ensure().mir_borrowck_const_arg((def.did, param_did));
} else {
tcx.ensure().mir_borrowck(def.did);
}
let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
use rustc_middle::hir::map::blocks::FnLikeNode;
let is_fn_like = FnLikeNode::from_node(tcx.hir().get(hir_id)).is_some();
if is_fn_like {
let did = def.did.to_def_id();
let def = ty::WithOptConstParam::unknown(did);
// Do not compute the mir call graph without said call graph actually being used.
if inline::is_enabled(tcx) {
let _ = tcx.mir_inliner_callees(ty::InstanceDef::Item(def));
}
}
let (body, _) = tcx.mir_promoted(def);
let mut body = body.steal();
run_post_borrowck_cleanup_passes(tcx, &mut body);
check_consts::post_drop_elaboration::check_live_drops(tcx, &body);
tcx.alloc_steal_mir(body)
}
/// After this series of passes, no lifetime analysis based on borrowing can be done.
fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("post_borrowck_cleanup({:?})", body.source.def_id());
let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
// Remove all things only needed by analysis
&simplify_branches::SimplifyBranches::new("initial"),
&remove_noop_landing_pads::RemoveNoopLandingPads,
&cleanup_post_borrowck::CleanupNonCodegenStatements,
&simplify::SimplifyCfg::new("early-opt"),
// These next passes must be executed together
&add_call_guards::CriticalCallEdges,
&elaborate_drops::ElaborateDrops,
// This will remove extraneous landing pads which are no longer necessary, and
// force any call to a possibly-unwinding function made from a non-unwinding
// function to abort the process.
&abort_unwinding_calls::AbortUnwindingCalls,
// AddMovesForPackedDrops needs to run after drop
// elaboration.
&add_moves_for_packed_drops::AddMovesForPackedDrops,
// `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
// but before optimizations begin.
&add_retag::AddRetag,
&lower_intrinsics::LowerIntrinsics,
&simplify::SimplifyCfg::new("elaborate-drops"),
// `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
// and it can help optimizations.
&deaggregator::Deaggregator,
];
run_passes(tcx, body, MirPhase::DropLowering, &[post_borrowck_cleanup]);
}
fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let mir_opt_level = tcx.sess.mir_opt_level();
// Lowering generator control-flow and variables has to happen before we do anything else
// to them. We run some optimizations before that, because they may be harder to do on the state
// machine than on MIR with async primitives.
let optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[
&lower_slice_len::LowerSliceLenCalls, // has to be done before inlining, otherwise the actual call will almost always be inlined. Also simple, so it can just run first
&unreachable_prop::UnreachablePropagation,
&uninhabited_enum_branching::UninhabitedEnumBranching,
&simplify::SimplifyCfg::new("after-uninhabited-enum-branching"),
&inline::Inline,
&generator::StateTransform,
];
// Even if we don't do optimizations, we still have to lower generators for codegen.
let no_optimizations_with_generators: &[&dyn MirPass<'tcx>] = &[&generator::StateTransform];
// The main optimizations that we do on MIR.
let optimizations: &[&dyn MirPass<'tcx>] = &[
&remove_storage_markers::RemoveStorageMarkers,
&remove_zsts::RemoveZsts,
&const_goto::ConstGoto,
&remove_unneeded_drops::RemoveUnneededDrops,
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
&multiple_return_terminators::MultipleReturnTerminators,
&instcombine::InstCombine,
&separate_const_switch::SeparateConstSwitch,
&const_prop::ConstProp,
&simplify_branches::SimplifyBranches::new("after-const-prop"),
&early_otherwise_branch::EarlyOtherwiseBranch,
&simplify_comparison_integral::SimplifyComparisonIntegral,
&simplify_try::SimplifyArmIdentity,
&simplify_try::SimplifyBranchSame,
&dest_prop::DestinationPropagation,
&simplify_branches::SimplifyBranches::new("final"),
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("final"),
&nrvo::RenameReturnPlace,
&const_debuginfo::ConstDebugInfo,
&simplify::SimplifyLocals,
&multiple_return_terminators::MultipleReturnTerminators,
&deduplicate_blocks::DeduplicateBlocks,
];
// Optimizations to run even if mir optimizations have been disabled.
let no_optimizations: &[&dyn MirPass<'tcx>] = &[
// FIXME(#70073): This pass is responsible for both optimization as well as some lints.
&const_prop::ConstProp,
];
// Some cleanup necessary at least for LLVM and potentially other codegen backends.
let pre_codegen_cleanup: &[&dyn MirPass<'tcx>] = &[
&add_call_guards::CriticalCallEdges,
// Dump the end result for testing and debugging purposes.
&dump_mir::Marker("PreCodegen"),
];
// End of pass declarations, now actually run the passes.
// Generator Lowering
#[rustfmt::skip]
run_passes(
tcx,
body,
MirPhase::GeneratorLowering,
&[
if mir_opt_level > 0 {
optimizations_with_generators
} else {
no_optimizations_with_generators
}
],
);
// Main optimization passes
#[rustfmt::skip]
run_passes(
tcx,
body,
MirPhase::Optimization,
&[
if mir_opt_level > 0 { optimizations } else { no_optimizations },
pre_codegen_cleanup,
],
);
}
/// Optimize the MIR and prepare it for codegen.
fn optimized_mir<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx Body<'tcx> {
let did = did.expect_local();
assert_eq!(ty::WithOptConstParam::try_lookup(did, tcx), None);
tcx.arena.alloc(inner_optimized_mir(tcx, did))
}
fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
if tcx.is_constructor(did.to_def_id()) {
// There's no reason to run all of the MIR passes on constructors when
// we can just output the MIR we want directly. This also saves const
// qualification and borrow checking the trouble of special casing
// constructors.
return shim::build_adt_ctor(tcx, did.to_def_id());
}
match tcx.hir().body_const_context(did) {
// Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
// which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
// computes and caches its result.
Some(hir::ConstContext::ConstFn) => tcx.ensure().mir_for_ctfe(did),
None => {}
Some(other) => panic!("do not use `optimized_mir` for constants: {:?}", other),
}
let mut body =
tcx.mir_drops_elaborated_and_const_checked(ty::WithOptConstParam::unknown(did)).steal();
run_optimization_passes(tcx, &mut body);
debug_assert!(!body.has_free_regions(tcx), "Free regions in optimized MIR");
body
}
/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
/// constant evaluation once all substitutions become known.
fn promoted_mir<'tcx>(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
if tcx.is_constructor(def.did.to_def_id()) {
return tcx.arena.alloc(IndexVec::new());
}
if let Some(param_did) = def.const_param_did {
tcx.ensure().mir_borrowck_const_arg((def.did, param_did));
} else {
tcx.ensure().mir_borrowck(def.did);
}
let (_, promoted) = tcx.mir_promoted(def);
let mut promoted = promoted.steal();
for body in &mut promoted {
run_post_borrowck_cleanup_passes(tcx, body);
}
debug_assert!(!promoted.has_free_regions(tcx), "Free regions in promoted MIR");
tcx.arena.alloc(promoted)
}