Auto merge of #79049 - tmiasko:lower-intrinsics, r=jonas-schievink

Lower intrinsic calls: forget, size_of, unreachable, wrapping_*

This allows constant propagation to evaluate `size_of` and `wrapping_*` calls,
and unreachable propagation to propagate calls to `unreachable` (which are now
lowered to the `Unreachable` terminator).

The lowering is performed as a MIR optimization rather than during MIR
building, so that intrinsics keep their special status with respect to
unsafety checks and promotion.

Currently enabled by default to determine the performance impact (no
significant impact is expected). In practice the lowering is mostly useful in
combination with inlining, since intrinsics are rarely called directly (with
the exception of `unreachable` and `discriminant_value`, which are used by
built-in derive macros).
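
To illustrate (this example is not part of the commit): once the safe wrappers
in `core`/`std` are inlined, calls like the ones below become direct intrinsic
calls in MIR, which this pass then rewrites into ordinary statements:

    // Hypothetical user code, shown only to sketch what the pass targets.
    pub fn example(a: u32, b: u32) -> (usize, u32) {
        // After inlining, `std::mem::size_of` becomes a `size_of` intrinsic call;
        // the pass lowers it to a `NullaryOp(SizeOf)` assignment, which
        // const-prop can then evaluate to a constant.
        let size = std::mem::size_of::<u64>();
        // After inlining, `u32::wrapping_add` becomes a `wrapping_add` intrinsic
        // call; the pass lowers it to a plain `BinaryOp(Add)` assignment.
        let sum = a.wrapping_add(b);
        (size, sum)
    }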

Closes #32716.
bors 2020-11-14 22:05:54 +00:00
commit 361c4ea224
10 changed files with 373 additions and 5 deletions


@@ -0,0 +1,108 @@
//! Lowers intrinsic calls
use crate::transform::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::spec::abi::Abi;

pub struct LowerIntrinsics;

impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
for block in body.basic_blocks_mut() {
let terminator = block.terminator.as_mut().unwrap();
if let TerminatorKind::Call {
func: Operand::Constant(box Constant { literal: ty::Const { ty: func_ty, .. }, .. }),
args,
destination,
..
} = &mut terminator.kind
{
let (intrinsic_name, substs) = match resolve_rust_intrinsic(tcx, func_ty) {
None => continue,
Some(it) => it,
};
match intrinsic_name {
sym::unreachable => {
terminator.kind = TerminatorKind::Unreachable;
}
sym::forget => {
if let Some((destination, target)) = *destination {
block.statements.push(Statement {
source_info: terminator.source_info,
kind: StatementKind::Assign(box (
destination,
Rvalue::Use(Operand::Constant(box Constant {
span: terminator.source_info.span,
user_ty: None,
literal: ty::Const::zero_sized(tcx, tcx.types.unit),
})),
)),
});
terminator.kind = TerminatorKind::Goto { target };
}
}
sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
if let Some((destination, target)) = *destination {
let lhs;
let rhs;
{
let mut args = args.drain(..);
lhs = args.next().unwrap();
rhs = args.next().unwrap();
}
let bin_op = match intrinsic_name {
sym::wrapping_add => BinOp::Add,
sym::wrapping_sub => BinOp::Sub,
sym::wrapping_mul => BinOp::Mul,
_ => bug!("unexpected intrinsic"),
};
block.statements.push(Statement {
source_info: terminator.source_info,
kind: StatementKind::Assign(box (
destination,
Rvalue::BinaryOp(bin_op, lhs, rhs),
)),
});
terminator.kind = TerminatorKind::Goto { target };
}
}
sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
// The checked binary operations are not a suitable target for lowering here,
// since their semantics depend on the value of the overflow-checks flag used
// during codegen. Issue #35310.
}
sym::size_of => {
if let Some((destination, target)) = *destination {
let tp_ty = substs.type_at(0);
block.statements.push(Statement {
source_info: terminator.source_info,
kind: StatementKind::Assign(box (
destination,
Rvalue::NullaryOp(NullOp::SizeOf, tp_ty),
)),
});
terminator.kind = TerminatorKind::Goto { target };
}
}
_ => {}
}
}
}
}
}

fn resolve_rust_intrinsic(
tcx: TyCtxt<'tcx>,
func_ty: Ty<'tcx>,
) -> Option<(Symbol, SubstsRef<'tcx>)> {
if let ty::FnDef(def_id, substs) = *func_ty.kind() {
let fn_sig = func_ty.fn_sig(tcx);
if fn_sig.abi() == Abi::RustIntrinsic {
return Some((tcx.item_name(def_id), substs));
}
}
None
}
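
For reference, a minimal sketch of how this pass can be exercised by a
mir-opt style test (hypothetical file and function names; this assumes the
`EMIT_MIR` convention of rustc's mir-opt test suite and the unstable
`core_intrinsics` feature, and is not one of the test files included in this
commit):

    #![feature(core_intrinsics)]

    // EMIT_MIR lower_intrinsics_sketch.wrapping.LowerIntrinsics.diff
    pub fn wrapping(a: i32, b: i32) -> i32 {
        // Lowered to an `Rvalue::BinaryOp(BinOp::Add, ..)` assignment plus a goto.
        core::intrinsics::wrapping_add(a, b)
    }

    // EMIT_MIR lower_intrinsics_sketch.size_of_t.LowerIntrinsics.diff
    pub fn size_of_t<T>() -> usize {
        // Lowered to an `Rvalue::NullaryOp(NullOp::SizeOf, T)` assignment plus a goto.
        core::intrinsics::size_of::<T>()
    }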


@@ -32,6 +32,7 @@ pub mod function_item_references;
pub mod generator;
pub mod inline;
pub mod instcombine;
pub mod lower_intrinsics;
pub mod match_branches;
pub mod multiple_return_terminators;
pub mod no_landing_pads;
@@ -390,6 +391,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// The main optimizations that we do on MIR.
let optimizations: &[&dyn MirPass<'tcx>] = &[
&lower_intrinsics::LowerIntrinsics,
&remove_unneeded_drops::RemoveUnneededDrops,
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)