
Auto merge of #103138 - nnethercote:merge-BBs, r=bjorn3

Merge basic blocks where possible when generating LLVM IR.

r? `@ghost`
bors 2022-11-17 01:56:24 +00:00
commit 251831ece9
9 changed files with 427 additions and 352 deletions
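A minimal sketch of the intended effect on the generated LLVM IR (illustrative block and value names, not taken from the commit): when a MIR block ends in an unconditional jump to a successor that has no other predecessors, the successor's code now lands in the same backend basic block instead of behind a `br`.

    ; before: bb1 has bb0 as its only predecessor, so the unconditional
    ; `br` and the separate block carry no extra information
    bb0:
      %a = add i32 %x, 1
      br label %bb1
    bb1:
      %b = mul i32 %a, 2
      ret i32 %b

    ; after: both MIR blocks are emitted into a single backend block
    bb0:
      %a = add i32 %x, 1
      %b = mul i32 %a, 2
      ret i32 %b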

View file

@@ -755,11 +755,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         OperandRef { val, layout: place.layout }
     }

-    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+    fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;

         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -778,14 +778,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.switch_to_block(body_bb);

         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+        cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));

         let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
         self.llbb().add_assignment(None, current, next);
         self.br(header_bb);

         self.switch_to_block(next_bb);
-        self
     }

     fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {

View file

@@ -556,15 +556,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn write_operand_repeatedly(
-        mut self,
+        &mut self,
         cg_elem: OperandRef<'tcx, &'ll Value>,
         count: u64,
         dest: PlaceRef<'tcx, &'ll Value>,
-    ) -> Self {
+    ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;

         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -592,7 +592,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         body_bx.br(header_bb);
         header_bx.add_incoming_to_phi(current, next, body_bb);

-        Self::build(self.cx, next_bb)
+        *self = Self::build(self.cx, next_bb);
     }

     fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {

View file

@@ -1,12 +1,13 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(box_patterns)]
-#![feature(try_blocks)]
-#![feature(once_cell)]
 #![feature(associated_type_bounds)]
-#![feature(strict_provenance)]
-#![feature(int_roundings)]
+#![feature(box_patterns)]
 #![feature(if_let_guard)]
+#![feature(int_roundings)]
+#![feature(let_chains)]
 #![feature(never_type)]
+#![feature(once_cell)]
+#![feature(strict_provenance)]
+#![feature(try_blocks)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]

View file

@@ -1,7 +1,7 @@
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Immediate, Pair, Ref};
 use super::place::PlaceRef;
-use super::{FunctionCx, LocalRef};
+use super::{CachedLlbb, FunctionCx, LocalRef};

 use crate::base;
 use crate::common::{self, IntPredicate};
@@ -25,6 +25,15 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
 use rustc_target::abi::{self, HasDataLayout, WrappingRange};
 use rustc_target::spec::abi::Abi;

+// Indicates if we are in the middle of merging a BB's successor into it. This
+// can happen when BB jumps directly to its successor and the successor has no
+// other predecessors.
+#[derive(Debug, PartialEq)]
+enum MergingSucc {
+    False,
+    True,
+}
+
 /// Used by `FunctionCx::codegen_terminator` for emitting common patterns
 /// e.g., creating a basic block, calling a function, etc.
 struct TerminatorCodegenHelper<'tcx> {
@@ -64,31 +73,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         }
     }

-    /// Get a basic block (creating it if necessary), possibly with a landing
-    /// pad next to it.
-    fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
-        &self,
-        fx: &mut FunctionCx<'a, 'tcx, Bx>,
-        target: mir::BasicBlock,
-    ) -> (Bx::BasicBlock, bool) {
-        let span = self.terminator.source_info.span;
-        let lltarget = fx.llbb(target);
-        let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
-        match (self.funclet_bb, target_funclet) {
-            (None, None) => (lltarget, false),
-            // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
-            (None, Some(_)) => (fx.landing_pad_for(target), false),
-            (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
-            (Some(f), Some(t_f)) => {
-                if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
-                    (lltarget, false)
-                } else {
-                    (fx.landing_pad_for(target), true)
-                }
-            }
-        }
-    }
-
     /// Get a basic block (creating it if necessary), possibly with cleanup
     /// stuff in it or next to it.
     fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
@@ -96,7 +80,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         target: mir::BasicBlock,
     ) -> Bx::BasicBlock {
-        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
+        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
+        let mut lltarget = fx.llbb(target);
+        if needs_landing_pad {
+            lltarget = fx.landing_pad_for(target);
+        }
         if is_cleanupret {
             // MSVC cross-funclet jump - need a trampoline
             debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
@@ -111,21 +99,55 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         }
     }

+    fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>(
+        &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
+        target: mir::BasicBlock,
+    ) -> (bool, bool) {
+        let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+        let (needs_landing_pad, is_cleanupret) = match (self.funclet_bb, target_funclet) {
+            (None, None) => (false, false),
+            (None, Some(_)) => (true, false),
+            (Some(_), None) => {
+                let span = self.terminator.source_info.span;
+                span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
+            }
+            (Some(f), Some(t_f)) => {
+                if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+                    (false, false)
+                } else {
+                    (true, true)
+                }
+            }
+        };
+        (needs_landing_pad, is_cleanupret)
+    }
+
     fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         bx: &mut Bx,
         target: mir::BasicBlock,
-    ) {
-        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
-        if is_cleanupret {
-            // MSVC micro-optimization: generate a `ret` rather than a jump
-            // to a trampoline.
-            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
-        } else {
-            bx.br(lltarget);
-        }
+        mergeable_succ: bool,
+    ) -> MergingSucc {
+        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
+        if mergeable_succ && !needs_landing_pad && !is_cleanupret {
+            // We can merge the successor into this bb, so no need for a `br`.
+            MergingSucc::True
+        } else {
+            let mut lltarget = fx.llbb(target);
+            if needs_landing_pad {
+                lltarget = fx.landing_pad_for(target);
+            }
+            if is_cleanupret {
+                // micro-optimization: generate a `ret` rather than a jump
+                // to a trampoline.
+                debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+                bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+            } else {
+                bx.br(lltarget);
+            }
+            MergingSucc::False
+        }
     }

     /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
@@ -140,7 +162,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
         cleanup: Option<mir::BasicBlock>,
         copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         // If there is a cleanup block and the function we're calling can unwind, then
         // do an invoke, otherwise do a call.
         let fn_ty = bx.fn_decl_backend_type(&fn_abi);
@@ -191,6 +214,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
             }
+            MergingSucc::False
         } else {
             let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
             if fx.mir[self.bb].is_cleanup {
@@ -206,9 +230,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                     bx.lifetime_end(tmp.llval, tmp.layout.size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
-                self.funclet_br(fx, bx, target);
+                self.funclet_br(fx, bx, target, mergeable_succ)
             } else {
                 bx.unreachable();
+                MergingSucc::False
             }
         }
     }
@@ -225,7 +250,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         destination: Option<mir::BasicBlock>,
         cleanup: Option<mir::BasicBlock>,
         instance: Instance<'_>,
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         if let Some(cleanup) = cleanup {
             let ret_llbb = if let Some(target) = destination {
                 fx.llbb(target)
@@ -241,13 +267,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 instance,
                 Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
             );
+            MergingSucc::False
         } else {
             bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);

             if let Some(target) = destination {
-                self.funclet_br(fx, bx, target);
+                self.funclet_br(fx, bx, target, mergeable_succ)
             } else {
                 bx.unreachable();
+                MergingSucc::False
             }
         }
     }
@@ -256,16 +284,16 @@
 /// Codegen implementations for some terminator variants.
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// Generates code for a `Resume` terminator.
-    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
         if let Some(funclet) = helper.funclet(self) {
             bx.cleanup_ret(funclet, None);
         } else {
-            let slot = self.get_personality_slot(&mut bx);
-            let lp0 = slot.project_field(&mut bx, 0);
+            let slot = self.get_personality_slot(bx);
+            let lp0 = slot.project_field(bx, 0);
             let lp0 = bx.load_operand(lp0).immediate();
-            let lp1 = slot.project_field(&mut bx, 1);
+            let lp1 = slot.project_field(bx, 1);
             let lp1 = bx.load_operand(lp1).immediate();
-            slot.storage_dead(&mut bx);
+            slot.storage_dead(bx);

             let mut lp = bx.const_undef(self.landing_pad_type());
             lp = bx.insert_value(lp, lp0, 0);
@@ -277,12 +305,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn codegen_switchint_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         discr: &mir::Operand<'tcx>,
         switch_ty: Ty<'tcx>,
         targets: &SwitchTargets,
     ) {
-        let discr = self.codegen_operand(&mut bx, &discr);
+        let discr = self.codegen_operand(bx, &discr);
         // `switch_ty` is redundant, sanity-check that.
         assert_eq!(discr.layout.ty, switch_ty);
         let mut target_iter = targets.iter();
@@ -338,7 +366,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }

-    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+    fn codegen_return_terminator(&mut self, bx: &mut Bx) {
         // Call `va_end` if this is the definition of a C-variadic function.
         if self.fn_abi.c_variadic {
             // The `VaList` "spoofed" argument is just after all the real arguments.
@@ -368,11 +396,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }

             PassMode::Direct(_) | PassMode::Pair(..) => {
-                let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+                let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
                     bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
-                    op.immediate_or_packed_pair(&mut bx)
+                    op.immediate_or_packed_pair(bx)
                 }
             }
@@ -388,8 +416,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
         let llslot = match op.val {
             Immediate(_) | Pair(..) => {
-                let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
-                op.val.store(&mut bx, scratch);
+                let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
+                op.val.store(bx, scratch);
                 scratch.llval
             }
             Ref(llval, _, align) => {
@@ -409,22 +437,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     fn codegen_drop_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         location: mir::Place<'tcx>,
         target: mir::BasicBlock,
         unwind: Option<mir::BasicBlock>,
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         let ty = location.ty(self.mir, bx.tcx()).ty;
         let ty = self.monomorphize(ty);
         let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);

         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
             // we don't actually need to drop anything.
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target, mergeable_succ);
         }

-        let place = self.codegen_place(&mut bx, location.as_ref());
+        let place = self.codegen_place(bx, location.as_ref());
         let (args1, args2);
         let mut args = if let Some(llextra) = place.llextra {
             args2 = [place.llval, llextra];
@@ -462,7 +490,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 args = &args[..1];
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(&mut bx, vtable, ty, &fn_abi),
+                        .get_fn(bx, vtable, ty, &fn_abi),
                     fn_abi,
                 )
             }
@@ -507,7 +535,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 debug!("args' = {:?}", args);
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(&mut bx, vtable, ty, &fn_abi),
+                        .get_fn(bx, vtable, ty, &fn_abi),
                     fn_abi,
                 )
             }
@@ -515,29 +543,31 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
         helper.do_call(
             self,
-            &mut bx,
+            bx,
             fn_abi,
             drop_fn,
             args,
             Some((ReturnDest::Nothing, target)),
             unwind,
             &[],
-        );
+            mergeable_succ,
+        )
     }

     fn codegen_assert_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         cond: &mir::Operand<'tcx>,
         expected: bool,
         msg: &mir::AssertMessage<'tcx>,
         target: mir::BasicBlock,
         cleanup: Option<mir::BasicBlock>,
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         let span = terminator.source_info.span;
-        let cond = self.codegen_operand(&mut bx, cond).immediate();
+        let cond = self.codegen_operand(bx, cond).immediate();
         let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

         // This case can currently arise only from functions marked
@@ -555,8 +585,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // Don't codegen the panic block if success if known.
         if const_cond == Some(expected) {
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target, mergeable_succ);
         }

         // Pass the condition through llvm.expect for branch hinting.
@@ -573,16 +602,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // After this point, bx is the block for the call to panic.
         bx.switch_to_block(panic_block);
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(bx, terminator.source_info);

         // Get the location information.
-        let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+        let location = self.get_caller_location(bx, terminator.source_info).immediate();

         // Put together the arguments to the panic entry point.
         let (lang_item, args) = match msg {
             AssertKind::BoundsCheck { ref len, ref index } => {
-                let len = self.codegen_operand(&mut bx, len).immediate();
-                let index = self.codegen_operand(&mut bx, index).immediate();
+                let len = self.codegen_operand(bx, len).immediate();
+                let index = self.codegen_operand(bx, index).immediate();
                 // It's `fn panic_bounds_check(index: usize, len: usize)`,
                 // and `#[track_caller]` adds an implicit third argument.
                 (LangItem::PanicBoundsCheck, vec![index, len, location])
@@ -595,29 +624,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };

-        let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);

         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+        let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[], false);
+        assert_eq!(merging_succ, MergingSucc::False);
+        MergingSucc::False
     }

     fn codegen_abort_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
     ) {
         let span = terminator.source_info.span;
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(bx, terminator.source_info);

         // Obtain the panic entry point.
-        let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicNoUnwind);

         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+        let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[], false);
+        assert_eq!(merging_succ, MergingSucc::False);
     }

-    /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+    /// Returns `Some` if this is indeed a panic intrinsic and codegen is done.
     fn codegen_panic_intrinsic(
         &mut self,
         helper: &TerminatorCodegenHelper<'tcx>,
@@ -627,7 +659,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         source_info: mir::SourceInfo,
         target: Option<mir::BasicBlock>,
         cleanup: Option<mir::BasicBlock>,
-    ) -> bool {
+        mergeable_succ: bool,
+    ) -> Option<MergingSucc> {
         // Emit a panic or a no-op for `assert_*` intrinsics.
         // These are intrinsics that compile to panics so that we can get a message
         // which mentions the offending type, even from a const context.
@@ -653,7 +686,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 ZeroValid => !bx.tcx().permits_zero_init(layout),
                 UninitValid => !bx.tcx().permits_uninit_init(layout),
             };
-            if do_panic {
+            Some(if do_panic {
                 let msg_str = with_no_visible_paths!({
                     with_no_trimmed_paths!({
                         if layout.abi.is_uninhabited() {
@@ -686,22 +719,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
                     cleanup,
                     &[],
-                );
+                    mergeable_succ,
+                )
             } else {
                 // a NOP
                 let target = target.unwrap();
-                helper.funclet_br(self, bx, target)
-            }
-            true
+                helper.funclet_br(self, bx, target, mergeable_succ)
+            })
         } else {
-            false
+            None
         }
     }

     fn codegen_call_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         func: &mir::Operand<'tcx>,
         args: &[mir::Operand<'tcx>],
@@ -709,12 +742,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         target: Option<mir::BasicBlock>,
         cleanup: Option<mir::BasicBlock>,
         fn_span: Span,
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         let source_info = terminator.source_info;
         let span = source_info.span;

         // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-        let callee = self.codegen_operand(&mut bx, func);
+        let callee = self.codegen_operand(bx, func);

         let (instance, mut llfn) = match *callee.layout.ty.kind() {
             ty::FnDef(def_id, substs) => (
@@ -734,8 +768,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
             // Empty drop glue; a no-op.
             let target = target.unwrap();
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target, mergeable_succ);
         }

         // FIXME(eddyb) avoid computing this if possible, when `instance` is
@@ -762,9 +795,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };

         if intrinsic == Some(sym::transmute) {
-            if let Some(target) = target {
-                self.codegen_transmute(&mut bx, &args[0], destination);
-                helper.funclet_br(self, &mut bx, target);
+            return if let Some(target) = target {
+                self.codegen_transmute(bx, &args[0], destination);
+                helper.funclet_br(self, bx, target, mergeable_succ)
             } else {
                 // If we are trying to transmute to an uninhabited type,
                 // it is likely there is no allotted destination. In fact,
@@ -774,20 +807,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // it must be unreachable.
                 assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
                 bx.unreachable();
-            }
-            return;
+                MergingSucc::False
+            };
         }

-        if self.codegen_panic_intrinsic(
+        if let Some(merging_succ) = self.codegen_panic_intrinsic(
             &helper,
-            &mut bx,
+            bx,
             intrinsic,
             instance,
             source_info,
             target,
             cleanup,
+            mergeable_succ,
         ) {
-            return;
+            return merging_succ;
         }

         // The arguments we'll be passing. Plus one to account for outptr, if used.
@@ -797,23 +831,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // Prepare the return value destination
         let ret_dest = if target.is_some() {
             let is_intrinsic = intrinsic.is_some();
-            self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+            self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
         } else {
             ReturnDest::Nothing
         };

         if intrinsic == Some(sym::caller_location) {
-            if let Some(target) = target {
-                let location = self
-                    .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+            return if let Some(target) = target {
+                let location =
+                    self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
                 if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
-                    location.val.store(&mut bx, tmp);
+                    location.val.store(bx, tmp);
                 }
-                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
-                helper.funclet_br(self, &mut bx, target);
-            }
-            return;
+                self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
+                helper.funclet_br(self, bx, target, mergeable_succ)
+            } else {
+                MergingSucc::False
+            };
         }

         match intrinsic {
@@ -857,12 +892,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     }

-                    self.codegen_operand(&mut bx, arg)
+                    self.codegen_operand(bx, arg)
                 })
                 .collect();

             Self::codegen_intrinsic_call(
-                &mut bx,
+                bx,
                 *instance.as_ref().unwrap(),
                 &fn_abi,
                 &args,
@@ -871,16 +906,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             );

             if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+                self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
             }

-            if let Some(target) = target {
-                helper.funclet_br(self, &mut bx, target);
+            return if let Some(target) = target {
+                helper.funclet_br(self, bx, target, mergeable_succ)
             } else {
                 bx.unreachable();
-            }
-
-            return;
+                MergingSucc::False
+            };
         }
     }
@@ -894,7 +928,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

         let mut copied_constant_arguments = vec![];
         'make_args: for (i, arg) in first_args.iter().enumerate() {
-            let mut op = self.codegen_operand(&mut bx, arg);
+            let mut op = self.codegen_operand(bx, arg);

             if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                 match op.val {
@@ -909,7 +943,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             && !op.layout.ty.is_region_ptr()
                         {
                             for i in 0..op.layout.fields.count() {
-                                let field = op.extract_field(&mut bx, i);
+                                let field = op.extract_field(bx, i);
                                 if !field.layout.is_zst() {
                                     // we found the one non-zero-sized field that is allowed
                                     // now find *its* non-zero-sized field, or stop if it's a
@@ -926,7 +960,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // data pointer and vtable. Look up the method in the vtable, and pass
                         // the data pointer as the first argument
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta,
                             op.layout.ty,
                             &fn_abi,
@@ -937,7 +971,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     Ref(data_ptr, Some(meta), _) => {
                         // by-value dynamic dispatch
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta,
                             op.layout.ty,
                             &fn_abi,
@@ -954,11 +988,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                         // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
                         let place = op.deref(bx.cx());
-                        let data_ptr = place.project_field(&mut bx, 0);
-                        let meta_ptr = place.project_field(&mut bx, 1);
+                        let data_ptr = place.project_field(bx, 0);
+                        let meta_ptr = place.project_field(bx, 1);
                         let meta = bx.load_operand(meta_ptr);
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta.immediate(),
                             op.layout.ty,
                             &fn_abi,
@@ -977,24 +1011,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             match (arg, op.val) {
                 (&mir::Operand::Copy(_), Ref(_, None, _))
                 | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
-                    let tmp = PlaceRef::alloca(&mut bx, op.layout);
+                    let tmp = PlaceRef::alloca(bx, op.layout);
                     bx.lifetime_start(tmp.llval, tmp.layout.size);
-                    op.val.store(&mut bx, tmp);
+                    op.val.store(bx, tmp);
                     op.val = Ref(tmp.llval, None, tmp.align);
                     copied_constant_arguments.push(tmp);
                 }
                 _ => {}
             }

-            self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+            self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
         }

         let num_untupled = untuple.map(|tup| {
-            self.codegen_arguments_untupled(
-                &mut bx,
-                tup,
-                &mut llargs,
-                &fn_abi.args[first_args.len()..],
-            )
+            self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
         });

         let needs_location =
@@ -1014,14 +1043,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 fn_abi,
             );
             let location =
-                self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+                self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
             debug!(
                 "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                 terminator, location, fn_span
             );

             let last_arg = fn_abi.args.last().unwrap();
-            self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+            self.codegen_argument(bx, location, &mut llargs, last_arg);
         }

         let (is_indirect_call, fn_ptr) = match (llfn, instance) {
@@ -1046,40 +1075,43 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             bx.cond_br(cond, bb_pass, bb_fail);
             bx.switch_to_block(bb_pass);
-            helper.do_call(
+            let merging_succ = helper.do_call(
                 self,
-                &mut bx,
+                bx,
                 fn_abi,
                 fn_ptr,
                 &llargs,
                 target.as_ref().map(|&target| (ret_dest, target)),
                 cleanup,
                 &copied_constant_arguments,
+                false,
             );
+            assert_eq!(merging_succ, MergingSucc::False);

             bx.switch_to_block(bb_fail);
             bx.abort();
             bx.unreachable();

-            return;
+            return MergingSucc::False;
         }

         helper.do_call(
             self,
-            &mut bx,
+            bx,
             fn_abi,
             fn_ptr,
             &llargs,
             target.as_ref().map(|&target| (ret_dest, target)),
             cleanup,
             &copied_constant_arguments,
-        );
+            mergeable_succ,
+        )
     }

     fn codegen_asm_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         template: &[ast::InlineAsmTemplatePiece],
         operands: &[mir::InlineAsmOperand<'tcx>],
@@ -1088,24 +1120,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         destination: Option<mir::BasicBlock>,
         cleanup: Option<mir::BasicBlock>,
         instance: Instance<'_>,
-    ) {
+        mergeable_succ: bool,
+    ) -> MergingSucc {
         let span = terminator.source_info.span;

         let operands: Vec<_> = operands
             .iter()
             .map(|op| match *op {
                 mir::InlineAsmOperand::In { reg, ref value } => {
-                    let value = self.codegen_operand(&mut bx, value);
+                    let value = self.codegen_operand(bx, value);
                     InlineAsmOperandRef::In { reg, value }
                 }
                 mir::InlineAsmOperand::Out { reg, late, ref place } => {
-                    let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+                    let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
                     InlineAsmOperandRef::Out { reg, late, place }
                 }
                 mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
-                    let in_value = self.codegen_operand(&mut bx, in_value);
+                    let in_value = self.codegen_operand(bx, in_value);
                     let out_place =
-                        out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+                        out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
                     InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                 }
                 mir::InlineAsmOperand::Const { ref value } => {
@@ -1143,7 +1176,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         helper.do_inlineasm(
             self,
-            &mut bx,
+            bx,
             template,
             &operands,
             options,
@@ -1151,71 +1184,128 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             destination,
             cleanup,
             instance,
-        );
+            mergeable_succ,
+        )
     }
 }

 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
-        let llbb = self.llbb(bb);
-        let mut bx = Bx::build(self.cx, llbb);
+    pub fn codegen_block(&mut self, mut bb: mir::BasicBlock) {
+        let llbb = match self.try_llbb(bb) {
+            Some(llbb) => llbb,
+            None => return,
+        };
+        let bx = &mut Bx::build(self.cx, llbb);
         let mir = self.mir;
-        let data = &mir[bb];
-        debug!("codegen_block({:?}={:?})", bb, data);

-        for statement in &data.statements {
-            bx = self.codegen_statement(bx, statement);
-        }
+        // MIR basic blocks stop at any function call. This may not be the case
+        // for the backend's basic blocks, in which case we might be able to
+        // combine multiple MIR basic blocks into a single backend basic block.
+        loop {
+            let data = &mir[bb];
+            debug!("codegen_block({:?}={:?})", bb, data);

-        self.codegen_terminator(bx, bb, data.terminator());
+            for statement in &data.statements {
+                self.codegen_statement(bx, statement);
+            }
+
+            let merging_succ = self.codegen_terminator(bx, bb, data.terminator());
+            if let MergingSucc::False = merging_succ {
+                break;
+            }
+
+            // We are merging the successor into the produced backend basic
+            // block. Record that the successor should be skipped when it is
+            // reached.
+            //
+            // Note: we must not have already generated code for the successor.
+            // This is implicitly ensured by the reverse postorder traversal,
+            // and the assertion explicitly guarantees that.
+            let mut successors = data.terminator().successors();
+            let succ = successors.next().unwrap();
+            assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None));
+            self.cached_llbbs[succ] = CachedLlbb::Skip;
+            bb = succ;
+        }
     }

     fn codegen_terminator(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         bb: mir::BasicBlock,
         terminator: &'tcx mir::Terminator<'tcx>,
-    ) {
+    ) -> MergingSucc {
         debug!("codegen_terminator: {:?}", terminator);

         // Create the cleanup bundle, if needed.
         let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
         let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };

-        self.set_debug_loc(&mut bx, terminator.source_info);
+        let mergeable_succ = || {
+            // Note: any call to `switch_to_block` will invalidate a `true` value
+            // of `mergeable_succ`.
+            let mut successors = terminator.successors();
+            if let Some(succ) = successors.next()
+                && successors.next().is_none()
+                && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice()
+            {
+                // bb has a single successor, and bb is its only predecessor. This
+                // makes it a candidate for merging.
+                assert_eq!(succ_pred, bb);
+                true
+            } else {
+                false
+            }
+        };
+
+        self.set_debug_loc(bx, terminator.source_info);
         match terminator.kind {
-            mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+            mir::TerminatorKind::Resume => {
+                self.codegen_resume_terminator(helper, bx);
+                MergingSucc::False
+            }

             mir::TerminatorKind::Abort => {
                 self.codegen_abort_terminator(helper, bx, terminator);
+                MergingSucc::False
             }

             mir::TerminatorKind::Goto { target } => {
-                helper.funclet_br(self, &mut bx, target);
+                helper.funclet_br(self, bx, target, mergeable_succ())
             }

             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
                 self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+                MergingSucc::False
             }

             mir::TerminatorKind::Return => {
                 self.codegen_return_terminator(bx);
+                MergingSucc::False
             }

             mir::TerminatorKind::Unreachable => {
                 bx.unreachable();
+                MergingSucc::False
             }

             mir::TerminatorKind::Drop { place, target, unwind } => {
-                self.codegen_drop_terminator(helper, bx, place, target, unwind);
+                self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
             }

-            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                self.codegen_assert_terminator(
-                    helper, bx, terminator, cond, expected, msg, target, cleanup,
-                );
-            }
+            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self
+                .codegen_assert_terminator(
+                    helper,
+                    bx,
+                    terminator,
+                    cond,
+                    expected,
+                    msg,
+                    target,
+                    cleanup,
+                    mergeable_succ(),
+                ),

             mir::TerminatorKind::DropAndReplace { .. } => {
                 bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
@@ -1229,8 +1319,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cleanup,
                 from_hir_call: _,
                 fn_span,
-            } => {
-                self.codegen_call_terminator(
+            } => self.codegen_call_terminator(
                 helper,
                 bx,
                 terminator,
@@ -1240,8 +1329,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 target,
                 cleanup,
                 fn_span,
-                );
-            }
+                mergeable_succ(),
+            ),

             mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
                 bug!("generator ops in codegen")
             }
@@ -1256,8 +1345,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 line_spans,
                 destination,
                 cleanup,
-            } => {
-                self.codegen_asm_terminator(
+            } => self.codegen_asm_terminator(
                 helper,
                 bx,
                 terminator,
@@ -1268,8 +1356,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 destination,
                 cleanup,
                 self.instance,
-                );
-            }
+                mergeable_succ(),
+            ),
         }
     }
@@ -1587,12 +1675,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
     // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
     pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
-        self.cached_llbbs[bb].unwrap_or_else(|| {
-            // FIXME(eddyb) only name the block if `fewer_names` is `false`.
-            let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
-            self.cached_llbbs[bb] = Some(llbb);
-            llbb
-        })
+        self.try_llbb(bb).unwrap()
+    }
+
+    /// Like `llbb`, but may fail if the basic block should be skipped.
+    pub fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> {
+        match self.cached_llbbs[bb] {
+            CachedLlbb::None => {
+                // FIXME(eddyb) only name the block if `fewer_names` is `false`.
+                let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+                self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
+                Some(llbb)
+            }
+            CachedLlbb::Some(llbb) => Some(llbb),
+            CachedLlbb::Skip => None,
+        }
     }

     fn make_return_dest(

View file

@@ -16,6 +16,18 @@ use rustc_middle::mir::traversal;

 use self::operand::{OperandRef, OperandValue};

+// Used for tracking the state of generated basic blocks.
+enum CachedLlbb<T> {
+    /// Nothing created yet.
+    None,
+
+    /// Has been created.
+    Some(T),
+
+    /// Nothing created yet, and nothing should be.
+    Skip,
+}
+
 /// Master context for codegenning from MIR.
 pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     instance: Instance<'tcx>,
@@ -43,7 +55,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     /// as-needed (e.g. RPO reaching it or another block branching to it).
     // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
     // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
-    cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+    cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>>,

     /// The funclet status of each basic block
     cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
@@ -155,10 +167,12 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     }

     let cleanup_kinds = analyze::cleanup_kinds(&mir);
-    let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
-        .basic_blocks
+    let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
+        mir.basic_blocks
         .indices()
-        .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
+        .map(|bb| {
+            if bb == mir::START_BLOCK { CachedLlbb::Some(start_llbb) } else { CachedLlbb::None }
+        })
         .collect();

     let mut fx = FunctionCx {

View file

@ -18,17 +18,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "trace", skip(self, bx))] #[instrument(level = "trace", skip(self, bx))]
pub fn codegen_rvalue( pub fn codegen_rvalue(
&mut self, &mut self,
mut bx: Bx, bx: &mut Bx,
dest: PlaceRef<'tcx, Bx::Value>, dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>, rvalue: &mir::Rvalue<'tcx>,
) -> Bx { ) {
match *rvalue { match *rvalue {
mir::Rvalue::Use(ref operand) => { mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&mut bx, operand); let cg_operand = self.codegen_operand(bx, operand);
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why dont we do that yet if we dont?) // constants into `OperandValue::Ref`; why dont we do that yet if we dont?)
cg_operand.val.store(&mut bx, dest); cg_operand.val.store(bx, dest);
bx
} }
mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => { mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
@ -37,16 +36,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if bx.cx().is_backend_scalar_pair(dest.layout) { if bx.cx().is_backend_scalar_pair(dest.layout) {
// Into-coerce of a thin pointer to a fat pointer -- just // Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path. // use the operand path.
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); let temp = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest); temp.val.store(bx, dest);
return bx; return;
} }
// Unsize of a nontrivial struct. I would prefer for // Unsize of a nontrivial struct. I would prefer for
// this to be eliminated by MIR building, but // this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause, // `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it. // so the (generic) MIR may not be able to expand it.
let operand = self.codegen_operand(&mut bx, source); let operand = self.codegen_operand(bx, source);
match operand.val { match operand.val {
OperandValue::Pair(..) | OperandValue::Immediate(_) => { OperandValue::Pair(..) | OperandValue::Immediate(_) => {
// Unsize from an immediate structure. We don't // Unsize from an immediate structure. We don't
@ -56,63 +55,62 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// index into the struct, and this case isn't // index into the struct, and this case isn't
// important enough for it. // important enough for it.
debug!("codegen_rvalue: creating ugly alloca"); debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&mut bx, operand.layout); let scratch = PlaceRef::alloca(bx, operand.layout);
scratch.storage_live(&mut bx); scratch.storage_live(bx);
operand.val.store(&mut bx, scratch); operand.val.store(bx, scratch);
base::coerce_unsized_into(&mut bx, scratch, dest); base::coerce_unsized_into(bx, scratch, dest);
scratch.storage_dead(&mut bx); scratch.storage_dead(bx);
} }
OperandValue::Ref(llref, None, align) => { OperandValue::Ref(llref, None, align) => {
let source = PlaceRef::new_sized_aligned(llref, operand.layout, align); let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
base::coerce_unsized_into(&mut bx, source, dest); base::coerce_unsized_into(bx, source, dest);
} }
OperandValue::Ref(_, Some(_), _) => { OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue"); bug!("unsized coercion on an unsized rvalue");
} }
} }
bx
} }
mir::Rvalue::Repeat(ref elem, count) => { mir::Rvalue::Repeat(ref elem, count) => {
let cg_elem = self.codegen_operand(&mut bx, elem); let cg_elem = self.codegen_operand(bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays. // Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() { if dest.layout.is_zst() {
return bx; return;
} }
if let OperandValue::Immediate(v) = cg_elem.val { if let OperandValue::Immediate(v) = cg_elem.val {
let zero = bx.const_usize(0); let zero = bx.const_usize(0);
let start = dest.project_index(&mut bx, zero).llval; let start = dest.project_index(bx, zero).llval;
let size = bx.const_usize(dest.layout.size.bytes()); let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays // Use llvm.memset.p0i8.* to initialize all zero arrays
if bx.cx().const_to_opt_u128(v, false) == Some(0) { if bx.cx().const_to_opt_u128(v, false) == Some(0) {
let fill = bx.cx().const_u8(0); let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty()); bx.memset(start, fill, size, dest.align, MemFlags::empty());
return bx; return;
} }
// Use llvm.memset.p0i8.* to initialize byte arrays // Use llvm.memset.p0i8.* to initialize byte arrays
let v = bx.from_immediate(v); let v = bx.from_immediate(v);
if bx.cx().val_ty(v) == bx.cx().type_i8() { if bx.cx().val_ty(v) == bx.cx().type_i8() {
bx.memset(start, v, size, dest.align, MemFlags::empty()); bx.memset(start, v, size, dest.align, MemFlags::empty());
return bx; return;
} }
} }
let count = let count =
self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all()); self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
bx.write_operand_repeatedly(cg_elem, count, dest) bx.write_operand_repeatedly(cg_elem, count, dest);
} }
mir::Rvalue::Aggregate(ref kind, ref operands) => { mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind { let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => { mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
dest.codegen_set_discr(&mut bx, variant_index); dest.codegen_set_discr(bx, variant_index);
if bx.tcx().adt_def(adt_did).is_enum() { if bx.tcx().adt_def(adt_did).is_enum() {
(dest.project_downcast(&mut bx, variant_index), active_field_index) (dest.project_downcast(bx, variant_index), active_field_index)
} else { } else {
(dest, active_field_index) (dest, active_field_index)
} }
@@ -120,37 +118,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => (dest, None),
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.codegen_operand(&mut bx, operand);
+                    let op = self.codegen_operand(bx, operand);
                     // Do not generate stores and GEPis for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
                         let field = if let mir::AggregateKind::Array(_) = **kind {
                             let llindex = bx.cx().const_usize(field_index as u64);
-                            dest.project_index(&mut bx, llindex)
+                            dest.project_index(bx, llindex)
                         } else {
-                            dest.project_field(&mut bx, field_index)
+                            dest.project_field(bx, field_index)
                         };
-                        op.val.store(&mut bx, field);
+                        op.val.store(bx, field);
                     }
                 }
-                bx
             }
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
-                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                temp.val.store(&mut bx, dest);
-                bx
+                let temp = self.codegen_rvalue_operand(bx, rvalue);
+                temp.val.store(bx, dest);
             }
         }
     }
     pub fn codegen_rvalue_unsized(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         indirect_dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>,
-    ) -> Bx {
+    ) {
         debug!(
             "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
             indirect_dest.llval, rvalue
@@ -158,9 +154,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&mut bx, operand);
-                cg_operand.val.store_unsized(&mut bx, indirect_dest);
-                bx
+                let cg_operand = self.codegen_operand(bx, operand);
+                cg_operand.val.store_unsized(bx, indirect_dest);
             }
             _ => bug!("unsized assignment other than `Rvalue::Use`"),
@@ -169,9 +164,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_rvalue_operand(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
-    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+    ) -> OperandRef<'tcx, Bx::Value> {
         assert!(
             self.rvalue_creates_operand(rvalue, DUMMY_SP),
             "cannot codegen {:?} to operand",
@@ -180,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.codegen_operand(&mut bx, source);
+                let operand = self.codegen_operand(bx, source);
                 debug!("cast operand is {:?}", operand);
                 let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
@@ -245,7 +240,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             }
                         };
                         let (lldata, llextra) =
-                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(PointerCast::MutToConstPointer)
@@ -278,7 +273,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             OperandValue::Pair(v, l) => (v, Some(l)),
                         };
                         let (lldata, llextra) =
-                            base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(
@@ -299,7 +294,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         let ll_t_out = bx.cx().immediate_backend_type(cast);
                         if operand.layout.abi.is_uninhabited() {
                             let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
-                            return (bx, OperandRef { val, layout: cast });
+                            return OperandRef { val, layout: cast };
                         }
                         let r_t_in =
                             CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
@@ -348,7 +343,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         OperandValue::Immediate(newval)
                     }
                 };
-                (bx, OperandRef { val, layout: cast })
+                OperandRef { val, layout: cast }
             }
             mir::Rvalue::Ref(_, bk, place) => {
@@ -361,10 +356,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.codegen_place_to_pointer(bx, place, mk_ref)
             }
-            mir::Rvalue::CopyForDeref(place) => {
-                let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
-                (bx, operand)
-            }
+            mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
             mir::Rvalue::AddressOf(mutability, place) => {
                 let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                     tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
@@ -373,23 +365,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::Rvalue::Len(place) => {
-                let size = self.evaluate_array_len(&mut bx, place);
-                let operand = OperandRef {
+                let size = self.evaluate_array_len(bx, place);
+                OperandRef {
                     val: OperandValue::Immediate(size),
                     layout: bx.cx().layout_of(bx.tcx().types.usize),
-                };
-                (bx, operand)
+                }
             }
             mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(bx, lhs);
+                let rhs = self.codegen_operand(bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (
                         OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra),
                     ) => self.codegen_fat_ptr_binop(
-                        &mut bx,
+                        bx,
                         op,
                         lhs_addr,
                         lhs_extra,
@@ -399,22 +390,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     ),
                     (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
-                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+                        self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
                     }
                     _ => bug!(),
                 };
-                let operand = OperandRef {
+                OperandRef {
                     val: OperandValue::Immediate(llresult),
                     layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
-                };
-                (bx, operand)
+                }
             }
             mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(bx, lhs);
+                let rhs = self.codegen_operand(bx, rhs);
                 let result = self.codegen_scalar_checked_binop(
-                    &mut bx,
+                    bx,
                     op,
                     lhs.immediate(),
                     rhs.immediate(),
@@ -422,13 +412,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 );
                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
-                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
-                (bx, operand)
+                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
             }
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_floating_point();
                 let llval = match op {
@@ -441,22 +429,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     }
                 };
-                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+                OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
             }
             mir::Rvalue::Discriminant(ref place) => {
                 let discr_ty = rvalue.ty(self.mir, bx.tcx());
                 let discr_ty = self.monomorphize(discr_ty);
-                let discr = self
-                    .codegen_place(&mut bx, place.as_ref())
-                    .codegen_get_discr(&mut bx, discr_ty);
-                (
-                    bx,
-                    OperandRef {
-                        val: OperandValue::Immediate(discr),
-                        layout: self.cx.layout_of(discr_ty),
-                    },
-                )
+                let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
+                OperandRef {
+                    val: OperandValue::Immediate(discr),
+                    layout: self.cx.layout_of(discr_ty),
+                }
             }
             mir::Rvalue::NullaryOp(null_op, ty) => {
@@ -469,36 +452,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
                 let val = bx.cx().const_usize(val);
                 let tcx = self.cx.tcx();
-                (
-                    bx,
-                    OperandRef {
-                        val: OperandValue::Immediate(val),
-                        layout: self.cx.layout_of(tcx.types.usize),
-                    },
-                )
+                OperandRef {
+                    val: OperandValue::Immediate(val),
+                    layout: self.cx.layout_of(tcx.types.usize),
+                }
             }
             mir::Rvalue::ThreadLocalRef(def_id) => {
                 assert!(bx.cx().tcx().is_static(def_id));
                 let static_ = bx.get_static(def_id);
                 let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
-                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
-                (bx, operand)
-            }
-            mir::Rvalue::Use(ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
-                (bx, operand)
+                OperandRef::from_immediate_or_packed_pair(bx, static_, layout)
             }
+            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
             mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
-                let operand =
-                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
-                (bx, operand)
+                OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(bx, operand);
                 let lloperand = operand.immediate();
                 let content_ty = self.monomorphize(content_ty);
@@ -506,8 +480,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let llty_ptr = bx.cx().backend_type(box_layout);
                 let val = bx.pointercast(lloperand, llty_ptr);
-                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
-                (bx, operand)
+                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
             }
         }
     }
@@ -531,11 +504,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
     fn codegen_place_to_pointer(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         place: mir::Place<'tcx>,
         mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
-    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
-        let cg_place = self.codegen_place(&mut bx, place.as_ref());
+    ) -> OperandRef<'tcx, Bx::Value> {
+        let cg_place = self.codegen_place(bx, place.as_ref());
         let ty = cg_place.layout.ty;
@@ -546,7 +519,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         } else {
             OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
         };
-        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
     }
     pub fn codegen_scalar_binop(
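The `Rvalue::Repeat` arm above keeps its three-tier strategy even after the builder refactor: skip zero-sized elements entirely, lower all-zero and single-byte patterns to `llvm.memset`, and fall back to the element-by-element loop of `write_operand_repeatedly` only when neither fast path applies. A minimal stand-alone sketch of that dispatch follows; `Builder`, `memset`, and `write_repeatedly` here are invented stand-ins for illustration, not the rustc_codegen_ssa APIs.

// Stand-alone sketch of the `Rvalue::Repeat` dispatch above.
// `Builder` and its methods are invented stand-ins, not rustc APIs.
struct Builder {
    output: Vec<String>,
}

impl Builder {
    // Bulk fill with a single byte value (models llvm.memset).
    fn memset(&mut self, byte: u8, len: u64) {
        self.output.push(format!("memset({byte:#04x} x {len})"));
    }

    // Element-by-element loop (models `write_operand_repeatedly`).
    fn write_repeatedly(&mut self, elem: &[u8], count: u64) {
        self.output.push(format!("loop: store {elem:?} x {count}"));
    }

    // Skip ZSTs, prefer memset for all-zero or one-byte elements,
    // otherwise emit the explicit loop.
    fn repeat(&mut self, elem: &[u8], count: u64) {
        if elem.is_empty() || count == 0 {
            return; // zero-sized element or empty array: emit nothing
        }
        if elem.iter().all(|&b| b == 0) {
            self.memset(0, elem.len() as u64 * count);
            return;
        }
        if let [byte] = elem {
            self.memset(*byte, count);
            return;
        }
        self.write_repeatedly(elem, count);
    }
}

fn main() {
    let mut bx = Builder { output: Vec::new() };
    bx.repeat(&[0, 0, 0, 0], 8); // memset(0x00 x 32)
    bx.repeat(&[0xab], 16);      // memset(0xab x 16)
    bx.repeat(&[1, 2], 4);       // loop: store [1, 2] x 4
    for line in &bx.output {
        println!("{line}");
    }
}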


@@ -8,8 +8,8 @@ use crate::traits::*;
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "debug", skip(self, bx))]
-    pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
-        self.set_debug_loc(&mut bx, statement.source_info);
+    pub fn codegen_statement(&mut self, bx: &mut Bx, statement: &mir::Statement<'tcx>) {
+        self.set_debug_loc(bx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
                 if let Some(index) = place.as_local() {
@@ -19,10 +19,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
                         }
                         LocalRef::Operand(None) => {
-                            let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+                            let operand = self.codegen_rvalue_operand(bx, rvalue);
                             self.locals[index] = LocalRef::Operand(Some(operand));
-                            self.debug_introduce_local(&mut bx, index);
-                            bx
+                            self.debug_introduce_local(bx, index);
                         }
                         LocalRef::Operand(Some(op)) => {
                             if !op.layout.is_zst() {
@@ -35,59 +34,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             // If the type is zero-sized, it's already been set here,
                             // but we still need to make sure we codegen the operand
-                            self.codegen_rvalue_operand(bx, rvalue).0
+                            self.codegen_rvalue_operand(bx, rvalue);
                         }
                     }
                 } else {
-                    let cg_dest = self.codegen_place(&mut bx, place.as_ref());
-                    self.codegen_rvalue(bx, cg_dest, rvalue)
+                    let cg_dest = self.codegen_place(bx, place.as_ref());
+                    self.codegen_rvalue(bx, cg_dest, rvalue);
                 }
             }
             mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
-                self.codegen_place(&mut bx, place.as_ref())
-                    .codegen_set_discr(&mut bx, variant_index);
-                bx
+                self.codegen_place(bx, place.as_ref()).codegen_set_discr(bx, variant_index);
             }
             mir::StatementKind::Deinit(..) => {
                 // For now, don't codegen this to anything. In the future it may be worth
                 // experimenting with what kind of information we can emit to LLVM without hurting
                 // perf here
-                bx
             }
             mir::StatementKind::StorageLive(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_live(&mut bx);
+                    cg_place.storage_live(bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_live(&mut bx);
+                    cg_indirect_place.storage_live(bx);
                 }
-                bx
             }
             mir::StatementKind::StorageDead(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_dead(&mut bx);
+                    cg_place.storage_dead(bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_dead(&mut bx);
+                    cg_indirect_place.storage_dead(bx);
                 }
-                bx
             }
             mir::StatementKind::Coverage(box ref coverage) => {
-                self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
-                bx
+                self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope);
             }
             mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
-                let op_val = self.codegen_operand(&mut bx, op);
+                let op_val = self.codegen_operand(bx, op);
                 bx.assume(op_val.immediate());
-                bx
            }
             mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
                 mir::CopyNonOverlapping { ref count, ref src, ref dst },
             )) => {
-                let dst_val = self.codegen_operand(&mut bx, dst);
-                let src_val = self.codegen_operand(&mut bx, src);
-                let count = self.codegen_operand(&mut bx, count).immediate();
+                let dst_val = self.codegen_operand(bx, dst);
+                let src_val = self.codegen_operand(bx, src);
+                let count = self.codegen_operand(bx, count).immediate();
                 let pointee_layout = dst_val
                     .layout
-                    .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+                    .pointee_info_at(bx, rustc_target::abi::Size::ZERO)
                     .expect("Expected pointer");
                 let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
@@ -95,12 +87,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let dst = dst_val.immediate();
                 let src = src_val.immediate();
                 bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
-                bx
             }
             mir::StatementKind::FakeRead(..)
             | mir::StatementKind::Retag { .. }
             | mir::StatementKind::AscribeUserType(..)
-            | mir::StatementKind::Nop => bx,
+            | mir::StatementKind::Nop => {}
         }
     }
 }
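Every arm of `codegen_statement` used to end in `bx` because the builder was moved through the function; after this change, arms that emit nothing are simply `{}` and nothing is returned. A minimal before/after sketch of that calling convention, assuming invented `Builder` and `Stmt` stand-ins rather than the real `Bx: BuilderMethods` and MIR types:

// Invented stand-ins for the real builder and MIR statement types.
struct Builder {
    insts: Vec<String>,
}
struct Stmt(&'static str);

// Old shape: the builder was moved in and had to be moved back out,
// so every match arm had to remember to return `bx`.
fn codegen_statement_old(mut bx: Builder, stmt: &Stmt) -> Builder {
    bx.insts.push(format!("emit {}", stmt.0));
    bx // forgetting this is a compile error in some arm, noise in all
}

// New shape: borrow the builder mutably and return nothing.
fn codegen_statement_new(bx: &mut Builder, stmt: &Stmt) {
    bx.insts.push(format!("emit {}", stmt.0));
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    bx = codegen_statement_old(bx, &Stmt("assign"));   // caller must re-bind
    codegen_statement_new(&mut bx, &Stmt("storage"));  // caller just borrows
    assert_eq!(bx.insts.len(), 2);
}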


@@ -151,11 +151,11 @@ pub trait BuilderMethods<'a, 'tcx>:
     /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
     fn write_operand_repeatedly(
-        self,
+        &mut self,
         elem: OperandRef<'tcx, Self::Value>,
         count: u64,
         dest: PlaceRef<'tcx, Self::Value>,
-    ) -> Self;
+    );
     fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
     fn nonnull_metadata(&mut self, load: Self::Value);
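This trait-level signature is what drives the pattern across the rest of the diff: `write_operand_repeatedly` now borrows the builder instead of consuming and returning it. A toy illustration of the new shape, using an invented `Backend` trait and `TextBuilder` type in place of the real `BuilderMethods` implementations:

// Toy stand-in for the trait change; `Backend` is not the real trait.
trait Backend {
    // Old shape was `fn write_operand_repeatedly(self, ...) -> Self;`,
    // which forced implementors and callers to move the builder around.
    fn write_operand_repeatedly(&mut self, elem: u8, count: u64);
}

struct TextBuilder {
    buf: String,
}

impl Backend for TextBuilder {
    fn write_operand_repeatedly(&mut self, elem: u8, count: u64) {
        // A real backend would emit a loop header/body/exit here.
        self.buf.push_str(&format!("store {elem} x {count}\n"));
    }
}

fn main() {
    let mut bx = TextBuilder { buf: String::new() };
    bx.write_operand_repeatedly(7, 3); // no re-binding of `bx` needed
    print!("{}", bx.buf);
}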


@@ -58,19 +58,19 @@
 // cdb-command: g
 // cdb-command: dv
+// cdb-check:[...]y = true
+// cdb-check:[...]b = 0n456
 // cdb-check:[...]a = 0n123
 // cdb-check:[...]x = 0n42
-// cdb-check:[...]b = 0n456
-// cdb-check:[...]y = true
 // cdb-command: g
 // cdb-command: dv
 // cdb-check:[...]z = 0n10
 // cdb-check:[...]c = 0n789
+// cdb-check:[...]y = true
+// cdb-check:[...]b = 0n456
 // cdb-check:[...]a = 0n123
 // cdb-check:[...]x = 0n42
-// cdb-check:[...]b = 0n456
-// cdb-check:[...]y = true
 fn main() {
     let a = id(123);