1
Fork 0

Merge Call and DivergingCall diffs into CallKind

This merges two separate Call terminators and uses a separate CallKind sub-enum instead.

Somewhat unrelatedly, copying into the destination value for a certain kind of invoke is also
implemented here. See the associated comment in the code for various details that arise with this
implementation.
This commit is contained in:
Simonas Kazlauskas 2015-12-22 01:46:56 +02:00
parent 50107034c0
commit d1c644c1e9
6 changed files with 203 additions and 185 deletions

View file

@ -250,51 +250,58 @@ pub enum Terminator<'tcx> {
func: Operand<'tcx>, func: Operand<'tcx>,
/// Arguments the function is called with /// Arguments the function is called with
args: Vec<Operand<'tcx>>, args: Vec<Operand<'tcx>>,
/// Location to write the return value into /// The kind of call with associated information
destination: Lvalue<'tcx>, kind: CallKind<'tcx>,
targets: CallTargets,
}, },
}
/// Block ends with a call of a diverging function. #[derive(Clone, RustcEncodable, RustcDecodable)]
DivergingCall { pub enum CallKind<'tcx> {
/// The function thats being called /// Diverging function without associated cleanup
func: Operand<'tcx>, Diverging,
/// Arguments the function is called with /// Diverging function with associated cleanup
args: Vec<Operand<'tcx>>, DivergingCleanup(BasicBlock),
/// Some, if there's any cleanup to be done when the function unwinds /// Converging function without associated cleanup
cleanup: Option<BasicBlock>, Converging {
/// Destination where the call result is written
destination: Lvalue<'tcx>,
/// Block to branch into on successful return
target: BasicBlock,
},
ConvergingCleanup {
/// Destination where the call result is written
destination: Lvalue<'tcx>,
/// First target is branched to on successful return.
/// Second block contains the cleanups to do on unwind.
targets: (BasicBlock, BasicBlock)
} }
} }
#[derive(Clone, Copy, RustcEncodable, RustcDecodable)] impl<'tcx> CallKind<'tcx> {
pub enum CallTargets { pub fn successors(&self) -> &[BasicBlock] {
/// The only target that should be entered when function returns normally. match *self {
Return(BasicBlock), CallKind::Diverging => &[],
/// In addition to the normal-return block, function has associated cleanup that should be done CallKind::DivergingCleanup(ref b) |
/// when function unwinds. CallKind::Converging { target: ref b, .. } => slice::ref_slice(b),
WithCleanup((BasicBlock, BasicBlock)) CallKind::ConvergingCleanup { ref targets, .. } => targets.as_slice(),
}
impl CallTargets {
pub fn new(ret: BasicBlock, cleanup: Option<BasicBlock>) -> CallTargets {
if let Some(c) = cleanup {
CallTargets::WithCleanup((ret, c))
} else {
CallTargets::Return(ret)
} }
} }
pub fn as_slice(&self) -> &[BasicBlock] { pub fn successors_mut(&mut self) -> &mut [BasicBlock] {
match *self { match *self {
CallTargets::Return(ref b) => slice::ref_slice(b), CallKind::Diverging => &mut [],
CallTargets::WithCleanup(ref bs) => bs.as_slice() CallKind::DivergingCleanup(ref mut b) |
CallKind::Converging { target: ref mut b, .. } => slice::mut_ref_slice(b),
CallKind::ConvergingCleanup { ref mut targets, .. } => targets.as_mut_slice(),
} }
} }
pub fn as_mut_slice(&mut self) -> &mut [BasicBlock] { pub fn destination(&self) -> Option<Lvalue<'tcx>> {
match *self { match *self {
CallTargets::Return(ref mut b) => slice::mut_ref_slice(b), CallKind::Converging { ref destination, .. } |
CallTargets::WithCleanup(ref mut bs) => bs.as_mut_slice() CallKind::ConvergingCleanup { ref destination, .. } => Some(destination.clone()),
CallKind::Diverging |
CallKind::DivergingCleanup(_) => None
} }
} }
} }
@ -309,12 +316,7 @@ impl<'tcx> Terminator<'tcx> {
SwitchInt { targets: ref b, .. } => b, SwitchInt { targets: ref b, .. } => b,
Resume => &[], Resume => &[],
Return => &[], Return => &[],
Call { targets: ref b, .. } => b.as_slice(), Call { ref kind, .. } => kind.successors(),
DivergingCall { cleanup: ref b, .. } => if let Some(b) = b.as_ref() {
slice::ref_slice(b)
} else {
&mut []
},
} }
} }
@ -327,12 +329,7 @@ impl<'tcx> Terminator<'tcx> {
SwitchInt { targets: ref mut b, .. } => b, SwitchInt { targets: ref mut b, .. } => b,
Resume => &mut [], Resume => &mut [],
Return => &mut [], Return => &mut [],
Call { targets: ref mut b, .. } => b.as_mut_slice(), Call { ref mut kind, .. } => kind.successors_mut(),
DivergingCall { cleanup: ref mut b, .. } => if let Some(b) = b.as_mut() {
slice::mut_ref_slice(b)
} else {
&mut []
},
} }
} }
} }
@ -399,13 +396,18 @@ impl<'tcx> Terminator<'tcx> {
SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
Return => write!(fmt, "return"), Return => write!(fmt, "return"),
Resume => write!(fmt, "resume"), Resume => write!(fmt, "resume"),
Call { .. } => { Call { ref kind, ref func, ref args } => {
// the author didn't bother rebasing this if let Some(destination) = kind.destination() {
unimplemented!() try!(write!(fmt, "{:?} = ", destination));
}, }
DivergingCall { .. } => { try!(write!(fmt, "{:?}(", func));
// the author didn't bother rebasing this for (index, arg) in args.iter().enumerate() {
unimplemented!() if index > 0 {
try!(write!(fmt, ", "));
}
try!(write!(fmt, "{:?}", arg));
}
write!(fmt, ")")
} }
} }
} }
@ -417,8 +419,6 @@ impl<'tcx> Terminator<'tcx> {
Return | Resume => vec![], Return | Resume => vec![],
Goto { .. } => vec!["".into_cow()], Goto { .. } => vec!["".into_cow()],
If { .. } => vec!["true".into_cow(), "false".into_cow()], If { .. } => vec!["true".into_cow(), "false".into_cow()],
Call { .. } => vec!["return".into_cow(), "unwind".into_cow()],
DivergingCall { .. } => vec!["unwind".into_cow()],
Switch { ref adt_def, .. } => { Switch { ref adt_def, .. } => {
adt_def.variants adt_def.variants
.iter() .iter()
@ -435,6 +435,16 @@ impl<'tcx> Terminator<'tcx> {
.chain(iter::once(String::from("otherwise").into_cow())) .chain(iter::once(String::from("otherwise").into_cow()))
.collect() .collect()
} }
Call { ref kind, .. } => match *kind {
CallKind::Diverging =>
vec![],
CallKind::DivergingCleanup(..) =>
vec!["unwind".into_cow()],
CallKind::Converging { .. } =>
vec!["return".into_cow()],
CallKind::ConvergingCleanup { .. } =>
vec!["return".into_cow(), "unwind".into_cow()],
},
} }
} }
} }

View file

@ -136,23 +136,15 @@ pub trait Visitor<'tcx> {
Terminator::Return => { Terminator::Return => {
} }
Terminator::Call { ref func, ref args, ref destination, ref targets } => { Terminator::Call { ref func, ref args, ref kind } => {
self.visit_lvalue(destination, LvalueContext::Store); if let Some(ref destination) = kind.destination() {
self.visit_lvalue(destination, LvalueContext::Store);
}
self.visit_operand(func); self.visit_operand(func);
for arg in args { for arg in args {
self.visit_operand(arg); self.visit_operand(arg);
} }
for &target in targets.as_slice() { for &target in kind.successors() {
self.visit_branch(block, target);
}
}
Terminator::DivergingCall { ref func, ref args, ref cleanup } => {
self.visit_operand(func);
for arg in args {
self.visit_operand(arg);
}
for &target in cleanup.as_ref() {
self.visit_branch(block, target); self.visit_branch(block, target);
} }
} }
@ -432,26 +424,15 @@ pub trait MutVisitor<'tcx> {
Terminator::Return => { Terminator::Return => {
} }
Terminator::Call { ref mut func, Terminator::Call { ref mut func, ref mut args, ref mut kind } => {
ref mut args, if let Some(ref mut destination) = kind.destination() {
ref mut destination, self.visit_lvalue(destination, LvalueContext::Store);
ref mut targets } => { }
self.visit_lvalue(destination, LvalueContext::Store);
self.visit_operand(func); self.visit_operand(func);
for arg in args { for arg in args {
self.visit_operand(arg); self.visit_operand(arg);
} }
for &target in targets.as_slice() { for &target in kind.successors() {
self.visit_branch(block, target);
}
}
Terminator::DivergingCall { ref mut func, ref mut args, ref mut cleanup } => {
self.visit_operand(func);
for arg in args {
self.visit_operand(arg);
}
for &target in cleanup.as_ref() {
self.visit_branch(block, target); self.visit_branch(block, target);
} }
} }

View file

@ -224,17 +224,22 @@ impl<'a,'tcx> Builder<'a,'tcx> {
let success = this.cfg.start_new_block(); let success = this.cfg.start_new_block();
let cleanup = this.diverge_cleanup(); let cleanup = this.diverge_cleanup();
let term = if diverges { this.cfg.terminate(block, Terminator::Call {
Terminator::DivergingCall { func: fun, args: args, cleanup: cleanup } func: fun,
} else { args: args,
Terminator::Call { kind: match (cleanup, diverges) {
func: fun, (None, true) => CallKind::Diverging,
args: args, (Some(c), true) => CallKind::DivergingCleanup(c),
destination: destination.clone(), (None, false) => CallKind::Converging {
targets: CallTargets::new(success, cleanup) destination: destination.clone(),
target: success
},
(Some(c), false) => CallKind::ConvergingCleanup {
destination: destination.clone(),
targets: (success, c)
}
} }
}; });
this.cfg.terminate(block, term);
success.unit() success.unit()
} }

View file

@ -302,7 +302,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
index: Operand<'tcx>, index: Operand<'tcx>,
len: Operand<'tcx>, len: Operand<'tcx>,
span: Span) { span: Span) {
let cleanup = self.diverge_cleanup();
let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem); let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
let str_ty = self.hir.tcx().mk_static_str(); let str_ty = self.hir.tcx().mk_static_str();
let tup_ty = self.hir.tcx().mk_tup(vec![str_ty, self.hir.tcx().types.u32]); let tup_ty = self.hir.tcx().mk_tup(vec![str_ty, self.hir.tcx().types.u32]);
@ -316,16 +315,19 @@ impl<'a,'tcx> Builder<'a,'tcx> {
// FIXME: ReStatic might be wrong here? // FIXME: ReStatic might be wrong here?
self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple;
Rvalue::Ref(*ref_region, BorrowKind::Unique, tuple)); Rvalue::Ref(*ref_region, BorrowKind::Unique, tuple));
self.cfg.terminate(block, Terminator::DivergingCall { let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, Terminator::Call {
func: func, func: func,
args: vec![Operand::Consume(tuple_ref), index, len], args: vec![Operand::Consume(tuple_ref), index, len],
cleanup: cleanup, kind: match cleanup {
None => CallKind::Diverging,
Some(c) => CallKind::DivergingCleanup(c)
}
}); });
} }
/// Create diverge cleanup and branch to it from `block`. /// Create diverge cleanup and branch to it from `block`.
pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) { pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) {
let cleanup = self.diverge_cleanup();
let func = self.lang_function(lang_items::PanicFnLangItem); let func = self.lang_function(lang_items::PanicFnLangItem);
let str_ty = self.hir.tcx().mk_static_str(); let str_ty = self.hir.tcx().mk_static_str();
@ -348,11 +350,14 @@ impl<'a,'tcx> Builder<'a,'tcx> {
// FIXME: ReStatic might be wrong here? // FIXME: ReStatic might be wrong here?
self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple;
Rvalue::Ref(*ref_region, BorrowKind::Unique, tuple)); Rvalue::Ref(*ref_region, BorrowKind::Unique, tuple));
let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, Terminator::DivergingCall { self.cfg.terminate(block, Terminator::Call {
func: func, func: func,
args: vec![Operand::Consume(tuple_ref)], args: vec![Operand::Consume(tuple_ref)],
cleanup: cleanup, kind: match cleanup {
None => CallKind::Diverging,
Some(c) => CallKind::DivergingCleanup(c)
}
}); });
} }

View file

@ -93,14 +93,10 @@ impl<'a, 'tcx> EraseRegions<'a, 'tcx> {
self.erase_regions_lvalue(discr); self.erase_regions_lvalue(discr);
*switch_ty = self.tcx.erase_regions(switch_ty); *switch_ty = self.tcx.erase_regions(switch_ty);
}, },
Terminator::Call { ref mut destination, ref mut func, ref mut args, .. } => { Terminator::Call { ref mut func, ref mut args, ref mut kind } => {
self.erase_regions_lvalue(destination); if let Some(ref mut destination) = kind.destination() {
self.erase_regions_operand(func); self.erase_regions_lvalue(destination);
for arg in &mut *args {
self.erase_regions_operand(arg);
} }
}
Terminator::DivergingCall { ref mut func, ref mut args, .. } => {
self.erase_regions_operand(func); self.erase_regions_operand(func);
for arg in &mut *args { for arg in &mut *args {
self.erase_regions_operand(arg); self.erase_regions_operand(arg);

View file

@ -94,82 +94,29 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None); base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
} }
mir::Terminator::Call { ref func, ref args, ref destination, ref targets } => { mir::Terminator::Call { ref func, ref args, ref kind } => {
// The location we'll write the result of the call into. // Create the callee. This will always be a fn ptr and hence a kind of scalar.
let call_dest = self.trans_lvalue(bcx, destination);
let ret_ty = call_dest.ty.to_ty(bcx.tcx());
// Create the callee. This will always be a fn
// ptr and hence a kind of scalar.
let callee = self.trans_operand(bcx, func); let callee = self.trans_operand(bcx, func);
let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty);
let debugloc = DebugLoc::None;
// The arguments we'll be passing. Plus one to account for outptr, if used.
let mut llargs = Vec::with_capacity(args.len() + 1);
// Does the fn use an outptr? If so, we have an extra first argument. // Prepare the return value destination
let return_outptr = type_of::return_uses_outptr(bcx.ccx(), ret_ty); let (ret_dest_ty, must_copy_dest) = if let Some(ref d) = kind.destination() {
// The arguments we'll be passing. let dest = self.trans_lvalue(bcx, d);
let mut llargs = if return_outptr { let ret_ty = dest.ty.to_ty(bcx.tcx());
let mut vec = Vec::with_capacity(args.len() + 1); if type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
vec.push(call_dest.llval); llargs.push(dest.llval);
vec (Some((dest, ret_ty)), false)
} else {
(Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty))
}
} else { } else {
Vec::with_capacity(args.len()) (None, false)
}; };
// Process the rest of the args. // Process the rest of the args.
for arg in args {
let arg_op = self.trans_operand(bcx, arg);
match arg_op.val {
Ref(llval) | Immediate(llval) => llargs.push(llval),
FatPtr(base, extra) => {
// The two words in a fat ptr are passed separately
llargs.push(base);
llargs.push(extra);
}
}
}
let debugloc = DebugLoc::None;
let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty);
match (*targets, base::avoid_invoke(bcx)) {
(mir::CallTargets::WithCleanup((ret, cleanup)), false) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
build::Invoke(bcx,
callee.immediate(),
&llargs[..],
self.llblock(ret),
landingpad.llbb,
Some(attrs),
debugloc);
if !return_outptr && !common::type_is_zero_size(bcx.ccx(), ret_ty) {
// FIXME: What do we do here?
unimplemented!()
}
},
(t, _) => {
let ret = match t {
mir::CallTargets::Return(ret) => ret,
mir::CallTargets::WithCleanup((ret, _)) => {
// make a landing pad regardless (so it sets the personality slot.
let block = self.unreachable_block();
self.make_landing_pad(block);
ret
}
};
let llret = build::Call(bcx,
callee.immediate(),
&llargs[..],
Some(attrs),
debugloc);
if !return_outptr && !common::type_is_zero_size(bcx.ccx(), ret_ty) {
base::store_ty(bcx, llret, call_dest.llval, ret_ty);
}
build::Br(bcx, self.llblock(ret), debugloc)
}
}
},
mir::Terminator::DivergingCall { ref func, ref args, ref cleanup } => {
let callee = self.trans_operand(bcx, func);
let mut llargs = Vec::with_capacity(args.len());
for arg in args { for arg in args {
match self.trans_operand(bcx, arg).val { match self.trans_operand(bcx, arg).val {
Ref(llval) | Immediate(llval) => llargs.push(llval), Ref(llval) | Immediate(llval) => llargs.push(llval),
@ -179,23 +126,73 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} }
} }
} }
let debugloc = DebugLoc::None;
let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty); // Many different ways to call a function handled here
match (*cleanup, base::avoid_invoke(bcx)) { match (base::avoid_invoke(bcx), kind) {
(Some(cleanup), false) => { // The two cases below are the only ones to use LLVMs `invoke`.
(false, &mir::CallKind::DivergingCleanup(cleanup)) => {
let cleanup = self.bcx(cleanup); let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup); let landingpad = self.make_landing_pad(cleanup);
let unreachable = self.unreachable_block();
build::Invoke(bcx, build::Invoke(bcx,
callee.immediate(), callee.immediate(),
&llargs[..], &llargs[..],
unreachable.llbb, self.unreachable_block().llbb,
landingpad.llbb, landingpad.llbb,
Some(attrs), Some(attrs),
debugloc); debugloc);
} },
(t, _) => { (false, &mir::CallKind::ConvergingCleanup { ref targets, .. }) => {
if t.is_some() { let cleanup = self.bcx(targets.1);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
(bcx.fcx.new_block(false, "", None), Some(self.bcx(targets.0)))
} else {
(self.bcx(targets.0), None)
};
let invokeret = build::Invoke(bcx,
callee.immediate(),
&llargs[..],
target.llbb,
landingpad.llbb,
Some(attrs),
debugloc);
if let Some(postinvoketarget) = postinvoke {
// We translate the copy into a temoprary block. The temporary block is
// necessary because the current block has already been terminated (by
// `invoke`) and we cannot really translate into the target block
// because:
// * The target block may have more than a single precedesor;
// * Some LLVM insns cannot have a preceeding store insn (phi,
// cleanuppad), and adding/prepending the store now may render
// those other instructions invalid.
//
// NB: This approach still may break some LLVM code. For example if the
// target block starts with a `phi` (which may only match on immediate
// precedesors), it cannot know about this temporary block thus
// resulting in an invalid code:
//
// this:
// …
// %0 = …
// %1 = invoke to label %temp …
// temp:
// store ty %1, ty* %dest
// br label %actualtargetblock
// actualtargetblock: ; preds: %temp, …
// phi … [%this, …], [%0, …] ; ERROR: phi must match only on
// ; immediate predecessors
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
base::store_ty(target, invokeret, ret_dest.llval, ret_ty);
build::Br(target, postinvoketarget.llbb, debugloc);
}
},
// Everything else uses the regular `Call`, but we have to be careful to
// generate landing pads for later, even if we do not use it.
// FIXME: maybe just change Resume to not panic in that case?
(_, k@&mir::CallKind::DivergingCleanup(_)) |
(_, k@&mir::CallKind::Diverging) => {
if let mir::CallKind::DivergingCleanup(_) = *k {
// make a landing pad regardless, so it sets the personality slot. // make a landing pad regardless, so it sets the personality slot.
let block = self.unreachable_block(); let block = self.unreachable_block();
self.make_landing_pad(block); self.make_landing_pad(block);
@ -203,6 +200,30 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc); build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc);
build::Unreachable(bcx); build::Unreachable(bcx);
} }
(_, k@&mir::CallKind::ConvergingCleanup { .. }) |
(_, k@&mir::CallKind::Converging { .. }) => {
let ret = match *k {
mir::CallKind::Converging { target, .. } => target,
mir::CallKind::ConvergingCleanup { targets, .. } => {
// make a landing pad regardless (so it sets the personality slot.
let block = self.unreachable_block();
self.make_landing_pad(block);
targets.0
},
_ => unreachable!()
};
let llret = build::Call(bcx,
callee.immediate(),
&llargs[..],
Some(attrs),
debugloc);
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
}
build::Br(bcx, self.llblock(ret), debugloc)
}
} }
} }
} }