1
Fork 0

Auto merge of #94123 - bjorn3:cg_ssa_singleton_builder, r=tmiasko

Partially move cg_ssa towards using a single builder

Not all codegen backends can handle hopping between blocks well. For example Cranelift requires blocks to be terminated before switching to building a new block. Rust-gpu requires a `RefCell` to allow hopping between blocks and cg_gcc currently has a buggy implementation of hopping between blocks. This PR reduces the amount of cases where cg_ssa switches between blocks before they are finished and mostly fixes the block hopping in cg_gcc. (~~only `scalar_to_backend` doesn't handle it correctly yet in cg_gcc~~ fixed that one.)

`@antoyo` please review the cg_gcc changes.
This commit is contained in:
bors 2022-02-24 12:28:19 +00:00
commit 3d127e2040
6 changed files with 158 additions and 150 deletions

View file

@@ -390,11 +390,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
bx bx
} }
fn build_sibling_block(&mut self, name: &str) -> Self {
let block = self.append_sibling_block(name);
Self::build(self.cx, block)
}
fn llbb(&self) -> Block<'gcc> { fn llbb(&self) -> Block<'gcc> {
self.block.expect("block") self.block.expect("block")
} }
@@ -409,6 +404,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
func.new_block(name) func.new_block(name)
} }
fn switch_to_block(&mut self, block: Self::BasicBlock) {
*self.cx.current_block.borrow_mut() = Some(block);
self.block = Some(block);
}
fn ret_void(&mut self) { fn ret_void(&mut self) {
self.llbb().end_with_void_return(None) self.llbb().end_with_void_return(None)
} }
@@ -880,28 +880,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let start = dest.project_index(&mut self, zero).llval; let start = dest.project_index(&mut self, zero).llval;
let end = dest.project_index(&mut self, count).llval; let end = dest.project_index(&mut self, count).llval;
let mut header_bx = self.build_sibling_block("repeat_loop_header"); let header_bb = self.append_sibling_block("repeat_loop_header");
let mut body_bx = self.build_sibling_block("repeat_loop_body"); let body_bb = self.append_sibling_block("repeat_loop_body");
let next_bx = self.build_sibling_block("repeat_loop_next"); let next_bb = self.append_sibling_block("repeat_loop_next");
let ptr_type = start.get_type(); let ptr_type = start.get_type();
let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var"); let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
let current_val = current.to_rvalue(); let current_val = current.to_rvalue();
self.assign(current, start); self.assign(current, start);
self.br(header_bx.llbb()); self.br(header_bb);
let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end); self.switch_to_block(header_bb);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
self.cond_br(keep_going, body_bb, next_bb);
self.switch_to_block(body_bb);
let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]); let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
body_bx.llbb().add_assignment(None, current, next); self.llbb().add_assignment(None, current, next);
body_bx.br(header_bx.llbb()); self.br(header_bb);
next_bx self.switch_to_block(next_bb);
self
} }
fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) { fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {

View file

@@ -166,9 +166,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
Self::append_block(self.cx, self.llfn(), name) Self::append_block(self.cx, self.llfn(), name)
} }
fn build_sibling_block(&mut self, name: &str) -> Self { fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
let llbb = self.append_sibling_block(name); *self = Self::build(self.cx, llbb)
Self::build(self.cx, llbb)
} }
fn ret_void(&mut self) { fn ret_void(&mut self) {
@@ -544,16 +543,19 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let start = dest.project_index(&mut self, zero).llval; let start = dest.project_index(&mut self, zero).llval;
let end = dest.project_index(&mut self, count).llval; let end = dest.project_index(&mut self, count).llval;
let mut header_bx = self.build_sibling_block("repeat_loop_header"); let header_bb = self.append_sibling_block("repeat_loop_header");
let mut body_bx = self.build_sibling_block("repeat_loop_body"); let body_bb = self.append_sibling_block("repeat_loop_body");
let next_bx = self.build_sibling_block("repeat_loop_next"); let next_bb = self.append_sibling_block("repeat_loop_next");
self.br(header_bx.llbb()); self.br(header_bb);
let mut header_bx = Self::build(self.cx, header_bb);
let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]); let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); header_bx.cond_br(keep_going, body_bb, next_bb);
let mut body_bx = Self::build(self.cx, body_bb);
let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
cg_elem cg_elem
.val .val
@@ -564,10 +566,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
current, current,
&[self.const_usize(1)], &[self.const_usize(1)],
); );
body_bx.br(header_bx.llbb()); body_bx.br(header_bb);
header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bb);
next_bx Self::build(self.cx, next_bb)
} }
fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) { fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {

View file

@@ -452,11 +452,11 @@ fn codegen_msvc_try<'ll>(
let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| { let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality()); bx.set_personality_fn(bx.eh_personality());
let mut normal = bx.build_sibling_block("normal"); let normal = bx.append_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch"); let catchswitch = bx.append_sibling_block("catchswitch");
let mut catchpad_rust = bx.build_sibling_block("catchpad_rust"); let catchpad_rust = bx.append_sibling_block("catchpad_rust");
let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign"); let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
let mut caught = bx.build_sibling_block("caught"); let caught = bx.append_sibling_block("caught");
let try_func = llvm::get_param(bx.llfn(), 0); let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1); let data = llvm::get_param(bx.llfn(), 1);
@@ -520,12 +520,13 @@ fn codegen_msvc_try<'ll>(
let ptr_align = bx.tcx().data_layout.pointer_align.abi; let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align); let slot = bx.alloca(bx.type_i8p(), ptr_align);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], normal.llbb(), catchswitch.llbb(), None); bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None);
normal.ret(bx.const_i32(0)); bx.switch_to_block(normal);
bx.ret(bx.const_i32(0));
let cs = bx.switch_to_block(catchswitch);
catchswitch.catch_switch(None, None, &[catchpad_rust.llbb(), catchpad_foreign.llbb()]); let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
// We can't use the TypeDescriptor defined in libpanic_unwind because it // We can't use the TypeDescriptor defined in libpanic_unwind because it
// might be in another DLL and the SEH encoding only supports specifying // might be in another DLL and the SEH encoding only supports specifying
@@ -558,21 +559,24 @@ fn codegen_msvc_try<'ll>(
// since our exception object effectively contains a Box. // since our exception object effectively contains a Box.
// //
// Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
bx.switch_to_block(catchpad_rust);
let flags = bx.const_i32(8); let flags = bx.const_i32(8);
let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]); let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align); let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catchpad_rust.call(catch_ty, catch_func, &[data, ptr], Some(&funclet)); bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
catchpad_rust.catch_ret(&funclet, caught.llbb()); bx.catch_ret(&funclet, caught);
// The flag value of 64 indicates a "catch-all". // The flag value of 64 indicates a "catch-all".
bx.switch_to_block(catchpad_foreign);
let flags = bx.const_i32(64); let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p()); let null = bx.const_null(bx.type_i8p());
let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]); let funclet = bx.catch_pad(cs, &[null, flags, null]);
catchpad_foreign.call(catch_ty, catch_func, &[data, null], Some(&funclet)); bx.call(catch_ty, catch_func, &[data, null], Some(&funclet));
catchpad_foreign.catch_ret(&funclet, caught.llbb()); bx.catch_ret(&funclet, caught);
caught.ret(bx.const_i32(1)); bx.switch_to_block(caught);
bx.ret(bx.const_i32(1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
@@ -613,15 +617,17 @@ fn codegen_gnu_try<'ll>(
// (%ptr, _) = landingpad // (%ptr, _) = landingpad
// call %catch_func(%data, %ptr) // call %catch_func(%data, %ptr)
// ret 1 // ret 1
let mut then = bx.build_sibling_block("then"); let then = bx.append_sibling_block("then");
let mut catch = bx.build_sibling_block("catch"); let catch = bx.append_sibling_block("catch");
let try_func = llvm::get_param(bx.llfn(), 0); let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1); let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2); let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None); bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
then.ret(bx.const_i32(0));
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
// Type indicator for the exception being thrown. // Type indicator for the exception being thrown.
// //
@@ -629,14 +635,15 @@ fn codegen_gnu_try<'ll>(
// being thrown. The second value is a "selector" indicating which of // being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to. // the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector. // rust_try ignores the selector.
bx.switch_to_block(catch);
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
let tydesc = bx.const_null(bx.type_i8p()); let tydesc = bx.const_null(bx.type_i8p());
catch.add_clause(vals, tydesc); bx.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0); let ptr = bx.extract_value(vals, 0);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catch.call(catch_ty, catch_func, &[data, ptr], None); bx.call(catch_ty, catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1)); bx.ret(bx.const_i32(1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
@@ -674,57 +681,54 @@ fn codegen_emcc_try<'ll>(
// %catch_data[1] = %is_rust_panic // %catch_data[1] = %is_rust_panic
// call %catch_func(%data, %catch_data) // call %catch_func(%data, %catch_data)
// ret 1 // ret 1
let mut then = bx.build_sibling_block("then"); let then = bx.append_sibling_block("then");
let mut catch = bx.build_sibling_block("catch"); let catch = bx.append_sibling_block("catch");
let try_func = llvm::get_param(bx.llfn(), 0); let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1); let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2); let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None); bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
then.ret(bx.const_i32(0));
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
// Type indicator for the exception being thrown. // Type indicator for the exception being thrown.
// //
// The first value in this tuple is a pointer to the exception object // The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of // being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to. // the landing pad clauses the exception's type had been matched to.
bx.switch_to_block(catch);
let tydesc = bx.eh_catch_typeinfo(); let tydesc = bx.eh_catch_typeinfo();
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
catch.add_clause(vals, tydesc); bx.add_clause(vals, tydesc);
catch.add_clause(vals, bx.const_null(bx.type_i8p())); bx.add_clause(vals, bx.const_null(bx.type_i8p()));
let ptr = catch.extract_value(vals, 0); let ptr = bx.extract_value(vals, 0);
let selector = catch.extract_value(vals, 1); let selector = bx.extract_value(vals, 1);
// Check if the typeid we got is the one for a Rust panic. // Check if the typeid we got is the one for a Rust panic.
let rust_typeid = catch.call_intrinsic("llvm.eh.typeid.for", &[tydesc]); let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid); let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool()); let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
// We need to pass two values to catch_func (ptr and is_rust_panic), so // We need to pass two values to catch_func (ptr and is_rust_panic), so
// create an alloca and pass a pointer to that. // create an alloca and pass a pointer to that.
let ptr_align = bx.tcx().data_layout.pointer_align.abi; let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let i8_align = bx.tcx().data_layout.i8_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi;
let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false); let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
let catch_data = catch.alloca(catch_data_type, ptr_align); let catch_data = bx.alloca(catch_data_type, ptr_align);
let catch_data_0 = catch.inbounds_gep( let catch_data_0 =
catch_data_type, bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
catch_data, bx.store(ptr, catch_data_0, ptr_align);
&[bx.const_usize(0), bx.const_usize(0)], let catch_data_1 =
); bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
catch.store(ptr, catch_data_0, ptr_align); bx.store(is_rust_panic, catch_data_1, i8_align);
let catch_data_1 = catch.inbounds_gep( let catch_data = bx.bitcast(catch_data, bx.type_i8p());
catch_data_type,
catch_data,
&[bx.const_usize(0), bx.const_usize(1)],
);
catch.store(is_rust_panic, catch_data_1, i8_align);
let catch_data = catch.bitcast(catch_data, bx.type_i8p());
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
catch.call(catch_ty, catch_func, &[data, catch_data], None); bx.call(catch_ty, catch_func, &[data, catch_data], None);
catch.ret(bx.const_i32(1)); bx.ret(bx.const_i32(1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function

View file

@@ -102,10 +102,10 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
let va_list_ty = va_list_layout.llvm_type(bx); let va_list_ty = va_list_layout.llvm_type(bx);
let layout = bx.cx.layout_of(target_ty); let layout = bx.cx.layout_of(target_ty);
let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg"); let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
let mut in_reg = bx.build_sibling_block("va_arg.in_reg"); let in_reg = bx.append_sibling_block("va_arg.in_reg");
let mut on_stack = bx.build_sibling_block("va_arg.on_stack"); let on_stack = bx.append_sibling_block("va_arg.on_stack");
let mut end = bx.build_sibling_block("va_arg.end"); let end = bx.append_sibling_block("va_arg.end");
let zero = bx.const_i32(0); let zero = bx.const_i32(0);
let offset_align = Align::from_bytes(4).unwrap(); let offset_align = Align::from_bytes(4).unwrap();
@@ -125,53 +125,53 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
// if the offset >= 0 then the value will be on the stack // if the offset >= 0 then the value will be on the stack
let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align); let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero); let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb()); bx.cond_br(use_stack, on_stack, maybe_reg);
// The value at this point might be in a register, but there is a chance that // The value at this point might be in a register, but there is a chance that
// it could be on the stack so we have to update the offset and then check // it could be on the stack so we have to update the offset and then check
// the offset again. // the offset again.
bx.switch_to_block(maybe_reg);
if gr_type && layout.align.abi.bytes() > 8 { if gr_type && layout.align.abi.bytes() > 8 {
reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15)); reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16)); reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
} }
let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32)); let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));
maybe_reg.store(new_reg_off_v, reg_off, offset_align); bx.store(new_reg_off_v, reg_off, offset_align);
// Check to see if we have overflowed the registers as a result of this. // Check to see if we have overflowed the registers as a result of this.
// If we have then we need to use the stack for this value // If we have then we need to use the stack for this value
let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero); let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb()); bx.cond_br(use_stack, on_stack, in_reg);
bx.switch_to_block(in_reg);
let top_type = bx.type_i8p(); let top_type = bx.type_i8p();
let top = in_reg.struct_gep(va_list_ty, va_list_addr, reg_top_index); let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi); let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
// reg_value = *(@top + reg_off_v); // reg_value = *(@top + reg_off_v);
let mut reg_addr = in_reg.gep(bx.type_i8(), top, &[reg_off_v]); let mut reg_addr = bx.gep(bx.type_i8(), top, &[reg_off_v]);
if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size { if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
// On big-endian systems the value is right-aligned in its slot. // On big-endian systems the value is right-aligned in its slot.
let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32); let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
reg_addr = in_reg.gep(bx.type_i8(), reg_addr, &[offset]); reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
} }
let reg_type = layout.llvm_type(bx); let reg_type = layout.llvm_type(bx);
let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type)); let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi); let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
in_reg.br(end.llbb()); bx.br(end);
// On Stack block // On Stack block
bx.switch_to_block(on_stack);
let stack_value = let stack_value =
emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true); emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
on_stack.br(end.llbb()); bx.br(end);
let val = end.phi( bx.switch_to_block(end);
layout.immediate_llvm_type(bx), let val =
&[reg_value, stack_value], bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);
&[in_reg.llbb(), on_stack.llbb()],
);
*bx = end;
val val
} }

View file

@@ -96,9 +96,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
debug!("llblock: creating cleanup trampoline for {:?}", target); debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target); let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
let mut trampoline = fx.new_block(name); let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); let mut trampoline_bx = Bx::build(fx.cx, trampoline);
trampoline.llbb() trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
trampoline
} else { } else {
lltarget lltarget
} }
@@ -169,9 +170,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} }
if let Some((ret_dest, target)) = destination { if let Some((ret_dest, target)) = destination {
let mut ret_bx = fx.build_block(target); bx.switch_to_block(fx.llbb(target));
fx.set_debug_loc(&mut ret_bx, self.terminator.source_info); fx.set_debug_loc(bx, self.terminator.source_info);
fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret); fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
} }
} else { } else {
let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx)); let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
@@ -452,15 +453,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Create the failure block and the conditional branch to it. // Create the failure block and the conditional branch to it.
let lltarget = helper.llblock(self, target); let lltarget = helper.llblock(self, target);
let panic_block = bx.build_sibling_block("panic"); let panic_block = bx.append_sibling_block("panic");
if expected { if expected {
bx.cond_br(cond, lltarget, panic_block.llbb()); bx.cond_br(cond, lltarget, panic_block);
} else { } else {
bx.cond_br(cond, panic_block.llbb(), lltarget); bx.cond_br(cond, panic_block, lltarget);
} }
// After this point, bx is the block for the call to panic. // After this point, bx is the block for the call to panic.
bx = panic_block; bx.switch_to_block(panic_block);
self.set_debug_loc(&mut bx, terminator.source_info); self.set_debug_loc(&mut bx, terminator.source_info);
// Get the location information. // Get the location information.
@@ -908,13 +909,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Test whether the function pointer is associated with the type identifier. // Test whether the function pointer is associated with the type identifier.
let cond = bx.type_test(fn_ptr, typeid_metadata); let cond = bx.type_test(fn_ptr, typeid_metadata);
let mut bx_pass = bx.build_sibling_block("type_test.pass"); let bb_pass = bx.append_sibling_block("type_test.pass");
let mut bx_fail = bx.build_sibling_block("type_test.fail"); let bb_fail = bx.append_sibling_block("type_test.fail");
bx.cond_br(cond, bx_pass.llbb(), bx_fail.llbb()); bx.cond_br(cond, bb_pass, bb_fail);
bx.switch_to_block(bb_pass);
helper.do_call( helper.do_call(
self, self,
&mut bx_pass, &mut bx,
fn_abi, fn_abi,
fn_ptr, fn_ptr,
&llargs, &llargs,
@@ -922,8 +924,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cleanup, cleanup,
); );
bx_fail.abort(); bx.switch_to_block(bb_fail);
bx_fail.unreachable(); bx.abort();
bx.unreachable();
return; return;
} }
@@ -1020,7 +1023,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_block(&mut self, bb: mir::BasicBlock) { pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
let mut bx = self.build_block(bb); let llbb = self.llbb(bb);
let mut bx = Bx::build(self.cx, llbb);
let mir = self.mir; let mir = self.mir;
let data = &mir[bb]; let data = &mir[bb];
@@ -1356,16 +1360,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// bar(); // bar();
// } // }
Some(&mir::TerminatorKind::Abort) => { Some(&mir::TerminatorKind::Abort) => {
let mut cs_bx = self.new_block(&format!("cs_funclet{:?}", bb)); let cs_bb =
let mut cp_bx = self.new_block(&format!("cp_funclet{:?}", bb)); Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
ret_llbb = cs_bx.llbb(); let cp_bb =
Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
ret_llbb = cs_bb;
let cs = cs_bx.catch_switch(None, None, &[cp_bx.llbb()]); let mut cs_bx = Bx::build(self.cx, cs_bb);
let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
// The "null" here is actually a RTTI type descriptor for the // The "null" here is actually a RTTI type descriptor for the
// C++ personality function, but `catch (...)` has no type so // C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which // it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block. // represents that this is a catch-all block.
let mut cp_bx = Bx::build(self.cx, cp_bb);
let null = cp_bx.const_null( let null = cp_bx.const_null(
cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space), cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
); );
@@ -1374,8 +1382,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cp_bx.br(llbb); cp_bx.br(llbb);
} }
_ => { _ => {
let mut cleanup_bx = self.new_block(&format!("funclet_{:?}", bb)); let cleanup_bb =
ret_llbb = cleanup_bx.llbb(); Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
ret_llbb = cleanup_bb;
let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
funclet = cleanup_bx.cleanup_pad(None, &[]); funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb); cleanup_bx.br(llbb);
} }
@@ -1383,7 +1393,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.funclets[bb] = Some(funclet); self.funclets[bb] = Some(funclet);
ret_llbb ret_llbb
} else { } else {
let mut bx = self.new_block("cleanup"); let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
let mut bx = Bx::build(self.cx, bb);
let llpersonality = self.cx.eh_personality(); let llpersonality = self.cx.eh_personality();
let llretty = self.landing_pad_type(); let llretty = self.landing_pad_type();
@@ -1405,10 +1416,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn unreachable_block(&mut self) -> Bx::BasicBlock { fn unreachable_block(&mut self) -> Bx::BasicBlock {
self.unreachable_block.unwrap_or_else(|| { self.unreachable_block.unwrap_or_else(|| {
let mut bx = self.new_block("unreachable"); let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
let mut bx = Bx::build(self.cx, llbb);
bx.unreachable(); bx.unreachable();
self.unreachable_block = Some(bx.llbb()); self.unreachable_block = Some(llbb);
bx.llbb() llbb
}) })
} }
@@ -1416,7 +1428,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.double_unwind_guard.unwrap_or_else(|| { self.double_unwind_guard.unwrap_or_else(|| {
assert!(!base::wants_msvc_seh(self.cx.sess())); assert!(!base::wants_msvc_seh(self.cx.sess()));
let mut bx = self.new_block("abort"); let llbb = Bx::append_block(self.cx, self.llfn, "abort");
let mut bx = Bx::build(self.cx, llbb);
self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span)); self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
let llpersonality = self.cx.eh_personality(); let llpersonality = self.cx.eh_personality();
@@ -1434,20 +1447,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.apply_attrs_to_cleanup_callsite(llret); bx.apply_attrs_to_cleanup_callsite(llret);
bx.unreachable(); bx.unreachable();
let llbb = bx.llbb();
self.double_unwind_guard = Some(llbb); self.double_unwind_guard = Some(llbb);
llbb llbb
}) })
} }
// FIXME(eddyb) replace with `build_sibling_block`/`append_sibling_block`
// (which requires having a `Bx` already, and not all callers do).
fn new_block(&self, name: &str) -> Bx {
let llbb = Bx::append_block(self.cx, self.llfn, name);
Bx::build(self.cx, llbb)
}
/// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
/// cached in `self.cached_llbbs`, or created on demand (and cached). /// cached in `self.cached_llbbs`, or created on demand (and cached).
// FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
@@ -1461,11 +1466,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}) })
} }
pub fn build_block(&mut self, bb: mir::BasicBlock) -> Bx {
let llbb = self.llbb(bb);
Bx::build(self.cx, llbb)
}
fn make_return_dest( fn make_return_dest(
&mut self, &mut self,
bx: &mut Bx, bx: &mut Bx,

View file

@@ -53,8 +53,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock; fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;
// FIXME(eddyb) replace with callers using `append_sibling_block`. fn switch_to_block(&mut self, llbb: Self::BasicBlock);
fn build_sibling_block(&mut self, name: &str) -> Self;
fn ret_void(&mut self); fn ret_void(&mut self);
fn ret(&mut self, v: Self::Value); fn ret(&mut self, v: Self::Value);