Stars: 1 · Forks: 0

Mark drop calls in landing pads cold instead of noinline

Now that deferred inlining has been disabled in LLVM,
this shouldn't cause catastrophic size blowup.
This commit is contained in:
Erik Desjardins 2021-12-29 15:28:31 -05:00
parent 7ae5508426
commit 2b662217e7
6 changed files with 42 additions and 9 deletions

View file

@@ -1404,7 +1404,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cx
     }

-    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+    fn mark_callsite_cold(&mut self, _llret: RValue<'gcc>) {
         unimplemented!();
     }
 }

View file

@@ -1201,8 +1201,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }

-    fn do_not_inline(&mut self, llret: &'ll Value) {
-        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+    fn mark_callsite_cold(&mut self, llret: &'ll Value) {
+        llvm::Attribute::Cold.apply_callsite(llvm::AttributePlace::Function, llret);
     }
 }

View file

@@ -160,11 +160,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
         bx.apply_attrs_callsite(&fn_abi, llret);

         if fx.mir[self.bb].is_cleanup {
-            // Cleanup is always the cold path. Don't inline
-            // drop glue. Also, when there is a deeply-nested
-            // struct, there are "symmetry" issues that cause
-            // exponential inlining - see issue #41696.
-            bx.do_not_inline(llret);
+            // Cleanup is always the cold path.
+            bx.mark_callsite_cold(llret);
         }

         if let Some((ret_dest, target)) = destination {

View file

@@ -311,5 +311,5 @@ pub trait BuilderMethods<'a, 'tcx>:
     ) -> Self::Value;

     fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

-    fn do_not_inline(&mut self, llret: Self::Value);
+    fn mark_callsite_cold(&mut self, llret: Self::Value);
 }

View file

@ -0,0 +1,14 @@
// compile-flags: -Cno-prepopulate-passes
#![crate_type = "lib"]
// This test checks that drop calls in unwind landing pads
// get the `cold` attribute.
// CHECK-LABEL: @check_cold
// CHECK: call void {{.+}}drop_in_place{{.+}} [[ATTRIBUTES:#[0-9]+]]
// CHECK: attributes [[ATTRIBUTES]] = { cold }
#[no_mangle]
pub fn check_cold(f: fn(), x: Box<u32>) {
    // `f` may unwind. If it does, `x` must still be dropped, so codegen
    // emits a `drop_in_place` call in the unwind landing pad — that is
    // the call the CHECK lines above expect to carry the `cold`
    // attribute.
    f();
}

View file

@ -0,0 +1,22 @@
// no-system-llvm: needs patch for Rust alloc/dealloc functions
// compile-flags: -Copt-level=3
#![crate_type = "lib"]
// This test checks that we can inline drop_in_place in
// unwind landing pads. Without this, the box pointers escape,
// and LLVM will not optimize out the pointer comparison.
// See https://github.com/rust-lang/rust/issues/46515
// Everything should be optimized out.
// CHECK-LABEL: @check_no_escape_in_landingpad
// CHECK: start:
// CHECK-NEXT: ret void
#[no_mangle]
pub fn check_no_escape_in_landingpad(f: fn()) {
    // Two separate heap allocations; neither reference escapes this
    // function except through the landing-pad drop calls.
    let x = &*Box::new(0);
    let y = &*Box::new(0);
    // Per the header comment, distinct allocations compare unequal, so
    // once `drop_in_place` is inlined into the landing pad (rather than
    // blocked by `noinline`) the pointers no longer count as escaped and
    // LLVM folds this whole branch — leaving only `ret void`.
    if x as *const _ == y as *const _ {
        f();
    }
}