Auto merge of #102551 - bjorn3:cg_ssa_cleanup, r=davidtwco

Some more cleanup for rustc_codegen_ssa

With the aim of making backends that are not LLVM-like, such as Cranelift, easier to support using cg_ssa.
bors 2022-10-03 11:02:58 +00:00
commit f47e9af824
22 changed files with 115 additions and 131 deletions
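
The change most of this diff revolves around is that BuilderMethods::call and BuilderMethods::invoke now take an Option<&FnAbi<'tcx, Ty<'tcx>>>, and the LLVM builder applies call-site attributes itself when one is supplied, instead of cg_ssa going through a separate AbiBuilderMethods::apply_attrs_callsite step after the call is built. A minimal sketch of that shape, using stand-in unit types rather than the real rustc/LLVM handles (the actual trait methods take more parameters, e.g. funclets and the then/catch blocks for invoke):

    // Stand-in unit types take the place of the real rustc/LLVM handles so the
    // sketch compiles on its own; nothing here is the actual cg_ssa API.
    struct FnAbi;  // stands in for FnAbi<'tcx, Ty<'tcx>>
    struct Type;   // stands in for the backend's type handle
    struct Value;  // stands in for the backend's value handle

    trait BuilderMethods {
        // The FnAbi now travels with the call itself; before this PR, cg_ssa built
        // the call and then applied call-site attributes through a separate
        // AbiBuilderMethods::apply_attrs_callsite method.
        fn call(&mut self, llty: &Type, fn_abi: Option<&FnAbi>, llfn: &Value, args: &[&Value]) -> Value;
    }

    struct LlvmLikeBuilder;

    impl BuilderMethods for LlvmLikeBuilder {
        fn call(&mut self, _llty: &Type, fn_abi: Option<&FnAbi>, _llfn: &Value, _args: &[&Value]) -> Value {
            let call = Value; // imagine LLVMRustBuildCall(..) producing the call instruction here
            if let Some(_fn_abi) = fn_abi {
                // An LLVM-like backend applies call-site attributes at this point;
                // a backend with no notion of call-site attributes can ignore fn_abi.
            }
            call
        }
    }

    fn main() {
        let mut bx = LlvmLikeBuilder;
        let (fn_ty, llfn, fn_abi) = (Type, Value, FnAbi);
        // Helper calls that never carried attributes (intrinsics, shims) pass None:
        let _tmp = bx.call(&fn_ty, None, &llfn, &[]);
        // An ordinary Rust call site passes its FnAbi along with the call:
        let _ret = bx.call(&fn_ty, Some(&fn_abi), &llfn, &[]);
    }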


@@ -592,10 +592,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
fn_abi.apply_attrs_callsite(self, callsite)
}
fn get_param(&mut self, index: usize) -> Self::Value {
llvm::get_param(self.llfn(), index as c_uint)
}


@@ -430,9 +430,9 @@ pub(crate) fn inline_asm_call<'ll>(
);
let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
bx.invoke(fty, v, inputs, dest, catch, funclet)
bx.invoke(fty, None, v, inputs, dest, catch, funclet)
} else {
bx.call(fty, v, inputs, None)
bx.call(fty, None, v, inputs, None)
};
// Store mark in a metadata node so we can map LLVM errors


@@ -19,6 +19,8 @@ use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
use cstr::cstr;
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::*;
@@ -107,11 +109,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
}
// Create the llvm.used and llvm.compiler.used variables.
if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
if !cx.used_statics.borrow().is_empty() {
cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow());
}
if !cx.compiler_used_statics().borrow().is_empty() {
cx.create_compiler_used_variable()
if !cx.compiler_used_statics.borrow().is_empty() {
cx.create_used_variable_impl(
cstr!("llvm.compiler.used"),
&*cx.compiler_used_statics.borrow(),
);
}
// Run replace-all-uses-with for statics that need it. This must


@@ -1,3 +1,4 @@
use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
@@ -214,6 +215,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn invoke(
&mut self,
llty: &'ll Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
@@ -226,7 +228,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
let invoke = unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
llty,
@@ -238,7 +240,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bundle,
UNNAMED,
)
};
if let Some(fn_abi) = fn_abi {
fn_abi.apply_attrs_callsite(self, invoke);
}
invoke
}
fn unreachable(&mut self) {
@@ -405,20 +411,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
bx.dynamic_alloca(ty, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
unsafe {
let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
unsafe {
let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
let alloca =
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
@@ -1145,6 +1148,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn call(
&mut self,
llty: &'ll Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>,
@@ -1155,7 +1159,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe {
let call = unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
llty,
@@ -1164,7 +1168,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args.len() as c_uint,
bundle,
)
};
if let Some(fn_abi) = fn_abi {
fn_abi.apply_attrs_callsite(self, call);
}
call
}
fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1397,7 +1405,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(intrinsic);
self.call(ty, f, args, None)
self.call(ty, None, f, args, None)
}
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@@ -1459,7 +1467,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
};
let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None)
self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
}
pub(crate) fn landing_pad(
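
Two smaller builder cleanups show up in this file as well: alloca still emits into the function's first (entry) basic block, where LLVM's mem2reg pass can promote the slot to a register, but it now inlines what the removed dynamic_alloca hook used to do; and array_alloca is narrowed to byte_array_alloca, which hard-codes an i8 element type. A stand-in sketch of that narrowing, under the assumption that the remaining callers only ever allocated byte buffers:

    // Stand-in sketch; unit types replace the real LLVM handles so it compiles on its own.
    struct Ty;   // stands in for &'ll Type
    struct Val;  // stands in for &'ll Value

    struct Builder;

    impl Builder {
        fn type_i8(&self) -> Ty { Ty }

        // The old, generic form replaced in this diff: the caller picked the element type.
        fn array_alloca(&mut self, _elem_ty: Ty, _len: Val, _align_bytes: u64) -> Val { Val }

        // The replacement: the element type is fixed to i8.
        fn byte_array_alloca(&mut self, len: Val, align_bytes: u64) -> Val {
            let i8_ty = self.type_i8();
            self.array_alloca(i8_ty, len, align_bytes)
        }
    }

    fn main() {
        let mut bx = Builder;
        let len = Val;
        // What used to be bx.array_alloca(bx.type_i8(), len, 1) becomes:
        let _buf = bx.byte_array_alloca(len, 1);
    }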


@@ -458,7 +458,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
self.coverage_cx.as_ref()
}
fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
let section = cstr!("llvm.metadata");
let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
@@ -556,14 +556,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.codegen_unit
}
fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.used_statics
}
fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.compiler_used_statics
}
fn set_frame_pointer_type(&self, llfn: &'ll Value) {
if let Some(attr) = attributes::frame_pointer_type_attr(self) {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
@@ -577,17 +569,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
}
fn create_used_variable(&self) {
self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
}
fn create_compiler_used_variable(&self) {
self.create_used_variable_impl(
cstr!("llvm.compiler.used"),
&*self.compiler_used_statics.borrow(),
);
}
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() {
Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))


@@ -108,6 +108,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let (simple_ty, simple_fn) = simple.unwrap();
self.call(
simple_ty,
None,
simple_fn,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
@@ -435,7 +436,7 @@ fn try_intrinsic<'ll>(
) {
if bx.sess().panic_strategy() == PanicStrategy::Abort {
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.call(try_func_ty, try_func, &[data], None);
bx.call(try_func_ty, None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi;
@@ -534,7 +535,7 @@ fn codegen_msvc_try<'ll>(
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None);
bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None);
bx.switch_to_block(normal);
bx.ret(bx.const_i32(0));
@@ -578,7 +579,7 @@ fn codegen_msvc_try<'ll>(
let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet));
bx.catch_ret(&funclet, caught);
// The flag value of 64 indicates a "catch-all".
@@ -586,7 +587,7 @@ fn codegen_msvc_try<'ll>(
let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p());
let funclet = bx.catch_pad(cs, &[null, flags, null]);
bx.call(catch_ty, catch_func, &[data, null], Some(&funclet));
bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet));
bx.catch_ret(&funclet, caught);
bx.switch_to_block(caught);
@@ -595,7 +596,7 @@ fn codegen_msvc_try<'ll>(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -638,7 +639,7 @@ fn codegen_gnu_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
@@ -656,13 +657,13 @@ fn codegen_gnu_try<'ll>(
bx.add_clause(vals, tydesc);
let ptr = bx.extract_value(vals, 0);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, ptr], None);
bx.call(catch_ty, None, catch_func, &[data, ptr], None);
bx.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -702,7 +703,7 @@ fn codegen_emcc_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
@@ -741,13 +742,13 @@ fn codegen_emcc_try<'ll>(
let catch_data = bx.bitcast(catch_data, bx.type_i8p());
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, catch_data], None);
bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
bx.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -1217,8 +1218,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
let c =
bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
let c = bx.call(
fn_ty,
None,
f,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
);
Ok(c)
}
@@ -1417,8 +1423,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
llvm_elem_vec_ty,
);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v =
bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
let v = bx.call(
fn_ty,
None,
f,
&[args[1].immediate(), alignment, mask, args[0].immediate()],
None,
);
return Ok(v);
}
@@ -1543,8 +1554,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v =
bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
let v = bx.call(
fn_ty,
None,
f,
&[args[0].immediate(), args[1].immediate(), alignment, mask],
None,
);
return Ok(v);
}
@@ -1992,7 +2008,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(fn_ty, f, &[lhs, rhs], None);
let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
return Ok(v);
}


@@ -131,12 +131,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
) -> TargetMachineFactoryFn<Self> {
back::write::target_machine_factory(sess, optlvl, target_features)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
llvm_util::tune_cpu(sess)
}
fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
where
@@ -170,7 +164,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;