Auto merge of #102551 - bjorn3:cg_ssa_cleanup, r=davidtwco

Some more cleanup for rustc_codegen_ssa

With the aim of making non-LLVM-like backends, such as Cranelift, easier to support using cg_ssa.
This commit is contained in:
bors 2022-10-03 11:02:58 +00:00
commit f47e9af824
22 changed files with 115 additions and 131 deletions

View file

@ -11,10 +11,6 @@ use crate::intrinsic::ArgAbiExt;
use crate::type_of::LayoutGccExt; use crate::type_of::LayoutGccExt;
impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
// TODO(antoyo)
}
fn get_param(&mut self, index: usize) -> Self::Value { fn get_param(&mut self, index: usize) -> Self::Value {
let func = self.current_func(); let func = self.current_func();
let param = func.get_param(index as i32); let param = func.get_param(index as i32);

View file

@ -498,7 +498,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
if options.contains(InlineAsmOptions::NORETURN) { if options.contains(InlineAsmOptions::NORETURN) {
let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable"); let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) }; let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
self.call(self.type_void(), builtin_unreachable, &[], None); self.call(self.type_void(), None, builtin_unreachable, &[], None);
} }
// Write results to outputs. // Write results to outputs.

View file

@ -444,11 +444,23 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.block.end_with_switch(None, value, default_block, &gcc_cases); self.block.end_with_switch(None, value, default_block, &gcc_cases);
} }
fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { fn invoke(
&mut self,
typ: Type<'gcc>,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
func: RValue<'gcc>,
args: &[RValue<'gcc>],
then: Block<'gcc>,
catch: Block<'gcc>,
_funclet: Option<&Funclet>,
) -> RValue<'gcc> {
// TODO(bjorn3): Properly implement unwinding. // TODO(bjorn3): Properly implement unwinding.
let call_site = self.call(typ, func, args, None); let call_site = self.call(typ, None, func, args, None);
let condition = self.context.new_rvalue_from_int(self.bool_type, 1); let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
self.llbb().end_with_conditional(None, condition, then, catch); self.llbb().end_with_conditional(None, condition, then, catch);
if let Some(_fn_abi) = fn_abi {
// TODO(bjorn3): Apply function attributes
}
call_site call_site
} }
@ -643,11 +655,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
} }
fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
}
fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
} }
@ -1227,16 +1235,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// TODO(antoyo) // TODO(antoyo)
} }
fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { fn call(
&mut self,
_typ: Type<'gcc>,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
func: RValue<'gcc>,
args: &[RValue<'gcc>],
funclet: Option<&Funclet>,
) -> RValue<'gcc> {
// FIXME(antoyo): remove when having a proper API. // FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute(func) }; let gcc_func = unsafe { std::mem::transmute(func) };
if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { let call = if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
self.function_call(func, args, funclet) self.function_call(func, args, funclet)
} }
else { else {
// If it's a not function that was defined, it's a function pointer. // If it's a not function that was defined, it's a function pointer.
self.function_ptr_call(func, args, funclet) self.function_ptr_call(func, args, funclet)
};
if let Some(_fn_abi) = fn_abi {
// TODO(bjorn3): Apply function attributes
} }
call
} }
fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {

View file

@ -416,10 +416,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
self.codegen_unit self.codegen_unit
} }
fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
unimplemented!();
}
fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) { fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
// TODO(antoyo) // TODO(antoyo)
} }
@ -428,10 +424,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
// TODO(antoyo) // TODO(antoyo)
} }
fn create_used_variable(&self) {
unimplemented!();
}
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() { if self.get_declared_value("main").is_none() {
Some(self.declare_cfn("main", fn_type)) Some(self.declare_cfn("main", fn_type))
@ -443,14 +435,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
None None
} }
} }
fn compiler_used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
unimplemented!()
}
fn create_compiler_used_variable(&self) {
unimplemented!()
}
} }
impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {

View file

@ -100,7 +100,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
_ if simple.is_some() => { _ if simple.is_some() => {
// FIXME(antoyo): remove this cast when the API supports function. // FIXME(antoyo): remove this cast when the API supports function.
let func = unsafe { std::mem::transmute(simple.expect("simple")) }; let func = unsafe { std::mem::transmute(simple.expect("simple")) };
self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None) self.call(self.type_void(), None, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
}, },
sym::likely => { sym::likely => {
self.expect(args[0].immediate(), true) self.expect(args[0].immediate(), true)
@ -341,7 +341,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn abort(&mut self) { fn abort(&mut self) {
let func = self.context.get_builtin_function("abort"); let func = self.context.get_builtin_function("abort");
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) }; let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
self.call(self.type_void(), func, &[], None); self.call(self.type_void(), None, func, &[], None);
} }
fn assume(&mut self, value: Self::Value) { fn assume(&mut self, value: Self::Value) {
@ -1124,7 +1124,7 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<
// NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too
if bx.sess().panic_strategy() == PanicStrategy::Abort || true { if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
// TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done. // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
bx.call(bx.type_void(), try_func, &[data], None); bx.call(bx.type_void(), None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call; // Return 0 unconditionally from the intrinsic call;
// we can never unwind. // we can never unwind.
let ret_align = bx.tcx.data_layout.i32_align.abi; let ret_align = bx.tcx.data_layout.i32_align.abi;

View file

@ -461,7 +461,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx);
let function: RValue<'gcc> = unsafe { std::mem::transmute(function) }; let function: RValue<'gcc> = unsafe { std::mem::transmute(function) };
let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); let c = bx.call(fn_ty, None, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c) Ok(c)
} }

View file

@ -171,15 +171,6 @@ impl ExtraBackendMethods for GccCodegenBackend {
Ok(()) Ok(())
}) })
} }
fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
unimplemented!();
}
fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
None
// TODO(antoyo)
}
} }
pub struct ModuleBuffer; pub struct ModuleBuffer;
@ -210,7 +201,6 @@ impl WriteBackendMethods for GccCodegenBackend {
type Module = GccContext; type Module = GccContext;
type TargetMachine = (); type TargetMachine = ();
type ModuleBuffer = ModuleBuffer; type ModuleBuffer = ModuleBuffer;
type Context = ();
type ThinData = (); type ThinData = ();
type ThinBuffer = ThinBuffer; type ThinBuffer = ThinBuffer;

View file

@ -592,10 +592,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
} }
impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
fn_abi.apply_attrs_callsite(self, callsite)
}
fn get_param(&mut self, index: usize) -> Self::Value { fn get_param(&mut self, index: usize) -> Self::Value {
llvm::get_param(self.llfn(), index as c_uint) llvm::get_param(self.llfn(), index as c_uint)
} }

View file

@ -430,9 +430,9 @@ pub(crate) fn inline_asm_call<'ll>(
); );
let call = if let Some((dest, catch, funclet)) = dest_catch_funclet { let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
bx.invoke(fty, v, inputs, dest, catch, funclet) bx.invoke(fty, None, v, inputs, dest, catch, funclet)
} else { } else {
bx.call(fty, v, inputs, None) bx.call(fty, None, v, inputs, None)
}; };
// Store mark in a metadata node so we can map LLVM errors // Store mark in a metadata node so we can map LLVM errors

View file

@ -19,6 +19,8 @@ use crate::context::CodegenCx;
use crate::llvm; use crate::llvm;
use crate::value::Value; use crate::value::Value;
use cstr::cstr;
use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::traits::*;
@ -107,11 +109,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
} }
// Create the llvm.used and llvm.compiler.used variables. // Create the llvm.used and llvm.compiler.used variables.
if !cx.used_statics().borrow().is_empty() { if !cx.used_statics.borrow().is_empty() {
cx.create_used_variable() cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow());
} }
if !cx.compiler_used_statics().borrow().is_empty() { if !cx.compiler_used_statics.borrow().is_empty() {
cx.create_compiler_used_variable() cx.create_used_variable_impl(
cstr!("llvm.compiler.used"),
&*cx.compiler_used_statics.borrow(),
);
} }
// Run replace-all-uses-with for statics that need it. This must // Run replace-all-uses-with for statics that need it. This must

View file

@ -1,3 +1,4 @@
use crate::abi::FnAbiLlvmExt;
use crate::attributes; use crate::attributes;
use crate::common::Funclet; use crate::common::Funclet;
use crate::context::CodegenCx; use crate::context::CodegenCx;
@ -214,6 +215,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn invoke( fn invoke(
&mut self, &mut self,
llty: &'ll Type, llty: &'ll Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value, llfn: &'ll Value,
args: &[&'ll Value], args: &[&'ll Value],
then: &'ll BasicBlock, then: &'ll BasicBlock,
@ -226,7 +228,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw); let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe { let invoke = unsafe {
llvm::LLVMRustBuildInvoke( llvm::LLVMRustBuildInvoke(
self.llbuilder, self.llbuilder,
llty, llty,
@ -238,7 +240,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bundle, bundle,
UNNAMED, UNNAMED,
) )
};
if let Some(fn_abi) = fn_abi {
fn_abi.apply_attrs_callsite(self, invoke);
} }
invoke
} }
fn unreachable(&mut self) { fn unreachable(&mut self) {
@ -405,20 +411,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx); let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
bx.dynamic_alloca(ty, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
unsafe { unsafe {
let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca alloca
} }
} }
fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value { fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
unsafe { unsafe {
let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); let alloca =
llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca alloca
} }
@ -1145,6 +1148,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn call( fn call(
&mut self, &mut self,
llty: &'ll Type, llty: &'ll Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value, llfn: &'ll Value,
args: &[&'ll Value], args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>, funclet: Option<&Funclet<'ll>>,
@ -1155,7 +1159,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw); let bundle = bundle.as_ref().map(|b| &*b.raw);
unsafe { let call = unsafe {
llvm::LLVMRustBuildCall( llvm::LLVMRustBuildCall(
self.llbuilder, self.llbuilder,
llty, llty,
@ -1164,7 +1168,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args.len() as c_uint, args.len() as c_uint,
bundle, bundle,
) )
};
if let Some(fn_abi) = fn_abi {
fn_abi.apply_attrs_callsite(self, call);
} }
call
} }
fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@ -1397,7 +1405,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(intrinsic); let (ty, f) = self.cx.get_intrinsic(intrinsic);
self.call(ty, f, args, None) self.call(ty, None, f, args, None)
} }
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@ -1459,7 +1467,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width) format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
}; };
let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty)); let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None) self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
} }
pub(crate) fn landing_pad( pub(crate) fn landing_pad(

View file

@ -458,7 +458,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
self.coverage_cx.as_ref() self.coverage_cx.as_ref()
} }
fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
let section = cstr!("llvm.metadata"); let section = cstr!("llvm.metadata");
let array = self.const_array(self.type_ptr_to(self.type_i8()), values); let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
@ -556,14 +556,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.codegen_unit self.codegen_unit
} }
fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.used_statics
}
fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.compiler_used_statics
}
fn set_frame_pointer_type(&self, llfn: &'ll Value) { fn set_frame_pointer_type(&self, llfn: &'ll Value) {
if let Some(attr) = attributes::frame_pointer_type_attr(self) { if let Some(attr) = attributes::frame_pointer_type_attr(self) {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
@ -577,17 +569,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
} }
fn create_used_variable(&self) {
self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
}
fn create_compiler_used_variable(&self) {
self.create_used_variable_impl(
cstr!("llvm.compiler.used"),
&*self.compiler_used_statics.borrow(),
);
}
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() { if self.get_declared_value("main").is_none() {
Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type)) Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))

View file

@ -108,6 +108,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let (simple_ty, simple_fn) = simple.unwrap(); let (simple_ty, simple_fn) = simple.unwrap();
self.call( self.call(
simple_ty, simple_ty,
None,
simple_fn, simple_fn,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None, None,
@ -435,7 +436,7 @@ fn try_intrinsic<'ll>(
) { ) {
if bx.sess().panic_strategy() == PanicStrategy::Abort { if bx.sess().panic_strategy() == PanicStrategy::Abort {
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.call(try_func_ty, try_func, &[data], None); bx.call(try_func_ty, None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call; // Return 0 unconditionally from the intrinsic call;
// we can never unwind. // we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi; let ret_align = bx.tcx().data_layout.i32_align.abi;
@ -534,7 +535,7 @@ fn codegen_msvc_try<'ll>(
let ptr_align = bx.tcx().data_layout.pointer_align.abi; let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align); let slot = bx.alloca(bx.type_i8p(), ptr_align);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None); bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None);
bx.switch_to_block(normal); bx.switch_to_block(normal);
bx.ret(bx.const_i32(0)); bx.ret(bx.const_i32(0));
@ -578,7 +579,7 @@ fn codegen_msvc_try<'ll>(
let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = bx.load(bx.type_i8p(), slot, ptr_align); let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet)); bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet));
bx.catch_ret(&funclet, caught); bx.catch_ret(&funclet, caught);
// The flag value of 64 indicates a "catch-all". // The flag value of 64 indicates a "catch-all".
@ -586,7 +587,7 @@ fn codegen_msvc_try<'ll>(
let flags = bx.const_i32(64); let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p()); let null = bx.const_null(bx.type_i8p());
let funclet = bx.catch_pad(cs, &[null, flags, null]); let funclet = bx.catch_pad(cs, &[null, flags, null]);
bx.call(catch_ty, catch_func, &[data, null], Some(&funclet)); bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet));
bx.catch_ret(&funclet, caught); bx.catch_ret(&funclet, caught);
bx.switch_to_block(caught); bx.switch_to_block(caught);
@ -595,7 +596,7 @@ fn codegen_msvc_try<'ll>(
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi; let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align); bx.store(ret, dest, i32_align);
} }
@ -638,7 +639,7 @@ fn codegen_gnu_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1); let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2); let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then, catch, None); bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then); bx.switch_to_block(then);
bx.ret(bx.const_i32(0)); bx.ret(bx.const_i32(0));
@ -656,13 +657,13 @@ fn codegen_gnu_try<'ll>(
bx.add_clause(vals, tydesc); bx.add_clause(vals, tydesc);
let ptr = bx.extract_value(vals, 0); let ptr = bx.extract_value(vals, 0);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, ptr], None); bx.call(catch_ty, None, catch_func, &[data, ptr], None);
bx.ret(bx.const_i32(1)); bx.ret(bx.const_i32(1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi; let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align); bx.store(ret, dest, i32_align);
} }
@ -702,7 +703,7 @@ fn codegen_emcc_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1); let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2); let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
bx.invoke(try_func_ty, try_func, &[data], then, catch, None); bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then); bx.switch_to_block(then);
bx.ret(bx.const_i32(0)); bx.ret(bx.const_i32(0));
@ -741,13 +742,13 @@ fn codegen_emcc_try<'ll>(
let catch_data = bx.bitcast(catch_data, bx.type_i8p()); let catch_data = bx.bitcast(catch_data, bx.type_i8p());
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
bx.call(catch_ty, catch_func, &[data, catch_data], None); bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
bx.ret(bx.const_i32(1)); bx.ret(bx.const_i32(1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi; let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align); bx.store(ret, dest, i32_align);
} }
@ -1217,8 +1218,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}; };
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty); let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
let c = let c = bx.call(
bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); fn_ty,
None,
f,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
);
Ok(c) Ok(c)
} }
@ -1417,8 +1423,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
llvm_elem_vec_ty, llvm_elem_vec_ty,
); );
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = let v = bx.call(
bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); fn_ty,
None,
f,
&[args[1].immediate(), alignment, mask, args[0].immediate()],
None,
);
return Ok(v); return Ok(v);
} }
@ -1543,8 +1554,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let fn_ty = let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t); bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = let v = bx.call(
bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); fn_ty,
None,
f,
&[args[0].immediate(), args[1].immediate(), alignment, mask],
None,
);
return Ok(v); return Ok(v);
} }
@ -1992,7 +2008,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty); let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
let v = bx.call(fn_ty, f, &[lhs, rhs], None); let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
return Ok(v); return Ok(v);
} }

View file

@ -131,12 +131,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
) -> TargetMachineFactoryFn<Self> { ) -> TargetMachineFactoryFn<Self> {
back::write::target_machine_factory(sess, optlvl, target_features) back::write::target_machine_factory(sess, optlvl, target_features)
} }
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
llvm_util::tune_cpu(sess)
}
fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T> fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
where where
@ -170,7 +164,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
impl WriteBackendMethods for LlvmCodegenBackend { impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm; type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer; type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine; type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData; type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer; type ThinBuffer = back::lto::ThinBuffer;

View file

@ -474,7 +474,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(rust_main, start_ty, vec![arg_argc, arg_argv]) (rust_main, start_ty, vec![arg_argc, arg_argv])
}; };
let result = bx.call(start_ty, start_fn, &args, None); let result = bx.call(start_ty, None, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true); let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast); bx.ret(cast);

View file

@ -162,9 +162,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} else { } else {
fx.unreachable_block() fx.unreachable_block()
}; };
let invokeret = let invokeret = bx.invoke(
bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx)); fn_ty,
bx.apply_attrs_callsite(&fn_abi, invokeret); Some(&fn_abi),
fn_ptr,
&llargs,
ret_llbb,
unwind_block,
self.funclet(fx),
);
if fx.mir[self.bb].is_cleanup { if fx.mir[self.bb].is_cleanup {
bx.do_not_inline(invokeret); bx.do_not_inline(invokeret);
} }
@ -178,8 +184,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret); fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
} }
} else { } else {
let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx)); let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[self.bb].is_cleanup { if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline // Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested // drop glue. Also, when there is a deeply-nested
@ -1533,8 +1538,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind); let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi); let fn_ty = bx.fn_decl_backend_type(&fn_abi);
let llret = bx.call(fn_ty, fn_ptr, &[], None); let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
bx.apply_attrs_callsite(&fn_abi, llret);
bx.do_not_inline(llret); bx.do_not_inline(llret);
bx.unreachable(); bx.unreachable();

View file

@ -352,7 +352,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
// Allocate an appropriate region on the stack, and copy the value into it // Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align); let lldst = bx.byte_array_alloca(llsize, max_align);
bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place. // Store the allocated region and the extra to the indirect place.

View file

@ -1,8 +1,5 @@
use super::BackendTypes; use super::BackendTypes;
use rustc_middle::ty::Ty;
use rustc_target::abi::call::FnAbi;
pub trait AbiBuilderMethods<'tcx>: BackendTypes { pub trait AbiBuilderMethods<'tcx>: BackendTypes {
fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
fn get_param(&mut self, index: usize) -> Self::Value; fn get_param(&mut self, index: usize) -> Self::Value;
} }

View file

@ -134,8 +134,6 @@ pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Se
opt_level: config::OptLevel, opt_level: config::OptLevel,
target_features: &[String], target_features: &[String],
) -> TargetMachineFactoryFn<Self>; ) -> TargetMachineFactoryFn<Self>;
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T> fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
where where

View file

@ -17,6 +17,7 @@ use crate::MemFlags;
use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout}; use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
use rustc_middle::ty::Ty; use rustc_middle::ty::Ty;
use rustc_span::Span; use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange}; use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use rustc_target::spec::HasTargetSpec; use rustc_target::spec::HasTargetSpec;
@ -71,6 +72,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn invoke( fn invoke(
&mut self, &mut self,
llty: Self::Type, llty: Self::Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: Self::Value, llfn: Self::Value,
args: &[Self::Value], args: &[Self::Value],
then: Self::BasicBlock, then: Self::BasicBlock,
@ -133,8 +135,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value; fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value; fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value; fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
@ -320,6 +321,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn call( fn call(
&mut self, &mut self,
llty: Self::Type, llty: Self::Type,
fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: Self::Value, llfn: Self::Value,
args: &[Self::Value], args: &[Self::Value],
funclet: Option<&Self::Funclet>, funclet: Option<&Self::Funclet>,

View file

@ -15,12 +15,8 @@ pub trait MiscMethods<'tcx>: BackendTypes {
fn eh_personality(&self) -> Self::Value; fn eh_personality(&self) -> Self::Value;
fn sess(&self) -> &Session; fn sess(&self) -> &Session;
fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>; fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
fn compiler_used_statics(&self) -> &RefCell<Vec<Self::Value>>;
fn set_frame_pointer_type(&self, llfn: Self::Function); fn set_frame_pointer_type(&self, llfn: Self::Function);
fn apply_target_cpu_attr(&self, llfn: Self::Function); fn apply_target_cpu_attr(&self, llfn: Self::Function);
fn create_used_variable(&self);
fn create_compiler_used_variable(&self);
/// Declares the extern "C" main function for the entry point. Returns None if the symbol already exists. /// Declares the extern "C" main function for the entry point. Returns None if the symbol already exists.
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>; fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>;
} }

View file

@ -9,7 +9,6 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
type Module: Send + Sync; type Module: Send + Sync;
type TargetMachine; type TargetMachine;
type ModuleBuffer: ModuleBufferMethods; type ModuleBuffer: ModuleBufferMethods;
type Context: ?Sized;
type ThinData: Send + Sync; type ThinData: Send + Sync;
type ThinBuffer: ThinBufferMethods; type ThinBuffer: ThinBufferMethods;