Merge commit '49cd5dd454' into sync_cg_clif-2024-06-30

bjorn3 2024-06-30 11:28:14 +00:00
commit 9ec6a02ab3
23 changed files with 441 additions and 292 deletions

View file

@@ -5,8 +5,9 @@ mod pass_mode;
mod returning;
use std::borrow::Cow;
use std::mem;
use cranelift_codegen::ir::SigRef;
use cranelift_codegen::ir::{ArgumentPurpose, SigRef};
use cranelift_codegen::isa::CallConv;
use cranelift_module::ModuleError;
use rustc_codegen_ssa::errors::CompilerBuiltinsCannotCall;
@@ -17,7 +18,7 @@ use rustc_middle::ty::TypeVisitableExt;
use rustc_monomorphize::is_call_from_compiler_builtins_to_upstream_monomorphization;
use rustc_session::Session;
use rustc_span::source_map::Spanned;
use rustc_target::abi::call::{Conv, FnAbi};
use rustc_target::abi::call::{Conv, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;
use self::pass_mode::*;
@@ -487,6 +488,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let args = args;
assert_eq!(fn_abi.args.len(), args.len());
#[derive(Copy, Clone)]
enum CallTarget {
Direct(FuncRef),
Indirect(SigRef, Value),
@@ -532,7 +534,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
};
self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
let call_args = return_ptr
let mut call_args = return_ptr
.into_iter()
.chain(first_arg_override.into_iter())
.chain(
@@ -545,40 +547,17 @@ pub(crate) fn codegen_terminator_call<'tcx>(
)
.collect::<Vec<Value>>();
let call_inst = match func_ref {
// FIXME: Find a cleaner way to support varargs.
if fn_abi.c_variadic {
adjust_call_for_c_variadic(fx, &fn_abi, source_info, func_ref, &mut call_args);
}
match func_ref {
CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
CallTarget::Indirect(sig, func_ptr) => {
fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
}
};
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic() {
if !matches!(fn_sig.abi(), Abi::C { .. }) {
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Variadic call for non-C abi {:?}", fn_sig.abi()),
);
}
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args
.into_iter()
.map(|arg| {
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Non int ty {:?} for variadic call", ty),
);
}
AbiParam::new(ty)
})
.collect::<Vec<AbiParam>>();
fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
}
call_inst
});
if let Some(dest) = target {
@@ -587,6 +566,100 @@ pub(crate) fn codegen_terminator_call<'tcx>(
} else {
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
fn adjust_call_for_c_variadic<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
source_info: mir::SourceInfo,
target: CallTarget,
call_args: &mut Vec<Value>,
) {
if fn_abi.conv != Conv::C {
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Variadic call for non-C abi {:?}", fn_abi.conv),
);
}
let sig_ref = match target {
CallTarget::Direct(func_ref) => fx.bcx.func.dfg.ext_funcs[func_ref].signature,
CallTarget::Indirect(sig_ref, _) => sig_ref,
};
// `mem::take()` the `params` so that `fx.bcx` can be used below.
let mut abi_params = mem::take(&mut fx.bcx.func.dfg.signatures[sig_ref].params);
// Recalculate the parameters in the signature to ensure the signature contains the variadic arguments.
let has_return_arg = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
// Drop everything except the return argument (if there is one).
abi_params.truncate(if has_return_arg { 1 } else { 0 });
// Add the fixed arguments.
abi_params.extend(
fn_abi.args[..fn_abi.fixed_count as usize]
.iter()
.flat_map(|arg_abi| arg_abi.get_abi_param(fx.tcx).into_iter()),
);
let fixed_arg_count = abi_params.len();
// Add the variadic arguments.
abi_params.extend(
fn_abi.args[fn_abi.fixed_count as usize..]
.iter()
.flat_map(|arg_abi| arg_abi.get_abi_param(fx.tcx).into_iter()),
);
if fx.tcx.sess.target.is_like_osx && fx.tcx.sess.target.arch == "aarch64" {
// Add any padding arguments needed for Apple AArch64.
// There's no need to pad the argument list unless variadic arguments are actually being
// passed.
if abi_params.len() > fixed_arg_count {
// 128-bit integers take 2 registers, and everything else takes 1.
// FIXME: Add support for non-integer types
// This relies on the checks below to ensure all arguments are integer types and
// that the ABI is "C".
// The return argument isn't counted as it goes in its own dedicated register.
let integer_registers_used: usize = abi_params
[if has_return_arg { 1 } else { 0 }..fixed_arg_count]
.iter()
.map(|arg| if arg.value_type.bits() == 128 { 2 } else { 1 })
.sum();
// The ABI uses 8 registers before it starts pushing arguments to the stack. Pad out
// the registers if needed to ensure the variadic arguments are passed on the stack.
if integer_registers_used < 8 {
abi_params.splice(
fixed_arg_count..fixed_arg_count,
(integer_registers_used..8).map(|_| AbiParam::new(types::I64)),
);
call_args.splice(
fixed_arg_count..fixed_arg_count,
(integer_registers_used..8).map(|_| fx.bcx.ins().iconst(types::I64, 0)),
);
}
}
// `StructArgument` is not currently used by the `aarch64` ABI, and is therefore not
// handled when calculating how many padding arguments to use. Assert that this remains
// the case.
assert!(abi_params.iter().all(|param| matches!(
param.purpose,
// The only purposes used are `Normal` and `StructReturn`.
ArgumentPurpose::Normal | ArgumentPurpose::StructReturn
)));
}
// Check all parameters are integers.
for param in abi_params.iter() {
if !param.value_type.is_int() {
// FIXME: Set %al to upperbound on float args once floats are supported.
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Non int ty {:?} for variadic call", param.value_type),
);
}
}
assert_eq!(abi_params.len(), call_args.len());
// Put the `AbiParam`s back in the signature.
fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
}
}
pub(crate) fn codegen_drop<'tcx>(
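
A note on the hunk above: Apple's AArch64 calling convention always passes variadic arguments on the stack, so the new code counts how many of the eight integer argument registers (x0-x7) the fixed arguments occupy and splices in dummy i64 values for the rest, forcing the variadic arguments to spill where the callee expects them. A minimal, self-contained sketch of that count (the helper and its inputs are hypothetical, not part of the diff; the indirect return pointer is excluded since it uses its own dedicated register):

fn apple_aarch64_padding_args(fixed_arg_bit_widths: &[u32]) -> usize {
    // As in the diff: 128-bit integers take two argument registers, everything else one.
    let integer_registers_used: usize = fixed_arg_bit_widths
        .iter()
        .map(|&bits| if bits == 128 { 2 } else { 1 })
        .sum();
    // Pad out any unused registers so the variadic arguments end up on the stack.
    8usize.saturating_sub(integer_registers_used)
}

fn main() {
    // e.g. printf(fmt, ...): one fixed pointer argument leaves seven registers to pad.
    assert_eq!(apple_aarch64_padding_args(&[64]), 7);
    // An i128 plus two i64 fixed arguments occupy four registers, so four pads are added.
    assert_eq!(apple_aarch64_padding_args(&[128, 64, 64]), 4);
}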

View file

@@ -11,15 +11,10 @@ use rustc_session::config::OomStrategy;
use crate::prelude::*;
/// Returns whether an allocator shim was created
pub(crate) fn codegen(
tcx: TyCtxt<'_>,
module: &mut impl Module,
unwind_context: &mut UnwindContext,
) -> bool {
pub(crate) fn codegen(tcx: TyCtxt<'_>, module: &mut dyn Module) -> bool {
let Some(kind) = allocator_kind_for_codegen(tcx) else { return false };
codegen_inner(
module,
unwind_context,
kind,
tcx.alloc_error_handler_kind(()).unwrap(),
tcx.sess.opts.unstable_opts.oom,
@@ -28,8 +23,7 @@ pub(crate) fn codegen(
}
fn codegen_inner(
module: &mut impl Module,
unwind_context: &mut UnwindContext,
module: &mut dyn Module,
kind: AllocatorKind,
alloc_error_handler_kind: AllocatorKind,
oom_strategy: OomStrategy,
@@ -67,7 +61,6 @@ fn codegen_inner(
};
crate::common::create_wrapper_function(
module,
unwind_context,
sig,
&global_fn_name(method.name),
&default_fn_name(method.name),
@@ -82,7 +75,6 @@ };
};
crate::common::create_wrapper_function(
module,
unwind_context,
sig,
"__rust_alloc_error_handler",
&alloc_error_handler_name(alloc_error_handler_kind),

View file

@@ -249,9 +249,7 @@ pub(crate) fn compile_fn(
}
// Define debuginfo for function
let isa = module.isa();
let debug_context = &mut cx.debug_context;
let unwind_context = &mut cx.unwind_context;
cx.profiler.generic_activity("generate debug info").run(|| {
if let Some(debug_context) = debug_context {
codegened_func.func_debug_cx.unwrap().finalize(
@@ -260,7 +258,6 @@ pub(crate) fn compile_fn(
context,
);
}
unwind_context.add_function(codegened_func.func_id, &context, isa);
});
}
@@ -909,7 +906,7 @@ fn codegen_stmt<'tcx>(
| StatementKind::PlaceMention(..)
| StatementKind::AscribeUserType(..) => {}
StatementKind::Coverage { .. } => fx.tcx.dcx().fatal("-Zcoverage is unimplemented"),
StatementKind::Coverage { .. } => unreachable!(),
StatementKind::Intrinsic(ref intrinsic) => match &**intrinsic {
// We ignore `assume` intrinsics, they are only useful for optimizations
NonDivergingIntrinsic::Assume(_) => {}

View file

@@ -247,7 +247,6 @@ pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
pub(crate) fn create_wrapper_function(
module: &mut dyn Module,
unwind_context: &mut UnwindContext,
sig: Signature,
wrapper_name: &str,
callee_name: &str,
@@ -280,7 +279,6 @@ pub(crate) fn create_wrapper_function(
bcx.finalize();
}
module.define_function(wrapper_func_id, &mut ctx).unwrap();
unwind_context.add_function(wrapper_func_id, &ctx, module.isa());
}
pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
@@ -395,6 +393,7 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
// FIXME Don't force the size to a multiple of <abi_align> bytes once Cranelift gets
// a way to specify stack slot alignment.
size: (size + abi_align - 1) / abi_align * abi_align,
align_shift: 4,
});
Pointer::stack_slot(stack_slot)
} else {
@@ -405,6 +404,7 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
// FIXME Don't force the size to a multiple of <abi_align> bytes once Cranelift gets
// a way to specify stack slot alignment.
size: (size + align) / abi_align * abi_align,
align_shift: 4,
});
let base_ptr = self.bcx.ins().stack_addr(self.pointer_type, stack_slot, 0);
let misalign_offset = self.bcx.ins().urem_imm(base_ptr, i64::from(align));
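
The stack-slot sizing above rounds the requested size up to a multiple of the ABI alignment with the usual add-then-divide trick. A small sketch of that arithmetic (assuming a power-of-two alignment, e.g. 16):

fn align_up(size: u32, abi_align: u32) -> u32 {
    // Adding `abi_align - 1` before the integer division bumps any non-multiple up to
    // the next multiple of `abi_align`; exact multiples pass through unchanged.
    (size + abi_align - 1) / abi_align * abi_align
}

fn main() {
    assert_eq!(align_up(1, 16), 16);
    assert_eq!(align_up(16, 16), 16);
    assert_eq!(align_up(17, 16), 32);
}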

View file

@@ -26,6 +26,7 @@ use rustc_session::Session;
use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken};
use crate::debuginfo::TypeDebugContext;
use crate::global_asm::GlobalAsmConfig;
use crate::unwind_module::UnwindModule;
use crate::{prelude::*, BackendConfig};
struct ModuleCodegenResult {
@@ -318,7 +319,11 @@ fn produce_final_output_artifacts(
// These are used in linking steps and will be cleaned up afterward.
}
fn make_module(sess: &Session, backend_config: &BackendConfig, name: String) -> ObjectModule {
fn make_module(
sess: &Session,
backend_config: &BackendConfig,
name: String,
) -> UnwindModule<ObjectModule> {
let isa = crate::build_isa(sess, backend_config);
let mut builder =
@@ -327,16 +332,15 @@ fn make_module(sess: &Session, backend_config: &BackendConfig, name: String) ->
// is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
// can easily double the amount of time necessary to perform linking.
builder.per_function_section(sess.opts.unstable_opts.function_sections.unwrap_or(false));
ObjectModule::new(builder)
UnwindModule::new(ObjectModule::new(builder), true)
}
fn emit_cgu(
output_filenames: &OutputFilenames,
prof: &SelfProfilerRef,
name: String,
module: ObjectModule,
module: UnwindModule<ObjectModule>,
debug: Option<DebugContext>,
unwind_context: UnwindContext,
global_asm_object_file: Option<PathBuf>,
producer: &str,
) -> Result<ModuleCodegenResult, String> {
@@ -346,8 +350,6 @@ fn emit_cgu(
debug.emit(&mut product);
}
unwind_context.emit(&mut product);
let module_regular = emit_module(
output_filenames,
prof,
@@ -494,7 +496,6 @@ fn module_codegen(
let mut cx = crate::CodegenCx::new(
tcx,
backend_config.clone(),
module.isa(),
tcx.sess.opts.debuginfo != DebugInfo::None,
cgu_name,
@@ -531,13 +532,7 @@ fn module_codegen(
}
}
}
crate::main_shim::maybe_create_entry_wrapper(
tcx,
&mut module,
&mut cx.unwind_context,
false,
cgu.is_primary(),
);
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, false, cgu.is_primary());
let cgu_name = cgu.name().as_str().to_owned();
@@ -571,7 +566,6 @@ fn module_codegen(
cgu_name,
module,
cx.debug_context,
cx.unwind_context,
global_asm_object_file,
&producer,
)
@@ -665,13 +659,10 @@ pub(crate) fn run_aot(
});
let mut allocator_module = make_module(tcx.sess, &backend_config, "allocator_shim".to_string());
let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
let created_alloc_shim =
crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
let created_alloc_shim = crate::allocator::codegen(tcx, &mut allocator_module);
let allocator_module = if created_alloc_shim {
let mut product = allocator_module.finish();
allocator_unwind_context.emit(&mut product);
let product = allocator_module.finish();
match emit_module(
tcx.output_filenames(()),

View file

@@ -14,12 +14,12 @@ use rustc_session::Session;
use rustc_span::Symbol;
use crate::debuginfo::TypeDebugContext;
use crate::unwind_module::UnwindModule;
use crate::{prelude::*, BackendConfig};
use crate::{CodegenCx, CodegenMode};
struct JitState {
backend_config: BackendConfig,
jit_module: JITModule,
jit_module: UnwindModule<JITModule>,
}
thread_local! {
@@ -63,7 +63,7 @@ fn create_jit_module(
tcx: TyCtxt<'_>,
backend_config: &BackendConfig,
hotswap: bool,
) -> (JITModule, CodegenCx) {
) -> (UnwindModule<JITModule>, CodegenCx) {
let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
let isa = crate::build_isa(tcx.sess, backend_config);
@@ -72,17 +72,11 @@ fn create_jit_module(
crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
let mut jit_module = JITModule::new(jit_builder);
let mut jit_module = UnwindModule::new(JITModule::new(jit_builder), false);
let mut cx = crate::CodegenCx::new(
tcx,
backend_config.clone(),
jit_module.isa(),
false,
Symbol::intern("dummy_cgu_name"),
);
let cx = crate::CodegenCx::new(tcx, jit_module.isa(), false, Symbol::intern("dummy_cgu_name"));
crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
crate::allocator::codegen(tcx, &mut jit_module);
(jit_module, cx)
}
@@ -128,7 +122,7 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
);
}
CodegenMode::JitLazy => {
codegen_shim(tcx, &mut cx, &mut cached_context, &mut jit_module, inst)
codegen_shim(tcx, &mut cached_context, &mut jit_module, inst)
}
},
MonoItem::Static(def_id) => {
@@ -146,18 +140,11 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
tcx.dcx().fatal("Inline asm is not supported in JIT mode");
}
crate::main_shim::maybe_create_entry_wrapper(
tcx,
&mut jit_module,
&mut cx.unwind_context,
true,
true,
);
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, true, true);
tcx.dcx().abort_if_errors();
jit_module.finalize_definitions().unwrap();
unsafe { cx.unwind_context.register_jit(&jit_module) };
jit_module.finalize_definitions();
println!(
"Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
@@ -177,12 +164,12 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
call_conv: jit_module.target_config().default_call_conv,
};
let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
let finalized_start: *const u8 = jit_module.module.get_finalized_function(start_func_id);
LAZY_JIT_STATE.with(|lazy_jit_state| {
let mut lazy_jit_state = lazy_jit_state.borrow_mut();
assert!(lazy_jit_state.is_none());
*lazy_jit_state = Some(JitState { backend_config, jit_module });
*lazy_jit_state = Some(JitState { jit_module });
});
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
@@ -268,7 +255,6 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
let mut lazy_jit_state = lazy_jit_state.borrow_mut();
let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
let jit_module = &mut lazy_jit_state.jit_module;
let backend_config = lazy_jit_state.backend_config.clone();
let name = tcx.symbol_name(instance).name;
let sig = crate::abi::get_function_sig(
@@ -278,7 +264,7 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
);
let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
let current_ptr = jit_module.read_got_entry(func_id);
let current_ptr = jit_module.module.read_got_entry(func_id);
// If the function's GOT entry has already been updated to point at something other
// than the shim trampoline, don't re-jit but just return the new pointer instead.
@@ -288,11 +274,10 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
return current_ptr;
}
jit_module.prepare_for_function_redefine(func_id).unwrap();
jit_module.module.prepare_for_function_redefine(func_id).unwrap();
let mut cx = crate::CodegenCx::new(
tcx,
backend_config,
jit_module.isa(),
false,
Symbol::intern("dummy_cgu_name"),
@ -300,9 +285,8 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
codegen_and_compile_fn(tcx, &mut cx, &mut Context::new(), jit_module, instance);
assert!(cx.global_asm.is_empty());
jit_module.finalize_definitions().unwrap();
unsafe { cx.unwind_context.register_jit(&jit_module) };
jit_module.get_finalized_function(func_id)
jit_module.finalize_definitions();
jit_module.module.get_finalized_function(func_id)
})
})
}
@@ -310,7 +294,7 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
fn dep_symbol_lookup_fn(
sess: &Session,
crate_info: CrateInfo,
) -> Box<dyn Fn(&str) -> Option<*const u8>> {
) -> Box<dyn Fn(&str) -> Option<*const u8> + Send> {
use rustc_middle::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
@@ -362,9 +346,8 @@ fn dep_symbol_lookup_fn(
fn codegen_shim<'tcx>(
tcx: TyCtxt<'tcx>,
cx: &mut CodegenCx,
cached_context: &mut Context,
module: &mut JITModule,
module: &mut UnwindModule<JITModule>,
inst: Instance<'tcx>,
) {
let pointer_type = module.target_config().pointer_type();
@@ -413,5 +396,4 @@ fn codegen_shim<'tcx>(
trampoline_builder.ins().return_(&ret_vals);
module.define_function(func_id, context).unwrap();
cx.unwind_context.add_function(func_id, context, module.isa());
}
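
The GOT check in jit_fn above is a compile-once guard: only re-jit when the entry still points at the lazy-compilation trampoline, otherwise reuse the pointer it was already patched to. A rough, self-contained analogue with a plain map standing in for the real GOT (names and addresses here are made up; the real code patches JITModule entries):

use std::collections::HashMap;

const TRAMPOLINE: usize = 0xdead_beef;

fn resolve(got: &mut HashMap<String, usize>, name: &str, compile: impl FnOnce() -> usize) -> usize {
    let current = *got.get(name).unwrap_or(&TRAMPOLINE);
    if current != TRAMPOLINE {
        // Already re-jitted; just return the new pointer instead of compiling again.
        return current;
    }
    let compiled = compile();
    got.insert(name.to_string(), compiled);
    compiled
}

fn main() {
    let mut got = HashMap::new();
    assert_eq!(resolve(&mut got, "foo", || 0x1000), 0x1000);
    // The second call sees the patched entry and skips compilation entirely.
    assert_eq!(resolve(&mut got, "foo", || panic!("should not recompile")), 0x1000);
}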

View file

@@ -113,13 +113,7 @@ pub(crate) fn codegen_inline_asm_terminator<'tcx>(
);
let sig =
get_function_sig(fx.tcx, fx.target_config.default_call_conv, instance);
create_wrapper_function(
fx.module,
&mut fx.cx.unwind_context,
sig,
&wrapper_name,
symbol.name,
);
create_wrapper_function(fx.module, sig, &wrapper_name, symbol.name);
CInlineAsmOperand::Symbol { symbol: wrapper_name }
} else {
@@ -283,13 +277,7 @@ pub(crate) fn codegen_naked_asm<'tcx>(
);
let sig =
get_function_sig(tcx, module.target_config().default_call_conv, instance);
create_wrapper_function(
module,
&mut cx.unwind_context,
sig,
&wrapper_name,
symbol.name,
);
create_wrapper_function(module, sig, &wrapper_name, symbol.name);
CInlineAsmOperand::Symbol { symbol: wrapper_name }
} else {

View file

@@ -459,11 +459,20 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (a); intrinsic);
let a = a.load_scalar(fx);
let value = fx.bcx.ins().x86_cvtt2dq(types::I32X4, a);
let cvalue = CValue::by_val(value, ret.layout());
ret.write_cvalue(fx, cvalue);
}
"llvm.x86.sse2.cvtps2dq" => {
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epi32
intrinsic_args!(fx, args => (a); intrinsic);
let a = a.load_scalar(fx);
// Using inline asm instead of fcvt_to_sint_sat as unrepresentable values are turned
// into 0x80000000 for which Cranelift doesn't have a native instruction.
codegen_inline_asm_inner(
fx,
&[InlineAsmTemplatePiece::String(format!("cvttps2dq xmm0, xmm0"))],
&[InlineAsmTemplatePiece::String(format!("cvtps2dq xmm0, xmm0"))],
&[CInlineAsmOperand::InOut {
reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::xmm0)),
_late: true,
@@ -1416,6 +1425,36 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.write_cvalue(fx, res);
}
"llvm.x86.rdtsc" => {
// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_rdtsc&ig_expand=5273
let res_place = CPlace::new_stack_slot(
fx,
fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u32, fx.tcx.types.u32])),
);
let eax_place = res_place.place_field(fx, FieldIdx::new(0));
let edx_place = res_place.place_field(fx, FieldIdx::new(1));
codegen_inline_asm_inner(
fx,
&[InlineAsmTemplatePiece::String("rdtsc".to_string())],
&[
CInlineAsmOperand::Out {
reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
late: true,
place: Some(eax_place),
},
CInlineAsmOperand::Out {
reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
late: true,
place: Some(edx_place),
},
],
InlineAsmOptions::NOSTACK | InlineAsmOptions::NOMEM,
);
let res = res_place.to_cvalue(fx);
ret.write_cvalue_transmute(fx, res);
}
_ => {
fx.tcx
.dcx()
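
Two architectural details motivate the new hunks above: the SSE2 conversions return the "integer indefinite" value 0x80000000 for NaN or out-of-range inputs (hence the inline asm instead of Cranelift's saturating fcvt_to_sint_sat), and rdtsc returns the 64-bit timestamp counter split across EDX:EAX. Minimal sketches of both behaviours, assuming those x86 semantics (the helper names are illustrative only, not from the diff):

// Scalar model of CVTTPS2DQ for one lane, contrasted with Rust's saturating `as` cast.
fn x86_cvtt(x: f32) -> i32 {
    if x.is_nan() || x >= 2_147_483_648.0 || x < -2_147_483_648.0 {
        i32::MIN // 0x8000_0000, the x86 "integer indefinite" result
    } else {
        x as i32 // in-range values truncate toward zero
    }
}

// RDTSC places the low half of the counter in EAX and the high half in EDX.
fn combine_rdtsc(eax: u32, edx: u32) -> u64 {
    ((edx as u64) << 32) | eax as u64
}

fn main() {
    assert_eq!(x86_cvtt(f32::NAN), i32::MIN);
    assert_eq!(x86_cvtt(3e9), i32::MIN); // a saturating cast would give i32::MAX here
    assert_eq!(3e9f32 as i32, i32::MAX);
    assert_eq!(combine_rdtsc(0x2222_2222, 0x1111_1111), 0x1111_1111_2222_2222);
}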

View file

@@ -79,6 +79,7 @@ mod pretty_clif;
mod toolchain;
mod trap;
mod unsize;
mod unwind_module;
mod value_and_place;
mod vtable;
@@ -130,22 +131,13 @@ struct CodegenCx {
global_asm: String,
inline_asm_index: Cell<usize>,
debug_context: Option<DebugContext>,
unwind_context: UnwindContext,
cgu_name: Symbol,
}
impl CodegenCx {
fn new(
tcx: TyCtxt<'_>,
backend_config: BackendConfig,
isa: &dyn TargetIsa,
debug_info: bool,
cgu_name: Symbol,
) -> Self {
fn new(tcx: TyCtxt<'_>, isa: &dyn TargetIsa, debug_info: bool, cgu_name: Symbol) -> Self {
assert_eq!(pointer_ty(tcx), isa.pointer_type());
let unwind_context =
UnwindContext::new(isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
let debug_context = if debug_info && !tcx.sess.target.options.is_like_windows {
Some(DebugContext::new(tcx, isa, cgu_name.as_str()))
} else {
@@ -158,7 +150,6 @@ impl CodegenCx {
global_asm: String::new(),
inline_asm_index: Cell::new(0),
debug_context,
unwind_context,
cgu_name,
}
}
@@ -175,7 +166,7 @@ impl CodegenBackend for CraneliftCodegenBackend {
}
fn init(&self, sess: &Session) {
use rustc_session::config::Lto;
use rustc_session::config::{InstrumentCoverage, Lto};
match sess.lto() {
Lto::No | Lto::ThinLocal => {}
Lto::Thin | Lto::Fat => {
@@ -183,6 +174,11 @@ impl CodegenBackend for CraneliftCodegenBackend {
}
}
if sess.opts.cg.instrument_coverage() != InstrumentCoverage::No {
sess.dcx()
.fatal("`-Cinstrument-coverage` is LLVM specific and not supported by Cranelift");
}
let mut config = self.config.borrow_mut();
if config.is_none() {
let new_config = BackendConfig::from_opts(&sess.opts.cg.llvm_args)

View file

@@ -11,8 +11,7 @@ use crate::prelude::*;
/// users main function.
pub(crate) fn maybe_create_entry_wrapper(
tcx: TyCtxt<'_>,
module: &mut impl Module,
unwind_context: &mut UnwindContext,
module: &mut dyn Module,
is_jit: bool,
is_primary_cgu: bool,
) {
@@ -36,12 +35,11 @@ pub(crate) fn maybe_create_entry_wrapper(
return;
}
create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn, sigpipe);
create_entry_fn(tcx, module, main_def_id, is_jit, is_main_fn, sigpipe);
fn create_entry_fn(
tcx: TyCtxt<'_>,
m: &mut impl Module,
unwind_context: &mut UnwindContext,
m: &mut dyn Module,
rust_main_def_id: DefId,
ignore_lang_start_wrapper: bool,
is_main_fn: bool,
@@ -170,7 +168,5 @@ pub(crate) fn maybe_create_entry_wrapper(
if let Err(err) = m.define_function(cmain_func_id, &mut ctx) {
tcx.dcx().fatal(format!("entry symbol `{entry_name}` defined multiple times: {err}"));
}
unwind_context.add_function(cmain_func_id, &ctx, m.isa());
}
}

View file

@@ -0,0 +1,115 @@
use cranelift_codegen::control::ControlPlane;
use cranelift_codegen::ir::{Function, Signature};
use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa};
use cranelift_codegen::{Context, FinalizedMachReloc};
use cranelift_module::{
DataDescription, DataId, FuncId, FuncOrDataId, Linkage, Module, ModuleDeclarations,
ModuleResult,
};
use cranelift_object::{ObjectModule, ObjectProduct};
use crate::UnwindContext;
/// A wrapper around a [Module] which adds any defined function to the [UnwindContext].
pub(crate) struct UnwindModule<T> {
pub(crate) module: T,
unwind_context: UnwindContext,
}
impl<T: Module> UnwindModule<T> {
pub(crate) fn new(module: T, pic_eh_frame: bool) -> Self {
let unwind_context = UnwindContext::new(module.isa(), pic_eh_frame);
UnwindModule { module, unwind_context }
}
}
impl UnwindModule<ObjectModule> {
pub(crate) fn finish(self) -> ObjectProduct {
let mut product = self.module.finish();
self.unwind_context.emit(&mut product);
product
}
}
#[cfg(feature = "jit")]
impl UnwindModule<cranelift_jit::JITModule> {
pub(crate) fn finalize_definitions(&mut self) {
self.module.finalize_definitions().unwrap();
let prev_unwind_context = std::mem::replace(
&mut self.unwind_context,
UnwindContext::new(self.module.isa(), false),
);
unsafe { prev_unwind_context.register_jit(&self.module) };
}
}
impl<T: Module> Module for UnwindModule<T> {
fn isa(&self) -> &dyn TargetIsa {
self.module.isa()
}
fn declarations(&self) -> &ModuleDeclarations {
self.module.declarations()
}
fn get_name(&self, name: &str) -> Option<FuncOrDataId> {
self.module.get_name(name)
}
fn target_config(&self) -> TargetFrontendConfig {
self.module.target_config()
}
fn declare_function(
&mut self,
name: &str,
linkage: Linkage,
signature: &Signature,
) -> ModuleResult<FuncId> {
self.module.declare_function(name, linkage, signature)
}
fn declare_anonymous_function(&mut self, signature: &Signature) -> ModuleResult<FuncId> {
self.module.declare_anonymous_function(signature)
}
fn declare_data(
&mut self,
name: &str,
linkage: Linkage,
writable: bool,
tls: bool,
) -> ModuleResult<DataId> {
self.module.declare_data(name, linkage, writable, tls)
}
fn declare_anonymous_data(&mut self, writable: bool, tls: bool) -> ModuleResult<DataId> {
self.module.declare_anonymous_data(writable, tls)
}
fn define_function_with_control_plane(
&mut self,
func: FuncId,
ctx: &mut Context,
ctrl_plane: &mut ControlPlane,
) -> ModuleResult<()> {
self.module.define_function_with_control_plane(func, ctx, ctrl_plane)?;
self.unwind_context.add_function(func, ctx, self.module.isa());
Ok(())
}
fn define_function_bytes(
&mut self,
_func_id: FuncId,
_func: &Function,
_alignment: u64,
_bytes: &[u8],
_relocs: &[FinalizedMachReloc],
) -> ModuleResult<()> {
unimplemented!()
}
fn define_data(&mut self, data_id: DataId, data: &DataDescription) -> ModuleResult<()> {
self.module.define_data(data_id, data)
}
}
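
The new UnwindModule wrapper uses a plain delegation pattern: implement the same trait as the inner module, forward every method, and attach the unwind bookkeeping to the one path that defines functions, so call sites only need their module type changed. A minimal, self-contained sketch of that pattern with stand-in names (not the real cranelift-module API):

trait Backend {
    fn define(&mut self, name: &str);
}

struct Inner;

impl Backend for Inner {
    fn define(&mut self, name: &str) {
        println!("defined {name}");
    }
}

// Forwards to the wrapped backend and records every definition as a side effect, much
// like define_function_with_control_plane above also calls unwind_context.add_function.
struct WithUnwindInfo<T> {
    inner: T,
    recorded: Vec<String>,
}

impl<T: Backend> Backend for WithUnwindInfo<T> {
    fn define(&mut self, name: &str) {
        self.inner.define(name);
        self.recorded.push(name.to_string());
    }
}

fn main() {
    let mut module = WithUnwindInfo { inner: Inner, recorded: Vec::new() };
    module.define("main");
    assert_eq!(module.recorded, ["main"]);
}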