Teach llvm backend how to fall back to default bodies

Oli Scherer 2024-01-31 20:39:59 +00:00
parent 432635a9ea
commit 9a0743747f
11 changed files with 109 additions and 103 deletions
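
In outline: `codegen_intrinsic_call` now returns `Result<(), ty::Instance<'tcx>>` instead of `()`. `Ok(())` means the backend lowered the intrinsic itself; `Err(instance)` hands back the instance whose default body should be codegen'd as an ordinary call. Below is a minimal, self-contained sketch of that contract; the `Instance` and `Backend` types and all names in it are illustrative stand-ins, not rustc API.

    /// Toy stand-in for rustc's `ty::Instance`.
    #[derive(Clone, Copy, Debug)]
    struct Instance {
        name: &'static str,
    }

    trait Backend {
        /// Mirrors the new contract: `Ok(())` if the backend lowered the
        /// intrinsic inline, `Err(instance)` if the caller should emit a
        /// plain call to the intrinsic's default body instead.
        fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance>;

        fn emit_plain_call(&mut self, instance: Instance);
    }

    struct Llvm;

    impl Backend for Llvm {
        fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance> {
            match instance.name {
                // Intrinsics this backend knows how to lower inline.
                "abort" | "va_start" => {
                    println!("lowered `{}` inline", instance.name);
                    Ok(())
                }
                // Everything else falls back to its default body.
                _ => Err(instance),
            }
        }

        fn emit_plain_call(&mut self, instance: Instance) {
            println!("emitted ordinary call to `{}`", instance.name);
        }
    }

    fn codegen_call(bx: &mut impl Backend, instance: Instance) {
        // The caller-side pattern from the first file below: try the intrinsic
        // path first, and on `Err` continue down the regular call-codegen path.
        if let Err(fallback) = bx.codegen_intrinsic_call(instance) {
            bx.emit_plain_call(fallback);
        }
    }

    fn main() {
        let mut bx = Llvm;
        codegen_call(&mut bx, Instance { name: "abort" });
        codegen_call(&mut bx, Instance { name: "not_implemented_here" });
    }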

View file

@@ -837,8 +837,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             };
         }

-        match intrinsic {
-            None | Some(sym::drop_in_place) => {}
+        let instance = match intrinsic {
+            None | Some(sym::drop_in_place) => instance,
             Some(intrinsic) => {
                 let mut llargs = Vec::with_capacity(1);
                 let ret_dest = self.make_return_dest(
@@ -882,27 +882,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     })
                     .collect();

-                Self::codegen_intrinsic_call(
-                    bx,
-                    *instance.as_ref().unwrap(),
-                    fn_abi,
-                    &args,
-                    dest,
-                    span,
-                );
-
-                if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
-                }
-
-                return if let Some(target) = target {
-                    helper.funclet_br(self, bx, target, mergeable_succ)
-                } else {
-                    bx.unreachable();
-                    MergingSucc::False
-                };
+                let instance = *instance.as_ref().unwrap();
+                match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
+                    Ok(()) => {
+                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                            self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                        }
+
+                        return if let Some(target) = target {
+                            helper.funclet_br(self, bx, target, mergeable_succ)
+                        } else {
+                            bx.unreachable();
+                            MergingSucc::False
+                        };
+                    }
+                    Err(instance) => Some(instance),
+                }
             }
-        }
+        };

         let mut llargs = Vec::with_capacity(arg_count);
         let destination = target.as_ref().map(|&target| {

View file

@@ -54,6 +54,7 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 }

 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// In the `Err` case, returns the instance that should be called instead.
     pub fn codegen_intrinsic_call(
         bx: &mut Bx,
         instance: ty::Instance<'tcx>,
@@ -61,7 +62,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         args: &[OperandRef<'tcx, Bx::Value>],
         llresult: Bx::Value,
         span: Span,
-    ) {
+    ) -> Result<(), ty::Instance<'tcx>> {
         let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

         let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
@@ -81,7 +82,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let llval = match name {
             sym::abort => {
                 bx.abort();
-                return;
+                return Ok(());
             }

             sym::va_start => bx.va_start(args[0].immediate()),
@@ -150,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[0].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::write_bytes => {
                 memset_intrinsic(
@@ -161,7 +162,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }

             sym::volatile_copy_nonoverlapping_memory => {
@@ -174,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_copy_memory => {
                 copy_intrinsic(
@@ -186,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_set_memory => {
                 memset_intrinsic(
@@ -197,17 +198,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.volatile_store(bx, dst);
-                return;
+                return Ok(());
             }
             sym::unaligned_volatile_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.unaligned_volatile_store(bx, dst);
-                return;
+                return Ok(());
             }
             sym::exact_div => {
                 let ty = arg_tys[0];
@@ -225,7 +226,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         name,
                         ty,
                     });
-                    return;
+                    return Ok(());
                 }
             }
         }
@@ -245,7 +246,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         name,
                         ty: arg_tys[0],
                     });
-                    return;
+                    return Ok(());
                 }
             }
         }
@@ -256,14 +257,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         span,
                         ty: arg_tys[0],
                     });
-                    return;
+                    return Ok(());
                 }
                 let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                     bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                         span,
                         ty: ret_ty,
                     });
-                    return;
+                    return Ok(());
                 };
                 if signed {
                     bx.fptosi(args[0].immediate(), llret_ty)
@@ -280,14 +281,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }

-            sym::const_allocate => {
-                // returns a null pointer at runtime.
-                bx.const_null(bx.type_ptr())
-            }
-
             sym::const_deallocate => {
                 // nop at runtime.
-                return;
+                return Ok(());
             }

             // This requires that atomic intrinsics follow a specific naming pattern:
@@ -350,10 +346,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.store(val, dest.llval, dest.align);
                         let dest = result.project_field(bx, 1);
                         bx.store(success, dest.llval, dest.align);
-                        return;
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(ty);
                     }
+                    return Ok(());
                 }

                 "load" => {
@@ -383,7 +379,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         )
                     }
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
+                    return Ok(());
                 }
             }

@@ -399,10 +396,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
-                    return;
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
                 }
+                return Ok(());
             }

             "fence" => {
@@ -410,7 +407,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     parse_ordering(bx, ordering),
                     SynchronizationScope::CrossThread,
                 );
-                return;
+                return Ok(());
             }

             "singlethreadfence" => {
@@ -418,7 +415,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     parse_ordering(bx, ordering),
                     SynchronizationScope::SingleThread,
                 );
-                return;
+                return Ok(());
             }

             // These are all AtomicRMW ops
@@ -449,7 +446,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
+                    return Ok(());
                 }
             }
         }
@@ -458,7 +456,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::nontemporal_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.nontemporal_store(bx, dst);
-                return;
+                return Ok(());
             }

             sym::ptr_guaranteed_cmp => {
@@ -493,8 +491,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

             _ => {
                 // Need to use backend-specific things in the implementation.
-                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
-                return;
+                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
             }
         };

@@ -507,6 +504,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     .store(bx, result);
             }
         }

+        Ok(())
     }
 }
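
A side effect of the new return type, visible throughout the file above: error paths can no longer be written as `return invalid_monomorphization(ty);` (both used to return `()`). The diagnostic is emitted and the arm then returns `Ok(())`, because `Err` is reserved for 'call this instance instead' rather than for errors. A toy illustration of that pattern, with hypothetical names in place of rustc's:

    fn emit_invalid_monomorphization_error() {
        // Stand-in for `bx.tcx().dcx().emit_err(InvalidMonomorphization...)`.
        eprintln!("invalid monomorphization");
    }

    // `Err` now carries the fallback to call (a `&str` here, `ty::Instance`
    // in rustc), so an error arm emits its diagnostic and falls through to
    // `Ok(())`: handled, nothing further to codegen.
    fn codegen_atomic_store(type_is_supported: bool) -> Result<(), &'static str> {
        if type_is_supported {
            // ... emit the atomic store ...
        } else {
            emit_invalid_monomorphization_error();
        }
        Ok(())
    }

    fn main() {
        assert_eq!(codegen_atomic_store(true), Ok(()));
        assert_eq!(codegen_atomic_store(false), Ok(())); // diagnosed, but still `Ok`
    }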

View file

@@ -8,6 +8,8 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
     /// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
     /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
     /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+    /// Returns `Err` if another instance should be called instead. This is used to invoke
+    /// intrinsic default bodies in case an intrinsic is not implemented by the backend.
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,
@@ -15,7 +17,7 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
         args: &[OperandRef<'tcx, Self::Value>],
         llresult: Self::Value,
         span: Span,
-    );
+    ) -> Result<(), ty::Instance<'tcx>>;

     fn abort(&mut self);
     fn assume(&mut self, val: Self::Value);
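
For backend implementors, the trait change makes 'not implemented' a first-class answer. A sketch of the smallest conforming implementation, assuming every intrinsic that reaches codegen has a default body to fall back on (toy types again; rustc's real backends still lower many intrinsics natively):

    #[derive(Clone, Copy, Debug)]
    struct Instance {
        name: &'static str,
    }

    /// Toy analogue of `IntrinsicCallMethods`, reduced to the relevant method.
    trait IntrinsicCallMethods {
        /// `Err` means: codegen an ordinary call to `instance`'s default body.
        fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance>;
    }

    /// A backend with no native intrinsic lowerings at all: deferring every
    /// intrinsic is now a valid implementation, provided each one that gets
    /// here actually has a default body.
    struct MinimalBackend;

    impl IntrinsicCallMethods for MinimalBackend {
        fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance> {
            Err(instance)
        }
    }

    fn main() {
        let mut bx = MinimalBackend;
        let fallback = bx
            .codegen_intrinsic_call(Instance { name: "likely" })
            .unwrap_err();
        println!("would call default body of `{}`", fallback.name);
    }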