
Auto merge of #120500 - oli-obk:intrinsics2.0, r=WaffleLapkin

Implement intrinsics with fallback bodies

fixes #93145 (though we can port many more intrinsics)
cc #63585

The way this works is that the backend logic for generating custom code for intrinsics has been made fallible. The only failure path is "this intrinsic is unknown". The `Instance` (that was `InstanceDef::Intrinsic`) then gets converted to `InstanceDef::Item`, which represents the fallback body. A regular function call to that body is then codegenned. This is currently implemented for

* codegen_ssa (so llvm and gcc)
* codegen_cranelift

Other backends will need to adjust, but they can keep doing what they were doing if they prefer (though adding a new intrinsic to the compiler will then require them to implement it, instead of getting the fallback body). A sketch of the new contract follows below.
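To make that contract concrete, here is a minimal, self-contained sketch of the dispatch in a toy backend. Every name here (`Backend`, `Instance`, `codegen_regular_call`, ...) is a stand-in for illustration, not rustc's real types:

```rust
// Toy model of the fallible intrinsic dispatch described above.
#[derive(Clone, Copy, Debug)]
enum Instance {
    Intrinsic(&'static str),
    Item(&'static str), // the fallback body, like `InstanceDef::Item`
}

trait Backend {
    /// `Err` carries the instance to call instead, i.e. the fallback body.
    fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance>;
    fn codegen_regular_call(&mut self, instance: Instance);
}

struct ToyBackend;

impl Backend for ToyBackend {
    fn codegen_intrinsic_call(&mut self, instance: Instance) -> Result<(), Instance> {
        match instance {
            // This backend knows `abort` and emits custom code for it.
            Instance::Intrinsic("abort") => Ok(()),
            // Any other intrinsic is unknown: hand back its fallback body.
            Instance::Intrinsic(name) => Err(Instance::Item(name)),
            other => Err(other),
        }
    }
    fn codegen_regular_call(&mut self, instance: Instance) {
        println!("plain call to {instance:?}");
    }
}

fn codegen_call(bx: &mut impl Backend, instance: Instance) {
    if let Instance::Intrinsic(_) = instance {
        // Try the intrinsic path first; fall back to a regular call on `Err`.
        if let Err(fallback) = bx.codegen_intrinsic_call(instance) {
            bx.codegen_regular_call(fallback);
        }
    } else {
        bx.codegen_regular_call(instance);
    }
}

fn main() {
    let mut bx = ToyBackend;
    codegen_call(&mut bx, Instance::Intrinsic("abort")); // custom codegen
    codegen_call(&mut bx, Instance::Intrinsic("likely")); // falls back to body
}
```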

cc `@scottmcm` `@WaffleLapkin`

### todo

* [ ] miri support
* [x] default the intrinsic name to the function's name instead of requiring it to be specified in the attribute (see the declaration sketch below)
* [x] make sure that the bodies are always available (must be collected for metadata)
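For context, this is roughly what an intrinsic with a fallback body looks like on the library side. A hedged sketch using `const_deallocate`, one of the intrinsics whose custom codegen-ssa handling the diff below removes; the attribute set is abridged and the exact `#[rustc_intrinsic]` spelling may differ from the final PR:

```rust
// Sketch, not the exact libcore code: the function body *is* the fallback.
// Backends that recognize the intrinsic emit custom code; all others
// codegen an ordinary call to this body.
#[rustc_intrinsic]
pub const unsafe fn const_deallocate(_ptr: *mut u8, _size: usize, _align: usize) {
    // Runtime fallback: deallocating a const-allocation is a no-op.
}
```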
Commit dfa88b328f by bors, 2024-02-16 09:53:01 +00:00
49 changed files with 621 additions and 452 deletions


@@ -787,7 +787,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Handle intrinsics old codegen wants Expr's for, ourselves.
let intrinsic = match def {
Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().intrinsic(def_id).unwrap()),
_ => None,
};
@@ -817,21 +817,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination
let ret_dest = if target.is_some() {
let is_intrinsic = intrinsic.is_some();
self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
if intrinsic == Some(sym::caller_location) {
return if let Some(target) = target {
let location =
self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
let mut llargs = Vec::with_capacity(arg_count);
let ret_dest =
self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, true, true);
assert_eq!(llargs, []);
if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
location.val.store(bx, tmp);
}
@@ -842,9 +837,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
}
match intrinsic {
None | Some(sym::drop_in_place) => {}
let instance = match intrinsic {
None | Some(sym::drop_in_place) => instance,
Some(intrinsic) => {
let mut llargs = Vec::with_capacity(1);
let ret_dest = self.make_return_dest(
bx,
destination,
&fn_abi.ret,
&mut llargs,
true,
target.is_some(),
);
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
@@ -878,27 +882,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
})
.collect();
Self::codegen_intrinsic_call(
bx,
*instance.as_ref().unwrap(),
fn_abi,
&args,
dest,
span,
);
let instance = *instance.as_ref().unwrap();
match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
Ok(()) => {
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
}
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
return if let Some(target) = target {
helper.funclet_br(self, bx, target, mergeable_succ)
} else {
bx.unreachable();
MergingSucc::False
};
}
Err(instance) => Some(instance),
}
return if let Some(target) = target {
helper.funclet_br(self, bx, target, mergeable_succ)
} else {
bx.unreachable();
MergingSucc::False
};
}
}
};
let mut llargs = Vec::with_capacity(arg_count);
let destination = target.as_ref().map(|&target| {
(self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, false, true), target)
});
// Split the rust-call tupled arguments off.
let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
@@ -1040,14 +1046,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
(_, Some(llfn)) => llfn,
_ => span_bug!(span, "no instance or llfn for call"),
};
helper.do_call(
self,
bx,
fn_abi,
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
destination,
unwind,
&copied_constant_arguments,
mergeable_succ,
@@ -1632,7 +1637,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>,
is_intrinsic: bool,
has_target: bool,
) -> ReturnDest<'tcx, Bx::Value> {
if !has_target {
return ReturnDest::Nothing;
}
// If the return is ignored, we can just return a do-nothing `ReturnDest`.
if fn_ret.is_ignore() {
return ReturnDest::Nothing;


@@ -54,6 +54,7 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// In the `Err` case, returns the instance that should be called instead.
pub fn codegen_intrinsic_call(
bx: &mut Bx,
instance: ty::Instance<'tcx>,
@@ -61,7 +62,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args: &[OperandRef<'tcx, Bx::Value>],
llresult: Bx::Value,
span: Span,
) {
) -> Result<(), ty::Instance<'tcx>> {
let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
@@ -81,7 +82,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let llval = match name {
sym::abort => {
bx.abort();
return;
return Ok(());
}
sym::va_start => bx.va_start(args[0].immediate()),
@@ -150,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[0].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::write_bytes => {
memset_intrinsic(
@@ -161,7 +162,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_copy_nonoverlapping_memory => {
@@ -174,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_copy_memory => {
copy_intrinsic(
@@ -186,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_set_memory => {
memset_intrinsic(
@@ -197,17 +198,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[1].immediate(),
args[2].immediate(),
);
return;
return Ok(());
}
sym::volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.volatile_store(bx, dst);
return;
return Ok(());
}
sym::unaligned_volatile_store => {
let dst = args[0].deref(bx.cx());
args[1].val.unaligned_volatile_store(bx, dst);
return;
return Ok(());
}
sym::exact_div => {
let ty = arg_tys[0];
@@ -225,7 +226,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
name,
ty,
});
return;
return Ok(());
}
}
}
@@ -245,7 +246,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
name,
ty: arg_tys[0],
});
return;
return Ok(());
}
}
}
@@ -256,14 +257,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
span,
ty: arg_tys[0],
});
return;
return Ok(());
}
let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
span,
ty: ret_ty,
});
return;
return Ok(());
};
if signed {
bx.fptosi(args[0].immediate(), llret_ty)
@@ -280,16 +281,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
sym::const_allocate => {
// returns a null pointer at runtime.
bx.const_null(bx.type_ptr())
}
sym::const_deallocate => {
// nop at runtime.
return;
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]"
name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
@@ -350,10 +341,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.store(val, dest.llval, dest.align);
let dest = result.project_field(bx, 1);
bx.store(success, dest.llval, dest.align);
return;
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
}
return Ok(());
}
"load" => {
@@ -383,7 +374,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
)
}
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
return Ok(());
}
}
@@ -399,10 +391,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
return;
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
}
return Ok(());
}
"fence" => {
@@ -410,7 +402,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
parse_ordering(bx, ordering),
SynchronizationScope::CrossThread,
);
return;
return Ok(());
}
"singlethreadfence" => {
@@ -418,7 +410,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
parse_ordering(bx, ordering),
SynchronizationScope::SingleThread,
);
return;
return Ok(());
}
// These are all AtomicRMW ops
@@ -449,7 +441,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
} else {
return invalid_monomorphization(ty);
invalid_monomorphization(ty);
return Ok(());
}
}
}
@@ -458,7 +451,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::nontemporal_store => {
let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);
return;
return Ok(());
}
sym::ptr_guaranteed_cmp => {
@@ -493,8 +486,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => {
// Need to use backend-specific things in the implementation.
bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
return;
return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
}
};
@@ -507,6 +499,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.store(bx, result);
}
}
Ok(())
}
}


@@ -8,6 +8,8 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
/// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
/// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
/// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
/// Returns `Err` if another instance should be called instead. This is used to invoke
/// intrinsic default bodies in case an intrinsic is not implemented by the backend.
fn codegen_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
@@ -15,7 +17,7 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
args: &[OperandRef<'tcx, Self::Value>],
llresult: Self::Value,
span: Span,
);
) -> Result<(), ty::Instance<'tcx>>;
fn abort(&mut self);
fn assume(&mut self, val: Self::Value);
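
To illustrate adoption, here is a hedged sketch of how a backend might satisfy the changed trait. `MyBuilder` and `old_intrinsic_codegen` are hypothetical, the other trait items are elided, and this only compiles against the in-tree rustc crates:

```rust
// Sketch only: wraps a previously infallible implementation in the new
// fallible signature. Unknown intrinsics report their fallback-body instance.
impl<'tcx> IntrinsicCallMethods<'tcx> for MyBuilder<'_, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Self::Value>],
        llresult: Self::Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        if self.old_intrinsic_codegen(instance, fn_abi, args, llresult, span) {
            Ok(()) // the backend emitted custom code for this intrinsic
        } else {
            // `Instance::new` yields an `InstanceDef::Item`, i.e. the
            // fallback body; the caller will codegen a regular call to it.
            Err(ty::Instance::new(instance.def_id(), instance.args))
        }
    }

    // abort, assume, ... unchanged.
}
```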