
Merge commit 'e228f0c16e' into libgccjit-codegen

Antoni Boucher 2021-08-15 08:29:07 -04:00
commit 3d5d4e324d
27 changed files with 265 additions and 3053 deletions


@@ -11,16 +11,12 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
},
// TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
// NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
"llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
"llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
"llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
_ => unimplemented!("unsupported LLVM intrinsic {}", name)
};
println!("Get target builtin");
unimplemented!();
/*let func = cx.context.get_target_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
func*/
}
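Note: the commented-out tail above shows the intended lookup-and-cache pattern once get_target_builtin_function becomes usable; a minimal sketch under that assumption:

    // Assumes a working Context::get_target_builtin_function (per the
    // comment above): resolve the GCC builtin once and cache it by name.
    let func = cx.context.get_target_builtin_function(gcc_name);
    cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
    func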


@@ -96,7 +96,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let llval =
match name {
_ if simple.is_some() => {
// FIXME: remove this cast when the API supports function.
// FIXME(antoyo): remove this cast when the API supports function.
let func = unsafe { std::mem::transmute(simple.expect("simple")) };
self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
},
@@ -118,40 +118,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
sym::breakpoint => {
unimplemented!();
/*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
self.call(llfn, &[], None)*/
}
sym::va_copy => {
unimplemented!();
/*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
}
sym::va_arg => {
unimplemented!();
/*match fn_abi.ret.layout.abi {
abi::Abi::Scalar(ref scalar) => {
match scalar.value {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on an integer type
// less than 4 bytes in length. If it is, promote
// the integer to an `i32` and truncate the result
// back to the smaller type.
let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
self.trunc(promoted_result, llret_ty)
} else {
emit_va_arg(self, args[0], ret_ty)
}
}
Primitive::F64 | Primitive::Pointer => {
emit_va_arg(self, args[0], ret_ty)
}
// `va_arg` should never be used with the return type f32.
Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
}
}
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}*/
}
sym::volatile_load | sym::unaligned_volatile_load => {
@@ -161,15 +133,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
}
let load = self.volatile_load(ptr.get_type(), ptr);
// TODO
/*let align = if name == sym::unaligned_volatile_load {
1
} else {
self.align_of(tp_ty).bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
}*/
// TODO(antoyo): set alignment.
self.to_immediate(load, self.layout_of(tp_ty))
}
sym::volatile_store => {
@@ -187,24 +151,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
| sym::prefetch_read_instruction
| sym::prefetch_write_instruction => {
unimplemented!();
/*let expect = self.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
sym::prefetch_read_data => (0, 1),
sym::prefetch_write_data => (1, 1),
sym::prefetch_read_instruction => (0, 0),
sym::prefetch_write_instruction => (1, 0),
_ => bug!(),
};
self.call(
expect,
&[
args[0].immediate(),
self.const_i32(rw),
args[1].immediate(),
self.const_i32(cache_type),
],
None,
)*/
}
sym::ctlz
| sym::ctlz_nonzero
@@ -257,10 +203,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
self.block = Some(after_block);
result.to_rvalue()
/*let y = self.const_bool(false);
let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
self.call(llfn, &[args[0].immediate(), y], None)*/
}
sym::ctlz_nonzero => {
self.count_leading_zeroes(width, args[0].immediate())
@@ -274,11 +216,11 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
args[0].immediate() // byte swap of a u8/i8 is just a no-op
}
else {
// TODO: check if it's faster to use string literals and a
// TODO(antoyo): check if it's faster to use string literals and a
// match instead of format!.
let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
let mut arg = args[0].immediate();
// FIXME: this cast should not be necessary. Remove
// FIXME(antoyo): this cast should not be necessary. Remove
// when having proper sized integer types.
let param_type = bswap.get_param(0).to_rvalue().get_type();
if param_type != arg.get_type() {
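Note: as a plain-Rust model of the width dispatch above (the builder instead calls the matching __builtin_bswap{width} and casts the argument to the builtin's parameter type):

    // Illustrative scalar byte-swap dispatch; swapping a single byte is a
    // no-op, wider widths mirror __builtin_bswap16/32/64.
    fn bswap(width: u64, x: u64) -> u64 {
        match width {
            8 => x,
            16 => (x as u16).swap_bytes() as u64,
            32 => (x as u32).swap_bytes() as u64,
            64 => x.swap_bytes(),
            _ => unimplemented!("unsupported bswap width {}", width),
        }
    }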
@@ -289,7 +231,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
},
sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
sym::rotate_left | sym::rotate_right => {
// TODO: implement using algorithm from:
// TODO(antoyo): implement using algorithm from:
// https://blog.regehr.org/archives/1063
// for other platforms.
let is_left = name == sym::rotate_left;
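Note: the linked post describes a branch-free rotate whose shift amounts are masked so no full-width (undefined) shift occurs; a scalar Rust sketch of that formulation for 32-bit values:

    // Safe wrap-around rotate: `n & 31` and `-n & 31` keep both shift
    // amounts in 0..=31, and the two halves OR together into the rotation.
    fn rotate_left_u32(x: u32, n: u32) -> u32 {
        (x << (n & 31)) | (x >> (n.wrapping_neg() & 31))
    }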
@@ -346,7 +288,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
self.const_bool(true)
}
/*else if use_integer_compare {
let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
let ptr_ty = self.type_ptr_to(integer_ty);
let a_ptr = self.bitcast(a, ptr_ty);
let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
@@ -396,38 +338,27 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}
fn assume(&mut self, value: Self::Value) {
// TODO: switch to assume when it exists.
// TODO(antoyo): switch to assume when it exists.
// Or use something like this:
// #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
self.expect(value, true);
}
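Note: a Rust analog of the C macro sketched in the comment above (illustrative only; hint-level semantics, not the builder implementation):

    // Mirrors `#define __assume(cond)`: promises the optimizer that `cond`
    // holds; reaching this with `cond == false` is undefined behavior.
    #[inline(always)]
    unsafe fn assume_hint(cond: bool) {
        if !cond {
            std::hint::unreachable_unchecked(); // ~ __builtin_unreachable()
        }
    }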
fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
// TODO
/*let expect = self.context.get_builtin_function("__builtin_expect");
let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
self.call(expect, &[cond, self.const_bool(expected)], None)*/
// TODO(antoyo)
cond
}
fn sideeffect(&mut self) {
// TODO
/*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
}*/
// TODO(antoyo)
}
fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
/*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
self.call(intrinsic, &[va_list], None)*/
}
fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
/*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
self.call(intrinsic, &[va_list], None)*/
}
}
@@ -634,7 +565,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
step4
},
32 => {
// TODO: Refactor with other implementations.
// TODO(antoyo): Refactor with other implementations.
// First step.
let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
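Note: the 0x55555555 mask-and-shift above is the first step of the classic bit-reversal cascade; the whole 32-bit sequence as a scalar sketch (the builder performs the same steps on gccjit rvalues):

    // Classic mask-and-shift bit reversal for a u32.
    fn bit_reverse_u32(mut v: u32) -> u32 {
        v = ((v & 0x5555_5555) << 1) | ((v >> 1) & 0x5555_5555); // swap adjacent bits
        v = ((v & 0x3333_3333) << 2) | ((v >> 2) & 0x3333_3333); // swap bit pairs
        v = ((v & 0x0F0F_0F0F) << 4) | ((v >> 4) & 0x0F0F_0F0F); // swap nibbles
        v = ((v & 0x00FF_00FF) << 8) | ((v >> 8) & 0x00FF_00FF); // swap bytes
        (v << 16) | (v >> 16)                                    // swap half-words
    }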
@@ -681,7 +612,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Second step.
let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
let step2 = self.or(left, right);
@@ -715,7 +646,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
step5
},
128 => {
// TODO: find a more efficient implementation?
// TODO(antoyo): find a more efficient implementation?
let sixty_four = self.context.new_rvalue_from_long(typ, 64);
let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
let low = self.context.new_cast(None, value, self.u64_type);
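Note: the 128-bit case splits the value at 64 bits, reverses each half, and swaps the halves; a scalar sketch of that approach:

    // Scalar model of the 128-bit path: reverse each u64 half and
    // exchange them.
    fn bit_reverse_u128(v: u128) -> u128 {
        let high = (v >> 64) as u64;
        let low = v as u64;
        ((low.reverse_bits() as u128) << 64) | (high.reverse_bits() as u128)
    }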
@@ -735,7 +666,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use width?
// TODO(antoyo): use width?
let arg_type = arg.get_type();
let count_leading_zeroes =
if arg_type.is_uint(&self.cx) {
@@ -873,11 +804,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
}
fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use the optimized version with fewer operations.
// TODO(antoyo): use the optimized version with fewer operations.
let value_type = value.get_type();
if value_type.is_u128(&self.cx) {
// TODO: implement in the normal algorithm below to have a more efficient
// TODO(antoyo): implement in the normal algorithm below to have a more efficient
// implementation (that does not require a call to __popcountdi2).
let popcount = self.context.get_builtin_function("__builtin_popcountll");
let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
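Note: the u128 path above computes the count as the sum of two 64-bit popcounts (one __builtin_popcountll call per half); scalar model:

    // popcount(u128) = popcount(high half) + popcount(low half)
    fn pop_count_u128(v: u128) -> u32 {
        ((v >> 64) as u64).count_ones() + (v as u64).count_ones()
    }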
@@ -1083,204 +1014,8 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<
}
else if wants_msvc_seh(bx.sess()) {
unimplemented!();
//codegen_msvc_try(bx, try_func, data, catch_func, dest);
}
else {
unimplemented!();
//codegen_gnu_try(bx, try_func, data, catch_func, dest);
}
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM,
// which have support for SEH on MSVC targets. Although these instructions
// are meant to work for all targets, as of this writing LLVM does not
// recommend using them, as the old ones are still better optimized.
/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
unimplemented!();
/*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
bx.sideeffect();
let mut normal = bx.build_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch");
let mut catchpad = bx.build_sibling_block("catchpad");
let mut caught = bx.build_sibling_block("caught");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
// We're generating an IR snippet that looks like:
//
// declare i32 @rust_try(%try_func, %data, %catch_func) {
// %slot = alloca u8*
// invoke %try_func(%data) to label %normal unwind label %catchswitch
//
// normal:
// ret i32 0
//
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
// %tok = catchpad within %cs [%type_descriptor, 0, %slot]
// %ptr = load %slot
// call %catch_func(%data, %ptr)
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of throw/try/catch in LLVM.
// For example, compile this C++ snippet to see what LLVM generates:
//
// #include <stdint.h>
//
// struct rust_panic {
// rust_panic(const rust_panic&);
// ~rust_panic();
//
// uint64_t x[2];
// };
//
// int __rust_try(
// void (*try_func)(void*),
// void *data,
// void (*catch_func)(void*, void*) noexcept
// ) {
// try {
// try_func(data);
// return 0;
// } catch(rust_panic& a) {
// catch_func(data, &a);
// return 1;
// }
// }
//
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
// We can't use the TypeDescriptor defined in libpanic_unwind because it
// might be in another DLL and the SEH encoding only supports specifying
// a TypeDescriptor from the current module.
//
// However this isn't an issue since the MSVC runtime uses string
// comparison on the type name to match TypeDescriptors rather than
// pointer equality.
//
// So instead we generate a new TypeDescriptor in each module that uses
// `try` and let the linker merge duplicate definitions in the same
// module.
//
// When modifying, make sure that the type_name string exactly matches
// the one used in src/libpanic_unwind/seh.rs.
let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
let type_name = bx.const_bytes(b"rust_panic\0");
let type_info =
bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
unsafe {
llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
llvm::SetUniqueComdat(bx.llmod, tydesc);
llvm::LLVMSetInitializer(tydesc, type_info);
}
// The flag value of 8 indicates that we are catching the exception by
// reference instead of by value. We can't use catch by value because
// that requires copying the exception object, which we don't support
// since our exception object effectively contains a Box.
//
// Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
let flags = bx.const_i32(8);
let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad.load(slot, ptr_align);
catchpad.call(catch_func, &[data, ptr], Some(&funclet));
catchpad.catch_ret(&funclet, caught.llbb());
caught.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);*/
}*/
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
unimplemented!();
/*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
// invoke %try_func(%data) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (%ptr, _) = landingpad
// call %catch_func(%data, %ptr)
// ret 1
bx.sideeffect();
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
Some(tydesc) => {
let tydesc = bx.get_static(tydesc);
bx.bitcast(tydesc, bx.type_i8p())
}
None => bx.const_null(bx.type_i8p()),
};
catch.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0);
catch.call(catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);*/
}*/


@@ -12,8 +12,6 @@ use rustc_span::{Span, Symbol, sym};
use crate::builder::Builder;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
//println!("Generic simd: {}", name);
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
@@ -56,33 +54,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let arg_tys = sig.inputs();
let name_str = &*name.as_str();
/*if name == sym::simd_select_bitmask {
let in_ty = arg_tys[0];
let m_len = match in_ty.kind() {
// Note that this `.unwrap()` crashes for isize/usize, that's sort
// of intentional as there's not currently a use case for that.
ty::Int(i) => i.bit_width().unwrap(),
ty::Uint(i) => i.bit_width().unwrap(),
_ => return_error!("`{}` is not an integral type", in_ty),
};
require_simd!(arg_tys[1], "argument");
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
// Allow masks for vectors with fewer than 8 elements to be
// represented with a u8 or i8.
m_len == v_len || (m_len == 8 && v_len < 8),
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
let i1 = bx.type_i1();
let im = bx.type_ix(v_len);
let i1xn = bx.type_vector(i1, v_len);
let m_im = bx.trunc(args[0].immediate(), im);
let m_i1s = bx.bitcast(m_im, i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}*/
// every intrinsic below takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0];
@@ -153,37 +124,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
out_ty
);
//let total_len = u128::from(in_len) * 2;
let vector = args[2].immediate();
// TODO:
/*let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = bx.const_get_vector_element(vector, i as u64);
match bx.const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
}
Some(idx) if idx >= total_len => {
emit_error!(
"shuffle index #{} is out of bounds (limit {})",
arg_idx,
total_len
);
None
}
Some(idx) => Some(bx.const_i32(idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return Ok(bx.const_null(llret_ty)),
};*/
return Ok(bx.shuffle_vector(
args[0].immediate(),
args[1].immediate(),
@@ -191,723 +133,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
));
}
/*if name == sym::simd_insert {
require!(
in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
arg_tys[2]
);
return Ok(bx.insert_element(
args[0].immediate(),
args[2].immediate(),
args[1].immediate(),
));
}
if name == sym::simd_extract {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
}
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
require_simd!(arg_tys[1], "argument");
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
match m_elem_ty.kind() {
ty::Int(_) => {}
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
}
// truncate the mask to a vector of i1s
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
if name == sym::simd_bitmask {
// The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
// vector mask and returns an unsigned integer containing the most
// significant bit (MSB) of each lane.
// If the vector has fewer than 8 lanes, a u8 is returned with zeroed
// trailing bits.
let expected_int_bits = in_len.max(8);
match ret_ty.kind() {
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
_ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
}
// Integer vector <i{in_bitwidth} x in_len>:
let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
ty::Int(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
ty::Uint(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
_ => return_error!(
"vector argument `{}`'s element type `{}`, expected integer element type",
in_ty,
in_elem
),
};
// Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
let shift_indices =
vec![
bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
in_len as _
];
let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
// Truncate vector to an <i1 x N>
let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
// Bitcast <i1 x N> to iN:
let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
// Zero-extend iN to the bitmask type:
return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
}
fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>(
name: Symbol,
in_elem: &::rustc_middle::ty::TyS<'_>,
in_ty: &::rustc_middle::ty::TyS<'_>,
in_len: u64,
bx: &mut Builder<'a, 'gcc, 'tcx>,
span: Span,
args: &[OperandRef<'tcx, RValue<'gcc>>],
) -> Result<RValue<'gcc>, ()> {
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
}
macro_rules! return_error {
($($fmt: tt)*) => {
{
emit_error!($($fmt)*);
return Err(());
}
}
}
let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
let elem_ty = bx.cx.type_float_from_ty(*f);
match f.bit_width() {
32 => ("f32", elem_ty),
64 => ("f64", elem_ty),
_ => {
return_error!(
"unsupported element type `{}` of floating-point vector `{}`",
f.name_str(),
in_ty
);
}
}
} else {
return_error!("`{}` is not a floating-point type", in_ty);
};
let vec_ty = bx.type_vector(elem_ty, in_len);
let (intr_name, fn_ty) = match name {
sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
_ => return_error!("unrecognized intrinsic `{}`", name),
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(&llvm_name, fn_ty);
let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c)
}
if std::matches!(
name,
sym::simd_ceil
| sym::simd_fabs
| sym::simd_fcos
| sym::simd_fexp2
| sym::simd_fexp
| sym::simd_flog10
| sym::simd_flog2
| sym::simd_flog
| sym::simd_floor
| sym::simd_fma
| sym::simd_fpow
| sym::simd_fpowi
| sym::simd_fsin
| sym::simd_fsqrt
| sym::simd_round
| sym::simd_trunc
) {
return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
// FIXME: use:
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
let p0s: String = "p0".repeat(no_pointers);
match *elem_ty.kind() {
ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
_ => unreachable!(),
}
}
fn gcc_vector_ty<'gcc>(
cx: &CodegenCx<'gcc, '_>,
elem_ty: Ty<'_>,
vec_len: u64,
mut no_pointers: usize,
) -> Type<'gcc> {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match *elem_ty.kind() {
ty::Int(v) => cx.type_int_from_ty(v),
ty::Uint(v) => cx.type_uint_from_ty(v),
ty::Float(v) => cx.type_float_from_ty(v),
_ => unreachable!(),
};
while no_pointers > 0 {
elem_ty = cx.type_ptr_to(elem_ty);
no_pointers -= 1;
}
cx.type_vector(elem_ty, vec_len)
}
if name == sym::simd_gather {
// simd_gather(values: <N x T>, pointers: <N x *_ T>,
// mask: <N x i{M}>) -> <N x T>
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
require_simd!(ret_ty, "return");
// Of the same length:
let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
out_len
);
require!(
in_len == out_len2,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
out_len2
);
// The return type must match the first argument type
require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
// Counts how many levels of raw-pointer indirection the type has
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*_ {}`",
element_ty1,
arg_tys[1],
in_elem,
in_ty,
element_ty1,
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
element_ty2,
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
// Type of the vector of pointers:
let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
),
);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
if name == sym::simd_scatter {
// simd_scatter(values: <N x T>, pointers: <N x *mut T>,
// mask: <N x i{M}>) -> ()
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
// Of the same length:
let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
element_len1
);
require!(
in_len == element_len2,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
element_len2
);
// Counts how many levels of raw-pointer indirection the type has
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
(ptr_count(element_ty1), non_ptr(element_ty1))
}
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*mut {}`",
element_ty1,
arg_tys[1],
in_elem,
in_ty,
element_ty1,
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
element_ty2,
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let ret_t = bx.type_void();
// Type of the vector of pointers:
let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
);
let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
macro_rules! arith_red {
($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
$identity:expr) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
// mathematical result modulo 2^n:
Ok(bx.$op(args[1].immediate(), r))
} else {
Ok(bx.$integer_reduce(args[0].immediate()))
}
}
ty::Float(f) => {
let acc = if $ordered {
// ordered arithmetic reductions take an accumulator
args[1].immediate()
} else {
// unordered arithmetic reductions use the identity accumulator
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), $identity),
64 => bx.const_real(bx.type_f64(), $identity),
v => return_error!(
r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
sym::$name,
in_ty,
in_elem,
v,
ret_ty
),
}
};
Ok(bx.$float_reduce(acc, args[0].immediate()))
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
arith_red!(
simd_reduce_add_unordered: vector_reduce_add,
vector_reduce_fadd_fast,
false,
add,
0.0
);
arith_red!(
simd_reduce_mul_unordered: vector_reduce_mul,
vector_reduce_fmul_fast,
false,
mul,
1.0
);
macro_rules! minmax_red {
($name:ident: $int_red:ident, $float_red:ident) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind() {
ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
macro_rules! bitwise_red {
($name:ident : $red:ident, $boolean:expr) => {
if name == sym::$name {
let input = if !$boolean {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
}
// boolean reductions operate on vectors of i1s:
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$red(input);
Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
bitwise_red!(simd_reduce_and: vector_reduce_and, false);
bitwise_red!(simd_reduce_or: vector_reduce_or, false);
bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
bitwise_red!(simd_reduce_all: vector_reduce_and, true);
bitwise_red!(simd_reduce_any: vector_reduce_or, true);
if name == sym::simd_cast {
require_simd!(ret_ty, "return");
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len,
in_ty,
ret_ty,
out_len
);
// casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Ok(args[0].immediate());
}
enum Style {
Float,
Int(/* is signed? */ bool),
Unsupported,
}
let (in_style, in_width) = match in_elem.kind() {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind() {
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => {
if in_is_signed {
bx.sext(args[0].immediate(), llret_ty)
} else {
bx.zext(args[0].immediate(), llret_ty)
}
}
});
}
(Style::Int(in_is_signed), Style::Float) => {
return Ok(if in_is_signed {
bx.sitofp(args[0].immediate(), llret_ty)
} else {
bx.uitofp(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Int(out_is_signed)) => {
return Ok(if out_is_signed {
bx.fptosi(args[0].immediate(), llret_ty)
} else {
bx.fptoui(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
});
}
_ => { /* Unsupported. Fallthrough. */ }
}
require!(
false,
"unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
in_ty,
in_elem,
ret_ty,
out_elem
);
}*/
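Note: among the intrinsics preserved in the comment above, simd_bitmask has the subtlest contract (gather the most significant bit of each lane into an integer). A scalar Rust model of that behavior, with an arbitrary 4-lane width for illustration:

    // Collect the MSB (sign bit) of each i32 lane into a little-endian
    // bitmask; with fewer than 8 lanes the trailing bits stay zero.
    fn bitmask_i32x4(v: [i32; 4]) -> u8 {
        let mut mask = 0u8;
        for (i, lane) in v.iter().enumerate() {
            mask |= (((*lane as u32) >> 31) as u8) << i;
        }
        mask
    }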
macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
@@ -934,68 +159,9 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
simd_shl: Uint, Int => shl;
simd_shr: Uint => lshr, Int => ashr;
simd_and: Uint, Int => and;
simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors.
simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
simd_xor: Uint, Int => xor;
/*simd_fmax: Float => maxnum;
simd_fmin: Float => minnum;*/
}
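Note: the arith_binary! body is elided by this hunk; by analogy with the arith_unary! macro kept in the comment below, each entry presumably expands to roughly the following (a labeled assumption, shown for `simd_and: Uint, Int => and`):

    // Hypothetical expansion; `bx`, `name`, `in_elem`, `in_ty` and `args`
    // are the surrounding locals of generic_simd_intrinsic.
    if name == sym::simd_and {
        match in_elem.kind() {
            ty::Uint(_) | ty::Int(_) => {
                return Ok(bx.and(args[0].immediate(), args[1].immediate()));
            }
            _ => {},
        }
        require!(false, "unsupported operation on `{}` with element `{}`", in_ty, in_elem)
    }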
/*macro_rules! arith_unary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate()))
})*
_ => {},
}
require!(false,
"unsupported operation on `{}` with element `{}`",
in_ty,
in_elem)
})*
}
}
arith_unary! {
simd_neg: Int => neg, Float => fneg;
}
if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
let (signed, elem_width, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
_ => {
return_error!(
"expected element type `{}` of vector type `{}` \
to be a signed or unsigned integer type",
arg_tys[0].simd_size_and_type(bx.tcx()).1,
arg_tys[0]
);
}
};
let llvm_intrinsic = &format!(
"llvm.{}{}.sat.v{}i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
in_len,
elem_width
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(&[vec_ty, vec_ty], vec_ty),
);
let v = bx.call(f, &[lhs, rhs], None);
return Ok(v);
}*/
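Note: per lane, the commented-out simd_saturating_add/simd_saturating_sub path above reduces to ordinary saturating integer arithmetic; a minimal scalar sketch:

    // Scalar model of one i32 lane of simd_saturating_add: clamp to
    // i32::MIN / i32::MAX instead of wrapping on overflow.
    fn saturating_add_i32(a: i32, b: i32) -> i32 {
        a.saturating_add(b)
    }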
unimplemented!("simd {}", name);
//span_bug!(span, "unknown SIMD intrinsic");
}