Merge commit '5988bbd24a' into sync_cg_clif-2020-11-27

Author: bjorn3
Date:   2020-11-27 20:48:53 +01:00
Commit: 477aa67802
33 changed files with 400 additions and 222 deletions


@ -214,10 +214,8 @@ pub(crate) fn get_function_name_and_sig<'tcx>(
support_vararg: bool,
) -> (String, Signature) {
assert!(!inst.substs.needs_infer());
let fn_sig = tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
fn_sig_for_fn_abi(tcx, inst),
);
let fn_sig = tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_sig_for_fn_abi(tcx, inst));
if fn_sig.c_variadic && !support_vararg {
tcx.sess.span_fatal(
tcx.def_span(inst.def_id()),


@ -8,7 +8,7 @@ use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
use rustc_codegen_ssa::METADATA_FILENAME;
use rustc_session::Session;
use object::{Object, SymbolKind};
use object::{Object, ObjectSymbol, SymbolKind};
#[derive(Debug)]
enum ArchiveEntry {
@ -184,7 +184,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
entry_name.as_bytes().to_vec(),
object
.symbols()
.filter_map(|(_index, symbol)| {
.filter_map(|symbol| {
if symbol.is_undefined()
|| symbol.is_local()
|| symbol.kind() != SymbolKind::Data
@ -193,7 +193,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
{
None
} else {
symbol.name().map(|name| name.as_bytes().to_vec())
symbol.name().map(|name| name.as_bytes().to_vec()).ok()
}
})
.collect::<Vec<_>>(),
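For context, this hunk tracks an API change in the `object` crate: `symbols()` now yields symbol handles implementing the `ObjectSymbol` trait directly (no `(index, symbol)` tuple), and `name()` returns a `Result`, hence the added `.ok()`. A minimal stand-alone sketch of the same filtering, assuming a recent `object` release (the helper name is illustrative):

```rust
use object::{Object, ObjectSymbol, SymbolKind};

// Illustrative helper mirroring the filter in the archive builder above:
// collect the names of defined, non-local data symbols from an object file.
fn defined_data_symbols(data: &[u8]) -> Vec<Vec<u8>> {
    let file = object::File::parse(data).expect("valid object file");
    file.symbols()
        .filter_map(|symbol| {
            if symbol.is_undefined() || symbol.is_local() || symbol.kind() != SymbolKind::Data {
                None
            } else {
                // `name()` returns a `Result` in the newer API, so discard errors with `.ok()`.
                symbol.name().map(|name| name.as_bytes().to_vec()).ok()
            }
        })
        .collect()
}
```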


@ -7,8 +7,7 @@ use crate::prelude::*;
#[cfg(all(feature = "jit", unix))]
#[no_mangle]
static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
libc::PTHREAD_MUTEX_INITIALIZER;
static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
pub(crate) fn init_global_lock(
module: &mut impl Module,


@ -12,6 +12,10 @@ pub(crate) fn codegen_fn<'tcx>(
) {
let tcx = cx.tcx;
let _inst_guard =
crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
debug_assert!(!instance.substs.needs_infer());
let mir = tcx.instance_mir(instance.def);
// Declare function
@ -499,7 +503,8 @@ fn codegen_stmt<'tcx>(
UnOp::Neg => match layout.ty.kind() {
ty::Int(IntTy::I128) => {
// FIXME remove this case once ineg.i128 works
let zero = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
let zero =
CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
}
ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
@ -509,7 +514,11 @@ fn codegen_stmt<'tcx>(
};
lval.write_cvalue(fx, res);
}
Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), ref operand, to_ty) => {
Rvalue::Cast(
CastKind::Pointer(PointerCast::ReifyFnPointer),
ref operand,
to_ty,
) => {
let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
match *from_ty.kind() {
@ -530,9 +539,21 @@ fn codegen_stmt<'tcx>(
_ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
}
}
Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), ref operand, to_ty)
| Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), ref operand, to_ty)
| Rvalue::Cast(CastKind::Pointer(PointerCast::ArrayToPointer), ref operand, to_ty) => {
Rvalue::Cast(
CastKind::Pointer(PointerCast::UnsafeFnPointer),
ref operand,
to_ty,
)
| Rvalue::Cast(
CastKind::Pointer(PointerCast::MutToConstPointer),
ref operand,
to_ty,
)
| Rvalue::Cast(
CastKind::Pointer(PointerCast::ArrayToPointer),
ref operand,
to_ty,
) => {
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
let operand = codegen_operand(fx, operand);
lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));


@ -26,15 +26,15 @@ impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
config.opts.cg.panic = Some(PanicStrategy::Abort);
config.opts.debugging_opts.panic_abort_tests = true;
config.opts.maybe_sysroot = Some(
config.opts.maybe_sysroot.clone().unwrap_or_else(
|| std::env::current_exe()
.unwrap()
.parent()
.unwrap()
.join("sysroot"),
),
);
config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
std::env::current_exe()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap()
.to_owned()
}));
}
}
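With this change the default sysroot is no longer a `sysroot` directory next to the executable but the executable's grandparent directory, presumably so a binary installed under `<install-root>/bin/` resolves `<install-root>` as its sysroot (that reading of the layout is an assumption). The path computation in isolation, as a sketch:

```rust
use std::path::PathBuf;

// Sketch of the new default-sysroot lookup: two directories above the
// running executable (assumed layout: <sysroot>/bin/<this-binary>).
fn default_sysroot() -> PathBuf {
    std::env::current_exe()
        .unwrap()
        .parent()
        .unwrap()
        .parent()
        .unwrap()
        .to_owned()
}
```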


@ -233,7 +233,7 @@ pub(crate) fn type_min_max_value(
let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
let min = bcx.ins().iconcat(min_lsb, min_msb);
let max = i128::MIN as u128;
let max = i128::MAX as u128;
let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
let max = bcx.ins().iconcat(max_lsb, max_msb);
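The old line computed the unsigned image of `i128::MIN` where the maximum was intended; the fix uses `i128::MAX`. The surrounding code then splits the 128-bit constant into two 64-bit halves for `iconcat`; the split itself is plain bit arithmetic, shown here without any Cranelift setup:

```rust
// Split a 128-bit value into the (low, high) 64-bit halves fed to `iconcat`.
fn split_u128(value: u128) -> (u64, u64) {
    let lsb = value as u64;         // low 64 bits
    let msb = (value >> 64) as u64; // high 64 bits
    (lsb, msb)
}

fn main() {
    let max = i128::MAX as u128;
    let (lsb, msb) = split_u128(max);
    assert_eq!(lsb, u64::MAX);        // all ones in the low half
    assert_eq!(msb, i64::MAX as u64); // 0x7fff_ffff_ffff_ffff in the high half
}
```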
@ -364,7 +364,7 @@ impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
self.instance.subst_mir_and_normalize_erasing_regions(
self.tcx,
ty::ParamEnv::reveal_all(),
value
value,
)
}


@ -163,10 +163,7 @@ pub(crate) fn codegen_const_value<'tcx>(
assert!(!layout.is_unsized(), "sized const value");
if layout.is_zst() {
return CValue::by_ref(
crate::Pointer::dangling(layout.align.pref),
layout,
);
return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
}
match const_val {
@ -186,9 +183,7 @@ pub(crate) fn codegen_const_value<'tcx>(
}
match x {
Scalar::Int(int) => {
CValue::const_val(fx, layout, int)
}
Scalar::Int(int) => CValue::const_val(fx, layout, int),
Scalar::Ptr(ptr) => {
let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
let base_addr = match alloc_kind {


@ -76,7 +76,7 @@ impl WriterRelocate {
#[cfg(feature = "jit")]
pub(super) fn relocate_for_jit(
mut self,
jit_product: &cranelift_simplejit::SimpleJITProduct,
jit_module: &cranelift_simplejit::SimpleJITModule,
) -> Vec<u8> {
use std::convert::TryInto;
@ -84,8 +84,9 @@ impl WriterRelocate {
match reloc.name {
super::DebugRelocName::Section(_) => unreachable!(),
super::DebugRelocName::Symbol(sym) => {
let addr = jit_product
.lookup_func(cranelift_module::FuncId::from_u32(sym.try_into().unwrap()));
let addr = jit_module.get_finalized_function(
cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
);
let val = (addr as u64 as i64 + reloc.addend) as u64;
self.writer
.write_udata_at(reloc.offset as usize, val, reloc.size)


@ -80,7 +80,7 @@ impl<'tcx> UnwindContext<'tcx> {
#[cfg(feature = "jit")]
pub(crate) unsafe fn register_jit(
self,
jit_product: &cranelift_simplejit::SimpleJITProduct,
jit_module: &cranelift_simplejit::SimpleJITModule,
) -> Option<UnwindRegistry> {
let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
self.tcx,
@ -91,7 +91,7 @@ impl<'tcx> UnwindContext<'tcx> {
return None;
}
let mut eh_frame = eh_frame.0.relocate_for_jit(jit_product);
let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
// GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
eh_frame.extend(&[0, 0, 0, 0]);


@ -30,8 +30,16 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
.ty
.discriminant_for_variant(fx.tcx, variant_index)
.unwrap()
.val
.into();
.val;
let to = if ptr.layout().abi.is_signed() {
ty::ScalarInt::try_from_int(
ptr.layout().size.sign_extend(to) as i128,
ptr.layout().size,
)
.unwrap()
} else {
ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
};
let discr = CValue::const_val(fx, ptr.layout(), to);
ptr.write_cvalue(fx, discr);
}
@ -49,8 +57,12 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
if variant_index != dataful_variant {
let niche = place.place_field(fx, mir::Field::new(tag_field));
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = u128::from(niche_value).wrapping_add(niche_start);
let niche_llval = CValue::const_val(fx, niche.layout(), niche_value.into());
let niche_value = ty::ScalarInt::try_from_uint(
u128::from(niche_value).wrapping_add(niche_start),
niche.layout().size,
)
.unwrap();
let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
niche.write_cvalue(fx, niche_llval);
}
}
@ -78,7 +90,16 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
.ty
.discriminant_for_variant(fx.tcx, *index)
.map_or(u128::from(index.as_u32()), |discr| discr.val);
return CValue::const_val(fx, dest_layout, discr_val.into());
let discr_val = if dest_layout.abi.is_signed() {
ty::ScalarInt::try_from_int(
dest_layout.size.sign_extend(discr_val) as i128,
dest_layout.size,
)
.unwrap()
} else {
ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
};
return CValue::const_val(fx, dest_layout, discr_val);
}
Variants::Multiple {
tag,
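Discriminant values are now wrapped in a `ty::ScalarInt` sized to the tag's layout, sign-extending first when the ABI is signed. The sign-extension step is ordinary bit arithmetic; a minimal sketch of what `Size::sign_extend` boils down to (helper name and signature are illustrative, not rustc's):

```rust
// Sign-extend the low `size_in_bytes * 8` bits of `value` to a full i128:
// shift the value's sign bit to the top, then arithmetic-shift back down.
fn sign_extend(value: u128, size_in_bytes: u32) -> i128 {
    let bits = size_in_bytes * 8;
    assert!(bits > 0 && bits <= 128);
    let shift = 128 - bits;
    ((value << shift) as i128) >> shift
}

fn main() {
    assert_eq!(sign_extend(0xFF, 1), -1);   // 0xFF as an i8 is -1
    assert_eq!(sign_extend(0x7F, 1), 127);  // positive values are unchanged
    assert_eq!(sign_extend(0xFFFE, 2), -2); // 0xFFFE as an i16 is -2
}
```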


@ -145,7 +145,11 @@ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodege
}
let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
super::codegen_mono_items(&mut cx, mono_items);
super::predefine_mono_items(&mut cx, &mono_items);
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
super::codegen_mono_item(&mut cx, mono_item, linkage);
}
let (mut module, global_asm, debug, mut unwind_context) =
tcx.sess.time("finalize CodegenCx", || cx.finalize());
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);


@ -70,7 +70,11 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
let (mut jit_module, global_asm, _debug, mut unwind_context) =
super::time(tcx, "codegen mono items", || {
super::codegen_mono_items(&mut cx, mono_items);
super::predefine_mono_items(&mut cx, &mono_items);
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
super::codegen_mono_item(&mut cx, mono_item, linkage);
}
tcx.sess.time("finalize CodegenCx", || cx.finalize())
});
if !global_asm.is_empty() {
@ -81,11 +85,11 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
tcx.sess.abort_if_errors();
let jit_product = jit_module.finish();
jit_module.finalize_definitions();
let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_product) };
let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_module) };
let finalized_main: *const u8 = jit_product.lookup_func(main_func_id);
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
println!("Rustc codegen cranelift will JIT run the executable, because --jit was passed");
@ -140,11 +144,11 @@ fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
let mut imported_symbols = Vec::new();
for path in dylib_paths {
use object::Object;
use object::{Object, ObjectSymbol};
let lib = libloading::Library::new(&path).unwrap();
let obj = std::fs::read(path).unwrap();
let obj = object::File::parse(&obj).unwrap();
imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
let name = symbol.name().unwrap().to_string();
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;


@ -1,4 +1,4 @@
//! Drivers are responsible for calling [`codegen_mono_items`] and performing any further actions
//! Drivers are responsible for calling [`codegen_mono_item`] and performing any further actions
//! like JIT executing or writing object files.
use std::any::Any;
@ -40,12 +40,12 @@ pub(crate) fn codegen_crate(
aot::run_aot(tcx, metadata, need_metadata_module)
}
fn codegen_mono_items<'tcx>(
fn predefine_mono_items<'tcx>(
cx: &mut crate::CodegenCx<'tcx, impl Module>,
mono_items: Vec<(MonoItem<'tcx>, (RLinkage, Visibility))>,
mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
) {
cx.tcx.sess.time("predefine functions", || {
for &(mono_item, (linkage, visibility)) in &mono_items {
for &(mono_item, (linkage, visibility)) in mono_items {
match mono_item {
MonoItem::Fn(instance) => {
let (name, sig) = get_function_name_and_sig(
@ -61,11 +61,6 @@ fn codegen_mono_items<'tcx>(
}
}
});
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
codegen_mono_item(cx, mono_item, linkage);
}
}
fn codegen_mono_item<'tcx, M: Module>(
@ -73,20 +68,15 @@ fn codegen_mono_item<'tcx, M: Module>(
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
let tcx = cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
crate::PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name));
debug_assert!(!inst.substs.needs_infer());
tcx.sess
cx.tcx
.sess
.time("codegen fn", || crate::base::codegen_fn(cx, inst, linkage));
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::Static(def_id) => crate::constant::codegen_static(&mut cx.constants_cx, def_id),
MonoItem::GlobalAsm(hir_id) => {
let item = tcx.hir().expect_item(hir_id);
let item = cx.tcx.hir().expect_item(hir_id);
if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
cx.global_asm.push_str(&*asm.as_str());
cx.global_asm.push_str("\n\n");


@ -263,6 +263,48 @@ fn simd_pair_for_each_lane<'tcx, M: Module>(
}
}
fn simd_reduce<'tcx, M: Module>(
fx: &mut FunctionCx<'_, 'tcx, M>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, val.layout());
assert_eq!(lane_layout, ret.layout());
let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
for lane_idx in 1..lane_count {
let lane = val
.value_field(fx, mir::Field::new(lane_idx.into()))
.load_scalar(fx);
res_val = f(fx, lane_layout, res_val, lane);
}
let res = CValue::by_val(res_val, lane_layout);
ret.write_cvalue(fx, res);
}
fn simd_reduce_bool<'tcx, M: Module>(
fx: &mut FunctionCx<'_, 'tcx, M>,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, Value, Value) -> Value,
) {
let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, val.layout());
assert!(ret.layout().ty.is_bool());
let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
for lane_idx in 1..lane_count {
let lane = val
.value_field(fx, mir::Field::new(lane_idx.into()))
.load_scalar(fx);
let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
res_val = f(fx, res_val, lane);
}
let res = CValue::by_val(res_val, ret.layout());
ret.write_cvalue(fx, res);
}
fn bool_to_zero_or_max_uint<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Module>,
layout: TyAndLayout<'tcx>,
@ -287,7 +329,7 @@ fn bool_to_zero_or_max_uint<'tcx>(
}
macro simd_cmp {
($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
let vector_ty = clif_vector_type($fx.tcx, $x.layout());
if let Some(vector_ty) = vector_ty {
@ -308,6 +350,7 @@ macro simd_cmp {
|fx, lane_layout, res_lane_layout, x_lane, y_lane| {
let res_lane = match lane_layout.ty.kind() {
ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
_ => unreachable!("{:?}", lane_layout.ty),
};
bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
@ -315,7 +358,7 @@ macro simd_cmp {
);
}
},
($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
// FIXME use vector icmp when possible
simd_pair_for_each_lane(
$fx,
@ -326,6 +369,7 @@ macro simd_cmp {
let res_lane = match lane_layout.ty.kind() {
ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
_ => unreachable!("{:?}", lane_layout.ty),
};
bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
@ -497,12 +541,12 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
};
copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
let elem_size = fx
.bcx
.ins()
.iconst(fx.pointer_type, elem_size as i64);
assert_eq!(args.len(), 3);
let byte_amount = fx.bcx.ins().imul(count, elem_size);
let byte_amount = if elem_size != 1 {
fx.bcx.ins().imul_imm(count, elem_size as i64)
} else {
count
};
if intrinsic.contains("nonoverlapping") {
// FIXME emit_small_memcpy
@ -515,12 +559,12 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
// NOTE: the volatile variants have src and dst swapped
volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
let elem_size = fx
.bcx
.ins()
.iconst(fx.pointer_type, elem_size as i64);
assert_eq!(args.len(), 3);
let byte_amount = fx.bcx.ins().imul(count, elem_size);
let byte_amount = if elem_size != 1 {
fx.bcx.ins().imul_imm(count, elem_size as i64)
} else {
count
};
// FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
if intrinsic.contains("nonoverlapping") {
@ -676,7 +720,11 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
offset | arith_offset, (c base, v offset) {
let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
let ptr_diff = if pointee_size != 1 {
fx.bcx.ins().imul_imm(offset, pointee_size as i64)
} else {
offset
};
let base_val = base.load_scalar(fx);
let res = fx.bcx.ins().iadd(base_val, ptr_diff);
ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
@ -688,7 +736,11 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
write_bytes | volatile_set_memory, (c dst, v val, v count) {
let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
let count = if pointee_size != 1 {
fx.bcx.ins().imul_imm(count, pointee_size as i64)
} else {
count
};
let dst_ptr = dst.load_scalar(fx);
// FIXME make the memset actually volatile when switching to emit_small_memset
// FIXME use emit_small_memset
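A small pattern repeats in the copy, offset, and write_bytes hunks above: the element-size multiply is only emitted when the size isn't 1, so byte-sized accesses pass the count through untouched (and a constant multiplier uses `imul_imm` instead of materializing an `iconst`). The scalar logic, stripped of the IR builder:

```rust
// Plain-Rust shape of the scaling used above: skip the multiply when the
// element size is 1 byte (illustrative; the real code emits `imul_imm`).
fn scaled_amount(count: u64, elem_size: u64) -> u64 {
    if elem_size == 1 { count } else { count * elem_size }
}

fn main() {
    assert_eq!(scaled_amount(16, 1), 16); // u8 copy: count already is the byte amount
    assert_eq!(scaled_amount(16, 4), 64); // u32 copy: 16 elements -> 64 bytes
}
```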


@ -35,30 +35,33 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
};
// FIXME support float comparisons
simd_eq, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, Equal(x, y) -> ret);
simd_cmp!(fx, Equal|Equal(x, y) -> ret);
};
simd_ne, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, NotEqual(x, y) -> ret);
simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
};
simd_lt, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, UnsignedLessThan|SignedLessThan(x, y) -> ret);
simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
};
simd_le, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
};
simd_gt, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
};
simd_ge, (c x, c y) {
validate_simd_type!(fx, intrinsic, span, x.layout().ty);
simd_cmp!(fx, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
simd_cmp!(
fx,
UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
(x, y) -> ret
);
};
// simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
@ -107,9 +110,9 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
for (out_idx, in_idx) in indexes.into_iter().enumerate() {
let in_lane = if in_idx < lane_count {
x.value_field(fx, mir::Field::new(in_idx.try_into().unwrap()))
x.value_field(fx, mir::Field::new(in_idx.into()))
} else {
y.value_field(fx, mir::Field::new((in_idx - lane_count).try_into().unwrap()))
y.value_field(fx, mir::Field::new((in_idx - lane_count).into()))
};
let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
out_lane.write_cvalue(fx, in_lane);
@ -143,10 +146,17 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
idx_const
} else {
fx.tcx.sess.span_fatal(
fx.tcx.sess.span_warn(
span,
"Index argument for `simd_extract` is not a constant",
);
let res = crate::trap::trap_unimplemented_ret_value(
fx,
ret.layout(),
"Index argument for `simd_extract` is not a constant",
);
ret.write_cvalue(fx, res);
return;
};
let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
@ -207,7 +217,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
assert_eq!(lane_count, ret_lane_count);
for lane in 0..lane_count {
let lane = mir::Field::new(lane.try_into().unwrap());
let lane = mir::Field::new(lane.into());
let a_lane = a.value_field(fx, lane).load_scalar(fx);
let b_lane = b.value_field(fx, lane).load_scalar(fx);
let c_lane = c.value_field(fx, lane).load_scalar(fx);
@ -228,11 +238,42 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
simd_flt_binop!(fx, fmax(x, y) -> ret);
};
simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
if lane_layout.ty.is_floating_point() {
fx.bcx.ins().fadd(a, b)
} else {
fx.bcx.ins().iadd(a, b)
}
});
};
simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
if lane_layout.ty.is_floating_point() {
fx.bcx.ins().fmul(a, b)
} else {
fx.bcx.ins().imul(a, b)
}
});
};
simd_reduce_all, (c v) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
};
simd_reduce_any, (c v) {
validate_simd_type!(fx, intrinsic, span, v.layout().ty);
simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
};
// simd_fabs
// simd_saturating_add
// simd_bitmask
// simd_select
// simd_reduce_add_{,un}ordered
// simd_rem
}
}
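The new `simd_reduce_*` intrinsics all lean on the `simd_reduce`/`simd_reduce_bool` helpers added earlier in this commit, which fold lane 0 with each remaining lane. Stripped of the codegen machinery, that horizontal reduction is just a fold; a sketch:

```rust
// Fold lane 0 with every remaining lane using `f`, as the reduce helpers do.
fn reduce_lanes<T: Copy>(lanes: &[T], f: impl Fn(T, T) -> T) -> T {
    let mut acc = lanes[0];
    for &lane in &lanes[1..] {
        acc = f(acc, lane);
    }
    acc
}

fn main() {
    // simd_reduce_add over an integer vector
    assert_eq!(reduce_lanes(&[1u32, 2, 3, 4], |a, b| a + b), 10);
    // simd_reduce_all over a boolean vector (lanes masked to 0 or 1 first)
    assert_eq!(reduce_lanes(&[1u8, 1, 0, 1], |a, b| a & b), 0);
}
```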


@ -67,3 +67,15 @@ pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, impl Module>, msg:
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
}
/// Like `trap_unimplemented` but returns a fake value of the specified type.
///
/// Trap code: user65535
pub(crate) fn trap_unimplemented_ret_value<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Module>,
dest_layout: TyAndLayout<'tcx>,
msg: impl AsRef<str>,
) -> CValue<'tcx> {
trap_unimplemented(fx, msg);
CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
}