use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

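                // General case: emit an explicit loop that stores `cg_elem`
                // into each element of `dest`, `count` times.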
                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

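            // Every other rvalue is built as an operand first and then stored
            // into the destination place.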
            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
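                                // A `FnDef` is a zero-sized value; reifying it means resolving
                                // the concrete `Instance` and materializing its code address as
                                // a function pointer.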
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
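                        // Compute the new data pointer and unsized metadata (length or
                        // vtable) for the target type.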
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                if !scalar.is_always_valid_for(bx.cx())
                                    && scalar.valid_range.end >= scalar.valid_range.start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
                                    if scalar.valid_range.start > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, scalar.valid_range.start);
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
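                // `Box` allocates through the `exchange_malloc` lang item, which
                // takes the size and alignment of the contents.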
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let ty = bx.type_func(&[bx.type_isize(), bx.type_isize()], bx.type_i8p());
                let call = bx.call(ty, r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
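                // i.e. compare the data pointers first; the extra component
                // (length or vtable pointer) only breaks ties when the data
                // pointers are equal.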
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
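                // A shift overflows when the shift amount has bits set outside the
                // valid range for the LHS width, so mask those bits out and flag
                // overflow if any of them were set.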
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

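    // If the backend can emit a saturating float-to-int cast directly (e.g. via
    // an `fpto[su]i.sat` intrinsic), prefer that over the manual clamping
    // sequence built below.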
    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
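    // Helpers returning int_ty::MAX and int_ty::MIN as 128-bit values for the
    // given width and signedness.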
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

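    // Materialize a clamp bound, given as raw bits, as a constant of the source
    // float type.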
    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0.0, otherwise return the result of step 2.
    //
    // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
    // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
    // undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    // Step 1 ...
    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

    // Step 2: We use two comparisons and two selects, with %s1 being the
    // result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This
    // comparison is true if the operands are not comparable (i.e., if x is
    // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
    // x is NaN.
    //
    // Performance note: Unordered comparison can be lowered to a "flipped"
    // comparison and a negation, and the negation can be merged into the
    // select. Therefore, it is not necessarily any more expensive than an
    // ordered ("normal") comparison. Whether these optimizations will be
    // performed is ultimately up to the backend, but at least x86 does
    // perform them.
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead
        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
        bx.select(cmp, s1, zero)
    } else {
        s1
    }
}