rust/compiler/rustc_codegen_ssa/src/mir/rvalue.rs

use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
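/// Codegens `rvalue` and stores the result into the memory-backed destination `dest`,
/// returning the (possibly updated) builder.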
pub fn codegen_rvalue(
&mut self,
mut bx: Bx,
dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
) -> Bx {
debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&mut bx, operand);
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why don't we do that yet if we don't?)
cg_operand.val.store(&mut bx, dest);
bx
}
mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
// The destination necessarily contains a fat pointer, so if
// it's a scalar pair, it's a fat pointer or newtype thereof.
if bx.cx().is_backend_scalar_pair(dest.layout) {
// Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path.
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
return bx;
}
// Unsize of a nontrivial struct. I would prefer for
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
let operand = self.codegen_operand(&mut bx, source);
match operand.val {
OperandValue::Pair(..) | OperandValue::Immediate(_) => {
// Unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use `extractvalue` to
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&mut bx, operand.layout);
scratch.storage_live(&mut bx);
operand.val.store(&mut bx, scratch);
base::coerce_unsized_into(&mut bx, scratch, dest);
scratch.storage_dead(&mut bx);
}
OperandValue::Ref(llref, None, align) => {
let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
base::coerce_unsized_into(&mut bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue");
}
}
bx
}
mir::Rvalue::Repeat(ref elem, count) => {
let cg_elem = self.codegen_operand(&mut bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
return bx;
}
if let OperandValue::Immediate(v) = cg_elem.val {
let zero = bx.const_usize(0);
let start = dest.project_index(&mut bx, zero).llval;
let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if bx.cx().const_to_opt_uint(v) == Some(0) {
let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
return bx;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = bx.from_immediate(v);
if bx.cx().val_ty(v) == bx.cx().type_i8() {
bx.memset(start, v, size, dest.align, MemFlags::empty());
return bx;
}
}
let count =
self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
bx.write_operand_repeatedly(cg_elem, count, dest)
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
dest.codegen_set_discr(&mut bx, variant_index);
if bx.tcx().adt_def(adt_did).is_enum() {
(dest.project_downcast(&mut bx, variant_index), active_field_index)
} else {
(dest, active_field_index)
}
}
_ => (dest, None),
};
for (i, operand) in operands.iter().enumerate() {
let op = self.codegen_operand(&mut bx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let field = dest.project_field(&mut bx, field_index);
op.val.store(&mut bx, field);
}
}
bx
}
_ => {
assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(&mut bx, dest);
bx
}
}
}
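/// Codegens an assignment of an unsized rvalue through `indirect_dest`, which points at the
/// actual (unsized) destination; only `Rvalue::Use` is expected here.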
pub fn codegen_rvalue_unsized(
&mut self,
mut bx: Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
) -> Bx {
debug!(
"codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
indirect_dest.llval, rvalue
);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let cg_operand = self.codegen_operand(&mut bx, operand);
cg_operand.val.store_unsized(&mut bx, indirect_dest);
bx
}
_ => bug!("unsized assignment other than `Rvalue::Use`"),
}
}
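/// Codegens `rvalue` as a new `OperandRef` instead of storing it into a place; only valid for
/// rvalues where `rvalue_creates_operand` returns `true`.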
pub fn codegen_rvalue_operand(
&mut self,
mut bx: Bx,
rvalue: &mir::Rvalue<'tcx>,
) -> (Bx, OperandRef<'tcx, Bx::Value>) {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
"cannot codegen {:?} to operand",
rvalue,
);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
let operand = self.codegen_operand(&mut bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
let val = match *kind {
mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
match *operand.layout.ty.kind() {
ty::FnDef(def_id, substs) => {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs,
)
.unwrap()
.polymorphize(bx.cx().tcx());
OperandValue::Immediate(bx.get_fn_addr(instance))
}
_ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
}
}
mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
match *operand.layout.ty.kind() {
ty::Closure(def_id, substs) => {
let instance = Instance::resolve_closure(
bx.cx().tcx(),
def_id,
substs,
ty::ClosureKind::FnOnce,
)
.polymorphize(bx.cx().tcx());
OperandValue::Immediate(bx.cx().get_fn_addr(instance))
}
_ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
}
}
mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
// This is a no-op at the LLVM level.
operand.val
}
mir::CastKind::Pointer(PointerCast::Unsize) => {
assert!(bx.cx().is_backend_scalar_pair(cast));
let (lldata, llextra) = match operand.val {
OperandValue::Pair(lldata, llextra) => {
// unsize from a fat pointer -- this is a
// "trait-object-to-supertrait" coercion.
(lldata, Some(llextra))
}
OperandValue::Immediate(lldata) => {
// "standard" unsize
(lldata, None)
}
OperandValue::Ref(..) => {
bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
}
};
let (lldata, llextra) =
base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
| mir::CastKind::Misc
if bx.cx().is_backend_scalar_pair(operand.layout) =>
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if bx.cx().is_backend_scalar_pair(cast) {
let data_cast = bx.pointercast(
data_ptr,
bx.cx().scalar_pair_element_backend_type(cast, 0, true),
);
OperandValue::Pair(data_cast, meta)
} else {
// cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
// pointer-cast of that pointer to desired pointer type.
let llcast_ty = bx.cx().immediate_backend_type(cast);
let llval = bx.pointercast(data_ptr, llcast_ty);
OperandValue::Immediate(llval)
}
} else {
bug!("unexpected non-pair operand");
}
}
mir::CastKind::Pointer(
PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
)
| mir::CastKind::Misc => {
assert!(bx.cx().is_backend_immediate(cast));
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
return (bx, OperandRef { val, layout: cast });
}
let r_t_in =
CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
match operand.layout.variants {
Variants::Single { index } => {
if let Some(discr) =
operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
{
let discr_layout = bx.cx().layout_of(discr.ty);
let discr_t = bx.cx().immediate_backend_type(discr_layout);
let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
let discr_val =
bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());
return (
bx,
OperandRef {
val: OperandValue::Immediate(discr_val),
layout: cast,
},
);
}
}
Variants::Multiple { .. } => {}
}
let llval = operand.immediate();
let mut signed = false;
if let Abi::Scalar(scalar) = operand.layout.abi {
if let Int(_, s) = scalar.primitive() {
// We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because
// then `i1 1` (i.e., E::B) is effectively `i8 -1`.
signed = !scalar.is_bool() && s;
if !scalar.is_always_valid(bx.cx())
&& scalar.valid_range(bx.cx()).end
>= scalar.valid_range(bx.cx()).start
{
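// (The `end >= start` check above excludes wrap-around valid ranges, where asserting
// both bounds with `assume` would be incorrect.)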
// We want `table[e as usize ± k]` to not
// have bound checks, and this is the most
// convenient place to put the `assume`s.
if scalar.valid_range(bx.cx()).start > 0 {
let enum_value_lower_bound = bx.cx().const_uint_big(
ll_t_in,
scalar.valid_range(bx.cx()).start,
);
let cmp_start = bx.icmp(
IntPredicate::IntUGE,
llval,
enum_value_lower_bound,
);
bx.assume(cmp_start);
}
let enum_value_upper_bound = bx
.cx()
.const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
let cmp_end = bx.icmp(
IntPredicate::IntULE,
llval,
enum_value_upper_bound,
);
bx.assume(cmp_end);
}
}
}
let newval = match (r_t_in, r_t_out) {
(CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
(CastTy::Float, CastTy::Float) => {
let srcsz = bx.cx().float_width(ll_t_in);
let dstsz = bx.cx().float_width(ll_t_out);
if dstsz > srcsz {
bx.fpext(llval, ll_t_out)
} else if srcsz > dstsz {
bx.fptrunc(llval, ll_t_out)
} else {
llval
}
}
(CastTy::Int(_), CastTy::Float) => {
if signed {
bx.sitofp(llval, ll_t_out)
} else {
bx.uitofp(llval, ll_t_out)
}
}
(CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
bx.pointercast(llval, ll_t_out)
}
(CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
bx.ptrtoint(llval, ll_t_out)
}
(CastTy::Int(_), CastTy::Ptr(_)) => {
let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
bx.inttoptr(usize_llval, ll_t_out)
}
(CastTy::Float, CastTy::Int(IntTy::I)) => {
bx.cast_float_to_int(true, llval, ll_t_out)
}
(CastTy::Float, CastTy::Int(_)) => {
bx.cast_float_to_int(false, llval, ll_t_out)
}
_ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
};
OperandValue::Immediate(newval)
}
};
(bx, OperandRef { val, layout: cast })
}
mir::Rvalue::Ref(_, bk, place) => {
let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
tcx.mk_ref(
tcx.lifetimes.re_erased,
ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
)
};
self.codegen_place_to_pointer(bx, place, mk_ref)
}
mir::Rvalue::AddressOf(mutability, place) => {
let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
};
self.codegen_place_to_pointer(bx, place, mk_ptr)
}
mir::Rvalue::Len(place) => {
let size = self.evaluate_array_len(&mut bx, place);
let operand = OperandRef {
val: OperandValue::Immediate(size),
layout: bx.cx().layout_of(bx.tcx().types.usize),
};
(bx, operand)
}
mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(
OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra),
) => self.codegen_fat_ptr_binop(
&mut bx,
op,
lhs_addr,
lhs_extra,
rhs_addr,
rhs_extra,
lhs.layout.ty,
),
(OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
}
_ => bug!(),
};
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
};
(bx, operand)
}
mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
let lhs = self.codegen_operand(&mut bx, lhs);
let rhs = self.codegen_operand(&mut bx, rhs);
let result = self.codegen_scalar_checked_binop(
&mut bx,
op,
lhs.immediate(),
rhs.immediate(),
lhs.layout.ty,
);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
(bx, operand)
}
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.codegen_operand(&mut bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_floating_point();
let llval = match op {
mir::UnOp::Not => bx.not(lloperand),
mir::UnOp::Neg => {
if is_float {
bx.fneg(lloperand)
} else {
bx.neg(lloperand)
}
}
};
(bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
}
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(self.mir, bx.tcx());
let discr_ty = self.monomorphize(discr_ty);
let discr = self
.codegen_place(&mut bx, place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
(
bx,
OperandRef {
val: OperandValue::Immediate(discr),
layout: self.cx.layout_of(discr_ty),
},
)
}
mir::Rvalue::NullaryOp(null_op, ty) => {
let ty = self.monomorphize(ty);
assert!(bx.cx().type_is_sized(ty));
let layout = bx.cx().layout_of(ty);
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
};
let val = bx.cx().const_usize(val);
let tcx = self.cx.tcx();
(
bx,
OperandRef {
val: OperandValue::Immediate(val),
layout: self.cx.layout_of(tcx.types.usize),
},
)
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
let static_ = bx.get_static(def_id);
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
(bx, operand)
}
mir::Rvalue::Use(ref operand) => {
let operand = self.codegen_operand(&mut bx, operand);
(bx, operand)
}
mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.cx.tcx());
let operand =
OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
(bx, operand)
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
let operand = self.codegen_operand(&mut bx, operand);
let lloperand = operand.immediate();
let content_ty = self.monomorphize(content_ty);
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = bx.cx().backend_type(box_layout);
let val = bx.pointercast(lloperand, llty_ptr);
let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
(bx, operand)
}
}
}
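/// Returns the length of the array or slice behind `place` as a `usize`-typed value.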
fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
// ZSTs are passed as operands and require special handling
// because codegen_place() panics if a Local is an operand.
if let Some(index) = place.as_local() {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.kind() {
let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
return bx.cx().const_usize(n);
}
}
}
// use common size calculation for non-zero-sized types
let cg_value = self.codegen_place(bx, place.as_ref());
cg_value.len(bx.cx())
}
/// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
fn codegen_place_to_pointer(
&mut self,
mut bx: Bx,
place: mir::Place<'tcx>,
mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
) -> (Bx, OperandRef<'tcx, Bx::Value>) {
let cg_place = self.codegen_place(&mut bx, place.as_ref());
let ty = cg_place.layout.ty;
// Note: places are indirect, so storing the `llval` into the
// destination effectively creates a reference.
let val = if !bx.cx().type_has_metadata(ty) {
OperandValue::Immediate(cg_place.llval)
} else {
OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
};
(bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
}
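/// Codegens an unchecked binary operation on scalar immediates; `input_ty` is the LHS type
/// and selects between the float, signed, and unsigned instruction variants.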
pub fn codegen_scalar_binop(
&mut self,
bx: &mut Bx,
op: mir::BinOp,
lhs: Bx::Value,
rhs: Bx::Value,
input_ty: Ty<'tcx>,
) -> Bx::Value {
let is_float = input_ty.is_floating_point();
let is_signed = input_ty.is_signed();
match op {
mir::BinOp::Add => {
if is_float {
bx.fadd(lhs, rhs)
} else {
bx.add(lhs, rhs)
}
}
mir::BinOp::Sub => {
if is_float {
bx.fsub(lhs, rhs)
} else {
bx.sub(lhs, rhs)
}
}
mir::BinOp::Mul => {
if is_float {
bx.fmul(lhs, rhs)
} else {
bx.mul(lhs, rhs)
}
}
mir::BinOp::Div => {
if is_float {
bx.fdiv(lhs, rhs)
} else if is_signed {
bx.sdiv(lhs, rhs)
} else {
bx.udiv(lhs, rhs)
}
}
mir::BinOp::Rem => {
if is_float {
bx.frem(lhs, rhs)
} else if is_signed {
bx.srem(lhs, rhs)
} else {
bx.urem(lhs, rhs)
}
}
mir::BinOp::BitOr => bx.or(lhs, rhs),
mir::BinOp::BitAnd => bx.and(lhs, rhs),
mir::BinOp::BitXor => bx.xor(lhs, rhs),
mir::BinOp::Offset => {
let pointee_type = input_ty
.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
.ty;
let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
bx.inbounds_gep(llty, lhs, &[rhs])
}
mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
mir::BinOp::Ne
| mir::BinOp::Lt
| mir::BinOp::Gt
| mir::BinOp::Eq
| mir::BinOp::Le
| mir::BinOp::Ge => {
if is_float {
bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
} else {
bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
}
}
}
}
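/// Codegens comparison operators on fat pointers by comparing the data pointers and,
/// where needed, the extra (metadata) components.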
pub fn codegen_fat_ptr_binop(
&mut self,
bx: &mut Bx,
op: mir::BinOp,
lhs_addr: Bx::Value,
lhs_extra: Bx::Value,
rhs_addr: Bx::Value,
rhs_extra: Bx::Value,
_input_ty: Ty<'tcx>,
) -> Bx::Value {
match op {
mir::BinOp::Eq => {
let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
bx.and(lhs, rhs)
}
mir::BinOp::Ne => {
let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
bx.or(lhs, rhs)
}
mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
// a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
let (op, strict_op) = match op {
mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
_ => bug!(),
};
let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
let rhs = bx.and(and_lhs, and_rhs);
bx.or(lhs, rhs)
}
_ => {
bug!("unexpected fat ptr binop");
}
}
}
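/// Codegens an overflow-checked binary operation, yielding an `OperandValue::Pair` of the
/// result value and an overflow flag.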
pub fn codegen_scalar_checked_binop(
&mut self,
bx: &mut Bx,
op: mir::BinOp,
lhs: Bx::Value,
rhs: Bx::Value,
input_ty: Ty<'tcx>,
) -> OperandValue<Bx::Value> {
// This case can currently arise only from functions marked
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
// while the current crate doesn't use overflow checks.
if !bx.cx().check_overflow() {
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
return OperandValue::Pair(val, bx.cx().const_bool(false));
}
let (val, of) = match op {
// These are checked using intrinsics
mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
let oop = match op {
mir::BinOp::Add => OverflowOp::Add,
mir::BinOp::Sub => OverflowOp::Sub,
mir::BinOp::Mul => OverflowOp::Mul,
_ => unreachable!(),
};
bx.checked_binop(oop, input_ty, lhs, rhs)
}
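// A shift overflows when the shift amount has bits set outside the valid range for the
// value's width; detect that by masking the amount with the inverted shift mask.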
mir::BinOp::Shl | mir::BinOp::Shr => {
let lhs_llty = bx.cx().val_ty(lhs);
let rhs_llty = bx.cx().val_ty(rhs);
let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
let outer_bits = bx.and(rhs, invert_mask);
let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
(val, of)
}
_ => bug!("Operator `{:?}` is not a checkable operator", op),
};
OperandValue::Pair(val, of)
}
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
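/// Returns `true` if `rvalue` can be codegenned directly as an `OperandRef`, without first
/// writing it to a temporary place; see `codegen_rvalue_operand`.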
pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
mir::Rvalue::AddressOf(..) |
mir::Rvalue::Len(..) |
mir::Rvalue::Cast(..) | // (*)
mir::Rvalue::ShallowInitBox(..) | // (*)
mir::Rvalue::BinaryOp(..) |
mir::Rvalue::CheckedBinaryOp(..) |
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Discriminant(..) |
mir::Rvalue::NullaryOp(..) |
mir::Rvalue::ThreadLocalRef(_) |
mir::Rvalue::Use(..) => // (*)
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(ty);
self.cx.spanned_layout_of(ty, span).is_zst()
}
}
// (*) this is only true if the type is suitable
}
}