
interpret: make overflowing binops just normal binops

Ralf Jung 2024-05-21 12:17:34 +02:00
parent 9cb6bb8599
commit c0b4b454c3
40 changed files with 323 additions and 349 deletions
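The gist of the change, in standalone form: instead of every interpreter binop returning a (value, overflow-flag) tuple, only the `*WithOverflow` MIR operators report overflow, and they do so by producing a single pair immediate. A minimal sketch of that shape in plain Rust (the `Value` enum is a stand-in for the interpreter's `ImmTy`, not a real rustc type):

    // `Value` is a stand-in for the interpreter's `ImmTy`; the enum mirrors
    // "scalar" vs. "scalar pair" immediates.
    #[derive(Debug, PartialEq)]
    enum Value {
        Scalar(u128),
        Pair(u128, bool), // (wrapped result, overflow flag)
    }

    #[derive(Clone, Copy)]
    enum BinOp {
        Add,             // plain/wrapping add: yields Value::Scalar
        AddWithOverflow, // checked add: yields Value::Pair
    }

    // One entry point for both flavors, as after this commit, instead of
    // separate `wrapping_binary_op` / `overflowing_binary_op` functions.
    fn binary_op(op: BinOp, l: u128, r: u128) -> Value {
        let (res, overflow) = l.overflowing_add(r);
        match op {
            BinOp::Add => Value::Scalar(res),
            BinOp::AddWithOverflow => Value::Pair(res, overflow),
        }
    }

    fn main() {
        assert_eq!(binary_op(BinOp::Add, u128::MAX, 1), Value::Scalar(0));
        assert_eq!(binary_op(BinOp::AddWithOverflow, u128::MAX, 1), Value::Pair(0, true));
    }

Callers that need the flag destructure the pair (cf. `to_scalar_pair()` in the hunks below); everyone else just gets a scalar.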


@@ -125,7 +125,7 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
         bin_op: BinOp,
         left: &interpret::ImmTy<'tcx, Self::Provenance>,
         right: &interpret::ImmTy<'tcx, Self::Provenance>,
-    ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
+    ) -> interpret::InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
         use rustc_middle::mir::BinOp::*;
         Ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
@@ -154,7 +154,7 @@ impl<'mir, 'tcx: 'mir> interpret::Machine<'mir, 'tcx> for DummyMachine {
                     Ge => left >= right,
                     _ => bug!(),
                 };
-                (ImmTy::from_bool(res, *ecx.tcx), false)
+                ImmTy::from_bool(res, *ecx.tcx)
             }
             // Some more operations are possible with atomics.


@@ -589,7 +589,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         _bin_op: mir::BinOp,
         _left: &ImmTy<'tcx>,
         _right: &ImmTy<'tcx>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx>> {
         throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
     }


@@ -1,5 +1,6 @@
 use std::borrow::Cow;
 
+use either::Either;
 use rustc_errors::{
     codes::*, Diag, DiagArgValue, DiagCtxt, DiagMessage, Diagnostic, EmissionGuarantee, Level,
 };
@@ -481,6 +482,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             DivisionOverflow => const_eval_division_overflow,
             RemainderOverflow => const_eval_remainder_overflow,
             PointerArithOverflow => const_eval_pointer_arithmetic_overflow,
+            ArithOverflow { .. } => const_eval_overflow_arith,
+            ShiftOverflow { .. } => const_eval_overflow_shift,
             InvalidMeta(InvalidMetaKind::SliceTooBig) => const_eval_invalid_meta_slice,
             InvalidMeta(InvalidMetaKind::TooBig) => const_eval_invalid_meta,
             UnterminatedCString(_) => const_eval_unterminated_c_string,
@@ -539,6 +542,19 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             | UninhabitedEnumVariantWritten(_)
             | UninhabitedEnumVariantRead(_) => {}
+            ArithOverflow { intrinsic } => {
+                diag.arg("intrinsic", intrinsic);
+            }
+            ShiftOverflow { intrinsic, shift_amount } => {
+                diag.arg("intrinsic", intrinsic);
+                diag.arg(
+                    "shift_amount",
+                    match shift_amount {
+                        Either::Left(v) => v.to_string(),
+                        Either::Right(v) => v.to_string(),
+                    },
+                );
+            }
             BoundsCheckFailed { len, index } => {
                 diag.arg("len", len);
                 diag.arg("index", index);


@@ -172,7 +172,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                 let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                 let variant_index_relative_val =
-                    self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                    self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                 let variant_index_relative =
                     variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
                 // Check if this is in the range that indicates an actual discriminant.
@@ -292,11 +292,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let variant_index_relative_val =
                     ImmTy::from_uint(variant_index_relative, tag_layout);
                 let tag = self
-                    .wrapping_binary_op(
-                        mir::BinOp::Add,
-                        &variant_index_relative_val,
-                        &niche_start_val,
-                    )?
+                    .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
                     .to_scalar()
                     .assert_int();
                 Ok(Some((tag, tag_field)))
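Both niche-discriminant hunks rely on wrapping arithmetic: `tag - niche_start` and `variant_index_relative + niche_start` are inverses modulo the tag width, which is why the plain (wrapping) `Sub`/`Add` binops suffice here. A standalone sketch of that round trip on `u8` tags (illustrative values, not rustc's API):

    // Round trip between a stored tag and the relative variant index, using
    // wrapping arithmetic precisely because the tag may wrap around the
    // type's range (shown on u8; the interpreter handles arbitrary widths).
    fn tag_to_relative(tag: u8, niche_start: u8) -> u8 {
        tag.wrapping_sub(niche_start)
    }

    fn relative_to_tag(relative: u8, niche_start: u8) -> u8 {
        relative.wrapping_add(niche_start)
    }

    fn main() {
        let niche_start = 250u8;
        for relative in 0..=10u8 {
            let tag = relative_to_tag(relative, niche_start); // wraps past 255
            assert_eq!(tag_to_relative(tag, niche_start), relative);
        }
    }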


@@ -285,9 +285,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (val, overflowed) = {
                     let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                     let b_offset = ImmTy::from_uint(b_offset, usize_layout);
-                    self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
+                    self.binary_op(BinOp::SubWithOverflow, &a_offset, &b_offset)?
+                        .to_scalar_pair()
                 };
-                if overflowed {
+                if overflowed.to_bool()? {
                     // a < b
                     if intrinsic_name == sym::ptr_offset_from_unsigned {
                         throw_ub_custom!(
@@ -299,7 +300,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // The signed form of the intrinsic allows this. If we interpret the
                     // difference as isize, we'll get the proper signed difference. If that
                     // seems *positive*, they were more than isize::MAX apart.
-                    let dist = val.to_scalar().to_target_isize(self)?;
+                    let dist = val.to_target_isize(self)?;
                     if dist >= 0 {
                         throw_ub_custom!(
                             fluent::const_eval_offset_from_underflow,
@@ -309,7 +310,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     dist
                 } else {
                     // b >= a
-                    let dist = val.to_scalar().to_target_isize(self)?;
+                    let dist = val.to_target_isize(self)?;
                     // If converting to isize produced a *negative* result, we had an overflow
                     // because they were more than isize::MAX apart.
                     if dist < 0 {
@@ -515,9 +516,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
-        let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
-        assert!(!overflow); // All overflow is UB, so this should never return on overflow.
-        if res.to_scalar().assert_bits(a.layout.size) != 0 {
+        let rem = self.binary_op(BinOp::Rem, a, b)?;
+        if rem.to_scalar().assert_bits(a.layout.size) != 0 {
             throw_ub_custom!(
                 fluent::const_eval_exact_div_has_remainder,
                 a = format!("{a}"),
@@ -525,7 +525,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             )
         }
         // `Rem` says this is all right, so we can let `Div` do its job.
-        self.binop_ignore_overflow(BinOp::Div, a, b, &dest.clone().into())
+        let res = self.binary_op(BinOp::Div, a, b)?;
+        self.write_immediate(*res, dest)
     }
 
     pub fn saturating_arith(
@@ -538,8 +539,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         assert!(matches!(l.layout.ty.kind(), ty::Int(..) | ty::Uint(..)));
         assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
 
-        let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
-        Ok(if overflowed {
+        let (val, overflowed) =
+            self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
+        Ok(if overflowed.to_bool()? {
             let size = l.layout.size;
             let num_bits = size.bits();
             if l.layout.abi.is_signed() {
@@ -570,7 +572,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
             }
         } else {
-            val.to_scalar()
+            val
         })
     }
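The saturating path now asks `binary_op` for the `*WithOverflow` form of the operation (via `wrapping_to_overflowing()`), splits the resulting pair, and clamps to the type's extreme on overflow. A standalone sketch of the same policy on `i32` (not the interpreter's generic, bit-width-parametric version):

    #[derive(Clone, Copy)]
    enum MirOp {
        Add,
        Sub,
    }

    // Saturating arithmetic built on the overflowing primitive, mirroring the
    // interpreter's strategy: take the wrapped result plus overflow flag, then
    // clamp to MIN/MAX on overflow instead of using the wrapped value.
    fn saturating_arith(op: MirOp, l: i32, r: i32) -> i32 {
        let (val, overflowed) = match op {
            MirOp::Add => l.overflowing_add(r),
            MirOp::Sub => l.overflowing_sub(r),
        };
        if overflowed {
            // Signed overflow is toward MAX iff the exact result would have
            // been positive: adding a positive r, or subtracting a negative r.
            let toward_max = match op {
                MirOp::Add => r > 0,
                MirOp::Sub => r < 0,
            };
            if toward_max { i32::MAX } else { i32::MIN }
        } else {
            val
        }
    }

    fn main() {
        assert_eq!(saturating_arith(MirOp::Add, i32::MAX, 1), i32::MAX);
        assert_eq!(saturating_arith(MirOp::Sub, i32::MIN, 1), i32::MIN);
        assert_eq!(saturating_arith(MirOp::Add, 1, 2), 3);
    }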


@@ -252,7 +252,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, Self::Provenance>,
         right: &ImmTy<'tcx, Self::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
+    ) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>>;
 
     /// Generate the NaN returned by a float operation, given the list of inputs.
     /// (This is all inputs, not just NaN inputs!)


@@ -7,7 +7,7 @@
 use either::{Either, Left, Right};
 use rustc_hir::def::Namespace;
 use rustc_middle::mir::interpret::ScalarSizeMismatch;
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
 use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
@@ -249,6 +249,15 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         Self::from_scalar(Scalar::from_i8(c as i8), layout)
     }
 
+    pub fn from_pair(a: Self, b: Self, tcx: TyCtxt<'tcx>) -> Self {
+        let layout = tcx
+            .layout_of(
+                ty::ParamEnv::reveal_all().and(Ty::new_tup(tcx, &[a.layout.ty, b.layout.ty])),
+            )
+            .unwrap();
+        Self::from_scalar_pair(a.to_scalar(), b.to_scalar(), layout)
+    }
+
     /// Return the immediate as a `ScalarInt`. Ensures that it has the size that the layout of the
     /// immediate indicates.
     #[inline]
@@ -270,6 +279,17 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
     }
 
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_pair(self, cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>)) -> (Self, Self) {
+        let layout = self.layout;
+        let (val0, val1) = self.to_scalar_pair();
+        (
+            ImmTy::from_scalar(val0, layout.field(cx, 0)),
+            ImmTy::from_scalar(val1, layout.field(cx, 1)),
+        )
+    }
+
     /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
     /// given layout.
     // Not called `offset` to avoid confusion with the trait method.
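`from_pair` packages two immediates into one scalar-pair immediate laid out as a two-field tuple, and `to_pair` splits such an immediate back into its fields. A rough standalone model of that round trip (a toy `Imm` enum standing in for rustc's `Immediate`/`ImmTy`):

    // Toy model of immediates: rustc's `ImmTy` also tracks a layout, which is
    // how `to_pair` recovers the field types; here we only carry the scalars.
    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Imm {
        Scalar(u128),
        ScalarPair(u128, u128),
    }

    impl Imm {
        // Analogue of `ImmTy::from_pair`: combine two scalar immediates.
        fn from_pair(a: Imm, b: Imm) -> Imm {
            match (a, b) {
                (Imm::Scalar(x), Imm::Scalar(y)) => Imm::ScalarPair(x, y),
                _ => panic!("from_pair expects two scalars"),
            }
        }

        // Analogue of `ImmTy::to_pair`: split a pair immediate into its fields.
        fn to_pair(self) -> (Imm, Imm) {
            match self {
                Imm::ScalarPair(x, y) => (Imm::Scalar(x), Imm::Scalar(y)),
                _ => panic!("to_pair expects a scalar pair"),
            }
        }
    }

    fn main() {
        let pair = Imm::from_pair(Imm::Scalar(42), Imm::Scalar(1));
        assert_eq!(pair.to_pair(), (Imm::Scalar(42), Imm::Scalar(1)));
    }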


@@ -1,78 +1,22 @@
+use either::Either;
 use rustc_apfloat::{Float, FloatConvert};
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
+use rustc_middle::ty::{self, FloatTy, ScalarInt};
 use rustc_middle::{bug, span_bug};
 use rustc_span::symbol::sym;
-use rustc_target::abi::Abi;
 
-use super::{err_ub, throw_ub, throw_ub_custom, ImmTy, Immediate, InterpCx, Machine, PlaceTy};
-use crate::fluent_generated as fluent;
+use super::{err_ub, throw_ub, ImmTy, InterpCx, Machine};
 
 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
-    /// and a boolean signifying the potential overflow to the destination.
-    pub fn binop_with_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        let (val, overflowed) = self.overflowing_binary_op(op, left, right)?;
-        debug_assert_eq!(
-            Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
-            dest.layout.ty,
-            "type mismatch for result of {op:?}",
-        );
-        // Write the result to `dest`.
-        if let Abi::ScalarPair(..) = dest.layout.abi {
-            // We can use the optimized path and avoid `place_field` (which might do
-            // `force_allocation`).
-            let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
-            self.write_immediate(pair, dest)?;
-        } else {
-            assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
-            // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
-            // do a component-wise write here. This code path is slower than the above because
-            // `place_field` will have to `force_allocate` locals here.
-            let val_field = self.project_field(dest, 0)?;
-            self.write_scalar(val.to_scalar(), &val_field)?;
-            let overflowed_field = self.project_field(dest, 1)?;
-            self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
-        }
-        Ok(())
-    }
-
-    /// Applies the binary operation `op` to the arguments and writes the result to the
-    /// destination.
-    pub fn binop_ignore_overflow(
-        &mut self,
-        op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-        dest: &PlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        let val = self.wrapping_binary_op(op, left, right)?;
-        assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
-        self.write_immediate(*val, dest)
-    }
-}
-
-impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
         let res = Ord::cmp(&lhs, &rhs);
-        return (ImmTy::from_ordering(res, *self.tcx), false);
+        return ImmTy::from_ordering(res, *self.tcx);
     }
 
-    fn binary_char_op(
-        &self,
-        bin_op: mir::BinOp,
-        l: char,
-        r: char,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    fn binary_char_op(&self, bin_op: mir::BinOp, l: char, r: char) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         if bin_op == Cmp {
@@ -88,15 +32,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Ge => l >= r,
             _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
         };
-        (ImmTy::from_bool(res, *self.tcx), false)
+        ImmTy::from_bool(res, *self.tcx)
     }
 
-    fn binary_bool_op(
-        &self,
-        bin_op: mir::BinOp,
-        l: bool,
-        r: bool,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    fn binary_bool_op(&self, bin_op: mir::BinOp, l: bool, r: bool) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         let res = match bin_op {
@@ -111,7 +50,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             BitXor => l ^ r,
             _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
         };
-        (ImmTy::from_bool(res, *self.tcx), false)
+        ImmTy::from_bool(res, *self.tcx)
     }
 
     fn binary_float_op<F: Float + FloatConvert<F> + Into<Scalar<M::Provenance>>>(
@@ -120,14 +59,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         layout: TyAndLayout<'tcx>,
         l: F,
         r: F,
-    ) -> (ImmTy<'tcx, M::Provenance>, bool) {
+    ) -> ImmTy<'tcx, M::Provenance> {
         use rustc_middle::mir::BinOp::*;
 
         // Performs appropriate non-deterministic adjustments of NaN results.
         let adjust_nan =
             |f: F| -> F { if f.is_nan() { M::generate_nan(self, &[l, r]) } else { f } };
 
-        let val = match bin_op {
+        match bin_op {
             Eq => ImmTy::from_bool(l == r, *self.tcx),
             Ne => ImmTy::from_bool(l != r, *self.tcx),
             Lt => ImmTy::from_bool(l < r, *self.tcx),
@@ -140,8 +79,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Div => ImmTy::from_scalar(adjust_nan((l / r).value).into(), layout),
             Rem => ImmTy::from_scalar(adjust_nan((l % r).value).into(), layout),
             _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
-        };
-        (val, false)
+        }
     }
 
     fn binary_int_op(
@@ -149,7 +87,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::BinOp::*;
 
         // This checks the size, so that we can just assert it below.
@@ -169,25 +107,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             ShrUnchecked => Some(sym::unchecked_shr),
             _ => None,
         };
+        let with_overflow = bin_op.is_overflowing();
 
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
             let size = left.layout.size.bits();
-            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
-            // that does *not* directly map to a single LLVM operation.) Compute how much we
-            // actually shift and whether there was an overflow due to shifting too much.
+            // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
+            // the one MIR operator that does *not* directly map to a single LLVM operation.)
             let (shift_amount, overflow) = if right.layout.abi.is_signed() {
                 let shift_amount = r_signed();
                 let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
                 // Deliberately wrapping `as` casts: shift_amount *can* be negative, but the result
                 // of the `as` will be equal modulo `size` (since it is a power of two).
                 let masked_amount = (shift_amount as u128) % u128::from(size);
-                assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                assert_eq!(overflow, shift_amount != i128::try_from(masked_amount).unwrap());
                 (masked_amount, overflow)
             } else {
                 let shift_amount = r_unsigned();
+                let overflow = shift_amount >= u128::from(size);
                 let masked_amount = shift_amount % u128::from(size);
-                (masked_amount, shift_amount != masked_amount)
+                assert_eq!(overflow, shift_amount != masked_amount);
+                (masked_amount, overflow)
             };
             let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
             // Compute the shifted result.
@@ -209,19 +149,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 ScalarInt::truncate_from_uint(result, left.layout.size).0
             };
 
-            if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                throw_ub_custom!(
-                    fluent::const_eval_overflow_shift,
-                    val = if right.layout.abi.is_signed() {
-                        r_signed().to_string()
-                    } else {
-                        r_unsigned().to_string()
-                    },
-                    name = intrinsic_name
-                );
+            if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                throw_ub!(ShiftOverflow {
+                    intrinsic,
+                    shift_amount: if right.layout.abi.is_signed() {
+                        Either::Right(r_signed())
+                    } else {
+                        Either::Left(r_unsigned())
+                    }
+                });
             }
 
-            return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
+            return Ok(ImmTy::from_scalar_int(result, left.layout));
         }
 
         // For the remaining ops, the types must be the same on both sides
@@ -246,7 +185,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 _ => None,
            };
            if let Some(op) = op {
-                return Ok((ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx), false));
+                return Ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
            }
            if bin_op == Cmp {
                return Ok(self.three_way_compare(l_signed(), r_signed()));
@@ -256,9 +195,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 Rem if r.is_null() => throw_ub!(RemainderByZero),
                 Div => Some(i128::overflowing_div),
                 Rem => Some(i128::overflowing_rem),
-                Add | AddUnchecked => Some(i128::overflowing_add),
-                Sub | SubUnchecked => Some(i128::overflowing_sub),
-                Mul | MulUnchecked => Some(i128::overflowing_mul),
+                Add | AddUnchecked | AddWithOverflow => Some(i128::overflowing_add),
+                Sub | SubUnchecked | SubWithOverflow => Some(i128::overflowing_sub),
+                Mul | MulUnchecked | MulWithOverflow => Some(i128::overflowing_mul),
                 _ => None,
             };
             if let Some(op) = op {
@@ -282,10 +221,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // If that truncation loses any information, we have an overflow.
                 let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size);
                 let overflow = oflo || lossy;
-                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                    throw_ub!(ArithOverflow { intrinsic });
                 }
-                return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
+                let res = ImmTy::from_scalar_int(result, left.layout);
+                return Ok(if with_overflow {
+                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
+                    ImmTy::from_pair(res, overflow, *self.tcx)
+                } else {
+                    res
+                });
             }
         }
 
         // From here on it's okay to treat everything as unsigned.
@@ -296,7 +241,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return Ok(self.three_way_compare(l, r));
         }
 
-        let val = match bin_op {
+        Ok(match bin_op {
             Eq => ImmTy::from_bool(l == r, *self.tcx),
             Ne => ImmTy::from_bool(l != r, *self.tcx),
@@ -309,40 +254,42 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             BitAnd => ImmTy::from_uint(l & r, left.layout),
             BitXor => ImmTy::from_uint(l ^ r, left.layout),
 
-            Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
+            _ => {
                 assert!(!left.layout.abi.is_signed());
                 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
-                    Add | AddUnchecked => u128::overflowing_add,
-                    Sub | SubUnchecked => u128::overflowing_sub,
-                    Mul | MulUnchecked => u128::overflowing_mul,
+                    Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
+                    Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
+                    Mul | MulUnchecked | MulWithOverflow => u128::overflowing_mul,
                     Div if r == 0 => throw_ub!(DivisionByZero),
                     Rem if r == 0 => throw_ub!(RemainderByZero),
                     Div => u128::overflowing_div,
                     Rem => u128::overflowing_rem,
-                    _ => bug!(),
+                    _ => span_bug!(
+                        self.cur_span(),
+                        "invalid binary op {:?}: {:?}, {:?} (both {})",
+                        bin_op,
+                        left,
+                        right,
+                        right.layout.ty,
+                    ),
                 };
                 let (result, oflo) = op(l, r);
                 // Truncate to target type.
                 // If that truncation loses any information, we have an overflow.
                 let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size);
                 let overflow = oflo || lossy;
-                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
-                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
+                if overflow && let Some(intrinsic) = throw_ub_on_overflow {
+                    throw_ub!(ArithOverflow { intrinsic });
                 }
-                return Ok((ImmTy::from_scalar_int(result, left.layout), overflow));
+                let res = ImmTy::from_scalar_int(result, left.layout);
+                if with_overflow {
+                    let overflow = ImmTy::from_bool(overflow, *self.tcx);
+                    ImmTy::from_pair(res, overflow, *self.tcx)
+                } else {
+                    res
+                }
             }
-
-            _ => span_bug!(
-                self.cur_span(),
-                "invalid binary op {:?}: {:?}, {:?} (both {})",
-                bin_op,
-                left,
-                right,
-                right.layout.ty,
-            ),
-        };
-        Ok((val, false))
+        })
     }
 
     fn binary_ptr_op(
@@ -350,7 +297,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::BinOp::*;
 
         match bin_op {
@@ -369,10 +316,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
 
                 let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
-                Ok((
-                    ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
-                    false,
-                ))
+                Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout))
             }
 
             // Fall back to machine hook so Miri can support more pointer ops.
@@ -381,12 +325,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
     /// Returns the result of the specified operation, and whether it overflowed.
-    pub fn overflowing_binary_op(
+    pub fn binary_op(
         &self,
         bin_op: mir::BinOp,
         left: &ImmTy<'tcx, M::Provenance>,
         right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         trace!(
             "Running binary op {:?}: {:?} ({}), {:?} ({})",
             bin_op,
@@ -458,24 +402,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
-    #[inline]
-    pub fn wrapping_binary_op(
-        &self,
-        bin_op: mir::BinOp,
-        left: &ImmTy<'tcx, M::Provenance>,
-        right: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
-        let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
-        Ok(val)
-    }
-
-    /// Returns the result of the specified operation, whether it overflowed, and
-    /// the result type.
-    pub fn overflowing_unary_op(
+    pub fn unary_op(
         &self,
         un_op: mir::UnOp,
         val: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
+    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         use rustc_middle::mir::UnOp::*;
 
         let layout = val.layout;
@@ -489,7 +422,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     Not => !val,
                     _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                 };
-                Ok((ImmTy::from_bool(res, *self.tcx), false))
+                Ok(ImmTy::from_bool(res, *self.tcx))
             }
             ty::Float(fty) => {
                 // No NaN adjustment here, `-` is a bitwise operation!
@@ -498,37 +431,25 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                     _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                 };
-                Ok((ImmTy::from_scalar(res, layout), false))
+                Ok(ImmTy::from_scalar(res, layout))
             }
             _ => {
                 assert!(layout.ty.is_integral());
                 let val = val.to_bits(layout.size)?;
-                let (res, overflow) = match un_op {
-                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+                let res = match un_op {
+                    Not => self.truncate(!val, layout), // bitwise negation, then truncate
                     Neg => {
                         // arithmetic negation
                         assert!(layout.abi.is_signed());
                         let val = self.sign_extend(val, layout) as i128;
-                        let (res, overflow) = val.overflowing_neg();
+                        let res = val.wrapping_neg();
                         let res = res as u128;
                         // Truncate to target type.
-                        // If that truncation loses any information, we have an overflow.
-                        let truncated = self.truncate(res, layout);
-                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
+                        self.truncate(res, layout)
                     }
                 };
-                Ok((ImmTy::from_uint(res, layout), overflow))
+                Ok(ImmTy::from_uint(res, layout))
             }
         }
     }
-
-    #[inline]
-    pub fn wrapping_unary_op(
-        &self,
-        un_op: mir::UnOp,
-        val: &ImmTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
-        let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
-        Ok(val)
-    }
 }
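The shift path above reduces any RHS to an equivalent amount in `0..size` by taking it modulo the (power-of-two) bit width, and flags overflow when the original amount was out of range; the deliberately wrapping `as` cast makes this work even for negative signed amounts. A runnable sketch of the signed case:

    // Masked shift amount plus overflow flag for a signed RHS, following the
    // logic in `binary_int_op` above. `size` is the LHS bit width, a power of
    // two such as 32 or 64.
    fn masked_shift_signed(shift_amount: i128, size: u32) -> (u32, bool) {
        let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
        // Deliberately wrapping cast: a negative amount lands on the same
        // residue modulo `size` because `size` is a power of two.
        let masked = (shift_amount as u128) % u128::from(size);
        assert_eq!(overflow, shift_amount != i128::try_from(masked).unwrap());
        (u32::try_from(masked).unwrap(), overflow)
    }

    fn main() {
        assert_eq!(masked_shift_signed(3, 32), (3, false));
        assert_eq!(masked_shift_signed(35, 32), (3, true)); // 35 % 32 == 3
        assert_eq!(masked_shift_signed(-1, 32), (31, true)); // -1 == 31 (mod 32)
    }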


@@ -167,19 +167,17 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                 let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                 let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
-                if let Some(bin_op) = bin_op.overflowing_to_wrapping() {
-                    self.binop_with_overflow(bin_op, &left, &right, &dest)?;
-                } else {
-                    self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
-                }
+                let result = self.binary_op(bin_op, &left, &right)?;
+                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
+                self.write_immediate(*result, &dest)?;
             }
 
             UnaryOp(un_op, ref operand) => {
                 // The operand always has the same type as the result.
                 let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
-                let val = self.wrapping_unary_op(un_op, &val)?;
-                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
-                self.write_immediate(*val, &dest)?;
+                let result = self.unary_op(un_op, &val)?;
+                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
+                self.write_immediate(*result, &dest)?;
             }
 
             Aggregate(box ref kind, ref operands) => {


@@ -97,7 +97,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 for (const_int, target) in targets.iter() {
                     // Compare using MIR BinOp::Eq, to also support pointer values.
                     // (Avoiding `self.binary_op` as that does some redundant layout computation.)
-                    let res = self.wrapping_binary_op(
+                    let res = self.binary_op(
                         mir::BinOp::Eq,
                         &discr,
                         &ImmTy::from_uint(const_int, discr.layout),