
interpret: simplify handling of shifts by no longer trying to handle signed and unsigned shift amounts in the same branch

Ralf Jung 2023-11-12 12:16:41 +01:00
parent a04d56b36d
commit 31493c70fa
7 changed files with 86 additions and 58 deletions

@@ -157,41 +157,35 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = u128::from(left_layout.size.bits());
-            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
-            // zero-extended form). This matches the codegen backend:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
-            // The overflow check is also ignorant to the sign:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
-            // This would behave rather strangely if we had integer types of size 256: a shift by
-            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
-            // shift by -1i16 though would be considered overflowing. If we had integers of size
-            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
-            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
-            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
-            // consistent results for the same value represented at different bit widths.
-            assert!(size <= 128);
-            let original_r = r;
-            let overflow = r >= size;
-            // The shift offset is implicitly masked to the type size, to make sure this operation
-            // is always defined. This is the one MIR operator that does *not* directly map to a
-            // single LLVM operation. See
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
-            // for the corresponding truncation in our codegen backends.
-            let r = r % size;
-            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+            let size = left_layout.size.bits();
+            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
+            // that does *not* directly map to a single LLVM operation.) Compute how much we
+            // actually shift and whether there was an overflow due to shifting too much.
+            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
+                let shift_amount = self.sign_extend(r, right_layout) as i128;
+                let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                let masked_amount = (shift_amount as u128) % u128::from(size);
+                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                (masked_amount, overflow)
+            } else {
+                let shift_amount = r;
+                let masked_amount = shift_amount % u128::from(size);
+                (masked_amount, shift_amount != masked_amount)
+            };
+            let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
+            // Compute the shifted result.
             let result = if left_layout.abi.is_signed() {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let result = match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
                 result as u128
             } else {
                 match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 }
             };
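
To make the new branching concrete: below is a minimal standalone sketch of the masking computation, not the interpreter's API. Here `size` stands in for `left_layout.size.bits()`, and the `ShiftAmount` enum stands in for the sign-extended (signed RHS) or zero-extended (unsigned RHS) scalar that the real code reads out of `r`.

enum ShiftAmount {
    Signed(i128),   // RHS type is signed: the sign-extended value
    Unsigned(u128), // RHS type is unsigned: the zero-extended value
}

// Mirrors the masking above: returns the in-range shift amount and
// whether the requested amount was out of range (i.e., overflowed).
fn mask_shift_amount(amount: ShiftAmount, size: u64) -> (u32, bool) {
    let (masked, overflow) = match amount {
        ShiftAmount::Signed(s) => {
            let overflow = s < 0 || s >= i128::from(size);
            ((s as u128) % u128::from(size), overflow)
        }
        ShiftAmount::Unsigned(u) => {
            let masked = u % u128::from(size);
            (masked, u != masked)
        }
    };
    // masked < size <= 128, so this conversion cannot fail.
    (u32::try_from(masked).unwrap(), overflow)
}

fn main() {
    // Shifting a 32-bit value by 35: masked to 3, flagged as overflow.
    assert_eq!(mask_shift_amount(ShiftAmount::Unsigned(35), 32), (3, true));
    // A negative amount always overflows; (-1 as u128) % 32 == 31.
    assert_eq!(mask_shift_amount(ShiftAmount::Signed(-1), 32), (31, true));
    // The removed comment's point: under zero-extension semantics, -1i8
    // reads as 255, which also exceeds every width up to 128 bits, so both
    // schemes agree that negative shifts overflow.
}

This preserves the removed comment's observation: for types at most 128 bits wide, a negative shift amount overflows under either reading, which is why the old single-branch version happened to give consistent results.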
@@ -200,7 +194,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
             throw_ub_custom!(
                 fluent::const_eval_overflow_shift,
-                val = original_r,
+                val = if right_layout.abi.is_signed() {
+                    (self.sign_extend(r, right_layout) as i128).to_string()
+                } else {
+                    r.to_string()
+                },
                 name = intrinsic_name
             );
         }
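
The error path now renders the offending amount with the RHS type's signedness, so `x << -1i8` is reported as -1 rather than the zero-extended 255 that `original_r` would have shown. A small sketch of that rendering, with an illustrative `sign_extend` helper in place of `InterpCx::sign_extend` (which derives the width from `right_layout` instead of taking it as a parameter):

// Sign-extend the low `bits` bits of `raw` to i128. Illustrative only.
fn sign_extend(raw: u128, bits: u32) -> i128 {
    let shift = 128 - bits;
    ((raw << shift) as i128) >> shift
}

fn main() {
    // -1i8 is stored zero-extended as 0xFF; signed rendering recovers -1.
    assert_eq!(sign_extend(0xFF, 8).to_string(), "-1");
    // An unsigned RHS keeps the raw value.
    assert_eq!(0xFFu128.to_string(), "255");
}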