interpret: simplify handling of shifts by no longer trying to handle signed and unsigned shift amounts in the same branch

Ralf Jung 2023-11-12 12:16:41 +01:00
parent a04d56b36d
commit 31493c70fa
7 changed files with 86 additions and 58 deletions

@@ -322,8 +322,13 @@ pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     if lhs_sz < rhs_sz {
         bx.trunc(rhs, lhs_llty)
     } else if lhs_sz > rhs_sz {
-        // FIXME (#1877: If in the future shifting by negative
-        // values is no longer undefined then this is wrong.
+        // We zero-extend even if the RHS is signed. So e.g. `(x: i32) << -1i8` will zero-extend the
+        // RHS to `255i32`. But then we mask the shift amount to be within the size of the LHS
+        // anyway so the result is `31` as it should be. All the extra bits introduced by zext
+        // are masked off so their value does not matter.
+        // FIXME: if we ever support 512bit integers, this will be wrong! For such large integers,
+        // the extra bits introduced by zext are *not* all masked away any more.
+        assert!(lhs_sz <= 256);
         bx.zext(rhs, lhs_llty)
     } else {
         rhs
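
For reference, a minimal standalone Rust sketch (not part of the diff) illustrating the masking argument in the new comment: zero-extending the signed shift amount `-1i8` yields `255`, and masking it to the 32-bit width of the LHS yields `31`, so the extra bits introduced by `zext` have no effect on the result.

    fn main() {
        let x: i32 = 1;
        let rhs: i8 = -1;

        // Zero-extend the signed RHS, as the codegen does with `zext`.
        let zext_rhs = rhs as u8 as u32;
        assert_eq!(zext_rhs, 255);

        // The backend masks the shift amount to the LHS bit width (here 32, i.e. `& 31`).
        let masked = zext_rhs & (i32::BITS - 1);
        assert_eq!(masked, 31);

        // Shifting with the zero-extended-then-masked amount gives the same result
        // as shifting directly by the masked amount.
        assert_eq!(x.wrapping_shl(zext_rhs), x << masked);
    }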