
Rollup merge of #61164 - RalfJung:scalar, r=oli-obk

rename Scalar::Bits to Scalar::Raw and bits field to data

Also use this opportunity to seal some abstraction leaks (other modules were constructing `Scalar::Bits` directly instead of going through a constructor).

r? @oli-obk
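
For readers skimming the diff, here is a small self-contained sketch of the shape this moves `Scalar` toward. It is not the real `rustc::mir::interpret::Scalar` (which is generic over `Tag`/`Id`, has many more constructors, and uses the `Size`/`Pointer` types); the pointer payload and the exact assertions below are simplified stand-ins:

```rust
// Simplified stand-in for the renamed type; not the real rustc type.
#[derive(Debug, PartialEq)]
enum Scalar {
    /// The raw bytes of a simple value; only the low `size` bytes of `data`
    /// are meaningful, the remaining bytes must be 0.
    Raw { data: u128, size: u8 },
    /// Stand-in for `Scalar::Ptr(Pointer)`.
    Ptr(u64),
}

impl Scalar {
    /// Zero-sized value; was previously written as `Scalar::Bits { bits: 0, size: 0 }`.
    fn zst() -> Self {
        Scalar::Raw { data: 0, size: 0 }
    }

    /// Unsigned constructor that checks the value fits in `size` bytes.
    fn from_uint(i: u128, size: u8) -> Self {
        assert!(
            size >= 16 || i < (1u128 << (8 * u32::from(size))),
            "value {:#x} does not fit in {} bytes", i, size
        );
        Scalar::Raw { data: i, size }
    }

    /// Accessor replacing direct matches on the variant: integers come back
    /// as `Ok(data)`, pointers as `Err(ptr)`.
    fn to_bits_or_ptr(self, expected_size: u8) -> Result<u128, u64> {
        match self {
            Scalar::Raw { data, size } => {
                assert_eq!(size, expected_size, "size mismatch");
                Ok(data)
            }
            Scalar::Ptr(ptr) => Err(ptr),
        }
    }
}

fn main() {
    assert_eq!(Scalar::zst(), Scalar::Raw { data: 0, size: 0 });
    assert_eq!(Scalar::from_uint(20, 1).to_bits_or_ptr(1), Ok(0x14));
    assert_eq!(Scalar::Ptr(0x1000).to_bits_or_ptr(8), Err(0x1000));
}
```

The point of the constructors and of `to_bits_or_ptr` is that callers outside the module no longer build or destructure the raw variant by hand, so the size and truncation invariants are checked in one place.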
Mazdak Farrokhzad 2019-05-28 18:15:39 +02:00 committed by GitHub
commit 16ef483b79
20 changed files with 184 additions and 198 deletions

View file

@@ -2,7 +2,6 @@
 use super::{
     Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
-    truncate,
 };

 use crate::ty::layout::{Size, Align};
@@ -407,18 +406,9 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
         };

-        let bytes = match val {
-            Scalar::Ptr(val) => {
-                assert_eq!(type_size, cx.data_layout().pointer_size);
-                val.offset.bytes() as u128
-            }
-            Scalar::Bits { bits, size } => {
-                assert_eq!(size as u64, type_size.bytes());
-                debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
-                    "Unexpected value of size {} when writing to memory", size);
-                bits
-            },
-        };
+        let bytes = match val.to_bits_or_ptr(type_size, cx) {
+            Err(val) => val.offset.bytes() as u128,
+            Ok(data) => data,
+        };

         let endian = cx.data_layout().endian;

View file

@@ -349,6 +349,7 @@ impl<'tcx> AllocMap<'tcx> {
     /// illegal and will likely ICE.
     /// This function exists to allow const eval to detect the difference between evaluation-
     /// local dangling pointers and allocations in constants/statics.
+    #[inline]
     pub fn get(&self, id: AllocId) -> Option<AllocKind<'tcx>> {
         self.id_to_kind.get(&id).cloned()
     }
@@ -397,6 +398,7 @@ impl<'tcx> AllocMap<'tcx> {
 // Methods to access integers in the target endianness
 ////////////////////////////////////////////////////////////////////////////////

+#[inline]
 pub fn write_target_uint(
     endianness: layout::Endian,
     mut target: &mut [u8],
@@ -409,6 +411,7 @@ pub fn write_target_uint(
     }
 }

+#[inline]
 pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result<u128, io::Error> {
     match endianness {
         layout::Endian::Little => source.read_uint128::<LittleEndian>(source.len()),
@@ -420,8 +423,15 @@ pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result
 // Methods to facilitate working with signed integers stored in a u128
 ////////////////////////////////////////////////////////////////////////////////

+/// Truncate `value` to `size` bits and then sign-extend it to 128 bits
+/// (i.e., if it is negative, fill with 1's on the left).
+#[inline]
 pub fn sign_extend(value: u128, size: Size) -> u128 {
     let size = size.bits();
+    if size == 0 {
+        // Truncated until nothing is left.
+        return 0;
+    }
     // sign extend
     let shift = 128 - size;
     // shift the unsigned value to the left
@@ -429,8 +439,14 @@ pub fn sign_extend(value: u128, size: Size) -> u128 {
     (((value << shift) as i128) >> shift) as u128
 }

+/// Truncate `value` to `size` bits.
+#[inline]
 pub fn truncate(value: u128, size: Size) -> u128 {
     let size = size.bits();
+    if size == 0 {
+        // Truncated until nothing is left.
+        return 0;
+    }
     let shift = 128 - size;
     // truncate (shift left to drop out leftover values, shift right to fill with zeroes)
     (value << shift) >> shift

View file

@@ -20,11 +20,35 @@ pub trait PointerArithmetic: layout::HasDataLayout {
         self.data_layout().pointer_size
     }

-    //// Trunace the given value to the pointer size; also return whether there was an overflow
+    /// Helper function: truncate given value-"overflowed flag" pair to pointer size and
+    /// update "overflowed flag" if there was an overflow.
+    /// This should be called by all the other methods before returning!
     #[inline]
-    fn truncate_to_ptr(&self, val: u128) -> (u64, bool) {
+    fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
+        let val = val as u128;
         let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
-        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
+        ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1)
+    }
+
+    #[inline]
+    fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
+        let res = val.overflowing_add(i);
+        self.truncate_to_ptr(res)
+    }
+
+    // Overflow checking only works properly on the range from -u64 to +u64.
+    #[inline]
+    fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
+        // FIXME: is it possible to over/underflow here?
+        if i < 0 {
+            // Trickery to ensure that i64::min_value() works fine: compute n = -i.
+            // This formula only works for true negative values, it overflows for zero!
+            let n = u64::max_value() - (i as u64) + 1;
+            let res = val.overflowing_sub(n);
+            self.truncate_to_ptr(res)
+        } else {
+            self.overflowing_offset(val, i as u64)
+        }
     }

     #[inline]
@@ -33,32 +57,11 @@ pub trait PointerArithmetic: layout::HasDataLayout {
         if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
     }

-    #[inline]
-    fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
-        let (res, over1) = val.overflowing_add(i);
-        let (res, over2) = self.truncate_to_ptr(u128::from(res));
-        (res, over1 || over2)
-    }
-
     #[inline]
     fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
         let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
         if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
     }
-
-    // Overflow checking only works properly on the range from -u64 to +u64.
-    #[inline]
-    fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
-        // FIXME: is it possible to over/underflow here?
-        if i < 0 {
-            // trickery to ensure that i64::min_value() works fine
-            // this formula only works for true negative values, it panics for zero!
-            let n = u64::max_value() - (i as u64) + 1;
-            val.overflowing_sub(n)
-        } else {
-            self.overflowing_offset(val, i as u64)
-        }
-    }
 }

 impl<T: layout::HasDataLayout> PointerArithmetic for T {}

View file

@@ -87,11 +87,11 @@ impl<'tcx> ConstValue<'tcx> {
          RustcEncodable, RustcDecodable, Hash, HashStable)]
 pub enum Scalar<Tag=(), Id=AllocId> {
     /// The raw bytes of a simple value.
-    Bits {
-        /// The first `size` bytes are the value.
+    Raw {
+        /// The first `size` bytes of `data` are the value.
         /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
+        data: u128,
         size: u8,
-        bits: u128,
     },

     /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
@@ -108,16 +108,14 @@ impl<Tag: fmt::Debug, Id: fmt::Debug> fmt::Debug for Scalar<Tag, Id> {
         match self {
             Scalar::Ptr(ptr) =>
                 write!(f, "{:?}", ptr),
-            &Scalar::Bits { bits, size } => {
+            &Scalar::Raw { data, size } => {
+                Scalar::check_data(data, size);
                 if size == 0 {
-                    assert_eq!(bits, 0, "ZST value must be 0");
                     write!(f, "<ZST>")
                 } else {
-                    assert_eq!(truncate(bits, Size::from_bytes(size as u64)), bits,
-                               "Scalar value {:#x} exceeds size of {} bytes", bits, size);
                     // Format as hex number wide enough to fit any value of the given `size`.
-                    // So bits=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
-                    write!(f, "0x{:>0width$x}", bits, width=(size*2) as usize)
+                    // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+                    write!(f, "0x{:>0width$x}", data, width=(size*2) as usize)
                 }
             }
         }
@@ -128,17 +126,23 @@ impl<Tag> fmt::Display for Scalar<Tag> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scalar::Ptr(_) => write!(f, "a pointer"),
-            Scalar::Bits { bits, .. } => write!(f, "{}", bits),
+            Scalar::Raw { data, .. } => write!(f, "{}", data),
         }
     }
 }

 impl<'tcx> Scalar<()> {
+    #[inline(always)]
+    fn check_data(data: u128, size: u8) {
+        debug_assert_eq!(truncate(data, Size::from_bytes(size as u64)), data,
+                         "Scalar value {:#x} exceeds size of {} bytes", data, size);
+    }
+
     #[inline]
     pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
         match self {
             Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
-            Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
+            Scalar::Raw { data, size } => Scalar::Raw { data, size },
         }
     }
@@ -155,31 +159,31 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn erase_tag(self) -> Scalar {
         match self {
             Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
-            Scalar::Bits { bits, size } => Scalar::Bits { bits, size },
+            Scalar::Raw { data, size } => Scalar::Raw { data, size },
         }
     }

     #[inline]
     pub fn ptr_null(cx: &impl HasDataLayout) -> Self {
-        Scalar::Bits {
-            bits: 0,
+        Scalar::Raw {
+            data: 0,
             size: cx.data_layout().pointer_size.bytes() as u8,
         }
     }

     #[inline]
     pub fn zst() -> Self {
-        Scalar::Bits { bits: 0, size: 0 }
+        Scalar::Raw { data: 0, size: 0 }
     }

     #[inline]
     pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
         let dl = cx.data_layout();
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, dl.pointer_size.bytes());
-                Ok(Scalar::Bits {
-                    bits: dl.offset(bits as u64, i.bytes())? as u128,
+                Ok(Scalar::Raw {
+                    data: dl.offset(data as u64, i.bytes())? as u128,
                     size,
                 })
             }
@@ -191,10 +195,10 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Bits {
-                    bits: dl.overflowing_offset(bits as u64, i.bytes()).0 as u128,
+                Scalar::Raw {
+                    data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128,
                     size,
                 }
             }
@@ -206,10 +210,10 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
         let dl = cx.data_layout();
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, dl.pointer_size().bytes());
-                Ok(Scalar::Bits {
-                    bits: dl.signed_offset(bits as u64, i)? as u128,
+                Ok(Scalar::Raw {
+                    data: dl.signed_offset(data as u64, i)? as u128,
                     size,
                 })
             }
@@ -221,10 +225,10 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Bits {
-                    bits: dl.overflowing_signed_offset(bits as u64, i128::from(i)).0 as u128,
+                Scalar::Raw {
+                    data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128,
                     size,
                 }
             }
@@ -232,14 +236,14 @@ impl<'tcx, Tag> Scalar<Tag> {
         }
     }

-    /// Returns this pointers offset from the allocation base, or from NULL (for
+    /// Returns this pointer's offset from the allocation base, or from NULL (for
     /// integer pointers).
     #[inline]
     pub fn get_ptr_offset(self, cx: &impl HasDataLayout) -> Size {
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, cx.pointer_size().bytes());
-                Size::from_bytes(bits as u64)
+                Size::from_bytes(data as u64)
             }
             Scalar::Ptr(ptr) => ptr.offset,
         }
@@ -248,9 +252,9 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn is_null_ptr(self, cx: &impl HasDataLayout) -> bool {
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, cx.data_layout().pointer_size.bytes());
-                bits == 0
+                data == 0
             },
             Scalar::Ptr(_) => false,
         }
@@ -258,20 +262,22 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn from_bool(b: bool) -> Self {
-        Scalar::Bits { bits: b as u128, size: 1 }
+        Scalar::Raw { data: b as u128, size: 1 }
     }

     #[inline]
     pub fn from_char(c: char) -> Self {
-        Scalar::Bits { bits: c as u128, size: 4 }
+        Scalar::Raw { data: c as u128, size: 4 }
     }

     #[inline]
     pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
         let i = i.into();
-        debug_assert_eq!(truncate(i, size), i,
-                         "Unsigned value {} does not fit in {} bits", i, size.bits());
-        Scalar::Bits { bits: i, size: size.bytes() as u8 }
+        assert_eq!(
+            truncate(i, size), i,
+            "Unsigned value {:#x} does not fit in {} bits", i, size.bits()
+        );
+        Scalar::Raw { data: i, size: size.bytes() as u8 }
     }

     #[inline]
@@ -279,28 +285,51 @@ impl<'tcx, Tag> Scalar<Tag> {
         let i = i.into();
         // `into` performed sign extension, we have to truncate
         let truncated = truncate(i as u128, size);
-        debug_assert_eq!(sign_extend(truncated, size) as i128, i,
-                         "Signed value {} does not fit in {} bits", i, size.bits());
-        Scalar::Bits { bits: truncated, size: size.bytes() as u8 }
+        assert_eq!(
+            sign_extend(truncated, size) as i128, i,
+            "Signed value {:#x} does not fit in {} bits", i, size.bits()
+        );
+        Scalar::Raw { data: truncated, size: size.bytes() as u8 }
     }

     #[inline]
     pub fn from_f32(f: f32) -> Self {
-        Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
+        Scalar::Raw { data: f.to_bits() as u128, size: 4 }
     }

     #[inline]
     pub fn from_f64(f: f64) -> Self {
-        Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
+        Scalar::Raw { data: f.to_bits() as u128, size: 8 }
+    }
+
+    #[inline]
+    pub fn to_bits_or_ptr(
+        self,
+        target_size: Size,
+        cx: &impl HasDataLayout,
+    ) -> Result<u128, Pointer<Tag>> {
+        match self {
+            Scalar::Raw { data, size } => {
+                assert_eq!(target_size.bytes(), size as u64);
+                assert_ne!(size, 0, "you should never look at the bits of a ZST");
+                Scalar::check_data(data, size);
+                Ok(data)
+            }
+            Scalar::Ptr(ptr) => {
+                assert_eq!(target_size, cx.data_layout().pointer_size);
+                Err(ptr)
+            }
+        }
     }

     #[inline]
     pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
         match self {
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(target_size.bytes(), size as u64);
-                assert_ne!(size, 0, "to_bits cannot be used with zsts");
-                Ok(bits)
+                assert_ne!(size, 0, "you should never look at the bits of a ZST");
+                Scalar::check_data(data, size);
+                Ok(data)
             }
             Scalar::Ptr(_) => err!(ReadPointerAsBytes),
         }
@@ -309,8 +338,8 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
         match self {
-            Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
-            Scalar::Bits { .. } => err!(ReadBytesAsPointer),
+            Scalar::Raw { data: 0, .. } => err!(InvalidNullPointerUsage),
+            Scalar::Raw { .. } => err!(ReadBytesAsPointer),
             Scalar::Ptr(p) => Ok(p),
         }
     }
@@ -318,7 +347,7 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn is_bits(self) -> bool {
         match self {
-            Scalar::Bits { .. } => true,
+            Scalar::Raw { .. } => true,
             _ => false,
         }
     }
@@ -333,8 +362,8 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn to_bool(self) -> EvalResult<'tcx, bool> {
         match self {
-            Scalar::Bits { bits: 0, size: 1 } => Ok(false),
-            Scalar::Bits { bits: 1, size: 1 } => Ok(true),
+            Scalar::Raw { data: 0, size: 1 } => Ok(false),
+            Scalar::Raw { data: 1, size: 1 } => Ok(true),
             _ => err!(InvalidBool),
         }
     }
@@ -350,27 +379,23 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn to_u8(self) -> EvalResult<'static, u8> {
         let sz = Size::from_bits(8);
         let b = self.to_bits(sz)?;
-        assert_eq!(b as u8 as u128, b);
         Ok(b as u8)
     }

     pub fn to_u32(self) -> EvalResult<'static, u32> {
         let sz = Size::from_bits(32);
         let b = self.to_bits(sz)?;
-        assert_eq!(b as u32 as u128, b);
         Ok(b as u32)
     }

     pub fn to_u64(self) -> EvalResult<'static, u64> {
         let sz = Size::from_bits(64);
         let b = self.to_bits(sz)?;
-        assert_eq!(b as u64 as u128, b);
         Ok(b as u64)
     }

     pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> {
         let b = self.to_bits(cx.data_layout().pointer_size)?;
-        assert_eq!(b as u64 as u128, b);
         Ok(b as u64)
     }
@@ -378,7 +403,6 @@ impl<'tcx, Tag> Scalar<Tag> {
         let sz = Size::from_bits(8);
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
-        assert_eq!(b as i8 as i128, b);
         Ok(b as i8)
     }
@@ -386,7 +410,6 @@ impl<'tcx, Tag> Scalar<Tag> {
         let sz = Size::from_bits(32);
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
-        assert_eq!(b as i32 as i128, b);
         Ok(b as i32)
     }
@@ -394,14 +417,13 @@ impl<'tcx, Tag> Scalar<Tag> {
         let sz = Size::from_bits(64);
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
-        assert_eq!(b as i64 as i128, b);
         Ok(b as i64)
     }

     pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> {
-        let b = self.to_bits(cx.data_layout().pointer_size)?;
-        let b = sign_extend(b, cx.data_layout().pointer_size) as i128;
-        assert_eq!(b as i64 as i128, b);
+        let sz = cx.data_layout().pointer_size;
+        let b = self.to_bits(sz)?;
+        let b = sign_extend(b, sz) as i128;
         Ok(b as i64)
     }

View file

@@ -1669,10 +1669,7 @@ impl<'tcx> TerminatorKind<'tcx> {
                     .map(|&u| {
                         tcx.mk_const(ty::Const {
                             val: ConstValue::Scalar(
-                                Scalar::Bits {
-                                    bits: u,
-                                    size: size.bytes() as u8,
-                                }.into(),
+                                Scalar::from_uint(u, size).into(),
                             ),
                             ty: switch_ty,
                         }).to_string().into()

View file

@@ -1001,7 +1001,7 @@ impl<'tcx> CommonConsts<'tcx> {
         CommonConsts {
             err: mk_const(ty::Const {
-                val: ConstValue::Scalar(Scalar::Bits { bits: 0, size: 0 }),
+                val: ConstValue::Scalar(Scalar::zst()),
                 ty: types.err,
             }),
         }

View file

@@ -845,22 +845,22 @@ pub trait PrettyPrinter<'gcx: 'tcx, 'tcx>:
                 p!(write("{}", name));
                 return Ok(self);
             }
-            if let ConstValue::Scalar(Scalar::Bits { bits, .. }) = ct.val {
+            if let ConstValue::Scalar(Scalar::Raw { data, .. }) = ct.val {
                 match ct.ty.sty {
                     ty::Bool => {
-                        p!(write("{}", if bits == 0 { "false" } else { "true" }));
+                        p!(write("{}", if data == 0 { "false" } else { "true" }));
                         return Ok(self);
                     },
                     ty::Float(ast::FloatTy::F32) => {
-                        p!(write("{}f32", Single::from_bits(bits)));
+                        p!(write("{}f32", Single::from_bits(data)));
                         return Ok(self);
                     },
                     ty::Float(ast::FloatTy::F64) => {
-                        p!(write("{}f64", Double::from_bits(bits)));
+                        p!(write("{}f64", Double::from_bits(data)));
                         return Ok(self);
                     },
                     ty::Uint(ui) => {
-                        p!(write("{}{}", bits, ui));
+                        p!(write("{}{}", data, ui));
                         return Ok(self);
                     },
                     ty::Int(i) =>{
@@ -868,11 +868,11 @@ pub trait PrettyPrinter<'gcx: 'tcx, 'tcx>:
                         let size = self.tcx().layout_of(ty::ParamEnv::empty().and(ty))
                             .unwrap()
                             .size;
-                        p!(write("{}{}", sign_extend(bits, size) as i128, i));
+                        p!(write("{}{}", sign_extend(data, size) as i128, i));
                         return Ok(self);
                     },
                     ty::Char => {
-                        p!(write("{:?}", ::std::char::from_u32(bits as u32).unwrap()));
+                        p!(write("{:?}", ::std::char::from_u32(data as u32).unwrap()));
                         return Ok(self);
                     }
                     _ => {},

View file

@@ -613,7 +613,7 @@ where
             (ConstValue::Placeholder(p1), ConstValue::Placeholder(p2)) if p1 == p2 => {
                 Ok(a)
             }
-            (ConstValue::Scalar(Scalar::Bits { .. }), _) if a == b => {
+            (ConstValue::Scalar(Scalar::Raw { .. }), _) if a == b => {
                 Ok(a)
             }
             (ConstValue::ByRef(..), _) => {

View file

@@ -3,7 +3,7 @@
 use crate::hir;
 use crate::hir::def_id::DefId;
 use crate::infer::canonical::Canonical;
-use crate::mir::interpret::{ConstValue, truncate};
+use crate::mir::interpret::ConstValue;
 use crate::middle::region;
 use polonius_engine::Atom;
 use rustc_data_structures::indexed_vec::Idx;
@@ -2232,14 +2232,12 @@ impl<'tcx> Const<'tcx> {
         let size = tcx.layout_of(ty).unwrap_or_else(|e| {
             panic!("could not compute layout for {:?}: {:?}", ty, e)
         }).size;
-        let truncated = truncate(bits, size);
-        assert_eq!(truncated, bits, "from_bits called with untruncated value");
-        Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value)
+        Self::from_scalar(tcx, Scalar::from_uint(bits, size), ty.value)
     }

     #[inline]
     pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
-        Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty)
+        Self::from_scalar(tcx, Scalar::zst(), ty)
     }

     #[inline]

View file

@@ -294,13 +294,13 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     ) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
-            Scalar::Bits { size: 0, .. } => {
+            Scalar::Raw { size: 0, .. } => {
                 assert_eq!(0, layout.value.size(self).bytes());
                 self.const_undef(self.type_ix(0))
             },
-            Scalar::Bits { bits, size } => {
+            Scalar::Raw { data, size } => {
                 assert_eq!(size as u64, layout.value.size(self).bytes());
-                let llval = self.const_uint_big(self.type_ix(bitsize), bits);
+                let llval = self.const_uint_big(self.type_ix(bitsize), data);
                 if layout.value == layout::Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {

View file

@@ -443,7 +443,7 @@ impl Printer<'tcx, 'tcx> for SymbolPrinter<'_, 'tcx> {
         ct: &'tcx ty::Const<'tcx>,
     ) -> Result<Self::Const, Self::Error> {
         // only print integers
-        if let ConstValue::Scalar(Scalar::Bits { .. }) = ct.val {
+        if let ConstValue::Scalar(Scalar::Raw { .. }) = ct.val {
             if ct.ty.is_integral() {
                 return self.pretty_print_const(ct);
             }

View file

@@ -115,7 +115,7 @@ fn op_to_const<'tcx>(
             ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id),
             ptr.offset.bytes(),
         ),
-        Scalar::Bits { .. } => (
+        Scalar::Raw { .. } => (
            ecx.tcx.intern_const_alloc(Allocation::from_byte_aligned_bytes(b"", ())),
            0,
        ),

View file

@@ -1,5 +1,5 @@
 use syntax::ast;
-use rustc::ty::{self, Ty, TyCtxt, ParamEnv};
+use rustc::ty::{self, Ty, TyCtxt, ParamEnv, layout::Size};
 use syntax_pos::symbol::Symbol;

 use rustc::mir::interpret::{ConstValue, Scalar};
@@ -23,10 +23,7 @@ crate fn lit_to_const<'a, 'gcx, 'tcx>(
         trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
         let result = truncate(n, width);
         trace!("trunc result: {}", result);
-        Ok(ConstValue::Scalar(Scalar::Bits {
-            bits: result,
-            size: width.bytes() as u8,
-        }))
+        Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
     };

     use rustc::mir::interpret::*;
@@ -50,10 +47,7 @@ crate fn lit_to_const<'a, 'gcx, 'tcx>(
             let id = tcx.allocate_bytes(data);
             ConstValue::Scalar(Scalar::Ptr(id.into()))
         },
-        LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits {
-            bits: n as u128,
-            size: 1,
-        }),
+        LitKind::Byte(n) => ConstValue::Scalar(Scalar::from_uint(n, Size::from_bytes(1))),
         LitKind::Int(n, _) if neg => {
             let n = n as i128;
             let n = n.overflowing_neg().0;
@@ -84,7 +78,7 @@ fn parse_float<'tcx>(
     let num = num.as_str();
     use rustc_apfloat::ieee::{Single, Double};
     use rustc_apfloat::Float;
-    let (bits, size) = match fty {
+    let (data, size) = match fty {
         ast::FloatTy::F32 => {
             num.parse::<f32>().map_err(|_| ())?;
             let mut f = num.parse::<Single>().unwrap_or_else(|e| {
@@ -107,5 +101,5 @@ fn parse_float<'tcx>(
         }
     };

-    Ok(ConstValue::Scalar(Scalar::Bits { bits, size }))
+    Ok(ConstValue::Scalar(Scalar::from_uint(data, Size::from_bytes(size))))
 }

View file

@@ -6,7 +6,7 @@ use syntax::symbol::sym;
 use rustc_apfloat::ieee::{Single, Double};
 use rustc::mir::interpret::{
-    Scalar, EvalResult, Pointer, PointerArithmetic, InterpError, truncate
+    Scalar, EvalResult, Pointer, PointerArithmetic, InterpError,
 };
 use rustc::mir::CastKind;
 use rustc_apfloat::Float;
@@ -135,29 +135,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         use rustc::ty::TyKind::*;
         trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);

-        match val {
-            Scalar::Ptr(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
-            Scalar::Bits { bits, size } => {
-                debug_assert_eq!(size as u64, src_layout.size.bytes());
-                debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
-                    "Unexpected value of size {} before casting", size);
-
-                let res = match src_layout.ty.sty {
-                    Float(fty) => self.cast_from_float(bits, fty, dest_layout.ty)?,
-                    _ => self.cast_from_int(bits, src_layout, dest_layout)?,
-                };
-                // Sanity check
-                match res {
-                    Scalar::Ptr(_) => bug!("Fabricated a ptr value from an int...?"),
-                    Scalar::Bits { bits, size } => {
-                        debug_assert_eq!(size as u64, dest_layout.size.bytes());
-                        debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
-                            "Unexpected value of size {} after casting", size);
-                    }
-                }
-                // Done
-                Ok(res)
+        match val.to_bits_or_ptr(src_layout.size, self) {
+            Err(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
+            Ok(data) => {
+                match src_layout.ty.sty {
+                    Float(fty) => self.cast_from_float(data, fty, dest_layout.ty),
+                    _ => self.cast_from_int(data, src_layout, dest_layout),
+                }
             }
         }
     }
@@ -177,7 +161,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
         trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty);
         use rustc::ty::TyKind::*;
         match dest_layout.ty.sty {
-            Int(_) | Uint(_) => {
+            Int(_) | Uint(_) | RawPtr(_) => {
                 let v = self.truncate(v, dest_layout);
                 Ok(Scalar::from_uint(v, dest_layout.size))
             }
@@ -205,15 +189,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                 Ok(Scalar::from_uint(v, Size::from_bytes(4)))
             },

-            // No alignment check needed for raw pointers.
-            // But we have to truncate to target ptr size.
-            RawPtr(_) => {
-                Ok(Scalar::from_uint(
-                    self.truncate_to_ptr(v).0,
-                    self.pointer_size(),
-                ))
-            },
-
             // Casts to bool are not permitted by rustc, no need to handle them here.
             _ => err!(Unimplemented(format!("int to {:?} cast", dest_layout.ty))),
         }

View file

@@ -12,7 +12,6 @@ use std::borrow::Cow;
 use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
 use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
-pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};

 use rustc_data_structures::fx::{FxHashSet, FxHashMap};
 use syntax::ast::Mutability;
@@ -248,23 +247,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         required_align: Align
     ) -> EvalResult<'tcx> {
         // Check non-NULL/Undef, extract offset
-        let (offset, alloc_align) = match ptr {
-            Scalar::Ptr(ptr) => {
+        let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
+            Err(ptr) => {
                 // check this is not NULL -- which we can ensure only if this is in-bounds
                 // of some (potentially dead) allocation.
                 let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
                                                   CheckInAllocMsg::NullPointerTest)?;
                 (ptr.offset.bytes(), align)
             }
-            Scalar::Bits { bits, size } => {
-                assert_eq!(size as u64, self.pointer_size().bytes());
-                assert!(bits < (1u128 << self.pointer_size().bits()));
+            Ok(data) => {
                 // check this is not NULL
-                if bits == 0 {
+                if data == 0 {
                     return err!(InvalidNullPointerUsage);
                 }
                 // the "base address" is 0 and hence always aligned
-                (bits as u64, required_align)
+                (data as u64, required_align)
             }
         };
         // Check alignment

View file

@@ -641,19 +641,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
             } => {
                 let variants_start = niche_variants.start().as_u32() as u128;
                 let variants_end = niche_variants.end().as_u32() as u128;
-                match raw_discr {
-                    ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => {
+                let raw_discr = raw_discr.not_undef()
+                    .map_err(|_| InterpError::InvalidDiscriminant(ScalarMaybeUndef::Undef))?;
+                match raw_discr.to_bits_or_ptr(discr_val.layout.size, self) {
+                    Err(ptr) => {
                         // The niche must be just 0 (which an inbounds pointer value never is)
                         let ptr_valid = niche_start == 0 && variants_start == variants_end &&
                             self.memory.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
                                                          CheckInAllocMsg::NullPointerTest).is_ok();
                         if !ptr_valid {
-                            return err!(InvalidDiscriminant(raw_discr.erase_tag()));
+                            return err!(InvalidDiscriminant(raw_discr.erase_tag().into()));
                         }
                         (dataful_variant.as_u32() as u128, dataful_variant)
                     },
-                    ScalarMaybeUndef::Scalar(Scalar::Bits { bits: raw_discr, size }) => {
-                        assert_eq!(size as u64, discr_val.layout.size.bytes());
+                    Ok(raw_discr) => {
                         let adjusted_discr = raw_discr.wrapping_sub(niche_start)
                             .wrapping_add(variants_start);
                         if variants_start <= adjusted_discr && adjusted_discr <= variants_end {
@@ -668,8 +669,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M>
                             (dataful_variant.as_u32() as u128, dataful_variant)
                         }
                     },
-                    ScalarMaybeUndef::Undef =>
-                        return err!(InvalidDiscriminant(ScalarMaybeUndef::Undef)),
                 }
             }
         })

View file

@@ -686,7 +686,7 @@ where
             Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
                 assert_eq!(self.pointer_size(), dest.layout.size,
                     "Size mismatch when writing pointer"),
-            Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) =>
+            Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) =>
                 assert_eq!(Size::from_bytes(size.into()), dest.layout.size,
                     "Size mismatch when writing bits"),
             Immediate::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size

View file

@@ -186,9 +186,9 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar
     fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
         match self {
             Scalar::Ptr(p) => Scalar::Ptr(p.snapshot(ctx)),
-            Scalar::Bits{ size, bits } => Scalar::Bits {
+            Scalar::Raw{ size, data } => Scalar::Raw {
+                data: *data,
                 size: *size,
-                bits: *bits,
             },
         }
     }

View file

@@ -480,8 +480,8 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
                     wrapping_range_format(&layout.valid_range, max_hi),
                 )
             );
-            let bits = match value {
-                Scalar::Ptr(ptr) => {
+            let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
+                Err(ptr) => {
                     if lo == 1 && hi == max_hi {
                         // only NULL is not allowed.
                         // We can call `check_align` to check non-NULL-ness, but have to also look
@@ -509,10 +509,8 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
                         );
                     }
                 }
-                Scalar::Bits { bits, size } => {
-                    assert_eq!(size as u64, op.layout.size.bytes());
-                    bits
-                }
+                Ok(data) =>
+                    data
             };
             // Now compare. This is slightly subtle because this is a special "wrap-around" range.
             if wrapping_range_contains(&layout.valid_range, bits) {

View file

@@ -382,10 +382,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> {
         type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(
             ImmTy {
                 imm: Immediate::Scalar(
-                    Scalar::Bits {
-                        bits: n as u128,
-                        size: self.tcx.data_layout.pointer_size.bytes() as u8,
-                    }.into()
+                    Scalar::from_uint(n, self.tcx.data_layout.pointer_size).into()
                 ),
                 layout: self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
             }.into()
@@ -713,18 +710,18 @@ impl<'b, 'a, 'tcx> MutVisitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> {
                     .eval_operand(len, source_info)
                     .expect("len must be const");
                 let len = match self.ecx.read_scalar(len) {
-                    Ok(ScalarMaybeUndef::Scalar(Scalar::Bits {
-                        bits, ..
-                    })) => bits,
+                    Ok(ScalarMaybeUndef::Scalar(Scalar::Raw {
+                        data, ..
+                    })) => data,
                     other => bug!("const len not primitive: {:?}", other),
                 };
                 let index = self
                     .eval_operand(index, source_info)
                     .expect("index must be const");
                 let index = match self.ecx.read_scalar(index) {
-                    Ok(ScalarMaybeUndef::Scalar(Scalar::Bits {
-                        bits, ..
-                    })) => bits,
+                    Ok(ScalarMaybeUndef::Scalar(Scalar::Raw {
+                        data, ..
+                    })) => data,
                     other => bug!("const index not primitive: {:?}", other),
                 };
                 format!(