
miri: monomorphize types iff they came from MIR.

Eduard-Mihai Burtescu 2017-12-06 13:50:31 +02:00
parent fe58398314
commit 42a534c20a
8 changed files with 134 additions and 242 deletions
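
Note: the hunks below drop miri's ad-hoc type_size/type_align/type_layout helpers (and their *_with_substs variants) in favour of monomorphizing a type once, only when it actually came from MIR, and then asking layout_of for a full TyLayout. As rough orientation, here is a minimal, self-contained Rust sketch of that consolidation; every name in it (Ty, Substs, TyLayout, layout_of, monomorphize, the helper functions) is a toy stand-in for illustration, not the real rustc API touched in the diff.

// Toy stand-ins for rustc's Ty / Substs / TyLayout; shapes and names are
// illustrative only, not the compiler's real interfaces.
#[derive(Clone, Copy, Debug)]
enum Ty {
    U32,
    Param, // a generic parameter that still needs substitution
    Slice, // stands in for an unsized type
}

type Substs = Ty; // pretend the substitutions carry one concrete type

#[derive(Clone, Copy, Debug)]
struct TyLayout {
    size: u64,  // size in bytes (meaningless when unsized)
    align: u64, // ABI alignment in bytes
    unsized_tail: bool,
}

impl TyLayout {
    fn is_unsized(&self) -> bool {
        self.unsized_tail
    }
}

fn monomorphize(ty: Ty, substs: Substs) -> Ty {
    match ty {
        Ty::Param => substs,
        other => other,
    }
}

fn layout_of(ty: Ty) -> Result<TyLayout, String> {
    match ty {
        Ty::U32 => Ok(TyLayout { size: 4, align: 4, unsized_tail: false }),
        Ty::Slice => Ok(TyLayout { size: 0, align: 1, unsized_tail: true }),
        Ty::Param => Err("cannot compute the layout of a generic parameter".to_string()),
    }
}

// Old shape (removed by the diff): every helper re-monomorphizes and then
// exposes a single fact about the type.
fn type_size_with_substs(ty: Ty, substs: Substs) -> Result<Option<u64>, String> {
    let layout = layout_of(monomorphize(ty, substs))?;
    Ok(if layout.is_unsized() { None } else { Some(layout.size) })
}

fn type_align_with_substs(ty: Ty, substs: Substs) -> Result<u64, String> {
    Ok(layout_of(monomorphize(ty, substs))?.align)
}

// New shape: monomorphize once (only because the type came from MIR), ask
// for the layout once, and read size/align/unsizedness off that layout.
fn size_and_align(ty: Ty, substs: Substs) -> Result<(u64, u64), String> {
    let layout = layout_of(monomorphize(ty, substs))?;
    assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
    Ok((layout.size, layout.align))
}

fn main() -> Result<(), String> {
    let substs = Ty::U32;

    // Old style: two independent queries, each redoing the layout work.
    let size = type_size_with_substs(Ty::Param, substs)?.expect("sized type");
    let align = type_align_with_substs(Ty::Param, substs)?;

    // New style: one layout, both values, explicit unsized check.
    let (size2, align2) = size_and_align(Ty::Param, substs)?;
    assert_eq!((size, align), (size2, align2));

    // Unsized types still report `None` for their size in the old style.
    assert_eq!(type_size_with_substs(Ty::Slice, substs)?, None);

    println!("size = {}, align = {}", size2, align2);
    Ok(())
}

In the hunks that follow, the same consolidation plays out against the real API: call sites stop going through type_size/type_align/type_layout (and get_field_ty, get_field_offset, get_field_count, alloc_ptr_with_substs), monomorphize the type only where it genuinely came from MIR, and then read size, align.abi(), is_unsized() and field layouts from the single TyLayout returned by layout_of.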

View file

@ -1,4 +1,5 @@
use ty::{self, TyCtxt, Ty, Instance, layout};
use ty::{self, TyCtxt, Ty, Instance};
use ty::layout::{self, LayoutOf};
use mir;
use syntax::ast::Mutability;
@ -29,9 +30,12 @@ pub fn eval_body<'a, 'tcx>(
if ecx.tcx.has_attr(instance.def_id(), "linkage") {
return Err(ConstEvalError::NotConst("extern global".to_string()).into());
}
let mir = ecx.load_mir(instance.def)?;
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
ecx.monomorphize(instance.def.def_ty(tcx), instance.substs);
if tcx.interpret_interner.borrow().get_cached(cid).is_none() {
let layout = ecx.type_layout_with_substs(mir.return_ty(), instance.substs)?;
let mir = ecx.load_mir(instance.def)?;
let layout = ecx.layout_of(instance_ty)?;
assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate(
layout.size.bytes(),
@ -68,8 +72,7 @@ pub fn eval_body<'a, 'tcx>(
)?;
}
let value = tcx.interpret_interner.borrow().get_cached(cid).expect("global not cached");
let ret_ty = ecx.monomorphize(mir.return_ty(), instance.substs);
Ok((value, ret_ty))
Ok((value, instance_ty))
})();
(try, ecx)
}
@ -226,16 +229,14 @@ impl<'tcx> super::Machine<'tcx> for CompileTimeFunctionEvaluator {
match intrinsic_name {
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = ecx.type_align(elem_ty)?;
let elem_align = ecx.layout_of(elem_ty)?.align.abi();
let align_val = PrimVal::from_u128(elem_align as u128);
ecx.write_primval(dest, align_val, dest_layout.ty)?;
}
"size_of" => {
let ty = substs.type_at(0);
let size = ecx.type_size(ty)?.expect(
"size_of intrinsic called on unsized value",
) as u128;
let size = ecx.layout_of(ty)?.size.bytes() as u128;
ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?;
}

View file

@ -228,19 +228,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
let substs = self.substs();
self.alloc_ptr_with_substs(ty, substs)
}
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
pub fn alloc_ptr_with_substs(
&mut self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, MemoryPointer> {
let size = self.type_size_with_substs(ty, substs)?.expect(
"cannot alloc memory for unsized type",
);
let align = self.type_align_with_substs(ty, substs)?;
let size = layout.size.bytes();
let align = layout.align.abi();
self.memory.allocate(size, align, Some(MemoryKind::Stack))
}
@ -357,7 +349,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
ty: ty::Ty<'tcx>,
value: Value,
) -> EvalResult<'tcx, (Size, Align)> {
let layout = self.type_layout(ty)?;
let layout = self.layout_of(ty)?;
if !layout.is_unsized() {
Ok(layout.size_and_align())
} else {
@ -381,19 +373,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let (unsized_size, unsized_align) = match ty.sty {
ty::TyAdt(def, substs) => {
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = self.field_ty(substs, last_field);
self.size_and_align_of_dst(field_ty, value)?
}
ty::TyTuple(ref types, _) => {
let field_ty = types.last().unwrap();
let field_ty = self.tcx.fully_normalize_monormophic_ty(field_ty);
self.size_and_align_of_dst(field_ty, value)?
}
_ => bug!("We already checked that we know this type"),
};
let field_ty = layout.field(&self, layout.fields.count() - 1)?.ty;
let (unsized_size, unsized_align) =
self.size_and_align_of_dst(field_ty, value)?;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
@ -439,59 +421,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
}
/// Returns the normalized type of a struct field
fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> {
self.tcx.fully_normalize_monormophic_ty(
&f.ty(self.tcx, param_substs),
)
}
pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
self.type_size_with_substs(ty, self.substs())
}
pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
self.type_align_with_substs(ty, self.substs())
}
pub(super) fn type_size_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, Option<u64>> {
let layout = self.type_layout_with_substs(ty, substs)?;
if layout.is_unsized() {
Ok(None)
} else {
Ok(Some(layout.size.bytes()))
}
}
pub(super) fn type_align_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, u64> {
self.type_layout_with_substs(ty, substs).map(|layout| {
layout.align.abi()
})
}
pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, TyLayout<'tcx>> {
self.type_layout_with_substs(ty, self.substs())
}
pub(super) fn type_layout_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, TyLayout<'tcx>> {
// TODO(solson): Is this inefficient? Needs investigation.
let ty = self.monomorphize(ty, substs);
self.layout_of(ty)
}
pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
@ -680,11 +609,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
_ => (dest, None)
};
let layout = self.type_layout(dest_ty)?;
let layout = self.layout_of(dest_ty)?;
for (i, operand) in operands.iter().enumerate() {
let value = self.eval_operand(operand)?;
// Ignore zero-sized fields.
if !self.type_layout(value.ty)?.is_zst() {
if !self.layout_of(value.ty)?.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
self.write_value(value, field_dest)?;
@ -702,9 +631,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
)
}
};
let elem_size = self.type_size(elem_ty)?.expect(
"repeat element type must be sized",
);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
let value = self.eval_operand(operand)?.value;
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
@ -750,16 +677,18 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
NullaryOp(mir::NullOp::Box, ty) => {
let ty = self.monomorphize(ty, self.substs());
M::box_alloc(self, ty, dest)?;
}
NullaryOp(mir::NullOp::SizeOf, ty) => {
let size = self.type_size(ty)?.expect(
"SizeOf nullary MIR operator called for unsized type",
);
let ty = self.monomorphize(ty, self.substs());
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(),
"SizeOf nullary MIR operator called for unsized type");
self.write_primval(
dest,
PrimVal::from_u128(size as u128),
PrimVal::from_u128(layout.size.bytes() as u128),
dest_ty,
)?;
}
@ -806,7 +735,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
ReifyFnPointer => {
match self.operand_ty(operand).sty {
match self.eval_operand(operand)?.ty.sty {
ty::TyFnDef(def_id, substs) => {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
@ -832,7 +761,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
ClosureFnPointer => {
match self.operand_ty(operand).sty {
match self.eval_operand(operand)?.ty.sty {
ty::TyClosure(def_id, substs) => {
let substs = self.tcx.trans_apply_param_substs(self.substs(), &substs);
let instance = ty::Instance::resolve_closure(
@ -889,27 +818,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
}
/// Returns the field type and whether the field is packed
pub fn get_field_ty(
&self,
ty: Ty<'tcx>,
field_index: usize,
) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
let layout = self.type_layout(ty)?.field(self, field_index)?;
Ok(TyAndPacked {
ty: layout.ty,
packed: layout.is_packed()
})
}
pub fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
Ok(self.type_layout(ty)?.fields.offset(field_index))
}
pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
Ok(self.type_layout(ty)?.fields.count() as u64)
}
pub(super) fn eval_operand_to_primval(
&mut self,
op: &mir::Operand<'tcx>,
@ -929,13 +837,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
use mir::Operand::*;
let ty = self.monomorphize(op.ty(self.mir(), self.tcx), self.substs());
match *op {
// FIXME: do some more logic on `move` to invalidate the old location
Copy(ref place) |
Move(ref place) => {
Ok(ValTy {
value: self.eval_and_read_place(place)?,
ty: self.operand_ty(op),
ty
})
},
@ -956,7 +865,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Ok(ValTy {
value,
ty: self.operand_ty(op),
ty,
})
}
}
@ -967,7 +876,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
place: Place,
ty: Ty<'tcx>,
) -> EvalResult<'tcx, u128> {
let layout = self.type_layout(ty)?;
let layout = self.layout_of(ty)?;
//trace!("read_discriminant_value {:#?}", layout);
match layout.variants {
@ -1024,7 +933,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
dest: Place,
variant_index: usize,
) -> EvalResult<'tcx> {
let layout = self.type_layout(dest_ty)?;
let layout = self.layout_of(dest_ty)?;
match layout.variants {
layout::Variants::Single { index } => {
@ -1066,15 +975,11 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Value::ByRef(self.tcx.interpret_interner.borrow().get_cached(gid).expect("global not cached"))
}
pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
}
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
let size = self.type_size(ty)?.expect(
"cannot copy from an unsized type",
);
let align = self.type_align(ty)?;
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot copy from an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
self.memory.copy(src, dest, size, align, false)?;
Ok(())
}
@ -1094,8 +999,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
Some(val) => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let substs = self.stack[frame].instance.substs;
let ptr = self.alloc_ptr_with_substs(ty, substs)?;
let ptr = self.alloc_ptr(ty)?;
self.stack[frame].locals[local.index() - 1] =
Some(Value::by_ref(ptr.into())); // it stays live
self.write_value_to_ptr(val, ptr.into(), ty)?;
@ -1265,7 +1169,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
}
Value::ByVal(primval) => {
let layout = self.type_layout(dest_ty)?;
let layout = self.layout_of(dest_ty)?;
if layout.is_zst() {
assert!(primval.is_undef());
Ok(())
@ -1278,7 +1182,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
Value::ByValPair(a, b) => {
let ptr = dest.to_ptr()?;
let mut layout = self.type_layout(dest_ty)?;
let mut layout = self.layout_of(dest_ty)?;
trace!("write_value_to_ptr valpair: {:#?}", layout);
let mut packed = layout.is_packed();
'outer: loop {
@ -1360,7 +1264,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
ty::TyAdt(..) => {
match self.type_layout(ty)?.abi {
match self.layout_of(ty)?.abi {
layout::Abi::Scalar(ref scalar) => {
use ty::layout::Primitive::*;
match scalar.value {
@ -1487,7 +1391,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
return self.read_ptr(ptr, ty.boxed_ty()).map(Some);
}
if let layout::Abi::Scalar(ref scalar) = self.type_layout(ty)?.abi {
if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi {
let mut signed = false;
if let layout::Int(_, s) = scalar.value {
signed = s;
@ -1580,34 +1484,36 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
&mut self,
src: Value,
src_ty: Ty<'tcx>,
dest: Place,
dest_ty: Ty<'tcx>,
dst: Place,
dst_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
match (&src_ty.sty, &dest_ty.sty) {
let src_layout = self.layout_of(src_ty)?;
let dst_layout = self.layout_of(dst_ty)?;
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
(&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
(&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty)
self.unsize_into_ptr(src, src_ty, dst, dst_ty, s.ty, d.ty)
}
(&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
(&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
if def_a.is_box() || def_b.is_box() {
if !def_a.is_box() || !def_b.is_box() {
panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
panic!("invalid unsizing between {:?} -> {:?}", src_ty, dst_ty);
}
return self.unsize_into_ptr(
src,
src_ty,
dest,
dest_ty,
dst,
dst_ty,
src_ty.boxed_ty(),
dest_ty.boxed_ty(),
dst_ty.boxed_ty(),
);
}
if self.ty_to_primval_kind(src_ty).is_ok() {
// TODO: We ignore the packed flag here
let sty = self.get_field_ty(src_ty, 0)?.ty;
let dty = self.get_field_ty(dest_ty, 0)?.ty;
return self.unsize_into(src, sty, dest, dty);
let sty = src_layout.field(&self, 0)?.ty;
let dty = dst_layout.field(&self, 0)?.ty;
return self.unsize_into(src, sty, dst, dty);
}
// unsizing of generic struct with pointer fields
// Example: `Arc<T>` -> `Arc<Trait>`
@ -1615,34 +1521,25 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter();
let dst_fields = def_b.variants[0].fields.iter();
let iter = src_fields.zip(dst_fields).enumerate();
//let src = adt::MaybeSizedValue::sized(src);
//let dst = adt::MaybeSizedValue::sized(dst);
let src_ptr = match src {
Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
// the entire struct is just a pointer
Value::ByVal(_) => {
for (i, (src_f, dst_f)) in iter {
let src_fty = self.field_ty(substs_a, src_f);
let dst_fty = self.field_ty(substs_b, dst_f);
if self.type_size(dst_fty)? == Some(0) {
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
if dst_layout.is_zst() {
continue;
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
assert_eq!(src_field_offset, 0);
assert_eq!(dst_field_offset, 0);
assert_eq!(self.type_size(src_fty)?, self.type_size(src_ty)?);
assert_eq!(self.type_size(dst_fty)?, self.type_size(dest_ty)?);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
assert_eq!(dst_field.size, dst_layout.size);
return self.unsize_into(
src,
src_fty,
dest,
dst_fty,
src_field.ty,
dst,
dst_field.ty,
);
}
bug!("by val unsize into where the value doesn't cover the entire type")
@ -1652,25 +1549,25 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
};
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr()?;
for (i, (src_f, dst_f)) in iter {
let src_fty = self.field_ty(substs_a, src_f);
let dst_fty = self.field_ty(substs_b, dst_f);
if self.type_size(dst_fty)? == Some(0) {
let dst = self.force_allocation(dst)?.to_ptr()?;
for i in 0..src_layout.fields.count() {
let src_field = src_layout.field(&self, i)?;
let dst_field = dst_layout.field(&self, i)?;
if dst_field.is_zst() {
continue;
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
let src_field_offset = src_layout.fields.offset(i).bytes();
let dst_field_offset = dst_layout.fields.offset(i).bytes();
let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
if src_fty == dst_fty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
let dst_f_ptr = dst.offset(dst_field_offset, &self)?;
if src_field.ty == dst_field.ty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_field.ty)?;
} else {
self.unsize_into(
Value::by_ref(src_f_ptr),
src_fty,
src_field.ty,
Place::from_ptr(dst_f_ptr),
dst_fty,
dst_field.ty,
)?;
}
}
@ -1680,7 +1577,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
bug!(
"unsize_into: invalid conversion: {:?} -> {:?}",
src_ty,
dest_ty
dst_ty
)
}
}

View file

@ -1,6 +1,6 @@
use mir;
use ty::{self, Ty};
use ty::layout::TyLayout;
use ty::layout::{LayoutOf, TyLayout};
use rustc_data_structures::indexed_vec::Idx;
use super::{EvalResult, EvalContext, MemoryPointer, PrimVal, Value, Pointer, Machine, PtrAndAlign, ValTy};
@ -134,7 +134,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let base_ty = self.place_ty(&proj.base);
match proj.elem {
Field(field, _) => {
let base_layout = self.type_layout(base_ty)?;
let base_layout = self.layout_of(base_ty)?;
let field_index = field.index();
let field = base_layout.field(&self, field_index)?;
let offset = base_layout.fields.offset(field_index);
@ -317,9 +317,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.type_size(elem_ty)?.expect(
"slice element must be sized",
);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(
n < len,
"Tried to access element {} of array/slice with length {}",
@ -354,7 +352,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
use mir::ProjectionElem::*;
let (ptr, extra) = match *proj_elem {
Field(field, _) => {
let layout = self.type_layout(base_ty)?;
let layout = self.layout_of(base_ty)?;
return Ok(self.place_field(base, field, layout)?.0);
}
@ -394,9 +392,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect(
"sequence element must be sized",
);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(n >= min_length as u64);
let index = if from_end {
@ -415,9 +411,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect(
"slice element must be sized",
);
let elem_size = self.layout_of(elem_ty)?.size.bytes();
assert!(u64::from(from) <= n - u64::from(to));
let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
// sublicing arrays produces arrays

View file

@ -6,7 +6,7 @@ use hir;
use mir::visit::{Visitor, PlaceContext};
use mir;
use ty::{self, Instance};
use ty::subst::Substs;
use ty::layout::LayoutOf;
use middle::const_val::ConstVal;
use super::{EvalResult, EvalContext, StackPopCleanup, PtrAndAlign, GlobalId, Place,
@ -158,7 +158,6 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
instance: Instance<'tcx>,
span: Span,
mutability: Mutability,
orig_substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, bool> {
debug!("global_item: {:?}", instance);
let cid = GlobalId {
@ -172,8 +171,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
M::global_item_with_linkage(self, cid.instance, mutability)?;
return Ok(false);
}
let mir = self.load_mir(instance.def)?;
let layout = self.type_layout_with_substs(mir.return_ty(), orig_substs)?;
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
self.monomorphize(instance.def.def_ty(self.tcx), instance.substs);
let layout = self.layout_of(instance_ty)?;
assert!(!layout.is_unsized());
let ptr = self.memory.allocate(
layout.size.bytes(),
@ -200,6 +201,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let cleanup = StackPopCleanup::MarkStatic(mutability);
let name = ty::tls::with(|tcx| tcx.item_path_str(instance.def_id()));
trace!("pushing stack frame for global: {}", name);
let mir = self.load_mir(instance.def)?;
self.push_stack_frame(
instance,
span,
@ -254,7 +256,6 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
instance,
constant.span,
Mutability::Immutable,
this.instance.substs,
)
}
mir::Literal::Value { .. } => Ok(false),
@ -267,9 +268,8 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
return Ok(false);
}
let mir = &this.mir.promoted[index];
let layout = this.ecx.type_layout_with_substs(
mir.return_ty(),
this.instance.substs)?;
let ty = this.ecx.monomorphize(mir.return_ty(), this.instance.substs);
let layout = this.ecx.layout_of(ty)?;
assert!(!layout.is_unsized());
let ptr = this.ecx.memory.allocate(
layout.size.bytes(),
@ -320,7 +320,6 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
} else {
Mutability::Immutable
},
this.instance.substs,
)
} else {
bug!("static def id doesn't point to static");
@ -340,7 +339,6 @@ impl<'a, 'b, 'tcx, M: Machine<'tcx>> Visitor<'tcx> for ConstantExtractor<'a, 'b,
} else {
Mutability::Immutable
},
this.instance.substs,
)
} else {
bug!("static found but isn't a static: {:?}", def);

View file

@ -1,5 +1,6 @@
use mir;
use ty::{self, TypeVariants};
use ty::layout::LayoutOf;
use syntax::codemap::Span;
use syntax::abi::Abi;
@ -64,13 +65,14 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
None => None,
};
let func_ty = self.operand_ty(func);
let (fn_def, sig) = match func_ty.sty {
let func = self.eval_operand(func)?;
let (fn_def, sig) = match func.ty.sty {
ty::TyFnPtr(sig) => {
let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?;
let fn_ptr = self.value_to_primval(func)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.def.def_ty(self.tcx);
let instance_ty = self.monomorphize(instance_ty, instance.substs);
// FIXME(eddyb) use `Instance::ty` when it becomes available.
let instance_ty =
self.monomorphize(instance.def.def_ty(self.tcx), instance.substs);
match instance_ty.sty {
ty::TyFnDef(..) => {
let real_sig = instance_ty.fn_sig(self.tcx);
@ -86,10 +88,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
ty::TyFnDef(def_id, substs) => (
self.resolve(def_id, substs)?,
func_ty.fn_sig(self.tcx),
func.ty.fn_sig(self.tcx),
),
_ => {
let msg = format!("can't handle callee of type {:?}", func_ty);
let msg = format!("can't handle callee of type {:?}", func.ty);
return err!(Unimplemented(msg));
}
};
@ -214,7 +216,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
// First argument of real_sig must be a ZST
let fst_ty = real_sig.inputs_and_output[0];
if self.type_layout(fst_ty)?.is_zst() {
if self.layout_of(fst_ty)?.is_zst() {
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
@ -249,7 +251,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
_ => return err!(Unreachable),
};
let ty = sig.output();
let layout = self.type_layout(ty)?;
let layout = self.layout_of(ty)?;
M::call_intrinsic(self, instance, args, ret, layout, target)?;
self.dump_local(ret);
Ok(())
@ -319,7 +321,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
}
// unpack and write all other args
let layout = self.type_layout(args[1].ty)?;
let layout = self.layout_of(args[1].ty)?;
if let ty::TyTuple(..) = args[1].ty.sty {
if self.frame().mir.args_iter().count() == layout.fields.count() + 1 {
match args[1].value {
@ -405,7 +407,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let mut args = args.to_vec();
let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored
let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty;
args[0].ty = ty;
args[0].value = ptr.to_value();
// recurse with concrete function

View file

@ -1,5 +1,5 @@
use ty::{self, Ty};
use ty::layout::{Size, Align};
use ty::layout::{Size, Align, LayoutOf};
use syntax::ast::Mutability;
use super::{EvalResult, EvalContext, eval_context, MemoryPointer, Value, PrimVal,
@ -19,10 +19,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
) -> EvalResult<'tcx, MemoryPointer> {
debug!("get_vtable(trait_ref={:?})", trait_ref);
let size = self.type_size(trait_ref.self_ty())?.expect(
"can't create a vtable for an unsized type",
);
let align = self.type_align(trait_ref.self_ty())?;
let layout = self.layout_of(trait_ref.self_ty())?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes();
let align = layout.align.abi();
let ptr_size = self.memory.pointer_size();
let methods = self.tcx.vtable_methods(trait_ref);

View file

@ -2,6 +2,7 @@ use hir::{self, Mutability};
use hir::Mutability::*;
use mir::{self, ValidationOp, ValidationOperand};
use ty::{self, Ty, TypeFoldable, TyCtxt};
use ty::layout::LayoutOf;
use ty::subst::{Substs, Subst};
use traits;
use infer::InferCtxt;
@ -433,7 +434,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
query: ValidationQuery<'tcx>,
mode: ValidationMode,
) -> EvalResult<'tcx> {
let mut layout = self.type_layout(query.ty)?;
let mut layout = self.layout_of(query.ty)?;
layout.ty = query.ty;
// TODO: Maybe take visibility/privacy into account.
@ -530,23 +531,21 @@ impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
let (ptr, extra) = self.force_allocation(query.place.1)?.to_ptr_extra_aligned();
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Places?
let len = match self.type_size(query.ty)? {
Some(size) => {
assert_eq!(extra, PlaceExtra::None, "Got a fat ptr to a sized type");
size
}
None => {
// The only unsized typ we concider "owning" is TyStr.
assert_eq!(
query.ty.sty,
TyStr,
"Found a surprising unsized owning type"
);
// The extra must be the length, in bytes.
match extra {
PlaceExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
let layout = self.layout_of(query.ty)?;
let len = if !layout.is_unsized() {
assert_eq!(extra, PlaceExtra::None, "Got a fat ptr to a sized type");
layout.size.bytes()
} else {
// The only unsized typ we concider "owning" is TyStr.
assert_eq!(
query.ty.sty,
TyStr,
"Found a surprising unsized owning type"
);
// The extra must be the length, in bytes.
match extra {
PlaceExtra::Length(len) => len,
_ => bug!("TyStr must have a length as extra"),
}
};
// Handle locking

View file

@ -844,7 +844,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
},
TyArray(elem_ty, n) => {
let n = n.val.to_const_int().unwrap().to_u64().unwrap();
let size = ecx.type_size(elem_ty).unwrap().unwrap();
let size = ecx.layout_of(elem_ty).unwrap().size.bytes();
let vec: Vec<(ConstVal, Ty<'tcx>)> = match ctfe {
ConstVal::ByteStr(arr) => arr.data.iter().map(|&b| {
(ConstVal::Integral(ConstInt::U8(b)), ecx.tcx.types.u8)
@ -868,8 +868,9 @@ fn check_ctfe_against_miri<'a, 'tcx>(
ConstVal::Aggregate(Tuple(v)) => v,
_ => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
let layout = ecx.layout_of(miri_ty).unwrap();
for (i, elem) in vec.into_iter().enumerate() {
let offset = ecx.get_field_offset(miri_ty, i).unwrap();
let offset = layout.fields.offset(i);
let ptr = miri_val.offset(offset.bytes(), &ecx).unwrap();
check_ctfe_against_miri(ecx, ptr, elem.ty, elem.val);
}
@ -895,7 +896,7 @@ fn check_ctfe_against_miri<'a, 'tcx>(
},
ctfe => bug!("miri produced {:?}, but ctfe yielded {:?}", miri_ty, ctfe),
};
let layout = ecx.type_layout(miri_ty).unwrap();
let layout = ecx.layout_of(miri_ty).unwrap();
for &(name, elem) in vec.into_iter() {
let field = struct_variant.fields.iter().position(|f| f.name == name).unwrap();
let (place, _) = ecx.place_field(