
Merge pull request #75 from oli-obk/master

implement more drop stuff
Scott Olson 2016-11-06 01:29:14 -08:00 committed by GitHub
commit 33b1676bda
13 changed files with 396 additions and 42 deletions

View file

@@ -285,11 +285,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
pub fn monomorphize_field_ty(&self, f: ty::FieldDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
let substituted = &f.ty(self.tcx, substs);
self.tcx.normalize_associated_type(&substituted)
}
pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
let substituted = ty.subst(self.tcx, substs);
self.tcx.normalize_associated_type(&substituted)
@@ -1120,13 +1115,17 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
}
}
fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
/// ensures this Value is not a ByRef
fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
match value {
Value::ByRef(ptr) => match self.read_value(ptr, ty)? {
Value::ByRef(_) => bug!("read_value can't result in `ByRef`"),
Value::ByVal(primval) => Ok(primval),
Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
},
Value::ByRef(ptr) => self.read_value(ptr, ty),
other => Ok(other),
}
}
fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
match self.follow_by_ref_value(value, ty)? {
Value::ByRef(_) => bug!("follow_by_ref_value can't result in `ByRef`"),
Value::ByVal(primval) => {
let new_primval = self.transmute_primval(primval, ty)?;
@@ -1511,8 +1510,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let iter = src_fields.zip(dst_fields).enumerate();
for (i, (src_f, dst_f)) in iter {
let src_fty = self.monomorphize_field_ty(src_f, substs_a);
let dst_fty = self.monomorphize_field_ty(dst_f, substs_b);
let src_fty = monomorphize_field_ty(self.tcx, src_f, substs_a);
let dst_fty = monomorphize_field_ty(self.tcx, dst_f, substs_b);
if self.type_size(dst_fty) == 0 {
continue;
}
@@ -1729,3 +1728,9 @@ impl IntegerExt for layout::Integer {
}
}
}
pub fn monomorphize_field_ty<'a, 'tcx:'a >(tcx: TyCtxt<'a, 'tcx, 'tcx>, f: ty::FieldDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
let substituted = &f.ty(tcx, substs);
tcx.normalize_associated_type(&substituted)
}

View file

@@ -18,6 +18,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
dest: Lvalue<'tcx>,
dest_ty: Ty<'tcx>,
dest_layout: &'tcx Layout,
target: mir::BasicBlock,
) -> EvalResult<'tcx, ()> {
let arg_vals: EvalResult<Vec<Value>> = args.iter()
.map(|arg| self.eval_operand(arg))
@@ -123,10 +124,25 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
"drop_in_place" => {
let ty = substs.type_at(0);
let ptr = arg_vals[0].read_ptr(&self.memory)?;
let ptr_ty = self.tcx.mk_mut_ptr(ty);
let lvalue = match self.follow_by_ref_value(arg_vals[0], ptr_ty)? {
Value::ByRef(_) => bug!("follow_by_ref_value returned ByRef"),
Value::ByVal(ptr) => Lvalue::from_ptr(ptr.expect_ptr("drop_in_place first arg not a pointer")),
Value::ByValPair(ptr, extra) => Lvalue::Ptr {
ptr: ptr.expect_ptr("drop_in_place first arg not a pointer"),
extra: match extra.try_as_ptr() {
Some(vtable) => LvalueExtra::Vtable(vtable),
None => LvalueExtra::Length(extra.expect_uint("either pointer or not, but not neither")),
},
},
};
let mut drops = Vec::new();
self.drop(Lvalue::from_ptr(ptr), ty, &mut drops)?;
self.eval_drop_impls(drops)?;
self.drop(lvalue, ty, &mut drops)?;
// need to change the block before pushing the drop impl stack frames
// we could do this for all intrinsics before evaluating the intrinsics, but if
// the evaluation fails, we should not have moved forward
self.goto_block(target);
return self.eval_drop_impls(drops);
}
"fabsf32" => {
@@ -330,6 +346,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
}
self.goto_block(target);
// Since we pushed no stack frame, the main loop will act
// as if the call just completed and it's returning to the
// current frame.
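
The `drop_in_place` handling above has to cope with three pointer shapes: a thin pointer (`ByVal`), a slice pointer carrying a length, and a trait-object pointer carrying a vtable (both `ByValPair`). A minimal plain-Rust illustration of those three shapes, using only the standard library (`Speak` and `S` are made-up demo types, not part of this change):

// The three pointer shapes the drop_in_place handler above distinguishes:
// a thin pointer, a slice pointer (address + length), and a trait-object
// pointer (address + vtable). `Speak`/`S` exist only for this demo.
use std::ptr;

trait Speak { fn hi(&self) {} }
struct S;
impl Speak for S {}

fn main() {
    let mut x = 0u32;
    let thin: *mut u32 = &mut x;              // ByVal: just an address

    let mut arr = [1u8, 2, 3];
    let with_len: *mut [u8] = &mut arr[..];   // ByValPair: address + length

    let mut s = S;
    let with_vtable: *mut dyn Speak = &mut s; // ByValPair: address + vtable

    unsafe {
        // None of these types implement Drop, so the calls are no-ops and the
        // stack values can still be dropped normally at the end of scope.
        ptr::drop_in_place(thin);
        ptr::drop_in_place(with_len);
        ptr::drop_in_place(with_vtable);
    }
}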

View file

@@ -3,7 +3,7 @@ use rustc::mir;
use rustc::traits::{self, Reveal};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::layout::Layout;
use rustc::ty::subst::Substs;
use rustc::ty::subst::{Substs, Kind};
use rustc::ty::{self, Ty, TyCtxt, BareFnTy};
use std::rc::Rc;
use syntax::codemap::{DUMMY_SP, Span};
@@ -12,7 +12,7 @@ use syntax::{ast, attr};
use error::{EvalError, EvalResult};
use memory::Pointer;
use primval::PrimVal;
use super::{EvalContext, Lvalue, IntegerExt, StackPopCleanup};
use super::{EvalContext, Lvalue, IntegerExt, StackPopCleanup, LvalueExtra, monomorphize_field_ty};
use super::value::Value;
mod intrinsics;
@@ -146,9 +146,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
Ok(())
}
fn eval_drop_impls(&mut self, drops: Vec<(DefId, Pointer, &'tcx Substs<'tcx>)>) -> EvalResult<'tcx, ()> {
fn eval_drop_impls(&mut self, drops: Vec<(DefId, Value, &'tcx Substs<'tcx>)>) -> EvalResult<'tcx, ()> {
let span = self.frame().span;
for (drop_def_id, adt_ptr, substs) in drops {
// add them to the stack in reverse order, because the impl that needs to run the last
// is the one that needs to be at the bottom of the stack
for (drop_def_id, self_arg, substs) in drops.into_iter().rev() {
// FIXME: supply a real span
let mir = self.load_mir(drop_def_id)?;
trace!("substs for drop glue: {:?}", substs);
@@ -165,7 +167,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
assert!(arg_locals.next().is_none(), "drop impl should have only one arg");
let dest = self.eval_lvalue(&mir::Lvalue::Local(first))?;
let ty = self.frame().mir.local_decls[first].ty;
self.write_value(Value::ByVal(PrimVal::from_ptr(adt_ptr)), dest, ty)?;
self.write_value(self_arg, dest, ty)?;
}
Ok(())
}
@@ -185,8 +187,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ty = fn_ty.sig.0.output;
let layout = self.type_layout(ty);
let (ret, target) = destination.unwrap();
self.call_intrinsic(def_id, substs, arg_operands, ret, ty, layout)?;
self.goto_block(target);
self.call_intrinsic(def_id, substs, arg_operands, ret, ty, layout, target)?;
Ok(())
}
@@ -518,7 +519,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
&mut self,
lval: Lvalue<'tcx>,
ty: Ty<'tcx>,
drop: &mut Vec<(DefId, Pointer, &'tcx Substs<'tcx>)>,
drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
) -> EvalResult<'tcx, ()> {
if !self.type_needs_drop(ty) {
debug!("no need to drop {:?}", ty);
@@ -530,18 +531,53 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
// special case `Box` to deallocate the inner allocation
ty::TyBox(contents_ty) => {
let val = self.read_lvalue(lval)?;
let contents_ptr = val.read_ptr(&self.memory)?;
self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?;
trace!("-deallocating box");
self.memory.deallocate(contents_ptr)?;
// we are going through the read_value path, because that already does all the
// checks for the trait object types. We'd only be repeating ourselves here.
let val = self.follow_by_ref_value(val, ty)?;
trace!("box dealloc on {:?}", val);
match val {
Value::ByRef(_) => bug!("follow_by_ref_value can't result in ByRef"),
Value::ByVal(ptr) => {
assert!(self.type_is_sized(contents_ty));
let contents_ptr = ptr.expect_ptr("value of Box type must be a pointer");
self.drop(Lvalue::from_ptr(contents_ptr), contents_ty, drop)?;
},
Value::ByValPair(prim_ptr, extra) => {
let ptr = prim_ptr.expect_ptr("value of Box type must be a pointer");
let extra = match extra.try_as_ptr() {
Some(vtable) => LvalueExtra::Vtable(vtable),
None => LvalueExtra::Length(extra.expect_uint("slice length")),
};
self.drop(
Lvalue::Ptr {
ptr: ptr,
extra: extra,
},
contents_ty,
drop,
)?;
},
}
let box_free_fn = self.tcx.lang_items.box_free_fn().expect("no box_free lang item");
let substs = self.tcx.intern_substs(&[Kind::from(contents_ty)]);
// this is somewhat hacky, but hey, there's no representation difference between
// pointers and references, so
// #[lang = "box_free"] unsafe fn box_free<T>(ptr: *mut T)
// is the same as
// fn drop(&mut self) if Self is Box<T>
drop.push((box_free_fn, val, substs));
},
ty::TyAdt(adt_def, substs) => {
// FIXME: some structs are represented as ByValPair
let adt_ptr = self.force_allocation(lval)?.to_ptr();
let lval = self.force_allocation(lval)?;
let adt_ptr = match lval {
Lvalue::Ptr { ptr, .. } => ptr,
_ => bug!("force allocation can only yield Lvalue::Ptr"),
};
// run drop impl before the fields' drop impls
if let Some(drop_def_id) = adt_def.destructor() {
drop.push((drop_def_id, adt_ptr, substs));
drop.push((drop_def_id, Value::ByVal(PrimVal::from_ptr(adt_ptr)), substs));
}
let layout = self.type_layout(ty);
let fields = match *layout {
@@ -565,27 +601,115 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
}
},
Layout::RawNullablePointer { nndiscr, .. } => {
let discr = self.read_discriminant_value(adt_ptr, ty)?;
if discr == nndiscr {
assert_eq!(adt_def.variants[discr as usize].fields.len(), 1);
let field_ty = &adt_def.variants[discr as usize].fields[0];
let field_ty = monomorphize_field_ty(self.tcx, field_ty, substs);
// FIXME: once read_discriminant_value works with lvalue, don't force
// alloc in the RawNullablePointer case
self.drop(lval, field_ty, drop)?;
return Ok(());
} else {
// FIXME: the zst variant might contain zst types that impl Drop
return Ok(()); // nothing to do, this is zero sized (e.g. `None`)
}
},
_ => bug!("{:?} is not an adt layout", layout),
};
for (field_ty, offset) in fields {
let field_ty = self.monomorphize_field_ty(field_ty, substs);
self.drop(Lvalue::from_ptr(adt_ptr.offset(offset.bytes() as isize)), field_ty, drop)?;
}
let tcx = self.tcx;
self.drop_fields(
fields.map(|(ty, &offset)| (monomorphize_field_ty(tcx, ty, substs), offset)),
lval,
drop,
)?;
},
ty::TyTuple(fields) => {
// FIXME: some tuples are represented as ByValPair
let ptr = self.force_allocation(lval)?.to_ptr();
for (i, field_ty) in fields.iter().enumerate() {
let offset = self.get_field_offset(ty, i)?.bytes() as isize;
self.drop(Lvalue::from_ptr(ptr.offset(offset)), field_ty, drop)?;
let offsets = match *self.type_layout(ty) {
Layout::Univariant { ref variant, .. } => &variant.offsets,
_ => bug!("tuples must be univariant"),
};
self.drop_fields(fields.iter().cloned().zip(offsets.iter().cloned()), lval, drop)?;
},
ty::TyTrait(_) => {
let (ptr, vtable) = match lval {
Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => (ptr, vtable),
_ => bug!("expected an lvalue with a vtable"),
};
let drop_fn = self.memory.read_ptr(vtable)?;
// some values don't need to call a drop impl, so the value is null
if !drop_fn.points_to_zst() {
let (def_id, substs, ty) = self.memory.get_fn(drop_fn.alloc_id)?;
let fn_sig = self.tcx.erase_late_bound_regions_and_normalize(&ty.sig);
let real_ty = fn_sig.inputs[0];
self.drop(Lvalue::from_ptr(ptr), real_ty, drop)?;
drop.push((def_id, Value::ByVal(PrimVal::from_ptr(ptr)), substs));
} else {
// just a sanity check
assert_eq!(drop_fn.offset, 0);
}
},
ty::TySlice(elem_ty) => {
let (ptr, len) = match lval {
Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => (ptr, len as isize),
_ => bug!("expected an lvalue with a length"),
};
let size = self.type_size(elem_ty) as isize;
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..len {
self.drop(Lvalue::from_ptr(ptr.offset(i * size)), elem_ty, drop)?;
}
},
ty::TyArray(elem_ty, len) => {
let lval = self.force_allocation(lval)?;
let (ptr, extra) = match lval {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("expected an lvalue with optional extra data"),
};
let size = self.type_size(elem_ty) as isize;
// FIXME: this creates a lot of stack frames if the element type has
// a drop impl
for i in 0..len {
self.drop(Lvalue::Ptr { ptr: ptr.offset(i as isize * size), extra: extra }, elem_ty, drop)?;
}
},
// FIXME: what about TyClosure and TyAnon?
// other types do not need to process drop
_ => {},
}
Ok(())
}
fn drop_fields<
I: Iterator<Item=(Ty<'tcx>, ty::layout::Size)>,
>(
&mut self,
mut fields: I,
lval: Lvalue<'tcx>,
drop: &mut Vec<(DefId, Value, &'tcx Substs<'tcx>)>,
) -> EvalResult<'tcx, ()> {
// FIXME: some aggregates may be represented by Value::ByValPair
let (adt_ptr, extra) = self.force_allocation(lval)?.to_ptr_and_extra();
// manual iteration, because we need to be careful about the last field if it is unsized
while let Some((field_ty, offset)) = fields.next() {
let ptr = adt_ptr.offset(offset.bytes() as isize);
if self.type_is_sized(field_ty) {
self.drop(Lvalue::from_ptr(ptr), field_ty, drop)?;
} else {
let lvalue = Lvalue::Ptr {
ptr: ptr,
extra: extra,
};
self.drop(lvalue, field_ty, drop)?;
break; // if it is not sized, then this is the last field anyway
}
}
assert!(fields.next().is_none());
Ok(())
}
}
#[derive(Debug)]
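
The ordering logic spread across `drop` and `eval_drop_impls` above boils down to: collect destructors in the order they must run (a value's own `Drop` impl before its fields' drop glue), then push stack frames in reverse so that popping replays that order. A standalone sketch of just that queue/stack interplay (names are illustrative, not miri's types):

// Sketch of the collect-then-push-in-reverse ordering used by `drop` and
// `eval_drop_impls` above. Strings stand in for (DefId, Value, Substs) tuples.
fn main() {
    // Phase 1: walk the value and record destructors in execution order.
    let mut drops: Vec<&str> = Vec::new();
    drops.push("Outer::drop");    // the adt's own Drop impl runs first
    drops.push("field_a::drop");  // then its fields' drop glue
    drops.push("field_b::drop");

    // Phase 2: push frames in reverse, so the impl that must run last ends up
    // at the bottom of the (call) stack.
    let mut stack: Vec<&str> = Vec::new();
    for frame in drops.into_iter().rev() {
        stack.push(frame);
    }

    // Popping the stack replays the intended order.
    let mut executed = Vec::new();
    while let Some(frame) = stack.pop() {
        executed.push(frame);
    }
    assert_eq!(executed, ["Outer::drop", "field_a::drop", "field_b::drop"]);
}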

View file

@@ -84,8 +84,19 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
let ptr_size = self.memory.pointer_size();
let vtable = self.memory.allocate(ptr_size * (3 + methods.len()), ptr_size)?;
// FIXME: generate a destructor for the vtable.
// trans does this with glue::get_drop_glue(ccx, trait_ref.self_ty())
// in case there is no drop function to be called, this still needs to be initialized
self.memory.write_usize(vtable, 0)?;
if let ty::TyAdt(adt_def, substs) = trait_ref.self_ty().sty {
if let Some(drop_def_id) = adt_def.destructor() {
let ty_scheme = self.tcx.lookup_item_type(drop_def_id);
let fn_ty = match ty_scheme.ty.sty {
ty::TyFnDef(_, _, fn_ty) => fn_ty,
_ => bug!("drop method is not a TyFnDef"),
};
let fn_ptr = self.memory.create_fn_ptr(drop_def_id, substs, fn_ty);
self.memory.write_ptr(vtable, fn_ptr)?;
}
}
self.memory.write_usize(vtable.offset(ptr_size as isize), size as u64)?;
self.memory.write_usize(vtable.offset((ptr_size * 2) as isize), align as u64)?;
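
The writes above fill the usual vtable prefix: slot 0 holds the drop function (left as 0 when nothing needs to run), slot 1 the size, slot 2 the alignment, and the trait's methods follow from slot 3 on, which is why the allocation is `ptr_size * (3 + methods.len())`. A small free-standing sketch of that slot arithmetic (not miri's Memory API):

// Slot layout assumed by the vtable code above:
// [ drop_fn | size | align | method 0 | method 1 | ... ]
fn vtable_slot_offset(ptr_size: usize, slot: usize) -> usize {
    slot * ptr_size
}

fn main() {
    let ptr_size = std::mem::size_of::<usize>();
    assert_eq!(vtable_slot_offset(ptr_size, 0), 0);            // drop fn, or 0 if none
    assert_eq!(vtable_slot_offset(ptr_size, 1), ptr_size);     // size
    assert_eq!(vtable_slot_offset(ptr_size, 2), 2 * ptr_size); // align
    assert_eq!(vtable_slot_offset(ptr_size, 3), 3 * ptr_size); // first method
}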

View file

@@ -289,7 +289,7 @@ pub fn binary_op<'tcx>(
}
(None, None) => {}
_ => unimplemented!(),
_ => return Err(EvalError::ReadPointerAsBytes),
}
let (l, r) = (left.bits, right.bits);

View file

@@ -0,0 +1,37 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use std::mem;
enum Tag<A> {
Tag2(A)
}
struct Rec {
c8: u8,
t: Tag<u64>
}
fn mk_rec() -> Rec {
return Rec { c8:0, t:Tag::Tag2(0) };
}
fn is_u64_aligned(u: &Tag<u64>) -> bool {
let p: usize = unsafe { mem::transmute(u) };
let u64_align = std::mem::align_of::<u64>();
return (p & (u64_align - 1)) == 0; //~ ERROR a raw memory access tried to access part of a pointer value as raw bytes
}
pub fn main() {
let x = mk_rec();
assert!(is_u64_aligned(&x.t));
}

View file

@@ -0,0 +1,16 @@
struct Bar;
static mut DROP_COUNT: usize = 0;
impl Drop for Bar {
fn drop(&mut self) {
unsafe { DROP_COUNT += 1; }
}
}
fn main() {
let b = [Bar, Bar, Bar, Bar];
assert_eq!(unsafe { DROP_COUNT }, 0);
drop(b);
assert_eq!(unsafe { DROP_COUNT }, 4);
}

View file

@@ -0,0 +1,20 @@
trait Foo {}
struct Bar;
impl Foo for Bar {}
static mut DROP_COUNT: usize = 0;
impl Drop for Bar {
fn drop(&mut self) {
unsafe { DROP_COUNT += 1; }
}
}
fn main() {
let b: [Box<Foo>; 4] = [Box::new(Bar), Box::new(Bar), Box::new(Bar), Box::new(Bar)];
assert_eq!(unsafe { DROP_COUNT }, 0);
drop(b);
assert_eq!(unsafe { DROP_COUNT }, 4);
}

View file

@@ -0,0 +1,16 @@
struct Bar;
static mut DROP_COUNT: usize = 0;
impl Drop for Bar {
fn drop(&mut self) {
unsafe { DROP_COUNT += 1; }
}
}
fn main() {
let b: Box<[Bar]> = vec![Bar, Bar, Bar, Bar].into_boxed_slice();
assert_eq!(unsafe { DROP_COUNT }, 0);
drop(b);
assert_eq!(unsafe { DROP_COUNT }, 4);
}

View file

@@ -0,0 +1,20 @@
trait Foo {}
struct Bar;
static mut DROP_CALLED: bool = false;
impl Drop for Bar {
fn drop(&mut self) {
unsafe { DROP_CALLED = true; }
}
}
impl Foo for Bar {}
fn main() {
let b: Box<Foo> = Box::new(Bar);
assert!(unsafe { !DROP_CALLED });
drop(b);
assert!(unsafe { DROP_CALLED });
}

View file

@@ -0,0 +1,22 @@
trait Foo {}
struct Bar;
static mut DROP_CALLED: bool = false;
impl Drop for Bar {
fn drop(&mut self) {
unsafe { DROP_CALLED = true; }
}
}
impl Foo for Bar {}
use std::rc::Rc;
fn main() {
let b: Rc<Foo> = Rc::new(Bar);
assert!(unsafe { !DROP_CALLED });
drop(b);
assert!(unsafe { DROP_CALLED });
}

View file

@@ -0,0 +1,20 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_features, unused_variables)]
#![feature(box_syntax)]
fn test(foo: Box<Vec<isize>> ) { assert_eq!((*foo)[0], 10); }
pub fn main() {
let x = box vec![10];
// Test forgetting a local by move-in
test(x);
}

View file

@@ -0,0 +1,45 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a regression test for the ICE from issue #10846.
//
// The original issue causing the ICE: the LUB-computations during
// type inference were encountering late-bound lifetimes, and
// asserting that such lifetimes should have already been substituted
// with a concrete lifetime.
//
// However, those encounters were occurring within the lexical scope
// of the binding for the late-bound lifetime; that is, the late-bound
// lifetimes were perfectly valid. The core problem was that the type
// folding code was over-zealously passing back all lifetimes when
// doing region-folding, when really all clients of the region-folding
// case only want to see FREE lifetime variables, not bound ones.
// pretty-expanded FIXME #23616
#![allow(unused_features)]
#![feature(box_syntax)]
pub fn main() {
fn explicit() {
fn test<F>(_x: Option<Box<F>>) where F: FnMut(Box<for<'a> FnMut(&'a isize)>) {}
test(Some(box |_f: Box<for<'a> FnMut(&'a isize)>| {}));
}
// The code below is shorthand for the code above (and more likely
// to represent what one encounters in practice).
fn implicit() {
fn test<F>(_x: Option<Box<F>>) where F: FnMut(Box< FnMut(& isize)>) {}
test(Some(box |_f: Box< FnMut(& isize)>| {}));
}
explicit();
implicit();
}