make boxes self-describing (fixes #1493)

Niko Matsakis 2012-01-17 10:57:11 -08:00
parent 49cb3fc7df
commit c36207bfb8
26 changed files with 582 additions and 596 deletions

@@ -61,6 +61,7 @@ RUNTIME_CS_$(1) := \
 rt/rust_cc.cpp \
 rt/rust_debug.cpp \
 rt/memory_region.cpp \
+rt/boxed_region.cpp \
 rt/test/rust_test_harness.cpp \
 rt/test/rust_test_runtime.cpp \
 rt/test/rust_test_util.cpp \

@@ -7,7 +7,7 @@
 # If you are making non-backwards compatible changes to the runtime,
 # set this flag to 1. It will cause stage1 to use the snapshot
 # runtime rather than the runtime from the working directory.
-USE_SNAPSHOT_RUNTIME=0
+USE_SNAPSHOT_RUNTIME=1
 
 define TARGET_STAGE_N

@@ -26,9 +26,11 @@ const frame_glue_fns_field_drop: int = 1;
 const frame_glue_fns_field_reloc: int = 2;
 
-// n.b. must be same as cbox_elt_refcnt
-const box_rc_field_refcnt: int = 0;
-const box_rc_field_body: int = 1;
+const box_field_refcnt: int = 0;
+const box_field_tydesc: int = 1;
+const box_field_prev: int = 2;
+const box_field_next: int = 3;
+const box_field_body: int = 4;
 
 const general_code_alignment: int = 16;

@@ -59,13 +61,9 @@ const cmp_glue_op_le: uint = 2u;
 const fn_field_code: int = 0;
 const fn_field_box: int = 1;
 
-// closure_box, see trans_closure.rs
-//
-// n.b. the refcnt must be compatible with a normal box
-const cbox_elt_refcnt: int = 0;
-const cbox_elt_tydesc: int = 1;
-const cbox_elt_ty_params: int = 2;
-const cbox_elt_bindings: int = 3;
+// closures, see trans_closure.rs
+const closure_body_ty_params: int = 0;
+const closure_body_bindings: int = 1;
 
 const vec_elt_fill: int = 0;
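The five box_field_* constants above are the compiler's indices into the new box header. For orientation, here is a sketch of the runtime struct they correspond to, inferred from these constants and from boxed_region.cpp later in this commit; the authoritative definition of rust_opaque_box lives in rust_internal.h, which is not shown here:

    #include <cstdint>

    struct type_desc;

    // Sketch only: field order mirrors box_field_refcnt..box_field_next.
    struct rust_opaque_box {
        intptr_t ref_count;       // box_field_refcnt == 0
        type_desc *td;            // box_field_tydesc == 1
        rust_opaque_box *prev;    // box_field_prev   == 2 (live-alloc list)
        rust_opaque_box *next;    // box_field_next   == 3 (live-alloc list)
        // the body (box_field_body == 4) follows the header
    };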

@@ -11,6 +11,7 @@ type upcalls =
     {_fail: ValueRef,
      malloc: ValueRef,
      free: ValueRef,
+     validate_box: ValueRef,
      shared_malloc: ValueRef,
      shared_free: ValueRef,
      mark: ValueRef,

@@ -52,10 +53,12 @@ fn declare_upcalls(targ_cfg: @session::config,
                  T_ptr(T_i8()),
                  size_t]),
       malloc:
-          d("malloc", [size_t, T_ptr(tydesc_type)],
+          d("malloc", [T_ptr(tydesc_type)],
             T_ptr(T_i8())),
       free:
           dv("free", [T_ptr(T_i8()), int_t]),
+      validate_box:
+          dv("validate_box", [T_ptr(T_i8())]),
       shared_malloc:
           d("shared_malloc", [size_t, T_ptr(tydesc_type)],
             T_ptr(T_i8())),
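The important change here is that the malloc upcall no longer takes an explicit size: a box is now self-describing, so the runtime reads size and alignment from the type descriptor. A hedged sketch of the C-side signatures this implies (the exact declarations live in the runtime headers, which this excerpt does not show):

    struct type_desc;
    struct rust_opaque_box;

    // Before: the compiler computed the box size and passed it in.
    //   extern "C" void *upcall_malloc(size_t size, type_desc *td);
    // After: the tydesc alone determines the allocation; compare
    // boxed_region::malloc below, which uses td->size and td->align.
    extern "C" rust_opaque_box *upcall_malloc(type_desc *td);

    // New debugging hook: assert that a pointer refers to a live box.
    extern "C" void upcall_validate_box(rust_opaque_box *box);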

@@ -14,6 +14,7 @@ import syntax::ast_util::{dummy_sp};
 import syntax::ast::def_id;
 import syntax::codemap::span;
 import syntax::print::pprust::pat_to_str;
+import back::abi;
 import common::*;

@@ -465,7 +466,7 @@ fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail,
     // Unbox in case of a box field
     if any_box_pat(m, col) {
         let box = Load(bcx, val);
-        let unboxed = GEPi(bcx, box, [0, back::abi::box_rc_field_body]);
+        let unboxed = GEPi(bcx, box, [0, abi::box_field_body]);
         compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left,
                          f, exits);
         ret;

@@ -776,7 +777,7 @@ fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef,
       ast::pat_box(inner) {
         let box = Load(bcx, val);
         let unboxed =
-            GEPi(bcx, box, [0, back::abi::box_rc_field_body]);
+            GEPi(bcx, box, [0, abi::box_field_body]);
         bcx = bind_irrefutable_pat(bcx, inner, unboxed, true);
       }
       ast::pat_uniq(inner) {

@@ -91,7 +91,7 @@ fn type_of_fn(cx: @crate_ctxt, inputs: [ty::arg],
     atys += [out_ty];
 
     // Arg 1: Environment
-    atys += [T_opaque_cbox_ptr(cx)];
+    atys += [T_opaque_box_ptr(cx)];
 
     // Args >2: ty params, if not acquired via capture...
     for bounds in params {

@@ -193,7 +193,7 @@ fn type_of_inner(cx: @crate_ctxt, t: ty::t)
         T_struct(tys)
       }
       ty::ty_opaque_closure_ptr(_) {
-        T_opaque_cbox_ptr(cx)
+        T_opaque_box_ptr(cx)
       }
       ty::ty_constr(subt,_) {
         // FIXME: could be a constraint on ty_fn

@@ -764,54 +764,54 @@ fn trans_shared_malloc(cx: @block_ctxt, llptr_ty: TypeRef, llsize: ValueRef)
     ret rslt(cx, PointerCast(cx, rval, llptr_ty));
 }
 
+// Returns a pointer to the body for the box. The box may be an opaque
+// box. The result will be cast to the type of body_t, if it is statically
+// known.
+//
+// The runtime equivalent is box_body() in "rust_internal.h".
+fn opaque_box_body(bcx: @block_ctxt,
+                   body_t: ty::t,
+                   boxptr: ValueRef) -> ValueRef {
+    let ccx = bcx_ccx(bcx);
+    let boxptr = PointerCast(bcx, boxptr, T_ptr(T_box_header(ccx)));
+    let bodyptr = GEPi(bcx, boxptr, [1]);
+    if check type_has_static_size(ccx, body_t) {
+        PointerCast(bcx, bodyptr, T_ptr(type_of(ccx, body_t)))
+    } else {
+        PointerCast(bcx, bodyptr, T_ptr(T_i8()))
+    }
+}
+
 // trans_malloc_boxed_raw: expects an unboxed type and returns a pointer to
-// enough space for something of that type, along with space for a reference
-// count; in other words, it allocates a box for something of that type.
-fn trans_malloc_boxed_raw(cx: @block_ctxt, t: ty::t) -> result {
-    let bcx = cx;
+// enough space for a box of that type. This includes a rust_opaque_box
+// header.
+fn trans_malloc_boxed_raw(bcx: @block_ctxt, t: ty::t,
+                          &static_ti: option<@tydesc_info>) -> result {
+    let bcx = bcx;
+    let ccx = bcx_ccx(bcx);
 
-    // Synthesize a fake box type structurally so we have something
-    // to measure the size of.
-
-    // We synthesize two types here because we want both the type of the
-    // pointer and the pointee. boxed_body is the type that we measure the
-    // size of; box_ptr is the type that's converted to a TypeRef and used as
-    // the pointer cast target in trans_raw_malloc.
-
-    // The mk_int here is the space being
-    // reserved for the refcount.
-    let boxed_body = ty::mk_tup(bcx_tcx(bcx), [ty::mk_int(bcx_tcx(cx)), t]);
-    let box_ptr = ty::mk_imm_box(bcx_tcx(bcx), t);
-    let r = size_of(cx, boxed_body);
-    let llsz = r.val; bcx = r.bcx;
-
     // Grab the TypeRef type of box_ptr, because that's what trans_raw_malloc
     // wants.
-    // FIXME: Could avoid this check with a postcondition on mk_imm_box?
-    // (requires Issue #586)
-    let ccx = bcx_ccx(bcx);
+    let box_ptr = ty::mk_imm_box(bcx_tcx(bcx), t);
     check (type_has_static_size(ccx, box_ptr));
     let llty = type_of(ccx, box_ptr);
 
-    let ti = none;
-    let tydesc_result = get_tydesc(bcx, t, true, ti);
-    let lltydesc = tydesc_result.result.val;
-    bcx = tydesc_result.result.bcx;
-    let rval = Call(cx, ccx.upcalls.malloc,
-                    [llsz, lltydesc]);
-    ret rslt(cx, PointerCast(cx, rval, llty));
+    // Get the tydesc for the body:
+    let {bcx, val: lltydesc} = get_tydesc(bcx, t, true, static_ti).result;
+
+    // Allocate space:
+    let rval = Call(bcx, ccx.upcalls.malloc, [lltydesc]);
+    ret rslt(bcx, PointerCast(bcx, rval, llty));
 }
 
 // trans_malloc_boxed: usefully wraps trans_malloc_boxed_raw; allocates a box,
 // initializes the reference count to 1, and pulls out the body and rc
-fn trans_malloc_boxed(cx: @block_ctxt, t: ty::t) ->
+fn trans_malloc_boxed(bcx: @block_ctxt, t: ty::t) ->
    {bcx: @block_ctxt, box: ValueRef, body: ValueRef} {
-    let res = trans_malloc_boxed_raw(cx, t);
-    let box = res.val;
-    let rc = GEPi(res.bcx, box, [0, abi::box_rc_field_refcnt]);
-    Store(res.bcx, C_int(bcx_ccx(cx), 1), rc);
-    let body = GEPi(res.bcx, box, [0, abi::box_rc_field_body]);
-    ret {bcx: res.bcx, box: res.val, body: body};
+    let ti = none;
+    let {bcx, val:box} = trans_malloc_boxed_raw(bcx, t, ti);
+    let body = GEPi(bcx, box, [0, abi::box_field_body]);
+    ret {bcx: bcx, box: box, body: body};
 }
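opaque_box_body is the compiler-side twin of the runtime's box_body(). rust_internal.h is not shown in this commit, so the following is only a sketch of what that helper plausibly reduces to, relying on the invariant (documented in the closure comments below) that the header size is a multiple of the strictest body alignment:

    #include <cstdint>

    struct type_desc;
    struct rust_opaque_box {
        intptr_t ref_count;
        type_desc *td;
        rust_opaque_box *prev;
        rust_opaque_box *next;
    };

    // Sketch: because sizeof(rust_opaque_box) already satisfies the
    // strictest body alignment, the body starts right after the header.
    static inline void *box_body(rust_opaque_box *box) {
        return (void *) (box + 1);
    }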
// Type descriptor and type glue stuff // Type descriptor and type glue stuff
@@ -1231,8 +1231,8 @@ fn make_take_glue(cx: @block_ctxt, v: ValueRef, t: ty::t) {
 fn incr_refcnt_of_boxed(cx: @block_ctxt, box_ptr: ValueRef) -> @block_ctxt {
     let ccx = bcx_ccx(cx);
-    let rc_ptr =
-        GEPi(cx, box_ptr, [0, abi::box_rc_field_refcnt]);
+    maybe_validate_box(cx, box_ptr);
+    let rc_ptr = GEPi(cx, box_ptr, [0, abi::box_field_refcnt]);
     let rc = Load(cx, rc_ptr);
     rc = Add(cx, rc, C_int(ccx, 1));
     Store(cx, rc, rc_ptr);

@@ -1243,7 +1243,7 @@ fn free_box(bcx: @block_ctxt, v: ValueRef, t: ty::t) -> @block_ctxt {
     ret alt ty::struct(bcx_tcx(bcx), t) {
       ty::ty_box(body_mt) {
         let v = PointerCast(bcx, v, type_of_1(bcx, t));
-        let body = GEPi(bcx, v, [0, abi::box_rc_field_body]);
+        let body = GEPi(bcx, v, [0, abi::box_field_body]);
         let bcx = drop_ty(bcx, body, body_mt.ty);
         trans_free_if_not_gc(bcx, v)
       }

@@ -1274,7 +1274,7 @@ fn make_free_glue(bcx: @block_ctxt, v: ValueRef, t: ty::t) {
         let ccx = bcx_ccx(bcx);
         let llbox_ty = T_opaque_iface_ptr(ccx);
         let b = PointerCast(bcx, v, llbox_ty);
-        let body = GEPi(bcx, b, [0, abi::box_rc_field_body]);
+        let body = GEPi(bcx, b, [0, abi::box_field_body]);
         let tydescptr = GEPi(bcx, body, [0, 0]);
         let tydesc = Load(bcx, tydescptr);
         let ti = none;

@@ -1375,9 +1375,23 @@ fn trans_res_drop(cx: @block_ctxt, rs: ValueRef, did: ast::def_id,
     ret next_cx;
 }
 
+fn maybe_validate_box(_cx: @block_ctxt, _box_ptr: ValueRef) {
+    // Uncomment this when debugging annoying use-after-free
+    // bugs. But do not commit with this uncommented! Big performance hit.
+    // let cx = _cx, box_ptr = _box_ptr;
+    // let ccx = bcx_ccx(cx);
+    // warn_not_to_commit(ccx, "validate_box() is uncommented");
+    // let raw_box_ptr = PointerCast(cx, box_ptr, T_ptr(T_i8()));
+    // Call(cx, ccx.upcalls.validate_box, [raw_box_ptr]);
+}
+
 fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     -> @block_ctxt {
     let ccx = bcx_ccx(cx);
+    maybe_validate_box(cx, box_ptr);
+
     let rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
     let free_cx = new_sub_block_ctxt(cx, "free");
     let next_cx = new_sub_block_ctxt(cx, "next");
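maybe_validate_box, when uncommented, emits a call to the new validate_box upcall. The runtime half of that upcall is not shown in this commit; the following is a plausible sketch only, assuming it scans the task's live-box list maintained by boxed_region (the list head is passed in as a parameter here to keep the sketch self-contained):

    #include <assert.h>
    #include <stddef.h>

    struct type_desc;
    struct rust_opaque_box {
        size_t ref_count;
        type_desc *td;
        rust_opaque_box *prev;
        rust_opaque_box *next;
    };

    // live_allocs would come from the task's boxed_region
    // (task->boxed.first_live_alloc() in the runtime).
    void validate_box(rust_opaque_box *live_allocs, rust_opaque_box *ptr) {
        if (!ptr) return;
        for (rust_opaque_box *b = live_allocs; b != NULL; b = b->next) {
            if (b == ptr) {
                // boxed_region::free nulls td, so a live box must have one.
                assert(b->td != NULL);
                return;
            }
        }
        assert(false && "validate_box: pointer is not a live box");
    }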
@@ -1385,8 +1399,7 @@ fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     let box_ptr = PointerCast(cx, box_ptr, llbox_ty);
     let null_test = IsNull(cx, box_ptr);
     CondBr(cx, null_test, next_cx.llbb, rc_adj_cx.llbb);
-    let rc_ptr =
-        GEPi(rc_adj_cx, box_ptr, [0, abi::box_rc_field_refcnt]);
+    let rc_ptr = GEPi(rc_adj_cx, box_ptr, [0, abi::box_field_refcnt]);
     let rc = Load(rc_adj_cx, rc_ptr);
     rc = Sub(rc_adj_cx, rc, C_int(ccx, 1));
     Store(rc_adj_cx, rc, rc_ptr);

@@ -1397,7 +1410,6 @@ fn decr_refcnt_maybe_free(cx: @block_ctxt, box_ptr: ValueRef, t: ty::t)
     ret next_cx;
 }
 
-
 // Structural comparison: a rather involved form of glue.
 fn maybe_name_value(cx: @crate_ctxt, v: ValueRef, s: str) {
     if cx.sess.opts.save_temps {

@@ -2208,7 +2220,7 @@ fn autoderef(cx: @block_ctxt, v: ValueRef, t: ty::t) -> result_t {
     while true {
         alt ty::struct(ccx.tcx, t1) {
           ty::ty_box(mt) {
-            let body = GEPi(cx, v1, [0, abi::box_rc_field_body]);
+            let body = GEPi(cx, v1, [0, abi::box_field_body]);
             t1 = mt.ty;
 
             // Since we're changing levels of box indirection, we may have

@@ -2514,7 +2526,7 @@ type lval_maybe_callee = {bcx: @block_ctxt,
                           generic: option<generic_info>};
 
 fn null_env_ptr(bcx: @block_ctxt) -> ValueRef {
-    C_null(T_opaque_cbox_ptr(bcx_ccx(bcx)))
+    C_null(T_opaque_box_ptr(bcx_ccx(bcx)))
 }
 
 fn lval_from_local_var(bcx: @block_ctxt, r: local_var_result) -> lval_result {

@@ -2790,7 +2802,7 @@ fn trans_lval(cx: @block_ctxt, e: @ast::expr) -> lval_result {
         let val =
             alt ty::struct(ccx.tcx, t) {
               ty::ty_box(_) {
-                GEPi(sub.bcx, sub.val, [0, abi::box_rc_field_body])
+                GEPi(sub.bcx, sub.val, [0, abi::box_field_body])
               }
               ty::ty_res(_, _, _) {
                 GEPi(sub.bcx, sub.val, [0, 1])

@@ -3160,7 +3172,7 @@ fn trans_call_inner(in_cx: @block_ctxt, fn_expr_ty: ty::t,
     let llenv, dict_param = none;
     alt f_res.env {
       null_env {
-        llenv = llvm::LLVMGetUndef(T_opaque_cbox_ptr(bcx_ccx(cx)));
+        llenv = llvm::LLVMGetUndef(T_opaque_box_ptr(bcx_ccx(cx)));
       }
       self_env(e) { llenv = e; }
       dict_env(dict, e) { llenv = e; dict_param = some(dict); }

@@ -3465,6 +3477,8 @@ fn trans_expr(bcx: @block_ctxt, e: @ast::expr, dest: dest) -> @block_ctxt {
     let tcx = bcx_tcx(bcx);
     debuginfo::update_source_pos(bcx, e.span);
 
+    #debug["trans_expr(%s,%?)", expr_to_str(e), dest];
+
     if expr_is_lval(bcx, e) {
         ret lval_to_dps(bcx, e, dest);
     }

@@ -3998,6 +4012,8 @@ fn zero_alloca(cx: @block_ctxt, llptr: ValueRef, t: ty::t)
 }
 
 fn trans_stmt(cx: @block_ctxt, s: ast::stmt) -> @block_ctxt {
+    #debug["trans_stmt(%s)", stmt_to_str(s)];
+
     if (!bcx_ccx(cx).sess.opts.no_asm_comments) {
         add_span_comment(cx, s.span, stmt_to_str(s));
     }

@@ -5122,8 +5138,7 @@ fn fill_fn_pair(bcx: @block_ctxt, pair: ValueRef, llfn: ValueRef,
     let code_cell = GEPi(bcx, pair, [0, abi::fn_field_code]);
     Store(bcx, llfn, code_cell);
     let env_cell = GEPi(bcx, pair, [0, abi::fn_field_box]);
-    let llenvblobptr =
-        PointerCast(bcx, llenvptr, T_opaque_cbox_ptr(ccx));
+    let llenvblobptr = PointerCast(bcx, llenvptr, T_opaque_box_ptr(ccx));
     Store(bcx, llenvblobptr, env_cell);
 }

@@ -5591,7 +5606,8 @@ fn trans_crate(sess: session::session, crate: @ast::crate, tcx: ty::ctxt,
           shape_cx: shape::mk_ctxt(llmod),
           gc_cx: gc::mk_ctxt(),
           crate_map: crate_map,
-          dbg_cx: dbg_cx};
+          dbg_cx: dbg_cx,
+          mutable do_not_commit_warning_issued: false};
     let cx = new_local_ctxt(ccx);
     collect_items(ccx, crate);
     trans_constants(ccx, crate);

@@ -8,7 +8,7 @@ import lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
 import lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
                    CallConv};
 import common::{block_ctxt, T_ptr, T_nil, T_i8, T_i1, T_void,
-                T_fn, val_ty, bcx_ccx, C_i32};
+                T_fn, val_ty, bcx_ccx, C_i32, val_str};
 
 fn B(cx: @block_ctxt) -> BuilderRef {
     let b = *cx.fcx.lcx.ccx.builder;

@@ -95,6 +95,10 @@ fn Invoke(cx: @block_ctxt, Fn: ValueRef, Args: [ValueRef],
     if cx.unreachable { ret; }
     assert (!cx.terminated);
     cx.terminated = true;
+    #debug["Invoke(%s with arguments (%s))",
+           val_str(bcx_ccx(cx).tn, Fn),
+           str::connect(vec::map(Args, {|a|val_str(bcx_ccx(cx).tn, a)}),
+                        ", ")];
     unsafe {
         llvm::LLVMBuildInvoke(B(cx), Fn, vec::to_ptr(Args),
                               vec::len(Args) as c_uint, Then, Catch,

@@ -15,17 +15,6 @@ import back::link::{
     mangle_internal_name_by_path,
     mangle_internal_name_by_path_and_seq};
 import util::ppaux::ty_to_str;
-import base::{
-    trans_shared_malloc,
-    type_of_inner,
-    node_id_type,
-    INIT,
-    trans_shared_free,
-    drop_ty,
-    new_sub_block_ctxt,
-    load_if_immediate,
-    dest
-};
 import shape::{size_of};
 
 // ___Good to know (tm)__________________________________________________

@@ -33,34 +22,31 @@ import shape::{size_of};
 // The layout of a closure environment in memory is
 // roughly as follows:
 //
-// struct closure_box {
-//    unsigned ref_count;     // only used for shared environments
-//    type_desc *tydesc;      // descriptor for the "struct closure_box" type
+// struct rust_opaque_box {   // see rust_internal.h
+//    unsigned ref_count;     // only used for fn@()
+//    type_desc *tydesc;      // describes closure_data struct
+//    rust_opaque_box *prev;  // (used internally by memory alloc)
+//    rust_opaque_box *next;  // (used internally by memory alloc)
+//    struct closure_data {
 //      type_desc *bound_tdescs[]; // bound descriptors
 //      struct {
 //        upvar1_t upvar1;
 //        ...
 //        upvarN_t upvarN;
 //      } bound_data;
+//    }
 // };
 //
-// Note that the closure carries a type descriptor that describes the
-// closure itself. Trippy. This is needed because the precise types
-// of the closed over data are lost in the closure type (`fn(T)->U`),
-// so if we need to take/drop, we must know what data is in the upvars
-// and so forth. This struct is defined in the code in mk_closure_tys()
-// below.
-//
-// The allocation strategy for this closure depends on the closure
-// type. For a sendfn, the closure (and the referenced type
-// descriptors) will be allocated in the exchange heap. For a fn, the
-// closure is allocated in the task heap and is reference counted.
-// For a block, the closure is allocated on the stack. Note that in
-// all cases we allocate space for a ref count just to make our lives
-// easier when upcasting to fn(T)->U, in the shape code, and so
-// forth.
+// Note that the closure is itself a rust_opaque_box. This is true
+// even for fn~ and fn&, because we wish to keep binary compatibility
+// between all kinds of closures. The allocation strategy for this
+// closure depends on the closure type. For a sendfn, the closure
+// (and the referenced type descriptors) will be allocated in the
+// exchange heap. For a fn, the closure is allocated in the task heap
+// and is reference counted. For a block, the closure is allocated on
+// the stack.
 //
-// ## Opaque Closures ##
+// ## Opaque closures and the embedded type descriptor ##
 //
 // One interesting part of closures is that they encapsulate the data
 // that they close over. So when I have a ptr to a closure, I do not

@@ -69,10 +55,10 @@ import shape::{size_of};
 // nor where its fields are located. This is called an "opaque
 // closure".
 //
-// Typically an opaque closure suffices because I only manipulate it
-// by ptr. The routine common::T_opaque_cbox_ptr() returns an
-// appropriate type for such an opaque closure; it allows access to the
-// first two fields, but not the others.
+// Typically an opaque closure suffices because we only manipulate it
+// by ptr. The routine common::T_opaque_box_ptr() returns an
+// appropriate type for such an opaque closure; it allows access to
+// the box fields, but not the closure_data itself.
 //
 // But sometimes, such as when cloning or freeing a closure, we need
 // to know the full information. That is where the type descriptor

@@ -81,31 +67,22 @@ import shape::{size_of};
 //
 // ## Subtleties concerning alignment ##
 //
-// You'll note that the closure_box structure is a flat structure with
-// four fields. In some ways, it would be more convenient to use a nested
-// structure like so:
-//
-// struct {
-//   int;
-//   struct {
-//     type_desc*;
-//     type_desc*[];
-//     bound_data;
-// } }
-//
-// This would be more convenient because it would allow us to use more
-// of the existing infrastructure: we could treat the inner struct as
-// a type and then have a boxed variant (which would add the int) etc.
-// However, there is one subtle problem with this: grouping the latter
-// 3 fields into an inner struct causes the alignment of the entire
-// struct to be the max alignment of the bound_data. This will
-// therefore vary from closure to closure. That would mean that we
-// cannot reliably locate the initial type_desc* in an opaque closure!
-// That's definitely a bad thing. Therefore, I have elected to create
-// a flat structure, even though it means some mild amount of code
-// duplication (however, we used to do it the other way, and we were
-// jumping through about as many hoops just trying to wedge a ref
-// count into a unique pointer, so it's kind of a wash in the end).
+// It is important that we be able to locate the closure data *without
+// knowing the kind of data that is being bound*. This can be tricky
+// because the alignment requirements of the bound data affect the
+// alignment requirements of the closure_data struct as a whole. However,
+// right now this is a non-issue in any case, because the size of the
+// rust_opaque_box header is always a multiple of 16 bytes, which is
+// the maximum alignment requirement we ever have to worry about.
+//
+// The only reason alignment matters is that, in order to learn what data
+// is bound, we would normally first load the type descriptors: but their
+// location ultimately depends on their content! There is, however, a
+// workaround. We can load the tydesc from the rust_opaque_box, which
+// describes the closure_data struct and has self-contained derived type
+// descriptors, and read the alignment from there. It's just annoying to
+// do. Hopefully should this ever become an issue we'll have monomorphized
+// and type descriptors will all be a bad dream.
 //
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
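The claim above — that a header size which is a multiple of 16 makes the body offset independent of the bound data — is easy to verify with a little arithmetic. A standalone check, assuming a 64-bit target where the four header words total 32 bytes and 16 is the largest alignment in play:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t align_to(size_t v, size_t align) {
        return (v + align - 1) & ~(align - 1);
    }

    int main() {
        size_t header = 4 * sizeof(uint64_t);  // 32 bytes on a 64-bit target
        for (size_t a = 1; a <= 16; a *= 2)
            assert(align_to(header, a) == header);  // body offset never moves
        return 0;
    }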
@@ -142,16 +119,21 @@ fn mk_tydesc_ty(tcx: ty::ctxt, ck: ty::closure_kind) -> ty::t {
     };
 }
 
+fn mk_tuplified_uniq_cbox_ty(tcx: ty::ctxt, cdata_ty: ty::t) -> ty::t {
+    let tydesc_ty = mk_tydesc_ty(tcx, ty::ck_uniq);
+    let cbox_ty = tuplify_cbox_ty(tcx, cdata_ty, tydesc_ty);
+    ret ty::mk_imm_uniq(tcx, cbox_ty);
+}
+
 // Given a closure ty, emits a corresponding tuple ty
 fn mk_closure_tys(tcx: ty::ctxt,
                   ck: ty::closure_kind,
                   ty_params: [fn_ty_param],
                   bound_values: [environment_value])
-    -> (ty::t, ty::t, [ty::t]) {
+    -> (ty::t, [ty::t]) {
     let bound_tys = [];
-    let tydesc_ty =
-        mk_tydesc_ty(tcx, ck);
+    let tydesc_ty = mk_tydesc_ty(tcx, ck);
 
     // Compute the closed over tydescs
     let param_ptrs = [];

@@ -173,95 +155,76 @@ fn mk_closure_tys(tcx: ty::ctxt,
     }
     let bound_data_ty = ty::mk_tup(tcx, bound_tys);
 
-    let norc_tys = [tydesc_ty, ty::mk_tup(tcx, param_ptrs), bound_data_ty];
-
-    // closure_norc_ty == everything but ref count
-    //
-    // This is a hack to integrate with the cycle coll. When you
-    // allocate memory in the task-local space, you are expected to
-    // provide a descriptor for that memory which excludes the ref
-    // count. That's what this represents. However, this really
-    // assumes a type setup like [uint, data] where data can be a
-    // struct. We don't use that structure here because we don't want
-    // the alignment of the first few fields being bound up in the
-    // alignment of the bound data, as would happen if we laid out
-    // that way. For now this should be fine but ultimately we need
-    // to modify CC code or else modify box allocation interface to be
-    // a bit more flexible, perhaps taking a vec of tys in the box
-    // (which for normal rust code is always of length 1).
-    let closure_norc_ty = ty::mk_tup(tcx, norc_tys);
-    #debug["closure_norc_ty=%s", ty_to_str(tcx, closure_norc_ty)];
-
-    // closure_ty == ref count, data tydesc, typarams, bound data
-    let closure_ty = ty::mk_tup(tcx, [ty::mk_int(tcx)] + norc_tys);
-    #debug["closure_ty=%s", ty_to_str(tcx, closure_norc_ty)];
-
-    ret (closure_ty, closure_norc_ty, bound_tys);
+    let cdata_ty = ty::mk_tup(tcx, [ty::mk_tup(tcx, param_ptrs),
+                                    bound_data_ty]);
+    #debug["cdata_ty=%s", ty_to_str(tcx, cdata_ty)];
+    ret (cdata_ty, bound_tys);
 }
 
 fn allocate_cbox(bcx: @block_ctxt,
                  ck: ty::closure_kind,
-                 cbox_ty: ty::t,
-                 cbox_norc_ty: ty::t)
+                 cdata_ty: ty::t)
     -> (@block_ctxt, ValueRef, [ValueRef]) {
-    //
     let ccx = bcx_ccx(bcx);
+    let tcx = bcx_tcx(bcx);
 
-    let alloc_in_heap = fn@(bcx: @block_ctxt,
-                            xchgheap: bool,
-                            &temp_cleanups: [ValueRef])
-        -> (@block_ctxt, ValueRef) {
-
-        // n.b. If you are wondering why we don't use
-        // trans_malloc_boxed() or alloc_uniq(), see the section about
-        // "Subtleties concerning alignment" in the big comment at the
-        // top of the file.
-
-        let {bcx, val:llsz} = size_of(bcx, cbox_ty);
-        let ti = none;
-        let tydesc_ty = if xchgheap { cbox_ty } else { cbox_norc_ty };
-        let {bcx, val:lltydesc} =
-            get_tydesc(bcx, tydesc_ty, true, ti).result;
-
-        let malloc = {
-            if xchgheap { ccx.upcalls.shared_malloc }
-            else { ccx.upcalls.malloc }
-        };
-        let box = Call(bcx, malloc, [llsz, lltydesc]);
-        add_clean_free(bcx, box, xchgheap);
-        temp_cleanups += [box];
-        (bcx, box)
-    };
+    fn nuke_ref_count(bcx: @block_ctxt, box: ValueRef) {
+        // Initialize ref count to arbitrary value for debugging:
+        let ccx = bcx_ccx(bcx);
+        let box = PointerCast(bcx, box, T_opaque_box_ptr(ccx));
+        let ref_cnt = GEPi(bcx, box, [0, abi::box_field_refcnt]);
+        let rc = C_int(ccx, 0x12345678);
+        Store(bcx, rc, ref_cnt);
+    }
 
-    // Allocate the box:
+    fn store_uniq_tydesc(bcx: @block_ctxt,
+                         cdata_ty: ty::t,
+                         box: ValueRef,
+                         &ti: option::t<@tydesc_info>) -> @block_ctxt {
+        let ccx = bcx_ccx(bcx);
+        let bound_tydesc = GEPi(bcx, box, [0, abi::box_field_tydesc]);
+        let {bcx, val: td} =
+            base::get_tydesc(bcx, cdata_ty, true, ti).result;
+        let td = Call(bcx, ccx.upcalls.create_shared_type_desc, [td]);
+        Store(bcx, td, bound_tydesc);
+        bcx
+    }
+
+    // Allocate and initialize the box:
+    let ti = none;
     let temp_cleanups = [];
-    let (bcx, box, rc) = alt ck {
+    let (bcx, box) = alt ck {
       ty::ck_box {
-        let (bcx, box) = alloc_in_heap(bcx, false, temp_cleanups);
-        (bcx, box, 1)
+        let {bcx, val: box} = trans_malloc_boxed_raw(bcx, cdata_ty, ti);
+        (bcx, box)
      }
      ty::ck_uniq {
-        let (bcx, box) = alloc_in_heap(bcx, true, temp_cleanups);
-        (bcx, box, 0x12345678) // use arbitrary value for debugging
+        let uniq_cbox_ty = mk_tuplified_uniq_cbox_ty(tcx, cdata_ty);
+        check uniq::type_is_unique_box(bcx, uniq_cbox_ty);
+        let {bcx, val: box} = uniq::alloc_uniq(bcx, uniq_cbox_ty);
+        nuke_ref_count(bcx, box);
+        let bcx = store_uniq_tydesc(bcx, cdata_ty, box, ti);
+        (bcx, box)
      }
      ty::ck_block {
+        let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
        let {bcx, val: box} = base::alloc_ty(bcx, cbox_ty);
-        (bcx, box, 0x12345678) // use arbitrary value for debugging
+        nuke_ref_count(bcx, box);
+        (bcx, box)
      }
    };
 
-    // Initialize ref count
-    let box = PointerCast(bcx, box, T_opaque_cbox_ptr(ccx));
-    let ref_cnt = GEPi(bcx, box, [0, abi::box_rc_field_refcnt]);
-    Store(bcx, C_int(ccx, rc), ref_cnt);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, ti);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
+    base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
 
     ret (bcx, box, temp_cleanups);
 }
 
 type closure_result = {
     llbox: ValueRef,     // llvalue of ptr to closure
-    cbox_ty: ty::t,      // type of the closure data
+    cdata_ty: ty::t,     // type of the closure data
     bcx: @block_ctxt     // final bcx
 };
@@ -302,34 +265,17 @@ fn store_environment(
     let tcx = bcx_tcx(bcx);
 
     // compute the shape of the closure
-    let (cbox_ty, cbox_norc_ty, bound_tys) =
+    let (cdata_ty, bound_tys) =
         mk_closure_tys(tcx, ck, lltyparams, bound_values);
 
     // allocate closure in the heap
     let (bcx, llbox, temp_cleanups) =
-        allocate_cbox(bcx, ck, cbox_ty, cbox_norc_ty);
-
-    // store data tydesc.
-    alt ck {
-      ty::ck_box | ty::ck_uniq {
-        let bound_tydesc = GEPi(bcx, llbox, [0, abi::cbox_elt_tydesc]);
-        let ti = none;
-        let {result:closure_td, _} =
-            base::get_tydesc(bcx, cbox_ty, true, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_take_glue, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
-        base::lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
-        bcx = closure_td.bcx;
-        let td = maybe_clone_tydesc(bcx, ck, closure_td.val);
-        Store(bcx, td, bound_tydesc);
-      }
-      ty::ck_block { /* skip this for blocks, not really relevant */ }
-    }
+        allocate_cbox(bcx, ck, cdata_ty);
 
     // cbox_ty has the form of a tuple: (a, b, c) we want a ptr to a
     // tuple. This could be a ptr in uniq or a box or on stack,
     // whatever.
+    let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
     let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
     let llbox = cast_if_we_can(bcx, llbox, cboxptr_ty);
     check type_is_tup_like(bcx, cbox_ty);

@@ -337,7 +283,8 @@ fn store_environment(
     // If necessary, copy tydescs describing type parameters into the
     // appropriate slot in the closure.
     let {bcx:bcx, val:ty_params_slot} =
-        GEP_tup_like(bcx, cbox_ty, llbox, [0, abi::cbox_elt_ty_params]);
+        GEP_tup_like(bcx, cbox_ty, llbox,
+                     [0, abi::box_field_body, abi::closure_body_ty_params]);
     let off = 0;
     for tp in lltyparams {
         let cloned_td = maybe_clone_tydesc(bcx, ck, tp.desc);

@@ -361,7 +308,9 @@ fn store_environment(
     }
 
     let bound_data = GEP_tup_like_1(bcx, cbox_ty, llbox,
-                                    [0, abi::cbox_elt_bindings,
+                                    [0,
+                                     abi::box_field_body,
+                                     abi::closure_body_bindings,
                                      i as int]);
     bcx = bound_data.bcx;
     let bound_data = bound_data.val;

@@ -399,7 +348,7 @@ fn store_environment(
     }
     for cleanup in temp_cleanups { revoke_clean(bcx, cleanup); }
 
-    ret {llbox: llbox, cbox_ty: cbox_ty, bcx: bcx};
+    ret {llbox: llbox, cdata_ty: cdata_ty, bcx: bcx};
 }
 
 // Given a context and a list of upvars, build a closure. This just
@@ -443,22 +392,20 @@ fn build_closure(bcx0: @block_ctxt,
 // with the upvars and type descriptors.
 fn load_environment(enclosing_cx: @block_ctxt,
                     fcx: @fn_ctxt,
-                    cbox_ty: ty::t,
+                    cdata_ty: ty::t,
                     cap_vars: [capture::capture_var],
                     ck: ty::closure_kind) {
     let bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
-    let ccx = bcx_ccx(bcx);
-    let tcx = bcx_tcx(bcx);
 
-    let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
-    check (type_has_static_size(ccx, cboxptr_ty));
-    let llty = type_of(ccx, cboxptr_ty);
-    let llclosure = PointerCast(bcx, fcx.llenv, llty);
+    // Load a pointer to the closure data, skipping over the box header:
+    let llcdata = base::opaque_box_body(bcx, cdata_ty, fcx.llenv);
 
     // Populate the type parameters from the environment. We need to
     // do this first because the tydescs are needed to index into
     // the bindings if they are dynamically sized.
-    let lltydescs = GEPi(bcx, llclosure, [0, abi::cbox_elt_ty_params]);
+    check type_is_tup_like(bcx, cdata_ty);
+    let {bcx, val: lltydescs} = GEP_tup_like(bcx, cdata_ty, llcdata,
+                                             [0, abi::closure_body_ty_params]);
     let off = 0;
     for tp in copy enclosing_cx.fcx.lltyparams {
         let tydesc = Load(bcx, GEPi(bcx, lltydescs, [0, off]));

@@ -476,15 +423,15 @@ fn load_environment(enclosing_cx: @block_ctxt,
     }
 
     // Populate the upvars from the environment.
-    let path = [0, abi::cbox_elt_bindings];
     let i = 0u;
     vec::iter(cap_vars) { |cap_var|
         alt cap_var.mode {
           capture::cap_drop { /* ignore */ }
          _ {
-            check type_is_tup_like(bcx, cbox_ty);
-            let upvarptr = GEP_tup_like(
-                bcx, cbox_ty, llclosure, path + [i as int]);
+            check type_is_tup_like(bcx, cdata_ty);
+            let upvarptr =
+                GEP_tup_like(bcx, cdata_ty, llcdata,
+                             [0, abi::closure_body_bindings, i as int]);
            bcx = upvarptr.bcx;
            let llupvarptr = upvarptr.val;
            alt ck {

@@ -519,9 +466,9 @@ fn trans_expr_fn(bcx: @block_ctxt,
     let trans_closure_env = fn@(ck: ty::closure_kind) -> ValueRef {
         let cap_vars = capture::compute_capture_vars(
             ccx.tcx, id, proto, cap_clause);
-        let {llbox, cbox_ty, bcx} = build_closure(bcx, cap_vars, ck);
+        let {llbox, cdata_ty, bcx} = build_closure(bcx, cap_vars, ck);
         trans_closure(sub_cx, decl, body, llfn, no_self, [], id, {|fcx|
-            load_environment(bcx, fcx, cbox_ty, cap_vars, ck);
+            load_environment(bcx, fcx, cdata_ty, cap_vars, ck);
         });
         llbox
     };

@@ -531,9 +478,8 @@ fn trans_expr_fn(bcx: @block_ctxt,
       ast::proto_box { trans_closure_env(ty::ck_box) }
       ast::proto_uniq { trans_closure_env(ty::ck_uniq) }
       ast::proto_bare {
-        let closure = C_null(T_opaque_cbox_ptr(ccx));
-        trans_closure(sub_cx, decl, body, llfn, no_self, [],
-                      id, {|_fcx|});
+        let closure = C_null(T_opaque_box_ptr(ccx));
+        trans_closure(sub_cx, decl, body, llfn, no_self, [], id, {|_fcx|});
         closure
      }
    };

@@ -617,7 +563,7 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
     };
 
     // Actually construct the closure
-    let {llbox, cbox_ty, bcx} = store_environment(
+    let {llbox, cdata_ty, bcx} = store_environment(
         bcx, vec::map(lltydescs, {|d| {desc: d, dicts: none}}),
         env_vals + vec::map(bound, {|x| env_expr(x)}),
         ty::ck_box);

@@ -625,7 +571,7 @@ fn trans_bind_1(cx: @block_ctxt, outgoing_fty: ty::t,
     // Make thunk
     let llthunk =
         trans_bind_thunk(cx.fcx.lcx, pair_ty, outgoing_fty_real, args,
-                         cbox_ty, *param_bounds, target_res);
+                         cdata_ty, *param_bounds, target_res);
 
     // Fill the function pair
     fill_fn_pair(bcx, get_dest_addr(dest), llthunk.val, llbox);

@@ -688,16 +634,20 @@ fn make_opaque_cbox_take_glue(
 
     // Hard case, a deep copy:
     let ccx = bcx_ccx(bcx);
-    let llopaquecboxty = T_opaque_cbox_ptr(ccx);
+    let tcx = bcx_tcx(bcx);
+    let llopaquecboxty = T_opaque_box_ptr(ccx);
     let cbox_in = Load(bcx, cboxptr);
     make_null_test(bcx, cbox_in) {|bcx|
         // Load the size from the type descr found in the cbox
         let cbox_in = PointerCast(bcx, cbox_in, llopaquecboxty);
-        let tydescptr = GEPi(bcx, cbox_in, [0, abi::cbox_elt_tydesc]);
+        let tydescptr = GEPi(bcx, cbox_in, [0, abi::box_field_tydesc]);
         let tydesc = Load(bcx, tydescptr);
         let tydesc = PointerCast(bcx, tydesc, T_ptr(ccx.tydesc_type));
         let sz = Load(bcx, GEPi(bcx, tydesc, [0, abi::tydesc_field_size]));
 
+        // Adjust sz to account for the rust_opaque_box header fields
+        let sz = Add(bcx, sz, base::llsize_of(ccx, T_box_header(ccx)));
+
         // Allocate memory, update original ptr, and copy existing data
         let malloc = ccx.upcalls.shared_malloc;
         let cbox_out = Call(bcx, malloc, [sz, tydesc]);

@@ -705,9 +655,14 @@ fn make_opaque_cbox_take_glue(
         let {bcx, val: _} = call_memmove(bcx, cbox_out, cbox_in, sz);
         Store(bcx, cbox_out, cboxptr);
 
+        // Take the (deeply cloned) type descriptor
+        let tydesc_out = GEPi(bcx, cbox_out, [0, abi::box_field_tydesc]);
+        let bcx = take_ty(bcx, tydesc_out, mk_tydesc_ty(tcx, ty::ck_uniq));
+
         // Take the data in the tuple
         let ti = none;
-        call_tydesc_glue_full(bcx, cbox_out, tydesc,
+        let cdata_out = GEPi(bcx, cbox_out, [0, abi::box_field_body]);
+        call_tydesc_glue_full(bcx, cdata_out, tydesc,
                               abi::tydesc_field_take_glue, ti);
         bcx
     }

@@ -747,20 +702,14 @@ fn make_opaque_cbox_free_glue(
         // Load the type descr found in the cbox
         let lltydescty = T_ptr(ccx.tydesc_type);
         let cbox = PointerCast(bcx, cbox, T_opaque_cbox_ptr(ccx));
-        let tydescptr = GEPi(bcx, cbox, [0, abi::cbox_elt_tydesc]);
+        let tydescptr = GEPi(bcx, cbox, [0, abi::box_field_tydesc]);
         let tydesc = Load(bcx, tydescptr);
         let tydesc = PointerCast(bcx, tydesc, lltydescty);
 
-        // Null out the type descr in the cbox. This is subtle:
-        // we will be freeing the data in the cbox, and we may need the
-        // information in the type descr to guide the GEP_tup_like process
-        // etc if generic types are involved. So we null it out at first
-        // then free it manually below.
-        Store(bcx, C_null(lltydescty), tydescptr);
-
         // Drop the tuple data then free the descriptor
         let ti = none;
-        call_tydesc_glue_full(bcx, cbox, tydesc,
+        let cdata = GEPi(bcx, cbox, [0, abi::box_field_body]);
+        call_tydesc_glue_full(bcx, cdata, tydesc,
                               abi::tydesc_field_drop_glue, ti);
 
         // Free the ty descr (if necc) and the box itself

@@ -782,10 +731,11 @@ fn trans_bind_thunk(cx: @local_ctxt,
                     incoming_fty: ty::t,
                     outgoing_fty: ty::t,
                     args: [option<@ast::expr>],
-                    cbox_ty: ty::t,
+                    cdata_ty: ty::t,
                     param_bounds: [ty::param_bounds],
                     target_fn: option<ValueRef>)
     -> {val: ValueRef, ty: TypeRef} {
+
     // If we supported constraints on record fields, we could make the
     // constraints for this function:
     /*

@@ -797,6 +747,13 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let tcx = ccx_tcx(ccx);
     check type_has_static_size(ccx, incoming_fty);
 
+    #debug["trans_bind_thunk[incoming_fty=%s,outgoing_fty=%s,\
+            cdata_ty=%s,param_bounds=%?]",
+           ty_to_str(tcx, incoming_fty),
+           ty_to_str(tcx, outgoing_fty),
+           ty_to_str(tcx, cdata_ty),
+           param_bounds];
+
     // Here we're not necessarily constructing a thunk in the sense of
     // "function with no arguments". The result of compiling 'bind f(foo,
     // bar, baz)' would be a thunk that, when called, applies f to those

@@ -835,15 +792,12 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let l_bcx = new_raw_block_ctxt(fcx, fcx.llloadenv);
 
     // The 'llenv' that will arrive in the thunk we're creating is an
-    // environment that will contain the values of its arguments and a pointer
-    // to the original function. So, let's create one of those:
-    //
-    // The llenv pointer needs to be the correct size. That size is
-    // 'cbox_ty', which was determined by trans_bind.
-    let cboxptr_ty = ty::mk_ptr(tcx, {ty:cbox_ty, mut:ast::imm});
-    check type_has_static_size(ccx, cboxptr_ty);
-    let llclosure_ptr_ty = type_of(ccx, cboxptr_ty);
-    let llclosure = PointerCast(l_bcx, fcx.llenv, llclosure_ptr_ty);
+    // environment that will contain the values of its arguments and a
+    // pointer to the original function. This environment is always
+    // stored like an opaque box (see big comment at the header of the
+    // file), so we load the box body, which contains the type descr
+    // and cached data.
+    let llcdata = base::opaque_box_body(l_bcx, cdata_ty, fcx.llenv);
 
     // "target", in this context, means the function that's having some of its
     // arguments bound and that will be called inside the thunk we're

@@ -856,10 +810,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
       }
       none {
         // Silly check
-        check type_is_tup_like(bcx, cbox_ty);
+        check type_is_tup_like(bcx, cdata_ty);
         let {bcx: cx, val: pair} =
-            GEP_tup_like(bcx, cbox_ty, llclosure,
-                         [0, abi::cbox_elt_bindings, 0]);
+            GEP_tup_like(bcx, cdata_ty, llcdata,
+                         [0, abi::closure_body_bindings, 0]);
         let lltargetenv =
             Load(cx, GEPi(cx, pair, [0, abi::fn_field_box]));
         let lltargetfn = Load

@@ -893,10 +847,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
     let llargs: [ValueRef] = [llretptr, lltargetenv];
 
     // Copy in the type parameters.
-    check type_is_tup_like(l_bcx, cbox_ty);
+    check type_is_tup_like(l_bcx, cdata_ty);
     let {bcx: l_bcx, val: param_record} =
-        GEP_tup_like(l_bcx, cbox_ty, llclosure,
-                     [0, abi::cbox_elt_ty_params]);
+        GEP_tup_like(l_bcx, cdata_ty, llcdata,
+                     [0, abi::closure_body_ty_params]);
     let off = 0;
     for param in param_bounds {
         let dsc = Load(l_bcx, GEPi(l_bcx, param_record, [0, off])),

@@ -934,10 +888,10 @@ fn trans_bind_thunk(cx: @local_ctxt,
           // closure.
           some(e) {
             // Silly check
-            check type_is_tup_like(bcx, cbox_ty);
+            check type_is_tup_like(bcx, cdata_ty);
             let bound_arg =
-                GEP_tup_like(bcx, cbox_ty, llclosure,
-                             [0, abi::cbox_elt_bindings, b]);
+                GEP_tup_like(bcx, cdata_ty, llcdata,
+                             [0, abi::closure_body_bindings, b]);
             bcx = bound_arg.bcx;
             let val = bound_arg.val;
             if out_arg.mode == ast::by_val { val = Load(bcx, val); }

@@ -122,7 +122,8 @@ type crate_ctxt =
      shape_cx: shape::ctxt,
      gc_cx: gc::ctxt,
      crate_map: ValueRef,
-     dbg_cx: option<@debuginfo::debug_ctxt>};
+     dbg_cx: option<@debuginfo::debug_ctxt>,
+     mutable do_not_commit_warning_issued: bool};
 
 type local_ctxt =
     {path: [str],

@@ -243,6 +244,13 @@ type fn_ctxt =
      span: option<span>,
      lcx: @local_ctxt};
 
+fn warn_not_to_commit(ccx: @crate_ctxt, msg: str) {
+    if !ccx.do_not_commit_warning_issued {
+        ccx.do_not_commit_warning_issued = true;
+        ccx.sess.warn(msg + " -- do not commit like this!");
+    }
+}
+
 enum cleanup {
     clean(fn@(@block_ctxt) -> @block_ctxt),
     clean_temp(ValueRef, fn@(@block_ctxt) -> @block_ctxt),

@@ -652,8 +660,42 @@ fn T_opaque_vec(targ_cfg: @session::config) -> TypeRef {
     ret T_vec2(targ_cfg, T_i8());
 }
 
+// Let T be the content of a box @T. tuplify_box_ty(t) returns the
+// representation of @T as a tuple (i.e., the ty::t version of what T_box()
+// returns).
+fn tuplify_box_ty(tcx: ty::ctxt, t: ty::t) -> ty::t {
+    ret tuplify_cbox_ty(tcx, t, ty::mk_type(tcx));
+}
+
+// As tuplify_box_ty(), but allows the caller to specify what type of type
+// descr is embedded in the box (ty::type vs ty::send_type). This is useful
+// for unique closure boxes, hence the name "cbox_ty" (closure box type).
+fn tuplify_cbox_ty(tcx: ty::ctxt, t: ty::t, tydesc_t: ty::t) -> ty::t {
+    let ptr = ty::mk_ptr(tcx, {ty: ty::mk_nil(tcx), mut: ast::imm});
+    ret ty::mk_tup(tcx, [ty::mk_uint(tcx), tydesc_t,
+                         ptr, ptr,
+                         t]);
+}
+
+fn T_box_header_fields(cx: @crate_ctxt) -> [TypeRef] {
+    let ptr = T_ptr(T_i8());
+    ret [cx.int_type, T_ptr(cx.tydesc_type), ptr, ptr];
+}
+
+fn T_box_header(cx: @crate_ctxt) -> TypeRef {
+    ret T_struct(T_box_header_fields(cx));
+}
+
 fn T_box(cx: @crate_ctxt, t: TypeRef) -> TypeRef {
-    ret T_struct([cx.int_type, t]);
+    ret T_struct(T_box_header_fields(cx) + [t]);
+}
+
+fn T_opaque_box(cx: @crate_ctxt) -> TypeRef {
+    ret T_box(cx, T_i8());
+}
+
+fn T_opaque_box_ptr(cx: @crate_ctxt) -> TypeRef {
+    ret T_ptr(T_opaque_box(cx));
 }
 
 fn T_port(cx: @crate_ctxt, _t: TypeRef) -> TypeRef {
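tuplify_cbox_ty and T_box_header_fields describe the same four header words from two angles: the ty::t tuple [uint, tydesc, ptr, ptr, T] and the LLVM struct {int_type, tydesc*, i8*, i8*, T}. A compile-time restatement of the invariant both rely on, using the hypothetical struct from the earlier sketch and assuming a target where every header field is pointer-sized:

    #include <cstdint>

    struct type_desc;
    struct rust_opaque_box {
        uintptr_t ref_count;      // ty::mk_uint  / cx.int_type
        type_desc *td;            // tydesc_t     / T_ptr(cx.tydesc_type)
        rust_opaque_box *prev;    // ptr          / T_ptr(T_i8())
        rust_opaque_box *next;    // ptr          / T_ptr(T_i8())
    };

    // Four pointer-sized words; on a 64-bit target that is 32 bytes, a
    // multiple of 16, so the body begins at a fixed offset.
    static_assert(sizeof(rust_opaque_box) == 4 * sizeof(void *),
                  "box header must stay exactly four words");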
@@ -681,15 +723,9 @@ fn T_typaram(tn: type_names) -> TypeRef {
 fn T_typaram_ptr(tn: type_names) -> TypeRef { ret T_ptr(T_typaram(tn)); }
 
 fn T_opaque_cbox_ptr(cx: @crate_ctxt) -> TypeRef {
-    let s = "*cbox";
-    alt name_has_type(cx.tn, s) { some(t) { ret t; } _ {} }
-    let t = T_ptr(T_struct([cx.int_type,
-                            T_ptr(cx.tydesc_type),
-                            T_i8() /* represents closed over tydescs
-                                      and data go here; see trans_closure.rs */
-                           ]));
-    associate_type(cx.tn, s, t);
-    ret t;
+    // closures look like boxes (even when they are fn~ or fn&)
+    // see trans_closure.rs
+    ret T_opaque_box_ptr(cx);
 }
 
 fn T_enum_variant(cx: @crate_ctxt) -> TypeRef {
fn T_enum_variant(cx: @crate_ctxt) -> TypeRef { fn T_enum_variant(cx: @crate_ctxt) -> TypeRef {

@@ -145,7 +145,7 @@ fn trans_iface_callee(bcx: @block_ctxt, callee_id: ast::node_id,
     -> lval_maybe_callee {
     let tcx = bcx_tcx(bcx);
     let {bcx, val} = trans_temp_expr(bcx, base);
-    let box_body = GEPi(bcx, val, [0, abi::box_rc_field_body]);
+    let box_body = GEPi(bcx, val, [0, abi::box_field_body]);
     let dict = Load(bcx, PointerCast(bcx, GEPi(bcx, box_body, [0, 1]),
                                      T_ptr(T_ptr(T_dict()))));
     // FIXME[impl] I doubt this is alignment-safe

@@ -266,7 +266,7 @@ fn trans_iface_wrapper(ccx: @crate_ctxt, pt: [ast::ident], m: ty::method,
     let self = Load(bcx, PointerCast(bcx,
                                      LLVMGetParam(llfn, 2u as c_uint),
                                      T_ptr(T_opaque_iface_ptr(ccx))));
-    let boxed = GEPi(bcx, self, [0, abi::box_rc_field_body]);
+    let boxed = GEPi(bcx, self, [0, abi::box_field_body]);
     let dict = Load(bcx, PointerCast(bcx, GEPi(bcx, boxed, [0, 1]),
                                      T_ptr(T_ptr(T_dict()))));
     let vtable = PointerCast(bcx, Load(bcx, GEPi(bcx, dict, [0, 0])),

@@ -1928,6 +1928,7 @@ fn parse_mod_items(p: parser, term: token::token,
     while p.token != term {
         let attrs = initial_attrs + parse_outer_attributes(p);
         initial_attrs = [];
+        #debug["parse_mod_items: parse_item(attrs=%?)", attrs];
         alt parse_item(p, attrs) {
           some(i) { items += [i]; }
          _ {

@@ -1935,6 +1936,7 @@ fn parse_mod_items(p: parser, term: token::token,
                       token::to_str(p.reader, p.token) + "'");
          }
        }
+        #debug["parse_mod_items: attrs=%?", attrs];
     }
     ret {view_items: view_items, items: items};
 }

@@ -75,8 +75,6 @@ native mod rustrt {
     fn drop_task(task_id: *rust_task);
     fn get_task_pointer(id: task_id) -> *rust_task;
 
-    fn migrate_alloc(alloc: *u8, target: task_id);
-
     fn start_task(id: task, closure: *rust_closure);
 
     fn rust_task_is_unwinding(rt: *rust_task) -> bool;

src/rt/boxed_region.cpp (new file, 59 lines)

@@ -0,0 +1,59 @@
+#include <assert.h>
+#include "boxed_region.h"
+#include "rust_internal.h"
+
+// #define DUMP_BOXED_REGION
+
+rust_opaque_box *boxed_region::malloc(type_desc *td) {
+    size_t header_size = sizeof(rust_opaque_box);
+    size_t body_size = td->size;
+    size_t body_align = td->align;
+    size_t total_size = align_to(header_size, body_align) + body_size;
+    rust_opaque_box *box =
+        (rust_opaque_box*)backing_region->malloc(total_size, "@");
+    box->td = td;
+    box->ref_count = 1;
+    box->prev = NULL;
+    box->next = live_allocs;
+    if (live_allocs) live_allocs->prev = box;
+    live_allocs = box;
+
+#   ifdef DUMP_BOXED_REGION
+    fprintf(stderr, "Allocated box %p with td %p,"
+            " size %lu==%lu+%lu, align %lu, prev %p, next %p\n",
+            box, td, total_size, header_size, body_size, body_align,
+            box->prev, box->next);
+#   endif
+
+    return box;
+}
+
+rust_opaque_box *boxed_region::calloc(type_desc *td) {
+    rust_opaque_box *box = malloc(td);
+    memset(box_body(box), 0, td->size);
+    return box;
+}
+
+void boxed_region::free(rust_opaque_box *box) {
+    // This turns out to not be true in various situations,
+    // like when we are unwinding after a failure.
+    //
+    // assert(box->ref_count == 0);
+
+    // This however should always be true. Helps to detect
+    // double frees (kind of).
+    assert(box->td != NULL);
+
+#   ifdef DUMP_BOXED_REGION
+    fprintf(stderr, "Freed box %p with td %p, prev %p, next %p\n",
+            box, box->td, box->prev, box->next);
+#   endif
+
+    if (box->prev) box->prev->next = box->next;
+    if (box->next) box->next->prev = box->prev;
+    if (live_allocs == box) live_allocs = box->next;
+
+    box->prev = NULL;
+    box->next = NULL;
+    box->td = NULL;
+
+    backing_region->free(box);
+}
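A short usage sketch of the allocator's contract; it compiles against boxed_region.h, with the boxed_region instance and the type_desc standing in for real runtime objects owned by a task:

    #include <assert.h>
    #include "boxed_region.h"

    void example(boxed_region &boxed, type_desc *td) {
        rust_opaque_box *b = boxed.malloc(td); // ref_count == 1, linked in
        assert(boxed.first_live_alloc() == b); // newest box heads the list
        // ... box_body(b) points at td->size bytes of payload ...
        boxed.free(b);                         // unlinks and releases memory
    }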

src/rt/boxed_region.h (new file, 39 lines)

@@ -0,0 +1,39 @@
+#ifndef BOXED_REGION_H
+#define BOXED_REGION_H
+
+#include <stdlib.h>
+
+struct type_desc;
+class memory_region;
+struct rust_opaque_box;
+
+/* Tracks the data allocated by a particular task in the '@' region.
+ * Currently still relies on the standard malloc as a backing allocator, but
+ * this could be improved someday if necessary. Every allocation must provide
+ * a type descr which describes the payload (what follows the header). */
+class boxed_region {
+private:
+    memory_region *backing_region;
+    rust_opaque_box *live_allocs;
+
+    size_t align_to(size_t v, size_t align) {
+        size_t alignm1 = align - 1;
+        v += alignm1;
+        v &= ~alignm1;
+        return v;
+    }
+
+public:
+    boxed_region(memory_region *br)
+        : backing_region(br)
+        , live_allocs(NULL)
+    {}
+
+    rust_opaque_box *first_live_alloc() { return live_allocs; }
+
+    rust_opaque_box *malloc(type_desc *td);
+    rust_opaque_box *calloc(type_desc *td);
+    void free(rust_opaque_box *box);
+};
+
+#endif /* BOXED_REGION_H */
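align_to above is the classic round-up bit trick; it is only valid when align is a power of two, which alignment values recorded in type descriptors are in practice. A tiny standalone check of the edge cases:

    #include <assert.h>
    #include <stddef.h>

    // Same bit trick as boxed_region::align_to; power-of-two align only.
    static size_t align_to(size_t v, size_t align) {
        size_t alignm1 = align - 1;
        v += alignm1;
        v &= ~alignm1;
        return v;
    }

    int main() {
        assert(align_to(17, 8) == 24);   // rounds up
        assert(align_to(16, 8) == 16);   // already aligned: unchanged
        assert(align_to(1, 16) == 16);
        return 0;
    }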

@@ -50,6 +50,9 @@ private:
     void dec_alloc();
     void maybe_poison(void *mem);
 
+    void release_alloc(void *mem);
+    void claim_alloc(void *mem);
+
 public:
     memory_region(rust_srv *srv, bool synchronized);
     memory_region(memory_region *parent);

@@ -58,9 +61,6 @@ public:
     void *realloc(void *mem, size_t size);
     void free(void *mem);
     virtual ~memory_region();
-
-    void release_alloc(void *mem);
-    void claim_alloc(void *mem);
 };
 
 inline void *operator new(size_t size, memory_region &region,
inline void *operator new(size_t size, memory_region &region, inline void *operator new(size_t size, memory_region &region,

@@ -429,22 +429,6 @@ start_task(rust_task_id id, fn_env_pair *f) {
     target->deref();
 }
 
-extern "C" CDECL void
-migrate_alloc(void *alloc, rust_task_id tid) {
-    rust_task *task = rust_scheduler::get_task();
-    if(!alloc) return;
-    rust_task *target = task->kernel->get_task_by_id(tid);
-    if(target) {
-        const type_desc *tydesc = task->release_alloc(alloc);
-        target->claim_alloc(alloc, tydesc);
-        target->deref();
-    }
-    else {
-        // We couldn't find the target. Maybe we should just free?
-        task->fail();
-    }
-}
-
 extern "C" CDECL int
 sched_threads() {
     rust_task *task = rust_scheduler::get_task();

View file

@ -25,7 +25,7 @@ namespace cc {
// Internal reference count computation

-typedef std::map<void *,uintptr_t> irc_map;
+typedef std::map<rust_opaque_box*,uintptr_t> irc_map;

class irc : public shape::data<irc,shape::ptr> {
    friend class shape::data<irc,shape::ptr>;
@ -118,13 +118,6 @@ class irc : public shape::data<irc,shape::ptr> {
    }
}

-void walk_obj2() {
-    dp += sizeof(void *); // skip vtable
-    uint8_t *box_ptr = shape::bump_dp<uint8_t *>(dp);
-    shape::ptr ref_count_dp(box_ptr);
-    maybe_record_irc(ref_count_dp);
-}

void walk_iface2() {
    walk_box2();
}
@ -145,30 +138,32 @@ class irc : public shape::data<irc,shape::ptr> {
void walk_uniq_contents2(irc &sub) { sub.walk(); }

-void walk_box_contents2(irc &sub, shape::ptr &ref_count_dp) {
-    maybe_record_irc(ref_count_dp);
+void walk_box_contents2(irc &sub, shape::ptr &box_dp) {
+    maybe_record_irc(box_dp);
    // Do not traverse the contents of this box; it's in the allocation
    // somewhere, so we're guaranteed to come back to it (if we haven't
    // traversed it already).
}

-void maybe_record_irc(shape::ptr &ref_count_dp) {
-    if (!ref_count_dp)
+void maybe_record_irc(shape::ptr &box_dp) {
+    if (!box_dp)
        return;
+    rust_opaque_box *box_ptr = (rust_opaque_box *) box_dp;

    // Bump the internal reference count of the box.
-    if (ircs.find((void *)ref_count_dp) == ircs.end()) {
+    if (ircs.find(box_ptr) == ircs.end()) {
        LOG(task, gc,
            "setting internal reference count for %p to 1",
-            (void *)ref_count_dp);
-        ircs[(void *)ref_count_dp] = 1;
+            box_ptr);
+        ircs[box_ptr] = 1;
    } else {
-        uintptr_t newcount = ircs[(void *)ref_count_dp] + 1;
+        uintptr_t newcount = ircs[box_ptr] + 1;
        LOG(task, gc,
            "bumping internal reference count for %p to %lu",
-            (void *)ref_count_dp, newcount);
-        ircs[(void *)ref_count_dp] = newcount;
+            box_ptr, newcount);
+        ircs[box_ptr] = newcount;
    }
}
@ -207,36 +202,25 @@ irc::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
void
irc::compute_ircs(rust_task *task, irc_map &ircs) {
-    std::map<void *,const type_desc *>::iterator
-        begin(task->local_allocs.begin()), end(task->local_allocs.end());
-    while (begin != end) {
-        uint8_t *p = reinterpret_cast<uint8_t *>(begin->first);
-        const type_desc *tydesc = begin->second;
-
-        LOG(task, gc, "determining internal ref counts: %p, tydesc=%p", p,
-            tydesc);
+    boxed_region *boxed = &task->boxed;
+    for (rust_opaque_box *box = boxed->first_live_alloc();
+         box != NULL;
+         box = box->next) {
+        type_desc *tydesc = box->td;
+        uint8_t *body = (uint8_t*) box_body(box);
+
+        LOG(task, gc,
+            "determining internal ref counts: "
+            "box=%p tydesc=%p body=%p",
+            box, tydesc, body);

        shape::arena arena;
        shape::type_param *params =
-            shape::type_param::from_tydesc_and_data(tydesc, p, arena);
+            shape::type_param::from_tydesc_and_data(tydesc, body, arena);

-#if 0
-        shape::print print(task, true, tydesc->shape, params,
-                           tydesc->shape_tables);
-        print.walk();
-        shape::log log(task, true, tydesc->shape, params,
-                       tydesc->shape_tables, p + sizeof(uintptr_t),
-                       std::cerr);
-        log.walk();
-#endif
        irc irc(task, true, tydesc->shape, params, tydesc->shape_tables,
-                p + sizeof(uintptr_t), ircs);
+                body, ircs);
        irc.walk();
-        ++begin;
    }
}
@ -244,17 +228,17 @@ irc::compute_ircs(rust_task *task, irc_map &ircs) {
// Root finding

void
-find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
-    std::map<void *,const type_desc *>::iterator
-        begin(task->local_allocs.begin()), end(task->local_allocs.end());
-    while (begin != end) {
-        void *alloc = begin->first;
-        uintptr_t *ref_count_ptr = reinterpret_cast<uintptr_t *>(alloc);
-        uintptr_t ref_count = *ref_count_ptr;
+find_roots(rust_task *task, irc_map &ircs,
+           std::vector<rust_opaque_box *> &roots) {
+    boxed_region *boxed = &task->boxed;
+    for (rust_opaque_box *box = boxed->first_live_alloc();
+         box != NULL;
+         box = box->next) {
+        uintptr_t ref_count = box->ref_count;
        uintptr_t irc;
-        if (ircs.find(alloc) != ircs.end())
-            irc = ircs[alloc];
+        if (ircs.find(box) != ircs.end())
+            irc = ircs[box];
        else
            irc = 0;
@ -262,16 +246,14 @@ find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
            // This allocation must be a root, because the internal reference
            // count is smaller than the total reference count.
            LOG(task, gc,"root found: %p, irc %lu, ref count %lu",
-                alloc, irc, ref_count);
-            roots.push_back(alloc);
+                box, irc, ref_count);
+            roots.push_back(box);
        } else {
            LOG(task, gc, "nonroot found: %p, irc %lu, ref count %lu",
-                alloc, irc, ref_count);
+                box, irc, ref_count);
            assert(irc == ref_count && "Internal reference count must be "
                   "less than or equal to the total reference count!");
        }
-        ++begin;
    }
}
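(The root test above is just a comparison of two counts: a box referenced twice from the task's stack and once from another live box has ref_count == 3 but irc == 1, so it is treated as a root. A sketch of that predicate, for illustration only:)

    // A box is a root candidate when some references come from outside
    // the boxed heap (stack, registers), i.e. when the internal count
    // cannot account for the full reference count.
    static bool is_root(uintptr_t ref_count, uintptr_t irc) {
        return irc < ref_count;
    }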
@ -281,7 +263,7 @@ find_roots(rust_task *task, irc_map &ircs, std::vector<void *> &roots) {
class mark : public shape::data<mark,shape::ptr> {
    friend class shape::data<mark,shape::ptr>;

-    std::set<void *> &marked;
+    std::set<rust_opaque_box *> &marked;

    mark(const mark &other, const shape::ptr &in_dp)
        : shape::data<mark,shape::ptr>(other.task, other.align, other.sp,
@ -319,7 +301,7 @@ class mark : public shape::data<mark,shape::ptr> {
         const shape::type_param *in_params,
         const rust_shape_tables *in_tables,
         uint8_t *in_data,
-         std::set<void *> &in_marked)
+         std::set<rust_opaque_box*> &in_marked)
        : shape::data<mark,shape::ptr>(in_task, in_align, in_sp, in_params,
                                       in_tables, in_data),
          marked(in_marked) {}
@ -357,7 +339,7 @@ class mark : public shape::data<mark,shape::ptr> {
    case shape::SHAPE_BOX_FN: {
        // Record an irc for the environment box, but don't descend
        // into it since it will be walked via the box's allocation
-        shape::data<mark,shape::ptr>::walk_fn_contents1(dp, false);
+        shape::data<mark,shape::ptr>::walk_fn_contents1();
        break;
    }
    case shape::SHAPE_BARE_FN: // Does not close over data.
@ -368,10 +350,6 @@ class mark : public shape::data<mark,shape::ptr> {
    }
}

-void walk_obj2() {
-    shape::data<mark,shape::ptr>::walk_obj_contents1(dp);
-}

void walk_res2(const shape::rust_fn *dtor, unsigned n_params,
               const shape::type_param *params, const uint8_t *end_sp,
               bool live) {
@ -392,14 +370,16 @@ class mark : public shape::data<mark,shape::ptr> {
void walk_uniq_contents2(mark &sub) { sub.walk(); }

-void walk_box_contents2(mark &sub, shape::ptr &ref_count_dp) {
-    if (!ref_count_dp)
+void walk_box_contents2(mark &sub, shape::ptr &box_dp) {
+    if (!box_dp)
        return;
-    if (marked.find((void *)ref_count_dp) != marked.end())
+    rust_opaque_box *box_ptr = (rust_opaque_box *) box_dp;
+    if (marked.find(box_ptr) != marked.end())
        return; // Skip to avoid chasing cycles.
-    marked.insert((void *)ref_count_dp);
+    marked.insert(box_ptr);
    sub.walk();
}
@ -418,8 +398,9 @@ class mark : public shape::data<mark,shape::ptr> {
inline void walk_number2() { /* no-op */ }

public:
-    static void do_mark(rust_task *task, const std::vector<void *> &roots,
-                        std::set<void *> &marked);
+    static void do_mark(rust_task *task,
+                        const std::vector<rust_opaque_box *> &roots,
+                        std::set<rust_opaque_box*> &marked);
};
void
@ -438,35 +419,28 @@ mark::walk_variant2(shape::tag_info &tinfo, uint32_t variant_id,
}

void
-mark::do_mark(rust_task *task, const std::vector<void *> &roots,
-              std::set<void *> &marked) {
-    std::vector<void *>::const_iterator begin(roots.begin()),
+mark::do_mark(rust_task *task,
+              const std::vector<rust_opaque_box *> &roots,
+              std::set<rust_opaque_box *> &marked) {
+    std::vector<rust_opaque_box *>::const_iterator
+        begin(roots.begin()),
        end(roots.end());
    while (begin != end) {
-        void *alloc = *begin;
-        if (marked.find(alloc) == marked.end()) {
-            marked.insert(alloc);
-            const type_desc *tydesc = task->local_allocs[alloc];
-            LOG(task, gc, "marking: %p, tydesc=%p", alloc, tydesc);
-            uint8_t *p = reinterpret_cast<uint8_t *>(alloc);
+        rust_opaque_box *box = *begin;
+        if (marked.find(box) == marked.end()) {
+            marked.insert(box);
+            const type_desc *tydesc = box->td;
+            LOG(task, gc, "marking: %p, tydesc=%p", box, tydesc);
+            uint8_t *p = (uint8_t*) box_body(box);
            shape::arena arena;
            shape::type_param *params =
                shape::type_param::from_tydesc_and_data(tydesc, p, arena);
-#if 0
-            // We skip over the reference count here.
-            shape::log log(task, true, tydesc->shape, params,
-                           tydesc->shape_tables, p + sizeof(uintptr_t),
-                           std::cerr);
-            log.walk();
-#endif
-            // We skip over the reference count here.
            mark mark(task, true, tydesc->shape, params, tydesc->shape_tables,
-                      p + sizeof(uintptr_t), marked);
+                      p, marked);
            mark.walk();
        }
@ -552,13 +526,9 @@ class sweep : public shape::data<sweep,shape::ptr> {
    fn_env_pair pair = *(fn_env_pair*)dp;

    // free closed over data:
-    shape::data<sweep,shape::ptr>::walk_fn_contents1(dp, true);
+    shape::data<sweep,shape::ptr>::walk_fn_contents1();

    // now free the embedded type descr:
-    //
-    // see comment in walk_fn_contents1() concerning null_td
-    // to understand why this does not occur during the normal
-    // walk.
    upcall_s_free_shared_type_desc((type_desc*)pair.env->td);

    // now free the ptr:
@ -610,7 +580,7 @@ class sweep : public shape::data<sweep,shape::ptr> {
void walk_uniq_contents2(sweep &sub) { sub.walk(); }

-void walk_box_contents2(sweep &sub, shape::ptr &ref_count_dp) {
+void walk_box_contents2(sweep &sub, shape::ptr &box_dp) {
    return;
}
@ -637,50 +607,50 @@ class sweep : public shape::data<sweep,shape::ptr> {
inline void walk_number2() { /* no-op */ }

public:
-    static void do_sweep(rust_task *task, const std::set<void *> &marked);
+    static void do_sweep(rust_task *task,
+                         const std::set<rust_opaque_box*> &marked);
};

void
-sweep::do_sweep(rust_task *task, const std::set<void *> &marked) {
-    std::map<void *,const type_desc *>::iterator
-        begin(task->local_allocs.begin()), end(task->local_allocs.end());
-    while (begin != end) {
-        void *alloc = begin->first;
-
-        if (marked.find(alloc) == marked.end()) {
-            LOG(task, gc, "object is part of a cycle: %p", alloc);
-
-            const type_desc *tydesc = begin->second;
-            uint8_t *p = reinterpret_cast<uint8_t *>(alloc);
+sweep::do_sweep(rust_task *task,
+                const std::set<rust_opaque_box*> &marked) {
+    boxed_region *boxed = &task->boxed;
+    rust_opaque_box *box = boxed->first_live_alloc();
+    while (box != NULL) {
+        // save next ptr as we may be freeing box
+        rust_opaque_box *box_next = box->next;
+        if (marked.find(box) == marked.end()) {
+            LOG(task, gc, "object is part of a cycle: %p", box);
+
+            const type_desc *tydesc = box->td;
+            uint8_t *p = (uint8_t*) box_body(box);
            shape::arena arena;
            shape::type_param *params =
                shape::type_param::from_tydesc_and_data(tydesc, p, arena);
            sweep sweep(task, true, tydesc->shape,
                        params, tydesc->shape_tables,
-                        p + sizeof(uintptr_t));
+                        p);
            sweep.walk();
-            // FIXME: Run the destructor, *if* it's a resource.
-            task->free(alloc);
+            boxed->free(box);
        }
-        ++begin;
+        box = box_next;
    }
}
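(Note the box_next save above: boxed->free(box) clears box->next, so the loop must capture the successor before freeing. The same idiom in isolation, with should_free standing in for the marked-set test; names here are illustrative:)

    // Safe traversal of the intrusive list while freeing nodes.
    static void free_matching(boxed_region *boxed,
                              bool (*should_free)(rust_opaque_box*)) {
        rust_opaque_box *box = boxed->first_live_alloc();
        while (box != NULL) {
            rust_opaque_box *next = box->next;  // grab successor before free()
            if (should_free(box))
                boxed->free(box);
            box = next;                         // valid even if box was freed
        }
    }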
void
do_cc(rust_task *task) {
-    LOG(task, gc, "cc; n allocs = %lu",
-        (long unsigned int)task->local_allocs.size());
+    LOG(task, gc, "cc");

    irc_map ircs;
    irc::compute_ircs(task, ircs);

-    std::vector<void *> roots;
+    std::vector<rust_opaque_box*> roots;
    find_roots(task, ircs, roots);

-    std::set<void *> marked;
+    std::set<rust_opaque_box*> marked;
    mark::do_mark(task, roots, marked);

    sweep::do_sweep(task, marked);

View file

@ -231,29 +231,36 @@ struct rust_shape_tables {
    uint8_t *resources;
};

-struct rust_opaque_closure;
+typedef unsigned long ref_cnt_t;
+
+// Corresponds to the boxed data in the @ region.  The body follows the
+// header; you can obtain a ptr via box_body() below.
+struct rust_opaque_box {
+    ref_cnt_t ref_count;
+    type_desc *td;
+    rust_opaque_box *prev;
+    rust_opaque_box *next;
+};

// The type of functions that we spawn, which fall into two categories:
// - the main function: has a NULL environment, but uses the void* arg
// - unique closures of type fn~(): have a non-NULL environment, but
//   no arguments (and hence the final void*) is harmless
-typedef void (*CDECL spawn_fn)(void*, rust_opaque_closure*, void *);
+typedef void (*CDECL spawn_fn)(void*, rust_opaque_box*, void *);

// corresponds to the layout of a fn(), fn@(), fn~() etc
struct fn_env_pair {
    spawn_fn f;
-    rust_opaque_closure *env;
+    rust_opaque_box *env;
};

-// corresponds the closures generated in trans_closure.rs
-struct rust_opaque_closure {
-    intptr_t ref_count;
-    const type_desc *td;
-    // The size/types of these will vary per closure, so they
-    // cannot be statically expressed. See trans_closure.rs:
-    const type_desc *captured_tds[0];
-    // struct bound_data;
-};
+static inline void *box_body(rust_opaque_box *box) {
+    // Here we take advantage of the fact that the size of a box in 32
+    // (resp. 64) bit is 16 (resp. 32) bytes, and thus always 16-byte aligned.
+    // If this were to change, we would have to update the method
+    // rustc::middle::trans::base::opaque_box_body() as well.
+    return (void*)(box + 1);
+}
struct type_desc {
    // First part of type_desc is known to compiler.
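(The 16-byte claim in box_body() is easy to sanity-check from the header definition: four pointer-sized fields give 4*4 = 16 bytes on 32-bit and 4*8 = 32 bytes on 64-bit, both multiples of 16. A compile-time guard one could add, shown here only as a hypothetical; the tree asserts this at runtime in walk_box_contents1 later in this diff:)

    // Hypothetical C++03-style static check of the header-size invariant
    // that box_body() and opaque_box_body() rely on.
    typedef char box_header_is_16_byte_multiple
        [(sizeof(rust_opaque_box) % 16) == 0 ? 1 : -1];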

View file

@ -264,7 +264,7 @@ private:
    result = sub.result;
}

-inline void walk_box_contents2(cmp &sub, ptr_pair &ref_count_dp) {
+inline void walk_box_contents2(cmp &sub, ptr_pair &box_dp) {
    sub.align = true;
    sub.walk();
    result = sub.result;

View file

@ -28,7 +28,6 @@ namespace shape {
typedef unsigned long tag_variant_t;
typedef unsigned long tag_align_t;
-typedef unsigned long ref_cnt_t;
// Constants // Constants
@ -376,7 +375,6 @@ ctxt<T>::walk() {
    case SHAPE_TAG: walk_tag0(); break;
    case SHAPE_BOX: walk_box0(); break;
    case SHAPE_STRUCT: walk_struct0(); break;
-    case SHAPE_OBJ: WALK_SIMPLE(walk_obj1); break;
    case SHAPE_RES: walk_res0(); break;
    case SHAPE_VAR: walk_var0(); break;
    case SHAPE_UNIQ: walk_uniq0(); break;
@ -591,7 +589,6 @@ public:
    default: abort();
    }
}

-void walk_obj1() { DPRINT("obj"); }
void walk_iface1() { DPRINT("iface"); }

void walk_tydesc1(char kind) {
@ -645,7 +642,6 @@ public:
void walk_uniq1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_box1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_fn1(char) { sa.set(sizeof(void *)*2, sizeof(void *)); }
-void walk_obj1() { sa.set(sizeof(void *)*2, sizeof(void *)); }
void walk_iface1() { sa.set(sizeof(void *), sizeof(void *)); }
void walk_tydesc1(char) { sa.set(sizeof(void *), sizeof(void *)); }
void walk_closure1(); void walk_closure1();
@ -854,9 +850,8 @@ protected:
void walk_box_contents1();
void walk_uniq_contents1();
-void walk_fn_contents1(ptr &dp, bool null_td);
-void walk_obj_contents1(ptr &dp);
-void walk_iface_contents1(ptr &dp);
+void walk_fn_contents1();
+void walk_iface_contents1();
void walk_variant1(tag_info &tinfo, tag_variant_t variant);

static std::pair<uint8_t *,uint8_t *> get_vec_data_range(ptr dp);
@ -894,13 +889,6 @@ public:
    dp = next_dp;
}

-void walk_obj1() {
-    ALIGN_TO(alignof<void *>());
-    U next_dp = dp + sizeof(void *) * 2;
-    static_cast<T *>(this)->walk_obj2();
-    dp = next_dp;
-}

void walk_iface1() {
    ALIGN_TO(alignof<void *>());
    U next_dp = dp + sizeof(void *);
@ -946,9 +934,17 @@ template<typename T,typename U>
void
data<T,U>::walk_box_contents1() {
    typename U::template data<uint8_t *>::t box_ptr = bump_dp<uint8_t *>(dp);
-    U ref_count_dp(box_ptr);
-    T sub(*static_cast<T *>(this), ref_count_dp + sizeof(ref_cnt_t));
-    static_cast<T *>(this)->walk_box_contents2(sub, ref_count_dp);
+    U box_dp(box_ptr);
+
+    // No need to worry about alignment so long as the box header is
+    // a multiple of 16 bytes.  We can just find the body by adding
+    // the size of header to box_dp.
+    assert ((sizeof(rust_opaque_box) % 16) == 0 ||
+            !"Must align to find the box body");
+
+    U body_dp = box_dp + sizeof(rust_opaque_box);
+    T sub(*static_cast<T *>(this), body_dp);
+    static_cast<T *>(this)->walk_box_contents2(sub, box_dp);
}
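(Since U may be a plain pointer or a pointer pair, the body is found here by offset arithmetic rather than by calling box_body(); for the plain-pointer case the two are equivalent. A small demonstration, assuming <assert.h> as used elsewhere in this file; the function itself is illustrative:)

    static void body_offset_demo(rust_opaque_box *box) {
        uint8_t *a = (uint8_t*)box + sizeof(rust_opaque_box);
        uint8_t *b = (uint8_t*)box_body(box);   // i.e. (void*)(box + 1)
        assert(a == b);  // same address, two spellings
    }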
template<typename T,typename U>
@ -1010,80 +1006,26 @@ data<T,U>::walk_tag1(tag_info &tinfo) {
template<typename T,typename U>
void
-data<T,U>::walk_fn_contents1(ptr &dp, bool null_td) {
+data<T,U>::walk_fn_contents1() {
    fn_env_pair pair = bump_dp<fn_env_pair>(dp);
    if (!pair.env)
        return;

    arena arena;
    const type_desc *closure_td = pair.env->td;
-    type_param *params =
-        type_param::from_tydesc(closure_td, arena);
-    ptr closure_dp((uintptr_t)pair.env);
+    type_param *params = type_param::from_tydesc(closure_td, arena);
+    ptr closure_dp((uintptr_t)box_body(pair.env));
    T sub(*static_cast<T *>(this), closure_td->shape, params,
          closure_td->shape_tables, closure_dp);
    sub.align = true;
-
-    if (null_td) {
-        // if null_td flag is true, null out the type descr from
-        // the data structure while we walk.  This is used in cycle
-        // collector when we are sweeping up data.  The idea is that
-        // we are using the information in the embedded type desc to
-        // walk the contents, so we do not want to free it during that
-        // walk.  This is not *strictly* necessary today because
-        // type_param::from_tydesc() actually pulls out the "shape"
-        // string and other information and copies it into a new
-        // location that is unaffected by the free.  But it seems
-        // safer, particularly as this pulling out of information will
-        // not cope with nested, derived type descriptors.
-        pair.env->td = NULL;
-    }
-    sub.walk();
-    if (null_td) {
-        pair.env->td = closure_td;
-    }
-}
-
-template<typename T,typename U>
-void
-data<T,U>::walk_obj_contents1(ptr &dp) {
-    dp += sizeof(void *); // Skip over the vtable.
-
-    uint8_t *box_ptr = bump_dp<uint8_t *>(dp);
-    type_desc *subtydesc =
-        *reinterpret_cast<type_desc **>(box_ptr + sizeof(void *));
-    ptr obj_closure_dp(box_ptr + sizeof(void *));
-    if (!box_ptr) // Null check.
-        return;
-
-    arena arena;
-    type_param *params = type_param::from_obj_shape(subtydesc->shape,
-                                                    obj_closure_dp, arena);
-    T sub(*static_cast<T *>(this), subtydesc->shape, params,
-          subtydesc->shape_tables, obj_closure_dp);
-    sub.align = true;
    sub.walk();
}
template<typename T,typename U>
void
-data<T,U>::walk_iface_contents1(ptr &dp) {
-    uint8_t *box_ptr = bump_dp<uint8_t *>(dp);
-    if (!box_ptr) return;
-    U ref_count_dp(box_ptr);
-    uint8_t *body_ptr = box_ptr + sizeof(void*);
-    type_desc *valtydesc =
-        *reinterpret_cast<type_desc **>(body_ptr);
-    ptr value_dp(body_ptr + sizeof(void*) * 2);
-    // FIXME The 5 is a hard-coded way to skip over a struct shape
-    // header and the first two (number-typed) fields.  This is too
-    // fragile, but I didn't see a good way to properly encode it.
-    T sub(*static_cast<T *>(this), valtydesc->shape + 5, NULL, NULL,
-          value_dp);
-    sub.align = true;
-    static_cast<T *>(this)->walk_box_contents2(sub, ref_count_dp);
+data<T,U>::walk_iface_contents1() {
+    walk_box_contents1();
}
// Polymorphic logging, for convenience
@ -1161,19 +1103,13 @@ private:
void walk_fn2(char kind) {
    out << prefix << "fn";
    prefix = "";
-    data<log,ptr>::walk_fn_contents1(dp, false);
-}
-
-void walk_obj2() {
-    out << prefix << "obj";
-    prefix = "";
-    data<log,ptr>::walk_obj_contents1(dp);
+    data<log,ptr>::walk_fn_contents1();
}

void walk_iface2() {
    out << prefix << "iface(";
    prefix = "";
-    data<log,ptr>::walk_iface_contents1(dp);
+    data<log,ptr>::walk_iface_contents1();
    out << prefix << ")";
}

View file

@ -14,6 +14,7 @@
#include <algorithm>
#include "globals.h"
+#include "rust_upcall.h"

// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
@ -246,6 +247,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
    running_on(-1),
    pinned_on(-1),
    local_region(&sched->srv->local_region),
+    boxed(&local_region),
    unwinding(false),
    killed(false),
    propagate_failure(true),
@ -295,7 +297,7 @@ rust_task::~rust_task()
struct spawn_args {
    rust_task *task;
    spawn_fn f;
-    rust_opaque_closure *envptr;
+    rust_opaque_box *envptr;
    void *argptr;
};
@ -330,8 +332,6 @@ cleanup_task(cleanup_args *args) {
    }
}

-extern "C" void upcall_shared_free(void* ptr);

// This runs on the Rust stack
extern "C" CDECL
void task_start_wrapper(spawn_args *a)
@ -349,12 +349,13 @@ void task_start_wrapper(spawn_args *a)
        threw_exception = true;
    }

-    rust_opaque_closure* env = a->envptr;
+    rust_opaque_box* env = a->envptr;
    if(env) {
-        // free the environment.
+        // free the environment (which should be a unique closure).
        const type_desc *td = env->td;
        LOG(task, task, "Freeing env %p with td %p", env, td);
-        td->drop_glue(NULL, NULL, td->first_param, env);
+        td->drop_glue(NULL, NULL, td->first_param, box_body(env));
+        upcall_free_shared_type_desc(env->td);
        upcall_shared_free(env);
    }
@ -367,7 +368,7 @@ void task_start_wrapper(spawn_args *a)
void
rust_task::start(spawn_fn spawnee_fn,
-                 rust_opaque_closure *envptr,
+                 rust_opaque_box *envptr,
                 void *argptr)
{
    LOG(this, task, "starting task from fn 0x%" PRIxPTR
@ -678,38 +679,6 @@ rust_port *rust_task::get_port_by_id(rust_port_id id) {
    return port;
}

-// Temporary routine to allow boxes on one task's shared heap to be reparented
-// to another.
-const type_desc *
-rust_task::release_alloc(void *alloc) {
-    I(sched, !lock.lock_held_by_current_thread());
-    lock.lock();
-
-    assert(local_allocs.find(alloc) != local_allocs.end());
-    const type_desc *tydesc = local_allocs[alloc];
-    local_allocs.erase(alloc);
-
-    local_region.release_alloc(alloc);
-
-    lock.unlock();
-    return tydesc;
-}
-
-// Temporary routine to allow boxes from one task's shared heap to be
-// reparented to this one.
-void
-rust_task::claim_alloc(void *alloc, const type_desc *tydesc) {
-    I(sched, !lock.lock_held_by_current_thread());
-    lock.lock();
-
-    assert(local_allocs.find(alloc) == local_allocs.end());
-    local_allocs[alloc] = tydesc;
-    local_region.claim_alloc(alloc);
-
-    lock.unlock();
-}

void
rust_task::notify(bool success) {
    // FIXME (1078) Do this in rust code

View file

@ -14,6 +14,7 @@
#include "rust_internal.h"
#include "rust_kernel.h"
#include "rust_obstack.h"
+#include "boxed_region.h"

// Corresponds to the rust chan (currently _chan) type.
struct chan_handle {
@ -106,6 +107,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
    int pinned_on;
    memory_region local_region;
+    boxed_region boxed;

    // Indicates that fail() has been called and we are cleaning up.
    // We use this to suppress the "killed" flag during calls to yield.
@ -121,7 +123,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
    rust_obstack dynastack;

-    std::map<void *,const type_desc *> local_allocs;
    uint32_t cc_counter;

    debug::task_debug_info debug;
@ -139,7 +140,7 @@ rust_task : public kernel_owned<rust_task>, rust_cond
    ~rust_task();

    void start(spawn_fn spawnee_fn,
-               rust_opaque_closure *env,
+               rust_opaque_box *env,
               void *args);
    void start();
    bool running();
@ -194,11 +195,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
    // not at all safe.
    intptr_t get_ref_count() const { return ref_count; }

-    // FIXME: These functions only exist to get the tasking system off the
-    // ground. We should never be migrating shared boxes between tasks.
-    const type_desc *release_alloc(void *alloc);
-    void claim_alloc(void *alloc, const type_desc *tydesc);

    void notify(bool success);

    void *new_stack(size_t stk_sz, void *args_addr, size_t args_sz);

View file

@ -16,6 +16,20 @@
#include <stdint.h>

+#ifdef __GNUC__
+#define LOG_UPCALL_ENTRY(task)                            \
+    LOG(task, upcall,                                     \
+        "> UPCALL %s - task: %s 0x%" PRIxPTR              \
+        " retpc: x%" PRIxPTR,                             \
+        __FUNCTION__,                                     \
+        (task)->name, (task),                             \
+        __builtin_return_address(0));
+#else
+#define LOG_UPCALL_ENTRY(task)                            \
+    LOG(task, upcall, "> UPCALL task: %s @x%" PRIxPTR,    \
+        (task)->name, (task));
+#endif

// This is called to ensure we've set up our rust stacks
// correctly. Strategically placed at entry to upcalls because they begin on
// the rust stack and happen frequently enough to catch most stack changes,
@ -98,7 +112,6 @@ upcall_fail(char const *expr,
struct s_malloc_args {
    uintptr_t retval;
-    size_t nbytes;
    type_desc *td;
};
@ -107,31 +120,27 @@ upcall_s_malloc(s_malloc_args *args) {
    rust_task *task = rust_scheduler::get_task();
    LOG_UPCALL_ENTRY(task);

-    LOG(task, mem,
-        "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
-        args->nbytes, args->td);
+    LOG(task, mem, "upcall malloc(0x%" PRIxPTR ")", args->td);

    gc::maybe_gc(task);
    cc::maybe_cc(task);

-    // TODO: Maybe use dladdr here to find a more useful name for the
-    // type_desc.
-
-    void *p = task->malloc(args->nbytes, "tdesc", args->td);
-    memset(p, '\0', args->nbytes);
-
-    task->local_allocs[p] = args->td;
-    debug::maybe_track_origin(task, p);
+    // FIXME--does this have to be calloc?
+    rust_opaque_box *box = task->boxed.calloc(args->td);
+    void *body = box_body(box);
+
+    debug::maybe_track_origin(task, box);

    LOG(task, mem,
-        "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
-        args->nbytes, args->td, (uintptr_t)p);
-    args->retval = (uintptr_t) p;
+        "upcall malloc(0x%" PRIxPTR ") = box 0x%" PRIxPTR
+        " with body 0x%" PRIxPTR,
+        args->td, (uintptr_t)box, (uintptr_t)body);
+    args->retval = (uintptr_t) box;
}

extern "C" CDECL uintptr_t
-upcall_malloc(size_t nbytes, type_desc *td) {
-    s_malloc_args args = {0, nbytes, td};
+upcall_malloc(type_desc *td) {
+    s_malloc_args args = {0, td};
    UPCALL_SWITCH_STACK(&args, upcall_s_malloc);
    return args.retval;
}
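(With nbytes gone from the signature, the payload size must come entirely from the type descriptor; presumably boxed_region::malloc sizes the allocation as header plus td->size, modulo any alignment padding via align_to() in boxed_region.h. A sketch under that assumption, since malloc's body is truncated at the top of this diff:)

    // Illustrative only: nominal size of a box allocation for a given td.
    static size_t box_alloc_size(const type_desc *td) {
        return sizeof(rust_opaque_box) + td->size;
    }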
@ -155,10 +164,10 @@ upcall_s_free(s_free_args *args) {
        "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
        (uintptr_t)args->ptr, args->is_gc);

-    task->local_allocs.erase(args->ptr);
    debug::maybe_untrack_origin(task, args->ptr);

-    task->free(args->ptr, (bool) args->is_gc);
+    rust_opaque_box *box = (rust_opaque_box*) args->ptr;
+    task->boxed.free(box);
}
extern "C" CDECL void
@ -167,6 +176,21 @@ upcall_free(void* ptr, uintptr_t is_gc) {
    UPCALL_SWITCH_STACK(&args, upcall_s_free);
}

+/**********************************************************************
+ * Sanity checks on boxes, insert when debugging possible
+ * use-after-free bugs.  See maybe_validate_box() in trans.rs.
+ */
+
+extern "C" CDECL void
+upcall_validate_box(rust_opaque_box* ptr) {
+    if (ptr) {
+        assert(ptr->ref_count > 0);
+        assert(ptr->td != NULL);
+        assert(ptr->td->align <= 8);
+        assert(ptr->td->size <= 4096); // might not really be true...
+    }
+}
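(These checks are meant to be reached from compiler-inserted probe points; a hypothetical C++ caller would validate before touching the body. The helper below is illustrative only; the real call sites are emitted by maybe_validate_box() in trans.rs:)

    // Illustrative only: validate the header, then read the body.
    static void *checked_body(rust_opaque_box *box) {
        upcall_validate_box(box);   // asserts on freed/garbage headers
        return box ? box_body(box) : NULL;
    }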
/**********************************************************************
 * Allocate an object in the exchange heap.
 */

View file

@ -1,17 +1,7 @@
#pragma once

-#ifdef __GNUC__
-#define LOG_UPCALL_ENTRY(task)                            \
-    LOG(task, upcall,                                     \
-        "> UPCALL %s - task: %s 0x%" PRIxPTR              \
-        " retpc: x%" PRIxPTR,                             \
-        __FUNCTION__,                                     \
-        (task)->name, (task),                             \
-        __builtin_return_address(0));
-#else
-#define LOG_UPCALL_ENTRY(task)                            \
-    LOG(task, upcall, "> UPCALL task: %s @x%" PRIxPTR,    \
-        (task)->name, (task));
-#endif
+// Upcalls used from C code on occasion:
+
+extern "C" CDECL void upcall_shared_free(void* ptr);
+extern "C" CDECL void upcall_free_shared_type_desc(type_desc *td);

View file

@ -17,7 +17,6 @@ get_task_pointer
get_time
last_os_error
leak
-migrate_alloc
nano_time
new_port
new_task
@ -63,6 +62,7 @@ upcall_dynastack_free
upcall_dynastack_mark
upcall_fail
upcall_free
+upcall_validate_box
upcall_create_shared_type_desc
upcall_free_shared_type_desc
upcall_get_type_desc
@ -98,4 +98,3 @@ rust_uvtmp_read_start
rust_uvtmp_timer
rust_uvtmp_delete_buf
rust_uvtmp_get_req_id

View file

@ -39,7 +39,7 @@ rust_domain_test::run() {
    return true;
}

-void task_entry(void *, rust_opaque_closure *, void *) {
+void task_entry(void *, rust_opaque_box*, void *) {
    printf("task entry\n");
}