
Making more of the rust_task structure directly accessible from Rust.

Eric Holk 2011-08-17 14:42:28 -07:00
parent efac7c9a19
commit ae89ea223d
10 changed files with 51 additions and 54 deletions
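
The pattern the rest of this page spells out in detail: the C++ runtime gathers the task fields that the standard library needs into one plain struct (rust_task_user), and std::task declares a record with the same layout, so library code can reach those fields, now including the saved context and stack pointer, through a raw task pointer rather than through extra native calls such as get_task_context. A minimal sketch of the idea in modern Rust, with hypothetical names; the real declarations are in task.rs and rust_task.h below.

#[repr(C)]
pub struct TaskUser {
    pub id: isize,          // task id handed out by the kernel
    pub notify_enabled: u8, // non-zero once a notify channel is registered
    pub rust_sp: usize,     // saved stack pointer while the task is not running
}

extern "C" {
    // Hypothetical stand-in for rustrt::get_task_pointer: the runtime returns
    // a pointer to the shared portion of its task structure.
    fn get_task_pointer(id: isize) -> *mut TaskUser;
}

pub fn enable_notifications(id: isize) {
    unsafe {
        let task = get_task_pointer(id);
        // Direct field access; no dedicated runtime entry point per field.
        (*task).notify_enabled = 1;
    }
}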

comm.rs

@@ -8,7 +8,6 @@ export _chan;
export _port;
export mk_port;
export chan_from_unsafe_ptr;
export send;
export recv;
export chan;
@@ -47,7 +46,6 @@ resource port_ptr(po: *rustrt::rust_port) {
type port<~T> = @port_ptr;
obj port_obj<~T>(raw_port : port<T>) {
// FIXME: rename this to chan once chan is not a keyword.
fn mk_chan() -> _chan<T> {
chan::<T>(raw_port)
}

ptr.rs

@@ -10,3 +10,4 @@ fn offset<T>(ptr: *T, count: uint) -> *T {
ret rusti::ptr_offset(ptr, count);
}
fn null<T>() -> *T { ret unsafe::reinterpret_cast(0u); }
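
The ptr module gains a null helper next to offset; spawn_inner in task.rs below uses it to assert that the runtime handed back a real stack pointer. A rough modern-Rust equivalent of the helper and that check, with illustrative names (today this would just be std::ptr::null()):

fn null<T>() -> *const T {
    0 as *const T
}

fn check_stack(stack_ptr: *const u8) {
    // Mirrors the new `assert ptr::null() != (**task_ptr).stack_ptr;` in spawn_inner.
    assert!(stack_ptr != null::<u8>());
}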

task.rs

@@ -4,6 +4,7 @@ import comm::_chan;
import option::some;
import option::none;
import option = option::t;
import ptr;
native "rust" mod rustrt {
fn task_sleep(time_in_us: uint);
@@ -19,9 +20,8 @@ native "rust" mod rustrt {
fn set_min_stack(stack_size: uint);
fn new_task() -> task_id;
fn drop_task(id : task_id);
fn drop_task(task : *rust_task);
fn get_task_pointer(id : task_id) -> *rust_task;
fn get_task_context(id : task_id) -> *x86_registers;
fn start_task(id : task_id);
fn get_task_trampoline() -> u32;
@@ -31,10 +31,26 @@ native "rust" mod rustrt {
}
type rust_task = {
id : task,
mutable notify_enabled : u8,
mutable notify_chan : _chan<task_notification>
mutable notify_chan : _chan<task_notification>,
ctx : task_context,
stack_ptr : *u8
};
type task_context = {
regs : x86_registers,
next : *u8
};
resource rust_task_ptr(task : *rust_task) {
rustrt::drop_task(task);
}
fn get_task_ptr(id : task) -> rust_task_ptr {
ret rust_task_ptr(rustrt::get_task_pointer(id));
}
type task = int;
type task_id = task;
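
The task record now also exposes the saved context and stack pointer, and the new resource rust_task_ptr ties the reference that rustrt::get_task_pointer takes on the task to the lifetime of the handle, so it is released exactly once when the handle goes out of scope. That is what lets the pair of manual rustrt::drop_task(id) calls at the end of spawn_inner disappear further down. A rough modern-Rust sketch of the same discipline, with hypothetical names:

#[repr(C)]
struct RawTask {
    _opaque: [u8; 0], // layout owned by the runtime
}

extern "C" {
    fn get_task_pointer(id: isize) -> *mut RawTask; // bumps the task's ref count
    fn drop_task(task: *mut RawTask);               // releases one reference
}

struct TaskPtr(*mut RawTask);

impl TaskPtr {
    fn get(id: isize) -> TaskPtr {
        TaskPtr(unsafe { get_task_pointer(id) })
    }
}

impl Drop for TaskPtr {
    fn drop(&mut self) {
        // One acquire, one release: the caller can no longer forget the
        // balancing drop_task call, or make it twice.
        unsafe { drop_task(self.0) }
    }
}
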
@@ -95,14 +111,16 @@ fn spawn_inner(thunk : -fn() -> (),
let id = rustrt::new_task();
// the order of arguments are outptr, taskptr, envptr.
// In LLVM fastcall puts the first two in ecx, edx, and the rest on the
// LLVM fastcall puts the first two in ecx, edx, and the rest on the
// stack.
let regs = rustrt::get_task_context(id);
// set up the task pointer
let task_ptr = rustrt::get_task_pointer(id);
(*regs).edx = cast(task_ptr);
let task_ptr = get_task_ptr(id);
let regs = ptr::addr_of((**task_ptr).ctx.regs);
(*regs).edx = cast(*task_ptr);
(*regs).esp = cast((**task_ptr).stack_ptr);
assert ptr::null() != (**task_ptr).stack_ptr;
let raw_thunk : { code: u32, env: u32 } = cast(thunk);
(*regs).eip = raw_thunk.code;
@@ -110,8 +128,8 @@ fn spawn_inner(thunk : -fn() -> (),
// set up notifications if they are enabled.
alt notify {
some(c) {
(*task_ptr).notify_enabled = 1u8;
(*task_ptr).notify_chan = c;
(**task_ptr).notify_enabled = 1u8;
(**task_ptr).notify_chan = c;
}
none {}
};
@@ -130,7 +148,7 @@ fn spawn_inner(thunk : -fn() -> (),
// put the return pointer in ecx.
(*regs).ecx = (*regs).esp + 8u32;
*tptr = cast(task_ptr);
*tptr = cast(*task_ptr);
*env = raw_thunk.env;
*ra = rustrt::get_task_trampoline();
@@ -139,11 +157,6 @@ fn spawn_inner(thunk : -fn() -> (),
rustrt::leak(thunk);
// Drop twice because get_task_context and get_task_pounter both bump the
// ref count and expect us to free it.
rustrt::drop_task(id);
rustrt::drop_task(id);
ret id;
}
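
spawn_inner now reaches the saved register block through the task pointer (ptr::addr_of((**task_ptr).ctx.regs)) instead of the dedicated get_task_context call that this commit deletes from the runtime. As the comment in the hunk above notes, LLVM fastcall passes the first two arguments in ecx and edx; the sketch below shows just that seeding step (modern Rust, invented register-struct layout, 32-bit values, and the stack bookkeeping compressed away):

#[repr(C)]
struct X86Registers {
    ecx: u32,
    edx: u32,
    esp: u32,
    eip: u32,
    // ...the real registers_t carries the full register file.
}

fn seed_regs(regs: &mut X86Registers, task_ptr: u32, new_esp: u32, entry: u32) {
    regs.esp = new_esp;      // the new task starts on its own, already-prepared stack
    regs.eip = entry;        // code pointer of the spawned thunk
    regs.edx = task_ptr;     // second fastcall argument: the task pointer
    regs.ecx = regs.esp + 8; // first fastcall argument (the "return pointer" above)
}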

context.cpp

@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
extern "C" uint32_t CDECL swap_registers(registers_t *oregs,
registers_t *regs)
@@ -11,6 +12,7 @@ extern "C" uint32_t CDECL swap_registers(registers_t *oregs,
context::context()
{
assert((void*)&regs == (void*)this);
}
void context::swap(context &out)

rust_builtin.cpp

@@ -712,7 +712,7 @@ unpin_task(rust_task *task) {
extern "C" CDECL rust_task_id
get_task_id(rust_task *task) {
return task->id;
return task->user.id;
}
extern "C" CDECL rust_task_id
@@ -720,30 +720,16 @@ new_task(rust_task *task) {
return task->kernel->create_task(task, NULL);
}
extern "C" CDECL registers_t *
get_task_context(rust_task *task, rust_task_id id) {
rust_task *target = task->kernel->get_task_by_id(id);
registers_t *regs = &target->ctx.regs;
// This next line is a little dangerous.. It means we can only safely call
// this when starting a task.
regs->esp = target->rust_sp;
return regs;
}
extern "C" CDECL void
drop_task(rust_task *task, rust_task_id tid) {
rust_task *target = task->kernel->get_task_by_id(tid);
drop_task(rust_task *task, rust_task *target) {
if(target) {
target->deref();
// Deref twice because get_task_by_id does once.
target->deref();
}
}
extern "C" CDECL rust_task *
get_task_pointer(rust_task *task, rust_task_id id) {
rust_task *t = task->kernel->get_task_by_id(id);
return t;
return task->kernel->get_task_by_id(id);
}
extern "C" CDECL void

rust_kernel.cpp

@@ -154,10 +154,10 @@ rust_kernel::create_task(rust_task *spawner, const char *name) {
rust_task *t = thread->create_task(spawner, name);
{
scoped_lock with(_kernel_lock);
t->id = max_id++;
task_table.put(t->id, t);
t->user.id = max_id++;
task_table.put(t->user.id, t);
}
return t->id;
return t->user.id;
}
rust_task *

rust_scheduler.cpp

@@ -47,10 +47,10 @@ void
rust_scheduler::activate(rust_task *task) {
context ctx;
task->ctx.next = &ctx;
task->user.ctx.next = &ctx;
DLOG(this, task, "descheduling...");
lock.unlock();
task->ctx.swap(ctx);
task->user.ctx.swap(ctx);
lock.lock();
DLOG(this, task, "task has returned");
}
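
activate now finds the saved context inside the user-visible part of the task (task->user.ctx), but the handoff protocol is unchanged: the scheduler records a way back in ctx.next, swaps into the task, and the task's yield later swaps out through that same link (see rust_task::yield further down). The real code swaps x86 register contexts on a single thread; the sketch below fakes the same protocol with two threads and channels purely to show who waits on whom, with hypothetical names throughout:

use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (resume_task, task_rx) = channel::<()>();    // scheduler -> task: "run"
    let (back_to_sched, sched_rx) = channel::<()>(); // task -> scheduler: "yielded"

    // The task: wait to be activated, do a step of work, then yield back.
    let task = thread::spawn(move || {
        task_rx.recv().unwrap();         // activate() swapped us in
        println!("task: doing some work");
        back_to_sched.send(()).unwrap(); // yield: swap back through ctx.next
    });

    // activate(): deschedule ourselves and run the task until it yields or returns.
    resume_task.send(()).unwrap();
    sched_rx.recv().unwrap();
    println!("scheduler: task has returned");
    task.join().unwrap();
}
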
@@ -226,7 +226,7 @@ rust_scheduler::start_main_loop() {
", state: %s",
scheduled_task->name,
(uintptr_t)scheduled_task,
scheduled_task->rust_sp,
scheduled_task->user.rust_sp,
scheduled_task->state->name);
interrupt_flag = 0;
@@ -244,7 +244,7 @@ rust_scheduler::start_main_loop() {
scheduled_task->name,
(uintptr_t)scheduled_task,
scheduled_task->state->name,
scheduled_task->rust_sp,
scheduled_task->user.rust_sp,
id);
reap_dead_tasks(id);

rust_task.cpp

@@ -57,7 +57,6 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
ref_count(1),
stk(NULL),
runtime_sp(0),
rust_sp(0),
gc_alloc_chain(0),
sched(sched),
cache(NULL),
@@ -86,7 +85,7 @@ rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
user.notify_enabled = 0;
stk = new_stk(sched, this, 0);
rust_sp = stk->limit;
user.rust_sp = stk->limit;
}
rust_task::~rust_task()
@@ -99,7 +98,7 @@ rust_task::~rust_task()
get_chan_by_handle(&user.notify_chan);
if(target) {
task_notification msg;
msg.id = id;
msg.id = user.id;
msg.result = failed ? tr_failure : tr_success;
target->send(&msg);
@@ -107,7 +106,7 @@ rust_task::~rust_task()
}
}
kernel->release_task_id(id);
kernel->release_task_id(user.id);
/* FIXME: tighten this up, there are some more
assertions that hold at task-lifecycle events. */
@@ -166,7 +165,7 @@ rust_task::start(uintptr_t spawnee_fn,
I(sched, stk->data != NULL);
char *sp = (char *)rust_sp;
char *sp = (char *)user.rust_sp;
sp -= sizeof(spawn_args);
@@ -178,7 +177,7 @@ rust_task::start(uintptr_t spawnee_fn,
void **f = (void **)&a->f;
*f = (void *)spawnee_fn;
ctx.call((void *)task_start_wrapper, a, sp);
user.ctx.call((void *)task_start_wrapper, a, sp);
this->start();
}
@@ -213,7 +212,7 @@ rust_task::yield(size_t time_in_us) {
yield_timer.reset_us(time_in_us);
// Return to the scheduler.
ctx.next->swap(ctx);
user.ctx.next->swap(user.ctx);
}
void

rust_task.h

@@ -38,8 +38,11 @@ struct gc_alloc {
// portions of the task structure that are accessible from the standard
// library. This struct must agree with the std::task::rust_task record.
struct rust_task_user {
rust_task_id id;
uint8_t notify_enabled;
chan_handle notify_chan;
context ctx;
uintptr_t rust_sp; // Saved sp when not running.
};
// std::lib::task::task_result
@@ -66,7 +69,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
// Fields known to the compiler.
stk_seg *stk;
uintptr_t runtime_sp; // Runtime sp while task running.
uintptr_t rust_sp; // Saved sp when not running.
gc_alloc *gc_alloc_chain; // Linked list of GC allocations.
rust_scheduler *sched;
rust_crate_cache *cache;
@@ -82,7 +84,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
size_t gc_alloc_thresh;
size_t gc_alloc_accum;
rust_task_id id;
rust_port_id next_port_id;
// Keeps track of the last time this task yielded.
@@ -99,8 +100,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond
// List of tasks waiting for this task to finish.
array_list<rust_task *> tasks_waiting_to_join;
context ctx;
// This flag indicates that a worker is either currently running the task
// or is about to run this task.
int running_on;
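
The fields that leave the private part of rust_task here (rust_sp, id, ctx) all reappear inside rust_task_user above, and the comment on that struct is the only thing enforcing that it "must agree with the std::task::rust_task record". A hedged sketch of one way to make drift loud, in modern Rust with illustrative field types (the 2011 code simply kept the two declarations in sync by hand; ctx is omitted because its layout belongs to the runtime):

#[repr(C)]
struct TaskUser {
    id: usize,          // rust_task_id
    notify_enabled: u8, // checked by the runtime when the task exits
    notify_chan: u32,   // stand-in for chan_handle
    rust_sp: usize,     // saved sp when not running
}

// Compile-time layout pins (64-bit): reordering or resizing a field stops the build.
const _: () = {
    assert!(std::mem::offset_of!(TaskUser, notify_enabled) == 8);
    assert!(std::mem::offset_of!(TaskUser, rust_sp) == 16);
    assert!(std::mem::size_of::<TaskUser>() == 24);
};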

rustrt.def.in

@@ -25,7 +25,6 @@ do_gc
drop_port
drop_task
get_port_id
get_task_context
get_task_id
get_task_pointer
get_task_trampoline