
Renamed what's left of rust_dom to rust_scheduler

Eric Holk 2011-06-28 12:15:41 -07:00 committed by Graydon Hoare
parent 02f6645fca
commit 657e5a2bd5
23 changed files with 303 additions and 321 deletions
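
The rename is almost entirely mechanical: the type rust_dom becomes rust_scheduler, fields and locals named dom become sched, and the kernel's create_domain/destroy_domain/get_domain entry points become create_scheduler/destroy_scheduler/get_scheduler. A sketch of the whole commit in miniature (illustrative only, not a hunk from the diff; rust_task_excerpt is a made-up name):

    // Same object, clearer name.
    struct rust_scheduler;          // formerly: struct rust_dom;

    struct rust_task_excerpt {      // hypothetical stand-in for rust_task
        rust_scheduler *sched;      // formerly: rust_dom *dom;
    };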

View file

@@ -10,7 +10,7 @@ RUNTIME_CS := rt/sync/timer.cpp \
rt/rust_run_program.cpp \ rt/rust_run_program.cpp \
rt/rust_crate_cache.cpp \ rt/rust_crate_cache.cpp \
rt/rust_comm.cpp \ rt/rust_comm.cpp \
rt/rust_dom.cpp \ rt/rust_scheduler.cpp \
rt/rust_task.cpp \ rt/rust_task.cpp \
rt/rust_task_list.cpp \ rt/rust_task_list.cpp \
rt/rust_proxy.cpp \ rt/rust_proxy.cpp \
@@ -37,7 +37,7 @@ RUNTIME_HDR := rt/globals.h \
rt/rust_util.h \ rt/rust_util.h \
rt/rust_chan.h \ rt/rust_chan.h \
rt/rust_port.h \ rt/rust_port.h \
rt/rust_dom.h \ rt/rust_scheduler.h \
rt/rust_task.h \ rt/rust_task.h \
rt/rust_task_list.h \ rt/rust_task_list.h \
rt/rust_proxy.h \ rt/rust_proxy.h \

View file

@@ -5,7 +5,7 @@
#include "rust_internal.h" #include "rust_internal.h"
circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) : circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
dom(task->dom), sched(task->sched),
task(task), task(task),
unit_sz(unit_sz), unit_sz(unit_sz),
_buffer_sz(initial_size()), _buffer_sz(initial_size()),
@@ -13,26 +13,26 @@ circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
_unread(0), _unread(0),
_buffer((uint8_t *)task->malloc(_buffer_sz)) { _buffer((uint8_t *)task->malloc(_buffer_sz)) {
A(dom, unit_sz, "Unit size must be larger than zero."); A(sched, unit_sz, "Unit size must be larger than zero.");
DLOG(dom, mem, "new circular_buffer(buffer_sz=%d, unread=%d)" DLOG(sched, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
"-> circular_buffer=0x%" PRIxPTR, "-> circular_buffer=0x%" PRIxPTR,
_buffer_sz, _unread, this); _buffer_sz, _unread, this);
A(dom, _buffer, "Failed to allocate buffer."); A(sched, _buffer, "Failed to allocate buffer.");
} }
circular_buffer::~circular_buffer() { circular_buffer::~circular_buffer() {
DLOG(dom, mem, "~circular_buffer 0x%" PRIxPTR, this); DLOG(sched, mem, "~circular_buffer 0x%" PRIxPTR, this);
I(dom, _buffer); I(sched, _buffer);
W(dom, _unread == 0, W(sched, _unread == 0,
"freeing circular_buffer with %d unread bytes", _unread); "freeing circular_buffer with %d unread bytes", _unread);
task->free(_buffer); task->free(_buffer);
} }
size_t size_t
circular_buffer::initial_size() { circular_buffer::initial_size() {
I(dom, unit_sz > 0); I(sched, unit_sz > 0);
return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz; return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz;
} }
@@ -41,8 +41,8 @@ circular_buffer::initial_size() {
*/ */
void void
circular_buffer::transfer(void *dst) { circular_buffer::transfer(void *dst) {
I(dom, dst); I(sched, dst);
I(dom, _unread <= _buffer_sz); I(sched, _unread <= _buffer_sz);
uint8_t *ptr = (uint8_t *) dst; uint8_t *ptr = (uint8_t *) dst;
@@ -54,13 +54,13 @@ circular_buffer::transfer(void *dst) {
} else { } else {
head_sz = _buffer_sz - _next; head_sz = _buffer_sz - _next;
} }
I(dom, _next + head_sz <= _buffer_sz); I(sched, _next + head_sz <= _buffer_sz);
memcpy(ptr, _buffer + _next, head_sz); memcpy(ptr, _buffer + _next, head_sz);
// Then copy any other items from the beginning of the buffer // Then copy any other items from the beginning of the buffer
I(dom, _unread >= head_sz); I(sched, _unread >= head_sz);
size_t tail_sz = _unread - head_sz; size_t tail_sz = _unread - head_sz;
I(dom, head_sz + tail_sz <= _buffer_sz); I(sched, head_sz + tail_sz <= _buffer_sz);
memcpy(ptr + head_sz, _buffer, tail_sz); memcpy(ptr + head_sz, _buffer, tail_sz);
} }
@@ -70,37 +70,37 @@ circular_buffer::transfer(void *dst) {
*/ */
void void
circular_buffer::enqueue(void *src) { circular_buffer::enqueue(void *src) {
I(dom, src); I(sched, src);
I(dom, _unread <= _buffer_sz); I(sched, _unread <= _buffer_sz);
I(dom, _buffer); I(sched, _buffer);
// Grow if necessary. // Grow if necessary.
if (_unread == _buffer_sz) { if (_unread == _buffer_sz) {
grow(); grow();
} }
DLOG(dom, mem, "circular_buffer enqueue " DLOG(sched, mem, "circular_buffer enqueue "
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d", "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz); _unread, _next, _buffer_sz, unit_sz);
I(dom, _unread < _buffer_sz); I(sched, _unread < _buffer_sz);
I(dom, _unread + unit_sz <= _buffer_sz); I(sched, _unread + unit_sz <= _buffer_sz);
// Copy data // Copy data
size_t dst_idx = _next + _unread; size_t dst_idx = _next + _unread;
I(dom, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz); I(sched, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
if (dst_idx >= _buffer_sz) { if (dst_idx >= _buffer_sz) {
dst_idx -= _buffer_sz; dst_idx -= _buffer_sz;
I(dom, _next >= unit_sz); I(sched, _next >= unit_sz);
I(dom, dst_idx <= _next - unit_sz); I(sched, dst_idx <= _next - unit_sz);
} }
I(dom, dst_idx + unit_sz <= _buffer_sz); I(sched, dst_idx + unit_sz <= _buffer_sz);
memcpy(&_buffer[dst_idx], src, unit_sz); memcpy(&_buffer[dst_idx], src, unit_sz);
_unread += unit_sz; _unread += unit_sz;
DLOG(dom, mem, "circular_buffer pushed data at index: %d", dst_idx); DLOG(sched, mem, "circular_buffer pushed data at index: %d", dst_idx);
} }
/** /**
@@ -110,21 +110,21 @@ circular_buffer::enqueue(void *src) {
*/ */
void void
circular_buffer::dequeue(void *dst) { circular_buffer::dequeue(void *dst) {
I(dom, unit_sz > 0); I(sched, unit_sz > 0);
I(dom, _unread >= unit_sz); I(sched, _unread >= unit_sz);
I(dom, _unread <= _buffer_sz); I(sched, _unread <= _buffer_sz);
I(dom, _buffer); I(sched, _buffer);
DLOG(dom, mem, DLOG(sched, mem,
"circular_buffer dequeue " "circular_buffer dequeue "
"unread: %d, next: %d, buffer_sz: %d, unit_sz: %d", "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
_unread, _next, _buffer_sz, unit_sz); _unread, _next, _buffer_sz, unit_sz);
I(dom, _next + unit_sz <= _buffer_sz); I(sched, _next + unit_sz <= _buffer_sz);
if (dst != NULL) { if (dst != NULL) {
memcpy(dst, &_buffer[_next], unit_sz); memcpy(dst, &_buffer[_next], unit_sz);
} }
DLOG(dom, mem, "shifted data from index %d", _next); DLOG(sched, mem, "shifted data from index %d", _next);
_unread -= unit_sz; _unread -= unit_sz;
_next += unit_sz; _next += unit_sz;
if (_next == _buffer_sz) { if (_next == _buffer_sz) {
@@ -140,8 +140,8 @@ circular_buffer::dequeue(void *dst) {
void void
circular_buffer::grow() { circular_buffer::grow() {
size_t new_buffer_sz = _buffer_sz * 2; size_t new_buffer_sz = _buffer_sz * 2;
I(dom, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE); I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
DLOG(dom, mem, "circular_buffer is growing to %d bytes", new_buffer_sz); DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz); void *new_buffer = task->malloc(new_buffer_sz);
transfer(new_buffer); transfer(new_buffer);
task->free(_buffer); task->free(_buffer);
@@ -153,8 +153,8 @@ circular_buffer::grow() {
void void
circular_buffer::shrink() { circular_buffer::shrink() {
size_t new_buffer_sz = _buffer_sz / 2; size_t new_buffer_sz = _buffer_sz / 2;
I(dom, initial_size() <= new_buffer_sz); I(sched, initial_size() <= new_buffer_sz);
DLOG(dom, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz); DLOG(sched, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
void *new_buffer = task->malloc(new_buffer_sz); void *new_buffer = task->malloc(new_buffer_sz);
transfer(new_buffer); transfer(new_buffer);
task->free(_buffer); task->free(_buffer);
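
For orientation (not part of the commit): the I(sched, ...) assertions renamed above guard the buffer's wrap-around index arithmetic. A minimal standalone model of the enqueue computation, assuming, as initial_size(), grow() and shrink() arrange, that the buffer size stays a multiple of the unit size:

    #include <cassert>
    #include <cstddef>

    // Unread data occupies [next, next + unread) modulo buffer_sz;
    // return the slot where the next unit lands.
    size_t enqueue_index(size_t next, size_t unread,
                         size_t buffer_sz, size_t unit_sz) {
        size_t dst_idx = next + unread;     // candidate write position
        if (dst_idx >= buffer_sz)
            dst_idx -= buffer_sz;           // wrap to the buffer's front
        // A unit never straddles the end, because buffer_sz is kept a
        // multiple of unit_sz.
        assert(dst_idx + unit_sz <= buffer_sz);
        return dst_idx;
    }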

View file

@@ -10,7 +10,7 @@ circular_buffer : public task_owned<circular_buffer> {
static const size_t INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS = 8; static const size_t INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS = 8;
static const size_t MAX_CIRCULAR_BUFFER_SIZE = 1 << 24; static const size_t MAX_CIRCULAR_BUFFER_SIZE = 1 << 24;
rust_dom *dom; rust_scheduler *sched;
public: public:
rust_task *task; rust_task *task;

View file

@@ -39,13 +39,13 @@ command_line_args : public kernel_owned<command_line_args>
size_t vec_fill = sizeof(rust_str *) * argc; size_t vec_fill = sizeof(rust_str *) * argc;
size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill); size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill);
void *mem = kernel->malloc(vec_alloc); void *mem = kernel->malloc(vec_alloc);
args = new (mem) rust_vec(task->dom, vec_alloc, 0, NULL); args = new (mem) rust_vec(task->sched, vec_alloc, 0, NULL);
rust_str **strs = (rust_str**) &args->data[0]; rust_str **strs = (rust_str**) &args->data[0];
for (int i = 0; i < argc; ++i) { for (int i = 0; i < argc; ++i) {
size_t str_fill = strlen(argv[i]) + 1; size_t str_fill = strlen(argv[i]) + 1;
size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill); size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill);
mem = kernel->malloc(str_alloc); mem = kernel->malloc(str_alloc);
strs[i] = new (mem) rust_str(task->dom, str_alloc, str_fill, strs[i] = new (mem) rust_str(task->sched, str_alloc, str_fill,
(uint8_t const *)argv[i]); (uint8_t const *)argv[i]);
} }
args->fill = vec_fill; args->fill = vec_fill;
@@ -98,21 +98,21 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
rust_srv *srv = new rust_srv(); rust_srv *srv = new rust_srv();
rust_kernel *kernel = new rust_kernel(srv); rust_kernel *kernel = new rust_kernel(srv);
kernel->start(); kernel->start();
rust_dom *dom = kernel->get_domain(); rust_scheduler *sched = kernel->get_scheduler();
command_line_args *args command_line_args *args
= new (kernel) command_line_args(dom->root_task, argc, argv); = new (kernel) command_line_args(sched->root_task, argc, argv);
DLOG(dom, dom, "startup: %d args in 0x%" PRIxPTR, DLOG(sched, dom, "startup: %d args in 0x%" PRIxPTR,
args->argc, (uintptr_t)args->args); args->argc, (uintptr_t)args->args);
for (int i = 0; i < args->argc; i++) { for (int i = 0; i < args->argc; i++) {
DLOG(dom, dom, "startup: arg[%d] = '%s'", i, args->argv[i]); DLOG(sched, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
} }
dom->root_task->start(main_fn, (uintptr_t)args->args); sched->root_task->start(main_fn, (uintptr_t)args->args);
int num_threads = get_num_threads(); int num_threads = get_num_threads();
DLOG(dom, dom, "Using %d worker threads.", num_threads); DLOG(sched, dom, "Using %d worker threads.", num_threads);
int ret = kernel->start_task_threads(num_threads); int ret = kernel->start_task_threads(num_threads);
delete args; delete args;
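
Condensed from the rust_start hunk above, the post-rename startup sequence reads (an excerpt, with the argument plumbing elided; identifiers as in the diff):

    rust_srv *srv = new rust_srv();
    rust_kernel *kernel = new rust_kernel(srv);
    kernel->start();
    rust_scheduler *sched = kernel->get_scheduler();  // was get_domain()
    sched->root_task->start(main_fn, (uintptr_t)args->args);
    int ret = kernel->start_task_threads(get_num_threads());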

View file

@@ -9,7 +9,7 @@
extern "C" CDECL rust_str* extern "C" CDECL rust_str*
last_os_error(rust_task *task) { last_os_error(rust_task *task) {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
LOG(task, task, "last_os_error()"); LOG(task, task, "last_os_error()");
#if defined(__WIN32__) #if defined(__WIN32__)
@@ -47,7 +47,7 @@ last_os_error(rust_task *task) {
task->fail(1); task->fail(1);
return NULL; return NULL;
} }
rust_str *st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)buf); rust_str *st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)buf);
#ifdef __WIN32__ #ifdef __WIN32__
LocalFree((HLOCAL)buf); LocalFree((HLOCAL)buf);
@@ -57,7 +57,7 @@ last_os_error(rust_task *task) {
extern "C" CDECL rust_str * extern "C" CDECL rust_str *
rust_getcwd(rust_task *task) { rust_getcwd(rust_task *task) {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
LOG(task, task, "rust_getcwd()"); LOG(task, task, "rust_getcwd()");
char cbuf[BUF_BYTES]; char cbuf[BUF_BYTES];
@@ -80,7 +80,7 @@ rust_getcwd(rust_task *task) {
} }
rust_str *st; rust_str *st;
st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)cbuf); st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)cbuf);
return st; return st;
} }
@@ -124,7 +124,7 @@ unsupervise(rust_task *task) {
extern "C" CDECL rust_vec* extern "C" CDECL rust_vec*
vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts) vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
{ {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
LOG(task, mem, "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR, LOG(task, mem, "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR,
n_elts, elem_t->size); n_elts, elem_t->size);
size_t fill = n_elts * elem_t->size; size_t fill = n_elts * elem_t->size;
@@ -134,7 +134,7 @@ vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
task->fail(4); task->fail(4);
return NULL; return NULL;
} }
rust_vec *vec = new (mem) rust_vec(dom, alloc, 0, NULL); rust_vec *vec = new (mem) rust_vec(sched, alloc, 0, NULL);
return vec; return vec;
} }
@@ -198,11 +198,11 @@ vec_alloc_with_data(rust_task *task,
size_t elt_size, size_t elt_size,
void *d) void *d)
{ {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_vec) + (n_elts * elt_size)); size_t alloc = next_power_of_two(sizeof(rust_vec) + (n_elts * elt_size));
void *mem = task->malloc(alloc, memory_region::LOCAL); void *mem = task->malloc(alloc, memory_region::LOCAL);
if (!mem) return NULL; if (!mem) return NULL;
return new (mem) rust_vec(dom, alloc, fill * elt_size, (uint8_t*)d); return new (mem) rust_vec(sched, alloc, fill * elt_size, (uint8_t*)d);
} }
extern "C" CDECL rust_vec* extern "C" CDECL rust_vec*
@@ -355,13 +355,13 @@ str_from_buf(rust_task *task, char *buf, unsigned int len) {
extern "C" CDECL void * extern "C" CDECL void *
rand_new(rust_task *task) rand_new(rust_task *task)
{ {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
randctx *rctx = (randctx *) task->malloc(sizeof(randctx)); randctx *rctx = (randctx *) task->malloc(sizeof(randctx));
if (!rctx) { if (!rctx) {
task->fail(1); task->fail(1);
return NULL; return NULL;
} }
isaac_init(dom, rctx); isaac_init(sched, rctx);
return rctx; return rctx;
} }

View file

@@ -22,7 +22,7 @@ rust_chan::rust_chan(rust_task *task,
rust_chan::~rust_chan() { rust_chan::~rust_chan() {
LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this); LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this);
A(task->dom, is_associated() == false, A(task->sched, is_associated() == false,
"Channel must be disassociated before being freed."); "Channel must be disassociated before being freed.");
--task->ref_count; --task->ref_count;
} }
@@ -49,7 +49,7 @@ bool rust_chan::is_associated() {
* Unlink this channel from its associated port. * Unlink this channel from its associated port.
*/ */
void rust_chan::disassociate() { void rust_chan::disassociate() {
A(task->dom, is_associated(), "Channel must be associated with a port."); A(task->sched, is_associated(), "Channel must be associated with a port.");
if (port->is_proxy() == false) { if (port->is_proxy() == false) {
LOG(task, task, LOG(task, task,
@@ -69,14 +69,14 @@ void rust_chan::disassociate() {
void rust_chan::send(void *sptr) { void rust_chan::send(void *sptr) {
buffer.enqueue(sptr); buffer.enqueue(sptr);
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
if (!is_associated()) { if (!is_associated()) {
W(dom, is_associated(), W(sched, is_associated(),
"rust_chan::transmit with no associated port."); "rust_chan::transmit with no associated port.");
return; return;
} }
A(dom, !buffer.is_empty(), A(sched, !buffer.is_empty(),
"rust_chan::transmit with nothing to send."); "rust_chan::transmit with nothing to send.");
if (port->is_proxy()) { if (port->is_proxy()) {
@@ -86,7 +86,7 @@ void rust_chan::send(void *sptr) {
} else { } else {
rust_port *target_port = port->referent(); rust_port *target_port = port->referent();
if (target_port->task->blocked_on(target_port)) { if (target_port->task->blocked_on(target_port)) {
DLOG(dom, comm, "dequeued in rendezvous_ptr"); DLOG(sched, comm, "dequeued in rendezvous_ptr");
buffer.dequeue(target_port->task->rendezvous_ptr); buffer.dequeue(target_port->task->rendezvous_ptr);
target_port->task->rendezvous_ptr = 0; target_port->task->rendezvous_ptr = 0;
target_port->task->wakeup(target_port); target_port->task->wakeup(target_port);
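
The send path above either queues the value or, when the receiving task is already blocked on the port, dequeues it straight into that task's rendezvous buffer and wakes it. A standalone model of that flow (illustrative; toy_task and the int payload are made-up stand-ins for the runtime's types):

    #include <deque>

    struct toy_task {
        bool blocked = false;
        int *rendezvous_ptr = nullptr;  // where a blocked receiver wants data
        void wakeup() { blocked = false; }
    };

    void send(toy_task &receiver, std::deque<int> &buffer, int value) {
        buffer.push_back(value);                       // always enqueue first
        if (receiver.blocked && receiver.rendezvous_ptr) {
            *receiver.rendezvous_ptr = buffer.front(); // rendezvous hand-off
            buffer.pop_front();
            receiver.rendezvous_ptr = nullptr;
            receiver.wakeup();
        }
    }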

View file

@@ -7,16 +7,16 @@ rust_crate_cache::get_type_desc(size_t size,
size_t n_descs, size_t n_descs,
type_desc const **descs) type_desc const **descs)
{ {
I(dom, n_descs > 1); I(sched, n_descs > 1);
type_desc *td = NULL; type_desc *td = NULL;
size_t keysz = n_descs * sizeof(type_desc*); size_t keysz = n_descs * sizeof(type_desc*);
HASH_FIND(hh, this->type_descs, descs, keysz, td); HASH_FIND(hh, this->type_descs, descs, keysz, td);
if (td) { if (td) {
DLOG(dom, cache, "rust_crate_cache::get_type_desc hit"); DLOG(sched, cache, "rust_crate_cache::get_type_desc hit");
return td; return td;
} }
DLOG(dom, cache, "rust_crate_cache::get_type_desc miss"); DLOG(sched, cache, "rust_crate_cache::get_type_desc miss");
td = (type_desc*) dom->kernel->malloc(sizeof(type_desc) + keysz); td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz);
if (!td) if (!td)
return NULL; return NULL;
// By convention, desc 0 is the root descriptor. // By convention, desc 0 is the root descriptor.
@@ -27,7 +27,7 @@ rust_crate_cache::get_type_desc(size_t size,
td->size = size; td->size = size;
td->align = align; td->align = align;
for (size_t i = 0; i < n_descs; ++i) { for (size_t i = 0; i < n_descs; ++i) {
DLOG(dom, cache, DLOG(sched, cache,
"rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR, "rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR,
i, descs[i]); i, descs[i]);
td->descs[i] = descs[i]; td->descs[i] = descs[i];
@@ -38,22 +38,22 @@ rust_crate_cache::get_type_desc(size_t size,
return td; return td;
} }
rust_crate_cache::rust_crate_cache(rust_dom *dom) rust_crate_cache::rust_crate_cache(rust_scheduler *sched)
: type_descs(NULL), : type_descs(NULL),
dom(dom), sched(sched),
idx(0) idx(0)
{ {
} }
void void
rust_crate_cache::flush() { rust_crate_cache::flush() {
DLOG(dom, cache, "rust_crate_cache::flush()"); DLOG(sched, cache, "rust_crate_cache::flush()");
while (type_descs) { while (type_descs) {
type_desc *d = type_descs; type_desc *d = type_descs;
HASH_DEL(type_descs, d); HASH_DEL(type_descs, d);
DLOG(dom, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d); DLOG(sched, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
dom->kernel->free(d); sched->kernel->free(d);
} }
} }
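
The cache above is a uthash table keyed on the raw bytes of the child-descriptor array, and flush() drains it entry by entry. The same idiom in isolation (a sketch assuming uthash.h is available; cache_entry is a made-up stand-in for type_desc):

    #include <cstddef>
    #include <cstdlib>
    #include "uthash.h"             // the hash library the runtime uses

    struct cache_entry {
        const void *key;            // key bytes, owned elsewhere
        UT_hash_handle hh;          // uthash bookkeeping
    };

    cache_entry *lookup(cache_entry *head, const void *key, size_t keysz) {
        cache_entry *e = NULL;
        HASH_FIND(hh, head, key, keysz, e);   // NULL on miss, as above
        return e;
    }

    void flush(cache_entry *&head) {    // mirrors rust_crate_cache::flush()
        while (head) {
            cache_entry *e = head;
            HASH_DEL(head, e);          // unlink from the table
            free(e);
        }
    }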

View file

@@ -50,7 +50,7 @@ extern "C" {
#include "sync/lock_and_signal.h" #include "sync/lock_and_signal.h"
#include "sync/lock_free_queue.h" #include "sync/lock_free_queue.h"
struct rust_dom; struct rust_scheduler;
struct rust_task; struct rust_task;
class rust_log; class rust_log;
class rust_port; class rust_port;
@@ -174,7 +174,7 @@ public:
#include "rust_proxy.h" #include "rust_proxy.h"
#include "rust_kernel.h" #include "rust_kernel.h"
#include "rust_message.h" #include "rust_message.h"
#include "rust_dom.h" #include "rust_scheduler.h"
struct rust_timer { struct rust_timer {
// FIXME: This will probably eventually need replacement // FIXME: This will probably eventually need replacement
@@ -183,7 +183,7 @@ struct rust_timer {
// For now it's just the most basic "thread that can interrupt // For now it's just the most basic "thread that can interrupt
// its associated domain-thread" device, so that we have // its associated domain-thread" device, so that we have
// *some* form of task-preemption. // *some* form of task-preemption.
rust_dom *dom; rust_scheduler *sched;
uintptr_t exit_flag; uintptr_t exit_flag;
#if defined(__WIN32__) #if defined(__WIN32__)
@@ -193,7 +193,7 @@ struct rust_timer {
pthread_t thread; pthread_t thread;
#endif #endif
rust_timer(rust_dom *dom); rust_timer(rust_scheduler *sched);
~rust_timer(); ~rust_timer();
}; };

View file

@@ -13,55 +13,55 @@ rust_kernel::rust_kernel(rust_srv *srv) :
_srv(srv), _srv(srv),
_interrupt_kernel_loop(FALSE) _interrupt_kernel_loop(FALSE)
{ {
dom = create_domain("main"); sched = create_scheduler("main");
} }
rust_dom * rust_scheduler *
rust_kernel::create_domain(const char *name) { rust_kernel::create_scheduler(const char *name) {
_kernel_lock.lock(); _kernel_lock.lock();
rust_message_queue *message_queue = rust_message_queue *message_queue =
new (this) rust_message_queue(_srv, this); new (this) rust_message_queue(_srv, this);
rust_srv *srv = _srv->clone(); rust_srv *srv = _srv->clone();
rust_dom *dom = rust_scheduler *sched =
new (this) rust_dom(this, message_queue, srv, name); new (this) rust_scheduler(this, message_queue, srv, name);
rust_handle<rust_dom> *handle = internal_get_dom_handle(dom); rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
message_queue->associate(handle); message_queue->associate(handle);
message_queues.append(message_queue); message_queues.append(message_queue);
KLOG("created domain: " PTR ", name: %s, index: %d", KLOG("created scheduler: " PTR ", name: %s, index: %d",
dom, name, dom->list_index); sched, name, sched->list_index);
_kernel_lock.signal_all(); _kernel_lock.signal_all();
_kernel_lock.unlock(); _kernel_lock.unlock();
return dom; return sched;
} }
void void
rust_kernel::destroy_domain() { rust_kernel::destroy_scheduler() {
_kernel_lock.lock(); _kernel_lock.lock();
KLOG("deleting domain: " PTR ", name: %s, index: %d", KLOG("deleting scheduler: " PTR ", name: %s, index: %d",
dom, dom->name, dom->list_index); sched, sched->name, sched->list_index);
dom->message_queue->disassociate(); sched->message_queue->disassociate();
rust_srv *srv = dom->srv; rust_srv *srv = sched->srv;
delete dom; delete sched;
delete srv; delete srv;
_kernel_lock.signal_all(); _kernel_lock.signal_all();
_kernel_lock.unlock(); _kernel_lock.unlock();
} }
rust_handle<rust_dom> * rust_handle<rust_scheduler> *
rust_kernel::internal_get_dom_handle(rust_dom *dom) { rust_kernel::internal_get_sched_handle(rust_scheduler *sched) {
rust_handle<rust_dom> *handle = NULL; rust_handle<rust_scheduler> *handle = NULL;
if (_dom_handles.get(dom, &handle) == false) { if (_sched_handles.get(sched, &handle) == false) {
handle = handle =
new (this) rust_handle<rust_dom>(this, dom->message_queue, dom); new (this) rust_handle<rust_scheduler>(this, sched->message_queue, sched);
_dom_handles.put(dom, handle); _sched_handles.put(sched, handle);
} }
return handle; return handle;
} }
rust_handle<rust_dom> * rust_handle<rust_scheduler> *
rust_kernel::get_dom_handle(rust_dom *dom) { rust_kernel::get_sched_handle(rust_scheduler *sched) {
_kernel_lock.lock(); _kernel_lock.lock();
rust_handle<rust_dom> *handle = internal_get_dom_handle(dom); rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
_kernel_lock.unlock(); _kernel_lock.unlock();
return handle; return handle;
} }
@@ -72,7 +72,7 @@ rust_kernel::get_task_handle(rust_task *task) {
rust_handle<rust_task> *handle = NULL; rust_handle<rust_task> *handle = NULL;
if (_task_handles.get(task, &handle) == false) { if (_task_handles.get(task, &handle) == false) {
handle = handle =
new (this) rust_handle<rust_task>(this, task->dom->message_queue, new (this) rust_handle<rust_task>(this, task->sched->message_queue,
task); task);
_task_handles.put(task, handle); _task_handles.put(task, handle);
} }
@@ -87,7 +87,7 @@ rust_kernel::get_port_handle(rust_port *port) {
if (_port_handles.get(port, &handle) == false) { if (_port_handles.get(port, &handle) == false) {
handle = handle =
new (this) rust_handle<rust_port>(this, new (this) rust_handle<rust_port>(this,
port->task->dom->message_queue, port->task->sched->message_queue,
port); port);
_port_handles.put(port, handle); _port_handles.put(port, handle);
} }
@@ -96,9 +96,8 @@ rust_kernel::get_port_handle(rust_port *port) {
} }
void void
rust_kernel::log_all_domain_state() { rust_kernel::log_all_scheduler_state() {
KLOG("log_all_domain_state"); sched->log_state();
dom->log_state();
} }
/** /**
@@ -159,7 +158,7 @@ rust_kernel::terminate_kernel_loop() {
} }
rust_kernel::~rust_kernel() { rust_kernel::~rust_kernel() {
destroy_domain(); destroy_scheduler();
terminate_kernel_loop(); terminate_kernel_loop();
@@ -175,8 +174,8 @@ rust_kernel::~rust_kernel() {
KLOG("..task handles freed"); KLOG("..task handles freed");
free_handles(_port_handles); free_handles(_port_handles);
KLOG("..port handles freed"); KLOG("..port handles freed");
free_handles(_dom_handles); free_handles(_sched_handles);
KLOG("..dom handles freed"); KLOG("..sched handles freed");
KLOG("freeing queues"); KLOG("freeing queues");
@@ -235,14 +234,14 @@ int rust_kernel::start_task_threads(int num_threads)
threads.push(thread); threads.push(thread);
} }
dom->start_main_loop(0); sched->start_main_loop(0);
while(threads.pop(&thread)) { while(threads.pop(&thread)) {
thread->join(); thread->join();
delete thread; delete thread;
} }
return dom->rval; return sched->rval;
} }
#ifdef __WIN32__ #ifdef __WIN32__
@@ -257,9 +256,9 @@ rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
NULL, err, NULL, err,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &buf, 0, NULL ); (LPTSTR) &buf, 0, NULL );
DLOG_ERR(dom, dom, "%s failed with error %ld: %s", fn, err, buf); DLOG_ERR(sched, dom, "%s failed with error %ld: %s", fn, err, buf);
LocalFree((HLOCAL)buf); LocalFree((HLOCAL)buf);
I(dom, ok); I(sched, ok);
} }
} }
#endif #endif
@@ -271,7 +270,7 @@ rust_task_thread::rust_task_thread(int id, rust_kernel *owner)
void rust_task_thread::run() void rust_task_thread::run()
{ {
owner->dom->start_main_loop(id); owner->sched->start_main_loop(id);
} }
// //
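
get_sched_handle, get_task_handle and get_port_handle above all share one shape: a lazily-populated map from a runtime object to its rust_handle, created on first request and reused afterwards. The pattern in isolation (a sketch using std::unordered_map in place of the runtime's hash_map, and assuming the handle type can be built from the bare pointer):

    #include <unordered_map>

    template <typename K, typename H>
    H *get_or_create(std::unordered_map<K *, H *> &cache, K *key) {
        auto it = cache.find(key);
        if (it != cache.end())
            return it->second;       // already have a handle: reuse it
        H *handle = new H(key);      // first request: create and remember
        cache.emplace(key, handle);
        return handle;
    }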

View file

@@ -52,7 +52,7 @@ class rust_kernel : public rust_thread {
*/ */
hash_map<rust_task *, rust_handle<rust_task> *> _task_handles; hash_map<rust_task *, rust_handle<rust_task> *> _task_handles;
hash_map<rust_port *, rust_handle<rust_port> *> _port_handles; hash_map<rust_port *, rust_handle<rust_port> *> _port_handles;
hash_map<rust_dom *, rust_handle<rust_dom> *> _dom_handles; hash_map<rust_scheduler *, rust_handle<rust_scheduler> *> _sched_handles;
template<class T> void free_handles(hash_map<T*, rust_handle<T>* > &map); template<class T> void free_handles(hash_map<T*, rust_handle<T>* > &map);
@@ -65,15 +65,16 @@ class rust_kernel : public rust_thread {
void terminate_kernel_loop(); void terminate_kernel_loop();
void pump_message_queues(); void pump_message_queues();
rust_handle<rust_dom> *internal_get_dom_handle(rust_dom *dom); rust_handle<rust_scheduler> *
internal_get_sched_handle(rust_scheduler *sched);
rust_dom *create_domain(const char *name); rust_scheduler *create_scheduler(const char *name);
void destroy_domain(); void destroy_scheduler();
array_list<rust_task_thread *> threads; array_list<rust_task_thread *> threads;
public: public:
rust_dom *dom; rust_scheduler *sched;
lock_and_signal scheduler_lock; lock_and_signal scheduler_lock;
/** /**
@@ -85,7 +86,7 @@ public:
*/ */
indexed_list<rust_message_queue> message_queues; indexed_list<rust_message_queue> message_queues;
rust_handle<rust_dom> *get_dom_handle(rust_dom *dom); rust_handle<rust_scheduler> *get_sched_handle(rust_scheduler *sched);
rust_handle<rust_task> *get_task_handle(rust_task *task); rust_handle<rust_task> *get_task_handle(rust_task *task);
rust_handle<rust_port> *get_port_handle(rust_port *port); rust_handle<rust_port> *get_port_handle(rust_port *port);
@@ -103,7 +104,7 @@ public:
void void
notify_message_enqueued(rust_message_queue *queue, rust_message *message); notify_message_enqueued(rust_message_queue *queue, rust_message *message);
void log_all_domain_state(); void log_all_scheduler_state();
void log(uint32_t level, char const *fmt, ...); void log(uint32_t level, char const *fmt, ...);
virtual ~rust_kernel(); virtual ~rust_kernel();
@@ -111,7 +112,7 @@ public:
void free(void *mem); void free(void *mem);
// FIXME: this should go away // FIXME: this should go away
inline rust_dom *get_domain() const { return dom; } inline rust_scheduler *get_scheduler() const { return sched; }
int start_task_threads(int num_threads); int start_task_threads(int num_threads);

View file

@@ -23,9 +23,9 @@ static const char * _foreground_colors[] = { "[37m",
static lock_and_signal _log_lock; static lock_and_signal _log_lock;
static uint32_t _last_thread_id; static uint32_t _last_thread_id;
rust_log::rust_log(rust_srv *srv, rust_dom *dom) : rust_log::rust_log(rust_srv *srv, rust_scheduler *sched) :
_srv(srv), _srv(srv),
_dom(dom), _sched(sched),
_use_colors(getenv("RUST_COLOR_LOG")) { _use_colors(getenv("RUST_COLOR_LOG")) {
} }
@@ -104,12 +104,12 @@ rust_log::trace_ln(rust_task *task, uint32_t level, char *message) {
uint32_t thread_id = hash((uint32_t) pthread_self()); uint32_t thread_id = hash((uint32_t) pthread_self());
#endif #endif
char prefix[BUF_BYTES] = ""; char prefix[BUF_BYTES] = "";
if (_dom && _dom->name) { if (_sched && _sched->name) {
append_string(prefix, "%04" PRIxPTR ":%.10s:", append_string(prefix, "%04" PRIxPTR ":%.10s:",
thread_id, _dom->name); thread_id, _sched->name);
} else { } else {
append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":", append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":",
thread_id, (uintptr_t) _dom); thread_id, (uintptr_t) _sched);
} }
if (task) { if (task) {
if (task->name) { if (task->name) {

View file

@@ -1,3 +1,4 @@
// -*- c++ -*-
#ifndef RUST_LOG_H #ifndef RUST_LOG_H
#define RUST_LOG_H #define RUST_LOG_H
@@ -5,30 +6,30 @@ const uint32_t log_err = 0;
const uint32_t log_note = 1; const uint32_t log_note = 1;
#define LOG(task, field, ...) \ #define LOG(task, field, ...) \
DLOG_LVL(log_note, task, task->dom, field, __VA_ARGS__) DLOG_LVL(log_note, task, task->sched, field, __VA_ARGS__)
#define LOG_ERR(task, field, ...) \ #define LOG_ERR(task, field, ...) \
DLOG_LVL(log_err, task, task->dom, field, __VA_ARGS__) DLOG_LVL(log_err, task, task->sched, field, __VA_ARGS__)
#define DLOG(dom, field, ...) \ #define DLOG(sched, field, ...) \
DLOG_LVL(log_note, NULL, dom, field, __VA_ARGS__) DLOG_LVL(log_note, NULL, sched, field, __VA_ARGS__)
#define DLOG_ERR(dom, field, ...) \ #define DLOG_ERR(sched, field, ...) \
DLOG_LVL(log_err, NULL, dom, field, __VA_ARGS__) DLOG_LVL(log_err, NULL, sched, field, __VA_ARGS__)
#define LOGPTR(dom, msg, ptrval) \ #define LOGPTR(sched, msg, ptrval) \
DLOG_LVL(log_note, NULL, dom, mem, "%s 0x%" PRIxPTR, msg, ptrval) DLOG_LVL(log_note, NULL, sched, mem, "%s 0x%" PRIxPTR, msg, ptrval)
#define DLOG_LVL(lvl, task, dom, field, ...) \ #define DLOG_LVL(lvl, task, sched, field, ...) \
do { \ do { \
rust_dom* _d_ = dom; \ rust_scheduler* _d_ = sched; \
if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \ if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \
_d_->log(task, lvl, __VA_ARGS__); \ _d_->log(task, lvl, __VA_ARGS__); \
} \ } \
} while (0) } while (0)
struct rust_dom; struct rust_scheduler;
struct rust_task; struct rust_task;
class rust_log { class rust_log {
public: public:
rust_log(rust_srv *srv, rust_dom *dom); rust_log(rust_srv *srv, rust_scheduler *sched);
virtual ~rust_log(); virtual ~rust_log();
enum ansi_color { enum ansi_color {
@@ -53,7 +54,7 @@ public:
private: private:
rust_srv *_srv; rust_srv *_srv;
rust_dom *_dom; rust_scheduler *_sched;
bool _use_labels; bool _use_labels;
bool _use_colors; bool _use_colors;
void trace_ln(rust_task *task, char *message); void trace_ln(rust_task *task, char *message);
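
After the rename, the logging macros take the scheduler where they used to take the domain: DLOG_LVL binds it to the local _d_, then checks the compile-time per-field level and the scheduler's runtime log_lvl before calling its log() method. For reference, call sites as they appear elsewhere in this diff (task and sched in scope there):

    LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this);
    DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
    DLOG_ERR(sched, dom, "%s failed with error %ld: %s", fn, err, buf);
    LOGPTR(sched, "new stk", (uintptr_t)stk);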

View file

@@ -112,7 +112,7 @@ void data_message::kernel_process() {
rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel) rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel)
: region(srv, true), : region(srv, true),
kernel(kernel), kernel(kernel),
dom_handle(NULL) { sched_handle(NULL) {
// Nop. // Nop.
} }

View file

@@ -93,26 +93,26 @@ class rust_message_queue : public lock_free_queue<rust_message*>,
public: public:
memory_region region; memory_region region;
rust_kernel *kernel; rust_kernel *kernel;
rust_handle<rust_dom> *dom_handle; rust_handle<rust_scheduler> *sched_handle;
int32_t list_index; int32_t list_index;
rust_message_queue(rust_srv *srv, rust_kernel *kernel); rust_message_queue(rust_srv *srv, rust_kernel *kernel);
void associate(rust_handle<rust_dom> *dom_handle) { void associate(rust_handle<rust_scheduler> *sched_handle) {
this->dom_handle = dom_handle; this->sched_handle = sched_handle;
} }
/** /**
* The Rust domain relinquishes control to the Rust kernel. * The Rust domain relinquishes control to the Rust kernel.
*/ */
void disassociate() { void disassociate() {
this->dom_handle = NULL; this->sched_handle = NULL;
} }
/** /**
* Checks if a Rust domain is responsible for draining the message queue. * Checks if a Rust domain is responsible for draining the message queue.
*/ */
bool is_associated() { bool is_associated() {
return this->dom_handle != NULL; return this->sched_handle != NULL;
} }
void enqueue(rust_message* message) { void enqueue(rust_message* message) {

View file

@@ -3,7 +3,7 @@
#include "rust_internal.h" #include "rust_internal.h"
#include "globals.h" #include "globals.h"
rust_dom::rust_dom(rust_kernel *kernel, rust_scheduler::rust_scheduler(rust_kernel *kernel,
rust_message_queue *message_queue, rust_srv *srv, rust_message_queue *message_queue, rust_srv *srv,
const char *name) : const char *name) :
interrupt_flag(0), interrupt_flag(0),
@@ -32,8 +32,8 @@ rust_dom::rust_dom(rust_kernel *kernel,
root_task = create_task(NULL, name); root_task = create_task(NULL, name);
} }
rust_dom::~rust_dom() { rust_scheduler::~rust_scheduler() {
DLOG(this, dom, "~rust_dom %s @0x%" PRIxPTR, name, (uintptr_t)this); DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);
newborn_tasks.delete_all(); newborn_tasks.delete_all();
running_tasks.delete_all(); running_tasks.delete_all();
@@ -45,7 +45,7 @@ rust_dom::~rust_dom() {
} }
void void
rust_dom::activate(rust_task *task) { rust_scheduler::activate(rust_task *task) {
context ctx; context ctx;
task->ctx.next = &ctx; task->ctx.next = &ctx;
@@ -57,7 +57,7 @@ rust_dom::activate(rust_task *task) {
} }
void void
rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...) { rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
char buf[BUF_BYTES]; char buf[BUF_BYTES];
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
@@ -67,7 +67,7 @@ rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...) {
} }
void void
rust_dom::fail() { rust_scheduler::fail() {
log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed", log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
name, this); name, this);
I(this, rval == 0); I(this, rval == 0);
@@ -75,7 +75,7 @@ rust_dom::fail() {
} }
size_t size_t
rust_dom::number_of_live_tasks() { rust_scheduler::number_of_live_tasks() {
return running_tasks.length() + blocked_tasks.length(); return running_tasks.length() + blocked_tasks.length();
} }
@@ -83,7 +83,7 @@ rust_dom::number_of_live_tasks() {
* Delete any dead tasks. * Delete any dead tasks.
*/ */
void void
rust_dom::reap_dead_tasks() { rust_scheduler::reap_dead_tasks() {
I(this, kernel->scheduler_lock.lock_held_by_current_thread()); I(this, kernel->scheduler_lock.lock_held_by_current_thread());
for (size_t i = 0; i < dead_tasks.length(); ) { for (size_t i = 0; i < dead_tasks.length(); ) {
rust_task *task = dead_tasks[i]; rust_task *task = dead_tasks[i];
@@ -104,7 +104,7 @@ rust_dom::reap_dead_tasks() {
/** /**
* Drains and processes incoming pending messages. * Drains and processes incoming pending messages.
*/ */
void rust_dom::drain_incoming_message_queue(bool process) { void rust_scheduler::drain_incoming_message_queue(bool process) {
rust_message *message; rust_message *message;
while (message_queue->dequeue(&message)) { while (message_queue->dequeue(&message)) {
DLOG(this, comm, "<== receiving \"%s\" " PTR, DLOG(this, comm, "<== receiving \"%s\" " PTR,
@@ -124,7 +124,7 @@ void rust_dom::drain_incoming_message_queue(bool process) {
* Returns NULL if no tasks can be scheduled. * Returns NULL if no tasks can be scheduled.
*/ */
rust_task * rust_task *
rust_dom::schedule_task() { rust_scheduler::schedule_task() {
I(this, this); I(this, this);
// FIXME: in the face of failing tasks, this is not always right. // FIXME: in the face of failing tasks, this is not always right.
// I(this, n_live_tasks() > 0); // I(this, n_live_tasks() > 0);
@@ -142,7 +142,7 @@ rust_dom::schedule_task() {
} }
void void
rust_dom::log_state() { rust_scheduler::log_state() {
if (log_rt_task < log_note) return; if (log_rt_task < log_note) return;
if (!running_tasks.is_empty()) { if (!running_tasks.is_empty()) {
@@ -182,7 +182,7 @@ rust_dom::log_state() {
* drop to zero. * drop to zero.
*/ */
int int
rust_dom::start_main_loop(int id) { rust_scheduler::start_main_loop(int id) {
kernel->scheduler_lock.lock(); kernel->scheduler_lock.lock();
// Make sure someone is watching, to pull us out of infinite loops. // Make sure someone is watching, to pull us out of infinite loops.
@@ -282,12 +282,12 @@ rust_dom::start_main_loop(int id) {
} }
rust_crate_cache * rust_crate_cache *
rust_dom::get_cache() { rust_scheduler::get_cache() {
return &cache; return &cache;
} }
rust_task * rust_task *
rust_dom::create_task(rust_task *spawner, const char *name) { rust_scheduler::create_task(rust_task *spawner, const char *name) {
rust_task *task = rust_task *task =
new (this->kernel) rust_task (this, &newborn_tasks, spawner, name); new (this->kernel) rust_task (this, &newborn_tasks, spawner, name);
DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s", DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",

View file

@@ -1,7 +1,7 @@
#ifndef RUST_DOM_H #ifndef RUST_SCHEDULER_H
#define RUST_DOM_H #define RUST_SCHEDULER_H
struct rust_dom; struct rust_scheduler;
class class
rust_crate_cache rust_crate_cache
@@ -18,15 +18,15 @@ private:
public: public:
rust_dom *dom; rust_scheduler *sched;
size_t idx; size_t idx;
rust_crate_cache(rust_dom *dom); rust_crate_cache(rust_scheduler *sched);
~rust_crate_cache(); ~rust_crate_cache();
void flush(); void flush();
}; };
struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom> struct rust_scheduler : public kernel_owned<rust_scheduler>, rc_base<rust_scheduler>
{ {
// Fields known to the compiler: // Fields known to the compiler:
uintptr_t interrupt_flag; uintptr_t interrupt_flag;
@@ -64,10 +64,10 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
// Only a pointer to 'name' is kept, so it must live as long as this // Only a pointer to 'name' is kept, so it must live as long as this
// domain. // domain.
rust_dom(rust_kernel *kernel, rust_scheduler(rust_kernel *kernel,
rust_message_queue *message_queue, rust_srv *srv, rust_message_queue *message_queue, rust_srv *srv,
const char *name); const char *name);
~rust_dom(); ~rust_scheduler();
void activate(rust_task *task); void activate(rust_task *task);
void log(rust_task *task, uint32_t level, char const *fmt, ...); void log(rust_task *task, uint32_t level, char const *fmt, ...);
rust_log & get_log(); rust_log & get_log();
@@ -89,7 +89,7 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
}; };
inline rust_log & inline rust_log &
rust_dom::get_log() { rust_scheduler::get_log() {
return _log; return _log;
} }
@@ -104,4 +104,4 @@ rust_dom::get_log() {
// End: // End:
// //
#endif /* RUST_DOM_H */ #endif /* RUST_SCHEDULER_H */

View file

@@ -26,10 +26,10 @@ new_stk(rust_task *task, size_t minsz)
minsz = min_stk_bytes; minsz = min_stk_bytes;
size_t sz = sizeof(stk_seg) + minsz; size_t sz = sizeof(stk_seg) + minsz;
stk_seg *stk = (stk_seg *)task->malloc(sz); stk_seg *stk = (stk_seg *)task->malloc(sz);
LOGPTR(task->dom, "new stk", (uintptr_t)stk); LOGPTR(task->sched, "new stk", (uintptr_t)stk);
memset(stk, 0, sizeof(stk_seg)); memset(stk, 0, sizeof(stk_seg));
stk->limit = (uintptr_t) &stk->data[minsz]; stk->limit = (uintptr_t) &stk->data[minsz];
LOGPTR(task->dom, "stk limit", stk->limit); LOGPTR(task->sched, "stk limit", stk->limit);
stk->valgrind_id = stk->valgrind_id =
VALGRIND_STACK_REGISTER(&stk->data[0], VALGRIND_STACK_REGISTER(&stk->data[0],
&stk->data[minsz]); &stk->data[minsz]);
@@ -40,7 +40,7 @@ static void
del_stk(rust_task *task, stk_seg *stk) del_stk(rust_task *task, stk_seg *stk)
{ {
VALGRIND_STACK_DEREGISTER(stk->valgrind_id); VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
LOGPTR(task->dom, "freeing stk segment", (uintptr_t)stk); LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
task->free(stk); task->free(stk);
} }
@@ -52,16 +52,16 @@ del_stk(rust_task *task, stk_seg *stk)
size_t const n_callee_saves = 4; size_t const n_callee_saves = 4;
size_t const callee_save_fp = 0; size_t const callee_save_fp = 0;
rust_task::rust_task(rust_dom *dom, rust_task_list *state, rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
rust_task *spawner, const char *name) : rust_task *spawner, const char *name) :
maybe_proxy<rust_task>(this), maybe_proxy<rust_task>(this),
stk(NULL), stk(NULL),
runtime_sp(0), runtime_sp(0),
rust_sp(0), rust_sp(0),
gc_alloc_chain(0), gc_alloc_chain(0),
dom(dom), sched(sched),
cache(NULL), cache(NULL),
kernel(dom->kernel), kernel(sched->kernel),
name(name), name(name),
state(state), state(state),
cond(NULL), cond(NULL),
@@ -71,11 +71,11 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state,
rendezvous_ptr(0), rendezvous_ptr(0),
handle(NULL), handle(NULL),
active(false), active(false),
local_region(&dom->srv->local_region), local_region(&sched->srv->local_region),
synchronized_region(&dom->srv->synchronized_region) synchronized_region(&sched->srv->synchronized_region)
{ {
LOGPTR(dom, "new task", (uintptr_t)this); LOGPTR(sched, "new task", (uintptr_t)this);
DLOG(dom, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this); DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this);
stk = new_stk(this, 0); stk = new_stk(this, 0);
rust_sp = stk->limit; rust_sp = stk->limit;
@@ -87,33 +87,13 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state,
rust_task::~rust_task() rust_task::~rust_task()
{ {
DLOG(dom, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d", DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d",
name, (uintptr_t)this, ref_count); name, (uintptr_t)this, ref_count);
/*
for (uintptr_t fp = get_fp(); fp; fp = get_previous_fp(fp)) {
frame_glue_fns *glue_fns = get_frame_glue_fns(fp);
DLOG(dom, task,
"~rust_task, frame fp=0x%" PRIxPTR ", glue_fns=0x%" PRIxPTR,
fp, glue_fns);
if (glue_fns) {
DLOG(dom, task,
"~rust_task, mark_glue=0x%" PRIxPTR,
glue_fns->mark_glue);
DLOG(dom, task,
"~rust_task, drop_glue=0x%" PRIxPTR,
glue_fns->drop_glue);
DLOG(dom, task,
"~rust_task, reloc_glue=0x%" PRIxPTR,
glue_fns->reloc_glue);
}
}
*/
/* FIXME: tighten this up, there are some more /* FIXME: tighten this up, there are some more
assertions that hold at task-lifecycle events. */ assertions that hold at task-lifecycle events. */
I(dom, ref_count == 0 || I(sched, ref_count == 0 ||
(ref_count == 1 && this == dom->root_task)); (ref_count == 1 && this == sched->root_task));
del_stk(this, stk); del_stk(this, stk);
} }
@@ -147,7 +127,7 @@ void task_start_wrapper(spawn_args *a)
// This is duplicated from upcall_exit, which is probably dead code by // This is duplicated from upcall_exit, which is probably dead code by
// now. // now.
LOG(task, task, "task ref_count: %d", task->ref_count); LOG(task, task, "task ref_count: %d", task->ref_count);
A(task->dom, task->ref_count >= 0, A(task->sched, task->ref_count >= 0,
"Task ref_count should not be negative on exit!"); "Task ref_count should not be negative on exit!");
task->die(); task->die();
task->notify_tasks_waiting_to_join(); task->notify_tasks_waiting_to_join();
@@ -160,10 +140,10 @@ void
rust_task::start(uintptr_t spawnee_fn, rust_task::start(uintptr_t spawnee_fn,
uintptr_t args) uintptr_t args)
{ {
LOGPTR(dom, "from spawnee", spawnee_fn); LOGPTR(sched, "from spawnee", spawnee_fn);
I(dom, stk->data != NULL); I(sched, stk->data != NULL);
I(dom, !kernel->scheduler_lock.lock_held_by_current_thread()); I(sched, !kernel->scheduler_lock.lock_held_by_current_thread());
scoped_lock with(kernel->scheduler_lock); scoped_lock with(kernel->scheduler_lock);
@@ -182,7 +162,7 @@ rust_task::start(uintptr_t spawnee_fn,
ctx.call((void *)task_start_wrapper, a, sp); ctx.call((void *)task_start_wrapper, a, sp);
yield_timer.reset(0); yield_timer.reset(0);
transition(&dom->newborn_tasks, &dom->running_tasks); transition(&sched->newborn_tasks, &sched->running_tasks);
} }
void void
@@ -227,8 +207,8 @@ rust_task::kill() {
// Unblock the task so it can unwind. // Unblock the task so it can unwind.
unblock(); unblock();
if (this == dom->root_task) if (this == sched->root_task)
dom->fail(); sched->fail();
LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this); LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this);
// run_on_resume(rust_unwind_glue); // run_on_resume(rust_unwind_glue);
@@ -237,15 +217,15 @@ rust_task::kill() {
void void
rust_task::fail(size_t nargs) { rust_task::fail(size_t nargs) {
// See note in ::kill() regarding who should call this. // See note in ::kill() regarding who should call this.
DLOG(dom, task, "task %s @0x%" PRIxPTR " failing", name, this); DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this);
backtrace(); backtrace();
// Unblock the task so it can unwind. // Unblock the task so it can unwind.
unblock(); unblock();
if (this == dom->root_task) if (this == sched->root_task)
dom->fail(); sched->fail();
// run_after_return(nargs, rust_unwind_glue); // run_after_return(nargs, rust_unwind_glue);
if (supervisor) { if (supervisor) {
DLOG(dom, task, DLOG(sched, task,
"task %s @0x%" PRIxPTR "task %s @0x%" PRIxPTR
" propagating failure to supervisor %s @0x%" PRIxPTR, " propagating failure to supervisor %s @0x%" PRIxPTR,
name, this, supervisor->name, supervisor); name, this, supervisor->name, supervisor);
@@ -259,14 +239,14 @@ void
rust_task::gc(size_t nargs) rust_task::gc(size_t nargs)
{ {
// FIXME: not presently implemented; was broken by rustc. // FIXME: not presently implemented; was broken by rustc.
DLOG(dom, task, DLOG(sched, task,
"task %s @0x%" PRIxPTR " garbage collecting", name, this); "task %s @0x%" PRIxPTR " garbage collecting", name, this);
} }
void void
rust_task::unsupervise() rust_task::unsupervise()
{ {
DLOG(dom, task, DLOG(sched, task,
"task %s @0x%" PRIxPTR "task %s @0x%" PRIxPTR
" disconnecting from supervisor %s @0x%" PRIxPTR, " disconnecting from supervisor %s @0x%" PRIxPTR,
name, this, supervisor->name, supervisor); name, this, supervisor->name, supervisor);
@@ -302,13 +282,13 @@ rust_task::get_frame_glue_fns(uintptr_t fp) {
bool bool
rust_task::running() rust_task::running()
{ {
return state == &dom->running_tasks; return state == &sched->running_tasks;
} }
bool bool
rust_task::blocked() rust_task::blocked()
{ {
return state == &dom->blocked_tasks; return state == &sched->blocked_tasks;
} }
bool bool
@@ -320,13 +300,13 @@ rust_task::blocked_on(rust_cond *on)
bool bool
rust_task::dead() rust_task::dead()
{ {
return state == &dom->dead_tasks; return state == &sched->dead_tasks;
} }
void void
rust_task::link_gc(gc_alloc *gcm) { rust_task::link_gc(gc_alloc *gcm) {
I(dom, gcm->prev == NULL); I(sched, gcm->prev == NULL);
I(dom, gcm->next == NULL); I(sched, gcm->next == NULL);
gcm->prev = NULL; gcm->prev = NULL;
gcm->next = gc_alloc_chain; gcm->next = gc_alloc_chain;
gc_alloc_chain = gcm; gc_alloc_chain = gcm;
@@ -361,7 +341,7 @@ rust_task::malloc(size_t sz, type_desc *td)
return mem; return mem;
if (td) { if (td) {
gc_alloc *gcm = (gc_alloc*) mem; gc_alloc *gcm = (gc_alloc*) mem;
DLOG(dom, task, "task %s @0x%" PRIxPTR DLOG(sched, task, "task %s @0x%" PRIxPTR
" allocated %d GC bytes = 0x%" PRIxPTR, " allocated %d GC bytes = 0x%" PRIxPTR,
name, (uintptr_t)this, sz, gcm); name, (uintptr_t)this, sz, gcm);
memset((void*) gcm, 0, sizeof(gc_alloc)); memset((void*) gcm, 0, sizeof(gc_alloc));
@@ -384,7 +364,7 @@ rust_task::realloc(void *data, size_t sz, bool is_gc)
unlink_gc(gcm); unlink_gc(gcm);
sz += sizeof(gc_alloc); sz += sizeof(gc_alloc);
gcm = (gc_alloc*) realloc((void*)gcm, sz, memory_region::LOCAL); gcm = (gc_alloc*) realloc((void*)gcm, sz, memory_region::LOCAL);
DLOG(dom, task, "task %s @0x%" PRIxPTR DLOG(sched, task, "task %s @0x%" PRIxPTR
" reallocated %d GC bytes = 0x%" PRIxPTR, " reallocated %d GC bytes = 0x%" PRIxPTR,
name, (uintptr_t)this, sz, gcm); name, (uintptr_t)this, sz, gcm);
if (!gcm) if (!gcm)
@@ -406,7 +386,7 @@ rust_task::free(void *p, bool is_gc)
if (is_gc) { if (is_gc) {
gc_alloc *gcm = (gc_alloc*)(((char *)p) - sizeof(gc_alloc)); gc_alloc *gcm = (gc_alloc*)(((char *)p) - sizeof(gc_alloc));
unlink_gc(gcm); unlink_gc(gcm);
DLOG(dom, mem, DLOG(sched, mem,
"task %s @0x%" PRIxPTR " freeing GC memory = 0x%" PRIxPTR, "task %s @0x%" PRIxPTR " freeing GC memory = 0x%" PRIxPTR,
name, (uintptr_t)this, gcm); name, (uintptr_t)this, gcm);
free(gcm, memory_region::LOCAL); free(gcm, memory_region::LOCAL);
@@ -417,11 +397,11 @@ rust_task::free(void *p, bool is_gc)
void void
rust_task::transition(rust_task_list *src, rust_task_list *dst) { rust_task::transition(rust_task_list *src, rust_task_list *dst) {
I(dom, kernel->scheduler_lock.lock_held_by_current_thread()); I(sched, kernel->scheduler_lock.lock_held_by_current_thread());
DLOG(dom, task, DLOG(sched, task,
"task %s " PTR " state change '%s' -> '%s' while in '%s'", "task %s " PTR " state change '%s' -> '%s' while in '%s'",
name, (uintptr_t)this, src->name, dst->name, state->name); name, (uintptr_t)this, src->name, dst->name, state->name);
I(dom, state == src); I(sched, state == src);
src->remove(this); src->remove(this);
dst->append(this); dst->append(this);
state = dst; state = dst;
@@ -431,30 +411,30 @@ void
rust_task::block(rust_cond *on, const char* name) { rust_task::block(rust_cond *on, const char* name) {
LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR, LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR,
(uintptr_t) on, (uintptr_t) cond); (uintptr_t) on, (uintptr_t) cond);
A(dom, cond == NULL, "Cannot block an already blocked task."); A(sched, cond == NULL, "Cannot block an already blocked task.");
A(dom, on != NULL, "Cannot block on a NULL object."); A(sched, on != NULL, "Cannot block on a NULL object.");
transition(&dom->running_tasks, &dom->blocked_tasks); transition(&sched->running_tasks, &sched->blocked_tasks);
cond = on; cond = on;
cond_name = name; cond_name = name;
} }
void void
rust_task::wakeup(rust_cond *from) { rust_task::wakeup(rust_cond *from) {
A(dom, cond != NULL, "Cannot wake up unblocked task."); A(sched, cond != NULL, "Cannot wake up unblocked task.");
LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR, LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR,
(uintptr_t) cond, (uintptr_t) from); (uintptr_t) cond, (uintptr_t) from);
A(dom, cond == from, "Cannot wake up blocked task on wrong condition."); A(sched, cond == from, "Cannot wake up blocked task on wrong condition.");
transition(&dom->blocked_tasks, &dom->running_tasks); transition(&sched->blocked_tasks, &sched->running_tasks);
I(dom, cond == from); I(sched, cond == from);
cond = NULL; cond = NULL;
cond_name = "none"; cond_name = "none";
} }
void void
rust_task::die() { rust_task::die() {
transition(&dom->running_tasks, &dom->dead_tasks); transition(&sched->running_tasks, &sched->dead_tasks);
} }
void void
@@ -467,8 +447,8 @@ rust_crate_cache *
rust_task::get_crate_cache() rust_task::get_crate_cache()
{ {
if (!cache) { if (!cache) {
DLOG(dom, task, "fetching cache for current crate"); DLOG(sched, task, "fetching cache for current crate");
cache = dom->get_cache(); cache = sched->get_cache();
} }
return cache; return cache;
} }
@@ -486,7 +466,7 @@ rust_task::backtrace() {
rust_handle<rust_task> * rust_handle<rust_task> *
rust_task::get_handle() { rust_task::get_handle() {
if (handle == NULL) { if (handle == NULL) {
handle = dom->kernel->get_task_handle(this); handle = sched->kernel->get_task_handle(this);
} }
return handle; return handle;
} }
@@ -503,7 +483,7 @@ rust_task::malloc(size_t size, memory_region::memory_region_type type) {
} else if (type == memory_region::SYNCHRONIZED) { } else if (type == memory_region::SYNCHRONIZED) {
return synchronized_region.malloc(size); return synchronized_region.malloc(size);
} }
I(dom, false); I(sched, false);
return NULL; return NULL;
} }
@@ -535,7 +515,7 @@ rust_task::realloc(void *mem, size_t size,
void void
rust_task::free(void *mem, memory_region::memory_region_type type) { rust_task::free(void *mem, memory_region::memory_region_type type) {
DLOG(dom, mem, "rust_task::free(0x%" PRIxPTR ")", mem); DLOG(sched, mem, "rust_task::free(0x%" PRIxPTR ")", mem);
if (type == memory_region::LOCAL) { if (type == memory_region::LOCAL) {
local_region.free(mem); local_region.free(mem);
} else if (type == memory_region::SYNCHRONIZED) { } else if (type == memory_region::SYNCHRONIZED) {
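
transition() above is the heart of the task lifecycle these renames thread through: a task lives on exactly one of the scheduler's lists, and every state change asserts the expected source list before moving it. A minimal standalone model (illustrative; tiny_task is a made-up name):

    #include <cstdlib>

    enum task_state { NEWBORN, RUNNING, BLOCKED, DEAD };

    struct tiny_task {
        task_state state = NEWBORN;

        // Mirrors I(sched, state == src) in rust_task::transition().
        void transition(task_state src, task_state dst) {
            if (state != src) abort();
            state = dst;
        }
        void start()  { transition(NEWBORN, RUNNING); }
        void block()  { transition(RUNNING, BLOCKED); }
        void wakeup() { transition(BLOCKED, RUNNING); }
        void die()    { transition(RUNNING, DEAD); }
    };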

View file

@@ -43,7 +43,7 @@ rust_task : public maybe_proxy<rust_task>,
uintptr_t runtime_sp; // Runtime sp while task running. uintptr_t runtime_sp; // Runtime sp while task running.
uintptr_t rust_sp; // Saved sp when not running. uintptr_t rust_sp; // Saved sp when not running.
gc_alloc *gc_alloc_chain; // Linked list of GC allocations. gc_alloc *gc_alloc_chain; // Linked list of GC allocations.
rust_dom *dom; rust_scheduler *sched;
rust_crate_cache *cache; rust_crate_cache *cache;
// Fields known only to the runtime. // Fields known only to the runtime.
@@ -83,7 +83,7 @@ rust_task : public maybe_proxy<rust_task>,
memory_region synchronized_region; memory_region synchronized_region;
// Only a pointer to 'name' is kept, so it must live as long as this task. // Only a pointer to 'name' is kept, so it must live as long as this task.
rust_task(rust_dom *dom, rust_task(rust_scheduler *sched,
rust_task_list *state, rust_task_list *state,
rust_task *spawner, rust_task *spawner,
const char *name); const char *name);
@@ -111,8 +111,8 @@ rust_task : public maybe_proxy<rust_task>,
void die(); void die();
void unblock(); void unblock();
void check_active() { I(dom, dom->curr_task == this); } void check_active() { I(sched, sched->curr_task == this); }
void check_suspended() { I(dom, dom->curr_task != this); } void check_suspended() { I(sched, sched->curr_task != this); }
// Print a backtrace, if the "bt" logging option is on. // Print a backtrace, if the "bt" logging option is on.
void backtrace(); void backtrace();

View file

@@ -1,16 +1,16 @@
#include "rust_internal.h" #include "rust_internal.h"
rust_task_list::rust_task_list (rust_dom *dom, const char* name) : rust_task_list::rust_task_list (rust_scheduler *sched, const char* name) :
dom(dom), name(name) { sched(sched), name(name) {
// Nop; // Nop;
} }
void void
rust_task_list::delete_all() { rust_task_list::delete_all() {
DLOG(dom, task, "deleting all %s tasks", name); DLOG(sched, task, "deleting all %s tasks", name);
while (is_empty() == false) { while (is_empty() == false) {
rust_task *task = pop_value(); rust_task *task = pop_value();
DLOG(dom, task, "deleting task " PTR, task); DLOG(sched, task, "deleting task " PTR, task);
delete task; delete task;
} }
} }

View file

@@ -1,4 +1,4 @@
// -*- c++-mode -*- // -*- c++ -*-
#ifndef RUST_TASK_LIST_H #ifndef RUST_TASK_LIST_H
#define RUST_TASK_LIST_H #define RUST_TASK_LIST_H
@@ -8,9 +8,9 @@
class rust_task_list : public indexed_list<rust_task>, class rust_task_list : public indexed_list<rust_task>,
public kernel_owned<rust_task_list> { public kernel_owned<rust_task_list> {
public: public:
rust_dom *dom; rust_scheduler *sched;
const char* name; const char* name;
rust_task_list (rust_dom *dom, const char* name); rust_task_list (rust_scheduler *sched, const char* name);
void delete_all(); void delete_all();
}; };

View file

@@ -29,8 +29,8 @@ static void *
timer_loop(void *ptr) { timer_loop(void *ptr) {
// We were handed the rust_timer that owns us. // We were handed the rust_timer that owns us.
rust_timer *timer = (rust_timer *)ptr; rust_timer *timer = (rust_timer *)ptr;
rust_dom *dom = timer->dom; rust_scheduler *sched = timer->sched;
DLOG(dom, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer); DLOG(sched, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer);
size_t ms = TIME_SLICE_IN_MS; size_t ms = TIME_SLICE_IN_MS;
while (!timer->exit_flag) { while (!timer->exit_flag) {
@@ -39,10 +39,10 @@ timer_loop(void *ptr) {
#else #else
usleep(ms * 1000); usleep(ms * 1000);
#endif #endif
DLOG(dom, timer, "timer 0x%" PRIxPTR DLOG(sched, timer, "timer 0x%" PRIxPTR
" interrupting domain 0x%" PRIxPTR, (uintptr_t) timer, " interrupting schedain 0x%" PRIxPTR, (uintptr_t) timer,
(uintptr_t) dom); (uintptr_t) sched);
dom->interrupt_flag = 1; sched->interrupt_flag = 1;
} }
#if defined(__WIN32__) #if defined(__WIN32__)
ExitThread(0); ExitThread(0);
@@ -52,12 +52,12 @@ timer_loop(void *ptr) {
return 0; return 0;
} }
rust_timer::rust_timer(rust_dom *dom) : rust_timer::rust_timer(rust_scheduler *sched) :
dom(dom), exit_flag(0) { sched(sched), exit_flag(0) {
DLOG(dom, timer, "creating timer for domain 0x%" PRIxPTR, dom); DLOG(sched, timer, "creating timer for domain 0x%" PRIxPTR, sched);
#if defined(__WIN32__) #if defined(__WIN32__)
thread = CreateThread(NULL, 0, timer_loop, this, 0, NULL); thread = CreateThread(NULL, 0, timer_loop, this, 0, NULL);
dom->kernel->win32_require("CreateThread", thread != NULL); sched->kernel->win32_require("CreateThread", thread != NULL);
if (RUNNING_ON_VALGRIND) if (RUNNING_ON_VALGRIND)
Sleep(10); Sleep(10);
#else #else
@ -70,7 +70,7 @@ rust_timer::rust_timer(rust_dom *dom) :
rust_timer::~rust_timer() { rust_timer::~rust_timer() {
exit_flag = 1; exit_flag = 1;
#if defined(__WIN32__) #if defined(__WIN32__)
dom->kernel->win32_require("WaitForSingleObject", sched->kernel->win32_require("WaitForSingleObject",
WaitForSingleObject(thread, INFINITE) == WaitForSingleObject(thread, INFINITE) ==
WAIT_OBJECT_0); WAIT_OBJECT_0);
#else #else

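The timer above drives preemption: a helper thread sleeps for one time slice, sets the scheduler's interrupt_flag, and repeats until exit_flag is raised, and the destructor joins the thread on shutdown. The real code uses raw Win32/pthread threads; the sketch below substitutes portable C++11 std::thread and std::atomic to show the same loop shape:

#include <atomic>
#include <chrono>
#include <thread>

struct sched_stub {
    std::atomic<int> interrupt_flag{0};   // polled by the scheduler loop
};

struct preempt_timer {
    sched_stub *sched;
    std::atomic<bool> exit_flag{false};
    std::thread thread;

    explicit preempt_timer(sched_stub *s)
        : sched(s), thread([this] {
              // Wake every time slice and request a preemption,
              // as timer_loop does above.
              while (!exit_flag) {
                  std::this_thread::sleep_for(std::chrono::milliseconds(10));
                  sched->interrupt_flag = 1;
              }
          }) {}

    ~preempt_timer() {
        exit_flag = true;     // same shutdown handshake as ~rust_timer
        thread.join();
    }
};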

@ -23,7 +23,7 @@ str_buf(rust_task *task, rust_str *s);
extern "C" void extern "C" void
upcall_grow_task(rust_task *task, size_t n_frame_bytes) { upcall_grow_task(rust_task *task, size_t n_frame_bytes) {
I(task->dom, false); I(task->sched, false);
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
task->grow(n_frame_bytes); task->grow(n_frame_bytes);
} }
@ -31,44 +31,44 @@ upcall_grow_task(rust_task *task, size_t n_frame_bytes) {
extern "C" CDECL extern "C" CDECL
void upcall_log_int(rust_task *task, uint32_t level, int32_t i) { void upcall_log_int(rust_task *task, uint32_t level, int32_t i) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level) if (task->sched->log_lvl >= level)
task->dom->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")", task->sched->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")",
i, i); i, i);
} }
extern "C" CDECL extern "C" CDECL
void upcall_log_float(rust_task *task, uint32_t level, float f) { void upcall_log_float(rust_task *task, uint32_t level, float f) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level) if (task->sched->log_lvl >= level)
task->dom->log(task, level, "rust: %12.12f", f); task->sched->log(task, level, "rust: %12.12f", f);
} }
extern "C" CDECL extern "C" CDECL
void upcall_log_double(rust_task *task, uint32_t level, double *f) { void upcall_log_double(rust_task *task, uint32_t level, double *f) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level) if (task->sched->log_lvl >= level)
task->dom->log(task, level, "rust: %12.12f", *f); task->sched->log(task, level, "rust: %12.12f", *f);
} }
extern "C" CDECL void extern "C" CDECL void
upcall_log_str(rust_task *task, uint32_t level, rust_str *str) { upcall_log_str(rust_task *task, uint32_t level, rust_str *str) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
if (task->dom->log_lvl >= level) { if (task->sched->log_lvl >= level) {
const char *c = str_buf(task, str); const char *c = str_buf(task, str);
task->dom->log(task, level, "rust: %s", c); task->sched->log(task, level, "rust: %s", c);
} }
} }
extern "C" CDECL void extern "C" CDECL void
upcall_trace_word(rust_task *task, uintptr_t i) { upcall_trace_word(rust_task *task, uintptr_t i) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
task->dom->log(task, 2, "trace: 0x%" PRIxPTR "", i, i, (char) i); task->sched->log(task, 2, "trace: 0x%" PRIxPTR "", i, i, (char) i);
} }
extern "C" CDECL void extern "C" CDECL void
upcall_trace_str(rust_task *task, char const *c) { upcall_trace_str(rust_task *task, char const *c) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
task->dom->log(task, 2, "trace: %s", c); task->sched->log(task, 2, "trace: %s", c);
} }
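Every log upcall above shares one gate: compare the requested level against the scheduler's log_lvl before formatting, so disabled log levels cost little more than a branch. A simplified sketch with the gate folded into log() itself (hypothetical signature, not the runtime's API):

#include <cstdarg>
#include <cstdint>
#include <cstdio>

struct sched_stub {
    uint32_t log_lvl;

    void log(uint32_t level, const char *fmt, ...) {
        if (log_lvl < level)
            return;            // early-out: skip all formatting work
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
    }
};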
extern "C" CDECL rust_port* extern "C" CDECL rust_port*
@ -85,7 +85,7 @@ upcall_del_port(rust_task *task, rust_port *port) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
LOG(task, comm, "upcall del_port(0x%" PRIxPTR ")", (uintptr_t) port); LOG(task, comm, "upcall del_port(0x%" PRIxPTR ")", (uintptr_t) port);
I(task->dom, !port->ref_count); I(task->sched, !port->ref_count);
delete port; delete port;
} }
@ -95,11 +95,11 @@ upcall_del_port(rust_task *task, rust_port *port) {
extern "C" CDECL rust_chan* extern "C" CDECL rust_chan*
upcall_new_chan(rust_task *task, rust_port *port) { upcall_new_chan(rust_task *task, rust_port *port) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
LOG(task, comm, "upcall_new_chan(" LOG(task, comm, "upcall_new_chan("
"task=0x%" PRIxPTR " (%s), port=0x%" PRIxPTR ")", "task=0x%" PRIxPTR " (%s), port=0x%" PRIxPTR ")",
(uintptr_t) task, task->name, port); (uintptr_t) task, task->name, port);
I(dom, port); I(sched, port);
return new (task) rust_chan(task, port, port->unit_sz); return new (task) rust_chan(task, port, port->unit_sz);
} }
@ -127,7 +127,7 @@ void upcall_del_chan(rust_task *task, rust_chan *chan) {
LOG(task, comm, "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t) chan); LOG(task, comm, "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t) chan);
A(task->dom, chan->ref_count == 0, A(task->sched, chan->ref_count == 0,
"Channel's ref count should be zero."); "Channel's ref count should be zero.");
if (chan->is_associated()) { if (chan->is_associated()) {
@ -174,7 +174,7 @@ upcall_clone_chan(rust_task *task, maybe_proxy<rust_task> *target,
target_task = target->referent(); target_task = target->referent();
} else { } else {
rust_handle<rust_port> *handle = rust_handle<rust_port> *handle =
task->dom->kernel->get_port_handle(port->as_referent()); task->sched->kernel->get_port_handle(port->as_referent());
maybe_proxy<rust_port> *proxy = new rust_proxy<rust_port> (handle); maybe_proxy<rust_port> *proxy = new rust_proxy<rust_port> (handle);
LOG(task, mem, "new proxy: " PTR, proxy); LOG(task, mem, "new proxy: " PTR, proxy);
port = proxy; port = proxy;
@ -275,7 +275,7 @@ upcall_exit(rust_task *task) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
LOG(task, task, "task ref_count: %d", task->ref_count); LOG(task, task, "task ref_count: %d", task->ref_count);
A(task->dom, task->ref_count >= 0, A(task->sched, task->ref_count >= 0,
"Task ref_count should not be negative on exit!"); "Task ref_count should not be negative on exit!");
task->die(); task->die();
task->notify_tasks_waiting_to_join(); task->notify_tasks_waiting_to_join();
@ -308,8 +308,8 @@ extern "C" CDECL void
upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) { upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
DLOG(dom, mem, DLOG(sched, mem,
"upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")", "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
(uintptr_t)ptr, is_gc); (uintptr_t)ptr, is_gc);
task->free(ptr, (bool) is_gc); task->free(ptr, (bool) is_gc);
@ -320,11 +320,11 @@ upcall_mark(rust_task *task, void* ptr) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
if (ptr) { if (ptr) {
gc_alloc *gcm = (gc_alloc*) (((char*)ptr) - sizeof(gc_alloc)); gc_alloc *gcm = (gc_alloc*) (((char*)ptr) - sizeof(gc_alloc));
uintptr_t marked = (uintptr_t) gcm->mark(); uintptr_t marked = (uintptr_t) gcm->mark();
DLOG(dom, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR, DLOG(sched, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR,
(uintptr_t)gcm, marked); (uintptr_t)gcm, marked);
return marked; return marked;
} }
@ -332,14 +332,14 @@ upcall_mark(rust_task *task, void* ptr) {
} }
rust_str *make_str(rust_task *task, char const *s, size_t fill) { rust_str *make_str(rust_task *task, char const *s, size_t fill) {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
size_t alloc = next_power_of_two(sizeof(rust_str) + fill); size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
void *mem = task->malloc(alloc); void *mem = task->malloc(alloc);
if (!mem) { if (!mem) {
task->fail(3); task->fail(3);
return NULL; return NULL;
} }
rust_str *st = new (mem) rust_str(dom, alloc, fill, (uint8_t const *) s); rust_str *st = new (mem) rust_str(sched, alloc, fill, (uint8_t const *) s);
LOG(task, mem, LOG(task, mem,
"upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR, "upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR,
s, fill, st); s, fill, st);
@ -366,15 +366,15 @@ extern "C" CDECL rust_vec *
upcall_new_vec(rust_task *task, size_t fill, type_desc *td) { upcall_new_vec(rust_task *task, size_t fill, type_desc *td) {
LOG_UPCALL_ENTRY(task); LOG_UPCALL_ENTRY(task);
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
DLOG(dom, mem, "upcall new_vec(%" PRIdPTR ")", fill); DLOG(sched, mem, "upcall new_vec(%" PRIdPTR ")", fill);
size_t alloc = next_power_of_two(sizeof(rust_vec) + fill); size_t alloc = next_power_of_two(sizeof(rust_vec) + fill);
void *mem = task->malloc(alloc, td); void *mem = task->malloc(alloc, td);
if (!mem) { if (!mem) {
task->fail(3); task->fail(3);
return NULL; return NULL;
} }
rust_vec *v = new (mem) rust_vec(dom, alloc, 0, NULL); rust_vec *v = new (mem) rust_vec(sched, alloc, 0, NULL);
LOG(task, mem, LOG(task, mem,
"upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR, fill, v); "upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR, fill, v);
return v; return v;
@ -387,7 +387,7 @@ vec_grow(rust_task *task,
uintptr_t *need_copy, uintptr_t *need_copy,
type_desc *td) type_desc *td)
{ {
rust_dom *dom = task->dom; rust_scheduler *sched = task->sched;
LOG(task, mem, LOG(task, mem,
"vec_grow(0x%" PRIxPTR ", %" PRIdPTR "vec_grow(0x%" PRIxPTR ", %" PRIdPTR
"), rc=%" PRIdPTR " alloc=%" PRIdPTR ", fill=%" PRIdPTR "), rc=%" PRIdPTR " alloc=%" PRIdPTR ", fill=%" PRIdPTR
@ -438,10 +438,10 @@ vec_grow(rust_task *task,
if (v->ref_count != CONST_REFCOUNT) if (v->ref_count != CONST_REFCOUNT)
v->deref(); v->deref();
v = new (mem) rust_vec(dom, alloc, 0, NULL); v = new (mem) rust_vec(sched, alloc, 0, NULL);
*need_copy = 1; *need_copy = 1;
} }
I(dom, sizeof(rust_vec) + v->fill <= v->alloc); I(sched, sizeof(rust_vec) + v->fill <= v->alloc);
return v; return v;
} }
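make_str, upcall_new_vec, and vec_grow all size allocations with next_power_of_two, so repeated appends double capacity and amortize to constant work per byte. A standalone sketch of that sizing rule (my own loop version of the helper, an assumption rather than the runtime's definition):

#include <cstddef>

static size_t next_power_of_two(size_t s) {
    size_t p = 1;
    while (p < s)
        p <<= 1;               // double until the request is covered
    return p;
}

// Example: a vector needing a 16-byte header plus 9 bytes of payload
// reserves next_power_of_two(25) == 32 bytes.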
@ -521,8 +521,8 @@ upcall_new_task(rust_task *spawner, rust_vec *name) {
// name is a rust string structure. // name is a rust string structure.
LOG_UPCALL_ENTRY(spawner); LOG_UPCALL_ENTRY(spawner);
scoped_lock with(spawner->kernel->scheduler_lock); scoped_lock with(spawner->kernel->scheduler_lock);
rust_dom *dom = spawner->dom; rust_scheduler *sched = spawner->sched;
rust_task *task = dom->create_task(spawner, (const char *)name->data); rust_task *task = sched->create_task(spawner, (const char *)name->data);
return task; return task;
} }
@ -534,8 +534,8 @@ upcall_start_task(rust_task *spawner,
size_t args_sz) { size_t args_sz) {
LOG_UPCALL_ENTRY(spawner); LOG_UPCALL_ENTRY(spawner);
rust_dom *dom = spawner->dom; rust_scheduler *sched = spawner->sched;
DLOG(dom, task, DLOG(sched, task,
"upcall start_task(task %s @0x%" PRIxPTR "upcall start_task(task %s @0x%" PRIxPTR
", spawnee 0x%" PRIxPTR ")", ", spawnee 0x%" PRIxPTR ")",
task->name, task, task->name, task,
@ -563,7 +563,7 @@ upcall_ivec_resize(rust_task *task,
rust_ivec *v, rust_ivec *v,
size_t newsz) { size_t newsz) {
scoped_lock with(task->kernel->scheduler_lock); scoped_lock with(task->kernel->scheduler_lock);
I(task->dom, !v->fill); I(task->sched, !v->fill);
size_t new_alloc = next_power_of_two(newsz); size_t new_alloc = next_power_of_two(newsz);
rust_ivec_heap *new_heap_part = (rust_ivec_heap *) rust_ivec_heap *new_heap_part = (rust_ivec_heap *)


@ -25,24 +25,24 @@ ptr_vec<T>::ptr_vec(rust_task *task) :
fill(0), fill(0),
data(new (task) T*[alloc]) data(new (task) T*[alloc])
{ {
I(task->dom, data); I(task->sched, data);
DLOG(task->dom, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR, DLOG(task->sched, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR,
(uintptr_t)data, (uintptr_t)this); (uintptr_t)data, (uintptr_t)this);
} }
template <typename T> template <typename T>
ptr_vec<T>::~ptr_vec() ptr_vec<T>::~ptr_vec()
{ {
I(task->dom, data); I(task->sched, data);
DLOG(task->dom, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR, DLOG(task->sched, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR,
(uintptr_t)this, (uintptr_t)data); (uintptr_t)this, (uintptr_t)data);
I(task->dom, fill == 0); I(task->sched, fill == 0);
task->free(data); task->free(data);
} }
template <typename T> T *& template <typename T> T *&
ptr_vec<T>::operator[](size_t offset) { ptr_vec<T>::operator[](size_t offset) {
I(task->dom, data[offset]->idx == offset); I(task->sched, data[offset]->idx == offset);
return data[offset]; return data[offset];
} }
@ -50,14 +50,14 @@ template <typename T>
void void
ptr_vec<T>::push(T *p) ptr_vec<T>::push(T *p)
{ {
I(task->dom, data); I(task->sched, data);
I(task->dom, fill <= alloc); I(task->sched, fill <= alloc);
if (fill == alloc) { if (fill == alloc) {
alloc *= 2; alloc *= 2;
data = (T **)task->realloc(data, alloc * sizeof(T*)); data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->dom, data); I(task->sched, data);
} }
I(task->dom, fill < alloc); I(task->sched, fill < alloc);
p->idx = fill; p->idx = fill;
data[fill++] = p; data[fill++] = p;
} }
@ -80,13 +80,13 @@ template <typename T>
void void
ptr_vec<T>::trim(size_t sz) ptr_vec<T>::trim(size_t sz)
{ {
I(task->dom, data); I(task->sched, data);
if (sz <= (alloc / 4) && if (sz <= (alloc / 4) &&
(alloc / 2) >= INIT_SIZE) { (alloc / 2) >= INIT_SIZE) {
alloc /= 2; alloc /= 2;
I(task->dom, alloc >= fill); I(task->sched, alloc >= fill);
data = (T **)task->realloc(data, alloc * sizeof(T*)); data = (T **)task->realloc(data, alloc * sizeof(T*));
I(task->dom, data); I(task->sched, data);
} }
} }
@ -95,9 +95,9 @@ void
ptr_vec<T>::swap_delete(T *item) ptr_vec<T>::swap_delete(T *item)
{ {
/* Swap the endpoint into i and decr fill. */ /* Swap the endpoint into i and decr fill. */
I(task->dom, data); I(task->sched, data);
I(task->dom, fill > 0); I(task->sched, fill > 0);
I(task->dom, item->idx < fill); I(task->sched, item->idx < fill);
fill--; fill--;
if (fill > 0) { if (fill > 0) {
T *subst = data[fill]; T *subst = data[fill];
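swap_delete, truncated above, removes an element in constant time: move the endpoint into the vacated slot, patch its recorded idx, and shrink fill. The operator[] assertion earlier (data[offset]->idx == offset) is exactly the invariant this fix-up preserves. A self-contained sketch of the trick over std::vector:

#include <cassert>
#include <cstddef>
#include <vector>

struct item { size_t idx; };

static void swap_delete(std::vector<item*> &v, item *it) {
    assert(!v.empty() && it->idx < v.size());
    item *last = v.back();
    v.pop_back();
    if (it != last) {
        v[it->idx] = last;     // swap the endpoint into the hole...
        last->idx = it->idx;   // ...and restore idx == position
    }
}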
@ -127,22 +127,22 @@ next_power_of_two(size_t s)
// Initialization helper for ISAAC RNG // Initialization helper for ISAAC RNG
static inline void static inline void
isaac_init(rust_dom *dom, randctx *rctx) isaac_init(rust_scheduler *sched, randctx *rctx)
{ {
memset(rctx, 0, sizeof(randctx)); memset(rctx, 0, sizeof(randctx));
#ifdef __WIN32__ #ifdef __WIN32__
{ {
HCRYPTPROV hProv; HCRYPTPROV hProv;
dom->kernel->win32_require sched->kernel->win32_require
(_T("CryptAcquireContext"), (_T("CryptAcquireContext"),
CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL, CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT|CRYPT_SILENT)); CRYPT_VERIFYCONTEXT|CRYPT_SILENT));
dom->kernel->win32_require sched->kernel->win32_require
(_T("CryptGenRandom"), (_T("CryptGenRandom"),
CryptGenRandom(hProv, sizeof(rctx->randrsl), CryptGenRandom(hProv, sizeof(rctx->randrsl),
(BYTE*)(&rctx->randrsl))); (BYTE*)(&rctx->randrsl)));
dom->kernel->win32_require sched->kernel->win32_require
(_T("CryptReleaseContext"), (_T("CryptReleaseContext"),
CryptReleaseContext(hProv, 0)); CryptReleaseContext(hProv, 0));
} }
@ -156,11 +156,11 @@ isaac_init(rust_dom *dom, randctx *rctx)
} }
} else { } else {
int fd = open("/dev/urandom", O_RDONLY); int fd = open("/dev/urandom", O_RDONLY);
I(dom, fd > 0); I(sched, fd > 0);
I(dom, I(sched,
read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl)) read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl))
== sizeof(rctx->randrsl)); == sizeof(rctx->randrsl));
I(dom, close(fd) == 0); I(sched, close(fd) == 0);
} }
#endif #endif
randinit(rctx, 1); randinit(rctx, 1);
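isaac_init fills the ISAAC seed block from the operating system: CryptGenRandom on Windows, /dev/urandom in the POSIX branch shown above. A minimal POSIX-only sketch of the urandom path, with assert standing in for the runtime's I() checks:

#include <cassert>
#include <cstddef>
#include <fcntl.h>
#include <unistd.h>

static void seed_from_urandom(void *buf, size_t len) {
    int fd = open("/dev/urandom", O_RDONLY);
    assert(fd >= 0);                        // urandom should always open
    ssize_t got = read(fd, buf, len);
    assert(got == (ssize_t)len);            // a short read is unexpected here
    int rc = close(fd);
    assert(rc == 0);
    (void)got; (void)rc;                    // quiet NDEBUG builds
}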
@ -175,9 +175,10 @@ rust_vec : public rc_base<rust_vec>
size_t fill; size_t fill;
size_t pad; // Pad to align data[0] to 16 bytes. size_t pad; // Pad to align data[0] to 16 bytes.
uint8_t data[]; uint8_t data[];
rust_vec(rust_dom *dom, size_t alloc, size_t fill, uint8_t const *d) : rust_vec(rust_scheduler *sched, size_t alloc, size_t fill,
alloc(alloc), uint8_t const *d)
fill(fill) : alloc(alloc),
fill(fill)
{ {
if (d) if (d)
memcpy(&data[0], d, fill); memcpy(&data[0], d, fill);
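rust_vec keeps its payload in a trailing array, so header and data share one task-owned allocation; that is why the upcalls pair task->malloc with placement new (mem). A freestanding sketch of the layout (standard C++ has no flexible array members, so the common data[1] idiom stands in):

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>

struct vec {
    size_t alloc;        // bytes available in data[]
    size_t fill;         // bytes currently in use
    uint8_t data[1];     // payload begins right after the header

    vec(size_t alloc, size_t fill, uint8_t const *d)
        : alloc(alloc), fill(fill) {
        if (d)
            memcpy(data, d, fill);   // same copy-in as rust_vec's ctor
    }
};

static vec *make_vec(size_t payload, uint8_t const *d) {
    // One malloc covers header + payload; construct in place, as the
    // 'new (mem) rust_vec(...)' calls above do with task->malloc.
    void *mem = malloc(sizeof(vec) + payload);
    return mem ? new (mem) vec(payload, payload, d) : nullptr;
}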