
rt: Fix scalability problem with big stacks on 32 bit

commit ca8e99fd78 (parent 2dbe20a561)
Patrick Walton, 2013-04-05 11:55:43 -07:00
6 changed files with 131 additions and 6 deletions
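On 32-bit targets the usable address space is small enough that letting every task map its own multi-megabyte stack segment for occasional large requests does not scale; the diff below instead caches a small, fixed number of oversized segments per scheduler loop and lends them to tasks on demand. A back-of-envelope sketch of the pressure involved, using assumed figures (a 3 MiB big segment and roughly 2 GiB of usable address space; the real sizes come from the runtime constants shown further down):

    #include <cstdio>

    int main() {
        // Assumed figures, for illustration only.
        const unsigned long big_seg = 3UL * 1024 * 1024;        // one big stack
        const unsigned long addr_space = 2048UL * 1024 * 1024;  // ~2 GiB usable
        // Without a shared cache, each task wanting a big stack maps its own
        // segment, so a few hundred tasks can exhaust a 32-bit address space.
        std::printf("concurrent big stacks that fit: %lu\n", addr_space / big_seg);
        return 0;
    }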

src/rt/rust_sched_loop.cpp

@@ -29,6 +29,8 @@ rust_sched_loop::rust_sched_loop(rust_scheduler *sched, int id, bool killed) :
     should_exit(false),
     cached_c_stack(NULL),
     extra_c_stack(NULL),
+    cached_big_stack(NULL),
+    extra_big_stack(NULL),
     dead_task(NULL),
     killed(killed),
     pump_signal(NULL),
@@ -263,6 +265,11 @@ rust_sched_loop::run_single_turn() {
             destroy_exchange_stack(kernel->region(), cached_c_stack);
             cached_c_stack = NULL;
         }
+        assert(!extra_big_stack);
+        if (cached_big_stack) {
+            destroy_exchange_stack(kernel->region(), cached_big_stack);
+            cached_big_stack = NULL;
+        }
 
         sched->release_task_thread();
         return sched_loop_state_exit;
@@ -392,6 +399,13 @@ rust_sched_loop::prepare_c_stack(rust_task *task) {
         cached_c_stack = create_exchange_stack(kernel->region(),
                                                C_STACK_SIZE);
     }
+    assert(!extra_big_stack);
+    if (!cached_big_stack) {
+        cached_big_stack = create_exchange_stack(kernel->region(),
+                                                 C_STACK_SIZE +
+                                                 (C_STACK_SIZE * 2));
+        cached_big_stack->is_big = 1;
+    }
 }
 
 void
@@ -400,6 +414,10 @@ rust_sched_loop::unprepare_c_stack() {
         destroy_exchange_stack(kernel->region(), extra_c_stack);
         extra_c_stack = NULL;
     }
+    if (extra_big_stack) {
+        destroy_exchange_stack(kernel->region(), extra_big_stack);
+        extra_big_stack = NULL;
+    }
 }
 
 //
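Note the sizing in prepare_c_stack above: the cached big stack is created at C_STACK_SIZE + (C_STACK_SIZE * 2), which is simply three C-stack-sized blocks. A compile-time restatement, with a stand-in value for C_STACK_SIZE (the real constant is defined elsewhere in the runtime):

    #include <cstddef>

    const std::size_t C_STACK_SIZE = 1024 * 1024;  // stand-in; the runtime defines it
    const std::size_t BIG_STACK_SIZE = C_STACK_SIZE + (C_STACK_SIZE * 2);

    // The sum written above is just 3x the C stack size.
    int main() { return BIG_STACK_SIZE == 3 * C_STACK_SIZE ? 0 : 1; }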

src/rt/rust_sched_loop.h

@@ -67,6 +67,8 @@ private:
     stk_seg *cached_c_stack;
     stk_seg *extra_c_stack;
+    stk_seg *cached_big_stack;
+    stk_seg *extra_big_stack;
 
     rust_task_list running_tasks;
     rust_task_list blocked_tasks;
@@ -147,6 +149,10 @@ public:
     stk_seg *borrow_c_stack();
     void return_c_stack(stk_seg *stack);
 
+    // Called by tasks when they need a big stack
+    stk_seg *borrow_big_stack();
+    void return_big_stack(stk_seg *stack);
+
     int get_id() { return this->id; }
 };
@@ -202,6 +208,32 @@ rust_sched_loop::return_c_stack(stk_seg *stack) {
     }
 }
 
+// NB: Runs on the Rust stack. Might return NULL!
+inline stk_seg *
+rust_sched_loop::borrow_big_stack() {
+    assert(cached_big_stack);
+    stk_seg *your_stack;
+    if (extra_big_stack) {
+        your_stack = extra_big_stack;
+        extra_big_stack = NULL;
+    } else {
+        your_stack = cached_big_stack;
+        cached_big_stack = NULL;
+    }
+    return your_stack;
+}
+
+// NB: Runs on the Rust stack
+inline void
+rust_sched_loop::return_big_stack(stk_seg *stack) {
+    assert(!extra_big_stack);
+    assert(stack);
+    if (!cached_big_stack)
+        cached_big_stack = stack;
+    else
+        extra_big_stack = stack;
+}
+
 // this is needed to appease the circular dependency gods
 #include "rust_task.h"
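Together, borrow_big_stack and return_big_stack form a two-slot lending protocol: at most two big stacks exist per scheduler loop, one in cached_big_stack (re-primed by prepare_c_stack before each turn) and one parked in extra_big_stack when a segment comes back while the cached slot is already full. A standalone sketch using mock types (stk_seg and the loop here are stand-ins; the real definitions live in the runtime headers):

    #include <cassert>
    #include <cstddef>

    struct stk_seg { bool is_big; };  // mock; the real struct is in the stack header

    struct mock_loop {
        stk_seg *cached_big_stack;
        stk_seg *extra_big_stack;

        stk_seg *borrow_big_stack() {
            assert(cached_big_stack);  // prepare_c_stack primes this slot
            stk_seg *s;
            if (extra_big_stack) { s = extra_big_stack; extra_big_stack = NULL; }
            else                 { s = cached_big_stack; cached_big_stack = NULL; }
            return s;
        }

        void return_big_stack(stk_seg *s) {
            assert(s && !extra_big_stack);
            if (!cached_big_stack) cached_big_stack = s;  // refill the primary slot
            else                   extra_big_stack = s;   // park the spare
        }
    };

    int main() {
        stk_seg big = { true };
        mock_loop loop = { &big, NULL };
        stk_seg *s = loop.borrow_big_stack();  // empties the cached slot
        loop.return_big_stack(s);              // refills it
        assert(loop.cached_big_stack == &big && loop.extra_big_stack == NULL);
        return 0;
    }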

src/rt/rust_stack.cpp

@@ -13,6 +13,8 @@
 #include "vg/valgrind.h"
 #include "vg/memcheck.h"
 
+#include <cstdio>
+
 #ifdef _LP64
 const uintptr_t canary_value = 0xABCDABCDABCDABCD;
 #else
@@ -61,6 +63,7 @@ create_stack(memory_region *region, size_t sz) {
     stk_seg *stk = (stk_seg *)region->malloc(total_sz, "stack");
     memset(stk, 0, sizeof(stk_seg));
     stk->end = (uintptr_t) &stk->data[sz];
+    stk->is_big = 0;
     add_stack_canary(stk);
     register_valgrind_stack(stk);
     return stk;
@@ -78,6 +81,7 @@ create_exchange_stack(rust_exchange_alloc *exchange, size_t sz) {
     stk_seg *stk = (stk_seg *)exchange->malloc(total_sz);
     memset(stk, 0, sizeof(stk_seg));
     stk->end = (uintptr_t) &stk->data[sz];
+    stk->is_big = 0;
     add_stack_canary(stk);
     register_valgrind_stack(stk);
     return stk;

src/rt/rust_stack.h

@@ -22,9 +22,7 @@ struct stk_seg {
     stk_seg *next;
     uintptr_t end;
     unsigned int valgrind_id;
-#ifndef _LP64
-    uint32_t pad;
-#endif
 
+    uint8_t is_big;
     rust_task *task;
     uintptr_t canary;

src/rt/rust_task.cpp

@@ -53,7 +53,8 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
     disallow_yield(0),
     c_stack(NULL),
     next_c_sp(0),
-    next_rust_sp(0)
+    next_rust_sp(0),
+    big_stack(NULL)
 {
     LOGPTR(sched_loop, "new task", (uintptr_t)this);
     DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
@@ -556,13 +557,64 @@ rust_task::cleanup_after_turn() {
     // Delete any spare stack segments that were left
     // behind by calls to prev_stack
     assert(stk);
     while (stk->next) {
         stk_seg *new_next = stk->next->next;
-        free_stack(stk->next);
+
+        if (stk->next->is_big) {
+            assert (big_stack == stk->next);
+            sched_loop->return_big_stack(big_stack);
+            big_stack = NULL;
+        } else {
+            free_stack(stk->next);
+        }
+
         stk->next = new_next;
     }
 }
 
+// NB: Runs on the Rust stack. Returns true if we successfully allocated the big
+// stack and false otherwise.
+bool
+rust_task::new_big_stack() {
+    // If we have a cached big stack segment, use it.
+    if (big_stack) {
+        // Check to see if we're already on the big stack.
+        stk_seg *ss = stk;
+        while (ss != NULL) {
+            if (ss == big_stack)
+                return false;
+            ss = ss->prev;
+        }
+
+        // Unlink the big stack.
+        if (big_stack->next)
+            big_stack->next->prev = big_stack->prev;
+        if (big_stack->prev)
+            big_stack->prev->next = big_stack->next;
+    } else {
+        stk_seg *borrowed_big_stack = sched_loop->borrow_big_stack();
+        if (!borrowed_big_stack) {
+            dump_stacks();
+            abort();
+        } else {
+            big_stack = borrowed_big_stack;
+        }
+    }
+
+    big_stack->task = this;
+    big_stack->next = stk->next;
+    if (big_stack->next)
+        big_stack->next->prev = big_stack;
+    big_stack->prev = stk;
+    if (stk)
+        stk->next = big_stack;
+    stk = big_stack;
+
+    return true;
+}
+
 static bool
 sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
     // Not positive these bounds for sp are correct. I think that the first
@@ -602,9 +654,16 @@ rust_task::delete_all_stacks() {
     assert(stk->next == NULL);
     while (stk != NULL) {
         stk_seg *prev = stk->prev;
-        free_stack(stk);
+
+        if (stk->is_big)
+            sched_loop->return_big_stack(stk);
+        else
+            free_stack(stk);
+
         stk = prev;
     }
+
+    big_stack = NULL;
 }
 
 /*

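new_big_stack() above does two jobs: it decides whether the task already owns (and is currently running on) the big stack, and it splices the segment into the task's doubly-linked stack chain just after the current segment. The unlink and splice steps in isolation, with hypothetical helper names:

    #include <cassert>
    #include <cstddef>

    struct seg { seg *prev; seg *next; };

    // Detach a segment from wherever it sits in the chain.
    static void unlink_seg(seg *s) {
        if (s->next) s->next->prev = s->prev;
        if (s->prev) s->prev->next = s->next;
        s->prev = s->next = NULL;
    }

    // Reinsert it immediately after the current top segment "stk".
    static void splice_after(seg *stk, seg *big) {
        big->next = stk->next;
        if (big->next) big->next->prev = big;
        big->prev = stk;
        stk->next = big;
    }

    int main() {
        seg a = { NULL, NULL };
        seg big = { NULL, NULL };
        splice_after(&a, &big);  // chain: a <-> big
        assert(a.next == &big && big.prev == &a);
        unlink_seg(&big);        // chain: a
        assert(a.next == NULL && a.prev == NULL);
        return 0;
    }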
src/rt/rust_task.h

@@ -133,6 +133,9 @@
 #define RZ_BSD_32 (1024*20)
 #define RZ_BSD_64 (1024*20)
 
+// The threshold beyond which we switch to the big stack.
+#define STACK_THRESHOLD (1024 * 1024)
+
 #ifdef __linux__
 #ifdef __i386__
 #define RED_ZONE_SIZE RZ_LINUX_32
@@ -263,9 +266,13 @@ private:
     uintptr_t next_c_sp;
     uintptr_t next_rust_sp;
 
+    // The big stack.
+    stk_seg *big_stack;
+
     // Called when the atomic refcount reaches zero
     void delete_this();
 
+    bool new_big_stack();
     void new_stack_fast(size_t requested_sz);
     void new_stack(size_t requested_sz);
     void free_stack(stk_seg *stk);
@@ -284,6 +291,8 @@ private:
                    char const *file,
                    size_t line);
 
+    void dump_stacks();
+
     friend void task_start_wrapper(spawn_args *a);
     friend void cleanup_task(cleanup_args *a);
     friend void reset_stack_limit_on_c_stack(reset_args *a);
@@ -568,6 +577,11 @@ rust_task::new_stack_fast(size_t requested_sz) {
     // The minimum stack size, in bytes, of a Rust stack, excluding red zone
     size_t min_sz = sched_loop->min_stack_size;
 
+    if (requested_sz > STACK_THRESHOLD) {
+        if (new_big_stack())
+            return;
+    }
+
     // Try to reuse an existing stack segment
     if (stk != NULL && stk->next != NULL) {
         size_t next_sz = user_stack_size(stk->next);
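The fast path above completes the picture: a request over STACK_THRESHOLD tries the big stack first, and the false return from new_big_stack() (the task is already running on it) falls through to the ordinary segmented-stack reuse below. A condensed restatement of that branch, as a hypothetical free function:

    #include <cstddef>

    #define STACK_THRESHOLD (1024 * 1024)  // matches the definition above

    enum stack_choice { USE_BIG_STACK, GROW_SEGMENTED };

    // Oversized requests go to the shared big stack unless the task is
    // already on it; everything else grows the segmented stack normally.
    stack_choice choose_stack(std::size_t requested_sz, bool already_on_big_stack) {
        if (requested_sz > STACK_THRESHOLD && !already_on_big_stack)
            return USE_BIG_STACK;
        return GROW_SEGMENTED;
    }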