diff options
| -rw-r--r-- | src/rt/rust_internal.h | 17 | ||||
| -rw-r--r-- | src/rt/rust_kernel.cpp | 5 | ||||
| -rw-r--r-- | src/rt/rust_scheduler.cpp | 23 | ||||
| -rw-r--r-- | src/rt/rust_scheduler.h | 7 | ||||
| -rw-r--r-- | src/rt/rust_task.cpp | 5 | ||||
| -rw-r--r-- | src/rt/rust_task.h | 7 | ||||
| -rw-r--r-- | src/rt/rust_task_thread.cpp | 22 | ||||
| -rw-r--r-- | src/rt/rust_task_thread.h | 3 |
8 files changed, 66 insertions, 23 deletions
diff --git a/src/rt/rust_internal.h b/src/rt/rust_internal.h index 30608ceac95..b6bd20fc98b 100644 --- a/src/rt/rust_internal.h +++ b/src/rt/rust_internal.h @@ -104,15 +104,14 @@ static size_t const BUF_BYTES = 2048; void ref() { ++ref_count; } \ void deref() { if (--ref_count == 0) { dtor; } } -#define RUST_ATOMIC_REFCOUNT() \ - private: \ - intptr_t ref_count; \ -public: \ - void ref() { \ - intptr_t old = sync::increment(ref_count); \ - assert(old > 0); \ - } \ - void deref() { if(0 == sync::decrement(ref_count)) { delete this; } } +#define RUST_ATOMIC_REFCOUNT() \ +public: \ + intptr_t ref_count; \ + void ref() { \ + intptr_t old = sync::increment(ref_count); \ + assert(old > 0); \ + } \ + void deref() { if(0 == sync::decrement(ref_count)) { delete_this(); } } template <typename T> struct task_owned { inline void *operator new(size_t size, rust_task *task, const char *tag); diff --git a/src/rt/rust_kernel.cpp b/src/rt/rust_kernel.cpp index 8c1278a88db..4517ec7b766 100644 --- a/src/rt/rust_kernel.cpp +++ b/src/rt/rust_kernel.cpp @@ -129,11 +129,6 @@ rust_kernel::release_task_id(rust_task_id id) { new_live_tasks = --live_tasks; } KLOG_("Total outstanding tasks: %d", new_live_tasks); - if (new_live_tasks == 0) { - // There are no more tasks and there never will be. - // Tell all the schedulers to exit. 
- sched->exit(); - } } rust_task * diff --git a/src/rt/rust_scheduler.cpp b/src/rt/rust_scheduler.cpp index e2389906d86..629df420bd1 100644 --- a/src/rt/rust_scheduler.cpp +++ b/src/rt/rust_scheduler.cpp @@ -9,6 +9,7 @@ rust_scheduler::rust_scheduler(rust_kernel *kernel, srv(srv), env(srv->env), live_threads(num_threads), + live_tasks(0), num_threads(num_threads), id(id) { @@ -84,6 +85,7 @@ rust_scheduler::create_task(rust_task *spawner, const char *name, { scoped_lock with(lock); thread_no = isaac_rand(&rctx) % num_threads; + live_tasks++; } rust_task_thread *thread = threads[thread_no]; return thread->create_task(spawner, name, init_stack_sz); @@ -95,8 +97,27 @@ rust_scheduler::create_task(rust_task *spawner, const char *name) { } void +rust_scheduler::release_task() { + bool need_exit = false; + { + scoped_lock with(lock); + live_tasks--; + if (live_tasks == 0) { + need_exit = true; + } + } + if (need_exit) { + // There are no more tasks on this scheduler. Time to leave + exit(); + } +} + +void rust_scheduler::exit() { - for(size_t i = 0; i < num_threads; ++i) { + // Take a copy of num_threads. After the last thread exits, this + // scheduler will get destroyed, and our fields will cease to exist. 
+ size_t current_num_threads = num_threads; + for(size_t i = 0; i < current_num_threads; ++i) { threads[i]->exit(); } } diff --git a/src/rt/rust_scheduler.h b/src/rt/rust_scheduler.h index 47dc0a811a5..533f773ee35 100644 --- a/src/rt/rust_scheduler.h +++ b/src/rt/rust_scheduler.h @@ -14,6 +14,8 @@ private: lock_and_signal lock; // When this hits zero we'll tell the kernel to release us uintptr_t live_threads; + // When this hits zero we'll tell the threads to exit + uintptr_t live_tasks; randctx rctx; array_list<rust_task_thread *> threads; @@ -27,6 +29,8 @@ private: rust_task_thread *create_task_thread(int id); void destroy_task_thread(rust_task_thread *thread); + void exit(); + public: rust_scheduler(rust_kernel *kernel, rust_srv *srv, size_t num_threads, rust_sched_id id); @@ -39,7 +43,8 @@ public: size_t init_stack_sz); rust_task_id create_task(rust_task *spawner, const char *name); - void exit(); + void release_task(); + size_t number_of_threads(); // Called by each thread when it terminates. When all threads // terminate the scheduler does as well. 
diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp index 136b00e1f56..9443d4f3706 100644 --- a/src/rt/rust_task.cpp +++ b/src/rt/rust_task.cpp @@ -268,7 +268,8 @@ rust_task::rust_task(rust_task_thread *thread, rust_task_list *state, } } -rust_task::~rust_task() +void +rust_task::delete_this() { I(thread, !thread->lock.lock_held_by_current_thread()); I(thread, port_table.is_empty()); @@ -291,6 +292,8 @@ rust_task::~rust_task() while (stk != NULL) { del_stk(this, stk); } + + thread->release_task(this); } struct spawn_args { diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h index 7e407b38200..33e0da5af33 100644 --- a/src/rt/rust_task.h +++ b/src/rt/rust_task.h @@ -122,6 +122,11 @@ rust_task : public kernel_owned<rust_task>, rust_cond // The amount of stack we're using, excluding red zones size_t total_stack_sz; +private: + // Called when the atomic refcount reaches zero + void delete_this(); +public: + // Only a pointer to 'name' is kept, so it must live as long as this task. rust_task(rust_task_thread *thread, rust_task_list *state, @@ -129,8 +134,6 @@ rust_task : public kernel_owned<rust_task>, rust_cond const char *name, size_t init_stack_sz); - ~rust_task(); - void start(spawn_fn spawnee_fn, rust_opaque_box *env, void *args); diff --git a/src/rt/rust_task_thread.cpp b/src/rt/rust_task_thread.cpp index 6251d5c24c1..236eaaab98e 100644 --- a/src/rt/rust_task_thread.cpp +++ b/src/rt/rust_task_thread.cpp @@ -136,16 +136,30 @@ rust_task_thread::reap_dead_tasks() { for (size_t i = 0; i < dead_tasks_len; ++i) { rust_task *task = dead_tasks_copy[i]; - if (task) { - kernel->release_task_id(task->user.id); - task->deref(); - } + // Release the task from the kernel so nobody else can get at it + kernel->release_task_id(task->user.id); + // Deref the task, which may cause it to request us to release it + task->deref(); } srv->free(dead_tasks_copy); lock.lock(); } +void +rust_task_thread::release_task(rust_task *task) { + // Nobody should have a ref to the task at this 
 point + I(this, task->ref_count == 0); + // Kernel should not know about the task any more + I(this, kernel->get_task_by_id(task->user.id) == NULL); + // Now delete the task, which will require using this thread's + // memory region. + delete task; + // Now release the task from the scheduler, which may trigger this + // thread to exit + sched->release_task(); +} + /** * Schedules a running task for execution. Only running tasks can be * activated. Blocked tasks have to be unblocked before they can be diff --git a/src/rt/rust_task_thread.h b/src/rt/rust_task_thread.h index e8fb70fee93..541d95f6460 100644 --- a/src/rt/rust_task_thread.h +++ b/src/rt/rust_task_thread.h @@ -122,6 +122,9 @@ struct rust_task_thread : public kernel_owned<rust_task_thread>, static rust_task *get_task(); + // Called by each task when it is ready to be destroyed + void release_task(rust_task *task); + // Tells the scheduler to exit its scheduling loop and thread void exit(); }; |
