author     Brian Anderson <banderson@mozilla.com>  2012-04-01 00:13:59 -0700
committer  Brian Anderson <banderson@mozilla.com>  2012-04-01 00:15:04 -0700
commit     e78396850d8e27138dd06e723de5ad3de0e65950 (patch)
tree       3768b6007a00186902b099efe24bf5ee82195cb3 /src/rt/rust_sched_loop.cpp
parent     21064637ed8b8259d1305f21ace12c40b9561706 (diff)
parent     de47fcfdf9404d53940099f5e7810bdb2bf37af3 (diff)
Merge remote-tracking branch 'brson/mainthread'
Conflicts:
	src/rt/rust_sched_loop.cpp
	src/rt/rust_shape.cpp
	src/rt/rust_task.cpp
Diffstat (limited to 'src/rt/rust_sched_loop.cpp')
-rw-r--r--  src/rt/rust_sched_loop.cpp | 413
1 file changed, 413 insertions(+), 0 deletions(-)
diff --git a/src/rt/rust_sched_loop.cpp b/src/rt/rust_sched_loop.cpp
new file mode 100644
index 00000000000..6a9668f418e
--- /dev/null
+++ b/src/rt/rust_sched_loop.cpp
@@ -0,0 +1,413 @@
+
+#include <stdarg.h>
+#include <cassert>
+#include <pthread.h>
+#include <vector>
+#include "rust_internal.h"
+#include "rust_util.h"
+#include "rust_scheduler.h"
+
+#ifndef _WIN32
+pthread_key_t rust_sched_loop::task_key;
+#else
+DWORD rust_sched_loop::task_key;
+#endif
+
+const size_t C_STACK_SIZE = 1024*1024;
+
+bool rust_sched_loop::tls_initialized = false;
+
+rust_sched_loop::rust_sched_loop(rust_scheduler *sched,
+                                   rust_srv *srv,
+                                   int id) :
+    _log(srv, this),
+    id(id),
+    should_exit(false),
+    cached_c_stack(NULL),
+    dead_task(NULL),
+    pump_signal(NULL),
+    kernel(sched->kernel),
+    sched(sched),
+    srv(srv),
+    log_lvl(log_debug),
+    min_stack_size(kernel->env->min_stack_size),
+    env(kernel->env),
+    // TODO: calculate a per-scheduler name.
+    name("main")
+{
+    LOGPTR(this, "new dom", (uintptr_t)this);
+    isaac_init(kernel, &rctx);
+
+    if (!tls_initialized)
+        init_tls();
+}
+
+void
+rust_sched_loop::activate(rust_task *task) {
+    lock.must_have_lock();
+    task->ctx.next = &c_context;
+    DLOG(this, task, "descheduling...");
+    lock.unlock();
+    prepare_c_stack(task);
+    task->ctx.swap(c_context);
+    task->cleanup_after_turn();
+    unprepare_c_stack();
+    lock.lock();
+    DLOG(this, task, "task has returned");
+}
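+
+// Descriptive note on the flow above: task->ctx.swap(c_context) saves
+// the scheduler's registers into c_context and resumes the task;
+// because task->ctx.next points back at c_context, control returns
+// here once the task yields or finishes its turn. The lock is dropped
+// across the swap because task code may itself need the scheduler
+// lock (e.g. in transition()).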
+
+void
+rust_sched_loop::log(rust_task* task, uint32_t level, char const *fmt, ...) {
+    char buf[BUF_BYTES];
+    va_list args;
+    va_start(args, fmt);
+    vsnprintf(buf, sizeof(buf), fmt, args);
+    _log.trace_ln(task, level, buf);
+    va_end(args);
+}
+
+void
+rust_sched_loop::fail() {
+    log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
+        name, (uintptr_t)this);
+    kernel->fail();
+}
+
+void
+rust_sched_loop::kill_all_tasks() {
+    std::vector<rust_task*> all_tasks;
+
+    {
+        scoped_lock with(lock);
+
+        for (size_t i = 0; i < running_tasks.length(); i++) {
+            all_tasks.push_back(running_tasks[i]);
+        }
+
+        for (size_t i = 0; i < blocked_tasks.length(); i++) {
+            all_tasks.push_back(blocked_tasks[i]);
+        }
+    }
+
+    while (!all_tasks.empty()) {
+        rust_task *task = all_tasks.back();
+        all_tasks.pop_back();
+        // We don't want the failure of these tasks to propagate back
+        // to the kernel again since we're already failing everything
+        task->unsupervise();
+        task->kill();
+    }
+}
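+
+// The copy-then-kill shape above is deliberate: the task lists are
+// snapshotted while the lock is held, and the kills happen after it
+// is released, since rust_task::kill may need to take locks of its
+// own.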
+
+size_t
+rust_sched_loop::number_of_live_tasks() {
+    return running_tasks.length() + blocked_tasks.length();
+}
+
+/**
+ * Delete any dead tasks.
+ */
+void
+rust_sched_loop::reap_dead_tasks() {
+    lock.must_have_lock();
+
+    if (dead_task == NULL) {
+        return;
+    }
+
+    // Dereferencing the task will probably cause it to be released
+    // from the scheduler, which may end up trying to take this lock
+    lock.unlock();
+
+    dead_task->delete_all_stacks();
+    // Deref the task, which may cause it to request us to release it
+    dead_task->deref();
+    dead_task = NULL;
+
+    lock.lock();
+}
+
+void
+rust_sched_loop::release_task(rust_task *task) {
+    // Nobody should have a ref to the task at this point
+    I(this, task->get_ref_count() == 0);
+    // Now delete the task, which will require using this thread's
+    // memory region.
+    delete task;
+    // Now release the task from the scheduler, which may trigger this
+    // thread to exit
+    sched->release_task();
+}
+
+/**
+ * Schedules a running task for execution. Only running tasks can be
+ * activated.  Blocked tasks have to be unblocked before they can be
+ * activated.
+ *
+ * Returns NULL if no tasks can be scheduled.
+ */
+rust_task *
+rust_sched_loop::schedule_task() {
+    lock.must_have_lock();
+    I(this, this);
+    // FIXME: in the face of failing tasks, this is not always right.
+    // I(this, n_live_tasks() > 0);
+    if (running_tasks.length() > 0) {
+        // Every task on this list is runnable; pick one at random so
+        // no task is systematically starved.
+        size_t k = isaac_rand(&rctx);
+        return (rust_task *)running_tasks[k % running_tasks.length()];
+    }
+    return NULL;
+}
+
+void
+rust_sched_loop::log_state() {
+    if (log_rt_task < log_debug) return;
+
+    if (!running_tasks.is_empty()) {
+        log(NULL, log_debug, "running tasks:");
+        for (size_t i = 0; i < running_tasks.length(); i++) {
+            log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR,
+                running_tasks[i]->name,
+                (uintptr_t)running_tasks[i]);
+        }
+    }
+
+    if (!blocked_tasks.is_empty()) {
+        log(NULL, log_debug, "blocked tasks:");
+        for (size_t i = 0; i < blocked_tasks.length(); i++) {
+            log(NULL, log_debug, "\t task: %s @0x%" PRIxPTR
+                ", blocked on: 0x%" PRIxPTR " '%s'",
+                blocked_tasks[i]->name, (uintptr_t)blocked_tasks[i],
+                (uintptr_t)blocked_tasks[i]->get_cond(),
+                blocked_tasks[i]->get_cond_name());
+        }
+    }
+}
+
+void
+rust_sched_loop::on_pump_loop(rust_signal *signal) {
+    I(this, pump_signal == NULL);
+    I(this, signal != NULL);
+    pump_signal = signal;
+}
+
+void
+rust_sched_loop::pump_loop() {
+    I(this, pump_signal != NULL);
+    pump_signal->signal();
+}
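+
+// Sketch of the intended handshake: whoever drives this loop
+// registers a signal once via on_pump_loop(), blocks when
+// run_single_turn() returns sched_loop_state_block, and is woken by
+// pump_loop(), which transition() and exit() call whenever the loop
+// has new work or has been asked to shut down.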
+
+rust_sched_loop_state
+rust_sched_loop::run_single_turn() {
+    DLOG(this, task,
+         "scheduler %d resuming ...", id);
+
+    lock.lock();
+
+    if (!should_exit) {
+        A(this, dead_task == NULL,
+          "Tasks should only die after running");
+
+        DLOG(this, dom, "worker %d, number_of_live_tasks = %d",
+             id, number_of_live_tasks());
+
+        rust_task *scheduled_task = schedule_task();
+
+        if (scheduled_task == NULL) {
+            log_state();
+            DLOG(this, task,
+                 "all tasks are blocked, scheduler id %d yielding ...",
+                 id);
+
+            lock.unlock();
+            return sched_loop_state_block;
+        }
+
+        I(this, scheduled_task->running());
+
+        DLOG(this, task,
+             "activating task %s 0x%" PRIxPTR
+             ", state: %s",
+             scheduled_task->name,
+             (uintptr_t)scheduled_task,
+             state_name(scheduled_task->get_state()));
+
+        place_task_in_tls(scheduled_task);
+
+        DLOG(this, task,
+             "Running task %p on worker %d",
+             scheduled_task, id);
+        activate(scheduled_task);
+
+        DLOG(this, task,
+             "returned from task %s @0x%" PRIxPTR
+             " in state '%s', worker id=%d" PRIxPTR,
+             scheduled_task->name,
+             (uintptr_t)scheduled_task,
+             state_name(scheduled_task->get_state()),
+             id);
+
+        reap_dead_tasks();
+
+        lock.unlock();
+        return sched_loop_state_keep_going;
+    } else {
+        A(this, running_tasks.is_empty(), "Should have no running tasks");
+        A(this, blocked_tasks.is_empty(), "Should have no blocked tasks");
+        A(this, dead_task == NULL, "Should have no dead tasks");
+
+        DLOG(this, dom, "finished main-loop %d", id);
+
+        lock.unlock();
+
+        I(this, !extra_c_stack);
+        if (cached_c_stack) {
+            destroy_stack(kernel->region(), cached_c_stack);
+            cached_c_stack = NULL;
+        }
+
+        sched->release_task_thread();
+        return sched_loop_state_exit;
+    }
+}
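+
+// A hypothetical driver for the state machine above (illustrative
+// names only; the real driver lives in the scheduler/kernel event
+// loop):
+//
+//     for (;;) {
+//         switch (sched_loop->run_single_turn()) {
+//           case sched_loop_state_keep_going: break;
+//           case sched_loop_state_block: wait_for_pump_signal(); break;
+//           case sched_loop_state_exit: return;
+//         }
+//     }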
+
+rust_task *
+rust_sched_loop::create_task(rust_task *spawner, const char *name) {
+    rust_task *task =
+        new (this->kernel, "rust_task")
+        rust_task (this, task_state_newborn,
+                   spawner, name, env->min_stack_size);
+    DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
+                        task, spawner ? spawner->name : "null", name);
+
+    task->id = kernel->generate_task_id();
+    return task;
+}
+
+rust_task_list *
+rust_sched_loop::state_list(rust_task_state state) {
+    switch (state) {
+    case task_state_running:
+        return &running_tasks;
+    case task_state_blocked:
+        return &blocked_tasks;
+    default:
+        return NULL;
+    }
+}
+
+const char *
+rust_sched_loop::state_name(rust_task_state state) {
+    switch (state) {
+    case task_state_newborn:
+        return "newborn";
+    case task_state_running:
+        return "running";
+    case task_state_blocked:
+        return "blocked";
+    case task_state_dead:
+        return "dead";
+    default:
+        assert(false);
+        return "";
+    }
+}
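+
+// Lifecycle implied by these states and by transition() below:
+//
+//     newborn -> running <-> blocked
+//                   \
+//                    -> dead (parked in dead_task for reap_dead_tasks)
+//
+// Only running and blocked tasks sit on a task list; state_list()
+// returns NULL for the other states.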
+
+void
+rust_sched_loop::transition(rust_task *task,
+                             rust_task_state src, rust_task_state dst,
+                             rust_cond *cond, const char* cond_name) {
+    scoped_lock with(lock);
+    DLOG(this, task,
+         "task %s " PTR " state change '%s' -> '%s' while in '%s'",
+         task->name, (uintptr_t)task, state_name(src), state_name(dst),
+         state_name(task->get_state()));
+    I(this, task->get_state() == src);
+    rust_task_list *src_list = state_list(src);
+    if (src_list) {
+        src_list->remove(task);
+    }
+    rust_task_list *dst_list = state_list(dst);
+    if (dst_list) {
+        dst_list->append(task);
+    }
+    if (dst == task_state_dead) {
+        I(this, dead_task == NULL);
+        dead_task = task;
+    }
+    task->set_state(dst, cond, cond_name);
+
+    pump_loop();
+}
+
+#ifndef _WIN32
+void
+rust_sched_loop::init_tls() {
+    int result = pthread_key_create(&task_key, NULL);
+    assert(!result && "Couldn't create the TLS key!");
+    tls_initialized = true;
+}
+
+void
+rust_sched_loop::place_task_in_tls(rust_task *task) {
+    int result = pthread_setspecific(task_key, task);
+    assert(!result && "Couldn't place the task in TLS!");
+    task->record_stack_limit();
+}
+#else
+void
+rust_sched_loop::init_tls() {
+    task_key = TlsAlloc();
+    assert(task_key != TLS_OUT_OF_INDEXES && "Couldn't create the TLS key!");
+    tls_initialized = true;
+}
+
+void
+rust_sched_loop::place_task_in_tls(rust_task *task) {
+    BOOL result = TlsSetValue(task_key, task);
+    assert(result && "Couldn't place the task in TLS!");
+    task->record_stack_limit();
+}
+#endif
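+
+// Reading the current task back out of TLS would use the matching
+// getter on each platform (a sketch only; the real accessor lives
+// elsewhere in the runtime):
+//
+// #ifndef _WIN32
+//     rust_task *task = (rust_task *)pthread_getspecific(task_key);
+// #else
+//     rust_task *task = (rust_task *)TlsGetValue(task_key);
+// #endif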
+
+void
+rust_sched_loop::exit() {
+    scoped_lock with(lock);
+    DLOG(this, dom, "Requesting exit for thread %d", id);
+    should_exit = true;
+    pump_loop();
+}
+
+// Before activating each task, make sure we have a C stack available.
+// It needs to be allocated ahead of time (while we're on our own
+// stack), because once we're on the Rust stack we won't have enough
+// room to do the allocation
+void
+rust_sched_loop::prepare_c_stack(rust_task *task) {
+    I(this, !extra_c_stack);
+    if (!cached_c_stack && !task->have_c_stack()) {
+        cached_c_stack = create_stack(kernel->region(), C_STACK_SIZE);
+    }
+}
+
+void
+rust_sched_loop::unprepare_c_stack() {
+    if (extra_c_stack) {
+        destroy_stack(kernel->region(), extra_c_stack);
+        extra_c_stack = NULL;
+    }
+}
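+
+// Stack ownership, as read from the two functions above:
+// cached_c_stack is a single reusable C stack, allocated ahead of a
+// turn for tasks that lack their own; extra_c_stack, if a task picked
+// one up during its turn, is destroyed as soon as the task
+// deschedules; the cached stack itself is only freed when
+// run_single_turn() exits the loop.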
+
+//
+// Local Variables:
+// mode: C++
+// fill-column: 70;
+// indent-tabs-mode: nil
+// c-basic-offset: 4
+// buffer-file-coding-system: utf-8-unix
+// End:
+//