author     Patrick Walton <pcwalton@mimiga.net>    2013-04-05 11:55:43 -0700
committer  Patrick Walton <pcwalton@mimiga.net>    2013-04-19 11:53:34 -0700
commit     ca8e99fd78ab9b56c5bdc61027b032ad52c2ec8b
tree       e3fef1f74bbfaa9ee6846c67941b9f2d8dab0b78
parent     2dbe20a5610c3244feab0db5ab20ff062dc91085
rt: Fix scalability problem with big stacks on 32 bit
Diffstat (limited to 'src/rt/rust_task.cpp')
-rw-r--r--  src/rt/rust_task.cpp  65
1 file changed, 62 insertions, 3 deletions
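
The change below teaches a task to borrow a single large ("big") stack segment from its scheduler loop when it needs one and to hand it back when its stacks are cleaned up, caching at most one borrowed segment in the new big_stack field; per the commit title, holding a separate big mapping per task does not scale in a 32-bit address space. The sketch that follows is illustrative only: rust_sched_loop is not part of this diff, so the pre-mapped pool and its bookkeeping are assumptions meant to show what the borrow_big_stack()/return_big_stack() pair used by the patch is expected to do.

#include <cstddef>
#include <vector>

// Forward declaration standing in for the runtime's stack segment
// (prev/next links, data, ...).
struct stk_seg;

class big_stack_pool {
    std::vector<stk_seg*> spare_;   // big segments not currently lent out

public:
    explicit big_stack_pool(const std::vector<stk_seg*> &premapped)
        : spare_(premapped) {}

    // Lend out a big segment, or NULL when every one is already in use.
    // (In the patch, a task that gets NULL dumps its stacks and aborts.)
    stk_seg *borrow_big_stack() {
        if (spare_.empty())
            return NULL;
        stk_seg *seg = spare_.back();
        spare_.pop_back();
        return seg;
    }

    // Take a segment back so another task can reuse the same large mapping.
    void return_big_stack(stk_seg *seg) {
        spare_.push_back(seg);
    }
};
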
diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp
index 021811ffa76..6db138b418e 100644
--- a/src/rt/rust_task.cpp
+++ b/src/rt/rust_task.cpp
@@ -53,7 +53,8 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
     disallow_yield(0),
     c_stack(NULL),
     next_c_sp(0),
-    next_rust_sp(0)
+    next_rust_sp(0),
+    big_stack(NULL)
 {
     LOGPTR(sched_loop, "new task", (uintptr_t)this);
     DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
@@ -556,13 +557,64 @@ rust_task::cleanup_after_turn() {
     // Delete any spare stack segments that were left
     // behind by calls to prev_stack
     assert(stk);
+
     while (stk->next) {
         stk_seg *new_next = stk->next->next;
-        free_stack(stk->next);
+
+        if (stk->next->is_big) {
+            assert (big_stack == stk->next);
+            sched_loop->return_big_stack(big_stack);
+            big_stack = NULL;
+        } else {
+            free_stack(stk->next);
+        }
+
         stk->next = new_next;
     }
 }
 
+// NB: Runs on the Rust stack. Returns true if we successfully allocated the big
+// stack and false otherwise.
+bool
+rust_task::new_big_stack() {
+    // If we have a cached big stack segment, use it.
+    if (big_stack) {
+        // Check to see if we're already on the big stack.
+        stk_seg *ss = stk;
+        while (ss != NULL) {
+            if (ss == big_stack)
+                return false;
+            ss = ss->prev;
+        }
+
+        // Unlink the big stack.
+        if (big_stack->next)
+            big_stack->next->prev = big_stack->prev;
+        if (big_stack->prev)
+            big_stack->prev->next = big_stack->next;
+    } else {
+        stk_seg *borrowed_big_stack = sched_loop->borrow_big_stack();
+        if (!borrowed_big_stack) {
+            dump_stacks();
+            abort();
+        } else {
+            big_stack = borrowed_big_stack;
+        }
+    }
+
+    big_stack->task = this;
+    big_stack->next = stk->next;
+    if (big_stack->next)
+        big_stack->next->prev = big_stack;
+    big_stack->prev = stk;
+    if (stk)
+        stk->next = big_stack;
+
+    stk = big_stack;
+
+    return true;
+}
+
 static bool
 sp_in_stk_seg(uintptr_t sp, stk_seg *stk) {
     // Not positive these bounds for sp are correct.  I think that the first
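
new_big_stack() above either reuses the task's cached big segment or borrows one from the scheduler loop, then splices it into the task's doubly linked chain of stack segments directly above the current stk, after first walking the chain to make sure the task is not already running on it. The standalone sketch below uses a stripped-down stand-in for stk_seg and only exercises that pointer surgery, to show why both neighbours' prev/next links have to be patched.

#include <cassert>
#include <cstddef>

// Stripped-down stand-in for the runtime's stk_seg.
struct stk_seg {
    stk_seg *prev;
    stk_seg *next;
    bool is_big;
};

// Insert `big` between `top` and whatever used to follow `top`, mirroring the
// prev/next updates in rust_task::new_big_stack() (which then makes `big` the
// task's current segment with `stk = big_stack`).
static void splice_above(stk_seg *top, stk_seg *big) {
    big->next = top->next;
    if (big->next)
        big->next->prev = big;
    big->prev = top;
    top->next = big;
}

int main() {
    stk_seg small = { NULL, NULL, false };  // current segment
    stk_seg spare = { NULL, NULL, false };  // leftover spare above it
    stk_seg big   = { NULL, NULL, true  };  // borrowed big segment

    small.next = &spare;
    spare.prev = &small;

    splice_above(&small, &big);

    // The big segment now sits between the current segment and the spare one.
    assert(small.next == &big && big.prev == &small);
    assert(big.next == &spare && spare.prev == &big);
    return 0;
}
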
@@ -602,9 +654,16 @@ rust_task::delete_all_stacks() {
     assert(stk->next == NULL);
     while (stk != NULL) {
         stk_seg *prev = stk->prev;
-        free_stack(stk);
+
+        if (stk->is_big)
+            sched_loop->return_big_stack(stk);
+        else
+            free_stack(stk);
+
         stk = prev;
     }
+
+    big_stack = NULL;
 }
 
 /*
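
Both cleanup paths touched by this patch now follow the same release rule: segments the task allocated for itself are freed, while a borrowed big segment is returned to the scheduler loop for reuse. The sketch below models that rule with stub types; sched_loop_stub, release_chain and the counters are inventions for illustration, not runtime API.

#include <cassert>
#include <cstddef>

struct stk_seg {
    stk_seg *prev;
    bool is_big;
};

// Stand-in for rust_sched_loop: just counts how many big segments come back.
struct sched_loop_stub {
    int returned;
    sched_loop_stub() : returned(0) {}
    void return_big_stack(stk_seg *) { ++returned; }
};

static int freed = 0;
static void free_stack(stk_seg *) { ++freed; }  // real code unmaps the segment

// Release every segment from `stk` downwards, the way delete_all_stacks()
// does after this change: big segments go back to the scheduler, the rest
// are freed by the task.
static void release_chain(sched_loop_stub *sched_loop, stk_seg *stk) {
    while (stk != NULL) {
        stk_seg *prev = stk->prev;
        if (stk->is_big)
            sched_loop->return_big_stack(stk);
        else
            free_stack(stk);
        stk = prev;
    }
}

int main() {
    stk_seg base = { NULL,  false };
    stk_seg big  = { &base, true  };

    sched_loop_stub sched;
    release_chain(&sched, &big);

    assert(sched.returned == 1);  // the shared big segment was handed back
    assert(freed == 1);           // only the task-owned segment was freed
    return 0;
}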