From 62bc6b51136760b1d4f4b691aaa089bdb9bf0af5 Mon Sep 17 00:00:00 2001
From: Eric Holk
Date: Sat, 23 Jul 2011 19:03:02 -0700
Subject: Per-thread scheduling. Closes #682.

Tasks are spawned on a random thread. Currently they stay there, but we
should add task migration and load balancing in the future. This should
dramatically improve our task performance benchmarks.
---
 src/rt/circular_buffer.cpp | 84 +++++++++++++++++++++++-----------------------
 1 file changed, 42 insertions(+), 42 deletions(-)

(limited to 'src/rt/circular_buffer.cpp')

diff --git a/src/rt/circular_buffer.cpp b/src/rt/circular_buffer.cpp
index b645a08e563..aa0127d8c25 100644
--- a/src/rt/circular_buffer.cpp
+++ b/src/rt/circular_buffer.cpp
@@ -5,7 +5,6 @@
 #include "rust_internal.h"
 
 circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
-    sched(kernel->sched),
     kernel(kernel),
     unit_sz(unit_sz),
     _buffer_sz(initial_size()),
@@ -13,26 +12,26 @@ circular_buffer::circular_buffer(rust_kernel *kernel, size_t unit_sz) :
     _unread(0),
     _buffer((uint8_t *)kernel->malloc(_buffer_sz, "circular_buffer")) {
 
-    A(sched, unit_sz, "Unit size must be larger than zero.");
+    // A(sched, unit_sz, "Unit size must be larger than zero.");
 
-    DLOG(sched, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
-         "-> circular_buffer=0x%" PRIxPTR,
-         _buffer_sz, _unread, this);
+    // DLOG(sched, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
+    //      "-> circular_buffer=0x%" PRIxPTR,
+    //      _buffer_sz, _unread, this);
 
-    A(sched, _buffer, "Failed to allocate buffer.");
+    // A(sched, _buffer, "Failed to allocate buffer.");
 }
 
 circular_buffer::~circular_buffer() {
-    DLOG(sched, mem, "~circular_buffer 0x%" PRIxPTR, this);
-    I(sched, _buffer);
-    W(sched, _unread == 0,
-      "freeing circular_buffer with %d unread bytes", _unread);
+    // DLOG(sched, mem, "~circular_buffer 0x%" PRIxPTR, this);
+    // I(sched, _buffer);
+    // W(sched, _unread == 0,
+    //   "freeing circular_buffer with %d unread bytes", _unread);
 
     kernel->free(_buffer);
 }
 
 size_t
 circular_buffer::initial_size() {
-    I(sched, unit_sz > 0);
+    // I(sched, unit_sz > 0);
     return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz;
 }
 
@@ -41,8 +40,8 @@ circular_buffer::initial_size() {
  */
 void
 circular_buffer::transfer(void *dst) {
-    I(sched, dst);
-    I(sched, _unread <= _buffer_sz);
+    // I(sched, dst);
+    // I(sched, _unread <= _buffer_sz);
 
     uint8_t *ptr = (uint8_t *) dst;
 
@@ -54,13 +53,13 @@ circular_buffer::transfer(void *dst) {
     } else {
         head_sz = _buffer_sz - _next;
     }
-    I(sched, _next + head_sz <= _buffer_sz);
+    // I(sched, _next + head_sz <= _buffer_sz);
     memcpy(ptr, _buffer + _next, head_sz);
 
     // Then copy any other items from the beginning of the buffer
-    I(sched, _unread >= head_sz);
+    // I(sched, _unread >= head_sz);
     size_t tail_sz = _unread - head_sz;
-    I(sched, head_sz + tail_sz <= _buffer_sz);
+    // I(sched, head_sz + tail_sz <= _buffer_sz);
     memcpy(ptr + head_sz, _buffer, tail_sz);
 }
 
@@ -70,21 +69,21 @@ circular_buffer::transfer(void *dst) {
  */
 void
 circular_buffer::enqueue(void *src) {
-    I(sched, src);
-    I(sched, _unread <= _buffer_sz);
-    I(sched, _buffer);
+    // I(sched, src);
+    // I(sched, _unread <= _buffer_sz);
+    // I(sched, _buffer);
 
     // Grow if necessary.
     if (_unread == _buffer_sz) {
         grow();
     }
 
-    DLOG(sched, mem, "circular_buffer enqueue "
-         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
-         _unread, _next, _buffer_sz, unit_sz);
+    // DLOG(sched, mem, "circular_buffer enqueue "
+    //      "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
+    //      _unread, _next, _buffer_sz, unit_sz);
 
-    I(sched, _unread < _buffer_sz);
-    I(sched, _unread + unit_sz <= _buffer_sz);
+    // I(sched, _unread < _buffer_sz);
+    // I(sched, _unread + unit_sz <= _buffer_sz);
 
     // Copy data
     size_t dst_idx = _next + _unread;
@@ -92,15 +91,15 @@ circular_buffer::enqueue(void *src) {
     if (dst_idx >= _buffer_sz) {
         dst_idx -= _buffer_sz;
 
-        I(sched, _next >= unit_sz);
-        I(sched, dst_idx <= _next - unit_sz);
+        // I(sched, _next >= unit_sz);
+        // I(sched, dst_idx <= _next - unit_sz);
     }
-    I(sched, dst_idx + unit_sz <= _buffer_sz);
+    // I(sched, dst_idx + unit_sz <= _buffer_sz);
     memcpy(&_buffer[dst_idx], src, unit_sz);
     _unread += unit_sz;
-    DLOG(sched, mem, "circular_buffer pushed data at index: %d", dst_idx);
+    // DLOG(sched, mem, "circular_buffer pushed data at index: %d", dst_idx);
 }
 
 /**
@@ -110,17 +109,17 @@ circular_buffer::enqueue(void *src) {
  */
 void
 circular_buffer::dequeue(void *dst) {
-    I(sched, unit_sz > 0);
-    I(sched, _unread >= unit_sz);
-    I(sched, _unread <= _buffer_sz);
-    I(sched, _buffer);
+    // I(sched, unit_sz > 0);
+    // I(sched, _unread >= unit_sz);
+    // I(sched, _unread <= _buffer_sz);
+    // I(sched, _buffer);
 
-    DLOG(sched, mem,
-         "circular_buffer dequeue "
-         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
-         _unread, _next, _buffer_sz, unit_sz);
+    // DLOG(sched, mem,
+    //      "circular_buffer dequeue "
+    //      "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
+    //      _unread, _next, _buffer_sz, unit_sz);
 
-    I(sched, _next + unit_sz <= _buffer_sz);
+    // I(sched, _next + unit_sz <= _buffer_sz);
     if (dst != NULL) {
         memcpy(dst, &_buffer[_next], unit_sz);
     }
@@ -140,8 +139,9 @@ circular_buffer::dequeue(void *dst) {
 void
 circular_buffer::grow() {
     size_t new_buffer_sz = _buffer_sz * 2;
-    I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
-    DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
+    // I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
+    // DLOG(sched, mem, "circular_buffer is growing to %d bytes",
+    //      new_buffer_sz);
     void *new_buffer = kernel->malloc(new_buffer_sz,
                                       "new circular_buffer (grow)");
     transfer(new_buffer);
@@ -154,9 +154,9 @@ circular_buffer::grow() {
 void
 circular_buffer::shrink() {
     size_t new_buffer_sz = _buffer_sz / 2;
-    I(sched, initial_size() <= new_buffer_sz);
-    DLOG(sched, mem, "circular_buffer is shrinking to %d bytes",
-         new_buffer_sz);
+    // I(sched, initial_size() <= new_buffer_sz);
+    // DLOG(sched, mem, "circular_buffer is shrinking to %d bytes",
+    //      new_buffer_sz);
     void *new_buffer = kernel->malloc(new_buffer_sz,
                                       "new circular_buffer (shrink)");
     transfer(new_buffer);
-- 
cgit 1.4.1-3-g733a5
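
The commit message above only sketches the new placement policy: each spawned
task is assigned to a scheduler thread chosen at random and stays on that
thread, with migration and load balancing deferred to future work. Below is a
minimal, self-contained C++ sketch of that spawn-time placement idea; the
scheduler/spawn_task names, the fixed thread count, and the use of rand() are
illustrative assumptions and do not reflect the runtime API touched by this
patch.

    #include <cstdio>
    #include <cstdlib>
    #include <ctime>
    #include <string>
    #include <vector>

    // Hypothetical, simplified model of "pick a random scheduler thread at
    // spawn time and leave the task there"; not the real runtime structures.
    struct task {
        std::string name;
    };

    struct scheduler {
        std::vector<task> run_queue;   // tasks pinned to this scheduler thread
    };

    // Choose a scheduler uniformly at random; the task is never migrated later.
    static scheduler *choose_scheduler(std::vector<scheduler> &scheds) {
        size_t idx = (size_t)std::rand() % scheds.size();
        return &scheds[idx];
    }

    static void spawn_task(std::vector<scheduler> &scheds,
                           const std::string &name) {
        scheduler *s = choose_scheduler(scheds);
        task t;
        t.name = name;
        s->run_queue.push_back(t);
    }

    int main() {
        std::srand((unsigned)std::time(NULL));
        std::vector<scheduler> scheds(4);  // e.g. one scheduler per hw thread

        for (int i = 0; i < 8; ++i) {
            spawn_task(scheds, "task-" + std::to_string(i));
        }

        // With random, permanent placement the queues can end up uneven,
        // which is why the message mentions adding load balancing later.
        for (size_t i = 0; i < scheds.size(); ++i) {
            std::printf("scheduler %zu: %zu tasks\n",
                        i, scheds[i].run_queue.size());
        }
        return 0;
    }

Random placement keeps spawn cheap (no cross-thread coordination beyond the
enqueue), at the cost of possibly uneven queues until load balancing exists.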