// -*- c++ -*-
#ifndef RUST_KERNEL_H
#define RUST_KERNEL_H

#include <map>
#include <vector>

#include "memory_region.h"
#include "rust_log.h"
#include "rust_sched_reaper.h"
#include "util/hash_map.h"

struct rust_task_thread;
class rust_scheduler;
class rust_port;

typedef intptr_t rust_sched_id;
typedef intptr_t rust_task_id;
typedef intptr_t rust_port_id;

typedef std::map<rust_sched_id, rust_scheduler*> sched_map;
/**
* A global object shared by all thread domains. Most of the data structures
* in this class are synchronized since they are accessed from multiple
* threads.
*/
class rust_kernel {
    memory_region _region;
    rust_log _log;

    // The next task id
    rust_task_id max_task_id;

    // Protects max_port_id and port_table
    lock_and_signal port_lock;
    // The next port id
    rust_port_id max_port_id;
    hash_map<rust_port_id, rust_port *> port_table;

    // Protects rval
    lock_and_signal rval_lock;
    // The exit status, set by set_exit_status() and returned by
    // wait_for_exit()
    int rval;

    // Protects max_sched_id, sched_table, and join_list
    lock_and_signal sched_lock;
    // The next scheduler id
    rust_sched_id max_sched_id;
    // A map from scheduler ids to schedulers. When this is empty
    // the kernel terminates.
    sched_map sched_table;
    // A list of scheduler ids that are ready to exit
    std::vector<rust_sched_id> join_list;

    rust_sched_reaper sched_reaper;
public:
    struct rust_env *env;

    rust_kernel(rust_env *env);

    void log(uint32_t level, char const *fmt, ...);
    void fatal(char const *fmt, ...);

    void *malloc(size_t size, const char *tag);
    void *realloc(void *mem, size_t size);
    void free(void *mem);
    memory_region *region() { return &_region; }

    void fail();

    rust_sched_id create_scheduler(size_t num_threads);
    rust_scheduler* get_scheduler_by_id(rust_sched_id id);
    // Called by a scheduler to indicate that it is terminating
    void release_scheduler_id(rust_sched_id id);
    void wait_for_schedulers();
    int wait_for_exit();

#ifdef __WIN32__
    void win32_require(LPCTSTR fn, BOOL ok);
#endif

    // Returns a fresh, unique task id
    rust_task_id generate_task_id();

    // Assigns the port a fresh id and adds it to the port table
    rust_port_id register_port(rust_port *port);
    rust_port *get_port_by_id(rust_port_id id);
    // Removes the port with the given id from the port table
    void release_port_id(rust_port_id pid);

    void set_exit_status(int code);
};
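
// Rough usage sketch (illustrative only; the actual startup sequence lives
// in the runtime's entry point, not in this header, and `env` and
// `num_threads` stand in for values supplied by the embedder):
//
//   rust_kernel *kernel = new rust_kernel(env);
//   rust_sched_id id = kernel->create_scheduler(num_threads);
//   rust_scheduler *sched = kernel->get_scheduler_by_id(id);
//   // ...spawn the root task on `sched` and start it running...
//   int ret = kernel->wait_for_exit();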
// Base class for objects allocated from the kernel's memory region.
// T is expected to have a `kernel` field pointing at the owning
// rust_kernel; operator delete uses it to route the free back through
// the kernel.
template <typename T> struct kernel_owned {
    inline void *operator new(size_t size, rust_kernel *kernel,
                              const char *tag);

    void operator delete(void *ptr) {
        ((T *)ptr)->kernel->free(ptr);
    }
};

template <typename T>
inline void *kernel_owned<T>::operator new(size_t size, rust_kernel *kernel,
                                           const char *tag) {
    return kernel->malloc(size, tag);
}
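
// Example (hypothetical type, for illustration only): a kernel-owned object
// carries a `kernel` pointer and is allocated with the placement form of
// operator new declared above, so both allocation and deletion go through
// the kernel's memory region.
//
//   struct example_obj : public kernel_owned<example_obj> {
//       rust_kernel *kernel;
//       example_obj(rust_kernel *k) : kernel(k) { }
//   };
//
//   example_obj *obj = new (kernel, "example_obj") example_obj(kernel);
//   delete obj;  // frees through kernel->free() via kernel_owned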
#endif /* RUST_KERNEL_H */