summaryrefslogtreecommitdiff
path: root/vm_sync.c
diff options
context:
space:
mode:
authorKoichi Sasada <[email protected]>2023-04-10 10:53:13 +0900
committerKoichi Sasada <[email protected]>2023-10-12 14:47:01 +0900
commitbe1bbd5b7d40ad863ab35097765d3754726bbd54 (patch)
tree2995a0859bea1d6b2903dcd324f41869dbef14a1 /vm_sync.c
parent096ee0648e215915a3019c2cd68ba220d94eca12 (diff)
M:N thread scheduler for Ractors
This patch introduces an M:N thread scheduler for the Ractor system. In general, an M:N thread scheduler employs N native threads (OS threads) to manage M user-level threads (Ruby threads in this case). On the Ruby interpreter, 1 native thread is provided per Ractor and all Ruby threads are managed by that native thread. Since Ruby 1.9, the interpreter has used a 1:1 thread scheduler, which means 1 Ruby thread has 1 native thread. The M:N scheduler changes this strategy. Because of compatibility issues (and stability issues in the implementation), the main Ractor doesn't use the M:N scheduler by default. In other words, threads on the main Ractor will be managed with the 1:1 thread scheduler. There are additional settings via environment variables: `RUBY_MN_THREADS=1` enables the M:N thread scheduler on the main Ractor. Note that non-main Ractors use the M:N scheduler without this configuration. With this configuration, single-Ractor applications run threads on an M:1 thread scheduler (green threads, user-level threads). `RUBY_MAX_CPU=n` specifies the maximum number of native threads for the M:N scheduler (default: 8). This patch will be reverted soon if non-trivial issues are found. [Bug #19842]
Diffstat (limited to 'vm_sync.c')
-rw-r--r--vm_sync.c112
1 files changed, 23 insertions, 89 deletions
diff --git a/vm_sync.c b/vm_sync.c
index 01c8505344..4bef232f20 100644
--- a/vm_sync.c
+++ b/vm_sync.c
@@ -5,7 +5,8 @@
#include "ractor_core.h"
#include "vm_debug.h"
-static bool vm_barrier_finish_p(rb_vm_t *vm);
+void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
+void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
static bool
vm_locked(rb_vm_t *vm)
@@ -52,56 +53,32 @@ vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsign
// locking ractor and acquire VM lock will cause deadlock
VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif
-
// lock
rb_native_mutex_lock(&vm->ractor.sync.lock);
VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
- vm->ractor.sync.lock_owner = cr;
+ VM_ASSERT(vm->ractor.sync.lock_rec == 0);
+
+#ifdef RUBY_THREAD_PTHREAD_H
+ if (!no_barrier &&
+ cr->threads.sched.running != NULL // ractor has running threads.
+ ) {
+ while (vm->ractor.sched.barrier_waiting) {
+ RUBY_DEBUG_LOG("barrier serial:%u", vm->ractor.sched.barrier_serial);
+ rb_ractor_sched_barrier_join(vm, cr);
+ }
+ }
+#else
if (!no_barrier) {
- // barrier
while (vm->ractor.sync.barrier_waiting) {
- unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
- rb_thread_t *th = GET_THREAD();
- bool running;
-
- RB_VM_SAVE_MACHINE_CONTEXT(th);
-
- if (rb_ractor_status_p(cr, ractor_running)) {
- rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
- running = true;
- }
- else {
- running = false;
- }
- VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));
-
- if (vm_barrier_finish_p(vm)) {
- RUBY_DEBUG_LOG("wakeup barrier owner");
- rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
- }
- else {
- RUBY_DEBUG_LOG("wait for barrier finish");
- }
-
- // wait for restart
- while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
- vm->ractor.sync.lock_owner = NULL;
- rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
- VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
- vm->ractor.sync.lock_owner = cr;
- }
-
- RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");
-
- if (running) {
- rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
- }
+ rb_ractor_sched_barrier_join(vm, cr);
}
}
+#endif
VM_ASSERT(vm->ractor.sync.lock_rec == 0);
- VM_ASSERT(vm->ractor.sync.lock_owner == cr);
+ VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
+ vm->ractor.sync.lock_owner = cr;
}
vm->ractor.sync.lock_rec++;
@@ -114,8 +91,9 @@ vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsign
static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
{
- RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u", vm->ractor.sync.lock_rec,
- (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner));
+ RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
+ (unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner),
+ vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");
ASSERT_vm_locking();
VM_ASSERT(vm->ractor.sync.lock_rec > 0);
@@ -216,18 +194,6 @@ rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long ms
vm_cond_wait(vm, cond, msec);
}
-static bool
-vm_barrier_finish_p(rb_vm_t *vm)
-{
- RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
- vm->ractor.sync.barrier_cnt,
- vm->ractor.cnt,
- vm->ractor.blocking_cnt);
-
- VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
- return vm->ractor.blocking_cnt == vm->ractor.cnt;
-}
-
void
rb_vm_barrier(void)
{
@@ -239,45 +205,13 @@ rb_vm_barrier(void)
}
else {
rb_vm_t *vm = GET_VM();
- VM_ASSERT(vm->ractor.sync.barrier_waiting == false);
+ VM_ASSERT(!vm->ractor.sched.barrier_waiting);
ASSERT_vm_locking();
-
rb_ractor_t *cr = vm->ractor.sync.lock_owner;
VM_ASSERT(cr == GET_RACTOR());
VM_ASSERT(rb_ractor_status_p(cr, ractor_running));
- vm->ractor.sync.barrier_waiting = true;
-
- RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
- vm->ractor.sync.barrier_cnt,
- vm->ractor.cnt,
- vm->ractor.blocking_cnt);
-
- rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
-
- // send signal
- rb_ractor_t *r = 0;
- ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
- if (r != cr) {
- rb_ractor_vm_barrier_interrupt_running_thread(r);
- }
- }
-
- // wait
- while (!vm_barrier_finish_p(vm)) {
- rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
- }
-
- RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);
-
- rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
-
- vm->ractor.sync.barrier_waiting = false;
- vm->ractor.sync.barrier_cnt++;
-
- ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
- rb_native_cond_signal(&r->barrier_wait_cond);
- }
+ rb_ractor_sched_barrier_start(vm, cr);
}
}