author     Koichi Sasada <[email protected]>  2025-05-27 03:58:04 +0900
committer  Koichi Sasada <[email protected]>  2025-05-31 04:01:33 +0900
commit     ef2bb61018cd9ccb5b61a3d91911e04a773da4a7 (patch)
tree       fcf3685efc9d3efaa1a66236ad17d2a72b7c5144 /thread_pthread_mn.c
parent     d2a1ad00cbba41e22c11abf2948c23cd8d68f565 (diff)
`Ractor::Port`
* Added `Ractor::Port`
  * `Ractor::Port#receive` (supports multiple threads)
  * `Ractor::Port#close`
  * `Ractor::Port#closed?`
* Added some methods
  * `Ractor#join`
  * `Ractor#value`
  * `Ractor#monitor`
  * `Ractor#unmonitor`
* Removed some methods
  * `Ractor#take`
  * `Ractor.yield`
* Changed the spec
  * `Ractor.select`

You can wait for multiple sequences of messages with `Ractor::Port`.

```ruby
ports = 3.times.map{ Ractor::Port.new }

ports.map.with_index do |port, ri|
  Ractor.new port, ri do |port, ri|
    3.times{|i| port << "r#{ri}-#{i}"}
  end
end

p ports.each{|port| pp 3.times.map{port.receive}}
```

In this example, we use 3 ports, and 3 Ractors send messages to them
respectively. We can receive a series of messages from each port.

You can use `Ractor#value` to get the last value of a Ractor's block:

```ruby
result = Ractor.new do
  heavy_task()
end.value
```

You can wait for the termination of a Ractor with `Ractor#join` like this:

```ruby
Ractor.new do
  some_task()
end.join
```

`#value` and `#join` are similar to `Thread#value` and `Thread#join`.
To implement `#join`, `Ractor#monitor` (and `Ractor#unmonitor`) is
introduced (see the sketch below).

This commit changes the `Ractor.select()` method. It now only accepts
ports or Ractors, and returns when a port receives a message or a
Ractor terminates (see the example below).

We removed `Ractor.yield` and `Ractor#take` because:

* `Ractor::Port` supports most similar use cases in a simpler manner.
* Removing them significantly simplifies the code.

We also changed the internal thread scheduler code (thread_pthread.c):

* During barrier synchronization, we keep the `ractor_sched` lock to
  avoid deadlocks. This lock is released by `rb_ractor_sched_barrier_end()`,
  which is called at the end of operations that require the barrier.
* Fix potential deadlock issues by checking interrupts just before
  setting the UBF.

https://bugs.ruby-lang.org/issues/21262
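As a minimal sketch of the `#monitor`-based design behind `#join` (not the
actual implementation): this assumes `monitor(port)` registers the given port
to receive a notification message when the Ractor terminates, and that
`unmonitor(port)` removes that registration. The helper `join_via_monitor`
is a hypothetical name used only for illustration.

```ruby
# Hypothetical sketch: a join built on Ractor#monitor.
# Assumption: ractor.monitor(port) arranges for `port` to receive a
# notification message once `ractor` terminates; the message's exact
# value is not relied upon here.
def join_via_monitor(ractor)
  port = Ractor::Port.new
  ractor.monitor(port)    # ask for a termination notice on `port`
  port.receive            # block until the notice arrives
  ractor
ensure
  ractor.unmonitor(port) if port  # assumed cleanup counterpart
end

join_via_monitor(Ractor.new { :done })
```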
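And a hedged example of the revised `Ractor.select` described above, which
waits on ports and Ractors together. The `[ready_object, value]` return shape
used here is an assumption for illustration, not a confirmed detail of the API.

```ruby
# Sketch: wait on a port and a Ractor at the same time.
port = Ractor::Port.new
r = Ractor.new(port) do |p|
  p << :progress   # wakes a select waiting on the port
  :finished        # the Ractor's result, available on termination
end

# Assumed return shape: the ready port or Ractor, paired with the
# received message or the terminated Ractor's result.
ready, value = Ractor.select(port, r)
pp value  #=> :progress or :finished, whichever became ready first
```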
Notes:
    Merged: https://github.com/ruby/ruby/pull/13445
Diffstat (limited to 'thread_pthread_mn.c')
-rw-r--r--  thread_pthread_mn.c  12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/thread_pthread_mn.c b/thread_pthread_mn.c
index cc0dae3b70..4a671cf3a1 100644
--- a/thread_pthread_mn.c
+++ b/thread_pthread_mn.c
@@ -72,7 +72,7 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd,
RUBY_DEBUG_LOG("wait fd:%d", fd);
RB_VM_SAVE_MACHINE_CONTEXT(th);
- setup_ubf(th, ubf_event_waiting, (void *)th);
+ ubf_set(th, ubf_event_waiting, (void *)th);
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
@@ -102,7 +102,7 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd,
timer_thread_cancel_waiting(th);
}
- setup_ubf(th, NULL, NULL); // TODO: maybe it is already NULL?
+ ubf_clear(th); // TODO: maybe it is already NULL?
th->status = THREAD_RUNNABLE;
}
@@ -450,7 +450,7 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
// RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
- thread_sched_set_lock_owner(sched, th);
+ thread_sched_set_locked(sched, th);
thread_sched_add_running_thread(TH_SCHED(th), th);
thread_sched_unlock(sched, th);
{
@@ -475,13 +475,11 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
coroutine_transfer0(self, nt->nt_context, true);
}
else {
- rb_vm_t *vm = th->vm;
- bool has_ready_ractor = vm->ractor.sched.grq_cnt > 0; // at least this ractor is not queued
rb_thread_t *next_th = sched->running;
- if (!has_ready_ractor && next_th && !next_th->nt) {
+ if (next_th && !next_th->nt) {
// switch to the next thread
- thread_sched_set_lock_owner(sched, NULL);
+ thread_sched_set_unlocked(sched, NULL);
th->sched.finished = true;
thread_sched_switch0(th->sched.context, next_th, nt, true);
}