/**********************************************************************

  Copyright (C) 2004-2007 Koichi Sasada

 **********************************************************************/
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/gc.h"
#include "internal/sanitizers.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if defined(HAVE_FCNTL_H)
#include <fcntl.h>
#elif defined(HAVE_SYS_FCNTL_H)
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#if defined(__HAIKU__)
#include <kernel/OS.h>
#endif
#include <sys/syscall.h> /* for SYS_gettid */
# include <AvailabilityMacros.h>

#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
#else
# define USE_EVENTFD (0)
#endif
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
#else
static const void *const condattr_monotonic = NULL;
#endif
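/* condattr_monotonic selects CLOCK_MONOTONIC for condition variables when the
 * platform supports it.  rb_native_cond_initialize() and native_cond_timeout()
 * below consult it, so timed waits are not disturbed by wall-clock jumps. */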
#ifndef HAVE_SYS_EVENT_H
#define HAVE_SYS_EVENT_H 0
#endif

#ifndef HAVE_SYS_EPOLL_H
#define HAVE_SYS_EPOLL_H 0
#endif

// force setting for debug
// #undef HAVE_SYS_EPOLL_H
// #define HAVE_SYS_EPOLL_H 0

#ifndef USE_MN_THREADS
  #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
    // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
    // On COROUTINE_PTHREAD_CONTEXT, M:N threads are not worth using.
    #define USE_MN_THREADS 0
  #elif HAVE_SYS_EPOLL_H
    #include <sys/epoll.h>
    #define USE_MN_THREADS 1
  #elif HAVE_SYS_EVENT_H
    #include <sys/event.h>
    #define USE_MN_THREADS 1
  #else
    #define USE_MN_THREADS 0
  #endif
#endif
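/* In short: the M:N scheduler needs an fd-multiplexing primitive (epoll on
 * Linux, kqueue on BSDs/macOS).  Without one, or on Emscripten /
 * pthread-context coroutines, USE_MN_THREADS stays 0 and only 1:1 native
 * threads are used. */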
// native thread wrappers

#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, void *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}
void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}
void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_init(cond, condattr_monotonic);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    int r = pthread_cond_destroy(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}
/*
 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
 * EAGAIN after retrying 8192 times. You can see this in the following source:
 *
 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 *
 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 * need to retry until the pthread functions stop returning EAGAIN.
 */

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}
void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("rb_native_cond_broadcast", r);
    }
}
void
rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
{
    int r;
    struct timespec ts;

    /*
     * An old Linux may return EINTR. Even though POSIX says
     *   "These functions shall not return an error code of [EINTR]".
     *   http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
     * Let's hide it from arch generic code.
     */
    do {
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}
static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);
    }
    else {
        struct timespec ts;

        rb_timespec_now(&ts);
        return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
    }
}
void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
{
    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);
}
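/* Usage sketch (illustrative, not part of the original file): wait on `cond`
 * for at most 100 ms while holding `lock`; clock jumps and EINTR are handled
 * by the helpers above.
 *
 *   rb_native_mutex_lock(&lock);
 *   if (!ready) rb_native_cond_timedwait(&cond, &lock, 100);
 *   rb_native_mutex_unlock(&lock);
 */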
static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
#if 0
static const char *
event_name(rb_event_flag_t event)
{
    switch (event) {
      case RUBY_INTERNAL_THREAD_EVENT_STARTED:   return "STARTED";
      case RUBY_INTERNAL_THREAD_EVENT_READY:     return "READY";
      case RUBY_INTERNAL_THREAD_EVENT_RESUMED:   return "RESUMED";
      case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED: return "SUSPENDED";
      case RUBY_INTERNAL_THREAD_EVENT_EXITED:    return "EXITED";
    }
    return "unknown";
}

#define RB_INTERNAL_THREAD_HOOK(event, th) \
    if (UNLIKELY(rb_internal_thread_event_hooks)) { \
        fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
        rb_thread_execute_hooks(event, th); \
    }
#else
#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
#endif
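/* The scheduler code below reports each state transition through this macro,
 * e.g.
 *
 *   RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
 *
 * which is a cheap no-op branch unless at least one internal thread event
 * hook has been registered (rb_internal_thread_event_hooks != NULL). */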
static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */

#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
# define USE_UBF_LIST 1
#endif

static void threadptr_trap_interrupt(rb_thread_t *);

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);

static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
static void timer_thread_wakeup(void);
static void timer_thread_wakeup_locked(rb_vm_t *vm);
static void timer_thread_wakeup_force(void);
static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
static void coroutine_transfer0(struct coroutine_context *transfer_from,
                                struct coroutine_context *transfer_to, bool to_dead);
#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)

static bool
th_has_dedicated_nt(const rb_thread_t *th)
{
    // TODO: th->has_dedicated_nt
    return th->nt->dedicated > 0;
}
RBIMPL_ATTR_MAYBE_UNUSED()
static void
thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
{
    fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
    rb_thread_t *th;
    int i = 0;
    ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
        i++; if (i>10) rb_bug("too many");
        fprintf(stderr, "  ready:%d (%sNT:%d)\n", th->serial,
                th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
                th->nt ? (int)th->nt->serial : -1);
    }
}
#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)

RBIMPL_ATTR_MAYBE_UNUSED()
static void
ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
{
    rb_ractor_t *r;

    fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);

    int i = 0;
    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        i++;
        if (i>10) rb_bug("!!");
        fprintf(stderr, "  %d ready:%d\n", i, rb_ractor_id(r));
    }
}
#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)

static void
thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    rb_native_mutex_lock(&sched->lock_);

#if VM_CHECK_MODE > 0
    RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
    VM_ASSERT(sched->lock_owner == NULL);
    sched->lock_owner = th;
#else
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#endif
}

static void
thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));

#if VM_CHECK_MODE > 0
    VM_ASSERT(sched->lock_owner == th);
    sched->lock_owner = NULL;
#endif

    rb_native_mutex_unlock(&sched->lock_);
}

static void
thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

#if VM_CHECK_MODE > 0
    sched->lock_owner = th;
#endif
}
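/* Locking discipline used throughout this file: scheduler state
 * (sched->running, sched->readyq, ...) is only touched between
 *
 *   thread_sched_lock(sched, th);
 *   ...
 *   thread_sched_unlock(sched, th);
 *
 * and code that blocks while still holding lock_ (e.g.
 * thread_sched_wait_running_turn) temporarily clears the recorded owner with
 * thread_sched_set_lock_owner(sched, NULL). */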
static void
ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
    VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);

#if VM_CHECK_MODE > 0
    if (th) {
        VM_ASSERT(sched->lock_owner == th);
    }
    else {
        VM_ASSERT(sched->lock_owner != NULL);
    }
#endif
}
428 #define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
429 #define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
431 RBIMPL_ATTR_MAYBE_UNUSED()
433 rb_ractor_serial(const rb_ractor_t
*r
)
436 return rb_ractor_id(r
);
444 ractor_sched_set_locked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
446 #if VM_CHECK_MODE > 0
447 VM_ASSERT(vm
->ractor
.sched
.lock_owner
== NULL
);
448 VM_ASSERT(vm
->ractor
.sched
.locked
== false);
450 vm
->ractor
.sched
.lock_owner
= cr
;
451 vm
->ractor
.sched
.locked
= true;
456 ractor_sched_set_unlocked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
458 #if VM_CHECK_MODE > 0
459 VM_ASSERT(vm
->ractor
.sched
.locked
);
460 VM_ASSERT(vm
->ractor
.sched
.lock_owner
== cr
);
462 vm
->ractor
.sched
.locked
= false;
463 vm
->ractor
.sched
.lock_owner
= NULL
;
468 ractor_sched_lock_(rb_vm_t
*vm
, rb_ractor_t
*cr
, const char *file
, int line
)
470 rb_native_mutex_lock(&vm
->ractor
.sched
.lock
);
473 RUBY_DEBUG_LOG2(file
, line
, "cr:%u prev_owner:%u", rb_ractor_serial(cr
), rb_ractor_serial(vm
->ractor
.sched
.lock_owner
));
475 RUBY_DEBUG_LOG2(file
, line
, "cr:%u", rb_ractor_serial(cr
));
478 ractor_sched_set_locked(vm
, cr
);
482 ractor_sched_unlock_(rb_vm_t
*vm
, rb_ractor_t
*cr
, const char *file
, int line
)
484 RUBY_DEBUG_LOG2(file
, line
, "cr:%u", rb_ractor_serial(cr
));
486 ractor_sched_set_unlocked(vm
, cr
);
487 rb_native_mutex_unlock(&vm
->ractor
.sched
.lock
);
491 ASSERT_ractor_sched_locked(rb_vm_t
*vm
, rb_ractor_t
*cr
)
493 VM_ASSERT(rb_native_mutex_trylock(&vm
->ractor
.sched
.lock
) == EBUSY
);
494 VM_ASSERT(vm
->ractor
.sched
.locked
);
495 VM_ASSERT(cr
== NULL
|| vm
->ractor
.sched
.lock_owner
== cr
);
498 RBIMPL_ATTR_MAYBE_UNUSED()
500 ractor_sched_running_threads_contain_p(rb_vm_t
*vm
, rb_thread_t
*th
)
503 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, rth
, sched
.node
.running_threads
) {
504 if (rth
== th
) return true;
509 RBIMPL_ATTR_MAYBE_UNUSED()
511 ractor_sched_running_threads_size(rb_vm_t
*vm
)
515 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, th
, sched
.node
.running_threads
) {
521 RBIMPL_ATTR_MAYBE_UNUSED()
523 ractor_sched_timeslice_threads_size(rb_vm_t
*vm
)
527 ccan_list_for_each(&vm
->ractor
.sched
.timeslice_threads
, th
, sched
.node
.timeslice_threads
) {
533 RBIMPL_ATTR_MAYBE_UNUSED()
535 ractor_sched_timeslice_threads_contain_p(rb_vm_t
*vm
, rb_thread_t
*th
)
538 ccan_list_for_each(&vm
->ractor
.sched
.timeslice_threads
, rth
, sched
.node
.timeslice_threads
) {
539 if (rth
== th
) return true;
544 static void ractor_sched_barrier_join_signal_locked(rb_vm_t
*vm
);
545 static void ractor_sched_barrier_join_wait_locked(rb_vm_t
*vm
, rb_thread_t
*th
);
// Set up which running threads should receive timeslice interrupts from the timer thread.
549 thread_sched_setup_running_threads(struct rb_thread_sched
*sched
, rb_ractor_t
*cr
, rb_vm_t
*vm
,
550 rb_thread_t
*add_th
, rb_thread_t
*del_th
, rb_thread_t
*add_timeslice_th
)
552 #if USE_RUBY_DEBUG_LOG
553 unsigned int prev_running_cnt
= vm
->ractor
.sched
.running_cnt
;
556 rb_thread_t
*del_timeslice_th
;
558 if (del_th
&& sched
->is_running_timeslice
) {
559 del_timeslice_th
= del_th
;
560 sched
->is_running_timeslice
= false;
563 del_timeslice_th
= NULL
;
566 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
567 rb_th_serial(add_th
), rb_th_serial(del_th
),
568 rb_th_serial(add_timeslice_th
), rb_th_serial(del_timeslice_th
));
570 ractor_sched_lock(vm
, cr
);
572 // update running_threads
574 VM_ASSERT(ractor_sched_running_threads_contain_p(vm
, del_th
));
575 VM_ASSERT(del_timeslice_th
!= NULL
||
576 !ractor_sched_timeslice_threads_contain_p(vm
, del_th
));
578 ccan_list_del_init(&del_th
->sched
.node
.running_threads
);
579 vm
->ractor
.sched
.running_cnt
--;
581 if (UNLIKELY(vm
->ractor
.sched
.barrier_waiting
)) {
582 ractor_sched_barrier_join_signal_locked(vm
);
584 sched
->is_running
= false;
588 while (UNLIKELY(vm
->ractor
.sched
.barrier_waiting
)) {
589 RUBY_DEBUG_LOG("barrier-wait");
591 ractor_sched_barrier_join_signal_locked(vm
);
592 ractor_sched_barrier_join_wait_locked(vm
, add_th
);
595 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm
, add_th
));
596 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm
, add_th
));
598 ccan_list_add(&vm
->ractor
.sched
.running_threads
, &add_th
->sched
.node
.running_threads
);
599 vm
->ractor
.sched
.running_cnt
++;
600 sched
->is_running
= true;
601 VM_ASSERT(!vm
->ractor
.sched
.barrier_waiting
);
604 if (add_timeslice_th
) {
605 // update timeslice threads
606 int was_empty
= ccan_list_empty(&vm
->ractor
.sched
.timeslice_threads
);
607 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm
, add_timeslice_th
));
608 ccan_list_add(&vm
->ractor
.sched
.timeslice_threads
, &add_timeslice_th
->sched
.node
.timeslice_threads
);
609 sched
->is_running_timeslice
= true;
611 timer_thread_wakeup_locked(vm
);
615 if (del_timeslice_th
) {
616 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm
, del_timeslice_th
));
617 ccan_list_del_init(&del_timeslice_th
->sched
.node
.timeslice_threads
);
620 VM_ASSERT(ractor_sched_running_threads_size(vm
) == vm
->ractor
.sched
.running_cnt
);
621 VM_ASSERT(ractor_sched_timeslice_threads_size(vm
) <= vm
->ractor
.sched
.running_cnt
);
623 ractor_sched_unlock(vm
, cr
);
625 if (add_th
&& !del_th
&& UNLIKELY(vm
->ractor
.sync
.lock_owner
!= NULL
)) {
626 // it can be after barrier synchronization by another ractor
627 rb_thread_t
*lock_owner
= NULL
;
629 lock_owner
= sched
->lock_owner
;
631 thread_sched_unlock(sched
, lock_owner
);
636 thread_sched_lock(sched
, lock_owner
);
639 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
640 // rb_th_serial(add_th), rb_th_serial(del_th),
641 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
642 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt
, vm
->ractor
.sched
.running_cnt
);
646 thread_sched_add_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
648 ASSERT_thread_sched_locked(sched
, th
);
649 VM_ASSERT(sched
->running
== th
);
651 rb_vm_t
*vm
= th
->vm
;
652 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, th
, NULL
, ccan_list_empty(&sched
->readyq
) ? NULL
: th
);
656 thread_sched_del_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
658 ASSERT_thread_sched_locked(sched
, th
);
660 rb_vm_t
*vm
= th
->vm
;
661 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, NULL
, th
, NULL
);
665 rb_add_running_thread(rb_thread_t
*th
)
667 struct rb_thread_sched
*sched
= TH_SCHED(th
);
669 thread_sched_lock(sched
, th
);
671 thread_sched_add_running_thread(sched
, th
);
673 thread_sched_unlock(sched
, th
);
677 rb_del_running_thread(rb_thread_t
*th
)
679 struct rb_thread_sched
*sched
= TH_SCHED(th
);
681 thread_sched_lock(sched
, th
);
683 thread_sched_del_running_thread(sched
, th
);
685 thread_sched_unlock(sched
, th
);
// Set up the current or next running thread.
// sched->running should be set only by this function.
//
// If th is NULL, there are no running threads.
693 thread_sched_set_running(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
695 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched
->running
), rb_th_serial(th
));
696 VM_ASSERT(sched
->running
!= th
);
701 RBIMPL_ATTR_MAYBE_UNUSED()
703 thread_sched_readyq_contain_p(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
706 ccan_list_for_each(&sched
->readyq
, rth
, sched
.node
.readyq
) {
707 if (rth
== th
) return true;
// Dequeue a thread from the ready queue.
// If the ready queue is empty, return NULL.
//
// Return the dequeued thread (or NULL).
717 thread_sched_deq(struct rb_thread_sched
*sched
)
719 ASSERT_thread_sched_locked(sched
, NULL
);
720 rb_thread_t
*next_th
;
722 VM_ASSERT(sched
->running
!= NULL
);
724 if (ccan_list_empty(&sched
->readyq
)) {
728 next_th
= ccan_list_pop(&sched
->readyq
, rb_thread_t
, sched
.node
.readyq
);
730 VM_ASSERT(sched
->readyq_cnt
> 0);
732 ccan_list_node_init(&next_th
->sched
.node
.readyq
);
735 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th
), sched
->readyq_cnt
);
740 // enqueue ready thread to the ready queue.
742 thread_sched_enq(struct rb_thread_sched
*sched
, rb_thread_t
*ready_th
)
744 ASSERT_thread_sched_locked(sched
, NULL
);
745 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th
), sched
->readyq_cnt
);
747 VM_ASSERT(sched
->running
!= NULL
);
748 VM_ASSERT(!thread_sched_readyq_contain_p(sched
, ready_th
));
750 if (sched
->is_running
) {
751 if (ccan_list_empty(&sched
->readyq
)) {
752 // add sched->running to timeslice
753 thread_sched_setup_running_threads(sched
, ready_th
->ractor
, ready_th
->vm
, NULL
, NULL
, sched
->running
);
757 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th
->vm
, sched
->running
));
760 ccan_list_add_tail(&sched
->readyq
, &ready_th
->sched
.node
.readyq
);
767 thread_sched_wakeup_running_thread(struct rb_thread_sched
*sched
, rb_thread_t
*next_th
, bool will_switch
)
769 ASSERT_thread_sched_locked(sched
, NULL
);
770 VM_ASSERT(sched
->running
== next_th
);
774 if (th_has_dedicated_nt(next_th
)) {
775 RUBY_DEBUG_LOG("pinning th:%u", next_th
->serial
);
776 rb_native_cond_signal(&next_th
->nt
->cond
.readyq
);
780 RUBY_DEBUG_LOG("th:%u is already running.", next_th
->serial
);
785 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th
));
788 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th
));
789 ractor_sched_enq(next_th
->vm
, next_th
->ractor
);
794 RUBY_DEBUG_LOG("no waiting threads%s", "");
798 // waiting -> ready (locked)
800 thread_sched_to_ready_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool wakeup
, bool will_switch
)
RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
804 VM_ASSERT(sched
->running
!= th
);
805 VM_ASSERT(!thread_sched_readyq_contain_p(sched
, th
));
806 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY
, th
);
808 if (sched
->running
== NULL
) {
809 thread_sched_set_running(sched
, th
);
810 if (wakeup
) thread_sched_wakeup_running_thread(sched
, th
, will_switch
);
813 thread_sched_enq(sched
, th
);
// `th` was put into the "waiting" state by `thread_sched_to_waiting`,
// and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
821 RBIMPL_ATTR_MAYBE_UNUSED()
823 thread_sched_to_ready(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
825 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
827 thread_sched_lock(sched
, th
);
829 thread_sched_to_ready_common(sched
, th
, true, false);
831 thread_sched_unlock(sched
, th
);
834 // wait until sched->running is `th`.
836 thread_sched_wait_running_turn(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool can_direct_transfer
)
838 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
840 ASSERT_thread_sched_locked(sched
, th
);
841 VM_ASSERT(th
== rb_ec_thread_ptr(rb_current_ec_noinline()));
843 if (th
!= sched
->running
) {
844 // already deleted from running threads
845 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
847 // wait for execution right
848 rb_thread_t
*next_th
;
849 while((next_th
= sched
->running
) != th
) {
850 if (th_has_dedicated_nt(th
)) {
851 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th
), rb_th_serial(sched
->running
));
853 thread_sched_set_lock_owner(sched
, NULL
);
855 RUBY_DEBUG_LOG("nt:%d cond:%p", th
->nt
->serial
, &th
->nt
->cond
.readyq
);
856 rb_native_cond_wait(&th
->nt
->cond
.readyq
, &sched
->lock_
);
858 thread_sched_set_lock_owner(sched
, th
);
860 RUBY_DEBUG_LOG("(nt) wakeup %s", sched
->running
== th
? "success" : "failed");
861 if (th
== sched
->running
) {
862 rb_ractor_thread_switch(th
->ractor
, th
);
866 // search another ready thread
867 if (can_direct_transfer
&&
868 (next_th
= sched
->running
) != NULL
&&
869 !next_th
->nt
// next_th is running or has dedicated nt
872 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th
), rb_th_serial(next_th
));
874 thread_sched_set_lock_owner(sched
, NULL
);
876 rb_ractor_set_current_ec(th
->ractor
, NULL
);
877 thread_sched_switch(th
, next_th
);
879 thread_sched_set_lock_owner(sched
, th
);
882 // search another ready ractor
883 struct rb_native_thread
*nt
= th
->nt
;
884 native_thread_assign(NULL
, th
);
886 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th
), rb_th_serial(next_th
));
888 thread_sched_set_lock_owner(sched
, NULL
);
890 rb_ractor_set_current_ec(th
->ractor
, NULL
);
891 coroutine_transfer0(th
->sched
.context
, nt
->nt_context
, false);
893 thread_sched_set_lock_owner(sched
, th
);
896 VM_ASSERT(rb_current_ec_noinline() == th
->ec
);
900 VM_ASSERT(th
->nt
!= NULL
);
901 VM_ASSERT(rb_current_ec_noinline() == th
->ec
);
902 VM_ASSERT(th
->sched
.waiting_reason
.flags
== thread_sched_waiting_none
);
904 // add th to running threads
905 thread_sched_add_running_thread(sched
, th
);
908 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
909 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED
, th
);
912 // waiting -> ready -> running (locked)
914 thread_sched_to_running_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
916 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th
), th_has_dedicated_nt(th
));
918 VM_ASSERT(sched
->running
!= th
);
919 VM_ASSERT(th_has_dedicated_nt(th
));
920 VM_ASSERT(GET_THREAD() == th
);
922 native_thread_dedicated_dec(th
->vm
, th
->ractor
, th
->nt
);
925 thread_sched_to_ready_common(sched
, th
, false, false);
927 if (sched
->running
== th
) {
928 thread_sched_add_running_thread(sched
, th
);
931 // TODO: check SNT number
932 thread_sched_wait_running_turn(sched
, th
, false);
// waiting -> ready -> running
//
// `th` had been put to waiting by `thread_sched_to_waiting()`
// and ran a dedicated task (like waitpid and so on).
// After the dedicated task, this function is called
// to rejoin normal thread scheduling.
942 thread_sched_to_running(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
944 thread_sched_lock(sched
, th
);
946 thread_sched_to_running_common(sched
, th
);
948 thread_sched_unlock(sched
, th
);
// Resume the next thread in the thread ready queue.
//
// Dequeue the next running thread from the ready thread queue and
// resume it if available.
//
// If the next thread has a dedicated native thread, simply signal it to resume.
// Otherwise, make the ractor ready so that another native thread will run the ractor and the thread.
959 thread_sched_wakeup_next_thread(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool will_switch
)
961 ASSERT_thread_sched_locked(sched
, th
);
963 VM_ASSERT(sched
->running
== th
);
964 VM_ASSERT(sched
->running
->nt
!= NULL
);
966 rb_thread_t
*next_th
= thread_sched_deq(sched
);
968 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th
));
969 VM_ASSERT(th
!= next_th
);
971 thread_sched_set_running(sched
, next_th
);
972 VM_ASSERT(next_th
== sched
->running
);
973 thread_sched_wakeup_running_thread(sched
, next_th
, will_switch
);
976 thread_sched_del_running_thread(sched
, th
);
// running -> waiting (or dead)
//
// If `to_dead` is false, th will run a dedicated task;
// otherwise th is exiting. In either case, run another ready thread.
989 thread_sched_to_waiting_common0(struct rb_thread_sched
*sched
, rb_thread_t
*th
, bool to_dead
)
991 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
993 if (!to_dead
) native_thread_dedicated_inc(th
->vm
, th
->ractor
, th
->nt
);
995 RUBY_DEBUG_LOG("%sth:%u", to_dead
? "to_dead " : "", rb_th_serial(th
));
997 bool can_switch
= to_dead
? !th_has_dedicated_nt(th
) : false;
998 thread_sched_wakeup_next_thread(sched
, th
, can_switch
);
1001 // running -> dead (locked)
1003 thread_sched_to_dead_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1005 RUBY_DEBUG_LOG("dedicated:%d", th
->nt
->dedicated
);
1006 thread_sched_to_waiting_common0(sched
, th
, true);
1007 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED
, th
);
1012 thread_sched_to_dead(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1014 thread_sched_lock(sched
, th
);
1016 thread_sched_to_dead_common(sched
, th
);
1018 thread_sched_unlock(sched
, th
);
1021 // running -> waiting (locked)
1023 // This thread will run dedicated task (th->nt->dedicated++).
1025 thread_sched_to_waiting_common(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1027 RUBY_DEBUG_LOG("dedicated:%d", th
->nt
->dedicated
);
1028 thread_sched_to_waiting_common0(sched
, th
, false);
1031 // running -> waiting
1033 // This thread will run a dedicated task.
1035 thread_sched_to_waiting(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1037 thread_sched_lock(sched
, th
);
1039 thread_sched_to_waiting_common(sched
, th
);
1041 thread_sched_unlock(sched
, th
);
1044 // mini utility func
1046 setup_ubf(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
)
1048 rb_native_mutex_lock(&th
->interrupt_lock
);
1050 th
->unblock
.func
= func
;
1051 th
->unblock
.arg
= arg
;
1053 rb_native_mutex_unlock(&th
->interrupt_lock
);
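/* setup_ubf(th, func, arg) registers (or, with func == NULL, clears) the
 * unblocking function under th->interrupt_lock.  Callers below pair
 * setup_ubf(th, ubf_waiting, th) before sleeping with setup_ubf(th, NULL, NULL)
 * once the thread is running again. */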
1057 ubf_waiting(void *ptr
)
1059 rb_thread_t
*th
= (rb_thread_t
*)ptr
;
1060 struct rb_thread_sched
*sched
= TH_SCHED(th
);
1062 // only once. it is safe because th->interrupt_lock is already acquired.
1063 th
->unblock
.func
= NULL
;
1064 th
->unblock
.arg
= NULL
;
1066 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
1068 thread_sched_lock(sched
, th
);
1070 if (sched
->running
== th
) {
1071 // not sleeping yet.
1074 thread_sched_to_ready_common(sched
, th
, true, false);
1077 thread_sched_unlock(sched
, th
);
1080 // running -> waiting
1082 // This thread will sleep until other thread wakeup the thread.
1084 thread_sched_to_waiting_until_wakeup(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1086 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th
));
1088 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1089 setup_ubf(th
, ubf_waiting
, (void *)th
);
1091 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1093 thread_sched_lock(sched
, th
);
1095 if (!RUBY_VM_INTERRUPTED(th
->ec
)) {
1096 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1097 thread_sched_wakeup_next_thread(sched
, th
, can_direct_transfer
);
1098 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1101 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th
));
1104 thread_sched_unlock(sched
, th
);
1106 setup_ubf(th
, NULL
, NULL
);
1109 // run another thread in the ready queue.
1110 // continue to run if there are no ready threads.
1112 thread_sched_yield(struct rb_thread_sched
*sched
, rb_thread_t
*th
)
1114 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th
->serial
, sched
->readyq_cnt
);
1116 thread_sched_lock(sched
, th
);
1118 if (!ccan_list_empty(&sched
->readyq
)) {
1119 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1120 thread_sched_wakeup_next_thread(sched
, th
, !th_has_dedicated_nt(th
));
1121 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1122 thread_sched_to_ready_common(sched
, th
, false, can_direct_transfer
);
1123 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1126 VM_ASSERT(sched
->readyq_cnt
== 0);
1129 thread_sched_unlock(sched
, th
);
1133 rb_thread_sched_init(struct rb_thread_sched
*sched
, bool atfork
)
1135 rb_native_mutex_initialize(&sched
->lock_
);
1138 sched
->lock_owner
= NULL
;
1141 ccan_list_head_init(&sched
->readyq
);
1142 sched
->readyq_cnt
= 0;
1145 if (!atfork
) sched
->enable_mn_threads
= true; // MN is enabled on Ractors
1150 coroutine_transfer0(struct coroutine_context
*transfer_from
, struct coroutine_context
*transfer_to
, bool to_dead
)
1152 #ifdef RUBY_ASAN_ENABLED
1153 void **fake_stack
= to_dead
? NULL
: &transfer_from
->fake_stack
;
1154 __sanitizer_start_switch_fiber(fake_stack
, transfer_to
->stack_base
, transfer_to
->stack_size
);
1157 RBIMPL_ATTR_MAYBE_UNUSED()
1158 struct coroutine_context
*returning_from
= coroutine_transfer(transfer_from
, transfer_to
);
1160 /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
1161 * never be resumed! */
1162 VM_ASSERT(!to_dead
);
1163 #ifdef RUBY_ASAN_ENABLED
1164 __sanitizer_finish_switch_fiber(transfer_from
->fake_stack
,
1165 (const void**)&returning_from
->stack_base
, &returning_from
->stack_size
);
1171 thread_sched_switch0(struct coroutine_context
*current_cont
, rb_thread_t
*next_th
, struct rb_native_thread
*nt
, bool to_dead
)
1173 VM_ASSERT(!nt
->dedicated
);
1174 VM_ASSERT(next_th
->nt
== NULL
);
1176 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th
));
1178 ruby_thread_set_native(next_th
);
1179 native_thread_assign(nt
, next_th
);
1181 coroutine_transfer0(current_cont
, next_th
->sched
.context
, to_dead
);
1185 thread_sched_switch(rb_thread_t
*cth
, rb_thread_t
*next_th
)
1187 struct rb_native_thread
*nt
= cth
->nt
;
1188 native_thread_assign(NULL
, cth
);
1189 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth
), rb_th_serial(next_th
), nt
->serial
);
1190 thread_sched_switch0(cth
->sched
.context
, next_th
, nt
, cth
->status
== THREAD_KILLED
);
1193 #if VM_CHECK_MODE > 0
1194 RBIMPL_ATTR_MAYBE_UNUSED()
1196 grq_size(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1198 ASSERT_ractor_sched_locked(vm
, cr
);
1200 rb_ractor_t
*r
, *prev_r
= NULL
;
1203 ccan_list_for_each(&vm
->ractor
.sched
.grq
, r
, threads
.sched
.grq_node
) {
1206 VM_ASSERT(r
!= prev_r
);
1214 ractor_sched_enq(rb_vm_t
*vm
, rb_ractor_t
*r
)
1216 struct rb_thread_sched
*sched
= &r
->threads
.sched
;
1217 rb_ractor_t
*cr
= NULL
; // timer thread can call this function
1219 VM_ASSERT(sched
->running
!= NULL
);
1220 VM_ASSERT(sched
->running
->nt
== NULL
);
1222 ractor_sched_lock(vm
, cr
);
1224 #if VM_CHECK_MODE > 0
1225 // check if grq contains r
1227 ccan_list_for_each(&vm
->ractor
.sched
.grq
, tr
, threads
.sched
.grq_node
) {
1232 ccan_list_add_tail(&vm
->ractor
.sched
.grq
, &sched
->grq_node
);
1233 vm
->ractor
.sched
.grq_cnt
++;
1234 VM_ASSERT(grq_size(vm
, cr
) == vm
->ractor
.sched
.grq_cnt
);
1236 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r
), rb_th_serial(sched
->running
), vm
->ractor
.sched
.grq_cnt
);
1238 rb_native_cond_signal(&vm
->ractor
.sched
.cond
);
1240 // ractor_sched_dump(vm);
1242 ractor_sched_unlock(vm
, cr
);
1246 #ifndef SNT_KEEP_SECONDS
1247 #define SNT_KEEP_SECONDS 0
1251 // make at least MINIMUM_SNT snts for debug.
1252 #define MINIMUM_SNT 0
1255 static rb_ractor_t
*
1256 ractor_sched_deq(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1260 ractor_sched_lock(vm
, cr
);
1262 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm
->ractor
.sched
.grq
));
1263 // ractor_sched_dump(vm);
1265 VM_ASSERT(rb_current_execution_context(false) == NULL
);
1266 VM_ASSERT(grq_size(vm
, cr
) == vm
->ractor
.sched
.grq_cnt
);
1268 while ((r
= ccan_list_pop(&vm
->ractor
.sched
.grq
, rb_ractor_t
, threads
.sched
.grq_node
)) == NULL
) {
1269 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1271 #if SNT_KEEP_SECONDS > 0
1272 rb_hrtime_t abs
= rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC
* SNT_KEEP_SECONDS
);
1273 if (native_cond_timedwait(&vm
->ractor
.sched
.cond
, &vm
->ractor
.sched
.lock
, &abs
) == ETIMEDOUT
) {
1274 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1275 VM_ASSERT(r
== NULL
);
1276 vm
->ractor
.sched
.snt_cnt
--;
1277 vm
->ractor
.sched
.running_cnt
--;
1281 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1284 ractor_sched_set_unlocked(vm
, cr
);
1285 rb_native_cond_wait(&vm
->ractor
.sched
.cond
, &vm
->ractor
.sched
.lock
);
1286 ractor_sched_set_locked(vm
, cr
);
1288 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm
->ractor
.sched
.grq_cnt
);
1292 VM_ASSERT(rb_current_execution_context(false) == NULL
);
1295 VM_ASSERT(vm
->ractor
.sched
.grq_cnt
> 0);
1296 vm
->ractor
.sched
.grq_cnt
--;
1297 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r
), vm
->ractor
.sched
.grq_cnt
);
1300 VM_ASSERT(SNT_KEEP_SECONDS
> 0);
1304 ractor_sched_unlock(vm
, cr
);
1309 void rb_ractor_lock_self(rb_ractor_t
*r
);
1310 void rb_ractor_unlock_self(rb_ractor_t
*r
);
1313 rb_ractor_sched_sleep(rb_execution_context_t
*ec
, rb_ractor_t
*cr
, rb_unblock_function_t
*ubf
)
1315 // ractor lock of cr is acquired
1316 // r is sleeping status
1317 rb_thread_t
* volatile th
= rb_ec_thread_ptr(ec
);
1318 struct rb_thread_sched
*sched
= TH_SCHED(th
);
1319 cr
->sync
.wait
.waiting_thread
= th
; // TODO: multi-thread
1321 setup_ubf(th
, ubf
, (void *)cr
);
1323 thread_sched_lock(sched
, th
);
1325 rb_ractor_unlock_self(cr
);
1327 if (RUBY_VM_INTERRUPTED(th
->ec
)) {
1328 RUBY_DEBUG_LOG("interrupted");
1330 else if (cr
->sync
.wait
.wakeup_status
!= wakeup_none
) {
1331 RUBY_DEBUG_LOG("awaken:%d", (int)cr
->sync
.wait
.wakeup_status
);
1335 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1336 th
->status
= THREAD_STOPPED_FOREVER
;
1338 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
, th
);
1340 bool can_direct_transfer
= !th_has_dedicated_nt(th
);
1341 thread_sched_wakeup_next_thread(sched
, th
, can_direct_transfer
);
1342 thread_sched_wait_running_turn(sched
, th
, can_direct_transfer
);
1343 th
->status
= THREAD_RUNNABLE
;
1348 thread_sched_unlock(sched
, th
);
1350 setup_ubf(th
, NULL
, NULL
);
1352 rb_ractor_lock_self(cr
);
1353 cr
->sync
.wait
.waiting_thread
= NULL
;
1357 rb_ractor_sched_wakeup(rb_ractor_t
*r
)
1359 rb_thread_t
*r_th
= r
->sync
.wait
.waiting_thread
;
1360 // ractor lock of r is acquired
1361 struct rb_thread_sched
*sched
= TH_SCHED(r_th
);
1363 VM_ASSERT(r
->sync
.wait
.wakeup_status
!= 0);
1365 thread_sched_lock(sched
, r_th
);
1367 if (r_th
->status
== THREAD_STOPPED_FOREVER
) {
1368 thread_sched_to_ready_common(sched
, r_th
, true, false);
1371 thread_sched_unlock(sched
, r_th
);
1375 ractor_sched_barrier_completed_p(rb_vm_t
*vm
)
1377 RUBY_DEBUG_LOG("run:%u wait:%u", vm
->ractor
.sched
.running_cnt
, vm
->ractor
.sched
.barrier_waiting_cnt
);
1378 VM_ASSERT(vm
->ractor
.sched
.running_cnt
- 1 >= vm
->ractor
.sched
.barrier_waiting_cnt
);
1379 return (vm
->ractor
.sched
.running_cnt
- vm
->ractor
.sched
.barrier_waiting_cnt
) == 1;
1383 rb_ractor_sched_barrier_start(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1385 VM_ASSERT(cr
== GET_RACTOR());
1386 VM_ASSERT(vm
->ractor
.sync
.lock_owner
== cr
); // VM is locked
1387 VM_ASSERT(!vm
->ractor
.sched
.barrier_waiting
);
1388 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting_cnt
== 0);
1390 RUBY_DEBUG_LOG("start serial:%u", vm
->ractor
.sched
.barrier_serial
);
1392 unsigned int lock_rec
;
1394 ractor_sched_lock(vm
, cr
);
1396 vm
->ractor
.sched
.barrier_waiting
= true;
1399 lock_rec
= vm
->ractor
.sync
.lock_rec
;
1400 vm
->ractor
.sync
.lock_rec
= 0;
1401 vm
->ractor
.sync
.lock_owner
= NULL
;
1402 rb_native_mutex_unlock(&vm
->ractor
.sync
.lock
);
1404 // interrupts all running threads
1406 ccan_list_for_each(&vm
->ractor
.sched
.running_threads
, ith
, sched
.node
.running_threads
) {
1407 if (ith
->ractor
!= cr
) {
1408 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith
));
1409 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith
->ec
);
1413 // wait for other ractors
1414 while (!ractor_sched_barrier_completed_p(vm
)) {
1415 ractor_sched_set_unlocked(vm
, cr
);
1416 rb_native_cond_wait(&vm
->ractor
.sched
.barrier_complete_cond
, &vm
->ractor
.sched
.lock
);
1417 ractor_sched_set_locked(vm
, cr
);
1421 ractor_sched_unlock(vm
, cr
);
1424 rb_native_mutex_lock(&vm
->ractor
.sync
.lock
);
1425 vm
->ractor
.sync
.lock_rec
= lock_rec
;
1426 vm
->ractor
.sync
.lock_owner
= cr
;
RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);
1430 ractor_sched_lock(vm
, cr
);
1432 vm
->ractor
.sched
.barrier_waiting
= false;
1433 vm
->ractor
.sched
.barrier_serial
++;
1434 vm
->ractor
.sched
.barrier_waiting_cnt
= 0;
1435 rb_native_cond_broadcast(&vm
->ractor
.sched
.barrier_release_cond
);
1437 ractor_sched_unlock(vm
, cr
);
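/* Barrier protocol in short: the initiating ractor sets barrier_waiting,
 * interrupts every other running thread, and sleeps on barrier_complete_cond
 * until running_cnt - barrier_waiting_cnt == 1.  Joining threads bump
 * barrier_waiting_cnt and sleep on barrier_release_cond until barrier_serial
 * is incremented above. */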
1441 ractor_sched_barrier_join_signal_locked(rb_vm_t
*vm
)
1443 if (ractor_sched_barrier_completed_p(vm
)) {
1444 rb_native_cond_signal(&vm
->ractor
.sched
.barrier_complete_cond
);
1449 ractor_sched_barrier_join_wait_locked(rb_vm_t
*vm
, rb_thread_t
*th
)
1451 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
);
1453 unsigned int barrier_serial
= vm
->ractor
.sched
.barrier_serial
;
1455 while (vm
->ractor
.sched
.barrier_serial
== barrier_serial
) {
1456 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial
);
1457 RB_VM_SAVE_MACHINE_CONTEXT(th
);
1459 rb_ractor_t
*cr
= th
->ractor
;
1460 ractor_sched_set_unlocked(vm
, cr
);
1461 rb_native_cond_wait(&vm
->ractor
.sched
.barrier_release_cond
, &vm
->ractor
.sched
.lock
);
1462 ractor_sched_set_locked(vm
, cr
);
1464 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial
);
1469 rb_ractor_sched_barrier_join(rb_vm_t
*vm
, rb_ractor_t
*cr
)
1471 VM_ASSERT(cr
->threads
.sched
.running
!= NULL
); // running ractor
1472 VM_ASSERT(cr
== GET_RACTOR());
1473 VM_ASSERT(vm
->ractor
.sync
.lock_owner
== NULL
); // VM is locked, but owner == NULL
1474 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
); // VM needs barrier sync
1476 #if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1477 unsigned int barrier_serial
= vm
->ractor
.sched
.barrier_serial
;
1480 RUBY_DEBUG_LOG("join");
1482 rb_native_mutex_unlock(&vm
->ractor
.sync
.lock
);
1484 VM_ASSERT(vm
->ractor
.sched
.barrier_waiting
); // VM needs barrier sync
1485 VM_ASSERT(vm
->ractor
.sched
.barrier_serial
== barrier_serial
);
1487 ractor_sched_lock(vm
, cr
);
1490 vm
->ractor
.sched
.barrier_waiting_cnt
++;
1491 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm
->ractor
.sched
.barrier_waiting_cnt
, barrier_serial
);
1493 ractor_sched_barrier_join_signal_locked(vm
);
1494 ractor_sched_barrier_join_wait_locked(vm
, cr
->threads
.sched
.running
);
1496 ractor_sched_unlock(vm
, cr
);
1499 rb_native_mutex_lock(&vm
->ractor
.sync
.lock
);
1506 static void clear_thread_cache_altstack(void);
1509 rb_thread_sched_destroy(struct rb_thread_sched
*sched
)
1512 * only called once at VM shutdown (not atfork), another thread
1513 * may still grab vm->gvl.lock when calling gvl_release at
1514 * the end of thread_start_func_2
1517 rb_native_mutex_destroy(&sched
->lock
);
1519 clear_thread_cache_altstack();
1523 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1525 get_native_thread_id(void)
1528 return (int)syscall(SYS_gettid
);
1529 #elif defined(__FreeBSD__)
1530 return pthread_getthreadid_np();
1535 #if defined(HAVE_WORKING_FORK)
1537 thread_sched_atfork(struct rb_thread_sched
*sched
)
1540 rb_thread_sched_init(sched
, true);
1541 rb_thread_t
*th
= GET_THREAD();
1542 rb_vm_t
*vm
= GET_VM();
1544 if (th_has_dedicated_nt(th
)) {
1545 vm
->ractor
.sched
.snt_cnt
= 0;
1548 vm
->ractor
.sched
.snt_cnt
= 1;
1550 vm
->ractor
.sched
.running_cnt
= 0;
1552 rb_native_mutex_initialize(&vm
->ractor
.sched
.lock
);
1553 #if VM_CHECK_MODE > 0
1554 vm
->ractor
.sched
.lock_owner
= NULL
;
1555 vm
->ractor
.sched
.locked
= false;
1558 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1559 rb_native_cond_initialize(&vm
->ractor
.sched
.cond
);
1560 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_complete_cond
);
1561 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_release_cond
);
1563 ccan_list_head_init(&vm
->ractor
.sched
.grq
);
1564 ccan_list_head_init(&vm
->ractor
.sched
.timeslice_threads
);
1565 ccan_list_head_init(&vm
->ractor
.sched
.running_threads
);
1567 VM_ASSERT(sched
->is_running
);
1568 sched
->is_running_timeslice
= false;
1570 if (sched
->running
!= th
) {
1571 thread_sched_to_running(sched
, th
);
1574 thread_sched_setup_running_threads(sched
, th
->ractor
, vm
, th
, NULL
, NULL
);
1577 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1579 th
->nt
->tid
= get_native_thread_id();
1586 #ifdef RB_THREAD_LOCAL_SPECIFIER
1587 static RB_THREAD_LOCAL_SPECIFIER rb_thread_t
*ruby_native_thread
;
1589 static pthread_key_t ruby_native_thread_key
;
1596 // This function can be called from signal handler
1597 // RUBY_DEBUG_LOG("i:%d", i);
1601 ruby_thread_from_native(void)
1603 #ifdef RB_THREAD_LOCAL_SPECIFIER
1604 return ruby_native_thread
;
1606 return pthread_getspecific(ruby_native_thread_key
);
1611 ruby_thread_set_native(rb_thread_t
*th
)
1615 ccan_list_node_init(&th
->sched
.node
.ubf
);
1622 rb_ractor_set_current_ec(th
->ractor
, th
->ec
);
1624 #ifdef RB_THREAD_LOCAL_SPECIFIER
1625 ruby_native_thread
= th
;
1628 return pthread_setspecific(ruby_native_thread_key
, th
) == 0;
1632 static void native_thread_setup(struct rb_native_thread
*nt
);
1633 static void native_thread_setup_on_thread(struct rb_native_thread
*nt
);
1636 Init_native_thread(rb_thread_t
*main_th
)
1638 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1639 if (condattr_monotonic
) {
1640 int r
= pthread_condattr_init(condattr_monotonic
);
1642 r
= pthread_condattr_setclock(condattr_monotonic
, CLOCK_MONOTONIC
);
1644 if (r
) condattr_monotonic
= NULL
;
1648 #ifndef RB_THREAD_LOCAL_SPECIFIER
1649 if (pthread_key_create(&ruby_native_thread_key
, 0) == EAGAIN
) {
1650 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1652 if (pthread_key_create(&ruby_current_ec_key
, 0) == EAGAIN
) {
1653 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1656 ruby_posix_signal(SIGVTALRM
, null_func
);
1659 rb_vm_t
*vm
= main_th
->vm
;
1660 rb_native_mutex_initialize(&vm
->ractor
.sched
.lock
);
1661 rb_native_cond_initialize(&vm
->ractor
.sched
.cond
);
1662 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_complete_cond
);
1663 rb_native_cond_initialize(&vm
->ractor
.sched
.barrier_release_cond
);
1665 ccan_list_head_init(&vm
->ractor
.sched
.grq
);
1666 ccan_list_head_init(&vm
->ractor
.sched
.timeslice_threads
);
1667 ccan_list_head_init(&vm
->ractor
.sched
.running_threads
);
1669 // setup main thread
1670 main_th
->nt
->thread_id
= pthread_self();
1671 main_th
->nt
->serial
= 1;
1672 #ifdef RUBY_NT_SERIAL
1675 ruby_thread_set_native(main_th
);
1676 native_thread_setup(main_th
->nt
);
1677 native_thread_setup_on_thread(main_th
->nt
);
1679 TH_SCHED(main_th
)->running
= main_th
;
1680 main_th
->has_dedicated_nt
= 1;
1682 thread_sched_setup_running_threads(TH_SCHED(main_th
), main_th
->ractor
, vm
, main_th
, NULL
, NULL
);
1685 main_th
->nt
->dedicated
= 1;
1686 main_th
->nt
->vm
= vm
;
1689 vm
->ractor
.sched
.dnt_cnt
= 1;
1692 extern int ruby_mn_threads_enabled
;
1695 ruby_mn_threads_params(void)
1697 rb_vm_t
*vm
= GET_VM();
1698 rb_ractor_t
*main_ractor
= GET_RACTOR();
1700 const char *mn_threads_cstr
= getenv("RUBY_MN_THREADS");
1701 bool enable_mn_threads
= false;
1703 if (USE_MN_THREADS
&& mn_threads_cstr
&& (enable_mn_threads
= atoi(mn_threads_cstr
) > 0)) {
1705 ruby_mn_threads_enabled
= 1;
1707 main_ractor
->threads
.sched
.enable_mn_threads
= enable_mn_threads
;
1709 const char *max_cpu_cstr
= getenv("RUBY_MAX_CPU");
1710 const int default_max_cpu
= 8; // TODO: CPU num?
1711 int max_cpu
= default_max_cpu
;
1713 if (USE_MN_THREADS
&& max_cpu_cstr
) {
1714 int given_max_cpu
= atoi(max_cpu_cstr
);
1715 if (given_max_cpu
> 0) {
1716 max_cpu
= given_max_cpu
;
1720 vm
->ractor
.sched
.max_cpu
= max_cpu
;
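/* Example (illustrative): per the parsing above, M:N threads and the native
 * thread cap can be enabled from the environment, e.g.
 *
 *   $ RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby app.rb
 */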
1724 native_thread_dedicated_inc(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
)
1726 RUBY_DEBUG_LOG("nt:%d %d->%d", nt
->serial
, nt
->dedicated
, nt
->dedicated
+ 1);
1728 if (nt
->dedicated
== 0) {
1729 ractor_sched_lock(vm
, cr
);
1731 vm
->ractor
.sched
.snt_cnt
--;
1732 vm
->ractor
.sched
.dnt_cnt
++;
1734 ractor_sched_unlock(vm
, cr
);
1741 native_thread_dedicated_dec(rb_vm_t
*vm
, rb_ractor_t
*cr
, struct rb_native_thread
*nt
)
1743 RUBY_DEBUG_LOG("nt:%d %d->%d", nt
->serial
, nt
->dedicated
, nt
->dedicated
- 1);
1744 VM_ASSERT(nt
->dedicated
> 0);
1747 if (nt
->dedicated
== 0) {
1748 ractor_sched_lock(vm
, cr
);
1750 nt
->vm
->ractor
.sched
.snt_cnt
++;
1751 nt
->vm
->ractor
.sched
.dnt_cnt
--;
1753 ractor_sched_unlock(vm
, cr
);
1758 native_thread_assign(struct rb_native_thread
*nt
, rb_thread_t
*th
)
1760 #if USE_RUBY_DEBUG_LOG
1763 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th
->serial
, (int)th
->nt
->serial
, (int)nt
->serial
);
1766 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th
->serial
, (int)nt
->serial
);
1771 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th
->serial
, (int)th
->nt
->serial
);
1774 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th
->serial
);
1783 native_thread_destroy(struct rb_native_thread
*nt
)
1786 rb_native_cond_destroy(&nt
->cond
.readyq
);
1788 if (&nt
->cond
.readyq
!= &nt
->cond
.intr
) {
1789 rb_native_cond_destroy(&nt
->cond
.intr
);
1792 RB_ALTSTACK_FREE(nt
->altstack
);
1793 ruby_xfree(nt
->nt_context
);
1798 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1799 #define STACKADDR_AVAILABLE 1
1800 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1801 #define STACKADDR_AVAILABLE 1
1802 #undef MAINSTACKADDR_AVAILABLE
1803 #define MAINSTACKADDR_AVAILABLE 1
1804 void *pthread_get_stackaddr_np(pthread_t
);
1805 size_t pthread_get_stacksize_np(pthread_t
);
1806 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1807 #define STACKADDR_AVAILABLE 1
1808 #elif defined HAVE_PTHREAD_GETTHRDS_NP
1809 #define STACKADDR_AVAILABLE 1
1810 #elif defined __HAIKU__
1811 #define STACKADDR_AVAILABLE 1
1814 #ifndef MAINSTACKADDR_AVAILABLE
1815 # ifdef STACKADDR_AVAILABLE
1816 # define MAINSTACKADDR_AVAILABLE 1
1818 # define MAINSTACKADDR_AVAILABLE 0
1821 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1822 # define get_main_stack(addr, size) get_stack(addr, size)
1825 #ifdef STACKADDR_AVAILABLE
1827 * Get the initial address and size of current thread's stack
1830 get_stack(void **addr
, size_t *size
)
1832 #define CHECK_ERR(expr) \
1833 {int err = (expr); if (err) return err;}
1834 #ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1835 pthread_attr_t attr
;
1837 STACK_GROW_DIR_DETECTION
;
1838 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr
));
1839 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1840 CHECK_ERR(pthread_attr_getstack(&attr
, addr
, size
));
1841 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1843 CHECK_ERR(pthread_attr_getstackaddr(&attr
, addr
));
1844 CHECK_ERR(pthread_attr_getstacksize(&attr
, size
));
1846 # ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1847 CHECK_ERR(pthread_attr_getguardsize(&attr
, &guard
));
1849 guard
= getpagesize();
1852 pthread_attr_destroy(&attr
);
1853 #elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1854 pthread_attr_t attr
;
1855 CHECK_ERR(pthread_attr_init(&attr
));
1856 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr
));
1857 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1858 CHECK_ERR(pthread_attr_getstack(&attr
, addr
, size
));
1860 CHECK_ERR(pthread_attr_getstackaddr(&attr
, addr
));
1861 CHECK_ERR(pthread_attr_getstacksize(&attr
, size
));
1863 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1864 pthread_attr_destroy(&attr
);
1865 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1866 pthread_t th
= pthread_self();
1867 *addr
= pthread_get_stackaddr_np(th
);
1868 *size
= pthread_get_stacksize_np(th
);
1869 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1871 # if defined HAVE_THR_STKSEGMENT /* Solaris */
1872 CHECK_ERR(thr_stksegment(&stk
));
1873 # else /* OpenBSD */
1874 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk
));
1877 *size
= stk
.ss_size
;
1878 #elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1879 pthread_t th
= pthread_self();
1880 struct __pthrdsinfo thinfo
;
1882 int regsiz
=sizeof(reg
);
1883 CHECK_ERR(pthread_getthrds_np(&th
, PTHRDSINFO_QUERY_ALL
,
1884 &thinfo
, sizeof(thinfo
),
1886 *addr
= thinfo
.__pi_stackaddr
;
1887 /* Must not use thinfo.__pi_stacksize for size.
1888 It is around 3KB smaller than the correct size
1889 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1890 *size
= thinfo
.__pi_stackend
- thinfo
.__pi_stackaddr
;
1891 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1892 #elif defined __HAIKU__
1894 STACK_GROW_DIR_DETECTION
;
1895 CHECK_ERR(get_thread_info(find_thread(NULL
), &info
));
1896 *addr
= info
.stack_base
;
1897 *size
= (uintptr_t)info
.stack_end
- (uintptr_t)info
.stack_base
;
1898 STACK_DIR_UPPER((void)0, (void)(*addr
= (char *)*addr
+ *size
));
1900 #error STACKADDR_AVAILABLE is defined but not implemented.
1908 rb_nativethread_id_t id
;
1909 size_t stack_maxsize
;
1911 } native_main_thread
;
1913 #ifdef STACK_END_ADDRESS
1914 extern void *STACK_END_ADDRESS
;
1918 RUBY_STACK_SPACE_LIMIT
= 1024 * 1024, /* 1024KB */
1919 RUBY_STACK_SPACE_RATIO
= 5
1923 space_size(size_t stack_size
)
1925 size_t space_size
= stack_size
/ RUBY_STACK_SPACE_RATIO
;
1926 if (space_size
> RUBY_STACK_SPACE_LIMIT
) {
1927 return RUBY_STACK_SPACE_LIMIT
;
1935 native_thread_init_main_thread_stack(void *addr
)
1937 native_main_thread
.id
= pthread_self();
1938 #ifdef RUBY_ASAN_ENABLED
1939 addr
= asan_get_real_stack_addr((void *)addr
);
1942 #if MAINSTACKADDR_AVAILABLE
1943 if (native_main_thread
.stack_maxsize
) return;
1947 if (get_main_stack(&stackaddr
, &size
) == 0) {
1948 native_main_thread
.stack_maxsize
= size
;
1949 native_main_thread
.stack_start
= stackaddr
;
1954 #ifdef STACK_END_ADDRESS
1955 native_main_thread
.stack_start
= STACK_END_ADDRESS
;
1957 if (!native_main_thread
.stack_start
||
1958 STACK_UPPER((VALUE
*)(void *)&addr
,
1959 native_main_thread
.stack_start
> (VALUE
*)addr
,
1960 native_main_thread
.stack_start
< (VALUE
*)addr
)) {
1961 native_main_thread
.stack_start
= (VALUE
*)addr
;
1965 #if defined(HAVE_GETRLIMIT)
1966 #if defined(PTHREAD_STACK_DEFAULT)
1967 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1968 # error "PTHREAD_STACK_DEFAULT is too small"
1970 size_t size
= PTHREAD_STACK_DEFAULT
;
1972 size_t size
= RUBY_VM_THREAD_VM_STACK_SIZE
;
1975 int pagesize
= getpagesize();
1977 STACK_GROW_DIR_DETECTION
;
1978 if (getrlimit(RLIMIT_STACK
, &rlim
) == 0) {
1979 size
= (size_t)rlim
.rlim_cur
;
1981 addr
= native_main_thread
.stack_start
;
1982 if (IS_STACK_DIR_UPPER()) {
1983 space
= ((size_t)((char *)addr
+ size
) / pagesize
) * pagesize
- (size_t)addr
;
1986 space
= (size_t)addr
- ((size_t)((char *)addr
- size
) / pagesize
+ 1) * pagesize
;
1988 native_main_thread
.stack_maxsize
= space
;
1992 #if MAINSTACKADDR_AVAILABLE
1995 /* If addr is out of range of main-thread stack range estimation, */
1996 /* it should be on co-routine (alternative stack). [Feature #2294] */
1999 STACK_GROW_DIR_DETECTION
;
2001 if (IS_STACK_DIR_UPPER()) {
2002 start
= native_main_thread
.stack_start
;
2003 end
= (char *)native_main_thread
.stack_start
+ native_main_thread
.stack_maxsize
;
2006 start
= (char *)native_main_thread
.stack_start
- native_main_thread
.stack_maxsize
;
2007 end
= native_main_thread
.stack_start
;
2010 if ((void *)addr
< start
|| (void *)addr
> end
) {
2012 native_main_thread
.stack_start
= (VALUE
*)addr
;
2013 native_main_thread
.stack_maxsize
= 0; /* unknown */
2018 #define CHECK_ERR(expr) \
2019 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2022 native_thread_init_stack(rb_thread_t
*th
, void *local_in_parent_frame
)
2024 rb_nativethread_id_t curr
= pthread_self();
2025 #ifdef RUBY_ASAN_ENABLED
2026 local_in_parent_frame
= asan_get_real_stack_addr(local_in_parent_frame
);
2027 th
->ec
->machine
.asan_fake_stack_handle
= asan_get_thread_fake_stack_handle();
2030 if (!native_main_thread
.id
) {
2031 /* This thread is the first thread, must be the main thread -
2032 * configure the native_main_thread object */
2033 native_thread_init_main_thread_stack(local_in_parent_frame
);
2036 if (pthread_equal(curr
, native_main_thread
.id
)) {
2037 th
->ec
->machine
.stack_start
= native_main_thread
.stack_start
;
2038 th
->ec
->machine
.stack_maxsize
= native_main_thread
.stack_maxsize
;
2041 #ifdef STACKADDR_AVAILABLE
2042 if (th_has_dedicated_nt(th
)) {
2046 if (get_stack(&start
, &size
) == 0) {
2047 uintptr_t diff
= (uintptr_t)start
- (uintptr_t)local_in_parent_frame
;
2048 th
->ec
->machine
.stack_start
= local_in_parent_frame
;
2049 th
->ec
->machine
.stack_maxsize
= size
- diff
;
2053 rb_raise(rb_eNotImpError
, "ruby engine can initialize only in the main thread");
2062 struct rb_native_thread
*nt
;
2066 nt_start(void *ptr
);
2069 native_thread_create0(struct rb_native_thread
*nt
)
2072 pthread_attr_t attr
;
2074 const size_t stack_size
= nt
->vm
->default_params
.thread_machine_stack_size
;
2075 const size_t space
= space_size(stack_size
);
2077 nt
->machine_stack_maxsize
= stack_size
- space
;
2079 #ifdef USE_SIGALTSTACK
2080 nt
->altstack
= rb_allocate_sigaltstack();
2083 CHECK_ERR(pthread_attr_init(&attr
));
2085 # ifdef PTHREAD_STACK_MIN
2086 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size
);
2087 CHECK_ERR(pthread_attr_setstacksize(&attr
, stack_size
));
2090 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2091 CHECK_ERR(pthread_attr_setinheritsched(&attr
, PTHREAD_INHERIT_SCHED
));
2093 CHECK_ERR(pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
));
2095 err
= pthread_create(&nt
->thread_id
, &attr
, nt_start
, nt
);
2097 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt
->serial
, err
);
2099 CHECK_ERR(pthread_attr_destroy(&attr
));
2105 native_thread_setup(struct rb_native_thread
*nt
)
2108 rb_native_cond_initialize(&nt
->cond
.readyq
);
2110 if (&nt
->cond
.readyq
!= &nt
->cond
.intr
) {
2111 rb_native_cond_initialize(&nt
->cond
.intr
);
2116 native_thread_setup_on_thread(struct rb_native_thread
*nt
)
2119 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2120 nt
->tid
= get_native_thread_id();
2123 // init signal handler
2124 RB_ALTSTACK_INIT(nt
->altstack
, nt
->altstack
);
2127 static struct rb_native_thread
*
2128 native_thread_alloc(void)
2130 struct rb_native_thread
*nt
= ZALLOC(struct rb_native_thread
);
2131 native_thread_setup(nt
);
2134 nt
->nt_context
= ruby_xmalloc(sizeof(struct coroutine_context
));
2137 #if USE_RUBY_DEBUG_LOG
2138 static rb_atomic_t nt_serial
= 2;
2139 nt
->serial
= RUBY_ATOMIC_FETCH_ADD(nt_serial
, 1);
2145 native_thread_create_dedicated(rb_thread_t
*th
)
2147 th
->nt
= native_thread_alloc();
2148 th
->nt
->vm
= th
->vm
;
2149 th
->nt
->running_thread
= th
;
2150 th
->nt
->dedicated
= 1;
2153 size_t vm_stack_word_size
= th
->vm
->default_params
.thread_vm_stack_size
/ sizeof(VALUE
);
2154 void *vm_stack
= ruby_xmalloc(vm_stack_word_size
* sizeof(VALUE
));
2155 th
->sched
.malloc_stack
= true;
2156 rb_ec_initialize_vm_stack(th
->ec
, vm_stack
, vm_stack_word_size
);
2157 th
->sched
.context_stack
= vm_stack
;
2160 int err
= native_thread_create0(th
->nt
);
2163 thread_sched_to_ready(TH_SCHED(th
), th
);
2169 call_thread_start_func_2(rb_thread_t
*th
)
/* Capture the address of a local in this stack frame to mark the beginning of the
   machine stack for this thread. This is required even if we can tell the real
   stack beginning from the pthread API in native_thread_init_stack, because
   glibc stores some of its own data on the stack before calling into user code
   on a new thread, and replacing that data on fiber-switch would break it. */
2177 VALUE stack_start
= 0;
2178 VALUE
*stack_start_addr
= asan_get_real_stack_addr(&stack_start
);
2180 native_thread_init_stack(th
, stack_start_addr
);
2181 thread_start_func_2(th
, th
->ec
->machine
.stack_start
);
static void *
nt_start(void *ptr)
{
    struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
    rb_vm_t *vm = nt->vm;

    native_thread_setup_on_thread(nt);

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif

#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
    ruby_nt_serial = nt->serial;
#endif

    RUBY_DEBUG_LOG("nt:%u", nt->serial);

    if (!nt->dedicated) {
        coroutine_initialize_main(nt->nt_context);
    }

    while (1) {
        if (nt->dedicated) {
            // wait running turn
            rb_thread_t *th = nt->running_thread;
            struct rb_thread_sched *sched = TH_SCHED(th);

            RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
            ruby_thread_set_native(th);

            thread_sched_lock(sched, th);
            {
                if (sched->running == th) {
                    thread_sched_add_running_thread(sched, th);
                }
                thread_sched_wait_running_turn(sched, th, false);
            }
            thread_sched_unlock(sched, th);

            call_thread_start_func_2(th);
            break; // TODO: allow to change to the SNT
        }
        else {
            RUBY_DEBUG_LOG("check next");
            rb_ractor_t *r = ractor_sched_deq(vm, NULL);

            if (r) {
                struct rb_thread_sched *sched = &r->threads.sched;

                thread_sched_lock(sched, NULL);
                {
                    rb_thread_t *next_th = sched->running;

                    if (next_th && next_th->nt == NULL) {
                        RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
                        thread_sched_switch0(nt->nt_context, next_th, nt, false);
                    }
                    else {
                        RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
                    }
                }
                thread_sched_unlock(sched, NULL);
            }
            else {
                // timeout -> deleted.
                break;
            }

            if (nt->dedicated) {
                // SNT becomes DNT while running
                break;
            }
        }
    }

    return NULL;
}
static int native_thread_create_shared(rb_thread_t *th);
static void nt_free_stack(void *mstack);

void
rb_threadptr_remove(rb_thread_t *th)
{
    if (th->sched.malloc_stack) {
        // dedicated
    }
    else {
        rb_vm_t *vm = th->vm;
        th->sched.finished = false;

        ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
    }
}
void
rb_threadptr_sched_free(rb_thread_t *th)
{
#if USE_MN_THREADS
    if (th->sched.malloc_stack) {
        ruby_xfree(th->sched.context_stack);
        native_thread_destroy(th->nt);
    }
    else {
        nt_free_stack(th->sched.context_stack);
        // TODO: how to free nt and nt->altstack?
    }

    ruby_xfree(th->sched.context);
    th->sched.context = NULL;
    // VM_ASSERT(th->sched.context == NULL);
#else
    ruby_xfree(th->sched.context_stack);
    native_thread_destroy(th->nt);
#endif
}
static void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
        rb_thread_t *zombie_th, *next_zombie_th;
        ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
            if (zombie_th->sched.finished) {
                ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
            }
            else {
                rb_gc_mark(zombie_th->self);
            }
        }
    }
}
static int
native_thread_create(rb_thread_t *th)
{
    VM_ASSERT(th->nt == 0);
    RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);

    if (!th->ractor->threads.sched.enable_mn_threads) {
        th->has_dedicated_nt = 1;
    }

    if (th->has_dedicated_nt) {
        return native_thread_create_dedicated(th);
    }
    else {
        return native_thread_create_shared(th);
    }
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->nt->thread_id, policy, &sp);
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
    rb_native_cond_signal(&th->nt->cond.intr);
}
static void
native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->nt->cond.intr;

    /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
     * current_time + 100,000,000 seconds, so cap the relative time at
     * 100,000,000 seconds. Waking up early because of this cap is treated as
     * a kind of spurious wakeup; the caller of native_sleep should care about
     * spurious wakeups anyway.
     *
     * See also [Bug #1341] [ruby-core:29702]
     * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
     */
    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

    THREAD_BLOCKING_BEGIN(th);
    {
        rb_native_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediately */
            RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
        }
        else {
            if (!rel) {
                rb_native_cond_wait(cond, lock);
            }
            else {
                rb_hrtime_t end;

                if (*rel > max) {
                    *rel = max;
                }

                end = native_cond_timeout(cond, *rel);
                native_cond_timedwait(cond, lock, &end);
            }
        }
        th->unblock.func = 0;

        rb_native_mutex_unlock(lock);
    }
    THREAD_BLOCKING_END(th);

    RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
}
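/*
 * Illustrative sketch (not part of CRuby): the "unblocking function" pattern
 * used by native_cond_sleep() above -- a sleeper publishes a callback that a
 * waker invokes to knock it out of pthread_cond_(timed)wait(). Every name and
 * struct below is a hypothetical, self-contained reduction; the block is
 * compiled out.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

struct example_sleeper {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool interrupted;
};

/* waker side: the role played by ubf_pthread_cond_signal() */
static void
example_ubf_wakeup(struct example_sleeper *s)
{
    pthread_mutex_lock(&s->lock);
    s->interrupted = true;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);
}

/* sleeper side: re-check the flag around the wait, because spurious wakeups
 * and interrupts delivered before the wait are both possible. */
static void
example_sleep(struct example_sleeper *s)
{
    pthread_mutex_lock(&s->lock);
    while (!s->interrupted) {
        pthread_cond_wait(&s->cond, &s->lock);
    }
    pthread_mutex_unlock(&s->lock);
}
#endif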
static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

static void
ubf_list_atfork(void)
{
    ccan_list_head_init(&ubf_list_head);
    rb_native_mutex_initialize(&ubf_list_lock);
}

RBIMPL_ATTR_MAYBE_UNUSED()
static bool
ubf_list_contain_p(rb_thread_t *th)
{
    rb_thread_t *list_th;
    ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
        if (list_th == th) return true;
    }
    return false;
}
/* The thread 'th' is registered as a thread that we are trying to unblock. */
static void
register_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func != NULL);

    rb_native_mutex_lock(&ubf_list_lock);
    {
        // check that it is not connected yet
        if (ccan_list_empty((struct ccan_list_head *)node)) {
            VM_ASSERT(!ubf_list_contain_p(th));
            ccan_list_add(&ubf_list_head, node);
        }
    }
    rb_native_mutex_unlock(&ubf_list_lock);

    timer_thread_wakeup();
}

/* The thread 'th' is unblocked. It no longer needs to be registered. */
static void
unregister_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    /* we can't allow re-entry into ubf_list_head */
    VM_ASSERT(th->unblock.func == NULL);

    if (!ccan_list_empty((struct ccan_list_head *)node)) {
        rb_native_mutex_lock(&ubf_list_lock);
        {
            VM_ASSERT(ubf_list_contain_p(th));
            ccan_list_del_init(node);
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}
/*
 * Send a signal so that the target thread returns from a blocking syscall.
 * Maybe any signal would do, but we chose SIGVTALRM.
 */
static void
ubf_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);

    pthread_kill(th->nt->thread_id, SIGVTALRM);
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
    ubf_wakeup_thread(th);
    register_ubf_list(th);
}
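/*
 * Illustrative sketch (not part of CRuby): how a pthread_kill()-based UBF
 * interrupts a blocking syscall. A handler installed *without* SA_RESTART
 * makes e.g. read()/select() in the target thread fail with EINTR, giving it
 * a chance to notice the interrupt. Names are hypothetical; CRuby installs
 * its SIGVTALRM handler elsewhere. The block is compiled out.
 */
#if 0
#include <pthread.h>
#include <signal.h>

static void example_noop_handler(int sig) { (void)sig; }

static void
example_install_wakeup_signal(void)
{
    struct sigaction sa;
    sa.sa_handler = example_noop_handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;                 /* no SA_RESTART: let syscalls return EINTR */
    sigaction(SIGVTALRM, &sa, NULL);
}

static void
example_wakeup(pthread_t target)
{
    pthread_kill(target, SIGVTALRM); /* the target's blocking syscall returns EINTR */
}
#endif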
static bool
ubf_threads_empty(void)
{
    return ccan_list_empty(&ubf_list_head) != 0;
}

static void
ubf_wakeup_all_threads(void)
{
    if (!ubf_threads_empty()) {
        rb_thread_t *th;

        rb_native_mutex_lock(&ubf_list_lock);
        {
            ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
                ubf_wakeup_thread(th);
            }
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}

#else /* USE_UBF_LIST */
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static bool ubf_threads_empty(void) { return true; }
#define ubf_list_atfork() do {} while (0)
#endif /* USE_UBF_LIST */

#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
void
rb_thread_wakeup_timer_thread(int sig)
{
    // This function can be called from signal handlers, so
    // pthread_mutex_lock() must not be used here.

    // wakeup timer thread
    timer_thread_wakeup_force();

    // interrupt main thread if main thread is available
    if (system_working) {
        rb_vm_t *vm = GET_VM();
        rb_thread_t *main_th = vm->ractor.main_thread;

        if (main_th) {
            volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);

            if (main_th_ec) {
                RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);

                if (vm->ubf_async_safe && main_th->unblock.func) {
                    (main_th->unblock.func)(main_th->unblock.arg);
                }
            }
        }
    }
}
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
static void
close_invalidate(int *fdp, const char *msg)
{
    int fd = *fdp;

    *fdp = -1;
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
    }
}

static void
close_invalidate_pair(int fds[2], const char *msg)
{
    if (USE_EVENTFD && fds[0] == fds[1]) {
        fds[1] = -1; // disable write port first
        close_invalidate(&fds[0], msg);
    }
    else {
        close_invalidate(&fds[1], msg);
        close_invalidate(&fds[0], msg);
    }
}
static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}
/* communication pipe with timer thread and signal handler */
static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] > 0 || pipes[1] > 0) {
        VM_ASSERT(pipes[0] > 0);
        VM_ASSERT(pipes[1] > 0);
        return;
    }

    /*
     * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which were
     * missing the EFD_* flags; those kernels fall back to a pipe.
     */
#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);

    if (pipes[0] >= 0) {
        rb_update_max_fd(pipes[0]);
        return;
    }
#endif

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug("can not create communication pipe");
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}
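/*
 * Illustrative sketch (not part of CRuby): the self-wakeup channel built by
 * setup_communication_pipe_internal() above. With eventfd(2) both "ends" are
 * the same fd: writers add to a 64-bit counter, a reader gets the accumulated
 * value and the counter resets; with a plain pipe a single byte plays the same
 * role. Hypothetical, Linux-leaning sketch; the block is compiled out.
 */
#if 0
#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

static int example_fd;

static int
example_open(void)
{
    example_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    return example_fd; /* -1 on failure; a caller would fall back to pipe(2) */
}

static void
example_wakeup(void)       /* async-signal-safe: only write(2) */
{
    const uint64_t one = 1;
    (void)write(example_fd, &one, sizeof(one));
}

static void
example_drain(void)        /* called after poll() reports the fd readable */
{
    uint64_t count;
    (void)read(example_fd, &count, sizeof(count));
}
#endif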
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

enum {
#if defined(__linux__)
    THREAD_NAME_MAX = 16,
#elif defined(__APPLE__)
/* Undocumented, and main thread seems unlimited */
    THREAD_NAME_MAX = 64,
#else
    THREAD_NAME_MAX = 16,
#endif
};
static VALUE threadptr_invoke_proc_location(rb_thread_t *th);

static void
native_set_thread_name(rb_thread_t *th)
{
#ifdef SET_CURRENT_THREAD_NAME
    VALUE loc;
    if (!NIL_P(loc = th->name)) {
        SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
    }
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char *name, *p;
        char buf[THREAD_NAME_MAX];
        size_t len;
        int n;

        name = RSTRING_PTR(RARRAY_AREF(loc, 0));
        p = strrchr(name, '/'); /* show only the basename of the path. */
        if (p && p[1])
            name = p + 1;

        n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));

        len = (size_t)n;
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        }
        SET_CURRENT_THREAD_NAME(buf);
    }
#endif
}
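/*
 * Illustrative sketch (not part of CRuby): what SET_CURRENT_THREAD_NAME expands
 * to on Linux. prctl(PR_SET_NAME) names only the *calling* thread and the
 * kernel truncates the name to 15 characters plus NUL, which is why the
 * snprintf() above trims to THREAD_NAME_MAX. Hypothetical, Linux-only sketch;
 * the block is compiled out.
 */
#if 0
#include <sys/prctl.h>
#include <stdio.h>

static void
example_name_current_thread(const char *file, int line)
{
    char buf[16];                               /* TASK_COMM_LEN */
    snprintf(buf, sizeof(buf), "%s:%d", file, line);
    prctl(PR_SET_NAME, buf);
}
#endif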
static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
    const char *s = "";
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
# endif
    if (!NIL_P(name)) {
        long n;
        RSTRING_GETMEM(name, s, n);
        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
            s = buf;
        }
    }
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
# endif
#endif
}
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
static VALUE
native_thread_native_thread_id(rb_thread_t *target_th)
{
    if (!target_th->nt) return Qnil;

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil;
    return INT2FIX(tid);
#elif defined(__APPLE__)
    uint64_t tid;
    /* The first condition is needed because MAC_OS_X_VERSION_10_6
       is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
       i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
       so it has C++17 and everything needed to build modern Ruby. */
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) /* never defined for PowerPC platforms */)
    const bool no_pthread_threadid_np = true;
#   define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
# else
#   if !(defined(__has_attribute) && __has_attribute(availability))
    /* __API_AVAILABLE macro does nothing on gcc */
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
#   endif
    /* Check weakly linked symbol */
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
# endif
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
    }
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
    return ULL2NUM((unsigned long long)tid);
# endif
#endif
}
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
#else
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
#endif
static struct {
    rb_serial_t created_fork_gen;
    pthread_t pthread_id;

    int comm_fds[2]; // r, w

#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
    int event_fd; // kernel event queue fd (epoll/kqueue)
#endif
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
#define EPOLL_EVENTS_MAX 0x10
    struct epoll_event finished_events[EPOLL_EVENTS_MAX];
#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
#define KQUEUE_EVENTS_MAX 0x10
    struct kevent finished_events[KQUEUE_EVENTS_MAX];
#endif

    // waiting threads list
    struct ccan_list_head waiting; // waiting threads in ractors
    pthread_mutex_t waiting_lock;
} timer_th = {
    .created_fork_gen = 0,
};

#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)

static void timer_thread_check_timeslice(rb_vm_t *vm);
static int timer_thread_set_timeout(rb_vm_t *vm);
static void timer_thread_wakeup_thread(rb_thread_t *th);

#include "thread_pthread_mn.c"
static rb_thread_t *
thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
{
    if (w) {
        return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
    }
    else {
        return NULL;
    }
}
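/*
 * Illustrative sketch (not part of CRuby): the container_of-style recovery
 * used above -- given a pointer to a member embedded in a larger struct,
 * subtract the member's offset to get back the enclosing object. Names are
 * hypothetical; the block is compiled out.
 */
#if 0
#include <stddef.h>

struct example_outer {
    int id;
    struct { int flags; } waiting;   /* embedded member */
};

static struct example_outer *
example_outer_of(void *waiting_member)
{
    return (struct example_outer *)
        ((char *)waiting_member - offsetof(struct example_outer, waiting));
}
#endif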
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
    int timeout = -1;

    ractor_sched_lock(vm, NULL);
    {
        if (   !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
            || !ubf_threads_empty()                                  // (1-3) Periodic UBF
            || vm->ractor.sched.grq_cnt > 0                          // (1-4) Lazy GRQ deq start
            ) {

            RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
                           !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
                           !ubf_threads_empty(),
                           (vm->ractor.sched.grq_cnt > 0));

            timeout = 10; // ms
            vm->ractor.sched.timeslice_wait_inf = false;
        }
        else {
            vm->ractor.sched.timeslice_wait_inf = true;
        }
    }
    ractor_sched_unlock(vm, NULL);

    if (vm->ractor.sched.timeslice_wait_inf) {
        rb_native_mutex_lock(&timer_th.waiting_lock);
        {
            struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
            rb_thread_t *th = thread_sched_waiting_thread(w);

            if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
                rb_hrtime_t now = rb_hrtime_now();
                rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);

                RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);

                // round up to the next millisecond so we never wake before the deadline
                timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
            }
        }
        rb_native_mutex_unlock(&timer_th.waiting_lock);
    }

    RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);

    // fprintf(stderr, "timeout:%d\n", timeout);
    return timeout;
}
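/*
 * Worked example for the rounding above, assuming rb_hrtime_t counts
 * nanoseconds so that RB_HRTIME_PER_MSEC is 1,000,000: a waiter due in
 * 2,500,001 ns yields (2,500,001 + 999,999) / 1,000,000 = 3 ms, i.e. the
 * poll timeout is rounded *up* and the timer thread never wakes before the
 * earliest deadline.
 */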
static void
timer_thread_check_signal(rb_vm_t *vm)
{
    // ruby_sigchld_handler(vm); TODO

    int signum = rb_signal_buff_size();
    if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
        RUBY_DEBUG_LOG("signum:%d", signum);
        threadptr_trap_interrupt(vm->ractor.main_thread);
    }
}
static bool
timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
{
    if (abs < now) {
        return true;
    }
    else if (abs - now < RB_HRTIME_PER_MSEC) {
        return true; // too short time
    }
    else {
        return false;
    }
}
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
    struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);

    if (w != NULL &&
        (w->flags & thread_sched_waiting_timeout) &&
        timer_thread_check_exceed(w->data.timeout, now)) {

        RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));

        // delete from waiting list
        ccan_list_del_init(&w->node);

        w->flags = thread_sched_waiting_none;

        return thread_sched_waiting_thread(w);
    }

    return NULL;
}
static void
timer_thread_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running != th) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            // the thread will release the execution right by itself soon
        }
    }
    thread_sched_unlock(sched, th);
}
static void
timer_thread_check_timeout(rb_vm_t *vm)
{
    rb_hrtime_t now = rb_hrtime_now();
    rb_thread_t *th;

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
        while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
            timer_thread_wakeup_thread(th);
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);
}
static void
timer_thread_check_timeslice(rb_vm_t *vm)
{
    rb_thread_t *th;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
        RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
    }
}
void
rb_assert_sig(void)
{
    sigset_t oldmask;
    pthread_sigmask(0, NULL, &oldmask);
    if (sigismember(&oldmask, SIGVTALRM)) {
        /* SIGVTALRM must not be masked here; the signal-based UBF relies on it */
    }
    else {
        RUBY_DEBUG_LOG("ok");
    }
}
static void *
timer_thread_func(void *ptr)
{
    rb_vm_t *vm = (rb_vm_t *)ptr;
#if defined(RUBY_NT_SERIAL)
    ruby_nt_serial = (rb_atomic_t)-1;
#endif

    RUBY_DEBUG_LOG("started%s", "");

    while (system_working) {
        timer_thread_check_signal(vm);
        timer_thread_check_timeout(vm);
        ubf_wakeup_all_threads();

        RUBY_DEBUG_LOG("system_working:%d", system_working);
        timer_thread_polling(vm);
    }

    RUBY_DEBUG_LOG("terminated");
    return NULL;
}
/* only use signal-safe system calls here */
static void
signal_communication_pipe(int fd)
{
#if USE_EVENTFD
    const uint64_t buff = 1;
#else
    const char buff = '!';
#endif
    ssize_t result;

    /* already opened */
    if (fd >= 0) {
      retry:
        if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
            int e = errno;
            switch (e) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
}
static void
timer_thread_wakeup_force(void)
{
    // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
    signal_communication_pipe(timer_th.comm_fds[1]);
}

static void
timer_thread_wakeup_locked(rb_vm_t *vm)
{
    // must be called with the ractor scheduler lock held.
    ASSERT_ractor_sched_locked(vm, NULL);

    if (timer_th.created_fork_gen == current_fork_gen) {
        if (vm->ractor.sched.timeslice_wait_inf) {
            RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
            timer_thread_wakeup_force();
        }
        else {
            RUBY_DEBUG_LOG("will be wakeup...");
        }
    }
}

static void
timer_thread_wakeup(void)
{
    rb_vm_t *vm = GET_VM();

    ractor_sched_lock(vm, NULL);
    {
        timer_thread_wakeup_locked(vm);
    }
    ractor_sched_unlock(vm, NULL);
}
static void
rb_thread_create_timer_thread(void)
{
    rb_serial_t created_fork_gen = timer_th.created_fork_gen;

    RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);

    timer_th.created_fork_gen = current_fork_gen;

    if (created_fork_gen != current_fork_gen) {
        if (created_fork_gen != 0) {
            RUBY_DEBUG_LOG("forked child process");

            CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
            close_invalidate(&timer_th.event_fd, "close event_fd");
#endif
            rb_native_mutex_destroy(&timer_th.waiting_lock);
        }

        ccan_list_head_init(&timer_th.waiting);
        rb_native_mutex_initialize(&timer_th.waiting_lock);

        // open communication channel
        setup_communication_pipe_internal(timer_th.comm_fds);

        timer_thread_setup_mn();
    }

    pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
}
static int
native_stop_timer_thread(void)
{
    int stopped;
    stopped = --system_working <= 0;

    if (stopped) {
        RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
        timer_thread_wakeup_force();
        RUBY_DEBUG_LOG("wakeup sent");
        pthread_join(timer_th.pthread_id, NULL);
    }

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    return stopped;
}

static void
native_reset_timer_thread(void)
{
}
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            struct rlimit rlim;
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
#endif
    else {
        return 0;
    }

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif
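/*
 * Worked example for the water-mark check above, assuming
 * RUBY_STACK_SPACE_RATIO is 5 as defined elsewhere in this file: with an
 * 8 MiB stack the guard band is min(8 MiB / 5, 1 MiB) = 1 MiB at the far end
 * of the stack, and a faulting address inside that band is reported as a
 * stack overflow instead of a plain segfault.
 */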
int
rb_reserved_fd_p(int fd)
{
    /* no false-positive if out-of-FD at startup */
    if (fd < 0) return 0;

    if (fd == timer_th.comm_fds[0] ||
        fd == timer_th.comm_fds[1]
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
        || fd == timer_th.event_fd
#endif
        ) {
        goto check_fork_gen;
    }
    return 0;

  check_fork_gen:
    if (timer_th.created_fork_gen == current_fork_gen) {
        /* async-signal-safe */
        return 1;
    }
    else {
        return 0;
    }
}
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}
#if defined(USE_POLL) && !defined(HAVE_PPOLL)
/* TODO: don't ignore sigmask */
static int
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
        else {
            tmp = (int)(ts->tv_sec * 1000);
            /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
            else
                timeout_ms = (int)(tmp + tmp2);
        }
    }
    else
        timeout_ms = -1;

    return poll(fds, nfds, timeout_ms);
}
#  define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
#endif
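/*
 * Worked example for the fallback above: ts = { .tv_sec = 1, .tv_nsec = 1 }
 * becomes 1000 ms + ceil(1 ns / 1 ms) = 1001 ms, and any value that would
 * overflow an int saturates to INT_MAX, so poll() never receives a bogus
 * negative timeout (negative means "block indefinitely" to poll()).
 */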
/*
 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
 * since threads may be too starved to enter the GVL waitqueue for
 * us to detect contention.  Instead, we want to kick other threads
 * so they can run and possibly prevent us from entering slow paths
 * in ppoll() or similar syscalls.
 *
 * Confirmed on FreeBSD 11.2 and Linux 4.19.
 * [ruby-core:90417] [Bug #15398]
 */
#define THREAD_BLOCKING_YIELD(th) do { \
    const rb_thread_t *next_th; \
    struct rb_thread_sched *sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting(sched, (th)); \
    next_th = sched->running; \
    rb_native_mutex_unlock(&sched->lock_); \
    native_thread_yield(); /* TODO: needed? */ \
    if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
    }
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
    if (rel) {
        if (th_has_dedicated_nt(th)) {
            native_cond_sleep(th, rel);
        }
        else {
            thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
        }
    }
    else {
        thread_sched_to_waiting_until_wakeup(sched, th);
    }

    RUBY_DEBUG_LOG("wakeup");
}
// fork read-write lock (only for pthread)
static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

void
rb_thread_release_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}

void
rb_thread_reset_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_destroy", r);
    }

    if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
        rb_bug_errno("pthread_rwlock_init", r);
    }
}

void *
rb_thread_prevent_fork(void *(*func)(void *), void *data)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }
    void *result = func(data);
    rb_thread_release_fork_lock();
    return result;
}

void
rb_thread_acquire_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }
}
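/*
 * Illustrative sketch (not part of CRuby): the read-write-lock idea behind
 * rb_thread_prevent_fork() / rb_thread_acquire_fork_lock(). Many threads may
 * hold the lock as readers while doing work that must not race with fork();
 * the forking thread takes it as a writer, so it waits for all readers to
 * finish and blocks new ones. Names below are hypothetical; the block is
 * compiled out.
 */
#if 0
#include <pthread.h>

static pthread_rwlock_t example_fork_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *
example_do_work_no_fork(void *(*func)(void *), void *data)
{
    pthread_rwlock_rdlock(&example_fork_lock);   /* shared: many workers */
    void *result = func(data);
    pthread_rwlock_unlock(&example_fork_lock);
    return result;
}

static void
example_before_fork(void)
{
    pthread_rwlock_wrlock(&example_fork_lock);   /* exclusive: the forking thread */
}

static void
example_after_fork(void)
{
    pthread_rwlock_unlock(&example_fork_lock);
}
#endif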
// thread internal event hooks (only for pthread)

struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    rb_event_flag_t event;
    void *user_data;

    struct rb_internal_thread_event_hook *next;
};

static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }

    return hook;
}
bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
        success = TRUE;
    }
    else {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;

        do {
            if (h->next == hook) {
                h->next = hook->next;
                success = TRUE;
                break;
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }

    if (success) {
        ruby_xfree(hook);
    }
    return success;
}
void
rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
        do {
            if (h->event & event) {
                rb_internal_thread_event_data_t event_data = {
                    .thread = th->self,
                };
                (*h->callback)(event, &event_data, h->user_data);
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}
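/*
 * Illustrative sketch (not part of CRuby): how an extension might consume the
 * hook list maintained above through the public API declared in ruby/thread.h.
 * The callback runs on the thread that triggered the event, so it should stay
 * cheap and must not assume it holds the GVL. The counter and function names
 * are hypothetical; the block is compiled out.
 */
#if 0
#include <ruby/ruby.h>
#include <ruby/thread.h>
#include <ruby/atomic.h>

static rb_atomic_t example_started_count;
static rb_internal_thread_event_hook_t *example_hook;

static void
example_on_thread_event(rb_event_flag_t event,
                        const rb_internal_thread_event_data_t *event_data,
                        void *user_data)
{
    (void)event_data; (void)user_data;
    if (event & RUBY_INTERNAL_THREAD_EVENT_STARTED) {
        RUBY_ATOMIC_FETCH_ADD(example_started_count, 1);
    }
}

static void
example_install_hook(void)
{
    example_hook = rb_internal_thread_add_event_hook(
        example_on_thread_event, RUBY_INTERNAL_THREAD_EVENT_STARTED, NULL);
}

static void
example_remove_hook(void)
{
    rb_internal_thread_remove_event_hook(example_hook);
}
#endif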
// Returns true if the current thread acquired a DNT.
// Returns false if the current thread already had a DNT.
bool
rb_thread_lock_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();
    bool is_snt = th->nt->dedicated == 0;
    native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    return is_snt;
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */