1 /* -*-c-*- */
2 /**********************************************************************
4 thread_pthread.c -
6 $Author$
8 Copyright (C) 2004-2007 Koichi Sasada
10 **********************************************************************/
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
14 #include "internal/gc.h"
15 #include "internal/sanitizers.h"
17 #ifdef HAVE_SYS_RESOURCE_H
18 #include <sys/resource.h>
19 #endif
20 #ifdef HAVE_THR_STKSEGMENT
21 #include <thread.h>
22 #endif
23 #if defined(HAVE_FCNTL_H)
24 #include <fcntl.h>
25 #elif defined(HAVE_SYS_FCNTL_H)
26 #include <sys/fcntl.h>
27 #endif
28 #ifdef HAVE_SYS_PRCTL_H
29 #include <sys/prctl.h>
30 #endif
31 #if defined(HAVE_SYS_TIME_H)
32 #include <sys/time.h>
33 #endif
34 #if defined(__HAIKU__)
35 #include <kernel/OS.h>
36 #endif
37 #ifdef __linux__
38 #include <sys/syscall.h> /* for SYS_gettid */
39 #endif
40 #include <time.h>
41 #include <signal.h>
43 #if defined __APPLE__
44 # include <AvailabilityMacros.h>
45 #endif
47 #if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
48 # define USE_EVENTFD (1)
49 # include <sys/eventfd.h>
50 #else
51 # define USE_EVENTFD (0)
52 #endif
54 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
55 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
56 defined(HAVE_CLOCK_GETTIME)
57 static pthread_condattr_t condattr_mono;
58 static pthread_condattr_t *condattr_monotonic = &condattr_mono;
59 #else
60 static const void *const condattr_monotonic = NULL;
61 #endif
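#if 0
/*
 * Illustrative sketch, not part of thread_pthread.c: when
 * pthread_condattr_setclock() is available, condattr_monotonic lets the
 * wrappers below bind condition variables to CLOCK_MONOTONIC, so timed
 * waits are immune to wall-clock adjustments. The helper name is made up;
 * only standard POSIX calls are used.
 */
static void
example_monotonic_timedwait(pthread_mutex_t *lock, pthread_cond_t *cond)
{
    pthread_condattr_t attr;
    pthread_condattr_init(&attr);
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    pthread_cond_init(cond, &attr);
    pthread_condattr_destroy(&attr);

    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts); /* deadline base uses the same clock */
    ts.tv_sec += 1;                      /* wait at most ~1 second */

    pthread_mutex_lock(lock);
    pthread_cond_timedwait(cond, lock, &ts);
    pthread_mutex_unlock(lock);
}
#endif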
63 #include COROUTINE_H
65 #ifndef HAVE_SYS_EVENT_H
66 #define HAVE_SYS_EVENT_H 0
67 #endif
69 #ifndef HAVE_SYS_EPOLL_H
70 #define HAVE_SYS_EPOLL_H 0
71 #else
72 // force setting for debug
73 // #undef HAVE_SYS_EPOLL_H
74 // #define HAVE_SYS_EPOLL_H 0
75 #endif
77 #ifndef USE_MN_THREADS
78 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
 79 // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
 80 // with COROUTINE_PTHREAD_CONTEXT, M:N threads are not worth using.
81 #define USE_MN_THREADS 0
82 #elif HAVE_SYS_EPOLL_H
83 #include <sys/epoll.h>
84 #define USE_MN_THREADS 1
85 #elif HAVE_SYS_EVENT_H
86 #include <sys/event.h>
87 #define USE_MN_THREADS 1
88 #else
89 #define USE_MN_THREADS 0
90 #endif
91 #endif
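#if 0
/*
 * Illustrative sketch, not part of thread_pthread.c: the readiness-wait
 * pattern an epoll-based M:N scheduler depends on, which is why
 * USE_MN_THREADS is only enabled when <sys/epoll.h> (Linux) or
 * <sys/event.h> (kqueue) is usable. Hypothetical helper; assumes
 * <sys/epoll.h> and <unistd.h> are available.
 */
static void
example_wait_readable(int fd)
{
    int epfd = epoll_create1(0);
    struct epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);

    struct epoll_event out[8];
    /* block for up to 100ms until fd becomes readable */
    (void)epoll_wait(epfd, out, 8, 100);
    close(epfd);
}
#endif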
93 // native thread wrappers
95 #define NATIVE_MUTEX_LOCK_DEBUG 0
97 static void
98 mutex_debug(const char *msg, void *lock)
100 if (NATIVE_MUTEX_LOCK_DEBUG) {
101 int r;
102 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
104 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
105 fprintf(stdout, "%s: %p\n", msg, lock);
106 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
110 void
111 rb_native_mutex_lock(pthread_mutex_t *lock)
113 int r;
114 mutex_debug("lock", lock);
115 if ((r = pthread_mutex_lock(lock)) != 0) {
116 rb_bug_errno("pthread_mutex_lock", r);
120 void
121 rb_native_mutex_unlock(pthread_mutex_t *lock)
123 int r;
124 mutex_debug("unlock", lock);
125 if ((r = pthread_mutex_unlock(lock)) != 0) {
126 rb_bug_errno("pthread_mutex_unlock", r);
131 rb_native_mutex_trylock(pthread_mutex_t *lock)
133 int r;
134 mutex_debug("trylock", lock);
135 if ((r = pthread_mutex_trylock(lock)) != 0) {
136 if (r == EBUSY) {
137 return EBUSY;
139 else {
140 rb_bug_errno("pthread_mutex_trylock", r);
143 return 0;
146 void
147 rb_native_mutex_initialize(pthread_mutex_t *lock)
149 int r = pthread_mutex_init(lock, 0);
150 mutex_debug("init", lock);
151 if (r != 0) {
152 rb_bug_errno("pthread_mutex_init", r);
156 void
157 rb_native_mutex_destroy(pthread_mutex_t *lock)
159 int r = pthread_mutex_destroy(lock);
160 mutex_debug("destroy", lock);
161 if (r != 0) {
162 rb_bug_errno("pthread_mutex_destroy", r);
166 void
167 rb_native_cond_initialize(rb_nativethread_cond_t *cond)
169 int r = pthread_cond_init(cond, condattr_monotonic);
170 if (r != 0) {
171 rb_bug_errno("pthread_cond_init", r);
175 void
176 rb_native_cond_destroy(rb_nativethread_cond_t *cond)
178 int r = pthread_cond_destroy(cond);
179 if (r != 0) {
180 rb_bug_errno("pthread_cond_destroy", r);
 185  * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
 186  * EAGAIN after retrying 8192 times. You can see this in the following source:
 188  * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 190  * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 191  * need to retry until the pthread functions no longer return EAGAIN.
194 void
195 rb_native_cond_signal(rb_nativethread_cond_t *cond)
197 int r;
198 do {
199 r = pthread_cond_signal(cond);
200 } while (r == EAGAIN);
201 if (r != 0) {
202 rb_bug_errno("pthread_cond_signal", r);
206 void
207 rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
209 int r;
210 do {
211 r = pthread_cond_broadcast(cond);
212 } while (r == EAGAIN);
213 if (r != 0) {
214 rb_bug_errno("rb_native_cond_broadcast", r);
218 void
219 rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
221 int r = pthread_cond_wait(cond, mutex);
222 if (r != 0) {
223 rb_bug_errno("pthread_cond_wait", r);
227 static int
228 native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
230 int r;
231 struct timespec ts;
 234  * An old Linux kernel may return EINTR, even though POSIX says
 235  * "These functions shall not return an error code of [EINTR]".
236 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
237 * Let's hide it from arch generic code.
239 do {
240 rb_hrtime2timespec(&ts, abs);
241 r = pthread_cond_timedwait(cond, mutex, &ts);
242 } while (r == EINTR);
244 if (r != 0 && r != ETIMEDOUT) {
245 rb_bug_errno("pthread_cond_timedwait", r);
248 return r;
251 static rb_hrtime_t
252 native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
254 if (condattr_monotonic) {
255 return rb_hrtime_add(rb_hrtime_now(), rel);
257 else {
258 struct timespec ts;
260 rb_timespec_now(&ts);
261 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
265 void
266 rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
268 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
269 native_cond_timedwait(cond, mutex, &hrmsec);
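#if 0
/*
 * Illustrative usage sketch, not part of thread_pthread.c: a bounded wait
 * on a flag using the wrappers defined above. The struct and function
 * names are hypothetical; the wrapper signatures are the ones in this file.
 */
struct example_waiter {
    pthread_mutex_t lock;
    rb_nativethread_cond_t cond;
    int ready;
};

static void
example_wait_until_ready(struct example_waiter *w)
{
    rb_native_mutex_lock(&w->lock);
    while (!w->ready) {
        /* re-check the predicate after each (possibly spurious) wakeup */
        rb_native_cond_timedwait(&w->cond, &w->lock, 100 /* msec */);
    }
    rb_native_mutex_unlock(&w->lock);
}
#endif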
272 // thread scheduling
274 static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
275 static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
277 #if 0
278 static const char *
279 event_name(rb_event_flag_t event)
281 switch (event) {
282 case RUBY_INTERNAL_THREAD_EVENT_STARTED:
283 return "STARTED";
284 case RUBY_INTERNAL_THREAD_EVENT_READY:
285 return "READY";
286 case RUBY_INTERNAL_THREAD_EVENT_RESUMED:
287 return "RESUMED";
288 case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
289 return "SUSPENDED";
290 case RUBY_INTERNAL_THREAD_EVENT_EXITED:
291 return "EXITED";
293 return "no-event";
296 #define RB_INTERNAL_THREAD_HOOK(event, th) \
297 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
298 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
299 rb_thread_execute_hooks(event, th); \
301 #else
302 #define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
303 #endif
305 static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
307 #if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
308 # define USE_UBF_LIST 1
309 #endif
311 static void threadptr_trap_interrupt(rb_thread_t *);
313 #ifdef HAVE_SCHED_YIELD
314 #define native_thread_yield() (void)sched_yield()
315 #else
316 #define native_thread_yield() ((void)0)
317 #endif
319 static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
320 static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
321 static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
323 static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
324 static void timer_thread_wakeup(void);
325 static void timer_thread_wakeup_locked(rb_vm_t *vm);
326 static void timer_thread_wakeup_force(void);
327 static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
328 static void coroutine_transfer0(struct coroutine_context *transfer_from,
329 struct coroutine_context *transfer_to, bool to_dead);
331 #define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
333 static bool
334 th_has_dedicated_nt(const rb_thread_t *th)
336 // TODO: th->has_dedicated_nt
337 return th->nt->dedicated > 0;
340 RBIMPL_ATTR_MAYBE_UNUSED()
341 static void
342 thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
344 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
345 rb_thread_t *th;
346 int i = 0;
347 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
348 i++; if (i>10) rb_bug("too many");
349 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
350 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
351 th->nt ? (int)th->nt->serial : -1);
355 #define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
357 RBIMPL_ATTR_MAYBE_UNUSED()
358 static void
359 ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
361 rb_ractor_t *r;
363 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
365 int i = 0;
366 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
367 i++;
368 if (i>10) rb_bug("!!");
369 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
373 #define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
374 #define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
376 static void
377 thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
379 rb_native_mutex_lock(&sched->lock_);
381 #if VM_CHECK_MODE
382 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
383 VM_ASSERT(sched->lock_owner == NULL);
384 sched->lock_owner = th;
385 #else
386 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
387 #endif
390 static void
391 thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
393 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
395 #if VM_CHECK_MODE
396 VM_ASSERT(sched->lock_owner == th);
397 sched->lock_owner = NULL;
398 #endif
400 rb_native_mutex_unlock(&sched->lock_);
403 static void
404 thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
406 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
408 #if VM_CHECK_MODE > 0
409 sched->lock_owner = th;
410 #endif
413 static void
414 ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
416 VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);
418 #if VM_CHECK_MODE
419 if (th) {
420 VM_ASSERT(sched->lock_owner == th);
422 else {
423 VM_ASSERT(sched->lock_owner != NULL);
425 #endif
428 #define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
429 #define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
431 RBIMPL_ATTR_MAYBE_UNUSED()
432 static unsigned int
433 rb_ractor_serial(const rb_ractor_t *r)
435 if (r) {
436 return rb_ractor_id(r);
438 else {
439 return 0;
443 static void
444 ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
446 #if VM_CHECK_MODE > 0
447 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
448 VM_ASSERT(vm->ractor.sched.locked == false);
450 vm->ractor.sched.lock_owner = cr;
451 vm->ractor.sched.locked = true;
452 #endif
455 static void
456 ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
458 #if VM_CHECK_MODE > 0
459 VM_ASSERT(vm->ractor.sched.locked);
460 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
462 vm->ractor.sched.locked = false;
463 vm->ractor.sched.lock_owner = NULL;
464 #endif
467 static void
468 ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
470 rb_native_mutex_lock(&vm->ractor.sched.lock);
472 #if VM_CHECK_MODE
473 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
474 #else
475 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
476 #endif
478 ractor_sched_set_locked(vm, cr);
481 static void
482 ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
484 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
486 ractor_sched_set_unlocked(vm, cr);
487 rb_native_mutex_unlock(&vm->ractor.sched.lock);
490 static void
491 ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
493 VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
494 VM_ASSERT(vm->ractor.sched.locked);
495 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
498 RBIMPL_ATTR_MAYBE_UNUSED()
499 static bool
500 ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
502 rb_thread_t *rth;
503 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
504 if (rth == th) return true;
506 return false;
509 RBIMPL_ATTR_MAYBE_UNUSED()
510 static unsigned int
511 ractor_sched_running_threads_size(rb_vm_t *vm)
513 rb_thread_t *th;
514 unsigned int i = 0;
515 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
516 i++;
518 return i;
521 RBIMPL_ATTR_MAYBE_UNUSED()
522 static unsigned int
523 ractor_sched_timeslice_threads_size(rb_vm_t *vm)
525 rb_thread_t *th;
526 unsigned int i = 0;
527 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
528 i++;
530 return i;
533 RBIMPL_ATTR_MAYBE_UNUSED()
534 static bool
535 ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
537 rb_thread_t *rth;
538 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
539 if (rth == th) return true;
541 return false;
544 static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
545 static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
547 // setup timeslice signals by the timer thread.
548 static void
549 thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
550 rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
552 #if USE_RUBY_DEBUG_LOG
553 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
554 #endif
556 rb_thread_t *del_timeslice_th;
558 if (del_th && sched->is_running_timeslice) {
559 del_timeslice_th = del_th;
560 sched->is_running_timeslice = false;
562 else {
563 del_timeslice_th = NULL;
566 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
567 rb_th_serial(add_th), rb_th_serial(del_th),
568 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
570 ractor_sched_lock(vm, cr);
572 // update running_threads
573 if (del_th) {
574 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
575 VM_ASSERT(del_timeslice_th != NULL ||
576 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
578 ccan_list_del_init(&del_th->sched.node.running_threads);
579 vm->ractor.sched.running_cnt--;
581 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
582 ractor_sched_barrier_join_signal_locked(vm);
584 sched->is_running = false;
587 if (add_th) {
588 while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
589 RUBY_DEBUG_LOG("barrier-wait");
591 ractor_sched_barrier_join_signal_locked(vm);
592 ractor_sched_barrier_join_wait_locked(vm, add_th);
595 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
596 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
598 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
599 vm->ractor.sched.running_cnt++;
600 sched->is_running = true;
601 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
604 if (add_timeslice_th) {
605 // update timeslice threads
606 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
607 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
608 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
609 sched->is_running_timeslice = true;
610 if (was_empty) {
611 timer_thread_wakeup_locked(vm);
615 if (del_timeslice_th) {
616 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
617 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
620 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
621 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
623 ractor_sched_unlock(vm, cr);
625 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
626 // it can be after barrier synchronization by another ractor
627 rb_thread_t *lock_owner = NULL;
628 #if VM_CHECK_MODE
629 lock_owner = sched->lock_owner;
630 #endif
631 thread_sched_unlock(sched, lock_owner);
633 RB_VM_LOCK_ENTER();
634 RB_VM_LOCK_LEAVE();
636 thread_sched_lock(sched, lock_owner);
639 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
640 // rb_th_serial(add_th), rb_th_serial(del_th),
641 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
642 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
645 static void
646 thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
648 ASSERT_thread_sched_locked(sched, th);
649 VM_ASSERT(sched->running == th);
651 rb_vm_t *vm = th->vm;
652 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
655 static void
656 thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
658 ASSERT_thread_sched_locked(sched, th);
660 rb_vm_t *vm = th->vm;
661 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
664 void
665 rb_add_running_thread(rb_thread_t *th)
667 struct rb_thread_sched *sched = TH_SCHED(th);
669 thread_sched_lock(sched, th);
671 thread_sched_add_running_thread(sched, th);
673 thread_sched_unlock(sched, th);
676 void
677 rb_del_running_thread(rb_thread_t *th)
679 struct rb_thread_sched *sched = TH_SCHED(th);
681 thread_sched_lock(sched, th);
683 thread_sched_del_running_thread(sched, th);
685 thread_sched_unlock(sched, th);
688 // setup current or next running thread
 689 // sched->running should be set only in this function.
 691 // if th is NULL, there are no running threads.
692 static void
693 thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
695 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
696 VM_ASSERT(sched->running != th);
698 sched->running = th;
701 RBIMPL_ATTR_MAYBE_UNUSED()
702 static bool
703 thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
705 rb_thread_t *rth;
706 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
707 if (rth == th) return true;
709 return false;
 712 // dequeue a thread from the ready queue.
 713 // if the ready queue is empty, return NULL.
 715 // return the dequeued thread (or NULL).
716 static rb_thread_t *
717 thread_sched_deq(struct rb_thread_sched *sched)
719 ASSERT_thread_sched_locked(sched, NULL);
720 rb_thread_t *next_th;
722 VM_ASSERT(sched->running != NULL);
724 if (ccan_list_empty(&sched->readyq)) {
725 next_th = NULL;
727 else {
728 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
730 VM_ASSERT(sched->readyq_cnt > 0);
731 sched->readyq_cnt--;
732 ccan_list_node_init(&next_th->sched.node.readyq);
735 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
737 return next_th;
740 // enqueue ready thread to the ready queue.
741 static void
742 thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
744 ASSERT_thread_sched_locked(sched, NULL);
745 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
747 VM_ASSERT(sched->running != NULL);
748 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
750 if (sched->is_running) {
751 if (ccan_list_empty(&sched->readyq)) {
752 // add sched->running to timeslice
753 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
756 else {
757 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
760 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
761 sched->readyq_cnt++;
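/*
 * Worked example, derived from the code above: the ready queue is FIFO.
 * With sched->running == C and readyq == [A, B], thread_sched_enq(sched, D)
 * appends at the tail (readyq == [A, B, D]); when C gives up the running
 * slot, thread_sched_deq() pops A from the head, so a ractor's threads take
 * turns in enqueue order.
 */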
764 // DNT: kick condvar
765 // SNT: TODO
766 static void
767 thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
769 ASSERT_thread_sched_locked(sched, NULL);
770 VM_ASSERT(sched->running == next_th);
772 if (next_th) {
773 if (next_th->nt) {
774 if (th_has_dedicated_nt(next_th)) {
775 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
776 rb_native_cond_signal(&next_th->nt->cond.readyq);
778 else {
779 // TODO
780 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
783 else {
784 if (will_switch) {
785 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
787 else {
788 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
789 ractor_sched_enq(next_th->vm, next_th->ractor);
793 else {
794 RUBY_DEBUG_LOG("no waiting threads%s", "");
798 // waiting -> ready (locked)
799 static void
800 thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
802 RUBY_DEBUG_LOG("th:%u running:%u redyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
804 VM_ASSERT(sched->running != th);
805 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
806 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
808 if (sched->running == NULL) {
809 thread_sched_set_running(sched, th);
810 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
812 else {
813 thread_sched_enq(sched, th);
817 // waiting -> ready
 819 // `th` was put into the "waiting" state by `thread_sched_to_waiting`,
 820 // and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
821 RBIMPL_ATTR_MAYBE_UNUSED()
822 static void
823 thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
825 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
827 thread_sched_lock(sched, th);
829 thread_sched_to_ready_common(sched, th, true, false);
831 thread_sched_unlock(sched, th);
834 // wait until sched->running is `th`.
835 static void
836 thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
838 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
840 ASSERT_thread_sched_locked(sched, th);
841 VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));
843 if (th != sched->running) {
844 // already deleted from running threads
845 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
847 // wait for execution right
848 rb_thread_t *next_th;
849 while((next_th = sched->running) != th) {
850 if (th_has_dedicated_nt(th)) {
851 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
853 thread_sched_set_lock_owner(sched, NULL);
855 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
856 rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
858 thread_sched_set_lock_owner(sched, th);
860 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
861 if (th == sched->running) {
862 rb_ractor_thread_switch(th->ractor, th);
865 else {
866 // search another ready thread
867 if (can_direct_transfer &&
868 (next_th = sched->running) != NULL &&
869 !next_th->nt // next_th is running or has dedicated nt
872 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
874 thread_sched_set_lock_owner(sched, NULL);
876 rb_ractor_set_current_ec(th->ractor, NULL);
877 thread_sched_switch(th, next_th);
879 thread_sched_set_lock_owner(sched, th);
881 else {
882 // search another ready ractor
883 struct rb_native_thread *nt = th->nt;
884 native_thread_assign(NULL, th);
886 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
888 thread_sched_set_lock_owner(sched, NULL);
890 rb_ractor_set_current_ec(th->ractor, NULL);
891 coroutine_transfer0(th->sched.context, nt->nt_context, false);
893 thread_sched_set_lock_owner(sched, th);
896 VM_ASSERT(rb_current_ec_noinline() == th->ec);
900 VM_ASSERT(th->nt != NULL);
901 VM_ASSERT(rb_current_ec_noinline() == th->ec);
902 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
904 // add th to running threads
905 thread_sched_add_running_thread(sched, th);
908 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
909 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
912 // waiting -> ready -> running (locked)
913 static void
914 thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
916 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
918 VM_ASSERT(sched->running != th);
919 VM_ASSERT(th_has_dedicated_nt(th));
920 VM_ASSERT(GET_THREAD() == th);
922 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
924 // waiting -> ready
925 thread_sched_to_ready_common(sched, th, false, false);
927 if (sched->running == th) {
928 thread_sched_add_running_thread(sched, th);
931 // TODO: check SNT number
932 thread_sched_wait_running_turn(sched, th, false);
935 // waiting -> ready -> running
 937 // `th` was made to wait by `thread_sched_to_waiting()`
 938 // and ran a dedicated task (like waitpid and so on).
 939 // After the dedicated task, this function is called
 940 // to rejoin normal thread scheduling.
941 static void
942 thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
944 thread_sched_lock(sched, th);
946 thread_sched_to_running_common(sched, th);
948 thread_sched_unlock(sched, th);
 951 // resume the next thread in the thread ready queue.
 953 // dequeue the next running thread from the ready thread queue and
 954 // resume it if available.
 956 // If the next thread has a dedicated native thread, simply signal it to resume.
 957 // Otherwise, make the ractor ready so another NT will run the ractor and the thread.
958 static void
959 thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
961 ASSERT_thread_sched_locked(sched, th);
963 VM_ASSERT(sched->running == th);
964 VM_ASSERT(sched->running->nt != NULL);
966 rb_thread_t *next_th = thread_sched_deq(sched);
968 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
969 VM_ASSERT(th != next_th);
971 thread_sched_set_running(sched, next_th);
972 VM_ASSERT(next_th == sched->running);
973 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
975 if (th != next_th) {
976 thread_sched_del_running_thread(sched, th);
980 // running -> waiting
982 // to_dead: false
983 // th will run dedicated task.
984 // run another ready thread.
985 // to_dead: true
986 // th will be dead.
987 // run another ready thread.
988 static void
989 thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
991 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
993 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
995 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
997 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
998 thread_sched_wakeup_next_thread(sched, th, can_switch);
1001 // running -> dead (locked)
1002 static void
1003 thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
1005 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1006 thread_sched_to_waiting_common0(sched, th, true);
1007 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
1010 // running -> dead
1011 static void
1012 thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1014 thread_sched_lock(sched, th);
1016 thread_sched_to_dead_common(sched, th);
1018 thread_sched_unlock(sched, th);
1021 // running -> waiting (locked)
 1023 // This thread will run a dedicated task (th->nt->dedicated++).
1024 static void
1025 thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
1027 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1028 thread_sched_to_waiting_common0(sched, th, false);
1031 // running -> waiting
1033 // This thread will run a dedicated task.
1034 static void
1035 thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
1037 thread_sched_lock(sched, th);
1039 thread_sched_to_waiting_common(sched, th);
1041 thread_sched_unlock(sched, th);
1044 // mini utility func
1045 static void
1046 setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1048 rb_native_mutex_lock(&th->interrupt_lock);
1050 th->unblock.func = func;
1051 th->unblock.arg = arg;
1053 rb_native_mutex_unlock(&th->interrupt_lock);
1056 static void
1057 ubf_waiting(void *ptr)
1059 rb_thread_t *th = (rb_thread_t *)ptr;
1060 struct rb_thread_sched *sched = TH_SCHED(th);
1062 // only once. it is safe because th->interrupt_lock is already acquired.
1063 th->unblock.func = NULL;
1064 th->unblock.arg = NULL;
1066 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1068 thread_sched_lock(sched, th);
1070 if (sched->running == th) {
1071 // not sleeping yet.
1073 else {
1074 thread_sched_to_ready_common(sched, th, true, false);
1077 thread_sched_unlock(sched, th);
1080 // running -> waiting
 1082 // This thread will sleep until another thread wakes it up.
1083 static void
1084 thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
1086 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1088 RB_VM_SAVE_MACHINE_CONTEXT(th);
1089 setup_ubf(th, ubf_waiting, (void *)th);
1091 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1093 thread_sched_lock(sched, th);
1095 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1096 bool can_direct_transfer = !th_has_dedicated_nt(th);
1097 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1098 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1100 else {
1101 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1104 thread_sched_unlock(sched, th);
1106 setup_ubf(th, NULL, NULL);
1109 // run another thread in the ready queue.
1110 // continue to run if there are no ready threads.
1111 static void
1112 thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
1114 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1116 thread_sched_lock(sched, th);
1118 if (!ccan_list_empty(&sched->readyq)) {
1119 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1120 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1121 bool can_direct_transfer = !th_has_dedicated_nt(th);
1122 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1123 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1125 else {
1126 VM_ASSERT(sched->readyq_cnt == 0);
1129 thread_sched_unlock(sched, th);
1132 void
1133 rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1135 rb_native_mutex_initialize(&sched->lock_);
1137 #if VM_CHECK_MODE
1138 sched->lock_owner = NULL;
1139 #endif
1141 ccan_list_head_init(&sched->readyq);
1142 sched->readyq_cnt = 0;
1144 #if USE_MN_THREADS
1145 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1146 #endif
1149 static void
1150 coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
1152 #ifdef RUBY_ASAN_ENABLED
1153 void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
1154 __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
1155 #endif
1157 RBIMPL_ATTR_MAYBE_UNUSED()
1158 struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
1160 /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
1161 * never be resumed! */
1162 VM_ASSERT(!to_dead);
1163 #ifdef RUBY_ASAN_ENABLED
1164 __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
1165 (const void**)&returning_from->stack_base, &returning_from->stack_size);
1166 #endif
1170 static void
1171 thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
1173 VM_ASSERT(!nt->dedicated);
1174 VM_ASSERT(next_th->nt == NULL);
1176 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1178 ruby_thread_set_native(next_th);
1179 native_thread_assign(nt, next_th);
1181 coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
1184 static void
1185 thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1187 struct rb_native_thread *nt = cth->nt;
1188 native_thread_assign(NULL, cth);
1189 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1190 thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
1193 #if VM_CHECK_MODE > 0
1194 RBIMPL_ATTR_MAYBE_UNUSED()
1195 static unsigned int
1196 grq_size(rb_vm_t *vm, rb_ractor_t *cr)
1198 ASSERT_ractor_sched_locked(vm, cr);
1200 rb_ractor_t *r, *prev_r = NULL;
1201 unsigned int i = 0;
1203 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1204 i++;
1206 VM_ASSERT(r != prev_r);
1207 prev_r = r;
1209 return i;
1211 #endif
1213 static void
1214 ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
1216 struct rb_thread_sched *sched = &r->threads.sched;
1217 rb_ractor_t *cr = NULL; // timer thread can call this function
1219 VM_ASSERT(sched->running != NULL);
1220 VM_ASSERT(sched->running->nt == NULL);
1222 ractor_sched_lock(vm, cr);
1224 #if VM_CHECK_MODE > 0
1225 // check if grq contains r
1226 rb_ractor_t *tr;
1227 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1228 VM_ASSERT(r != tr);
1230 #endif
1232 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1233 vm->ractor.sched.grq_cnt++;
1234 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1236 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1238 rb_native_cond_signal(&vm->ractor.sched.cond);
1240 // ractor_sched_dump(vm);
1242 ractor_sched_unlock(vm, cr);
1246 #ifndef SNT_KEEP_SECONDS
1247 #define SNT_KEEP_SECONDS 0
1248 #endif
1250 #ifndef MINIMUM_SNT
1251 // make at least MINIMUM_SNT snts for debug.
1252 #define MINIMUM_SNT 0
1253 #endif
1255 static rb_ractor_t *
1256 ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
1258 rb_ractor_t *r;
1260 ractor_sched_lock(vm, cr);
1262 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1263 // ractor_sched_dump(vm);
1265 VM_ASSERT(rb_current_execution_context(false) == NULL);
1266 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1268 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1269 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1271 #if SNT_KEEP_SECONDS > 0
1272 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1273 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1274 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1275 VM_ASSERT(r == NULL);
1276 vm->ractor.sched.snt_cnt--;
1277 vm->ractor.sched.running_cnt--;
1278 break;
1280 else {
1281 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1283 #else
1284 ractor_sched_set_unlocked(vm, cr);
1285 rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
1286 ractor_sched_set_locked(vm, cr);
1288 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1289 #endif
1292 VM_ASSERT(rb_current_execution_context(false) == NULL);
1294 if (r) {
1295 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1296 vm->ractor.sched.grq_cnt--;
1297 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1299 else {
1300 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1301 // timeout
1304 ractor_sched_unlock(vm, cr);
1306 return r;
1309 void rb_ractor_lock_self(rb_ractor_t *r);
1310 void rb_ractor_unlock_self(rb_ractor_t *r);
1312 void
1313 rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
 1315     // the ractor lock of cr is acquired
 1316     // cr is in sleeping status
1317 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1318 struct rb_thread_sched *sched = TH_SCHED(th);
1319 cr->sync.wait.waiting_thread = th; // TODO: multi-thread
1321 setup_ubf(th, ubf, (void *)cr);
1323 thread_sched_lock(sched, th);
1325 rb_ractor_unlock_self(cr);
1327 if (RUBY_VM_INTERRUPTED(th->ec)) {
1328 RUBY_DEBUG_LOG("interrupted");
1330 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1331 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1333 else {
1334 // sleep
1335 RB_VM_SAVE_MACHINE_CONTEXT(th);
1336 th->status = THREAD_STOPPED_FOREVER;
1338 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1340 bool can_direct_transfer = !th_has_dedicated_nt(th);
1341 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1342 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1343 th->status = THREAD_RUNNABLE;
1344 // wakeup
1348 thread_sched_unlock(sched, th);
1350 setup_ubf(th, NULL, NULL);
1352 rb_ractor_lock_self(cr);
1353 cr->sync.wait.waiting_thread = NULL;
1356 void
1357 rb_ractor_sched_wakeup(rb_ractor_t *r)
1359 rb_thread_t *r_th = r->sync.wait.waiting_thread;
1360 // ractor lock of r is acquired
1361 struct rb_thread_sched *sched = TH_SCHED(r_th);
1363 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1365 thread_sched_lock(sched, r_th);
1367 if (r_th->status == THREAD_STOPPED_FOREVER) {
1368 thread_sched_to_ready_common(sched, r_th, true, false);
1371 thread_sched_unlock(sched, r_th);
1374 static bool
1375 ractor_sched_barrier_completed_p(rb_vm_t *vm)
1377 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1378 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1379 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1382 void
1383 rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1385 VM_ASSERT(cr == GET_RACTOR());
1386 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1387 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1388 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1390 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1392 unsigned int lock_rec;
1394 ractor_sched_lock(vm, cr);
1396 vm->ractor.sched.barrier_waiting = true;
1398 // release VM lock
1399 lock_rec = vm->ractor.sync.lock_rec;
1400 vm->ractor.sync.lock_rec = 0;
1401 vm->ractor.sync.lock_owner = NULL;
1402 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1404 // interrupts all running threads
1405 rb_thread_t *ith;
1406 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1407 if (ith->ractor != cr) {
1408 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1409 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1413 // wait for other ractors
1414 while (!ractor_sched_barrier_completed_p(vm)) {
1415 ractor_sched_set_unlocked(vm, cr);
1416 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1417 ractor_sched_set_locked(vm, cr);
1421 ractor_sched_unlock(vm, cr);
1423 // acquire VM lock
1424 rb_native_mutex_lock(&vm->ractor.sync.lock);
1425 vm->ractor.sync.lock_rec = lock_rec;
1426 vm->ractor.sync.lock_owner = cr;
1428 RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
1430 ractor_sched_lock(vm, cr);
1432 vm->ractor.sched.barrier_waiting = false;
1433 vm->ractor.sched.barrier_serial++;
1434 vm->ractor.sched.barrier_waiting_cnt = 0;
1435 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1437 ractor_sched_unlock(vm, cr);
1440 static void
1441 ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1443 if (ractor_sched_barrier_completed_p(vm)) {
1444 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1448 static void
1449 ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
1451 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1453 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1455 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1456 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1457 RB_VM_SAVE_MACHINE_CONTEXT(th);
1459 rb_ractor_t *cr = th->ractor;
1460 ractor_sched_set_unlocked(vm, cr);
1461 rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
1462 ractor_sched_set_locked(vm, cr);
1464 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1468 void
1469 rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
1471 VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
1472 VM_ASSERT(cr == GET_RACTOR());
1473 VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
1474 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1476 #if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1477 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1478 #endif
1480 RUBY_DEBUG_LOG("join");
1482 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1484 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1485 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1487 ractor_sched_lock(vm, cr);
1489 // running_cnt
1490 vm->ractor.sched.barrier_waiting_cnt++;
1491 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1493 ractor_sched_barrier_join_signal_locked(vm);
1494 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1496 ractor_sched_unlock(vm, cr);
1499 rb_native_mutex_lock(&vm->ractor.sync.lock);
1500 // VM locked here
1503 #if 0
1504 // TODO
1506 static void clear_thread_cache_altstack(void);
1508 static void
1509 rb_thread_sched_destroy(struct rb_thread_sched *sched)
1512 * only called once at VM shutdown (not atfork), another thread
1513 * may still grab vm->gvl.lock when calling gvl_release at
1514 * the end of thread_start_func_2
1516 if (0) {
1517 rb_native_mutex_destroy(&sched->lock);
1519 clear_thread_cache_altstack();
1521 #endif
1523 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1524 static int
1525 get_native_thread_id(void)
1527 #ifdef __linux__
1528 return (int)syscall(SYS_gettid);
1529 #elif defined(__FreeBSD__)
1530 return pthread_getthreadid_np();
1531 #endif
1533 #endif
1535 #if defined(HAVE_WORKING_FORK)
1536 static void
1537 thread_sched_atfork(struct rb_thread_sched *sched)
1539 current_fork_gen++;
1540 rb_thread_sched_init(sched, true);
1541 rb_thread_t *th = GET_THREAD();
1542 rb_vm_t *vm = GET_VM();
1544 if (th_has_dedicated_nt(th)) {
1545 vm->ractor.sched.snt_cnt = 0;
1547 else {
1548 vm->ractor.sched.snt_cnt = 1;
1550 vm->ractor.sched.running_cnt = 0;
1552 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1553 #if VM_CHECK_MODE > 0
1554 vm->ractor.sched.lock_owner = NULL;
1555 vm->ractor.sched.locked = false;
1556 #endif
1558 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1559 rb_native_cond_initialize(&vm->ractor.sched.cond);
1560 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1561 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1563 ccan_list_head_init(&vm->ractor.sched.grq);
1564 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1565 ccan_list_head_init(&vm->ractor.sched.running_threads);
1567 VM_ASSERT(sched->is_running);
1568 sched->is_running_timeslice = false;
1570 if (sched->running != th) {
1571 thread_sched_to_running(sched, th);
1573 else {
1574 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1577 #ifdef RB_THREAD_T_HAS_NATIVE_ID
1578 if (th->nt) {
1579 th->nt->tid = get_native_thread_id();
1581 #endif
1584 #endif
1586 #ifdef RB_THREAD_LOCAL_SPECIFIER
1587 static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1588 #else
1589 static pthread_key_t ruby_native_thread_key;
1590 #endif
1592 static void
1593 null_func(int i)
1595 /* null */
1596 // This function can be called from signal handler
1597 // RUBY_DEBUG_LOG("i:%d", i);
1600 rb_thread_t *
1601 ruby_thread_from_native(void)
1603 #ifdef RB_THREAD_LOCAL_SPECIFIER
1604 return ruby_native_thread;
1605 #else
1606 return pthread_getspecific(ruby_native_thread_key);
1607 #endif
1611 ruby_thread_set_native(rb_thread_t *th)
1613 if (th) {
1614 #ifdef USE_UBF_LIST
1615 ccan_list_node_init(&th->sched.node.ubf);
1616 #endif
1619 // setup TLS
1621 if (th && th->ec) {
1622 rb_ractor_set_current_ec(th->ractor, th->ec);
1624 #ifdef RB_THREAD_LOCAL_SPECIFIER
1625 ruby_native_thread = th;
1626 return 1;
1627 #else
1628 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1629 #endif
1632 static void native_thread_setup(struct rb_native_thread *nt);
1633 static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1635 void
1636 Init_native_thread(rb_thread_t *main_th)
1638 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1639 if (condattr_monotonic) {
1640 int r = pthread_condattr_init(condattr_monotonic);
1641 if (r == 0) {
1642 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1644 if (r) condattr_monotonic = NULL;
1646 #endif
1648 #ifndef RB_THREAD_LOCAL_SPECIFIER
1649 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1650 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1652 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1653 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1655 #endif
1656 ruby_posix_signal(SIGVTALRM, null_func);
1658 // setup vm
1659 rb_vm_t *vm = main_th->vm;
1660 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1661 rb_native_cond_initialize(&vm->ractor.sched.cond);
1662 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1663 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1665 ccan_list_head_init(&vm->ractor.sched.grq);
1666 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1667 ccan_list_head_init(&vm->ractor.sched.running_threads);
1669 // setup main thread
1670 main_th->nt->thread_id = pthread_self();
1671 main_th->nt->serial = 1;
1672 #ifdef RUBY_NT_SERIAL
1673 ruby_nt_serial = 1;
1674 #endif
1675 ruby_thread_set_native(main_th);
1676 native_thread_setup(main_th->nt);
1677 native_thread_setup_on_thread(main_th->nt);
1679 TH_SCHED(main_th)->running = main_th;
1680 main_th->has_dedicated_nt = 1;
1682 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1684 // setup main NT
1685 main_th->nt->dedicated = 1;
1686 main_th->nt->vm = vm;
1688 // setup mn
1689 vm->ractor.sched.dnt_cnt = 1;
1692 extern int ruby_mn_threads_enabled;
1694 void
1695 ruby_mn_threads_params(void)
1697 rb_vm_t *vm = GET_VM();
1698 rb_ractor_t *main_ractor = GET_RACTOR();
1700 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1701 bool enable_mn_threads = false;
1703 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1704 // enabled
1705 ruby_mn_threads_enabled = 1;
1707 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1709 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1710 const int default_max_cpu = 8; // TODO: CPU num?
1711 int max_cpu = default_max_cpu;
1713 if (USE_MN_THREADS && max_cpu_cstr) {
1714 int given_max_cpu = atoi(max_cpu_cstr);
1715 if (given_max_cpu > 0) {
1716 max_cpu = given_max_cpu;
1720 vm->ractor.sched.max_cpu = max_cpu;
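/*
 * Example, based on the getenv() calls above: enabling M:N threads and
 * capping the CPU estimate from the shell (only effective when
 * USE_MN_THREADS is compiled in):
 *
 *   RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby script.rb
 *
 * RUBY_MN_THREADS > 0 enables M:N scheduling for the main Ractor;
 * RUBY_MAX_CPU overrides the default of 8.
 */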
1723 static void
1724 native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1726 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1728 if (nt->dedicated == 0) {
1729 ractor_sched_lock(vm, cr);
1731 vm->ractor.sched.snt_cnt--;
1732 vm->ractor.sched.dnt_cnt++;
1734 ractor_sched_unlock(vm, cr);
1737 nt->dedicated++;
1740 static void
1741 native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1743 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1744 VM_ASSERT(nt->dedicated > 0);
1745 nt->dedicated--;
1747 if (nt->dedicated == 0) {
1748 ractor_sched_lock(vm, cr);
1750 nt->vm->ractor.sched.snt_cnt++;
1751 nt->vm->ractor.sched.dnt_cnt--;
1753 ractor_sched_unlock(vm, cr);
1757 static void
1758 native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1760 #if USE_RUBY_DEBUG_LOG
1761 if (nt) {
1762 if (th->nt) {
1763 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1765 else {
1766 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1769 else {
1770 if (th->nt) {
1771 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1773 else {
1774 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1777 #endif
1779 th->nt = nt;
1782 static void
1783 native_thread_destroy(struct rb_native_thread *nt)
1785 if (nt) {
1786 rb_native_cond_destroy(&nt->cond.readyq);
1788 if (&nt->cond.readyq != &nt->cond.intr) {
1789 rb_native_cond_destroy(&nt->cond.intr);
1792 RB_ALTSTACK_FREE(nt->altstack);
1793 ruby_xfree(nt->nt_context);
1794 ruby_xfree(nt);
1798 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1799 #define STACKADDR_AVAILABLE 1
1800 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1801 #define STACKADDR_AVAILABLE 1
1802 #undef MAINSTACKADDR_AVAILABLE
1803 #define MAINSTACKADDR_AVAILABLE 1
1804 void *pthread_get_stackaddr_np(pthread_t);
1805 size_t pthread_get_stacksize_np(pthread_t);
1806 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1807 #define STACKADDR_AVAILABLE 1
1808 #elif defined HAVE_PTHREAD_GETTHRDS_NP
1809 #define STACKADDR_AVAILABLE 1
1810 #elif defined __HAIKU__
1811 #define STACKADDR_AVAILABLE 1
1812 #endif
1814 #ifndef MAINSTACKADDR_AVAILABLE
1815 # ifdef STACKADDR_AVAILABLE
1816 # define MAINSTACKADDR_AVAILABLE 1
1817 # else
1818 # define MAINSTACKADDR_AVAILABLE 0
1819 # endif
1820 #endif
1821 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1822 # define get_main_stack(addr, size) get_stack(addr, size)
1823 #endif
1825 #ifdef STACKADDR_AVAILABLE
1827 * Get the initial address and size of current thread's stack
1829 static int
1830 get_stack(void **addr, size_t *size)
1832 #define CHECK_ERR(expr) \
1833 {int err = (expr); if (err) return err;}
1834 #ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1835 pthread_attr_t attr;
1836 size_t guard = 0;
1837 STACK_GROW_DIR_DETECTION;
1838 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1839 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1840 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1841 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1842 # else
1843 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1844 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1845 # endif
1846 # ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1847 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1848 # else
1849 guard = getpagesize();
1850 # endif
1851 *size -= guard;
1852 pthread_attr_destroy(&attr);
1853 #elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1854 pthread_attr_t attr;
1855 CHECK_ERR(pthread_attr_init(&attr));
1856 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1857 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
1858 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1859 # else
1860 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1861 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1862 # endif
1863 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1864 pthread_attr_destroy(&attr);
1865 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1866 pthread_t th = pthread_self();
1867 *addr = pthread_get_stackaddr_np(th);
1868 *size = pthread_get_stacksize_np(th);
1869 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1870 stack_t stk;
1871 # if defined HAVE_THR_STKSEGMENT /* Solaris */
1872 CHECK_ERR(thr_stksegment(&stk));
1873 # else /* OpenBSD */
1874 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1875 # endif
1876 *addr = stk.ss_sp;
1877 *size = stk.ss_size;
1878 #elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1879 pthread_t th = pthread_self();
1880 struct __pthrdsinfo thinfo;
1881 char reg[256];
1882 int regsiz=sizeof(reg);
1883 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1884 &thinfo, sizeof(thinfo),
1885 &reg, &regsiz));
1886 *addr = thinfo.__pi_stackaddr;
1887 /* Must not use thinfo.__pi_stacksize for size.
1888 It is around 3KB smaller than the correct size
1889 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1890 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1891 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1892 #elif defined __HAIKU__
1893 thread_info info;
1894 STACK_GROW_DIR_DETECTION;
1895 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1896 *addr = info.stack_base;
1897 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1898 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1899 #else
1900 #error STACKADDR_AVAILABLE is defined but not implemented.
1901 #endif
1902 return 0;
1903 #undef CHECK_ERR
1905 #endif
1907 static struct {
1908 rb_nativethread_id_t id;
1909 size_t stack_maxsize;
1910 VALUE *stack_start;
1911 } native_main_thread;
1913 #ifdef STACK_END_ADDRESS
1914 extern void *STACK_END_ADDRESS;
1915 #endif
1917 enum {
1918 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
1919 RUBY_STACK_SPACE_RATIO = 5
1922 static size_t
1923 space_size(size_t stack_size)
1925 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1926 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1927 return RUBY_STACK_SPACE_LIMIT;
1929 else {
1930 return space_size;
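/*
 * Worked example for space_size() above: with RUBY_STACK_SPACE_RATIO == 5
 * and RUBY_STACK_SPACE_LIMIT == 1024KB, a 1MB machine stack reserves
 * 1MB / 5 = ~205KB of guard space, while an 8MB stack is capped at 1024KB.
 */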
1934 static void
1935 native_thread_init_main_thread_stack(void *addr)
1937 native_main_thread.id = pthread_self();
1938 #ifdef RUBY_ASAN_ENABLED
1939 addr = asan_get_real_stack_addr((void *)addr);
1940 #endif
1942 #if MAINSTACKADDR_AVAILABLE
1943 if (native_main_thread.stack_maxsize) return;
1945 void* stackaddr;
1946 size_t size;
1947 if (get_main_stack(&stackaddr, &size) == 0) {
1948 native_main_thread.stack_maxsize = size;
1949 native_main_thread.stack_start = stackaddr;
1950 goto bound_check;
1953 #endif
1954 #ifdef STACK_END_ADDRESS
1955 native_main_thread.stack_start = STACK_END_ADDRESS;
1956 #else
1957 if (!native_main_thread.stack_start ||
1958 STACK_UPPER((VALUE *)(void *)&addr,
1959 native_main_thread.stack_start > (VALUE *)addr,
1960 native_main_thread.stack_start < (VALUE *)addr)) {
1961 native_main_thread.stack_start = (VALUE *)addr;
1963 #endif
1965 #if defined(HAVE_GETRLIMIT)
1966 #if defined(PTHREAD_STACK_DEFAULT)
1967 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1968 # error "PTHREAD_STACK_DEFAULT is too small"
1969 # endif
1970 size_t size = PTHREAD_STACK_DEFAULT;
1971 #else
1972 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
1973 #endif
1974 size_t space;
1975 int pagesize = getpagesize();
1976 struct rlimit rlim;
1977 STACK_GROW_DIR_DETECTION;
1978 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
1979 size = (size_t)rlim.rlim_cur;
1981 addr = native_main_thread.stack_start;
1982 if (IS_STACK_DIR_UPPER()) {
1983 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
1985 else {
1986 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
1988 native_main_thread.stack_maxsize = space;
1989 #endif
1992 #if MAINSTACKADDR_AVAILABLE
1993 bound_check:
1994 #endif
1995 /* If addr is out of range of main-thread stack range estimation, */
1996 /* it should be on co-routine (alternative stack). [Feature #2294] */
1998 void *start, *end;
1999 STACK_GROW_DIR_DETECTION;
2001 if (IS_STACK_DIR_UPPER()) {
2002 start = native_main_thread.stack_start;
2003 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2005 else {
2006 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2007 end = native_main_thread.stack_start;
2010 if ((void *)addr < start || (void *)addr > end) {
2011 /* out of range */
2012 native_main_thread.stack_start = (VALUE *)addr;
2013 native_main_thread.stack_maxsize = 0; /* unknown */
2018 #define CHECK_ERR(expr) \
2019 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2021 static int
2022 native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
2024 rb_nativethread_id_t curr = pthread_self();
2025 #ifdef RUBY_ASAN_ENABLED
2026 local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
2027 th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
2028 #endif
2030 if (!native_main_thread.id) {
2031 /* This thread is the first thread, must be the main thread -
2032 * configure the native_main_thread object */
2033 native_thread_init_main_thread_stack(local_in_parent_frame);
2036 if (pthread_equal(curr, native_main_thread.id)) {
2037 th->ec->machine.stack_start = native_main_thread.stack_start;
2038 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2040 else {
2041 #ifdef STACKADDR_AVAILABLE
2042 if (th_has_dedicated_nt(th)) {
2043 void *start;
2044 size_t size;
2046 if (get_stack(&start, &size) == 0) {
2047 uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
2048 th->ec->machine.stack_start = local_in_parent_frame;
2049 th->ec->machine.stack_maxsize = size - diff;
2052 #else
2053 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2054 #endif
2057 return 0;
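/* Illustration (not part of the upstream source): for a non-main thread with a
 * dedicated native thread, assuming a downward-growing stack where get_stack()
 * reports the high end as `start`:
 *
 *     diff   = (uintptr_t)start - (uintptr_t)local_in_parent_frame;   bytes already used
 *     usable = size - diff;                                           bytes still below us
 *
 * so stack_maxsize covers only the portion of the stack below the current frame. */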
2060 struct nt_param {
2061 rb_vm_t *vm;
2062 struct rb_native_thread *nt;
2065 static void *
2066 nt_start(void *ptr);
2068 static int
2069 native_thread_create0(struct rb_native_thread *nt)
2071 int err = 0;
2072 pthread_attr_t attr;
2074 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2075 const size_t space = space_size(stack_size);
2077 nt->machine_stack_maxsize = stack_size - space;
2079 #ifdef USE_SIGALTSTACK
2080 nt->altstack = rb_allocate_sigaltstack();
2081 #endif
2083 CHECK_ERR(pthread_attr_init(&attr));
2085 # ifdef PTHREAD_STACK_MIN
2086 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2087 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2088 # endif
2090 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2091 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2092 # endif
2093 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2095 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2097 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2099 CHECK_ERR(pthread_attr_destroy(&attr));
2101 return err;
2104 static void
2105 native_thread_setup(struct rb_native_thread *nt)
2107 // init cond
2108 rb_native_cond_initialize(&nt->cond.readyq);
2110 if (&nt->cond.readyq != &nt->cond.intr) {
2111 rb_native_cond_initialize(&nt->cond.intr);
2115 static void
2116 native_thread_setup_on_thread(struct rb_native_thread *nt)
2118 // init tid
2119 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2120 nt->tid = get_native_thread_id();
2121 #endif
2123 // init signal handler
2124 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2127 static struct rb_native_thread *
2128 native_thread_alloc(void)
2130 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2131 native_thread_setup(nt);
2133 #if USE_MN_THREADS
2134 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2135 #endif
2137 #if USE_RUBY_DEBUG_LOG
2138 static rb_atomic_t nt_serial = 2;
2139 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2140 #endif
2141 return nt;
2144 static int
2145 native_thread_create_dedicated(rb_thread_t *th)
2147 th->nt = native_thread_alloc();
2148 th->nt->vm = th->vm;
2149 th->nt->running_thread = th;
2150 th->nt->dedicated = 1;
2152 // vm stack
2153 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2154 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2155 th->sched.malloc_stack = true;
2156 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2157 th->sched.context_stack = vm_stack;
2160 int err = native_thread_create0(th->nt);
2161 if (!err) {
2162 // setup
2163 thread_sched_to_ready(TH_SCHED(th), th);
2165 return err;
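/* Illustration (not part of the upstream source): the VM stack is sized in VALUE
 * slots.  Assuming the usual 1 MiB thread_vm_stack_size on a 64-bit build:
 *
 *     vm_stack_word_size = 1048576 / sizeof(VALUE) = 131072 slots
 */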
2168 static void
2169 call_thread_start_func_2(rb_thread_t *th)
2171 /* Capture the address of a local in this stack frame to mark the beginning of the
2172 machine stack for this thread. This is required even if we can tell the real
2173 stack beginning from the pthread API in native_thread_init_stack, because
2174 glibc stores some of its own data on the stack before calling into user code
2175 on a new thread, and replacing that data on fiber-switch would break it (see
2176 bug #13887) */
2177 VALUE stack_start = 0;
2178 VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);
2180 native_thread_init_stack(th, stack_start_addr);
2181 thread_start_func_2(th, th->ec->machine.stack_start);
2184 static void *
2185 nt_start(void *ptr)
2187 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2188 rb_vm_t *vm = nt->vm;
2190 native_thread_setup_on_thread(nt);
2192 // init tid
2193 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2194 nt->tid = get_native_thread_id();
2195 #endif
2197 #if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2198 ruby_nt_serial = nt->serial;
2199 #endif
2201 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2203 if (!nt->dedicated) {
2204 coroutine_initialize_main(nt->nt_context);
2207 while (1) {
2208 if (nt->dedicated) {
2209 // wait for its running turn
2210 rb_thread_t *th = nt->running_thread;
2211 struct rb_thread_sched *sched = TH_SCHED(th);
2213 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2214 ruby_thread_set_native(th);
2216 thread_sched_lock(sched, th);
2218 if (sched->running == th) {
2219 thread_sched_add_running_thread(sched, th);
2221 thread_sched_wait_running_turn(sched, th, false);
2223 thread_sched_unlock(sched, th);
2225 // start threads
2226 call_thread_start_func_2(th);
2227 break; // TODO: allow switching to an SNT
2229 else {
2230 RUBY_DEBUG_LOG("check next");
2231 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2233 if (r) {
2234 struct rb_thread_sched *sched = &r->threads.sched;
2236 thread_sched_lock(sched, NULL);
2238 rb_thread_t *next_th = sched->running;
2240 if (next_th && next_th->nt == NULL) {
2241 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2242 thread_sched_switch0(nt->nt_context, next_th, nt, false);
2244 else {
2245 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2248 thread_sched_unlock(sched, NULL);
2250 else {
2251 // timeout -> deleted.
2252 break;
2255 if (nt->dedicated) {
2256 // SNT becomes DNT while running
2257 break;
2262 return NULL;
2265 static int native_thread_create_shared(rb_thread_t *th);
2267 #if USE_MN_THREADS
2268 static void nt_free_stack(void *mstack);
2269 #endif
2271 void
2272 rb_threadptr_remove(rb_thread_t *th)
2274 #if USE_MN_THREADS
2275 if (th->sched.malloc_stack) {
2276 // dedicated
2277 return;
2279 else {
2280 rb_vm_t *vm = th->vm;
2281 th->sched.finished = false;
2283 RB_VM_LOCK_ENTER();
2285 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2287 RB_VM_LOCK_LEAVE();
2289 #endif
2292 void
2293 rb_threadptr_sched_free(rb_thread_t *th)
2295 #if USE_MN_THREADS
2296 if (th->sched.malloc_stack) {
2297 // has dedicated
2298 ruby_xfree(th->sched.context_stack);
2299 native_thread_destroy(th->nt);
2301 else {
2302 nt_free_stack(th->sched.context_stack);
2303 // TODO: how to free nt and nt->altstack?
2306 ruby_xfree(th->sched.context);
2307 th->sched.context = NULL;
2308 // VM_ASSERT(th->sched.context == NULL);
2309 #else
2310 ruby_xfree(th->sched.context_stack);
2311 native_thread_destroy(th->nt);
2312 #endif
2314 th->nt = NULL;
2317 void
2318 rb_thread_sched_mark_zombies(rb_vm_t *vm)
2320 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2321 rb_thread_t *zombie_th, *next_zombie_th;
2322 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2323 if (zombie_th->sched.finished) {
2324 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2326 else {
2327 rb_gc_mark(zombie_th->self);
2333 static int
2334 native_thread_create(rb_thread_t *th)
2336 VM_ASSERT(th->nt == 0);
2337 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2338 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2340 if (!th->ractor->threads.sched.enable_mn_threads) {
2341 th->has_dedicated_nt = 1;
2344 if (th->has_dedicated_nt) {
2345 return native_thread_create_dedicated(th);
2347 else {
2348 return native_thread_create_shared(th);
2352 #if USE_NATIVE_THREAD_PRIORITY
2354 static void
2355 native_thread_apply_priority(rb_thread_t *th)
2357 #if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2358 struct sched_param sp;
2359 int policy;
2360 int priority = 0 - th->priority;
2361 int max, min;
2362 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2363 max = sched_get_priority_max(policy);
2364 min = sched_get_priority_min(policy);
2366 if (min > priority) {
2367 priority = min;
2369 else if (max < priority) {
2370 priority = max;
2373 sp.sched_priority = priority;
2374 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2375 #else
2376 /* not touched */
2377 #endif
2380 #endif /* USE_NATIVE_THREAD_PRIORITY */
2382 static int
2383 native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2385 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2388 static void
2389 ubf_pthread_cond_signal(void *ptr)
2391 rb_thread_t *th = (rb_thread_t *)ptr;
2392 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2393 rb_native_cond_signal(&th->nt->cond.intr);
2396 static void
2397 native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2399 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2400 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2402 /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
2403 * current_time + 100,000,000. So we cap the timeout at 100,000,000. This is
2404 * treated as a kind of spurious wakeup. The caller of native_sleep
2405 * should handle spurious wakeups.
2407 * See also [Bug #1341] [ruby-core:29702]
2408 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2410 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2412 THREAD_BLOCKING_BEGIN(th);
2414 rb_native_mutex_lock(lock);
2415 th->unblock.func = ubf_pthread_cond_signal;
2416 th->unblock.arg = th;
2418 if (RUBY_VM_INTERRUPTED(th->ec)) {
2419 /* interrupted. return immediately */
2420 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2422 else {
2423 if (!rel) {
2424 rb_native_cond_wait(cond, lock);
2426 else {
2427 rb_hrtime_t end;
2429 if (*rel > max) {
2430 *rel = max;
2433 end = native_cond_timeout(cond, *rel);
2434 native_cond_timedwait(cond, lock, &end);
2437 th->unblock.func = 0;
2439 rb_native_mutex_unlock(lock);
2441 THREAD_BLOCKING_END(th);
2443 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
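/* Illustration (not part of the upstream source): the cap above is expressed in
 * hrtime units (nanoseconds).  With RB_HRTIME_PER_SEC == 1,000,000,000:
 *
 *     max = 100,000,000 sec * 1,000,000,000 ns/sec = 1e17 ns
 *
 * A longer relative timeout is shortened to `max`, and waking up early is then
 * simply one more spurious wakeup for the caller to handle. */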
2446 #ifdef USE_UBF_LIST
2447 static CCAN_LIST_HEAD(ubf_list_head);
2448 static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2450 static void
2451 ubf_list_atfork(void)
2453 ccan_list_head_init(&ubf_list_head);
2454 rb_native_mutex_initialize(&ubf_list_lock);
2457 RBIMPL_ATTR_MAYBE_UNUSED()
2458 static bool
2459 ubf_list_contain_p(rb_thread_t *th)
2461 rb_thread_t *list_th;
2462 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2463 if (list_th == th) return true;
2465 return false;
2468 /* The thread 'th' is registered because it may need to be unblocked. */
2469 static void
2470 register_ubf_list(rb_thread_t *th)
2472 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2473 struct ccan_list_node *node = &th->sched.node.ubf;
2475 VM_ASSERT(th->unblock.func != NULL);
2477 rb_native_mutex_lock(&ubf_list_lock);
2479 // check not connected yet
2480 if (ccan_list_empty((struct ccan_list_head*)node)) {
2481 VM_ASSERT(!ubf_list_contain_p(th));
2482 ccan_list_add(&ubf_list_head, node);
2485 rb_native_mutex_unlock(&ubf_list_lock);
2487 timer_thread_wakeup();
2490 /* The thread 'th' is unblocked. It no longer needs to be registered. */
2491 static void
2492 unregister_ubf_list(rb_thread_t *th)
2494 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2495 struct ccan_list_node *node = &th->sched.node.ubf;
2497 /* we can't allow re-entry into ubf_list_head */
2498 VM_ASSERT(th->unblock.func == NULL);
2500 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2501 rb_native_mutex_lock(&ubf_list_lock);
2503 VM_ASSERT(ubf_list_contain_p(th));
2504 ccan_list_del_init(node);
2506 rb_native_mutex_unlock(&ubf_list_lock);
2511 * send a signal so that the target thread returns from a blocking syscall.
2512 * Almost any signal would do, but we chose SIGVTALRM.
2514 static void
2515 ubf_wakeup_thread(rb_thread_t *th)
2517 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2519 pthread_kill(th->nt->thread_id, SIGVTALRM);
2522 static void
2523 ubf_select(void *ptr)
2525 rb_thread_t *th = (rb_thread_t *)ptr;
2526 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2527 ubf_wakeup_thread(th);
2528 register_ubf_list(th);
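/* Illustration (not part of the upstream source): ubf_select() is the typical
 * unblocking function for a thread sitting in select/ppoll.  The flow is:
 *
 *     ubf_select(th)
 *       -> ubf_wakeup_thread(th)    pthread_kill(..., SIGVTALRM) interrupts the syscall
 *       -> register_ubf_list(th)    the timer thread then re-signals the thread via
 *                                   ubf_wakeup_all_threads() on each iteration until
 *                                   unregister_ubf_list(th) removes it from the list
 */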
2531 static bool
2532 ubf_threads_empty(void)
2534 return ccan_list_empty(&ubf_list_head) != 0;
2537 static void
2538 ubf_wakeup_all_threads(void)
2540 if (!ubf_threads_empty()) {
2541 rb_thread_t *th;
2542 rb_native_mutex_lock(&ubf_list_lock);
2544 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2545 ubf_wakeup_thread(th);
2548 rb_native_mutex_unlock(&ubf_list_lock);
2552 #else /* USE_UBF_LIST */
2553 #define register_ubf_list(th) (void)(th)
2554 #define unregister_ubf_list(th) (void)(th)
2555 #define ubf_select 0
2556 static void ubf_wakeup_all_threads(void) { return; }
2557 static bool ubf_threads_empty(void) { return true; }
2558 #define ubf_list_atfork() do {} while (0)
2559 #endif /* USE_UBF_LIST */
2561 #define TT_DEBUG 0
2562 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2564 void
2565 rb_thread_wakeup_timer_thread(int sig)
2567 // This function can be called from signal handlers, so
2568 // pthread_mutex_lock() must not be used.
2570 // wakeup timer thread
2571 timer_thread_wakeup_force();
2573 // interrupt main thread if main thread is available
2574 if (system_working) {
2575 rb_vm_t *vm = GET_VM();
2576 rb_thread_t *main_th = vm->ractor.main_thread;
2578 if (main_th) {
2579 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2581 if (main_th_ec) {
2582 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2584 if (vm->ubf_async_safe && main_th->unblock.func) {
2585 (main_th->unblock.func)(main_th->unblock.arg);
2592 #define CLOSE_INVALIDATE_PAIR(expr) \
2593 close_invalidate_pair(expr,"close_invalidate: "#expr)
2594 static void
2595 close_invalidate(int *fdp, const char *msg)
2597 int fd = *fdp;
2599 *fdp = -1;
2600 if (close(fd) < 0) {
2601 async_bug_fd(msg, errno, fd);
2605 static void
2606 close_invalidate_pair(int fds[2], const char *msg)
2608 if (USE_EVENTFD && fds[0] == fds[1]) {
2609 fds[1] = -1; // disable write port first
2610 close_invalidate(&fds[0], msg);
2612 else {
2613 close_invalidate(&fds[1], msg);
2614 close_invalidate(&fds[0], msg);
2618 static void
2619 set_nonblock(int fd)
2621 int oflags;
2622 int err;
2624 oflags = fcntl(fd, F_GETFL);
2625 if (oflags == -1)
2626 rb_sys_fail(0);
2627 oflags |= O_NONBLOCK;
2628 err = fcntl(fd, F_SETFL, oflags);
2629 if (err == -1)
2630 rb_sys_fail(0);
2633 /* communication pipe with timer thread and signal handler */
2634 static void
2635 setup_communication_pipe_internal(int pipes[2])
2637 int err;
2639 if (pipes[0] > 0 || pipes[1] > 0) {
2640 VM_ASSERT(pipes[0] > 0);
2641 VM_ASSERT(pipes[1] > 0);
2642 return;
2646 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which was
2647 * missing the EFD_* flags; those kernels can fall back to pipe(2).
2649 #if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2650 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2652 if (pipes[0] >= 0) {
2653 rb_update_max_fd(pipes[0]);
2654 return;
2656 #endif
2658 err = rb_cloexec_pipe(pipes);
2659 if (err != 0) {
2660 rb_bug("can not create communication pipe");
2662 rb_update_max_fd(pipes[0]);
2663 rb_update_max_fd(pipes[1]);
2664 set_nonblock(pipes[0]);
2665 set_nonblock(pipes[1]);
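/* Illustration (not part of the upstream source): with eventfd there is a single
 * descriptor serving as both ends, which is why comm_fds[0] == comm_fds[1] and why
 * close_invalidate_pair() closes it only once:
 *
 *     eventfd:  comm_fds = { efd, efd }   one kernel object, read and write
 *     pipe:     comm_fds = { rfd, wfd }   separate read end and write end
 */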
2668 #if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2669 # define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2670 #endif
2672 enum {
2673 THREAD_NAME_MAX =
2674 #if defined(__linux__)
2675 16
2676 #elif defined(__APPLE__)
2677 /* Undocumented, and main thread seems unlimited */
2678 64
2679 #else
2680 16
2681 #endif
2684 static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2686 static void
2687 native_set_thread_name(rb_thread_t *th)
2689 #ifdef SET_CURRENT_THREAD_NAME
2690 VALUE loc;
2691 if (!NIL_P(loc = th->name)) {
2692 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2694 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2695 char *name, *p;
2696 char buf[THREAD_NAME_MAX];
2697 size_t len;
2698 int n;
2700 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2701 p = strrchr(name, '/'); /* show only the basename of the path. */
2702 if (p && p[1])
2703 name = p + 1;
2705 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2706 RB_GC_GUARD(loc);
2708 len = (size_t)n;
2709 if (len >= sizeof(buf)) {
2710 buf[sizeof(buf)-2] = '*';
2711 buf[sizeof(buf)-1] = '\0';
2713 SET_CURRENT_THREAD_NAME(buf);
2715 #endif
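/* Illustration (not part of the upstream source): assuming the 16-byte Linux
 * THREAD_NAME_MAX, a generated "<basename>:<lineno>" label that does not fit is
 * truncated by snprintf() and its last visible character is replaced with '*':
 *
 *     "some_long_script.rb:42"  ->  "some_long_scri*"
 */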
2718 static void
2719 native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2721 #if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2722 char buf[THREAD_NAME_MAX];
2723 const char *s = "";
2724 # if !defined SET_ANOTHER_THREAD_NAME
2725 if (!pthread_equal(pthread_self(), thread_id)) return;
2726 # endif
2727 if (!NIL_P(name)) {
2728 long n;
2729 RSTRING_GETMEM(name, s, n);
2730 if (n >= (int)sizeof(buf)) {
2731 memcpy(buf, s, sizeof(buf)-1);
2732 buf[sizeof(buf)-1] = '\0';
2733 s = buf;
2736 # if defined SET_ANOTHER_THREAD_NAME
2737 SET_ANOTHER_THREAD_NAME(thread_id, s);
2738 # elif defined SET_CURRENT_THREAD_NAME
2739 SET_CURRENT_THREAD_NAME(s);
2740 # endif
2741 #endif
2744 #if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2745 static VALUE
2746 native_thread_native_thread_id(rb_thread_t *target_th)
2748 if (!target_th->nt) return Qnil;
2750 #ifdef RB_THREAD_T_HAS_NATIVE_ID
2751 int tid = target_th->nt->tid;
2752 if (tid == 0) return Qnil;
2753 return INT2FIX(tid);
2754 #elif defined(__APPLE__)
2755 uint64_t tid;
2756 /* The first condition is needed because MAC_OS_X_VERSION_10_6
2757 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2758 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2759 so it has C++17 and everything needed to build modern Ruby. */
2760 # if (!defined(MAC_OS_X_VERSION_10_6) || \
2761 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2762 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2763 const bool no_pthread_threadid_np = true;
2764 # define NO_PTHREAD_MACH_THREAD_NP 1
2765 # elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2766 const bool no_pthread_threadid_np = false;
2767 # else
2768 # if !(defined(__has_attribute) && __has_attribute(availability))
2769 /* __API_AVAILABLE macro does nothing on gcc */
2770 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2771 # endif
2772 /* Check weakly linked symbol */
2773 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2774 # endif
2775 if (no_pthread_threadid_np) {
2776 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2778 # ifndef NO_PTHREAD_MACH_THREAD_NP
2779 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2780 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2781 return ULL2NUM((unsigned long long)tid);
2782 # endif
2783 #endif
2785 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2786 #else
2787 # define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2788 #endif
2790 static struct {
2791 rb_serial_t created_fork_gen;
2792 pthread_t pthread_id;
2794 int comm_fds[2]; // r, w
2796 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2797 int event_fd; // kernel event queue fd (epoll/kqueue)
2798 #endif
2799 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2800 #define EPOLL_EVENTS_MAX 0x10
2801 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2802 #elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2803 #define KQUEUE_EVENTS_MAX 0x10
2804 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2805 #endif
2807 // waiting threads list
2808 struct ccan_list_head waiting; // waiting threads in ractors
2809 pthread_mutex_t waiting_lock;
2810 } timer_th = {
2811 .created_fork_gen = 0,
2814 #define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2816 static void timer_thread_check_timeslice(rb_vm_t *vm);
2817 static int timer_thread_set_timeout(rb_vm_t *vm);
2818 static void timer_thread_wakeup_thread(rb_thread_t *th);
2820 #include "thread_pthread_mn.c"
2822 static rb_thread_t *
2823 thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
2825 if (w) {
2826 return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
2828 else {
2829 return NULL;
2833 static int
2834 timer_thread_set_timeout(rb_vm_t *vm)
2836 #if 0
2837 return 10; // ms
2838 #else
2839 int timeout = -1;
2841 ractor_sched_lock(vm, NULL);
2843 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2844 || !ubf_threads_empty() // (1-3) Periodic UBF
2845 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2848 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2849 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2850 !ubf_threads_empty(),
2851 (vm->ractor.sched.grq_cnt > 0));
2853 timeout = 10; // ms
2854 vm->ractor.sched.timeslice_wait_inf = false;
2856 else {
2857 vm->ractor.sched.timeslice_wait_inf = true;
2860 ractor_sched_unlock(vm, NULL);
2862 if (vm->ractor.sched.timeslice_wait_inf) {
2863 rb_native_mutex_lock(&timer_th.waiting_lock);
2865 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2866 rb_thread_t *th = thread_sched_waiting_thread(w);
2868 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2869 rb_hrtime_t now = rb_hrtime_now();
2870 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2872 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2874 // TODO: overflow?
2875 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2878 rb_native_mutex_unlock(&timer_th.waiting_lock);
2881 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2883 // fprintf(stderr, "timeout:%d\n", timeout);
2884 return timeout;
2885 #endif
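/* Illustration (not part of the upstream source): the conversion above is a ceiling
 * division from hrtime (ns) to milliseconds, so the poll timeout is never shorter
 * than the nearest waiter's remaining time:
 *
 *     hrrel =          1 ns  ->  timeout =  1 ms
 *     hrrel =  2,500,000 ns  ->  timeout =  3 ms
 *     hrrel = 10,000,000 ns  ->  timeout = 10 ms
 */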
2888 static void
2889 timer_thread_check_signal(rb_vm_t *vm)
2891 // ruby_sigchld_handler(vm); TODO
2893 int signum = rb_signal_buff_size();
2894 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2895 RUBY_DEBUG_LOG("signum:%d", signum);
2896 threadptr_trap_interrupt(vm->ractor.main_thread);
2900 static bool
2901 timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2903 if (abs < now) {
2904 return true;
2906 else if (abs - now < RB_HRTIME_PER_MSEC) {
2907 return true; // too short time
2909 else {
2910 return false;
2914 static rb_thread_t *
2915 timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2917 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2919 if (w != NULL &&
2920 (w->flags & thread_sched_waiting_timeout) &&
2921 timer_thread_check_exceed(w->data.timeout, now)) {
2923 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
2925 // delete from waiting list
2926 ccan_list_del_init(&w->node);
2928 // setup result
2929 w->flags = thread_sched_waiting_none;
2930 w->data.result = 0;
2932 return thread_sched_waiting_thread(w);
2935 return NULL;
2938 static void
2939 timer_thread_wakeup_thread(rb_thread_t *th)
2941 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2942 struct rb_thread_sched *sched = TH_SCHED(th);
2944 thread_sched_lock(sched, th);
2946 if (sched->running != th) {
2947 thread_sched_to_ready_common(sched, th, true, false);
2949 else {
2950 // will release the execution right
2953 thread_sched_unlock(sched, th);
2956 static void
2957 timer_thread_check_timeout(rb_vm_t *vm)
2959 rb_hrtime_t now = rb_hrtime_now();
2960 rb_thread_t *th;
2962 rb_native_mutex_lock(&timer_th.waiting_lock);
2964 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2965 timer_thread_wakeup_thread(th);
2968 rb_native_mutex_unlock(&timer_th.waiting_lock);
2971 static void
2972 timer_thread_check_timeslice(rb_vm_t *vm)
2974 // TODO: check time
2975 rb_thread_t *th;
2976 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2977 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2978 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2982 void
2983 rb_assert_sig(void)
2985 sigset_t oldmask;
2986 pthread_sigmask(0, NULL, &oldmask);
2987 if (sigismember(&oldmask, SIGVTALRM)) {
2988 rb_bug("!!!");
2990 else {
2991 RUBY_DEBUG_LOG("ok");
2995 static void *
2996 timer_thread_func(void *ptr)
2998 rb_vm_t *vm = (rb_vm_t *)ptr;
2999 #if defined(RUBY_NT_SERIAL)
3000 ruby_nt_serial = (rb_atomic_t)-1;
3001 #endif
3003 RUBY_DEBUG_LOG("started%s", "");
3005 while (system_working) {
3006 timer_thread_check_signal(vm);
3007 timer_thread_check_timeout(vm);
3008 ubf_wakeup_all_threads();
3010 RUBY_DEBUG_LOG("system_working:%d", system_working);
3011 timer_thread_polling(vm);
3014 RUBY_DEBUG_LOG("terminated");
3015 return NULL;
3018 /* only use signal-safe system calls here */
3019 static void
3020 signal_communication_pipe(int fd)
3022 #if USE_EVENTFD
3023 const uint64_t buff = 1;
3024 #else
3025 const char buff = '!';
3026 #endif
3027 ssize_t result;
3029 /* already opened */
3030 if (fd >= 0) {
3031 retry:
3032 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3033 int e = errno;
3034 switch (e) {
3035 case EINTR: goto retry;
3036 case EAGAIN:
3037 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3038 case EWOULDBLOCK:
3039 #endif
3040 break;
3041 default:
3042 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3045 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3047 else {
3048 // ignore wakeup
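/* Illustration (not part of the upstream source): the wakeup token differs by
 * backend, and EAGAIN/EWOULDBLOCK is deliberately ignored because an unread token
 * already guarantees the poller will wake up:
 *
 *     eventfd:  write(fd, &(uint64_t){1}, 8)   adds 1 to the counter
 *     pipe:     write(fd, "!", 1)              one byte into the pipe buffer
 */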
3052 static void
3053 timer_thread_wakeup_force(void)
3055 // must not use RUBY_DEBUG_LOG() because this function can be called within signal handlers.
3056 signal_communication_pipe(timer_th.comm_fds[1]);
3059 static void
3060 timer_thread_wakeup_locked(rb_vm_t *vm)
3062 // the caller must already hold the ractor scheduler lock.
3063 ASSERT_ractor_sched_locked(vm, NULL);
3065 if (timer_th.created_fork_gen == current_fork_gen) {
3066 if (vm->ractor.sched.timeslice_wait_inf) {
3067 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3068 timer_thread_wakeup_force();
3070 else {
3071 RUBY_DEBUG_LOG("will be wakeup...");
3076 static void
3077 timer_thread_wakeup(void)
3079 rb_vm_t *vm = GET_VM();
3081 ractor_sched_lock(vm, NULL);
3083 timer_thread_wakeup_locked(vm);
3085 ractor_sched_unlock(vm, NULL);
3088 static void
3089 rb_thread_create_timer_thread(void)
3091 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3093 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3095 timer_th.created_fork_gen = current_fork_gen;
3097 if (created_fork_gen != current_fork_gen) {
3098 if (created_fork_gen != 0) {
3099 RUBY_DEBUG_LOG("forked child process");
3101 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3102 #if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3103 close_invalidate(&timer_th.event_fd, "close event_fd");
3104 #endif
3105 rb_native_mutex_destroy(&timer_th.waiting_lock);
3108 ccan_list_head_init(&timer_th.waiting);
3109 rb_native_mutex_initialize(&timer_th.waiting_lock);
3111 // open communication channel
3112 setup_communication_pipe_internal(timer_th.comm_fds);
3114 // open event fd
3115 timer_thread_setup_mn();
3118 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3121 static int
3122 native_stop_timer_thread(void)
3124 int stopped;
3125 stopped = --system_working <= 0;
3127 if (stopped) {
3128 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3129 timer_thread_wakeup_force();
3130 RUBY_DEBUG_LOG("wakeup sent");
3131 pthread_join(timer_th.pthread_id, NULL);
3134 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3135 return stopped;
3138 static void
3139 native_reset_timer_thread(void)
3144 #ifdef HAVE_SIGALTSTACK
3145 int
3146 ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3148 void *base;
3149 size_t size;
3150 const size_t water_mark = 1024 * 1024;
3151 STACK_GROW_DIR_DETECTION;
3153 if (th) {
3154 size = th->ec->machine.stack_maxsize;
3155 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3157 #ifdef STACKADDR_AVAILABLE
3158 else if (get_stack(&base, &size) == 0) {
3159 # ifdef __APPLE__
3160 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3161 struct rlimit rlim;
3162 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3163 size = (size_t)rlim.rlim_cur;
3166 # endif
3167 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3169 #endif
3170 else {
3171 return 0;
3174 size /= RUBY_STACK_SPACE_RATIO;
3175 if (size > water_mark) size = water_mark;
3176 if (IS_STACK_DIR_UPPER()) {
3177 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3178 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3180 else {
3181 if (size > (size_t)base) size = (size_t)base;
3182 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3184 return 0;
3186 #endif
3188 int
3189 rb_reserved_fd_p(int fd)
3191 /* no false-positive if out-of-FD at startup */
3192 if (fd < 0) return 0;
3194 if (fd == timer_th.comm_fds[0] ||
3195 fd == timer_th.comm_fds[1]
3196 #if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3197 || fd == timer_th.event_fd
3198 #endif
3200 goto check_fork_gen;
3202 return 0;
3204 check_fork_gen:
3205 if (timer_th.created_fork_gen == current_fork_gen) {
3206 /* async-signal-safe */
3207 return 1;
3209 else {
3210 return 0;
3214 rb_nativethread_id_t
3215 rb_nativethread_self(void)
3217 return pthread_self();
3220 #if defined(USE_POLL) && !defined(HAVE_PPOLL)
3221 /* TODO: don't ignore sigmask */
3222 static int
3223 ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3224 const struct timespec *ts, const sigset_t *sigmask)
3226 int timeout_ms;
3228 if (ts) {
3229 int tmp, tmp2;
3231 if (ts->tv_sec > INT_MAX/1000)
3232 timeout_ms = INT_MAX;
3233 else {
3234 tmp = (int)(ts->tv_sec * 1000);
3235 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3236 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3237 if (INT_MAX - tmp < tmp2)
3238 timeout_ms = INT_MAX;
3239 else
3240 timeout_ms = (int)(tmp + tmp2);
3243 else
3244 timeout_ms = -1;
3246 return poll(fds, nfds, timeout_ms);
3248 # define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3249 #endif
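/* Illustration (not part of the upstream source): the fallback rounds the timespec
 * up to whole milliseconds so a sub-millisecond sleep does not busy-loop:
 *
 *     { .tv_sec = 0, .tv_nsec = 1 }       ->  timeout_ms = 1
 *     { .tv_sec = 1, .tv_nsec = 500000 }  ->  timeout_ms = 1001
 *     ts == NULL                          ->  timeout_ms = -1 (block indefinitely)
 */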
3252 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3253 * since threads may be too starved to enter the GVL waitqueue for
3254 * us to detect contention. Instead, we want to kick other threads
3255 * so they can run and possibly prevent us from entering slow paths
3256 * in ppoll() or similar syscalls.
3258 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3259 * [ruby-core:90417] [Bug #15398]
3261 #define THREAD_BLOCKING_YIELD(th) do { \
3262 const rb_thread_t *next_th; \
3263 struct rb_thread_sched *sched = TH_SCHED(th); \
3264 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3265 thread_sched_to_waiting(sched, (th)); \
3266 next_th = sched->running; \
3267 rb_native_mutex_unlock(&sched->lock_); \
3268 native_thread_yield(); /* TODO: needed? */ \
3269 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3270 native_thread_yield(); \
3273 static void
3274 native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3276 struct rb_thread_sched *sched = TH_SCHED(th);
3278 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3279 if (rel) {
3280 if (th_has_dedicated_nt(th)) {
3281 native_cond_sleep(th, rel);
3283 else {
3284 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3287 else {
3288 thread_sched_to_waiting_until_wakeup(sched, th);
3291 RUBY_DEBUG_LOG("wakeup");
3294 // fork read-write lock (only for pthread)
3295 static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3297 void
3298 rb_thread_release_fork_lock(void)
3300 int r;
3301 if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
3302 rb_bug_errno("pthread_rwlock_unlock", r);
3306 void
3307 rb_thread_reset_fork_lock(void)
3309 int r;
3310 if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
3311 rb_bug_errno("pthread_rwlock_destroy", r);
3314 if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
3315 rb_bug_errno("pthread_rwlock_init", r);
3319 void *
3320 rb_thread_prevent_fork(void *(*func)(void *), void *data)
3322 int r;
3323 if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
3324 rb_bug_errno("pthread_rwlock_rdlock", r);
3326 void *result = func(data);
3327 rb_thread_release_fork_lock();
3328 return result;
3331 void
3332 rb_thread_acquire_fork_lock(void)
3334 int r;
3335 if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
3336 rb_bug_errno("pthread_rwlock_wrlock", r);
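/* Illustration (not part of the upstream source; do_io/arg are hypothetical
 * stand-ins): the rwlock is used "inverted" -- work that must not interleave with
 * fork() takes it as a reader, while the forking thread takes it as the exclusive
 * writer:
 *
 *     worker:   rb_thread_prevent_fork(do_io, arg);   rdlock .. func(data) .. unlock
 *     fork():   rb_thread_acquire_fork_lock();        wrlock waits for all readers
 *               ... fork() ...
 *               rb_thread_release_fork_lock();        (or rb_thread_reset_fork_lock()
 *                                                      to reinitialize, e.g. in a child)
 */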
3340 // thread internal event hooks (only for pthread)
3342 struct rb_internal_thread_event_hook {
3343 rb_internal_thread_event_callback callback;
3344 rb_event_flag_t event;
3345 void *user_data;
3347 struct rb_internal_thread_event_hook *next;
3350 static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3352 rb_internal_thread_event_hook_t *
3353 rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3355 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3356 hook->callback = callback;
3357 hook->user_data = user_data;
3358 hook->event = internal_event;
3360 int r;
3361 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3362 rb_bug_errno("pthread_rwlock_wrlock", r);
3365 hook->next = rb_internal_thread_event_hooks;
3366 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3368 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3369 rb_bug_errno("pthread_rwlock_unlock", r);
3371 return hook;
3374 bool
3375 rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3377 int r;
3378 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3379 rb_bug_errno("pthread_rwlock_wrlock", r);
3382 bool success = FALSE;
3384 if (rb_internal_thread_event_hooks == hook) {
3385 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3386 success = TRUE;
3388 else {
3389 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3391 do {
3392 if (h->next == hook) {
3393 h->next = hook->next;
3394 success = TRUE;
3395 break;
3397 } while ((h = h->next));
3400 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3401 rb_bug_errno("pthread_rwlock_unlock", r);
3404 if (success) {
3405 ruby_xfree(hook);
3407 return success;
3410 static void
3411 rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3413 int r;
3414 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3415 rb_bug_errno("pthread_rwlock_rdlock", r);
3418 if (rb_internal_thread_event_hooks) {
3419 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3420 do {
3421 if (h->event & event) {
3422 rb_internal_thread_event_data_t event_data = {
3423 .thread = th->self,
3425 (*h->callback)(event, &event_data, h->user_data);
3427 } while((h = h->next));
3429 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3430 rb_bug_errno("pthread_rwlock_unlock", r);
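/* Illustration (not part of the upstream source): a minimal consumer of this hook
 * API, matching the call shape used in rb_thread_execute_hooks() above.  The
 * helper name install_example_hook() is hypothetical. */
#if 0
static void
trace_started(rb_event_flag_t event, const rb_internal_thread_event_data_t *data, void *user)
{
    /* data->thread is the Thread's VALUE, as set up in rb_thread_execute_hooks() */
    (void)event; (void)user;
    fprintf(stderr, "thread started: %p\n", (void *)data->thread);
}

static rb_internal_thread_event_hook_t *example_hook;

static void
install_example_hook(void) /* hypothetical helper */
{
    example_hook = rb_internal_thread_add_event_hook(
        trace_started, RUBY_INTERNAL_THREAD_EVENT_STARTED, NULL);
    /* later: rb_internal_thread_remove_event_hook(example_hook); */
}
#endif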
3434 // return true if the current thread acquires a DNT.
3435 // return false if the current thread already has a DNT.
3436 bool
3437 rb_thread_lock_native_thread(void)
3439 rb_thread_t *th = GET_THREAD();
3440 bool is_snt = th->nt->dedicated == 0;
3441 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
3443 return is_snt;
3446 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */