/**********************************************************************

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/sanitizers.h"

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)
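/*
 * The ubf ("unblocking function") list machinery used by the pthreads
 * implementation is not needed on this backend: blocking waits go through
 * w32_wait_events(), which also watches the per-thread interrupt event, so
 * the hooks above can be stubbed out as no-ops.
 */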
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)

rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t *hook)
RBIMPL_ATTR_NORETURN()
w32_error(const char *func)

    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR)&lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR)&lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
#define W32_EVENT_DEBUG 0

#if W32_EVENT_DEBUG
#define w32_event_debug printf
#else
#define w32_event_debug if (0) printf
#endif
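/*
 * w32_mutex_lock() below waits on the mutex HANDLE through w32_wait_events(),
 * so a thread blocked on the lock can still be woken by its interrupt event
 * (the WAIT_OBJECT_0 + 1 case). A zero timeout implements try-lock.
 */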
w32_mutex_lock(HANDLE lock, bool try)

        // RUBY_DEBUG_LOG() is not available because RUBY_DEBUG_LOG() calls it.
        w32_event_debug("lock:%p\n", lock);

        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);

            /* get mutex object */
            w32_event_debug("locked lock:%p\n", lock);

          case WAIT_OBJECT_0 + 1:
            w32_event_debug("interrupted lock:%p\n", lock);

            w32_event_debug("timeout lock:%p\n", lock);

            rb_bug("win32_mutex_lock: WAIT_ABANDONED");

            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
w32_mutex_create(void)

    HANDLE lock = CreateMutex(NULL, FALSE, NULL);

        w32_error("rb_native_mutex_initialize");
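/*
 * The thread scheduler lock (the GVL on this platform) is a plain Win32
 * mutex: thread_sched_to_running() acquires it, thread_sched_to_waiting()
 * releases it, and thread_sched_yield() releases it, calls Sleep(0) to give
 * other runnable threads a chance, then re-acquires it.
 */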
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)

    w32_mutex_lock(sched->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);

#define thread_sched_to_dead thread_sched_to_waiting

thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)

    ReleaseMutex(sched->lock);

thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)

    thread_sched_to_waiting(sched, th);
    native_thread_yield();
    thread_sched_to_running(sched, th);

rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)

    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
    sched->lock = w32_mutex_create();

rb_thread_sched_destroy(struct rb_thread_sched *sched)

    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
    CloseHandle(sched->lock);
ruby_thread_from_native(void)

    return TlsGetValue(ruby_native_thread_key);

ruby_thread_set_native(rb_thread_t *th)

    rb_ractor_set_current_ec(th->ractor, th->ec);

    return TlsSetValue(ruby_native_thread_key, th);
Init_native_thread(rb_thread_t *main_th)

    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");

    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");

    ruby_thread_set_native(main_th);
    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    DuplicateHandle(GetCurrentProcess(),
                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    RUBY_DEBUG_LOG("initial thread th:%u thid:%p, event: %p",
                   rb_th_serial(main_th),
                   main_th->nt->thread_id,
                   main_th->nt->interrupt_event);
ruby_mn_threads_params(void)
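/*
 * w32_wait_events() is the central wait primitive: when called on behalf of a
 * Ruby thread it copies the caller's handles into a temporary array, appends
 * th->nt->interrupt_event, and hands everything to WaitForMultipleObjects().
 * A return value of WAIT_OBJECT_0 + initcount therefore means "interrupted"
 * rather than "one of the caller's handles is signaled".
 */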
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)

    HANDLE *targets = events;

    const int initcount = count;

    w32_event_debug("events:%p, count:%d, timeout:%ld, th:%u\n",
                    events, count, timeout, th ? rb_th_serial(th) : UINT_MAX);

    if (th && (intr = th->nt->interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            w32_event_debug("handle:%p (count:%d, intr)\n", intr, count);

        else if (intr == th->nt->interrupt_event) {
            w32_error("w32_wait_events");

    w32_event_debug("WaitForMultipleObjects start count:%d\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    w32_event_debug("WaitForMultipleObjects end ret:%lu\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {

    if (ret == WAIT_FAILED && W32_EVENT_DEBUG) {

        for (i = 0; i < count; i++) {
            w32_event_debug("i:%d %s\n", i, GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)

    return w32_wait_events(events, num, timeout, ruby_thread_from_native());

rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)

    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
w32_close_handle(HANDLE handle)

    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");

w32_resume_thread(HANDLE handle)

    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
#define HAVE__BEGINTHREADEX 1

#undef HAVE__BEGINTHREADEX

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif
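/*
 * w32_create_thread() always starts the new thread suspended
 * (CREATE_SUSPENDED) so the caller can finish setting up rb_thread_t before
 * w32_resume_thread() lets it run; STACK_SIZE_PARAM_IS_A_RESERVATION makes
 * stack_size a reservation rather than an initial commit.
 */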
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)

    return start_thread(0, stack_size, func, val,
                        CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
rb_w32_sleep(unsigned long msec)

    return w32_wait_events(0, 0, msec, ruby_thread_from_native());

rb_w32_Sleep(unsigned long msec)

    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
hrtime2msec(rb_hrtime_t hrt)

    return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
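/*
 * native_sleep() registers ubf_handle as th->unblock.func under
 * th->interrupt_lock before it waits, so another thread can cut the sleep
 * short by signaling the interrupt event; the registration is cleared again
 * once the wait returns.
 */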
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)

    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    THREAD_BLOCKING_BEGIN(th);

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediately */

            RUBY_DEBUG_LOG("start msec:%lu", msec);
            ret = w32_wait_events(0, 0, msec, th);
            RUBY_DEBUG_LOG("done ret:%lu", ret);

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;

        rb_native_mutex_unlock(&th->interrupt_lock);

    THREAD_BLOCKING_END(th);
rb_native_mutex_lock(rb_nativethread_lock_t *lock)

#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif

rb_native_mutex_trylock(rb_nativethread_lock_t *lock)

#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif

rb_native_mutex_unlock(rb_nativethread_lock_t *lock)

#ifdef USE_WIN32_MUTEX
    RUBY_DEBUG_LOG("lock:%p", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif

rb_native_mutex_initialize(rb_nativethread_lock_t *lock)

#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif

rb_native_mutex_destroy(rb_nativethread_lock_t *lock)

#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
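/*
 * Condition variables are built by hand: rb_nativethread_cond_t heads a
 * circular doubly-linked list of cond_event_entry waiters, each owning an
 * auto-reset event. Signal/broadcast walk the list and set the waiters'
 * events; a waiter enqueues itself, drops the mutex, waits on its own event,
 * then re-takes the mutex and unlinks itself.
 */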
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
rb_native_cond_signal(rb_nativethread_cond_t *cond)

    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        e->next = e->prev = e;

rb_native_cond_broadcast(rb_nativethread_cond_t *cond)

    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        e->next = e->prev = e;
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)

    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */

    entry.prev = head->prev;
    head->prev->next = &entry;

    rb_native_mutex_unlock(mutex);

    r = WaitForSingleObject(entry.event, msec);
    if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
        rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);

    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)

    native_cond_timedwait_ms(cond, mutex, INFINITE);
abs_timespec_to_timeout_ms(const struct timespec *ts)

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)

    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)

    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,

    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
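/*
 * native_cond_timeout() converts a relative timespec into an absolute
 * deadline based on gettimeofday(), normalizing tv_nsec and clamping to
 * TIMET_MAX if the addition overflows.
 */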
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)

    struct timespec timeout;

    ret = gettimeofday(&tv, 0);

    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_nsec -= 1000*1000*1000;

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;
rb_native_cond_initialize(rb_nativethread_cond_t *cond)

    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;

rb_native_cond_destroy(rb_nativethread_cond_t *cond)
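/*
 * These are used like a pthread condition variable. An illustrative sketch
 * (the names `lock`, `cond` and `ready` are placeholders, not part of this
 * file):
 *
 *   rb_native_mutex_lock(&lock);
 *   while (!ready) {
 *       rb_native_cond_wait(&cond, &lock); // drops and re-takes `lock`
 *   }
 *   rb_native_mutex_unlock(&lock);
 *
 * with the waker calling rb_native_cond_signal()/rb_native_cond_broadcast()
 * while holding the same lock ("cond is guarded by mutex" above).
 */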
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

COMPILER_WARNING_PUSH
#if __has_warning("-Wmaybe-uninitialized")
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
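/*
 * native_thread_init_stack() discovers the machine-stack bounds with
 * VirtualQuery() on an address in the caller's frame (unwrapped from ASan via
 * asan_get_real_stack_addr()), then keeps some headroom, capped at 1 MiB,
 * out of the usable stack_maxsize.
 */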
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi, void *local_in_parent_frame)

    return VirtualQuery(asan_get_real_stack_addr(local_in_parent_frame), mi, sizeof(*mi));
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)

    MEMORY_BASIC_INFORMATION mi;

    CHECK_ERR(query_memory_basic_info(&mi, local_in_parent_frame));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;

    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
native_thread_destroy(struct rb_native_thread *nt)

    HANDLE intr = InterlockedExchangePointer(&nt->interrupt_event, 0);
    RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, nt->thread_id);
    w32_close_handle(intr);
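/*
 * thread_start_func_1() is the entry point every new Ruby thread runs on this
 * backend: it records its stack bounds, creates its interrupt event, acquires
 * the scheduler lock, installs itself in TLS, and only then enters the
 * portable thread_start_func_2(); the duplicated thread HANDLE is closed on
 * the way out.
 */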
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)

    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->nt->thread_id;

    native_thread_init_stack(th, &th);
    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    RUBY_DEBUG_LOG("thread created th:%u, thid: %p, event: %p",
                   rb_th_serial(th), th->nt->thread_id, th->nt->interrupt_event);

    thread_sched_to_running(TH_SCHED(th), th);
    ruby_thread_set_native(th);

    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    RUBY_DEBUG_LOG("thread deleted th:%u", rb_th_serial(th));
native_thread_create(rb_thread_t *th)

    const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
    th->nt = ZALLOC(struct rb_native_thread);
    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.vm_stack = vm_stack;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);

    if ((th->nt->thread_id) == 0) {

    w32_resume_thread(th->nt->thread_id);

    if (USE_RUBY_DEBUG_LOG) {

        RUBY_DEBUG_LOG("th:%u thid:%p intr:%p, stack size: %"PRIuSIZE,
                       rb_th_serial(th), th->nt->thread_id,
                       th->nt->interrupt_event, stack_size);
native_thread_join(HANDLE th)

    w32_wait_events(&th, 1, INFINITE, 0);
#if USE_NATIVE_THREAD_PRIORITY

native_thread_apply_priority(rb_thread_t *th)

    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->nt->thread_id, priority);

#endif /* USE_NATIVE_THREAD_PRIORITY */
int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)

    fd_set *r = NULL, *w = NULL, *e = NULL;

        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);

        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);

        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);

    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
rb_w32_check_interrupt(rb_thread_t *th)

    return w32_wait_events(0, 0, 0, th);
ubf_handle(void *ptr)

    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u\n", rb_th_serial(th));

    if (!SetEvent(th->nt->interrupt_event)) {
        w32_error("ubf_handle");
int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)
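/*
 * The timer thread wakes up every TIME_QUANTUM_USEC/1000 milliseconds so the
 * main thread gets its pending signals checked. timer_thread.lock is a
 * manual-reset event that doubles as the shutdown signal:
 * native_stop_timer_thread() sets it, which breaks the WaitForSingleObject()
 * loop below.
 */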
static unsigned long __stdcall
timer_thread_func(void *dummy)

    rb_vm_t *vm = GET_VM();
    RUBY_DEBUG_LOG("start");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {

        rb_threadptr_check_signal(vm->ractor.main_thread);

    RUBY_DEBUG_LOG("end");
rb_thread_wakeup_timer_thread(int sig)
rb_thread_create_timer_thread(void)

    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);

        timer_thread.id = w32_create_thread(1024 + (USE_RUBY_DEBUG_LOG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
native_stop_timer_thread(void)

    int stopped = --system_working <= 0;

        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
native_reset_timer_thread(void)

    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)

    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
#if defined(__MINGW32__)

rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)

    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);

    return EXCEPTION_CONTINUE_SEARCH;
#ifdef RUBY_ALLOCA_CHKSTK

ruby_alloca_chkstk(size_t len, void *sp)

    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
rb_reserved_fd_p(int fd)

rb_nativethread_self(void)

    return GetCurrentThread();
native_set_thread_name(rb_thread_t *th)

native_thread_native_thread_id(rb_thread_t *th)

    DWORD tid = GetThreadId(th->nt->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);

#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
rb_add_running_thread(rb_thread_t *th)

rb_del_running_thread(rb_thread_t *th)

th_has_dedicated_nt(const rb_thread_t *th)

rb_threadptr_sched_free(rb_thread_t *th)

    native_thread_destroy(th->nt);

    ruby_xfree(th->sched.vm_stack);

rb_threadptr_remove(rb_thread_t *th)

rb_thread_sched_mark_zombies(rb_vm_t *vm)
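/*
 * The ractor barrier below is counter based: rb_ractor_sched_barrier_start()
 * sets barrier_waiting, interrupts the running threads of every other ractor,
 * and waits on barrier_cond until blocking_cnt == cnt; a joining ractor
 * signals barrier_cond once it is the last one to block and then sleeps on
 * its own barrier_wait_cond until barrier_cnt is bumped.
 */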
vm_barrier_finish_p(rb_vm_t *vm)

    RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
                   vm->ractor.blocking_cnt == vm->ractor.cnt,
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.blocking_cnt);

    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
    return vm->ractor.blocking_cnt == vm->ractor.cnt;
rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)

    vm->ractor.sync.barrier_waiting = true;

    RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                   vm->ractor.sync.barrier_cnt,
                   vm->ractor.blocking_cnt);

    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {

        rb_ractor_vm_barrier_interrupt_running_thread(r);

    while (!vm_barrier_finish_p(vm)) {
        rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);

    RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

    vm->ractor.sync.barrier_waiting = false;
    vm->ractor.sync.barrier_cnt++;

    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        rb_native_cond_signal(&r->barrier_wait_cond);
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)

    vm->ractor.sync.lock_owner = cr;
    unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
    rb_thread_t *th = GET_THREAD();

    RB_VM_SAVE_MACHINE_CONTEXT(th);

    if (rb_ractor_status_p(cr, ractor_running)) {
        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

    VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

    if (vm_barrier_finish_p(vm)) {
        RUBY_DEBUG_LOG("wakeup barrier owner");
        rb_native_cond_signal(&vm->ractor.sync.barrier_cond);

        RUBY_DEBUG_LOG("wait for barrier finish");

    while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
        vm->ractor.sync.lock_owner = NULL;
        rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;

    RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

    vm->ractor.sync.lock_owner = NULL;
rb_thread_lock_native_thread(void)

rb_thread_prevent_fork(void *(*func)(void *), void *data)

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */