1 /**********************************************************************
7 Copyright (C) 2020 Samuel Grant Dawson Williams
9 **********************************************************************/
12 #include "ruby/fiber/scheduler.h"
14 #include "ruby/io/buffer.h"
16 #include "ruby/thread.h"
18 // For `ruby_thread_has_gvl_p`.
19 #include "internal/thread.h"
22 static ID id_scheduler_close
;
27 static ID id_timeout_after
;
28 static ID id_kernel_sleep
;
29 static ID id_process_wait
;
31 static ID id_io_read
, id_io_pread
;
32 static ID id_io_write
, id_io_pwrite
;
34 static ID id_io_select
;
35 static ID id_io_close
;
37 static ID id_address_resolve
;
39 static ID id_blocking_operation_wait
;
41 static ID id_fiber_schedule
;
44 * Document-class: Fiber::Scheduler
46 * This is not an existing class, but documentation of the interface that Scheduler
47 * object should comply to in order to be used as argument to Fiber.scheduler and handle non-blocking
48 * fibers. See also the "Non-blocking fibers" section in Fiber class docs for explanations
51 * Scheduler's behavior and usage are expected to be as follows:
53 * * When the execution in the non-blocking Fiber reaches some blocking operation (like
54 * sleep, wait for a process, or a non-ready I/O), it calls some of the scheduler's
55 * hook methods, listed below.
56 * * Scheduler somehow registers what the current fiber is waiting on, and yields control
57 * to other fibers with Fiber.yield (so the fiber would be suspended while expecting its
58 * wait to end, and other fibers in the same thread can perform)
59 * * At the end of the current thread execution, the scheduler's method #scheduler_close is called
60 * * The scheduler runs into a wait loop, checking all the blocked fibers (which it has
61 * registered on hook calls) and resuming them when the awaited resource is ready
62 * (e.g. I/O ready or sleep time elapsed).
64 * This way concurrent execution will be achieved transparently for every
65 * individual Fiber's code.
67 * Scheduler implementations are provided by gems, like
68 * Async[https://github.com/socketry/async].
72 * * #io_wait, #io_read, #io_write, #io_pread, #io_pwrite, and #io_select, #io_close
77 * * #block and #unblock
78 * * #blocking_operation_wait
79 * * (the list is expanded as Ruby developers make more methods having non-blocking calls)
81 * When not specified otherwise, the hook implementations are mandatory: if they are not
82 * implemented, the methods trying to call hook will fail. To provide backward compatibility,
83 * in the future hooks will be optional (if they are not implemented, due to the scheduler
84 * being created for the older Ruby version, the code which needs this hook will not fail,
85 * and will just behave in a blocking fashion).
87 * It is also strongly recommended that the scheduler implements the #fiber method, which is
88 * delegated to by Fiber.schedule.
90 * Sample _toy_ implementation of the scheduler can be found in Ruby's code, in
91 * <tt>test/fiber/scheduler.rb</tt>
95 Init_Fiber_Scheduler(void)
97 id_close
= rb_intern_const("close");
98 id_scheduler_close
= rb_intern_const("scheduler_close");
100 id_block
= rb_intern_const("block");
101 id_unblock
= rb_intern_const("unblock");
103 id_timeout_after
= rb_intern_const("timeout_after");
104 id_kernel_sleep
= rb_intern_const("kernel_sleep");
105 id_process_wait
= rb_intern_const("process_wait");
107 id_io_read
= rb_intern_const("io_read");
108 id_io_pread
= rb_intern_const("io_pread");
109 id_io_write
= rb_intern_const("io_write");
110 id_io_pwrite
= rb_intern_const("io_pwrite");
112 id_io_wait
= rb_intern_const("io_wait");
113 id_io_select
= rb_intern_const("io_select");
114 id_io_close
= rb_intern_const("io_close");
116 id_address_resolve
= rb_intern_const("address_resolve");
118 id_blocking_operation_wait
= rb_intern_const("blocking_operation_wait");
120 id_fiber_schedule
= rb_intern_const("fiber");
123 rb_cFiberScheduler
= rb_define_class_under(rb_cFiber
, "Scheduler", rb_cObject
);
124 rb_define_method(rb_cFiberScheduler
, "close", rb_fiber_scheduler_close
, 0);
125 rb_define_method(rb_cFiberScheduler
, "process_wait", rb_fiber_scheduler_process_wait
, 2);
126 rb_define_method(rb_cFiberScheduler
, "io_wait", rb_fiber_scheduler_io_wait
, 3);
127 rb_define_method(rb_cFiberScheduler
, "io_read", rb_fiber_scheduler_io_read
, 4);
128 rb_define_method(rb_cFiberScheduler
, "io_write", rb_fiber_scheduler_io_write
, 4);
129 rb_define_method(rb_cFiberScheduler
, "io_pread", rb_fiber_scheduler_io_pread
, 5);
130 rb_define_method(rb_cFiberScheduler
, "io_pwrite", rb_fiber_scheduler_io_pwrite
, 5);
131 rb_define_method(rb_cFiberScheduler
, "io_select", rb_fiber_scheduler_io_select
, 4);
132 rb_define_method(rb_cFiberScheduler
, "kernel_sleep", rb_fiber_scheduler_kernel_sleep
, 1);
133 rb_define_method(rb_cFiberScheduler
, "address_resolve", rb_fiber_scheduler_address_resolve
, 1);
134 rb_define_method(rb_cFiberScheduler
, "timeout_after", rb_fiber_scheduler_timeout_after
, 3);
135 rb_define_method(rb_cFiberScheduler
, "block", rb_fiber_scheduler_block
, 2);
136 rb_define_method(rb_cFiberScheduler
, "unblock", rb_fiber_scheduler_unblock
, 2);
137 rb_define_method(rb_cFiberScheduler
, "fiber", rb_fiber_scheduler
, -2);
138 rb_define_method(rb_cFiberScheduler
, "blocking_operation_wait", rb_fiber_scheduler_blocking_operation_wait
, -2);
143 rb_fiber_scheduler_get(void)
145 RUBY_ASSERT(ruby_thread_has_gvl_p());
147 rb_thread_t
*thread
= GET_THREAD();
150 return thread
->scheduler
;
154 verify_interface(VALUE scheduler
)
156 if (!rb_respond_to(scheduler
, id_block
)) {
157 rb_raise(rb_eArgError
, "Scheduler must implement #block");
160 if (!rb_respond_to(scheduler
, id_unblock
)) {
161 rb_raise(rb_eArgError
, "Scheduler must implement #unblock");
164 if (!rb_respond_to(scheduler
, id_kernel_sleep
)) {
165 rb_raise(rb_eArgError
, "Scheduler must implement #kernel_sleep");
168 if (!rb_respond_to(scheduler
, id_io_wait
)) {
169 rb_raise(rb_eArgError
, "Scheduler must implement #io_wait");
174 fiber_scheduler_close(VALUE scheduler
)
176 return rb_fiber_scheduler_close(scheduler
);
180 fiber_scheduler_close_ensure(VALUE _thread
)
182 rb_thread_t
*thread
= (rb_thread_t
*)_thread
;
183 thread
->scheduler
= Qnil
;
189 rb_fiber_scheduler_set(VALUE scheduler
)
191 RUBY_ASSERT(ruby_thread_has_gvl_p());
193 rb_thread_t
*thread
= GET_THREAD();
196 if (scheduler
!= Qnil
) {
197 verify_interface(scheduler
);
200 // We invoke Scheduler#close when setting it to something else, to ensure
201 // the previous scheduler runs to completion before changing the scheduler.
202 // That way, we do not need to consider interactions, e.g., of a Fiber from
203 // the previous scheduler with the new scheduler.
204 if (thread
->scheduler
!= Qnil
) {
205 // rb_fiber_scheduler_close(thread->scheduler);
206 rb_ensure(fiber_scheduler_close
, thread
->scheduler
, fiber_scheduler_close_ensure
, (VALUE
)thread
);
209 thread
->scheduler
= scheduler
;
211 return thread
->scheduler
;
215 rb_fiber_scheduler_current_for_threadptr(rb_thread_t
*thread
)
219 if (thread
->blocking
== 0) {
220 return thread
->scheduler
;
228 rb_fiber_scheduler_current(void)
230 return rb_fiber_scheduler_current_for_threadptr(GET_THREAD());
233 VALUE
rb_fiber_scheduler_current_for_thread(VALUE thread
)
235 return rb_fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread
));
240 * Document-method: Fiber::Scheduler#close
242 * Called when the current thread exits. The scheduler is expected to implement this
243 * method in order to allow all waiting fibers to finalize their execution.
245 * The suggested pattern is to implement the main event loop in the #close method.
249 rb_fiber_scheduler_close(VALUE scheduler
)
251 RUBY_ASSERT(ruby_thread_has_gvl_p());
255 // The reason for calling `scheduler_close` before calling `close` is for
256 // legacy schedulers which implement `close` and expect the user to call
257 // it. Subsequently, that method would call `Fiber.set_scheduler(nil)`
258 // which should call `scheduler_close`. If it were to call `close`, it
259 // would create an infinite loop.
261 result
= rb_check_funcall(scheduler
, id_scheduler_close
, 0, NULL
);
262 if (!UNDEF_P(result
)) return result
;
264 result
= rb_check_funcall(scheduler
, id_close
, 0, NULL
);
265 if (!UNDEF_P(result
)) return result
;
271 rb_fiber_scheduler_make_timeout(struct timeval
*timeout
)
274 return rb_float_new((double)timeout
->tv_sec
+ (0.000001 * timeout
->tv_usec
));
281 * Document-method: Fiber::Scheduler#kernel_sleep
282 * call-seq: kernel_sleep(duration = nil)
284 * Invoked by Kernel#sleep and Mutex#sleep and is expected to provide
285 * an implementation of sleeping in a non-blocking way. Implementation might
286 * register the current fiber in some list of "which fiber wait until what
287 * moment", call Fiber.yield to pass control, and then in #close resume
288 * the fibers whose wait period has elapsed.
292 rb_fiber_scheduler_kernel_sleep(VALUE scheduler
, VALUE timeout
)
294 return rb_funcall(scheduler
, id_kernel_sleep
, 1, timeout
);
298 rb_fiber_scheduler_kernel_sleepv(VALUE scheduler
, int argc
, VALUE
* argv
)
300 return rb_funcallv(scheduler
, id_kernel_sleep
, argc
, argv
);
305 * Document-method: Fiber::Scheduler#timeout_after
306 * call-seq: timeout_after(duration, exception_class, *exception_arguments, &block) -> result of block
308 * Invoked by Timeout.timeout to execute the given +block+ within the given
309 * +duration+. It can also be invoked directly by the scheduler or user code.
311 * Attempt to limit the execution time of a given +block+ to the given
312 * +duration+ if possible. When a non-blocking operation causes the +block+'s
313 * execution time to exceed the specified +duration+, that non-blocking
314 * operation should be interrupted by raising the specified +exception_class+
315 * constructed with the given +exception_arguments+.
317 * General execution timeouts are often considered risky. This implementation
318 * will only interrupt non-blocking operations. This is by design because it's
319 * expected that non-blocking operations can fail for a variety of
320 * unpredictable reasons, so applications should already be robust in handling
321 * these conditions and by implication timeouts.
323 * However, as a result of this design, if the +block+ does not invoke any
324 * non-blocking operations, it will be impossible to interrupt it. If you
325 * desire to provide predictable points for timeouts, consider adding
328 * If the block is executed successfully, its result will be returned.
330 * The exception will typically be raised using Fiber#raise.
333 rb_fiber_scheduler_timeout_after(VALUE scheduler
, VALUE timeout
, VALUE exception
, VALUE message
)
335 VALUE arguments
[] = {
336 timeout
, exception
, message
339 return rb_check_funcall(scheduler
, id_timeout_after
, 3, arguments
);
343 rb_fiber_scheduler_timeout_afterv(VALUE scheduler
, int argc
, VALUE
* argv
)
345 return rb_check_funcall(scheduler
, id_timeout_after
, argc
, argv
);
350 * Document-method: Fiber::Scheduler#process_wait
351 * call-seq: process_wait(pid, flags)
353 * Invoked by Process::Status.wait in order to wait for a specified process.
354 * See that method description for arguments description.
356 * Suggested minimal implementation:
359 * Process::Status.wait(pid, flags)
362 * This hook is optional: if it is not present in the current scheduler,
363 * Process::Status.wait will behave as a blocking method.
365 * Expected to return a Process::Status instance.
368 rb_fiber_scheduler_process_wait(VALUE scheduler
, rb_pid_t pid
, int flags
)
370 VALUE arguments
[] = {
371 PIDT2NUM(pid
), RB_INT2NUM(flags
)
374 return rb_check_funcall(scheduler
, id_process_wait
, 2, arguments
);
378 * Document-method: Fiber::Scheduler#block
379 * call-seq: block(blocker, timeout = nil)
381 * Invoked by methods like Thread.join, and by Mutex, to signify that current
382 * Fiber is blocked until further notice (e.g. #unblock) or until +timeout+ has
385 * +blocker+ is what we are waiting on, informational only (for debugging and
386 * logging). There is no guarantee about its value.
388 * Expected to return boolean, specifying whether the blocking operation was
392 rb_fiber_scheduler_block(VALUE scheduler
, VALUE blocker
, VALUE timeout
)
394 return rb_funcall(scheduler
, id_block
, 2, blocker
, timeout
);
398 * Document-method: Fiber::Scheduler#unblock
399 * call-seq: unblock(blocker, fiber)
401 * Invoked to wake up Fiber previously blocked with #block (for example, Mutex#lock
402 * calls #block and Mutex#unlock calls #unblock). The scheduler should use
403 * the +fiber+ parameter to understand which fiber is unblocked.
405 * +blocker+ is what was awaited for, but it is informational only (for debugging
406 * and logging), and it is not guaranteed to be the same value as the +blocker+ for
411 rb_fiber_scheduler_unblock(VALUE scheduler
, VALUE blocker
, VALUE fiber
)
413 RUBY_ASSERT(rb_obj_is_fiber(fiber
));
415 // `rb_fiber_scheduler_unblock` can be called from points where `errno` is expected to be preserved. Therefore, we should save and restore it. For example `io_binwrite` calls `rb_fiber_scheduler_unblock` and if `errno` is reset to 0 by user code, it will break the error handling in `io_write`.
416 // If we explicitly preserve `errno` in `io_binwrite` and other similar functions (e.g. by returning it), this code is no longer needed. I hope in the future we will be able to remove it.
417 int saved_errno
= errno
;
419 VALUE result
= rb_funcall(scheduler
, id_unblock
, 2, blocker
, fiber
);
427 * Document-method: Fiber::Scheduler#io_wait
428 * call-seq: io_wait(io, events, timeout)
430 * Invoked by IO#wait, IO#wait_readable, IO#wait_writable to ask whether the
431 * specified descriptor is ready for specified events within
432 * the specified +timeout+.
434 * +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
435 * <tt>IO::PRIORITY</tt>.
437 * Suggested implementation should register which Fiber is waiting for which
438 * resources and immediately calling Fiber.yield to pass control to other
439 * fibers. Then, in the #close method, the scheduler might dispatch all the
440 * I/O resources to fibers waiting for it.
442 * Expected to return the subset of events that are ready immediately.
446 rb_fiber_scheduler_io_wait(VALUE scheduler
, VALUE io
, VALUE events
, VALUE timeout
)
448 return rb_funcall(scheduler
, id_io_wait
, 3, io
, events
, timeout
);
452 rb_fiber_scheduler_io_wait_readable(VALUE scheduler
, VALUE io
)
454 return rb_fiber_scheduler_io_wait(scheduler
, io
, RB_UINT2NUM(RUBY_IO_READABLE
), rb_io_timeout(io
));
458 rb_fiber_scheduler_io_wait_writable(VALUE scheduler
, VALUE io
)
460 return rb_fiber_scheduler_io_wait(scheduler
, io
, RB_UINT2NUM(RUBY_IO_WRITABLE
), rb_io_timeout(io
));
464 * Document-method: Fiber::Scheduler#io_select
465 * call-seq: io_select(readables, writables, exceptables, timeout)
467 * Invoked by IO.select to ask whether the specified descriptors are ready for
468 * specified events within the specified +timeout+.
470 * Expected to return the 3-tuple of Array of IOs that are ready.
473 VALUE
rb_fiber_scheduler_io_select(VALUE scheduler
, VALUE readables
, VALUE writables
, VALUE exceptables
, VALUE timeout
)
475 VALUE arguments
[] = {
476 readables
, writables
, exceptables
, timeout
479 return rb_fiber_scheduler_io_selectv(scheduler
, 4, arguments
);
482 VALUE
rb_fiber_scheduler_io_selectv(VALUE scheduler
, int argc
, VALUE
*argv
)
484 // I wondered about extracting argv, and checking if there is only a single
485 // IO instance, and instead calling `io_wait`. However, it would require a
486 // decent amount of work and it would be hard to preserve the exact
487 // semantics of IO.select.
489 return rb_check_funcall(scheduler
, id_io_select
, argc
, argv
);
493 * Document-method: Fiber::Scheduler#io_read
494 * call-seq: io_read(io, buffer, length, offset) -> read length or -errno
496 * Invoked by IO#read or IO#Buffer.read to read +length+ bytes from +io+ into a
497 * specified +buffer+ (see IO::Buffer) at the given +offset+.
499 * The +length+ argument is the "minimum length to be read". If the IO buffer
500 * size is 8KiB, but the +length+ is +1024+ (1KiB), up to 8KiB might be read,
501 * but at least 1KiB will be. Generally, the only case where less data than
502 * +length+ will be read is if there is an error reading the data.
504 * Specifying a +length+ of 0 is valid and means try reading at least once and
505 * return any available data.
507 * Suggested implementation should try to read from +io+ in a non-blocking
508 * manner and call #io_wait if the +io+ is not ready (which will yield control
511 * See IO::Buffer for an interface available to return data.
513 * Expected to return number of bytes read, or, in case of an error,
514 * <tt>-errno</tt> (negated number corresponding to system's error code).
516 * The method should be considered _experimental_.
519 rb_fiber_scheduler_io_read(VALUE scheduler
, VALUE io
, VALUE buffer
, size_t length
, size_t offset
)
521 VALUE arguments
[] = {
522 io
, buffer
, SIZET2NUM(length
), SIZET2NUM(offset
)
525 return rb_check_funcall(scheduler
, id_io_read
, 4, arguments
);
529 * Document-method: Fiber::Scheduler#io_pread
530 * call-seq: io_pread(io, buffer, from, length, offset) -> read length or -errno
532 * Invoked by IO#pread or IO::Buffer#pread to read +length+ bytes from +io+
533 * at offset +from+ into a specified +buffer+ (see IO::Buffer) at the given
536 * This method is semantically the same as #io_read, but it allows to specify
537 * the offset to read from and is often better for asynchronous IO on the same
540 * The method should be considered _experimental_.
543 rb_fiber_scheduler_io_pread(VALUE scheduler
, VALUE io
, rb_off_t from
, VALUE buffer
, size_t length
, size_t offset
)
545 VALUE arguments
[] = {
546 io
, buffer
, OFFT2NUM(from
), SIZET2NUM(length
), SIZET2NUM(offset
)
549 return rb_check_funcall(scheduler
, id_io_pread
, 5, arguments
);
553 * Document-method: Fiber::Scheduler#io_write
554 * call-seq: io_write(io, buffer, length, offset) -> written length or -errno
556 * Invoked by IO#write or IO::Buffer#write to write +length+ bytes to +io+ from
557 * a specified +buffer+ (see IO::Buffer) at the given +offset+.
559 * The +length+ argument is the "minimum length to be written". If the IO
560 * buffer size is 8KiB, but the +length+ specified is 1024 (1KiB), at most 8KiB
561 * will be written, but at least 1KiB will be. Generally, the only case where
562 * less data than +length+ will be written is if there is an error writing the
565 * Specifying a +length+ of 0 is valid and means try writing at least once, as
566 * much data as possible.
568 * Suggested implementation should try to write to +io+ in a non-blocking
569 * manner and call #io_wait if the +io+ is not ready (which will yield control
572 * See IO::Buffer for an interface available to get data from buffer
575 * Expected to return number of bytes written, or, in case of an error,
576 * <tt>-errno</tt> (negated number corresponding to system's error code).
578 * The method should be considered _experimental_.
581 rb_fiber_scheduler_io_write(VALUE scheduler
, VALUE io
, VALUE buffer
, size_t length
, size_t offset
)
583 VALUE arguments
[] = {
584 io
, buffer
, SIZET2NUM(length
), SIZET2NUM(offset
)
587 return rb_check_funcall(scheduler
, id_io_write
, 4, arguments
);
591 * Document-method: Fiber::Scheduler#io_pwrite
592 * call-seq: io_pwrite(io, buffer, from, length, offset) -> written length or -errno
594 * Invoked by IO#pwrite or IO::Buffer#pwrite to write +length+ bytes to +io+
595 * at offset +from+ into a specified +buffer+ (see IO::Buffer) at the given
598 * This method is semantically the same as #io_write, but it allows to specify
599 * the offset to write to and is often better for asynchronous IO on the same
602 * The method should be considered _experimental_.
606 rb_fiber_scheduler_io_pwrite(VALUE scheduler
, VALUE io
, rb_off_t from
, VALUE buffer
, size_t length
, size_t offset
)
608 VALUE arguments
[] = {
609 io
, buffer
, OFFT2NUM(from
), SIZET2NUM(length
), SIZET2NUM(offset
)
612 return rb_check_funcall(scheduler
, id_io_pwrite
, 5, arguments
);
616 rb_fiber_scheduler_io_read_memory(VALUE scheduler
, VALUE io
, void *base
, size_t size
, size_t length
)
618 VALUE buffer
= rb_io_buffer_new(base
, size
, RB_IO_BUFFER_LOCKED
);
620 VALUE result
= rb_fiber_scheduler_io_read(scheduler
, io
, buffer
, length
, 0);
622 rb_io_buffer_free_locked(buffer
);
628 rb_fiber_scheduler_io_write_memory(VALUE scheduler
, VALUE io
, const void *base
, size_t size
, size_t length
)
630 VALUE buffer
= rb_io_buffer_new((void*)base
, size
, RB_IO_BUFFER_LOCKED
|RB_IO_BUFFER_READONLY
);
632 VALUE result
= rb_fiber_scheduler_io_write(scheduler
, io
, buffer
, length
, 0);
634 rb_io_buffer_free_locked(buffer
);
640 rb_fiber_scheduler_io_pread_memory(VALUE scheduler
, VALUE io
, rb_off_t from
, void *base
, size_t size
, size_t length
)
642 VALUE buffer
= rb_io_buffer_new(base
, size
, RB_IO_BUFFER_LOCKED
);
644 VALUE result
= rb_fiber_scheduler_io_pread(scheduler
, io
, from
, buffer
, length
, 0);
646 rb_io_buffer_free_locked(buffer
);
652 rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler
, VALUE io
, rb_off_t from
, const void *base
, size_t size
, size_t length
)
654 VALUE buffer
= rb_io_buffer_new((void*)base
, size
, RB_IO_BUFFER_LOCKED
|RB_IO_BUFFER_READONLY
);
656 VALUE result
= rb_fiber_scheduler_io_pwrite(scheduler
, io
, from
, buffer
, length
, 0);
658 rb_io_buffer_free_locked(buffer
);
664 rb_fiber_scheduler_io_close(VALUE scheduler
, VALUE io
)
666 VALUE arguments
[] = {io
};
668 return rb_check_funcall(scheduler
, id_io_close
, 1, arguments
);
672 * Document-method: Fiber::Scheduler#address_resolve
673 * call-seq: address_resolve(hostname) -> array_of_strings or nil
675 * Invoked by any method that performs a non-reverse DNS lookup. The most
676 * notable method is Addrinfo.getaddrinfo, but there are many others.
678 * The method is expected to return an array of strings corresponding to ip
679 * addresses the +hostname+ is resolved to, or +nil+ if it can not be resolved.
681 * Fairly exhaustive list of all possible call-sites:
683 * - Addrinfo.getaddrinfo
688 * - Addrinfo.marshal_load
692 * - IPSocket.getaddress
693 * - TCPSocket.gethostbyname
694 * - UDPSocket#connect
697 * - Socket.getaddrinfo
698 * - Socket.gethostbyname
699 * - Socket.pack_sockaddr_in
700 * - Socket.sockaddr_in
701 * - Socket.unpack_sockaddr_in
704 rb_fiber_scheduler_address_resolve(VALUE scheduler
, VALUE hostname
)
706 VALUE arguments
[] = {
710 return rb_check_funcall(scheduler
, id_address_resolve
, 1, arguments
);
713 struct rb_blocking_operation_wait_arguments
{
714 void *(*function
)(void *);
716 rb_unblock_function_t
*unblock_function
;
720 struct rb_fiber_scheduler_blocking_operation_state
*state
;
724 rb_fiber_scheduler_blocking_operation_wait_proc(RB_BLOCK_CALL_FUNC_ARGLIST(value
, _arguments
))
726 struct rb_blocking_operation_wait_arguments
*arguments
= (struct rb_blocking_operation_wait_arguments
*)_arguments
;
728 if (arguments
->state
== NULL
) {
729 rb_raise(rb_eRuntimeError
, "Blocking function was already invoked!");
732 arguments
->state
->result
= rb_nogvl(arguments
->function
, arguments
->data
, arguments
->unblock_function
, arguments
->data2
, arguments
->flags
);
733 arguments
->state
->saved_errno
= rb_errno();
735 // Make sure it's only invoked once.
736 arguments
->state
= NULL
;
742 * Document-method: Fiber::Scheduler#blocking_operation_wait
743 * call-seq: blocking_operation_wait(work)
745 * Invoked by Ruby's core methods to run a blocking operation in a non-blocking way.
747 * Minimal suggested implementation is:
749 * def blocking_operation_wait(work)
750 * Thread.new(&work).join
753 VALUE
rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler
, void* (*function
)(void *), void *data
, rb_unblock_function_t
*unblock_function
, void *data2
, int flags
, struct rb_fiber_scheduler_blocking_operation_state
*state
)
755 struct rb_blocking_operation_wait_arguments arguments
= {
756 .function
= function
,
758 .unblock_function
= unblock_function
,
764 VALUE proc
= rb_proc_new(rb_fiber_scheduler_blocking_operation_wait_proc
, (VALUE
)&arguments
);
766 return rb_check_funcall(scheduler
, id_blocking_operation_wait
, 1, &proc
);
770 * Document-method: Fiber::Scheduler#fiber
771 * call-seq: fiber(&block)
773 * Implementation of the Fiber.schedule. The method is <em>expected</em> to immediately
774 * run the given block of code in a separate non-blocking fiber, and to return that Fiber.
776 * Minimal suggested implementation is:
779 * fiber = Fiber.new(blocking: false, &block)
785 rb_fiber_scheduler_fiber(VALUE scheduler
, int argc
, VALUE
*argv
, int kw_splat
)
787 return rb_funcall_passing_block_kw(scheduler
, id_fiber_schedule
, argc
, argv
, kw_splat
);