Merge pull request #6184 from dearblue/L_STOP
[mruby.git] / src / vm.c
blob 77e9e55c69ed3c2694195144b46eb4bfb1834fc0
1 /*
2 ** vm.c - virtual machine for mruby
3 **
4 ** See Copyright Notice in mruby.h
5 */
7 #include <mruby.h>
8 #include <mruby/array.h>
9 #include <mruby/class.h>
10 #include <mruby/hash.h>
11 #include <mruby/irep.h>
12 #include <mruby/numeric.h>
13 #include <mruby/proc.h>
14 #include <mruby/range.h>
15 #include <mruby/string.h>
16 #include <mruby/variable.h>
17 #include <mruby/error.h>
18 #include <mruby/opcode.h>
19 #include "value_array.h"
20 #include <mruby/throw.h>
21 #include <mruby/dump.h>
22 #include <mruby/internal.h>
23 #include <mruby/presym.h>
25 #ifdef MRB_NO_STDIO
26 #if defined(__cplusplus)
27 extern "C" {
28 #endif
29 void abort(void);
30 #if defined(__cplusplus)
31 } /* extern "C" */
32 #endif
33 #endif
35 #define STACK_INIT_SIZE 128
36 #define CALLINFO_INIT_SIZE 32
38 /* Define amount of linear stack growth. */
39 #ifndef MRB_STACK_GROWTH
40 #define MRB_STACK_GROWTH 128
41 #endif
43 /* Maximum recursive depth. Should be set lower on memory-constrained systems. */
44 #ifdef __clang__
45 #if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
46 #define __SANITIZE_ADDRESS__
47 #endif
48 #endif
50 #ifndef MRB_CALL_LEVEL_MAX
51 #if defined(__SANITIZE_ADDRESS__)
52 #define MRB_CALL_LEVEL_MAX 128
53 #else
54 #define MRB_CALL_LEVEL_MAX 512
55 #endif
56 #endif
58 /* Maximum stack depth. Should be set lower on memory-constrained systems.
59 The value below allows about 60000 recursive calls in the simplest case. */
60 #ifndef MRB_STACK_MAX
61 #define MRB_STACK_MAX (0x40000 - MRB_STACK_GROWTH)
62 #endif
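/* Build-configuration sketch (illustrative values, not part of this file): both
 * limits above are only defined when the build config has not already set them,
 * so a memory-constrained embedder could put something like the following in
 * its own mrbconf:
 *
 *   #define MRB_CALL_LEVEL_MAX 64
 *   #define MRB_STACK_MAX      (0x8000 - MRB_STACK_GROWTH)
 */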
64 #ifdef VM_DEBUG
65 # define DEBUG(x) (x)
66 #else
67 # define DEBUG(x)
68 #endif
71 #ifndef MRB_GC_FIXED_ARENA
72 static void
73 mrb_gc_arena_shrink(mrb_state *mrb, int idx)
75 mrb_gc *gc = &mrb->gc;
76 int capa = gc->arena_capa;
78 gc->arena_idx = idx;
79 if (idx < capa / 4) {
80 capa >>= 2;
81 if (capa < MRB_GC_ARENA_SIZE) {
82 capa = MRB_GC_ARENA_SIZE;
84 if (capa != gc->arena_capa) {
85 gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
86 gc->arena_capa = capa;
90 #else
91 #define mrb_gc_arena_shrink(mrb,idx) mrb_gc_arena_restore(mrb,idx)
92 #endif
94 #define CALL_MAXARGS 15
95 #define CALL_VARARGS (CALL_MAXARGS<<4 | CALL_MAXARGS)
97 static inline void
98 stack_clear(mrb_value *from, size_t count)
100 while (count-- > 0) {
101 SET_NIL_VALUE(*from);
102 from++;
106 static inline void
107 stack_copy(mrb_value *dst, const mrb_value *src, size_t size)
109 if (!src) return;
110 memcpy(dst, src, sizeof(mrb_value)*size);
113 static void
114 stack_init(mrb_state *mrb)
116 struct mrb_context *c = mrb->c;
118 /* mrb_assert(mrb->stack == NULL); */
119 c->stbase = (mrb_value*)mrb_calloc(mrb, STACK_INIT_SIZE, sizeof(mrb_value));
120 c->stend = c->stbase + STACK_INIT_SIZE;
122 /* mrb_assert(ci == NULL); */
123 c->cibase = (mrb_callinfo*)mrb_calloc(mrb, CALLINFO_INIT_SIZE, sizeof(mrb_callinfo));
124 c->ciend = c->cibase + CALLINFO_INIT_SIZE;
125 c->ci = c->cibase;
126 c->ci->u.target_class = mrb->object_class;
127 c->ci->stack = c->stbase;
130 static inline void
131 envadjust(mrb_state *mrb, mrb_value *oldbase, mrb_value *newbase, size_t oldsize)
133 mrb_callinfo *ci = mrb->c->cibase;
134 ptrdiff_t delta = newbase - oldbase;
136 if (delta == 0) return;
137 while (ci <= mrb->c->ci) {
138 struct REnv *e = mrb_vm_ci_env(ci);
139 mrb_value *st;
141 if (e && MRB_ENV_ONSTACK_P(e) &&
142 (st = e->stack) && (size_t)(st - oldbase) < oldsize) {
143 e->stack += delta;
146 if (ci->proc && MRB_PROC_ENV_P(ci->proc) && e != MRB_PROC_ENV(ci->proc)) {
147 e = MRB_PROC_ENV(ci->proc);
149 if (e && MRB_ENV_ONSTACK_P(e) &&
150 (st = e->stack) && (size_t)(st - oldbase) < oldsize) {
151 e->stack += delta;
155 ci->stack += delta;
156 ci++;
160 /** def rec; $deep += 1; if $deep > 1000; return 0; end; rec; end **/
162 static void
163 stack_extend_alloc(mrb_state *mrb, mrb_int room)
165 mrb_value *oldbase = mrb->c->stbase;
166 mrb_value *newstack;
167 size_t oldsize = mrb->c->stend - mrb->c->stbase;
168 size_t size = oldsize;
169 size_t off = mrb->c->ci->stack ? mrb->c->stend - mrb->c->ci->stack : 0;
171 if (off > size) size = off;
172 #ifdef MRB_STACK_EXTEND_DOUBLING
173 if ((size_t)room <= size)
174 size *= 2;
175 else
176 size += room;
177 #else
178 /* Use linear stack growth.
179 It is slightly slower than doubling the stack space,
180 but it saves memory on small devices. */
181 if (room <= MRB_STACK_GROWTH)
182 size += MRB_STACK_GROWTH;
183 else
184 size += room;
185 #endif
187 newstack = (mrb_value*)mrb_realloc(mrb, mrb->c->stbase, sizeof(mrb_value) * size);
188 stack_clear(&(newstack[oldsize]), size - oldsize);
189 envadjust(mrb, oldbase, newstack, oldsize);
190 mrb->c->stbase = newstack;
191 mrb->c->stend = mrb->c->stbase + size;
193 /* Raise an exception if the new stack size will be too large,
194 to prevent infinite recursion. However, do this only after resizing the stack, so mrb_raise has stack space to work with. */
195 if (size > MRB_STACK_MAX) {
196 mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
200 static inline void
201 stack_extend(mrb_state *mrb, mrb_int room)
203 if (!mrb->c->ci->stack || mrb->c->ci->stack + room >= mrb->c->stend) {
204 stack_extend_alloc(mrb, room);
208 MRB_API void
209 mrb_stack_extend(mrb_state *mrb, mrb_int room)
211 stack_extend(mrb, room);
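/* Usage sketch (illustrative; `n` is a caller-chosen count): a C extension that
 * is about to place `n` values into the current register window can reserve the
 * space first.
 *
 *   mrb_stack_extend(mrb, n);
 */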
214 static void
215 stack_extend_adjust(mrb_state *mrb, mrb_int room, const mrb_value **argp)
217 const struct mrb_context *c = mrb->c;
218 ptrdiff_t voff = *argp - c->stbase;
220 if (voff < 0 || voff >= c->stend - c->stbase) {
221 stack_extend(mrb, room);
223 else {
224 stack_extend(mrb, room);
225 *argp = c->stbase + voff;
229 static inline struct REnv*
230 uvenv(mrb_state *mrb, mrb_int up)
232 const struct RProc *proc = mrb->c->ci->proc;
233 struct REnv *e;
235 while (up--) {
236 proc = proc->upper;
237 if (!proc) return NULL;
239 e = MRB_PROC_ENV(proc);
240 if (e) return e; /* proc has enclosed env */
241 else {
242 mrb_callinfo *ci = mrb->c->ci;
243 mrb_callinfo *cb = mrb->c->cibase;
245 while (cb <= ci) {
246 if (ci->proc == proc) {
247 return mrb_vm_ci_env(ci);
249 ci--;
252 return NULL;
255 static inline const struct RProc*
256 top_proc(mrb_state *mrb, const struct RProc *proc)
258 while (proc->upper) {
259 if (MRB_PROC_SCOPE_P(proc) || MRB_PROC_STRICT_P(proc))
260 return proc;
261 proc = proc->upper;
263 return proc;
266 #define CI_PROC_SET(ci, p) do {\
267 ci->proc = p;\
268 mrb_assert(!p || !MRB_PROC_ALIAS_P(p));\
269 ci->pc = (p && !MRB_PROC_CFUNC_P(p) && p->body.irep) ? p->body.irep->iseq : NULL;\
270 } while (0)
272 void
273 mrb_vm_ci_proc_set(mrb_callinfo *ci, const struct RProc *p)
275 CI_PROC_SET(ci, p);
278 #define CI_TARGET_CLASS(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env->c : (ci)->u.target_class)
280 struct RClass*
281 mrb_vm_ci_target_class(const mrb_callinfo *ci)
283 return CI_TARGET_CLASS(ci);
286 void
287 mrb_vm_ci_target_class_set(mrb_callinfo *ci, struct RClass *tc)
289 struct REnv *e = ci->u.env;
290 if (e && e->tt == MRB_TT_ENV) {
291 e->c = tc;
293 else {
294 ci->u.target_class = tc;
298 #define CI_ENV(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env : NULL)
300 struct REnv*
301 mrb_vm_ci_env(const mrb_callinfo *ci)
303 return CI_ENV(ci);
306 static inline void
307 ci_env_set(mrb_callinfo *ci, struct REnv *e)
309 if (ci->u.env) {
310 if (ci->u.env->tt == MRB_TT_ENV) {
311 if (e) {
312 e->c = ci->u.env->c;
313 ci->u.env = e;
315 else {
316 ci->u.target_class = ci->u.env->c;
319 else if (e) {
320 e->c = ci->u.target_class;
321 ci->u.env = e;
324 else {
325 ci->u.env = e;
329 void
330 mrb_vm_ci_env_set(mrb_callinfo *ci, struct REnv *e)
332 ci_env_set(ci, e);
335 MRB_API void
336 mrb_vm_ci_env_clear(mrb_state *mrb, mrb_callinfo *ci)
338 struct REnv *e = ci->u.env;
339 if (e && e->tt == MRB_TT_ENV) {
340 ci->u.target_class = e->c;
341 mrb_env_unshare(mrb, e, FALSE);
345 #define CINFO_NONE 0 // method called from the mruby VM (no C function in between)
346 #define CINFO_SKIP 1 // the mruby VM was entered from C
347 #define CINFO_DIRECT 2 // method called directly from C
348 #define CINFO_RESUMED 3 // resumed by `Fiber.yield` (probably the main call is `mrb_fiber_resume()`)
350 #define BLK_PTR(b) ((mrb_proc_p(b)) ? mrb_proc_ptr(b) : NULL)
352 static inline mrb_callinfo*
353 cipush(mrb_state *mrb, mrb_int push_stacks, uint8_t cci, struct RClass *target_class,
354 const struct RProc *proc, struct RProc *blk, mrb_sym mid, uint16_t argc)
356 struct mrb_context *c = mrb->c;
357 mrb_callinfo *ci = c->ci;
359 if (ci + 1 == c->ciend) {
360 ptrdiff_t size = ci - c->cibase;
362 if (size > MRB_CALL_LEVEL_MAX) {
363 mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
365 c->cibase = (mrb_callinfo*)mrb_realloc(mrb, c->cibase, sizeof(mrb_callinfo)*size*2);
366 c->ci = c->cibase + size;
367 c->ciend = c->cibase + size * 2;
369 ci = ++c->ci;
370 ci->mid = mid;
371 CI_PROC_SET(ci, proc);
372 ci->blk = blk;
373 ci->stack = ci[-1].stack + push_stacks;
374 ci->n = argc & 0xf;
375 ci->nk = (argc>>4) & 0xf;
376 ci->cci = cci;
377 ci->u.target_class = target_class;
379 return ci;
382 mrb_bool
383 mrb_env_unshare(mrb_state *mrb, struct REnv *e, mrb_bool noraise)
385 if (e == NULL) return TRUE;
386 if (!MRB_ENV_ONSTACK_P(e)) return TRUE;
387 if (e->cxt != mrb->c) return TRUE;
388 if (e == CI_ENV(mrb->c->cibase)) return TRUE; /* for mirb */
390 size_t len = (size_t)MRB_ENV_LEN(e);
391 if (len == 0) {
392 e->stack = NULL;
393 MRB_ENV_CLOSE(e);
394 return TRUE;
397 size_t live = mrb->gc.live;
398 mrb_value *p = (mrb_value*)mrb_malloc_simple(mrb, sizeof(mrb_value)*len);
399 if (live != mrb->gc.live && mrb_object_dead_p(mrb, (struct RBasic*)e)) {
400 // The object e may have been collected by a GC cycle triggered inside mrb_malloc_simple().
401 // Likewise, if mrb_malloc_simple() failed and returned NULL, simply ignore the failure.
402 mrb_free(mrb, p);
403 return TRUE;
405 else if (p) {
406 stack_copy(p, e->stack, len);
407 e->stack = p;
408 MRB_ENV_CLOSE(e);
409 mrb_write_barrier(mrb, (struct RBasic*)e);
410 return TRUE;
412 else {
413 e->stack = NULL;
414 MRB_ENV_CLOSE(e);
415 MRB_ENV_SET_LEN(e, 0);
416 MRB_ENV_SET_BIDX(e, 0);
417 if (!noraise) {
418 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
420 return FALSE;
424 static inline mrb_callinfo*
425 cipop(mrb_state *mrb)
427 struct mrb_context *c = mrb->c;
428 mrb_callinfo *ci = c->ci;
429 struct REnv *env = CI_ENV(ci);
431 ci_env_set(ci, NULL); // allow GC to free the env if it is no longer needed
432 struct RProc *b = ci->blk;
433 if (b && !mrb_object_dead_p(mrb, (struct RBasic*)b) && b->tt == MRB_TT_PROC &&
434 !MRB_PROC_STRICT_P(b) && MRB_PROC_ENV(b) == CI_ENV(&ci[-1])) {
435 b->flags |= MRB_PROC_ORPHAN;
437 if (env && !mrb_env_unshare(mrb, env, TRUE)) {
438 c->ci--; // exceptions are handled at the method caller; see #3087
439 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
441 c->ci--;
442 return c->ci;
445 MRB_API mrb_value
446 mrb_protect_error(mrb_state *mrb, mrb_protect_error_func *body, void *userdata, mrb_bool *error)
448 struct mrb_jmpbuf *prev_jmp = mrb->jmp;
449 struct mrb_jmpbuf c_jmp;
450 mrb_value result = mrb_nil_value();
451 int ai = mrb_gc_arena_save(mrb);
452 const struct mrb_context *c = mrb->c;
453 ptrdiff_t ci_index = c->ci - c->cibase;
455 if (error) { *error = FALSE; }
457 MRB_TRY(&c_jmp) {
458 mrb->jmp = &c_jmp;
459 result = body(mrb, userdata);
460 mrb->jmp = prev_jmp;
462 MRB_CATCH(&c_jmp) {
463 mrb->jmp = prev_jmp;
464 result = mrb_obj_value(mrb->exc);
465 mrb->exc = NULL;
466 if (error) { *error = TRUE; }
467 if (mrb->c == c) {
468 while (c->ci - c->cibase > ci_index) {
469 cipop(mrb);
472 else {
473 // The context was probably switched by mrb_fiber_resume().
474 // Once the fiber has been switched, simply pop all successive CINFO_DIRECT entries.
475 c = mrb->c;
476 while (c->ci > c->cibase && c->ci->cci == CINFO_DIRECT) {
477 cipop(mrb);
481 MRB_END_EXC(&c_jmp);
483 mrb_gc_arena_restore(mrb, ai);
484 mrb_gc_protect(mrb, result);
485 return result;
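/* Usage sketch (illustrative; `protected_body` and `data` are placeholder
 * names): run a callback with VM-level error capture instead of letting an
 * exception unwind into the caller.
 *
 *   static mrb_value
 *   protected_body(mrb_state *mrb, void *ud)
 *   {
 *     return mrb_funcall(mrb, *(mrb_value*)ud, "call", 0);
 *   }
 *
 *   mrb_bool err;
 *   mrb_value ret = mrb_protect_error(mrb, protected_body, &data, &err);
 *   if (err) {
 *     // `ret` holds the raised exception object; mrb->exc has been cleared
 *   }
 */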
488 void mrb_exc_set(mrb_state *mrb, mrb_value exc);
489 static mrb_value mrb_run(mrb_state *mrb, const struct RProc* proc, mrb_value self);
491 #ifndef MRB_FUNCALL_ARGC_MAX
492 #define MRB_FUNCALL_ARGC_MAX 16
493 #endif
495 MRB_API mrb_value
496 mrb_funcall(mrb_state *mrb, mrb_value self, const char *name, mrb_int argc, ...)
498 mrb_value argv[MRB_FUNCALL_ARGC_MAX];
499 va_list ap;
500 mrb_sym mid = mrb_intern_cstr(mrb, name);
502 if (argc > MRB_FUNCALL_ARGC_MAX) {
503 mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
506 va_start(ap, argc);
507 for (mrb_int i = 0; i < argc; i++) {
508 argv[i] = va_arg(ap, mrb_value);
510 va_end(ap);
511 return mrb_funcall_argv(mrb, self, mid, argc, argv);
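/* Usage sketch (illustrative; `obj` is any mrb_value the caller already holds):
 * the C equivalent of `obj.between?(1, 10)`.
 *
 *   mrb_value ok = mrb_funcall(mrb, obj, "between?", 2,
 *                              mrb_fixnum_value(1), mrb_fixnum_value(10));
 */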
514 MRB_API mrb_value
515 mrb_funcall_id(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, ...)
517 mrb_value argv[MRB_FUNCALL_ARGC_MAX];
518 va_list ap;
520 if (argc > MRB_FUNCALL_ARGC_MAX) {
521 mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
524 va_start(ap, argc);
525 for (mrb_int i = 0; i < argc; i++) {
526 argv[i] = va_arg(ap, mrb_value);
528 va_end(ap);
529 return mrb_funcall_argv(mrb, self, mid, argc, argv);
532 static mrb_int
533 mrb_ci_kidx(const mrb_callinfo *ci)
535 if (ci->nk == 0) return -1;
536 return (ci->n == CALL_MAXARGS) ? 2 : ci->n + 1;
539 static inline mrb_int
540 mrb_bidx(uint8_t n, uint8_t k)
542 if (n == 15) n = 1;
543 if (k == 15) n += 1;
544 else n += k*2;
545 return n + 1; /* self + args + kargs */
548 static inline mrb_int
549 ci_bidx(mrb_callinfo *ci)
551 return mrb_bidx(ci->n, ci->nk);
554 mrb_int
555 mrb_ci_bidx(mrb_callinfo *ci)
557 return ci_bidx(ci);
560 mrb_int
561 mrb_ci_nregs(mrb_callinfo *ci)
563 const struct RProc *p;
565 if (!ci) return 4;
566 mrb_int nregs = ci_bidx(ci) + 1; /* self + args + kargs + blk */
567 p = ci->proc;
568 if (p && !MRB_PROC_CFUNC_P(p) && p->body.irep && p->body.irep->nregs > nregs) {
569 return p->body.irep->nregs;
571 return nregs;
574 mrb_value mrb_obj_missing(mrb_state *mrb, mrb_value mod);
576 static mrb_method_t
577 prepare_missing(mrb_state *mrb, mrb_callinfo *ci, mrb_value recv, mrb_sym mid, mrb_value blk, mrb_bool super)
579 mrb_sym missing = MRB_SYM(method_missing);
580 mrb_value *argv = &ci->stack[1];
581 mrb_value args;
582 mrb_method_t m;
584 /* pack positional arguments */
585 if (ci->n == 15) args = argv[0];
586 else args = mrb_ary_new_from_values(mrb, ci->n, argv);
588 if (mrb_func_basic_p(mrb, recv, missing, mrb_obj_missing)) {
589 method_missing:
590 if (super) mrb_no_method_error(mrb, mid, args, "no superclass method '%n'", mid);
591 else mrb_method_missing(mrb, mid, recv, args);
592 /* not reached */
594 if (mid != missing) {
595 ci->u.target_class = mrb_class(mrb, recv);
597 m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, missing);
598 if (MRB_METHOD_UNDEF_P(m)) goto method_missing; /* just in case */
599 stack_extend(mrb, 4);
601 argv = &ci->stack[1]; /* maybe reallocated */
602 argv[0] = args;
603 if (ci->nk == 0) {
604 argv[1] = blk;
606 else {
607 mrb_assert(ci->nk == 15);
608 argv[1] = argv[ci->n];
609 argv[2] = blk;
611 ci->n = CALL_MAXARGS;
612 /* ci->nk is already set to zero or CALL_MAXARGS */
613 mrb_ary_unshift(mrb, args, mrb_symbol_value(mid));
614 ci->mid = missing;
615 return m;
618 static void
619 funcall_args_capture(mrb_state *mrb, int stoff, mrb_int argc, const mrb_value *argv, mrb_value block, mrb_callinfo *ci)
621 if (argc < 0 || argc > INT32_MAX) {
622 mrb_raisef(mrb, E_ARGUMENT_ERROR, "negative or too big argc for funcall (%i)", argc);
625 ci->nk = 0; /* funcall does not support keyword arguments */
626 if (argc < CALL_MAXARGS) {
627 mrb_int extends = stoff + argc + 2 /* self + block */;
628 stack_extend_adjust(mrb, extends, &argv);
630 mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
631 stack_copy(args, argv, argc);
632 args[argc] = block;
633 ci->n = (uint8_t)argc;
635 else {
636 int extends = stoff + 3 /* self + splat + block */;
637 stack_extend_adjust(mrb, extends, &argv);
639 mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
640 args[0] = mrb_ary_new_from_values(mrb, argc, argv);
641 args[1] = block;
642 ci->n = CALL_MAXARGS;
646 static inline mrb_value
647 ensure_block(mrb_state *mrb, mrb_value blk)
649 if (!mrb_nil_p(blk) && !mrb_proc_p(blk)) {
650 blk = mrb_type_convert(mrb, blk, MRB_TT_PROC, MRB_SYM(to_proc));
651 /* The stack might have been reallocated during mrb_type_convert(), see #3622 */
653 return blk;
656 MRB_API mrb_value
657 mrb_funcall_with_block(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv, mrb_value blk)
659 mrb_value val;
660 int ai = mrb_gc_arena_save(mrb);
662 if (!mrb->jmp) {
663 struct mrb_jmpbuf c_jmp;
664 ptrdiff_t nth_ci = mrb->c->ci - mrb->c->cibase;
666 MRB_TRY(&c_jmp) {
667 mrb->jmp = &c_jmp;
668 /* recursive call */
669 val = mrb_funcall_with_block(mrb, self, mid, argc, argv, blk);
670 mrb->jmp = NULL;
672 MRB_CATCH(&c_jmp) { /* error */
673 while (nth_ci < (mrb->c->ci - mrb->c->cibase)) {
674 cipop(mrb);
676 mrb->jmp = 0;
677 val = mrb_obj_value(mrb->exc);
679 MRB_END_EXC(&c_jmp);
680 mrb->jmp = NULL;
682 else {
683 mrb_method_t m;
684 mrb_callinfo *ci = mrb->c->ci;
685 mrb_int n = mrb_ci_nregs(ci);
687 if (!mrb->c->stbase) {
688 stack_init(mrb);
690 if (ci - mrb->c->cibase > MRB_CALL_LEVEL_MAX) {
691 mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
693 blk = ensure_block(mrb, blk);
694 ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, 0);
695 funcall_args_capture(mrb, 0, argc, argv, blk, ci);
696 ci->u.target_class = mrb_class(mrb, self);
697 m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
698 if (MRB_METHOD_UNDEF_P(m)) {
699 m = prepare_missing(mrb, ci, self, mid, mrb_nil_value(), FALSE);
701 else {
702 ci->mid = mid;
704 ci->proc = MRB_METHOD_PROC_P(m) ? MRB_METHOD_PROC(m) : NULL;
706 if (MRB_METHOD_CFUNC_P(m)) {
707 ci->stack[0] = self;
708 val = MRB_METHOD_CFUNC(m)(mrb, self);
709 cipop(mrb);
711 else {
712 /* handle alias */
713 if (MRB_PROC_ALIAS_P(ci->proc)) {
714 ci->mid = ci->proc->body.mid;
715 ci->proc = ci->proc->upper;
717 ci->cci = CINFO_SKIP;
718 val = mrb_run(mrb, ci->proc, self);
721 mrb_gc_arena_restore(mrb, ai);
722 mrb_gc_protect(mrb, val);
723 return val;
726 MRB_API mrb_value
727 mrb_funcall_argv(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv)
729 return mrb_funcall_with_block(mrb, self, mid, argc, argv, mrb_nil_value());
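/* Usage sketch (illustrative; `ary` and `blk` are caller-held values, `blk`
 * being a Proc or nil): passing arguments from a C array, and passing a block.
 *
 *   mrb_value args[2] = { mrb_fixnum_value(1), mrb_fixnum_value(2) };
 *   mrb_value r1 = mrb_funcall_argv(mrb, ary, mrb_intern_lit(mrb, "push"), 2, args);
 *   mrb_value r2 = mrb_funcall_with_block(mrb, ary, mrb_intern_lit(mrb, "each"), 0, NULL, blk);
 */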
732 static void
733 check_method_noarg(mrb_state *mrb, const mrb_callinfo *ci)
735 mrb_int argc = ci->n == CALL_MAXARGS ? RARRAY_LEN(ci->stack[1]) : ci->n;
736 if (ci->nk > 0) {
737 mrb_value kdict = ci->stack[mrb_ci_kidx(ci)];
738 if (!(mrb_hash_p(kdict) && mrb_hash_empty_p(mrb, kdict))) {
739 argc++;
742 if (argc > 0) {
743 mrb_argnum_error(mrb, argc, 0, 0);
747 static mrb_value
748 exec_irep(mrb_state *mrb, mrb_value self, const struct RProc *p)
750 mrb_callinfo *ci = mrb->c->ci;
751 mrb_int keep, nregs;
753 ci->stack[0] = self;
754 /* handle alias */
755 if (MRB_PROC_ALIAS_P(p)) {
756 ci->mid = p->body.mid;
757 p = p->upper;
759 CI_PROC_SET(ci, p);
760 if (MRB_PROC_CFUNC_P(p)) {
761 if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
762 check_method_noarg(mrb, ci);
764 return MRB_PROC_CFUNC(p)(mrb, self);
766 nregs = p->body.irep->nregs;
767 keep = ci_bidx(ci)+1;
768 if (nregs < keep) {
769 stack_extend(mrb, keep);
771 else {
772 stack_extend(mrb, nregs);
773 stack_clear(ci->stack+keep, nregs-keep);
776 cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);
778 return self;
781 mrb_value
782 mrb_exec_irep(mrb_state *mrb, mrb_value self, struct RProc *p)
784 mrb_callinfo *ci = mrb->c->ci;
785 if (ci->cci == CINFO_NONE) {
786 return exec_irep(mrb, self, p);
788 else {
789 mrb_value ret;
790 if (MRB_PROC_CFUNC_P(p)) {
791 if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
792 check_method_noarg(mrb, ci);
794 cipush(mrb, 0, CINFO_DIRECT, CI_TARGET_CLASS(ci), p, NULL, ci->mid, ci->n|(ci->nk<<4));
795 ret = MRB_PROC_CFUNC(p)(mrb, self);
796 cipop(mrb);
798 else {
799 mrb_int keep = ci_bidx(ci) + 1; /* receiver + block */
800 ret = mrb_top_run(mrb, p, self, keep);
802 if (mrb->exc && mrb->jmp) {
803 mrb_exc_raise(mrb, mrb_obj_value(mrb->exc));
805 return ret;
809 /* 15.3.1.3.4 */
810 /* 15.3.1.3.44 */
812 * call-seq:
813 * obj.send(symbol [, args...]) -> obj
814 * obj.__send__(symbol [, args...]) -> obj
816 * Invokes the method identified by _symbol_, passing it any
817 * arguments specified. You can use <code>__send__</code> if the name
818 * +send+ clashes with an existing method in _obj_.
820 * class Klass
821 * def hello(*args)
822 * "Hello " + args.join(' ')
823 * end
824 * end
825 * k = Klass.new
826 * k.send :hello, "gentle", "readers" #=> "Hello gentle readers"
828 mrb_value
829 mrb_f_send(mrb_state *mrb, mrb_value self)
831 mrb_sym name;
832 mrb_value block, *regs;
833 mrb_method_t m;
834 struct RClass *c;
835 mrb_callinfo *ci = mrb->c->ci;
836 int n = ci->n;
838 if (ci->cci > CINFO_NONE) {
839 funcall:;
840 const mrb_value *argv;
841 mrb_int argc;
842 mrb_get_args(mrb, "n*&", &name, &argv, &argc, &block);
843 return mrb_funcall_with_block(mrb, self, name, argc, argv, block);
846 regs = mrb->c->ci->stack+1;
848 if (n == 0) {
849 argnum_error:
850 mrb_argnum_error(mrb, 0, 1, -1);
852 else if (n == 15) {
853 if (RARRAY_LEN(regs[0]) == 0) goto argnum_error;
854 name = mrb_obj_to_sym(mrb, RARRAY_PTR(regs[0])[0]);
856 else {
857 name = mrb_obj_to_sym(mrb, regs[0]);
860 c = mrb_class(mrb, self);
861 m = mrb_vm_find_method(mrb, c, &c, name);
862 if (MRB_METHOD_UNDEF_P(m)) { /* call method_missing */
863 goto funcall;
866 ci->mid = name;
867 ci->u.target_class = c;
868 /* remove first symbol from arguments */
869 if (n == 15) { /* variable length arguments */
870 regs[0] = mrb_ary_subseq(mrb, regs[0], 1, RARRAY_LEN(regs[0]) - 1);
872 else { /* n > 0 */
873 for (int i=0; i<n; i++) {
874 regs[i] = regs[i+1];
876 regs[n] = regs[n+1]; /* copy kdict or block */
877 if (ci->nk > 0) {
878 regs[n+1] = regs[n+2]; /* copy block */
880 ci->n--;
883 const struct RProc *p;
884 if (MRB_METHOD_PROC_P(m)) {
885 p = MRB_METHOD_PROC(m);
886 /* handle alias */
887 if (MRB_PROC_ALIAS_P(p)) {
888 ci->mid = p->body.mid;
889 p = p->upper;
891 CI_PROC_SET(ci, p);
893 if (MRB_METHOD_CFUNC_P(m)) {
894 if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
895 check_method_noarg(mrb, ci);
897 return MRB_METHOD_CFUNC(m)(mrb, self);
899 return exec_irep(mrb, self, p);
902 static void
903 check_block(mrb_state *mrb, mrb_value blk)
905 if (mrb_nil_p(blk)) {
906 mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
908 if (!mrb_proc_p(blk)) {
909 mrb_raise(mrb, E_TYPE_ERROR, "not a block");
913 static mrb_value
914 eval_under(mrb_state *mrb, mrb_value self, mrb_value blk, struct RClass *c)
916 struct RProc *p;
917 mrb_callinfo *ci;
918 int nregs;
920 check_block(mrb, blk);
921 ci = mrb->c->ci;
922 if (ci->cci == CINFO_DIRECT) {
923 return mrb_yield_with_class(mrb, blk, 1, &self, self, c);
925 ci->u.target_class = c;
926 p = mrb_proc_ptr(blk);
927 /* just in case irep is NULL; #6065 */
928 if (p->body.irep == NULL) return mrb_nil_value();
929 CI_PROC_SET(ci, p);
930 ci->n = 1;
931 ci->nk = 0;
932 ci->mid = ci[-1].mid;
933 if (MRB_PROC_CFUNC_P(p)) {
934 stack_extend(mrb, 4);
935 mrb->c->ci->stack[0] = self;
936 mrb->c->ci->stack[1] = self;
937 mrb->c->ci->stack[2] = mrb_nil_value();
938 return MRB_PROC_CFUNC(p)(mrb, self);
940 nregs = p->body.irep->nregs;
941 if (nregs < 4) nregs = 4;
942 stack_extend(mrb, nregs);
943 mrb->c->ci->stack[0] = self;
944 mrb->c->ci->stack[1] = self;
945 stack_clear(mrb->c->ci->stack+2, nregs-2);
946 ci = cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);
948 return self;
951 /* 15.2.2.4.35 */
953 * call-seq:
954 * mod.class_eval {| | block } -> obj
955 * mod.module_eval {| | block } -> obj
957 * Evaluates block in the context of _mod_. This can
958 * be used to add methods to a class. <code>module_eval</code> returns
959 * the result of evaluating its argument.
961 mrb_value
962 mrb_mod_module_eval(mrb_state *mrb, mrb_value mod)
964 mrb_value a, b;
966 if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
967 mrb_raise(mrb, E_NOTIMP_ERROR, "module_eval/class_eval with string not implemented");
969 return eval_under(mrb, mod, b, mrb_class_ptr(mod));
972 /* 15.3.1.3.18 */
974 * call-seq:
975 * obj.instance_eval {| | block } -> obj
977 * Evaluates the given block within the context of the receiver (_obj_).
978 * In order to set the context, the variable +self+ is set to _obj_ while
979 * the code is executing, giving the code access to _obj_'s
980 * instance variables. In the version of <code>instance_eval</code>
981 * that takes a +String+, the optional second and third
982 * parameters supply a filename and starting line number that are used
983 * when reporting compilation errors.
985 * class KlassWithSecret
986 * def initialize
987 * @secret = 99
988 * end
989 * end
990 * k = KlassWithSecret.new
991 * k.instance_eval { @secret } #=> 99
993 mrb_value
994 mrb_obj_instance_eval(mrb_state *mrb, mrb_value self)
996 mrb_value a, b;
998 if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
999 mrb_raise(mrb, E_NOTIMP_ERROR, "instance_eval with string not implemented");
1001 return eval_under(mrb, self, b, mrb_singleton_class_ptr(mrb, self));
1004 MRB_API mrb_value
1005 mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c)
1007 struct RProc *p;
1008 mrb_sym mid;
1009 mrb_callinfo *ci;
1010 mrb_value val;
1011 mrb_int n;
1013 check_block(mrb, b);
1014 ci = mrb->c->ci;
1015 n = mrb_ci_nregs(ci);
1016 p = mrb_proc_ptr(b);
1017 if (MRB_PROC_ENV_P(p)) {
1018 mid = p->e.env->mid;
1020 else {
1021 mid = ci->mid;
1023 ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, NULL, mid, 0);
1024 funcall_args_capture(mrb, 0, argc, argv, mrb_nil_value(), ci);
1025 ci->u.target_class = c;
1026 ci->proc = p;
1028 if (MRB_PROC_CFUNC_P(p)) {
1029 ci->stack[0] = self;
1030 val = MRB_PROC_CFUNC(p)(mrb, self);
1031 cipop(mrb);
1033 else {
1034 ci->cci = CINFO_SKIP;
1035 val = mrb_run(mrb, p, self);
1037 return val;
1040 MRB_API mrb_value
1041 mrb_yield_argv(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv)
1043 struct RProc *p = mrb_proc_ptr(b);
1044 struct RClass *tc;
1045 mrb_value self = mrb_proc_get_self(mrb, p, &tc);
1047 return mrb_yield_with_class(mrb, b, argc, argv, self, tc);
1050 MRB_API mrb_value
1051 mrb_yield(mrb_state *mrb, mrb_value b, mrb_value arg)
1053 struct RProc *p = mrb_proc_ptr(b);
1054 struct RClass *tc;
1055 mrb_value self = mrb_proc_get_self(mrb, p, &tc);
1057 return mrb_yield_with_class(mrb, b, 1, &arg, self, tc);
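/* Usage sketch (illustrative; a hypothetical C-defined method): yield one value
 * to the block that the method received.
 *
 *   static mrb_value
 *   my_method(mrb_state *mrb, mrb_value self)
 *   {
 *     mrb_value blk;
 *     mrb_get_args(mrb, "&", &blk);
 *     if (mrb_nil_p(blk)) mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
 *     return mrb_yield(mrb, blk, mrb_fixnum_value(42));
 *   }
 */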
1060 mrb_value
1061 mrb_yield_cont(mrb_state *mrb, mrb_value b, mrb_value self, mrb_int argc, const mrb_value *argv)
1063 struct RProc *p;
1064 mrb_callinfo *ci;
1066 check_block(mrb, b);
1067 p = mrb_proc_ptr(b);
1068 ci = mrb->c->ci;
1070 stack_extend_adjust(mrb, 4, &argv);
1071 mrb->c->ci->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
1072 mrb->c->ci->stack[2] = mrb_nil_value();
1073 mrb->c->ci->stack[3] = mrb_nil_value();
1074 ci->n = 15;
1075 ci->nk = 0;
1076 return exec_irep(mrb, self, p);
1079 #define RBREAK_TAG_FOREACH(f) \
1080 f(RBREAK_TAG_BREAK, 0) \
1081 f(RBREAK_TAG_JUMP, 1) \
1082 f(RBREAK_TAG_STOP, 2)
1084 #define RBREAK_TAG_DEFINE(tag, i) tag = i,
1085 enum {
1086 RBREAK_TAG_FOREACH(RBREAK_TAG_DEFINE)
1088 #undef RBREAK_TAG_DEFINE
1090 #define RBREAK_TAG_BIT 3
1091 #define RBREAK_TAG_BIT_OFF 8
1092 #define RBREAK_TAG_MASK (~(~UINT32_C(0) << RBREAK_TAG_BIT))
1094 static inline uint32_t
1095 mrb_break_tag_get(struct RBreak *brk)
1097 return (brk->flags >> RBREAK_TAG_BIT_OFF) & RBREAK_TAG_MASK;
1100 static inline void
1101 mrb_break_tag_set(struct RBreak *brk, uint32_t tag)
1103 brk->flags &= ~(RBREAK_TAG_MASK << RBREAK_TAG_BIT_OFF);
1104 brk->flags |= (tag & RBREAK_TAG_MASK) << RBREAK_TAG_BIT_OFF;
1107 static struct RBreak*
1108 break_new(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
1110 mrb_assert((size_t)(return_ci - mrb->c->cibase) <= (size_t)(mrb->c->ci - mrb->c->cibase));
1112 struct RBreak *brk = MRB_OBJ_ALLOC(mrb, MRB_TT_BREAK, NULL);
1113 brk->ci_break_index = return_ci - mrb->c->cibase;
1114 mrb_break_value_set(brk, val);
1115 mrb_break_tag_set(brk, tag);
1117 return brk;
1120 #define MRB_CATCH_FILTER_RESCUE (UINT32_C(1) << MRB_CATCH_RESCUE)
1121 #define MRB_CATCH_FILTER_ENSURE (UINT32_C(1) << MRB_CATCH_ENSURE)
1122 #define MRB_CATCH_FILTER_ALL (MRB_CATCH_FILTER_RESCUE | MRB_CATCH_FILTER_ENSURE)
1124 static const struct mrb_irep_catch_handler *
1125 catch_handler_find(const mrb_irep *irep, const mrb_code *pc, uint32_t filter)
1127 ptrdiff_t xpc;
1128 size_t cnt;
1129 const struct mrb_irep_catch_handler *e;
1131 /* The comparison operators use `>` and `<=` because pc already points to the next instruction */
1132 #define catch_cover_p(pc, beg, end) ((pc) > (ptrdiff_t)(beg) && (pc) <= (ptrdiff_t)(end))
1134 mrb_assert(irep && irep->clen > 0);
1135 xpc = pc - irep->iseq;
1136 /* If it retries at the top level, pc will be 0, so check with -1 as the start position */
1137 mrb_assert(catch_cover_p(xpc, -1, irep->ilen));
1138 if (!catch_cover_p(xpc, -1, irep->ilen)) return NULL;
1140 /* Currently uses a simple linear search to avoid processing complexity. */
1141 cnt = irep->clen;
1142 e = mrb_irep_catch_handler_table(irep) + cnt - 1;
1143 for (; cnt > 0; cnt--, e--) {
1144 if (((UINT32_C(1) << e->type) & filter) &&
1145 catch_cover_p(xpc, mrb_irep_catch_handler_unpack(e->begin), mrb_irep_catch_handler_unpack(e->end))) {
1146 return e;
1150 #undef catch_cover_p
1152 return NULL;
1155 typedef enum {
1156 LOCALJUMP_ERROR_RETURN = 0,
1157 LOCALJUMP_ERROR_BREAK = 1,
1158 LOCALJUMP_ERROR_YIELD = 2
1159 } localjump_error_kind;
1161 static void
1162 localjump_error(mrb_state *mrb, localjump_error_kind kind)
1164 char kind_str[3][7] = { "return", "break", "yield" };
1165 char kind_str_len[] = { 6, 5, 5 };
1166 static const char lead[] = "unexpected ";
1167 mrb_value msg;
1168 mrb_value exc;
1170 msg = mrb_str_new_capa(mrb, sizeof(lead) + 7);
1171 mrb_str_cat(mrb, msg, lead, sizeof(lead) - 1);
1172 mrb_str_cat(mrb, msg, kind_str[kind], kind_str_len[kind]);
1173 exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg);
1174 mrb_exc_set(mrb, exc);
1177 #define RAISE_EXC(mrb, exc) do { \
1178 mrb_value exc_value = (exc); \
1179 mrb_exc_set(mrb, exc_value); \
1180 goto L_RAISE; \
1181 } while (0)
1183 #define RAISE_LIT(mrb, c, str) RAISE_EXC(mrb, mrb_exc_new_lit(mrb, c, str))
1184 #define RAISE_FORMAT(mrb, c, fmt, ...) RAISE_EXC(mrb, mrb_exc_new_str(mrb, c, mrb_format(mrb, fmt, __VA_ARGS__)))
1186 static void
1187 argnum_error(mrb_state *mrb, mrb_int num)
1189 mrb_value exc;
1190 mrb_value str;
1191 mrb_int argc = mrb->c->ci->n;
1193 if (argc == 15) {
1194 mrb_value args = mrb->c->ci->stack[1];
1195 if (mrb_array_p(args)) {
1196 argc = RARRAY_LEN(args);
1199 if (argc == 0 && mrb->c->ci->nk != 0 && !mrb_hash_empty_p(mrb, mrb->c->ci->stack[1])) {
1200 argc++;
1202 str = mrb_format(mrb, "wrong number of arguments (given %i, expected %i)", argc, num);
1203 exc = mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str);
1204 mrb_exc_set(mrb, exc);
1207 static mrb_bool
1208 break_tag_p(struct RBreak *brk, uint32_t tag)
1210 return (brk != NULL && brk->tt == MRB_TT_BREAK) ? TRUE : FALSE;
1213 static void
1214 prepare_tagged_break(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
1216 if (break_tag_p((struct RBreak*)mrb->exc, tag)) {
1217 mrb_break_tag_set((struct RBreak*)mrb->exc, tag);
1219 else {
1220 mrb->exc = (struct RObject*)break_new(mrb, tag, return_ci, val);
1224 #define THROW_TAGGED_BREAK(mrb, tag, return_ci, val) \
1225 do { \
1226 prepare_tagged_break(mrb, tag, return_ci, val); \
1227 goto L_CATCH_TAGGED_BREAK; \
1228 } while (0)
1230 #define UNWIND_ENSURE(mrb, ci, pc, tag, return_ci, val) \
1231 do { \
1232 if ((proc = (ci)->proc) && !MRB_PROC_CFUNC_P(proc) && (irep = proc->body.irep) && irep->clen > 0 && \
1233 (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) { \
1234 THROW_TAGGED_BREAK(mrb, tag, return_ci, val); \
1236 } while (0)
1239 * CHECKPOINT_RESTORE(tag) {
1240 * This part is executed when a jump arrives via an RBreak with the same "tag" (it is not executed on the first pass).
1241 * Put the code required for the subsequent processing here (variable initialization, etc.).
1243 * CHECKPOINT_MAIN(tag) {
1244 * This part is always executed.
1246 * CHECKPOINT_END(tag);
1248 * ...
1250 * // Jump to CHECKPOINT_RESTORE with the same "tag".
1251 * goto CHECKPOINT_LABEL_MAKE(tag);
1254 #define CHECKPOINT_LABEL_MAKE(tag) L_CHECKPOINT_ ## tag
1256 #define CHECKPOINT_RESTORE(tag) \
1257 do { \
1258 if (FALSE) { \
1259 CHECKPOINT_LABEL_MAKE(tag): \
1260 do {
1262 #define CHECKPOINT_MAIN(tag) \
1263 } while (0); \
1265 do {
1267 #define CHECKPOINT_END(tag) \
1268 } while (0); \
1269 } while (0)
1271 #ifdef MRB_USE_DEBUG_HOOK
1272 #define CODE_FETCH_HOOK(mrb, irep, pc, regs) if ((mrb)->code_fetch_hook) (mrb)->code_fetch_hook((mrb), (irep), (pc), (regs));
1273 #else
1274 #define CODE_FETCH_HOOK(mrb, irep, pc, regs)
1275 #endif
1277 #ifdef MRB_BYTECODE_DECODE_OPTION
1278 #define BYTECODE_DECODER(x) ((mrb)->bytecode_decoder)?(mrb)->bytecode_decoder((mrb), (x)):(x)
1279 #else
1280 #define BYTECODE_DECODER(x) (x)
1281 #endif
1283 #ifndef MRB_USE_VM_SWITCH_DISPATCH
1284 #if !defined __GNUC__ && !defined __clang__ && !defined __INTEL_COMPILER
1285 #define MRB_USE_VM_SWITCH_DISPATCH
1286 #endif
1287 #endif /* ifndef MRB_USE_VM_SWITCH_DISPATCH */
1289 #ifdef MRB_USE_VM_SWITCH_DISPATCH
1291 #define INIT_DISPATCH for (;;) { insn = BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); switch (insn) {
1292 #define CASE(insn,ops) case insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
1293 #define NEXT goto L_END_DISPATCH
1294 #define JUMP NEXT
1295 #define END_DISPATCH L_END_DISPATCH:;}}
1297 #else
1299 #define INIT_DISPATCH JUMP; return mrb_nil_value();
1300 #define CASE(insn,ops) L_ ## insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
1301 #define NEXT insn=BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[insn]
1302 #define JUMP NEXT
1304 #define END_DISPATCH
1306 #endif
1308 MRB_API mrb_value
1309 mrb_vm_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
1311 const mrb_irep *irep = proc->body.irep;
1312 mrb_value result;
1313 struct mrb_context *c = mrb->c;
1314 ptrdiff_t cioff = c->ci - c->cibase;
1315 mrb_int nregs = irep->nregs;
1317 if (!c->stbase) {
1318 stack_init(mrb);
1320 if (stack_keep > nregs)
1321 nregs = stack_keep;
1322 else {
1323 struct REnv *e = CI_ENV(mrb->c->ci);
1324 if (e && (stack_keep == 0 || irep->nlocals < MRB_ENV_LEN(e))) {
1325 ci_env_set(mrb->c->ci, NULL);
1326 mrb_env_unshare(mrb, e, FALSE);
1329 stack_extend(mrb, nregs);
1330 stack_clear(c->ci->stack + stack_keep, nregs - stack_keep);
1331 c->ci->stack[0] = self;
1332 result = mrb_vm_exec(mrb, proc, irep->iseq);
1333 if (mrb->c != c) {
1334 if (mrb->c->fib) {
1335 mrb_write_barrier(mrb, (struct RBasic*)mrb->c->fib);
1337 mrb->c = c;
1339 else if (c->ci - c->cibase > cioff) {
1340 c->ci = c->cibase + cioff;
1342 return result;
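/* Usage sketch (illustrative): mrb_vm_run() is normally reached through
 * mrb_top_run() or mrb_load_string(); a direct call needs a proc, for example
 * one wrapping an irep loaded from a compiled .mrb image (`bin` is assumed to
 * point at such an image):
 *
 *   mrb_irep *irep = mrb_read_irep(mrb, bin);
 *   struct RProc *p = mrb_proc_new(mrb, irep);
 *   mrb_value result = mrb_vm_run(mrb, p, mrb_obj_value(mrb->top_self), 0);
 */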
1345 static struct RClass*
1346 check_target_class(mrb_state *mrb)
1348 struct RClass *target = CI_TARGET_CLASS(mrb->c->ci);
1349 if (!target) {
1350 mrb_raise(mrb, E_TYPE_ERROR, "no class/module to add method");
1352 return target;
1355 #define regs (mrb->c->ci->stack)
1357 static mrb_value
1358 hash_new_from_regs(mrb_state *mrb, mrb_int argc, mrb_int idx)
1360 mrb_value hash = mrb_hash_new_capa(mrb, argc);
1361 while (argc--) {
1362 mrb_hash_set(mrb, hash, regs[idx+0], regs[idx+1]);
1363 idx += 2;
1365 return hash;
1368 #define ary_new_from_regs(mrb, argc, idx) mrb_ary_new_from_values(mrb, (argc), &regs[idx]);
1370 MRB_API mrb_value
1371 mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
1373 /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
1374 const mrb_irep *irep = proc->body.irep;
1375 const mrb_pool_value *pool = irep->pool;
1376 const mrb_sym *syms = irep->syms;
1377 mrb_code insn;
1378 int ai = mrb_gc_arena_save(mrb);
1379 struct mrb_jmpbuf *prev_jmp = mrb->jmp;
1380 struct mrb_jmpbuf c_jmp;
1381 uint32_t a;
1382 uint16_t b;
1383 uint16_t c;
1384 mrb_sym mid;
1385 const struct mrb_irep_catch_handler *ch;
1387 #ifndef MRB_USE_VM_SWITCH_DISPATCH
1388 static const void * const optable[] = {
1389 #define OPCODE(x,_) &&L_OP_ ## x,
1390 #include "mruby/ops.h"
1391 #undef OPCODE
1393 #endif
1395 mrb_bool exc_catched = FALSE;
1396 RETRY_TRY_BLOCK:
1398 MRB_TRY(&c_jmp) {
1400 if (exc_catched) {
1401 exc_catched = FALSE;
1402 mrb_gc_arena_restore(mrb, ai);
1403 if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
1404 goto L_BREAK;
1405 goto L_RAISE;
1407 mrb->jmp = &c_jmp;
1408 CI_PROC_SET(mrb->c->ci, proc);
1410 INIT_DISPATCH {
1411 CASE(OP_NOP, Z) {
1412 /* do nothing */
1413 NEXT;
1416 CASE(OP_MOVE, BB) {
1417 regs[a] = regs[b];
1418 NEXT;
1421 CASE(OP_LOADL, BB) {
1422 switch (pool[b].tt) { /* number */
1423 case IREP_TT_INT32:
1424 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
1425 break;
1426 case IREP_TT_INT64:
1427 #if defined(MRB_INT64)
1428 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
1429 break;
1430 #else
1431 #if defined(MRB_64BIT)
1432 if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
1433 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
1434 break;
1436 #endif
1437 goto L_INT_OVERFLOW;
1438 #endif
1439 case IREP_TT_BIGINT:
1440 #ifdef MRB_USE_BIGINT
1442 const char *s = pool[b].u.str;
1443 regs[a] = mrb_bint_new_str(mrb, s+2, (uint8_t)s[0], s[1]);
1445 break;
1446 #else
1447 goto L_INT_OVERFLOW;
1448 #endif
1449 #ifndef MRB_NO_FLOAT
1450 case IREP_TT_FLOAT:
1451 regs[a] = mrb_float_value(mrb, pool[b].u.f);
1452 break;
1453 #endif
1454 default:
1455 /* should not happen (tt:string) */
1456 regs[a] = mrb_nil_value();
1457 break;
1459 NEXT;
1462 CASE(OP_LOADI, BB) {
1463 SET_FIXNUM_VALUE(regs[a], b);
1464 NEXT;
1467 CASE(OP_LOADINEG, BB) {
1468 SET_FIXNUM_VALUE(regs[a], -b);
1469 NEXT;
1472 CASE(OP_LOADI__1,B) goto L_LOADI;
1473 CASE(OP_LOADI_0,B) goto L_LOADI;
1474 CASE(OP_LOADI_1,B) goto L_LOADI;
1475 CASE(OP_LOADI_2,B) goto L_LOADI;
1476 CASE(OP_LOADI_3,B) goto L_LOADI;
1477 CASE(OP_LOADI_4,B) goto L_LOADI;
1478 CASE(OP_LOADI_5,B) goto L_LOADI;
1479 CASE(OP_LOADI_6,B) goto L_LOADI;
1480 CASE(OP_LOADI_7, B) {
1481 L_LOADI:
1482 SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
1483 NEXT;
1486 CASE(OP_LOADI16, BS) {
1487 SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
1488 NEXT;
1491 CASE(OP_LOADI32, BSS) {
1492 SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
1493 NEXT;
1496 CASE(OP_LOADSYM, BB) {
1497 SET_SYM_VALUE(regs[a], syms[b]);
1498 NEXT;
1501 CASE(OP_LOADNIL, B) {
1502 SET_NIL_VALUE(regs[a]);
1503 NEXT;
1506 CASE(OP_LOADSELF, B) {
1507 regs[a] = regs[0];
1508 NEXT;
1511 CASE(OP_LOADT, B) {
1512 SET_TRUE_VALUE(regs[a]);
1513 NEXT;
1516 CASE(OP_LOADF, B) {
1517 SET_FALSE_VALUE(regs[a]);
1518 NEXT;
1521 CASE(OP_GETGV, BB) {
1522 mrb_value val = mrb_gv_get(mrb, syms[b]);
1523 regs[a] = val;
1524 NEXT;
1527 CASE(OP_SETGV, BB) {
1528 mrb_gv_set(mrb, syms[b], regs[a]);
1529 NEXT;
1532 CASE(OP_GETSV, BB) {
1533 mrb_value val = mrb_vm_special_get(mrb, syms[b]);
1534 regs[a] = val;
1535 NEXT;
1538 CASE(OP_SETSV, BB) {
1539 mrb_vm_special_set(mrb, syms[b], regs[a]);
1540 NEXT;
1543 CASE(OP_GETIV, BB) {
1544 regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
1545 NEXT;
1548 CASE(OP_SETIV, BB) {
1549 mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
1550 NEXT;
1553 CASE(OP_GETCV, BB) {
1554 mrb_value val;
1555 val = mrb_vm_cv_get(mrb, syms[b]);
1556 regs[a] = val;
1557 NEXT;
1560 CASE(OP_SETCV, BB) {
1561 mrb_vm_cv_set(mrb, syms[b], regs[a]);
1562 NEXT;
1565 CASE(OP_GETIDX, B) {
1566 mrb_value va = regs[a], vb = regs[a+1];
1567 switch (mrb_type(va)) {
1568 case MRB_TT_ARRAY:
1569 if (!mrb_integer_p(vb)) goto getidx_fallback;
1570 else {
1571 mrb_int idx = mrb_integer(vb);
1572 if (0 <= idx && idx < RARRAY_LEN(va)) {
1573 regs[a] = RARRAY_PTR(va)[idx];
1575 else {
1576 regs[a] = mrb_ary_entry(va, idx);
1579 break;
1580 case MRB_TT_HASH:
1581 va = mrb_hash_get(mrb, va, vb);
1582 regs[a] = va;
1583 break;
1584 case MRB_TT_STRING:
1585 switch (mrb_type(vb)) {
1586 case MRB_TT_INTEGER:
1587 case MRB_TT_STRING:
1588 case MRB_TT_RANGE:
1589 va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
1590 regs[a] = va;
1591 break;
1592 default:
1593 goto getidx_fallback;
1595 break;
1596 default:
1597 getidx_fallback:
1598 mid = MRB_OPSYM(aref);
1599 goto L_SEND_SYM;
1601 NEXT;
1604 CASE(OP_SETIDX, B) {
1605 c = 2;
1606 mid = MRB_OPSYM(aset);
1607 SET_NIL_VALUE(regs[a+3]);
1608 goto L_SENDB_SYM;
1611 CASE(OP_GETCONST, BB) {
1612 mrb_value v = mrb_vm_const_get(mrb, syms[b]);
1613 regs[a] = v;
1614 NEXT;
1617 CASE(OP_SETCONST, BB) {
1618 mrb_vm_const_set(mrb, syms[b], regs[a]);
1619 NEXT;
1622 CASE(OP_GETMCNST, BB) {
1623 mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
1624 regs[a] = v;
1625 NEXT;
1628 CASE(OP_SETMCNST, BB) {
1629 mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
1630 NEXT;
1633 CASE(OP_GETUPVAR, BBB) {
1634 struct REnv *e = uvenv(mrb, c);
1636 if (e && b < MRB_ENV_LEN(e)) {
1637 regs[a] = e->stack[b];
1639 else {
1640 regs[a] = mrb_nil_value();
1642 NEXT;
1645 CASE(OP_SETUPVAR, BBB) {
1646 struct REnv *e = uvenv(mrb, c);
1648 if (e) {
1649 if (b < MRB_ENV_LEN(e)) {
1650 e->stack[b] = regs[a];
1651 mrb_write_barrier(mrb, (struct RBasic*)e);
1654 NEXT;
1657 CASE(OP_JMP, S) {
1658 pc += (int16_t)a;
1659 JUMP;
1661 CASE(OP_JMPIF, BS) {
1662 if (mrb_test(regs[a])) {
1663 pc += (int16_t)b;
1664 JUMP;
1666 NEXT;
1668 CASE(OP_JMPNOT, BS) {
1669 if (!mrb_test(regs[a])) {
1670 pc += (int16_t)b;
1671 JUMP;
1673 NEXT;
1675 CASE(OP_JMPNIL, BS) {
1676 if (mrb_nil_p(regs[a])) {
1677 pc += (int16_t)b;
1678 JUMP;
1680 NEXT;
1683 CASE(OP_JMPUW, S) {
1684 a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
1685 CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
1686 struct RBreak *brk = (struct RBreak*)mrb->exc;
1687 mrb_value target = mrb_break_value_get(brk);
1688 mrb_assert(mrb_integer_p(target));
1689 a = (uint32_t)mrb_integer(target);
1690 mrb_assert(a >= 0 && a < irep->ilen);
1692 CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
1693 if (irep->clen > 0 &&
1694 (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) {
1695 /* avoiding a jump from a catch handler into the same handler */
1696 if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
1697 THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, mrb->c->ci, mrb_fixnum_value(a));
1701 CHECKPOINT_END(RBREAK_TAG_JUMP);
1703 mrb->exc = NULL; /* clear break object */
1704 pc = irep->iseq + a;
1705 JUMP;
1708 CASE(OP_EXCEPT, B) {
1709 mrb_value exc;
1711 if (mrb->exc == NULL) {
1712 exc = mrb_nil_value();
1714 else {
1715 switch (mrb->exc->tt) {
1716 case MRB_TT_BREAK:
1717 case MRB_TT_EXCEPTION:
1718 exc = mrb_obj_value(mrb->exc);
1719 break;
1720 default:
1721 mrb_assert(!"bad mrb_type");
1722 exc = mrb_nil_value();
1723 break;
1725 mrb->exc = NULL;
1727 regs[a] = exc;
1728 NEXT;
1730 CASE(OP_RESCUE, BB) {
1731 mrb_value exc = regs[a]; /* exc on stack */
1732 mrb_value e = regs[b];
1733 struct RClass *ec;
1735 switch (mrb_type(e)) {
1736 case MRB_TT_CLASS:
1737 case MRB_TT_MODULE:
1738 break;
1739 default:
1740 RAISE_LIT(mrb, E_TYPE_ERROR, "class or module required for rescue clause");
1742 ec = mrb_class_ptr(e);
1743 regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
1744 NEXT;
1747 CASE(OP_RAISEIF, B) {
1748 mrb_value exc;
1749 exc = regs[a];
1750 if (mrb_nil_p(exc)) {
1751 mrb->exc = NULL;
1753 else if (mrb_break_p(exc)) {
1754 struct RBreak *brk;
1755 mrb->exc = mrb_obj_ptr(exc);
1756 L_BREAK:
1757 brk = (struct RBreak*)mrb->exc;
1758 switch (mrb_break_tag_get(brk)) {
1759 #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
1760 RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
1761 #undef DISPATCH_CHECKPOINTS
1762 default:
1763 mrb_assert(!"wrong break tag");
1766 else {
1767 mrb_callinfo *ci;
1768 mrb_exc_set(mrb, exc);
1769 L_RAISE:
1770 ci = mrb->c->ci;
1771 while (!(proc = ci->proc) || MRB_PROC_CFUNC_P(ci->proc) || !(irep = proc->body.irep) || irep->clen < 1 ||
1772 (ch = catch_handler_find(irep, ci->pc, MRB_CATCH_FILTER_ALL)) == NULL) {
1773 if (ci != mrb->c->cibase) {
1774 ci = cipop(mrb);
1775 if (ci[1].cci == CINFO_SKIP) {
1776 mrb_assert(prev_jmp != NULL);
1777 mrb->jmp = prev_jmp;
1778 MRB_THROW(prev_jmp);
1781 else if (mrb->c == mrb->root_c) {
1782 mrb->c->ci->stack = mrb->c->stbase;
1783 mrb->jmp = prev_jmp;
1784 return mrb_obj_value(mrb->exc);
1786 else {
1787 struct mrb_context *c = mrb->c;
1789 c->status = MRB_FIBER_TERMINATED;
1790 mrb->c = c->prev;
1791 if (!mrb->c) mrb->c = mrb->root_c;
1792 else c->prev = NULL;
1793 if (!c->vmexec) goto L_RAISE;
1794 mrb->jmp = prev_jmp;
1795 if (!prev_jmp) return mrb_obj_value(mrb->exc);
1796 MRB_THROW(prev_jmp);
1800 if (FALSE) {
1801 L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
1802 ci = mrb->c->ci;
1804 proc = ci->proc;
1805 irep = proc->body.irep;
1806 pool = irep->pool;
1807 syms = irep->syms;
1808 stack_extend(mrb, irep->nregs);
1809 pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
1811 NEXT;
1814 CASE(OP_SSEND, BBB) {
1815 regs[a] = regs[0];
1816 insn = OP_SEND;
1818 goto L_SENDB;
1820 CASE(OP_SSENDB, BBB) {
1821 regs[a] = regs[0];
1823 goto L_SENDB;
1825 CASE(OP_SEND, BBB)
1826 goto L_SENDB;
1828 L_SEND_SYM:
1829 c = 1;
1830 /* push nil after arguments */
1831 SET_NIL_VALUE(regs[a+2]);
1832 goto L_SENDB_SYM;
1834 CASE(OP_SENDB, BBB)
1835 L_SENDB:
1836 mid = syms[b];
1837 L_SENDB_SYM:
1839 mrb_callinfo *ci;
1840 mrb_method_t m;
1841 mrb_value recv, blk;
1842 int n = c&0xf;
1843 int nk = (c>>4)&0xf;
1844 mrb_int bidx = a + mrb_bidx(n,nk);
1845 mrb_int new_bidx = bidx;
1847 if (nk == CALL_MAXARGS) {
1848 mrb_ensure_hash_type(mrb, regs[a+(n==CALL_MAXARGS?1:n)+1]);
1850 else if (nk > 0) { /* pack keyword arguments */
1851 mrb_int kidx = a+(n==CALL_MAXARGS?1:n)+1;
1852 mrb_value kdict = hash_new_from_regs(mrb, nk, kidx);
1853 regs[kidx] = kdict;
1854 nk = CALL_MAXARGS;
1855 c = n | (nk<<4);
1856 new_bidx = a+mrb_bidx(n, nk);
1859 mrb_assert(bidx < irep->nregs);
1860 if (insn == OP_SEND) {
1861 /* clear block argument */
1862 SET_NIL_VALUE(regs[new_bidx]);
1863 SET_NIL_VALUE(blk);
1865 else {
1866 blk = ensure_block(mrb, regs[bidx]);
1867 regs[new_bidx] = blk;
1870 ci = cipush(mrb, a, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, c);
1871 recv = regs[0];
1872 ci->u.target_class = (insn == OP_SUPER) ? CI_TARGET_CLASS(ci - 1)->super : mrb_class(mrb, recv);
1873 m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
1874 if (MRB_METHOD_UNDEF_P(m)) {
1875 m = prepare_missing(mrb, ci, recv, mid, blk, (insn == OP_SUPER));
1877 else {
1878 ci->mid = mid;
1880 ci->cci = CINFO_NONE;
1882 if (MRB_METHOD_PROC_P(m)) {
1883 const struct RProc *p = MRB_METHOD_PROC(m);
1884 /* handle alias */
1885 if (MRB_PROC_ALIAS_P(p)) {
1886 ci->mid = p->body.mid;
1887 p = p->upper;
1889 CI_PROC_SET(ci, p);
1890 if (!MRB_PROC_CFUNC_P(p)) {
1891 /* setup environment for calling method */
1892 proc = p;
1893 irep = proc->body.irep;
1894 pool = irep->pool;
1895 syms = irep->syms;
1896 stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
1897 pc = irep->iseq;
1898 JUMP;
1900 else {
1901 if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
1902 check_method_noarg(mrb, ci);
1904 recv = MRB_PROC_CFUNC(p)(mrb, recv);
1907 else {
1908 if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
1909 check_method_noarg(mrb, ci);
1911 recv = MRB_METHOD_FUNC(m)(mrb, recv);
1914 /* cfunc epilogue */
1915 mrb_assert(mrb->c->ci > mrb->c->cibase);
1916 mrb_gc_arena_shrink(mrb, ai);
1917 if (mrb->exc) goto L_RAISE;
1918 ci = mrb->c->ci;
1919 if (!ci->u.keep_context) { /* return from context modifying method (resume/yield) */
1920 if (ci->cci == CINFO_RESUMED) {
1921 mrb->jmp = prev_jmp;
1922 return recv;
1924 else {
1925 mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
1926 proc = ci[-1].proc;
1927 irep = proc->body.irep;
1928 pool = irep->pool;
1929 syms = irep->syms;
1932 ci->stack[0] = recv;
1933 /* pop stackpos */
1934 ci = cipop(mrb);
1935 pc = ci->pc;
1936 JUMP;
1939 CASE(OP_CALL, Z) {
1940 mrb_callinfo *ci = mrb->c->ci;
1941 mrb_value recv = ci->stack[0];
1942 const struct RProc *p = mrb_proc_ptr(recv);
1944 /* handle alias */
1945 if (MRB_PROC_ALIAS_P(p)) {
1946 ci->mid = p->body.mid;
1947 p = p->upper;
1949 else if (MRB_PROC_ENV_P(p)) {
1950 ci->mid = MRB_PROC_ENV(p)->mid;
1952 /* replace callinfo */
1953 ci->u.target_class = MRB_PROC_TARGET_CLASS(p);
1954 CI_PROC_SET(ci, p);
1956 /* prepare stack */
1957 if (MRB_PROC_CFUNC_P(p)) {
1958 recv = MRB_PROC_CFUNC(p)(mrb, recv);
1959 mrb_gc_arena_shrink(mrb, ai);
1960 if (mrb->exc) goto L_RAISE;
1961 /* pop stackpos */
1962 ci = cipop(mrb);
1963 pc = ci->pc;
1964 ci[1].stack[0] = recv;
1965 irep = mrb->c->ci->proc->body.irep;
1967 else {
1968 /* setup environment for calling method */
1969 proc = p;
1970 irep = p->body.irep;
1971 if (!irep) {
1972 mrb->c->ci->stack[0] = mrb_nil_value();
1973 a = 0;
1974 goto L_OP_RETURN_BODY;
1976 mrb_int nargs = ci_bidx(ci)+1;
1977 if (nargs < irep->nregs) {
1978 stack_extend(mrb, irep->nregs);
1979 stack_clear(regs+nargs, irep->nregs-nargs);
1981 if (MRB_PROC_ENV_P(p)) {
1982 regs[0] = MRB_PROC_ENV(p)->stack[0];
1984 pc = irep->iseq;
1986 pool = irep->pool;
1987 syms = irep->syms;
1988 JUMP;
1991 CASE(OP_SUPER, BB) {
1992 mrb_callinfo *ci = mrb->c->ci;
1993 mrb_value recv;
1994 struct RClass* target_class = CI_TARGET_CLASS(ci);
1996 mid = ci->mid;
1997 if (mid == 0 || !target_class) {
1998 RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
2000 if ((target_class->flags & MRB_FL_CLASS_IS_PREPENDED) || target_class->tt == MRB_TT_MODULE) {
2001 goto super_typeerror;
2003 recv = regs[0];
2004 if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
2005 super_typeerror:
2006 RAISE_LIT(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context");
2009 c = b; // arg info
2010 regs[a] = recv;
2011 goto L_SENDB_SYM;
2014 CASE(OP_ARGARY, BS) {
2015 mrb_int m1 = (b>>11)&0x3f;
2016 mrb_int r = (b>>10)&0x1;
2017 mrb_int m2 = (b>>5)&0x1f;
2018 mrb_int kd = (b>>4)&0x1;
2019 mrb_int lv = (b>>0)&0xf;
2020 mrb_value *stack;
2022 if (mrb->c->ci->mid == 0 || CI_TARGET_CLASS(mrb->c->ci) == NULL) {
2023 L_NOSUPER:
2024 RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
2026 if (lv == 0) stack = regs + 1;
2027 else {
2028 struct REnv *e = uvenv(mrb, lv-1);
2029 if (!e) goto L_NOSUPER;
2030 if (MRB_ENV_LEN(e) <= m1+r+m2+1)
2031 goto L_NOSUPER;
2032 stack = e->stack + 1;
2034 if (r == 0) {
2035 regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
2037 else {
2038 mrb_value *pp = NULL;
2039 struct RArray *rest;
2040 mrb_int len = 0;
2042 if (mrb_array_p(stack[m1])) {
2043 struct RArray *ary = mrb_ary_ptr(stack[m1]);
2045 pp = ARY_PTR(ary);
2046 len = ARY_LEN(ary);
2048 regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
2049 rest = mrb_ary_ptr(regs[a]);
2050 if (m1 > 0) {
2051 stack_copy(ARY_PTR(rest), stack, m1);
2053 if (len > 0) {
2054 stack_copy(ARY_PTR(rest)+m1, pp, len);
2056 if (m2 > 0) {
2057 stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
2059 ARY_SET_LEN(rest, m1+len+m2);
2061 if (kd) {
2062 regs[a+1] = stack[m1+r+m2];
2063 regs[a+2] = stack[m1+r+m2+1];
2065 else {
2066 regs[a+1] = stack[m1+r+m2];
2068 mrb_gc_arena_restore(mrb, ai);
2069 NEXT;
2072 CASE(OP_ENTER, W) {
2073 mrb_callinfo *ci = mrb->c->ci;
2074 mrb_int argc = ci->n;
2075 mrb_value *argv = regs+1;
2077 mrb_int m1 = MRB_ASPEC_REQ(a);
2079 /* no other args */
2080 if ((a & ~0x7c0001) == 0 && argc < 15 && MRB_PROC_STRICT_P(proc)) {
2081 if (argc+(ci->nk==15) != m1) { /* count kdict too */
2082 argnum_error(mrb, m1);
2083 goto L_RAISE;
2085 /* clear local (but non-argument) variables */
2086 mrb_int pos = m1+2; /* self+m1+blk */
2087 if (irep->nlocals-pos > 0) {
2088 stack_clear(&regs[pos], irep->nlocals-pos);
2090 NEXT;
2093 mrb_int o = MRB_ASPEC_OPT(a);
2094 mrb_int r = MRB_ASPEC_REST(a);
2095 mrb_int m2 = MRB_ASPEC_POST(a);
2096 mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
2097 /* unused
2098 int b = MRB_ASPEC_BLOCK(a);
2100 mrb_int const len = m1 + o + r + m2;
2102 mrb_value * const argv0 = argv;
2103 mrb_value blk = regs[ci_bidx(ci)];
2104 mrb_value kdict = mrb_nil_value();
2106 /* keyword arguments */
2107 if (ci->nk == 15) {
2108 kdict = regs[mrb_ci_kidx(ci)];
2110 if (!kd) {
2111 if (!mrb_nil_p(kdict) && mrb_hash_size(mrb, kdict) > 0) {
2112 if (argc < 14) {
2113 ci->n++;
2114 argc++; /* include kdict in normal arguments */
2116 else if (argc == 14) {
2117 /* pack arguments and kdict */
2118 regs[1] = ary_new_from_regs(mrb, argc+1, 1);
2119 argc = ci->n = 15;
2121 else {/* argc == 15 */
2122 /* push kdict to packed arguments */
2123 mrb_ary_push(mrb, regs[1], kdict);
2126 kdict = mrb_nil_value();
2127 ci->nk = 0;
2129 else if (MRB_ASPEC_KEY(a) > 0 && !mrb_nil_p(kdict)) {
2130 kdict = mrb_hash_dup(mrb, kdict);
2132 else if (!mrb_nil_p(kdict)) {
2133 mrb_gc_protect(mrb, kdict);
2136 /* arguments are passed packed in an Array */
2137 if (argc == 15) {
2138 struct RArray *ary = mrb_ary_ptr(regs[1]);
2139 argv = ARY_PTR(ary);
2140 argc = (int)ARY_LEN(ary);
2141 mrb_gc_protect(mrb, regs[1]);
2144 /* strict argument check */
2145 if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
2146 if (argc < m1 + m2 || (r == 0 && argc > len)) {
2147 argnum_error(mrb, m1+m2);
2148 goto L_RAISE;
2151 /* extract first argument array to arguments */
2152 else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
2153 mrb_gc_protect(mrb, argv[0]);
2154 argc = (int)RARRAY_LEN(argv[0]);
2155 argv = RARRAY_PTR(argv[0]);
2158 /* rest arguments */
2159 mrb_value rest = mrb_nil_value();
2160 if (argc < len) {
2161 mrb_int mlen = m2;
2162 if (argc < m1+m2) {
2163 mlen = m1 < argc ? argc - m1 : 0;
2166 /* copy mandatory and optional arguments */
2167 if (argv0 != argv && argv) {
2168 value_move(&regs[1], argv, argc-mlen); /* m1 + o */
2170 if (argc < m1) {
2171 stack_clear(&regs[argc+1], m1-argc);
2173 /* copy post mandatory arguments */
2174 if (mlen) {
2175 value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
2177 if (mlen < m2) {
2178 stack_clear(&regs[len-m2+mlen+1], m2-mlen);
2180 /* initialize rest arguments with empty Array */
2181 if (r) {
2182 rest = mrb_ary_new_capa(mrb, 0);
2183 regs[m1+o+1] = rest;
2185 /* skip initializer of passed arguments */
2186 if (o > 0 && argc > m1+m2)
2187 pc += (argc - m1 - m2)*3;
2189 else {
2190 mrb_int rnum = 0;
2191 if (argv0 != argv) {
2192 mrb_gc_protect(mrb, blk);
2193 value_move(&regs[1], argv, m1+o);
2195 if (r) {
2196 rnum = argc-m1-o-m2;
2197 rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
2198 regs[m1+o+1] = rest;
2200 if (m2 > 0 && argc-m2 > m1) {
2201 value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);
2203 pc += o*3;
2206 /* blk must be moved into place first so that it stays protected from GC */
2207 mrb_int const kw_pos = len + kd; /* where kwhash should be */
2208 mrb_int const blk_pos = kw_pos + 1; /* where block should be */
2209 regs[blk_pos] = blk; /* move block */
2210 if (kd) {
2211 if (mrb_nil_p(kdict)) {
2212 kdict = mrb_hash_new_capa(mrb, 0);
2214 regs[kw_pos] = kdict; /* set kwhash */
2215 ci->nk = 15;
2218 /* format arguments for generated code */
2219 mrb->c->ci->n = (uint8_t)len;
2221 /* clear local (but non-argument) variables */
2222 if (irep->nlocals-blk_pos-1 > 0) {
2223 stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
2225 JUMP;
2228 CASE(OP_KARG, BB) {
2229 mrb_value k = mrb_symbol_value(syms[b]);
2230 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2231 mrb_value kdict, v;
2233 if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
2234 RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "missing keyword: %v", k);
2236 v = mrb_hash_get(mrb, kdict, k);
2237 regs[a] = v;
2238 mrb_hash_delete_key(mrb, kdict, k);
2239 NEXT;
2242 CASE(OP_KEY_P, BB) {
2243 mrb_value k = mrb_symbol_value(syms[b]);
2244 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2245 mrb_value kdict;
2246 mrb_bool key_p = FALSE;
2248 if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
2249 key_p = mrb_hash_key_p(mrb, kdict, k);
2251 regs[a] = mrb_bool_value(key_p);
2252 NEXT;
2255 CASE(OP_KEYEND, Z) {
2256 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2257 mrb_value kdict;
2259 if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
2260 mrb_value keys = mrb_hash_keys(mrb, kdict);
2261 mrb_value key1 = RARRAY_PTR(keys)[0];
2262 RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "unknown keyword: %v", key1);
2264 NEXT;
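/* Illustrative sketch (not compiled): OP_KARG consumes one required keyword,
   OP_KEY_P only probes for an optional one, and OP_KEYEND raises on whatever is
   still left in the hash, which is how "unknown keyword" is reported. The helper
   below restates the consume-then-check protocol using the same hash API; it is
   not a function that exists in mruby. */
#if 0
static mrb_value
take_required_kw(mrb_state *mrb, mrb_value kdict, mrb_sym key)
{
  mrb_value k = mrb_symbol_value(key);
  mrb_value v;

  if (!mrb_hash_p(kdict) || !mrb_hash_key_p(mrb, kdict, k)) {
    mrb_raisef(mrb, E_ARGUMENT_ERROR, "missing keyword: %v", k);
  }
  v = mrb_hash_get(mrb, kdict, k);
  mrb_hash_delete_key(mrb, kdict, k);  /* consumed keys are removed so that */
  return v;                            /* OP_KEYEND can flag any leftovers  */
}
#endif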
2267 CASE(OP_BREAK, B) {
2268 if (mrb->exc) {
2269 goto L_RAISE;
2272 if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
2273 if (MRB_PROC_ORPHAN_P(proc) || !MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
2274 L_BREAK_ERROR:
2275 RAISE_LIT(mrb, E_LOCALJUMP_ERROR, "break from proc-closure");
2277 else {
2278 struct REnv *e = MRB_PROC_ENV(proc);
2280 if (e->cxt != mrb->c) {
2281 goto L_BREAK_ERROR;
2284 mrb_callinfo *ci = mrb->c->ci;
2285 proc = proc->upper;
2286 while (mrb->c->cibase < ci && ci[-1].proc != proc) {
2287 ci--;
2289 if (ci == mrb->c->cibase) {
2290 goto L_BREAK_ERROR;
2292 c = a; // stash the value register index in c, freeing 'a' (which can hold 32-bit values) for the callinfo index
2293 a = ci - mrb->c->cibase;
2294 goto L_UNWINDING;
2296 CASE(OP_RETURN_BLK, B) {
2297 if (mrb->exc) {
2298 goto L_RAISE;
2301 mrb_callinfo *ci = mrb->c->ci;
2303 if (!MRB_PROC_ENV_P(proc) || MRB_PROC_STRICT_P(proc)) {
2304 goto NORMAL_RETURN;
2307 const struct RProc *dst;
2308 mrb_callinfo *cibase;
2309 cibase = mrb->c->cibase;
2310 dst = top_proc(mrb, proc);
2312 if (MRB_PROC_ENV_P(dst)) {
2313 struct REnv *e = MRB_PROC_ENV(dst);
2315 if (!MRB_ENV_ONSTACK_P(e) || e->cxt != mrb->c) {
2316 localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
2317 goto L_RAISE;
2320 /* check jump destination */
2321 while (cibase <= ci && ci->proc != dst) {
2322 ci--;
2324 if (ci <= cibase) { /* no jump destination */
2325 localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
2326 goto L_RAISE;
2328 c = a; // stash the value register index in c, freeing 'a' (which can hold 32-bit values) for the callinfo index
2329 a = ci - mrb->c->cibase;
2330 goto L_UNWINDING;
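/* Illustrative sketch (not compiled): both OP_BREAK and OP_RETURN_BLK turn themselves
   into a jump to L_UNWINDING inside OP_RETURN, stashing the value register index in
   `c` and the index of the target callinfo in `a` first. The helper below factors out
   the break-target search performed above; it is not a function that exists in mruby. */
#if 0
static ptrdiff_t
break_target_index(mrb_state *mrb, const struct RProc *blockproc)
{
  const struct RProc *creator = blockproc->upper;  /* proc that created the block */
  mrb_callinfo *ci = mrb->c->ci;

  while (mrb->c->cibase < ci && ci[-1].proc != creator) {
    ci--;                                          /* walk down the call stack    */
  }
  if (ci == mrb->c->cibase) return -1;             /* creator frame is gone: LocalJumpError */
  return ci - mrb->c->cibase;                      /* becomes `a` for L_UNWINDING */
}
#endif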
2332 CASE(OP_RETURN, B) {
2333 mrb_callinfo *ci;
2335 ci = mrb->c->ci;
2336 if (mrb->exc) {
2337 goto L_RAISE;
2339 else {
2340 mrb_int acc;
2341 mrb_value v;
2343 NORMAL_RETURN:
2344 ci = mrb->c->ci;
2346 if (ci == mrb->c->cibase) {
2347 struct mrb_context *c;
2348 c = mrb->c;
2350 if (c->prev && !c->vmexec && c->prev->ci == c->prev->cibase) {
2351 RAISE_LIT(mrb, E_FIBER_ERROR, "double resume");
2355 v = regs[a];
2356 mrb_gc_protect(mrb, v);
2357 CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
2358 if (TRUE) {
2359 struct RBreak *brk = (struct RBreak*)mrb->exc;
2360 ci = &mrb->c->cibase[brk->ci_break_index];
2361 v = mrb_break_value_get(brk);
2363 else {
2364 L_UNWINDING: // `a` holds the index of the target callinfo and `c` the value register; see the `goto L_UNWINDING` sites in OP_BREAK and OP_RETURN_BLK
2365 ci = mrb->c->cibase + a;
2366 v = regs[c];
2368 mrb_gc_protect(mrb, v);
2370 CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
2371 for (;;) {
2372 UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_BREAK, ci, v);
2374 if (mrb->c->ci == ci) {
2375 break;
2377 cipop(mrb);
2378 if (mrb->c->ci[1].cci != CINFO_NONE) {
2379 mrb_assert(prev_jmp != NULL);
2380 mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, ci, v);
2381 mrb_gc_arena_restore(mrb, ai);
2382 mrb->c->vmexec = FALSE;
2383 mrb->jmp = prev_jmp;
2384 MRB_THROW(prev_jmp);
2388 CHECKPOINT_END(RBREAK_TAG_BREAK);
2389 mrb->exc = NULL; /* clear break object */
2391 if (ci == mrb->c->cibase) {
2392 struct mrb_context *c = mrb->c;
2393 if (c == mrb->root_c) {
2394 /* toplevel return */
2395 mrb_gc_arena_restore(mrb, ai);
2396 mrb->jmp = prev_jmp;
2397 return v;
2400 /* fiber termination should automatically yield or transfer to the root */
2401 c->status = MRB_FIBER_TERMINATED;
2402 mrb->c = c->prev ? c->prev : mrb->root_c;
2403 c->prev = NULL;
2404 mrb->c->status = MRB_FIBER_RUNNING;
2405 if (c->vmexec ||
2406 (mrb->c == mrb->root_c && mrb->c->ci == mrb->c->cibase) /* case where Fiber#transfer is used inside mrb_fiber_resume() */) {
2407 mrb_gc_arena_restore(mrb, ai);
2408 c->vmexec = FALSE;
2409 mrb->jmp = prev_jmp;
2410 return v;
2412 ci = mrb->c->ci;
2415 if (mrb->c->vmexec && !ci->u.keep_context) {
2416 mrb_gc_arena_restore(mrb, ai);
2417 mrb->c->vmexec = FALSE;
2418 mrb->jmp = prev_jmp;
2419 return v;
2421 acc = ci->cci;
2422 ci = cipop(mrb);
2423 if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
2424 mrb_gc_arena_restore(mrb, ai);
2425 mrb->jmp = prev_jmp;
2426 return v;
2428 pc = ci->pc;
2429 DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
2430 proc = ci->proc;
2431 irep = proc->body.irep;
2432 pool = irep->pool;
2433 syms = irep->syms;
2435 ci[1].stack[0] = v;
2436 mrb_gc_arena_restore(mrb, ai);
2438 JUMP;
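/* Illustrative sketch (not compiled): after the ensure handlers have run and the
   callee frame is popped, the result is written to ci[1].stack[0]. A callee's
   register window starts at the caller's destination register, so that slot is
   exactly where the calling OP_SEND expects its result. The fragment below only
   annotates the hand-off already performed by the code above. */
#if 0
mrb_callinfo *caller = cipop(mrb);  /* caller frame; caller[1] is the popped callee */
caller[1].stack[0] = v;             /* callee slot 0 aliases the caller's result register */
pc = caller->pc;                    /* resume the caller where it left off          */
#endif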
2441 CASE(OP_BLKPUSH, BS) {
2442 int m1 = (b>>11)&0x3f;  /* leading mandatory argument count        */
2443 int r = (b>>10)&0x1;    /* rest (*splat) argument flag             */
2444 int m2 = (b>>5)&0x1f;   /* trailing (post) mandatory argument count */
2445 int kd = (b>>4)&0x1;    /* keyword dictionary flag                 */
2446 int lv = (b>>0)&0xf;    /* lexical scope level (0 = current frame) */
2447 mrb_value *stack;
2449 if (lv == 0) stack = regs + 1;
2450 else {
2451 struct REnv *e = uvenv(mrb, lv-1);
2452 if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
2453 MRB_ENV_LEN(e) <= m1+r+m2+1) {
2454 localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
2455 goto L_RAISE;
2457 stack = e->stack + 1;
2459 if (mrb_nil_p(stack[m1+r+m2+kd])) {
2460 localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
2461 goto L_RAISE;
2463 regs[a] = stack[m1+r+m2+kd];
2464 NEXT;
2467 #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
2468 L_INT_OVERFLOW:
2469 RAISE_LIT(mrb, E_RANGE_ERROR, "integer overflow");
2470 #endif
2472 #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
2473 #define OP_MATH(op_name) \
2474 /* need to check if op is overridden */ \
2475 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
2476 OP_MATH_CASE_INTEGER(op_name); \
2477 OP_MATH_CASE_FLOAT(op_name, integer, float); \
2478 OP_MATH_CASE_FLOAT(op_name, float, integer); \
2479 OP_MATH_CASE_FLOAT(op_name, float, float); \
2480 OP_MATH_CASE_STRING_##op_name(); \
2481 default: \
2482 mid = MRB_OPSYM(op_name); \
2483 goto L_SEND_SYM; \
2485 NEXT;
2486 #define OP_MATH_CASE_INTEGER(op_name) \
2487 case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
2489 mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
2490 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2491 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2493 else \
2494 SET_INT_VALUE(mrb,regs[a], z); \
2496 break
2497 #ifdef MRB_NO_FLOAT
2498 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
2499 #else
2500 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
2501 case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
2503 mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
2504 SET_FLOAT_VALUE(mrb, regs[a], z); \
2506 break
2507 #endif
2508 #ifdef MRB_USE_BIGINT
2509 #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
2510 #else
2511 #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW
2512 #endif
2513 #define OP_MATH_CASE_STRING_add() \
2514 case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
2515 regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
2516 mrb_gc_arena_restore(mrb, ai); \
2517 break
2518 #define OP_MATH_CASE_STRING_sub() (void)0
2519 #define OP_MATH_CASE_STRING_mul() (void)0
2520 #define OP_MATH_OP_add +
2521 #define OP_MATH_OP_sub -
2522 #define OP_MATH_OP_mul *
2523 #define OP_MATH_TT_integer MRB_TT_INTEGER
2524 #define OP_MATH_TT_float MRB_TT_FLOAT
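/* Illustrative sketch (not compiled): OP_MATH dispatches on the packed pair of operand
   types. integer+integer uses overflow-checked arithmetic (promoting to a bigint or
   raising RangeError on overflow), integer/float mixes are computed as floats, `+`
   additionally handles String concatenation, and every other pair falls back to a
   regular method send so user-defined operators still work. The standalone function
   below restates the dispatch idea on plain C types; it assumes the GCC/Clang
   __builtin_add_overflow builtin and is not part of mruby. */
#if 0
#include <stdint.h>

enum ex_tt { EX_TT_INT, EX_TT_FLOAT };
#define EX_TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))

/* returns 0 when handled inline, 1 when the pair must go through a method send */
static int
ex_add(enum ex_tt ta, enum ex_tt tb,
       int64_t xi, double xf, int64_t yi, double yf,
       int64_t *zi, double *zf, int *overflow)
{
  *overflow = 0;
  switch (EX_TYPES2(ta, tb)) {
  case EX_TYPES2(EX_TT_INT, EX_TT_INT):
    if (__builtin_add_overflow(xi, yi, zi)) *overflow = 1;  /* -> bigint or RangeError */
    return 0;
  case EX_TYPES2(EX_TT_INT, EX_TT_FLOAT):   *zf = (double)xi + yf; return 0;
  case EX_TYPES2(EX_TT_FLOAT, EX_TT_INT):   *zf = xf + (double)yi; return 0;
  case EX_TYPES2(EX_TT_FLOAT, EX_TT_FLOAT): *zf = xf + yf;         return 0;
  default: return 1;  /* user-defined :+ etc.; the real macro also handles String + String */
  }
}
#endif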
2526 CASE(OP_ADD, B) {
2527 OP_MATH(add);
2530 CASE(OP_SUB, B) {
2531 OP_MATH(sub);
2534 CASE(OP_MUL, B) {
2535 OP_MATH(mul);
2538 CASE(OP_DIV, B) {
2539 #ifndef MRB_NO_FLOAT
2540 mrb_float x, y, f;
2541 #endif
2543 /* need to check if op is overridden */
2544 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
2545 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
2547 mrb_int x = mrb_integer(regs[a]);
2548 mrb_int y = mrb_integer(regs[a+1]);
2549 regs[a] = mrb_div_int_value(mrb, x, y);
2551 NEXT;
2552 #ifndef MRB_NO_FLOAT
2553 case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
2554 x = (mrb_float)mrb_integer(regs[a]);
2555 y = mrb_float(regs[a+1]);
2556 break;
2557 case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
2558 x = mrb_float(regs[a]);
2559 y = (mrb_float)mrb_integer(regs[a+1]);
2560 break;
2561 case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
2562 x = mrb_float(regs[a]);
2563 y = mrb_float(regs[a+1]);
2564 break;
2565 #endif
2566 default:
2567 mid = MRB_OPSYM(div);
2568 goto L_SEND_SYM;
2571 #ifndef MRB_NO_FLOAT
2572 f = mrb_div_float(x, y);
2573 SET_FLOAT_VALUE(mrb, regs[a], f);
2574 #endif
2575 NEXT;
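/* Illustrative sketch (not compiled): for integer/integer operands OP_DIV delegates to
   mrb_div_int_value, which is expected to follow Ruby's floored-division semantics and
   to reject a zero divisor; mixed integer/float operands are promoted and divided as
   floats via mrb_div_float, and anything else becomes a :/ method send. The helper
   below only illustrates floored division itself (a property of Ruby semantics, not a
   claim about mrb_div_int_value's implementation). */
#if 0
static mrb_int
ex_floored_div(mrb_int x, mrb_int y)  /* caller must reject y == 0 */
{
  mrb_int q = x / y;                               /* C truncates toward zero   */
  if ((x % y) != 0 && ((x < 0) != (y < 0))) q--;   /* adjust toward -infinity   */
  return q;                                        /* e.g. -7 / 2 == -4 in Ruby */
}
#endif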
2578 #define OP_MATHI(op_name) \
2579 /* need to check if op is overridden */ \
2580 switch (mrb_type(regs[a])) { \
2581 OP_MATHI_CASE_INTEGER(op_name); \
2582 OP_MATHI_CASE_FLOAT(op_name); \
2583 default: \
2584 SET_INT_VALUE(mrb,regs[a+1], b); \
2585 mid = MRB_OPSYM(op_name); \
2586 goto L_SEND_SYM; \
2588 NEXT;
2589 #define OP_MATHI_CASE_INTEGER(op_name) \
2590 case MRB_TT_INTEGER: \
2592 mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
2593 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2594 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2596 else \
2597 SET_INT_VALUE(mrb,regs[a], z); \
2599 break
2600 #ifdef MRB_NO_FLOAT
2601 #define OP_MATHI_CASE_FLOAT(op_name) (void)0
2602 #else
2603 #define OP_MATHI_CASE_FLOAT(op_name) \
2604 case MRB_TT_FLOAT: \
2606 mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
2607 SET_FLOAT_VALUE(mrb, regs[a], z); \
2609 break
2610 #endif
2612 CASE(OP_ADDI, BB) {
2613 OP_MATHI(add);
2616 CASE(OP_SUBI, BB) {
2617 OP_MATHI(sub);
2620 #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
2622 #ifdef MRB_NO_FLOAT
2623 #define OP_CMP(op,sym) do {\
2624 int result;\
2625 /* need to check if the comparison operator is overridden */\
2626 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2627 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2628 result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
2629 break;\
2630 default:\
2631 mid = MRB_OPSYM(sym);\
2632 goto L_SEND_SYM;\
2634 if (result) {\
2635 SET_TRUE_VALUE(regs[a]);\
2637 else {\
2638 SET_FALSE_VALUE(regs[a]);\
2640 } while(0)
2641 #else
2642 #define OP_CMP(op, sym) do {\
2643 int result;\
2644 /* need to check if the comparison operator is overridden */\
2645 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2646 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2647 result = OP_CMP_BODY(op,mrb_integer,mrb_integer);\
2648 break;\
2649 case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
2650 result = OP_CMP_BODY(op,mrb_integer,mrb_float);\
2651 break;\
2652 case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
2653 result = OP_CMP_BODY(op,mrb_float,mrb_integer);\
2654 break;\
2655 case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
2656 result = OP_CMP_BODY(op,mrb_float,mrb_float);\
2657 break;\
2658 default:\
2659 mid = MRB_OPSYM(sym);\
2660 goto L_SEND_SYM;\
2662 if (result) {\
2663 SET_TRUE_VALUE(regs[a]);\
2665 else {\
2666 SET_FALSE_VALUE(regs[a]);\
2668 } while(0)
2669 #endif
2671 CASE(OP_EQ, B) {
2672 if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
2673 SET_TRUE_VALUE(regs[a]);
2675 else {
2676 OP_CMP(==,eq);
2678 NEXT;
2681 CASE(OP_LT, B) {
2682 OP_CMP(<,lt);
2683 NEXT;
2686 CASE(OP_LE, B) {
2687 OP_CMP(<=,le);
2688 NEXT;
2691 CASE(OP_GT, B) {
2692 OP_CMP(>,gt);
2693 NEXT;
2696 CASE(OP_GE, B) {
2697 OP_CMP(>=,ge);
2698 NEXT;
2701 CASE(OP_ARRAY, BB) {
2702 regs[a] = ary_new_from_regs(mrb, b, a);
2703 mrb_gc_arena_restore(mrb, ai);
2704 NEXT;
2706 CASE(OP_ARRAY2, BBB) {
2707 regs[a] = ary_new_from_regs(mrb, c, b);
2708 mrb_gc_arena_restore(mrb, ai);
2709 NEXT;
2712 CASE(OP_ARYCAT, B) {
2713 mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
2714 if (mrb_nil_p(regs[a])) {
2715 regs[a] = splat;
2717 else {
2718 mrb_assert(mrb_array_p(regs[a]));
2719 mrb_ary_concat(mrb, regs[a], splat);
2721 mrb_gc_arena_restore(mrb, ai);
2722 NEXT;
2725 CASE(OP_ARYPUSH, BB) {
2726 mrb_assert(mrb_array_p(regs[a]));
2727 for (mrb_int i=0; i<b; i++) {
2728 mrb_ary_push(mrb, regs[a], regs[a+i+1]);
2730 NEXT;
2733 CASE(OP_ARYSPLAT, B) {
2734 mrb_value ary = mrb_ary_splat(mrb, regs[a]);
2735 regs[a] = ary;
2736 mrb_gc_arena_restore(mrb, ai);
2737 NEXT;
2740 CASE(OP_AREF, BBB) {
2741 mrb_value v = regs[b];
2743 if (!mrb_array_p(v)) {
2744 if (c == 0) {
2745 regs[a] = v;
2747 else {
2748 SET_NIL_VALUE(regs[a]);
2751 else {
2752 v = mrb_ary_ref(mrb, v, c);
2753 regs[a] = v;
2755 NEXT;
2758 CASE(OP_ASET, BBB) {
2759 mrb_assert(mrb_array_p(regs[a]));
2760 mrb_ary_set(mrb, regs[b], c, regs[a]);
2761 NEXT;
2764 CASE(OP_APOST, BBB) {
2765 mrb_value v = regs[a];
2766 int pre = b;
2767 int post = c;
2768 struct RArray *ary;
2769 int len, idx;
2771 if (!mrb_array_p(v)) {
2772 v = ary_new_from_regs(mrb, 1, a);
2774 ary = mrb_ary_ptr(v);
2775 len = (int)ARY_LEN(ary);
2776 if (len > pre + post) {
2777 v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
2778 regs[a++] = v;
2779 while (post--) {
2780 regs[a++] = ARY_PTR(ary)[len-post-1];
2783 else {
2784 v = mrb_ary_new_capa(mrb, 0);
2785 regs[a++] = v;
2786 for (idx=0; idx+pre<len; idx++) {
2787 regs[a+idx] = ARY_PTR(ary)[pre+idx];
2789 while (idx < post) {
2790 SET_NIL_VALUE(regs[a+idx]);
2791 idx++;
2794 mrb_gc_arena_restore(mrb, ai);
2795 NEXT;
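/* Illustrative sketch (not compiled): OP_APOST finishes a splat multiple assignment
   such as `pre, *mid, post = ary`. The b leading registers are already filled; this
   opcode stores the middle slice as a fresh Array at regs[a] and then copies the c
   trailing elements, padding with nil when the source is too short. The helper below
   restates the slice boundaries used above; it is not a function that exists in mruby. */
#if 0
static void
ex_apost_mid_len(mrb_int len, mrb_int pre, mrb_int post, mrb_int *mid_len)
{
  if (len > pre + post) {
    *mid_len = len - pre - post;   /* mid = ary[pre .. len-post-1]               */
  }
  else {
    *mid_len = 0;                  /* too short: mid is empty and the post slots */
  }                                /* are filled from ary[pre..], then nil-padded */
}
#endif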
2798 CASE(OP_INTERN, B) {
2799 mrb_assert(mrb_string_p(regs[a]));
2800 mrb_sym sym = mrb_intern_str(mrb, regs[a]);
2801 regs[a] = mrb_symbol_value(sym);
2802 NEXT;
2805 CASE(OP_SYMBOL, BB) {
2806 size_t len;
2807 mrb_sym sym;
2809 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2810 len = pool[b].tt >> 2;
2811 if (pool[b].tt & IREP_TT_SFLAG) {
2812 sym = mrb_intern_static(mrb, pool[b].u.str, len);
2814 else {
2815 sym = mrb_intern(mrb, pool[b].u.str, len);
2817 regs[a] = mrb_symbol_value(sym);
2818 NEXT;
2821 CASE(OP_STRING, BB) {
2822 mrb_int len;
2824 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2825 len = pool[b].tt >> 2;
2826 if (pool[b].tt & IREP_TT_SFLAG) {
2827 regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
2829 else {
2830 regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
2832 mrb_gc_arena_restore(mrb, ai);
2833 NEXT;
2836 CASE(OP_STRCAT, B) {
2837 mrb_assert(mrb_string_p(regs[a]));
2838 mrb_str_concat(mrb, regs[a], regs[a+1]);
2839 NEXT;
2842 CASE(OP_HASH, BB) {
2843 mrb_value hash = mrb_hash_new_capa(mrb, b);
2844 int lim = a+b*2;
2846 for (int i=a; i<lim; i+=2) {
2847 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2849 regs[a] = hash;
2850 mrb_gc_arena_restore(mrb, ai);
2851 NEXT;
2854 CASE(OP_HASHADD, BB) {
2855 mrb_value hash;
2856 int lim = a+b*2+1;
2858 hash = regs[a];
2859 mrb_ensure_hash_type(mrb, hash);
2860 for (int i=a+1; i<lim; i+=2) {
2861 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2863 mrb_gc_arena_restore(mrb, ai);
2864 NEXT;
2866 CASE(OP_HASHCAT, B) {
2867 mrb_value hash = regs[a];
2869 mrb_assert(mrb_hash_p(hash));
2870 mrb_hash_merge(mrb, hash, regs[a+1]);
2871 mrb_gc_arena_restore(mrb, ai);
2872 NEXT;
2875 CASE(OP_LAMBDA, BB)
2876 c = OP_L_LAMBDA;
2877 L_MAKE_LAMBDA:
2879 struct RProc *p;
2880 const mrb_irep *nirep = irep->reps[b];
2882 if (c & OP_L_CAPTURE) {
2883 p = mrb_closure_new(mrb, nirep);
2885 else {
2886 p = mrb_proc_new(mrb, nirep);
2887 p->flags |= MRB_PROC_SCOPE;
2889 if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
2890 regs[a] = mrb_obj_value(p);
2891 mrb_gc_arena_restore(mrb, ai);
2892 NEXT;
2894 CASE(OP_BLOCK, BB) {
2895 c = OP_L_BLOCK;
2896 goto L_MAKE_LAMBDA;
2898 CASE(OP_METHOD, BB) {
2899 c = OP_L_METHOD;
2900 goto L_MAKE_LAMBDA;
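/* Illustrative sketch (not compiled): OP_LAMBDA, OP_BLOCK and OP_METHOD share the
   L_MAKE_LAMBDA tail above; the flag word in `c` decides whether the new proc captures
   the enclosing environment (a closure) and whether it performs strict, lambda-style
   argument checking. The helper below restates that decision; it is not a function
   that exists in mruby. */
#if 0
static struct RProc*
ex_make_proc(mrb_state *mrb, const mrb_irep *nirep, uint32_t flags)
{
  struct RProc *p;

  if (flags & OP_L_CAPTURE) {
    p = mrb_closure_new(mrb, nirep);   /* captures the enclosing environment       */
  }
  else {
    p = mrb_proc_new(mrb, nirep);      /* plain proc used for method/scope bodies  */
    p->flags |= MRB_PROC_SCOPE;
  }
  if (flags & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;  /* lambda-style arg checks */
  return p;
}
#endif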
2903 CASE(OP_RANGE_INC, B) {
2904 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
2905 regs[a] = v;
2906 mrb_gc_arena_restore(mrb, ai);
2907 NEXT;
2910 CASE(OP_RANGE_EXC, B) {
2911 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
2912 regs[a] = v;
2913 mrb_gc_arena_restore(mrb, ai);
2914 NEXT;
2917 CASE(OP_OCLASS, B) {
2918 regs[a] = mrb_obj_value(mrb->object_class);
2919 NEXT;
2922 CASE(OP_CLASS, BB) {
2923 struct RClass *c = 0, *baseclass;
2924 mrb_value base, super;
2925 mrb_sym id = syms[b];
2927 base = regs[a];
2928 super = regs[a+1];
2929 if (mrb_nil_p(base)) {
2930 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2931 if (!baseclass) baseclass = mrb->object_class;
2932 base = mrb_obj_value(baseclass);
2934 c = mrb_vm_define_class(mrb, base, super, id);
2935 regs[a] = mrb_obj_value(c);
2936 mrb_gc_arena_restore(mrb, ai);
2937 NEXT;
2940 CASE(OP_MODULE, BB) {
2941 struct RClass *cls = 0, *baseclass;
2942 mrb_value base;
2943 mrb_sym id = syms[b];
2945 base = regs[a];
2946 if (mrb_nil_p(base)) {
2947 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2948 if (!baseclass) baseclass = mrb->object_class;
2949 base = mrb_obj_value(baseclass);
2951 cls = mrb_vm_define_module(mrb, base, id);
2952 regs[a] = mrb_obj_value(cls);
2953 mrb_gc_arena_restore(mrb, ai);
2954 NEXT;
2957 CASE(OP_EXEC, BB)
2959 mrb_value recv = regs[a];
2960 struct RProc *p;
2961 const mrb_irep *nirep = irep->reps[b];
2963 /* prepare closure */
2964 p = mrb_proc_new(mrb, nirep);
2965 p->c = NULL;
2966 mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
2967 MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
2968 p->flags |= MRB_PROC_SCOPE;
2970 /* prepare call stack */
2971 cipush(mrb, a, 0, mrb_class_ptr(recv), p, NULL, 0, 0);
2973 irep = p->body.irep;
2974 pool = irep->pool;
2975 syms = irep->syms;
2976 stack_extend(mrb, irep->nregs);
2977 stack_clear(regs+1, irep->nregs-1);
2978 pc = irep->iseq;
2979 JUMP;
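/* Illustrative sketch (not compiled): OP_EXEC runs a nested irep, typically a class or
   module body created by OP_CLASS/OP_MODULE, with the receiver acting as both `self`
   and the target class; the matching OP_RETURN inside the nested irep pops back to
   this frame. The annotated call below restates the cipush() arguments used above, as
   inferred from how they are consumed elsewhere in this file. */
#if 0
cipush(mrb,
       a,                    /* callee register window starts at caller regs[a] */
       0,                    /* cci: plain VM-to-VM frame (CINFO_NONE)          */
       mrb_class_ptr(recv),  /* target class: the class/module being defined    */
       p,                    /* proc wrapping the nested irep                   */
       NULL,                 /* no block                                        */
       0, 0);                /* no method name, no arguments                    */
#endif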
2982 CASE(OP_DEF, BB) {
2983 struct RClass *target = mrb_class_ptr(regs[a]);
2984 struct RProc *p = mrb_proc_ptr(regs[a+1]);
2985 mrb_method_t m;
2986 mrb_sym mid = syms[b];
2988 MRB_METHOD_FROM_PROC(m, p);
2989 mrb_define_method_raw(mrb, target, mid, m);
2990 mrb_method_added(mrb, target, mid);
2991 mrb_gc_arena_restore(mrb, ai);
2992 regs[a] = mrb_symbol_value(mid);
2993 NEXT;
2996 CASE(OP_SCLASS, B) {
2997 regs[a] = mrb_singleton_class(mrb, regs[a]);
2998 mrb_gc_arena_restore(mrb, ai);
2999 NEXT;
3002 CASE(OP_TCLASS, B) {
3003 struct RClass *target = check_target_class(mrb);
3004 if (!target) goto L_RAISE;
3005 regs[a] = mrb_obj_value(target);
3006 NEXT;
3009 CASE(OP_ALIAS, BB) {
3010 struct RClass *target = check_target_class(mrb);
3012 if (!target) goto L_RAISE;
3013 mrb_alias_method(mrb, target, syms[a], syms[b]);
3014 mrb_method_added(mrb, target, syms[a]);
3015 NEXT;
3017 CASE(OP_UNDEF, B) {
3018 struct RClass *target = check_target_class(mrb);
3020 if (!target) goto L_RAISE;
3021 mrb_undef_method_id(mrb, target, syms[a]);
3022 NEXT;
3025 CASE(OP_DEBUG, Z) {
3026 FETCH_BBB();
3027 #ifdef MRB_USE_DEBUG_HOOK
3028 mrb->debug_op_hook(mrb, irep, pc, regs);
3029 #else
3030 #ifndef MRB_NO_STDIO
3031 printf("OP_DEBUG %d %d %d\n", a, b, c);
3032 #else
3033 abort();
3034 #endif
3035 #endif
3036 NEXT;
3039 CASE(OP_ERR, B) {
3040 size_t len = pool[a].tt >> 2;
3041 mrb_value exc;
3043 mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
3044 exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
3045 RAISE_EXC(mrb, exc);
3048 CASE(OP_EXT1, Z) {
3049 insn = READ_B();
3050 switch (insn) {
3051 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3052 #include "mruby/ops.h"
3053 #undef OPCODE
3055 pc--;
3056 NEXT;
3058 CASE(OP_EXT2, Z) {
3059 insn = READ_B();
3060 switch (insn) {
3061 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3062 #include "mruby/ops.h"
3063 #undef OPCODE
3065 pc--;
3066 NEXT;
3068 CASE(OP_EXT3, Z) {
3069 insn = READ_B();
3070 switch (insn) {
3071 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3072 #include "mruby/ops.h"
3073 #undef OPCODE
3075 pc--;
3076 NEXT;
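/* Illustrative sketch (not compiled): OP_EXT1/2/3 are operand-size prefixes. Each one
   re-reads the following instruction and dispatches into its normal body, but with a
   FETCH_*_1/2/3 variant that widens the first, the second, or both operands, which is
   how register or pool indexes beyond one byte are encoded. The layout below is an
   assumed illustration; the authoritative operand formats are in mruby/ops.h. */
#if 0
/* plain:           [OP_xxx][a:8][b:8]            */
/* behind OP_EXT1:  [OP_EXT1][OP_xxx][a:16][b:8]  */
/* behind OP_EXT2:  [OP_EXT2][OP_xxx][a:8][b:16]  */
/* behind OP_EXT3:  [OP_EXT3][OP_xxx][a:16][b:16] */
#endif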
3079 CASE(OP_STOP, Z) {
3080 /* stop VM */
3081 CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
3082 /* do nothing */
3084 CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
3085 UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_STOP, mrb->c->ci, mrb_nil_value());
3087 CHECKPOINT_END(RBREAK_TAG_STOP);
3088 mrb->jmp = prev_jmp;
3089 if (mrb->exc) {
3090 mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
3091 return mrb_obj_value(mrb->exc);
3093 return regs[irep->nlocals];
3096 END_DISPATCH;
3097 #undef regs
3099 MRB_CATCH(&c_jmp) {
3100 mrb_callinfo *ci = mrb->c->ci;
3101 while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
3102 ci = cipop(mrb);
3104 exc_catched = TRUE;
3105 pc = ci->pc;
3106 goto RETRY_TRY_BLOCK;
3108 MRB_END_EXC(&c_jmp);
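/* Illustrative sketch (not compiled): exceptions raised from C land in the MRB_CATCH
   above via a longjmp through mrb->jmp; direct-call frames are popped, pc is reset to
   the saved position, and the dispatch loop is re-entered (RETRY_TRY_BLOCK), where the
   pending mrb->exc is routed to the L_RAISE handling. The pattern below is the generic
   protection idiom from <mruby/throw.h> that this function instantiates. */
#if 0
struct mrb_jmpbuf *prev = mrb->jmp;
struct mrb_jmpbuf jmp;

MRB_TRY(&jmp) {
  mrb->jmp = &jmp;
  /* protected region: mrb_raise() and friends end up in MRB_THROW(mrb->jmp) */
  mrb->jmp = prev;
}
MRB_CATCH(&jmp) {
  mrb->jmp = prev;
  /* recover: inspect mrb->exc, unwind frames, then resume or rethrow */
}
MRB_END_EXC(&jmp);
#endif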
3111 static mrb_value
3112 mrb_run(mrb_state *mrb, const struct RProc *proc, mrb_value self)
3114 return mrb_vm_run(mrb, proc, self, ci_bidx(mrb->c->ci) + 1);
3117 MRB_API mrb_value
3118 mrb_top_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
3120 if (mrb->c->cibase && mrb->c->ci > mrb->c->cibase) {
3121 cipush(mrb, 0, CINFO_SKIP, mrb->object_class, NULL, NULL, 0, 0);
3123 return mrb_vm_run(mrb, proc, self, stack_keep);
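/* Illustrative sketch (not compiled): mrb_top_run() is what the public API uses to run
   a freshly compiled program at the top level, pushing a skip frame first when the VM
   is already inside a call. A typical embedding reaches it indirectly, e.g. through
   mrb_load_string() from <mruby/compile.h> (provided by the mruby-compiler gem). */
#if 0
#include <mruby.h>
#include <mruby/compile.h>

int
main(void)
{
  mrb_state *mrb = mrb_open();
  if (mrb == NULL) return 1;
  mrb_load_string(mrb, "p 1 + 1");  /* parse, generate an RProc, run it on this VM */
  mrb_close(mrb);
  return 0;
}
#endif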