/*
** vm.c - virtual machine for mruby
**
** See Copyright Notice in mruby.h
*/

#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/hash.h>
#include <mruby/irep.h>
#include <mruby/numeric.h>
#include <mruby/proc.h>
#include <mruby/range.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/opcode.h>
#include "value_array.h"
#include <mruby/throw.h>
#include <mruby/dump.h>
#include <mruby/internal.h>
#include <mruby/presym.h>

#if defined(__cplusplus)
#if defined(__cplusplus)

#define STACK_INIT_SIZE 128
#define CALLINFO_INIT_SIZE 32

/* Define amount of linear stack growth. */
#ifndef MRB_STACK_GROWTH
#define MRB_STACK_GROWTH 128

/* Maximum recursive depth. Should be set lower on memory constrained systems. */
#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
#define __SANITIZE_ADDRESS__

#ifndef MRB_CALL_LEVEL_MAX
#if defined(__SANITIZE_ADDRESS__)
#define MRB_CALL_LEVEL_MAX 128
#define MRB_CALL_LEVEL_MAX 512

/* Maximum stack depth. Should be set lower on memory constrained systems.
   The value below allows about 60000 recursive calls in the simplest case. */
#define MRB_STACK_MAX (0x40000 - MRB_STACK_GROWTH)

#ifndef MRB_GC_FIXED_ARENA
mrb_gc_arena_shrink(mrb_state *mrb, int idx)
  mrb_gc *gc = &mrb->gc;
  int capa = gc->arena_capa;
  if (capa < MRB_GC_ARENA_SIZE) {
    capa = MRB_GC_ARENA_SIZE;
  if (capa != gc->arena_capa) {
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
    gc->arena_capa = capa;
#define mrb_gc_arena_shrink(mrb,idx) mrb_gc_arena_restore(mrb,idx)

#define CALL_MAXARGS 15
#define CALL_VARARGS (CALL_MAXARGS<<4 | CALL_MAXARGS)
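/*
 * Added note (inferred from cipush() below, where ci->nk = (argc>>4) & 0xf):
 * the 16-bit argc word passed through the call machinery appears to pack the
 * positional-argument count in its low nibble and the keyword-argument count
 * in the next one, so CALL_VARARGS marks both as "packed/variadic" (15).
 * A hedged illustration with hypothetical names n and nk:
 *
 *   uint16_t packed = (uint16_t)((nk << 4) | n);
 */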
stack_clear(mrb_value *from, size_t count)
  while (count-- > 0) {
    SET_NIL_VALUE(*from);

stack_copy(mrb_value *dst, const mrb_value *src, size_t size)
  memcpy(dst, src, sizeof(mrb_value)*size);

stack_init(mrb_state *mrb)
  struct mrb_context *c = mrb->c;

  /* mrb_assert(mrb->stack == NULL); */
  c->stbase = (mrb_value*)mrb_calloc(mrb, STACK_INIT_SIZE, sizeof(mrb_value));
  c->stend = c->stbase + STACK_INIT_SIZE;

  /* mrb_assert(ci == NULL); */
  c->cibase = (mrb_callinfo*)mrb_calloc(mrb, CALLINFO_INIT_SIZE, sizeof(mrb_callinfo));
  c->ciend = c->cibase + CALLINFO_INIT_SIZE;

  c->ci->u.target_class = mrb->object_class;
  c->ci->stack = c->stbase;

envadjust(mrb_state *mrb, mrb_value *oldbase, mrb_value *newbase, size_t oldsize)
  mrb_callinfo *ci = mrb->c->cibase;
  ptrdiff_t delta = newbase - oldbase;

  if (delta == 0) return;
  while (ci <= mrb->c->ci) {
    struct REnv *e = mrb_vm_ci_env(ci);
    if (e && MRB_ENV_ONSTACK_P(e) &&
        (st = e->stack) && (size_t)(st - oldbase) < oldsize) {
    if (ci->proc && MRB_PROC_ENV_P(ci->proc) && e != MRB_PROC_ENV(ci->proc)) {
      e = MRB_PROC_ENV(ci->proc);
      if (e && MRB_ENV_ONSTACK_P(e) &&
          (st = e->stack) && (size_t)(st - oldbase) < oldsize) {

/** def rec; $deep =+ 1; if $deep > 1000; return 0; end; rec; end **/

stack_extend_alloc(mrb_state *mrb, mrb_int room)
  mrb_value *oldbase = mrb->c->stbase;
  size_t oldsize = mrb->c->stend - mrb->c->stbase;
  size_t size = oldsize;
  size_t off = mrb->c->ci->stack ? mrb->c->stend - mrb->c->ci->stack : 0;

  if (off > size) size = off;
#ifdef MRB_STACK_EXTEND_DOUBLING
  if ((size_t)room <= size)
  /* Use linear stack growth.
     It is slightly slower than doubling the stack space,
     but it saves memory on small devices. */
  if (room <= MRB_STACK_GROWTH)
    size += MRB_STACK_GROWTH;

  newstack = (mrb_value*)mrb_realloc(mrb, mrb->c->stbase, sizeof(mrb_value) * size);
  stack_clear(&(newstack[oldsize]), size - oldsize);
  envadjust(mrb, oldbase, newstack, oldsize);
  mrb->c->stbase = newstack;
  mrb->c->stend = mrb->c->stbase + size;

  /* Raise an exception if the new stack size will be too large,
     to prevent infinite recursion. However, do this only after resizing the stack, so mrb_raise has stack space to work with. */
  if (size > MRB_STACK_MAX) {
    mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));

stack_extend(mrb_state *mrb, mrb_int room)
  if (!mrb->c->ci->stack || mrb->c->ci->stack + room >= mrb->c->stend) {
    stack_extend_alloc(mrb, room);

mrb_stack_extend(mrb_state *mrb, mrb_int room)
  stack_extend(mrb, room);
stack_extend_adjust(mrb_state *mrb, mrb_int room, const mrb_value **argp)
  const struct mrb_context *c = mrb->c;
  ptrdiff_t voff = *argp - c->stbase;

  if (voff < 0 || voff >= c->stend - c->stbase) {
    stack_extend(mrb, room);
    stack_extend(mrb, room);
    *argp = c->stbase + voff;

static inline struct REnv*
uvenv(mrb_state *mrb, mrb_int up)
  const struct RProc *proc = mrb->c->ci->proc;
  if (!proc) return NULL;
  e = MRB_PROC_ENV(proc);
  if (e) return e;              /* proc has enclosed env */
  mrb_callinfo *ci = mrb->c->ci;
  mrb_callinfo *cb = mrb->c->cibase;
  if (ci->proc == proc) {
    return mrb_vm_ci_env(ci);

static inline const struct RProc*
top_proc(mrb_state *mrb, const struct RProc *proc)
  while (proc->upper) {
    if (MRB_PROC_SCOPE_P(proc) || MRB_PROC_STRICT_P(proc))

#define CI_PROC_SET(ci, p) do {\
  mrb_assert(!p || !MRB_PROC_ALIAS_P(p));\
  ci->pc = (p && !MRB_PROC_CFUNC_P(p) && p->body.irep) ? p->body.irep->iseq : NULL;\

mrb_vm_ci_proc_set(mrb_callinfo *ci, const struct RProc *p)

#define CI_TARGET_CLASS(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env->c : (ci)->u.target_class)

mrb_vm_ci_target_class(const mrb_callinfo *ci)
  return CI_TARGET_CLASS(ci);

mrb_vm_ci_target_class_set(mrb_callinfo *ci, struct RClass *tc)
  struct REnv *e = ci->u.env;
  if (e && e->tt == MRB_TT_ENV) {
    ci->u.target_class = tc;

#define CI_ENV(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env : NULL)

mrb_vm_ci_env(const mrb_callinfo *ci)

ci_env_set(mrb_callinfo *ci, struct REnv *e)
  if (ci->u.env->tt == MRB_TT_ENV) {
    ci->u.target_class = ci->u.env->c;
    e->c = ci->u.target_class;

mrb_vm_ci_env_set(mrb_callinfo *ci, struct REnv *e)

mrb_vm_ci_env_clear(mrb_state *mrb, mrb_callinfo *ci)
  struct REnv *e = ci->u.env;
  if (e && e->tt == MRB_TT_ENV) {
    ci->u.target_class = e->c;
    mrb_env_unshare(mrb, e, FALSE);

#define CINFO_NONE    0 // called method from mruby VM (without C functions)
#define CINFO_SKIP    1 // ignited mruby VM from C
#define CINFO_DIRECT  2 // called method from C
#define CINFO_RESUMED 3 // resumed by `Fiber.yield` (probably the main call is `mrb_fiber_resume()`)

#define BLK_PTR(b) ((mrb_proc_p(b)) ? mrb_proc_ptr(b) : NULL)

static inline mrb_callinfo*
cipush(mrb_state *mrb, mrb_int push_stacks, uint8_t cci, struct RClass *target_class,
       const struct RProc *proc, struct RProc *blk, mrb_sym mid, uint16_t argc)
  struct mrb_context *c = mrb->c;
  mrb_callinfo *ci = c->ci;

  if (ci + 1 == c->ciend) {
    ptrdiff_t size = ci - c->cibase;
    if (size > MRB_CALL_LEVEL_MAX) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
    c->cibase = (mrb_callinfo*)mrb_realloc(mrb, c->cibase, sizeof(mrb_callinfo)*size*2);
    c->ci = c->cibase + size;
    c->ciend = c->cibase + size * 2;
  CI_PROC_SET(ci, proc);
  ci->stack = ci[-1].stack + push_stacks;
  ci->nk = (argc>>4) & 0xf;
  ci->u.target_class = target_class;

mrb_env_unshare(mrb_state *mrb, struct REnv *e, mrb_bool noraise)
  if (e == NULL) return TRUE;
  if (!MRB_ENV_ONSTACK_P(e)) return TRUE;
  if (e->cxt != mrb->c) return TRUE;
  if (e == CI_ENV(mrb->c->cibase)) return TRUE; /* for mirb */

  size_t len = (size_t)MRB_ENV_LEN(e);
  size_t live = mrb->gc.live;
  mrb_value *p = (mrb_value*)mrb_malloc_simple(mrb, sizeof(mrb_value)*len);
  if (live != mrb->gc.live && mrb_object_dead_p(mrb, (struct RBasic*)e)) {
    // The e object is now subject to GC inside mrb_malloc_simple().
    // Moreover, if NULL is returned due to mrb_malloc_simple() failure, simply ignore it.
    stack_copy(p, e->stack, len);
    mrb_write_barrier(mrb, (struct RBasic*)e);
  MRB_ENV_SET_LEN(e, 0);
  MRB_ENV_SET_BIDX(e, 0);
  mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));

static inline mrb_callinfo*
cipop(mrb_state *mrb)
  struct mrb_context *c = mrb->c;
  mrb_callinfo *ci = c->ci;
  struct REnv *env = CI_ENV(ci);

  ci_env_set(ci, NULL); // make possible to free env by GC if not needed
  struct RProc *b = ci->blk;
  if (b && !mrb_object_dead_p(mrb, (struct RBasic*)b) && b->tt == MRB_TT_PROC &&
      !MRB_PROC_STRICT_P(b) && MRB_PROC_ENV(b) == CI_ENV(&ci[-1])) {
    b->flags |= MRB_PROC_ORPHAN;
  if (env && !mrb_env_unshare(mrb, env, TRUE)) {
    c->ci--; // exceptions are handled at the method caller; see #3087
    mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));

mrb_protect_error(mrb_state *mrb, mrb_protect_error_func *body, void *userdata, mrb_bool *error)
  struct mrb_jmpbuf *prev_jmp = mrb->jmp;
  struct mrb_jmpbuf c_jmp;
  mrb_value result = mrb_nil_value();
  int ai = mrb_gc_arena_save(mrb);
  const struct mrb_context *c = mrb->c;
  ptrdiff_t ci_index = c->ci - c->cibase;

  if (error) { *error = FALSE; }
    result = body(mrb, userdata);
    result = mrb_obj_value(mrb->exc);
    if (error) { *error = TRUE; }
    while (c->ci - c->cibase > ci_index) {
      // It was probably switched by mrb_fiber_resume().
      // Simply destroy all successive CINFO_DIRECTs once the fiber has been switched.
      while (c->ci > c->cibase && c->ci->cci == CINFO_DIRECT) {
  mrb_gc_arena_restore(mrb, ai);
  mrb_gc_protect(mrb, result);

void mrb_exc_set(mrb_state *mrb, mrb_value exc);
static mrb_value mrb_run(mrb_state *mrb, const struct RProc* proc, mrb_value self);

#ifndef MRB_FUNCALL_ARGC_MAX
#define MRB_FUNCALL_ARGC_MAX 16

mrb_funcall(mrb_state *mrb, mrb_value self, const char *name, mrb_int argc, ...)
  mrb_value argv[MRB_FUNCALL_ARGC_MAX];
  mrb_sym mid = mrb_intern_cstr(mrb, name);

  if (argc > MRB_FUNCALL_ARGC_MAX) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
  for (mrb_int i = 0; i < argc; i++) {
    argv[i] = va_arg(ap, mrb_value);
  return mrb_funcall_argv(mrb, self, mid, argc, argv);
mrb_funcall_id(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, ...)
  mrb_value argv[MRB_FUNCALL_ARGC_MAX];

  if (argc > MRB_FUNCALL_ARGC_MAX) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
  for (mrb_int i = 0; i < argc; i++) {
    argv[i] = va_arg(ap, mrb_value);
  return mrb_funcall_argv(mrb, self, mid, argc, argv);

mrb_ci_kidx(const mrb_callinfo *ci)
  if (ci->nk == 0) return -1;
  return (ci->n == CALL_MAXARGS) ? 2 : ci->n + 1;
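/*
 * Register layout reminder (added comment): within a callinfo's stack frame,
 * regs[0] is self, regs[1..n] hold the positional arguments, the packed
 * keyword hash (if any) follows them, and the block sits right after that.
 * That is why mrb_ci_kidx() answers n+1 (or 2 when the arguments are packed
 * into a single array, n == CALL_MAXARGS) and mrb_bidx() below answers one
 * slot past the keyword position.
 */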
static inline mrb_int
mrb_bidx(uint8_t n, uint8_t k)
  return n + 1;                 /* self + args + kargs */

static inline mrb_int
ci_bidx(mrb_callinfo *ci)
  return mrb_bidx(ci->n, ci->nk);

mrb_ci_bidx(mrb_callinfo *ci)

mrb_ci_nregs(mrb_callinfo *ci)
  const struct RProc *p;
  mrb_int nregs = ci_bidx(ci) + 1; /* self + args + kargs + blk */
  if (p && !MRB_PROC_CFUNC_P(p) && p->body.irep && p->body.irep->nregs > nregs) {
    return p->body.irep->nregs;

mrb_value mrb_obj_missing(mrb_state *mrb, mrb_value mod);

prepare_missing(mrb_state *mrb, mrb_callinfo *ci, mrb_value recv, mrb_sym mid, mrb_value blk, mrb_bool super)
  mrb_sym missing = MRB_SYM(method_missing);
  mrb_value *argv = &ci->stack[1];

  /* pack positional arguments */
  if (ci->n == 15) args = argv[0];
  else args = mrb_ary_new_from_values(mrb, ci->n, argv);

  if (mrb_func_basic_p(mrb, recv, missing, mrb_obj_missing)) {
    if (super) mrb_no_method_error(mrb, mid, args, "no superclass method '%n'", mid);
    else mrb_method_missing(mrb, mid, recv, args);
  if (mid != missing) {
    ci->u.target_class = mrb_class(mrb, recv);
  m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, missing);
  if (MRB_METHOD_UNDEF_P(m)) goto method_missing; /* just in case */
  stack_extend(mrb, 4);
  argv = &ci->stack[1];         /* maybe reallocated */
    mrb_assert(ci->nk == 15);
    argv[1] = argv[ci->n];
  ci->n = CALL_MAXARGS;
  /* ci->nk is already set to zero or CALL_MAXARGS */
  mrb_ary_unshift(mrb, args, mrb_symbol_value(mid));

funcall_args_capture(mrb_state *mrb, int stoff, mrb_int argc, const mrb_value *argv, mrb_value block, mrb_callinfo *ci)
  if (argc < 0 || argc > INT32_MAX) {
    mrb_raisef(mrb, E_ARGUMENT_ERROR, "negative or too big argc for funcall (%i)", argc);
  ci->nk = 0;                   /* funcall does not support keyword arguments */
  if (argc < CALL_MAXARGS) {
    mrb_int extends = stoff + argc + 2 /* self + block */;
    stack_extend_adjust(mrb, extends, &argv);
    mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
    stack_copy(args, argv, argc);
    ci->n = (uint8_t)argc;
    int extends = stoff + 3 /* self + splat + block */;
    stack_extend_adjust(mrb, extends, &argv);
    mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
    args[0] = mrb_ary_new_from_values(mrb, argc, argv);
    ci->n = CALL_MAXARGS;

static inline mrb_value
ensure_block(mrb_state *mrb, mrb_value blk)
  if (!mrb_nil_p(blk) && !mrb_proc_p(blk)) {
    blk = mrb_type_convert(mrb, blk, MRB_TT_PROC, MRB_SYM(to_proc));
    /* The stack might have been reallocated during mrb_type_convert(), see #3622 */

mrb_funcall_with_block(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv, mrb_value blk)
  int ai = mrb_gc_arena_save(mrb);
    struct mrb_jmpbuf c_jmp;
    ptrdiff_t nth_ci = mrb->c->ci - mrb->c->cibase;
      val = mrb_funcall_with_block(mrb, self, mid, argc, argv, blk);
    MRB_CATCH(&c_jmp) { /* error */
      while (nth_ci < (mrb->c->ci - mrb->c->cibase)) {
      val = mrb_obj_value(mrb->exc);
    mrb_callinfo *ci = mrb->c->ci;
    mrb_int n = mrb_ci_nregs(ci);
    if (!mrb->c->stbase) {
    if (ci - mrb->c->cibase > MRB_CALL_LEVEL_MAX) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
    blk = ensure_block(mrb, blk);
    ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, 0);
    funcall_args_capture(mrb, 0, argc, argv, blk, ci);
    ci->u.target_class = mrb_class(mrb, self);
    m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
    if (MRB_METHOD_UNDEF_P(m)) {
      m = prepare_missing(mrb, ci, self, mid, mrb_nil_value(), FALSE);
    ci->proc = MRB_METHOD_PROC_P(m) ? MRB_METHOD_PROC(m) : NULL;
    if (MRB_METHOD_CFUNC_P(m)) {
      val = MRB_METHOD_CFUNC(m)(mrb, self);
      if (MRB_PROC_ALIAS_P(ci->proc)) {
        ci->mid = ci->proc->body.mid;
        ci->proc = ci->proc->upper;
      ci->cci = CINFO_SKIP;
      val = mrb_run(mrb, ci->proc, self);
  mrb_gc_arena_restore(mrb, ai);
  mrb_gc_protect(mrb, val);

mrb_funcall_argv(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv)
  return mrb_funcall_with_block(mrb, self, mid, argc, argv, mrb_nil_value());

check_method_noarg(mrb_state *mrb, const mrb_callinfo *ci)
  mrb_int argc = ci->n == CALL_MAXARGS ? RARRAY_LEN(ci->stack[1]) : ci->n;
    mrb_value kdict = ci->stack[mrb_ci_kidx(ci)];
    if (!(mrb_hash_p(kdict) && mrb_hash_empty_p(mrb, kdict))) {
    mrb_argnum_error(mrb, argc, 0, 0);

exec_irep(mrb_state *mrb, mrb_value self, const struct RProc *p)
  mrb_callinfo *ci = mrb->c->ci;
  if (MRB_PROC_ALIAS_P(p)) {
    ci->mid = p->body.mid;
  if (MRB_PROC_CFUNC_P(p)) {
    if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
      check_method_noarg(mrb, ci);
    return MRB_PROC_CFUNC(p)(mrb, self);
  nregs = p->body.irep->nregs;
  keep = ci_bidx(ci)+1;
    stack_extend(mrb, keep);
    stack_extend(mrb, nregs);
    stack_clear(ci->stack+keep, nregs-keep);
  cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);

mrb_exec_irep(mrb_state *mrb, mrb_value self, struct RProc *p)
  mrb_callinfo *ci = mrb->c->ci;
  if (ci->cci == CINFO_NONE) {
    return exec_irep(mrb, self, p);
  if (MRB_PROC_CFUNC_P(p)) {
    if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
      check_method_noarg(mrb, ci);
    cipush(mrb, 0, CINFO_DIRECT, CI_TARGET_CLASS(ci), p, NULL, ci->mid, ci->n|(ci->nk<<4));
    ret = MRB_PROC_CFUNC(p)(mrb, self);
    mrb_int keep = ci_bidx(ci) + 1; /* receiver + block */
    ret = mrb_top_run(mrb, p, self, keep);
  if (mrb->exc && mrb->jmp) {
    mrb_exc_raise(mrb, mrb_obj_value(mrb->exc));

/*
 *     obj.send(symbol [, args...])      -> obj
 *     obj.__send__(symbol [, args...])  -> obj
 *
 *  Invokes the method identified by _symbol_, passing it any
 *  arguments specified. You can use <code>__send__</code> if the name
 *  +send+ clashes with an existing method in _obj_.
 *
 *       "Hello " + args.join(' ')
 *
 *     k.send :hello, "gentle", "readers"   #=> "Hello gentle readers"
 */
mrb_f_send(mrb_state *mrb, mrb_value self)
  mrb_value block, *regs;
  mrb_callinfo *ci = mrb->c->ci;
  if (ci->cci > CINFO_NONE) {
    const mrb_value *argv;
    mrb_get_args(mrb, "n*&", &name, &argv, &argc, &block);
    return mrb_funcall_with_block(mrb, self, name, argc, argv, block);
  regs = mrb->c->ci->stack+1;
    mrb_argnum_error(mrb, 0, 1, -1);
    if (RARRAY_LEN(regs[0]) == 0) goto argnum_error;
    name = mrb_obj_to_sym(mrb, RARRAY_PTR(regs[0])[0]);
    name = mrb_obj_to_sym(mrb, regs[0]);
  c = mrb_class(mrb, self);
  m = mrb_vm_find_method(mrb, c, &c, name);
  if (MRB_METHOD_UNDEF_P(m)) {  /* call method_mising */
  ci->u.target_class = c;
  /* remove first symbol from arguments */
  if (n == 15) {                /* variable length arguments */
    regs[0] = mrb_ary_subseq(mrb, regs[0], 1, RARRAY_LEN(regs[0]) - 1);
    for (int i=0; i<n; i++) {
    regs[n] = regs[n+1];        /* copy kdict or block */
      regs[n+1] = regs[n+2];    /* copy block */
  const struct RProc *p;
  if (MRB_METHOD_PROC_P(m)) {
    p = MRB_METHOD_PROC(m);
    if (MRB_PROC_ALIAS_P(p)) {
      ci->mid = p->body.mid;
  if (MRB_METHOD_CFUNC_P(m)) {
    if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
      check_method_noarg(mrb, ci);
    return MRB_METHOD_CFUNC(m)(mrb, self);
  return exec_irep(mrb, self, p);

check_block(mrb_state *mrb, mrb_value blk)
  if (mrb_nil_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
  if (!mrb_proc_p(blk)) {
    mrb_raise(mrb, E_TYPE_ERROR, "not a block");

eval_under(mrb_state *mrb, mrb_value self, mrb_value blk, struct RClass *c)
  check_block(mrb, blk);
  if (ci->cci == CINFO_DIRECT) {
    return mrb_yield_with_class(mrb, blk, 1, &self, self, c);
  ci->u.target_class = c;
  p = mrb_proc_ptr(blk);
  /* just in case irep is NULL; #6065 */
  if (p->body.irep == NULL) return mrb_nil_value();
  ci->mid = ci[-1].mid;
  if (MRB_PROC_CFUNC_P(p)) {
    stack_extend(mrb, 4);
    mrb->c->ci->stack[0] = self;
    mrb->c->ci->stack[1] = self;
    mrb->c->ci->stack[2] = mrb_nil_value();
    return MRB_PROC_CFUNC(p)(mrb, self);
  nregs = p->body.irep->nregs;
  if (nregs < 4) nregs = 4;
  stack_extend(mrb, nregs);
  mrb->c->ci->stack[0] = self;
  mrb->c->ci->stack[1] = self;
  stack_clear(mrb->c->ci->stack+2, nregs-2);
  ci = cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);

/*
 *     mod.class_eval {| | block }  -> obj
 *     mod.module_eval {| | block } -> obj
 *
 *  Evaluates block in the context of _mod_. This can
 *  be used to add methods to a class. <code>module_eval</code> returns
 *  the result of evaluating its argument.
 */
mrb_mod_module_eval(mrb_state *mrb, mrb_value mod)
  if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
    mrb_raise(mrb, E_NOTIMP_ERROR, "module_eval/class_eval with string not implemented");
  return eval_under(mrb, mod, b, mrb_class_ptr(mod));

/*
 *     obj.instance_eval {| | block }   -> obj
 *
 *  Evaluates the given block,within the context of the receiver (_obj_).
 *  In order to set the context, the variable +self+ is set to _obj_ while
 *  the code is executing, giving the code access to _obj_'s
 *  instance variables. In the version of <code>instance_eval</code>
 *  that takes a +String+, the optional second and third
 *  parameters supply a filename and starting line number that are used
 *  when reporting compilation errors.
 *
 *     class KlassWithSecret
 *     k = KlassWithSecret.new
 *     k.instance_eval { @secret }   #=> 99
 */
mrb_obj_instance_eval(mrb_state *mrb, mrb_value self)
  if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
    mrb_raise(mrb, E_NOTIMP_ERROR, "instance_eval with string not implemented");
  return eval_under(mrb, self, b, mrb_singleton_class_ptr(mrb, self));

mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c)
  check_block(mrb, b);
  n = mrb_ci_nregs(ci);
  p = mrb_proc_ptr(b);
  if (MRB_PROC_ENV_P(p)) {
    mid = p->e.env->mid;
  ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, NULL, mid, 0);
  funcall_args_capture(mrb, 0, argc, argv, mrb_nil_value(), ci);
  ci->u.target_class = c;
  if (MRB_PROC_CFUNC_P(p)) {
    ci->stack[0] = self;
    val = MRB_PROC_CFUNC(p)(mrb, self);
    ci->cci = CINFO_SKIP;
    val = mrb_run(mrb, p, self);

mrb_yield_argv(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv)
  struct RProc *p = mrb_proc_ptr(b);
  mrb_value self = mrb_proc_get_self(mrb, p, &tc);
  return mrb_yield_with_class(mrb, b, argc, argv, self, tc);

mrb_yield(mrb_state *mrb, mrb_value b, mrb_value arg)
  struct RProc *p = mrb_proc_ptr(b);
  mrb_value self = mrb_proc_get_self(mrb, p, &tc);
  return mrb_yield_with_class(mrb, b, 1, &arg, self, tc);
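/*
 * Usage sketch (added comment, not in the original source): a C-implemented
 * method that received a block can invoke it through mrb_yield()/
 * mrb_yield_argv(), which resolve the proc's self and target class and then
 * delegate to mrb_yield_with_class() above, e.g.:
 *
 *   mrb_value blk;
 *   mrb_get_args(mrb, "&", &blk);             // fetch the block (may be nil)
 *   mrb_value r = mrb_yield(mrb, blk, mrb_fixnum_value(42));
 */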
mrb_yield_cont(mrb_state *mrb, mrb_value b, mrb_value self, mrb_int argc, const mrb_value *argv)
  check_block(mrb, b);
  p = mrb_proc_ptr(b);
  stack_extend_adjust(mrb, 4, &argv);
  mrb->c->ci->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
  mrb->c->ci->stack[2] = mrb_nil_value();
  mrb->c->ci->stack[3] = mrb_nil_value();
  return exec_irep(mrb, self, p);

#define RBREAK_TAG_FOREACH(f) \
  f(RBREAK_TAG_BREAK, 0) \
  f(RBREAK_TAG_JUMP, 1) \
  f(RBREAK_TAG_STOP, 2)

#define RBREAK_TAG_DEFINE(tag, i) tag = i,
RBREAK_TAG_FOREACH(RBREAK_TAG_DEFINE)
#undef RBREAK_TAG_DEFINE

#define RBREAK_TAG_BIT          3
#define RBREAK_TAG_BIT_OFF      8
#define RBREAK_TAG_MASK         (~(~UINT32_C(0) << RBREAK_TAG_BIT))

static inline uint32_t
mrb_break_tag_get(struct RBreak *brk)
  return (brk->flags >> RBREAK_TAG_BIT_OFF) & RBREAK_TAG_MASK;

mrb_break_tag_set(struct RBreak *brk, uint32_t tag)
  brk->flags &= ~(RBREAK_TAG_MASK << RBREAK_TAG_BIT_OFF);
  brk->flags |= (tag & RBREAK_TAG_MASK) << RBREAK_TAG_BIT_OFF;

static struct RBreak*
break_new(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
  mrb_assert((size_t)(return_ci - mrb->c->cibase) <= (size_t)(mrb->c->ci - mrb->c->cibase));
  struct RBreak *brk = MRB_OBJ_ALLOC(mrb, MRB_TT_BREAK, NULL);
  brk->ci_break_index = return_ci - mrb->c->cibase;
  mrb_break_value_set(brk, val);
  mrb_break_tag_set(brk, tag);

#define MRB_CATCH_FILTER_RESCUE (UINT32_C(1) << MRB_CATCH_RESCUE)
#define MRB_CATCH_FILTER_ENSURE (UINT32_C(1) << MRB_CATCH_ENSURE)
#define MRB_CATCH_FILTER_ALL    (MRB_CATCH_FILTER_RESCUE | MRB_CATCH_FILTER_ENSURE)

static const struct mrb_irep_catch_handler*
catch_handler_find(const mrb_irep *irep, const mrb_code *pc, uint32_t filter)
  const struct mrb_irep_catch_handler *e;

/* The comparison operators use `>` and `<=` because pc already points to the next instruction */
#define catch_cover_p(pc, beg, end) ((pc) > (ptrdiff_t)(beg) && (pc) <= (ptrdiff_t)(end))

  mrb_assert(irep && irep->clen > 0);
  xpc = pc - irep->iseq;
  /* If it retry at the top level, pc will be 0, so check with -1 as the start position */
  mrb_assert(catch_cover_p(xpc, -1, irep->ilen));
  if (!catch_cover_p(xpc, -1, irep->ilen)) return NULL;

  /* Currently uses a simple linear search to avoid processing complexity. */
  e = mrb_irep_catch_handler_table(irep) + cnt - 1;
  for (; cnt > 0; cnt--, e--) {
    if (((UINT32_C(1) << e->type) & filter) &&
        catch_cover_p(xpc, mrb_irep_catch_handler_unpack(e->begin), mrb_irep_catch_handler_unpack(e->end))) {

#undef catch_cover_p

  LOCALJUMP_ERROR_RETURN = 0,
  LOCALJUMP_ERROR_BREAK = 1,
  LOCALJUMP_ERROR_YIELD = 2
} localjump_error_kind;

localjump_error(mrb_state *mrb, localjump_error_kind kind)
  char kind_str[3][7] = { "return", "break", "yield" };
  char kind_str_len[] = { 6, 5, 5 };
  static const char lead[] = "unexpected ";

  msg = mrb_str_new_capa(mrb, sizeof(lead) + 7);
  mrb_str_cat(mrb, msg, lead, sizeof(lead) - 1);
  mrb_str_cat(mrb, msg, kind_str[kind], kind_str_len[kind]);
  exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg);
  mrb_exc_set(mrb, exc);

#define RAISE_EXC(mrb, exc) do { \
  mrb_value exc_value = (exc); \
  mrb_exc_set(mrb, exc_value); \

#define RAISE_LIT(mrb, c, str)          RAISE_EXC(mrb, mrb_exc_new_lit(mrb, c, str))
#define RAISE_FORMAT(mrb, c, fmt, ...)  RAISE_EXC(mrb, mrb_exc_new_str(mrb, c, mrb_format(mrb, fmt, __VA_ARGS__)))

argnum_error(mrb_state *mrb, mrb_int num)
  mrb_int argc = mrb->c->ci->n;
    mrb_value args = mrb->c->ci->stack[1];
    if (mrb_array_p(args)) {
      argc = RARRAY_LEN(args);
  if (argc == 0 && mrb->c->ci->nk != 0 && !mrb_hash_empty_p(mrb, mrb->c->ci->stack[1])) {
  str = mrb_format(mrb, "wrong number of arguments (given %i, expected %i)", argc, num);
  exc = mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str);
  mrb_exc_set(mrb, exc);

break_tag_p(struct RBreak *brk, uint32_t tag)
  return (brk != NULL && brk->tt == MRB_TT_BREAK) ? TRUE : FALSE;

prepare_tagged_break(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
  if (break_tag_p((struct RBreak*)mrb->exc, tag)) {
    mrb_break_tag_set((struct RBreak*)mrb->exc, tag);
    mrb->exc = (struct RObject*)break_new(mrb, tag, return_ci, val);

#define THROW_TAGGED_BREAK(mrb, tag, return_ci, val) \
    prepare_tagged_break(mrb, tag, return_ci, val); \
    goto L_CATCH_TAGGED_BREAK; \

#define UNWIND_ENSURE(mrb, ci, pc, tag, return_ci, val) \
  if ((proc = (ci)->proc) && !MRB_PROC_CFUNC_P(proc) && (irep = proc->body.irep) && irep->clen > 0 && \
      (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) { \
    THROW_TAGGED_BREAK(mrb, tag, return_ci, val); \

/*
 *  CHECKPOINT_RESTORE(tag) {
 *    This part is executed when jumping by the same "tag" of RBreak (it is not executed the first time).
 *    Write the code required (initialization of variables, etc.) for the subsequent processing.
 *  CHECKPOINT_MAIN(tag) {
 *    This part is always executed.
 *  CHECKPOINT_END(tag);
 *
 *  // Jump to CHECKPOINT_RESTORE with the same "tag".
 *  goto CHECKPOINT_LABEL_MAKE(tag);
 */

#define CHECKPOINT_LABEL_MAKE(tag) L_CHECKPOINT_ ## tag

#define CHECKPOINT_RESTORE(tag) \
  CHECKPOINT_LABEL_MAKE(tag): \

#define CHECKPOINT_MAIN(tag) \

#define CHECKPOINT_END(tag) \

#ifdef MRB_USE_DEBUG_HOOK
#define CODE_FETCH_HOOK(mrb, irep, pc, regs) if ((mrb)->code_fetch_hook) (mrb)->code_fetch_hook((mrb), (irep), (pc), (regs));
#define CODE_FETCH_HOOK(mrb, irep, pc, regs)

#ifdef MRB_BYTECODE_DECODE_OPTION
#define BYTECODE_DECODER(x) ((mrb)->bytecode_decoder)?(mrb)->bytecode_decoder((mrb), (x)):(x)
#define BYTECODE_DECODER(x) (x)

#ifndef MRB_USE_VM_SWITCH_DISPATCH
#if !defined __GNUC__ && !defined __clang__ && !defined __INTEL_COMPILER
#define MRB_USE_VM_SWITCH_DISPATCH
#endif /* ifndef MRB_USE_VM_SWITCH_DISPATCH */

#ifdef MRB_USE_VM_SWITCH_DISPATCH

#define INIT_DISPATCH for (;;) { insn = BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); switch (insn) {
#define CASE(insn,ops) case insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
#define NEXT goto L_END_DISPATCH
#define END_DISPATCH L_END_DISPATCH:;}}

#define INIT_DISPATCH JUMP; return mrb_nil_value();
#define CASE(insn,ops) L_ ## insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
#define NEXT insn=BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[insn]
#define END_DISPATCH

mrb_vm_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
  const mrb_irep *irep = proc->body.irep;
  struct mrb_context *c = mrb->c;
  ptrdiff_t cioff = c->ci - c->cibase;
  mrb_int nregs = irep->nregs;

  if (stack_keep > nregs)
  struct REnv *e = CI_ENV(mrb->c->ci);
  if (e && (stack_keep == 0 || irep->nlocals < MRB_ENV_LEN(e))) {
    ci_env_set(mrb->c->ci, NULL);
    mrb_env_unshare(mrb, e, FALSE);
  stack_extend(mrb, nregs);
  stack_clear(c->ci->stack + stack_keep, nregs - stack_keep);
  c->ci->stack[0] = self;
  result = mrb_vm_exec(mrb, proc, irep->iseq);
    mrb_write_barrier(mrb, (struct RBasic*)mrb->c->fib);
  else if (c->ci - c->cibase > cioff) {
    c->ci = c->cibase + cioff;
*
1346 check_target_class(mrb_state
*mrb
)
1348 struct RClass
*target
= CI_TARGET_CLASS(mrb
->c
->ci
);
1350 mrb_raise(mrb
, E_TYPE_ERROR
, "no class/module to add method");
1355 #define regs (mrb->c->ci->stack)
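/*
 * Added note: "regs" is not a local array but an alias for the current
 * callinfo's stack slice, so every cipush()/cipop() or stack_extend() call
 * re-targets it automatically; opcode handlers therefore index registers as
 * regs[a], regs[a+1], ... relative to the running frame.
 */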
hash_new_from_regs(mrb_state *mrb, mrb_int argc, mrb_int idx)
  mrb_value hash = mrb_hash_new_capa(mrb, argc);
    mrb_hash_set(mrb, hash, regs[idx+0], regs[idx+1]);

#define ary_new_from_regs(mrb, argc, idx) mrb_ary_new_from_values(mrb, (argc), &regs[idx]);
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
  /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
  const mrb_irep *irep = proc->body.irep;
  const mrb_pool_value *pool = irep->pool;
  const mrb_sym *syms = irep->syms;
  int ai = mrb_gc_arena_save(mrb);
  struct mrb_jmpbuf *prev_jmp = mrb->jmp;
  struct mrb_jmpbuf c_jmp;
  const struct mrb_irep_catch_handler *ch;

#ifndef MRB_USE_VM_SWITCH_DISPATCH
  static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"

  mrb_bool exc_catched = FALSE;
  exc_catched = FALSE;
  mrb_gc_arena_restore(mrb, ai);
  if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
  CI_PROC_SET(mrb->c->ci, proc);
    CASE(OP_LOADL, BB) {
      switch (pool[b].tt) {   /* number */
        regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
#if defined(MRB_INT64)
        regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
#if defined(MRB_64BIT)
        if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
          regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
        goto L_INT_OVERFLOW;
      case IREP_TT_BIGINT:
#ifdef MRB_USE_BIGINT
        const char *s = pool[b].u.str;
        regs[a] = mrb_bint_new_str(mrb, s+2, (uint8_t)s[0], s[1]);
        goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
        regs[a] = mrb_float_value(mrb, pool[b].u.f);
        /* should not happen (tt:string) */
        regs[a] = mrb_nil_value();

    CASE(OP_LOADI, BB) {
      SET_FIXNUM_VALUE(regs[a], b);

    CASE(OP_LOADINEG, BB) {
      SET_FIXNUM_VALUE(regs[a], -b);

    CASE(OP_LOADI__1,B) goto L_LOADI;
    CASE(OP_LOADI_0,B) goto L_LOADI;
    CASE(OP_LOADI_1,B) goto L_LOADI;
    CASE(OP_LOADI_2,B) goto L_LOADI;
    CASE(OP_LOADI_3,B) goto L_LOADI;
    CASE(OP_LOADI_4,B) goto L_LOADI;
    CASE(OP_LOADI_5,B) goto L_LOADI;
    CASE(OP_LOADI_6,B) goto L_LOADI;
    CASE(OP_LOADI_7, B) {
      SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);

    CASE(OP_LOADI16, BS) {
      SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);

    CASE(OP_LOADI32, BSS) {
      SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));

    CASE(OP_LOADSYM, BB) {
      SET_SYM_VALUE(regs[a], syms[b]);

    CASE(OP_LOADNIL, B) {
      SET_NIL_VALUE(regs[a]);

    CASE(OP_LOADSELF, B) {
      SET_TRUE_VALUE(regs[a]);
      SET_FALSE_VALUE(regs[a]);

    CASE(OP_GETGV, BB) {
      mrb_value val = mrb_gv_get(mrb, syms[b]);

    CASE(OP_SETGV, BB) {
      mrb_gv_set(mrb, syms[b], regs[a]);

    CASE(OP_GETSV, BB) {
      mrb_value val = mrb_vm_special_get(mrb, syms[b]);

    CASE(OP_SETSV, BB) {
      mrb_vm_special_set(mrb, syms[b], regs[a]);

    CASE(OP_GETIV, BB) {
      regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);

    CASE(OP_SETIV, BB) {
      mrb_iv_set(mrb, regs[0], syms[b], regs[a]);

    CASE(OP_GETCV, BB) {
      val = mrb_vm_cv_get(mrb, syms[b]);

    CASE(OP_SETCV, BB) {
      mrb_vm_cv_set(mrb, syms[b], regs[a]);

    CASE(OP_GETIDX, B) {
      mrb_value va = regs[a], vb = regs[a+1];
      switch (mrb_type(va)) {
        if (!mrb_integer_p(vb)) goto getidx_fallback;
        mrb_int idx = mrb_integer(vb);
        if (0 <= idx && idx < RARRAY_LEN(va)) {
          regs[a] = RARRAY_PTR(va)[idx];
          regs[a] = mrb_ary_entry(va, idx);
        va = mrb_hash_get(mrb, va, vb);
        switch (mrb_type(vb)) {
        case MRB_TT_INTEGER:
          va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
          goto getidx_fallback;
      mid = MRB_OPSYM(aref);

    CASE(OP_SETIDX, B) {
      mid = MRB_OPSYM(aset);
      SET_NIL_VALUE(regs[a+3]);

    CASE(OP_GETCONST, BB) {
      mrb_value v = mrb_vm_const_get(mrb, syms[b]);

    CASE(OP_SETCONST, BB) {
      mrb_vm_const_set(mrb, syms[b], regs[a]);

    CASE(OP_GETMCNST, BB) {
      mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);

    CASE(OP_SETMCNST, BB) {
      mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);

    CASE(OP_GETUPVAR, BBB) {
      struct REnv *e = uvenv(mrb, c);
      if (e && b < MRB_ENV_LEN(e)) {
        regs[a] = e->stack[b];
        regs[a] = mrb_nil_value();

    CASE(OP_SETUPVAR, BBB) {
      struct REnv *e = uvenv(mrb, c);
      if (b < MRB_ENV_LEN(e)) {
        e->stack[b] = regs[a];
        mrb_write_barrier(mrb, (struct RBasic*)e);
    CASE(OP_JMPIF, BS) {
      if (mrb_test(regs[a])) {

    CASE(OP_JMPNOT, BS) {
      if (!mrb_test(regs[a])) {

    CASE(OP_JMPNIL, BS) {
      if (mrb_nil_p(regs[a])) {

      a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
      CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
        struct RBreak *brk = (struct RBreak*)mrb->exc;
        mrb_value target = mrb_break_value_get(brk);
        mrb_assert(mrb_integer_p(target));
        a = (uint32_t)mrb_integer(target);
        mrb_assert(a >= 0 && a < irep->ilen);
      CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
        if (irep->clen > 0 &&
            (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) {
          /* avoiding a jump from a catch handler into the same handler */
          if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
            THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, mrb->c->ci, mrb_fixnum_value(a));
      CHECKPOINT_END(RBREAK_TAG_JUMP);
      mrb->exc = NULL; /* clear break object */
      pc = irep->iseq + a;

    CASE(OP_EXCEPT, B) {
      if (mrb->exc == NULL) {
        exc = mrb_nil_value();
      switch (mrb->exc->tt) {
      case MRB_TT_EXCEPTION:
        exc = mrb_obj_value(mrb->exc);
        mrb_assert(!"bad mrb_type");
        exc = mrb_nil_value();

    CASE(OP_RESCUE, BB) {
      mrb_value exc = regs[a];  /* exc on stack */
      mrb_value e = regs[b];
      switch (mrb_type(e)) {
        RAISE_LIT(mrb, E_TYPE_ERROR, "class or module required for rescue clause");
      ec = mrb_class_ptr(e);
      regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));

    CASE(OP_RAISEIF, B) {
      if (mrb_nil_p(exc)) {
      else if (mrb_break_p(exc)) {
        mrb->exc = mrb_obj_ptr(exc);
        brk = (struct RBreak*)mrb->exc;
        switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
          RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
          mrb_assert(!"wrong break tag");
        mrb_exc_set(mrb, exc);

      while (!(proc = ci->proc) || MRB_PROC_CFUNC_P(ci->proc) || !(irep = proc->body.irep) || irep->clen < 1 ||
             (ch = catch_handler_find(irep, ci->pc, MRB_CATCH_FILTER_ALL)) == NULL) {
        if (ci != mrb->c->cibase) {
          if (ci[1].cci == CINFO_SKIP) {
            mrb_assert(prev_jmp != NULL);
            mrb->jmp = prev_jmp;
            MRB_THROW(prev_jmp);
        else if (mrb->c == mrb->root_c) {
          mrb->c->ci->stack = mrb->c->stbase;
          mrb->jmp = prev_jmp;
          return mrb_obj_value(mrb->exc);
          struct mrb_context *c = mrb->c;
          c->status = MRB_FIBER_TERMINATED;
          if (!mrb->c) mrb->c = mrb->root_c;
          else c->prev = NULL;
          if (!c->vmexec) goto L_RAISE;
          mrb->jmp = prev_jmp;
          if (!prev_jmp) return mrb_obj_value(mrb->exc);
          MRB_THROW(prev_jmp);

    L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
      irep = proc->body.irep;
      stack_extend(mrb, irep->nregs);
      pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
    CASE(OP_SSEND, BBB) {

    CASE(OP_SSENDB, BBB) {

      /* push nil after arguments */
      SET_NIL_VALUE(regs[a+2]);

      mrb_value recv, blk;
      int nk = (c>>4)&0xf;
      mrb_int bidx = a + mrb_bidx(n,nk);
      mrb_int new_bidx = bidx;
      if (nk == CALL_MAXARGS) {
        mrb_ensure_hash_type(mrb, regs[a+(n==CALL_MAXARGS?1:n)+1]);
      else if (nk > 0) {  /* pack keyword arguments */
        mrb_int kidx = a+(n==CALL_MAXARGS?1:n)+1;
        mrb_value kdict = hash_new_from_regs(mrb, nk, kidx);
        new_bidx = a+mrb_bidx(n, nk);

      mrb_assert(bidx < irep->nregs);
      if (insn == OP_SEND) {
        /* clear block argument */
        SET_NIL_VALUE(regs[new_bidx]);
        blk = ensure_block(mrb, regs[bidx]);
        regs[new_bidx] = blk;

      ci = cipush(mrb, a, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, c);
      ci->u.target_class = (insn == OP_SUPER) ? CI_TARGET_CLASS(ci - 1)->super : mrb_class(mrb, recv);
      m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
      if (MRB_METHOD_UNDEF_P(m)) {
        m = prepare_missing(mrb, ci, recv, mid, blk, (insn == OP_SUPER));
      ci->cci = CINFO_NONE;
      if (MRB_METHOD_PROC_P(m)) {
        const struct RProc *p = MRB_METHOD_PROC(m);
        if (MRB_PROC_ALIAS_P(p)) {
          ci->mid = p->body.mid;
        if (!MRB_PROC_CFUNC_P(p)) {
          /* setup environment for calling method */
          irep = proc->body.irep;
          stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
        if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
          check_method_noarg(mrb, ci);
        recv = MRB_PROC_CFUNC(p)(mrb, recv);
        if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
          check_method_noarg(mrb, ci);
        recv = MRB_METHOD_FUNC(m)(mrb, recv);

      /* cfunc epilogue */
      mrb_assert(mrb->c->ci > mrb->c->cibase);
      mrb_gc_arena_shrink(mrb, ai);
      if (mrb->exc) goto L_RAISE;
      if (!ci->u.keep_context) { /* return from context modifying method (resume/yield) */
        if (ci->cci == CINFO_RESUMED) {
          mrb->jmp = prev_jmp;
          mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
          irep = proc->body.irep;
      ci->stack[0] = recv;

      mrb_callinfo *ci = mrb->c->ci;
      mrb_value recv = ci->stack[0];
      const struct RProc *p = mrb_proc_ptr(recv);
      if (MRB_PROC_ALIAS_P(p)) {
        ci->mid = p->body.mid;
      else if (MRB_PROC_ENV_P(p)) {
        ci->mid = MRB_PROC_ENV(p)->mid;
      /* replace callinfo */
      ci->u.target_class = MRB_PROC_TARGET_CLASS(p);
      if (MRB_PROC_CFUNC_P(p)) {
        recv = MRB_PROC_CFUNC(p)(mrb, recv);
        mrb_gc_arena_shrink(mrb, ai);
        if (mrb->exc) goto L_RAISE;
        ci[1].stack[0] = recv;
        irep = mrb->c->ci->proc->body.irep;
        /* setup environment for calling method */
        irep = p->body.irep;
        mrb->c->ci->stack[0] = mrb_nil_value();
        goto L_OP_RETURN_BODY;
        mrb_int nargs = ci_bidx(ci)+1;
        if (nargs < irep->nregs) {
          stack_extend(mrb, irep->nregs);
          stack_clear(regs+nargs, irep->nregs-nargs);
        if (MRB_PROC_ENV_P(p)) {
          regs[0] = MRB_PROC_ENV(p)->stack[0];
    CASE(OP_SUPER, BB) {
      mrb_callinfo *ci = mrb->c->ci;
      struct RClass* target_class = CI_TARGET_CLASS(ci);
      if (mid == 0 || !target_class) {
        RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
      if ((target_class->flags & MRB_FL_CLASS_IS_PREPENDED) || target_class->tt == MRB_TT_MODULE) {
        goto super_typeerror;
      if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
        RAISE_LIT(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context");

    CASE(OP_ARGARY, BS) {
      mrb_int m1 = (b>>11)&0x3f;
      mrb_int r  = (b>>10)&0x1;
      mrb_int m2 = (b>>5)&0x1f;
      mrb_int kd = (b>>4)&0x1;
      mrb_int lv = (b>>0)&0xf;

      if (mrb->c->ci->mid == 0 || CI_TARGET_CLASS(mrb->c->ci) == NULL) {
        RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
      if (lv == 0) stack = regs + 1;
        struct REnv *e = uvenv(mrb, lv-1);
        if (!e) goto L_NOSUPER;
        if (MRB_ENV_LEN(e) <= m1+r+m2+1)
        stack = e->stack + 1;
        regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
        mrb_value *pp = NULL;
        struct RArray *rest;
        if (mrb_array_p(stack[m1])) {
          struct RArray *ary = mrb_ary_ptr(stack[m1]);
        regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
        rest = mrb_ary_ptr(regs[a]);
        stack_copy(ARY_PTR(rest), stack, m1);
          stack_copy(ARY_PTR(rest)+m1, pp, len);
        stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
        ARY_SET_LEN(rest, m1+len+m2);
        regs[a+1] = stack[m1+r+m2];
        regs[a+2] = stack[m1+r+m2+1];
        regs[a+1] = stack[m1+r+m2];
      mrb_gc_arena_restore(mrb, ai);
      mrb_callinfo *ci = mrb->c->ci;
      mrb_int argc = ci->n;
      mrb_value *argv = regs+1;
      mrb_int m1 = MRB_ASPEC_REQ(a);

      if ((a & ~0x7c0001) == 0 && argc < 15 && MRB_PROC_STRICT_P(proc)) {
        if (argc+(ci->nk==15) != m1) { /* count kdict too */
          argnum_error(mrb, m1);
        /* clear local (but non-argument) variables */
        mrb_int pos = m1+2;     /* self+m1+blk */
        if (irep->nlocals-pos > 0) {
          stack_clear(&regs[pos], irep->nlocals-pos);

      mrb_int o  = MRB_ASPEC_OPT(a);
      mrb_int r  = MRB_ASPEC_REST(a);
      mrb_int m2 = MRB_ASPEC_POST(a);
      mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
      int b = MRB_ASPEC_BLOCK(a);
      mrb_int const len = m1 + o + r + m2;
      mrb_value * const argv0 = argv;
      mrb_value blk = regs[ci_bidx(ci)];
      mrb_value kdict = mrb_nil_value();

      /* keyword arguments */
        kdict = regs[mrb_ci_kidx(ci)];
        if (!mrb_nil_p(kdict) && mrb_hash_size(mrb, kdict) > 0) {
            argc++;   /* include kdict in normal arguments */
          else if (argc == 14) {
            /* pack arguments and kdict */
            regs[1] = ary_new_from_regs(mrb, argc+1, 1);
          else {/* argc == 15 */
            /* push kdict to packed arguments */
            mrb_ary_push(mrb, regs[1], kdict);
          kdict = mrb_nil_value();
        else if (MRB_ASPEC_KEY(a) > 0 && !mrb_nil_p(kdict)) {
          kdict = mrb_hash_dup(mrb, kdict);
        else if (!mrb_nil_p(kdict)) {
          mrb_gc_protect(mrb, kdict);

      /* arguments is passed with Array */
        struct RArray *ary = mrb_ary_ptr(regs[1]);
        argv = ARY_PTR(ary);
        argc = (int)ARY_LEN(ary);
        mrb_gc_protect(mrb, regs[1]);

      /* strict argument check */
      if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
        if (argc < m1 + m2 || (r == 0 && argc > len)) {
          argnum_error(mrb, m1+m2);
      /* extract first argument array to arguments */
      else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
        mrb_gc_protect(mrb, argv[0]);
        argc = (int)RARRAY_LEN(argv[0]);
        argv = RARRAY_PTR(argv[0]);

      /* rest arguments */
      mrb_value rest = mrb_nil_value();
          mlen = m1 < argc ? argc - m1 : 0;
        /* copy mandatory and optional arguments */
        if (argv0 != argv && argv) {
          value_move(&regs[1], argv, argc-mlen); /* m1 + o */
          stack_clear(&regs[argc+1], m1-argc);
        /* copy post mandatory arguments */
          value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
          stack_clear(&regs[len-m2+mlen+1], m2-mlen);
        /* initialize rest arguments with empty Array */
          rest = mrb_ary_new_capa(mrb, 0);
          regs[m1+o+1] = rest;
        /* skip initializer of passed arguments */
        if (o > 0 && argc > m1+m2)
          pc += (argc - m1 - m2)*3;
        if (argv0 != argv) {
          mrb_gc_protect(mrb, blk);
          value_move(&regs[1], argv, m1+o);
          rnum = argc-m1-o-m2;
          rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
          regs[m1+o+1] = rest;
        if (m2 > 0 && argc-m2 > m1) {
          value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);

      /* need to be update blk first to protect blk from GC */
      mrb_int const kw_pos = len + kd;    /* where kwhash should be */
      mrb_int const blk_pos = kw_pos + 1; /* where block should be */
      regs[blk_pos] = blk;                /* move block */
        if (mrb_nil_p(kdict)) {
          kdict = mrb_hash_new_capa(mrb, 0);
        regs[kw_pos] = kdict;             /* set kwhash */

      /* format arguments for generated code */
      mrb->c->ci->n = (uint8_t)len;

      /* clear local (but non-argument) variables */
      if (irep->nlocals-blk_pos-1 > 0) {
        stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
      mrb_value k = mrb_symbol_value(syms[b]);
      mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
      if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
        RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "missing keyword: %v", k);
      v = mrb_hash_get(mrb, kdict, k);
      mrb_hash_delete_key(mrb, kdict, k);

    CASE(OP_KEY_P, BB) {
      mrb_value k = mrb_symbol_value(syms[b]);
      mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
      mrb_bool key_p = FALSE;
      if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
        key_p = mrb_hash_key_p(mrb, kdict, k);
      regs[a] = mrb_bool_value(key_p);

    CASE(OP_KEYEND, Z) {
      mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
      if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
        mrb_value keys = mrb_hash_keys(mrb, kdict);
        mrb_value key1 = RARRAY_PTR(keys)[0];
        RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "unknown keyword: %v", key1);

      if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
      if (MRB_PROC_ORPHAN_P(proc) || !MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
        RAISE_LIT(mrb, E_LOCALJUMP_ERROR, "break from proc-closure");
      struct REnv *e = MRB_PROC_ENV(proc);
      if (e->cxt != mrb->c) {
      mrb_callinfo *ci = mrb->c->ci;
      while (mrb->c->cibase < ci && ci[-1].proc != proc) {
      if (ci == mrb->c->cibase) {
      c = a; // release the "a" variable, which can handle 32-bit values
      a = ci - mrb->c->cibase;
    CASE(OP_RETURN_BLK, B) {
      mrb_callinfo *ci = mrb->c->ci;
      if (!MRB_PROC_ENV_P(proc) || MRB_PROC_STRICT_P(proc)) {
      const struct RProc *dst;
      mrb_callinfo *cibase;
      cibase = mrb->c->cibase;
      dst = top_proc(mrb, proc);
      if (MRB_PROC_ENV_P(dst)) {
        struct REnv *e = MRB_PROC_ENV(dst);
        if (!MRB_ENV_ONSTACK_P(e) || e->cxt != mrb->c) {
          localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
      /* check jump destination */
      while (cibase <= ci && ci->proc != dst) {
      if (ci <= cibase) { /* no jump destination */
        localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
      c = a; // release the "a" variable, which can handle 32-bit values
      a = ci - mrb->c->cibase;

    CASE(OP_RETURN, B) {
      mrb_gc_protect(mrb, v);
      CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
        struct RBreak *brk = (struct RBreak*)mrb->exc;
        ci = &mrb->c->cibase[brk->ci_break_index];
        v = mrb_break_value_get(brk);

    L_UNWINDING: // for a check on the role of `a` and `c`, see `goto L_UNWINDING`
      ci = mrb->c->cibase + a;
      mrb_gc_protect(mrb, v);
      CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
        UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_BREAK, ci, v);
        if (mrb->c->ci == ci) {
        if (mrb->c->ci[1].cci != CINFO_NONE) {
          mrb_assert(prev_jmp != NULL);
          mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, ci, v);
          mrb_gc_arena_restore(mrb, ai);
          mrb->c->vmexec = FALSE;
          mrb->jmp = prev_jmp;
          MRB_THROW(prev_jmp);
      CHECKPOINT_END(RBREAK_TAG_BREAK);
      mrb->exc = NULL; /* clear break object */

      if (ci == mrb->c->cibase) {
        struct mrb_context *c = mrb->c;
        if (c == mrb->root_c) {
          /* toplevel return */
          mrb_gc_arena_restore(mrb, ai);
          mrb->jmp = prev_jmp;
        /* fiber termination should automatic yield or transfer to root */
        c->status = MRB_FIBER_TERMINATED;
        mrb->c = c->prev ? c->prev : mrb->root_c;
        mrb->c->status = MRB_FIBER_RUNNING;
            (mrb->c == mrb->root_c && mrb->c->ci == mrb->c->cibase) /* case using Fiber#transfer in mrb_fiber_resume() */) {
          mrb_gc_arena_restore(mrb, ai);
          mrb->jmp = prev_jmp;
      if (mrb->c->vmexec && !ci->u.keep_context) {
        mrb_gc_arena_restore(mrb, ai);
        mrb->c->vmexec = FALSE;
        mrb->jmp = prev_jmp;
      if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
        mrb_gc_arena_restore(mrb, ai);
        mrb->jmp = prev_jmp;
      DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
      irep = proc->body.irep;
      mrb_gc_arena_restore(mrb, ai);
    CASE(OP_BLKPUSH, BS) {
      int m1 = (b>>11)&0x3f;
      int r  = (b>>10)&0x1;
      int m2 = (b>>5)&0x1f;
      int kd = (b>>4)&0x1;
      int lv = (b>>0)&0xf;

      if (lv == 0) stack = regs + 1;
        struct REnv *e = uvenv(mrb, lv-1);
        if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
            MRB_ENV_LEN(e) <= m1+r+m2+1) {
          localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
        stack = e->stack + 1;
      if (mrb_nil_p(stack[m1+r+m2+kd])) {
        localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
      regs[a] = stack[m1+r+m2+kd];

#if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
      RAISE_LIT(mrb, E_RANGE_ERROR, "integer overflow");

#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))

#define OP_MATH(op_name) \
  /* need to check if op is overridden */ \
  switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
    OP_MATH_CASE_INTEGER(op_name); \
    OP_MATH_CASE_FLOAT(op_name, integer, float); \
    OP_MATH_CASE_FLOAT(op_name, float,   integer); \
    OP_MATH_CASE_FLOAT(op_name, float,   float); \
    OP_MATH_CASE_STRING_##op_name(); \
      mid = MRB_OPSYM(op_name); \

#define OP_MATH_CASE_INTEGER(op_name) \
  case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
      mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
      if (mrb_int_##op_name##_overflow(x, y, &z)) { \
        OP_MATH_OVERFLOW_INT(op_name,x,y); \
        SET_INT_VALUE(mrb,regs[a], z); \

#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
  case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
      mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
      SET_FLOAT_VALUE(mrb, regs[a], z); \

#ifdef MRB_USE_BIGINT
#define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
#define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW

#define OP_MATH_CASE_STRING_add() \
  case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
    regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
    mrb_gc_arena_restore(mrb, ai); \
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float   MRB_TT_FLOAT
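/*
 * Added illustration: with the helpers above, OP_MATH(add) expands (roughly,
 * assuming MRB_NO_FLOAT and MRB_USE_BIGINT are both unset) to a
 * type-dispatched switch along these lines:
 *
 *   switch (TYPES2(mrb_type(regs[a]), mrb_type(regs[a+1]))) {
 *   case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER):
 *     // overflow-checked integer add, falling back to L_INT_OVERFLOW
 *   case TYPES2(MRB_TT_FLOAT, MRB_TT_FLOAT):
 *     // plain C float add via OP_MATH_OP_add (+)
 *   case TYPES2(MRB_TT_STRING, MRB_TT_STRING):
 *     // string concatenation via mrb_str_plus()
 *   default:
 *     // fall back to dispatching the Ruby-level "+" method
 *   }
 */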
#ifndef MRB_NO_FLOAT
      /* need to check if op is overridden */
      switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
      case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
        mrb_int x = mrb_integer(regs[a]);
        mrb_int y = mrb_integer(regs[a+1]);
        regs[a] = mrb_div_int_value(mrb, x, y);
#ifndef MRB_NO_FLOAT
      case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
        x = (mrb_float)mrb_integer(regs[a]);
        y = mrb_float(regs[a+1]);
      case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
        x = mrb_float(regs[a]);
        y = (mrb_float)mrb_integer(regs[a+1]);
      case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
        x = mrb_float(regs[a]);
        y = mrb_float(regs[a+1]);
        mid = MRB_OPSYM(div);
#ifndef MRB_NO_FLOAT
      f = mrb_div_float(x, y);
      SET_FLOAT_VALUE(mrb, regs[a], f);
#define OP_MATHI(op_name) \
  /* need to check if op is overridden */ \
  switch (mrb_type(regs[a])) { \
    OP_MATHI_CASE_INTEGER(op_name); \
    OP_MATHI_CASE_FLOAT(op_name); \
      SET_INT_VALUE(mrb,regs[a+1], b); \
      mid = MRB_OPSYM(op_name); \

#define OP_MATHI_CASE_INTEGER(op_name) \
  case MRB_TT_INTEGER: \
      mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
      if (mrb_int_##op_name##_overflow(x, y, &z)) { \
        OP_MATH_OVERFLOW_INT(op_name,x,y); \
        SET_INT_VALUE(mrb,regs[a], z); \

#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#define OP_MATHI_CASE_FLOAT(op_name) \
  case MRB_TT_FLOAT: \
      mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
      SET_FLOAT_VALUE(mrb, regs[a], z); \

#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))

#define OP_CMP(op,sym) do {\
  /* need to check if - is overridden */\
  switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
  case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
    result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
    mid = MRB_OPSYM(sym);\
    SET_TRUE_VALUE(regs[a]);\
    SET_FALSE_VALUE(regs[a]);\

#define OP_CMP(op, sym) do {\
  /* need to check if - is overridden */\
  switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
  case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
    result = OP_CMP_BODY(op,mrb_integer,mrb_integer);\
  case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
    result = OP_CMP_BODY(op,mrb_integer,mrb_float);\
  case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
    result = OP_CMP_BODY(op,mrb_float,mrb_integer);\
  case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
    result = OP_CMP_BODY(op,mrb_float,mrb_float);\
    mid = MRB_OPSYM(sym);\
    SET_TRUE_VALUE(regs[a]);\
    SET_FALSE_VALUE(regs[a]);\

      if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
        SET_TRUE_VALUE(regs[a]);
2691 CASE(OP_ARRAY
, BB
) {
2692 regs
[a
] = ary_new_from_regs(mrb
, b
, a
);
2693 mrb_gc_arena_restore(mrb
, ai
);
2696 CASE(OP_ARRAY2
, BBB
) {
2697 regs
[a
] = ary_new_from_regs(mrb
, c
, b
);
2698 mrb_gc_arena_restore(mrb
, ai
);
2702 CASE(OP_ARYCAT, B) {
2703 mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
2704 if (mrb_nil_p(regs[a])) {
2708 mrb_assert(mrb_array_p(regs[a]));
2709 mrb_ary_concat(mrb, regs[a], splat);
2711 mrb_gc_arena_restore(mrb, ai);
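/* OP_ARYCAT: splat R(a+1) and concatenate it onto the array in R(a); when
 * R(a) is nil the splat result itself presumably becomes the new value,
 * otherwise mrb_ary_concat() appends in place. */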
2715 CASE(OP_ARYPUSH, BB) {
2716 mrb_assert(mrb_array_p(regs[a]));
2717 for (mrb_int i=0; i<b; i++) {
2718 mrb_ary_push(mrb, regs[a], regs[a+i+1]);
2723 CASE(OP_ARYSPLAT, B) {
2724 mrb_value ary = mrb_ary_splat(mrb, regs[a]);
2726 mrb_gc_arena_restore(mrb, ai);
2730 CASE(OP_AREF, BBB) {
2731 mrb_value v = regs[b];
2733 if (!mrb_array_p(v)) {
2738 SET_NIL_VALUE(regs[a]);
2742 v = mrb_ary_ref(mrb, v, c);
2748 CASE(OP_ASET, BBB) {
2749 mrb_assert(mrb_array_p(regs[a]));
2750 mrb_ary_set(mrb, regs[b], c, regs[a]);
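/* OP_AREF and OP_ASET are the destructuring helpers: OP_AREF loads
 * R(b)[c] into R(a), treating a non-array R(b) in effect as a one-element
 * array (index 0 yields the value, other indices yield nil), and OP_ASET
 * stores R(a) into R(b)[c]. */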
2754 CASE(OP_APOST, BBB) {
2755 mrb_value v = regs[a];
2761 if (!mrb_array_p(v)) {
2762 v = ary_new_from_regs(mrb, 1, a);
2764 ary = mrb_ary_ptr(v);
2765 len = (int)ARY_LEN(ary);
2766 if (len > pre + post) {
2767 v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
2770 regs[a++] = ARY_PTR(ary)[len-post-1];
2774 v = mrb_ary_new_capa(mrb, 0);
2776 for (idx=0; idx+pre<len; idx++) {
2777 regs[a+idx] = ARY_PTR(ary)[pre+idx];
2779 while (idx < post) {
2780 SET_NIL_VALUE(regs[a+idx]);
2784 mrb_gc_arena_restore(mrb, ai);
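/* OP_APOST implements splat destructuring such as `x, *y, z = ary`: R(a)
 * receives the middle slice as a fresh array (empty when the source is too
 * short) and the following `post` registers receive the trailing elements,
 * padded with nil when the source array has too few values. */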
2788 CASE(OP_INTERN, B) {
2789 mrb_assert(mrb_string_p(regs[a]));
2790 mrb_sym sym = mrb_intern_str(mrb, regs[a]);
2791 regs[a] = mrb_symbol_value(sym);
2795 CASE(OP_SYMBOL, BB) {
2799 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2800 len = pool[b].tt >> 2;
2801 if (pool[b].tt & IREP_TT_SFLAG) {
2802 sym = mrb_intern_static(mrb, pool[b].u.str, len);
2805 sym = mrb_intern(mrb, pool[b].u.str, len);
2807 regs[a] = mrb_symbol_value(sym);
2811 CASE(OP_STRING, BB) {
2814 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2815 len = pool[b].tt >> 2;
2816 if (pool[b].tt & IREP_TT_SFLAG) {
2817 regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
2820 regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
2822 mrb_gc_arena_restore(mrb, ai);
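/* OP_SYMBOL and OP_STRING decode an IREP pool entry: the literal length is
 * kept in the upper bits of pool[b].tt (hence the >> 2), IREP_TT_NFLAG marks
 * non-string entries and must be clear, and IREP_TT_SFLAG marks a static
 * literal that can be interned or wrapped without copying
 * (mrb_intern_static / mrb_str_new_static). */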
2826 CASE(OP_STRCAT, B) {
2827 mrb_assert(mrb_string_p(regs[a]));
2828 mrb_str_concat(mrb, regs[a], regs[a+1]);
2833 mrb_value hash = mrb_hash_new_capa(mrb, b);
2836 for (int i=a; i<lim; i+=2) {
2837 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2840 mrb_gc_arena_restore(mrb, ai);
2844 CASE(OP_HASHADD, BB) {
2849 mrb_ensure_hash_type(mrb, hash);
2850 for (int i=a+1; i<lim; i+=2) {
2851 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2853 mrb_gc_arena_restore(mrb, ai);
2856 CASE(OP_HASHCAT, B) {
2857 mrb_value hash = regs[a];
2859 mrb_assert(mrb_hash_p(hash));
2860 mrb_hash_merge(mrb, hash, regs[a+1]);
2861 mrb_gc_arena_restore(mrb, ai);
2870 const mrb_irep *nirep = irep->reps[b];
2872 if (c & OP_L_CAPTURE) {
2873 p = mrb_closure_new(mrb, nirep);
2876 p = mrb_proc_new(mrb, nirep);
2877 p->flags |= MRB_PROC_SCOPE;
2879 if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
2880 regs[a] = mrb_obj_value(p);
2881 mrb_gc_arena_restore(mrb, ai);
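/* Shared proc-creation body for OP_LAMBDA/OP_BLOCK/OP_METHOD: the c flags
 * decide whether the new proc captures the enclosing environment
 * (OP_L_CAPTURE -> mrb_closure_new) or is a plain scope-only proc, and
 * OP_L_STRICT marks lambda/method argument semantics (MRB_PROC_STRICT). */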
2884 CASE(OP_BLOCK, BB) {
2888 CASE(OP_METHOD, BB) {
2893 CASE(OP_RANGE_INC, B) {
2894 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
2896 mrb_gc_arena_restore(mrb, ai);
2900 CASE(OP_RANGE_EXC, B) {
2901 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
2903 mrb_gc_arena_restore(mrb, ai);
2907 CASE(OP_OCLASS, B) {
2908 regs[a] = mrb_obj_value(mrb->object_class);
2912 CASE(OP_CLASS, BB) {
2913 struct RClass *c = 0, *baseclass;
2914 mrb_value base, super;
2915 mrb_sym id = syms[b];
2919 if (mrb_nil_p(base)) {
2920 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2921 if (!baseclass) baseclass = mrb->object_class;
2922 base = mrb_obj_value(baseclass);
2924 c = mrb_vm_define_class(mrb, base, super, id);
2925 regs[a] = mrb_obj_value(c);
2926 mrb_gc_arena_restore(mrb, ai);
2930 CASE(OP_MODULE, BB) {
2931 struct RClass *cls = 0, *baseclass;
2933 mrb_sym id = syms[b];
2936 if (mrb_nil_p(base)) {
2937 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2938 if (!baseclass) baseclass = mrb->object_class;
2939 base = mrb_obj_value(baseclass);
2941 cls = mrb_vm_define_module(mrb, base, id);
2942 regs[a] = mrb_obj_value(cls);
2943 mrb_gc_arena_restore(mrb, ai);
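/* OP_CLASS/OP_MODULE: when the base register is nil, the definition is
 * anchored at the lexically enclosing class of the running proc
 * (MRB_PROC_TARGET_CLASS), falling back to Object; mrb_vm_define_class and
 * mrb_vm_define_module then create or reopen the named class/module under
 * that base. */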
2949 mrb_value recv = regs[a];
2951 const mrb_irep *nirep = irep->reps[b];
2953 /* prepare closure */
2954 p = mrb_proc_new(mrb, nirep);
2956 mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
2957 MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
2958 p->flags |= MRB_PROC_SCOPE;
2960 /* prepare call stack */
2961 cipush(mrb, a, 0, mrb_class_ptr(recv), p, NULL, 0, 0);
2963 irep = p->body.irep;
2966 stack_extend(mrb, irep->nregs);
2967 stack_clear(regs+1, irep->nregs-1);
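/* OP_EXEC runs the child irep reps[b] (typically a class or module body)
 * with the class in R(a) as both receiver and target class: a fresh proc is
 * created for the irep, a callinfo frame is pushed via cipush(), and the
 * register window is extended to nregs and cleared before execution
 * continues in the new irep. */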
2973 struct RClass *target = mrb_class_ptr(regs[a]);
2974 struct RProc *p = mrb_proc_ptr(regs[a+1]);
2976 mrb_sym mid = syms[b];
2978 MRB_METHOD_FROM_PROC(m, p);
2979 mrb_define_method_raw(mrb, target, mid, m);
2980 mrb_method_added(mrb, target, mid);
2981 mrb_gc_arena_restore(mrb, ai);
2982 regs[a] = mrb_symbol_value(mid);
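/* OP_DEF binds the proc in R(a+1) as a method named syms[b] on the class in
 * R(a), fires the method_added hook, and leaves the method name as a Symbol
 * in R(a) (the value a `def` expression evaluates to). */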
2986 CASE(OP_SCLASS, B) {
2987 regs[a] = mrb_singleton_class(mrb, regs[a]);
2988 mrb_gc_arena_restore(mrb, ai);
2992 CASE(OP_TCLASS, B) {
2993 struct RClass *target = check_target_class(mrb);
2994 if (!target) goto L_RAISE;
2995 regs[a] = mrb_obj_value(target);
2999 CASE(OP_ALIAS, BB) {
3000 struct RClass *target = check_target_class(mrb);
3002 if (!target) goto L_RAISE;
3003 mrb_alias_method(mrb, target, syms[a], syms[b]);
3004 mrb_method_added(mrb, target, syms[a]);
3008 struct RClass *target = check_target_class(mrb);
3010 if (!target) goto L_RAISE;
3011 mrb_undef_method_id(mrb, target, syms[a]);
3017 #ifdef MRB_USE_DEBUG_HOOK
3018 mrb->debug_op_hook(mrb, irep, pc, regs);
3020 #ifndef MRB_NO_STDIO
3021 printf("OP_DEBUG %d %d %d\n", a, b, c);
3030 size_t len = pool[a].tt >> 2;
3033 mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
3034 exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
3035 RAISE_EXC(mrb, exc);
3041 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3042 #include "mruby/ops.h"
3051 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3052 #include "mruby/ops.h"
3061 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3062 #include "mruby/ops.h"
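/* These three inclusions of mruby/ops.h appear to implement the OP_EXT1,
 * OP_EXT2 and OP_EXT3 prefix instructions: the following opcode is re-read
 * with its first, second, or both operands widened to 16 bits
 * (FETCH_*_1/_2/_3) and control then jumps to that opcode's ordinary
 * handler body. */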
3071 CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
3074 CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
3075 UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_STOP, mrb->c->ci, mrb_nil_value());
3077 CHECKPOINT_END(RBREAK_TAG_STOP);
3078 mrb->jmp = prev_jmp;
3080 mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
3081 return mrb_obj_value(mrb->exc);
3083 return regs[irep->nlocals];
3090 mrb_callinfo *ci = mrb->c->ci;
3091 while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
3096 goto RETRY_TRY_BLOCK;
3098 MRB_END_EXC(&c_jmp);
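/* The two public entry points below: mrb_run() executes proc on top of the
 * current call frame, keeping the receiver, arguments and block that are
 * already on the stack (ci_bidx(ci) + 1 values); mrb_top_run() additionally
 * pushes a CINFO_SKIP callinfo frame when Ruby frames are already active,
 * apparently so that the new top-level execution does not unwind into them. */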
3102 mrb_run(mrb_state *mrb, const struct RProc *proc, mrb_value self)
3104 return mrb_vm_run(mrb, proc, self, ci_bidx(mrb->c->ci) + 1);
3108 mrb_top_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
3110 if (mrb->c->cibase && mrb->c->ci > mrb->c->cibase) {
3111 cipush(mrb, 0, CINFO_SKIP, mrb->object_class, NULL, NULL, 0, 0);
3113 return mrb_vm_run(mrb, proc, self, stack_keep);