/*
** gc.c - garbage collector for mruby
**
** See Copyright Notice in mruby.h
*/
#ifdef MRB_USE_MALLOC_TRIM
#include <malloc.h>
#endif
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/data.h>
#include <mruby/istruct.h>
#include <mruby/hash.h>
#include <mruby/proc.h>
#include <mruby/range.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/throw.h>
#include <mruby/presym.h>
/*
  = Tri-color Incremental Garbage Collection

  mruby's GC is a tri-color incremental mark & sweep collector.
  Algorithm details are omitted; instead, the implementation is
  described below.

  == Object's Color

  Each object can be painted in one of three colors:

    * White - Unmarked.
    * Gray  - Marked, but its child objects are unmarked.
    * Black - Marked, and its child objects are also marked.

  Extra color:

    * Red - Static (ROM) object; never collected.
          - All of its child objects should be Red as well.
  == Two White Types

  There are two white color types in a flip-flop fashion: White-A and White-B,
  which respectively represent the Current White color (the newly allocated
  objects in the current GC cycle) and the Sweep Target White color (the
  dead objects to be swept).

  A and B are switched just at the beginning of the next GC cycle. At
  that time, all the dead objects have been swept, while the newly created
  objects of the current GC cycle that finally remain White are now
  regarded as dead. Instead of traversing all the White-A objects and
  painting them White-B, we just switch the meaning of White-A and White-B,
  which is much cheaper; a sketch of the flip follows.

  As a result, the objects we sweep in the current GC cycle are always
  ones left over from the previous GC cycle. This allows us to sweep objects
  incrementally, without disturbance from the newly created objects.
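  A minimal sketch of the flip, mirroring the other_white_part()/
  flip_white_part() macros defined below (bit values come from the
  GC_WHITE_A/GC_WHITE_B definitions):

    uint32_t white = GC_WHITE_A;   // 001: current white
    white ^= GC_WHITES;            // -> GC_WHITE_B (010): whites swapped
    white ^= GC_WHITES;            // -> GC_WHITE_A (001) again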
  == Execution Timing

  GC execution time and the interval between steps are decided by the live
  object count. List of adjustment APIs (a tuning sketch follows):

    * gc_interval_ratio_set
    * gc_step_ratio_set

  For details, see the comments for each function.
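  For illustration only (a sketch, not itself part of this file): the
  underlying knobs are the interval_ratio/step_ratio fields of mrb->gc,
  which the Ruby-level GC.interval_ratio= / GC.step_ratio= setters at the
  bottom of this file assign directly:

    mrb->gc.interval_ratio = 150;  // start the next cycle sooner (default 200)
    mrb->gc.step_ratio = 100;      // do less work per step (default 200)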
  == Write Barrier

  mruby implementers and C extension library writers must insert a write
  barrier when updating a reference held in a field of an object.
  When updating a reference from a field of object A to object B,
  two different types of write barrier are available (an example follows
  this list):

    * mrb_field_write_barrier - targets object B for a mark.
    * mrb_write_barrier       - targets object A for a mark.
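  For example (a sketch; `holder` and `child` are hypothetical, with `holder`
  wrapping a C struct that keeps a raw reference to `child`):

    ((struct my_holder*)DATA_PTR(holder))->child = child;  // raw reference store
    mrb_field_write_barrier(mrb, mrb_basic_ptr(holder),    // holder may be Black,
                            (struct RBasic*)child);        // so target child for a mark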
  == Generational Mode

  mruby's GC offers a Generational Mode while re-using the tri-color GC
  infrastructure. It treats the Black objects as Old objects after each
  sweep phase, instead of painting them White. The key ideas are still the
  same as in traditional generational GC:

    * Minor GC - just traverse the Young objects (Gray objects) in the mark
                 phase, then only sweep the newly created objects, and leave
                 the Old objects alive.

    * Major GC - same as a full regular GC cycle.

  The difference from "traditional" generational GC is that the major GC
  in mruby is triggered incrementally, in a tri-color manner.

  For details, see the comments for each function.
*/
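/* For illustration (a sketch): a C caller can force a complete collection at a
   quiescent point with mrb_full_gc(), defined later in this file; in generational
   mode it first clears all Old objects back to young, then runs a whole cycle.

     mrb_full_gc(mrb);  // finish any half-done cycle, then mark & sweep everything
*/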
struct RVALUE_initializer {
  MRB_OBJECT_HEADER;
  char padding[sizeof(void*) * 4 - sizeof(uint32_t)];
};

typedef struct {
  union {
    struct RVALUE_initializer init; /* must be first member to ensure initialization */
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RIStruct istruct;
    struct RProc proc;
    struct REnv env;
    struct RException exc;
    struct RFiber fiber;
    struct RBreak brk;
  } as;
} RVALUE;
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

#define GC_STEP_SIZE 1024
/* white: 001 or 010, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A (1 << 0)
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_RED MRB_GC_RED
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7
mrb_static_assert1(MRB_GC_RED <= GC_COLOR_MASK);
#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color == GC_BLACK)
#define is_red(o) ((o)->color == GC_RED)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)
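/* An object is "dead" if it still carries the previous cycle's white bit (the
   sweep-target white), or if its slot has already been returned to the freelist. */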
#define objects(p) ((RVALUE *)p->objects)
mrb_noreturn void mrb_raise_nomemory(mrb_state *mrb);
MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  if (!p2 && len > 0 && mrb->gc.heaps) {
    /* allocation failed: collect garbage and retry once */
    mrb_full_gc(mrb);
    p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  }

  return p2;
}
MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = mrb_realloc_simple(mrb, p, len);
  if (len == 0) return p2;
  if (p2 == NULL) {
    mrb->gc.out_of_memory = TRUE;
    mrb_raise_nomemory(mrb);
  }
  else {
    mrb->gc.out_of_memory = FALSE;
  }

  return p2;
}
MRB_API void*
mrb_malloc(mrb_state *mrb, size_t len)
{
  return mrb_realloc(mrb, 0, len);
}

MRB_API void*
mrb_malloc_simple(mrb_state *mrb, size_t len)
{
  return mrb_realloc_simple(mrb, 0, len);
}
MRB_API void*
mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
{
  void *p;

  if (nelem > 0 && len > 0 &&
      nelem <= SIZE_MAX / len) {
    size_t size;

    size = nelem * len;
    p = mrb_malloc(mrb, size);
    memset(p, 0, size);
  }
  else {
    /* zero size or nelem*len would overflow: allocate nothing */
    p = NULL;
  }

  return p;
}
MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}
MRB_API void*
mrb_alloca(mrb_state *mrb, size_t size)
{
  struct RString *s;
  s = MRB_OBJ_ALLOC(mrb, MRB_TT_STRING, mrb->string_class);
  /* attach the buffer to a string object so the GC reclaims it automatically */
  return s->as.heap.ptr = (char*)mrb_malloc(mrb, size);
}
static mrb_bool
heap_p(mrb_gc *gc, struct RBasic *object)
{
  mrb_heap_page *page;

  page = gc->heaps;
  while (page) {
    RVALUE *p;

    p = objects(page);
    if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
      return TRUE;
    }
    page = page->next;
  }
  return FALSE;
}
MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
  mrb_gc *gc = &mrb->gc;
  if (!heap_p(gc, object)) return TRUE;
  return is_dead(gc, object);
}
static void
link_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->next = gc->heaps;
  if (gc->heaps)
    gc->heaps->prev = page;
  gc->heaps = page;
}
static void
unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->prev)
    page->prev->next = page->next;
  if (page->next)
    page->next->prev = page->prev;
  if (gc->heaps == page)
    gc->heaps = page->next;
  page->prev = NULL;
  page->next = NULL;
}
static void
link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->free_next = gc->free_heaps;
  if (gc->free_heaps) {
    gc->free_heaps->free_prev = page;
  }
  gc->free_heaps = page;
}
static void
unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->free_prev)
    page->free_prev->free_next = page->free_next;
  if (page->free_next)
    page->free_next->free_prev = page->free_prev;
  if (gc->free_heaps == page)
    gc->free_heaps = page->free_next;
  page->free_prev = NULL;
  page->free_next = NULL;
}
static void
add_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = (mrb_heap_page*)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
  RVALUE *p, *e;
  struct RBasic *prev = NULL;

  /* thread every slot onto the freelist; after the loop, the list head is the last slot */
  for (p = objects(page), e = p + MRB_HEAP_PAGE_SIZE; p < e; p++) {
    p->as.free.tt = MRB_TT_FREE;
    p->as.free.next = prev;
    prev = &p->as.basic;
  }
  page->freelist = prev;

  link_heap_page(gc, page);
  link_free_heap_page(gc, page);
}
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define MAJOR_GC_INC_RATIO 120
#define MAJOR_GC_TOOMANY 10000
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
static void
free_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = gc->heaps;
  mrb_heap_page *tmp;
  RVALUE *p, *e;

  while (page) {
    tmp = page;
    page = page->next;
    for (p = objects(tmp), e = p + MRB_HEAP_PAGE_SIZE; p < e; p++) {
      if (p->as.free.tt != MRB_TT_FREE)
        obj_free(mrb, &p->as.basic, TRUE);
    }
    mrb_free(mrb, tmp);
  }
}
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}
/* mrb_gc_protect() leaves the object in the arena */
MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
  if (mrb_immediate_p(obj)) return;
  gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
}
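/* For illustration (a sketch): C extensions normally bound arena growth with the
   public save/restore macros from mruby.h rather than calling gc_protect() directly:

     int ai = mrb_gc_arena_save(mrb);
     mrb_value tmp = mrb_str_new_lit(mrb, "scratch");  // newborn, protected by arena
     ...
     mrb_gc_arena_restore(mrb, ai);                    // drop tmp's protection again
*/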
#define GC_ROOT_SYM MRB_SYM(_gc_root_)
/* mrb_gc_register() keeps the object from GC.

   Register your object when it is exported to the C world
   without a reference from the Ruby world, e.g. callback
   arguments. Don't forget to remove the object with
   mrb_gc_unregister(), otherwise your object will leak.
*/
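/* For example (a sketch; `cb` is a hypothetical callback value handed to C):

     mrb_gc_register(mrb, cb);     // root it while only C holds a reference
     ...                           // safe to call back into mruby here
     mrb_gc_unregister(mrb, cb);   // un-root it once the C side is done
*/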
MRB_API void
mrb_gc_register(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;

  if (mrb_immediate_p(obj)) return;
  root = GC_ROOT_SYM;
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table) || !mrb_array_p(table)) {
    table = mrb_ary_new(mrb);
    mrb_gv_set(mrb, root, table);
  }
  mrb_ary_push(mrb, table, obj);
}
/* mrb_gc_unregister() removes the object from the GC root. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;
  struct RArray *a;
  mrb_int i;

  if (mrb_immediate_p(obj)) return;
  root = GC_ROOT_SYM;
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table)) return;
  if (!mrb_array_p(table)) {
    mrb_gv_set(mrb, root, mrb_nil_value());
    return;
  }
  a = mrb_ary_ptr(table);
  mrb_ary_modify(mrb, a);
  for (i = 0; i < ARY_LEN(a); i++) {
    if (mrb_ptr(ARY_PTR(a)[i]) == mrb_ptr(obj)) {
      mrb_int len = ARY_LEN(a)-1;
      mrb_value *ptr = ARY_PTR(a);

      memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
      ARY_SET_LEN(a, len);
      break;
    }
  }
}
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { NULL, NULL, MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C", cls);
    }
  }
  if (ttype <= MRB_TT_FREE) {
    mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C (type %d)", cls, (int)ttype);
  }

  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);
  return p;
}
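/* For example (a sketch): typed allocation elsewhere in this file goes through the
   MRB_OBJ_ALLOC convenience macro (as used in mrb_alloca() above), which wraps
   mrb_obj_alloc() and casts the result:

     struct RString *s = MRB_OBJ_ALLOC(mrb, MRB_TT_STRING, mrb->string_class);
*/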
static void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}
mrb_int mrb_ci_nregs(mrb_callinfo *ci);
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;

  if (c->stbase == NULL) return;
  if (c->ci) {
    e = (c->ci->stack ? c->ci->stack - c->stbase : 0);
    e += mrb_ci_nregs(c->ci);
  }
  else {
    e = 0;
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
  for (i = 0; i < e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  /* clear the unused stack region to avoid marking stale values */
  for (; i < e; i++) {
    c->stbase[i] = nil;
  }
}
static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  mrb_callinfo *ci;

 start:
  if (c->status == MRB_FIBER_TERMINATED) return;

  /* mark VM stack */
  mark_context_stack(mrb, c);

  /* mark call stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->u.target_class);
    }
  }
  /* mark fibers */
  mrb_gc_mark(mrb, (struct RBasic*)c->fib);
  if (c->prev) {
    c = c->prev;
    goto start;
  }
}
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->upper);
      mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      if (MRB_ENV_ONSTACK_P(e) && e->cxt && e->cxt->fib) {
        mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
      }
      len = MRB_ENV_LEN(e);
      for (i = 0; i < len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e = ARY_LEN(a);
      mrb_value *p = ARY_PTR(a);

      for (i = 0; i < e; i++) {
        mrb_gc_mark_value(mrb, p[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    if (RSTR_FSHARED_P(obj)) {
      struct RString *s = (struct RString*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_mark_range(mrb, (struct RRange*)obj);
    break;

  case MRB_TT_BREAK:
    {
      struct RBreak *brk = (struct RBreak*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)mrb_break_proc_get(brk));
      mrb_gc_mark_value(mrb, mrb_break_value_get(brk));
    }
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    if ((obj->flags & MRB_EXC_MESG_STRING_FLAG) != 0) {
      mrb_gc_mark(mrb, (struct RBasic*)((struct RException*)obj)->mesg);
    }
    break;

  default:
    break;
  }
}
MRB_API void
mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
{
  if (obj == 0) return;
  if (!is_white(obj)) return;
  if (is_red(obj)) return;
  mrb_assert((obj)->tt != MRB_TT_FREE);
  add_gray_list(mrb, &mrb->gc, obj);
}
static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n", (void*)obj, obj->tt));
  switch (obj->tt) {
  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ICLASS:
    if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_ONSTACK_P(e)) {
        /* cannot be freed */
        e->stack = NULL;
        break;
      }
      mrb_free(mrb, e->stack);
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c && c != mrb->root_c) {
        if (!end && c->status != MRB_FIBER_TERMINATED) {
          mrb_callinfo *ci = c->ci;
          mrb_callinfo *ce = c->cibase;

          while (ce <= ci) {
            struct REnv *e = ci->u.env;
            if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
                e->tt == MRB_TT_ENV && MRB_ENV_ONSTACK_P(e)) {
              mrb_env_unshare(mrb, e);
            }
            ci--;
          }
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
    else if (!ARY_EMBED_P(obj))
      mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep *irep = (mrb_irep*)p->body.irep;
        if (end) {
          mrb_irep_cutref(mrb, irep);
        }
        mrb_irep_decref(mrb, irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_free_range(mrb, ((struct RRange*)obj));
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

#if defined(MRB_USE_RATIONAL) && defined(MRB_INT64) && defined(MRB_32BIT)
  case MRB_TT_RATIONAL:
    {
      struct RData *o = (struct RData*)obj;
      mrb_free(mrb, o->iv);
    }
    break;
#endif

#if defined(MRB_USE_COMPLEX) && defined(MRB_32BIT) && !defined(MRB_USE_FLOAT32)
  case MRB_TT_COMPLEX:
    {
      struct RData *o = (struct RData*)obj;
      mrb_free(mrb, o->iv);
    }
    break;
#endif

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i = 0, e = gc->arena_idx; i < e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);

#ifndef MRB_NO_FLOAT
  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
  mrb_gc_mark(mrb, (struct RBasic*)mrb->integer_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}
/* rough estimation of number of GC marks (non recursive) */
static size_t
gc_gray_counts(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  size_t children = 0;

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += MRB_ENV_LEN(obj);
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c || c->status == MRB_FIBER_TERMINATED) break;

      /* mark stack */
      i = c->ci->stack - c->stbase;

      if (c->ci) {
        i += mrb_ci_nregs(c->ci);
      }
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;
      children += i;

      /* mark closure */
      if (c->cibase) {
        for (i = 0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      children += i;
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += ARY_LEN(a);
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
  case MRB_TT_BREAK:
    children += 2;
    break;

  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    if ((obj->flags & MRB_EXC_MESG_STRING_FLAG) != 0) {
      children++;
    }
    break;

  default:
    break;
  }
  return children;
}
static void
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
  while (gc->gray_list) {
    struct RBasic *obj = gc->gray_list;
    gc->gray_list = obj->gcnext;
    gc_mark_children(mrb, gc, obj);
  }
}
static size_t
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  size_t tried_marks = 0;

  while (gc->gray_list && tried_marks < limit) {
    struct RBasic *obj = gc->gray_list;
    gc->gray_list = obj->gcnext;
    gc_mark_children(mrb, gc, obj);
    tried_marks += gc_gray_counts(mrb, gc, obj);
  }

  return tried_marks;
}
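/* Atomic part of marking: re-scan the roots that may have changed since the
   incremental steps, drain the remaining gray list, then drain the atomic
   gray list queued by mrb_write_barrier(). */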
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i = 0, e = gc->arena_idx; i < e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  if (mrb->c != mrb->root_c) {
    mark_context(mrb, mrb->root_c);
  }
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}
static void
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
  gc->state = MRB_GC_STATE_SWEEP;
  gc->sweeps = gc->heaps;
  gc->live_after_mark = gc->live;
}
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p < e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;
  return tried_sweep;
}
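/* One GC increment: ROOT scans the roots and flips the current white, MARK drains
   the gray list (finishing with the atomic final mark and sweep preparation), and
   SWEEP walks the heap pages. The return value is a rough amount of work done,
   which incremental_gc_step() uses to budget each step. */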
static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    flip_white_part(gc);
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
    size_t tried_sweep = 0;
    tried_sweep = incremental_sweep_phase(mrb, gc, limit);
    if (tried_sweep == 0)
      gc->state = MRB_GC_STATE_ROOT;
    return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}
static void
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
  do {
    incremental_gc(mrb, gc, SIZE_MAX);
  } while (gc->state != to_state);
}
static void
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
  size_t limit = 0, result = 0;
  limit = (GC_STEP_SIZE/100) * gc->step_ratio;
  while (result < limit) {
    result += incremental_gc(mrb, gc, limit);
    if (gc->state == MRB_GC_STATE_ROOT)
      break;
  }

  gc->threshold = gc->live + GC_STEP_SIZE;
}
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half-baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;

      gc->full = FALSE;
      if (threshold < MAJOR_GC_TOOMANY) {
        gc->majorgc_old_threshold = threshold;
      }
      else {
        /* too many objects allocated during incremental GC, */
        /* instead of increasing threshold, invoke full GC. */
        mrb_full_gc(mrb);
      }
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}
/* Perform a full GC cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (!mrb->c) return;
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish the half-baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

#ifdef MRB_USE_MALLOC_TRIM
  malloc_trim(0);
#endif
  GC_TIME_STOP_AND_REPORT;
}
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}
/*
 * Field write barrier
 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
 */
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);

  if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
    add_gray_list(mrb, gc, value);
  }
  else {
    mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
    paint_partial_white(gc, obj); /* repaint obj white: no further barrier needed during sweep */
  }
}
/*
 * Write barrier
 * Paint obj(Black) to obj(Gray).
 *
 * The object that is painted gray will be traversed atomically in the final
 * mark phase, so use this write barrier for frequently written spots,
 * e.g. setting elements of an Array.
 */
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}
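/* For example (a sketch; `a` is a hypothetical non-shared heap array already made
   writable from C): after many raw element stores, one container-level barrier
   suffices:

     for (i = 0; i < n; i++) ARY_PTR(a)[i] = vals[i];   // frequent raw writes
     mrb_write_barrier(mrb, (struct RBasic*)a);         // re-gray `a` once
*/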
/*
 *  call-seq:
 *     GC.start     -> nil
 *
 *  Initiates full garbage collection.
 */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}
/*
 *  call-seq:
 *     GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 *
 */
static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = FALSE;

  return mrb_bool_value(old);
}
/*
 *  call-seq:
 *     GC.disable   -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 *
 */
static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = TRUE;

  return mrb_bool_value(old);
}
/*
 *  call-seq:
 *     GC.interval_ratio   -> int
 *
 *  Returns the ratio of the GC interval. The default value is 200(%).
 */
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_int_value(mrb, mrb->gc.interval_ratio);
}
/*
 *  call-seq:
 *     GC.interval_ratio = int   -> nil
 *
 *  Updates the ratio of the GC interval. The default value is 200(%).
 *  With a value of 100(%), the next GC cycle starts as soon as all the
 *  steps of the current one have finished.
 */
static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.interval_ratio = (int)ratio;
  return mrb_nil_value();
}
/*
 *  call-seq:
 *     GC.step_ratio   -> int
 *
 *  Returns the step span ratio of incremental GC. The default value is 200(%).
 */
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_int_value(mrb, mrb->gc.step_ratio);
}
/*
 *  call-seq:
 *     GC.step_ratio = int   -> nil
 *
 *  Updates the step span ratio of incremental GC. The default value is 200(%).
 *  A larger ratio makes each incremental GC step longer.
 */
static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.step_ratio = (int)ratio;
  return mrb_nil_value();
}
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    return;
  }
  if (is_generational(gc) && !enable) {
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  gc->generational = enable;
}
/*
 *  call-seq:
 *     GC.generational_mode   -> true or false
 *
 *  Returns whether the GC is in generational mode (as opposed to normal mode).
 */
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->gc.generational);
}
/*
 *  call-seq:
 *     GC.generational_mode = true or false   -> true or false
 *
 *  Switches the GC between generational and normal mode.
 */
static mrb_value
gc_generational_mode_set(mrb_state *mrb, mrb_value self)
{
  mrb_bool enable;

  mrb_get_args(mrb, "b", &enable);
  if (mrb->gc.generational != enable)
    change_gen_gc_mode(mrb, &mrb->gc, enable);

  return mrb_bool_value(enable);
}
static void
gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
{
  mrb_heap_page *page;

  page = gc->heaps;
  while (page != NULL) {
    RVALUE *p;
    int i;

    p = objects(page);
    for (i = 0; i < MRB_HEAP_PAGE_SIZE; i++) {
      if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
        return;
    }
    page = page->next;
  }
}
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  mrb_bool iterating = mrb->gc.iterating;

  mrb_full_gc(mrb);
  mrb->gc.iterating = TRUE;
  if (iterating) {
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
    } MRB_CATCH(&c_jmp) {
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}
size_t
mrb_objspace_page_slot_size(void)
{
  return sizeof(RVALUE);
}
void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  mrb_static_assert(sizeof(RVALUE) <= sizeof(void*) * 6,
                    "RVALUE size must be within 6 words");

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
}