/*
** gc.c - garbage collector for mruby
**
** See Copyright Notice in mruby.h
*/

#include <string.h>
#ifdef MRB_USE_MALLOC_TRIM
#include <malloc.h>
#endif
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/data.h>
#include <mruby/istruct.h>
#include <mruby/hash.h>
#include <mruby/proc.h>
#include <mruby/range.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/gc.h>
#include <mruby/error.h>
#include <mruby/throw.h>
#include <mruby/presym.h>

#ifdef MRB_GC_STRESS
#include <stdlib.h>
#endif

/*
  = Tri-color Incremental Garbage Collection

  mruby's GC is a tri-color incremental mark & sweep collector. The
  algorithm itself is not described here; instead, the implementation
  is outlined below.
  == Object's Color

  Each object is painted in one of three colors:

  * White - Unmarked.
  * Gray  - Marked, but its child objects are not yet marked.
  * Black - Marked, and its child objects are marked as well.

  Extra color:

  * Red   - Static (ROM) object that never needs to be collected.
          - All child objects of a Red object should be Red as well.
  == Two White Types

  There are two white color types used in a flip-flop fashion: White-A and
  White-B, which respectively represent the Current White color (the newly
  allocated objects in the current GC cycle) and the Sweep Target White
  color (the dead objects to be swept).

  A and B are switched at the beginning of the next GC cycle. At that
  point, all the dead objects have been swept, while the objects newly
  created during the current GC cycle that finally remain White are now
  regarded as dead. Instead of traversing all the White-A objects and
  repainting them White-B, we just swap the meaning of White-A and
  White-B, which is much cheaper, as illustrated below.

  As a result, the objects we sweep in the current GC cycle are always
  leftovers from the previous GC cycle. This allows us to sweep objects
  incrementally, without interference from newly created objects.
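  A small illustration using the color macros defined later in this file
  (no new API, just the existing machinery):

      gc->current_white_part = GC_WHITE_A;
      obj->color = gc->current_white_part;   // obj is born White-A
      flip_white_part(gc);                   // new cycle: White-B is current
      // now (obj->color & other_white_part(gc)) != 0,
      // so is_dead(gc, obj) holds without touching obj at all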
  == Execution Timing

  GC execution timing and the interval of each step are decided by the
  live object count. List of adjustment APIs:

  * gc_interval_ratio_set
  * gc_step_ratio_set

  For details, see the comments for each function.
  == Write Barrier

  mruby implementers and C extension library writers must insert a write
  barrier when updating a reference held in a field of an object.
  When updating a reference from a field of object A to object B,
  two different types of write barrier are available:

  * mrb_field_write_barrier - mark the target object B (see the sketch below).
  * mrb_write_barrier       - mark the source object A again.
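  A minimal sketch of where the field barrier is needed. The container
  type and setter here are hypothetical, not part of mruby:

      struct my_box { MRB_OBJECT_HEADER; struct RBasic *child; };

      static void
      box_set(mrb_state *mrb, struct my_box *box, struct RBasic *child)
      {
        box->child = child;   // reference from box (A) to child (B) updated
        mrb_field_write_barrier(mrb, (struct RBasic*)box, child);
      }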
  == Generational Mode

  mruby's GC offers a Generational Mode while re-using the tri-color GC
  infrastructure. It treats the Black objects as Old objects after each
  sweep phase, instead of painting them White. The key ideas are the same
  as in traditional generational GC:

  * Minor GC - only traverse the Young objects (Gray objects) in the mark
               phase, then sweep only the newly created objects, leaving
               the Old objects alive.

  * Major GC - same as a full regular GC cycle.

  The difference from "traditional" generational GC is that the major GC
  in mruby is triggered incrementally, in a tri-color manner.

  For details, see the comments for each function.
*/

struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;
};

struct RVALUE_initializer {
  MRB_OBJECT_HEADER;
  char padding[sizeof(void*) * 4 - sizeof(uint32_t)];
};

typedef struct {
  union {
    struct RVALUE_initializer init; /* must be first member to ensure initialization */
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RIStruct istruct;
    struct RProc proc;
    struct REnv env;
    struct RFiber fiber;
    struct RException exc;
    struct RBreak brk;
  } as;
} RVALUE;

#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif

#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

#define GC_STEP_SIZE 1024

/* white: 001 or 010, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_RED MRB_GC_RED
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7
mrb_static_assert1(MRB_GC_RED <= GC_COLOR_MASK);

#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color == GC_BLACK)
#define is_red(o) ((o)->color == GC_RED)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)

#define objects(p) ((RVALUE *)p->objects)

mrb_noreturn void mrb_raise_nomemory(mrb_state *mrb);

MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  if (!p2 && len > 0 && mrb->gc.heaps) {
    mrb_full_gc(mrb);
    p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  }

  return p2;
}

MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = mrb_realloc_simple(mrb, p, len);
  if (len == 0) return p2;
  if (p2 == NULL) {
    mrb->gc.out_of_memory = TRUE;
    mrb_raise_nomemory(mrb);
  }
  else {
    mrb->gc.out_of_memory = FALSE;
  }

  return p2;
}

MRB_API void*
mrb_malloc(mrb_state *mrb, size_t len)
{
  return mrb_realloc(mrb, 0, len);
}

MRB_API void*
mrb_malloc_simple(mrb_state *mrb, size_t len)
{
  return mrb_realloc_simple(mrb, 0, len);
}

MRB_API void*
mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
{
  void *p;

  if (nelem > 0 && len > 0 &&
      nelem <= SIZE_MAX / len) {
    size_t size;
    size = nelem * len;
    p = mrb_malloc(mrb, size);

    memset(p, 0, size);
  }
  else {
    p = NULL;
  }

  return p;
}

MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}

MRB_API void*
mrb_alloca(mrb_state *mrb, size_t size)
{
  struct RString *s;
  s = MRB_OBJ_ALLOC(mrb, MRB_TT_STRING, mrb->string_class);
  return s->as.heap.ptr = (char*)mrb_malloc(mrb, size);
}

static mrb_bool
heap_p(mrb_gc *gc, struct RBasic *object)
{
  mrb_heap_page* page;

  page = gc->heaps;
  while (page) {
    RVALUE *p;

    p = objects(page);
    if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
      return TRUE;
    }
    page = page->next;
  }
  return FALSE;
}

MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
  mrb_gc *gc = &mrb->gc;
  if (!heap_p(gc, object)) return TRUE;
  return is_dead(gc, object);
}

static void
link_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->next = gc->heaps;
  if (gc->heaps)
    gc->heaps->prev = page;
  gc->heaps = page;
}

static void
unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->prev)
    page->prev->next = page->next;
  if (page->next)
    page->next->prev = page->prev;
  if (gc->heaps == page)
    gc->heaps = page->next;
  page->prev = NULL;
  page->next = NULL;
}

static void
link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->free_next = gc->free_heaps;
  if (gc->free_heaps) {
    gc->free_heaps->free_prev = page;
  }
  gc->free_heaps = page;
}

static void
unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->free_prev)
    page->free_prev->free_next = page->free_next;
  if (page->free_next)
    page->free_next->free_prev = page->free_prev;
  if (gc->free_heaps == page)
    gc->free_heaps = page->free_next;
  page->free_prev = NULL;
  page->free_next = NULL;
}

static void
add_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
  RVALUE *p, *e;
  struct RBasic *prev = NULL;

  for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
    p->as.free.tt = MRB_TT_FREE;
    p->as.free.next = prev;
    prev = &p->as.basic;
  }
  page->freelist = prev;

  link_heap_page(gc, page);
  link_free_heap_page(gc, page);
}

#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define MAJOR_GC_INC_RATIO 120
#define MAJOR_GC_TOOMANY 10000
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)

void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}

static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);

static void
free_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = gc->heaps;
  mrb_heap_page *tmp;
  RVALUE *p, *e;

  while (page) {
    tmp = page;
    page = page->next;
    for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
      if (p->as.free.tt != MRB_TT_FREE)
        obj_free(mrb, &p->as.basic, TRUE);
    }
    mrb_free(mrb, tmp);
  }
}

void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}

static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}

/* mrb_gc_protect() leaves the object in the arena */
MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
  if (mrb_immediate_p(obj)) return;
  gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
}

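/*
 * Usage sketch: mrb_gc_arena_save()/mrb_gc_arena_restore() are the public
 * mruby API around this arena; the loop body below is hypothetical. C code
 * that creates many temporary objects should save and restore the arena
 * index so gc_protect() does not overflow the arena:
 *
 *   int ai = mrb_gc_arena_save(mrb);
 *   for (i = 0; i < n; i++) {
 *     mrb_value tmp = mrb_str_new_cstr(mrb, "temporary");
 *     use_temporary(mrb, tmp);          // hypothetical helper
 *     mrb_gc_arena_restore(mrb, ai);    // drop tmp from the arena each turn
 *   }
 */
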
#define GC_ROOT_SYM MRB_SYM(_gc_root_)

/* mrb_gc_register() keeps the object away from GC.

   Register your object when it is exported to the C world without any
   reference from the Ruby world, e.g. as a callback argument. Don't
   forget to remove the object with mrb_gc_unregister; otherwise your
   object will leak.
*/

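/*
 * Example sketch: protecting a block passed from Ruby while a C library
 * holds it (the library call is hypothetical; the GC calls are the API
 * documented above):
 *
 *   mrb_value blk;
 *   mrb_get_args(mrb, "&", &blk);
 *   mrb_gc_register(mrb, blk);       // keep the block alive while C holds it
 *   some_c_library_set_callback(blk);
 *   ...
 *   mrb_gc_unregister(mrb, blk);     // release when the callback is removed
 */
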
MRB_API void
mrb_gc_register(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;

  if (mrb_immediate_p(obj)) return;
  root = GC_ROOT_SYM;
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table) || !mrb_array_p(table)) {
    table = mrb_ary_new(mrb);
    mrb_gv_set(mrb, root, table);
  }
  mrb_ary_push(mrb, table, obj);
}

/* mrb_gc_unregister() removes the object from GC root. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;
  struct RArray *a;
  mrb_int i;

  if (mrb_immediate_p(obj)) return;
  root = GC_ROOT_SYM;
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table)) return;
  if (!mrb_array_p(table)) {
    mrb_gv_set(mrb, root, mrb_nil_value());
    return;
  }
  a = mrb_ary_ptr(table);
  mrb_ary_modify(mrb, a);
  for (i = 0; i < ARY_LEN(a); i++) {
    if (mrb_ptr(ARY_PTR(a)[i]) == mrb_ptr(obj)) {
      mrb_int len = ARY_LEN(a)-1;
      mrb_value *ptr = ARY_PTR(a);

      ARY_SET_LEN(a, len);
      memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
      break;
    }
  }
}

MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { NULL, NULL, MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C", cls);
    }
  }
  if (ttype <= MRB_TT_FREE) {
    mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C (type %d)", cls, (int)ttype);
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);
  return p;
}

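/*
 * Usage sketch: callers normally allocate through the typed wrapper macro
 * used elsewhere in this file (MRB_OBJ_ALLOC) rather than calling
 * mrb_obj_alloc() directly. A hypothetical example creating a plain object
 * of `some_class`:
 *
 *   struct RObject *o = MRB_OBJ_ALLOC(mrb, MRB_TT_OBJECT, some_class);
 *   mrb_value v = mrb_obj_value(o);   // box the fresh (white) object
 */
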
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}

mrb_int mrb_ci_nregs(mrb_callinfo *ci);

static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;

  if (c->stbase == NULL) return;
  if (c->ci) {
    e = (c->ci->stack ? c->ci->stack - c->stbase : 0);
    e += mrb_ci_nregs(c->ci);
  }
  else {
    e = 0;
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  for (; i<e; i++) {
    c->stbase[i] = nil;
  }
}

static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  mrb_callinfo *ci;

 start:
  if (c->status == MRB_FIBER_TERMINATED) return;

  /* mark VM stack */
  mark_context_stack(mrb, c);

  /* mark call stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->u.target_class);
    }
  }
  /* mark fibers */
  mrb_gc_mark(mrb, (struct RBasic*)c->fib);
  if (c->prev) {
    c = c->prev;
    goto start;
  }
}

static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->upper);
      mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      if (MRB_ENV_ONSTACK_P(e) && e->cxt && e->cxt->fib) {
        mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
      }
      len = MRB_ENV_LEN(e);
      for (i=0; i<len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_STRUCT:
  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e=ARY_LEN(a);
      mrb_value *p = ARY_PTR(a);

      for (i=0; i<e; i++) {
        mrb_gc_mark_value(mrb, p[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    if (RSTR_FSHARED_P(obj)) {
      struct RString *s = (struct RString*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_mark_range(mrb, (struct RRange*)obj);
    break;

  case MRB_TT_BREAK:
    {
      struct RBreak *brk = (struct RBreak*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)mrb_break_proc_get(brk));
      mrb_gc_mark_value(mrb, mrb_break_value_get(brk));
    }
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    if ((obj->flags & MRB_EXC_MESG_STRING_FLAG) != 0) {
      mrb_gc_mark(mrb, (struct RBasic*)((struct RException*)obj)->mesg);
    }
    break;

  default:
    break;
  }
}

MRB_API void
mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
{
  if (obj == 0) return;
  if (!is_white(obj)) return;
  if (is_red(obj)) return;
  mrb_assert((obj)->tt != MRB_TT_FREE);
  add_gray_list(mrb, &mrb->gc, obj);
}

static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    if (!end)
      mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ICLASS:
    if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    if (!end)
      mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_ONSTACK_P(e)) {
        /* cannot be freed */
        e->stack = NULL;
        break;
      }
      mrb_free(mrb, e->stack);
      e->stack = NULL;
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c && c != mrb->root_c) {
        if (!end && c->status != MRB_FIBER_TERMINATED) {
          mrb_callinfo *ci = c->ci;
          mrb_callinfo *ce = c->cibase;

          while (ce <= ci) {
            struct REnv *e = ci->u.env;
            if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
                e->tt == MRB_TT_ENV && MRB_ENV_ONSTACK_P(e)) {
              mrb_env_unshare(mrb, e);
            }
            ci--;
          }
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_STRUCT:
  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
    else if (!ARY_EMBED_P(obj))
      mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep *irep = (mrb_irep*)p->body.irep;
        if (end) {
          mrb_irep_cutref(mrb, irep);
        }
        mrb_irep_decref(mrb, irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_free_range(mrb, ((struct RRange*)obj));
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

#if defined(MRB_USE_RATIONAL) && defined(MRB_INT64) && defined(MRB_32BIT)
  case MRB_TT_RATIONAL:
    {
      struct RData *o = (struct RData*)obj;
      mrb_free(mrb, o->iv);
    }
    break;
#endif

#if defined(MRB_USE_COMPLEX) && defined(MRB_32BIT) && !defined(MRB_USE_FLOAT32)
  case MRB_TT_COMPLEX:
    {
      struct RData *o = (struct RData*)obj;
      mrb_free(mrb, o->iv);
    }
    break;
#endif

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}

static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);

#ifndef MRB_NO_FLOAT
  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
  mrb_gc_mark(mrb, (struct RBasic*)mrb->integer_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}

/* rough estimation of number of GC marks (non recursive) */
static size_t
gc_gray_counts(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  size_t children = 0;

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += MRB_ENV_LEN(obj);
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c || c->status == MRB_FIBER_TERMINATED) break;

      /* mark stack */
      i = c->ci->stack - c->stbase;

      if (c->ci) {
        i += mrb_ci_nregs(c->ci);
      }
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;
      children += i;

      /* mark closure */
      if (c->cibase) {
        for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      children += i;
    }
    break;

  case MRB_TT_STRUCT:
  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += ARY_LEN(a);
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
  case MRB_TT_BREAK:
    children+=2;
    break;

  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    if ((obj->flags & MRB_EXC_MESG_STRING_FLAG) != 0) {
      children++;
    }
    break;

  default:
    break;
  }
  return children;
}

static void
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
  while (gc->gray_list) {
    struct RBasic *obj = gc->gray_list;
    gc->gray_list = obj->gcnext;
    gc_mark_children(mrb, gc, obj);
  }
}

static size_t
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  size_t tried_marks = 0;

  while (gc->gray_list && tried_marks < limit) {
    struct RBasic *obj = gc->gray_list;
    gc->gray_list = obj->gcnext;
    gc_mark_children(mrb, gc, obj);
    tried_marks += gc_gray_counts(mrb, gc, obj);
  }

  return tried_marks;
}

static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  if (mrb->c != mrb->root_c) {
    mark_context(mrb, mrb->root_c);
  }
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}

static void
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
  gc->state = MRB_GC_STATE_SWEEP;
  gc->sweeps = gc->heaps;
  gc->live_after_mark = gc->live;
}

static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;
  return tried_sweep;
}

static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    flip_white_part(gc);
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
    size_t tried_sweep = 0;
    tried_sweep = incremental_sweep_phase(mrb, gc, limit);
    if (tried_sweep == 0)
      gc->state = MRB_GC_STATE_ROOT;
    return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}

static void
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
  do {
    incremental_gc(mrb, gc, SIZE_MAX);
  } while (gc->state != to_state);
}

static void
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
  size_t limit = 0, result = 0;
  limit = (GC_STEP_SIZE/100) * gc->step_ratio;
  while (result < limit) {
    result += incremental_gc(mrb, gc, limit);
    if (gc->state == MRB_GC_STATE_ROOT)
      break;
  }

  gc->threshold = gc->live + GC_STEP_SIZE;
}

static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}

MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;

      gc->full = FALSE;
      if (threshold < MAJOR_GC_TOOMANY) {
        gc->majorgc_old_threshold = threshold;
      }
      else {
        /* too many objects allocated during incremental GC, */
        /* instead of increasing threshold, invoke full GC. */
        mrb_full_gc(mrb);
      }
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}

/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (!mrb->c) return;
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

#ifdef MRB_USE_MALLOC_TRIM
  malloc_trim(0);
#endif
  GC_TIME_STOP_AND_REPORT;
}

MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}

/*
 * Field write barrier
 *   Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
 */
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);

  if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
    add_gray_list(mrb, gc, value);
  }
  else {
    mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
    paint_partial_white(gc, obj); /* repaint obj white so the barrier is not triggered repeatedly */
  }
}

/*
 * Write barrier
 *   Paint obj(Black) to obj(Gray).
 *
 *   The object painted gray will be traversed atomically in the final
 *   mark phase. Use this write barrier for frequently updated objects,
 *   e.g. when setting elements of an Array.
 */
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}

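/*
 * Usage sketch (hypothetical bulk update): after rewriting many slots of
 * an already-black array from C, one mrb_write_barrier() on the container
 * re-queues it on the atomic gray list, instead of issuing one field
 * barrier per slot:
 *
 *   struct RArray *a = mrb_ary_ptr(ary);
 *   for (i = 0; i < ARY_LEN(a); i++) {
 *     ARY_PTR(a)[i] = values[i];     // values[] is hypothetical
 *   }
 *   mrb_write_barrier(mrb, (struct RBasic*)a);
 */
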
/*
 *  call-seq:
 *     GC.start     -> nil
 *
 *  Initiates full garbage collection.
 *
 */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 *
 */
static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = FALSE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.disable   -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 *
 */
static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = TRUE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.interval_ratio    -> int
 *
 *  Returns the ratio of the GC interval. Default value is 200(%).
 *
 */
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_int_value(mrb, mrb->gc.interval_ratio);
}

/*
 *  call-seq:
 *     GC.interval_ratio = int    -> nil
 *
 *  Updates the ratio of the GC interval. Default value is 200(%).
 *  With 100(%), the next GC cycle starts as soon as all steps of the
 *  current cycle have finished.
 *
 */
static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.interval_ratio = (int)ratio;
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.step_ratio    -> int
 *
 *  Returns the step span ratio of incremental GC. Default value is 200(%).
 *
 */
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_int_value(mrb, mrb->gc.step_ratio);
}

/*
 *  call-seq:
 *     GC.step_ratio = int    -> nil
 *
 *  Updates the step span ratio of incremental GC. Default value is 200(%).
 *  A larger ratio makes each incremental GC step longer.
 *
 */
static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.step_ratio = (int)ratio;
  return mrb_nil_value();
}

static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    return;
  }
  if (is_generational(gc) && !enable) {
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}

/*
 *  call-seq:
 *     GC.generational_mode -> true or false
 *
 *  Returns whether the GC is in generational mode or normal mode.
 *
 */
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->gc.generational);
}

/*
 *  call-seq:
 *     GC.generational_mode = true or false -> true or false
 *
 *  Changes the GC to generational or normal mode.
 *
 */
static mrb_value
gc_generational_mode_set(mrb_state *mrb, mrb_value self)
{
  mrb_bool enable;

  mrb_get_args(mrb, "b", &enable);
  if (mrb->gc.generational != enable)
    change_gen_gc_mode(mrb, &mrb->gc, enable);

  return mrb_bool_value(enable);
}

static void
gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
{
  mrb_heap_page* page;

  page = gc->heaps;
  while (page != NULL) {
    RVALUE *p;
    int i;

    p = objects(page);
    for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
      if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
        return;
    }
    page = page->next;
  }
}

void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  mrb_bool iterating = mrb->gc.iterating;

  mrb_full_gc(mrb);
  mrb->gc.iterating = TRUE;
  if (iterating) {
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
    } MRB_CATCH(&c_jmp) {
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}

size_t
mrb_objspace_page_slot_size(void)
{
  return sizeof(RVALUE);
}

void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  mrb_static_assert(sizeof(RVALUE) <= sizeof(void*) * 6,
                    "RVALUE size must be within 6 words");

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
}