#ifndef INTERNAL_GC_H                                    /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_GC_H
/**
 * @author     Ruby developers <ruby-core@ruby-lang.org>
 * @copyright  This file is a part of the programming language Ruby.
 *             Permission is hereby granted, to either redistribute and/or
 *             modify this file, provided that the conditions mentioned in the
 *             file COPYING are met.  Consult the file for details.
 * @brief      Internal header for GC.
 */
#include "ruby/internal/config.h"

#include <stddef.h>             /* for size_t */

#include "internal/compilers.h" /* for __has_attribute */
#include "ruby/ruby.h"          /* for rb_event_flag_t */
#include "vm_core.h"            /* for GET_EC() */

#ifndef USE_MODULAR_GC
# define USE_MODULAR_GC 0
#endif

#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
#elif defined(__i386) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
#elif defined(__aarch64__) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
#else
NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
#define USE_CONSERVATIVE_STACK_END
#endif
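/*
 * Illustrative sketch: SET_MACHINE_STACK_END() stores the current stack
 * pointer into *p, giving the conservative collector a bound for scanning the
 * machine stack.  `stack_start` and `mark_locations_between` below are
 * hypothetical names used only for illustration.
 *
 *     VALUE *stack_end;
 *     SET_MACHINE_STACK_END(&stack_end);
 *     mark_locations_between(stack_start, stack_end);
 */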
#ifndef RUBY_MARK_FREE_DEBUG
#define RUBY_MARK_FREE_DEBUG 0
#endif

#if RUBY_MARK_FREE_DEBUG
extern int ruby_gc_debug_indent;

static inline void
rb_gc_debug_indent(void)
{
    ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
}
static inline void
rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
{
    if (st == 0) {
        ruby_gc_debug_indent--;
    }
    rb_gc_debug_indent();
    ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);

    if (st) {
        ruby_gc_debug_indent++;
    }

    fflush(stdout);
}
#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
#define RUBY_GC_INFO         rb_gc_debug_indent(), ruby_debug_printf
#else
#define RUBY_MARK_ENTER(msg)
#define RUBY_MARK_LEAVE(msg)
#define RUBY_FREE_ENTER(msg)
#define RUBY_FREE_LEAVE(msg)
#define RUBY_GC_INFO if(0)printf
#endif
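/*
 * Illustrative sketch (hypothetical mark routine): the ENTER/LEAVE macros
 * reference a local named `ptr`, so they are meant for functions that already
 * have one in scope.
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         RUBY_MARK_ENTER("foo");
 *         // ... mark the members reachable from ptr ...
 *         RUBY_MARK_LEAVE("foo");
 *     }
 */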
#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;}
#if STACK_GROW_DIRECTION > 0
# define STACK_UPPER(x, a, b) (a)
#elif STACK_GROW_DIRECTION < 0
# define STACK_UPPER(x, a, b) (b)
#else
RUBY_EXTERN int ruby_stack_grow_direction;
int ruby_get_stack_grow_direction(volatile VALUE *addr);
# define stack_growup_p(x) ( \
        (ruby_stack_grow_direction ? \
         ruby_stack_grow_direction : \
         ruby_get_stack_grow_direction(x)) > 0)
# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
#endif
/*
  STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.

  On most normal systems, stacks grow from high addresses to lower addresses. In
  this case, STACK_DIR_UPPER(a, b) will return (b), but on exotic systems where
  the stack grows UP (from low addresses to high addresses), it will return (a).
*/
#if STACK_GROW_DIRECTION
#define STACK_GROW_DIR_DETECTION
#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
#else
#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
#endif
#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
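/*
 * Illustrative sketch: declare STACK_GROW_DIR_DETECTION once in a function,
 * then STACK_DIR_UPPER() or IS_STACK_DIR_UPPER() can select behaviour by
 * growth direction; the detection variable only exists when the direction has
 * to be probed at run time.
 *
 *     STACK_GROW_DIR_DETECTION;
 *     if (IS_STACK_DIR_UPPER()) {
 *         // stack grows toward higher addresses (exotic)
 *     }
 *     else {
 *         // stack grows toward lower addresses (the common case)
 *     }
 */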
const char *rb_obj_info(VALUE obj);
const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);

struct rb_execution_context_struct; /* in vm_core.h */
struct rb_objspace; /* in vm_core.h */
#define NEWOBJ_OF(var, T, c, f, s, ec) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
            rb_wb_protected_newobj_of((ec ? ec : GET_EC()), (c), (f) & ~FL_WB_PROTECTED, s) : \
            rb_wb_unprotected_newobj_of((c), (f), s))
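/*
 * Illustrative sketch (hypothetical call site): NEWOBJ_OF() declares `o`,
 * picks the write-barrier-protected allocation path when FL_WB_PROTECTED is
 * set in the flags, and falls back to GET_EC() when no execution context is
 * passed.  The flags and size shown here are for illustration only.
 *
 *     NEWOBJ_OF(o, struct RObject, klass,
 *               T_OBJECT | FL_WB_PROTECTED, sizeof(struct RObject), ec);
 */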
#ifndef RB_GC_OBJECT_METADATA_ENTRY_DEFINED
# define RB_GC_OBJECT_METADATA_ENTRY_DEFINED
struct rb_gc_object_metadata_entry {
    ID name;
    VALUE val;
};
#endif
#ifndef USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! (__has_warning("-Waddress-of-packed-member") || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#else
# include "internal/warnings.h"
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    __typeof__(expr) unaligned_member_access_result = (expr); \
    COMPILER_WARNING_POP; \
    unaligned_member_access_result; \
})
# define UNALIGNED_MEMBER_PTR(ptr, mem) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    const volatile void *unaligned_member_ptr_result = &(ptr)->mem; \
    COMPILER_WARNING_POP; \
    (__typeof__((ptr)->mem) *)unaligned_member_ptr_result; \
})
#endif
#ifndef UNALIGNED_MEMBER_PTR
# define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
#endif
#define RB_OBJ_WRITE_UNALIGNED(old, slot, young) do { \
    VALUE *_slot = UNALIGNED_MEMBER_ACCESS(slot); \
    RB_OBJ_WRITE(old, _slot, young); \
} while (0)
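/*
 * Illustrative sketch (hypothetical packed struct): taking the address of a
 * packed member normally trips -Waddress-of-packed-member, so the slot is
 * routed through UNALIGNED_MEMBER_ACCESS() before the ordinary write barrier
 * fires.  `owner` and `child` are hypothetical VALUEs.
 *
 *     struct packed_node {
 *         char tag;
 *         VALUE ref;
 *     } __attribute__((packed)) *node;
 *
 *     RB_OBJ_WRITE_UNALIGNED(owner, &node->ref, child);
 */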
/* Used in places that could malloc during GC, which can cause the GC to run.
 * We need to temporarily disable the GC to allow the malloc to happen.
 * Allocating memory during GC is a bad idea, so use this only when absolutely
 * necessary. */
#define DURING_GC_COULD_MALLOC_REGION_START() \
    assert(rb_during_gc()); \
    VALUE _already_disabled = rb_gc_disable_no_rest()

#define DURING_GC_COULD_MALLOC_REGION_END() \
    if (_already_disabled == Qfalse) rb_gc_enable()
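/*
 * Illustrative sketch (hypothetical call site): wrap an allocation that may
 * happen while the GC is running.  START asserts that we really are inside
 * GC and remembers whether GC was already disabled; END re-enables GC only if
 * this region was the one that disabled it.  `table` is hypothetical.
 *
 *     DURING_GC_COULD_MALLOC_REGION_START();
 *     {
 *         table = st_init_numtable();
 *     }
 *     DURING_GC_COULD_MALLOC_REGION_END();
 */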
RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
RUBY_ATTR_MALLOC void *ruby_mimcalloc(size_t num, size_t size);
void ruby_mimfree(void *ptr);
void rb_gc_prepare_heap(void);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
VALUE rb_objspace_gc_enable(void *objspace);
VALUE rb_objspace_gc_disable(void *objspace);
void ruby_gc_set_params(void);
void rb_gc_copy_attributes(VALUE dest, VALUE obj);
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
size_t rb_malloc_grow_capa(size_t current_capacity, size_t type_size);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
static inline void *ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
void rb_gc_obj_id_moved(VALUE obj);

void *rb_gc_ractor_cache_alloc(rb_ractor_t *ractor);
void rb_gc_ractor_cache_free(void *cache);

bool rb_gc_size_allocatable_p(size_t size);
size_t *rb_gc_heap_sizes(void);
size_t rb_gc_heap_id_for_size(size_t size);

void rb_gc_mark_and_move(VALUE *ptr);

void rb_gc_mark_weak(VALUE *ptr);
void rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr);

void rb_gc_ref_update_table_values_only(st_table *tbl);

void rb_gc_initial_stress_set(VALUE flag);

void rb_gc_before_fork(void);
void rb_gc_after_fork(rb_pid_t pid);
#define rb_gc_mark_and_move_ptr(ptr) do { \
    VALUE _obj = (VALUE)*(ptr); \
    rb_gc_mark_and_move(&_obj); \
    if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
} while (0)
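/*
 * Illustrative sketch (hypothetical dmark/dcompact callback): during marking,
 * rb_gc_mark_and_move() marks the referenced object; during compaction it
 * rewrites the slot with the object's new address, so one callback can serve
 * both phases.  `struct foo` is hypothetical.
 *
 *     static void
 *     foo_mark_and_move(void *p)
 *     {
 *         struct foo *data = p;
 *         rb_gc_mark_and_move(&data->obj);
 *     }
 */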
RUBY_SYMBOL_EXPORT_BEGIN
/* exports for objspace module */
void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
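/*
 * Illustrative sketch (hypothetical callback for the objspace module):
 * rb_objspace_reachable_objects_from() invokes `func` for each object directly
 * reachable from `obj`, passing the opaque `data` pointer through.
 *
 *     static void
 *     count_edge(VALUE child, void *data)
 *     {
 *         (*(size_t *)data)++;
 *     }
 *
 *     size_t n = 0;
 *     rb_objspace_reachable_objects_from(obj, count_edge, &n);
 */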
int rb_objspace_internal_object_p(VALUE obj);
int rb_objspace_garbage_object_p(VALUE obj);
bool rb_gc_pointer_to_heap_p(VALUE obj);

void rb_objspace_each_objects(
    int (*callback)(void *start, void *end, size_t stride, void *data),
    void *data);

size_t rb_gc_obj_slot_size(VALUE obj);
VALUE rb_gc_disable_no_rest(void);

#define RB_GC_MAX_NAME_LEN 20
const char *rb_objspace_data_type_name(VALUE obj);
VALUE rb_wb_protected_newobj_of(struct rb_execution_context_struct *, VALUE, VALUE, size_t);
VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
size_t rb_obj_memsize_of(VALUE);
struct rb_gc_object_metadata_entry *rb_gc_object_metadata(VALUE obj);
void rb_gc_mark_values(long n, const VALUE *values);
void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
void rb_gc_update_values(long n, VALUE *values);
void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);

const char *rb_gc_active_gc_name(void);
int rb_gc_modular_gc_loaded_p(void);

RUBY_SYMBOL_EXPORT_END
int rb_ec_stack_check(struct rb_execution_context_struct *ec);
void rb_gc_writebarrier_remember(VALUE obj);
const char *rb_obj_info(VALUE obj);
void ruby_annotate_mmap(const void *addr, unsigned long size, const char *name);
#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_xrealloc(ptr, new_size);
}
static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, elemsiz);
}
static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_xfree(ptr);
}

# define SIZED_REALLOC_N(x, y, z, w) REALLOC_N(x, y, z)
static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, element_size);
}
#else

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_sized_xrealloc(ptr, new_size, old_size);
}
static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, elemsiz, old_count);
}
static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_sized_xfree(ptr, size);
}
# define SIZED_REALLOC_N(v, T, m, n) \
    ((v) = (T *)ruby_sized_xrealloc2((void *)(v), (m), sizeof(T), (n)))
static inline void *
ruby_sized_realloc_n(void *ptr, size_t new_count, size_t element_size, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, element_size, old_count);
}

#endif /* HAVE_MALLOC_USABLE_SIZE */
#define ruby_sized_xrealloc ruby_sized_xrealloc_inlined
#define ruby_sized_xrealloc2 ruby_sized_xrealloc2_inlined
#define ruby_sized_xfree ruby_sized_xfree_inlined
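/*
 * Illustrative sketch (hypothetical buffer, counts `old_n`/`new_n`): the sized
 * variants carry the old size so allocation bookkeeping can stay accurate on
 * builds without malloc_usable_size(); where the platform can report usable
 * sizes, they forward to the plain ruby_xrealloc()/ruby_xfree() family, as in
 * the #if block above.
 *
 *     VALUE *buf = ALLOC_N(VALUE, old_n);
 *     SIZED_REALLOC_N(buf, VALUE, new_n, old_n);
 *     ruby_sized_xfree(buf, new_n * sizeof(VALUE));
 */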
#endif /* INTERNAL_GC_H */