#ifndef INTERNAL_GC_H                                    /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_GC_H
/**
 * @author     Ruby developers <ruby-core@ruby-lang.org>
 * @copyright  This file is a part of the programming language Ruby.
 *             Permission is hereby granted, to either redistribute and/or
 *             modify this file, provided that the conditions mentioned in the
 *             file COPYING are met.  Consult the file for details.
 * @brief      Internal header for GC.
 */
#include "ruby/internal/config.h"

#include <stddef.h>             /* for size_t */

#include "internal/compilers.h" /* for __has_attribute */
#include "ruby/ruby.h"          /* for rb_event_flag_t */

#if defined(__x86_64__) && !defined(_ILP32) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movq\t%%rsp, %0" : "=r" (*(p)))
#elif defined(__i386) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("movl\t%%esp, %0" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && !defined(_AIX) && !defined(__APPLE__) // Not Apple is NEEDED to unbreak ppc64 build on Darwin. Don't ask.
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr\t%0, %%r1" : "=r" (*(p)))
#elif (defined(__powerpc__) || defined(__powerpc64__)) && defined(__GNUC__) && defined(_AIX)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mr %0,1" : "=r" (*(p)))
#elif defined(__POWERPC__) && defined(__APPLE__) // Darwin ppc and ppc64
#define SET_MACHINE_STACK_END(p) __asm__ volatile("mr %0, r1" : "=r" (*(p)))
#elif defined(__aarch64__) && defined(__GNUC__)
#define SET_MACHINE_STACK_END(p) __asm__ __volatile__ ("mov\t%0, sp" : "=r" (*(p)))
#else
NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
#define SET_MACHINE_STACK_END(p) rb_gc_set_stack_end(p)
#define USE_CONSERVATIVE_STACK_END
#endif
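
/*
 * Illustrative usage sketch (editor's note; `stack_end` is a hypothetical
 * local): SET_MACHINE_STACK_END() records the current native stack pointer
 * so the conservative collector knows how far to scan.
 *
 *     VALUE *stack_end;
 *     SET_MACHINE_STACK_END(&stack_end);
 *     // addresses between the stack start and stack_end are now scanned
 *     // for anything that looks like a VALUE reference
 */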

#define RB_GC_SAVE_MACHINE_CONTEXT(th)                          \
    do {                                                        \
        FLUSH_REGISTER_WINDOWS;                                 \
        setjmp((th)->ec->machine.regs);                         \
        SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end);    \
    } while (0)
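
/*
 * Descriptive note (editorial, based on the usual conservative-GC technique):
 * setjmp() spills the callee-saved registers into (th)->ec->machine.regs, so
 * a VALUE held only in a register becomes visible to the register/stack scan;
 * SET_MACHINE_STACK_END() then bounds that scan.
 */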

#ifndef RUBY_MARK_FREE_DEBUG
#define RUBY_MARK_FREE_DEBUG 0
#endif

#if RUBY_MARK_FREE_DEBUG
extern int ruby_gc_debug_indent;

static inline void
rb_gc_debug_indent(void)
{
    ruby_debug_printf("%*s", ruby_gc_debug_indent, "");
}

static inline void
rb_gc_debug_body(const char *mode, const char *msg, int st, void *ptr)
{
    if (st == 0) {
        ruby_gc_debug_indent--;
    }
    rb_gc_debug_indent();
    ruby_debug_printf("%s: %s %s (%p)\n", mode, st ? "->" : "<-", msg, ptr);

    if (st) {
        ruby_gc_debug_indent++;
    }

    fflush(stdout);
}

#define RUBY_MARK_ENTER(msg) rb_gc_debug_body("mark", (msg), 1, ptr)
#define RUBY_MARK_LEAVE(msg) rb_gc_debug_body("mark", (msg), 0, ptr)
#define RUBY_FREE_ENTER(msg) rb_gc_debug_body("free", (msg), 1, ptr)
#define RUBY_FREE_LEAVE(msg) rb_gc_debug_body("free", (msg), 0, ptr)
#define RUBY_GC_INFO         rb_gc_debug_indent(), ruby_debug_printf

#else
#define RUBY_MARK_ENTER(msg)
#define RUBY_MARK_LEAVE(msg)
#define RUBY_FREE_ENTER(msg)
#define RUBY_FREE_LEAVE(msg)
#define RUBY_GC_INFO if(0)printf
#endif
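
/*
 * Illustrative sketch (hypothetical `foo_mark`, editor's addition): the
 * ENTER/LEAVE pair brackets a type's mark function so nested marking prints
 * as an indented trace when RUBY_MARK_FREE_DEBUG is enabled.  Both macros
 * expand to a call that reads a local named `ptr`.
 *
 *     static void
 *     foo_mark(void *ptr)
 *     {
 *         struct foo *data = ptr;
 *         RUBY_MARK_ENTER("foo");
 *         if (data->owner) rb_gc_mark(data->owner);
 *         RUBY_MARK_LEAVE("foo");
 *     }
 */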

#define RUBY_MARK_MOVABLE_UNLESS_NULL(ptr) do { \
    VALUE markobj = (ptr); \
    if (RTEST(markobj)) {rb_gc_mark_movable(markobj);} \
  } while (0)
#define RUBY_MARK_UNLESS_NULL(ptr) do { \
    VALUE markobj = (ptr); \
    if (RTEST(markobj)) {rb_gc_mark(markobj);} \
  } while (0)
#define RUBY_FREE_UNLESS_NULL(ptr) if(ptr){ruby_xfree(ptr);(ptr)=NULL;}
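
/*
 * Illustrative sketch (hypothetical `foo_free`, editor's addition):
 * RUBY_FREE_UNLESS_NULL both frees and NULLs the member, so a second pass
 * over the same struct is harmless.
 *
 *     static void
 *     foo_free(void *ptr)
 *     {
 *         struct foo *data = ptr;
 *         RUBY_FREE_ENTER("foo");
 *         RUBY_FREE_UNLESS_NULL(data->buffer);
 *         ruby_xfree(data);
 *         RUBY_FREE_LEAVE("foo");
 *     }
 */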

#if STACK_GROW_DIRECTION > 0
# define STACK_UPPER(x, a, b) (a)
#elif STACK_GROW_DIRECTION < 0
# define STACK_UPPER(x, a, b) (b)
#else
RUBY_EXTERN int ruby_stack_grow_direction;
int ruby_get_stack_grow_direction(volatile VALUE *addr);
# define stack_growup_p(x) ( \
        (ruby_stack_grow_direction ? \
         ruby_stack_grow_direction : \
         ruby_get_stack_grow_direction(x)) > 0)
# define STACK_UPPER(x, a, b) (stack_growup_p(x) ? (a) : (b))
#endif

/*
  STACK_GROW_DIR_DETECTION is used with STACK_DIR_UPPER.

  On most normal systems, stacks grow from high addresses to lower addresses. In
  this case, STACK_DIR_UPPER(a, b) will return (b), but on exotic systems where
  the stack grows UP (from low addresses to high addresses), it will return (a).
*/

#if STACK_GROW_DIRECTION
#define STACK_GROW_DIR_DETECTION
#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, (a), (b))
#else
#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, (a), (b))
#endif
#define IS_STACK_DIR_UPPER() STACK_DIR_UPPER(1,0)
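
/*
 * Illustrative sketch (hypothetical helper, editor's addition): on platforms
 * where the direction is only known at run time, STACK_GROW_DIR_DETECTION
 * declares the probe local that STACK_DIR_UPPER passes to the detector, so
 * the two are used together:
 *
 *     static int
 *     addr_in_stack_range(VALUE *addr, VALUE *start, VALUE *end)
 *     {
 *         STACK_GROW_DIR_DETECTION;
 *         return STACK_DIR_UPPER(start <= addr && addr < end,
 *                                end < addr && addr <= start);
 *     }
 */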

const char *rb_obj_info(VALUE obj);
const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);

struct rb_thread_struct;
size_t rb_size_pool_slot_size(unsigned char pool_id);

struct rb_execution_context_struct; /* in vm_core.h */
struct rb_objspace; /* in vm_core.h */

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#define RB_RVARGC_NEWOBJ_OF(var, T, c, f, s) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
                     rb_wb_protected_newobj_of((c), (f) & ~FL_WB_PROTECTED, s) : \
                     rb_wb_unprotected_newobj_of((c), (f), s))

#define RB_RVARGC_EC_NEWOBJ_OF(ec, var, T, c, f, s) \
    T *(var) = (T *)(((f) & FL_WB_PROTECTED) ? \
                     rb_ec_wb_protected_newobj_of((ec), (c), (f) & ~FL_WB_PROTECTED, s) : \
                     rb_wb_unprotected_newobj_of((c), (f), s))

/* optimized version of NEWOBJ() */
#define RB_NEWOBJ_OF(var, T, c, f) RB_RVARGC_NEWOBJ_OF(var, T, c, f, RVALUE_SIZE)

#define RB_EC_NEWOBJ_OF(ec, var, T, c, f) RB_RVARGC_EC_NEWOBJ_OF(ec, var, T, c, f, RVALUE_SIZE)

#define NEWOBJ_OF(var, T, c, f) RB_NEWOBJ_OF((var), T, (c), (f))
#define RVARGC_NEWOBJ_OF(var, T, c, f, s) RB_RVARGC_NEWOBJ_OF((var), T, (c), (f), (s))
#define RB_OBJ_GC_FLAGS_MAX 6 /* used in ext/objspace */
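
/*
 * Illustrative sketch (editor's addition; the flag combination is an
 * assumption, not a recommendation): NEWOBJ_OF() declares `obj` and
 * allocates a fixed-size slot in one statement, routing through the
 * write-barrier-protected allocator when FL_WB_PROTECTED is set:
 *
 *     NEWOBJ_OF(obj, struct RObject, klass, T_OBJECT | FL_WB_PROTECTED);
 *     // `obj` is a struct RObject * to a freshly allocated RVALUE_SIZE slot
 */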

#ifndef USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! USE_UNALIGNED_MEMBER_ACCESS
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#elif ! (__has_warning("-Waddress-of-packed-member") || GCC_VERSION_SINCE(9, 0, 0))
# define UNALIGNED_MEMBER_ACCESS(expr) (expr)
#else
# include "internal/warnings.h"
# define UNALIGNED_MEMBER_ACCESS(expr) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    __typeof__(expr) unaligned_member_access_result = (expr); \
    COMPILER_WARNING_POP; \
    unaligned_member_access_result; \
})

# define UNALIGNED_MEMBER_PTR(ptr, mem) __extension__({ \
    COMPILER_WARNING_PUSH; \
    COMPILER_WARNING_IGNORED(-Waddress-of-packed-member); \
    const volatile void *unaligned_member_ptr_result = &(ptr)->mem; \
    COMPILER_WARNING_POP; \
    (__typeof__((ptr)->mem) *)unaligned_member_ptr_result; \
})
#endif

#ifndef UNALIGNED_MEMBER_PTR
# define UNALIGNED_MEMBER_PTR(ptr, mem) UNALIGNED_MEMBER_ACCESS(&(ptr)->mem)
#endif

#define RB_OBJ_WRITE_UNALIGNED(old, slot, young) do { \
    VALUE *_slot = UNALIGNED_MEMBER_ACCESS(slot); \
    RB_OBJ_WRITE(old, _slot, young); \
} while (0)
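
/*
 * Illustrative sketch (hypothetical packed struct, editor's addition): the
 * UNALIGNED_* macros silence -Waddress-of-packed-member where a VALUE slot
 * lives in a packed layout, and RB_OBJ_WRITE_UNALIGNED still fires the write
 * barrier:
 *
 *     struct packed_thing {
 *         char tag;
 *         VALUE payload;
 *     } __attribute__((packed));
 *
 *     // parent owns *thing; val is the new reference being stored
 *     RB_OBJ_WRITE_UNALIGNED(parent, &thing->payload, val);
 */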

// We use SIZE_POOL_COUNT number of shape IDs for transitions out of different size pools
// The next available shape ID will be the SPECIAL_CONST_SHAPE_ID
#if USE_RVARGC && (SIZEOF_UINT64_T == SIZEOF_VALUE)
# define SIZE_POOL_COUNT 5
#else
# define SIZE_POOL_COUNT 1
#endif

#define RCLASS_EXT_EMBEDDED (SIZE_POOL_COUNT > 1)

typedef struct ractor_newobj_size_pool_cache {
    struct RVALUE *freelist;
    struct heap_page *using_page;
} rb_ractor_newobj_size_pool_cache_t;

typedef struct ractor_newobj_cache {
    size_t incremental_mark_step_allocated_slots;
    rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT];
} rb_ractor_newobj_cache_t;
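
/*
 * Descriptive note (editorial assumption about intent): each ractor keeps one
 * of these caches so allocation can take slots from a per-ractor freelist and
 * current page without touching the global heap, one cache entry per size
 * pool.
 */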

extern VALUE *ruby_initial_gc_stress_ptr;
extern int ruby_disable_gc;
RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
void ruby_mimfree(void *ptr);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
VALUE rb_objspace_gc_enable(struct rb_objspace *);
VALUE rb_objspace_gc_disable(struct rb_objspace *);
void ruby_gc_set_params(void);
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj);
#if __has_attribute(alloc_align)
__attribute__((__alloc_align__(1)))
#endif
RUBY_ATTR_MALLOC void *rb_aligned_malloc(size_t, size_t) RUBY_ATTR_ALLOC_SIZE((2));
size_t rb_size_mul_or_raise(size_t, size_t, VALUE); /* used in compile.c */
size_t rb_size_mul_add_or_raise(size_t, size_t, size_t, VALUE); /* used in iseq.h */
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add(size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add(size_t, size_t, size_t);
void *rb_xrealloc_mul_add(const void *, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
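
/*
 * Illustrative sketch (editor's addition; `struct table`/`struct entry` are
 * hypothetical): the *_mul_add* allocators evaluate their size expression
 * with overflow checks first, e.g. rb_xmalloc_mul_add(x, y, z) requests
 * x * y + z bytes and raises instead of silently wrapping on overflow:
 *
 *     struct table *t = rb_xmalloc_mul_add(n, sizeof(struct entry),
 *                                          sizeof(struct table));
 */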
static inline void *ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
VALUE rb_class_allocate_instance(VALUE klass);
void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
size_t rb_gc_obj_slot_size(VALUE obj);
bool rb_gc_size_allocatable_p(size_t size);
int rb_objspace_garbage_object_p(VALUE obj);

void rb_gc_mark_and_move(VALUE *ptr);

#define rb_gc_mark_and_move_ptr(ptr) do { \
    VALUE _obj = (VALUE)*(ptr); \
    rb_gc_mark_and_move(&_obj); \
    if (_obj != (VALUE)*(ptr)) *(ptr) = (void *)_obj; \
} while (0)
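
/*
 * Illustrative sketch (hypothetical `foo_mark_and_move`, editor's addition):
 * rb_gc_mark_and_move() lets one callback serve both marking and compaction;
 * during compaction the GC may rewrite *ptr with the object's new address,
 * hence references are passed by address:
 *
 *     static void
 *     foo_mark_and_move(void *ptr)
 *     {
 *         struct foo *data = ptr;
 *         rb_gc_mark_and_move(&data->owner);      // VALUE member
 *         rb_gc_mark_and_move_ptr(&data->node);   // pointer-typed member
 *     }
 */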

RUBY_SYMBOL_EXPORT_BEGIN
/* exports for objspace module */
size_t rb_objspace_data_type_memsize(VALUE obj);
void rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data);
void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *data);
int rb_objspace_markable_object_p(VALUE obj);
int rb_objspace_internal_object_p(VALUE obj);
int rb_objspace_marked_object_p(VALUE obj);

void rb_objspace_each_objects(
    int (*callback)(void *start, void *end, size_t stride, void *data),
    void *data);

void rb_objspace_each_objects_without_setup(
    int (*callback)(void *, void *, size_t, void *),
    void *data);
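
/*
 * Illustrative sketch (hypothetical counter, editor's addition): the callback
 * receives each heap page as a [start, end) range of slots `stride` bytes
 * apart; returning nonzero stops the iteration.
 *
 *     static int
 *     count_slots_i(void *start, void *end, size_t stride, void *data)
 *     {
 *         size_t *count = data;
 *         *count += ((char *)end - (char *)start) / stride;
 *         return 0; // keep iterating
 *     }
 *
 *     // size_t total = 0;
 *     // rb_objspace_each_objects(count_slots_i, &total);
 */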

size_t rb_gc_obj_slot_size(VALUE obj);

VALUE rb_gc_disable_no_rest(void);

const char *rb_objspace_data_type_name(VALUE obj);
VALUE rb_wb_protected_newobj_of(VALUE, VALUE, size_t);
VALUE rb_wb_unprotected_newobj_of(VALUE, VALUE, size_t);
VALUE rb_ec_wb_protected_newobj_of(struct rb_execution_context_struct *ec, VALUE klass, VALUE flags, size_t);
size_t rb_obj_memsize_of(VALUE);
void rb_gc_verify_internal_consistency(void);
size_t rb_obj_gc_flags(VALUE, ID[], size_t);
void rb_gc_mark_values(long n, const VALUE *values);
void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
void ruby_sized_xfree(void *x, size_t size);
RUBY_SYMBOL_EXPORT_END

int rb_ec_stack_check(struct rb_execution_context_struct *ec);
void rb_gc_writebarrier_remember(VALUE obj);
const char *rb_obj_info(VALUE obj);

#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_xrealloc(ptr, new_size);
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_xrealloc2(ptr, new_count, elemsiz);
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_xfree(ptr);
}

# define SIZED_REALLOC_N(x, y, z, w) REALLOC_N(x, y, z)

#else

static inline void *
ruby_sized_xrealloc_inlined(void *ptr, size_t new_size, size_t old_size)
{
    return ruby_sized_xrealloc(ptr, new_size, old_size);
}

static inline void *
ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, size_t elemsiz, size_t old_count)
{
    return ruby_sized_xrealloc2(ptr, new_count, elemsiz, old_count);
}

static inline void
ruby_sized_xfree_inlined(void *ptr, size_t size)
{
    ruby_sized_xfree(ptr, size);
}

# define SIZED_REALLOC_N(v, T, m, n) \
    ((v) = (T *)ruby_sized_xrealloc2((void *)(v), (m), sizeof(T), (n)))

#endif /* HAVE_MALLOC_USABLE_SIZE */
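
/*
 * Illustrative note (editor's addition): despite the different parameter
 * names, both SIZED_REALLOC_N() definitions take (pointer, element type,
 * new count, old count); the old count is simply dropped when the platform
 * can already report usable allocation sizes.
 *
 *     // `tbl` previously sized for old_n elements:
 *     SIZED_REALLOC_N(tbl, struct entry, new_n, old_n);
 */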

#define ruby_sized_xrealloc ruby_sized_xrealloc_inlined
#define ruby_sized_xrealloc2 ruby_sized_xrealloc2_inlined
#define ruby_sized_xfree ruby_sized_xfree_inlined

#endif /* INTERNAL_GC_H */