/**********************************************************************

  created at: 2006-07-11(Tue) 09:00:03 +0900

  Copyright (C) 2006 Koichi Sasada

**********************************************************************/
#define RUBY_VM_INSNS_INFO 1
/* #define RUBY_MARK_FREE_DEBUG 1 */
#include "ruby/internal/config.h"

#include "eval_intern.h"
#include "internal/bits.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/error.h"
#include "internal/file.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/ruby_parser.h"
#include "internal/sanitizers.h"
#include "internal/set_table.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "ruby/util.h"
#include "vm_callinfo.h"

#include "ruby/ractor.h"

#include "insns_info.inc"
static VALUE iseqw_new(const rb_iseq_t *iseq);
static const rb_iseq_t *iseqw_check(VALUE iseqw);
#if VM_INSN_INFO_TABLE_IMPL == 2
static struct succ_index_table *succ_index_table_create(int max_pos, int *data, int size);
static unsigned int *succ_index_table_invert(int max_pos, struct succ_index_table *sd, int size);
static int succ_index_lookup(const struct succ_index_table *sd, int x);
#endif
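
/* With VM_INSN_INFO_TABLE_IMPL == 2, instruction-position lookups go through
 * a succinct index table instead of a plain position array: the table is
 * built once from the raw positions (succ_index_table_create), can be turned
 * back into a position array (succ_index_table_invert), and answers
 * rank-style queries via succ_index_lookup. The point, as far as this file
 * shows, is a smaller table at a small lookup cost. */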
#define hidden_obj_p(obj) (!SPECIAL_CONST_P(obj) && !RBASIC(obj)->klass)
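
/* An object is "hidden" when its klass field has been cleared (internal
 * frozen literals, for example). Such objects must not escape to Ruby level,
 * so obj_resurrect below dups one back into an ordinary visible object
 * before it is exposed. */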
static VALUE
obj_resurrect(VALUE obj)
{
    if (hidden_obj_p(obj)) {
        switch (BUILTIN_TYPE(obj)) {
          case T_STRING:
            obj = rb_str_resurrect(obj);
            break;
          case T_ARRAY:
            obj = rb_ary_resurrect(obj);
            break;
          case T_HASH:
            obj = rb_hash_resurrect(obj);
            break;
          default:
            break;
        }
    }
    return obj;
}
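
/* Walk the singly linked list of compile-time arena chunks and free each
 * chunk in turn. */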
static void
free_arena(struct iseq_compile_data_storage *cur)
{
    struct iseq_compile_data_storage *next;

    while (cur) {
        next = cur->next;
        ruby_xfree(cur);
        cur = next;
    }
}
static void
compile_data_free(struct iseq_compile_data *compile_data)
{
    if (compile_data) {
        free_arena(compile_data->node.storage_head);
        free_arena(compile_data->insn.storage_head);
        if (compile_data->ivar_cache_table) {
            rb_id_table_free(compile_data->ivar_cache_table);
        }
        ruby_xfree(compile_data);
    }
}
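
/* vm->constant_cache maps an ID to the set of inline caches (ICs) whose
 * constant paths mention that ID. Remove this IC from the set registered
 * for `id`, and drop the table entry entirely once its set becomes empty. */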
static void
remove_from_constant_cache(ID id, IC ic)
{
    rb_vm_t *vm = GET_VM();
    VALUE lookup_result;
    st_data_t ic_data = (st_data_t)ic;

    if (rb_id_table_lookup(vm->constant_cache, id, &lookup_result)) {
        set_table *ics = (set_table *)lookup_result;
        set_delete(ics, &ic_data);

        if (ics->num_entries == 0 &&
            // See comment in vm_track_constant_cache on why we need this check
            id != vm->inserting_constant_cache_id) {
            rb_id_table_delete(vm->constant_cache, id);
            set_free_table(ics);
        }
    }
}
// When an ISEQ is being freed, all of its associated ICs are going to go away
// as well. Because of this, we need to iterate over the ICs, and clear them
// from the VM's constant cache.
static void
iseq_clear_ic_references(const rb_iseq_t *iseq)
{
    // In some cases (when there is a compilation error), we end up with
    // ic_size greater than 0, but no allocated is_entries buffer.
    // If there's no is_entries buffer to loop through, return early.
    if (!ISEQ_BODY(iseq)->is_entries) {
        return;
    }

    for (unsigned int ic_idx = 0; ic_idx < ISEQ_BODY(iseq)->ic_size; ic_idx++) {
        IC ic = &ISEQ_IS_IC_ENTRY(ISEQ_BODY(iseq), ic_idx);

        // Iterate over the IC's constant path's segments and clean any references to
        // the ICs out of the VM's constant cache table.
        const ID *segments = ic->segments;

        // It's possible that segments is NULL if we overallocated an IC but
        // optimizations removed the instruction using it
        if (segments == NULL)
            continue;

        for (int i = 0; segments[i]; i++) {
            ID id = segments[i];
            if (id == idNULL) continue;
            remove_from_constant_cache(id, ic);
        }

        ruby_xfree((void *)segments);
    }
}
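
/* Free an ISEQ and everything it owns: its ICs' registrations in the VM
 * constant cache, the encoded instruction sequence, instruction info tables,
 * catch table, parameter tables, mark bits, and any leftover compile data. */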
void
rb_iseq_free(const rb_iseq_t *iseq)
{
    RUBY_FREE_ENTER("iseq");

    if (iseq && ISEQ_BODY(iseq)) {
        iseq_clear_ic_references(iseq);
        struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
#if USE_YJIT
        rb_yjit_iseq_free(iseq);
        if (FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED)) {
            RUBY_ASSERT(rb_yjit_live_iseq_count > 0);
            rb_yjit_live_iseq_count--;
        }
#endif
        ruby_xfree((void *)body->iseq_encoded);
        ruby_xfree((void *)body->insns_info.body);
        ruby_xfree((void *)body->insns_info.positions);
#if VM_INSN_INFO_TABLE_IMPL == 2
        ruby_xfree(body->insns_info.succ_index_table);
#endif
        ruby_xfree((void *)body->is_entries);
        ruby_xfree(body->call_data);
        ruby_xfree((void *)body->catch_table);
        ruby_xfree((void *)body->param.opt_table);
        if (ISEQ_MBITS_BUFLEN(body->iseq_size) > 1 && body->mark_bits.list) {
            ruby_xfree((void *)body->mark_bits.list);
        }

        ruby_xfree(body->variable.original_iseq);

        if (body->param.keyword != NULL) {
            if (body->param.keyword->table != &body->local_table[body->param.keyword->bits_start - body->param.keyword->num])
                ruby_xfree((void *)body->param.keyword->table);
            if (body->param.keyword->default_values) {
                ruby_xfree((void *)body->param.keyword->default_values);
            }
            ruby_xfree((void *)body->param.keyword);
        }
        if (LIKELY(body->local_table != rb_iseq_shared_exc_local_tbl))
            ruby_xfree((void *)body->local_table);
        compile_data_free(ISEQ_COMPILE_DATA(iseq));
        if (body->outer_variables) rb_id_table_free(body->outer_variables);
        ruby_xfree(body);
    }

    if (iseq && ISEQ_EXECUTABLE_P(iseq) && iseq->aux.exec.local_hooks) {
        rb_hook_list_free(iseq->aux.exec.local_hooks);
    }

    RUBY_FREE_LEAVE("iseq");
}
typedef VALUE iseq_value_itr_t(void *ctx, VALUE obj);
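
/* Each ISEQ keeps a bitmap with one bit per word of iseq_encoded; a set bit
 * marks an operand slot holding a VALUE the GC must see. iseq_scan_bits
 * visits the set bits of one bitmap word (a "page"), marks/moves each VALUE,
 * and mirrors any operand relocated by compaction into original_iseq. */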
static void
iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, VALUE *original_iseq)
{
    unsigned int offset;
    unsigned int page_offset = (page * ISEQ_MBITS_BITLENGTH);

    while (bits) {
        offset = ntz_intptr(bits);
        VALUE op = code[page_offset + offset];
        rb_gc_mark_and_move(&code[page_offset + offset]);
        VALUE newop = code[page_offset + offset];
        if (original_iseq && newop != op) {
            original_iseq[page_offset + offset] = newop;
        }
        bits &= bits - 1; // Reset Lowest Set Bit (BLSR)
    }
}
static void
rb_iseq_mark_and_move_each_compile_data_value(const rb_iseq_t *iseq, VALUE *original_iseq)
{
    unsigned int size;
    VALUE *code;
    const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);

    size = compile_data->iseq_size;
    code = compile_data->iseq_encoded;

    // Embedded VALUEs
    if (compile_data->mark_bits.list) {
        if (compile_data->is_single_mark_bit) {
            iseq_scan_bits(0, compile_data->mark_bits.single, code, original_iseq);
        }
        else {
            for (unsigned int i = 0; i < ISEQ_MBITS_BUFLEN(size); i++) {
                iseq_bits_t bits = compile_data->mark_bits.list[i];
                iseq_scan_bits(i, bits, code, original_iseq);
            }
        }
    }
}
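
/* The is_entries buffer packs the inline storage groups back to back, in the
 * order implied by the loops below: ivc caches first, then icvarc, ise, and
 * ic entries. The ivc caches hold nothing this function needs to mark, so
 * the cursor is advanced past them before the other groups are walked. */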
static void
rb_iseq_mark_and_move_each_body_value(const rb_iseq_t *iseq, VALUE *original_iseq)
{
    unsigned int size;
    VALUE *code;
    const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);

    size = body->iseq_size;
    code = body->iseq_encoded;

    union iseq_inline_storage_entry *is_entries = body->is_entries;

    if (body->is_entries) {
        // Skip iterating over ivc caches
        is_entries += body->ivc_size;

        for (unsigned int i = 0; i < body->icvarc_size; i++, is_entries++) {
            ICVARC icvarc = (ICVARC)is_entries;
            if (icvarc->entry) {
                RUBY_ASSERT(!RB_TYPE_P(icvarc->entry->class_value, T_NONE));

                rb_gc_mark_and_move(&icvarc->entry->class_value);
            }
        }

        for (unsigned int i = 0; i < body->ise_size; i++, is_entries++) {
            union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)is_entries;
            if (is->once.value) {
                rb_gc_mark_and_move(&is->once.value);
            }
        }

        for (unsigned int i = 0; i < body->ic_size; i++, is_entries++) {
            IC ic = (IC)is_entries;
            if (ic->entry) {
                rb_gc_mark_and_move_ptr(&ic->entry);
            }
        }
    }

    // Embedded VALUEs
    if (body->mark_bits.list) {
        if (ISEQ_MBITS_BUFLEN(size) == 1) {
            iseq_scan_bits(0, body->mark_bits.single, code, original_iseq);
        }
        else {
            for (unsigned int i = 0; i < ISEQ_MBITS_BUFLEN(size); i++) {
                iseq_bits_t bits = body->mark_bits.list[i];
                iseq_scan_bits(i, bits, code, original_iseq);
            }
        }
    }
}
static bool
cc_is_active(const struct rb_callcache *cc, bool reference_updating)
{
    if (cc == rb_vm_empty_cc() || cc == rb_vm_empty_cc_for_super()) {
        return false;
    }

    if (reference_updating) {
        cc = (const struct rb_callcache *)rb_gc_location((VALUE)cc);
    }

    if (vm_cc_markable(cc)) {
        if (cc->klass) { // cc is not invalidated
            const struct rb_callable_method_entry_struct *cme = vm_cc_cme(cc);
            if (reference_updating) {
                cme = (const struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cme);
            }
            if (!METHOD_ENTRY_INVALIDATED(cme)) {
                return true;
            }
        }
    }
    return false;
}
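
/* Shared mark/compact entry point for an ISEQ: with reference_updating false
 * it marks every VALUE reachable from the ISEQ; with it true it additionally
 * rewrites pointers that GC compaction has moved. Call caches that are no
 * longer active are replaced with the VM's shared empty cache rather than
 * being kept alive. */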
void
rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating)
{
    RUBY_MARK_ENTER("iseq");

    rb_gc_mark_and_move(&iseq->wrapper);

    if (ISEQ_BODY(iseq)) {
        struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);

        rb_iseq_mark_and_move_each_body_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);

        rb_gc_mark_and_move(&body->variable.coverage);
        rb_gc_mark_and_move(&body->variable.pc2branchindex);
        rb_gc_mark_and_move(&body->variable.script_lines);
        rb_gc_mark_and_move(&body->location.label);
        rb_gc_mark_and_move(&body->location.base_label);
        rb_gc_mark_and_move(&body->location.pathobj);
        if (body->local_iseq) rb_gc_mark_and_move_ptr(&body->local_iseq);
        if (body->parent_iseq) rb_gc_mark_and_move_ptr(&body->parent_iseq);
        if (body->mandatory_only_iseq) rb_gc_mark_and_move_ptr(&body->mandatory_only_iseq);

        if (body->call_data) {
            for (unsigned int i = 0; i < body->ci_size; i++) {
                struct rb_call_data *cds = body->call_data;

                if (cds[i].ci) rb_gc_mark_and_move_ptr(&cds[i].ci);

                if (cc_is_active(cds[i].cc, reference_updating)) {
                    rb_gc_mark_and_move_ptr(&cds[i].cc);
                }
                else if (cds[i].cc != rb_vm_empty_cc()) {
                    cds[i].cc = rb_vm_empty_cc();
                }
            }
        }

        if (body->param.flags.has_kw && body->param.keyword != NULL) {
            const struct rb_iseq_param_keyword *const keyword = body->param.keyword;

            if (keyword->default_values != NULL) {
                for (int j = 0, i = keyword->required_num; i < keyword->num; i++, j++) {
                    rb_gc_mark_and_move(&keyword->default_values[j]);
                }
            }
        }

        if (body->catch_table) {
            struct iseq_catch_table *table = body->catch_table;

            for (unsigned int i = 0; i < table->size; i++) {
                struct iseq_catch_table_entry *entry;
                entry = UNALIGNED_MEMBER_PTR(table, entries[i]);
                if (entry->iseq) {
                    rb_gc_mark_and_move_ptr(&entry->iseq);
                }
            }
        }

#if USE_YJIT
        if (reference_updating) {
            rb_yjit_iseq_update_references(iseq);
        }
        else {
            rb_yjit_iseq_mark(body->yjit_payload);
        }
#endif
    }

    if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
        rb_gc_mark_and_move(&iseq->aux.loader.obj);
    }
    else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
        const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);

        rb_iseq_mark_and_move_insn_storage(compile_data->insn.storage_head);
        rb_iseq_mark_and_move_each_compile_data_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);

        rb_gc_mark_and_move((VALUE *)&compile_data->err_info);
        rb_gc_mark_and_move((VALUE *)&compile_data->catch_table_ary);
    }
    else {
        VM_ASSERT(ISEQ_EXECUTABLE_P(iseq));

        if (iseq->aux.exec.local_hooks) {
            rb_hook_list_mark_and_update(iseq->aux.exec.local_hooks);
        }
    }

    RUBY_MARK_LEAVE("iseq");
}
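
/* Extra bytes owned by a keyword-parameter descriptor: the struct itself
 * plus one default-value slot per optional keyword. */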
static size_t
param_keyword_size(const struct rb_iseq_param_keyword *pkw)
{
    size_t size = 0;

    if (!pkw) return size;

    size += sizeof(struct rb_iseq_param_keyword);
    size += sizeof(VALUE) * (pkw->num - pkw->required_num);

    return size;
}
size_t
rb_iseq_memsize(const rb_iseq_t *iseq)
{
    size_t size = 0; /* struct already counted as RVALUE size */
    const struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
    const struct iseq_compile_data *compile_data;

    /* TODO: should we count original_iseq? */

    if (ISEQ_EXECUTABLE_P(iseq) && body) {
        size += sizeof(struct rb_iseq_constant_body);
        size += body->iseq_size * sizeof(VALUE);
        size += body->insns_info.size * (sizeof(struct iseq_insn_info_entry) + sizeof(unsigned int));
        size += body->local_table_size * sizeof(ID);
        size += ISEQ_MBITS_BUFLEN(body->iseq_size) * ISEQ_MBITS_SIZE;
        if (body->catch_table) {
            size += iseq_catch_table_bytes(body->catch_table->size);
        }
        size += (body->param.opt_num + 1) * sizeof(VALUE);
        size += param_keyword_size(body->param.keyword);

        /* body->is_entries */
        size += ISEQ_IS_SIZE(body) * sizeof(union iseq_inline_storage_entry);

        if (ISEQ_BODY(iseq)->is_entries) {
            /* IC entries constant segments */
            for (unsigned int ic_idx = 0; ic_idx < body->ic_size; ic_idx++) {
                IC ic = &ISEQ_IS_IC_ENTRY(body, ic_idx);
                const ID *ids = ic->segments;
                if (!ids) continue;
                while (*ids++) {
                    size += sizeof(ID);
                }
                size += sizeof(ID); // null terminator
            }
        }

        /* body->call_data */
        size += body->ci_size * sizeof(struct rb_call_data);
        // TODO: should we count imemo_callinfo?
    }

    compile_data = ISEQ_COMPILE_DATA(iseq);
    if (compile_data) {
        struct iseq_compile_data_storage *cur;

        size += sizeof(struct iseq_compile_data);

        cur = compile_data->node.storage_head;
        while (cur) {
            size += cur->size + offsetof(struct iseq_compile_data_storage, buff);
            cur = cur->next;
        }
    }

    return size;
}
struct rb_iseq_constant_body *
rb_iseq_constant_body_alloc(void)
{
    struct rb_iseq_constant_body *iseq_body;
    iseq_body = ZALLOC(struct rb_iseq_constant_body);