-rw-r--r--  .github/workflows/macos.yml  4
-rw-r--r--  .github/workflows/ubuntu.yml  4
-rw-r--r--  NEWS.md  7
-rw-r--r--  benchmark/README.md  2
-rw-r--r--  class.c  2
-rw-r--r--  common.mk  4
-rw-r--r--  compile.c  26
-rw-r--r--  debug_counter.h  2
-rw-r--r--  doc/distribution.md  2
-rw-r--r--  enum.c  5
-rw-r--r--  ext/objspace/objspace.c  2
-rw-r--r--  ext/objspace/objspace_dump.c  9
-rw-r--r--  gc.c  130
-rw-r--r--  gems/bundled_gems  12
-rw-r--r--  imemo.c  96
-rw-r--r--  internal/class.h  6
-rw-r--r--  internal/imemo.h  22
-rw-r--r--  internal/variable.h  4
-rw-r--r--  iseq.c  4
-rw-r--r--  lib/tempfile.rb  2
-rw-r--r--  marshal.c  6
-rw-r--r--  prism_compile.c  3
-rw-r--r--  ractor.c  15
-rw-r--r--  ractor_sync.c  2
-rw-r--r--  shape.c  2
-rw-r--r--  shape.h  4
-rw-r--r--  spec/ruby/core/kernel/caller_locations_spec.rb  14
-rw-r--r--  spec/ruby/core/kernel/caller_spec.rb  25
-rw-r--r--  test/prism/lex_test.rb  2
-rw-r--r--  test/ruby/test_backtrace.rb  6
-rw-r--r--  test/ruby/test_encoding.rb  2
-rw-r--r--  test/ruby/test_zjit.rb  49
-rw-r--r--  time.c  22
-rwxr-xr-x  tool/auto-style.rb  8
-rw-r--r--  tool/lib/_tmpdir.rb  6
-rw-r--r--  tool/test/testunit/test_parallel.rb  26
-rw-r--r--  tool/test/testunit/tests_for_parallel/ptest_forth.rb  8
-rw-r--r--  variable.c  606
-rw-r--r--  variable.h  13
-rw-r--r--  vm.c  6
-rw-r--r--  vm_backtrace.c  68
-rw-r--r--  vm_insnhelper.c  26
-rw-r--r--  yjit/src/cruby_bindings.inc.rs  2
-rw-r--r--  zjit/src/codegen.rs  16
-rw-r--r--  zjit/src/cruby_bindings.inc.rs  2
-rw-r--r--  zjit/src/hir.rs  441
46 files changed, 1098 insertions, 627 deletions
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index 54161f888c..d418912f35 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -107,6 +107,10 @@ jobs:
- run: make hello
+ - name: runirb
+ run: |
+ echo IRB::VERSION | make runirb RUNOPT="-- -f"
+
- name: Set test options for skipped tests
run: |
set -x
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index ac7963649b..041cb412fd 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -99,6 +99,10 @@ jobs:
- run: $SETARCH make hello
+ - name: runirb
+ run: |
+ echo IRB::VERSION | $SETARCH make runirb RUNOPT="-- -f"
+
- name: Set test options for skipped tests
run: |
set -x
diff --git a/NEWS.md b/NEWS.md
index 350d9a04f0..f58bb343e7 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -108,11 +108,11 @@ Note: We're only listing outstanding class updates.
The following bundled gems are promoted from default gems.
-* ostruct 0.6.1
+* ostruct 0.6.2
* pstore 0.2.0
* benchmark 0.4.1
* logger 1.7.0
-* rdoc 6.14.0
+* rdoc 6.14.1
* win32ole 1.9.2
* irb 1.15.2
* reline 0.6.1
@@ -153,7 +153,10 @@ The following bundled gems are updated.
* rexml 3.4.1
* net-imap 0.5.8
* net-smtp 0.5.1
+* matrix 0.4.3
+* prime 0.1.4
* rbs 3.9.4
+* debug 1.11.0
* base64 0.3.0
* bigdecimal 3.2.2
* drb 2.2.3
diff --git a/benchmark/README.md b/benchmark/README.md
index c5c29d0daf..9f9192685e 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -40,7 +40,7 @@ Usage: benchmark-driver [options] RUBY|YAML...
--filter REGEXP Filter out benchmarks with given regexp
--run-duration SECONDS Warmup estimates loop_count to run for this duration (default: 3)
--timeout SECONDS Timeout ruby command execution with timeout(1)
- -v, --verbose Verbose mode. Multiple -v options increase visilibity (max: 2)
+ -v, --verbose Verbose mode. Multiple -v options increase visibility (max: 2)
```
## make benchmark
diff --git a/class.c b/class.c
index 480bdb7c14..506054ad68 100644
--- a/class.c
+++ b/class.c
@@ -298,7 +298,7 @@ rb_class_duplicate_classext(rb_classext_t *orig, VALUE klass, const rb_namespace
RCLASSEXT_M_TBL(ext) = duplicate_classext_m_tbl(RCLASSEXT_M_TBL(orig), klass, dup_iclass);
if (orig->fields_obj) {
- RB_OBJ_WRITE(klass, &ext->fields_obj, rb_imemo_class_fields_clone(orig->fields_obj));
+ RB_OBJ_WRITE(klass, &ext->fields_obj, rb_imemo_fields_clone(orig->fields_obj));
}
if (RCLASSEXT_SHARED_CONST_TBL(orig)) {
diff --git a/common.mk b/common.mk
index f94ad33d88..e5a4d34a0a 100644
--- a/common.mk
+++ b/common.mk
@@ -1427,8 +1427,8 @@ run: yes-fake miniruby$(EXEEXT) PHONY
runruby: $(PROGRAM) PHONY
RUBY_ON_BUG='gdb -x $(srcdir)/.gdbinit -p' $(RUNRUBY) $(RUNOPT0) $(TESTRUN_SCRIPT) $(RUNOPT)
-runirb: $(PROGRAM) PHONY
- RUBY_ON_BUG='gdb -x $(srcdir)/.gdbinit -p' $(RUNRUBY) $(RUNOPT0) -r irb -e 'IRB.start("make runirb")' $(RUNOPT)
+runirb: $(PROGRAM) update-default-gemspecs
+ RUBY_ON_BUG='gdb -x $(srcdir)/.gdbinit -p' $(RUNRUBY) $(RUNOPT0) -rrubygems -r irb -e 'IRB.start("make runirb")' $(RUNOPT)
parse: yes-fake miniruby$(EXEEXT) PHONY
$(BTESTRUBY) --dump=parsetree_with_comment,insns $(TESTRUN_SCRIPT)
diff --git a/compile.c b/compile.c
index 6bcfcd3398..477f082144 100644
--- a/compile.c
+++ b/compile.c
@@ -3493,7 +3493,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
iobj->insn_id = BIN(opt_ary_freeze);
iobj->operand_size = 2;
iobj->operands = compile_data_calloc2(iseq, iobj->operand_size, sizeof(VALUE));
- iobj->operands[0] = rb_cArray_empty_frozen;
+ RB_OBJ_WRITE(iseq, &iobj->operands[0], rb_cArray_empty_frozen);
iobj->operands[1] = (VALUE)ci;
ELEM_REMOVE(next);
}
@@ -3516,7 +3516,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
iobj->insn_id = BIN(opt_hash_freeze);
iobj->operand_size = 2;
iobj->operands = compile_data_calloc2(iseq, iobj->operand_size, sizeof(VALUE));
- iobj->operands[0] = rb_cHash_empty_frozen;
+ RB_OBJ_WRITE(iseq, &iobj->operands[0], rb_cHash_empty_frozen);
iobj->operands[1] = (VALUE)ci;
ELEM_REMOVE(next);
}
@@ -4094,7 +4094,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
unsigned int flags = vm_ci_flag(ci);
if ((flags & set_flags) == set_flags && !(flags & unset_flags)) {
((INSN*)niobj)->insn_id = BIN(putobject);
- OPERAND_AT(niobj, 0) = rb_hash_freeze(rb_hash_resurrect(OPERAND_AT(niobj, 0)));
+ RB_OBJ_WRITE(iseq, &OPERAND_AT(niobj, 0), rb_hash_freeze(rb_hash_resurrect(OPERAND_AT(niobj, 0))));
const struct rb_callinfo *nci = vm_ci_new(vm_ci_mid(ci),
flags & ~VM_CALL_KW_SPLAT_MUT, vm_ci_argc(ci), vm_ci_kwarg(ci));
@@ -9257,12 +9257,13 @@ compile_builtin_mandatory_only_method(rb_iseq_t *iseq, const NODE *node, const N
VALUE ast_value = rb_ruby_ast_new(RNODE(&scope_node));
- ISEQ_BODY(iseq)->mandatory_only_iseq =
+ const rb_iseq_t *mandatory_only_iseq =
rb_iseq_new_with_opt(ast_value, rb_iseq_base_label(iseq),
rb_iseq_path(iseq), rb_iseq_realpath(iseq),
nd_line(line_node), NULL, 0,
ISEQ_TYPE_METHOD, ISEQ_COMPILE_DATA(iseq)->option,
ISEQ_BODY(iseq)->variable.script_lines);
+ RB_OBJ_WRITE(iseq, &ISEQ_BODY(iseq)->mandatory_only_iseq, (VALUE)mandatory_only_iseq);
ALLOCV_END(idtmp);
return COMPILE_OK;
@@ -13288,7 +13289,7 @@ ibf_dump_catch_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
}
static struct iseq_catch_table *
-ibf_load_catch_table(const struct ibf_load *load, ibf_offset_t catch_table_offset, unsigned int size)
+ibf_load_catch_table(const struct ibf_load *load, ibf_offset_t catch_table_offset, unsigned int size, const rb_iseq_t *parent_iseq)
{
if (size) {
struct iseq_catch_table *table = ruby_xmalloc(iseq_catch_table_bytes(size));
@@ -13305,7 +13306,8 @@ ibf_load_catch_table(const struct ibf_load *load, ibf_offset_t catch_table_offse
table->entries[i].cont = (unsigned int)ibf_load_small_value(load, &reading_pos);
table->entries[i].sp = (unsigned int)ibf_load_small_value(load, &reading_pos);
- table->entries[i].iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)iseq_index);
+ rb_iseq_t *catch_iseq = (rb_iseq_t *)ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)iseq_index);
+ RB_OBJ_WRITE(parent_iseq, &table->entries[i].iseq, catch_iseq);
}
return table;
}
@@ -13823,10 +13825,14 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
load_body->insns_info.body = ibf_load_insns_info_body(load, insns_info_body_offset, insns_info_size);
load_body->insns_info.positions = ibf_load_insns_info_positions(load, insns_info_positions_offset, insns_info_size);
load_body->local_table = ibf_load_local_table(load, local_table_offset, local_table_size);
- load_body->catch_table = ibf_load_catch_table(load, catch_table_offset, catch_table_size);
- load_body->parent_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)parent_iseq_index);
- load_body->local_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)local_iseq_index);
- load_body->mandatory_only_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)mandatory_only_iseq_index);
+ load_body->catch_table = ibf_load_catch_table(load, catch_table_offset, catch_table_size, iseq);
+ const rb_iseq_t *parent_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)parent_iseq_index);
+ const rb_iseq_t *local_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)local_iseq_index);
+ const rb_iseq_t *mandatory_only_iseq = ibf_load_iseq(load, (const rb_iseq_t *)(VALUE)mandatory_only_iseq_index);
+
+ RB_OBJ_WRITE(iseq, &load_body->parent_iseq, parent_iseq);
+ RB_OBJ_WRITE(iseq, &load_body->local_iseq, local_iseq);
+ RB_OBJ_WRITE(iseq, &load_body->mandatory_only_iseq, mandatory_only_iseq);
// This must be done after the local table is loaded.
if (load_body->param.keyword != NULL) {
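[Editorial context, not part of the commit: RB_OBJ_WRITE is CRuby's generational-GC write barrier. It is roughly a raw store followed by RB_OBJ_WRITTEN, which records that the owner object now references the written value so a minor GC keeps scanning that edge even after the owner is promoted to the old generation. A minimal sketch with a hypothetical helper name:

#include "ruby/ruby.h"

static void
write_with_barrier(VALUE owner, VALUE *slot, VALUE value)
{
    *slot = value;                        /* the plain assignment the hunks above replace */
    RB_OBJ_WRITTEN(owner, Qundef, value); /* record the owner -> value edge for minor GC */
}

The conversions above matter because an iseq can already be old by the time an operand or child iseq pointer is stored into it.]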
diff --git a/debug_counter.h b/debug_counter.h
index 3142ada0c3..fada7513aa 100644
--- a/debug_counter.h
+++ b/debug_counter.h
@@ -315,7 +315,7 @@ RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
RB_DEBUG_COUNTER(obj_imemo_callcache)
RB_DEBUG_COUNTER(obj_imemo_constcache)
-RB_DEBUG_COUNTER(obj_imemo_class_fields)
+RB_DEBUG_COUNTER(obj_imemo_fields)
RB_DEBUG_COUNTER(opt_new_hit)
RB_DEBUG_COUNTER(opt_new_miss)
diff --git a/doc/distribution.md b/doc/distribution.md
index 5a4d51da6f..164e1b7109 100644
--- a/doc/distribution.md
+++ b/doc/distribution.md
@@ -8,7 +8,7 @@ This document outlines the expected way to distribute Ruby, with a specific focu
The tarball for official releases is created by the release manager. The release manager uploads the tarball to the [Ruby website](https://www.ruby-lang.org/en/downloads/).
-Downstream distributors should use the official release tarballs as part of their build process. This ensures that the tarball is created in a consistent way, and that the tarball is crytographically verified.
+Downstream distributors should use the official release tarballs as part of their build process. This ensures that the tarball is created in a consistent way, and that the tarball is cryptographically verified.
### Using the nightly tarball for testing
diff --git a/enum.c b/enum.c
index 182e4f6e83..cbf74df484 100644
--- a/enum.c
+++ b/enum.c
@@ -1215,14 +1215,15 @@ tally_up(st_data_t *group, st_data_t *value, st_data_t arg, int existing)
RB_OBJ_WRITTEN(hash, Qundef, tally);
}
*value = (st_data_t)tally;
- if (!SPECIAL_CONST_P(*group)) RB_OBJ_WRITTEN(hash, Qundef, *group);
return ST_CONTINUE;
}
static VALUE
rb_enum_tally_up(VALUE hash, VALUE group)
{
- rb_hash_stlike_update(hash, group, tally_up, (st_data_t)hash);
+ if (!rb_hash_stlike_update(hash, group, tally_up, (st_data_t)hash)) {
+ RB_OBJ_WRITTEN(hash, Qundef, group);
+ }
return hash;
}
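[Editorial note on the enum.c fix above: rb_hash_stlike_update(), like st_update(), returns 1 when the key already existed and 0 when the callback created a fresh entry, so the write barrier for the key only needs to fire on a fresh insert. A self-contained sketch of the same update-callback technique, with illustrative names:

#include "ruby/st.h"

static int
incr_i(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    *value = existing ? *value + 1 : 1; /* bump a plain counter payload */
    return ST_CONTINUE;
}

/* if (!st_update(tbl, k, incr_i, 0)) then k was newly inserted -- the only
 * case where the table's owner gained a new reference to k. */]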
diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c
index 754c998ac6..5e183e78ed 100644
--- a/ext/objspace/objspace.c
+++ b/ext/objspace/objspace.c
@@ -504,7 +504,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
INIT_IMEMO_TYPE_ID(imemo_callinfo);
INIT_IMEMO_TYPE_ID(imemo_callcache);
INIT_IMEMO_TYPE_ID(imemo_constcache);
- INIT_IMEMO_TYPE_ID(imemo_class_fields);
+ INIT_IMEMO_TYPE_ID(imemo_fields);
#undef INIT_IMEMO_TYPE_ID
}
diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c
index 83b434c3a1..80732d0282 100644
--- a/ext/objspace/objspace_dump.c
+++ b/ext/objspace/objspace_dump.c
@@ -394,9 +394,10 @@ dump_object(VALUE obj, struct dump_config *dc)
dc->cur_obj = obj;
dc->cur_obj_references = 0;
- if (BUILTIN_TYPE(obj) == T_NODE || BUILTIN_TYPE(obj) == T_IMEMO) {
+ if (BUILTIN_TYPE(obj) == T_NODE || (BUILTIN_TYPE(obj) == T_IMEMO && !IMEMO_TYPE_P(obj, imemo_fields))) {
dc->cur_obj_klass = 0;
- } else {
+ }
+ else {
dc->cur_obj_klass = RBASIC_CLASS(obj);
}
@@ -414,8 +415,8 @@ dump_object(VALUE obj, struct dump_config *dc)
dump_append(dc, obj_type(obj));
dump_append(dc, "\"");
- if (BUILTIN_TYPE(obj) != T_IMEMO) {
- size_t shape_id = rb_obj_shape_id(obj);
+ if (BUILTIN_TYPE(obj) != T_IMEMO || IMEMO_TYPE_P(obj, imemo_fields)) {
+ size_t shape_id = rb_obj_shape_id(obj) & SHAPE_ID_OFFSET_MASK;
dump_append(dc, ", \"shape_id\":");
dump_append_sizet(dc, shape_id);
}
diff --git a/gc.c b/gc.c
index f0189294bd..b0876fca5e 100644
--- a/gc.c
+++ b/gc.c
@@ -2015,27 +2015,6 @@ object_id_to_ref(void *objspace_ptr, VALUE object_id)
static inline void
obj_free_object_id(VALUE obj)
{
- if (RB_BUILTIN_TYPE(obj) == T_IMEMO) {
- return;
- }
-
-#if RUBY_DEBUG
- switch (BUILTIN_TYPE(obj)) {
- case T_CLASS:
- case T_MODULE:
- break;
- default:
- if (rb_shape_obj_has_id(obj)) {
- VALUE id = object_id_get(obj, RBASIC_SHAPE_ID(obj)); // Crash if missing
- if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
- rb_p(obj);
- rb_bug("Corrupted object_id");
- }
- }
- break;
- }
-#endif
-
VALUE obj_id = 0;
if (RB_UNLIKELY(id2ref_tbl)) {
switch (BUILTIN_TYPE(obj)) {
@@ -2043,21 +2022,32 @@ obj_free_object_id(VALUE obj)
case T_MODULE:
obj_id = RCLASS(obj)->object_id;
break;
- default: {
+ case T_IMEMO:
+ if (!IMEMO_TYPE_P(obj, imemo_fields)) {
+ return;
+ }
+ // fallthrough
+ case T_OBJECT:
+ {
shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
if (rb_shape_has_object_id(shape_id)) {
obj_id = object_id_get(obj, shape_id);
}
break;
}
+ default:
+ // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
+ return;
}
if (RB_UNLIKELY(obj_id)) {
RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
- // If we're currently building the table then it's not a bug
- if (id2ref_tbl_built) {
+ // If we're currently building the table then it's not a bug.
+ // If the object is a T_IMEMO/fields, then it's possible the actual object
+ // has been garbage collected already.
+ if (id2ref_tbl_built && !RB_TYPE_P(obj, T_IMEMO)) {
rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
}
}
@@ -2071,7 +2061,7 @@ rb_gc_obj_free_vm_weak_references(VALUE obj)
obj_free_object_id(obj);
if (rb_obj_exivar_p(obj)) {
- rb_free_generic_ivar((VALUE)obj);
+ rb_free_generic_ivar(obj);
}
switch (BUILTIN_TYPE(obj)) {
@@ -2316,10 +2306,6 @@ rb_obj_memsize_of(VALUE obj)
return 0;
}
- if (rb_obj_exivar_p(obj)) {
- size += rb_generic_ivar_memsize(obj);
- }
-
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
if (rb_shape_obj_too_complex_p(obj)) {
@@ -3935,38 +3921,6 @@ vm_weak_table_foreach_update_weak_value(st_data_t *key, st_data_t *value, st_dat
return iter_data->update_callback((VALUE *)value, iter_data->data);
}
-static void
-free_gen_fields_tbl(VALUE obj, struct gen_fields_tbl *fields_tbl)
-{
- if (UNLIKELY(rb_shape_obj_too_complex_p(obj))) {
- st_free_table(fields_tbl->as.complex.table);
- }
-
- xfree(fields_tbl);
-}
-
-static int
-vm_weak_table_gen_fields_foreach_too_complex_i(st_data_t _key, st_data_t value, st_data_t data, int error)
-{
- struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
-
- GC_ASSERT(!iter_data->weak_only);
-
- if (SPECIAL_CONST_P((VALUE)value)) return ST_CONTINUE;
-
- return iter_data->callback((VALUE)value, iter_data->data);
-}
-
-static int
-vm_weak_table_gen_fields_foreach_too_complex_replace_i(st_data_t *_key, st_data_t *value, st_data_t data, int existing)
-{
- struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
-
- GC_ASSERT(!iter_data->weak_only);
-
- return iter_data->update_callback((VALUE *)value, iter_data->data);
-}
-
struct st_table *rb_generic_fields_tbl_get(void);
static int
@@ -4003,60 +3957,50 @@ vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
int ret = iter_data->callback((VALUE)key, iter_data->data);
+ VALUE new_value = (VALUE)value;
+ VALUE new_key = (VALUE)key;
+
switch (ret) {
case ST_CONTINUE:
break;
case ST_DELETE:
- free_gen_fields_tbl((VALUE)key, (struct gen_fields_tbl *)value);
RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
return ST_DELETE;
case ST_REPLACE: {
- VALUE new_key = (VALUE)key;
ret = iter_data->update_callback(&new_key, iter_data->data);
- if (key != new_key) ret = ST_DELETE;
- DURING_GC_COULD_MALLOC_REGION_START();
- {
- st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, value);
+ if (key != new_key) {
+ ret = ST_DELETE;
}
- DURING_GC_COULD_MALLOC_REGION_END();
- key = (st_data_t)new_key;
break;
}
default:
- return ret;
+ rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
}
if (!iter_data->weak_only) {
- struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)value;
+ int ivar_ret = iter_data->callback(new_value, iter_data->data);
+ switch (ivar_ret) {
+ case ST_CONTINUE:
+ break;
- if (rb_shape_obj_too_complex_p((VALUE)key)) {
- st_foreach_with_replace(
- fields_tbl->as.complex.table,
- vm_weak_table_gen_fields_foreach_too_complex_i,
- vm_weak_table_gen_fields_foreach_too_complex_replace_i,
- data
- );
+ case ST_REPLACE:
+ iter_data->update_callback(&new_value, iter_data->data);
+ break;
+
+ default:
+ rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
}
- else {
- uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID((VALUE)key));
- for (uint32_t i = 0; i < fields_count; i++) {
- if (SPECIAL_CONST_P(fields_tbl->as.shape.fields[i])) continue;
+ }
- int ivar_ret = iter_data->callback(fields_tbl->as.shape.fields[i], iter_data->data);
- switch (ivar_ret) {
- case ST_CONTINUE:
- break;
- case ST_REPLACE:
- iter_data->update_callback(&fields_tbl->as.shape.fields[i], iter_data->data);
- break;
- default:
- rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
- }
- }
+ if (key != new_key || value != new_value) {
+ DURING_GC_COULD_MALLOC_REGION_START();
+ {
+ st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
}
+ DURING_GC_COULD_MALLOC_REGION_END();
}
return ret;
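[Editorial note on the rewritten walker above: the generic-fields entry value is now a single imemo/fields VALUE, so it can be passed through the same callback/update-callback pair as any other reference, and unexpected return codes abort with rb_bug. The st_foreach_with_replace protocol it relies on looks roughly like this; the checks are placeholders, not the real GC logic:

#include "ruby/st.h"

static int
weak_check_i(st_data_t key, st_data_t value, st_data_t arg, int error)
{
    int dead = 0, moved = 0;      /* placeholders for the GC's liveness/movement tests */
    if (dead) return ST_DELETE;   /* drop the entry for a collected object */
    if (moved) return ST_REPLACE; /* have the replace callback rewrite it */
    return ST_CONTINUE;
}

static int
weak_replace_i(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    /* rewrite *key and/or *value to the object's post-compaction address */
    return ST_CONTINUE;
}

/* st_foreach_with_replace(tbl, weak_check_i, weak_replace_i, 0); */]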
diff --git a/gems/bundled_gems b/gems/bundled_gems
index 6b24757a10..15a9df6cce 100644
--- a/gems/bundled_gems
+++ b/gems/bundled_gems
@@ -16,11 +16,11 @@ net-ftp 0.3.8 https://github.com/ruby/net-ftp
net-imap 0.5.8 https://github.com/ruby/net-imap
net-pop 0.1.2 https://github.com/ruby/net-pop
net-smtp 0.5.1 https://github.com/ruby/net-smtp
-matrix 0.4.2 https://github.com/ruby/matrix 200efebc35dc1a8d16fad671f7006c85cbd0e3f5
-prime 0.1.3 https://github.com/ruby/prime d97973271103f2bdde91f3f0bd3e42526401ad77
+matrix 0.4.3 https://github.com/ruby/matrix
+prime 0.1.4 https://github.com/ruby/prime
rbs 3.9.4 https://github.com/ruby/rbs
typeprof 0.30.1 https://github.com/ruby/typeprof
-debug 1.10.0 https://github.com/ruby/debug cf469f2b21710727abdd153b25a1e5123b002bb0
+debug 1.11.0 https://github.com/ruby/debug
racc 1.8.1 https://github.com/ruby/racc
mutex_m 0.3.0 https://github.com/ruby/mutex_m
getoptlong 0.2.1 https://github.com/ruby/getoptlong
@@ -35,13 +35,13 @@ nkf 0.2.0 https://github.com/ruby/nkf
syslog 0.3.0 https://github.com/ruby/syslog
csv 3.3.5 https://github.com/ruby/csv
repl_type_completor 0.1.11 https://github.com/ruby/repl_type_completor 25108aa8d69ddaba0b5da3feff1c0035371524b2
-ostruct 0.6.1 https://github.com/ruby/ostruct 50d51248bec5560a102a1024aff4174b31dca8cc
+ostruct 0.6.2 https://github.com/ruby/ostruct
pstore 0.2.0 https://github.com/ruby/pstore
benchmark 0.4.1 https://github.com/ruby/benchmark
logger 1.7.0 https://github.com/ruby/logger
-rdoc 6.14.0 https://github.com/ruby/rdoc
+rdoc 6.14.1 https://github.com/ruby/rdoc
win32ole 1.9.2 https://github.com/ruby/win32ole
-irb 1.15.2 https://github.com/ruby/irb
+irb 1.15.2 https://github.com/ruby/irb 331c4e851296b115db766c291e8cf54a2492fb36
reline 0.6.1 https://github.com/ruby/reline
readline 0.0.4 https://github.com/ruby/readline
fiddle 1.1.8 https://github.com/ruby/fiddle
diff --git a/imemo.c b/imemo.c
index ebea6f6f25..f8c0e3b171 100644
--- a/imemo.c
+++ b/imemo.c
@@ -30,7 +30,7 @@ rb_imemo_name(enum imemo_type type)
IMEMO_NAME(svar);
IMEMO_NAME(throw_data);
IMEMO_NAME(tmpbuf);
- IMEMO_NAME(class_fields);
+ IMEMO_NAME(fields);
#undef IMEMO_NAME
}
rb_bug("unreachable");
@@ -111,16 +111,16 @@ rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
}
static VALUE
-imemo_class_fields_new(VALUE klass, size_t capa)
+imemo_fields_new(VALUE klass, size_t capa)
{
- size_t embedded_size = offsetof(struct rb_class_fields, as.embed) + capa * sizeof(VALUE);
+ size_t embedded_size = offsetof(struct rb_fields, as.embed) + capa * sizeof(VALUE);
if (rb_gc_size_allocatable_p(embedded_size)) {
- VALUE fields = rb_imemo_new(imemo_class_fields, klass, embedded_size);
- RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_class_fields));
+ VALUE fields = rb_imemo_new(imemo_fields, klass, embedded_size);
+ RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_fields));
return fields;
}
else {
- VALUE fields = rb_imemo_new(imemo_class_fields, klass, sizeof(struct rb_class_fields));
+ VALUE fields = rb_imemo_new(imemo_fields, klass, sizeof(struct rb_fields));
FL_SET_RAW(fields, OBJ_FIELD_EXTERNAL);
IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
return fields;
@@ -128,46 +128,90 @@ imemo_class_fields_new(VALUE klass, size_t capa)
}
VALUE
-rb_imemo_class_fields_new(VALUE klass, size_t capa)
+rb_imemo_fields_new(VALUE klass, size_t capa)
{
- return imemo_class_fields_new(rb_singleton_class(klass), capa);
+ return imemo_fields_new(klass, capa);
}
static VALUE
-imemo_class_fields_new_complex(VALUE klass, size_t capa)
+imemo_fields_new_complex(VALUE klass, size_t capa)
{
- VALUE fields = imemo_class_fields_new(klass, sizeof(struct rb_class_fields));
+ VALUE fields = imemo_fields_new(klass, sizeof(struct rb_fields));
IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
return fields;
}
VALUE
-rb_imemo_class_fields_new_complex(VALUE klass, size_t capa)
+rb_imemo_fields_new_complex(VALUE klass, size_t capa)
{
- return imemo_class_fields_new_complex(rb_singleton_class(klass), capa);
+ return imemo_fields_new_complex(klass, capa);
+}
+
+static int
+imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
+{
+ VALUE field_obj = (VALUE)arg;
+ RB_OBJ_WRITTEN(field_obj, Qundef, (VALUE)value);
+ return ST_CONTINUE;
+}
+
+static int
+imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
+{
+ RB_OBJ_WRITTEN((VALUE)arg, Qundef, (VALUE)value);
+ return ST_CONTINUE;
}
VALUE
-rb_imemo_class_fields_clone(VALUE fields_obj)
+rb_imemo_fields_new_complex_tbl(VALUE klass, st_table *tbl)
+{
+ VALUE fields = imemo_fields_new(klass, sizeof(struct rb_fields));
+ IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
+ st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
+ return fields;
+}
+
+VALUE
+rb_imemo_fields_clone(VALUE fields_obj)
{
shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
VALUE clone;
if (rb_shape_too_complex_p(shape_id)) {
- clone = rb_imemo_class_fields_new_complex(CLASS_OF(fields_obj), 0);
+ clone = rb_imemo_fields_new_complex(CLASS_OF(fields_obj), 0);
RBASIC_SET_SHAPE_ID(clone, shape_id);
- st_table *src_table = rb_imemo_class_fields_complex_tbl(fields_obj);
- st_replace(rb_imemo_class_fields_complex_tbl(clone), src_table);
+ st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);
+ st_table *dest_table = rb_imemo_fields_complex_tbl(clone);
+ st_replace(dest_table, src_table);
+ st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
}
else {
- clone = imemo_class_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id));
+ clone = imemo_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id));
RBASIC_SET_SHAPE_ID(clone, shape_id);
- MEMCPY(rb_imemo_class_fields_ptr(clone), rb_imemo_class_fields_ptr(fields_obj), VALUE, RSHAPE_LEN(shape_id));
+ VALUE *fields = rb_imemo_fields_ptr(clone);
+ attr_index_t fields_count = RSHAPE_LEN(shape_id);
+ MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
+ for (attr_index_t i = 0; i < fields_count; i++) {
+ RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
+ }
}
return clone;
}
+void
+rb_imemo_fields_clear(VALUE fields_obj)
+{
+ // When replacing an imemo/fields by another one, we must clear
+ // its shape so that gc.c:obj_free_object_id won't be called.
+ if (rb_shape_obj_too_complex_p(fields_obj)) {
+ RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
+ }
+ else {
+ RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
+ }
+}
+
/* =========================================================================
* memsize
* ========================================================================= */
@@ -215,7 +259,7 @@ rb_imemo_memsize(VALUE obj)
size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);
break;
- case imemo_class_fields:
+ case imemo_fields:
if (rb_shape_obj_too_complex_p(obj)) {
size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
}
@@ -487,11 +531,11 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
break;
}
- case imemo_class_fields: {
+ case imemo_fields: {
rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);
if (rb_shape_obj_too_complex_p(obj)) {
- st_table *tbl = rb_imemo_class_fields_complex_tbl(obj);
+ st_table *tbl = rb_imemo_fields_complex_tbl(obj);
if (reference_updating) {
rb_gc_ref_update_table_values_only(tbl);
}
@@ -500,7 +544,7 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
}
}
else {
- VALUE *fields = rb_imemo_class_fields_ptr(obj);
+ VALUE *fields = rb_imemo_fields_ptr(obj);
attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
for (attr_index_t i = 0; i < len; i++) {
rb_gc_mark_and_move(&fields[i]);
@@ -602,7 +646,7 @@ rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass)
}
static inline void
-imemo_class_fields_free(struct rb_class_fields *fields)
+imemo_fields_free(struct rb_fields *fields)
{
if (rb_shape_obj_too_complex_p((VALUE)fields)) {
st_free_table(fields->as.complex.table);
@@ -686,9 +730,9 @@ rb_imemo_free(VALUE obj)
RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
break;
- case imemo_class_fields:
- imemo_class_fields_free(IMEMO_OBJ_FIELDS(obj));
- RB_DEBUG_COUNTER_INC(obj_imemo_class_fields);
+ case imemo_fields:
+ imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
+ RB_DEBUG_COUNTER_INC(obj_imemo_fields);
break;
default:
rb_bug("unreachable");

diff --git a/internal/class.h b/internal/class.h
index 2250d3f343..f4677ae400 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -432,7 +432,7 @@ static inline rb_classext_t *
RCLASS_EXT_WRITABLE(VALUE obj)
{
const rb_namespace_t *ns;
- if (RCLASS_PRIME_CLASSEXT_WRITABLE_P(obj)) {
+ if (LIKELY(RCLASS_PRIME_CLASSEXT_WRITABLE_P(obj))) {
return RCLASS_EXT_PRIME(obj);
}
// delay namespace loading to optimize for unmodified classes
@@ -526,7 +526,7 @@ RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(VALUE obj)
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
rb_classext_t *ext = RCLASS_EXT_WRITABLE(obj);
if (!ext->fields_obj) {
- RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_class_fields_new(obj, 1));
+ RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_fields_new(rb_singleton_class(obj), 1));
}
return ext->fields_obj;
}
@@ -564,7 +564,7 @@ RCLASS_FIELDS_COUNT(VALUE obj)
VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
if (rb_shape_obj_too_complex_p(fields_obj)) {
- return (uint32_t)rb_st_table_size(rb_imemo_class_fields_complex_tbl(fields_obj));
+ return (uint32_t)rb_st_table_size(rb_imemo_fields_complex_tbl(fields_obj));
}
else {
return RSHAPE_LEN(RBASIC_SHAPE_ID(fields_obj));
diff --git a/internal/imemo.h b/internal/imemo.h
index 0806baa9a6..44b41d1b1c 100644
--- a/internal/imemo.h
+++ b/internal/imemo.h
@@ -42,7 +42,7 @@ enum imemo_type {
imemo_callinfo = 11,
imemo_callcache = 12,
imemo_constcache = 13,
- imemo_class_fields = 14,
+ imemo_fields = 14,
};
/* CREF (Class REFerence) is defined in method.h */
@@ -258,7 +258,7 @@ MEMO_V2_SET(struct MEMO *m, VALUE v)
RB_OBJ_WRITE(m, &m->v2, v);
}
-struct rb_class_fields {
+struct rb_fields {
struct RBasic basic;
union {
struct {
@@ -276,20 +276,22 @@ struct rb_class_fields {
};
#define OBJ_FIELD_EXTERNAL IMEMO_FL_USER0
-#define IMEMO_OBJ_FIELDS(fields) ((struct rb_class_fields *)fields)
+#define IMEMO_OBJ_FIELDS(fields) ((struct rb_fields *)fields)
-VALUE rb_imemo_class_fields_new(VALUE klass, size_t capa);
-VALUE rb_imemo_class_fields_new_complex(VALUE klass, size_t capa);
-VALUE rb_imemo_class_fields_clone(VALUE fields_obj);
+VALUE rb_imemo_fields_new(VALUE klass, size_t capa);
+VALUE rb_imemo_fields_new_complex(VALUE klass, size_t capa);
+VALUE rb_imemo_fields_new_complex_tbl(VALUE klass, st_table *tbl);
+VALUE rb_imemo_fields_clone(VALUE fields_obj);
+void rb_imemo_fields_clear(VALUE fields_obj);
static inline VALUE *
-rb_imemo_class_fields_ptr(VALUE obj_fields)
+rb_imemo_fields_ptr(VALUE obj_fields)
{
if (!obj_fields) {
return NULL;
}
- RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_class_fields));
+ RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_fields));
if (RB_UNLIKELY(FL_TEST_RAW(obj_fields, OBJ_FIELD_EXTERNAL))) {
return IMEMO_OBJ_FIELDS(obj_fields)->as.external.ptr;
@@ -300,13 +302,13 @@ rb_imemo_class_fields_ptr(VALUE obj_fields)
}
static inline st_table *
-rb_imemo_class_fields_complex_tbl(VALUE obj_fields)
+rb_imemo_fields_complex_tbl(VALUE obj_fields)
{
if (!obj_fields) {
return NULL;
}
- RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_class_fields));
+ RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_fields));
return IMEMO_OBJ_FIELDS(obj_fields)->as.complex.table;
}
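[Editorial sketch of the renamed struct's overall layout, reconstructed from the hunk above and its uses in imemo.c; the embed member name is assumed from offsetof(struct rb_fields, as.embed):

#include "ruby/ruby.h"
#include "ruby/st.h"

struct rb_fields_sketch {
    struct RBasic basic;
    union {
        struct { VALUE fields[1]; } embed;   /* inline storage, sized at allocation */
        struct { VALUE *ptr; } external;     /* heap buffer; OBJ_FIELD_EXTERNAL is set */
        struct { st_table *table; } complex; /* id -> value map once the shape is too complex */
    } as;
};]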
diff --git a/internal/variable.h b/internal/variable.h
index 8da6c678a5..92017d6184 100644
--- a/internal/variable.h
+++ b/internal/variable.h
@@ -18,7 +18,6 @@
/* variable.c */
void rb_gc_mark_global_tbl(void);
void rb_gc_update_global_tbl(void);
-size_t rb_generic_ivar_memsize(VALUE);
VALUE rb_search_class_path(VALUE);
VALUE rb_attr_delete(VALUE, ID);
void rb_autoload_str(VALUE mod, ID id, VALUE file);
@@ -47,8 +46,7 @@ void rb_gvar_namespace_ready(const char *name);
*/
VALUE rb_mod_set_temporary_name(VALUE, VALUE);
-struct gen_fields_tbl;
-int rb_gen_fields_tbl_get(VALUE obj, ID id, struct gen_fields_tbl **fields_tbl);
+int rb_gen_fields_tbl_get(VALUE obj, ID id, VALUE *fields_obj);
void rb_obj_copy_ivs_to_hash_table(VALUE obj, st_table *table);
void rb_obj_init_too_complex(VALUE obj, st_table *table);
void rb_evict_ivars_to_hash(VALUE obj);
diff --git a/iseq.c b/iseq.c
index dcde27ba1b..1201b877ab 100644
--- a/iseq.c
+++ b/iseq.c
@@ -602,11 +602,11 @@ set_relation(rb_iseq_t *iseq, const rb_iseq_t *piseq)
body->local_iseq = iseq;
}
else if (piseq) {
- body->local_iseq = ISEQ_BODY(piseq)->local_iseq;
+ RB_OBJ_WRITE(iseq, &body->local_iseq, ISEQ_BODY(piseq)->local_iseq);
}
if (piseq) {
- body->parent_iseq = piseq;
+ RB_OBJ_WRITE(iseq, &body->parent_iseq, piseq);
}
if (type == ISEQ_TYPE_MAIN) {
diff --git a/lib/tempfile.rb b/lib/tempfile.rb
index f3213c5684..7292e72c25 100644
--- a/lib/tempfile.rb
+++ b/lib/tempfile.rb
@@ -29,7 +29,7 @@ require 'tmpdir'
# require 'tempfile'
#
# # Tempfile.create with a block
-# # The filename are choosen automatically.
+# # The filename is chosen automatically.
# # (You can specify the prefix and suffix of the filename by an optional argument.)
# Tempfile.create {|f|
# f.puts "foo"
diff --git a/marshal.c b/marshal.c
index 55b3bf156a..7db4bfc6d9 100644
--- a/marshal.c
+++ b/marshal.c
@@ -145,12 +145,14 @@ rb_marshal_define_compat(VALUE newclass, VALUE oldclass, VALUE (*dumper)(VALUE),
compat_allocator_table();
compat = ALLOC(marshal_compat_t);
- RB_OBJ_WRITE(compat_allocator_tbl_wrapper, &compat->newclass, newclass);
- RB_OBJ_WRITE(compat_allocator_tbl_wrapper, &compat->oldclass, oldclass);
+ compat->newclass = newclass;
+ compat->oldclass = oldclass;
compat->dumper = dumper;
compat->loader = loader;
st_insert(compat_allocator_table(), (st_data_t)allocator, (st_data_t)compat);
+ RB_OBJ_WRITTEN(compat_allocator_tbl_wrapper, Qundef, newclass);
+ RB_OBJ_WRITTEN(compat_allocator_tbl_wrapper, Qundef, oldclass);
}
struct dump_arg {
diff --git a/prism_compile.c b/prism_compile.c
index 2ae6c1db9e..05697ff5cf 100644
--- a/prism_compile.c
+++ b/prism_compile.c
@@ -3497,7 +3497,7 @@ pm_compile_builtin_mandatory_only_method(rb_iseq_t *iseq, pm_scope_node_t *scope
pm_scope_node_init(&def.base, &next_scope_node, scope_node);
int error_state;
- ISEQ_BODY(iseq)->mandatory_only_iseq = pm_iseq_new_with_opt(
+ const rb_iseq_t *mandatory_only_iseq = pm_iseq_new_with_opt(
&next_scope_node,
rb_iseq_base_label(iseq),
rb_iseq_path(iseq),
@@ -3509,6 +3509,7 @@ pm_compile_builtin_mandatory_only_method(rb_iseq_t *iseq, pm_scope_node_t *scope
ISEQ_COMPILE_DATA(iseq)->option,
&error_state
);
+ RB_OBJ_WRITE(iseq, &ISEQ_BODY(iseq)->mandatory_only_iseq, (VALUE)mandatory_only_iseq);
if (error_state) {
RUBY_ASSERT(ISEQ_BODY(iseq)->mandatory_only_iseq == NULL);
diff --git a/ractor.c b/ractor.c
index cce376c543..317b24dca2 100644
--- a/ractor.c
+++ b/ractor.c
@@ -1188,6 +1188,7 @@ obj_traverse_i(VALUE obj, struct obj_traverse_data *data)
// already traversed
return 0;
}
+ RB_OBJ_WRITTEN(data->rec_hash, Qundef, obj);
struct obj_traverse_callback_data d = {
.stop = false,
@@ -1644,6 +1645,8 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
}
else {
st_insert(obj_traverse_replace_rec(data), (st_data_t)obj, replacement);
+ RB_OBJ_WRITTEN(data->rec_hash, Qundef, obj);
+ RB_OBJ_WRITTEN(data->rec_hash, Qundef, replacement);
}
if (!data->move) {
@@ -1657,8 +1660,8 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
} while (0)
if (UNLIKELY(rb_obj_exivar_p(obj))) {
- struct gen_fields_tbl *fields_tbl;
- rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
+ VALUE fields_obj;
+ rb_ivar_generic_fields_tbl_lookup(obj, &fields_obj);
if (UNLIKELY(rb_shape_obj_too_complex_p(obj))) {
struct obj_traverse_replace_callback_data d = {
@@ -1667,7 +1670,7 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
.src = obj,
};
rb_st_foreach_with_replace(
- fields_tbl->as.complex.table,
+ rb_imemo_fields_complex_tbl(fields_obj),
obj_iv_hash_traverse_replace_foreach_i,
obj_iv_hash_traverse_replace_i,
(st_data_t)&d
@@ -1676,8 +1679,9 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
}
else {
uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
+ VALUE *fields = rb_imemo_fields_ptr(fields_obj);
for (uint32_t i = 0; i < fields_count; i++) {
- CHECK_AND_REPLACE(fields_tbl->as.shape.fields[i]);
+ CHECK_AND_REPLACE(fields[i]);
}
}
}
@@ -1881,6 +1885,9 @@ move_leave(VALUE obj, struct obj_traverse_replace_data *data)
rb_gc_obj_slot_size(obj) - sizeof(VALUE)
);
+ // We've copied obj's references to the replacement
+ rb_gc_writebarrier_remember(data->replacement);
+
void rb_replace_generic_ivar(VALUE clone, VALUE obj); // variable.c
rb_gc_obj_id_moved(data->replacement);
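[Editorial note: rb_gc_writebarrier_remember() puts an object on the remembered set, standing in for the per-slot barriers that the preceding memcpy of its fields skipped. A sketch of the pattern, with a hypothetical helper; the forward declaration mirrors the rb_replace_generic_ivar line above:

#include <string.h>
#include "ruby/ruby.h"

void rb_gc_writebarrier_remember(VALUE obj); // internal/gc.h

static void
bulk_copy_slots(VALUE dst, VALUE src, size_t payload_bytes)
{
    /* copy every field at once, skipping the RBasic flags word */
    memcpy((char *)dst + sizeof(VALUE), (char *)src + sizeof(VALUE), payload_bytes);
    rb_gc_writebarrier_remember(dst); /* rescan dst in full on the next minor GC */
}]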
diff --git a/ractor_sync.c b/ractor_sync.c
index 204c800a06..30c386663c 100644
--- a/ractor_sync.c
+++ b/ractor_sync.c
@@ -81,6 +81,7 @@ ractor_port_init(VALUE rpv, rb_ractor_t *r)
struct ractor_port *rp = RACTOR_PORT_PTR(rpv);
rp->r = r;
+ RB_OBJ_WRITTEN(rpv, Qundef, r->pub.self);
rp->id_ = ractor_genid_for_port(r);
ractor_add_port(r, ractor_port_id(rp));
@@ -102,6 +103,7 @@ ractor_port_initialzie_copy(VALUE self, VALUE orig)
struct ractor_port *dst = RACTOR_PORT_PTR(self);
struct ractor_port *src = RACTOR_PORT_PTR(orig);
dst->r = src->r;
+ RB_OBJ_WRITTEN(self, Qundef, dst->r->pub.self);
dst->id_ = ractor_port_id(src);
return self;
diff --git a/shape.c b/shape.c
index 20153b1c98..50cf8dcc0d 100644
--- a/shape.c
+++ b/shape.c
@@ -877,7 +877,7 @@ shape_get_next(rb_shape_t *shape, VALUE obj, ID id, bool emit_warnings)
#endif
VALUE klass;
- if (IMEMO_TYPE_P(obj, imemo_class_fields)) { // HACK
+ if (IMEMO_TYPE_P(obj, imemo_fields)) { // HACK
klass = CLASS_OF(obj);
}
else {
diff --git a/shape.h b/shape.h
index b23fda4e29..c6eb1981d0 100644
--- a/shape.h
+++ b/shape.h
@@ -111,7 +111,7 @@ static inline shape_id_t
RBASIC_SHAPE_ID(VALUE obj)
{
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
- RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_class_fields));
+ RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_fields));
#if RBASIC_SHAPE_ID_FIELD
return (shape_id_t)((RBASIC(obj)->shape_id));
#else
@@ -135,7 +135,7 @@ static inline void
RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
- RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_class_fields));
+ RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_fields));
#if RBASIC_SHAPE_ID_FIELD
RBASIC(obj)->shape_id = (VALUE)shape_id;
#else
diff --git a/spec/ruby/core/kernel/caller_locations_spec.rb b/spec/ruby/core/kernel/caller_locations_spec.rb
index aaacd9a910..6074879d59 100644
--- a/spec/ruby/core/kernel/caller_locations_spec.rb
+++ b/spec/ruby/core/kernel/caller_locations_spec.rb
@@ -83,7 +83,7 @@ describe 'Kernel#caller_locations' do
end
end
- ruby_version_is "3.4" do
+ ruby_version_is "3.4"..."3.5" do
it "includes core library methods defined in Ruby" do
file, line = Kernel.instance_method(:tap).source_location
file.should.start_with?('<internal:')
@@ -94,5 +94,17 @@ describe 'Kernel#caller_locations' do
loc.path.should.start_with? "<internal:"
end
end
+
+ ruby_version_is "3.5" do
+ it "does not include core library methods defined in Ruby" do
+ file, line = Kernel.instance_method(:tap).source_location
+ file.should.start_with?('<internal:')
+
+ loc = nil
+ tap { loc = caller_locations(1, 1)[0] }
+ loc.label.should == "Kernel#tap"
+ loc.path.should == __FILE__
+ end
+ end
end
end
diff --git a/spec/ruby/core/kernel/caller_spec.rb b/spec/ruby/core/kernel/caller_spec.rb
index 33c7929a31..4bf9f7c2c2 100644
--- a/spec/ruby/core/kernel/caller_spec.rb
+++ b/spec/ruby/core/kernel/caller_spec.rb
@@ -84,13 +84,26 @@ describe 'Kernel#caller' do
end
guard -> { Kernel.instance_method(:tap).source_location } do
- it "includes core library methods defined in Ruby" do
- file, line = Kernel.instance_method(:tap).source_location
- file.should.start_with?('<internal:')
+ ruby_version_is ""..."3.5" do
+ it "includes core library methods defined in Ruby" do
+ file, line = Kernel.instance_method(:tap).source_location
+ file.should.start_with?('<internal:')
+
+ loc = nil
+ tap { loc = caller(1, 1)[0] }
+ loc.should =~ /\A<internal:.*in [`'](?:Kernel#)?tap'\z/
+ end
+ end
+
+ ruby_version_is "3.5" do
+ it "includes core library methods defined in Ruby" do
+ file, line = Kernel.instance_method(:tap).source_location
+ file.should.start_with?('<internal:')
- loc = nil
- tap { loc = caller(1, 1)[0] }
- loc.should =~ /\A<internal:.*in [`'](?:Kernel#)?tap'\z/
+ loc = nil
+ tap { loc = caller(1, 1)[0] }
+ loc.should =~ /\A#{ __FILE__ }:.*in [`'](?:Kernel#)?tap'\z/
+ end
end
end
end
diff --git a/test/prism/lex_test.rb b/test/prism/lex_test.rb
index 2786c45a22..d34c3d9dd3 100644
--- a/test/prism/lex_test.rb
+++ b/test/prism/lex_test.rb
@@ -17,7 +17,7 @@ module Prism
"spanning_heredoc.txt",
"spanning_heredoc_newlines.txt",
# Prism emits a single :on_tstring_content in <<- style heredocs when there
- # is a line continuation preceeded by escaped backslashes. It should emit two, same
+ # is a line continuation preceded by escaped backslashes. It should emit two, same
# as if the backslashes are not present.
"heredocs_with_fake_newlines.txt",
]
diff --git a/test/ruby/test_backtrace.rb b/test/ruby/test_backtrace.rb
index fca7b62030..01a757f827 100644
--- a/test/ruby/test_backtrace.rb
+++ b/test/ruby/test_backtrace.rb
@@ -454,4 +454,10 @@ class TestBacktrace < Test::Unit::TestCase
foo::Bar.baz
end;
end
+
+ def test_backtrace_internal_frame
+ backtrace = tap { break caller_locations(0) }
+ assert_equal(__FILE__, backtrace[1].path) # not "<internal:kernel>"
+ assert_equal("Kernel#tap", backtrace[1].label)
+ end
end
diff --git a/test/ruby/test_encoding.rb b/test/ruby/test_encoding.rb
index ee37199be0..0ab357f53a 100644
--- a/test/ruby/test_encoding.rb
+++ b/test/ruby/test_encoding.rb
@@ -33,7 +33,7 @@ class TestEncoding < Test::Unit::TestCase
encodings.each do |e|
assert_raise(TypeError) { e.dup }
assert_raise(TypeError) { e.clone }
- assert_equal(e.object_id, Marshal.load(Marshal.dump(e)).object_id)
+ assert_same(e, Marshal.load(Marshal.dump(e)))
end
end
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index d7249053e5..2b171b02b1 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -295,6 +295,34 @@ class TestZJIT < Test::Unit::TestCase
}, insns: [:opt_ge], call_threshold: 2
end
+ def test_opt_hash_freeze
+ assert_compiles '{}', <<~RUBY, insns: [:opt_hash_freeze]
+ def test = {}.freeze
+ test
+ RUBY
+ end
+
+ def test_opt_ary_freeze
+ assert_compiles '[]', <<~RUBY, insns: [:opt_ary_freeze]
+ def test = [].freeze
+ test
+ RUBY
+ end
+
+ def test_opt_str_freeze
+ assert_compiles '""', <<~RUBY, insns: [:opt_str_freeze]
+ def test = "".freeze
+ test
+ RUBY
+ end
+
+ def test_opt_str_uminus
+ assert_compiles '""', <<~RUBY, insns: [:opt_str_uminus]
+ def test = -""
+ test
+ RUBY
+ end
+
def test_new_array_empty
assert_compiles '[]', %q{
def test = []
@@ -650,6 +678,27 @@ class TestZJIT < Test::Unit::TestCase
}
end
+ def test_uncached_getconstant_path
+ assert_compiles RUBY_COPYRIGHT.dump, %q{
+ def test = RUBY_COPYRIGHT
+ test
+ }, call_threshold: 1, insns: [:opt_getconstant_path]
+ end
+
+ def test_getconstant_path_autoload
+ # A constant-referencing expression can run arbitrary code through Kernel#autoload.
+ Dir.mktmpdir('autoload') do |tmpdir|
+ autoload_path = File.join(tmpdir, 'test_getconstant_path_autoload.rb')
+ File.write(autoload_path, 'X = RUBY_COPYRIGHT')
+
+ assert_compiles RUBY_COPYRIGHT.dump, %Q{
+ Object.autoload(:X, #{File.realpath(autoload_path).inspect})
+ def test = X
+ test
+ }, call_threshold: 1, insns: [:opt_getconstant_path]
+ end
+ end
+
def test_send_backtrace
backtrace = [
"-e:2:in 'Object#jit_frame1'",
diff --git a/time.c b/time.c
index 0e91521db1..1eb8f8da9c 100644
--- a/time.c
+++ b/time.c
@@ -2307,14 +2307,14 @@ utc_offset_arg(VALUE arg)
static void
zone_set_offset(VALUE zone, struct time_object *tobj,
- wideval_t tlocal, wideval_t tutc)
+ wideval_t tlocal, wideval_t tutc, VALUE time)
{
/* tlocal and tutc must be unmagnified and in seconds */
wideval_t w = wsub(tlocal, tutc);
VALUE off = w2v(w);
validate_utc_offset(off);
- tobj->vtm.utc_offset = off;
- tobj->vtm.zone = zone;
+ RB_OBJ_WRITE(time, &tobj->vtm.utc_offset, off);
+ RB_OBJ_WRITE(time, &tobj->vtm.zone, zone);
TZMODE_SET_LOCALTIME(tobj);
}
@@ -2429,7 +2429,7 @@ zone_timelocal(VALUE zone, VALUE time)
if (UNDEF_P(utc)) return 0;
s = extract_time(utc);
- zone_set_offset(zone, tobj, t, s);
+ zone_set_offset(zone, tobj, t, s, time);
s = rb_time_magnify(s);
if (tobj->vtm.subsecx != INT2FIX(0)) {
s = wadd(s, v2w(tobj->vtm.subsecx));
@@ -2458,7 +2458,7 @@ zone_localtime(VALUE zone, VALUE time)
s = extract_vtm(local, time, tobj, subsecx);
tobj->vtm.tm_got = 1;
- zone_set_offset(zone, tobj, s, t);
+ zone_set_offset(zone, tobj, s, t, time);
zone_set_dst(zone, tobj, tm);
RB_GC_GUARD(time);
@@ -4088,7 +4088,9 @@ time_init_copy(VALUE copy, VALUE time)
if (!OBJ_INIT_COPY(copy, time)) return copy;
GetTimeval(time, tobj);
GetNewTimeval(copy, tcopy);
- MEMCPY(tcopy, tobj, struct time_object, 1);
+
+ time_set_timew(copy, tcopy, tobj->timew);
+ time_set_vtm(copy, tcopy, tobj->vtm);
return copy;
}
@@ -5752,7 +5754,7 @@ end_submicro: ;
}
if (!NIL_P(zone)) {
zone = mload_zone(time, zone);
- tobj->vtm.zone = zone;
+ RB_OBJ_WRITE(time, &tobj->vtm.zone, zone);
zone_localtime(zone, time);
}
@@ -5798,8 +5800,10 @@ tm_from_time(VALUE klass, VALUE time)
tm = time_s_alloc(klass);
ttm = RTYPEDDATA_GET_DATA(tm);
v = &vtm;
- GMTIMEW(ttm->timew = tobj->timew, v);
- ttm->timew = wsub(ttm->timew, v->subsecx);
+
+ WIDEVALUE timew = tobj->timew;
+ GMTIMEW(timew, v);
+ time_set_timew(tm, ttm, wsub(timew, v->subsecx));
v->subsecx = INT2FIX(0);
v->zone = Qnil;
time_set_vtm(tm, ttm, *v);
diff --git a/tool/auto-style.rb b/tool/auto-style.rb
index 25055ace7d..0c6ce6848a 100755
--- a/tool/auto-style.rb
+++ b/tool/auto-style.rb
@@ -233,10 +233,10 @@ edited_files = files.select do |f|
if File.fnmatch?("*.[ch]", f, File::FNM_PATHNAME) &&
!DIFFERENT_STYLE_FILES.any? {|pat| File.fnmatch?(pat, f, File::FNM_PATHNAME)}
- src.gsub!(/^\w+\([^(\n)]*?\)\K[ \t]*(?=\{$)/, "\n")
- src.gsub!(/^([ \t]*)\}\K[ \t]*(?=else\b)/, "\n" '\1')
- src.gsub!(/^[ \t]*\}\n\K\n+(?=[ \t]*else\b)/, '')
- indent = indent0 = true
+ indent0 = true if src.gsub!(/^\w+\([^(\n)]*?\)\K[ \t]*(?=\{$)/, "\n")
+ indent0 = true if src.gsub!(/^([ \t]*)\}\K[ \t]*(?=else\b)/, "\n" '\1')
+ indent0 = true if src.gsub!(/^[ \t]*\}\n\K\n+(?=[ \t]*else\b)/, '')
+ indent ||= indent0
end
if trailing0 or eofnewline0 or expandtab0 or indent0
diff --git a/tool/lib/_tmpdir.rb b/tool/lib/_tmpdir.rb
index fd429dab37..daa1a1f235 100644
--- a/tool/lib/_tmpdir.rb
+++ b/tool/lib/_tmpdir.rb
@@ -4,11 +4,11 @@ template = "rubytest."
# Assume the directory by these environment variables are safe.
base = [ENV["TMPDIR"], ENV["TMP"], "/tmp"].find do |tmp|
next unless tmp and tmp.size <= 50 and File.directory?(tmp)
- # On macOS, the default TMPDIR is very long, inspite of UNIX socket
- # path length is limited.
+ # On macOS, the default TMPDIR is very long, in spite of UNIX socket
+ # path length being limited.
#
# Also Rubygems creates its own temporary directory per tests, and
- # some tests copy the full path of gemhome there. In that caes, the
+ # some tests copy the full path of gemhome there. In that case, the
# path contains both temporary names twice, and can exceed path name
# limit very easily.
tmp
diff --git a/tool/test/testunit/test_parallel.rb b/tool/test/testunit/test_parallel.rb
index a0cbca69eb..d87e0ed327 100644
--- a/tool/test/testunit/test_parallel.rb
+++ b/tool/test/testunit/test_parallel.rb
@@ -126,19 +126,19 @@ module TestParallel
assert_not_nil($1, "'done' was not found")
result = Marshal.load($1.chomp.unpack1("m"))
- assert_equal(5, result[0])
- pend "TODO: result[1] returns 17. We should investigate it" do # TODO: misusage of pend (pend doens't use given block)
- assert_equal(12, result[1])
- end
- assert_kind_of(Array,result[2])
- assert_kind_of(Array,result[3])
- assert_kind_of(Array,result[4])
- assert_kind_of(Array,result[2][1])
- assert_kind_of(Test::Unit::AssertionFailedError,result[2][0][2])
- assert_kind_of(Test::Unit::PendedError,result[2][1][2])
- assert_kind_of(Test::Unit::PendedError,result[2][2][2])
- assert_kind_of(Exception, result[2][3][2])
- assert_equal(result[5], "TestE")
+ tests, asserts, reports, failures, loadpaths, suite = result
+ assert_equal(5, tests)
+ assert_equal(12, asserts)
+ assert_kind_of(Array, reports)
+ assert_kind_of(Array, failures)
+ assert_kind_of(Array, loadpaths)
+ reports.sort_by! {|_, t| t}
+ assert_kind_of(Array, reports[1])
+ assert_kind_of(Test::Unit::AssertionFailedError, reports[0][2])
+ assert_kind_of(Test::Unit::PendedError, reports[1][2])
+ assert_kind_of(Test::Unit::PendedError, reports[2][2])
+ assert_kind_of(Exception, reports[3][2])
+ assert_equal("TestE", suite)
end
end
diff --git a/tool/test/testunit/tests_for_parallel/ptest_forth.rb b/tool/test/testunit/tests_for_parallel/ptest_forth.rb
index 8831676e19..54474c828d 100644
--- a/tool/test/testunit/tests_for_parallel/ptest_forth.rb
+++ b/tool/test/testunit/tests_for_parallel/ptest_forth.rb
@@ -8,19 +8,19 @@ class TestE < Test::Unit::TestCase
assert_equal(1,1)
end
- def test_always_skip
- skip "always"
+ def test_always_omit
+ omit "always"
end
def test_always_fail
assert_equal(0,1)
end
- def test_skip_after_unknown_error
+ def test_pend_after_unknown_error
begin
raise UnknownError, "unknown error"
rescue
- skip "after raise"
+ pend "after raise"
end
end
diff --git a/variable.c b/variable.c
index a2f8c17b47..e535aefe27 100644
--- a/variable.c
+++ b/variable.c
@@ -1197,8 +1197,31 @@ rb_generic_fields_tbl_get(void)
return generic_fields_tbl_;
}
+static inline VALUE
+generic_fields_lookup(VALUE obj, ID id, bool force_check_ractor)
+{
+ VALUE fields_obj = Qfalse;
+ RB_VM_LOCKING() {
+ st_table *generic_tbl = generic_fields_tbl(obj, id, false);
+ st_lookup(generic_tbl, obj, (st_data_t *)&fields_obj);
+ }
+ return fields_obj;
+}
+
+static inline void
+generic_fields_insert(VALUE obj, VALUE fields_obj)
+{
+ RUBY_ASSERT(IMEMO_TYPE_P(fields_obj, imemo_fields));
+
+ RB_VM_LOCKING() {
+ st_table *generic_tbl = generic_fields_tbl_no_ractor_check(obj);
+ st_insert(generic_tbl, obj, fields_obj);
+ }
+ RB_OBJ_WRITTEN(obj, Qundef, fields_obj);
+}
+
int
-rb_gen_fields_tbl_get(VALUE obj, ID id, struct gen_fields_tbl **fields_tbl)
+rb_gen_fields_tbl_get(VALUE obj, ID id, VALUE *fields_obj)
{
RUBY_ASSERT(!RB_TYPE_P(obj, T_ICLASS));
@@ -1207,7 +1230,7 @@ rb_gen_fields_tbl_get(VALUE obj, ID id, struct gen_fields_tbl **fields_tbl)
RB_VM_LOCKING() {
if (st_lookup(generic_fields_tbl(obj, id, false), (st_data_t)obj, &data)) {
- *fields_tbl = (struct gen_fields_tbl *)data;
+ *fields_obj = (VALUE)data;
r = 1;
}
}
@@ -1216,33 +1239,17 @@ rb_gen_fields_tbl_get(VALUE obj, ID id, struct gen_fields_tbl **fields_tbl)
}
int
-rb_ivar_generic_fields_tbl_lookup(VALUE obj, struct gen_fields_tbl **fields_tbl)
-{
- return rb_gen_fields_tbl_get(obj, 0, fields_tbl);
-}
-
-static size_t
-gen_fields_tbl_bytes(size_t n)
+rb_ivar_generic_fields_tbl_lookup(VALUE obj, VALUE *fields_obj)
{
- return offsetof(struct gen_fields_tbl, as.shape.fields) + n * sizeof(VALUE);
+ return rb_gen_fields_tbl_get(obj, 0, fields_obj);
}
-
void
rb_mark_generic_ivar(VALUE obj)
{
- st_data_t data;
- if (st_lookup(generic_fields_tbl_no_ractor_check(obj), (st_data_t)obj, &data)) {
- struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)data;
- if (rb_shape_obj_too_complex_p(obj)) {
- rb_mark_tbl_no_pin(fields_tbl->as.complex.table);
- }
- else {
- uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
- for (uint32_t i = 0; i < fields_count; i++) {
- rb_gc_mark_movable(fields_tbl->as.shape.fields[i]);
- }
- }
+ VALUE data;
+ if (st_lookup(generic_fields_tbl_no_ractor_check(obj), (st_data_t)obj, (st_data_t *)&data)) {
+ rb_gc_mark_movable(data);
}
}
@@ -1252,48 +1259,10 @@ rb_free_generic_ivar(VALUE obj)
if (rb_obj_exivar_p(obj)) {
st_data_t key = (st_data_t)obj, value;
- bool too_complex = rb_shape_obj_too_complex_p(obj);
-
RB_VM_LOCKING() {
- if (st_delete(generic_fields_tbl_no_ractor_check(obj), &key, &value)) {
- struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)value;
-
- if (UNLIKELY(too_complex)) {
- st_free_table(fields_tbl->as.complex.table);
- }
-
- xfree(fields_tbl);
- }
- }
- RBASIC_SET_SHAPE_ID(obj, ROOT_SHAPE_ID);
- }
-}
-
-size_t
-rb_generic_ivar_memsize(VALUE obj)
-{
- struct gen_fields_tbl *fields_tbl;
-
- if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- if (rb_shape_obj_too_complex_p(obj)) {
- return sizeof(struct gen_fields_tbl) + st_memsize(fields_tbl->as.complex.table);
- }
- else {
- return gen_fields_tbl_bytes(RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)));
+ st_delete(generic_fields_tbl_no_ractor_check(obj), &key, &value);
}
}
- return 0;
-}
-
-static size_t
-gen_fields_tbl_count(VALUE obj, const struct gen_fields_tbl *fields_tbl)
-{
- if (rb_shape_obj_too_complex_p(obj)) {
- return st_table_size(fields_tbl->as.complex.table);
- }
- else {
- return RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
- }
}
VALUE
@@ -1321,12 +1290,16 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id)
case T_OBJECT:
fields_hash = ROBJECT_FIELDS_HASH(obj);
break;
+ case T_IMEMO:
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+ fields_hash = rb_imemo_fields_complex_tbl(obj);
+ break;
default:
RUBY_ASSERT(rb_obj_exivar_p(obj));
- struct gen_fields_tbl *fields_tbl = NULL;
- rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
- RUBY_ASSERT(fields_tbl);
- fields_hash = fields_tbl->as.complex.table;
+ VALUE fields_obj = 0;
+ rb_ivar_generic_fields_tbl_lookup(obj, &fields_obj);
+ RUBY_ASSERT(fields_obj);
+ fields_hash = rb_imemo_fields_complex_tbl(fields_obj);
break;
}
VALUE value = Qundef;
@@ -1352,12 +1325,16 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id)
case T_OBJECT:
fields = ROBJECT_FIELDS(obj);
break;
+ case T_IMEMO:
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+ fields = rb_imemo_fields_ptr(obj);
+ break;
default:
RUBY_ASSERT(rb_obj_exivar_p(obj));
- struct gen_fields_tbl *fields_tbl = NULL;
- rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
- RUBY_ASSERT(fields_tbl);
- fields = fields_tbl->as.shape.fields;
+ VALUE fields_obj = 0;
+ rb_ivar_generic_fields_tbl_lookup(obj, &fields_obj);
+ RUBY_ASSERT(fields_obj);
+ fields = rb_imemo_fields_ptr(fields_obj);
break;
}
return fields[attr_index];
@@ -1393,11 +1370,11 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
case T_IMEMO:
// Handled like T_OBJECT
{
- RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields));
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
shape_id = RBASIC_SHAPE_ID(obj);
if (rb_shape_too_complex_p(shape_id)) {
- st_table *iv_table = rb_imemo_class_fields_complex_tbl(obj);
+ st_table *iv_table = rb_imemo_fields_complex_tbl(obj);
VALUE val;
if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) {
return val;
@@ -1408,7 +1385,7 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
}
RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
- ivar_list = rb_imemo_class_fields_ptr(obj);
+ ivar_list = rb_imemo_fields_ptr(obj);
break;
}
case T_OBJECT:
@@ -1432,19 +1409,21 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
default:
shape_id = RBASIC_SHAPE_ID(obj);
if (rb_obj_exivar_p(obj)) {
- struct gen_fields_tbl *fields_tbl;
- rb_gen_fields_tbl_get(obj, id, &fields_tbl);
+ VALUE fields_obj = 0;
+ rb_gen_fields_tbl_get(obj, id, &fields_obj);
- if (rb_shape_obj_too_complex_p(obj)) {
+ RUBY_ASSERT(fields_obj);
+
+ if (rb_shape_obj_too_complex_p(fields_obj)) {
VALUE val;
- if (rb_st_lookup(fields_tbl->as.complex.table, (st_data_t)id, (st_data_t *)&val)) {
+ if (rb_st_lookup(rb_imemo_fields_complex_tbl(fields_obj), (st_data_t)id, (st_data_t *)&val)) {
return val;
}
else {
return undef;
}
}
- ivar_list = fields_tbl->as.shape.fields;
+ ivar_list = rb_imemo_fields_ptr(fields_obj);
}
else {
return undef;
@@ -1486,7 +1465,7 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
if (rb_multi_ractor_p()) {
- fields_obj = rb_imemo_class_fields_clone(fields_obj);
+ fields_obj = rb_imemo_fields_clone(fields_obj);
val = rb_ivar_delete(fields_obj, id, undef);
RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, fields_obj);
}
@@ -1523,16 +1502,16 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
rb_bug("Unreachable");
break;
case T_IMEMO:
- RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields));
- fields = rb_imemo_class_fields_ptr(obj);
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+ fields = rb_imemo_fields_ptr(obj);
break;
case T_OBJECT:
fields = ROBJECT_FIELDS(obj);
break;
default: {
- struct gen_fields_tbl *fields_tbl;
- rb_gen_fields_tbl_get(obj, id, &fields_tbl);
- fields = fields_tbl->as.shape.fields;
+ VALUE fields_obj;
+ rb_gen_fields_tbl_get(obj, id, &fields_obj);
+ fields = rb_imemo_fields_ptr(fields_obj);
break;
}
}
@@ -1576,8 +1555,8 @@ too_complex:
break;
case T_IMEMO:
- RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields));
- table = rb_imemo_class_fields_complex_tbl(obj);
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+ table = rb_imemo_fields_complex_tbl(obj);
break;
case T_OBJECT:
@@ -1585,9 +1564,9 @@ too_complex:
break;
default: {
- struct gen_fields_tbl *fields_tbl;
- if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- table = fields_tbl->as.complex.table;
+ VALUE fields_obj;
+ if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
+ table = rb_imemo_fields_complex_tbl(fields_obj);
}
break;
}
@@ -1609,6 +1588,8 @@ rb_attr_delete(VALUE obj, ID id)
return rb_ivar_delete(obj, id, Qnil);
}
+static inline void generic_update_fields_obj(VALUE obj, VALUE fields_obj, const VALUE original_fields_obj);
+
static shape_id_t
obj_transition_too_complex(VALUE obj, st_table *table)
{
@@ -1619,46 +1600,37 @@ obj_transition_too_complex(VALUE obj, st_table *table)
RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
shape_id_t shape_id = rb_shape_transition_complex(obj);
- VALUE *old_fields = NULL;
-
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
- if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
- old_fields = ROBJECT_FIELDS(obj);
+ {
+ VALUE *old_fields = NULL;
+ if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
+ old_fields = ROBJECT_FIELDS(obj);
+ }
+ RBASIC_SET_SHAPE_ID(obj, shape_id);
+ ROBJECT_SET_FIELDS_HASH(obj, table);
+ if (old_fields) {
+ xfree(old_fields);
+ }
}
- RBASIC_SET_SHAPE_ID(obj, shape_id);
- ROBJECT_SET_FIELDS_HASH(obj, table);
break;
case T_CLASS:
case T_MODULE:
rb_bug("Unreachable");
break;
default:
- RB_VM_LOCKING() {
- struct st_table *gen_ivs = generic_fields_tbl_no_ractor_check(obj);
-
- struct gen_fields_tbl *old_fields_tbl = NULL;
- st_lookup(gen_ivs, (st_data_t)obj, (st_data_t *)&old_fields_tbl);
-
- if (old_fields_tbl) {
- /* We need to modify old_fields_tbl to have the too complex shape
- * and hold the table because the xmalloc could trigger a GC
- * compaction. We want the table to be updated rather than
- * the original fields. */
- rb_obj_set_shape_id(obj, shape_id);
- old_fields_tbl->as.complex.table = table;
- old_fields = (VALUE *)old_fields_tbl;
- }
-
- struct gen_fields_tbl *fields_tbl = xmalloc(sizeof(struct gen_fields_tbl));
- fields_tbl->as.complex.table = table;
- st_insert(gen_ivs, (st_data_t)obj, (st_data_t)fields_tbl);
+ {
+ VALUE fields_obj = rb_imemo_fields_new_complex_tbl(rb_obj_class(obj), table);
+ RBASIC_SET_SHAPE_ID(fields_obj, shape_id);
+ RB_VM_LOCKING() {
+ const VALUE original_fields_obj = generic_fields_lookup(obj, 0, false);
+ generic_update_fields_obj(obj, fields_obj, original_fields_obj);
+ }
RBASIC_SET_SHAPE_ID(obj, shape_id);
}
}
- xfree(old_fields);
return shape_id;
}
@@ -1673,12 +1645,12 @@ rb_obj_init_too_complex(VALUE obj, st_table *table)
obj_transition_too_complex(obj, table);
}
+void rb_obj_copy_fields_to_hash_table(VALUE obj, st_table *table);
+
// Copy all object fields, including ivars and internal object_id, etc
shape_id_t
rb_evict_fields_to_hash(VALUE obj)
{
- void rb_obj_copy_fields_to_hash_table(VALUE obj, st_table *table);
-
RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
st_table *table = st_init_numtable_with_size(RSHAPE_LEN(RBASIC_SHAPE_ID(obj)));
@@ -1809,135 +1781,174 @@ general_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val, void *data,
}
}
-struct gen_fields_lookup_ensure_size {
- VALUE obj;
- ID id;
- shape_id_t shape_id;
- bool resize;
-};
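+// Replace the fields object registered for `obj`: clear the old one (if any)
+// and insert the new one into the generic fields table.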
+static inline void
+generic_update_fields_obj(VALUE obj, VALUE fields_obj, const VALUE original_fields_obj)
+{
+ if (fields_obj != original_fields_obj) {
+ if (original_fields_obj) {
+ // Reset to the root shape so releasing the old fields object doesn't
+ // trigger cleanup such as free_object_id.
+ rb_imemo_fields_clear(original_fields_obj);
+ }
-static VALUE *
-generic_ivar_set_shape_fields(VALUE obj, void *data)
+ generic_fields_insert(obj, fields_obj);
+ }
+}
+
+static void
+generic_ivar_set(VALUE obj, ID id, VALUE val)
{
- RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
+ bool existing = true;
- struct gen_fields_lookup_ensure_size *fields_lookup = data;
- struct gen_fields_tbl *fields_tbl = NULL;
+ VALUE fields_obj = generic_fields_lookup(obj, id, false);
- // We can't use st_update, since when resizing the fields table GC can
- // happen, which will modify the st_table and may rebuild it
- RB_VM_LOCKING() {
- st_table *tbl = generic_fields_tbl(obj, fields_lookup->id, false);
- int existing = st_lookup(tbl, (st_data_t)obj, (st_data_t *)&fields_tbl);
+ const VALUE original_fields_obj = fields_obj;
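+ // No fields object yet: lazily allocate a minimal one on this first write.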
+ if (!fields_obj) {
+ fields_obj = rb_imemo_fields_new(rb_obj_class(obj), 1);
+ }
+ RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == RBASIC_SHAPE_ID(fields_obj));
- if (!existing || fields_lookup->resize) {
- uint32_t new_capa = RSHAPE_CAPACITY(fields_lookup->shape_id);
- uint32_t old_capa = RSHAPE_CAPACITY(RSHAPE_PARENT(fields_lookup->shape_id));
+ shape_id_t current_shape_id = RBASIC_SHAPE_ID(fields_obj);
+ shape_id_t next_shape_id = current_shape_id;
- if (existing) {
- RUBY_ASSERT(RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_IVAR) || RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_OBJ_ID));
- RUBY_ASSERT(old_capa < new_capa);
- RUBY_ASSERT(fields_tbl);
- }
- else {
- RUBY_ASSERT(!fields_tbl);
- RUBY_ASSERT(old_capa == 0);
- }
- RUBY_ASSERT(new_capa > 0);
+ if (UNLIKELY(rb_shape_too_complex_p(current_shape_id))) {
+ goto too_complex;
+ }
- struct gen_fields_tbl *old_fields_tbl = fields_tbl;
- fields_tbl = xmalloc(gen_fields_tbl_bytes(new_capa));
- if (old_fields_tbl) {
- memcpy(fields_tbl, old_fields_tbl, gen_fields_tbl_bytes(old_capa));
- }
- st_insert(tbl, (st_data_t)obj, (st_data_t)fields_tbl);
- if (old_fields_tbl) {
- xfree(old_fields_tbl);
+ attr_index_t index;
+ if (!rb_shape_get_iv_index(current_shape_id, id, &index)) {
+ existing = false;
+
+ index = RSHAPE_LEN(current_shape_id);
+ if (index >= SHAPE_MAX_FIELDS) {
+ rb_raise(rb_eArgError, "too many instance variables");
+ }
+
+ next_shape_id = rb_shape_transition_add_ivar(fields_obj, id);
+ if (UNLIKELY(rb_shape_too_complex_p(next_shape_id))) {
+ attr_index_t current_len = RSHAPE_LEN(current_shape_id);
+ fields_obj = rb_imemo_fields_new_complex(rb_obj_class(obj), current_len + 1);
+ if (current_len) {
+ rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_fields_complex_tbl(fields_obj));
}
+ RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
+ goto too_complex;
}
- if (fields_lookup->shape_id) {
- rb_obj_set_shape_id(fields_lookup->obj, fields_lookup->shape_id);
+ attr_index_t next_capacity = RSHAPE_CAPACITY(next_shape_id);
+ attr_index_t current_capacity = RSHAPE_CAPACITY(current_shape_id);
+
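+ // If the new shape needs more capacity, move the fields into a larger fields object.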
+ if (next_capacity != current_capacity) {
+ RUBY_ASSERT(next_capacity > current_capacity);
+
+ fields_obj = rb_imemo_fields_new(rb_obj_class(obj), next_capacity);
+ if (original_fields_obj) {
+ attr_index_t fields_count = RSHAPE_LEN(current_shape_id);
+ VALUE *fields = rb_imemo_fields_ptr(fields_obj);
+ MEMCPY(fields, rb_imemo_fields_ptr(original_fields_obj), VALUE, fields_count);
+ for (attr_index_t i = 0; i < fields_count; i++) {
+ RB_OBJ_WRITTEN(fields_obj, Qundef, fields[i]);
+ }
+ }
}
+
+ RUBY_ASSERT(RSHAPE(next_shape_id)->type == SHAPE_IVAR);
+ RUBY_ASSERT(index == (RSHAPE_LEN(next_shape_id) - 1));
}
- return fields_tbl->as.shape.fields;
-}
+ VALUE *fields = rb_imemo_fields_ptr(fields_obj);
+ RB_OBJ_WRITE(fields_obj, &fields[index], val);
-static void
-generic_ivar_set_shape_resize_fields(VALUE obj, attr_index_t _old_capa, attr_index_t new_capa, void *data)
-{
- struct gen_fields_lookup_ensure_size *fields_lookup = data;
+ if (!existing) {
+ RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
+ }
- fields_lookup->resize = true;
-}
+ generic_update_fields_obj(obj, fields_obj, original_fields_obj);
-static void
-generic_ivar_set_set_shape_id(VALUE obj, shape_id_t shape_id, void *data)
-{
- struct gen_fields_lookup_ensure_size *fields_lookup = data;
+ if (!existing) {
+ RBASIC_SET_SHAPE_ID(obj, next_shape_id);
+ }
- fields_lookup->shape_id = shape_id;
-}
+ RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == RBASIC_SHAPE_ID(fields_obj));
-static shape_id_t
-generic_ivar_set_transition_too_complex(VALUE obj, void *_data)
-{
- shape_id_t new_shape_id = rb_evict_fields_to_hash(obj);
- return new_shape_id;
-}
+ return;
-static st_table *
-generic_ivar_set_too_complex_table(VALUE obj, void *data)
-{
- struct gen_fields_lookup_ensure_size *fields_lookup = data;
+too_complex:
+ {
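+ // Once too complex, fields live in an st_table keyed by the ivar ID.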
+ st_table *table = rb_imemo_fields_complex_tbl(fields_obj);
+ existing = st_insert(table, (st_data_t)id, (st_data_t)val);
+ RB_OBJ_WRITTEN(fields_obj, Qundef, val);
- struct gen_fields_tbl *fields_tbl;
- if (!rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- fields_tbl = xmalloc(sizeof(struct gen_fields_tbl));
- fields_tbl->as.complex.table = st_init_numtable_with_size(1);
+ generic_update_fields_obj(obj, fields_obj, original_fields_obj);
- RB_VM_LOCKING() {
- st_insert(generic_fields_tbl(obj, fields_lookup->id, false), (st_data_t)obj, (st_data_t)fields_tbl);
+ if (!existing) {
+ RBASIC_SET_SHAPE_ID(obj, next_shape_id);
}
}
- RUBY_ASSERT(rb_shape_obj_too_complex_p(obj));
+ RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == RBASIC_SHAPE_ID(fields_obj));
- return fields_tbl->as.complex.table;
+ return;
}
static void
-generic_ivar_set(VALUE obj, ID id, VALUE val)
+generic_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val)
{
- struct gen_fields_lookup_ensure_size fields_lookup = {
- .obj = obj,
- .id = id,
- .resize = false,
- };
+ bool existing = true;
- general_ivar_set(obj, id, val, &fields_lookup,
- generic_ivar_set_shape_fields,
- generic_ivar_set_shape_resize_fields,
- generic_ivar_set_set_shape_id,
- generic_ivar_set_transition_too_complex,
- generic_ivar_set_too_complex_table);
-}
+ VALUE fields_obj = generic_fields_lookup(obj, RSHAPE_EDGE_NAME(target_shape_id), false);
+ const VALUE original_fields_obj = fields_obj;
-static void
-generic_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val)
-{
- struct gen_fields_lookup_ensure_size fields_lookup = {
- .obj = obj,
- .resize = false,
- };
+ shape_id_t current_shape_id = fields_obj ? RBASIC_SHAPE_ID(fields_obj) : ROOT_SHAPE_ID;
+
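+ // The target shape is already known here: either store into the
+ // complex st_table or write at the shape-provided index.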
+ if (UNLIKELY(rb_shape_too_complex_p(target_shape_id))) {
+ if (UNLIKELY(!rb_shape_too_complex_p(current_shape_id))) {
+ attr_index_t current_len = RSHAPE_LEN(current_shape_id);
+ fields_obj = rb_imemo_fields_new_complex(rb_obj_class(obj), current_len + 1);
+ if (current_len) {
+ rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_fields_complex_tbl(fields_obj));
+ }
+
+ current_shape_id = target_shape_id;
+ }
+
+ existing = false;
+ st_table *table = rb_imemo_fields_complex_tbl(fields_obj);
+
+ RUBY_ASSERT(RSHAPE_EDGE_NAME(target_shape_id));
+ st_insert(table, (st_data_t)RSHAPE_EDGE_NAME(target_shape_id), (st_data_t)val);
+ RB_OBJ_WRITTEN(fields_obj, Qundef, val);
+ RBASIC_SET_SHAPE_ID(fields_obj, target_shape_id);
+ }
+ else {
+ attr_index_t index = RSHAPE_INDEX(target_shape_id);
+ if (index >= RSHAPE_CAPACITY(current_shape_id)) {
+ fields_obj = rb_imemo_fields_new(rb_obj_class(obj), RSHAPE_CAPACITY(target_shape_id));
+ if (original_fields_obj) {
+ attr_index_t fields_count = RSHAPE_LEN(current_shape_id);
+ VALUE *fields = rb_imemo_fields_ptr(fields_obj);
+ MEMCPY(fields, rb_imemo_fields_ptr(original_fields_obj), VALUE, fields_count);
+ for (attr_index_t i = 0; i < fields_count; i++) {
+ RB_OBJ_WRITTEN(fields_obj, Qundef, fields[i]);
+ }
+ }
+ }
+
+ VALUE *table = rb_imemo_fields_ptr(fields_obj);
+ RB_OBJ_WRITE(fields_obj, &table[index], val);
+
+ if (RSHAPE_LEN(target_shape_id) > RSHAPE_LEN(current_shape_id)) {
+ existing = false;
+ RBASIC_SET_SHAPE_ID(fields_obj, target_shape_id);
+ }
+ }
+
+ generic_update_fields_obj(obj, fields_obj, original_fields_obj);
+
+ if (!existing) {
+ RBASIC_SET_SHAPE_ID(obj, target_shape_id);
+ }
- general_field_set(obj, target_shape_id, val, &fields_lookup,
- generic_ivar_set_shape_fields,
- generic_ivar_set_shape_resize_fields,
- generic_ivar_set_set_shape_id,
- generic_ivar_set_transition_too_complex,
- generic_ivar_set_too_complex_table);
+ RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == RBASIC_SHAPE_ID(fields_obj));
}
void
@@ -2156,8 +2167,8 @@ ivar_defined0(VALUE obj, ID id)
break;
case T_IMEMO:
- RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields));
- table = rb_imemo_class_fields_complex_tbl(obj);
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+ table = rb_imemo_fields_complex_tbl(obj);
break;
case T_OBJECT:
@@ -2165,11 +2176,10 @@ ivar_defined0(VALUE obj, ID id)
break;
default: {
- struct gen_fields_tbl *fields_tbl;
- if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- table = fields_tbl->as.complex.table;
+ VALUE fields_obj;
+ if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
+ table = rb_imemo_fields_complex_tbl(fields_obj);
}
- break;
}
}
@@ -2225,27 +2235,23 @@ iterate_over_shapes_callback(shape_id_t shape_id, void *data)
return ST_CONTINUE;
}
- VALUE *iv_list;
+ VALUE *fields;
switch (BUILTIN_TYPE(itr_data->obj)) {
case T_OBJECT:
RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj));
- iv_list = ROBJECT_FIELDS(itr_data->obj);
+ fields = ROBJECT_FIELDS(itr_data->obj);
break;
- case T_CLASS:
- case T_MODULE:
- rb_bug("Unreachable");
case T_IMEMO:
- RUBY_ASSERT(IMEMO_TYPE_P(itr_data->obj, imemo_class_fields));
+ RUBY_ASSERT(IMEMO_TYPE_P(itr_data->obj, imemo_fields));
RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj));
- iv_list = rb_imemo_class_fields_ptr(itr_data->obj);
+ fields = rb_imemo_fields_ptr(itr_data->obj);
break;
default:
- iv_list = itr_data->fields_tbl->as.shape.fields;
- break;
+ rb_bug("Unreachable");
}
- VALUE val = iv_list[RSHAPE_INDEX(shape_id)];
+ VALUE val = fields[RSHAPE_INDEX(shape_id)];
return itr_data->func(RSHAPE_EDGE_NAME(shape_id), val, itr_data->arg);
}
@@ -2287,33 +2293,9 @@ obj_fields_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, b
}
static void
-gen_fields_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, bool ivar_only)
-{
- struct gen_fields_tbl *fields_tbl;
- if (!rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) return;
-
- struct iv_itr_data itr_data = {
- .obj = obj,
- .fields_tbl = fields_tbl,
- .arg = arg,
- .func = func,
- .ivar_only = ivar_only,
- };
-
- shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
- if (rb_shape_too_complex_p(shape_id)) {
- rb_st_foreach(fields_tbl->as.complex.table, each_hash_iv, (st_data_t)&itr_data);
- }
- else {
- itr_data.fields = fields_tbl->as.shape.fields;
- iterate_over_shapes(shape_id, func, &itr_data);
- }
-}
-
-static void
-class_fields_each(VALUE fields_obj, rb_ivar_foreach_callback_func *func, st_data_t arg, bool ivar_only)
+imemo_fields_each(VALUE fields_obj, rb_ivar_foreach_callback_func *func, st_data_t arg, bool ivar_only)
{
- IMEMO_TYPE_P(fields_obj, imemo_class_fields);
+ RUBY_ASSERT(IMEMO_TYPE_P(fields_obj, imemo_fields));
struct iv_itr_data itr_data = {
.obj = fields_obj,
@@ -2324,10 +2306,10 @@ class_fields_each(VALUE fields_obj, rb_ivar_foreach_callback_func *func, st_data
shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
if (rb_shape_too_complex_p(shape_id)) {
- rb_st_foreach(rb_imemo_class_fields_complex_tbl(fields_obj), each_hash_iv, (st_data_t)&itr_data);
+ rb_st_foreach(rb_imemo_fields_complex_tbl(fields_obj), each_hash_iv, (st_data_t)&itr_data);
}
else {
- itr_data.fields = rb_imemo_class_fields_ptr(fields_obj);
+ itr_data.fields = rb_imemo_fields_ptr(fields_obj);
iterate_over_shapes(shape_id, func, &itr_data);
}
}
@@ -2335,8 +2317,8 @@ class_fields_each(VALUE fields_obj, rb_ivar_foreach_callback_func *func, st_data
void
rb_copy_generic_ivar(VALUE dest, VALUE obj)
{
- struct gen_fields_tbl *obj_fields_tbl;
- struct gen_fields_tbl *new_fields_tbl;
+ VALUE fields_obj;
+ VALUE new_fields_obj;
rb_check_frozen(dest);
@@ -2344,19 +2326,16 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
return;
}
- unsigned long src_num_ivs = rb_ivar_count(obj);
- if (!src_num_ivs) {
- goto clear;
- }
-
shape_id_t src_shape_id = rb_obj_shape_id(obj);
- if (rb_gen_fields_tbl_get(obj, 0, &obj_fields_tbl)) {
- if (gen_fields_tbl_count(obj, obj_fields_tbl) == 0)
+ if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
+ unsigned long src_num_ivs = rb_ivar_count(fields_obj);
+ if (!src_num_ivs) {
goto clear;
+ }
if (rb_shape_too_complex_p(src_shape_id)) {
- rb_shape_copy_complex_ivars(dest, obj, src_shape_id, obj_fields_tbl->as.complex.table);
+ rb_shape_copy_complex_ivars(dest, obj, src_shape_id, rb_imemo_fields_complex_tbl(fields_obj));
return;
}
@@ -2371,7 +2350,6 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
st_table *table = rb_st_init_numtable_with_size(src_num_ivs);
rb_obj_copy_ivs_to_hash_table(obj, table);
rb_obj_init_too_complex(dest, table);
-
return;
}
}
@@ -2381,25 +2359,19 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
return;
}
- uint32_t dest_capa = RSHAPE_CAPACITY(dest_shape_id);
- RUBY_ASSERT(dest_capa > 0);
- new_fields_tbl = xmalloc(gen_fields_tbl_bytes(dest_capa));
-
- VALUE *src_buf = obj_fields_tbl->as.shape.fields;
- VALUE *dest_buf = new_fields_tbl->as.shape.fields;
-
+ new_fields_obj = rb_imemo_fields_new(rb_obj_class(dest), RSHAPE_CAPACITY(dest_shape_id));
+ VALUE *src_buf = rb_imemo_fields_ptr(fields_obj);
+ VALUE *dest_buf = rb_imemo_fields_ptr(new_fields_obj);
rb_shape_copy_fields(dest, dest_buf, dest_shape_id, obj, src_buf, src_shape_id);
+ RBASIC_SET_SHAPE_ID(new_fields_obj, dest_shape_id);
- /*
- * c.fields_tbl may change in gen_fields_copy due to realloc,
- * no need to free
- */
RB_VM_LOCKING() {
generic_fields_tbl_no_ractor_check(dest);
- st_insert(generic_fields_tbl_no_ractor_check(obj), (st_data_t)dest, (st_data_t)new_fields_tbl);
+ st_insert(generic_fields_tbl_no_ractor_check(obj), (st_data_t)dest, (st_data_t)new_fields_obj);
+ RB_OBJ_WRITTEN(dest, Qundef, new_fields_obj);
}
- rb_obj_set_shape_id(dest, dest_shape_id);
+ RBASIC_SET_SHAPE_ID(dest, dest_shape_id);
}
return;
@@ -2427,8 +2399,8 @@ rb_field_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg,
if (SPECIAL_CONST_P(obj)) return;
switch (BUILTIN_TYPE(obj)) {
case T_IMEMO:
- if (IMEMO_TYPE_P(obj, imemo_class_fields)) {
- class_fields_each(obj, func, arg, ivar_only);
+ if (IMEMO_TYPE_P(obj, imemo_fields)) {
+ imemo_fields_each(obj, func, arg, ivar_only);
}
break;
case T_OBJECT:
@@ -2440,13 +2412,16 @@ rb_field_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg,
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
- class_fields_each(fields_obj, func, arg, ivar_only);
+ imemo_fields_each(fields_obj, func, arg, ivar_only);
}
}
break;
default:
if (rb_obj_exivar_p(obj)) {
- gen_fields_each(obj, func, arg, ivar_only);
+ VALUE fields_obj = 0;
+ if (!rb_gen_fields_tbl_get(obj, 0, &fields_obj)) return;
+
+ imemo_fields_each(fields_obj, func, arg, ivar_only);
}
break;
}
@@ -2468,6 +2443,7 @@ rb_ivar_count(VALUE obj)
case T_OBJECT:
iv_count = ROBJECT_FIELDS_COUNT(obj);
break;
+
case T_CLASS:
case T_MODULE:
{
@@ -2476,16 +2452,37 @@ rb_ivar_count(VALUE obj)
return 0;
}
if (rb_shape_obj_too_complex_p(fields_obj)) {
- return rb_st_table_size(rb_imemo_class_fields_complex_tbl(fields_obj));
+ iv_count = rb_st_table_size(rb_imemo_fields_complex_tbl(fields_obj));
+ }
+ else {
+ iv_count = RBASIC_FIELDS_COUNT(fields_obj);
}
- return RBASIC_FIELDS_COUNT(fields_obj);
}
+ break;
+
+ case T_IMEMO:
+ RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
+
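+ // imemo_fields carry their own shape ID, so their fields can be counted directly.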
+ if (rb_shape_obj_too_complex_p(obj)) {
+ iv_count = rb_st_table_size(rb_imemo_fields_complex_tbl(obj));
+ }
+ else {
+ iv_count = RBASIC_FIELDS_COUNT(obj);
+ }
+ break;
+
default:
if (rb_obj_exivar_p(obj)) {
- struct gen_fields_tbl *fields_tbl;
- if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- iv_count = gen_fields_tbl_count(obj, fields_tbl);
+ if (rb_shape_obj_too_complex_p(obj)) {
+ VALUE fields_obj;
+
+ if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
+ iv_count = rb_st_table_size(rb_imemo_fields_complex_tbl(fields_obj));
+ }
+ }
+ else {
+ iv_count = RBASIC_FIELDS_COUNT(obj);
}
}
break;
@@ -4690,7 +4687,7 @@ class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool conc
{
bool existing = true;
const VALUE original_fields_obj = fields_obj;
- fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_class_fields_new(klass, 1);
+ fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_fields_new(rb_singleton_class(klass), 1);
shape_id_t current_shape_id = RBASIC_SHAPE_ID(fields_obj);
shape_id_t next_shape_id = current_shape_id;
@@ -4711,9 +4708,9 @@ class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool conc
next_shape_id = rb_shape_transition_add_ivar(fields_obj, id);
if (UNLIKELY(rb_shape_too_complex_p(next_shape_id))) {
attr_index_t current_len = RSHAPE_LEN(current_shape_id);
- fields_obj = rb_imemo_class_fields_new_complex(klass, current_len + 1);
+ fields_obj = rb_imemo_fields_new_complex(rb_singleton_class(klass), current_len + 1);
if (current_len) {
- rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_class_fields_complex_tbl(fields_obj));
+ rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_fields_complex_tbl(fields_obj));
RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
}
goto too_complex;
@@ -4727,9 +4724,9 @@ class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool conc
// We allocate a new fields_obj even when concurrency isn't a concern
// so that we're embedded as long as possible.
- fields_obj = rb_imemo_class_fields_new(klass, next_capacity);
+ fields_obj = rb_imemo_fields_new(rb_singleton_class(klass), next_capacity);
if (original_fields_obj) {
- MEMCPY(rb_imemo_class_fields_ptr(fields_obj), rb_imemo_class_fields_ptr(original_fields_obj), VALUE, RSHAPE_LEN(current_shape_id));
+ MEMCPY(rb_imemo_fields_ptr(fields_obj), rb_imemo_fields_ptr(original_fields_obj), VALUE, RSHAPE_LEN(current_shape_id));
}
}
@@ -4737,7 +4734,7 @@ class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool conc
RUBY_ASSERT(index == (RSHAPE_LEN(next_shape_id) - 1));
}
- VALUE *fields = rb_imemo_class_fields_ptr(fields_obj);
+ VALUE *fields = rb_imemo_fields_ptr(fields_obj);
RB_OBJ_WRITE(fields_obj, &fields[index], val);
if (!existing) {
@@ -4749,7 +4746,7 @@ class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool conc
too_complex:
{
- st_table *table = rb_imemo_class_fields_complex_tbl(fields_obj);
+ st_table *table = rb_imemo_fields_complex_tbl(fields_obj);
existing = st_insert(table, (st_data_t)id, (st_data_t)val);
RB_OBJ_WRITTEN(fields_obj, Qundef, val);
@@ -4777,13 +4774,14 @@ rb_class_ivar_set(VALUE obj, ID id, VALUE val)
if (new_fields_obj != original_fields_obj) {
RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, new_fields_obj);
-
- // TODO: What should we set as the T_CLASS shape_id?
- // In most case we can replicate the single `fields_obj` shape
- // but in namespaced case?
- // Perhaps INVALID_SHAPE_ID?
- RBASIC_SET_SHAPE_ID(obj, RBASIC_SHAPE_ID(new_fields_obj));
}
+
+ // TODO: What should we set as the T_CLASS shape_id?
+ // In most cases we can replicate the single `fields_obj` shape,
+ // but what about the namespaced case?
+ // Perhaps INVALID_SHAPE_ID?
+ RBASIC_SET_SHAPE_ID(obj, RBASIC_SHAPE_ID(new_fields_obj));
+
return existing;
}
diff --git a/variable.h b/variable.h
index 54b7fc5461..82a79c63ce 100644
--- a/variable.h
+++ b/variable.h
@@ -12,18 +12,7 @@
#include "shape.h"
-struct gen_fields_tbl {
- union {
- struct {
- VALUE fields[1];
- } shape;
- struct {
- st_table *table;
- } complex;
- } as;
-};
-
-int rb_ivar_generic_fields_tbl_lookup(VALUE obj, struct gen_fields_tbl **);
+int rb_ivar_generic_fields_tbl_lookup(VALUE obj, VALUE *);
void rb_copy_complex_ivars(VALUE dest, VALUE obj, shape_id_t src_shape_id, st_table *fields_table);
void rb_free_rb_global_tbl(void);
diff --git a/vm.c b/vm.c
index 7b0775fbb3..a8822239cf 100644
--- a/vm.c
+++ b/vm.c
@@ -3675,10 +3675,10 @@ rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
void
rb_ec_clear_vm_stack(rb_execution_context_t *ec)
{
- rb_ec_set_vm_stack(ec, NULL, 0);
-
- // Avoid dangling pointers:
+ // Set cfp to NULL before clearing the stack in case `thread_profile_frames`
+ // gets called in the middle of `rb_ec_set_vm_stack` via a signal handler.
ec->cfp = NULL;
+ rb_ec_set_vm_stack(ec, NULL, 0);
}
static void
diff --git a/vm_backtrace.c b/vm_backtrace.c
index 9046f4aa29..68fc2b987b 100644
--- a/vm_backtrace.c
+++ b/vm_backtrace.c
@@ -262,6 +262,15 @@ retry:
}
}
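+// Return true if the iseq comes from an "<internal:...>" path (core library code).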
+static bool
+is_internal_location(const rb_iseq_t *iseq)
+{
+ static const char prefix[] = "<internal:";
+ const size_t prefix_len = sizeof(prefix) - 1;
+ VALUE file = rb_iseq_path(iseq);
+ return strncmp(prefix, RSTRING_PTR(file), prefix_len) == 0;
+}
+
// Return true if a given location is a C method or supposed to behave like one.
static inline bool
location_cfunc_p(rb_backtrace_location_t *loc)
@@ -272,7 +281,7 @@ location_cfunc_p(rb_backtrace_location_t *loc)
case VM_METHOD_TYPE_CFUNC:
return true;
case VM_METHOD_TYPE_ISEQ:
- return rb_iseq_attr_p(loc->cme->def->body.iseq.iseqptr, BUILTIN_ATTR_C_TRACE);
+ return is_internal_location(loc->cme->def->body.iseq.iseqptr);
default:
return false;
}
@@ -605,15 +614,6 @@ backtrace_size(const rb_execution_context_t *ec)
}
static bool
-is_internal_location(const rb_control_frame_t *cfp)
-{
- static const char prefix[] = "<internal:";
- const size_t prefix_len = sizeof(prefix) - 1;
- VALUE file = rb_iseq_path(cfp->iseq);
- return strncmp(prefix, RSTRING_PTR(file), prefix_len) == 0;
-}
-
-static bool
is_rescue_or_ensure_frame(const rb_control_frame_t *cfp)
{
enum rb_iseq_type type = ISEQ_BODY(cfp->iseq)->type;
@@ -621,11 +621,11 @@ is_rescue_or_ensure_frame(const rb_control_frame_t *cfp)
}
static void
-bt_update_cfunc_loc(unsigned long cfunc_counter, rb_backtrace_location_t *cfunc_loc, const rb_iseq_t *iseq, const VALUE *pc)
+bt_backpatch_loc(unsigned long backpatch_counter, rb_backtrace_location_t *loc, const rb_iseq_t *iseq, const VALUE *pc)
{
- for (; cfunc_counter > 0; cfunc_counter--, cfunc_loc--) {
- cfunc_loc->iseq = iseq;
- cfunc_loc->pc = pc;
+ for (; backpatch_counter > 0; backpatch_counter--, loc--) {
+ loc->iseq = iseq;
+ loc->pc = pc;
}
}
@@ -648,7 +648,7 @@ rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long start_fram
rb_backtrace_t *bt = NULL;
VALUE btobj = Qnil;
rb_backtrace_location_t *loc = NULL;
- unsigned long cfunc_counter = 0;
+ unsigned long backpatch_counter = 0;
bool skip_next_frame = FALSE;
// In the case the thread vm_stack or cfp is not initialized, there is no backtrace.
@@ -691,26 +691,36 @@ rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long start_fram
if (start_frame > 0) {
start_frame--;
}
- else if (!(skip_internal && is_internal_location(cfp))) {
+ else {
+ bool internal = is_internal_location(cfp->iseq);
+ if (skip_internal && internal) continue;
if (!skip_next_frame) {
const rb_iseq_t *iseq = cfp->iseq;
const VALUE *pc = cfp->pc;
+ if (internal && backpatch_counter > 0) {
+ // To keep only one internal frame, discard the previous backpatch frames
+ bt->backtrace_size -= backpatch_counter;
+ backpatch_counter = 0;
+ }
loc = &bt->backtrace[bt->backtrace_size++];
RB_OBJ_WRITE(btobj, &loc->cme, rb_vm_frame_method_entry(cfp));
- // Ruby methods with `Primitive.attr! :c_trace` should behave like C methods
- if (rb_iseq_attr_p(cfp->iseq, BUILTIN_ATTR_C_TRACE)) {
- loc->iseq = NULL;
- loc->pc = NULL;
- cfunc_counter++;
+ // internal frames (`<internal:...>`) should behave like C methods
+ if (internal) {
+ // Typically, these iseq and pc values are not needed because they will be backpatched later.
+ // But when the call stack starts with an internal frame (e.g., prelude.rb),
+ // they will be used to show the `<internal:...>` location.
+ RB_OBJ_WRITE(btobj, &loc->iseq, iseq);
+ loc->pc = pc;
+ backpatch_counter++;
}
else {
RB_OBJ_WRITE(btobj, &loc->iseq, iseq);
loc->pc = pc;
- bt_update_cfunc_loc(cfunc_counter, loc-1, iseq, pc);
+ bt_backpatch_loc(backpatch_counter, loc-1, iseq, pc);
if (do_yield) {
- bt_yield_loc(loc - cfunc_counter, cfunc_counter+1, btobj);
+ bt_yield_loc(loc - backpatch_counter, backpatch_counter+1, btobj);
}
- cfunc_counter = 0;
+ backpatch_counter = 0;
}
}
skip_next_frame = is_rescue_or_ensure_frame(cfp);
@@ -727,21 +737,21 @@ rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long start_fram
RB_OBJ_WRITE(btobj, &loc->cme, rb_vm_frame_method_entry(cfp));
loc->iseq = NULL;
loc->pc = NULL;
- cfunc_counter++;
+ backpatch_counter++;
}
}
}
// When a backtrace entry corresponds to a method defined in C (e.g. rb_define_method), the reported file:line
// is the one of the caller Ruby frame, so if the last entry is a C frame we find the caller Ruby frame here.
- if (cfunc_counter > 0) {
+ if (backpatch_counter > 0) {
for (; cfp != end_cfp; cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)) {
- if (cfp->iseq && cfp->pc && !(skip_internal && is_internal_location(cfp))) {
+ if (cfp->iseq && cfp->pc && !(skip_internal && is_internal_location(cfp->iseq))) {
VM_ASSERT(!skip_next_frame); // ISEQ_TYPE_RESCUE/ISEQ_TYPE_ENSURE should have a caller Ruby ISEQ, not a cfunc
- bt_update_cfunc_loc(cfunc_counter, loc, cfp->iseq, cfp->pc);
+ bt_backpatch_loc(backpatch_counter, loc, cfp->iseq, cfp->pc);
RB_OBJ_WRITTEN(btobj, Qundef, cfp->iseq);
if (do_yield) {
- bt_yield_loc(loc - cfunc_counter, cfunc_counter, btobj);
+ bt_yield_loc(loc - backpatch_counter, backpatch_counter, btobj);
}
break;
}
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 32641d2b5e..2fe5e26928 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1252,16 +1252,18 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
if (!fields_obj) {
return default_value;
}
- ivar_list = rb_imemo_class_fields_ptr(fields_obj);
- shape_id = rb_obj_shape_id(fields_obj);
+ ivar_list = rb_imemo_fields_ptr(fields_obj);
+ shape_id = RBASIC_SHAPE_ID_FOR_READ(fields_obj);
break;
}
default:
if (rb_obj_exivar_p(obj)) {
- struct gen_fields_tbl *fields_tbl;
- rb_gen_fields_tbl_get(obj, id, &fields_tbl);
- ivar_list = fields_tbl->as.shape.fields;
+ VALUE fields_obj = 0;
+ if (!rb_gen_fields_tbl_get(obj, id, &fields_obj)) {
+ return default_value;
+ }
+ ivar_list = rb_imemo_fields_ptr(fields_obj);
}
else {
return default_value;
@@ -1325,7 +1327,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
- table = rb_imemo_class_fields_complex_tbl(fields_obj);
+ table = rb_imemo_fields_complex_tbl(fields_obj);
break;
case T_OBJECT:
@@ -1333,9 +1335,9 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
break;
default: {
- struct gen_fields_tbl *fields_tbl;
- if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
- table = fields_tbl->as.complex.table;
+ VALUE fields_obj;
+ if (rb_gen_fields_tbl_get(obj, 0, &fields_obj)) {
+ table = rb_imemo_fields_complex_tbl(fields_obj);
}
break;
}
@@ -1456,7 +1458,7 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i
{
shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
- struct gen_fields_tbl *fields_tbl = 0;
+ VALUE fields_obj = 0;
// Cache hit case
if (shape_id == dest_shape_id) {
@@ -1474,13 +1476,13 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i
return Qundef;
}
- rb_gen_fields_tbl_get(obj, 0, &fields_tbl);
+ rb_gen_fields_tbl_get(obj, 0, &fields_obj);
if (shape_id != dest_shape_id) {
RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
}
- RB_OBJ_WRITE(obj, &fields_tbl->as.shape.fields[index], val);
+ RB_OBJ_WRITE(obj, &rb_imemo_fields_ptr(fields_obj)[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 1d7ffca165..8aa874f4dd 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -410,7 +410,7 @@ pub const imemo_parser_strterm: imemo_type = 10;
pub const imemo_callinfo: imemo_type = 11;
pub const imemo_callcache: imemo_type = 12;
pub const imemo_constcache: imemo_type = 13;
-pub const imemo_class_fields: imemo_type = 14;
+pub const imemo_fields: imemo_type = 14;
pub type imemo_type = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index 286f3f39b4..b1869f71c0 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -278,6 +278,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Insn::GetIvar { self_val, id, state: _ } => gen_getivar(asm, opnd!(self_val), *id),
Insn::SetGlobal { id, val, state: _ } => gen_setglobal(asm, *id, opnd!(val)),
Insn::GetGlobal { id, state: _ } => gen_getglobal(asm, *id),
+ Insn::GetConstantPath { ic, state } => gen_get_constant_path(asm, *ic, &function.frame_state(*state)),
Insn::SetIvar { self_val, id, val, state: _ } => return gen_setivar(asm, opnd!(self_val), *id, opnd!(val)),
Insn::SideExit { state } => return gen_side_exit(jit, asm, &function.frame_state(*state)),
Insn::PutSpecialObject { value_type } => gen_putspecialobject(asm, *value_type),
@@ -295,6 +296,21 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Some(())
}
+fn gen_get_constant_path(asm: &mut Assembler, ic: *const iseq_inline_constant_cache, state: &FrameState) -> Opnd {
+ unsafe extern "C" {
+ fn rb_vm_opt_getconstant_path(ec: EcPtr, cfp: CfpPtr, ic: *const iseq_inline_constant_cache) -> VALUE;
+ }
+
+ // Save PC since the call can allocate an IC
+ gen_save_pc(asm, state);
+
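+ // Call the interpreter's implementation, which consults and fills the inline cache.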
+ let val = asm.ccall(
+ rb_vm_opt_getconstant_path as *const u8,
+ vec![EC, CFP, Opnd::const_ptr(ic as *const u8)],
+ );
+ val
+}
+
/// Lowering for [`Insn::CCall`]. This is a low-level raw call that doesn't know
/// anything about the callee, so handling for e.g. GC safety is dealt with elsewhere.
fn gen_ccall(jit: &mut JITState, asm: &mut Assembler, cfun: *const u8, args: &[InsnId]) -> Option<lir::Opnd> {
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index 5fb5c2ec02..d5e54955c8 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -227,7 +227,7 @@ pub const imemo_parser_strterm: imemo_type = 10;
pub const imemo_callinfo: imemo_type = 11;
pub const imemo_callcache: imemo_type = 12;
pub const imemo_constcache: imemo_type = 13;
-pub const imemo_class_fields: imemo_type = 14;
+pub const imemo_fields: imemo_type = 14;
pub type imemo_type = u32;
pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0;
pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1;
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index c67f25451a..fbca1f4418 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -185,7 +185,9 @@ impl<'a> std::fmt::Display for InvariantPrinter<'a> {
write!(f, "BOPRedefined(")?;
match klass {
INTEGER_REDEFINED_OP_FLAG => write!(f, "INTEGER_REDEFINED_OP_FLAG")?,
+ STRING_REDEFINED_OP_FLAG => write!(f, "STRING_REDEFINED_OP_FLAG")?,
ARRAY_REDEFINED_OP_FLAG => write!(f, "ARRAY_REDEFINED_OP_FLAG")?,
+ HASH_REDEFINED_OP_FLAG => write!(f, "HASH_REDEFINED_OP_FLAG")?,
_ => write!(f, "{klass}")?,
}
write!(f, ", ")?;
@@ -201,6 +203,8 @@ impl<'a> std::fmt::Display for InvariantPrinter<'a> {
BOP_LE => write!(f, "BOP_LE")?,
BOP_GT => write!(f, "BOP_GT")?,
BOP_GE => write!(f, "BOP_GE")?,
+ BOP_FREEZE => write!(f, "BOP_FREEZE")?,
+ BOP_UMINUS => write!(f, "BOP_UMINUS")?,
BOP_MAX => write!(f, "BOP_MAX")?,
_ => write!(f, "{bop}")?,
}
@@ -427,7 +431,7 @@ pub enum Insn {
/// Return C `true` if `val` is `Qnil`, else `false`.
IsNil { val: InsnId },
Defined { op_type: usize, obj: VALUE, pushval: VALUE, v: InsnId },
- GetConstantPath { ic: *const iseq_inline_constant_cache },
+ GetConstantPath { ic: *const iseq_inline_constant_cache, state: InsnId },
/// Get a global variable named `id`
GetGlobal { id: ID, state: InsnId },
@@ -647,7 +651,7 @@ impl<'a> std::fmt::Display for InsnPrinter<'a> {
Insn::GuardType { val, guard_type, .. } => { write!(f, "GuardType {val}, {}", guard_type.print(self.ptr_map)) },
Insn::GuardBitEquals { val, expected, .. } => { write!(f, "GuardBitEquals {val}, {}", expected.print(self.ptr_map)) },
Insn::PatchPoint(invariant) => { write!(f, "PatchPoint {}", invariant.print(self.ptr_map)) },
- Insn::GetConstantPath { ic } => { write!(f, "GetConstantPath {:p}", self.ptr_map.map_ptr(ic)) },
+ Insn::GetConstantPath { ic, .. } => { write!(f, "GetConstantPath {:p}", self.ptr_map.map_ptr(ic)) },
Insn::CCall { cfun, args, name, return_type: _, elidable: _ } => {
write!(f, "CCall {}@{:p}", name.contents_lossy(), self.ptr_map.map_ptr(cfun))?;
for arg in args {
@@ -1250,6 +1254,38 @@ impl Function {
}
}
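+ /// When the receiver is a compile-time frozen object, the call is a no-op:
+ /// insert a BOPRedefined patch point and make the result equal to the receiver.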
+ fn rewrite_if_frozen(&mut self, block: BlockId, orig_insn_id: InsnId, self_val: InsnId, klass: u32, bop: u32) {
+ let self_type = self.type_of(self_val);
+ if let Some(obj) = self_type.ruby_object() {
+ if obj.is_frozen() {
+ self.push_insn(block, Insn::PatchPoint(Invariant::BOPRedefined { klass, bop }));
+ self.make_equal_to(orig_insn_id, self_val);
+ return;
+ }
+ }
+ self.push_insn_id(block, orig_insn_id);
+ }
+
+ fn try_rewrite_freeze(&mut self, block: BlockId, orig_insn_id: InsnId, self_val: InsnId) {
+ if self.is_a(self_val, types::StringExact) {
+ self.rewrite_if_frozen(block, orig_insn_id, self_val, STRING_REDEFINED_OP_FLAG, BOP_FREEZE);
+ } else if self.is_a(self_val, types::ArrayExact) {
+ self.rewrite_if_frozen(block, orig_insn_id, self_val, ARRAY_REDEFINED_OP_FLAG, BOP_FREEZE);
+ } else if self.is_a(self_val, types::HashExact) {
+ self.rewrite_if_frozen(block, orig_insn_id, self_val, HASH_REDEFINED_OP_FLAG, BOP_FREEZE);
+ } else {
+ self.push_insn_id(block, orig_insn_id);
+ }
+ }
+
+ fn try_rewrite_uminus(&mut self, block: BlockId, orig_insn_id: InsnId, self_val: InsnId) {
+ if self.is_a(self_val, types::StringExact) {
+ self.rewrite_if_frozen(block, orig_insn_id, self_val, STRING_REDEFINED_OP_FLAG, BOP_UMINUS);
+ } else {
+ self.push_insn_id(block, orig_insn_id);
+ }
+ }
+
/// Rewrite SendWithoutBlock opcodes into SendWithoutBlockDirect opcodes if we know the target
/// ISEQ statically. This removes run-time method lookups and opens the door for inlining.
fn optimize_direct_sends(&mut self) {
@@ -1280,6 +1316,10 @@ impl Function {
self.try_rewrite_fixnum_op(block, insn_id, &|left, right| Insn::FixnumGt { left, right }, BOP_GT, self_val, args[0], state),
Insn::SendWithoutBlock { self_val, call_info: CallInfo { method_name }, args, state, .. } if method_name == ">=" && args.len() == 1 =>
self.try_rewrite_fixnum_op(block, insn_id, &|left, right| Insn::FixnumGe { left, right }, BOP_GE, self_val, args[0], state),
+ Insn::SendWithoutBlock { self_val, call_info: CallInfo { method_name }, args, .. } if method_name == "freeze" && args.len() == 0 =>
+ self.try_rewrite_freeze(block, insn_id, self_val),
+ Insn::SendWithoutBlock { self_val, call_info: CallInfo { method_name }, args, .. } if method_name == "-@" && args.len() == 0 =>
+ self.try_rewrite_uminus(block, insn_id, self_val),
Insn::SendWithoutBlock { mut self_val, call_info, cd, args, state } => {
let frame_state = self.frame_state(state);
let (klass, guard_equal_to) = if let Some(klass) = self.type_of(self_val).runtime_exact_ruby_class() {
@@ -1315,7 +1355,7 @@ impl Function {
let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { self_val, call_info, cd, cme, iseq, args, state });
self.make_equal_to(insn_id, send_direct);
}
- Insn::GetConstantPath { ic } => {
+ Insn::GetConstantPath { ic, .. } => {
let idlist: *const ID = unsafe { (*ic).segments };
let ice = unsafe { (*ic).entry };
if ice.is_null() {
@@ -1602,10 +1642,14 @@ impl Function {
if necessary[insn_id.0] { continue; }
necessary[insn_id.0] = true;
match self.find(insn_id) {
- Insn::Const { .. } | Insn::Param { .. }
- | Insn::PatchPoint(..) | Insn::GetConstantPath { .. }
+ Insn::Const { .. }
+ | Insn::Param { .. }
+ | Insn::PatchPoint(..)
| Insn::PutSpecialObject { .. } =>
{}
+ Insn::GetConstantPath { ic: _, state } => {
+ worklist.push_back(state);
+ }
Insn::ArrayMax { elements, state }
| Insn::NewArray { elements, state } => {
worklist.extend(elements);
@@ -2269,7 +2313,8 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> {
}
YARVINSN_opt_getconstant_path => {
let ic = get_arg(pc, 0).as_ptr();
- state.stack_push(fun.push_insn(block, Insn::GetConstantPath { ic }));
+ let snapshot = fun.push_insn(block, Insn::Snapshot { state: exit_state });
+ state.stack_push(fun.push_insn(block, Insn::GetConstantPath { ic, state: snapshot }));
}
YARVINSN_branchunless => {
let offset = get_arg(pc, 0).as_i64();
@@ -2421,6 +2466,34 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> {
let send = fun.push_insn(block, Insn::SendWithoutBlock { self_val: recv, call_info: CallInfo { method_name }, cd, args, state: exit_id });
state.stack_push(send);
}
+ YARVINSN_opt_hash_freeze |
+ YARVINSN_opt_ary_freeze |
+ YARVINSN_opt_str_freeze |
+ YARVINSN_opt_str_uminus => {
+ // NB: these instructions have the recv for the call at get_arg(0)
+ let cd: *const rb_call_data = get_arg(pc, 1).as_ptr();
+ let call_info = unsafe { rb_get_call_data_ci(cd) };
+ if unknown_call_type(unsafe { rb_vm_ci_flag(call_info) }) {
+ // Unknown call type; side-exit into the interpreter
+ let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state });
+ fun.push_insn(block, Insn::SideExit { state: exit_id });
+ break; // End the block
+ }
+ let argc = unsafe { vm_ci_argc((*cd).ci) };
+ let name = insn_name(opcode as usize);
+ assert_eq!(0, argc, "{name} should not have args");
+ let args = vec![];
+
+ let method_name = unsafe {
+ let mid = rb_vm_ci_mid(call_info);
+ mid.contents_lossy().into_owned()
+ };
+
+ let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state });
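+ // The receiver is the frozen literal stored in the instruction operand.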
+ let recv = fun.push_insn(block, Insn::Const { val: Const::Value(get_arg(pc, 0)) });
+ let send = fun.push_insn(block, Insn::SendWithoutBlock { self_val: recv, call_info: CallInfo { method_name }, cd, args, state: exit_id });
+ state.stack_push(send);
+ }
YARVINSN_leave => {
fun.push_insn(block, Insn::Return { val: state.stack_pop()? });
@@ -3099,6 +3172,62 @@ mod tests {
}
#[test]
+ fn test_opt_hash_freeze() {
+ eval("
+ def test = {}.freeze
+ ");
+ assert_method_hir_with_opcode("test", YARVINSN_opt_hash_freeze, expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:HashExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v4:BasicObject = SendWithoutBlock v3, :freeze
+ Return v4
+ "#]]);
+ }
+
+ #[test]
+ fn test_opt_ary_freeze() {
+ eval("
+ def test = [].freeze
+ ");
+ assert_method_hir_with_opcode("test", YARVINSN_opt_ary_freeze, expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:ArrayExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v4:BasicObject = SendWithoutBlock v3, :freeze
+ Return v4
+ "#]]);
+ }
+
+ #[test]
+ fn test_opt_str_freeze() {
+ eval("
+ def test = ''.freeze
+ ");
+ assert_method_hir_with_opcode("test", YARVINSN_opt_str_freeze, expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v4:BasicObject = SendWithoutBlock v3, :freeze
+ Return v4
+ "#]]);
+ }
+
+ #[test]
+ fn test_opt_str_uminus() {
+ eval("
+ def test = -''
+ ");
+ assert_method_hir_with_opcode("test", YARVINSN_opt_str_uminus, expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v4:BasicObject = SendWithoutBlock v3, :-@
+ Return v4
+ "#]]);
+ }
+
+ #[test]
fn test_setlocal_getlocal() {
eval("
def test
@@ -3621,14 +3750,14 @@ mod tests {
assert_method_hir_with_opcode("test", YARVINSN_opt_new, expect![[r#"
fn test:
bb0(v0:BasicObject):
- v2:BasicObject = GetConstantPath 0x1000
- v3:NilClassExact = Const Value(nil)
- Jump bb1(v0, v3, v2)
- bb1(v5:BasicObject, v6:NilClassExact, v7:BasicObject):
- v10:BasicObject = SendWithoutBlock v7, :new
- Jump bb2(v5, v10, v6)
- bb2(v12:BasicObject, v13:BasicObject, v14:NilClassExact):
- Return v13
+ v3:BasicObject = GetConstantPath 0x1000
+ v4:NilClassExact = Const Value(nil)
+ Jump bb1(v0, v4, v3)
+ bb1(v6:BasicObject, v7:NilClassExact, v8:BasicObject):
+ v11:BasicObject = SendWithoutBlock v8, :new
+ Jump bb2(v6, v11, v7)
+ bb2(v13:BasicObject, v14:BasicObject, v15:NilClassExact):
+ Return v14
"#]]);
}
@@ -5031,9 +5160,9 @@ mod opt_tests {
assert_optimized_method_hir("test", expect![[r#"
fn test:
bb0(v0:BasicObject):
- v2:BasicObject = GetConstantPath 0x1000
- v3:Fixnum[5] = Const Value(5)
- Return v3
+ v3:BasicObject = GetConstantPath 0x1000
+ v4:Fixnum[5] = Const Value(5)
+ Return v4
"#]]);
}
@@ -5102,8 +5231,8 @@ mod opt_tests {
PatchPoint SingleRactorMode
PatchPoint StableConstantNames(0x1000, M)
PatchPoint MethodRedefined(Module@0x1008, name@0x1010)
- v6:Fixnum[1] = Const Value(1)
- Return v6
+ v7:Fixnum[1] = Const Value(1)
+ Return v7
"#]]);
}
@@ -5220,8 +5349,8 @@ mod opt_tests {
assert_optimized_method_hir("test", expect![[r#"
fn test:
bb0(v0:BasicObject):
- v2:BasicObject = GetConstantPath 0x1000
- Return v2
+ v3:BasicObject = GetConstantPath 0x1000
+ Return v3
"#]]);
}
@@ -5235,8 +5364,8 @@ mod opt_tests {
assert_optimized_method_hir("test", expect![[r#"
fn test:
bb0(v0:BasicObject):
- v2:BasicObject = GetConstantPath 0x1000
- Return v2
+ v3:BasicObject = GetConstantPath 0x1000
+ Return v3
"#]]);
}
@@ -5251,8 +5380,8 @@ mod opt_tests {
bb0(v0:BasicObject):
PatchPoint SingleRactorMode
PatchPoint StableConstantNames(0x1000, Kernel)
- v6:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
- Return v6
+ v7:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
+ Return v7
"#]]);
}
@@ -5273,8 +5402,8 @@ mod opt_tests {
bb0(v0:BasicObject):
PatchPoint SingleRactorMode
PatchPoint StableConstantNames(0x1000, Foo::Bar::C)
- v6:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
- Return v6
+ v7:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
+ Return v7
"#]]);
}
@@ -5290,14 +5419,14 @@ mod opt_tests {
bb0(v0:BasicObject):
PatchPoint SingleRactorMode
PatchPoint StableConstantNames(0x1000, C)
- v19:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
- v3:NilClassExact = Const Value(nil)
- Jump bb1(v0, v3, v19)
- bb1(v5:BasicObject, v6:NilClassExact, v7:BasicObject[VALUE(0x1008)]):
- v10:BasicObject = SendWithoutBlock v7, :new
- Jump bb2(v5, v10, v6)
- bb2(v12:BasicObject, v13:BasicObject, v14:NilClassExact):
- Return v13
+ v20:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
+ v4:NilClassExact = Const Value(nil)
+ Jump bb1(v0, v4, v20)
+ bb1(v6:BasicObject, v7:NilClassExact, v8:BasicObject[VALUE(0x1008)]):
+ v11:BasicObject = SendWithoutBlock v8, :new
+ Jump bb2(v6, v11, v7)
+ bb2(v13:BasicObject, v14:BasicObject, v15:NilClassExact):
+ Return v14
"#]]);
}
@@ -5317,15 +5446,15 @@ mod opt_tests {
bb0(v0:BasicObject):
PatchPoint SingleRactorMode
PatchPoint StableConstantNames(0x1000, C)
- v21:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
- v3:NilClassExact = Const Value(nil)
- v4:Fixnum[1] = Const Value(1)
- Jump bb1(v0, v3, v21, v4)
- bb1(v6:BasicObject, v7:NilClassExact, v8:BasicObject[VALUE(0x1008)], v9:Fixnum[1]):
- v12:BasicObject = SendWithoutBlock v8, :new, v9
- Jump bb2(v6, v12, v7)
- bb2(v14:BasicObject, v15:BasicObject, v16:NilClassExact):
- Return v15
+ v22:BasicObject[VALUE(0x1008)] = Const Value(VALUE(0x1008))
+ v4:NilClassExact = Const Value(nil)
+ v5:Fixnum[1] = Const Value(1)
+ Jump bb1(v0, v4, v22, v5)
+ bb1(v7:BasicObject, v8:NilClassExact, v9:BasicObject[VALUE(0x1008)], v10:Fixnum[1]):
+ v13:BasicObject = SendWithoutBlock v9, :new, v10
+ Jump bb2(v7, v13, v8)
+ bb2(v15:BasicObject, v16:BasicObject, v17:NilClassExact):
+ Return v16
"#]]);
}
@@ -5385,4 +5514,228 @@ mod opt_tests {
Return v2
"#]]);
}
+
+ #[test]
+ fn test_elide_freeze_with_frozen_hash() {
+ eval("
+ def test = {}.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:HashExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(HASH_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_freeze_with_refrozen_hash() {
+ eval("
+ def test = {}.freeze.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:HashExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(HASH_REDEFINED_OP_FLAG, BOP_FREEZE)
+ PatchPoint BOPRedefined(HASH_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_with_unfrozen_hash() {
+ eval("
+ def test = {}.dup.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:HashExact = NewHash
+ v5:BasicObject = SendWithoutBlock v3, :dup
+ v7:BasicObject = SendWithoutBlock v5, :freeze
+ Return v7
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_hash_with_args() {
+ eval("
+ def test = {}.freeze(nil)
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:HashExact = NewHash
+ v4:NilClassExact = Const Value(nil)
+ v6:BasicObject = SendWithoutBlock v3, :freeze, v4
+ Return v6
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_freeze_with_frozen_ary() {
+ eval("
+ def test = [].freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:ArrayExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(ARRAY_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_freeze_with_refrozen_ary() {
+ eval("
+ def test = [].freeze.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:ArrayExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(ARRAY_REDEFINED_OP_FLAG, BOP_FREEZE)
+ PatchPoint BOPRedefined(ARRAY_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_with_unfrozen_ary() {
+ eval("
+ def test = [].dup.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:ArrayExact = NewArray
+ v5:BasicObject = SendWithoutBlock v3, :dup
+ v7:BasicObject = SendWithoutBlock v5, :freeze
+ Return v7
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_ary_with_args() {
+ eval("
+ def test = [].freeze(nil)
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:ArrayExact = NewArray
+ v4:NilClassExact = Const Value(nil)
+ v6:BasicObject = SendWithoutBlock v3, :freeze, v4
+ Return v6
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_freeze_with_frozen_str() {
+ eval("
+ def test = ''.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_freeze_with_refrozen_str() {
+ eval("
+ def test = ''.freeze.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_FREEZE)
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_FREEZE)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_with_unfrozen_str() {
+ eval("
+ def test = ''.dup.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v2:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v3:StringExact = StringCopy v2
+ v5:BasicObject = SendWithoutBlock v3, :dup
+ v7:BasicObject = SendWithoutBlock v5, :freeze
+ Return v7
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_freeze_str_with_args() {
+ eval("
+ def test = ''.freeze(nil)
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v2:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v3:StringExact = StringCopy v2
+ v4:NilClassExact = Const Value(nil)
+ v6:BasicObject = SendWithoutBlock v3, :freeze, v4
+ Return v6
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_uminus_with_frozen_str() {
+ eval("
+ def test = -''
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_UMINUS)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_elide_uminus_with_refrozen_str() {
+ eval("
+ def test = -''.freeze
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v3:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_FREEZE)
+ PatchPoint BOPRedefined(STRING_REDEFINED_OP_FLAG, BOP_UMINUS)
+ Return v3
+ "#]]);
+ }
+
+ #[test]
+ fn test_no_elide_uminus_with_unfrozen_str() {
+ eval("
+ def test = -''.dup
+ ");
+ assert_optimized_method_hir("test", expect![[r#"
+ fn test:
+ bb0(v0:BasicObject):
+ v2:StringExact[VALUE(0x1000)] = Const Value(VALUE(0x1000))
+ v3:StringExact = StringCopy v2
+ v5:BasicObject = SendWithoutBlock v3, :dup
+ v7:BasicObject = SendWithoutBlock v5, :-@
+ Return v7
+ "#]]);
+ }
}