summaryrefslogtreecommitdiff
path: root/imemo.c
diff options
context:
space:
mode:
authorJean Boussier <[email protected]>2025-05-22 14:01:46 +0200
committerJean Boussier <[email protected]>2025-06-12 07:58:16 +0200
commit3abdd4241fd5231a5711ce1b087d660c667ef30d (patch)
treeddcdd184ca6720bac671cf296a5b7474a22477f5 /imemo.c
parent166ff187bd2a84fddd7a633bdbdbcd4ae393c91e (diff)
Turn `rb_classext_t.fields` into a T_IMEMO/class_fields
This behaves almost exactly like a T_OBJECT; the layout is entirely compatible. This aims to solve two problems. First, it solves the problem of namespaced classes having a single `shape_id`. Now each namespaced classext has an object that can hold the namespace-specific shape. Second, it opens the door to later making class instance variable writes atomic, hence being able to read class variables without locking the VM. In the future, in multi-ractor mode, we can do the write on a copy of the `fields_obj` and then atomically swap it. Considerations: - Right now the `RClass` shape_id is always synchronized, but with namespaces we should likely mark classes that have multiple namespaces with a specific shape flag.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/13411
Diffstat (limited to 'imemo.c')
-rw-r--r--imemo.c106
1 files changed, 104 insertions, 2 deletions
diff --git a/imemo.c b/imemo.c
index 2245434e21..6477be9d78 100644
--- a/imemo.c
+++ b/imemo.c
@@ -3,6 +3,7 @@
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
+#include "internal/st.h"
#include "vm_callinfo.h"
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
@@ -29,10 +30,10 @@ rb_imemo_name(enum imemo_type type)
IMEMO_NAME(svar);
IMEMO_NAME(throw_data);
IMEMO_NAME(tmpbuf);
+ IMEMO_NAME(class_fields);
#undef IMEMO_NAME
- default:
- rb_bug("unreachable");
}
+ rb_bug("unreachable");
}
/* =========================================================================
@@ -109,6 +110,62 @@ rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
return tmpbuf;
}
+static VALUE
+imemo_class_fields_new(VALUE klass, size_t capa)
+{
+ size_t embedded_size = offsetof(struct rb_class_fields, as.embed) + capa * sizeof(VALUE);
+ if (rb_gc_size_allocatable_p(embedded_size)) {
+ VALUE fields = rb_imemo_new(imemo_class_fields, klass, embedded_size);
+ RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_class_fields));
+ return fields;
+ }
+ else {
+ VALUE fields = rb_imemo_new(imemo_class_fields, klass, sizeof(struct rb_class_fields));
+ FL_SET_RAW(fields, OBJ_FIELD_EXTERNAL);
+ IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa);
+ return fields;
+ }
+}
+
+VALUE
+rb_imemo_class_fields_new(VALUE klass, size_t capa)
+{
+ return imemo_class_fields_new(rb_singleton_class(klass), capa);
+}
+
+static VALUE
+imemo_class_fields_new_complex(VALUE klass, size_t capa)
+{
+ VALUE fields = imemo_class_fields_new(klass, sizeof(struct rb_class_fields));
+ IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
+ return fields;
+}
+
+VALUE
+rb_imemo_class_fields_new_complex(VALUE klass, size_t capa)
+{
+ return imemo_class_fields_new_complex(rb_singleton_class(klass), capa);
+}
+
+VALUE
+rb_imemo_class_fields_clone(VALUE fields_obj)
+{
+ shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
+ VALUE clone;
+
+ if (rb_shape_too_complex_p(shape_id)) {
+ clone = rb_imemo_class_fields_new_complex(CLASS_OF(fields_obj), 0);
+ st_table *src_table = rb_imemo_class_fields_complex_tbl(fields_obj);
+ st_replace(rb_imemo_class_fields_complex_tbl(clone), src_table);
+ }
+ else {
+ clone = imemo_class_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id));
+ MEMCPY(rb_imemo_class_fields_ptr(clone), rb_imemo_class_fields_ptr(fields_obj), VALUE, RSHAPE_LEN(shape_id));
+ }
+
+ return clone;
+}
+
/* =========================================================================
* memsize
* ========================================================================= */
@@ -156,6 +213,14 @@ rb_imemo_memsize(VALUE obj)
size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);
break;
+ case imemo_class_fields:
+ if (rb_shape_obj_too_complex_p(obj)) {
+ size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
+ }
+ else if (FL_TEST_RAW(obj, OBJ_FIELD_EXTERNAL)) {
+ size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
+ }
+ break;
default:
rb_bug("unreachable");
}
@@ -420,6 +485,27 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
break;
}
+ case imemo_class_fields: {
+ rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass);
+
+ if (rb_shape_obj_too_complex_p(obj)) {
+ st_table *tbl = rb_imemo_class_fields_complex_tbl(obj);
+ if (reference_updating) {
+ rb_gc_ref_update_table_values_only(tbl);
+ }
+ else {
+ rb_mark_tbl_no_pin(tbl);
+ }
+ }
+ else {
+ VALUE *fields = rb_imemo_class_fields_ptr(obj);
+ attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
+ for (attr_index_t i = 0; i < len; i++) {
+ rb_gc_mark_and_move(&fields[i]);
+ }
+ }
+ break;
+ }
default:
rb_bug("unreachable");
}
@@ -513,6 +599,17 @@ rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass)
rb_id_table_free(cc_tbl);
}
+static inline void
+imemo_class_fields_free(struct rb_class_fields *fields)
+{
+ if (rb_shape_obj_too_complex_p((VALUE)fields)) {
+ st_free_table(fields->as.complex.table);
+ }
+ else if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_EXTERNAL)) {
+ xfree(fields->as.external.ptr);
+ }
+}
+
void
rb_imemo_free(VALUE obj)
{
@@ -576,6 +673,7 @@ rb_imemo_free(VALUE obj)
break;
case imemo_svar:
RB_DEBUG_COUNTER_INC(obj_imemo_svar);
+
break;
case imemo_throw_data:
RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
@@ -586,6 +684,10 @@ rb_imemo_free(VALUE obj)
RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
break;
+ case imemo_class_fields:
+ imemo_class_fields_free(IMEMO_OBJ_FIELDS(obj));
+ RB_DEBUG_COUNTER_INC(obj_imemo_class_fields);
+ break;
default:
rb_bug("unreachable");
}