author    Peter Zhu <[email protected]>    2024-03-12 13:50:50 -0400
committer Peter Zhu <[email protected]>    2024-03-13 09:55:52 -0400
commit    6ad347a1055902abfd5a7f5233dd8d18e1f1360b
tree      9d906ec788ca0e4a1bb731b4b72300f236d27271
parent    d1eaa97ec3cdbe38605379fc87a55987d6802dc7
Don't directly read SIZE_POOL_COUNT in shapes

This removes the shape code's assumption about how size pool slot sizes are derived from SIZE_POOL_COUNT: the GC now exposes rb_gc_size_pool_sizes(), a zero-terminated array of slot sizes, and rb_gc_size_pool_id_for_size(), which maps an allocation size to its size pool index.
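The key new interface is rb_gc_size_pool_sizes(), which returns a zero-terminated array of slot sizes. Below is a minimal standalone sketch of the sentinel-iteration pattern this enables; the slot sizes are assumed example values for a typical 64-bit build, not something the API guarantees:

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for rb_gc_size_pool_sizes(): a zero-terminated array of
     * slot sizes, so callers never need a SIZE_POOL_COUNT constant. */
    static size_t example_pool_sizes[] = {40, 80, 160, 320, 640, 0};

    int
    main(void)
    {
        size_t *sizes = example_pool_sizes;
        /* The trailing 0 sentinel replaces the compile-time pool count. */
        for (int i = 0; sizes[i] > 0; i++) {
            printf("pool %d: %zu bytes\n", i, sizes[i]);
        }
        return 0;
    }

This is the same loop that shape.c adopts in the diff below.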
-rw-r--r--   gc.c            66
-rw-r--r--   internal/gc.h    2
-rw-r--r--   object.c         5
-rw-r--r--   shape.c          7
4 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/gc.c b/gc.c
index 9bf741025e..5b10fc7b95 100644
--- a/gc.c
+++ b/gc.c
@@ -2665,6 +2665,44 @@ rb_gc_size_allocatable_p(size_t size)
return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
}
+static size_t size_pool_sizes[SIZE_POOL_COUNT + 1] = { 0 };
+
+size_t *
+rb_gc_size_pool_sizes(void)
+{
+ if (size_pool_sizes[0] == 0) {
+ for (unsigned char i = 0; i < SIZE_POOL_COUNT; i++) {
+ size_pool_sizes[i] = rb_size_pool_slot_size(i);
+ }
+ }
+
+ return size_pool_sizes;
+}
+
+size_t
+rb_gc_size_pool_id_for_size(size_t size)
+{
+ size += RVALUE_OVERHEAD;
+
+ size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
+
+ /* size_pool_idx is ceil(log2(slot_count)) */
+ size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
+
+ if (size_pool_idx >= SIZE_POOL_COUNT) {
+ rb_bug("rb_gc_size_pool_id_for_size: allocation size too large "
+ "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
+ }
+
+#if RGENGC_CHECK_MODE
+ rb_objspace_t *objspace = &rb_objspace;
+ GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
+ if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
+#endif
+
+ return size_pool_idx;
+}
+
static inline VALUE
ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
size_t size_pool_idx)
@@ -2754,30 +2792,6 @@ newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
return obj;
}
-static inline size_t
-size_pool_idx_for_size(size_t size)
-{
- size += RVALUE_OVERHEAD;
-
- size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
-
- /* size_pool_idx is ceil(log2(slot_count)) */
- size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
-
- if (size_pool_idx >= SIZE_POOL_COUNT) {
- rb_bug("size_pool_idx_for_size: allocation size too large "
- "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
- }
-
-#if RGENGC_CHECK_MODE
- rb_objspace_t *objspace = &rb_objspace;
- GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
- if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
-#endif
-
- return size_pool_idx;
-}
-
static VALUE
newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked)
{
@@ -2902,7 +2916,7 @@ newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v
}
}
- size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
+ size_t size_pool_idx = rb_gc_size_pool_id_for_size(alloc_size);
if (SHAPE_IN_BASIC_FLAGS || (flags & RUBY_T_MASK) == T_OBJECT) {
flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
@@ -8086,7 +8100,7 @@ gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, V
}
if (rb_gc_size_allocatable_p(obj_size)) {
- idx = size_pool_idx_for_size(obj_size);
+ idx = rb_gc_size_pool_id_for_size(obj_size);
}
return &size_pools[idx];
}
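The new rb_gc_size_pool_id_for_size() above computes ceil(log2(slot_count)) from a count-leading-zeros instruction. Here is a standalone sketch of the same math, using GCC/Clang's __builtin_clzll in place of Ruby's nlz_int64(); BASE_SLOT_SIZE = 40 and RVALUE_OVERHEAD = 0 are assumptions for a typical 64-bit build:

    #include <stdio.h>
    #include <stddef.h>

    #define BASE_SLOT_SIZE 40   /* assumed: sizeof(RVALUE) on 64-bit */
    #define CEILDIV(x, y)  (((x) + (y) - 1) / (y))

    /* Pool i holds slots of BASE_SLOT_SIZE << i bytes, so the pool index
     * for a request spanning slot_count base slots is ceil(log2(slot_count)). */
    static size_t
    pool_id_for_size(size_t size)
    {
        size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
        if (slot_count <= 1) return 0;  /* clz of 0 is undefined, so handle pool 0 first */
        return 64 - __builtin_clzll((unsigned long long)(slot_count - 1));
    }

    int
    main(void)
    {
        /* 100 bytes -> 3 base slots -> ceil(log2(3)) = 2 -> the 160-byte pool */
        printf("%zu\n", pool_id_for_size(100));
        return 0;
    }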
diff --git a/internal/gc.h b/internal/gc.h
index 9470ceeccc..9435f26ffd 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -223,6 +223,8 @@ static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
size_t rb_gc_obj_slot_size(VALUE obj);
bool rb_gc_size_allocatable_p(size_t size);
+size_t *rb_gc_size_pool_sizes(void);
+size_t rb_gc_size_pool_id_for_size(size_t size);
int rb_objspace_garbage_object_p(VALUE obj);
bool rb_gc_is_ptr_to_obj(const void *ptr);
diff --git a/object.c b/object.c
index 075deef974..1156b69767 100644
--- a/object.c
+++ b/object.c
@@ -134,9 +134,8 @@ rb_class_allocate_instance(VALUE klass)
RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT);
- // Set the shape to the specific T_OBJECT shape which is always
- // SIZE_POOL_COUNT away from the root shape.
- ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
+ // Set the shape to the specific T_OBJECT shape.
+ ROBJECT_SET_SHAPE_ID(obj, SIZE_POOL_COUNT + rb_gc_size_pool_id_for_size(size));
#if RUBY_DEBUG
RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
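The object.c change relies on the shape-ID layout asserted in shape.c below: IDs 0 through SIZE_POOL_COUNT - 1 are the per-pool root shapes, and each pool's T_OBJECT shape sits exactly SIZE_POOL_COUNT above its root. Since a freshly allocated object carries its pool's root shape, the old "current shape ID + SIZE_POOL_COUNT" and the new "SIZE_POOL_COUNT + pool ID" yield the same value. A sketch of that arithmetic, with SIZE_POOL_COUNT = 5 as an assumed typical value:

    #include <stdio.h>

    #define SIZE_POOL_COUNT 5   /* assumed typical 64-bit value */

    /* Root shape for pool i has ID i; its T_OBJECT shape has ID
     * i + SIZE_POOL_COUNT, matching the RUBY_ASSERT in shape.c. */
    static unsigned int
    t_object_shape_id(unsigned int size_pool_id)
    {
        return SIZE_POOL_COUNT + size_pool_id;
    }

    int
    main(void)
    {
        for (unsigned int i = 0; i < SIZE_POOL_COUNT; i++) {
            printf("pool %u: root shape %u -> T_OBJECT shape %u\n",
                   i, i, t_object_shape_id(i));
        }
        return 0;
    }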
diff --git a/shape.c b/shape.c
index 06d0f2135d..5854e4f94e 100644
--- a/shape.c
+++ b/shape.c
@@ -1266,12 +1266,13 @@ Init_default_shapes(void)
}
// Make shapes for T_OBJECT
- for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+ size_t *sizes = rb_gc_size_pool_sizes();
+ for (int i = 0; sizes[i] > 0; i++) {
rb_shape_t * shape = rb_shape_get_shape_by_id(i);
bool dont_care;
- rb_shape_t * t_object_shape =
+ rb_shape_t *t_object_shape =
get_next_shape_internal(shape, id_t_object, SHAPE_T_OBJECT, &dont_care, true);
- t_object_shape->capacity = (uint32_t)((rb_size_pool_slot_size(i) - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
+ t_object_shape->capacity = (uint32_t)((sizes[i] - offsetof(struct RObject, as.ary)) / sizeof(VALUE));
t_object_shape->edges = rb_id_table_create(0);
t_object_shape->ancestor_index = LEAF;
RUBY_ASSERT(rb_shape_id(t_object_shape) == (shape_id_t)(i + SIZE_POOL_COUNT));
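The capacity set on each T_OBJECT shape above is the number of VALUE slots that fit in the pool's slot after the RObject header. A worked sketch of that formula; the 16-byte offsetof(struct RObject, as.ary), the 8-byte VALUE, and the slot sizes are all assumptions for a typical 64-bit build:

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t ary_offset = 16;  /* assumed offsetof(struct RObject, as.ary) */
        size_t value_size = 8;   /* assumed sizeof(VALUE) */
        size_t sizes[] = {40, 80, 160, 320, 640, 0};  /* assumed slot sizes */

        /* Same sentinel loop and capacity formula as Init_default_shapes(). */
        for (int i = 0; sizes[i] > 0; i++) {
            size_t capacity = (sizes[i] - ary_offset) / value_size;
            printf("pool %d: %3zu-byte slot -> capacity %2zu ivars\n",
                   i, sizes[i], capacity);
        }
        return 0;
    }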