Diffstat (limited to 'shape.c')
-rw-r--r-- | shape.c | 180
1 file changed, 107 insertions, 73 deletions
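Most of this diff is a mechanical substitution: the shape tree singleton stops being an xcalloc-allocated rb_shape_tree_ptr reached through the GET_SHAPE_TREE() macro and becomes a file-scope `rb_shape_tree_t rb_shape_tree = { 0 };`, so every `GET_SHAPE_TREE()->field` becomes `rb_shape_tree.field`. A minimal stand-alone sketch of the before/after pattern (the demo_* names and GET_DEMO_TREE() macro are illustrative stand-ins, not the real rb_shape_tree_t layout):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for rb_shape_tree_t; the real struct also
 * holds shape_list, shape_cache, capacities and root_shape. */
typedef struct {
    unsigned int next_shape_id;
    unsigned int cache_size;
} demo_tree_t;

/* Before: a heap-allocated singleton behind a macro. Every access
 * pays a pointer load, and startup must allocate and zero it. */
static demo_tree_t *demo_tree_ptr;
#define GET_DEMO_TREE() (demo_tree_ptr)

static void
init_before(void)
{
    demo_tree_ptr = calloc(1, sizeof(demo_tree_t));
    if (!demo_tree_ptr) abort();
    GET_DEMO_TREE()->next_shape_id = 1;
}

/* After: a statically allocated, zero-initialized struct. Field
 * accesses compile to direct loads and there is nothing to free at
 * shutdown (cf. the rb_shape_free_all hunk below, which drops one
 * xfree). */
static demo_tree_t demo_tree = { 0 };

static void
init_after(void)
{
    demo_tree.next_shape_id = 1; /* no allocation, no NULL check */
}

int
main(void)
{
    init_before();
    init_after();
    printf("before: %u, after: %u\n",
           GET_DEMO_TREE()->next_shape_id, demo_tree.next_shape_id);
    free(demo_tree_ptr);
    return 0;
}

Static allocation also lets zero-initialized storage stand in for the startup xcalloc, which is exactly the pair of deletions visible in Init_default_shapes and rb_shape_free_all below.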
@@ -33,9 +33,7 @@
 #define MAX_SHAPE_ID (SHAPE_BUFFER_SIZE - 1)
 #define ANCESTOR_SEARCH_MAX_DEPTH 2
 
-static ID id_frozen;
-static ID id_t_object;
-ID ruby_internal_object_id; // extern
+static ID id_object_id;
 
 #define LEAF 0
 #define BLACK 0x0
@@ -48,8 +46,8 @@ redblack_left(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->l < GET_SHAPE_TREE()->cache_size);
-        redblack_node_t *left = &GET_SHAPE_TREE()->shape_cache[node->l - 1];
+        RUBY_ASSERT(node->l < rb_shape_tree.cache_size);
+        redblack_node_t *left = &rb_shape_tree.shape_cache[node->l - 1];
         return left;
     }
 }
@@ -61,8 +59,8 @@ redblack_right(redblack_node_t *node)
         return LEAF;
     }
     else {
-        RUBY_ASSERT(node->r < GET_SHAPE_TREE()->cache_size);
-        redblack_node_t *right = &GET_SHAPE_TREE()->shape_cache[node->r - 1];
+        RUBY_ASSERT(node->r < rb_shape_tree.cache_size);
+        redblack_node_t *right = &rb_shape_tree.shape_cache[node->r - 1];
         return right;
     }
 }
@@ -120,7 +118,7 @@ redblack_id_for(redblack_node_t *node)
         return 0;
     }
     else {
-        redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
+        redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
         redblack_id_t id = (redblack_id_t)(node - redblack_nodes);
         return id + 1;
     }
@@ -129,7 +127,7 @@ redblack_id_for(redblack_node_t *node)
 static redblack_node_t *
 redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redblack_node_t *right)
 {
-    if (GET_SHAPE_TREE()->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
+    if (rb_shape_tree.cache_size + 1 >= REDBLACK_CACHE_SIZE) {
         // We're out of cache, just quit
         return LEAF;
     }
@@ -137,8 +135,8 @@ redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redbl
     RUBY_ASSERT(left == LEAF || left->key < key);
     RUBY_ASSERT(right == LEAF || right->key > key);
 
-    redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
-    redblack_node_t *node = &redblack_nodes[(GET_SHAPE_TREE()->cache_size)++];
+    redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
+    redblack_node_t *node = &redblack_nodes[(rb_shape_tree.cache_size)++];
     node->key = key;
     node->value = (rb_shape_t *)((uintptr_t)value | color);
     node->l = redblack_id_for(left);
@@ -288,20 +286,20 @@ redblack_insert(redblack_node_t *tree, ID key, rb_shape_t *value)
 }
 #endif
 
-rb_shape_tree_t *rb_shape_tree_ptr = NULL;
+rb_shape_tree_t rb_shape_tree = { 0 };
 static VALUE shape_tree_obj = Qfalse;
 
 rb_shape_t *
 rb_shape_get_root_shape(void)
 {
-    return GET_SHAPE_TREE()->root_shape;
+    return rb_shape_tree.root_shape;
 }
 
 static void
 shape_tree_mark(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             rb_gc_mark_movable(cursor->edges);
@@ -314,7 +312,7 @@ static void
 shape_tree_compact(void *data)
 {
     rb_shape_t *cursor = rb_shape_get_root_shape();
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
     while (cursor < end) {
         if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
             cursor->edges = rb_gc_location(cursor->edges);
@@ -326,7 +324,7 @@ shape_tree_compact(void *data)
 static size_t
 shape_tree_memsize(const void *data)
 {
-    return GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t);
+    return rb_shape_tree.cache_size * sizeof(redblack_node_t);
 }
 
 static const rb_data_type_t shape_tree_type = {
@@ -349,14 +347,14 @@ static inline shape_id_t
 raw_shape_id(rb_shape_t *shape)
 {
     RUBY_ASSERT(shape);
-    return (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+    return (shape_id_t)(shape - rb_shape_tree.shape_list);
 }
 
 static inline shape_id_t
 shape_id(rb_shape_t *shape, shape_id_t previous_shape_id)
 {
     RUBY_ASSERT(shape);
-    shape_id_t raw_id = (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+    shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree.shape_list);
     return raw_id | (previous_shape_id & SHAPE_ID_FLAGS_MASK);
 }
 
@@ -373,22 +371,13 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data)
 {
     rb_shape_t *start = rb_shape_get_root_shape();
     rb_shape_t *cursor = start;
-    rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id);
+    rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id);
     while (cursor < end) {
         callback((shape_id_t)(cursor - start), data);
         cursor += 1;
     }
 }
 
-RUBY_FUNC_EXPORTED rb_shape_t *
-rb_shape_lookup(shape_id_t shape_id)
-{
-    uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK);
-    RUBY_ASSERT(offset != INVALID_SHAPE_ID);
-
-    return &GET_SHAPE_TREE()->shape_list[offset];
-}
-
 RUBY_FUNC_EXPORTED shape_id_t
 rb_obj_shape_id(VALUE obj)
 {
@@ -396,6 +385,13 @@ rb_obj_shape_id(VALUE obj)
         return SPECIAL_CONST_SHAPE_ID;
     }
 
+    if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
+        VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
+        if (fields_obj) {
+            return RBASIC_SHAPE_ID(fields_obj);
+        }
+        return ROOT_SHAPE_ID;
+    }
     return RBASIC_SHAPE_ID(obj);
 }
 
@@ -416,14 +412,14 @@ rb_shape_depth(shape_id_t shape_id)
 static rb_shape_t *
 shape_alloc(void)
 {
-    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(GET_SHAPE_TREE()->next_shape_id, 1);
+    shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree.next_shape_id, 1);
 
     if (shape_id == (MAX_SHAPE_ID + 1)) {
         // TODO: Make an OutOfShapesError ??
         rb_bug("Out of shapes");
     }
 
-    return &GET_SHAPE_TREE()->shape_list[shape_id];
+    return &rb_shape_tree.shape_list[shape_id];
 }
 
 static rb_shape_t *
@@ -487,7 +483,7 @@ redblack_cache_ancestors(rb_shape_t *shape)
 static attr_index_t
 shape_grow_capa(attr_index_t current_capa)
 {
-    const attr_index_t *capacities = GET_SHAPE_TREE()->capacities;
+    const attr_index_t *capacities = rb_shape_tree.capacities;
 
     // First try to use the next size that will be embeddable in a larger object slot.
     attr_index_t capa;
@@ -566,7 +562,7 @@ retry:
     if (!res) {
         // If we're not allowed to create a new variation, of if we're out of shapes
        // we return TOO_COMPLEX_SHAPE.
-        if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+        if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
            res = NULL;
        }
        else {
@@ -642,7 +638,7 @@ get_next_shape_internal(rb_shape_t *shape, ID id, enum shape_type shape_type, bo
     if (!res) {
         // If we're not allowed to create a new variation, of if we're out of shapes
        // we return TOO_COMPLEX_SHAPE.
-        if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+        if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
            res = NULL;
        }
        else {
@@ -716,7 +712,7 @@ shape_transition_object_id(shape_id_t original_shape_id)
     RUBY_ASSERT(!rb_shape_has_object_id(original_shape_id));
 
     bool dont_care;
-    rb_shape_t *shape = get_next_shape_internal(RSHAPE(original_shape_id), ruby_internal_object_id, SHAPE_OBJ_ID, &dont_care, true);
+    rb_shape_t *shape = get_next_shape_internal(RSHAPE(original_shape_id), id_object_id, SHAPE_OBJ_ID, &dont_care, true);
     if (!shape) {
         shape = RSHAPE(ROOT_SHAPE_WITH_OBJ_ID);
     }
@@ -881,14 +877,11 @@ shape_get_next(rb_shape_t *shape, VALUE obj, ID id, bool emit_warnings)
 #endif
 
     VALUE klass;
-    switch (BUILTIN_TYPE(obj)) {
-      case T_CLASS:
-      case T_MODULE:
-        klass = rb_singleton_class(obj);
-        break;
-      default:
+    if (IMEMO_TYPE_P(obj, imemo_class_fields)) { // HACK
+        klass = CLASS_OF(obj);
+    }
+    else {
         klass = rb_obj_class(obj);
-        break;
     }
 
     bool allow_new_shape = RCLASS_VARIATION_COUNT(klass) < SHAPE_MAX_VARIATIONS;
@@ -1151,7 +1144,7 @@ rb_shape_copy_complex_ivars(VALUE dest, VALUE obj, shape_id_t src_shape_id, st_t
     // obj is TOO_COMPLEX so we can copy its iv_hash
     st_table *table = st_copy(fields_table);
     if (rb_shape_has_object_id(src_shape_id)) {
-        st_data_t id = (st_data_t)ruby_internal_object_id;
+        st_data_t id = (st_data_t)id_object_id;
         st_delete(table, &id, NULL);
     }
     rb_obj_init_too_complex(dest, table);
@@ -1184,6 +1177,31 @@ rb_shape_memsize(shape_id_t shape_id)
     return memsize;
 }
 
+bool
+rb_shape_foreach_field(shape_id_t initial_shape_id, rb_shape_foreach_transition_callback func, void *data)
+{
+    RUBY_ASSERT(!rb_shape_too_complex_p(initial_shape_id));
+
+    rb_shape_t *shape = RSHAPE(initial_shape_id);
+    if (shape->type == SHAPE_ROOT) {
+        return true;
+    }
+
+    shape_id_t parent_id = shape_id(RSHAPE(shape->parent_id), initial_shape_id);
+    if (rb_shape_foreach_field(parent_id, func, data)) {
+        switch (func(shape_id(shape, initial_shape_id), data)) {
+          case ST_STOP:
+            return false;
+          case ST_CHECK:
+          case ST_CONTINUE:
+            break;
+          default:
+            rb_bug("unreachable");
+        }
+    }
+    return true;
+}
+
 #if RUBY_DEBUG
 bool
 rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id)
@@ -1216,13 +1234,30 @@ rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id)
         }
     }
 
+    // Make sure SHAPE_ID_HAS_IVAR_MASK is valid.
+    if (rb_shape_too_complex_p(shape_id)) {
+        RUBY_ASSERT(shape_id & SHAPE_ID_HAS_IVAR_MASK);
+    }
+    else {
+        attr_index_t ivar_count = RSHAPE_LEN(shape_id);
+        if (has_object_id) {
+            ivar_count--;
+        }
+        if (ivar_count) {
+            RUBY_ASSERT(shape_id & SHAPE_ID_HAS_IVAR_MASK);
+        }
+        else {
+            RUBY_ASSERT(!(shape_id & SHAPE_ID_HAS_IVAR_MASK));
+        }
+    }
+
     uint8_t flags_heap_index = rb_shape_heap_index(shape_id);
     if (RB_TYPE_P(obj, T_OBJECT)) {
-        size_t shape_id_slot_size = GET_SHAPE_TREE()->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
+        size_t shape_id_slot_size = rb_shape_tree.capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
         size_t actual_slot_size = rb_gc_obj_slot_size(obj);
         if (shape_id_slot_size != actual_slot_size) {
-            rb_bug("shape_id heap_index flags mismatch: shape_id_slot_size=%lu, gc_slot_size=%lu\n", shape_id_slot_size, actual_slot_size);
+            rb_bug("shape_id heap_index flags mismatch: shape_id_slot_size=%zu, gc_slot_size=%zu\n", shape_id_slot_size, actual_slot_size);
        }
    }
    else {
@@ -1368,7 +1403,7 @@ rb_shape_root_shape(VALUE self)
 static VALUE
 rb_shape_shapes_available(VALUE self)
 {
-    return INT2NUM(MAX_SHAPE_ID - (GET_SHAPE_TREE()->next_shape_id - 1));
+    return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree.next_shape_id - 1));
 }
 
 static VALUE
@@ -1376,7 +1411,7 @@ rb_shape_exhaust(int argc, VALUE *argv, VALUE self)
 {
     rb_check_arity(argc, 0, 1);
     int offset = argc == 1 ? NUM2INT(argv[0]) : 0;
-    GET_SHAPE_TREE()->next_shape_id = MAX_SHAPE_ID - offset + 1;
+    rb_shape_tree.next_shape_id = MAX_SHAPE_ID - offset + 1;
     return Qnil;
 }
 
@@ -1432,7 +1467,7 @@ static VALUE
 rb_shape_find_by_id(VALUE mod, VALUE id)
 {
     shape_id_t shape_id = NUM2UINT(id);
-    if (shape_id >= GET_SHAPE_TREE()->next_shape_id) {
+    if (shape_id >= rb_shape_tree.next_shape_id) {
         rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
     }
     return shape_id_t_to_rb_cShape(shape_id);
@@ -1446,8 +1481,6 @@ rb_shape_find_by_id(VALUE mod, VALUE id)
 void
 Init_default_shapes(void)
 {
-    rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t));
-
     size_t *heap_sizes = rb_gc_heap_sizes();
     size_t heaps_count = 0;
     while (heap_sizes[heaps_count]) {
@@ -1459,45 +1492,43 @@ Init_default_shapes(void)
     for (index = 0; index < heaps_count; index++) {
         capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE);
     }
-    GET_SHAPE_TREE()->capacities = capacities;
+    rb_shape_tree.capacities = capacities;
 
 #ifdef HAVE_MMAP
     size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
-    rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
+    rb_shape_tree.shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
                                                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                                                        -1, 0);
-    if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) {
-        GET_SHAPE_TREE()->shape_list = 0;
+    if (rb_shape_tree.shape_list == MAP_FAILED) {
+        rb_shape_tree.shape_list = 0;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
+        ruby_annotate_mmap(rb_shape_tree.shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
     }
 #else
-    GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
+    rb_shape_tree.shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
 #endif
 
-    if (!GET_SHAPE_TREE()->shape_list) {
+    if (!rb_shape_tree.shape_list) {
         rb_memerror();
     }
 
-    id_frozen = rb_make_internal_id();
-    id_t_object = rb_make_internal_id();
-    ruby_internal_object_id = rb_make_internal_id();
+    id_object_id = rb_make_internal_id();
 
#ifdef HAVE_MMAP
     size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
-    rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
+    rb_shape_tree.shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
                                                              PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                                                              -1, 0);
-    rb_shape_tree_ptr->cache_size = 0;
+    rb_shape_tree.cache_size = 0;
 
     // If mmap fails, then give up on the redblack tree cache.
     // We set the cache size such that the redblack node allocators think
     // the cache is full.
-    if (GET_SHAPE_TREE()->shape_cache == MAP_FAILED) {
-        GET_SHAPE_TREE()->shape_cache = 0;
-        GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE;
+    if (rb_shape_tree.shape_cache == MAP_FAILED) {
+        rb_shape_tree.shape_cache = 0;
+        rb_shape_tree.cache_size = REDBLACK_CACHE_SIZE;
     }
     else {
-        ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
+        ruby_annotate_mmap(rb_shape_tree.shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
     }
 #endif
 
@@ -1508,21 +1539,24 @@ Init_default_shapes(void)
     rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
     root->capacity = 0;
     root->type = SHAPE_ROOT;
-    GET_SHAPE_TREE()->root_shape = root;
-    RUBY_ASSERT(raw_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID);
+    rb_shape_tree.root_shape = root;
+    RUBY_ASSERT(raw_shape_id(rb_shape_tree.root_shape) == ROOT_SHAPE_ID);
+    RUBY_ASSERT(!(raw_shape_id(rb_shape_tree.root_shape) & SHAPE_ID_HAS_IVAR_MASK));
 
-    rb_shape_t *root_with_obj_id = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
-    root_with_obj_id->type = SHAPE_OBJ_ID;
-    root_with_obj_id->edge_name = ruby_internal_object_id;
-    root_with_obj_id->next_field_index++;
+    bool dontcare;
+    rb_shape_t *root_with_obj_id = get_next_shape_internal(root, id_object_id, SHAPE_OBJ_ID, &dontcare, true);
     RUBY_ASSERT(raw_shape_id(root_with_obj_id) == ROOT_SHAPE_WITH_OBJ_ID);
+    RUBY_ASSERT(root_with_obj_id->type == SHAPE_OBJ_ID);
+    RUBY_ASSERT(root_with_obj_id->edge_name == id_object_id);
+    RUBY_ASSERT(root_with_obj_id->next_field_index == 1);
+    RUBY_ASSERT(!(raw_shape_id(root_with_obj_id) & SHAPE_ID_HAS_IVAR_MASK));
+    (void)root_with_obj_id;
 }
 
 void
 rb_shape_free_all(void)
 {
-    xfree((void *)GET_SHAPE_TREE()->capacities);
-    xfree(GET_SHAPE_TREE());
+    xfree((void *)rb_shape_tree.capacities);
 }
 
 void
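The one genuinely new entry point above, rb_shape_foreach_field, walks a shape's transition chain by recursing toward the root first and invoking the callback while unwinding, so fields are yielded oldest-transition-first. A simplified sketch of that parent-first traversal over a toy parent-linked chain (the node_t/foreach_field names and ITER_* results are illustrative stand-ins for Ruby's shapes and st.c return codes; this version propagates a stop request through every level):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for shape nodes and st.c-style callback results. */
enum iter_result { ITER_CONTINUE, ITER_STOP };

typedef struct node {
    struct node *parent; /* NULL at the root, like SHAPE_ROOT */
    const char *name;    /* stand-in for the transition's edge_name */
} node_t;

typedef enum iter_result (*foreach_cb)(const node_t *node, void *data);

/* Recurse to the root first, then call func while unwinding, so
 * callbacks fire oldest-transition-first -- the same trick
 * rb_shape_foreach_field plays with shape->parent_id. Returns false
 * as soon as a callback asks to stop, short-circuiting the unwind. */
static bool
foreach_field(const node_t *node, foreach_cb func, void *data)
{
    if (node->parent == NULL) { /* root carries no field */
        return true;
    }
    if (!foreach_field(node->parent, func, data)) {
        return false;
    }
    return func(node, data) != ITER_STOP;
}

static enum iter_result
print_field(const node_t *node, void *data)
{
    (void)data;
    printf("%s\n", node->name);
    return ITER_CONTINUE;
}

int
main(void)
{
    node_t root = { NULL, "(root)" };
    node_t a = { &root, "@a" };
    node_t b = { &a, "@b" };

    foreach_field(&b, print_field, NULL); /* prints @a, then @b */
    return 0;
}

Running it prints @a before @b: even though the traversal starts from the leaf, the recursion reaches the root before any callback fires, which is what lets callers rebuild an object's fields in insertion order.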