summaryrefslogtreecommitdiff
path: root/internal/set_table.h
diff options
context:
space:
mode:
author: alpaca-tc <[email protected]> 2025-04-06 01:50:08 +0900
committer: Koichi Sasada <[email protected]> 2025-06-09 12:33:35 +0900
commitc8ddc0a843074811b200673a2019fbe4b50bb890 (patch)
treef3881cd1f408f10abfdd7a8258d88cf18a557717 /internal/set_table.h
parentd0b5f3155406e8243b78e4cedd3a38710c7c323c (diff)
Optimize callcache invalidation for refinements
Fixes [Bug #21201] This change addresses a performance regression where defining methods inside `refine` blocks caused severe slowdowns. The issue was due to `rb_clear_all_refinement_method_cache()` triggering a full object space scan via `rb_objspace_each_objects` to find and invalidate affected callcaches, which is very inefficient. To fix this, I introduce `vm->cc_refinement_table` to track callcaches related to refinements. This allows us to invalidate only the necessary callcaches without scanning the entire heap, resulting in significant performance improvement.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/13077
Diffstat (limited to 'internal/set_table.h')
-rw-r--r-- internal/set_table.h | 2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/internal/set_table.h b/internal/set_table.h
index 7c03de2060..def52db039 100644
--- a/internal/set_table.h
+++ b/internal/set_table.h
@@ -37,6 +37,8 @@ size_t rb_set_table_size(const struct set_table *tbl);
set_table *rb_set_init_table_with_size(set_table *tab, const struct st_hash_type *, st_index_t);
#define set_init_numtable rb_set_init_numtable
set_table *rb_set_init_numtable(void);
+#define set_init_numtable_with_size rb_set_init_numtable_with_size
+set_table *rb_set_init_numtable_with_size(st_index_t size);
#define set_delete rb_set_delete
int rb_set_delete(set_table *, st_data_t *); /* returns 0:notfound 1:deleted */
#define set_insert rb_set_insert