/**********************************************************************

  node.c - ruby node tree

  created at: 09/12/06 21:23:44 JST

  Copyright (C) 2009 Yusuke Endoh

**********************************************************************/

#ifdef UNIVERSAL_PARSER

#include "rubyparser.h"
#include "internal/parse.h"

#else

#include "internal/hash.h"
#include "internal/variable.h"
#include "ruby/ruby.h"

#endif

#define NODE_BUF_DEFAULT_SIZE (sizeof(struct RNode) * 16)

static void
init_node_buffer_elem(node_buffer_elem_t *nbe, size_t allocated, void *xmalloc(size_t))
{
    nbe->allocated = allocated;
    nbe->used = 0;
    nbe->len = 0;
    nbe->nodes = xmalloc(allocated / sizeof(struct RNode) * sizeof(struct RNode *)); /* Every node requires at least an RNode */
}
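
/* Worked example (illustrative, not an original comment): with the default
 * bucket, allocated == sizeof(struct RNode) * 16, so the index array above
 * gets 16 pointer slots. A node struct larger than RNode consumes several
 * RNode-sized units of buffer space but still occupies only one index slot,
 * so the `nodes` array can never overflow. */
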
static void
init_node_buffer_list(node_buffer_list_t *nb, node_buffer_elem_t *head, void *xmalloc(size_t))
{
    init_node_buffer_elem(head, NODE_BUF_DEFAULT_SIZE, xmalloc);
    nb->head = nb->last = head;
    nb->head->next = NULL;
}

#ifdef UNIVERSAL_PARSER
#define ruby_xmalloc config->malloc
#define Qnil config->qnil
#endif

#ifdef UNIVERSAL_PARSER
static node_buffer_t *
rb_node_buffer_new(rb_parser_config_t *config)
#else
static node_buffer_t *
rb_node_buffer_new(void)
#endif
{
    const size_t bucket_size = offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_SIZE;
    const size_t alloc_size = sizeof(node_buffer_t) + (bucket_size * 2);
    STATIC_ASSERT(
        integer_overflow,
        offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_SIZE
        > sizeof(node_buffer_t) + 2 * sizeof(node_buffer_elem_t));
    node_buffer_t *nb = ruby_xmalloc(alloc_size);
    init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t *)&nb[1], ruby_xmalloc);
    init_node_buffer_list(&nb->markable, (node_buffer_elem_t *)((size_t)nb->unmarkable.head + bucket_size), ruby_xmalloc);
    nb->local_tables = 0;
    nb->mark_hash = Qnil;
    nb->tokens = Qnil;
#ifdef UNIVERSAL_PARSER
    nb->config = config;
#endif
    return nb;
}
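
/* Illustrative layout note (inferred from the code above, not an original
 * comment): the single alloc_size allocation is carved up as
 *
 *   [ node_buffer_t | unmarkable head bucket | markable head bucket ]
 *     ^ nb            ^ (node_buffer_elem_t *)&nb[1]
 *                                              ^ ... + bucket_size
 *
 * so the two initial buckets live inside the node_buffer_t allocation
 * itself; node_buffer_list_free() relies on this when it frees only the
 * nodes array of the last (embedded) element. */
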
#ifdef UNIVERSAL_PARSER
#undef ruby_xmalloc
#define ruby_xmalloc ast->node_buffer->config->malloc
#undef xfree
#define xfree ast->node_buffer->config->free
#define rb_ident_hash_new ast->node_buffer->config->ident_hash_new
#define rb_xmalloc_mul_add ast->node_buffer->config->xmalloc_mul_add
#define ruby_xrealloc(var,size) (ast->node_buffer->config->realloc_n((void *)var, 1, size))
#define rb_gc_mark ast->node_buffer->config->gc_mark
#define rb_gc_location ast->node_buffer->config->gc_location
#define rb_gc_mark_movable ast->node_buffer->config->gc_mark_movable
#undef Qnil
#define Qnil ast->node_buffer->config->qnil
#define Qtrue ast->node_buffer->config->qtrue
#define NIL_P ast->node_buffer->config->nil_p
#define rb_hash_aset ast->node_buffer->config->hash_aset
#define rb_hash_delete ast->node_buffer->config->hash_delete
#define RB_OBJ_WRITE(old, slot, young) ast->node_buffer->config->obj_write((VALUE)(old), (VALUE *)(slot), (VALUE)(young))
#endif

typedef void node_itr_t(rb_ast_t *ast, void *ctx, NODE *node);
static void iterate_node_values(rb_ast_t *ast, node_buffer_list_t *nb, node_itr_t *func, void *ctx);

/* Setup NODE structure.
 * NODE is not an object managed by GC, but it imitates an object
 * so that it can work with `RB_TYPE_P(obj, T_NODE)`.
 * This dirty hack is needed because Ripper jumbles NODEs and other types
 * of objects. */
void
rb_node_init(NODE *n, enum node_type type)
{
    RNODE(n)->flags = T_NODE;
    nd_init_type(RNODE(n), type);
    RNODE(n)->nd_loc.beg_pos.lineno = 0;
    RNODE(n)->nd_loc.beg_pos.column = 0;
    RNODE(n)->nd_loc.end_pos.lineno = 0;
    RNODE(n)->nd_loc.end_pos.column = 0;
    RNODE(n)->node_id = -1;
}
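
/* Minimal sketch (hypothetical helper, not part of this file): after
 * rb_node_init(), a NODE cast to VALUE satisfies the same type check that
 * Ripper applies to real heap objects, which is the point of stamping
 * T_NODE into the flags above. */
static inline int
example_node_p(VALUE obj)
{
    return RB_TYPE_P(obj, T_NODE); /* true for an initialized NODE too */
}
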
const char *
rb_node_name(int node)
{
    switch (node) {
#include "node_name.inc"
      default:
        return 0;
    }
}

#ifdef UNIVERSAL_PARSER
const char *
ruby_node_name(int node)
{
    return rb_node_name(node);
}
#else
const char *
ruby_node_name(int node)
{
    const char *name = rb_node_name(node);

    if (!name) rb_bug("unknown node: %d", node);
    return name;
}
#endif

static void
node_buffer_list_free(rb_ast_t *ast, node_buffer_list_t *nb)
{
    node_buffer_elem_t *nbe = nb->head;
    while (nbe != nb->last) {
        void *buf = nbe;
        xfree(nbe->nodes);
        nbe = nbe->next;
        xfree(buf);
    }

    /* The last node_buffer_elem_t is allocated in the node_buffer_t, so we
     * only need to free the nodes. */
    xfree(nbe->nodes);
}

struct rb_ast_local_table_link {
    struct rb_ast_local_table_link *next;
    // struct rb_ast_id_table {
    int size;
    ID ids[FLEX_ARY_LEN];
    // }
};
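
/* Hedged sketch (an assumption, not in the original): rb_ast_new_local_table()
 * below hands out `&link->size` as a rb_ast_id_table_t *, so the region
 * between the commented braces must mirror struct rb_ast_id_table (an int
 * size followed by a flexible array of IDs). A compile-time check of that
 * assumption could look like this: */
STATIC_ASSERT(local_table_mirrors_id_table,
              offsetof(struct rb_ast_local_table_link, ids)
              - offsetof(struct rb_ast_local_table_link, size)
              == offsetof(rb_ast_id_table_t, ids));
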
static void
free_ast_value(rb_ast_t *ast, void *ctx, NODE *node)
{
    switch (nd_type(node)) {
      case NODE_ARGS:
        xfree(RNODE_ARGS(node)->nd_ainfo);
        break;
      default:
        break;
    }
}

static void
rb_node_buffer_free(rb_ast_t *ast, node_buffer_t *nb)
{
    iterate_node_values(ast, &nb->unmarkable, free_ast_value, NULL);
    node_buffer_list_free(ast, &nb->unmarkable);
    node_buffer_list_free(ast, &nb->markable);
    struct rb_ast_local_table_link *local_table = nb->local_tables;
    while (local_table) {
        struct rb_ast_local_table_link *next_table = local_table->next;
        xfree(local_table);
        local_table = next_table;
    }
    xfree(nb);
}

#define buf_add_offset(nbe, offset) ((char *)(nbe->buf) + (offset))

static NODE *
ast_newnode_in_bucket(rb_ast_t *ast, node_buffer_list_t *nb, size_t size, size_t alignment)
{
    size_t padding;
    NODE *ptr;

    padding = alignment - (size_t)buf_add_offset(nb->head, nb->head->used) % alignment;
    padding = padding == alignment ? 0 : padding;
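
    /* Worked example (illustrative, not an original comment): with
     * alignment == 8 and the bump pointer at an address ending in ...4,
     * the first line yields 8 - 4 == 4 bytes of padding; if the address
     * is already 8-byte aligned, the first line yields 8 and the second
     * line clamps it back to 0. */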

    if (nb->head->used + size + padding > nb->head->allocated) {
        size_t n = nb->head->allocated * 2;
        node_buffer_elem_t *nbe;
        nbe = rb_xmalloc_mul_add(n, sizeof(char *), offsetof(node_buffer_elem_t, buf));
        init_node_buffer_elem(nbe, n, ruby_xmalloc);
        nbe->next = nb->head;
        nb->head = nbe;
        padding = 0; /* malloc returns an aligned address, so no extra padding is needed */
    }

    ptr = (NODE *)buf_add_offset(nb->head, nb->head->used + padding);
    nb->head->used += (size + padding);
    nb->head->nodes[nb->head->len++] = ptr;
    return ptr;
}

static bool
nodetype_markable_p(enum node_type type)
{
    switch (type) {
      case NODE_MATCH:
      case NODE_LIT:
      case NODE_STR:
      case NODE_XSTR:
      case NODE_DSTR:
      case NODE_DXSTR:
      case NODE_DREGX:
      case NODE_DSYM:
        return true;
      default:
        return false;
    }
}

NODE *
rb_ast_newnode(rb_ast_t *ast, enum node_type type, size_t size, size_t alignment)
{
    node_buffer_t *nb = ast->node_buffer;
    node_buffer_list_t *bucket =
        (nodetype_markable_p(type) ? &nb->markable : &nb->unmarkable);
    return ast_newnode_in_bucket(ast, bucket, size, alignment);
}
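
/* Hypothetical allocation sketch (not from the original): a literal node
 * holds a VALUE, so nodetype_markable_p() routes it to nb->markable and the
 * GC sees its nd_lit via mark_ast_value(). rb_node_lit_t and RUBY_ALIGNOF
 * are assumed to be in scope here. */
static NODE *
example_new_lit_node(rb_ast_t *ast)
{
    NODE *n = rb_ast_newnode(ast, NODE_LIT, sizeof(rb_node_lit_t),
                             RUBY_ALIGNOF(rb_node_lit_t));
    rb_node_init(n, NODE_LIT);
    return n;
}
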
void
rb_ast_node_type_change(NODE *n, enum node_type type)
{
    enum node_type old_type = nd_type(n);
    if (nodetype_markable_p(old_type) != nodetype_markable_p(type)) {
        rb_bug("node type changed: %s -> %s",
               ruby_node_name(old_type), ruby_node_name(type));
    }
}

rb_ast_id_table_t *
rb_ast_new_local_table(rb_ast_t *ast, int size)
{
    size_t alloc_size = sizeof(struct rb_ast_local_table_link) + size * sizeof(ID);
    struct rb_ast_local_table_link *link = ruby_xmalloc(alloc_size);
    link->next = ast->node_buffer->local_tables;
    ast->node_buffer->local_tables = link;
    link->size = size;

    return (rb_ast_id_table_t *) &link->size;
}

rb_ast_id_table_t *
rb_ast_resize_latest_local_table(rb_ast_t *ast, int size)
{
    struct rb_ast_local_table_link *link = ast->node_buffer->local_tables;
    size_t alloc_size = sizeof(struct rb_ast_local_table_link) + size * sizeof(ID);
    link = ruby_xrealloc(link, alloc_size);
    ast->node_buffer->local_tables = link;
    link->size = size;

    return (rb_ast_id_table_t *) &link->size;
}

void
rb_ast_delete_node(rb_ast_t *ast, NODE *n)
{
    (void)ast;
    (void)n;

    /* should we implement freelist? */
}

#ifdef UNIVERSAL_PARSER
rb_ast_t *
rb_ast_new(rb_parser_config_t *config)
{
    node_buffer_t *nb = rb_node_buffer_new(config);
    config->counter++;
    return config->ast_new((VALUE)nb);
}
#else
rb_ast_t *
rb_ast_new(void)
{
    node_buffer_t *nb = rb_node_buffer_new();
    rb_ast_t *ast = (rb_ast_t *)rb_imemo_new(imemo_ast, 0, 0, 0, (VALUE)nb);

    return ast;
}
#endif

static void
iterate_buffer_elements(rb_ast_t *ast, node_buffer_elem_t *nbe, long len, node_itr_t *func, void *ctx)
{
    long cursor;
    for (cursor = 0; cursor < len; cursor++) {
        func(ast, ctx, nbe->nodes[cursor]);
    }
}

static void
iterate_node_values(rb_ast_t *ast, node_buffer_list_t *nb, node_itr_t *func, void *ctx)
{
    node_buffer_elem_t *nbe = nb->head;

    while (nbe) {
        iterate_buffer_elements(ast, nbe, nbe->len, func, ctx);
        nbe = nbe->next;
    }
}

static void
mark_ast_value(rb_ast_t *ast, void *ctx, NODE *node)
{
#ifdef UNIVERSAL_PARSER
    bug_report_func rb_bug = ast->node_buffer->config->bug;
#endif

    switch (nd_type(node)) {
      case NODE_MATCH:
      case NODE_LIT:
      case NODE_STR:
      case NODE_XSTR:
      case NODE_DSTR:
      case NODE_DXSTR:
      case NODE_DREGX:
      case NODE_DSYM:
        /* all of these carry a VALUE in the nd_lit position, so the
         * RNODE_LIT view works for each of them */
        rb_gc_mark_movable(RNODE_LIT(node)->nd_lit);
        break;
      default:
        rb_bug("unreachable node %s", ruby_node_name(nd_type(node)));
    }
}

static void
update_ast_value(rb_ast_t *ast, void *ctx, NODE *node)
{
#ifdef UNIVERSAL_PARSER
    bug_report_func rb_bug = ast->node_buffer->config->bug;
#endif

    switch (nd_type(node)) {
      case NODE_MATCH:
      case NODE_LIT:
      case NODE_STR:
      case NODE_XSTR:
      case NODE_DSTR:
      case NODE_DXSTR:
      case NODE_DREGX:
      case NODE_DSYM:
        RNODE_LIT(node)->nd_lit = rb_gc_location(RNODE_LIT(node)->nd_lit);
        break;
      default:
        rb_bug("unreachable");
    }
}

void
rb_ast_update_references(rb_ast_t *ast)
{
    if (ast->node_buffer) {
        node_buffer_t *nb = ast->node_buffer;

        iterate_node_values(ast, &nb->markable, update_ast_value, NULL);
    }
}

void
rb_ast_mark(rb_ast_t *ast)
{
    if (ast->node_buffer) {
        rb_gc_mark(ast->node_buffer->mark_hash);
        rb_gc_mark(ast->node_buffer->tokens);
        node_buffer_t *nb = ast->node_buffer;
        iterate_node_values(ast, &nb->markable, mark_ast_value, NULL);
        if (ast->body.script_lines) rb_gc_mark(ast->body.script_lines);
    }
}

void
rb_ast_free(rb_ast_t *ast)
{
    if (ast->node_buffer) {
#ifdef UNIVERSAL_PARSER
        rb_parser_config_t *config = ast->node_buffer->config;
#endif

        rb_node_buffer_free(ast, ast->node_buffer);
        ast->node_buffer = 0;
#ifdef UNIVERSAL_PARSER
        config->counter--;
        if (config->counter <= 0) {
            rb_ruby_parser_config_free(config);
        }
#endif
    }
}

static size_t
buffer_list_size(node_buffer_list_t *nb)
{
    size_t size = 0;
    node_buffer_elem_t *nbe = nb->head;
    while (nbe != nb->last) {
        size += offsetof(node_buffer_elem_t, buf) + nbe->used;
        nbe = nbe->next;
    }
    return size;
}

size_t
rb_ast_memsize(const rb_ast_t *ast)
{
    size_t size = 0;
    node_buffer_t *nb = ast->node_buffer;

    if (nb) {
        size += sizeof(node_buffer_t);
        size += buffer_list_size(&nb->unmarkable);
        size += buffer_list_size(&nb->markable);
    }
    return size;
}

void
rb_ast_dispose(rb_ast_t *ast)
{
    rb_ast_free(ast);
}

void
rb_ast_add_mark_object(rb_ast_t *ast, VALUE obj)
{
    if (NIL_P(ast->node_buffer->mark_hash)) {
        RB_OBJ_WRITE(ast, &ast->node_buffer->mark_hash, rb_ident_hash_new());
    }
    rb_hash_aset(ast->node_buffer->mark_hash, obj, Qtrue);
}

void
rb_ast_delete_mark_object(rb_ast_t *ast, VALUE obj)
{
    if (NIL_P(ast->node_buffer->mark_hash)) return;
    rb_hash_delete(ast->node_buffer->mark_hash, obj);
}
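
/* Hypothetical usage sketch (not from the original): pinning an object
 * referenced from a node so the mark_hash keeps it alive for the AST's
 * lifetime, then unpinning it when the reference goes away. */
static void
example_pin_object(rb_ast_t *ast, VALUE obj)
{
    rb_ast_add_mark_object(ast, obj);    /* marked via rb_ast_mark() */
    /* ... use obj from a NODE ... */
    rb_ast_delete_mark_object(ast, obj); /* drop the pin */
}
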
VALUE
rb_ast_tokens(rb_ast_t *ast)
{
    return ast->node_buffer->tokens;
}

void
rb_ast_set_tokens(rb_ast_t *ast, VALUE tokens)
{
    RB_OBJ_WRITE(ast, &ast->node_buffer->tokens, tokens);
}

VALUE
rb_node_set_type(NODE *n, enum node_type t)
{
#if RUBY_DEBUG
    rb_ast_node_type_change(n, t);
#endif

    return nd_init_type(n, t);
}