3 * File name: ir/ir/irgwalk.c
5 * Author: Boris Boesler
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1999-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
14 * traverse an ir graph
15 * - execute the pre function before recursion
16 * - execute the post function after recursion
26 # include "irnode_t.h"
27 # include "irgraph_t.h" /* visited flag */
30 # include "typewalk.h"
35 /* walk over an interprocedural graph (callgraph). Visits only graphs in irg_set. */
36 static void irg_walk_cg(ir_node * node, int visited, eset * irg_set,
37 irg_walk_func *pre, irg_walk_func *post, void * env) {
39 ir_graph * rem = current_ir_graph;
42 assert(node && node->kind == k_ir_node);
43 if (get_irn_visited(node) >= visited) return;
45 set_irn_visited(node, visited);
47 pred = skip_Proj(node);
48 if (intern_get_irn_op(pred) == op_CallBegin
49 || intern_get_irn_op(pred) == op_EndReg
50 || intern_get_irn_op(pred) == op_EndExcept) {
51 current_ir_graph = get_irn_irg(pred);
54 if (pre) pre(node, env);
56 if (is_no_Block(node))
57 irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env);
59 if (intern_get_irn_op(node) == op_Block) { /* block */
60 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
61 ir_node * exec = intern_get_irn_n(node, i);
62 ir_node * pred = skip_Proj(exec);
63 if ((intern_get_irn_op(pred) != op_CallBegin
64 && intern_get_irn_op(pred) != op_EndReg
65 && intern_get_irn_op(pred) != op_EndExcept)
66 || eset_contains(irg_set, get_irn_irg(pred))) {
67 irg_walk_cg(exec, visited, irg_set, pre, post, env);
70 } else if (intern_get_irn_op(node) == op_Filter) {
71 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
72 ir_node * pred = intern_get_irn_n(node, i);
73 if (intern_get_irn_op(pred) == op_Unknown || intern_get_irn_op(pred) == op_Bad) {
74 irg_walk_cg(pred, visited, irg_set, pre, post, env);
77 exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i));
78 assert(intern_get_irn_op(exec) == op_CallBegin
79 || intern_get_irn_op(exec) == op_EndReg
80 || intern_get_irn_op(exec) == op_EndExcept);
81 if (eset_contains(irg_set, get_irn_irg(exec))) {
82 current_ir_graph = get_irn_irg(exec);
83 irg_walk_cg(pred, visited, irg_set, pre, post, env);
84 current_ir_graph = rem;
89 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
90 irg_walk_cg(intern_get_irn_n(node, i), visited, irg_set, pre, post, env);
94 if (post) post(node, env);
96 current_ir_graph = rem;
100 /* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
101 static void collect_irgs(ir_node * node, eset * irg_set) {
102 if (intern_get_irn_op(node) == op_Call) {
104 for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
105 entity * ent = get_Call_callee(node, i);
106 ir_graph * irg = ent ? get_entity_irg(ent) : NULL;
107 if (irg && !eset_contains(irg_set, irg)) {
108 eset_insert(irg_set, irg);
109 irg_walk_graph(irg, (irg_walk_func *) collect_irgs, NULL, irg_set);
/* Intraprocedural walker: visits node and, recursively, all of its
   predecessors; calls pre before and post after the recursion.
   NOTE(review): this fragment shows two equivalent variants of the body
   (original lines 121-131 and 134-144); presumably they were selected by
   preprocessor conditionals on lines not visible here -- confirm against
   the full file. */
116 irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
119 assert(node && node->kind==k_ir_node);
/* Variant 1: uses the accessor functions for the visited flags. */
121 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
122 set_irn_visited(node, get_irg_visited(current_ir_graph));
124 if (pre) pre(node, env);
/* Visit the node's block first, then all predecessors right-to-left. */
126 if (is_no_Block(node))
127 irg_walk_2(get_nodes_block(node), pre, post, env);
128 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
129 irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
131 if (post) post(node, env);
/* Variant 2: accesses the visited fields directly and reaches the
   block via predecessor index -1 instead of get_nodes_block(). */
134 if (node->visited < current_ir_graph->visited) {
135 set_irn_visited(node, current_ir_graph->visited);
137 if (pre) pre(node, env);
139 if (node->op != op_Block)
140 irg_walk_2(intern_get_irn_n(node, -1), pre, post, env);
141 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i)
142 irg_walk_2(intern_get_irn_n(node, i), pre, post, env);
144 if (post) post(node, env);
150 void irg_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
152 assert(node && node->kind==k_ir_node);
153 if (interprocedural_view) {
154 eset * irg_set = eset_create();
157 interprocedural_view = false;
158 eset_insert(irg_set, current_ir_graph);
159 irg_walk(node, (irg_walk_func *) collect_irgs, NULL, irg_set);
160 interprocedural_view = true;
161 visited = get_max_irg_visited() + 1;
162 for (irg = eset_first(irg_set); irg; irg = eset_next(irg_set)) {
163 set_irg_visited(irg, visited);
165 irg_walk_cg(node, visited, irg_set, pre, post, env);
166 eset_destroy(irg_set);
168 inc_irg_visited(current_ir_graph);
169 irg_walk_2(node, pre, post, env);
175 void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
176 ir_graph * rem = current_ir_graph;
177 current_ir_graph = irg;
178 irg_walk(get_irg_end(irg), pre, post, env);
179 current_ir_graph = rem;
182 /* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
183 Sets current_ir_graph properly for each walk. Conserves current
185 void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
189 rem = current_ir_graph;
191 for (i = 0; i < get_irp_n_irgs(); i++) {
192 irg = get_irp_irg(i);
193 current_ir_graph = irg;
194 irg_walk(get_irg_end(irg), pre, post, env);
196 current_ir_graph = rem;
199 /***************************************************************************/
201 /* Returns current_ir_graph and sets it to the irg of predecessor index
203 static INLINE ir_graph *
204 switch_irg (ir_node *n, int index) {
205 ir_graph *old_current = current_ir_graph;
207 if (interprocedural_view) {
208 /* Only Filter and Block nodes can have predecessors in other graphs. */
209 if (intern_get_irn_op(n) == op_Filter)
210 n = get_nodes_block(n);
211 if (intern_get_irn_op(n) == op_Block) {
212 ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
213 if (is_ip_cfop(cfop)) {
214 current_ir_graph = get_irn_irg(cfop);
223 cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
226 ir_graph *rem = NULL;
229 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
230 set_irn_visited(node, get_irg_visited(current_ir_graph));
232 if (pre) pre(node, env);
234 if (is_no_Block(node))
235 cg_walk_2(get_nodes_block(node), pre, post, env);
236 for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
237 rem = switch_irg(node, i); /* @@@ AS: Is this wrong? We do have to
238 switch to the irg of the predecessor, don't we? */
239 cg_walk_2(intern_get_irn_n(node, i), pre, post, env);
240 current_ir_graph = rem;
243 if (post) post(node, env);
249 /* Walks all irgs in interprocedural view. Visits each node only once. */
250 void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
252 ir_graph *rem = current_ir_graph;
253 int rem_view = interprocedural_view;
255 interprocedural_view = true;
257 inc_max_irg_visited();
258 /* Fix all irg_visited flags */
259 for (i = 0; i < get_irp_n_irgs(); i++)
260 set_irg_visited(get_irp_irg(i), get_max_irg_visited());
262 /* Walk starting at unreachable procedures. Only these
263 * have End blocks visible in interprocedural view. */
264 for (i = 0; i < get_irp_n_irgs(); i++) {
266 current_ir_graph = get_irp_irg(i);
268 sb = get_irg_start_block(current_ir_graph);
270 if ((get_Block_n_cfgpreds(sb) > 1) ||
271 (get_nodes_block(get_Block_cfgpred(sb, 0)) != sb)) continue;
273 cg_walk_2(get_irg_end(current_ir_graph), pre, post, env);
276 /* Check whether we walked all procedures: there could be procedures
277 with cyclic calls but no call from the outside. */
278 for (i = 0; i < get_irp_n_irgs(); i++) {
280 current_ir_graph = get_irp_irg(i);
282 /* Test start block: if inner procedure end and end block are not
283 * visible and therefore not marked. */
284 sb = get_irg_start_block(current_ir_graph);
285 if (get_irn_visited(sb) < get_irg_visited(current_ir_graph)) {
286 cg_walk_2(sb, pre, post, env);
290 /* Walk all endless loops in inner procedures.
291 * We recognize an inner procedure if the End node is not visited. */
292 for (i = 0; i < get_irp_n_irgs(); i++) {
294 current_ir_graph = get_irp_irg(i);
295 e = get_irg_end(current_ir_graph);
296 if (get_irn_visited(e) < get_irg_visited(current_ir_graph)) {
298 /* Don't visit the End node. */
299 for (j = 0; j < get_End_n_keepalives(e); j++)
300 cg_walk_2(get_End_keepalive(e, j), pre, post, env);
304 interprocedural_view = rem_view;
305 current_ir_graph = rem;
309 /***************************************************************************/
311 /* Walks back from n until it finds a real cf op. */
312 static ir_node *get_cf_op(ir_node *n) {
/* NOTE(review): the lines that declare pred and skip wrapper nodes are
   missing from this fragment (original lines 313-317); the visible
   condition recurses while pred is neither a control flow op, a fragile
   op, nor Bad -- confirm against the full file. */
318 if (!(is_cfop(pred) || is_fragile_op(pred) ||
319 (intern_get_irn_op(pred) == op_Bad)))
325 static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
329 if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) {
330 set_Block_block_visited(node, get_irg_block_visited(current_ir_graph));
332 if(pre) pre(node, env);
334 for(i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
335 /* find the corresponding predecessor block. */
336 ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
337 pred = get_nodes_block(pred);
338 if(intern_get_irn_opcode(pred) == iro_Block) {
340 irg_block_walk_2(pred, pre, post, env);
343 assert(get_irn_opcode(pred) == iro_Bad);
347 if(post) post(node, env);
353 /* walks only over Block nodes in the graph. Has it's own visited
354 flag, so that it can be interleaved with the other walker. */
355 void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
357 ir_node *block, *pred;
361 assert(!interprocedural_view); /* interprocedural_view not implemented, because it
362 * interleaves with irg_walk */
363 inc_irg_block_visited(current_ir_graph);
364 if (is_no_Block(node)) block = get_nodes_block(node); else block = node;
365 assert(get_irn_opcode(block) == iro_Block);
366 irg_block_walk_2(block, pre, post, env);
367 /* keepalive: the endless loops ... */
368 if (intern_get_irn_op(node) == op_End) {
369 int arity = intern_get_irn_arity(node);
370 for (i = 0; i < arity; i++) {
371 pred = intern_get_irn_n(node, i);
372 if (intern_get_irn_op(pred) == op_Block)
373 irg_block_walk_2(pred, pre, post, env);
381 void irg_block_walk_graph(ir_graph *irg, irg_walk_func *pre,
382 irg_walk_func *post, void *env) {
383 ir_graph * rem = current_ir_graph;
384 current_ir_graph = irg;
385 irg_block_walk(get_irg_end(irg), pre, post, env);
386 current_ir_graph = rem;
389 /********************************************************************/
391 typedef struct walk_env {
397 /* Walk to all constant expressions in this entity. */
398 static void walk_entity(entity *ent, void *env)
400 walk_env *my_env = (walk_env *)env;
402 if (get_entity_variability(ent) != variability_uninitialized) {
403 if (is_atomic_entity(ent)) {
404 irg_walk(get_atomic_ent_value(ent), my_env->pre, my_env->post, my_env->env);
407 int i, n = get_compound_ent_n_values(ent);
409 for (i = 0; i < n; i++)
410 irg_walk(get_compound_ent_value(ent, i), my_env->pre, my_env->post, my_env->env);
415 /* Walks over all code in const_code_irg. */
416 void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env) {
420 ir_graph *rem = current_ir_graph;
421 current_ir_graph = get_const_code_irg();
422 inc_irg_visited(current_ir_graph);
428 /* Walk all types that can contain constant entities. */
429 walk_types_entities(get_glob_type(), &walk_entity, &my_env);
430 for (i = 0; i < get_irp_n_types(); i++)
431 walk_types_entities(get_irp_type(i), &walk_entity, &my_env);
432 for (i = 0; i < get_irp_n_irgs(); i++)
433 walk_types_entities(get_irg_frame_type(get_irp_irg(i)), &walk_entity, &my_env);
435 /* Walk constant array bounds. */
436 for (i = 0; i < get_irp_n_types(); i++) {
437 type *tp = get_irp_type(i);
438 if (is_array_type(tp)) {
439 for (j = 0; j < get_array_n_dimensions(tp); j++) {
441 n = get_array_lower_bound(tp, j);
442 if (n) irg_walk(n, pre, post, env);
443 n = get_array_upper_bound(tp, j);
444 if (n) irg_walk(n, pre, post, env);
449 current_ir_graph = rem;
453 /********************************************************************/
454 /** Walking support for interprocedural analysis **/
456 /** @@@ Don't use, not operational yet, doesn't grok recursions!! **/
457 /** @@@ Header for irgwalk.h, here until it works. **/
459 /** Interprocedural walking should not walk all predecessors of **/
460 /** all nodes. When leaving a procedure the walker should only **/
461 /** follow the edge corresponding to the most recent entry of the **/
462 /** procedure. The following functions use an internal stack to **/
463 /** remember the current call site of a procedure. **/
464 /** They also set current_ir_graph correctly. **/
466 /** Usage example: **/
468 /** void init_ip_walk (); **/
469 /** work_on_graph(some_end_node); **/
470 /** void finish_ip_walk(); **/
472 /** work_on_graph(ir_node *n) { **/
473 /** for (i = 0; i < get_irn_arity(n); i++) { **/
474 /** if (...) continue; **/
475 /** ir_node *m = get_irn_ip_pred(n, i); **/
476 /** if !m continue; **/
477 /** work_on_graph(m); **/
478 /** return_recur(n, i); **/
481 /********************************************************************/
483 /* Call for i in {0|-1 ... get_irn_arity(n)}.
484 If n is a conventional node returns the same node as get_irn_n(n, i).
485 If the predecessors of n are in the callee of the procedure n belongs
486 to, returns get_irn_n(n, i) if this node is in the callee on the top
487 of the stack, else returns NULL.
488 If the predecessors of n are in a procedure called by the procedure n
489 belongs to pushes the caller on the caller stack in the callee.
490 Sets current_ir_graph to the graph the node returned is in. */
491 ir_node *get_irn_ip_pred(ir_node *n, int pos);
493 /* If get_irn_ip_pred() returned a node (not NULL) this must be
494 called to clear up the stacks.
495 Sets current_ir_graph to the graph n is in. */
496 void return_recur(ir_node *n, int pos);
499 /********************************************************************/
500 /** Walking support for interprocedural analysis **/
501 /********************************************************************/
503 #define MIN_STACK_SIZE 40
505 typedef struct callsite_stack {
510 /* Access the stack in the irg **************************************/
513 set_irg_callsite_stack(ir_graph *g, callsite_stack *s) {
517 static INLINE callsite_stack *
518 get_irg_callsite_stack(ir_graph *g) {
519 return (callsite_stack *) get_irg_link(g);
522 /* A stack of callsites *********************************************/
524 /* @@@ eventually change the implementation so the new_ only sets the field
525 to NULL, and the stack is only allocated if used. Saves Memory! */
526 static INLINE callsite_stack *
527 new_callsite_stack(ir_graph *g) {
528 callsite_stack *res = (callsite_stack *)malloc(sizeof(callsite_stack));
530 res->s = NEW_ARR_F (ir_node *, MIN_STACK_SIZE);
531 set_irg_callsite_stack(g, res);
536 free_callsite_stack(ir_graph *g) {
537 callsite_stack *s = get_irg_callsite_stack(g);
543 push_callsite(ir_graph *callee, ir_node *callsite) {
544 callsite_stack *s = get_irg_callsite_stack(callee);
545 if (s->tos == ARR_LEN(s->s)) {
546 int nlen = ARR_LEN (s->s) * 2;
547 ARR_RESIZE (ir_node *, s->s, nlen);
549 s->s[s->tos] = callsite;
553 static INLINE ir_node *
554 get_top_of_callsite_stack(ir_graph *callee) {
555 callsite_stack *s = get_irg_callsite_stack(callee);
556 return (s->s[s->tos-1]);
560 ir_node * pop_callsite(ir_graph *callee) {
561 callsite_stack *s = get_irg_callsite_stack(callee);
563 return (s->s[s->tos]);
/* Initialization routines ******************************************/

/* Allocates a callsite stack for every graph in irprog. */
void init_ip_walk (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    new_callsite_stack(get_irp_irg(i));
}
575 void finish_ip_walk(void) {
577 for (i = 0; i < get_irp_n_irgs(); i++)
578 free_callsite_stack(get_irp_irg(i));
579 set_irg_link(get_irp_irg(i), NULL);
582 /* walker routines **************************************************/
584 /* cf_pred is End* */
586 enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
588 ir_graph *irg = get_irn_irg(cf_pred);
590 assert(interprocedural_view);
592 interprocedural_view = 0;
593 callbegin = skip_Proj(intern_get_irn_n(block, 0));
594 assert(intern_get_irn_op(callbegin) == op_CallBegin);
595 interprocedural_view = 1;
597 push_callsite(irg, callbegin);
598 current_ir_graph = irg;
601 /* cf_pred is CallBegin */
603 leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
604 ir_node *tos = get_top_of_callsite_stack(current_ir_graph);
606 assert(get_irn_op(cf_pred) == op_CallBegin);
608 if (tos == cf_pred) {
609 /* We entered this procedure by the call pred pos refers to. */
610 pop_callsite(current_ir_graph);
611 current_ir_graph = get_CallBegin_irg(cf_pred);
619 ir_node *get_irn_ip_pred(ir_node *n, int pos) {
621 if (interprocedural_view) {
623 /* Find the cf_pred refering to pos. */
626 if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
627 cf_pred = skip_Proj(intern_get_irn_n(block, pos));
629 /* Check whether we enter or leave a procedure and act according. */
630 if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
631 (intern_get_irn_op(cf_pred) == op_EndExcept))
632 enter_procedure(block, cf_pred, pos);
633 if (intern_get_irn_op(cf_pred) == op_CallBegin)
634 if (!leave_procedure(block, cf_pred, pos)) return NULL;
637 return intern_get_irn_n(n, pos);
641 re_enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
642 ir_node *callbegin = pop_callsite(current_ir_graph);
643 assert(interprocedural_view);
644 current_ir_graph = get_CallBegin_irg(callbegin);
648 re_leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
652 assert(get_irn_op(cf_pred) == op_CallBegin);
654 /* Find the irg block is in. */
655 proj = get_Block_cg_cfgpred(block, pos);
656 assert(is_Proj(proj));
657 callee = get_entity_irg(get_Call_callee(get_CallBegin_call(cf_pred),
658 get_Proj_proj(proj)));
659 current_ir_graph = callee;
660 push_callsite(callee, cf_pred);
664 return_recur(ir_node *n, int pos) {
668 if (!interprocedural_view) return;
670 /* Find the cf_pred refering to pos. */
672 if (intern_get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
673 cf_pred = skip_Proj(intern_get_irn_n(block, pos));
675 /* Check whether we re_enter or re_leave a procedure and act according. */
676 if ((intern_get_irn_op(cf_pred) == op_EndReg) ||
677 (intern_get_irn_op(cf_pred) == op_EndExcept))
678 re_enter_procedure(block, cf_pred, pos);
679 if (intern_get_irn_op(cf_pred) == op_CallBegin)
680 re_leave_procedure(block, cf_pred, pos);