3 * File name: ir/ir/irgwalk.c
5 * Author: Boris Boesler
6 * Modified by: Goetz Lindenmaier
9 * Copyright: (c) 1999-2003 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
14 * traverse an ir graph
15 * - execute the pre function before recursion
16 * - execute the post function after recursion
26 # include "irnode_t.h"
27 # include "irgraph_t.h" /* visited flag */
30 # include "typewalk.h"
31 # include "firmstat.h"
32 # include "ircgcons.h"
37 /* walk over an interprocedural graph (callgraph). Visits only graphs in irg_set. */
/* Depth-first pre/post walk over the interprocedural view.
 * Uses the explicit 'visited' counter (rather than each graph's own
 * counter) so a single pass can span several graphs; graph boundaries
 * (CallBegin / EndReg / EndExcept predecessors) are only crossed into
 * graphs contained in irg_set.  current_ir_graph is switched while
 * descending and restored from 'rem' before returning.
 * NOTE(review): this excerpt omits some original lines (declarations,
 * closing braces); comments describe only the visible code. */
38 static void irg_walk_cg(ir_node * node, int visited, eset * irg_set,
39 irg_walk_func *pre, irg_walk_func *post, void * env) {
41 ir_graph * rem = current_ir_graph;
44 assert(node && node->kind == k_ir_node);
/* Already seen in this pass?  Counter comparison replaces a bool flag. */
45 if (get_irn_visited(node) >= visited) return;
47 set_irn_visited(node, visited);
49 if (pre) pre(node, env);
/* If node (after skipping Projs) is an interprocedural cf op, make the
 * graph it belongs to current before walking on. */
51 pred = skip_Proj(node);
52 if (get_irn_op(pred) == op_CallBegin
53 || get_irn_op(pred) == op_EndReg
54 || get_irn_op(pred) == op_EndExcept) {
55 current_ir_graph = get_irn_irg(pred);
58 if (is_no_Block(node)) { /* not block */
59 irg_walk_cg(get_nodes_block(node), visited, irg_set, pre, post, env);
62 if (get_irn_op(node) == op_Block) { /* block */
/* Follow cf predecessors; cross into another graph only if that graph
 * is a member of irg_set. */
63 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
64 ir_node * exec = get_irn_n(node, i);
65 ir_node * pred = skip_Proj(exec);
66 if ((get_irn_op(pred) != op_CallBegin
67 && get_irn_op(pred) != op_EndReg
68 && get_irn_op(pred) != op_EndExcept)
69 || eset_contains(irg_set, get_irn_irg(pred))) {
70 irg_walk_cg(exec, visited, irg_set, pre, post, env);
73 } else if (get_irn_op(node) == op_Filter) { /* filter */
74 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
75 ir_node * pred = get_irn_n(node, i);
/* Unknown/Bad data predecessors carry no cf information: walk them directly. */
76 if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
77 irg_walk_cg(pred, visited, irg_set, pre, post, env);
/* Otherwise consult the matching cf predecessor of the Filter's block. */
80 exec = skip_Proj(get_Block_cfgpred(get_nodes_block(node), i));
82 if (op_Bad == get_irn_op (exec)) {
86 assert(get_irn_op(exec) == op_CallBegin
87 || get_irn_op(exec) == op_EndReg
88 || get_irn_op(exec) == op_EndExcept);
/* Only descend across the boundary when the target graph is in irg_set. */
89 if (eset_contains(irg_set, get_irn_irg(exec))) {
90 current_ir_graph = get_irn_irg(exec);
91 irg_walk_cg(pred, visited, irg_set, pre, post, env);
92 current_ir_graph = rem;
96 } else { /* everything else */
97 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
98 irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);
102 if (post) post(node, env);
104 current_ir_graph = rem;
108 /* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
/* Pre-walker: for every Call node, add each known callee graph to
 * irg_set and immediately walk the newly found graph with this same
 * function, so the set becomes transitively closed. */
109 static void collect_irgs(ir_node * node, eset * irg_set) {
110 if (get_irn_op(node) == op_Call) {
112 for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
113 entity * ent = get_Call_callee(node, i);
/* A callee entity may have no graph (e.g. external function). */
114 ir_graph * irg = ent ? get_entity_irg(ent) : NULL;
115 if (irg && !eset_contains(irg_set, irg)) {
116 eset_insert(irg_set, irg);
117 irg_walk_graph(irg, (irg_walk_func *) collect_irgs, NULL, irg_set);
/* Fast intraprocedural walk variant executing only the pre function.
 * NOTE(review): the pre(node, env) call itself is on a line not shown in
 * this excerpt (between the visited update and the block test). */
124 irg_walk_2_pre(ir_node *node, irg_walk_func *pre, void * env) {
126 set_irn_visited(node, current_ir_graph->visited);
/* Non-block nodes first visit their block (predecessor index -1). */
130 if (node->op != op_Block) {
131 ir_node *pred = get_irn_n(node, -1);
132 if (pred->visited < current_ir_graph->visited)
133 irg_walk_2_pre(pred, pre, env);
135 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
136 ir_node *pred = get_irn_n(node, i);
137 if (pred->visited < current_ir_graph->visited)
138 irg_walk_2_pre(pred, pre, env);
/* Fast intraprocedural walk variant executing only the post function
 * (after all predecessors were walked).
 * NOTE(review): the post(node, env) call is on a line not shown here. */
143 irg_walk_2_post(ir_node *node, irg_walk_func *post, void * env) {
145 set_irn_visited(node, current_ir_graph->visited);
/* Non-block nodes first visit their block (predecessor index -1). */
147 if (node->op != op_Block) {
148 ir_node *pred = get_irn_n(node, -1);
149 if (pred->visited < current_ir_graph->visited)
150 irg_walk_2_post(pred, post, env);
152 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
153 ir_node *pred = get_irn_n(node, i);
154 if (pred->visited < current_ir_graph->visited)
155 irg_walk_2_post(pred, post, env);
/* Fast intraprocedural walk variant executing both pre and post.
 * NOTE(review): the pre/post calls are on lines not shown here. */
162 irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env) {
164 set_irn_visited(node, current_ir_graph->visited);
/* Non-block nodes first visit their block (predecessor index -1). */
168 if (node->op != op_Block) {
169 ir_node *pred = get_irn_n(node, -1);
170 if (pred->visited < current_ir_graph->visited)
171 irg_walk_2_both(pred, pre, post, env);
173 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
174 ir_node *pred = get_irn_n(node, i);
175 if (pred->visited < current_ir_graph->visited)
176 irg_walk_2_both(pred, pre, post, env);
/* Dispatcher for the intraprocedural walk.  The original straightforward
 * implementation is kept under "#if 0" for reference; the live branch
 * selects one of the specialized walkers (pre only / post only / both)
 * so the function pointers are not re-tested at every node. */
184 irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
186 #if 0 /* safe, old */
188 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
189 set_irn_visited(node, get_irg_visited(current_ir_graph));
191 if (pre) pre(node, env);
193 if (is_no_Block(node))
194 irg_walk_2(get_nodes_block(node), pre, post, env);
195 for (i = get_irn_arity(node) - 1; i >= 0; --i)
196 irg_walk_2(get_irn_n(node, i), pre, post, env);
198 if (post) post(node, env);
202 if (node->visited < current_ir_graph->visited) {
204 set_irn_visited(node, current_ir_graph->visited);
206 if (pre) pre(node, env);
208 if (node->op != op_Block)
209 irg_walk_2(get_irn_n(node, -1), pre, post, env);
210 for (i = get_irn_arity(node) - 1; i >= 0; --i)
211 irg_walk_2(get_irn_n(node, i), pre, post, env);
213 if (post) post(node, env);
215 #else /* even faster */
216 if (node->visited < current_ir_graph->visited) {
217 if (!post) irg_walk_2_pre (node, pre, env);
218 else if (!pre) irg_walk_2_post(node, post, env);
219 else irg_walk_2_both(node, pre, post, env);
/* Public entry point: walk everything reachable from node.
 * Interprocedural mode: first collect all transitively called graphs
 * (with interprocedural_view temporarily disabled during collection),
 * give them all the same fresh visited counter, then run irg_walk_cg.
 * Intraprocedural mode: bump the graph's visited counter and run
 * irg_walk_2. */
226 void irg_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
228 assert(node && node->kind==k_ir_node);
230 if (interprocedural_view) {
231 eset * irg_set = eset_create();
234 assert(get_irp_ip_view_state() == ip_view_valid);
236 interprocedural_view = false;
237 eset_insert(irg_set, current_ir_graph);
238 irg_walk(node, (irg_walk_func *) collect_irgs, NULL, irg_set);
239 interprocedural_view = true;
/* One counter above every graph's current maximum marks this pass. */
240 visited = get_max_irg_visited() + 1;
241 for (irg = eset_first(irg_set); irg; irg = eset_next(irg_set)) {
242 set_irg_visited(irg, visited);
244 irg_walk_cg(node, visited, irg_set, pre, post, env);
245 eset_destroy(irg_set);
247 inc_irg_visited(current_ir_graph);
248 irg_walk_2(node, pre, post, env);
/* Walk irg starting at its End node; preserves current_ir_graph. */
254 void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
255 ir_graph * rem = current_ir_graph;
/* Statistics hook. */
257 stat_irg_walk(irg, (void *)pre, (void *)post);
258 current_ir_graph = irg;
259 irg_walk(get_irg_end(irg), pre, post, env);
260 current_ir_graph = rem;
263 /* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
264 Sets current_ir_graph properly for each walk. Conserves current
/* Iterates over every graph in the program and walks it from its End
 * node, restoring the caller's current_ir_graph afterwards. */
266 void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
270 rem = current_ir_graph;
272 for (i = 0; i < get_irp_n_irgs(); i++) {
273 irg = get_irp_irg(i);
274 current_ir_graph = irg;
275 irg_walk(get_irg_end(irg), pre, post, env);
277 current_ir_graph = rem;
280 /***************************************************************************/
282 /* Returns current_ir_graph and sets it to the irg of predecessor index
/* In interprocedural view, predecessor 'index' of a Block (or of the
 * block of a Filter) may live in another graph; if that cf op is an
 * interprocedural one, make its graph current.  The previous
 * current_ir_graph is returned so the caller can restore it. */
284 static INLINE ir_graph *
285 switch_irg (ir_node *n, int index) {
286 ir_graph *old_current = current_ir_graph;
288 if (interprocedural_view) {
289 /* Only Filter and Block nodes can have predecessors in other graphs. */
290 if (get_irn_op(n) == op_Filter)
291 n = get_nodes_block(n);
292 if (get_irn_op(n) == op_Block) {
293 ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
294 if (is_ip_cfop(cfop)) {
295 current_ir_graph = get_irn_irg(cfop);
/* Recursive helper of cg_walk: classic pre/post walk that temporarily
 * switches current_ir_graph per predecessor via switch_irg and restores
 * it after each recursion. */
304 cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
307 ir_graph *rem = NULL;
310 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
311 set_irn_visited(node, get_irg_visited(current_ir_graph));
313 if (pre) pre(node, env);
315 if (is_no_Block(node))
316 cg_walk_2(get_nodes_block(node), pre, post, env);
317 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
318 rem = switch_irg(node, i); /* @@@ AS: Is this wrong? We do have to
319 switch to the irg of the predecessor, don't we? */
320 cg_walk_2(get_irn_n(node, i), pre, post, env);
321 current_ir_graph = rem;
324 if (post) post(node, env);
330 /* Walks all irgs in interprocedural view. Visits each node only once. */
/* Three passes over all graphs so every node is reached:
 * 1) start at procedures never called from inside the program (their
 *    End is visible in the interprocedural view),
 * 2) catch procedures only reached through cyclic calls (start block
 *    still unvisited),
 * 3) walk keep-alive edges of Ends not reached yet (endless loops in
 *    inner procedures).  View flag and current graph are restored. */
331 void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
333 ir_graph *rem = current_ir_graph;
334 int rem_view = interprocedural_view;
336 interprocedural_view = true;
/* Give all graphs the same fresh visited counter so a single pass can
 * span them all. */
338 inc_max_irg_visited();
339 /* Fix all irg_visited flags */
340 for (i = 0; i < get_irp_n_irgs(); i++)
341 set_irg_visited(get_irp_irg(i), get_max_irg_visited());
343 /* Walk starting at unreachable procedures. Only these
344 * have End blocks visible in interprocedural view. */
345 for (i = 0; i < get_irp_n_irgs(); i++) {
347 current_ir_graph = get_irp_irg(i);
349 sb = get_irg_start_block(current_ir_graph);
/* Skip procedures that are called from somewhere: their start block has
 * extra cf predecessors. */
351 if ((get_Block_n_cfgpreds(sb) > 1) ||
352 (get_nodes_block(get_Block_cfgpred(sb, 0)) != sb)) continue;
354 cg_walk_2(get_irg_end(current_ir_graph), pre, post, env);
357 /* Check whether we walked all procedures: there could be procedures
358 with cyclic calls but no call from the outside. */
359 for (i = 0; i < get_irp_n_irgs(); i++) {
361 current_ir_graph = get_irp_irg(i);
363 /* Test start block: if inner procedure end and end block are not
364 * visible and therefore not marked. */
365 sb = get_irg_start_block(current_ir_graph);
366 if (get_irn_visited(sb) < get_irg_visited(current_ir_graph)) {
367 cg_walk_2(sb, pre, post, env);
371 /* Walk all endless loops in inner procedures.
372 * We recognize an inner procedure if the End node is not visited. */
373 for (i = 0; i < get_irp_n_irgs(); i++) {
375 current_ir_graph = get_irp_irg(i);
376 e = get_irg_end(current_ir_graph);
377 if (get_irn_visited(e) < get_irg_visited(current_ir_graph)) {
379 /* Don't visit the End node. */
380 for (j = 0; j < get_End_n_keepalives(e); j++)
381 cg_walk_2(get_End_keepalive(e, j), pre, post, env);
385 interprocedural_view = rem_view;
386 current_ir_graph = rem;
390 /***************************************************************************/
392 /* Walks back from n until it finds a real cf op. */
/* NOTE(review): the loop skeleton (Tuple/Proj skipping and the loop
 * statement itself) is on lines not shown in this excerpt; only the
 * termination condition -- stop at a cf op, a fragile op, or Bad --
 * is visible. */
393 static ir_node *get_cf_op(ir_node *n) {
399 if (!(is_cfop(pred) || is_fragile_op(pred) ||
400 (get_irn_op(pred) == op_Bad)))
/* Recursive block walk along cf predecessors.  Uses the separate
 * block-visited counter so it can be interleaved with the node walker. */
406 static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
410 if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) {
411 set_Block_block_visited(node, get_irg_block_visited(current_ir_graph));
413 if(pre) pre(node, env);
415 for(i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
416 /* find the corresponding predecessor block. */
417 ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
418 pred = get_nodes_block(pred);
419 if(get_irn_opcode(pred) == iro_Block) {
421 irg_block_walk_2(pred, pre, post, env);
/* Only Bad may remain here -- a dead cf edge is not followed. */
424 assert(get_irn_opcode(pred) == iro_Bad);
428 if(post) post(node, env);
434 /* walks only over Block nodes in the graph. Has it's own visited
435 flag, so that it can be interleaved with the other walker. */
436 void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
438 ir_node *block, *pred;
/* Statistics hook. */
441 stat_irg_block_walk(current_ir_graph, node, (void *)pre, (void *)post);
444 assert(!interprocedural_view); /* interprocedural_view not implemented, because it
445 * interleaves with irg_walk */
446 inc_irg_block_visited(current_ir_graph);
/* Start from node's block if node itself is not a Block. */
447 if (is_no_Block(node)) block = get_nodes_block(node); else block = node;
448 assert(get_irn_opcode(block) == iro_Block);
449 irg_block_walk_2(block, pre, post, env);
450 /* keepalive: the endless loops ... */
451 if (get_irn_op(node) == op_End) {
452 int arity = get_irn_arity(node);
453 for (i = 0; i < arity; i++) {
454 pred = get_irn_n(node, i);
/* Keep-alive edges may also reference non-block nodes; only blocks are walked. */
455 if (get_irn_op(pred) == op_Block)
456 irg_block_walk_2(pred, pre, post, env);
/* Block-walk irg starting at its End node; preserves current_ir_graph. */
464 void irg_block_walk_graph(ir_graph *irg, irg_walk_func *pre,
465 irg_walk_func *post, void *env) {
466 ir_graph * rem = current_ir_graph;
467 current_ir_graph = irg;
468 irg_block_walk(get_irg_end(irg), pre, post, env);
469 current_ir_graph = rem;
472 /********************************************************************/
474 typedef struct walk_env {
480 /* Walk to all constant expressions in this entity. */
/* Type-walker callback: runs the pre/post walkers stored in the walk_env
 * over the initializer expressions of an initialized entity -- the single
 * value of an atomic entity or all values of a compound one. */
481 static void walk_entity(entity *ent, void *env)
483 walk_env *my_env = (walk_env *)env;
485 if (get_entity_variability(ent) != variability_uninitialized) {
486 if (is_atomic_entity(ent)) {
487 irg_walk(get_atomic_ent_value(ent), my_env->pre, my_env->post, my_env->env);
490 int i, n = get_compound_ent_n_values(ent);
492 for (i = 0; i < n; i++)
493 irg_walk(get_compound_ent_value(ent, i), my_env->pre, my_env->post, my_env->env);
498 /* Walks over all code in const_code_irg. */
/* Switches to the const-code graph, walks the initializers of entities
 * in the global type, all program types and all frame types, then walks
 * constant array bounds, and finally restores current_ir_graph.
 * NOTE(review): the walk_env setup ('my_env') is on lines not shown in
 * this excerpt. */
499 void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env) {
503 ir_graph *rem = current_ir_graph;
504 current_ir_graph = get_const_code_irg();
505 inc_irg_visited(current_ir_graph);
511 /* Walk all types that can contain constant entities. */
512 walk_types_entities(get_glob_type(), &walk_entity, &my_env);
513 for (i = 0; i < get_irp_n_types(); i++)
514 walk_types_entities(get_irp_type(i), &walk_entity, &my_env);
515 for (i = 0; i < get_irp_n_irgs(); i++)
516 walk_types_entities(get_irg_frame_type(get_irp_irg(i)), &walk_entity, &my_env);
518 /* Walk constant array bounds. */
519 for (i = 0; i < get_irp_n_types(); i++) {
520 type *tp = get_irp_type(i);
521 if (is_array_type(tp)) {
522 for (j = 0; j < get_array_n_dimensions(tp); j++) {
/* Bounds may be absent (NULL) for open dimensions. */
524 n = get_array_lower_bound(tp, j);
525 if (n) irg_walk(n, pre, post, env);
526 n = get_array_upper_bound(tp, j);
527 if (n) irg_walk(n, pre, post, env);
532 current_ir_graph = rem;
536 /********************************************************************/
537 /** Walking support for interprocedural analysis **/
539 /** @@@ Don't use, not operational yet, doesn't grok recursions!! **/
540 /** @@@ Header for irgwalk.h, here until it works. **/
542 /** Interprocedural walking should not walk all predecessors of **/
543 /** all nodes. When leaving a procedure the walker should only **/
544 /** follow the edge corresponding to the most recent entry of the **/
545 /** procedure. The following functions use an internal stack to **/
546 /** remember the current call site of a procedure. **/
547 /** They also set current_ir_graph correctly. **/
549 /** Usage example: **/
551 /** void init_ip_walk (); **/
552 /** work_on_graph(some_end_node); **/
553 /** void finish_ip_walk(); **/
555 /** work_on_graph(ir_node *n) { **/
556 /** for (i = 0; i < get_irn_arity(n); i++) { **/
557 /** if (...) continue; **/
558 /** ir_node *m = get_irn_ip_pred(n, i); **/
559 /** if !m continue; **/
560 /** work_on_graph(m); **/
561 /** return_recur(n, i); **/
564 /********************************************************************/
566 /* Call for i in {0|-1 ... get_irn_arity(n)}.
567 If n is a conventional node returns the same node as get_irn_n(n, i).
568 If the predecessors of n are in the callee of the procedure n belongs
569 to, returns get_irn_n(n, i) if this node is in the callee on the top
570 of the stack, else returns NULL.
571 If the predecessors of n are in a procedure called by the procedure n
572 belongs to pushes the caller on the caller stack in the callee.
573 Sets current_ir_graph to the graph the node returned is in. */
574 ir_node *get_irn_ip_pred(ir_node *n, int pos);
576 /* If get_irn_ip_pred() returned a node (not NULL) this must be
577 called to clear up the stacks.
578 Sets current_ir_graph to the graph n is in. */
579 void return_recur(ir_node *n, int pos);
582 /********************************************************************/
583 /** Walking support for interprocedural analysis **/
584 /********************************************************************/
586 #define MIN_STACK_SIZE 40
588 typedef struct callsite_stack {
593 /* Access the stack in the irg **************************************/
/* Store the callsite stack for g -- presumably in the graph's link field
 * via set_irg_link (the matching getter reads get_irg_link); the body is
 * on lines not shown in this excerpt.  TODO(review): confirm. */
596 set_irg_callsite_stack(ir_graph *g, callsite_stack *s) {
/* Retrieve the callsite stack stored in the graph's link field. */
600 static INLINE callsite_stack *
601 get_irg_callsite_stack(ir_graph *g) {
602 return (callsite_stack *) get_irg_link(g);
605 /* A stack of callsites *********************************************/
607 /* @@@ eventually change the implementation so the new_ only sets the field
608 to NULL, and the stack is only allocated if used. Saves Memory! */
/* Allocate a fresh callsite stack (flexible array of MIN_STACK_SIZE
 * ir_node pointers) and register it in g's link field.
 * NOTE(review): initialization of the tos index and the return statement
 * are on lines not shown here; also the malloc result is used unchecked
 * -- consider an out-of-memory check. */
609 static INLINE callsite_stack *
610 new_callsite_stack(ir_graph *g) {
611 callsite_stack *res = (callsite_stack *)malloc(sizeof(callsite_stack));
613 res->s = NEW_ARR_F (ir_node *, MIN_STACK_SIZE);
614 set_irg_callsite_stack(g, res);
/* Release a graph's callsite stack.
 * NOTE(review): the actual array/struct deallocation statements are on
 * lines not shown in this excerpt. */
619 free_callsite_stack(ir_graph *g) {
620 callsite_stack *s = get_irg_callsite_stack(g);
/* Push callsite onto callee's stack, doubling the array when full.
 * NOTE(review): the tos increment is on a line not shown here. */
626 push_callsite(ir_graph *callee, ir_node *callsite) {
627 callsite_stack *s = get_irg_callsite_stack(callee);
628 if (s->tos == ARR_LEN(s->s)) {
629 int nlen = ARR_LEN (s->s) * 2;
630 ARR_RESIZE (ir_node *, s->s, nlen);
632 s->s[s->tos] = callsite;
/* Peek at the most recent callsite of callee without popping it. */
636 static INLINE ir_node *
637 get_top_of_callsite_stack(ir_graph *callee) {
638 callsite_stack *s = get_irg_callsite_stack(callee);
639 return (s->s[s->tos-1]);
/* Pop and return the most recent callsite of callee.
 * NOTE(review): the tos decrement is on a line not shown here
 * (presumably before the return, judging by the index used). */
643 ir_node * pop_callsite(ir_graph *callee) {
644 callsite_stack *s = get_irg_callsite_stack(callee);
646 return (s->s[s->tos]);
650 /* Initialization routines ******************************************/
/* Create a callsite stack for every graph in the program. */
652 void init_ip_walk (void) {
654 for (i = 0; i < get_irp_n_irgs(); i++)
655 new_callsite_stack(get_irp_irg(i));
/* Free all callsite stacks again.
 * NOTE(review): as excerpted, the set_irg_link(..., NULL) statement sits
 * outside the unbraced loop and would use the loop index one past its
 * last valid value -- looks like a genuine bug (it should clear every
 * graph's link inside the loop); verify against the full source. */
658 void finish_ip_walk(void) {
660 for (i = 0; i < get_irp_n_irgs(); i++)
661 free_callsite_stack(get_irp_irg(i));
662 set_irg_link(get_irp_irg(i), NULL);
665 /* walker routines **************************************************/
667 /* cf_pred is End* */
/* Crossing a procedure boundary backwards through an EndReg/EndExcept:
 * record the CallBegin through which the walk entered on that graph's
 * callsite stack and make cf_pred's graph current.
 * interprocedural_view is briefly disabled so get_irn_n/skip_Proj follow
 * the intraprocedural edges while locating the CallBegin. */
669 enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
671 ir_graph *irg = get_irn_irg(cf_pred);
673 assert(interprocedural_view);
675 interprocedural_view = 0;
676 callbegin = skip_Proj(get_irn_n(block, 0));
677 assert(get_irn_op(callbegin) == op_CallBegin);
678 interprocedural_view = 1;
680 push_callsite(irg, callbegin);
681 current_ir_graph = irg;
684 /* cf_pred is CallBegin */
/* Crossing back from a procedure at a CallBegin.  Only follow the edge
 * if it matches the callsite on top of this graph's stack -- i.e. the
 * call we actually entered through -- then pop it and switch to the
 * CallBegin's graph.
 * NOTE(review): the function's return statements are on lines not shown
 * here; callers treat a zero return as "do not follow this edge". */
686 leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
687 ir_node *tos = get_top_of_callsite_stack(current_ir_graph);
689 assert(get_irn_op(cf_pred) == op_CallBegin);
691 if (tos == cf_pred) {
692 /* We entered this procedure by the call pred pos refers to. */
693 pop_callsite(current_ir_graph);
694 current_ir_graph = get_CallBegin_irg(cf_pred);
/* Interprocedural-aware variant of get_irn_n (see header comment above):
 * maintains the callsite stacks when pos crosses a procedure boundary and
 * returns NULL when a CallBegin edge does not correspond to the most
 * recent entry of the current procedure. */
702 ir_node *get_irn_ip_pred(ir_node *n, int pos) {
704 if (interprocedural_view) {
706 /* Find the cf_pred refering to pos. */
709 if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
710 cf_pred = skip_Proj(get_irn_n(block, pos));
712 /* Check whether we enter or leave a procedure and act according. */
713 if ((get_irn_op(cf_pred) == op_EndReg) ||
714 (get_irn_op(cf_pred) == op_EndExcept))
715 enter_procedure(block, cf_pred, pos);
716 if (get_irn_op(cf_pred) == op_CallBegin)
717 if (!leave_procedure(block, cf_pred, pos)) return NULL;
720 return get_irn_n(n, pos);
/* Reverse bookkeeping for enter_procedure, run on the way back
 * (return_recur): pop the recorded CallBegin from the current graph's
 * stack and switch current_ir_graph to the CallBegin's graph. */
724 re_enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
725 ir_node *callbegin = pop_callsite(current_ir_graph);
726 assert(interprocedural_view);
727 current_ir_graph = get_CallBegin_irg(callbegin);
/* Reverse bookkeeping for leave_procedure: re-enter the callee the walk
 * came from.  The callee graph is looked up via the Proj number of the
 * block's interprocedural cf predecessor, and cf_pred is pushed back on
 * that callee's callsite stack. */
731 re_leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
735 assert(get_irn_op(cf_pred) == op_CallBegin);
737 /* Find the irg block is in. */
738 proj = get_Block_cg_cfgpred(block, pos);
739 assert(is_Proj(proj));
740 callee = get_entity_irg(get_Call_callee(get_CallBegin_call(cf_pred),
741 get_Proj_proj(proj)));
742 current_ir_graph = callee;
743 push_callsite(callee, cf_pred);
/* Clean up the callsite stacks after a recursion via get_irn_ip_pred
 * (see the header comment above).  No-op in intraprocedural view. */
747 return_recur(ir_node *n, int pos) {
751 if (!interprocedural_view) return;
753 /* Find the cf_pred refering to pos. */
755 if (get_irn_opcode(n) == iro_Filter) block = get_nodes_block(n);
756 cf_pred = skip_Proj(get_irn_n(block, pos));
758 /* Check whether we re_enter or re_leave a procedure and act according. */
759 if ((get_irn_op(cf_pred) == op_EndReg) ||
760 (get_irn_op(cf_pred) == op_EndExcept))
761 re_enter_procedure(block, cf_pred, pos);
762 if (get_irn_op(cf_pred) == op_CallBegin)
763 re_leave_procedure(block, cf_pred, pos);