1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Boris Boesler
6 ** traverse an ir graph
7 ** - execute the pre function before recursion
8 ** - execute the post function after recursion
20 # include "irgraph.h" /* visited flag */
27 /* walk over an interprocedural graph (callgraph). Visits only graphs in irg_set. */
/* Recursive walker for the interprocedural view: calls pre before and post
 * after visiting the predecessors.  Uses the explicit 'visited' counter so
 * one walk can span several graphs; current_ir_graph is switched whenever
 * control crosses a graph boundary and restored before returning.
 * NOTE(review): some lines (declarations of 'pred'/'i'/'exec', several
 * closing braces) appear to be elided in this copy of the file. */
28 static void irg_walk_cg(ir_node * node, int visited, eset * irg_set,
29 irg_walk_func *pre, irg_walk_func *post, void * env) {
31 ir_graph * rem = current_ir_graph;
/* Already seen under this walk's visited counter? */
35 if (get_irn_visited(node) >= visited) return;
37 set_irn_visited(node, visited);
/* Interprocedural control-flow ops belong to the callee's graph. */
39 pred = skip_Proj(node);
40 if (get_irn_op(pred) == op_CallBegin
41 || get_irn_op(pred) == op_EndReg
42 || get_irn_op(pred) == op_EndExcept) {
43 current_ir_graph = get_irn_irg(pred);
46 if (pre) pre(node, env);
/* Visit the block of a non-block node first. */
48 if (is_no_Block(node))
49 irg_walk_cg(get_nodes_Block(node), visited, irg_set, pre, post, env);
51 if (get_irn_op(node) == op_Block) { /* block */
52 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
53 ir_node * exec = get_irn_n(node, i);
54 ir_node * pred = skip_Proj(exec);
/* Follow interprocedural cf predecessors only into graphs of irg_set. */
55 if ((get_irn_op(pred) != op_CallBegin
56 && get_irn_op(pred) != op_EndReg
57 && get_irn_op(pred) != op_EndExcept)
58 || eset_contains(irg_set, get_irn_irg(pred))) {
59 irg_walk_cg(exec, visited, irg_set, pre, post, env);
62 } else if (get_irn_op(node) == op_Filter) {
63 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
64 ir_node * pred = get_irn_n(node, i);
/* Unknown/Bad predecessors stay in the current graph. */
65 if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
66 irg_walk_cg(pred, visited, irg_set, pre, post, env);
/* Otherwise locate the interprocedural cf op that selects this input
 * and walk the data predecessor inside the corresponding graph. */
69 exec = skip_Proj(get_Block_cfgpred(get_nodes_Block(node), i));
70 assert(get_irn_op(exec) == op_CallBegin
71 || get_irn_op(exec) == op_EndReg
72 || get_irn_op(exec) == op_EndExcept);
73 if (eset_contains(irg_set, get_irn_irg(exec))) {
74 current_ir_graph = get_irn_irg(exec);
75 irg_walk_cg(pred, visited, irg_set, pre, post, env);
76 current_ir_graph = rem;
/* Ordinary node: walk all predecessors. */
81 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
82 irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);
86 if (post) post(node, env);
88 current_ir_graph = rem;
92 /* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
/* Walker callback: for every Call node, add each callee graph that is not
 * yet in irg_set and recurse into it with another irg_walk_graph(), so the
 * set closes transitively over the call graph.
 * NOTE(review): the declaration of 'i' and closing braces appear to be
 * elided in this copy. */
93 static void collect_irgs(ir_node * node, eset * irg_set) {
94 if (get_irn_op(node) == op_Call) {
96 for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
97 entity * ent = get_Call_callee(node, i);
/* Callee may be unknown (NULL entity) or external (no graph). */
98 ir_graph * irg = ent ? get_entity_irg(ent) : NULL;
99 if (irg && !eset_contains(irg_set, irg)) {
100 eset_insert(irg_set, irg);
101 irg_walk_graph(irg, (irg_walk_func *) collect_irgs, NULL, irg_set);
/* Intraprocedural recursive walker: pre-order call of 'pre', recursion over
 * the node's block and all predecessors, post-order call of 'post'.  Uses
 * the graph's visited flag, so callers must bump it (inc_irg_visited)
 * before starting a walk. */
107 void irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
112 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
113 set_irn_visited(node, get_irg_visited(current_ir_graph));
115 if (pre) pre(node, env);
/* Visit the node's block before its other predecessors. */
117 if (is_no_Block(node))
118 irg_walk_2(get_nodes_Block(node), pre, post, env);
119 for (i = get_irn_arity(node) - 1; i >= 0; --i)
120 irg_walk_2(get_irn_n(node, i), pre, post, env);
122 if (post) post(node, env);
/* Generic walker entry point.  In the interprocedural view it first collects
 * all transitively reachable graphs (collect_irgs), then walks them with a
 * fresh global visited counter (irg_walk_cg); otherwise it increments the
 * current graph's visited flag and runs the plain irg_walk_2(). */
128 void irg_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
131 if (interprocedural_view) {
132 eset * irg_set = eset_create();
/* Collect the reachable graphs with an intraprocedural walk. */
135 interprocedural_view = false;
136 eset_insert(irg_set, current_ir_graph);
137 irg_walk(node, (irg_walk_func *) collect_irgs, NULL, irg_set);
138 interprocedural_view = true;
/* A counter above every per-graph visited value marks this walk. */
139 visited = get_max_irg_visited() + 1;
140 irg_walk_cg(node, visited, irg_set, pre, post, env);
/* Bring every collected graph's visited counter up to date. */
141 for (irg = eset_first(irg_set); irg; irg = eset_next(irg_set)) {
142 set_irg_visited(irg, visited);
144 eset_destroy(irg_set);
146 inc_irg_visited(current_ir_graph);
147 irg_walk_2(node, pre, post, env);
/* Walks the whole graph 'irg' starting at its End node, with
 * current_ir_graph temporarily set to irg and restored afterwards. */
153 void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
154 ir_graph * rem = current_ir_graph;
155 current_ir_graph = irg;
156 irg_walk(get_irg_end(irg), pre, post, env);
157 current_ir_graph = rem;
160 /* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
161 Sets current_ir_graph properly for each walk. Conserves current
163 void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
/* Save the caller's current_ir_graph and restore it at the end. */
167 rem = current_ir_graph;
169 for (i = 0; i < get_irp_n_irgs(); i++) {
170 irg = get_irp_irg(i);
171 current_ir_graph = irg;
172 irg_walk(get_irg_end(irg), pre, post, env);
174 current_ir_graph = rem;
177 /***************************************************************************/
179 /* Returns current_ir_graph and sets it to the irg of predecessor index
/* In the interprocedural view, predecessor 'index' of n may live in another
 * graph; if so, make that graph current.  Always returns the previous
 * current_ir_graph so the caller can restore it after the recursion. */
181 static INLINE ir_graph *
182 switch_irg (ir_node *n, int index) {
183 ir_graph *old_current = current_ir_graph;
185 if (interprocedural_view) {
186 /* Only Filter and Block nodes can have predecessors in other graphs. */
187 if (get_irn_op(n) == op_Filter)
188 n = get_nodes_Block(n);
189 if (get_irn_op(n) == op_Block) {
190 ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
191 if (is_ip_cfop(cfop)) {
192 current_ir_graph = get_irn_irg(cfop);
/* Recursive walker behind cg_walk(): like irg_walk_2, but switches
 * current_ir_graph per predecessor via switch_irg() and restores it after
 * each recursion. */
200 void cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
203 ir_graph *rem = NULL;
206 if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
207 set_irn_visited(node, get_irg_visited(current_ir_graph));
209 if (pre) pre(node, env);
211 if (is_no_Block(node))
212 cg_walk_2(get_nodes_Block(node), pre, post, env);
213 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
/* The predecessor may be in another graph -- switch and restore. */
214 rem = switch_irg(node, i);
215 cg_walk_2(get_irn_n(node, i), pre, post, env);
216 current_ir_graph = rem;
219 if (post) post(node, env);
225 /* Walks all irgs in interprocedural view. Visits each node only once. */
226 void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
228 ir_graph *rem = current_ir_graph;
229 int rem_view = interprocedural_view;
231 interprocedural_view = true;
233 inc_max_irg_visited();
234 /* Fix all irg_visited flags */
/* Give every graph the same (new) visited counter so one walk covers all. */
235 for (i = 0; i < get_irp_n_irgs(); i++)
236 set_irg_visited(get_irp_irg(i), get_max_irg_visited());
238 /* Walk starting at unreachable procedures. */
239 for (i = 0; i < get_irp_n_irgs(); i++) {
241 current_ir_graph = get_irp_irg(i);
/* Skip graphs whose start block has interprocedural predecessors:
 * those are reached from a caller and walked from there. */
243 sb = get_irg_start_block(current_ir_graph);
244 if ((get_Block_n_cfgpreds(sb) > 1) ||
245 (get_nodes_Block(get_Block_cfgpred(sb, 0)) != sb)) continue;
247 cg_walk_2(get_irg_end(current_ir_graph), pre, post, env);
250 interprocedural_view = rem_view;
251 current_ir_graph = rem;
255 /***************************************************************************/
257 /* Walks back from n until it finds a real cf op. */
/* NOTE(review): the loop body (the skipping of non-cf nodes and the final
 * return) appears to be elided in this copy of the file; only the
 * termination condition is visible. */
258 ir_node *get_cf_op(ir_node *n) {
264 if (!(is_cfop(pred) || is_fragile_op(pred) ||
265 (get_irn_op(pred) == op_Bad)))
/* Recursive block walker: visits 'node' (must be a Block) and recurses to
 * all control-flow predecessor blocks.  Uses the separate block_visited
 * flag so it can be interleaved with the node walker. */
271 void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
274 assert(get_irn_opcode(node) == iro_Block);
276 if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) {
277 set_Block_block_visited(node, get_irg_block_visited(current_ir_graph));
279 if(pre) pre(node, env);
281 for(i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
282 /* find the corresponding predecessor block. */
283 ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
284 pred = get_nodes_Block(pred);
285 if(get_irn_opcode(pred) == iro_Block) {
287 irg_block_walk_2(pred, pre, post, env);
/* Anything that is not a Block here must be Bad. */
290 assert(get_irn_opcode(pred) == iro_Bad);
294 if(post) post(node, env);
300 /* walks only over Block nodes in the graph. Has its own visited
301 flag, so that it can be interleaved with the other walker. */
302 void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
304 ir_node *block, *pred;
308 assert(!interprocedural_view); /* interprocedural_view not implemented, because it
309 * interleaves with irg_walk */
310 inc_irg_block_visited(current_ir_graph);
/* Start at the node's block (or the node itself if it is a Block). */
311 if (is_no_Block(node)) block = get_nodes_Block(node); else block = node;
312 assert(get_irn_opcode(block) == iro_Block);
313 irg_block_walk_2(block, pre, post, env);
314 /* keepalive: the endless loops ... */
/* End keeps otherwise unreachable blocks alive; walk those too. */
315 if (get_irn_op(node) == op_End)
316 for (i = 0; i < get_irn_arity(node); i++) {
317 pred = get_irn_n(node, i);
318 if (get_irn_op(pred) == op_Block)
319 irg_block_walk_2(pred, pre, post, env);
/* Block walk over the whole graph 'irg', starting at its End node;
 * current_ir_graph is saved and restored around the walk. */
326 void irg_block_walk_graph(ir_graph *irg, irg_walk_func *pre,
327 irg_walk_func *post, void *env) {
328 ir_graph * rem = current_ir_graph;
329 current_ir_graph = irg;
330 irg_block_walk(get_irg_end(irg), pre, post, env);
331 current_ir_graph = rem;
334 /********************************************************************/
/* Environment handed through walk_types_entities() to walk_entity(): the
 * user's pre/post callbacks and env pointer (field list elided in this
 * copy of the file). */
336 typedef struct walk_env {
342 /* Walk to all constant expressions in this entity. */
/* Type-walker callback: runs irg_walk over the initializer expressions of
 * every initialized entity -- the single value of an atomic entity, or all
 * values of a compound one.  env is a walk_env carrying the real
 * callbacks.  NOTE(review): the declaration of 'i' and closing braces
 * appear to be elided in this copy. */
343 void walk_entity(entity *ent, void *env) {
344 walk_env *my_env = (walk_env *)env;
345 if (get_entity_variability(ent) != uninitialized) {
346 if (is_atomic_entity(ent)) {
347 irg_walk(get_atomic_ent_value(ent), my_env->pre, my_env->post, my_env->env);
350 for (i = 0; i < get_compound_ent_n_values(ent); i++) {
351 irg_walk(get_compound_ent_value(ent, i), my_env->pre, my_env->post, my_env->env);
357 /* Walks over all code in const_code_irg. */
/* Visits every node of the pseudo graph holding constant expressions:
 * entity initializers in the global type, all program types and all frame
 * types, plus constant array bounds.  current_ir_graph is switched to the
 * const-code irg for the duration and restored at the end. */
358 void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env) {
362 ir_graph *rem = current_ir_graph;
363 current_ir_graph = get_const_code_irg();
364 inc_irg_visited(current_ir_graph);
370 /* Walk all types that can contain constant entities. */
371 walk_types_entities(get_glob_type(), &walk_entity, &my_env);
372 for (i = 0; i < get_irp_n_types(); i++)
373 walk_types_entities(get_irp_type(i), &walk_entity, &my_env);
374 for (i = 0; i < get_irp_n_irgs(); i++)
375 walk_types_entities(get_irg_frame_type(get_irp_irg(i)), &walk_entity, &my_env);
377 /* Walk constant array bounds. */
378 for (i = 0; i < get_irp_n_types(); i++) {
379 type *tp = get_irp_type(i);
380 if (is_array_type(tp)) {
381 for (j = 0; j < get_array_n_dimensions(tp); j++) {
/* Bounds may be absent (NULL) for open dimensions. */
383 n = get_array_lower_bound(tp, j);
384 if (n) irg_walk(n, pre, post, env);
385 n = get_array_upper_bound(tp, j);
386 if (n) irg_walk(n, pre, post, env);
391 current_ir_graph = rem;
395 /********************************************************************/
396 /** Walking support for interprocedural analysis **/
398 /** @@@ Don't use, not operational yet, doesn't grok recursions!! **/
399 /** @@@ Header for irgwalk.h, here until it works. **/
401 /** Interprocedural walking should not walk all predecessors of **/
402 /** all nodes. When leaving a procedure the walker should only **/
403 /** follow the edge corresponding to the most recent entry of the **/
404 /** procedure. The following functions use an internal stack to **/
405 /** remember the current call site of a procedure. **/
406 /** They also set current_ir_graph correctly. **/
408 /** Usage example: **/
410 /** void init_ip_walk (); **/
411 /** work_on_graph(some_end_node); **/
412 /** void finish_ip_walk(); **/
414 /** work_on_graph(ir_node *n) { **/
415 /** for (i = 0; i < get_irn_arity(n); i++) { **/
416 /** if (...) continue; **/
417 /** ir_node *m = get_irn_ip_pred(n, i); **/
418 /** if !m continue; **/
419 /** work_on_graph(m); **/
420 /** return_recur(n, i); **/
423 /********************************************************************/
425 /* Allocates some necessary data structures. */
426 void init_ip_walk ();
427 /* Frees some necessary data structures. */
428 void finish_ip_walk();
430 /* Call for i in {0|-1 ... get_irn_arity(n)}.
431 If n is a conventional node returns the same node as get_irn_n(n, i).
432 If the predecessors of n are in the callee of the procedure n belongs
433 to, returns get_irn_n(n, i) if this node is in the callee on the top
434 of the stack, else returns NULL.
435 If the predecessors of n are in a procedure called by the procedure n
436 belongs to pushes the caller on the caller stack in the callee.
437 Sets current_ir_graph to the graph the node returned is in. */
438 ir_node *get_irn_ip_pred(ir_node *n, int pos);
440 /* If get_irn_ip_pred() returned a node (not NULL) this must be
441 called to clear up the stacks.
442 Sets current_ir_graph to the graph n is in. */
443 void return_recur(ir_node *n, int pos);
446 /********************************************************************/
447 /** Walking support for interprocedural analysis **/
448 /********************************************************************/
/* Initial length of the dynamic call-site array (see NEW_ARR_F below). */
450 #define MIN_STACK_SIZE 40
/* Per-graph stack of call sites, kept in the graph's link field
 * (field list elided in this copy; see the accessors below). */
452 typedef struct callsite_stack {
457 /* Access the stack in the irg **************************************/
/* Store a graph's callsite stack in its link field (body elided in this
 * copy; the getter below reads it back from get_irg_link). */
460 set_irg_callsite_stack(ir_graph *g, callsite_stack *s) {
464 static INLINE callsite_stack *
465 get_irg_callsite_stack(ir_graph *g) {
466 return (callsite_stack *) get_irg_link(g);
469 /* A stack of callsites *********************************************/
471 /* @@@ eventually change the implementation so the new_ only sets the field
472 to NULL, and the stack is only allocated if used. Saves Memory! */
/* Allocate an empty callsite stack for graph g and attach it to g's link
 * field.  NOTE(review): the malloc result is not checked before use. */
473 static INLINE callsite_stack *
474 new_callsite_stack(ir_graph *g) {
475 callsite_stack *res = (callsite_stack *)malloc(sizeof(callsite_stack));
477 res->s = NEW_ARR_F (ir_node *, MIN_STACK_SIZE);
478 set_irg_callsite_stack(g, res);
/* Release the callsite stack of graph g (the deallocation lines appear to
 * be elided in this copy of the file). */
483 free_callsite_stack(ir_graph *g) {
484 callsite_stack *s = get_irg_callsite_stack(g);
/* Push 'callsite' onto the callee's stack, doubling the array when full.
 * NOTE(review): the tos increment appears to be elided in this copy. */
490 push_callsite(ir_graph *callee, ir_node *callsite) {
491 callsite_stack *s = get_irg_callsite_stack(callee);
492 if (s->tos == ARR_LEN(s->s)) {
493 int nlen = ARR_LEN (s->s) * 2;
494 ARR_RESIZE (ir_node *, s->s, nlen);
496 s->s[s->tos] = callsite;
/* Return the topmost call site of the callee's stack without popping it. */
500 static INLINE ir_node *
501 get_top_of_callsite_stack(ir_graph *callee) {
502 callsite_stack *s = get_irg_callsite_stack(callee);
503 return (s->s[s->tos-1]);
/* Pop and return the topmost call site of the callee's stack.
 * NOTE(review): the tos decrement appears to be elided in this copy;
 * as shown, the index used would be off by one vs. the getter above. */
507 ir_node * pop_callsite(ir_graph *callee) {
508 callsite_stack *s = get_irg_callsite_stack(callee);
510 return (s->s[s->tos]);
514 /* Initialization routines ******************************************/
/* Allocate a callsite stack for every graph in the program. */
516 void init_ip_walk () {
518 for (i = 0; i < get_irp_n_irgs(); i++)
519 new_callsite_stack(get_irp_irg(i));
/* Free every graph's callsite stack and clear the link fields again.
 * NOTE(review): braces around the two-statement loop body appear to be
 * elided in this copy. */
522 void finish_ip_walk() {
524 for (i = 0; i < get_irp_n_irgs(); i++)
525 free_callsite_stack(get_irp_irg(i));
526 set_irg_link(get_irp_irg(i), NULL);
529 /* walker routines **************************************************/
531 /* cf_pred is End* */
/* Entering a callee through an EndReg/EndExcept predecessor: remember the
 * CallBegin of the call site on the callee's stack and make the callee the
 * current graph.  interprocedural_view is briefly disabled so skip_Proj
 * resolves intraprocedurally while locating the CallBegin. */
533 enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
535 ir_graph *irg = get_irn_irg(cf_pred);
537 assert(interprocedural_view);
539 interprocedural_view = 0;
540 callbegin = skip_Proj(get_irn_n(block, 0));
541 assert(get_irn_op(callbegin) == op_CallBegin);
542 interprocedural_view = 1;
544 push_callsite(irg, callbegin);
545 current_ir_graph = irg;
548 /* cf_pred is CallBegin */
/* Leaving a procedure through a CallBegin: only follow the edge if it is
 * the call site we entered through (top of stack); then pop it and switch
 * back to the caller's graph. */
550 leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
551 ir_node *tos = get_top_of_callsite_stack(current_ir_graph);
553 assert(get_irn_op(cf_pred) == op_CallBegin);
555 if (tos == cf_pred) {
556 /* We entered this procedure by the call pred pos refers to. */
557 pop_callsite(current_ir_graph);
558 current_ir_graph = get_CallBegin_irg(cf_pred);
/* Interprocedural get_irn_n(): detects procedure entries/exits at position
 * pos, updates the callsite stacks and current_ir_graph accordingly, and
 * returns the predecessor -- or, per the header comment above, NULL when a
 * CallBegin edge must not be followed.
 * NOTE(review): the declarations of 'block' and 'cf_pred' appear elided. */
566 ir_node *get_irn_ip_pred(ir_node *n, int pos) {
568 if (interprocedural_view) {
570 /* Find the cf_pred referring to pos. */
573 if (get_irn_opcode(n) == iro_Filter) block = get_nodes_Block(n);
574 cf_pred = skip_Proj(get_irn_n(block, pos));
576 /* Check whether we enter or leave a procedure and act according. */
577 if ((get_irn_op(cf_pred) == op_EndReg) ||
578 (get_irn_op(cf_pred) == op_EndExcept))
579 enter_procedure(block, cf_pred, pos);
580 if (get_irn_op(cf_pred) == op_CallBegin)
581 if (!leave_procedure(block, cf_pred, pos)) return NULL;
584 return get_irn_n(n, pos);
/* Undo enter_procedure() on the way back: pop the remembered CallBegin and
 * return to the caller's graph. */
588 re_enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
589 ir_node *callbegin = pop_callsite(current_ir_graph);
590 assert(interprocedural_view);
591 current_ir_graph = get_CallBegin_irg(callbegin);
/* Undo leave_procedure() on the way back: determine the callee graph from
 * the Proj/CallBegin pair at position pos, make it current again and push
 * the CallBegin back onto its stack. */
595 re_leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
599 assert(get_irn_op(cf_pred) == op_CallBegin);
601 /* Find the irg block is in. */
602 proj = get_Block_cg_cfgpred(block, pos);
603 assert(is_Proj(proj));
604 callee = get_entity_irg(get_Call_callee(get_CallBegin_call(cf_pred),
605 get_Proj_proj(proj)));
606 current_ir_graph = callee;
607 push_callsite(callee, cf_pred);
/* Counterpart of get_irn_ip_pred(): must be called after returning from a
 * predecessor obtained through it, to restore the callsite stacks and
 * current_ir_graph (see the usage example in the header comment above). */
611 return_recur(ir_node *n, int pos) {
615 if (!interprocedural_view) return;
617 /* Find the cf_pred referring to pos. */
619 if (get_irn_opcode(n) == iro_Filter) block = get_nodes_Block(n);
620 cf_pred = skip_Proj(get_irn_n(block, pos));
622 /* Check whether we re_enter or re_leave a procedure and act according. */
623 if ((get_irn_op(cf_pred) == op_EndReg) ||
624 (get_irn_op(cf_pred) == op_EndExcept))
625 re_enter_procedure(block, cf_pred, pos);
626 if (get_irn_op(cf_pred) == op_CallBegin)
627 re_leave_procedure(block, cf_pred, pos);