1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
4 * Author: Boris Boesler
7 * - execute the pre function before recursion
8 * - execute the post function after recursion
20 # include "irgraph.h" /* visited flag */
/* walk over an interprocedural graph (callgraph). Visits only graphs in irg_set. */
/* NOTE(review): this excerpt is truncated -- the declarations of `pred' and
   `i', several closing braces and a few interior lines are elided.  Only
   comments have been added here. */
static void irg_walk_cg(ir_node * node, int visited, eset * irg_set,
			irg_walk_func *pre, irg_walk_func *post, void * env) {
  /* remember the graph we entered with, restored before returning */
  ir_graph * rem = current_ir_graph;

  /* already reached with this (or a newer) visited value -- done */
  if (get_irn_visited(node) >= visited) return;

  set_irn_visited(node, visited);

  /* interprocedural cf ops (CallBegin/EndReg/EndExcept) belong to another
     graph: make that graph current before running the callbacks */
  pred = skip_Proj(node);
  if (get_irn_op(pred) == op_CallBegin
      || get_irn_op(pred) == op_EndReg
      || get_irn_op(pred) == op_EndExcept) {
    current_ir_graph = get_irn_irg(pred);

  if (pre) pre(node, env);

  /* visit the node's block before its predecessors */
  if (is_no_Block(node))
    irg_walk_cg(get_nodes_Block(node), visited, irg_set, pre, post, env);

  if (get_irn_op(node) == op_Block) { /* block */
    for (i = get_irn_arity(node) - 1; i >= 0; --i) {
      ir_node * exec = get_irn_n(node, i);
      ir_node * pred = skip_Proj(exec);
      /* follow an interprocedural predecessor only when its graph is in
	 irg_set (the set of graphs this walk is restricted to) */
      if ((get_irn_op(pred) != op_CallBegin
	   && get_irn_op(pred) != op_EndReg
	   && get_irn_op(pred) != op_EndExcept)
	  || eset_contains(irg_set, get_irn_irg(pred))) {
	irg_walk_cg(exec, visited, irg_set, pre, post, env);
  } else if (get_irn_op(node) == op_Filter) {
    for (i = get_irn_arity(node) - 1; i >= 0; --i) {
      ir_node * pred = get_irn_n(node, i);
      /* Unknown/Bad predecessors are walked directly */
      if (get_irn_op(pred) == op_Unknown || get_irn_op(pred) == op_Bad) {
	irg_walk_cg(pred, visited, irg_set, pre, post, env);
      /* otherwise inspect the matching cf predecessor of the block,
	 which must be an interprocedural cf op */
      exec = skip_Proj(get_Block_cfgpred(get_nodes_Block(node), i));
      assert(get_irn_op(exec) == op_CallBegin
	     || get_irn_op(exec) == op_EndReg
	     || get_irn_op(exec) == op_EndExcept);
      if (eset_contains(irg_set, get_irn_irg(exec))) {
	/* descend into the other graph, then restore */
	current_ir_graph = get_irn_irg(exec);
	irg_walk_cg(pred, visited, irg_set, pre, post, env);
	current_ir_graph = rem;
  /* ordinary node: walk all predecessors */
  for (i = get_irn_arity(node) - 1; i >= 0; --i) {
    irg_walk_cg(get_irn_n(node, i), visited, irg_set, pre, post, env);

  if (post) post(node, env);

  current_ir_graph = rem;
/* Insert all ir_graphs in irg_set, that are (transitive) reachable. */
/* NOTE(review): the declaration of `i' and the closing braces fall on
   lines elided from this excerpt. */
static void collect_irgs(ir_node * node, eset * irg_set) {
  if (get_irn_op(node) == op_Call) {
    /* add each callee graph not yet collected and recurse into it */
    for (i = get_Call_n_callees(node) - 1; i >= 0; --i) {
      entity * ent = get_Call_callee(node, i);
      /* a callee entity may have no graph (e.g. external) -- skip those */
      ir_graph * irg = ent ? get_entity_irg(ent) : NULL;
      if (irg && !eset_contains(irg_set, irg)) {
	eset_insert(irg_set, irg);
	irg_walk_graph(irg, (irg_walk_func *) collect_irgs, NULL, irg_set);
/* Intraprocedural walker worker: calls `pre' before and `post' after
   recursing over the node's block and all predecessors.
   NOTE(review): the return-type line, the declaration of `i' and the
   closing braces are elided from this excerpt. */
irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
  /* visit each node only once per walk, keyed on the irg visited counter */
  if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
    set_irn_visited(node, get_irg_visited(current_ir_graph));

    if (pre) pre(node, env);

    /* block first, then all predecessors in descending index order */
    if (is_no_Block(node))
      irg_walk_2(get_nodes_Block(node), pre, post, env);
    for (i = get_irn_arity(node) - 1; i >= 0; --i)
      irg_walk_2(get_irn_n(node, i), pre, post, env);

    if (post) post(node, env);
/* Walk from `node', calling pre/post on every reachable node.  In the
   interprocedural view the walk spans all (transitively) called graphs;
   otherwise it stays in current_ir_graph.
   NOTE(review): the opening brace, the declarations of `visited'/`irg',
   the `} else {' before the intraprocedural branch and the closing
   braces are on elided lines. */
void irg_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
  if (interprocedural_view) {
    eset * irg_set = eset_create();
    /* first collect all reachable graphs with an intraprocedural pre-walk */
    interprocedural_view = false;
    eset_insert(irg_set, current_ir_graph);
    irg_walk(node, (irg_walk_func *) collect_irgs, NULL, irg_set);
    interprocedural_view = true;
    /* one fresh visited value shared by all graphs of this walk */
    visited = get_max_irg_visited() + 1;
    irg_walk_cg(node, visited, irg_set, pre, post, env);
    /* record that value in every collected graph */
    for (irg = eset_first(irg_set); irg; irg = eset_next(irg_set)) {
      set_irg_visited(irg, visited);
    eset_destroy(irg_set);
    /* NOTE(review): the following two lines are the intraprocedural
       else-branch; its `} else {' line is elided above */
    inc_irg_visited(current_ir_graph);
    irg_walk_2(node, pre, post, env);
/* Walks over the graph `irg' starting at its End node; sets
   current_ir_graph for the walk and restores it afterwards. */
void irg_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env) {
  ir_graph * rem = current_ir_graph;
  current_ir_graph = irg;
  irg_walk(get_irg_end(irg), pre, post, env);
  current_ir_graph = rem;
/* Executes irg_walk(end, pre, post, env) for all irgraphs in irprog.
   Sets current_ir_graph properly for each walk. Conserves current
   NOTE(review): the tail of this comment, the declarations of
   `i'/`irg'/`rem' and the closing braces are on elided lines. */
void all_irg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
  rem = current_ir_graph;

  /* walk every graph in the program from its End node */
  for (i = 0; i < get_irp_n_irgs(); i++) {
    irg = get_irp_irg(i);
    current_ir_graph = irg;
    irg_walk(get_irg_end(irg), pre, post, env);
  current_ir_graph = rem;
178 /***************************************************************************/
/* Returns current_ir_graph and sets it to the irg of predecessor index
   `index' when that predecessor crosses a procedure boundary (ip cf op).
   NOTE(review): the tail of this comment, the return statement and the
   closing braces are on elided lines. */
static INLINE ir_graph *
switch_irg (ir_node *n, int index) {
  /* saved so the caller can restore the graph after recursing */
  ir_graph *old_current = current_ir_graph;

  if (interprocedural_view) {
    /* Only Filter and Block nodes can have predecessors in other graphs. */
    if (get_irn_op(n) == op_Filter)
      n = get_nodes_Block(n);
    if (get_irn_op(n) == op_Block) {
      ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
      if (is_ip_cfop(cfop)) {
	current_ir_graph = get_irn_irg(cfop);
/* Interprocedural walker worker: like irg_walk_2, but switches
   current_ir_graph via switch_irg() around each predecessor edge.
   NOTE(review): the return-type line, the declaration of `i' and the
   closing braces are elided from this excerpt. */
cg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
  ir_graph *rem = NULL;

  if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
    set_irn_visited(node, get_irg_visited(current_ir_graph));

    if (pre) pre(node, env);

    if (is_no_Block(node))
      cg_walk_2(get_nodes_Block(node), pre, post, env);
    /* enter the predecessor's graph, recurse, then restore it */
    for (i = get_irn_arity(node) - 1; i >= 0; --i) {
      rem = switch_irg(node, i);
      cg_walk_2(get_irn_n(node, i), pre, post, env);
      current_ir_graph = rem;

    if (post) post(node, env);
/* Walks all irgs in interprocedural view. Visits each node only once. */
/* NOTE(review): the declarations of `i'/`sb' and some braces are on
   elided lines. */
void cg_walk(irg_walk_func *pre, irg_walk_func *post, void *env) {
  ir_graph *rem = current_ir_graph;
  int rem_view = interprocedural_view;

  interprocedural_view = true;

  inc_max_irg_visited();
  /* Fix all irg_visited flags */
  for (i = 0; i < get_irp_n_irgs(); i++)
    set_irg_visited(get_irp_irg(i), get_max_irg_visited());

  /* Walk starting at unreachable procedures. */
  for (i = 0; i < get_irp_n_irgs(); i++) {
    current_ir_graph = get_irp_irg(i);

    /* skip graphs whose Start block has extra cf predecessors --
       presumably these are called procedures reached from their
       callers during the walk; verify against the full source */
    sb = get_irg_start_block(current_ir_graph);
    if ((get_Block_n_cfgpreds(sb) > 1) ||
	(get_nodes_Block(get_Block_cfgpred(sb, 0)) != sb)) continue;

    cg_walk_2(get_irg_end(current_ir_graph), pre, post, env);

  interprocedural_view = rem_view;
  current_ir_graph = rem;
257 /***************************************************************************/
/* Walks back from n until it finds a real cf op. */
/* NOTE(review): most of this function's body (including the declaration
   of `pred', the iteration and the return) is elided from this excerpt;
   only the termination test is visible.  Comments only added. */
static ir_node *get_cf_op(ir_node *n) {
    /* stop once pred is a genuine cf op, a fragile op, or Bad */
    if (!(is_cfop(pred) || is_fragile_op(pred) ||
	  (get_irn_op(pred) == op_Bad)))
/* Block-walker worker: calls pre/post on `node' and recurses over its
   control-flow predecessor blocks.  Uses the separate block_visited
   counter so it can interleave with the node walker.
   NOTE(review): the opening brace, the declaration of `i', the `} else {'
   before the Bad assert and the closing braces are on elided lines. */
static void irg_block_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
  assert(get_irn_opcode(node) == iro_Block);

  if(get_Block_block_visited(node) < get_irg_block_visited(current_ir_graph)) {
    set_Block_block_visited(node, get_irg_block_visited(current_ir_graph));

    if(pre) pre(node, env);

    for(i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
      /* find the corresponding predecessor block. */
      ir_node *pred = get_cf_op(get_Block_cfgpred(node, i));
      pred = get_nodes_Block(pred);
      if(get_irn_opcode(pred) == iro_Block) {
	irg_block_walk_2(pred, pre, post, env);
      /* else-branch (elided): the only legal non-Block case is Bad */
	assert(get_irn_opcode(pred) == iro_Bad);

    if(post) post(node, env);
/* walks only over Block nodes in the graph. Has its own visited
   flag, so that it can be interleaved with the other walker. */
/* NOTE(review): the opening brace, the declaration of `i' and the
   closing braces are on elided lines. */
void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
  ir_node *block, *pred;

  assert(!interprocedural_view); /* interprocedural_view not implemented, because it
				  * interleaves with irg_walk */
  inc_irg_block_visited(current_ir_graph);
  /* start at the node's block (or the node itself if it is a Block) */
  if (is_no_Block(node)) block = get_nodes_Block(node); else block = node;
  assert(get_irn_opcode(block) == iro_Block);
  irg_block_walk_2(block, pre, post, env);
  /* keepalive: the endless loops ... */
  /* blocks kept alive only through End have no cf successor; walk them
     explicitly from End's keep-alive edges */
  if (get_irn_op(node) == op_End)
    for (i = 0; i < get_irn_arity(node); i++) {
      pred = get_irn_n(node, i);
      if (get_irn_op(pred) == op_Block)
	irg_block_walk_2(pred, pre, post, env);
/* Walks the blocks of `irg' starting at its End node; sets
   current_ir_graph for the walk and restores it afterwards. */
void irg_block_walk_graph(ir_graph *irg, irg_walk_func *pre,
			  irg_walk_func *post, void *env) {
  ir_graph * rem = current_ir_graph;
  current_ir_graph = irg;
  irg_block_walk(get_irg_end(irg), pre, post, env);
  current_ir_graph = rem;
336 /********************************************************************/
/* Environment passed from walk_const_code to walk_entity.
   NOTE(review): the struct members are on elided lines; judging from the
   uses below they are the pre/post callbacks and the user env pointer. */
typedef struct walk_env {
/* Walk to all constant expressions in this entity. */
/* NOTE(review): the declaration of `i', the `} else {' before the
   compound case and the closing braces are on elided lines. */
static void walk_entity(entity *ent, void *env) {
  walk_env *my_env = (walk_env *)env;
  /* only entities with an initializer carry constant code */
  if (get_entity_variability(ent) != uninitialized) {
    if (is_atomic_entity(ent)) {
      irg_walk(get_atomic_ent_value(ent), my_env->pre, my_env->post, my_env->env);
      /* compound entity (elided else-branch): walk every initializer value */
      for (i = 0; i < get_compound_ent_n_values(ent); i++) {
	irg_walk(get_compound_ent_value(ent, i), my_env->pre, my_env->post, my_env->env);
/* Walks over all code in const_code_irg. */
/* NOTE(review): the declarations of `i'/`j'/`n'/`my_env' and the
   assignments filling my_env are on elided lines. */
void walk_const_code(irg_walk_func *pre, irg_walk_func *post, void *env) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = get_const_code_irg();
  inc_irg_visited(current_ir_graph);

  /* Walk all types that can contain constant entities. */
  walk_types_entities(get_glob_type(), &walk_entity, &my_env);
  for (i = 0; i < get_irp_n_types(); i++)
    walk_types_entities(get_irp_type(i), &walk_entity, &my_env);
  for (i = 0; i < get_irp_n_irgs(); i++)
    walk_types_entities(get_irg_frame_type(get_irp_irg(i)), &walk_entity, &my_env);

  /* Walk constant array bounds. */
  for (i = 0; i < get_irp_n_types(); i++) {
    type *tp = get_irp_type(i);
    if (is_array_type(tp)) {
      for (j = 0; j < get_array_n_dimensions(tp); j++) {
	/* either bound may be absent (NULL) */
	n = get_array_lower_bound(tp, j);
	if (n) irg_walk(n, pre, post, env);
	n = get_array_upper_bound(tp, j);
	if (n) irg_walk(n, pre, post, env);

  current_ir_graph = rem;
397 /********************************************************************/
398 /** Walking support for interprocedural analysis **/
400 /** @@@ Don't use, not operational yet, doesn't grok recursions!! **/
401 /** @@@ Header for irgwalk.h, here until it works. **/
403 /** Interprocedural walking should not walk all predecessors of **/
404 /** all nodes. When leaving a procedure the walker should only **/
405 /** follow the edge corresponding to the most recent entry of the **/
406 /** procedure. The following functions use an internal stack to **/
407 /** remember the current call site of a procedure. **/
408 /** They also set current_ir_graph correctly. **/
410 /** Usage example: **/
412 /** void init_ip_walk (); **/
413 /** work_on_graph(some_end_node); **/
414 /** void finish_ip_walk(); **/
416 /** work_on_graph(ir_node *n) { **/
417 /** for (i = 0; i < get_irn_arity(n); i++) { **/
418 /** if (...) continue; **/
419 /** ir_node *m = get_irn_ip_pred(n, i); **/
/**       if (!m) continue;                                        **/
421 /** work_on_graph(m); **/
422 /** return_recur(n, i); **/
425 /********************************************************************/
/* In-file prototypes for the interprocedural walking support below
   (see the usage example in the comment block above). */
/* Allocates some necessary datastructures. */
void init_ip_walk ();
/* Frees some necessary datastructures. */
void finish_ip_walk();

/* Call for i in {0|-1 ... get_irn_arity(n)}.
   If n is a conventional node returns the same node as get_irn_n(n, i).
   If the predecessors of n are in the callee of the procedure n belongs
   to, returns get_irn_n(n, i) if this node is in the callee on the top
   of the stack, else returns NULL.
   If the predecessors of n are in a procedure called by the procedure n
   belongs to pushes the caller on the caller stack in the callee.
   Sets current_ir_graph to the graph the node returned is in. */
ir_node *get_irn_ip_pred(ir_node *n, int pos);

/* If get_irn_ip_pred() returned a node (not NULL) this must be
   called to clear up the stacks.
   Sets current_ir_graph to the graph n is in. */
void return_recur(ir_node *n, int pos);
/********************************************************************/
/** Walking support for interprocedural analysis                   **/
/********************************************************************/

/* initial capacity of a per-graph callsite stack */
#define MIN_STACK_SIZE 40

/* Per-graph stack of call sites.  NOTE(review): the struct members are
   on elided lines; by the uses below they include `tos' and the
   ir_node** array `s'. */
typedef struct callsite_stack {

/* Access the stack in the irg **************************************/

/* Stores stack `s' for graph `g'.  NOTE(review): the return-type line
   and the body (presumably a set_irg_link call, mirroring the getter
   below -- verify) are on elided lines. */
set_irg_callsite_stack(ir_graph *g, callsite_stack *s) {

/* Retrieves the callsite stack stored in g's link field. */
static INLINE callsite_stack *
get_irg_callsite_stack(ir_graph *g) {
  return (callsite_stack *) get_irg_link(g);
/* A stack of callsites *********************************************/

/* @@@ eventually change the implementation so the new_ only sets the field
   to NULL, and the stack is only allocated if used. Saves Memory! */
/* Allocates a callsite stack for `g' and stores it in g's link field.
   NOTE(review): the tos initialization and the return statement are on
   elided lines; the malloc result is not checked in the visible code. */
static INLINE callsite_stack *
new_callsite_stack(ir_graph *g) {
  callsite_stack *res = (callsite_stack *)malloc(sizeof(callsite_stack));
  res->s = NEW_ARR_F (ir_node *, MIN_STACK_SIZE);
  set_irg_callsite_stack(g, res);
/* Frees the callsite stack stored for graph `g'.
   NOTE(review): the return-type line and the statements that release the
   array and the struct are on elided lines. */
free_callsite_stack(ir_graph *g) {
  callsite_stack *s = get_irg_callsite_stack(g);
/* Pushes `callsite' onto callee's stack, doubling the array when full.
   NOTE(review): the return-type line, the increment of s->tos and the
   closing braces are on elided lines. */
push_callsite(ir_graph *callee, ir_node *callsite) {
  callsite_stack *s = get_irg_callsite_stack(callee);
  /* grow the flexible array before it overflows */
  if (s->tos == ARR_LEN(s->s)) {
    int nlen = ARR_LEN (s->s) * 2;
    ARR_RESIZE (ir_node *, s->s, nlen);
  s->s[s->tos] = callsite;
/* Returns the topmost call site of callee's stack without popping it. */
static INLINE ir_node *
get_top_of_callsite_stack(ir_graph *callee) {
  callsite_stack *s = get_irg_callsite_stack(callee);
  return (s->s[s->tos-1]);
/* Pops and returns the topmost call site of callee's stack.
   NOTE(review): a decrement of s->tos presumably happens on an elided
   line before the return (otherwise this would read one past the top) --
   verify against the full source. */
ir_node * pop_callsite(ir_graph *callee) {
  callsite_stack *s = get_irg_callsite_stack(callee);
  return (s->s[s->tos]);
/* Initialization routines ******************************************/

/* Allocates a callsite stack for every graph in the program.
   NOTE(review): the declaration of `i' and the braces are on elided lines. */
void init_ip_walk () {
  for (i = 0; i < get_irp_n_irgs(); i++)
    new_callsite_stack(get_irp_irg(i));
/* Frees the callsite stack of every graph and clears its link field.
   NOTE(review): the declaration of `i' and the loop braces are on elided
   lines -- as visible, the set_irg_link line would fall outside the loop;
   presumably braces enclose both statements in the full source; verify. */
void finish_ip_walk() {
  for (i = 0; i < get_irp_n_irgs(); i++)
    free_callsite_stack(get_irp_irg(i));
  set_irg_link(get_irp_irg(i), NULL);
/* walker routines **************************************************/

/* cf_pred is End* */
/* Entered a callee through an EndReg/EndExcept predecessor: records the
   call site on the callee's stack and makes the callee's graph current.
   NOTE(review): the return-type line, the declaration of `callbegin' and
   the closing brace are on elided lines. */
enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
  ir_graph *irg = get_irn_irg(cf_pred);

  assert(interprocedural_view);

  /* the ip view is switched off around this lookup -- presumably so
     get_irn_n/skip_Proj deliver the intraprocedural predecessor here;
     verify against the accessor implementations */
  interprocedural_view = 0;
  callbegin = skip_Proj(get_irn_n(block, 0));
  assert(get_irn_op(callbegin) == op_CallBegin);
  interprocedural_view = 1;

  push_callsite(irg, callbegin);
  current_ir_graph = irg;
/* cf_pred is CallBegin */
/* Leaving a callee through a CallBegin edge: follow it only when it is
   the call site we entered through (top of the current graph's stack).
   NOTE(review): the return-type line, the remaining statements (the
   non-matching branch and return value) and closing braces are elided. */
leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
  ir_node *tos = get_top_of_callsite_stack(current_ir_graph);

  assert(get_irn_op(cf_pred) == op_CallBegin);

  if (tos == cf_pred) {
    /* We entered this procedure by the call pred pos refers to. */
    pop_callsite(current_ir_graph);
    current_ir_graph = get_CallBegin_irg(cf_pred);
/* Returns get_irn_n(n, pos), switching current_ir_graph and maintaining
   the per-graph call-site stacks when the edge crosses a procedure
   border (contract documented at the prototype above).
   NOTE(review): the declarations of `block'/`cf_pred', the presumable
   initialization of `block' for the non-Filter case, and several braces
   are on elided lines. */
ir_node *get_irn_ip_pred(ir_node *n, int pos) {
  if (interprocedural_view) {
    /* Find the cf_pred referring to pos. */
    if (get_irn_opcode(n) == iro_Filter) block = get_nodes_Block(n);
    cf_pred = skip_Proj(get_irn_n(block, pos));

    /* Check whether we enter or leave a procedure and act according. */
    if ((get_irn_op(cf_pred) == op_EndReg) ||
	(get_irn_op(cf_pred) == op_EndExcept))
      enter_procedure(block, cf_pred, pos);
    if (get_irn_op(cf_pred) == op_CallBegin)
      if (!leave_procedure(block, cf_pred, pos)) return NULL;

  return get_irn_n(n, pos);
/* Undo an enter_procedure on the way back up: pop the call site from the
   current graph's stack and return to the caller's graph.
   NOTE(review): the return-type line and the closing brace are elided. */
re_enter_procedure(ir_node *block, ir_node *cf_pred, int pos) {
  ir_node *callbegin = pop_callsite(current_ir_graph);
  assert(interprocedural_view);
  current_ir_graph = get_CallBegin_irg(callbegin);
/* Undo a leave_procedure on the way back up: determine the callee graph
   from the Proj number of the call-edge projection, make it current, and
   push the call site on its stack again.
   NOTE(review): the return-type line, the declarations of
   `proj'/`callee' and the closing brace are on elided lines. */
re_leave_procedure(ir_node *block, ir_node *cf_pred, int pos) {
  assert(get_irn_op(cf_pred) == op_CallBegin);

  /* Find the irg block is in. */
  proj = get_Block_cg_cfgpred(block, pos);
  assert(is_Proj(proj));
  /* the Proj number selects which callee of the Call this edge targets */
  callee = get_entity_irg(get_Call_callee(get_CallBegin_call(cf_pred),
					  get_Proj_proj(proj)));
  current_ir_graph = callee;
  push_callsite(callee, cf_pred);
/* Clears up the graph/stack bookkeeping of get_irn_ip_pred after the
   recursion over predecessor `pos' returns (see prototype comment above).
   NOTE(review): the return-type line, the declarations of
   `block'/`cf_pred', the presumable initialization of `block' for the
   non-Filter case and the closing brace are on elided lines. */
return_recur(ir_node *n, int pos) {
  if (!interprocedural_view) return;

  /* Find the cf_pred referring to pos. */
  if (get_irn_opcode(n) == iro_Filter) block = get_nodes_Block(n);
  cf_pred = skip_Proj(get_irn_n(block, pos));

  /* Check whether we re_enter or re_leave a procedure and act according. */
  if ((get_irn_op(cf_pred) == op_EndReg) ||
      (get_irn_op(cf_pred) == op_EndExcept))
    re_enter_procedure(block, cf_pred, pos);
  if (get_irn_op(cf_pred) == op_CallBegin)
    re_leave_procedure(block, cf_pred, pos);