2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation and computation of the callgraph.
23 * @author Goetz Lindenmaier
32 #include "callgraph.h"
36 #include "irgraph_t.h"
44 #include "raw_bitset.h"
48 static ir_visited_t master_cg_visited = 0;
49 static inline int cg_irg_visited (ir_graph *n);
50 static inline void mark_cg_irg_visited(ir_graph *n);
52 /** Returns the callgraph state of the program representation. */
53 irp_callgraph_state get_irp_callgraph_state(void)
55 return irp->callgraph_state;
58 /* Sets the callgraph state of the program representation. */
59 void set_irp_callgraph_state(irp_callgraph_state s)
61 irp->callgraph_state = s;
64 /* Returns the number of procedures that call the given irg. */
65 size_t get_irg_n_callers(const ir_graph *irg)
68 return irg->callers ? ARR_LEN(irg->callers) : 0;
71 /* Returns the caller at position pos. */
72 ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
74 assert(pos < get_irg_n_callers(irg));
75 return irg->callers ? irg->callers[pos] : NULL;
78 /* Returns non-zero if the caller at position pos is "a backedge", i.e. a recursion. */
79 int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
81 assert(pos < get_irg_n_callers(irg));
82 return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
85 /** Search the caller in the list of all callers and set its backedge property. */
86 static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
88 size_t i, n_callers = get_irg_n_callers(irg);
90 /* allocate a new array on demand */
91 if (irg->caller_isbe == NULL)
92 irg->caller_isbe = rbitset_malloc(n_callers);
93 for (i = 0; i < n_callers; ++i) {
94 if (get_irg_caller(irg, i) == caller) {
95 rbitset_set(irg->caller_isbe, i);
101 /* Returns non-zero if the irg has a backedge caller. */
102 int has_irg_caller_backedge(const ir_graph *irg)
104 size_t i, n_callers = get_irg_n_callers(irg);
106 if (irg->caller_isbe != NULL) {
107 for (i = 0; i < n_callers; ++i)
108 if (rbitset_is_set(irg->caller_isbe, i))
115 * Find the reversion position of a caller.
116 * Given the position pos_caller of an caller of irg, return
117 * irg's callee position on that caller.
119 static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
121 ir_graph *caller = get_irg_caller(callee, pos_caller);
122 /* search the other relation for the corresponding edge. */
123 size_t i, n_callees = get_irg_n_callees(caller);
124 for (i = 0; i < n_callees; ++i) {
125 if (get_irg_callee(caller, i) == callee) {
130 assert(!"reverse_pos() did not find position");
135 /* Returns the maximal loop depth of call nodes that call along this edge. */
136 size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
138 ir_graph *caller = get_irg_caller(irg, pos);
139 size_t pos_callee = reverse_pos(irg, pos);
141 return get_irg_callee_loop_depth(caller, pos_callee);
145 /* Returns the number of procedures that are called by the given irg. */
146 size_t get_irg_n_callees(const ir_graph *irg)
148 assert(irg->callees);
149 return irg->callees ? ARR_LEN(irg->callees) : 0;
152 /* Returns the callee at position pos. */
153 ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
155 assert(pos < get_irg_n_callees(irg));
156 return irg->callees ? irg->callees[pos]->irg : NULL;
159 /* Returns non-zero if the callee at position pos is "a backedge", i.e. a recursion. */
160 int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
162 assert(pos < get_irg_n_callees(irg));
163 return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;
166 /* Returns non-zero if the irg has a backedge callee. */
167 int has_irg_callee_backedge(const ir_graph *irg)
169 size_t i, n_callees = get_irg_n_callees(irg);
171 if (irg->callee_isbe != NULL) {
172 for (i = 0; i < n_callees; ++i)
173 if (rbitset_is_set(irg->callee_isbe, i))
180 * Mark the callee at position pos as a backedge.
182 static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
184 size_t n = get_irg_n_callees(irg);
186 /* allocate a new array on demand */
187 if (irg->callee_isbe == NULL)
188 irg->callee_isbe = rbitset_malloc(n);
190 rbitset_set(irg->callee_isbe, pos);
193 /* Returns the maximal loop depth of call nodes that call along this edge. */
194 size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
196 assert(pos < get_irg_n_callees(irg));
197 return irg->callees ? irg->callees[pos]->max_depth : 0;
201 /* --------------------- Compute the callgraph ------------------------ */
/**
 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
 *
 * For every possibly-called entity of a Call, records the caller->callee
 * and callee->caller relation; at this stage callees/callers are psets
 * (see compute_callgraph()), converted to arrays afterwards.
 *
 * NOTE(review): several lines appear elided in this view (local
 * declarations of irg/i/n_callees/depth, the `buf` key setup, the
 * NULL-check guarding external callees, closing braces) — verify
 * against upstream libFirm before relying on this exact text.
 */
static void ana_Call(ir_node *n, void *env)
	/* only Call nodes are of interest */
	if (! is_Call(n)) return;

	irg = get_irn_irg(n);
	n_callees = get_Call_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		/* callee entity; may resolve to no graph for external methods */
		ir_entity *callee_e = get_Call_callee(n, i);
		ir_graph *callee = get_entity_irg(callee_e);
			cg_callee_entry *found;
			/* record both directions of the call edge */
			pset_insert((pset *)callee->callers, irg, HASH_PTR(irg));
			found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, HASH_PTR(callee));
			if (found) { /* add Call node to list, compute new nesting. */
				ir_node **arr = found->call_list;
				ARR_APP1(ir_node *, arr, n);
				found->call_list = arr;
			} else { /* New node, add Call node and init nesting. */
				found = OALLOC(irg->obst, cg_callee_entry);
				found->call_list = NEW_ARR_F(ir_node *, 1);
				found->call_list[0] = n;
				found->max_depth = 0;
				pset_insert((pset *)irg->callees, found, HASH_PTR(callee));
			/* the loop depth of the call site raises the edge's max_depth */
			depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
			found->max_depth = (depth > found->max_depth) ? depth : found->max_depth;
247 /** compare two ir graphs in a cg_callee_entry */
248 static int cg_callee_entry_cmp(const void *elt, const void *key)
250 const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
251 const cg_callee_entry *e2 = (const cg_callee_entry*) key;
252 return e1->irg != e2->irg;
255 /** compare two ir graphs for pointer identity */
256 static int graph_cmp(const void *elt, const void *key)
258 const ir_graph *e1 = (const ir_graph*) elt;
259 const ir_graph *e2 = (const ir_graph*) key;
/* Construct and destruct the callgraph. */
void compute_callgraph(void)
	/* NOTE(review): local declarations (i, j, n_irgs, count) and several
	   closing braces appear elided in this view — verify upstream. */
	n_irgs = get_irp_n_irgs();
	/* initialization: temporarily store psets in the callees/callers
	   fields; they are converted to flat arrays in the last phase */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent);
		irg->callees = (cg_callee_entry **)new_pset(cg_callee_entry_cmp, 8);
		irg->callers = (ir_graph **)new_pset(graph_cmp, 8);
		//construct_cf_backedges(irg);

	/* Compute the call graph */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		construct_cf_backedges(irg); // We also find the maximal loop depth of a call.
		irg_walk_graph(irg, ana_Call, NULL, NULL);

	/* Change the sets to arrays. */
	for (i = 0; i < n_irgs; ++i) {
		cg_callee_entry *callee;
		ir_graph *c, *irg = get_irp_irg(i);
		pset *callee_set, *caller_set;

		callee_set = (pset *)irg->callees;
		count = pset_count(callee_set);
		irg->callees = NEW_ARR_F(cg_callee_entry *, count);
		irg->callee_isbe = NULL;
		callee = (cg_callee_entry*) pset_first(callee_set);
		for (j = 0; j < count; ++j) {
			irg->callees[j] = callee;
			callee = (cg_callee_entry*) pset_next(callee_set);
		del_pset(callee_set);
		/* the pset iteration must be exhausted before deletion */
		assert(callee == NULL);

		caller_set = (pset *)irg->callers;
		count = pset_count(caller_set);
		irg->callers = NEW_ARR_F(ir_graph *, count);
		irg->caller_isbe = NULL;
		c = (ir_graph*) pset_first(caller_set);
		for (j = 0; j < count; ++j) {
			/* NOTE(review): the store `irg->callers[j] = c;` appears
			   elided here — verify upstream */
			c = (ir_graph*) pset_next(caller_set);
		del_pset(caller_set);
	set_irp_callgraph_state(irp_callgraph_consistent);
322 /* Destruct the callgraph. */
323 void free_callgraph(void)
325 size_t i, n_irgs = get_irp_n_irgs();
326 for (i = 0; i < n_irgs; ++i) {
327 ir_graph *irg = get_irp_irg(i);
328 if (irg->callees) DEL_ARR_F(irg->callees);
329 if (irg->callers) DEL_ARR_F(irg->callers);
330 if (irg->callee_isbe) free(irg->callee_isbe);
331 if (irg->caller_isbe) free(irg->caller_isbe);
334 irg->callee_isbe = NULL;
335 irg->caller_isbe = NULL;
337 set_irp_callgraph_state(irp_callgraph_none);
340 /* ----------------------------------------------------------------------------------- */
341 /* A walker for the callgraph */
342 /* ----------------------------------------------------------------------------------- */
/**
 * Callgraph walker helper: recurse into all callees of irg once.
 * NOTE(review): the pre(irg, env)/post(irg, env) callback invocations and
 * the `return;` after the visited check appear elided in this view —
 * verify against upstream libFirm.
 */
static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
	if (cg_irg_visited(irg))
	mark_cg_irg_visited(irg);

	n_callees = get_irg_n_callees(irg);
	for (i = 0; i < n_callees; i++) {
		ir_graph *m = get_irg_callee(irg, i);
		do_walk(m, pre, post, env);

/**
 * Walk the callgraph, calling pre/post on every graph: first starting
 * from the roots (graphs without callers), then from any graph left
 * unvisited (unreachable call cycles).
 * NOTE(review): the increment of master_cg_visited that opens a new
 * visited generation appears elided here — verify upstream.
 */
void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
	size_t i, n_irgs = get_irp_n_irgs();

	/* roots are methods which have no callers in the current program */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);

		if (get_irg_n_callers(irg) == 0)
			do_walk(irg, pre, post, env);

	/* in case of unreachable call loops we haven't visited some irgs yet */
	for (i = 0; i < n_irgs; i++) {
		ir_graph *irg = get_irp_irg(i);
		do_walk(irg, pre, post, env);
386 /* ----------------------------------------------------------------------------------- */
387 /* loop construction algorithm */
388 /* ----------------------------------------------------------------------------------- */
390 static ir_graph *outermost_ir_graph; /**< The outermost graph the scc is computed
392 static ir_loop *current_loop; /**< Current cfloop construction is working
394 static size_t loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
395 Each cfloop node gets a unique number.
396 What for? ev. remove. @@@ */
397 static size_t current_dfn = 1; /**< Counter to generate depth first numbering
400 /*-----------------*/
401 /* Node attributes */
402 /*-----------------*/
404 typedef struct scc_info {
405 size_t dfn; /**< Depth first search number. */
406 size_t uplink; /**< dfn number of ancestor. */
407 ir_visited_t visited; /**< visited counter */
408 int in_stack; /**< Marks whether node is on the stack. */
412 * allocates a new scc_info on the obstack
414 static inline scc_info *new_scc_info(struct obstack *obst)
416 return OALLOCZ(obst, scc_info);
420 * Returns non-zero if a graph was already visited.
422 static inline int cg_irg_visited(ir_graph *irg)
424 return irg->self_visited >= master_cg_visited;
428 * Marks a graph as visited.
430 static inline void mark_cg_irg_visited(ir_graph *irg)
432 irg->self_visited = master_cg_visited;
436 * Set a graphs visited flag to i.
438 static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
440 irg->self_visited = i;
444 * Returns the visited flag of a graph.
446 static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
448 return irg->self_visited;
451 static inline void mark_irg_in_stack(ir_graph *irg)
453 scc_info *info = (scc_info*) get_irg_link(irg);
454 assert(info && "missing call to init_scc()");
458 static inline void mark_irg_not_in_stack(ir_graph *irg)
460 scc_info *info = (scc_info*) get_irg_link(irg);
461 assert(info && "missing call to init_scc()");
465 static inline int irg_is_in_stack(const ir_graph *irg)
467 scc_info *info = (scc_info*) get_irg_link(irg);
468 assert(info && "missing call to init_scc()");
469 return info->in_stack;
472 static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
474 scc_info *info = (scc_info*) get_irg_link(irg);
475 assert(info && "missing call to init_scc()");
476 info->uplink = uplink;
479 static inline size_t get_irg_uplink(const ir_graph *irg)
481 const scc_info *info = (scc_info*) get_irg_link(irg);
482 assert(info && "missing call to init_scc()");
486 static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
488 scc_info *info = (scc_info*) get_irg_link(irg);
489 assert(info && "missing call to init_scc()");
493 static inline size_t get_irg_dfn(const ir_graph *irg)
495 const scc_info *info = (scc_info*) get_irg_link(irg);
496 assert(info && "missing call to init_scc()");
500 /**********************************************************************/
502 /**********************************************************************/
504 static ir_graph **stack = NULL;
505 static size_t tos = 0; /**< top of stack */
508 * Initialize the irg stack.
510 static inline void init_stack(void)
513 ARR_RESIZE(ir_graph *, stack, 1000);
515 stack = NEW_ARR_F(ir_graph *, 1000);
521 * push a graph on the irg stack
522 * @param n the graph to be pushed
524 static inline void push(ir_graph *irg)
526 if (tos == ARR_LEN(stack)) {
527 size_t nlen = ARR_LEN(stack) * 2;
528 ARR_RESIZE(ir_graph*, stack, nlen);
531 mark_irg_in_stack(irg);
535 * return the topmost graph on the stack and pop it
537 static inline ir_graph *pop(void)
543 mark_irg_not_in_stack(irg);
/**
 * The nodes up to irg belong to the current loop.
 * Removes them from the stack and adds them to the current loop.
 * NOTE(review): the pop() loop framing and counter increments appear
 * elided in this view — verify against upstream libFirm.
 */
static inline void pop_scc_to_loop(ir_graph *irg)
		set_irg_dfn(m, loop_node_cnt);
		add_loop_irg(current_loop, m);
		//m->callgraph_loop_depth = current_loop->depth;

/* GL ??? my last son is my grandson??? Removes cfloops with no
   ir_nodes in them. Such loops have only another loop as son. (Why
   can't they have two loops as sons? Does it never get that far? ) */
static void close_loop(ir_loop *l)
	size_t last = get_loop_n_elements(l) - 1;
	loop_element lelement = get_loop_element(l, last);
	ir_loop *last_son = lelement.son;

	if (get_kind(last_son) == k_ir_loop &&
	    get_loop_n_elements(last_son) == 1) {
		/* pull the single grandson up one level, replacing last_son;
		   NOTE(review): the declaration of gson appears elided */
		lelement = get_loop_element(last_son, 0);
		if (get_kind(gson) == k_ir_loop) {
			loop_element new_last_son;
			gson->outer_loop = l;
			new_last_son.son = gson;
			l->children[last] = new_last_son;

/**
 * Removes and unmarks all nodes up to n from the stack.
 * The nodes must be visited once more to assign them to a scc.
 * NOTE(review): the pop() loop framing appears elided here.
 */
static inline void pop_scc_unmark_visit(ir_graph *n)
		set_cg_irg_visited(m, 0);
605 /**********************************************************************/
606 /* The loop data structure. **/
607 /**********************************************************************/
610 * Allocates a new loop as son of current_loop. Sets current_loop
611 * to the new loop and returns the father.
613 static ir_loop *new_loop(void)
615 ir_loop *father = current_loop;
616 ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
623 /**********************************************************************/
624 /* Constructing and destructing the loop/backedge information. **/
625 /**********************************************************************/
627 /* Initialization steps. **********************************************/
/**
 * Attach a fresh, zeroed scc_info to every graph (via the link field)
 * and reset its callgraph depth counters.
 * NOTE(review): the declarations of i/n_irgs, the reset of the global
 * counters and closing braces appear elided — verify upstream.
 */
static void init_scc(struct obstack *obst)
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		set_irg_link(irg, new_scc_info(obst));
		irg->callgraph_recursion_depth = 0;
		irg->callgraph_loop_depth = 0;
/** Returns non-zero if n is a loop header, i.e., it is a Block node
 * and has predecessors within the cfloop and out of the cfloop.
 *
 * @param root: only needed for assertion.
 * NOTE(review): the branch bodies setting some_outof_loop /
 * some_in_loop and the local declarations appear elided — verify
 * against upstream libFirm.
 */
static int is_head(const ir_graph *n, const ir_graph *root)
	int some_outof_loop = 0, some_in_loop = 0;

	n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		/* backedges are ignored when classifying predecessors */
		if (is_irg_callee_backedge(n, i)) continue;
		if (!irg_is_in_stack(pred)) {
		if (get_irg_uplink(pred) < get_irg_uplink(root)) {
			assert(get_irg_uplink(pred) >= get_irg_uplink(root));
	return some_outof_loop && some_in_loop;

/**
 * Returns non-zero if n is possible loop head of an endless loop.
 * I.e., it is a Block or Phi node and has only predecessors
 * within the loop.
 * @arg root: only needed for assertion.
 * NOTE(review): `n_calless` is a typo kept from the original; its
 * declaration and the branch bodies appear elided — verify upstream.
 */
static int is_endless_head(const ir_graph *n, const ir_graph *root)
	int some_outof_loop = 0, some_in_loop = 0;

	n_calless = get_irg_n_callees(n);
	for (i = 0; i < n_calless; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);

		if (is_irg_callee_backedge(n, i))
		if (!irg_is_in_stack(pred)) {
		if (get_irg_uplink(pred) < get_irg_uplink(root)) {
			assert(get_irg_uplink(pred) >= get_irg_uplink(root));
	/* endless: nothing points out of the loop, something points in */
	return !some_outof_loop && some_in_loop;
/**
 * Finds index of the predecessor with the smallest dfn number
 * greater-equal than limit.
 *
 * @param n       graph whose callees are scanned
 * @param limit   lower bound (inclusive) on the dfn numbers considered
 * @param result  out: index of the found callee
 * @return        true if such a predecessor exists
 * NOTE(review): the `found` declaration, the result assignments and
 * the final return appear elided — verify against upstream libFirm.
 */
static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
	size_t index = 0, min = 0;

	size_t i, n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		/* only in-stack, non-backedge callees are candidates */
		if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
		if (get_irg_dfn(pred) >= limit && (!found || get_irg_dfn(pred) < min)) {
			min = get_irg_dfn(pred);

/** Finds index of the predecessor with the largest dfn number.
 * NOTE(review): same elisions as smallest_dfn_pred — verify upstream. */
static bool largest_dfn_pred(const ir_graph *n, size_t *result)
	size_t index = 0, max = 0;

	size_t i, n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
		/* Note: dfn is always > 0 */
		if (get_irg_dfn(pred) > max) {
			max = get_irg_dfn(pred);
/**
 * Searches the scc stack (from the top) for a proper cfloop head for the
 * scc whose root is n; marks the corresponding callee edge of the found
 * head as backedge and returns the callee (the loop tail), or NULL when
 * the scc is not a loop.
 * NOTE(review): this view is heavily elided (declarations of m/i/found/
 * res_index, loop decrements, the head checks and early exits);
 * the `icfg`/`rm_cyclic_phis` line may stem from an elided comment —
 * verify against upstream libFirm before relying on this exact text.
 */
static ir_graph *find_tail(const ir_graph *n)
	if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;

	m = stack[tos - 1]; /* tos = top of stack */

	found = smallest_dfn_pred(m, 0, &res_index);
	if (!found && /* no smallest dfn pred found. */
	if (m == n) return NULL; // Is this to catch Phi - self loops?
	/* scan downwards for a regular loop head */
	for (i = tos - 1; i > 0;) {
		found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
		if (! found) /* no smallest dfn pred found. */
			found = largest_dfn_pred(m, &res_index);

	/* We should not walk past our selves on the stack: The upcoming nodes
	   are not in this loop. We assume a loop not reachable from Start. */

	/* A dead loop not reachable from Start. */
	for (i = tos-1; i > 0;) {
		if (is_endless_head(m, n)) {
			found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
			if (!found) /* no smallest dfn pred found. */
				found = largest_dfn_pred(m, &res_index);

	/* It's not an unreachable loop, either. */
	//assert(0 && "no head found on stack");

	/* mark the edge from the head into the loop as backedge */
	set_irg_callee_backedge(m, res_index);
	return get_irg_callee(m, res_index);
811 /*-----------------------------------------------------------*
812 * The core algorithm. *
813 *-----------------------------------------------------------*/
/**
 * The core of the loop construction: Tarjan's strongly-connected-
 * component algorithm on the callgraph, using the dfn/uplink numbers
 * stored in each graph's scc_info.
 * NOTE(review): local declarations (i, n_callees, m), the push(n) call,
 * the recursive cgscc(m) call, the current_dfn increment and several
 * closing braces appear elided in this view — verify against upstream.
 */
static void cgscc(ir_graph *n)
	if (cg_irg_visited(n)) return;
	mark_cg_irg_visited(n);

	/* Initialize the node */
	set_irg_dfn(n, current_dfn);      /* Depth first number for this node */
	set_irg_uplink(n, current_dfn);   /* ... is default uplink. */

	n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		if (is_irg_callee_backedge(n, i)) continue;
		m = get_irg_callee(n, i);

		/** This marks the backedge, but does it guarantee a correct loop tree? */
		//if (m == n) { set_irg_callee_backedge(n, i); continue; }

		if (irg_is_in_stack(m)) {
			/* Uplink of m is smaller if n->m is a backedge.
			   Propagate the uplink to mark the cfloop. */
			if (get_irg_uplink(m) < get_irg_uplink(n))
				set_irg_uplink(n, get_irg_uplink(m));

	if (get_irg_dfn(n) == get_irg_uplink(n)) {
		/* This condition holds for
		   1) the node with the incoming backedge.
		      That is: We found a cfloop!
		   2) Straight line code, because no uplink has been propagated, so the
		      uplink still is the same as the dfn.

		   But n might not be a proper cfloop head for the analysis. Proper cfloop
		   heads are Block and Phi nodes. find_tail searches the stack for
		   Block's and Phi's and takes those nodes as cfloop heads for the current
		   cfloop instead and marks the incoming edge as backedge. */

		ir_graph *tail = find_tail(n);
		/* We have a cfloop, that is no straight line code,
		   because we found a cfloop head!
		   Next actions: Open a new cfloop on the cfloop tree and
		   try to find inner cfloops */

		ir_loop *l = new_loop();

		/* Remove the cfloop from the stack ... */
		pop_scc_unmark_visit(n);

		/* The current backedge has been marked, that is temporarily eliminated,
		   by find tail. Start the scc algorithm
		   anew on the subgraph thats left (the current cfloop without the backedge)
		   in order to find more inner cfloops. */

		assert(cg_irg_visited(n));
889 * reset the backedge information for all callers in all irgs
891 static void reset_isbe(void)
893 size_t i, n_irgs = get_irp_n_irgs();
895 for (i = 0; i < n_irgs; ++i) {
896 ir_graph *irg = get_irp_irg(i);
898 if (irg->caller_isbe)
899 xfree(irg->caller_isbe);
900 irg->caller_isbe = NULL;
902 if (irg->callee_isbe)
903 xfree(irg->callee_isbe);
904 irg->callee_isbe = NULL;
908 /* ----------------------------------------------------------------------------------- */
909 /* The recursion stuff driver. */
910 /* ----------------------------------------------------------------------------------- */
/* Compute the backedges that represent recursions. */
void find_callgraph_recursions(void)
	/* NOTE(review): local declarations (i, n_irgs, a temp obstack),
	   obstack_init/init_scc/init_stack calls and the cgscc(irg) calls
	   inside the two loops appear elided — verify against upstream. */
	/* -- compute the looptree. -- */

	/* The outermost graph. We start here. Then we start at all
	   functions in irg list that are never called, then at the remaining
	   unvisited ones. The third step is needed for functions that are not
	   reachable from the outermost graph, but call themselves in a cycle. */
	assert(get_irp_main_irg());
	outermost_ir_graph = get_irp_main_irg();

	new_loop(); /* sets current_loop */

	cgscc(outermost_ir_graph);
	n_irgs = get_irp_n_irgs();
	/* second pass: never-called roots */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg) && get_irg_n_callers(irg) == 0)
	/* third pass: unreachable self-recursive cycles */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg))
	obstack_free(&temp, NULL);

	irp->outermost_cg_loop = current_loop;
	mature_loops(current_loop, outermost_ir_graph->obst);

	/* -- Reverse the backedge information. -- */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		size_t j, n_callees = get_irg_n_callees(irg);
		for (j = 0; j < n_callees; ++j) {
			if (is_irg_callee_backedge(irg, j))
				set_irg_caller_backedge(get_irg_callee(irg, j), irg);

	irp->callgraph_state = irp_callgraph_and_calltree_consistent;
965 /* Returns the maximal loop depth of all paths from an external visible method to
967 size_t get_irg_loop_depth(const ir_graph *irg)
969 assert(irp->callgraph_state == irp_callgraph_consistent ||
970 irp->callgraph_state == irp_callgraph_and_calltree_consistent);
971 return irg->callgraph_loop_depth;
974 /* Returns the maximal recursion depth of all paths from an external visible method to
976 size_t get_irg_recursion_depth(const ir_graph *irg)
978 assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
979 return irg->callgraph_recursion_depth;
/* Computes the interprocedural loop nesting information. */
void analyse_loop_nesting_depth(void)
	/* establish preconditions. */
	if (get_irp_callee_info_state() != irg_callee_info_consistent) {
		ir_entity **free_methods = NULL;

		/* compute callee information first;
		   NOTE(review): the free_methods array is apparently not
		   released in this view — verify against upstream */
		cgana(&free_methods);

	/* NOTE(review): the compute_callgraph() call inside this branch
	   appears elided — verify upstream */
	if (irp_callgraph_consistent != get_irp_callgraph_state()) {
	find_callgraph_recursions();

	set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
1002 loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
1004 return irp->lnd_state;
1006 void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
1010 void set_irp_loop_nesting_depth_state_inconsistent(void)
1012 if (irp->lnd_state == loop_nesting_depth_consistent)
1013 irp->lnd_state = loop_nesting_depth_inconsistent;