 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Representation and computation of the callgraph.
23 * @author Goetz Lindenmaier
31 #include "callgraph.h"
35 #include "irgraph_t.h"
43 #include "raw_bitset.h"
47 static ir_visited_t master_cg_visited = 0;
48 static inline int cg_irg_visited (ir_graph *n);
49 static inline void mark_cg_irg_visited(ir_graph *n);
51 /** Returns the callgraph state of the program representation. */
52 irp_callgraph_state get_irp_callgraph_state(void)
54 return irp->callgraph_state;
57 /* Sets the callgraph state of the program representation. */
58 void set_irp_callgraph_state(irp_callgraph_state s)
60 irp->callgraph_state = s;
63 /* Returns the number of procedures that call the given irg. */
64 size_t get_irg_n_callers(const ir_graph *irg)
67 return irg->callers ? ARR_LEN(irg->callers) : 0;
70 /* Returns the caller at position pos. */
71 ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
73 assert(pos < get_irg_n_callers(irg));
74 return irg->callers ? irg->callers[pos] : NULL;
77 /* Returns non-zero if the caller at position pos is "a backedge", i.e. a recursion. */
78 int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
80 assert(pos < get_irg_n_callers(irg));
81 return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
84 /** Search the caller in the list of all callers and set its backedge property. */
85 static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
87 size_t i, n_callers = get_irg_n_callers(irg);
89 /* allocate a new array on demand */
90 if (irg->caller_isbe == NULL)
91 irg->caller_isbe = rbitset_malloc(n_callers);
92 for (i = 0; i < n_callers; ++i) {
93 if (get_irg_caller(irg, i) == caller) {
94 rbitset_set(irg->caller_isbe, i);
100 /* Returns non-zero if the irg has a backedge caller. */
101 int has_irg_caller_backedge(const ir_graph *irg)
103 size_t i, n_callers = get_irg_n_callers(irg);
105 if (irg->caller_isbe != NULL) {
106 for (i = 0; i < n_callers; ++i)
107 if (rbitset_is_set(irg->caller_isbe, i))
114 * Find the reversion position of a caller.
115 * Given the position pos_caller of an caller of irg, return
116 * irg's callee position on that caller.
118 static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
120 ir_graph *caller = get_irg_caller(callee, pos_caller);
121 /* search the other relation for the corresponding edge. */
122 size_t i, n_callees = get_irg_n_callees(caller);
123 for (i = 0; i < n_callees; ++i) {
124 if (get_irg_callee(caller, i) == callee) {
129 assert(!"reverse_pos() did not find position");
134 /* Returns the maximal loop depth of call nodes that call along this edge. */
135 size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
137 ir_graph *caller = get_irg_caller(irg, pos);
138 size_t pos_callee = reverse_pos(irg, pos);
140 return get_irg_callee_loop_depth(caller, pos_callee);
144 /* Returns the number of procedures that are called by the given irg. */
145 size_t get_irg_n_callees(const ir_graph *irg)
147 assert(irg->callees);
148 return irg->callees ? ARR_LEN(irg->callees) : 0;
151 /* Returns the callee at position pos. */
152 ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
154 assert(pos < get_irg_n_callees(irg));
155 return irg->callees ? irg->callees[pos]->irg : NULL;
158 /* Returns non-zero if the callee at position pos is "a backedge", i.e. a recursion. */
159 int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
161 assert(pos < get_irg_n_callees(irg));
162 return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;
165 /* Returns non-zero if the irg has a backedge callee. */
166 int has_irg_callee_backedge(const ir_graph *irg)
168 size_t i, n_callees = get_irg_n_callees(irg);
170 if (irg->callee_isbe != NULL) {
171 for (i = 0; i < n_callees; ++i)
172 if (rbitset_is_set(irg->callee_isbe, i))
179 * Mark the callee at position pos as a backedge.
181 static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
183 size_t n = get_irg_n_callees(irg);
185 /* allocate a new array on demand */
186 if (irg->callee_isbe == NULL)
187 irg->callee_isbe = rbitset_malloc(n);
189 rbitset_set(irg->callee_isbe, pos);
192 /* Returns the maximal loop depth of call nodes that call along this edge. */
193 size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
195 assert(pos < get_irg_n_callees(irg));
196 return irg->callees ? irg->callees[pos]->max_depth : 0;
200 /* --------------------- Compute the callgraph ------------------------ */
203 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
/**
 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
 * For every possible callee of a Call it records the containing graph
 * in the callee's caller set and a cg_callee_entry (Call node list plus
 * maximal loop depth of any such Call) in the caller's callee set.
 */
static void ana_Call(ir_node *n, void *env)
	/* only Call nodes contribute callgraph edges */
	if (! is_Call(n)) return;

	irg = get_irn_irg(n);              /* the graph containing the Call is the caller */
	n_callees = get_Call_n_callees(n); /* one Call may have several possible callees */
	for (i = 0; i < n_callees; ++i) {
		ir_entity *callee_e = get_Call_callee(n, i);
		ir_graph *callee = get_entity_irg(callee_e);
			cg_callee_entry *found;

			/* record the relation in both directions; the psets are
			   converted to plain arrays later by compute_callgraph() */
			pset_insert((pset *)callee->callers, irg, HASH_PTR(irg));
			found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, HASH_PTR(callee));
			if (found) { /* add Call node to list, compute new nesting. */
				ir_node **arr = found->call_list;
				ARR_APP1(ir_node *, arr, n);
				found->call_list = arr;
			} else { /* New node, add Call node and init nesting. */
				found = OALLOC(irg->obst, cg_callee_entry);
				found->call_list = NEW_ARR_F(ir_node *, 1);
				found->call_list[0] = n;
				found->max_depth = 0;
				pset_insert((pset *)irg->callees, found, HASH_PTR(callee));
			/* the edge depth is the deepest loop nesting of any Call
			   along this caller->callee edge */
			depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
			found->max_depth = (depth > found->max_depth) ? depth : found->max_depth;
246 /** compare two ir graphs in a cg_callee_entry */
247 static int cg_callee_entry_cmp(const void *elt, const void *key)
249 const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
250 const cg_callee_entry *e2 = (const cg_callee_entry*) key;
251 return e1->irg != e2->irg;
254 /** compare two ir graphs for pointer identity */
255 static int graph_cmp(const void *elt, const void *key)
257 const ir_graph *e1 = (const ir_graph*) elt;
258 const ir_graph *e2 = (const ir_graph*) key;
263 /* Construct and destruct the callgraph. */
void compute_callgraph(void)
	/* pass 1: allocate caller/callee psets on every graph; callee info
	   must already be consistent (computed by cgana()) */
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent);
		irg->callees = (cg_callee_entry **)new_pset(cg_callee_entry_cmp, 8);
		irg->callers = (ir_graph **)new_pset(graph_cmp, 8);
		//construct_cf_backedges(irg);

	/* Compute the call graph */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		construct_cf_backedges(irg); // We also find the maximal loop depth of a call.
		irg_walk_graph(irg, ana_Call, NULL, NULL);

	/* Change the sets to arrays. */
	for (i = 0; i < n_irgs; ++i) {
		cg_callee_entry *callee;
		ir_graph *c, *irg = get_irp_irg(i);
		pset *callee_set, *caller_set;

		/* drain the callee pset into a flexible array */
		callee_set = (pset *)irg->callees;
		count = pset_count(callee_set);
		irg->callees = NEW_ARR_F(cg_callee_entry *, count);
		irg->callee_isbe = NULL;
		callee = (cg_callee_entry*) pset_first(callee_set);
		for (j = 0; j < count; ++j) {
			irg->callees[j] = callee;
			callee = (cg_callee_entry*) pset_next(callee_set);
		del_pset(callee_set);
		/* pset_next() must have returned NULL after the last element */
		assert(callee == NULL);

		/* same conversion for the caller pset */
		caller_set = (pset *)irg->callers;
		count = pset_count(caller_set);
		irg->callers = NEW_ARR_F(ir_graph *, count);
		irg->caller_isbe = NULL;
		c = (ir_graph*) pset_first(caller_set);
		for (j = 0; j < count; ++j) {
			c = (ir_graph*) pset_next(caller_set);
		del_pset(caller_set);

	set_irp_callgraph_state(irp_callgraph_consistent);
321 /* Destruct the callgraph. */
322 void free_callgraph(void)
324 size_t i, n_irgs = get_irp_n_irgs();
325 for (i = 0; i < n_irgs; ++i) {
326 ir_graph *irg = get_irp_irg(i);
327 if (irg->callees) DEL_ARR_F(irg->callees);
328 if (irg->callers) DEL_ARR_F(irg->callers);
329 if (irg->callee_isbe) free(irg->callee_isbe);
330 if (irg->caller_isbe) free(irg->caller_isbe);
333 irg->callee_isbe = NULL;
334 irg->caller_isbe = NULL;
336 set_irp_callgraph_state(irp_callgraph_none);
339 /* ----------------------------------------------------------------------------------- */
340 /* A walker for the callgraph */
341 /* ----------------------------------------------------------------------------------- */
344 static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
348 if (cg_irg_visited(irg))
350 mark_cg_irg_visited(irg);
355 n_callees = get_irg_n_callees(irg);
356 for (i = 0; i < n_callees; i++) {
357 ir_graph *m = get_irg_callee(irg, i);
358 do_walk(m, pre, post, env);
365 void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
367 size_t i, n_irgs = get_irp_n_irgs();
370 /* roots are methods which have no callers in the current program */
371 for (i = 0; i < n_irgs; ++i) {
372 ir_graph *irg = get_irp_irg(i);
374 if (get_irg_n_callers(irg) == 0)
375 do_walk(irg, pre, post, env);
378 /* in case of unreachable call loops we haven't visited some irgs yet */
379 for (i = 0; i < n_irgs; i++) {
380 ir_graph *irg = get_irp_irg(i);
381 do_walk(irg, pre, post, env);
385 /* ----------------------------------------------------------------------------------- */
386 /* loop construction algorithm */
387 /* ----------------------------------------------------------------------------------- */
389 static ir_graph *outermost_ir_graph; /**< The outermost graph the scc is computed
391 static ir_loop *current_loop; /**< Current cfloop construction is working
393 static size_t loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
394 Each cfloop node gets a unique number.
395 What for? ev. remove. @@@ */
396 static size_t current_dfn = 1; /**< Counter to generate depth first numbering
399 /*-----------------*/
400 /* Node attributes */
401 /*-----------------*/
403 typedef struct scc_info {
404 size_t dfn; /**< Depth first search number. */
405 size_t uplink; /**< dfn number of ancestor. */
406 ir_visited_t visited; /**< visited counter */
407 int in_stack; /**< Marks whether node is on the stack. */
411 * allocates a new scc_info on the obstack
413 static inline scc_info *new_scc_info(struct obstack *obst)
415 return OALLOCZ(obst, scc_info);
419 * Returns non-zero if a graph was already visited.
421 static inline int cg_irg_visited(ir_graph *irg)
423 return irg->self_visited >= master_cg_visited;
427 * Marks a graph as visited.
429 static inline void mark_cg_irg_visited(ir_graph *irg)
431 irg->self_visited = master_cg_visited;
435 * Set a graphs visited flag to i.
437 static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
439 irg->self_visited = i;
443 * Returns the visited flag of a graph.
445 static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
447 return irg->self_visited;
450 static inline void mark_irg_in_stack(ir_graph *irg)
452 scc_info *info = (scc_info*) get_irg_link(irg);
453 assert(info && "missing call to init_scc()");
457 static inline void mark_irg_not_in_stack(ir_graph *irg)
459 scc_info *info = (scc_info*) get_irg_link(irg);
460 assert(info && "missing call to init_scc()");
464 static inline int irg_is_in_stack(const ir_graph *irg)
466 scc_info *info = (scc_info*) get_irg_link(irg);
467 assert(info && "missing call to init_scc()");
468 return info->in_stack;
471 static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
473 scc_info *info = (scc_info*) get_irg_link(irg);
474 assert(info && "missing call to init_scc()");
475 info->uplink = uplink;
478 static inline size_t get_irg_uplink(const ir_graph *irg)
480 const scc_info *info = (scc_info*) get_irg_link(irg);
481 assert(info && "missing call to init_scc()");
485 static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
487 scc_info *info = (scc_info*) get_irg_link(irg);
488 assert(info && "missing call to init_scc()");
492 static inline size_t get_irg_dfn(const ir_graph *irg)
494 const scc_info *info = (scc_info*) get_irg_link(irg);
495 assert(info && "missing call to init_scc()");
499 /**********************************************************************/
501 /**********************************************************************/
503 static ir_graph **stack = NULL;
504 static size_t tos = 0; /**< top of stack */
507 * Initialize the irg stack.
509 static inline void init_stack(void)
512 ARR_RESIZE(ir_graph *, stack, 1000);
514 stack = NEW_ARR_F(ir_graph *, 1000);
520 * push a graph on the irg stack
521 * @param n the graph to be pushed
523 static inline void push(ir_graph *irg)
525 if (tos == ARR_LEN(stack)) {
526 size_t nlen = ARR_LEN(stack) * 2;
527 ARR_RESIZE(ir_graph*, stack, nlen);
530 mark_irg_in_stack(irg);
534 * return the topmost graph on the stack and pop it
536 static inline ir_graph *pop(void)
542 mark_irg_not_in_stack(irg);
547 * The nodes up to irg belong to the current loop.
548 * Removes them from the stack and adds them to the current loop.
static inline void pop_scc_to_loop(ir_graph *irg)
		/* each popped node gets a fresh, unique dfn number and is
		   appended to the loop under construction */
		set_irg_dfn(m, loop_node_cnt);
		add_loop_irg(current_loop, m);
		//m->callgraph_loop_depth = current_loop->depth;
564 /* GL ??? my last son is my grandson??? Removes cfloops with no
565 ir_nodes in them. Such loops have only another loop as son. (Why
566 can't they have two loops as sons? Does it never get that far? ) */
static void close_loop(ir_loop *l)
	size_t last = get_loop_n_elements(l) - 1;
	loop_element lelement = get_loop_element(l, last);
	ir_loop *last_son = lelement.son;

	/* if the last son is itself a loop with exactly one element,
	   that element may be a degenerate nesting level to collapse */
	if (get_kind(last_son) == k_ir_loop &&
	    get_loop_n_elements(last_son) == 1) {
		lelement = get_loop_element(last_son, 0);
		if (get_kind(gson) == k_ir_loop) {
			loop_element new_last_son;

			/* pull the grandson loop up one level, replacing the
			   empty last son in l's child list */
			gson->outer_loop = l;
			new_last_son.son = gson;
			l->children[last] = new_last_son;
591 * Removes and unmarks all nodes up to n from the stack.
592 * The nodes must be visited once more to assign them to a scc.
594 static inline void pop_scc_unmark_visit(ir_graph *n)
600 set_cg_irg_visited(m, 0);
604 /**********************************************************************/
605 /* The loop data structure. **/
606 /**********************************************************************/
609 * Allocates a new loop as son of current_loop. Sets current_loop
610 * to the new loop and returns the father.
612 static ir_loop *new_loop(void)
614 ir_loop *father = current_loop;
615 ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
622 /**********************************************************************/
623 /* Constructing and destructing the loop/backedge information. **/
624 /**********************************************************************/
626 /* Initialization steps. **********************************************/
628 static void init_scc(struct obstack *obst)
636 n_irgs = get_irp_n_irgs();
637 for (i = 0; i < n_irgs; ++i) {
638 ir_graph *irg = get_irp_irg(i);
639 set_irg_link(irg, new_scc_info(obst));
640 irg->callgraph_recursion_depth = 0;
641 irg->callgraph_loop_depth = 0;
645 /** Returns non-zero if n is a loop header, i.e., it is a Block node
646 * and has predecessors within the cfloop and out of the cfloop.
648 * @param root: only needed for assertion.
static int is_head(const ir_graph *n, const ir_graph *root)
	int some_outof_loop = 0, some_in_loop = 0;

	/* classify every non-backedge callee as inside or outside the
	   scc currently on the stack */
	n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		if (is_irg_callee_backedge(n, i)) continue;
		if (!irg_is_in_stack(pred)) {
			/* NOTE(review): the assert directly contradicts the guarding
			   condition -- it documents a case believed impossible and
			   aborts (in debug builds) if it ever occurs */
			if (get_irg_uplink(pred) < get_irg_uplink(root)) {
				assert(get_irg_uplink(pred) >= get_irg_uplink(root));

	/* a head has callees both inside and outside the loop */
	return some_outof_loop && some_in_loop;
673 * Returns non-zero if n is possible loop head of an endless loop.
674 * I.e., it is a Block or Phi node and has only predecessors
676 * @arg root: only needed for assertion.
static int is_endless_head(const ir_graph *n, const ir_graph *root)
	int some_outof_loop = 0, some_in_loop = 0;

	/* same classification as is_head(); note the local is spelled
	   "n_calless" (sic) */
	n_calless = get_irg_n_callees(n);
	for (i = 0; i < n_calless; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);

		if (is_irg_callee_backedge(n, i))
		if (!irg_is_in_stack(pred)) {
			/* NOTE(review): assert contradicts the guarding condition --
			   documents a case believed impossible, aborts in debug builds */
			if (get_irg_uplink(pred) < get_irg_uplink(root)) {
				assert(get_irg_uplink(pred) >= get_irg_uplink(root));

	/* an endless head has only callees inside the loop */
	return !some_outof_loop && some_in_loop;
702 * Finds index of the predecessor with the smallest dfn number
703 * greater-equal than limit.
static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
	size_t index = 0, min = 0;

	size_t i, n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		/* backedges and callees outside the stack do not count */
		if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
		/* track the smallest dfn that is still >= limit */
		if (get_irg_dfn(pred) >= limit && (!found || get_irg_dfn(pred) < min)) {
			min = get_irg_dfn(pred);
/** Finds index of the predecessor with the largest dfn number. */
static bool largest_dfn_pred(const ir_graph *n, size_t *result)
	size_t index = 0, max = 0;

	size_t i, n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		/* backedges and callees outside the stack do not count */
		if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
		/* Note: dfn is always > 0 */
		if (get_irg_dfn(pred) > max) {
			max = get_irg_dfn(pred);
/** Searches the stack for possible loop heads.  If a head with an
 *  unmarked backedge is found, marks that callee edge as a backedge
 *  and returns the tail of the loop; returns NULL otherwise. */
static ir_graph *find_tail(const ir_graph *n)
	/* NOTE(review): icfg/rm_cyclic_phis/remove_cyclic_phis appear
	   nowhere else in this file -- presumably dead experimental code;
	   confirm before relying on it */
	if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;

	m = stack[tos - 1]; /* tos = top of stack */

	found = smallest_dfn_pred(m, 0, &res_index);
	if (!found && /* no smallest dfn pred found. */

	if (m == n) return NULL; // Is this to catch Phi - self loops?
	/* walk downwards on the stack looking for a head candidate */
	for (i = tos - 1; i > 0;) {
		found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
		if (! found) /* no smallest dfn pred found. */
			found = largest_dfn_pred(m, &res_index);

	/* We should not walk past our selves on the stack:  The upcoming nodes
	   are not in this loop. We assume a loop not reachable from Start. */

	/* A dead loop not reachable from Start. */
	for (i = tos-1; i > 0;) {
		if (is_endless_head(m, n)) {
			found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
			if (!found) /* no smallest dfn pred found. */
				found = largest_dfn_pred(m, &res_index);

	/* It's not an unreachable loop, either. */

	//assert(0 && "no head found on stack");

	/* mark the chosen edge as the loop backedge and return its target */
	set_irg_callee_backedge(m, res_index);
	return get_irg_callee(m, res_index);
810 /*-----------------------------------------------------------*
811 * The core algorithm. *
812 *-----------------------------------------------------------*/
/**
 * The core scc algorithm (Tarjan-style) on the callgraph: assigns dfn
 * and uplink numbers, detects strongly connected components and turns
 * them into loops of the loop tree, marking callee backedges.
 */
static void cgscc(ir_graph *n)
	if (cg_irg_visited(n)) return;
	mark_cg_irg_visited(n);

	/* Initialize the node */
	set_irg_dfn(n, current_dfn);    /* Depth first number for this node */
	set_irg_uplink(n, current_dfn); /* ... is default uplink. */

	/* recurse into all non-backedge callees */
	n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		if (is_irg_callee_backedge(n, i)) continue;
		m = get_irg_callee(n, i);

		/** This marks the backedge, but does it guarantee a correct loop tree? */
		//if (m == n) { set_irg_callee_backedge(n, i); continue; }

		if (irg_is_in_stack(m)) {
			/* Uplink of m is smaller if n->m is a backedge.
			   Propagate the uplink to mark the cfloop. */
			if (get_irg_uplink(m) < get_irg_uplink(n))
				set_irg_uplink(n, get_irg_uplink(m));

	if (get_irg_dfn(n) == get_irg_uplink(n)) {
		/* This condition holds for
		   1) the node with the incoming backedge.
		      That is: We found a cfloop!
		   2) Straight line code, because no uplink has been propagated, so the
		      uplink still is the same as the dfn.

		   But n might not be a proper cfloop head for the analysis. Proper cfloop
		   heads are Block and Phi nodes. find_tail searches the stack for
		   Block's and Phi's and takes those nodes as cfloop heads for the current
		   cfloop instead and marks the incoming edge as backedge. */

		ir_graph *tail = find_tail(n);
			/* We have a cfloop, that is no straight line code,
			   because we found a cfloop head!
			   Next actions: Open a new cfloop on the cfloop tree and
			   try to find inner cfloops */

			ir_loop *l = new_loop();

			/* Remove the cfloop from the stack ... */
			pop_scc_unmark_visit(n);

			/* The current backedge has been marked, that is temporarily eliminated,
			   by find tail. Start the scc algorithm
			   anew on the subgraph thats left (the current cfloop without the backedge)
			   in order to find more inner cfloops. */

	/* n must have been processed by now */
	assert(cg_irg_visited(n));
888 * reset the backedge information for all callers in all irgs
890 static void reset_isbe(void)
892 size_t i, n_irgs = get_irp_n_irgs();
894 for (i = 0; i < n_irgs; ++i) {
895 ir_graph *irg = get_irp_irg(i);
897 if (irg->caller_isbe)
898 xfree(irg->caller_isbe);
899 irg->caller_isbe = NULL;
901 if (irg->callee_isbe)
902 xfree(irg->callee_isbe);
903 irg->callee_isbe = NULL;
907 /* ----------------------------------------------------------------------------------- */
908 /* The recursion stuff driver. */
909 /* ----------------------------------------------------------------------------------- */
/* Compute the backedges that represent recursions. */
void find_callgraph_recursions(void)
	/* -- compute the looptree. -- */

	/* The outermost graph.  We start here.  Then we start at all
	   functions in irg list that are never called, then at the remaining
	   unvisited ones. The third step is needed for functions that are not
	   reachable from the outermost graph, but call themselves in a cycle. */
	assert(get_irp_main_irg());
	outermost_ir_graph = get_irp_main_irg();

	new_loop(); /* sets current_loop */

	/* step 1: start the scc algorithm at the main graph */
	cgscc(outermost_ir_graph);
	/* step 2: continue at all root graphs (no callers) */
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg) && get_irg_n_callers(irg) == 0)
	/* step 3: sweep up graphs only reachable through call cycles */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg))

	obstack_free(&temp, NULL);

	irp->outermost_cg_loop = current_loop;
	mature_loops(current_loop, outermost_ir_graph->obst);

	/* -- Reverse the backedge information. -- */
	/* mirror every callee backedge as a caller backedge on the callee */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		size_t j, n_callees = get_irg_n_callees(irg);
		for (j = 0; j < n_callees; ++j) {
			if (is_irg_callee_backedge(irg, j))
				set_irg_caller_backedge(get_irg_callee(irg, j), irg);

	irp->callgraph_state = irp_callgraph_and_calltree_consistent;
964 /* Returns the maximal loop depth of all paths from an external visible method to
966 size_t get_irg_loop_depth(const ir_graph *irg)
968 assert(irp->callgraph_state == irp_callgraph_consistent ||
969 irp->callgraph_state == irp_callgraph_and_calltree_consistent);
970 return irg->callgraph_loop_depth;
973 /* Returns the maximal recursion depth of all paths from an external visible method to
975 size_t get_irg_recursion_depth(const ir_graph *irg)
977 assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
978 return irg->callgraph_recursion_depth;
/* Computes the interprocedural loop nesting information. */
void analyse_loop_nesting_depth(void)
	/* establish preconditions. */
	/* callee info is needed before the callgraph can be built */
	if (get_irp_callee_info_state() != irg_callee_info_consistent) {
		ir_entity **free_methods = NULL;

		cgana(&free_methods);

	/* build the callgraph if it is not available yet */
	if (irp_callgraph_consistent != get_irp_callgraph_state()) {

	find_callgraph_recursions();

	set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
1001 loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
1003 return irp->lnd_state;
1005 void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
1009 void set_irp_loop_nesting_depth_state_inconsistent(void)
1011 if (irp->lnd_state == loop_nesting_depth_consistent)
1012 irp->lnd_state = loop_nesting_depth_inconsistent;