2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Representation and computation of the callgraph.
23 * @author Goetz Lindenmaier
31 #include "callgraph.h"
35 #include "irgraph_t.h"
43 #include "raw_bitset.h"
/* NOTE(review): this excerpt has interior gaps (the embedded original line
   numbers jump, e.g. 51 -> 53), so braces and some statements are missing
   from view. Code is kept byte-identical below; only comments are added. */

/* Global "visited" generation counter for callgraph walks; an irg is
   considered visited when its self_visited >= this value. */
47 static ir_visited_t master_cg_visited = 0;
48 static inline int cg_irg_visited (ir_graph *n);
49 static inline void mark_cg_irg_visited(ir_graph *n);

/* Returns the consistency state of the program-wide callgraph. */
51 irp_callgraph_state get_irp_callgraph_state(void)
53 return irp->callgraph_state;

/* Sets the consistency state of the program-wide callgraph. */
56 void set_irp_callgraph_state(irp_callgraph_state s)
58 irp->callgraph_state = s;

/* Number of callers of irg; 0 when the caller array was never built. */
61 size_t get_irg_n_callers(const ir_graph *irg)
64 return irg->callers ? ARR_LEN(irg->callers) : 0;

/* Caller of irg at position pos (NULL only if the array is absent,
   which the assert already excludes for valid pos). */
67 ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
69 assert(pos < get_irg_n_callers(irg));
70 return irg->callers ? irg->callers[pos] : NULL;

/* Non-zero iff the caller edge at pos is marked as a backedge; the
   bitset is allocated lazily, absent means "no backedges marked". */
73 int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
75 assert(pos < get_irg_n_callers(irg));
76 return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
79 /** Search the caller in the list of all callers and set its backedge property. */
80 static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
82 size_t i, n_callers = get_irg_n_callers(irg);
84 /* allocate a new array on demand */
85 if (irg->caller_isbe == NULL)
86 irg->caller_isbe = rbitset_malloc(n_callers);
/* Linear scan over the caller list; marks the matching entry's bit.
   NOTE(review): loop body/closing lines are missing from this excerpt. */
87 for (i = 0; i < n_callers; ++i) {
88 if (get_irg_caller(irg, i) == caller) {
89 rbitset_set(irg->caller_isbe, i);

/* Returns non-zero iff any caller edge of irg is marked as a backedge.
   Absent bitset means no backedge was ever set. */
95 int has_irg_caller_backedge(const ir_graph *irg)
97 size_t i, n_callers = get_irg_n_callers(irg);
99 if (irg->caller_isbe != NULL) {
100 for (i = 0; i < n_callers; ++i)
101 if (rbitset_is_set(irg->caller_isbe, i))
108 * Find the reversion position of a caller.
109 * Given the position pos_caller of an caller of irg, return
110 * irg's callee position on that caller.
112 static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
114 ir_graph *caller = get_irg_caller(callee, pos_caller);
115 /* search the other relation for the corresponding edge. */
116 size_t i, n_callees = get_irg_n_callees(caller);
117 for (i = 0; i < n_callees; ++i) {
118 if (get_irg_callee(caller, i) == callee) {
/* Reaching this assert means the caller/callee relations are
   inconsistent (every caller edge must have a matching callee edge). */
123 assert(!"reverse_pos() did not find position");

/* Loop depth of the call edge (irg, pos), read from the caller's side
   of the relation via reverse_pos(). */
128 size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
130 ir_graph *caller = get_irg_caller(irg, pos);
131 size_t pos_callee = reverse_pos(irg, pos);
133 return get_irg_callee_loop_depth(caller, pos_callee);
/* Number of callees of irg. NOTE(review): the assert and the ternary's
   NULL branch contradict each other — with the assert active the `: 0`
   arm is dead; presumably the assert is a debug-build-only check. */
136 size_t get_irg_n_callees(const ir_graph *irg)
138 assert(irg->callees);
139 return irg->callees ? ARR_LEN(irg->callees) : 0;

/* Callee graph at position pos (entries are cg_callee_entry, the graph
   is stored in the entry's ->irg field). */
142 ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
144 assert(pos < get_irg_n_callees(irg));
145 return irg->callees ? irg->callees[pos]->irg : NULL;

/* Non-zero iff the callee edge at pos is marked as a backedge;
   lazily-allocated bitset, absent means "no backedges". */
148 int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
150 assert(pos < get_irg_n_callees(irg));
151 return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;

/* Returns non-zero iff any callee edge of irg is a backedge. */
154 int has_irg_callee_backedge(const ir_graph *irg)
156 size_t i, n_callees = get_irg_n_callees(irg);
158 if (irg->callee_isbe != NULL) {
159 for (i = 0; i < n_callees; ++i)
160 if (rbitset_is_set(irg->callee_isbe, i))

167 * Mark the callee at position pos as a backedge.
169 static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
171 size_t n = get_irg_n_callees(irg);
173 /* allocate a new array on demand */
174 if (irg->callee_isbe == NULL)
175 irg->callee_isbe = rbitset_malloc(n);
177 rbitset_set(irg->callee_isbe, pos);

/* Maximal loop depth recorded for the Call sites of the callee edge. */
180 size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
182 assert(pos < get_irg_n_callees(irg));
183 return irg->callees ? irg->callees[pos]->max_depth : 0;
188 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
/* For each possible callee of a Call node: record irg in the callee's
   caller set, and record (or update) a cg_callee_entry in irg's callee
   set, tracking the list of Call nodes and the maximal loop depth.
   NOTE(review): at this phase callees/callers still hold psets (see
   compute_callgraph), hence the (pset *) casts. Several declaration
   lines (irg, i, n_callees, depth, buf) are missing from this excerpt. */
190 static void ana_Call(ir_node *n, void *env)
196 if (! is_Call(n)) return;
198 irg = get_irn_irg(n);
199 n_callees = get_Call_n_callees(n);
200 for (i = 0; i < n_callees; ++i) {
201 ir_entity *callee_e = get_Call_callee(n, i);
202 ir_graph *callee = get_entity_irg(callee_e);
206 cg_callee_entry *found;
211 pset_insert((pset *)callee->callers, irg, hash_ptr(irg));
212 found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, hash_ptr(callee));
213 if (found) { /* add Call node to list, compute new nesting. */
214 ir_node **arr = found->call_list;
215 ARR_APP1(ir_node *, arr, n);
216 found->call_list = arr;
217 } else { /* New node, add Call node and init nesting. */
218 found = OALLOC(irg->obst, cg_callee_entry);
220 found->call_list = NEW_ARR_F(ir_node *, 1);
221 found->call_list[0] = n;
222 found->max_depth = 0;
223 pset_insert((pset *)irg->callees, found, hash_ptr(callee));
/* Remember the deepest loop nesting among all Call sites of this edge. */
225 depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
226 found->max_depth = (depth > found->max_depth) ? depth : found->max_depth;
231 /** compare two ir graphs in a cg_callee_entry */
/* pset comparator: 0 means equal; compares the wrapped irg pointers. */
232 static int cg_callee_entry_cmp(const void *elt, const void *key)
234 const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
235 const cg_callee_entry *e2 = (const cg_callee_entry*) key;
236 return e1->irg != e2->irg;

239 /** compare two ir graphs for pointer identity */
/* NOTE(review): the return statement (presumably `return e1 != e2;`)
   is missing from this excerpt. */
240 static int graph_cmp(const void *elt, const void *key)
242 const ir_graph *e1 = (const ir_graph*) elt;
243 const ir_graph *e2 = (const ir_graph*) key;
/* Builds the callgraph for all graphs of the program:
   1) allocate temporary psets in irg->callees / irg->callers,
   2) walk every graph with ana_Call (after constructing CF backedges,
      needed for the per-call loop depths),
   3) convert both psets into flexible arrays and free the psets.
   Finally marks the callgraph state consistent.
   NOTE(review): excerpt gaps — declarations of i, j, count, n_irgs and
   several closing braces are missing. */
247 void compute_callgraph(void)
254 n_irgs = get_irp_n_irgs();
255 for (i = 0; i < n_irgs; ++i) {
256 ir_graph *irg = get_irp_irg(i);
257 assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent);
/* Temporarily abuse the callees/callers fields as psets. */
258 irg->callees = (cg_callee_entry **)new_pset(cg_callee_entry_cmp, 8);
259 irg->callers = (ir_graph **)new_pset(graph_cmp, 8);
260 //construct_cf_backedges(irg);
263 /* Compute the call graph */
264 for (i = 0; i < n_irgs; ++i) {
265 ir_graph *irg = get_irp_irg(i);
266 construct_cf_backedges(irg); // We also find the maximal loop depth of a call.
267 irg_walk_graph(irg, ana_Call, NULL, NULL);
270 /* Change the sets to arrays. */
271 for (i = 0; i < n_irgs; ++i) {
273 cg_callee_entry *callee;
274 ir_graph *c, *irg = get_irp_irg(i);
275 pset *callee_set, *caller_set;
277 callee_set = (pset *)irg->callees;
278 count = pset_count(callee_set);
279 irg->callees = NEW_ARR_F(cg_callee_entry *, count);
280 irg->callee_isbe = NULL;
/* Drain the pset into the freshly-allocated array. */
281 callee = (cg_callee_entry*) pset_first(callee_set);
282 for (j = 0; j < count; ++j) {
283 irg->callees[j] = callee;
284 callee = (cg_callee_entry*) pset_next(callee_set);
286 del_pset(callee_set);
287 assert(callee == NULL);
289 caller_set = (pset *)irg->callers;
290 count = pset_count(caller_set);
291 irg->callers = NEW_ARR_F(ir_graph *, count);
292 irg->caller_isbe = NULL;
293 c = (ir_graph*) pset_first(caller_set);
294 for (j = 0; j < count; ++j) {
296 c = (ir_graph*) pset_next(caller_set);
298 del_pset(caller_set);
301 set_irp_callgraph_state(irp_callgraph_consistent);
/* Releases all callgraph data of every graph (callee/caller arrays and
   backedge bitsets) and resets the program state to "no callgraph".
   NOTE(review): the lines nulling irg->callees/irg->callers appear to
   be missing from this excerpt. */
304 void free_callgraph(void)
306 size_t i, n_irgs = get_irp_n_irgs();
307 for (i = 0; i < n_irgs; ++i) {
308 ir_graph *irg = get_irp_irg(i);
309 if (irg->callees) DEL_ARR_F(irg->callees);
310 if (irg->callers) DEL_ARR_F(irg->callers);
311 if (irg->callee_isbe) free(irg->callee_isbe);
312 if (irg->caller_isbe) free(irg->caller_isbe);
315 irg->callee_isbe = NULL;
316 irg->caller_isbe = NULL;
318 set_irp_callgraph_state(irp_callgraph_none);
/* Recursive worker for callgraph_walk(): depth-first over callee edges,
   guarded by the cg-visited generation counter. NOTE(review): the pre/
   post callback invocations are among the lines missing here. */
322 static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
326 if (cg_irg_visited(irg))
328 mark_cg_irg_visited(irg);
333 n_callees = get_irg_n_callees(irg);
334 for (i = 0; i < n_callees; i++) {
335 ir_graph *m = get_irg_callee(irg, i);
336 do_walk(m, pre, post, env);

/* Walks the whole callgraph: first from all root graphs (graphs with
   no callers), then from any graph still unvisited (unreachable call
   cycles). NOTE(review): the bump of master_cg_visited is presumably
   on one of the missing lines — confirm against the full source. */
343 void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
345 size_t i, n_irgs = get_irp_n_irgs();
348 /* roots are methods which have no callers in the current program */
349 for (i = 0; i < n_irgs; ++i) {
350 ir_graph *irg = get_irp_irg(i);
352 if (get_irg_n_callers(irg) == 0)
353 do_walk(irg, pre, post, env);
356 /* in case of unreachable call loops we haven't visited some irgs yet */
357 for (i = 0; i < n_irgs; i++) {
358 ir_graph *irg = get_irp_irg(i);
359 do_walk(irg, pre, post, env);
/* --- State for the SCC / callgraph-loop-tree construction below. --- */
363 static ir_graph *outermost_ir_graph; /**< The outermost graph the scc is computed
365 static ir_loop *current_loop; /**< Current cfloop construction is working
367 static size_t loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
368 Each cfloop node gets a unique number.
369 What for? ev. remove. @@@ */
370 static size_t current_dfn = 1; /**< Counter to generate depth first numbering

/* Per-graph bookkeeping for Tarjan-style SCC search; stored in the
   graph's link field. NOTE(review): the closing `} scc_info;` line is
   missing from this excerpt. */
373 typedef struct scc_info {
374 size_t dfn; /**< Depth first search number. */
375 size_t uplink; /**< dfn number of ancestor. */
376 ir_visited_t visited; /**< visited counter */
377 int in_stack; /**< Marks whether node is on the stack. */

381 * allocates a new scc_info on the obstack
383 static inline scc_info *new_scc_info(struct obstack *obst)
385 return OALLOCZ(obst, scc_info);

389 * Returns non-zero if a graph was already visited.
391 static inline int cg_irg_visited(ir_graph *irg)
393 return irg->self_visited >= master_cg_visited;

397 * Marks a graph as visited.
399 static inline void mark_cg_irg_visited(ir_graph *irg)
401 irg->self_visited = master_cg_visited;

405 * Set a graphs visited flag to i.
407 static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
409 irg->self_visited = i;

413 * Returns the visited flag of a graph.
415 static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
417 return irg->self_visited;
/* Accessors for the scc_info stored in each graph's link field.
   NOTE(review): several bodies below are missing their actual
   assignment/return line in this excerpt (e.g. `info->in_stack = 1;`,
   `return info->uplink;`). */
420 static inline void mark_irg_in_stack(ir_graph *irg)
422 scc_info *info = (scc_info*) get_irg_link(irg);
423 assert(info && "missing call to init_scc()");
427 static inline void mark_irg_not_in_stack(ir_graph *irg)
429 scc_info *info = (scc_info*) get_irg_link(irg);
430 assert(info && "missing call to init_scc()");
434 static inline int irg_is_in_stack(const ir_graph *irg)
436 scc_info *info = (scc_info*) get_irg_link(irg);
437 assert(info && "missing call to init_scc()");
438 return info->in_stack;
441 static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
443 scc_info *info = (scc_info*) get_irg_link(irg);
444 assert(info && "missing call to init_scc()");
445 info->uplink = uplink;
448 static inline size_t get_irg_uplink(const ir_graph *irg)
450 const scc_info *info = (scc_info*) get_irg_link(irg);
451 assert(info && "missing call to init_scc()");
455 static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
457 scc_info *info = (scc_info*) get_irg_link(irg);
458 assert(info && "missing call to init_scc()");
462 static inline size_t get_irg_dfn(const ir_graph *irg)
464 const scc_info *info = (scc_info*) get_irg_link(irg);
465 assert(info && "missing call to init_scc()");

/* Explicit DFS stack of graphs used by the SCC search. */
469 static ir_graph **stack = NULL;
470 static size_t tos = 0; /**< top of stack */

473 * Initialize the irg stack.
/* NOTE(review): branch structure (the if/else deciding between resize
   and fresh allocation) is missing from this excerpt. */
475 static inline void init_stack(void)
478 ARR_RESIZE(ir_graph *, stack, 1000);
480 stack = NEW_ARR_F(ir_graph *, 1000);

486 * push a graph on the irg stack
487 * @param n the graph to be pushed
/* Grows the stack by doubling when full; marks the graph as in-stack.
   NOTE(review): the line storing irg into stack[tos++] is missing. */
489 static inline void push(ir_graph *irg)
491 if (tos == ARR_LEN(stack)) {
492 size_t nlen = ARR_LEN(stack) * 2;
493 ARR_RESIZE(ir_graph*, stack, nlen);
496 mark_irg_in_stack(irg);

500 * return the topmost graph on the stack and pop it
/* NOTE(review): the lines decrementing tos and returning irg are
   missing from this excerpt. */
502 static inline ir_graph *pop(void)
508 mark_irg_not_in_stack(irg);
513 * The nodes up to irg belong to the current loop.
514 * Removes them from the stack and adds them to the current loop.
/* NOTE(review): the pop loop itself (do/while over pop() until irg) is
   missing from this excerpt. */
516 static inline void pop_scc_to_loop(ir_graph *irg)
523 set_irg_dfn(m, loop_node_cnt);
524 add_loop_irg(current_loop, m);
526 //m->callgraph_loop_depth = current_loop->depth;

530 /* GL ??? my last son is my grandson??? Removes cfloops with no
531 ir_nodes in them. Such loops have only another loop as son. (Why
532 can't they have two loops as sons? Does it never get that far? ) */
/* Collapses a degenerate loop whose single element is itself a loop:
   the grandson is re-parented directly under l. */
533 static void close_loop(ir_loop *l)
535 size_t last = get_loop_n_elements(l) - 1;
536 loop_element lelement = get_loop_element(l, last);
537 ir_loop *last_son = lelement.son;
539 if (get_kind(last_son) == k_ir_loop &&
540 get_loop_n_elements(last_son) == 1) {
543 lelement = get_loop_element(last_son, 0);
545 if (get_kind(gson) == k_ir_loop) {
546 loop_element new_last_son;
548 gson->outer_loop = l;
549 new_last_son.son = gson;
550 l->children[last] = new_last_son;

557 * Removes and unmarks all nodes up to n from the stack.
558 * The nodes must be visited once more to assign them to a scc.
/* NOTE(review): the pop loop is missing; only the unmark call of the
   loop body is visible here. */
560 static inline void pop_scc_unmark_visit(ir_graph *n)
566 set_cg_irg_visited(m, 0);

571 * Allocates a new loop as son of current_loop. Sets current_loop
572 * to the new loop and returns the father.
574 static ir_loop *new_loop(void)
576 ir_loop *father = current_loop;
577 ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
/* Prepares every graph for the SCC search: attaches a zeroed scc_info
   (allocated on obst) to the link field and resets the per-graph
   recursion/loop depth counters. NOTE(review): resets of the global
   counters (current_dfn, loop_node_cnt, stack) are presumably on the
   missing lines — confirm against the full source. */
584 static void init_scc(struct obstack *obst)
592 n_irgs = get_irp_n_irgs();
593 for (i = 0; i < n_irgs; ++i) {
594 ir_graph *irg = get_irp_irg(i);
595 set_irg_link(irg, new_scc_info(obst));
596 irg->callgraph_recursion_depth = 0;
597 irg->callgraph_loop_depth = 0;
601 /** Returns non-zero if n is a loop header, i.e., it is a Block node
602 * and has predecessors within the cfloop and out of the cfloop.
604 * @param root: only needed for assertion.
/* Classifies n by scanning its (non-backedge) callee successors:
   out-of-stack ones count as "out of loop", in-stack ones as "in loop".
   The assert inside the uplink comparison is deliberately unreachable
   unless the uplink invariant is violated. */
606 static int is_head(const ir_graph *n, const ir_graph *root)
609 int some_outof_loop = 0, some_in_loop = 0;
611 n_callees = get_irg_n_callees(n);
612 for (i = 0; i < n_callees; ++i) {
613 const ir_graph *pred = get_irg_callee(n, i);
614 if (is_irg_callee_backedge(n, i)) continue;
615 if (!irg_is_in_stack(pred)) {
618 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
619 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
625 return some_outof_loop && some_in_loop;

629 * Returns non-zero if n is possible loop head of an endless loop.
630 * I.e., it is a Block or Phi node and has only predecessors
632 * @arg root: only needed for assertion.
/* Same scan as is_head(), but an endless-loop head has NO successor
   outside the loop. (Note the original's `n_calless` spelling.) */
634 static int is_endless_head(const ir_graph *n, const ir_graph *root)
637 int some_outof_loop = 0, some_in_loop = 0;
639 n_calless = get_irg_n_callees(n);
640 for (i = 0; i < n_calless; ++i) {
641 const ir_graph *pred = get_irg_callee(n, i);
643 if (is_irg_callee_backedge(n, i))
645 if (!irg_is_in_stack(pred)) {
648 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
649 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
654 return !some_outof_loop && some_in_loop;
658 * Finds index of the predecessor with the smallest dfn number
659 * greater-equal than limit.
/* Writes the found index into *result; returns whether any qualifying
   in-stack, non-backedge callee exists. NOTE(review): the `found` flag
   declaration, index assignment and final return lines are missing. */
661 static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
663 size_t index = 0, min = 0;
666 size_t i, n_callees = get_irg_n_callees(n);
667 for (i = 0; i < n_callees; ++i) {
668 const ir_graph *pred = get_irg_callee(n, i);
669 if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
671 if (get_irg_dfn(pred) >= limit && (!found || get_irg_dfn(pred) < min)) {
673 min = get_irg_dfn(pred);

682 /** Finds index of the predecessor with the largest dfn number. */
/* Mirror of smallest_dfn_pred without a lower limit; dfn > 0 makes the
   max==0 initial value a safe sentinel. */
683 static bool largest_dfn_pred(const ir_graph *n, size_t *result)
685 size_t index = 0, max = 0;
688 size_t i, n_callees = get_irg_n_callees(n);
689 for (i = 0; i < n_callees; ++i) {
690 const ir_graph *pred = get_irg_callee(n, i);
691 if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
693 /* Note: dfn is always > 0 */
694 if (get_irg_dfn(pred) > max) {
696 max = get_irg_dfn(pred);
/* Searches the stack (top-down) for the tail of the current SCC: first
   for a regular loop head, then — if none is found — for an endless-
   loop head; marks the chosen edge as a callee backedge and returns
   the callee it points to. NOTE(review): heavily elided in this
   excerpt — the head-search loops' bodies, the `res_index`/`found`
   declarations and several braces are on missing lines. */
705 static ir_graph *find_tail(const ir_graph *n)
712 if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;
714 m = stack[tos - 1]; /* tos = top of stack */
716 found = smallest_dfn_pred(m, 0, &res_index);
717 if (!found && /* no smallest dfn pred found. */
721 if (m == n) return NULL; // Is this to catch Phi - self loops?
722 for (i = tos - 1; i > 0;) {
726 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
727 if (! found) /* no smallest dfn pred found. */
728 found = largest_dfn_pred(m, &res_index);
733 /* We should not walk past our selves on the stack: The upcoming nodes
734 are not in this loop. We assume a loop not reachable from Start. */
743 /* A dead loop not reachable from Start. */
744 for (i = tos-1; i > 0;) {
746 if (is_endless_head(m, n)) {
747 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
748 if (!found) /* no smallest dfn pred found. */
749 found = largest_dfn_pred(m, &res_index);
752 /* It's not an unreachable loop, either. */
756 //assert(0 && "no head found on stack");
762 set_irg_callee_backedge(m, res_index);
763 return get_irg_callee(m, res_index);
/* Tarjan-style SCC walk over the callgraph (recursive). Assigns dfn/
   uplink numbers, pushes graphs on the stack, recurses into callees,
   propagates uplinks, and on dfn==uplink detects an SCC: find_tail()
   picks the loop head, a new loop is opened in the loop tree and the
   algorithm restarts on the remaining subgraph.
   NOTE(review): the push(n), the recursive cgscc calls, the current_dfn
   increment and the tail-handling block are on missing lines. */
766 static void cgscc(ir_graph *n)
770 if (cg_irg_visited(n)) return;
771 mark_cg_irg_visited(n);
773 /* Initialize the node */
774 set_irg_dfn(n, current_dfn); /* Depth first number for this node */
775 set_irg_uplink(n, current_dfn); /* ... is default uplink. */
779 n_callees = get_irg_n_callees(n);
780 for (i = 0; i < n_callees; ++i) {
782 if (is_irg_callee_backedge(n, i)) continue;
783 m = get_irg_callee(n, i);
785 /** This marks the backedge, but does it guarantee a correct loop tree? */
786 //if (m == n) { set_irg_callee_backedge(n, i); continue; }
789 if (irg_is_in_stack(m)) {
790 /* Uplink of m is smaller if n->m is a backedge.
791 Propagate the uplink to mark the cfloop. */
792 if (get_irg_uplink(m) < get_irg_uplink(n))
793 set_irg_uplink(n, get_irg_uplink(m));
797 if (get_irg_dfn(n) == get_irg_uplink(n)) {
798 /* This condition holds for
799 1) the node with the incoming backedge.
800 That is: We found a cfloop!
801 2) Straight line code, because no uplink has been propagated, so the
802 uplink still is the same as the dfn.
804 But n might not be a proper cfloop head for the analysis. Proper cfloop
805 heads are Block and Phi nodes. find_tail searches the stack for
806 Block's and Phi's and takes those nodes as cfloop heads for the current
807 cfloop instead and marks the incoming edge as backedge. */
809 ir_graph *tail = find_tail(n);
811 /* We have a cfloop, that is no straight line code,
812 because we found a cfloop head!
813 Next actions: Open a new cfloop on the cfloop tree and
814 try to find inner cfloops */
817 ir_loop *l = new_loop();
819 /* Remove the cfloop from the stack ... */
820 pop_scc_unmark_visit(n);
822 /* The current backedge has been marked, that is temporarily eliminated,
823 by find tail. Start the scc algorithm
824 anew on the subgraph thats left (the current cfloop without the backedge)
825 in order to find more inner cfloops. */
829 assert(cg_irg_visited(n));
839 * reset the backedge information for all callers in all irgs
/* Frees and nulls both backedge bitsets of every graph, so a later
   analysis run starts from a clean slate. */
841 static void reset_isbe(void)
843 size_t i, n_irgs = get_irp_n_irgs();
845 for (i = 0; i < n_irgs; ++i) {
846 ir_graph *irg = get_irp_irg(i);
848 if (irg->caller_isbe)
849 xfree(irg->caller_isbe);
850 irg->caller_isbe = NULL;
852 if (irg->callee_isbe)
853 xfree(irg->callee_isbe);
854 irg->callee_isbe = NULL;
/* Computes the callgraph loop tree: runs the SCC algorithm starting at
   the main graph, then at all uncalled graphs, then at anything still
   unvisited (self-recursive cycles unreachable from main). Afterwards
   matures the loop tree and mirrors callee backedges onto the caller
   side. NOTE(review): init (reset_isbe, obstack/temp setup, init_scc,
   the cgscc calls in the two loops) sits on missing lines. */
858 void find_callgraph_recursions(void)
865 /* -- compute the looptree. -- */
867 /* The outermost graph. We start here. Then we start at all
868 functions in irg list that are never called, then at the remaining
869 unvisited ones. The third step is needed for functions that are not
870 reachable from the outermost graph, but call themselves in a cycle. */
871 assert(get_irp_main_irg());
872 outermost_ir_graph = get_irp_main_irg();
877 new_loop(); /* sets current_loop */
880 cgscc(outermost_ir_graph);
881 n_irgs = get_irp_n_irgs();
882 for (i = 0; i < n_irgs; ++i) {
883 ir_graph *irg = get_irp_irg(i);
884 if (!cg_irg_visited(irg) && get_irg_n_callers(irg) == 0)
887 for (i = 0; i < n_irgs; ++i) {
888 ir_graph *irg = get_irp_irg(i);
889 if (!cg_irg_visited(irg))
892 obstack_free(&temp, NULL);
894 irp->outermost_cg_loop = current_loop;
895 mature_loops(current_loop, outermost_ir_graph->obst);
897 /* -- Reverse the backedge information. -- */
898 for (i = 0; i < n_irgs; ++i) {
899 ir_graph *irg = get_irp_irg(i);
900 size_t j, n_callees = get_irg_n_callees(irg);
901 for (j = 0; j < n_callees; ++j) {
902 if (is_irg_callee_backedge(irg, j))
903 set_irg_caller_backedge(get_irg_callee(irg, j), irg);
907 irp->callgraph_state = irp_callgraph_and_calltree_consistent;
/* Loop depth of a graph in the callgraph loop tree; valid only with a
   consistent callgraph. */
910 size_t get_irg_loop_depth(const ir_graph *irg)
912 assert(irp->callgraph_state == irp_callgraph_consistent ||
913 irp->callgraph_state == irp_callgraph_and_calltree_consistent);
914 return irg->callgraph_loop_depth;

/* Recursion depth of a graph; requires the full calltree analysis. */
917 size_t get_irg_recursion_depth(const ir_graph *irg)
919 assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
920 return irg->callgraph_recursion_depth;

/* Establishes callee info and the callgraph as needed, then computes
   recursions/loop nesting and marks the depth state consistent.
   NOTE(review): the compute_callgraph() call and free of free_methods
   are presumably on the missing lines. */
923 void analyse_loop_nesting_depth(void)
925 /* establish preconditions. */
926 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
927 ir_entity **free_methods = NULL;
929 cgana(&free_methods);
933 if (irp_callgraph_consistent != get_irp_callgraph_state()) {
937 find_callgraph_recursions();
939 set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);

/* Trivial accessors for the loop-nesting-depth state flag.
   NOTE(review): the setter's assignment body is on a missing line. */
942 loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
944 return irp->lnd_state;
946 void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
950 void set_irp_loop_nesting_depth_state_inconsistent(void)
952 if (irp->lnd_state == loop_nesting_depth_consistent)
953 irp->lnd_state = loop_nesting_depth_inconsistent;