/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Representation and computation of the callgraph.
 * @author  Goetz Lindenmaier
 */
#include "callgraph.h"

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#include "irgraph_t.h"
#include "raw_bitset.h"
47 static ir_visited_t master_cg_visited = 0;
48 static inline int cg_irg_visited (ir_graph *n);
49 static inline void mark_cg_irg_visited(ir_graph *n);
51 irp_callgraph_state get_irp_callgraph_state(void)
53 return irp->callgraph_state;
56 void set_irp_callgraph_state(irp_callgraph_state s)
58 irp->callgraph_state = s;
61 size_t get_irg_n_callers(const ir_graph *irg)
64 return irg->callers ? ARR_LEN(irg->callers) : 0;
67 ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
69 assert(pos < get_irg_n_callers(irg));
70 return irg->callers ? irg->callers[pos] : NULL;
73 int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
75 assert(pos < get_irg_n_callers(irg));
76 return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
79 /** Search the caller in the list of all callers and set its backedge property. */
80 static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
82 size_t i, n_callers = get_irg_n_callers(irg);
84 /* allocate a new array on demand */
85 if (irg->caller_isbe == NULL)
86 irg->caller_isbe = rbitset_malloc(n_callers);
87 for (i = 0; i < n_callers; ++i) {
88 if (get_irg_caller(irg, i) == caller) {
89 rbitset_set(irg->caller_isbe, i);
95 int has_irg_caller_backedge(const ir_graph *irg)
97 size_t i, n_callers = get_irg_n_callers(irg);
99 if (irg->caller_isbe != NULL) {
100 for (i = 0; i < n_callers; ++i)
101 if (rbitset_is_set(irg->caller_isbe, i))
108 * Find the reversion position of a caller.
109 * Given the position pos_caller of an caller of irg, return
110 * irg's callee position on that caller.
112 static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
114 ir_graph *caller = get_irg_caller(callee, pos_caller);
115 /* search the other relation for the corresponding edge. */
116 size_t i, n_callees = get_irg_n_callees(caller);
117 for (i = 0; i < n_callees; ++i) {
118 if (get_irg_callee(caller, i) == callee) {
123 assert(!"reverse_pos() did not find position");
128 size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
130 ir_graph *caller = get_irg_caller(irg, pos);
131 size_t pos_callee = reverse_pos(irg, pos);
133 return get_irg_callee_loop_depth(caller, pos_callee);
136 size_t get_irg_n_callees(const ir_graph *irg)
138 assert(irg->callees);
139 return irg->callees ? ARR_LEN(irg->callees) : 0;
142 ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
144 assert(pos < get_irg_n_callees(irg));
145 return irg->callees ? irg->callees[pos]->irg : NULL;
148 int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
150 assert(pos < get_irg_n_callees(irg));
151 return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;
154 int has_irg_callee_backedge(const ir_graph *irg)
156 size_t i, n_callees = get_irg_n_callees(irg);
158 if (irg->callee_isbe != NULL) {
159 for (i = 0; i < n_callees; ++i)
160 if (rbitset_is_set(irg->callee_isbe, i))
167 * Mark the callee at position pos as a backedge.
169 static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
171 size_t n = get_irg_n_callees(irg);
173 /* allocate a new array on demand */
174 if (irg->callee_isbe == NULL)
175 irg->callee_isbe = rbitset_malloc(n);
177 rbitset_set(irg->callee_isbe, pos);
180 size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
182 assert(pos < get_irg_n_callees(irg));
183 return irg->callees ? irg->callees[pos]->max_depth : 0;
188 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
190 static void ana_Call(ir_node *n, void *env)
196 if (! is_Call(n)) return;
198 irg = get_irn_irg(n);
199 n_callees = get_Call_n_callees(n);
200 for (i = 0; i < n_callees; ++i) {
201 ir_entity *callee_e = get_Call_callee(n, i);
202 ir_graph *callee = get_entity_irg(callee_e);
206 cg_callee_entry *found;
211 pset_insert((pset *)callee->callers, irg, hash_ptr(irg));
212 found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, hash_ptr(callee));
213 if (found) { /* add Call node to list, compute new nesting. */
214 ir_node **arr = found->call_list;
215 ARR_APP1(ir_node *, arr, n);
216 found->call_list = arr;
217 } else { /* New node, add Call node and init nesting. */
218 found = OALLOC(get_irg_obstack(irg), cg_callee_entry);
220 found->call_list = NEW_ARR_F(ir_node *, 1);
221 found->call_list[0] = n;
222 found->max_depth = 0;
223 pset_insert((pset *)irg->callees, found, hash_ptr(callee));
225 depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
226 found->max_depth = (depth > found->max_depth) ? depth : found->max_depth;
231 /** compare two ir graphs in a cg_callee_entry */
232 static int cg_callee_entry_cmp(const void *elt, const void *key)
234 const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
235 const cg_callee_entry *e2 = (const cg_callee_entry*) key;
236 return e1->irg != e2->irg;
239 /** compare two ir graphs for pointer identity */
240 static int graph_cmp(const void *elt, const void *key)
242 const ir_graph *e1 = (const ir_graph*) elt;
243 const ir_graph *e2 = (const ir_graph*) key;
247 void compute_callgraph(void)
254 n_irgs = get_irp_n_irgs();
255 for (i = 0; i < n_irgs; ++i) {
256 ir_graph *irg = get_irp_irg(i);
257 assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent);
258 irg->callees = (cg_callee_entry **)new_pset(cg_callee_entry_cmp, 8);
259 irg->callers = (ir_graph **)new_pset(graph_cmp, 8);
260 //construct_cf_backedges(irg);
263 /* Compute the call graph */
264 for (i = 0; i < n_irgs; ++i) {
265 ir_graph *irg = get_irp_irg(i);
266 construct_cf_backedges(irg); // We also find the maximal loop depth of a call.
267 irg_walk_graph(irg, ana_Call, NULL, NULL);
270 /* Change the sets to arrays. */
271 for (i = 0; i < n_irgs; ++i) {
273 ir_graph *irg = get_irp_irg(i);
274 pset *callee_set, *caller_set;
276 callee_set = (pset *)irg->callees;
277 count = pset_count(callee_set);
278 irg->callees = NEW_ARR_F(cg_callee_entry *, count);
279 irg->callee_isbe = NULL;
281 foreach_pset(callee_set, cg_callee_entry, callee) {
282 irg->callees[j++] = callee;
284 del_pset(callee_set);
287 caller_set = (pset *)irg->callers;
288 count = pset_count(caller_set);
289 irg->callers = NEW_ARR_F(ir_graph *, count);
290 irg->caller_isbe = NULL;
292 foreach_pset(caller_set, ir_graph, c) {
293 irg->callers[j++] = c;
295 del_pset(caller_set);
298 set_irp_callgraph_state(irp_callgraph_consistent);
301 void free_callgraph(void)
303 size_t i, n_irgs = get_irp_n_irgs();
304 for (i = 0; i < n_irgs; ++i) {
305 ir_graph *irg = get_irp_irg(i);
306 if (irg->callees) DEL_ARR_F(irg->callees);
307 if (irg->callers) DEL_ARR_F(irg->callers);
308 if (irg->callee_isbe) free(irg->callee_isbe);
309 if (irg->caller_isbe) free(irg->caller_isbe);
312 irg->callee_isbe = NULL;
313 irg->caller_isbe = NULL;
315 set_irp_callgraph_state(irp_callgraph_none);
319 static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
323 if (cg_irg_visited(irg))
325 mark_cg_irg_visited(irg);
330 n_callees = get_irg_n_callees(irg);
331 for (i = 0; i < n_callees; i++) {
332 ir_graph *m = get_irg_callee(irg, i);
333 do_walk(m, pre, post, env);
340 void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
342 size_t i, n_irgs = get_irp_n_irgs();
345 /* roots are methods which have no callers in the current program */
346 for (i = 0; i < n_irgs; ++i) {
347 ir_graph *irg = get_irp_irg(i);
349 if (get_irg_n_callers(irg) == 0)
350 do_walk(irg, pre, post, env);
353 /* in case of unreachable call loops we haven't visited some irgs yet */
354 for (i = 0; i < n_irgs; i++) {
355 ir_graph *irg = get_irp_irg(i);
356 do_walk(irg, pre, post, env);
360 static ir_graph *outermost_ir_graph; /**< The outermost graph the scc is computed
362 static ir_loop *current_loop; /**< Current cfloop construction is working
364 static size_t loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
365 Each cfloop node gets a unique number.
366 What for? ev. remove. @@@ */
367 static size_t current_dfn = 1; /**< Counter to generate depth first numbering
370 typedef struct scc_info {
371 size_t dfn; /**< Depth first search number. */
372 size_t uplink; /**< dfn number of ancestor. */
373 ir_visited_t visited; /**< visited counter */
374 int in_stack; /**< Marks whether node is on the stack. */
378 * allocates a new scc_info on the obstack
380 static inline scc_info *new_scc_info(struct obstack *obst)
382 return OALLOCZ(obst, scc_info);
386 * Returns non-zero if a graph was already visited.
388 static inline int cg_irg_visited(ir_graph *irg)
390 return irg->self_visited >= master_cg_visited;
394 * Marks a graph as visited.
396 static inline void mark_cg_irg_visited(ir_graph *irg)
398 irg->self_visited = master_cg_visited;
402 * Set a graphs visited flag to i.
404 static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
406 irg->self_visited = i;
410 * Returns the visited flag of a graph.
412 static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
414 return irg->self_visited;
417 static inline void mark_irg_in_stack(ir_graph *irg)
419 scc_info *info = (scc_info*) get_irg_link(irg);
420 assert(info && "missing call to init_scc()");
424 static inline void mark_irg_not_in_stack(ir_graph *irg)
426 scc_info *info = (scc_info*) get_irg_link(irg);
427 assert(info && "missing call to init_scc()");
431 static inline int irg_is_in_stack(const ir_graph *irg)
433 scc_info *info = (scc_info*) get_irg_link(irg);
434 assert(info && "missing call to init_scc()");
435 return info->in_stack;
438 static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
440 scc_info *info = (scc_info*) get_irg_link(irg);
441 assert(info && "missing call to init_scc()");
442 info->uplink = uplink;
445 static inline size_t get_irg_uplink(const ir_graph *irg)
447 const scc_info *info = (scc_info*) get_irg_link(irg);
448 assert(info && "missing call to init_scc()");
452 static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
454 scc_info *info = (scc_info*) get_irg_link(irg);
455 assert(info && "missing call to init_scc()");
459 static inline size_t get_irg_dfn(const ir_graph *irg)
461 const scc_info *info = (scc_info*) get_irg_link(irg);
462 assert(info && "missing call to init_scc()");
466 static ir_graph **stack = NULL;
467 static size_t tos = 0; /**< top of stack */
470 * Initialize the irg stack.
472 static inline void init_stack(void)
475 ARR_RESIZE(ir_graph *, stack, 1000);
477 stack = NEW_ARR_F(ir_graph *, 1000);
483 * push a graph on the irg stack
484 * @param n the graph to be pushed
486 static inline void push(ir_graph *irg)
488 if (tos == ARR_LEN(stack)) {
489 size_t nlen = ARR_LEN(stack) * 2;
490 ARR_RESIZE(ir_graph*, stack, nlen);
493 mark_irg_in_stack(irg);
497 * return the topmost graph on the stack and pop it
499 static inline ir_graph *pop(void)
505 mark_irg_not_in_stack(irg);
510 * The nodes up to irg belong to the current loop.
511 * Removes them from the stack and adds them to the current loop.
513 static inline void pop_scc_to_loop(ir_graph *irg)
520 set_irg_dfn(m, loop_node_cnt);
521 add_loop_irg(current_loop, m);
523 //m->callgraph_loop_depth = current_loop->depth;
527 /* GL ??? my last son is my grandson??? Removes cfloops with no
528 ir_nodes in them. Such loops have only another loop as son. (Why
529 can't they have two loops as sons? Does it never get that far? ) */
530 static void close_loop(ir_loop *l)
532 size_t last = get_loop_n_elements(l) - 1;
533 loop_element lelement = get_loop_element(l, last);
534 ir_loop *last_son = lelement.son;
536 if (get_kind(last_son) == k_ir_loop &&
537 get_loop_n_elements(last_son) == 1) {
540 lelement = get_loop_element(last_son, 0);
542 if (get_kind(gson) == k_ir_loop) {
543 loop_element new_last_son;
545 gson->outer_loop = l;
546 new_last_son.son = gson;
547 l->children[last] = new_last_son;
554 * Removes and unmarks all nodes up to n from the stack.
555 * The nodes must be visited once more to assign them to a scc.
557 static inline void pop_scc_unmark_visit(ir_graph *n)
563 set_cg_irg_visited(m, 0);
568 * Allocates a new loop as son of current_loop. Sets current_loop
569 * to the new loop and returns the father.
571 static ir_loop *new_loop(void)
573 ir_loop *father = current_loop;
574 ir_loop *son = alloc_loop(father, get_irg_obstack(outermost_ir_graph));
581 static void init_scc(struct obstack *obst)
589 n_irgs = get_irp_n_irgs();
590 for (i = 0; i < n_irgs; ++i) {
591 ir_graph *irg = get_irp_irg(i);
592 set_irg_link(irg, new_scc_info(obst));
593 irg->callgraph_recursion_depth = 0;
594 irg->callgraph_loop_depth = 0;
598 /** Returns non-zero if n is a loop header, i.e., it is a Block node
599 * and has predecessors within the cfloop and out of the cfloop.
601 * @param root: only needed for assertion.
603 static int is_head(const ir_graph *n, const ir_graph *root)
606 int some_outof_loop = 0, some_in_loop = 0;
608 n_callees = get_irg_n_callees(n);
609 for (i = 0; i < n_callees; ++i) {
610 const ir_graph *pred = get_irg_callee(n, i);
611 if (is_irg_callee_backedge(n, i)) continue;
612 if (!irg_is_in_stack(pred)) {
615 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
616 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
622 return some_outof_loop && some_in_loop;
626 * Returns non-zero if n is possible loop head of an endless loop.
627 * I.e., it is a Block or Phi node and has only predecessors
629 * @arg root: only needed for assertion.
631 static int is_endless_head(const ir_graph *n, const ir_graph *root)
634 int some_outof_loop = 0, some_in_loop = 0;
636 n_calless = get_irg_n_callees(n);
637 for (i = 0; i < n_calless; ++i) {
638 const ir_graph *pred = get_irg_callee(n, i);
640 if (is_irg_callee_backedge(n, i))
642 if (!irg_is_in_stack(pred)) {
645 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
646 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
651 return !some_outof_loop && some_in_loop;
655 * Finds index of the predecessor with the smallest dfn number
656 * greater-equal than limit.
658 static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
660 size_t index = 0, min = 0;
663 size_t i, n_callees = get_irg_n_callees(n);
664 for (i = 0; i < n_callees; ++i) {
665 const ir_graph *pred = get_irg_callee(n, i);
666 if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
668 if (get_irg_dfn(pred) >= limit && (!found || get_irg_dfn(pred) < min)) {
670 min = get_irg_dfn(pred);
679 /** Finds index of the predecessor with the largest dfn number. */
680 static bool largest_dfn_pred(const ir_graph *n, size_t *result)
682 size_t index = 0, max = 0;
685 size_t i, n_callees = get_irg_n_callees(n);
686 for (i = 0; i < n_callees; ++i) {
687 const ir_graph *pred = get_irg_callee(n, i);
688 if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
690 /* Note: dfn is always > 0 */
691 if (get_irg_dfn(pred) > max) {
693 max = get_irg_dfn(pred);
702 static ir_graph *find_tail(const ir_graph *n)
709 if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;
711 m = stack[tos - 1]; /* tos = top of stack */
713 found = smallest_dfn_pred(m, 0, &res_index);
714 if (!found && /* no smallest dfn pred found. */
718 if (m == n) return NULL; // Is this to catch Phi - self loops?
719 for (i = tos - 1; i > 0;) {
723 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
724 if (! found) /* no smallest dfn pred found. */
725 found = largest_dfn_pred(m, &res_index);
730 /* We should not walk past our selves on the stack: The upcoming nodes
731 are not in this loop. We assume a loop not reachable from Start. */
740 /* A dead loop not reachable from Start. */
741 for (i = tos-1; i > 0;) {
743 if (is_endless_head(m, n)) {
744 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
745 if (!found) /* no smallest dfn pred found. */
746 found = largest_dfn_pred(m, &res_index);
749 /* It's not an unreachable loop, either. */
753 //assert(0 && "no head found on stack");
759 set_irg_callee_backedge(m, res_index);
760 return get_irg_callee(m, res_index);
763 static void cgscc(ir_graph *n)
767 if (cg_irg_visited(n)) return;
768 mark_cg_irg_visited(n);
770 /* Initialize the node */
771 set_irg_dfn(n, current_dfn); /* Depth first number for this node */
772 set_irg_uplink(n, current_dfn); /* ... is default uplink. */
776 n_callees = get_irg_n_callees(n);
777 for (i = 0; i < n_callees; ++i) {
779 if (is_irg_callee_backedge(n, i)) continue;
780 m = get_irg_callee(n, i);
782 /** This marks the backedge, but does it guarantee a correct loop tree? */
783 //if (m == n) { set_irg_callee_backedge(n, i); continue; }
786 if (irg_is_in_stack(m)) {
787 /* Uplink of m is smaller if n->m is a backedge.
788 Propagate the uplink to mark the cfloop. */
789 if (get_irg_uplink(m) < get_irg_uplink(n))
790 set_irg_uplink(n, get_irg_uplink(m));
794 if (get_irg_dfn(n) == get_irg_uplink(n)) {
795 /* This condition holds for
796 1) the node with the incoming backedge.
797 That is: We found a cfloop!
798 2) Straight line code, because no uplink has been propagated, so the
799 uplink still is the same as the dfn.
801 But n might not be a proper cfloop head for the analysis. Proper cfloop
802 heads are Block and Phi nodes. find_tail searches the stack for
803 Block's and Phi's and takes those nodes as cfloop heads for the current
804 cfloop instead and marks the incoming edge as backedge. */
806 ir_graph *tail = find_tail(n);
808 /* We have a cfloop, that is no straight line code,
809 because we found a cfloop head!
810 Next actions: Open a new cfloop on the cfloop tree and
811 try to find inner cfloops */
814 ir_loop *l = new_loop();
816 /* Remove the cfloop from the stack ... */
817 pop_scc_unmark_visit(n);
819 /* The current backedge has been marked, that is temporarily eliminated,
820 by find tail. Start the scc algorithm
821 anew on the subgraph thats left (the current cfloop without the backedge)
822 in order to find more inner cfloops. */
826 assert(cg_irg_visited(n));
836 * reset the backedge information for all callers in all irgs
838 static void reset_isbe(void)
840 size_t i, n_irgs = get_irp_n_irgs();
842 for (i = 0; i < n_irgs; ++i) {
843 ir_graph *irg = get_irp_irg(i);
845 if (irg->caller_isbe)
846 xfree(irg->caller_isbe);
847 irg->caller_isbe = NULL;
849 if (irg->callee_isbe)
850 xfree(irg->callee_isbe);
851 irg->callee_isbe = NULL;
855 void find_callgraph_recursions(void)
862 /* -- compute the looptree. -- */
864 /* The outermost graph. We start here. Then we start at all
865 functions in irg list that are never called, then at the remaining
866 unvisited ones. The third step is needed for functions that are not
867 reachable from the outermost graph, but call themselves in a cycle. */
868 assert(get_irp_main_irg());
869 outermost_ir_graph = get_irp_main_irg();
874 new_loop(); /* sets current_loop */
877 cgscc(outermost_ir_graph);
878 n_irgs = get_irp_n_irgs();
879 for (i = 0; i < n_irgs; ++i) {
880 ir_graph *irg = get_irp_irg(i);
881 if (!cg_irg_visited(irg) && get_irg_n_callers(irg) == 0)
884 for (i = 0; i < n_irgs; ++i) {
885 ir_graph *irg = get_irp_irg(i);
886 if (!cg_irg_visited(irg))
889 obstack_free(&temp, NULL);
891 irp->outermost_cg_loop = current_loop;
892 mature_loops(current_loop, get_irg_obstack(outermost_ir_graph));
894 /* -- Reverse the backedge information. -- */
895 for (i = 0; i < n_irgs; ++i) {
896 ir_graph *irg = get_irp_irg(i);
897 size_t j, n_callees = get_irg_n_callees(irg);
898 for (j = 0; j < n_callees; ++j) {
899 if (is_irg_callee_backedge(irg, j))
900 set_irg_caller_backedge(get_irg_callee(irg, j), irg);
904 irp->callgraph_state = irp_callgraph_and_calltree_consistent;
907 size_t get_irg_loop_depth(const ir_graph *irg)
909 assert(irp->callgraph_state == irp_callgraph_consistent ||
910 irp->callgraph_state == irp_callgraph_and_calltree_consistent);
911 return irg->callgraph_loop_depth;
914 size_t get_irg_recursion_depth(const ir_graph *irg)
916 assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
917 return irg->callgraph_recursion_depth;
920 void analyse_loop_nesting_depth(void)
922 /* establish preconditions. */
923 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
924 ir_entity **free_methods = NULL;
926 cgana(&free_methods);
930 if (irp_callgraph_consistent != get_irp_callgraph_state()) {
934 find_callgraph_recursions();
936 set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
939 loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
941 return irp->lnd_state;
943 void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
947 void set_irp_loop_nesting_depth_state_inconsistent(void)
949 if (irp->lnd_state == loop_nesting_depth_consistent)
950 irp->lnd_state = loop_nesting_depth_inconsistent;