2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
28 #include "iroptimize.h"
35 #include "irgraph_t.h"
49 #include "irbackedge_t.h"
54 #include "iropt_dbg.h"
56 /*------------------------------------------------------------------*/
57 /* Control flow optimization. */
59 /* Removes Bad control flow predecessors and empty blocks. A block */
60 /* is empty if it contains only a Jmp node. */
61 /* Blocks can only be removed if they are not needed for the */
62 /* semantics of Phi nodes. */
63 /* Further, we NEVER remove labeled blocks (even if we could move */
65 /*------------------------------------------------------------------*/
/*
 * During this pass the Block "mark" flag doubles as a removable bit:
 * mark == 1 means "candidate for removal", mark == 0 means "must keep".
 */
67 #define set_Block_removable(block) set_Block_mark(block, 1)
68 #define set_Block_non_removable(block) set_Block_mark(block, 0)
69 #define is_Block_removable(block) (get_Block_mark(block) != 0)
72 * Replace binary Conds that jump twice into the same block
78 * ProjX True ProjX False ==> | /
83 * Such patterns are the result of if-conversion.
85 * Note that the simple case that Block has only these two
86 * predecessors are already handled in equivalent_node_Block().
/*
 * NOTE(review): this extract is gappy (the embedded per-line numbers skip
 * values), so the declarations of 'i'/'j', the trailing braces and the
 * return statement of this function are not visible here.
 */
88 static int remove_senseless_conds(ir_node *bl) {
90 int n = get_Block_n_cfgpreds(bl);
93 for (i = 0; i < n; ++i) {
94 ir_node *pred_i = get_Block_cfgpred(bl, i);
/* look through the ProjX to the underlying control-flow node */
95 ir_node *cond_i = skip_Proj(pred_i);
98 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
100 for (j = i + 1; j < n; ++j) {
101 ir_node *pred_j = get_Block_cfgpred(bl, j);
102 ir_node *cond_j = skip_Proj(pred_j);
/* both preds stem from the same binary Cond: replace the pair of
 * ProjX by a single Jmp out of the Cond's block and a Bad */
104 if (cond_j == cond_i) {
105 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
106 set_irn_n(bl, i, jmp);
107 set_irn_n(bl, j, new_Bad());
109 DBG_OPT_IFSIM2(cond_i, jmp);
119 /** An environment for merge_blocks and collect nodes. */
/* NOTE(review): the closing "} merge_env;" line of this typedef falls into
 * a gap of this extract and is not visible here. */
120 typedef struct _merge_env {
121 int changed; /**< Set if the graph was changed. */
122 int phis_moved; /**< Set if Phi nodes were moved. */
123 plist_t *list; /**< Helper list for all found Switch Conds. */
127 * Removes Tuples from Block control flow predecessors.
128 * Optimizes blocks with equivalent_node(). This is tricky,
129 * as we want to avoid nodes that have as block predecessor Bads.
130 * Therefore we also optimize at control flow operations, depending
131 * how we first reach the Block.
/*
 * NOTE(review): gappy extract — the declarations of 'i'/'new_block' and
 * several closing braces are in lines missing from this view.
 * Walker pre-op: runs on every node during irg_walk (see optimize_cf).
 */
133 static void merge_blocks(ir_node *node, void *ctx) {
136 merge_env *env = ctx;
138 /* clear the link field for ALL nodes first */
139 set_irn_link(node, NULL);
141 if (is_Block(node)) {
/* replace Tuple predecessors of the block by their skipped value */
143 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
144 ir_node *pred = get_Block_cfgpred(node, i);
145 ir_node *skipped = skip_Tuple(pred);
146 if (pred != skipped) {
147 set_Block_cfgpred(node, i, skipped);
153 new_block = equivalent_node(node);
154 if (new_block != node && ! is_Block_dead(new_block)) {
155 exchange(node, new_block);
159 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
160 /* We will soon visit a block. Optimize it before visiting! */
161 ir_node *b = get_nodes_block(skip_Proj(node));
163 if (!is_Block_dead(b)) {
164 new_block = equivalent_node(b);
166 while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
167 /* We would have to run gigo() if new is bad, so we
168 promote it directly below. Nevertheless, we sometimes reach a block
169 the first time through a dataflow node. In this case we optimized the
170 block as such and have to promote the Bad here. */
171 assert((get_opt_control_flow_straightening() ||
172 get_opt_control_flow_weak_simplification()) &&
173 ("strange flag setting"));
174 exchange(b, new_block);
177 new_block = equivalent_node(b);
180 /* normally, we would create a Bad block here, but this must be
181 * prevented, so just set its cf to Bad.
183 if (is_Block_dead(new_block)) {
184 exchange(node, new_Bad());
192 * Block walker removing control flow from dead block by
193 * inspecting dominance info.
194 * Do not replace blocks by Bad. This optimization shall
195 * ensure, that all Bad control flow predecessors are
196 * removed, and no new other Bads are introduced.
197 * Further removes useless Conds and clears the mark of all blocks.
199 * Must be run in the post walker.
/*
 * NOTE(review): gappy extract — the declarations of 'i' and 'changed'
 * (presumably derived from the 'env' walker argument — confirm against the
 * full source) are in lines missing from this view.
 */
201 static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
205 /* Check block predecessors and turn control flow into bad.
206 Beware of Tuple, kill them. */
207 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
208 ir_node *pred_X = get_Block_cfgpred(block, i);
209 ir_node *skipped = skip_Tuple(pred_X);
211 if (! is_Bad(skipped)) {
212 ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
/* dom_depth < 0 means the block was never reached by the dominance
 * computation, i.e. it is unreachable: kill it and its cf edge */
214 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
215 set_Block_dead(pred_bl);
216 exchange(pred_X, new_Bad());
218 } else if (skipped != pred_X) {
219 set_Block_cfgpred(block, i, skipped);
225 *changed |= remove_senseless_conds(block);
227 /* clear the block mark of all blocks */
228 set_Block_removable(block);
232 * Collects all Phi nodes in link list of Block.
233 * Marks all blocks "non_removable" if they contain a node other
234 * than Jmp (and Proj).
235 * Links all Proj nodes to their predecessors.
236 * Collects all switch-Conds in a list.
/*
 * NOTE(review): gappy extract — several closing braces of this walker are
 * in lines missing from this view. Runs as the post-op of the irg_walk
 * started in optimize_cf (merge_blocks is the pre-op).
 */
238 static void collect_nodes(ir_node *n, void *ctx) {
239 ir_opcode code = get_irn_opcode(n);
240 merge_env *env = ctx;
242 if (code == iro_Block) {
243 /* mark the block as non-removable if it is labeled */
244 if (has_Block_label(n))
245 set_Block_non_removable(n);
247 ir_node *b = get_nodes_block(n);
249 if (code == iro_Phi && get_irn_arity(n) > 0) {
250 /* Collect Phi nodes to compact ins along with block's ins. */
251 set_irn_link(n, get_irn_link(b));
253 } else if (code != iro_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
254 set_Block_non_removable(b);
256 if (code == iro_Proj) { /* link Proj nodes */
257 ir_node *pred = get_Proj_pred(n);
259 set_irn_link(n, get_irn_link(pred));
260 set_irn_link(pred, n);
261 } else if (code == iro_Cond) {
262 ir_node *sel = get_Cond_selector(n);
/* an integer-mode selector marks a switch-Cond (boolean Conds were
 * handled in remove_senseless_conds) */
263 if (mode_is_int(get_irn_mode(sel))) {
264 /* found a switch-Cond, collect */
265 plist_insert_back(env->list, n);
272 /** Returns true if pred is predecessor of block. */
/* NOTE(review): gappy extract — the comparison of 'b_pred' against 'pred'
 * and the return statements fall into missing lines. */
273 static int is_pred_of(ir_node *pred, ir_node *b) {
276 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
277 ir_node *b_pred = get_Block_cfgpred_block(b, i);
284 /** Test whether we can optimize away pred block pos of b.
286 * @param b A block node.
287 * @param pos The position of the predecessor block to judge about.
289 * @returns The number of predecessors
291 * The test is rather tricky.
293 * The situation is something like the following:
302 * b merges the control flow of an if-then-else. We may not remove
303 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
304 * node in b, even if both are empty. The destruction of this Phi
305 * requires that a copy is added before the merge. We have to
306 * keep one of the case blocks to place the copies in.
308 * To perform the test for pos, we must regard predecessors before pos
309 * as already removed.
/*
 * NOTE(review): gappy extract — the early return for dead blocks, the
 * 'non_dispensable:' label and the final return are in missing lines.
 * Visible invariant: n_preds starts at 1 (block kept as a single pred)
 * and becomes pred's own pred count when pred is dispensable.
 */
311 static int test_whether_dispensable(ir_node *b, int pos) {
312 int i, j, n_preds = 1;
313 ir_node *pred = get_Block_cfgpred_block(b, pos);
315 /* Bad blocks will be optimized away, so we don't need space for them */
316 if (is_Block_dead(pred))
319 if (is_Block_removable(pred)) {
320 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
321 /* Mark block so that it will not be removed: optimization is turned off. */
322 set_Block_non_removable(pred);
326 /* Seems to be empty. At least we detected this in collect_nodes. */
327 if (get_irn_link(b) == NULL) {
328 /* There are no Phi nodes ==> all predecessors are dispensable. */
329 n_preds = get_Block_n_cfgpreds(pred);
331 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
332 Handle all pred blocks with preds < pos as if they were already removed. */
333 for (i = 0; i < pos; i++) {
334 ir_node *b_pred = get_Block_cfgpred_block(b, i);
335 if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
336 for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
337 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
338 if (is_pred_of(b_pred_pred, pred))
339 goto non_dispensable;
342 if (is_pred_of(b_pred, pred))
343 goto non_dispensable;
346 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
347 ir_node *b_pred = get_Block_cfgpred_block(b, i);
348 if (is_pred_of(b_pred, pred))
349 goto non_dispensable;
351 /* if we get here, the block is dispensable */
352 n_preds = get_Block_n_cfgpreds(pred);
359 set_Block_non_removable(pred);
364 * This method removes Bad cf predecessors from Blocks and Phis, and removes
365 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
367 * We first adapt Phi nodes, then Block nodes, as we need the old ins
368 * of the Block to adapt the Phi nodes. We do this by computing new
369 * in arrays, and then replacing the old ones. So far we compute new in arrays
370 * for all nodes, not regarding whether there is a possibility for optimization.
372 * For each predecessor p of a Block b there are three cases:
373 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
374 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
376 * -3. The predecessor p is a block containing useful code. Just keep p as is.
378 * For Phi nodes f we have to check the conditions at the Block of f.
379 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
381 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
382 * case we proceed as for blocks. We remove pred_f. All
383 * predecessors of pred_f now are predecessors of f.
384 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
385 * We have to replicate f for each predecessor of the removed block. Or, with
386 * other words, the removed predecessor block has exactly one predecessor.
388 * Further there is a special case for self referencing blocks:
391 * then_b else_b then_b else_b
397 * | | | === optimized to ===> \ | | |
404 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
405 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
407 * @@@ It is negotiable whether we should do this ... there might end up a copy
408 * from the Phi in the loop when removing the Phis.
/*
 * NOTE(review): gappy extract — the declarations of 'in'/'phi'/'pred'/
 * 'next_phi'/'q_preds', resets of the p_preds/q_preds/n_preds counters and
 * many closing braces fall into missing lines; read alongside the full
 * upstream source before changing anything here.
 */
410 static void optimize_blocks(ir_node *b, void *ctx) {
411 int i, j, k, n, max_preds, n_preds, p_preds = -1;
414 merge_env *env = ctx;
416 /* Count the number of predecessor if this block is merged with pred blocks
419 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
420 max_preds += test_whether_dispensable(b, i);
422 in = XMALLOCN(ir_node*, max_preds);
424 /*- Fix the Phi nodes of the current block -*/
425 for (phi = get_irn_link(b); phi; ) {
426 assert(get_irn_op(phi) == op_Phi);
428 /* Find the new predecessors for the Phi */
430 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
431 pred = get_Block_cfgpred_block(b, i);
433 if (is_Block_dead(pred)) {
434 /* case Phi 1: Do nothing */
435 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
436 /* case Phi 2: It's an empty block and not yet visited. */
437 ir_node *phi_pred = get_Phi_pred(phi, i);
439 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
440 /* because of breaking loops, not all predecessors are Bad-clean,
441 * so we must check this here again */
442 if (! is_Bad(get_Block_cfgpred(pred, j))) {
443 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: inline the removed block's Phi into this Phi */
445 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
447 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: replicate the same value for every pred of the removed block */
450 in[p_preds++] = phi_pred;
456 in[p_preds++] = get_Phi_pred(phi, i);
459 assert(p_preds <= max_preds);
463 /* By removal of Bad ins the Phi might be degenerated. */
464 exchange(phi, in[0]);
466 set_irn_in(phi, p_preds, in);
469 phi = get_irn_link(phi);
472 /*- This happens only if merge between loop backedge and single loop entry.
473 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
474 of the Phi's in predb ... -*/
475 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
476 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
478 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
481 /* we found a predecessor block at position k that will be removed */
482 for (phi = get_irn_link(predb); phi; phi = next_phi) {
484 next_phi = get_irn_link(phi);
488 if (get_Block_idom(b) != predb) {
489 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
490 exchange(phi, new_Bad());
492 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
493 predb in further block, so move this phi from the predecessor into the block b */
494 set_nodes_block(phi, b);
495 set_irn_link(phi, get_irn_link(b));
496 set_irn_link(b, phi);
499 /* first, copy all 0..k-1 predecessors */
500 for (i = 0; i < k; i++) {
501 pred = get_Block_cfgpred_block(b, i);
503 if (is_Block_dead(pred)) {
505 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
506 /* It's an empty block and not yet visited. */
507 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
508 if (! is_Bad(get_Block_cfgpred(pred, j)))
516 /* now we are at k, copy the phi predecessors */
517 pred = get_nodes_block(get_Block_cfgpred(b, k));
518 for (i = 0; i < get_Phi_n_preds(phi); i++) {
519 if (! is_Bad(get_Block_cfgpred(pred, i)))
520 in[q_preds++] = get_Phi_pred(phi, i);
523 /* and now all the rest */
524 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
525 pred = get_Block_cfgpred_block(b, i);
527 if (is_Block_dead(pred)) {
529 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
530 /* It's an empty block and not yet visited. */
531 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
532 if (! is_Bad(get_Block_cfgpred(pred, j)))
542 exchange(phi, in[0]);
544 set_irn_in(phi, q_preds, in);
547 assert(q_preds <= max_preds);
548 // assert(p_preds == q_preds && "Wrong Phi Fix");
554 /*- Fix the block -*/
556 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
557 pred = get_Block_cfgpred_block(b, i);
559 if (is_Block_dead(pred)) {
560 /* case 1: Do nothing */
561 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
562 /* case 2: It's an empty block and not yet visited. */
563 assert(get_Block_n_cfgpreds(b) > 1);
564 /* Else it should be optimized by equivalent_node. */
565 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
566 ir_node *pred_X = get_Block_cfgpred(pred, j);
568 /* because of breaking loops, not all predecessors are Bad-clean,
569 * so we must check this here again */
570 if (! is_Bad(pred_X))
571 in[n_preds++] = pred_X;
573 /* Remove block as it might be kept alive. */
574 exchange(pred, b/*new_Bad()*/);
577 in[n_preds++] = get_Block_cfgpred(b, i);
580 assert(n_preds <= max_preds);
582 set_irn_in(b, n_preds, in);
585 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
590 * Block walker: optimize all blocks using the default optimizations.
591 * This removes Blocks with only a Jmp predecessor.
/* NOTE(review): gappy extract — the env->changed update and closing braces
 * fall into missing lines (the 'env' variable is otherwise unused here). */
593 static void remove_simple_blocks(ir_node *block, void *ctx) {
594 ir_node *new_blk = equivalent_node(block);
595 merge_env *env = ctx;
597 if (new_blk != block) {
598 exchange(block, new_blk);
604 * Handle pre-optimized table switch Cond's.
605 * During iropt, all Projs from a switch-Cond are already removed except
606 * the defProj and maybe the taken one.
607 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
609 * @param cond the switch-Cond
611 * @return non-zero if a switch-Cond was optimized
613 * Expects all Proj's linked to the cond node
/*
 * NOTE(review): gappy extract — the declarations of 'blk'/'jmp', the
 * "only one Proj" condition guarding the first branch, the return
 * statements and several closing braces are in missing lines.
 * proj1/proj2 come from the link chain built by collect_nodes.
 */
615 static int handle_switch_cond(ir_node *cond) {
616 ir_node *sel = get_Cond_selector(cond);
618 ir_node *proj1 = get_irn_link(cond);
619 ir_node *proj2 = get_irn_link(proj1);
622 blk = get_nodes_block(cond);
625 /* this Cond has only one Proj: must be the defProj */
626 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
627 /* convert it into a Jmp */
628 jmp = new_r_Jmp(current_ir_graph, blk);
629 exchange(proj1, jmp);
631 } else if (get_irn_link(proj2) == NULL) {
632 /* We have two Proj's here. Check if the Cond has
633 a constant argument */
634 tarval *tv = value_of(sel);
636 if (tv != tarval_bad) {
637 /* we have a constant switch */
638 long num = get_tarval_long(tv);
639 long def_num = get_Cond_defaultProj(cond);
641 if (def_num == get_Proj_proj(proj1)) {
642 /* first one is the defProj */
643 if (num == get_Proj_proj(proj2)) {
644 jmp = new_r_Jmp(current_ir_graph, blk);
645 exchange(proj2, jmp);
646 exchange(proj1, new_Bad());
649 } else if (def_num == get_Proj_proj(proj2)) {
650 /* second one is the defProj */
651 if (num == get_Proj_proj(proj1)) {
652 jmp = new_r_Jmp(current_ir_graph, blk);
653 exchange(proj1, jmp);
654 exchange(proj2, new_Bad());
658 /* neither: strange, Cond was not optimized so far */
659 if (num == get_Proj_proj(proj1)) {
660 jmp = new_r_Jmp(current_ir_graph, blk);
661 exchange(proj1, jmp);
662 exchange(proj2, new_Bad());
664 } else if (num == get_Proj_proj(proj2)) {
665 jmp = new_r_Jmp(current_ir_graph, blk);
666 exchange(proj2, jmp);
667 exchange(proj1, new_Bad());
676 /* Optimizations of the control flow that also require changes of Phi nodes.
678 * This optimization performs two passes over the graph.
680 * The first pass collects all Phi nodes in a link list in the block
681 * nodes. Further it performs simple control flow optimizations.
682 * Finally it marks all blocks that do not contain useful
683 * computations, i.e., these blocks might be removed.
685 * The second pass performs the optimizations intended by this algorithm.
686 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
687 * which it finds in a linked list computed by the first pass.
689 * We use the mark flag to mark removable blocks in the first
/*
 * NOTE(review): gappy extract — the declarations of 'i'/'j'/'k'/'n'/'in'/
 * 'el'/'env', the initialization of env, the restart loop referred to by
 * the comment after the switch-Cond handling, several closing braces and
 * the bodies of some visible conditionals fall into missing lines.
 */
692 void optimize_cf(ir_graph *irg) {
695 ir_node *cond, *end = get_irg_end(irg);
696 ir_graph *rem = current_ir_graph;
700 assert(get_irg_phase_state(irg) != phase_building);
702 /* if the graph is not pinned, we cannot determine empty blocks */
703 assert(get_irg_pinned(irg) != op_pin_state_floats &&
704 "Control flow optimization need a pinned graph");
706 current_ir_graph = irg;
708 /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
709 edges_deactivate(irg);
711 /* we use the mark flag to mark removable blocks */
712 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
717 /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
718 Use dominator info to kill blocks. Also optimize useless Conds. */
720 irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
722 /* fix the keep-alives */
723 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
724 ir_node *ka = get_End_keepalive(end, i);
727 /* do NOT keep dead blocks */
728 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
729 set_End_keepalive(end, i, new_Bad());
732 } else if (is_Block_dead(get_nodes_block(ka)) ||
733 get_Block_dom_depth(get_nodes_block(ka)) < 0) {
734 /* do NOT keep nodes in dead blocks */
735 set_End_keepalive(end, i, new_Bad());
740 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
742 env.list = plist_new();
/* first pass: merge_blocks as pre-op, collect_nodes as post-op */
743 irg_walk(end, merge_blocks, collect_nodes, &env);
745 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
748 /* Handle graph state if was changed. */
749 set_irg_outs_inconsistent(irg);
750 set_irg_doms_inconsistent(irg);
751 set_irg_extblk_inconsistent(irg);
752 set_irg_loopinfo_inconsistent(irg);
756 /* handle all collected switch-Conds */
757 foreach_plist(env.list, el) {
758 cond = plist_element_get_value(el);
759 env.changed |= handle_switch_cond(cond);
761 plist_free(env.list);
764 /* The Cond optimization might generate unreachable code, so restart if
769 /* Optimize the standard code. */
772 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
774 /* Walk all keep alives, optimize them if block, add to new in-array
775 for end if useful. */
776 n = get_End_n_keepalives(end);
778 NEW_ARR_A(ir_node *, in, n);
780 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
781 inc_irg_visited(irg);
782 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
784 /* fix the keep alive */
785 for (i = j = 0; i < n; i++) {
786 ir_node *ka = get_End_keepalive(end, i);
788 if (!irn_visited(ka)) {
790 if (!Block_block_visited(ka)) {
791 /* irg_block_walk() will increase the block visited flag, but we must visit only
792 these blocks that are not visited yet, so decrease it first. */
793 set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
794 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
795 mark_irn_visited(ka);
799 mark_irn_visited(ka);
800 /* don't keep alive dead blocks */
801 if (!is_Bad(ka) && !is_Block_dead(get_nodes_block(ka)))
807 set_End_keepalives(end, j, in);
811 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
813 if (env.phis_moved) {
814 /* Bad: when we moved Phi's, we might produce dead Phi nodes
816 Some other phases cannot cope with this, so we kill them.
818 n = get_End_n_keepalives(end);
821 /* Handle graph state if was changed. */
822 set_irg_outs_inconsistent(irg);
824 assure_irg_outs(irg);
826 for (i = j = 0; i < n; ++i) {
827 ir_node *ka = get_End_keepalive(end, i);
832 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
833 ir_node *user = get_irn_out(ka, k);
835 if (user != ka && user != end) {
836 /* Is it a real user or just a self loop ? */
846 set_End_keepalives(end, j, in);
853 /* Handle graph state if was changed. */
854 set_irg_outs_inconsistent(irg);
855 set_irg_doms_inconsistent(irg);
856 set_irg_extblk_inconsistent(irg);
857 set_irg_loopinfo_inconsistent(irg);
861 /* the verifier doesn't work yet with floating nodes */
862 if (get_irg_pinned(irg) == op_pin_state_pinned) {
863 /* after optimize_cf(), only Bad data flow may remain. */
864 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
865 dump_ir_block_graph(irg, "-vrfy-cf");
866 dump_ir_graph(irg, "-vrfy-cf");
867 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
871 current_ir_graph = rem;