2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
30 #include "iroptimize.h"
37 #include "irgraph_t.h"
51 #include "irbackedge_t.h"
56 #include "iropt_dbg.h"
58 /*------------------------------------------------------------------*/
59 /* Control flow optimization. */
61 /* Removes Bad control flow predecessors and empty blocks. A block */
62 /* is empty if it contains only a Jmp node. */
63 /* Blocks can only be removed if they are not needed for the */
64 /* semantics of Phi nodes. */
65 /* Further, we NEVER remove labeled blocks (even if we could move */
67 /*------------------------------------------------------------------*/
/* Block-mark helpers: the per-block mark flag (reserved in optimize_cf()
 * via IR_RESOURCE_BLOCK_MARK) encodes whether a block is a candidate for
 * removal (1 = removable, 0 = must be kept). */
69 #define set_Block_removable(block) set_Block_mark(block, 1)
70 #define set_Block_non_removable(block) set_Block_mark(block, 0)
71 #define is_Block_removable(block) (get_Block_mark(block) != 0)
74 * Replace binary Conds that jumps twice into the same block
80 * ProjX True ProjX False ==> | /
85 * Such patterns are the result of if-conversion.
87 * Note that the simple case that Block has only these two
88 * predecessors are already handled in equivalent_node_Block().
/* For every pair of predecessors of bl that are Projs of the same boolean
 * Cond, both outcomes of the Cond reach bl, so the Cond decides nothing:
 * one Proj is replaced by a plain Jmp and the other by Bad.
 * NOTE(review): this fragment is truncated — the declaration of j, the
 * `changed` accounting and the return statement are not visible here;
 * presumably it returns non-zero iff the block was changed (see the use
 * `*changed |= remove_senseless_conds(block)` below) — TODO confirm. */
90 static int remove_senseless_conds(ir_node *bl) {
92 int n = get_Block_n_cfgpreds(bl);
95 for (i = 0; i < n; ++i) {
96 ir_node *pred_i = get_Block_cfgpred(bl, i);
97 ir_node *cond_i = skip_Proj(pred_i);
/* Only binary (mode_b) Conds are candidates here; switch-Conds are
 * collected and handled separately in handle_switch_cond(). */
100 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
102 for (j = i + 1; j < n; ++j) {
103 ir_node *pred_j = get_Block_cfgpred(bl, j);
104 ir_node *cond_j = skip_Proj(pred_j);
/* Same Cond feeds bl twice: both branches converge into bl. */
106 if (cond_j == cond_i) {
107 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
108 set_irn_n(bl, i, jmp);
109 set_irn_n(bl, j, new_Bad());
111 DBG_OPT_IFSIM2(cond_i, jmp);
121 /** An environment for merge_blocks and collect nodes. */
122 typedef struct _merge_env {
123 int changed; /**< Set if the graph was changed. */
124 int phis_moved; /**< Set if Phi nodes were moved. */
125 plist_t *list; /**< Helper list for all found Switch Conds. */
/* NOTE(review): the closing `} merge_env;` of this typedef is missing
 * from this fragment — the definition is truncated here. */
129 * Removes Tuples from Block control flow predecessors.
130 * Optimizes blocks with equivalent_node(). This is tricky,
131 * as we want to avoid nodes that have as block predecessor Bads.
132 * Therefore we also optimize at control flow operations, depending
133 * how we first reach the Block.
/* Pre-walker (paired with collect_nodes in irg_walk). Also clears the
 * link field of every node so collect_nodes can build fresh Phi/Proj
 * lists. NOTE(review): fragment truncated — declarations of i and
 * new_block, several closing braces and the env->changed updates are
 * not visible here. */
135 static void merge_blocks(ir_node *node, void *ctx) {
138 merge_env *env = ctx;
140 /* clear the link field for ALL nodes first */
141 set_irn_link(node, NULL);
143 if (is_Block(node)) {
/* Remove Tuple nodes from the block's control-flow predecessors. */
145 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
146 ir_node *pred = get_Block_cfgpred(node, i);
147 ir_node *skipped = skip_Tuple(pred);
148 if (pred != skipped) {
149 set_Block_cfgpred(node, i, skipped);
/* Let equivalent_node() simplify the block itself, but never replace
 * a block by a dead one. */
155 new_block = equivalent_node(node);
156 if (new_block != node && ! is_Block_dead(new_block)) {
157 exchange(node, new_block);
161 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
162 /* We will soon visit a block. Optimize it before visiting! */
163 ir_node *b = get_nodes_block(skip_Proj(node));
165 if (!is_Block_dead(b)) {
166 new_block = equivalent_node(b);
/* Iterate the simplification until the block is stable, visited,
 * or found dead. */
168 while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
169 /* We would have to run gigo() if new is bad, so we
170 promote it directly below. Nevertheless, we sometimes reach a block
171 the first time through a dataflow node. In this case we optimized the
172 block as such and have to promote the Bad here. */
173 assert((get_opt_control_flow_straightening() ||
174 get_opt_control_flow_weak_simplification()) &&
175 ("strange flag setting"));
176 exchange(b, new_block);
179 new_block = equivalent_node(b);
182 /* normally, we would create a Bad block here, but this must be
183 * prevented, so just set its cf to Bad.
185 if (is_Block_dead(new_block)) {
186 exchange(node, new_Bad());
194 * Block walker removing control flow from dead block by
195 * inspecting dominance info.
196 * Do not replace blocks by Bad. This optimization shall
197 * ensure, that all Bad control flow predecessors are
198 * removed, and no new other Bads are introduced.
199 * Further removed useless Conds and clear the mark of all blocks.
201 * Must be run in the post walker.
/* NOTE(review): fragment truncated — the declaration of i and of the
 * `changed` pointer (cast from env) and the closing braces are not
 * visible here. env is presumably the caller's &env.changed (see
 * irg_block_walk_graph() call in optimize_cf()) — TODO confirm. */
203 static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
207 /* Check block predecessors and turn control flow into bad.
208 Beware of Tuple, kill them. */
209 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
210 ir_node *pred_X = get_Block_cfgpred(block, i);
211 ir_node *skipped = skip_Tuple(pred_X);
213 if (! is_Bad(skipped)) {
214 ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
/* Negative dominator depth means the block is unreachable from Start. */
216 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
217 set_Block_dead(pred_bl);
218 exchange(pred_X, new_Bad());
220 } else if (skipped != pred_X) {
221 set_Block_cfgpred(block, i, skipped);
227 *changed |= remove_senseless_conds(block);
229 /* clear the block mark of all blocks */
230 set_Block_removable(block);
234 * Collects all Phi nodes in link list of Block.
235 * Marks all blocks "non_removable" if they contain a node other
236 * than Jmp (and Proj).
237 * Links all Proj nodes to their predecessors.
238 * Collects all switch-Conds in a list.
/* Post-walker (paired with merge_blocks, which pre-clears all links). */
240 static void collect_nodes(ir_node *n, void *ctx) {
241 ir_opcode code = get_irn_opcode(n);
242 merge_env *env = ctx;
244 if (code == iro_Block) {
245 /* mark the block as non-removable if it is labeled */
246 if (has_Block_label(n))
247 set_Block_non_removable(n);
/* Non-Block case: classify the node relative to its block.
 * NOTE(review): fragment truncated — the else introducing this branch
 * and the closing braces are not visible here. */
249 ir_node *b = get_nodes_block(n);
251 if (code == iro_Phi && get_irn_arity(n) > 0) {
252 /* Collect Phi nodes to compact ins along with block's ins. */
253 set_irn_link(n, get_irn_link(b));
255 } else if (code != iro_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
256 set_Block_non_removable(b);
/* Chain each Proj into its predecessor's link list (needed later by
 * handle_switch_cond(), which expects all Projs linked to the Cond). */
258 if (code == iro_Proj) { /* link Proj nodes */
259 ir_node *pred = get_Proj_pred(n);
261 set_irn_link(n, get_irn_link(pred));
262 set_irn_link(pred, n);
263 } else if (code == iro_Cond) {
264 ir_node *sel = get_Cond_selector(n);
265 if (mode_is_int(get_irn_mode(sel))) {
266 /* found a switch-Cond, collect */
267 plist_insert_back(env->list, n);
274 /** Returns true if pred is predecessor of block. */
275 static int is_pred_of(ir_node *pred, ir_node *b) {
/* Scan all control-flow predecessor blocks of b. */
278 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
279 ir_node *b_pred = get_Block_cfgpred_block(b, i);
/* NOTE(review): fragment truncated — the declaration of i, the
 * comparison of b_pred against pred and the return statements are not
 * visible here; presumably it returns non-zero when b_pred == pred for
 * some i, else 0 — TODO confirm. */
286 /** Test whether we can optimize away pred block pos of b.
288 * @param b A block node.
289 * @param pos The position of the predecessor block to judge about.
291 * @returns The number of predecessors
293 * The test is rather tricky.
295 * The situation is something like the following:
304 * b merges the control flow of an if-then-else. We may not remove
305 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
306 * node in b, even if both are empty. The destruction of this Phi
307 * requires that a copy is added before the merge. We have to
308 * keep one of the case blocks to place the copies in.
310 * To perform the test for pos, we must regard predecessors before pos
311 * as already removed.
/* Returns the number of in-edges b will have for this predecessor after
 * merging: the predecessor's own pred count if it is dispensable, 1 if
 * it must be kept.  NOTE(review): fragment truncated — the `return 0`
 * for dead blocks, the early returns, the `non_dispensable:` label and
 * the final return are not visible here. */
313 static int test_whether_dispensable(ir_node *b, int pos) {
314 int i, j, n_preds = 1;
315 ir_node *pred = get_Block_cfgpred_block(b, pos);
317 /* Bad blocks will be optimized away, so we don't need space for them */
318 if (is_Block_dead(pred))
321 if (is_Block_removable(pred)) {
322 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
323 /* Mark block so that it will not be removed: optimization is turned off. */
324 set_Block_non_removable(pred);
328 /* Seems to be empty. At least we detected this in collect_nodes. */
329 if (get_irn_link(b) == NULL) {
330 /* There are no Phi nodes ==> all predecessors are dispensable. */
331 n_preds = get_Block_n_cfgpreds(pred);
333 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
334 Handle all pred blocks with preds < pos as if they were already removed. */
335 for (i = 0; i < pos; i++) {
336 ir_node *b_pred = get_Block_cfgpred_block(b, i);
337 if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
/* An earlier removable pred contributes its own preds to b; those must
 * not also feed `pred`, otherwise removing both creates a duplicate
 * merge that a Phi could not distinguish. */
338 for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
339 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
340 if (is_pred_of(b_pred_pred, pred))
341 goto non_dispensable;
344 if (is_pred_of(b_pred, pred))
345 goto non_dispensable;
/* Predecessors after pos are kept as-is, so only the blocks themselves
 * are checked for overlap with pred's preds. */
348 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
349 ir_node *b_pred = get_Block_cfgpred_block(b, i);
350 if (is_pred_of(b_pred, pred))
351 goto non_dispensable;
353 /* if we get here, the block is dispensable */
354 n_preds = get_Block_n_cfgpreds(pred);
361 set_Block_non_removable(pred);
366 * This method removed Bad cf predecessors from Blocks and Phis, and removes
367 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
369 * We first adapt Phi nodes, then Block nodes, as we need the old ins
370 * of the Block to adapt the Phi nodes. We do this by computing new
371 * in arrays, and then replacing the old ones. So far we compute new in arrays
372 * for all nodes, not regarding whether there is a possibility for optimization.
374 * For each predecessor p of a Block b there are three cases:
375 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
376 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
378 * -3. The predecessor p is a block containing useful code. Just keep p as is.
380 * For Phi nodes f we have to check the conditions at the Block of f.
381 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
383 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
384 * case we proceed as for blocks. We remove pred_f. All
385 * predecessors of pred_f now are predecessors of f.
386 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
387 * We have to replicate f for each predecessor of the removed block. Or, with
388 * other words, the removed predecessor block has exactly one predecessor.
390 * Further there is a special case for self referencing blocks:
393 * then_b else_b then_b else_b
399 * | | | === optimized to ===> \ | | |
406 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
407 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
409 * @@@ It is negotiable whether we should do this ... there might end up a copy
410 * from the Phi in the loop when removing the Phis.
/* Block walker (paired with remove_simple_blocks).
 * NOTE(review): fragment truncated — declarations of in/phi/pred/
 * next_phi/q_preds, several loop bodies, closing braces and the free/
 * changed bookkeeping are not visible here; the structure below is the
 * surviving skeleton only. */
412 static void optimize_blocks(ir_node *b, void *ctx) {
413 int i, j, k, n, max_preds, n_preds, p_preds = -1;
416 merge_env *env = ctx;
/* Upper bound on the merged in-array length, summed per predecessor
 * via test_whether_dispensable(). */
418 /* Count the number of predecessor if this block is merged with pred blocks
421 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
422 max_preds += test_whether_dispensable(b, i);
424 in = XMALLOCN(ir_node*, max_preds);
426 /*- Fix the Phi nodes of the current block -*/
427 for (phi = get_irn_link(b); phi; ) {
428 assert(get_irn_op(phi) == op_Phi);
430 /* Find the new predecessors for the Phi */
432 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
433 pred = get_Block_cfgpred_block(b, i);
435 if (is_Block_dead(pred)) {
436 /* case Phi 1: Do nothing */
437 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
438 /* case Phi 2: It's an empty block and not yet visited. */
439 ir_node *phi_pred = get_Phi_pred(phi, i);
441 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
442 /* because of breaking loops, not all predecessors are Bad-clean,
443 * so we must check this here again */
444 if (! is_Bad(get_Block_cfgpred(pred, j))) {
445 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: pull the removed block's Phi inputs up into this Phi. */
447 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
449 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: replicate the single input once per surviving edge. */
452 in[p_preds++] = phi_pred;
/* case Phi 3: keep the existing input unchanged. */
458 in[p_preds++] = get_Phi_pred(phi, i);
461 assert(p_preds <= max_preds);
465 /* By removal of Bad ins the Phi might be degenerated. */
466 exchange(phi, in[0]);
468 set_irn_in(phi, p_preds, in);
471 phi = get_irn_link(phi);
474 /*- This happens only if merge between loop backedge and single loop entry.
475 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
476 of the Phi's in predb ... -*/
477 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
478 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
480 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
483 /* we found a predecessor block at position k that will be removed */
484 for (phi = get_irn_link(predb); phi; phi = next_phi) {
486 next_phi = get_irn_link(phi);
490 if (get_Block_idom(b) != predb) {
491 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
492 exchange(phi, new_Bad());
494 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
495 predb in further block, so move this phi from the predecessor into the block b */
496 set_nodes_block(phi, b);
497 set_irn_link(phi, get_irn_link(b));
498 set_irn_link(b, phi);
/* Rebuild the moved Phi's in-array to match b's merged edges. */
501 /* first, copy all 0..k-1 predecessors */
502 for (i = 0; i < k; i++) {
503 pred = get_Block_cfgpred_block(b, i);
505 if (is_Block_dead(pred)) {
507 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
508 /* It's an empty block and not yet visited. */
509 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
510 if (! is_Bad(get_Block_cfgpred(pred, j)))
518 /* now we are at k, copy the phi predecessors */
519 pred = get_nodes_block(get_Block_cfgpred(b, k));
520 for (i = 0; i < get_Phi_n_preds(phi); i++) {
521 if (! is_Bad(get_Block_cfgpred(pred, i)))
522 in[q_preds++] = get_Phi_pred(phi, i);
525 /* and now all the rest */
526 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
527 pred = get_Block_cfgpred_block(b, i);
529 if (is_Block_dead(pred)) {
531 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
532 /* It's an empty block and not yet visited. */
533 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
534 if (! is_Bad(get_Block_cfgpred(pred, j)))
544 exchange(phi, in[0]);
546 set_irn_in(phi, q_preds, in);
549 assert(q_preds <= max_preds);
550 // assert(p_preds == q_preds && "Wrong Phi Fix");
556 /*- Fix the block -*/
558 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
559 pred = get_Block_cfgpred_block(b, i);
561 if (is_Block_dead(pred)) {
562 /* case 1: Do nothing */
563 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
564 /* case 2: It's an empty block and not yet visited. */
565 assert(get_Block_n_cfgpreds(b) > 1);
566 /* Else it should be optimized by equivalent_node. */
567 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
568 ir_node *pred_X = get_Block_cfgpred(pred, j);
570 /* because of breaking loops, not all predecessors are Bad-clean,
571 * so we must check this here again */
572 if (! is_Bad(pred_X))
573 in[n_preds++] = pred_X;
575 /* Remove block as it might be kept alive. */
576 exchange(pred, b/*new_Bad()*/);
/* case 3: keep the predecessor edge unchanged. */
579 in[n_preds++] = get_Block_cfgpred(b, i);
582 assert(n_preds <= max_preds);
584 set_irn_in(b, n_preds, in);
587 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
592 * Block walker: optimize all blocks using the default optimizations.
593 * This removes Blocks with only a Jmp predecessor.
/* Post-walker companion of optimize_blocks().
 * NOTE(review): fragment truncated — the env->changed update and closing
 * braces are not visible here. */
595 static void remove_simple_blocks(ir_node *block, void *ctx) {
596 ir_node *new_blk = equivalent_node(block);
597 merge_env *env = ctx;
599 if (new_blk != block) {
600 exchange(block, new_blk);
606 * Handle pre-optimized table switch Cond's.
607 * During iropt, all Projs from a switch-Cond are already removed except
608 * the defProj and maybe the taken one.
609 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
611 * @param cond the switch-Cond
613 * @return non-zero if a switch-Cond was optimized
615 * Expects all Proj's linked to the cond node
/* The Proj chain is built by collect_nodes(): cond's link points to one
 * Proj, whose link points to the next (NULL-terminated).
 * NOTE(review): fragment truncated — declarations of blk/jmp, the
 * `proj2 == NULL` test guarding the one-Proj case, and the return
 * statements are not visible here. */
617 static int handle_switch_cond(ir_node *cond) {
618 ir_node *sel = get_Cond_selector(cond);
620 ir_node *proj1 = get_irn_link(cond);
621 ir_node *proj2 = get_irn_link(proj1);
624 blk = get_nodes_block(cond);
627 /* this Cond has only one Proj: must be the defProj */
628 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
629 /* convert it into a Jmp */
630 jmp = new_r_Jmp(current_ir_graph, blk);
631 exchange(proj1, jmp);
633 } else if (get_irn_link(proj2) == NULL) {
634 /* We have two Proj's here. Check if the Cond has
635 a constant argument */
636 tarval *tv = value_of(sel);
638 if (tv != tarval_bad) {
639 /* we have a constant switch */
640 long num = get_tarval_long(tv);
641 long def_num = get_Cond_defaultProj(cond);
/* Keep the Proj that the constant selector actually takes (as a Jmp)
 * and kill the other one. */
643 if (def_num == get_Proj_proj(proj1)) {
644 /* first one is the defProj */
645 if (num == get_Proj_proj(proj2)) {
646 jmp = new_r_Jmp(current_ir_graph, blk);
647 exchange(proj2, jmp);
648 exchange(proj1, new_Bad());
651 } else if (def_num == get_Proj_proj(proj2)) {
652 /* second one is the defProj */
653 if (num == get_Proj_proj(proj1)) {
654 jmp = new_r_Jmp(current_ir_graph, blk);
655 exchange(proj1, jmp);
656 exchange(proj2, new_Bad());
660 /* neither: strange, Cond was not optimized so far */
661 if (num == get_Proj_proj(proj1)) {
662 jmp = new_r_Jmp(current_ir_graph, blk);
663 exchange(proj1, jmp);
664 exchange(proj2, new_Bad());
666 } else if (num == get_Proj_proj(proj2)) {
667 jmp = new_r_Jmp(current_ir_graph, blk);
668 exchange(proj2, jmp);
669 exchange(proj1, new_Bad());
678 /* Optimizations of the control flow that also require changes of Phi nodes.
680 * This optimization performs two passes over the graph.
682 * The first pass collects all Phi nodes in a link list in the block
683 * nodes. Further it performs simple control flow optimizations.
684 * Finally it marks all blocks that do not contain useful
685 * computations, i.e., these blocks might be removed.
687 * The second pass performs the optimizations intended by this algorithm.
688 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
689 * which it finds in a linked list computed by the first pass.
691 * We use the mark flag to mark removable blocks in the first
/* Public driver of the pass.
 * NOTE(review): fragment truncated — declarations of i/j/n/k/in/el/env,
 * the env initialization, dominance-info computation, the restart loop
 * mentioned below and many closing braces are not visible here. */
694 void optimize_cf(ir_graph *irg) {
697 ir_node *cond, *end = get_irg_end(irg);
698 ir_graph *rem = current_ir_graph;
702 assert(get_irg_phase_state(irg) != phase_building);
704 /* if the graph is not pinned, we cannot determine empty blocks */
705 assert(get_irg_pinned(irg) != op_pin_state_floats &&
706 "Control flow optimization need a pinned graph");
708 current_ir_graph = irg;
710 /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
711 edges_deactivate(irg);
713 /* we use the mark flag to mark removable blocks */
714 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
719 /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
720 Use dominator info to kill blocks. Also optimize useless Conds. */
722 irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
724 /* fix the keep-alives */
725 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
726 ir_node *ka = get_End_keepalive(end, i);
729 /* do NOT keep dead blocks */
730 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
731 set_End_keepalive(end, i, new_Bad());
734 } else if (is_Block_dead(get_nodes_block(ka)) ||
735 get_Block_dom_depth(get_nodes_block(ka)) < 0) {
736 /* do NOT keep nodes in dead blocks */
737 set_End_keepalive(end, i, new_Bad());
/* First pass: merge_blocks (pre) clears links and straightens CF,
 * collect_nodes (post) gathers Phis, Projs and switch-Conds. */
742 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
744 env.list = plist_new();
745 irg_walk(end, merge_blocks, collect_nodes, &env);
747 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
750 /* Handle graph state if was changed. */
751 set_irg_outs_inconsistent(irg);
752 set_irg_doms_inconsistent(irg);
753 set_irg_extblk_inconsistent(irg);
754 set_irg_loopinfo_inconsistent(irg);
758 /* handle all collected switch-Conds */
759 foreach_plist(env.list, el) {
760 cond = plist_element_get_value(el);
761 env.changed |= handle_switch_cond(cond);
763 plist_free(env.list);
766 /* The Cond optimization might generate unreachable code, so restart if
/* Second pass: optimize_blocks/remove_simple_blocks over all blocks
 * reachable from the end block. */
771 /* Optimize the standard code. */
774 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
776 /* Walk all keep alives, optimize them if block, add to new in-array
777 for end if useful. */
778 n = get_End_n_keepalives(end);
780 NEW_ARR_A(ir_node *, in, n);
782 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
783 inc_irg_visited(irg);
784 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
786 /* fix the keep alive */
787 for (i = j = 0; i < n; i++) {
788 ir_node *ka = get_End_keepalive(end, i);
790 if (!irn_visited(ka)) {
792 if (!Block_block_visited(ka)) {
793 /* irg_block_walk() will increase the block visited flag, but we must visit only
794 these blocks that are not visited yet, so decrease it first. */
795 set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
796 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
797 mark_irn_visited(ka);
801 mark_irn_visited(ka);
802 /* don't keep alive dead blocks */
803 if (!is_Bad(ka) && !is_Block_dead(get_nodes_block(ka)))
809 set_End_keepalives(end, j, in);
813 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
815 if (env.phis_moved) {
816 /* Bad: when we moved Phi's, we might produce dead Phi nodes
818 Some other phases cannot cope with this, so we kill them.
820 n = get_End_n_keepalives(end);
823 /* Handle graph state if was changed. */
824 set_irg_outs_inconsistent(irg);
826 assure_irg_outs(irg);
828 for (i = j = 0; i < n; ++i) {
829 ir_node *ka = get_End_keepalive(end, i);
/* Keep a moved Phi alive only if it has a user besides itself/End. */
834 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
835 ir_node *user = get_irn_out(ka, k);
837 if (user != ka && user != end) {
838 /* Is it a real user or just a self loop ? */
848 set_End_keepalives(end, j, in);
855 /* Handle graph state if was changed. */
856 set_irg_outs_inconsistent(irg);
857 set_irg_doms_inconsistent(irg);
858 set_irg_extblk_inconsistent(irg);
859 set_irg_loopinfo_inconsistent(irg);
863 /* the verifier doesn't work yet with floating nodes */
864 if (get_irg_pinned(irg) == op_pin_state_pinned) {
865 /* after optimize_cf(), only Bad data flow may remain. */
866 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
867 dump_ir_block_graph(irg, "-vrfy-cf");
868 dump_ir_graph(irg, "-vrfy-cf");
869 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
873 current_ir_graph = rem;