2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
30 #include "iroptimize.h"
37 #include "irgraph_t.h"
51 #include "irbackedge_t.h"
56 #include "iropt_dbg.h"
58 /*------------------------------------------------------------------*/
59 /* Control flow optimization. */
61 /* Removes Bad control flow predecessors and empty blocks. A block */
62 /* is empty if it contains only a Jmp node. */
63 /* Blocks can only be removed if they are not needed for the */
64 /* semantics of Phi nodes. */
65 /* Further, we NEVER remove labeled blocks (even if we could move */
67 /*------------------------------------------------------------------*/
70 * Block walker, replacing binary Conds that jumps twice into the same block
76 * ProjX True ProjX False ==> | /
81 * Such pattern are the result of if-conversion.
83 * Note that the simple case that Block has only these two
84 * predecessors are already handled in equivalent_node_Block().
/* Walks every block; if two control-flow predecessors of the same block
 * trace back (through their ProjX) to the same boolean Cond, the Cond is
 * senseless: both branches land here.  Replace one edge with a plain Jmp
 * and the other with Bad.  env is unused here. */
86 static void remove_senseless_conds(ir_node *bl, void *env) {
88 int n = get_Block_n_cfgpreds(bl);
93 for (i = 0; i < n; ++i) {
94 ir_node *pred_i = get_Block_cfgpred(bl, i);
95 ir_node *cond_i = skip_Proj(pred_i); /* strip ProjX to reach the Cond itself */
98 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
/* only binary (mode_b) Conds; switch-Conds are handled elsewhere */
100 for (j = i + 1; j < n; ++j) {
101 ir_node *pred_j = get_Block_cfgpred(bl, j);
102 ir_node *cond_j = skip_Proj(pred_j);
104 if (cond_j == cond_i) {
/* both preds come from the same Cond: one Jmp suffices */
105 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
106 set_irn_n(bl, i, jmp);
107 set_irn_n(bl, j, new_Bad()); /* Bad pred is removed later */
109 DBG_OPT_IFSIM2(cond_i, jmp);
118 /** An environment for merge_blocks and collect nodes. */
119 typedef struct _merge_env {
120 int changed; /**< Set if the graph was changed. */
121 int phis_moved; /**< Set if Phi nodes were moved. */
122 plist_t *list; /**< Helper list for all found Switch Conds. */
/* changed/phis_moved are boolean flags (0/1); list is filled by
 * collect_nodes and consumed by handle_switch_cond in optimize_cf. */
126 * Removes Tuples from Block control flow predecessors.
127 * Optimizes blocks with equivalent_node(). This is tricky,
128 * as we want to avoid nodes that have as block predecessor Bads.
129 * Therefore we also optimize at control flow operations, depending
130 * how we first reach the Block.
/* Pre-walker (see irg_walk call in optimize_cf).  Clears the link field of
 * every node.  For Blocks: skips Tuple cf predecessors and replaces the
 * block by its equivalent_node() if that is a different, live block.
 * For mode_X nodes: pre-optimizes the block that is about to be visited,
 * replacing the node by Bad if its block turns out dead.
 * ctx is the merge_env (its changed flag is presumably set in the elided
 * lines — TODO confirm against the full source). */
132 static void merge_blocks(ir_node *node, void *ctx) {
135 merge_env *env = ctx;
137 /* clear the link field for ALL nodes first */
138 set_irn_link(node, NULL);
140 if (is_Block(node)) {
/* remove Tuples from the control-flow predecessors */
142 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
143 ir_node *pred = get_Block_cfgpred(node, i);
144 ir_node *skipped = skip_Tuple(pred);
145 if (pred != skipped) {
146 set_Block_cfgpred(node, i, skipped);
152 new_block = equivalent_node(node);
153 if (new_block != node && ! is_Block_dead(new_block)) {
154 exchange(node, new_block);
158 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
159 /* We will soon visit a block. Optimize it before visiting! */
160 ir_node *b = get_nodes_block(skip_Proj(node));
162 if (!is_Block_dead(b)) {
163 new_block = equivalent_node(b);
/* iterate: equivalent_node may enable further simplification */
165 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
166 /* We would have to run gigo() if new is bad, so we
167 promote it directly below. Nevertheless, we sometimes reach a block
168 the first time through a dataflow node. In this case we optimized the
169 block as such and have to promote the Bad here. */
170 assert((get_opt_control_flow_straightening() ||
171 get_opt_control_flow_weak_simplification()) &&
172 ("strange flag setting"));
173 exchange(b, new_block);
176 new_block = equivalent_node(b);
179 /* normally, we would create a Bad block here, but this must be
180 * prevented, so just set its cf to Bad.
182 if (is_Block_dead(new_block)) {
183 exchange(node, new_Bad());
191 * Block walker removing control flow from dead block by
192 * inspecting dominance info.
193 * Do not replace blocks by Bad. This optimization shall
194 * ensure, that all Bad control flow predecessors are
195 * removed, and no new other Bads are introduced.
197 * Must be run in the post walker.
/* Post-walker over blocks.  Any predecessor block that is already marked
 * dead or has no dominance info (dom depth < 0, i.e. unreachable) is
 * marked dead and its control-flow edge replaced by Bad.
 * env points at the merge_env's changed flag (passed as &env.changed
 * from optimize_cf); the flag update is in the elided lines — TODO confirm. */
199 static void remove_dead_block_cf(ir_node *block, void *env) {
203 /* check block predecessors and turn control flow into bad */
204 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
205 ir_node *pred_X = get_Block_cfgpred(block, i);
207 if (! is_Bad(pred_X)) {
208 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
210 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
/* unreachable by dominance: kill the block and the edge */
211 set_Block_dead(pred_bl);
212 exchange(pred_X, new_Bad());
220 * Collects all Phi nodes in link list of Block.
221 * Marks all blocks "block_visited" if they contain a node other
222 * than Jmp (and Proj).
223 * Links all Proj nodes to their predecessors.
224 * Collects all switch-Conds in a list.
/* Walker collecting information for the second pass:
 * - labeled Blocks are marked block_visited (= "not empty, keep");
 * - Phi nodes (with preds) are chained into their block's link list;
 * - any other node except Jmp marks its block as non-empty;
 * - Proj nodes are linked to their predecessor node;
 * - switch-Conds (integer-mode selector) are appended to env->list. */
226 static void collect_nodes(ir_node *n, void *ctx) {
227 ir_op *op = get_irn_op(n);
228 merge_env *env = ctx;
230 if (op == op_Block) {
231 /* mark the block as non-empty if it is labeled */
232 if (has_Block_label(n))
233 mark_Block_block_visited(n);
235 ir_node *b = get_nodes_block(n);
237 if (op == op_Phi && get_irn_arity(n) > 0) {
238 /* Collect Phi nodes to compact ins along with block's ins. */
239 set_irn_link(n, get_irn_link(b));
241 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
242 mark_Block_block_visited(b);
244 if (op == op_Proj) { /* link Proj nodes */
245 ir_node *pred = get_Proj_pred(n);
/* prepend n to the Proj list hanging off its predecessor */
247 set_irn_link(n, get_irn_link(pred));
248 set_irn_link(pred, n);
249 } else if (op == op_Cond) {
250 ir_node *sel = get_Cond_selector(n);
251 if (mode_is_int(get_irn_mode(sel))) {
252 /* found a switch-Cond, collect */
253 plist_insert_back(env->list, n);
260 /** Returns true if pred is predecessor of block. */
/* Linear scan over b's control-flow predecessor blocks; returns 1 on the
 * first match, (presumably) 0 otherwise in the elided tail. */
261 static int is_pred_of(ir_node *pred, ir_node *b) {
264 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
265 ir_node *b_pred = get_Block_cfgpred_block(b, i);
266 if (b_pred == pred) return 1;
271 /** Test whether we can optimize away pred block pos of b.
273 * @param b A block node.
274 * @param pos The position of the predecessor block to judge about.
276 * @returns The number of predecessors
278 * The test is rather tricky.
280 * The situation is something like the following:
289 * b merges the control flow of an if-then-else. We may not remove
290 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
291 * node in b, even if both are empty. The destruction of this Phi
292 * requires that a copy is added before the merge. We have to
293 * keep one of the case blocks to place the copies in.
295 * To perform the test for pos, we must regard predecessors before pos
296 * as already removed.
/* Decides whether the predecessor block at position pos of b may be merged
 * into b, and returns how many predecessor slots it will contribute to b's
 * new in-array (its own pred count if dispensable, otherwise 1; 0 for dead
 * blocks — per the "Bad blocks" comment below; exact return sites are in
 * the elided lines).  Blocks that must be kept are marked by setting their
 * block_visited counter to irg_block_visited - 1. */
298 static int test_whether_dispensable(ir_node *b, int pos) {
299 int i, j, n_preds = 1;
300 ir_node *pred = get_Block_cfgpred_block(b, pos);
302 /* Bad blocks will be optimized away, so we don't need space for them */
303 if (is_Block_dead(pred))
/* block_visited + 1 < irg_block_visited means: pred looked empty
 * during collect_nodes and was not yet claimed */
306 if (get_Block_block_visited(pred) + 1
307 < get_irg_block_visited(current_ir_graph)) {
309 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
310 /* Mark block so that is will not be removed: optimization is turned off. */
311 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
315 /* Seems to be empty. At least we detected this in collect_nodes. */
316 if (get_irn_link(b) == NULL) {
317 /* There are no Phi nodes ==> all predecessors are dispensable. */
318 n_preds = get_Block_n_cfgpreds(pred);
320 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
321 Handle all pred blocks with preds < pos as if they were already removed. */
322 for (i = 0; i < pos; i++) {
323 ir_node *b_pred = get_Block_cfgpred_block(b, i);
324 if (! is_Block_dead(b_pred) &&
325 get_Block_block_visited(b_pred) + 1
326 < get_irg_block_visited(current_ir_graph)) {
/* b_pred will itself be merged: check its preds instead */
327 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
328 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
329 if (is_pred_of(b_pred_pred, pred))
330 goto non_dispensable;
333 if (is_pred_of(b_pred, pred))
334 goto non_dispensable;
/* preds after pos are treated as still present */
337 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
338 ir_node *b_pred = get_Block_cfgpred_block(b, i);
339 if (is_pred_of(b_pred, pred))
340 goto non_dispensable;
342 /* if we get here, the block is dispensable */
343 n_preds = get_Block_n_cfgpreds(pred);
/* non_dispensable path: keep the block by bumping its visited mark */
350 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
355 * This method removes Bad cf predecessors from Blocks and Phis, and removes
356 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
358 * We first adapt Phi nodes, then Block nodes, as we need the old ins
359 * of the Block to adapt the Phi nodes. We do this by computing new
360 * in arrays, and then replacing the old ones. So far we compute new in arrays
361 * for all nodes, not regarding whether there is a possibility for optimization.
363 * For each predecessor p of a Block b there are three cases:
364 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
365 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
367 * -3. The predecessor p is a block containing useful code. Just keep p as is.
369 * For Phi nodes f we have to check the conditions at the Block of f.
370 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
372 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
373 * case we proceed as for blocks. We remove pred_f. All
374 * predecessors of pred_f now are predecessors of f.
375 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
376 * We have to replicate f for each predecessor of the removed block. Or, with
377 * other words, the removed predecessor block has exactly one predecessor.
379 * Further there is a special case for self referencing blocks:
382 * then_b else_b then_b else_b
388 * | | | === optimized to ===> \ | | |
395 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
396 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
398 * @@@ It is negotiable whether we should do this ... there might end up a copy
399 * from the Phi in the loop when removing the Phis.
/* Core transformation (see the long comment above): rebuilds the in-arrays
 * of b's Phi nodes and then of b itself, inlining every predecessor block
 * judged dispensable by test_whether_dispensable and dropping Bad preds.
 * Phase 1 fixes b's own Phis, phase 2 handles Phis that live in a removed
 * predecessor block (loop-entry merge case), phase 3 rewrites b's ins. */
401 static void optimize_blocks(ir_node *b, void *ctx) {
402 int i, j, k, n, max_preds, n_preds, p_preds = -1;
405 merge_env *env = ctx;
407 /* Count the number of predecessor if this block is merged with pred blocks
410 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
411 max_preds += test_whether_dispensable(b, i);
/* scratch in-array, large enough for the fully merged block */
413 in = xmalloc(max_preds * sizeof(*in));
415 /*- Fix the Phi nodes of the current block -*/
416 for (phi = get_irn_link(b); phi; ) {
417 assert(get_irn_op(phi) == op_Phi);
419 /* Find the new predecessors for the Phi */
421 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
422 pred = get_Block_cfgpred_block(b, i);
424 if (is_Bad(get_Block_cfgpred(b, i))) {
425 /* case Phi 1: Do nothing */
427 else if (get_Block_block_visited(pred) + 1
428 < get_irg_block_visited(current_ir_graph)) {
429 /* case Phi 2: It's an empty block and not yet visited. */
430 ir_node *phi_pred = get_Phi_pred(phi, i);
432 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
433 /* because of breaking loops, not all predecessors are Bad-clean,
434 * so we must check this here again */
435 if (! is_Bad(get_Block_cfgpred(pred, j))) {
436 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: pred's own Phi — pull its operands up */
438 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
440 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: value from outside pred — replicate it */
443 in[p_preds++] = phi_pred;
/* case Phi 3: keep the operand as is */
449 in[p_preds++] = get_Phi_pred(phi, i);
452 assert(p_preds <= max_preds);
456 /* By removal of Bad ins the Phi might be degenerated. */
457 exchange(phi, in[0]);
459 set_irn_in(phi, p_preds, in);
/* advance before the link of phi may be reused */
462 phi = get_irn_link(phi);
465 /*- This happens only if merge between loop backedge and single loop entry.
466 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
467 of the Phi's in predb ... -*/
468 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
469 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
471 if (get_Block_block_visited(predb) + 1 < get_irg_block_visited(current_ir_graph)) {
474 /* we found a predecessor block at position k that will be removed */
475 for (phi = get_irn_link(predb); phi; phi = next_phi) {
477 next_phi = get_irn_link(phi);
481 if (get_Block_idom(b) != predb) {
482 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
483 exchange(phi, new_Bad());
485 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
486 predb in further block, so move this phi from the predecessor into the block b */
487 set_nodes_block(phi, b);
488 set_irn_link(phi, get_irn_link(b));
489 set_irn_link(b, phi);
/* rebuild the moved Phi's in-array against b's (new) preds: */
492 /* first, copy all 0..k-1 predecessors */
493 for (i = 0; i < k; i++) {
494 pred = get_Block_cfgpred_block(b, i);
498 } else if (get_Block_block_visited(pred) + 1
499 < get_irg_block_visited(current_ir_graph)) {
500 /* It's an empty block and not yet visited. */
501 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
502 if (! is_Bad(get_Block_cfgpred(pred, j)))
510 /* now we are at k, copy the phi predecessors */
511 pred = get_nodes_block(get_Block_cfgpred(b, k));
512 for (i = 0; i < get_Phi_n_preds(phi); i++) {
513 if (! is_Bad(get_Block_cfgpred(pred, i)))
514 in[q_preds++] = get_Phi_pred(phi, i);
517 /* and now all the rest */
518 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
519 pred = get_nodes_block(get_Block_cfgpred(b, i));
521 if (is_Bad(get_Block_cfgpred(b, i))) {
523 } else if (get_Block_block_visited(pred) +1
524 < get_irg_block_visited(current_ir_graph)) {
525 /* It's an empty block and not yet visited. */
526 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
527 if (! is_Bad(get_Block_cfgpred(pred, j)))
537 exchange(phi, in[0]);
539 set_irn_in(phi, q_preds, in);
542 assert(q_preds <= max_preds);
543 // assert(p_preds == q_preds && "Wrong Phi Fix");
549 /*- Fix the block -*/
551 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
552 pred = get_Block_cfgpred_block(b, i);
555 /* case 1: Do nothing */
556 } else if (get_Block_block_visited(pred) +1
557 < get_irg_block_visited(current_ir_graph)) {
558 /* case 2: It's an empty block and not yet visited. */
559 assert(get_Block_n_cfgpreds(b) > 1);
560 /* Else it should be optimized by equivalent_node. */
561 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
562 ir_node *pred_block = get_Block_cfgpred(pred, j);
564 /* because of breaking loops, not all predecessors are Bad-clean,
565 * so we must check this here again */
566 if (! is_Bad(pred_block))
567 in[n_preds++] = pred_block;
569 /* Remove block as it might be kept alive. */
570 exchange(pred, b/*new_Bad()*/);
/* case 3: useful block, keep the edge */
573 in[n_preds++] = get_Block_cfgpred(b, i);
576 assert(n_preds <= max_preds);
578 set_irn_in(b, n_preds, in);
581 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
586 * Block walker: optimize all blocks using the default optimizations.
587 * This removes Blocks with only a Jmp predecessor.
/* Post-walker: lets equivalent_node() fold trivial blocks and exchanges
 * the block when it simplifies; ctx is the merge_env (its changed flag is
 * presumably set in the elided tail — TODO confirm). */
589 static void remove_simple_blocks(ir_node *block, void *ctx) {
590 ir_node *new_blk = equivalent_node(block);
591 merge_env *env = ctx;
593 if (new_blk != block) {
594 exchange(block, new_blk);
600 * Handle pre-optimized table switch Cond's.
601 * During iropt, all Projs from a switch-Cond are already removed except
602 * the defProj and maybe the taken one.
603 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
605 * @param cond the switch-Cond
607 * @return non-zero if a switch-Cond was optimized
609 * Expects all Proj's linked to the cond node
/* Simplifies a switch-Cond using the Projs linked to it by collect_nodes:
 * - exactly one Proj left: it must be the defProj, turn it into a Jmp;
 * - exactly two Projs and a constant selector: the taken Proj becomes a
 *   Jmp, the other becomes Bad.
 * Returns non-zero if something was optimized (return sites are in the
 * elided lines). */
611 static int handle_switch_cond(ir_node *cond) {
612 ir_node *sel = get_Cond_selector(cond);
/* the link chain holds this Cond's remaining Projs */
614 ir_node *proj1 = get_irn_link(cond);
615 ir_node *proj2 = get_irn_link(proj1);
618 blk = get_nodes_block(cond);
621 /* this Cond has only one Proj: must be the defProj */
622 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
623 /* convert it into a Jmp */
624 jmp = new_r_Jmp(current_ir_graph, blk);
625 exchange(proj1, jmp);
627 } else if (get_irn_link(proj2) == NULL) {
628 /* We have two Proj's here. Check if the Cond has
629 a constant argument */
630 tarval *tv = value_of(sel);
632 if (tv != tarval_bad) {
633 /* we have a constant switch */
634 long num = get_tarval_long(tv);
635 long def_num = get_Cond_defaultProj(cond);
637 if (def_num == get_Proj_proj(proj1)) {
638 /* first one is the defProj */
639 if (num == get_Proj_proj(proj2)) {
640 jmp = new_r_Jmp(current_ir_graph, blk);
641 exchange(proj2, jmp);
642 exchange(proj1, new_Bad());
645 } else if (def_num == get_Proj_proj(proj2)) {
646 /* second one is the defProj */
647 if (num == get_Proj_proj(proj1)) {
648 jmp = new_r_Jmp(current_ir_graph, blk);
649 exchange(proj1, jmp);
650 exchange(proj2, new_Bad());
654 /* neither: strange, Cond was not optimized so far */
655 if (num == get_Proj_proj(proj1)) {
656 jmp = new_r_Jmp(current_ir_graph, blk);
657 exchange(proj1, jmp);
658 exchange(proj2, new_Bad());
660 } else if (num == get_Proj_proj(proj2)) {
661 jmp = new_r_Jmp(current_ir_graph, blk);
662 exchange(proj2, jmp);
663 exchange(proj1, new_Bad());
672 /* Optimizations of the control flow that also require changes of Phi nodes.
674 * This optimization performs two passes over the graph.
676 * The first pass collects all Phi nodes in a link list in the block
677 * nodes. Further it performs simple control flow optimizations.
678 * Finally it marks all blocks that do not contain useful
679 * computations, i.e., these blocks might be removed.
681 * The second pass performs the optimizations intended by this algorithm.
682 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
683 * which it finds in a linked list computed by the first pass.
685 * We use the block_visited flag to mark empty blocks in the first
687 * @@@ It would be better to add a struct in the link field
688 * that keeps the Phi list and the mark. Place it on an obstack, as
689 * we will lose blocks and thereby generate memory leaks.
/* Driver (see the long comment above): requires a pinned, fully built
 * graph.  Pass order: kill dead blocks via dominance, fix keep-alives,
 * remove senseless binary Conds, merge_blocks/collect_nodes walk,
 * handle switch-Conds, then the optimize_blocks/remove_simple_blocks
 * walk, and finally clean up the End node's keep-alive list. */
691 void optimize_cf(ir_graph *irg) {
694 ir_node *cond, *end = get_irg_end(irg);
695 ir_graph *rem = current_ir_graph;
699 assert(get_irg_phase_state(irg) != phase_building);
701 /* if the graph is not pinned, we cannot determine empty blocks */
702 assert(get_irg_pinned(irg) != op_pin_state_floats &&
703 "Control flow optimization need a pinned graph");
705 current_ir_graph = irg;
707 /* FIXME: is this still needed? */
708 edges_deactivate(irg);
713 if (get_opt_optimize() && get_opt_unreachable_code()) {
716 /* kill dead blocks using dom info */
718 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, &env.changed);
720 /* fix the keep-alives */
721 end = get_irg_end(irg);
722 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
723 ir_node *ka = get_End_keepalive(end, i);
726 /* do NOT keep dead blocks */
727 if (get_Block_dom_depth(ka) < 0) {
728 set_End_keepalive(end, i, new_Bad());
731 } else if (is_Block_dead(get_nodes_block(ka)) ||
732 get_Block_dom_depth(get_nodes_block(ka)) < 0) {
733 /* do NOT keep nodes in dead blocks */
734 set_End_keepalive(end, i, new_Bad());
739 irg_block_walk_graph(irg, NULL, remove_senseless_conds, &env.changed);
741 /* Use block visited flag to mark non-empty blocks. */
742 inc_irg_block_visited(irg);
743 set_using_block_visited(irg);
744 set_using_irn_link(irg);
746 env.list = plist_new();
747 irg_walk(end, merge_blocks, collect_nodes, &env);
749 clear_using_block_visited(irg);
750 clear_using_irn_link(irg);
752 /* handle all collected switch-Conds */
753 foreach_plist(env.list, el) {
754 cond = plist_element_get_value(el);
755 env.changed |= handle_switch_cond(cond);
757 plist_free(env.list);
760 /* Handle graph state if was changed. */
761 set_irg_outs_inconsistent(irg);
762 set_irg_doms_inconsistent(irg);
763 set_irg_extblk_inconsistent(irg);
764 set_irg_loopinfo_inconsistent(irg);
767 /* Optimize the standard code. */
770 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
772 /* Walk all keep alives, optimize them if block, add to new in-array
773 for end if useful. */
774 n = get_End_n_keepalives(end);
776 NEW_ARR_A(ir_node *, in, n);
778 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
779 inc_irg_visited(irg);
780 set_using_irn_visited(irg);
782 /* fix the keep alive */
783 for (i = j = 0; i < n; i++) {
784 ir_node *ka = get_End_keepalive(end, i);
786 if (irn_not_visited(ka)) {
787 ir_op *op = get_irn_op(ka);
789 if ((op == op_Block) && Block_not_block_visited(ka)) {
790 /* irg_block_walk() will increase the block visited flag, but we must visit only
791 these blocks that are not visited yet, so decrease it first. */
792 set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
793 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
794 mark_irn_visited(ka);
796 } else if (op == op_Phi) {
797 mark_irn_visited(ka);
798 /* don't keep alive dead blocks */
799 if (! is_Block_dead(get_nodes_block(ka)))
801 } else if (is_op_keep(op)) {
802 mark_irn_visited(ka);
803 if (! is_Block_dead(get_nodes_block(ka)))
/* compacted keep-alive list written back: */
809 set_End_keepalives(end, j, in);
813 clear_using_irn_visited(irg);
815 if (env.phis_moved) {
816 /* Bad: when we moved Phi's, we might produce dead Phi nodes
818 Some other phases cannot cope with this, so kill them.
820 n = get_End_n_keepalives(end);
823 /* Handle graph state if was changed. */
824 set_irg_outs_inconsistent(irg);
826 assure_irg_outs(irg);
828 for (i = j = 0; i < n; ++i) {
829 ir_node *ka = get_End_keepalive(end, i);
/* scan users to tell real uses from self-loops/keep edges */
834 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
835 ir_node *user = get_irn_out(ka, k);
837 if (user != ka && user != end) {
838 /* Is it a real user or just a self loop ? */
848 set_End_keepalives(end, j, in);
855 /* Handle graph state if was changed. */
856 set_irg_outs_inconsistent(irg);
857 set_irg_doms_inconsistent(irg);
858 set_irg_extblk_inconsistent(irg);
859 set_irg_loopinfo_inconsistent(irg);
863 /* the verifier doesn't work yet with floating nodes */
864 if (get_irg_pinned(irg) == op_pin_state_pinned) {
865 /* after optimize_cf(), only Bad data flow may remain. */
866 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
867 dump_ir_block_graph(irg, "-vrfy-cf");
868 dump_ir_graph(irg, "-vrfy-cf");
869 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
873 current_ir_graph = rem;