2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
30 #include "iroptimize.h"
37 #include "irgraph_t.h"
51 #include "irbackedge_t.h"
56 #include "iropt_dbg.h"
58 /*------------------------------------------------------------------*/
59 /* Control flow optimization. */
61 /* Removes Bad control flow predecessors and empty blocks. A block */
62 /* is empty if it contains only a Jmp node. */
63 /* Blocks can only be removed if they are not needed for the */
64 /* semantics of Phi nodes. */
65 /*------------------------------------------------------------------*/
68 * Block walker, replacing binary Conds that jumps twice into the same block
74 * ProjX True ProjX False ==> | /
79 * Such pattern are the result of if-conversion.
81 * Note that the simple case that Block has only these two
82 * predecessors are already handled in equivalent_node_Block().
84 static void remove_senseless_conds(ir_node *bl, void *env) {
86 int n = get_Block_n_cfgpreds(bl);
91 for (i = 0; i < n; ++i) {
92 ir_node *pred_i = get_Block_cfgpred(bl, i);
93 ir_node *cond_i = skip_Proj(pred_i);
96 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
98 for (j = i + 1; j < n; ++j) {
99 ir_node *pred_j = get_Block_cfgpred(bl, j);
100 ir_node *cond_j = skip_Proj(pred_j);
102 if (cond_j == cond_i) {
103 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
104 set_irn_n(bl, i, jmp);
105 set_irn_n(bl, j, new_Bad());
107 DBG_OPT_IFSIM2(cond_i, jmp);
116 /** An environment for merge_blocks and collect nodes. */
117 typedef struct _merge_env {
118 int changed; /**< Set if the graph was changed. */
119 int phis_moved; /**< Set if Phi nodes were moved. */
120 plist_t *list; /**< Helper list for all found Switch Conds. */
124 * Removes Tuples from Block control flow predecessors.
125 * Optimizes blocks with equivalent_node(). This is tricky,
126 * as we want to avoid nodes that have as block predecessor Bads.
127 * Therefore we also optimize at control flow operations, depending
128 * how we first reach the Block.
130 static void merge_blocks(ir_node *node, void *ctx) {
133 merge_env *env = ctx;
135 /* clear the link field for ALL nodes first */
136 set_irn_link(node, NULL);
138 if (is_Block(node)) {
140 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
141 ir_node *pred = get_Block_cfgpred(node, i);
142 ir_node *skipped = skip_Tuple(pred);
143 if (pred != skipped) {
144 set_Block_cfgpred(node, i, skipped);
150 new_block = equivalent_node(node);
151 if (new_block != node && ! is_Block_dead(new_block)) {
152 exchange(node, new_block);
156 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
157 /* We will soon visit a block. Optimize it before visiting! */
158 ir_node *b = get_nodes_block(skip_Proj(node));
160 if (!is_Block_dead(b)) {
161 new_block = equivalent_node(b);
163 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
164 /* We would have to run gigo() if new is bad, so we
165 promote it directly below. Nevertheless, we sometimes reach a block
166 the first time through a dataflow node. In this case we optimized the
167 block as such and have to promote the Bad here. */
168 assert((get_opt_control_flow_straightening() ||
169 get_opt_control_flow_weak_simplification()) &&
170 ("strange flag setting"));
171 exchange(b, new_block);
174 new_block = equivalent_node(b);
177 /* normally, we would create a Bad block here, but this must be
178 * prevented, so just set it's cf to Bad.
180 if (is_Block_dead(new_block)) {
181 exchange(node, new_Bad());
190 * Block walker removing control flow from dead block by
191 * inspecting dominance info.
192 * Do not replace blocks by Bad. This optimization shall
193 * ensure, that all Bad control flow predecessors are
194 * removed, and no new other Bads are introduced.
196 * Must be run in the post walker.
198 static void remove_dead_block_cf(ir_node *block, void *env) {
202 /* check block predecessors and turn control flow into bad */
203 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
204 ir_node *pred_X = get_Block_cfgpred(block, i);
206 if (! is_Bad(pred_X)) {
207 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
209 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
210 set_Block_dead(pred_bl);
211 exchange(pred_X, new_Bad());
219 * Collects all Phi nodes in link list of Block.
220 * Marks all blocks "block_visited" if they contain a node other
221 * than Jmp (and Proj).
222 * Links all Proj nodes to their predecessors.
223 * Collects all switch-Conds in a list.
225 static void collect_nodes(ir_node *n, void *ctx) {
226 ir_op *op = get_irn_op(n);
227 merge_env *env = ctx;
229 if (op != op_Block) {
230 ir_node *b = get_nodes_block(n);
233 /* Collect Phi nodes to compact ins along with block's ins. */
234 set_irn_link(n, get_irn_link(b));
236 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
237 mark_Block_block_visited(b);
239 if (op == op_Proj) { /* link Proj nodes */
240 ir_node *pred = get_Proj_pred(n);
242 set_irn_link(n, get_irn_link(pred));
243 set_irn_link(pred, n);
244 } else if (op == op_Cond) {
245 ir_node *sel = get_Cond_selector(n);
246 if (mode_is_int(get_irn_mode(sel))) {
247 /* found a switch-Cond, collect */
248 plist_insert_back(env->list, n);
255 /** Returns true if pred is predecessor of block. */
256 static int is_pred_of(ir_node *pred, ir_node *b) {
259 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
260 ir_node *b_pred = get_Block_cfgpred_block(b, i);
261 if (b_pred == pred) return 1;
266 /** Test whether we can optimize away pred block pos of b.
268 * @param b A block node.
269 * @param pos The position of the predecessor block to judge about.
271 * @returns The number of predecessors
273 * The test is rather tricky.
275 * The situation is something like the following:
284 * b merges the control flow of an if-then-else. We may not remove
285 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
286 * node in b, even if both are empty. The destruction of this Phi
287 * requires that a copy is added before the merge. We have to
288 * keep one of the case blocks to place the copies in.
290 * To perform the test for pos, we must regard predecessors before pos
291 * as already removed.
293 static int test_whether_dispensable(ir_node *b, int pos) {
294 int i, j, n_preds = 1;
295 ir_node *pred = get_Block_cfgpred_block(b, pos);
297 /* Bad blocks will be optimized away, so we don't need space for them */
298 if (is_Block_dead(pred))
301 if (get_Block_block_visited(pred) + 1
302 < get_irg_block_visited(current_ir_graph)) {
304 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
305 /* Mark block so that is will not be removed: optimization is turned off. */
306 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
310 /* Seems to be empty. At least we detected this in collect_nodes. */
311 if (!get_irn_link(b)) {
312 /* There are no Phi nodes ==> all predecessors are dispensable. */
313 n_preds = get_Block_n_cfgpreds(pred);
315 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
316 Handle all pred blocks with preds < pos as if they were already removed. */
317 for (i = 0; i < pos; i++) {
318 ir_node *b_pred = get_Block_cfgpred_block(b, i);
319 if (! is_Block_dead(b_pred) &&
320 get_Block_block_visited(b_pred) + 1
321 < get_irg_block_visited(current_ir_graph)) {
322 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
323 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
324 if (is_pred_of(b_pred_pred, pred))
325 goto non_dispensable;
328 if (is_pred_of(b_pred, pred))
329 goto non_dispensable;
332 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
333 ir_node *b_pred = get_Block_cfgpred_block(b, i);
334 if (is_pred_of(b_pred, pred))
335 goto non_dispensable;
337 /* if we get here, the block is dispensable */
338 n_preds = get_Block_n_cfgpreds(pred);
345 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
350 * This method removed Bad cf predecessors from Blocks and Phis, and removes
351 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
353 * We first adapt Phi nodes, then Block nodes, as we need the old ins
354 * of the Block to adapt the Phi nodes. We do this by computing new
355 * in arrays, and then replacing the old ones. So far we compute new in arrays
356 * for all nodes, not regarding whether there is a possibility for optimization.
358 * For each predecessor p of a Block b there are three cases:
359 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
360 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
362 * -3. The predecessor p is a block containing useful code. Just keep p as is.
364 * For Phi nodes f we have to check the conditions at the Block of f.
365 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
367 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
368 * case we proceed as for blocks. We remove pred_f. All
369 * predecessors of pred_f now are predecessors of f.
370 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
371 * We have to replicate f for each predecessor of the removed block. Or, with
372 * other words, the removed predecessor block has exactly one predecessor.
374 * Further there is a special case for self referencing blocks:
377 * then_b else_b then_b else_b
383 * | | | === optimized to ===> \ | | |
390 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
391 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
393 * @@@ It is negotiable whether we should do this ... there might end up a copy
394 * from the Phi in the loop when removing the Phis.
396 static void optimize_blocks(ir_node *b, void *ctx) {
397 int i, j, k, n, max_preds, n_preds, p_preds = -1;
400 merge_env *env = ctx;
402 /* Count the number of predecessor if this block is merged with pred blocks
405 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
406 max_preds += test_whether_dispensable(b, i);
408 in = xmalloc(max_preds * sizeof(*in));
410 /*- Fix the Phi nodes of the current block -*/
411 for (phi = get_irn_link(b); phi; ) {
412 assert(get_irn_op(phi) == op_Phi);
414 /* Find the new predecessors for the Phi */
416 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
417 pred = get_Block_cfgpred_block(b, i);
419 if (is_Bad(get_Block_cfgpred(b, i))) {
420 /* case Phi 1: Do nothing */
422 else if (get_Block_block_visited(pred) + 1
423 < get_irg_block_visited(current_ir_graph)) {
424 /* case Phi 2: It's an empty block and not yet visited. */
425 ir_node *phi_pred = get_Phi_pred(phi, i);
427 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
428 /* because of breaking loops, not all predecessors are Bad-clean,
429 * so we must check this here again */
430 if (! is_Bad(get_Block_cfgpred(pred, j))) {
431 if (get_nodes_block(phi_pred) == pred) {
433 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
435 in[p_preds++] = get_Phi_pred(phi_pred, j);
438 in[p_preds++] = phi_pred;
444 in[p_preds++] = get_Phi_pred(phi, i);
447 assert(p_preds <= max_preds);
451 /* By removal of Bad ins the Phi might be degenerated. */
452 exchange(phi, in[0]);
454 set_irn_in(phi, p_preds, in);
457 phi = get_irn_link(phi);
460 /*- This happens only if merge between loop backedge and single loop entry.
461 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
462 of the Phi's in predb ... -*/
463 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
464 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
466 if (get_Block_block_visited(predb) + 1 < get_irg_block_visited(current_ir_graph)) {
469 /* we found a predecessor block at position k that will be removed */
470 for (phi = get_irn_link(predb); phi; phi = next_phi) {
472 next_phi = get_irn_link(phi);
476 if (get_Block_idom(b) != predb) {
477 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
478 exchange(phi, new_Bad());
480 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
481 predb in further block, so move this phi from the predecessor into the block b */
482 set_nodes_block(phi, b);
483 set_irn_link(phi, get_irn_link(b));
484 set_irn_link(b, phi);
487 /* first, copy all 0..k-1 predecessors */
488 for (i = 0; i < k; i++) {
489 pred = get_Block_cfgpred_block(b, i);
493 } else if (get_Block_block_visited(pred) + 1
494 < get_irg_block_visited(current_ir_graph)) {
495 /* It's an empty block and not yet visited. */
496 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
497 if (! is_Bad(get_Block_cfgpred(pred, j)))
505 /* now we are at k, copy the phi predecessors */
506 pred = get_nodes_block(get_Block_cfgpred(b, k));
507 for (i = 0; i < get_Phi_n_preds(phi); i++) {
508 if (! is_Bad(get_Block_cfgpred(pred, i)))
509 in[q_preds++] = get_Phi_pred(phi, i);
512 /* and now all the rest */
513 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
514 pred = get_nodes_block(get_Block_cfgpred(b, i));
516 if (is_Bad(get_Block_cfgpred(b, i))) {
518 } else if (get_Block_block_visited(pred) +1
519 < get_irg_block_visited(current_ir_graph)) {
520 /* It's an empty block and not yet visited. */
521 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
522 if (! is_Bad(get_Block_cfgpred(pred, j)))
532 exchange(phi, in[0]);
534 set_irn_in(phi, q_preds, in);
537 assert(q_preds <= max_preds);
538 // assert(p_preds == q_preds && "Wrong Phi Fix");
544 /*- Fix the block -*/
546 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
547 pred = get_Block_cfgpred_block(b, i);
550 /* case 1: Do nothing */
551 } else if (get_Block_block_visited(pred) +1
552 < get_irg_block_visited(current_ir_graph)) {
553 /* case 2: It's an empty block and not yet visited. */
554 assert(get_Block_n_cfgpreds(b) > 1);
555 /* Else it should be optimized by equivalent_node. */
556 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
557 ir_node *pred_block = get_Block_cfgpred(pred, j);
559 /* because of breaking loops, not all predecessors are Bad-clean,
560 * so we must check this here again */
561 if (! is_Bad(pred_block))
562 in[n_preds++] = pred_block;
564 /* Remove block as it might be kept alive. */
565 exchange(pred, b/*new_Bad()*/);
568 in[n_preds++] = get_Block_cfgpred(b, i);
571 assert(n_preds <= max_preds);
573 set_irn_in(b, n_preds, in);
576 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
581 * Block walker: optimize all blocks using the default optimizations.
582 * This removes Blocks that with only a Jmp predecessor.
584 static void remove_simple_blocks(ir_node *block, void *ctx) {
585 ir_node *new_blk = equivalent_node(block);
586 merge_env *env = ctx;
588 if (new_blk != block) {
589 exchange(block, new_blk);
595 * Handle pre-optimized table switch Cond's.
596 * During iropt, all Projs from a switch-Cond are already removed except
597 * the defProj and maybe the taken one.
598 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
600 * @param cond the switch-Cond
602 * @return non-zero if a switch-Cond was optimized
604 * Expects all Proj's linked to the cond node
606 static int handle_switch_cond(ir_node *cond) {
607 ir_node *sel = get_Cond_selector(cond);
609 ir_node *proj1 = get_irn_link(cond);
610 ir_node *proj2 = get_irn_link(proj1);
613 blk = get_nodes_block(cond);
616 /* this Cond has only one Proj: must be the defProj */
617 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
618 /* convert it into a Jmp */
619 jmp = new_r_Jmp(current_ir_graph, blk);
620 exchange(proj1, jmp);
622 } else if (get_irn_link(proj2) == NULL) {
623 /* We have two Proj's here. Check if the Cond has
624 a constant argument */
625 tarval *tv = value_of(sel);
627 if (tv != tarval_bad) {
628 /* we have a constant switch */
629 long num = get_tarval_long(tv);
630 long def_num = get_Cond_defaultProj(cond);
632 if (def_num == get_Proj_proj(proj1)) {
633 /* first one is the defProj */
634 if (num == get_Proj_proj(proj2)) {
635 jmp = new_r_Jmp(current_ir_graph, blk);
636 exchange(proj2, jmp);
637 exchange(proj1, new_Bad());
640 } else if (def_num == get_Proj_proj(proj2)) {
641 /* second one is the defProj */
642 if (num == get_Proj_proj(proj1)) {
643 jmp = new_r_Jmp(current_ir_graph, blk);
644 exchange(proj1, jmp);
645 exchange(proj2, new_Bad());
649 /* neither: strange, Cond was not optimized so far */
650 if (num == get_Proj_proj(proj1)) {
651 jmp = new_r_Jmp(current_ir_graph, blk);
652 exchange(proj1, jmp);
653 exchange(proj2, new_Bad());
655 } else if (num == get_Proj_proj(proj2)) {
656 jmp = new_r_Jmp(current_ir_graph, blk);
657 exchange(proj2, jmp);
658 exchange(proj1, new_Bad());
667 /* Optimizations of the control flow that also require changes of Phi nodes.
669 * This optimization performs two passes over the graph.
671 * The first pass collects all Phi nodes in a link list in the block
672 * nodes. Further it performs simple control flow optimizations.
673 * Finally it marks all blocks that do not contain useful
674 * computations, i.e., these blocks might be removed.
676 * The second pass performs the optimizations intended by this algorithm.
677 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
678 * which it finds in a linked list computed by the first pass.
680 * We use the block_visited flag to mark empty blocks in the first
682 * @@@ It would be better to add a struct in the link field
683 * that keeps the Phi list and the mark. Place it on an obstack, as
684 * we will lose blocks and thereby generate memory leaks.
686 void optimize_cf(ir_graph *irg) {
689 ir_node *cond, *end = get_irg_end(irg);
690 ir_graph *rem = current_ir_graph;
694 assert(get_irg_phase_state(irg) != phase_building);
696 /* if the graph is not pinned, we cannot determine empty blocks */
697 assert(get_irg_pinned(irg) != op_pin_state_floats &&
698 "Control flow optimization need a pinned graph");
700 current_ir_graph = irg;
702 /* FIXME: is this still needed? */
703 edges_deactivate(irg);
708 if (get_opt_optimize() && get_opt_unreachable_code()) {
711 /* kill dead blocks using dom info */
713 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, &env.changed);
715 /* fix the keep-alives */
716 end = get_irg_end(irg);
717 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
718 ir_node *ka = get_End_keepalive(end, i);
721 /* do NOT keep dead blocks */
722 if (get_Block_dom_depth(ka) < 0) {
723 set_End_keepalive(end, i, new_Bad());
726 } else if (is_Block_dead(get_nodes_block(ka)) ||
727 get_Block_dom_depth(get_nodes_block(ka)) < 0) {
728 /* do NOT keep nodes in dead blocks */
729 set_End_keepalive(end, i, new_Bad());
734 irg_block_walk_graph(irg, NULL, remove_senseless_conds, &env.changed);
736 /* Use block visited flag to mark non-empty blocks. */
737 inc_irg_block_visited(irg);
738 set_using_block_visited(irg);
739 set_using_irn_link(irg);
741 env.list = plist_new();
742 irg_walk(end, merge_blocks, collect_nodes, &env);
744 clear_using_block_visited(irg);
745 clear_using_irn_link(irg);
747 /* handle all collected switch-Conds */
748 foreach_plist(env.list, el) {
749 cond = plist_element_get_value(el);
750 env.changed |= handle_switch_cond(cond);
752 plist_free(env.list);
755 /* Handle graph state if was changed. */
756 set_irg_outs_inconsistent(irg);
757 set_irg_doms_inconsistent(irg);
758 set_irg_extblk_inconsistent(irg);
759 set_irg_loopinfo_inconsistent(irg);
762 /* Optimize the standard code. */
765 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
767 /* Walk all keep alives, optimize them if block, add to new in-array
768 for end if useful. */
769 n = get_End_n_keepalives(end);
771 NEW_ARR_A(ir_node *, in, n);
773 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
774 inc_irg_visited(irg);
775 set_using_visited(irg);
777 /* fix the keep alive */
778 for (i = j = 0; i < n; i++) {
779 ir_node *ka = get_End_keepalive(end, i);
781 if (irn_not_visited(ka)) {
782 ir_op *op = get_irn_op(ka);
784 if ((op == op_Block) && Block_not_block_visited(ka)) {
785 /* irg_block_walk() will increase the block visited flag, but we must visit only
786 these blocks that are not visited yet, so decrease it first. */
787 set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
788 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
789 mark_irn_visited(ka);
791 } else if (op == op_Phi) {
792 mark_irn_visited(ka);
793 /* don't keep alive dead blocks */
794 if (! is_Block_dead(get_nodes_block(ka)))
796 } else if (is_op_keep(op)) {
797 mark_irn_visited(ka);
798 if (! is_Block_dead(get_nodes_block(ka)))
804 set_End_keepalives(end, j, in);
808 clear_using_visited(irg);
810 if (env.phis_moved) {
811 /* Bad: when we moved Phi's, we might produce dead Phi nodes
813 Some other phases cannot copy with this, so will them.
815 n = get_End_n_keepalives(end);
818 /* Handle graph state if was changed. */
819 set_irg_outs_inconsistent(irg);
821 assure_irg_outs(irg);
823 for (i = j = 0; i < n; ++i) {
824 ir_node *ka = get_End_keepalive(end, i);
829 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
830 ir_node *user = get_irn_out(ka, k);
832 if (user != ka && user != end) {
833 /* Is it a real user or just a self loop ? */
843 set_End_keepalives(end, j, in);
850 /* Handle graph state if was changed. */
851 set_irg_outs_inconsistent(irg);
852 set_irg_doms_inconsistent(irg);
853 set_irg_extblk_inconsistent(irg);
854 set_irg_loopinfo_inconsistent(irg);
858 /* the verifier doesn't work yet with floating nodes */
859 if (get_irg_pinned(irg) == op_pin_state_pinned) {
860 /* after optimize_cf(), only Bad data flow may remain. */
861 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
862 dump_ir_block_graph(irg, "-vrfy-cf");
863 dump_ir_graph(irg, "-vrfy-cf");
864 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
868 current_ir_graph = rem;