2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
35 #include "irgraph_t.h"
49 #include "irbackedge_t.h"
55 #include "iropt_dbg.h"
57 /*------------------------------------------------------------------*/
58 /* Control flow optimization. */
60 /* Removes Bad control flow predecessors and empty blocks. A block */
61 /* is empty if it contains only a Jmp node. */
62 /* Blocks can only be removed if they are not needed for the */
63 /* semantics of Phi nodes. */
64 /*------------------------------------------------------------------*/
67 * Block walker, replacing binary Conds that jumps twice into the same block
73 * ProjX True ProjX False ==> | /
78 * Such patterns are the result of if-conversion.
80 * Note that the simple case that Block has only these two
81 * predecessors are already handled in equivalent_node_Block().
83 static void remove_senseless_conds(ir_node *bl, void *env) {
85 int n = get_Block_n_cfgpreds(bl);
90 for (i = 0; i < n; ++i) {
91 ir_node *pred_i = get_Block_cfgpred(bl, i);
92 ir_node *cond_i = skip_Proj(pred_i);
/* only binary Conds (boolean selector) are of interest; switch-Conds are handled elsewhere */
95 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
/* look for a second predecessor of bl that is a Proj of the SAME Cond */
97 for (j = i + 1; j < n; ++j) {
98 ir_node *pred_j = get_Block_cfgpred(bl, j);
99 ir_node *cond_j = skip_Proj(pred_j);
101 if (cond_j == cond_i) {
/* both outputs of the Cond jump into bl: replace one input by a plain
 * Jmp from the Cond's block and mark the other Bad (removed later) */
102 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
103 set_irn_n(bl, i, jmp);
104 set_irn_n(bl, j, new_Bad());
106 DBG_OPT_IFSIM2(cond_i, jmp);
115 /** An environment for merge_blocks and collect nodes. */
/* Shared state threaded through the walker callbacks below via the void *ctx argument. */
116 typedef struct _merge_env {
117 int changed; /**< Set if the graph was changed. */
118 int phis_moved; /**< Set if Phi nodes were moved. */
119 plist_t *list; /**< Helper list for all found Switch Conds. */
123 * Removes Tuples from Block control flow predecessors.
124 * Optimizes blocks with equivalent_node(). This is tricky,
125 * as we want to avoid nodes that have as block predecessor Bads.
126 * Therefore we also optimize at control flow operations, depending
127 * how we first reach the Block.
129 static void merge_blocks(ir_node *node, void *ctx) {
132 merge_env *env = ctx;
134 /* clear the link field for ALL nodes first */
135 set_irn_link(node, NULL);
137 if (is_Block(node)) {
/* remove Tuples from the block's control flow predecessors */
139 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
140 ir_node *pred = get_Block_cfgpred(node, i);
141 ir_node *skipped = skip_Tuple(pred);
142 if (pred != skipped) {
143 set_Block_cfgpred(node, i, skipped);
/* try to simplify the block itself, but never exchange it for a dead block */
149 new_block = equivalent_node(node);
150 if (new_block != node && ! is_Block_dead(new_block)) {
151 exchange(node, new_block);
155 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
156 /* We will soon visit a block. Optimize it before visiting! */
157 ir_node *b = get_nodes_block(skip_Proj(node));
159 if (!is_Block_dead(b)) {
160 new_block = equivalent_node(b);
162 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
163 /* We would have to run gigo() if new is bad, so we
164 promote it directly below. Nevertheless, we sometimes reach a block
165 the first time through a dataflow node. In this case we optimized the
166 block as such and have to promote the Bad here. */
167 assert((get_opt_control_flow_straightening() ||
168 get_opt_control_flow_weak_simplification()) &&
169 ("strange flag setting"));
170 exchange(b, new_block);
173 new_block = equivalent_node(b);
176 /* normally, we would create a Bad block here, but this must be
177 * prevented, so just set its cf to Bad.
179 if (is_Block_dead(new_block)) {
180 exchange(node, new_Bad());
189 * Block walker removing control flow from dead block by
190 * inspecting dominance info.
191 * Do not replace blocks by Bad. This optimization shall
192 * ensure, that all Bad control flow predecessors are
193 * removed, and no new other Bads are introduced.
195 * Must be run in the post walker.
197 static void remove_dead_block_cf(ir_node *block, void *env) {
201 /* check block predecessors and turn control flow into bad */
202 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
203 ir_node *pred_X = get_Block_cfgpred(block, i);
205 if (! is_Bad(pred_X)) {
206 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
/* a negative dominance depth marks a block not reached by the dominance
 * computation, i.e. an unreachable (dead) block */
208 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
209 set_Block_dead(pred_bl);
210 exchange(pred_X, new_Bad());
218 * Collects all Phi nodes in link list of Block.
219 * Marks all blocks "block_visited" if they contain a node other
220 * than Jmp (and Proj).
221 * Links all Proj nodes to their predecessors.
222 * Collects all switch-Conds in a list.
224 static void collect_nodes(ir_node *n, void *ctx) {
225 ir_op *op = get_irn_op(n);
226 merge_env *env = ctx;
228 if (op != op_Block) {
229 ir_node *b = get_nodes_block(n);
232 /* Collect Phi nodes to compact ins along with block's ins. */
233 set_irn_link(n, get_irn_link(b));
235 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
/* mark the block visited == "contains a node other than Jmp/Proj/Phi" */
236 mark_Block_block_visited(b);
238 if (op == op_Proj) { /* link Proj nodes */
239 ir_node *pred = get_Proj_pred(n);
241 set_irn_link(n, get_irn_link(pred));
242 set_irn_link(pred, n);
243 } else if (op == op_Cond) {
244 ir_node *sel = get_Cond_selector(n);
245 if (mode_is_int(get_irn_mode(sel))) {
246 /* found a switch-Cond, collect */
247 plist_insert_back(env->list, n);
254 /** Returns true if pred is predecessor of block. */
/* Linear scan over b's control flow predecessors; returns 1 on the first match. */
255 static int is_pred_of(ir_node *pred, ir_node *b) {
258 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
259 ir_node *b_pred = get_Block_cfgpred_block(b, i);
260 if (b_pred == pred) return 1;
265 /** Test whether we can optimize away pred block pos of b.
267 * @param b A block node.
268 * @param pos The position of the predecessor block to judge about.
270 * @returns The number of predecessors
272 * The test is rather tricky.
274 * The situation is something like the following:
283 * b merges the control flow of an if-then-else. We may not remove
284 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
285 * node in b, even if both are empty. The destruction of this Phi
286 * requires that a copy is added before the merge. We have to
287 * keep one of the case blocks to place the copies in.
289 * To perform the test for pos, we must regard predecessors before pos
290 * as already removed.
292 static int test_whether_dispensable(ir_node *b, int pos) {
293 int i, j, n_preds = 1;
294 ir_node *pred = get_Block_cfgpred_block(b, pos);
296 /* Bad blocks will be optimized away, so we don't need space for them */
297 if (is_Block_dead(pred))
/* block_visited + 1 < irg_block_visited means: pred was NOT marked by
 * collect_nodes, i.e. it looks empty (contains only Jmp/Proj/Phi) */
300 if (get_Block_block_visited(pred) + 1
301 < get_irg_block_visited(current_ir_graph)) {
303 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
304 /* Mark block so that it will not be removed: optimization is turned off. */
305 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
309 /* Seems to be empty. At least we detected this in collect_nodes. */
310 if (!get_irn_link(b)) {
311 /* There are no Phi nodes ==> all predecessors are dispensable. */
312 n_preds = get_Block_n_cfgpreds(pred);
314 /* b's pred blocks and pred's pred blocks must be pairwise disjoint.
315 Handle all pred blocks with preds < pos as if they were already removed. */
316 for (i = 0; i < pos; i++) {
317 ir_node *b_pred = get_Block_cfgpred_block(b, i);
318 if (! is_Block_dead(b_pred) &&
319 get_Block_block_visited(b_pred) + 1
320 < get_irg_block_visited(current_ir_graph)) {
/* b_pred is empty too and will be merged: its predecessors count */
321 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
322 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
323 if (is_pred_of(b_pred_pred, pred))
324 goto non_dispensable;
327 if (is_pred_of(b_pred, pred))
328 goto non_dispensable;
/* predecessors after pos are regarded as kept */
331 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
332 ir_node *b_pred = get_Block_cfgpred_block(b, i);
333 if (is_pred_of(b_pred, pred))
334 goto non_dispensable;
336 /* if we get here, the block is dispensable */
337 n_preds = get_Block_n_cfgpreds(pred);
/* non_dispensable: keep pred by re-marking it as visited (non-empty) */
344 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
349 * This method removes Bad cf predecessors from Blocks and Phis, and removes
350 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
352 * We first adapt Phi nodes, then Block nodes, as we need the old ins
353 * of the Block to adapt the Phi nodes. We do this by computing new
354 * in arrays, and then replacing the old ones. So far we compute new in arrays
355 * for all nodes, not regarding whether there is a possibility for optimization.
357 * For each predecessor p of a Block b there are three cases:
358 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
359 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
361 * -3. The predecessor p is a block containing useful code. Just keep p as is.
363 * For Phi nodes f we have to check the conditions at the Block of f.
364 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
366 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
367 * case we proceed as for blocks. We remove pred_f. All
368 * predecessors of pred_f now are predecessors of f.
369 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
370 * We have to replicate f for each predecessor of the removed block. Or, with
371 * other words, the removed predecessor block has exactly one predecessor.
373 * Further there is a special case for self referencing blocks:
376 * then_b else_b then_b else_b
382 * | | | === optimized to ===> \ | | |
389 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
390 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
392 * @@@ It is negotiable whether we should do this ... there might end up a copy
393 * from the Phi in the loop when removing the Phis.
395 static void optimize_blocks(ir_node *b, void *ctx) {
396 int i, j, k, n, max_preds, n_preds, p_preds = -1;
399 merge_env *env = ctx;
401 /* Count the number of predecessor if this block is merged with pred blocks
404 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
405 max_preds += test_whether_dispensable(b, i);
/* scratch array large enough for the new in-arrays of b and all its Phis */
407 in = xmalloc(max_preds * sizeof(*in));
409 /*- Fix the Phi nodes of the current block -*/
410 for (phi = get_irn_link(b); phi; ) {
411 assert(get_irn_op(phi) == op_Phi);
413 /* Find the new predecessors for the Phi */
415 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
416 pred = get_Block_cfgpred_block(b, i);
418 if (is_Bad(get_Block_cfgpred(b, i))) {
419 /* case Phi 1: Do nothing */
421 else if (get_Block_block_visited(pred) + 1
422 < get_irg_block_visited(current_ir_graph)) {
423 /* case Phi 2: It's an empty block and not yet visited. */
424 ir_node *phi_pred = get_Phi_pred(phi, i);
426 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
427 /* because of breaking loops, not all predecessors are Bad-clean,
428 * so we must check this here again */
429 if (! is_Bad(get_Block_cfgpred(pred, j))) {
430 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: the Phi argument lives in the removed block: inline its ins */
432 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
434 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: argument lives elsewhere: replicate it for each pred of pred */
437 in[p_preds++] = phi_pred;
/* case Phi 3: pred stays: keep the old Phi argument */
443 in[p_preds++] = get_Phi_pred(phi, i);
446 assert(p_preds <= max_preds);
450 /* By removal of Bad ins the Phi might be degenerated. */
451 exchange(phi, in[0]);
453 set_irn_in(phi, p_preds, in);
456 phi = get_irn_link(phi);
459 /*- This happens only if merge between loop backedge and single loop entry.
460 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
461 of the Phi's in predb ... -*/
462 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
463 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
465 if (get_Block_block_visited(predb) + 1 < get_irg_block_visited(current_ir_graph)) {
468 /* we found a predecessor block at position k that will be removed */
469 for (phi = get_irn_link(predb); phi; phi = next_phi) {
471 next_phi = get_irn_link(phi);
475 if (get_Block_idom(b) != predb) {
476 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them. */
477 exchange(phi, new_Bad());
479 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
480 predb in further blocks, so move this phi from the predecessor into the block b */
481 set_nodes_block(phi, b);
482 set_irn_link(phi, get_irn_link(b));
483 set_irn_link(b, phi);
486 /* first, copy all 0..k-1 predecessors */
487 for (i = 0; i < k; i++) {
488 pred = get_Block_cfgpred_block(b, i);
492 } else if (get_Block_block_visited(pred) + 1
493 < get_irg_block_visited(current_ir_graph)) {
494 /* It's an empty block and not yet visited. */
495 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
496 if (! is_Bad(get_Block_cfgpred(pred, j)))
504 /* now we are at k, copy the phi predecessors */
505 pred = get_nodes_block(get_Block_cfgpred(b, k));
506 for (i = 0; i < get_Phi_n_preds(phi); i++) {
507 if (! is_Bad(get_Block_cfgpred(pred, i)))
508 in[q_preds++] = get_Phi_pred(phi, i);
511 /* and now all the rest */
512 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
513 pred = get_nodes_block(get_Block_cfgpred(b, i));
515 if (is_Bad(get_Block_cfgpred(b, i))) {
517 } else if (get_Block_block_visited(pred) +1
518 < get_irg_block_visited(current_ir_graph)) {
519 /* It's an empty block and not yet visited. */
520 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
521 if (! is_Bad(get_Block_cfgpred(pred, j)))
531 exchange(phi, in[0]);
533 set_irn_in(phi, q_preds, in);
536 assert(q_preds <= max_preds);
537 // assert(p_preds == q_preds && "Wrong Phi Fix");
543 /*- Fix the block -*/
545 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
546 pred = get_Block_cfgpred_block(b, i);
549 /* case 1: Do nothing */
550 } else if (get_Block_block_visited(pred) +1
551 < get_irg_block_visited(current_ir_graph)) {
552 /* case 2: It's an empty block and not yet visited. */
553 assert(get_Block_n_cfgpreds(b) > 1);
554 /* Else it should be optimized by equivalent_node. */
555 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
556 ir_node *pred_block = get_Block_cfgpred(pred, j);
558 /* because of breaking loops, not all predecessors are Bad-clean,
559 * so we must check this here again */
560 if (! is_Bad(pred_block))
561 in[n_preds++] = pred_block;
563 /* Remove block as it might be kept alive. */
564 exchange(pred, b/*new_Bad()*/);
/* case 3: pred contains useful code: keep the edge as is */
567 in[n_preds++] = get_Block_cfgpred(b, i);
570 assert(n_preds <= max_preds);
572 set_irn_in(b, n_preds, in);
575 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
580 * Block walker: optimize all blocks using the default optimizations.
581 * This removes Blocks with only a Jmp predecessor.
583 static void remove_simple_blocks(ir_node *block, void *ctx) {
584 ir_node *new_blk = equivalent_node(block);
585 merge_env *env = ctx;
/* equivalent_node() folds trivial blocks; replace and note the change */
587 if (new_blk != block) {
588 exchange(block, new_blk);
594 * Handle pre-optimized table switch Cond's.
595 * During iropt, all Projs from a switch-Cond are already removed except
596 * the defProj and maybe the taken one.
597 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
599 * @param cond the switch-Cond
601 * @return non-zero if a switch-Cond was optimized
603 * Expects all Proj's linked to the cond node
605 static int handle_switch_cond(ir_node *cond) {
606 ir_node *sel = get_Cond_selector(cond);
/* the Proj's of cond were linked into a list by collect_nodes() */
608 ir_node *proj1 = get_irn_link(cond);
609 ir_node *proj2 = get_irn_link(proj1);
612 blk = get_nodes_block(cond);
615 /* this Cond has only one Proj: must be the defProj */
616 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
617 /* convert it into a Jmp */
618 jmp = new_r_Jmp(current_ir_graph, blk);
619 exchange(proj1, jmp);
621 } else if (get_irn_link(proj2) == NULL) {
622 /* We have two Proj's here. Check if the Cond has
623 a constant argument */
624 tarval *tv = value_of(sel);
626 if (tv != tarval_bad) {
627 /* we have a constant switch */
628 long num = get_tarval_long(tv);
629 long def_num = get_Cond_defaultProj(cond);
631 if (def_num == get_Proj_proj(proj1)) {
632 /* first one is the defProj */
633 if (num == get_Proj_proj(proj2)) {
/* the non-default case is taken: turn it into a Jmp, kill the defProj */
634 jmp = new_r_Jmp(current_ir_graph, blk);
635 exchange(proj2, jmp);
636 exchange(proj1, new_Bad());
639 } else if (def_num == get_Proj_proj(proj2)) {
640 /* second one is the defProj */
641 if (num == get_Proj_proj(proj1)) {
642 jmp = new_r_Jmp(current_ir_graph, blk);
643 exchange(proj1, jmp);
644 exchange(proj2, new_Bad());
648 /* neither: strange, Cond was not optimized so far */
649 if (num == get_Proj_proj(proj1)) {
650 jmp = new_r_Jmp(current_ir_graph, blk);
651 exchange(proj1, jmp);
652 exchange(proj2, new_Bad());
654 } else if (num == get_Proj_proj(proj2)) {
655 jmp = new_r_Jmp(current_ir_graph, blk);
656 exchange(proj2, jmp);
657 exchange(proj1, new_Bad());
666 /* Optimizations of the control flow that also require changes of Phi nodes.
668 * This optimization performs two passes over the graph.
670 * The first pass collects all Phi nodes in a link list in the block
671 * nodes. Further it performs simple control flow optimizations.
672 * Finally it marks all blocks that do not contain useful
673 * computations, i.e., these blocks might be removed.
675 * The second pass performs the optimizations intended by this algorithm.
676 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
677 * which it finds in a linked list computed by the first pass.
679 * We use the block_visited flag to mark empty blocks in the first
681 * @@@ It would be better to add a struct in the link field
682 * that keeps the Phi list and the mark. Place it on an obstack, as
683 * we will lose blocks and thereby generate memory leaks.
685 void optimize_cf(ir_graph *irg) {
688 ir_node *cond, *end = get_irg_end(irg);
689 ir_graph *rem = current_ir_graph;
693 assert(get_irg_phase_state(irg) != phase_building);
695 /* if the graph is not pinned, we cannot determine empty blocks */
696 assert(get_irg_pinned(irg) != op_pin_state_floats &&
697 "Control flow optimization need a pinned graph");
698 current_ir_graph = irg;
701 /* FIXME: is this still needed? */
702 edges_deactivate(irg);
707 if (get_opt_optimize() && get_opt_unreachable_code()) {
710 /* kill dead blocks using dom info */
712 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, &env.changed);
714 /* fix the keep-alives */
715 end = get_irg_end(irg);
716 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
717 ir_node *ka = get_End_keepalive(end, i);
720 /* do NOT keep dead blocks */
721 if (get_Block_dom_depth(ka) < 0) {
722 set_End_keepalive(end, i, new_Bad());
725 } else if (is_Block_dead(get_nodes_block(ka)) ||
726 get_Block_dom_depth(get_nodes_block(ka)) < 0) {
727 /* do NOT keep nodes in dead blocks */
728 set_End_keepalive(end, i, new_Bad());
733 irg_block_walk_graph(irg, NULL, remove_senseless_conds, &env.changed);
735 /* Use block visited flag to mark non-empty blocks. */
736 inc_irg_block_visited(irg);
737 set_using_block_visited(irg);
738 set_using_irn_link(irg);
740 env.list = plist_new();
741 irg_walk(end, merge_blocks, collect_nodes, &env);
743 clear_using_block_visited(irg);
744 clear_using_irn_link(irg);
746 /* handle all collected switch-Conds */
747 foreach_plist(env.list, el) {
748 cond = plist_element_get_value(el);
749 env.changed |= handle_switch_cond(cond);
751 plist_free(env.list);
754 /* Handle graph state if was changed. */
755 set_irg_outs_inconsistent(irg);
756 set_irg_doms_inconsistent(irg);
757 set_irg_extblk_inconsistent(irg);
758 set_irg_loopinfo_inconsistent(irg);
761 /* Optimize the standard code. */
764 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
766 /* Walk all keep alives, optimize them if block, add to new in-array
767 for end if useful. */
768 n = get_End_n_keepalives(end);
770 NEW_ARR_A(ir_node *, in, n);
772 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
773 inc_irg_visited(irg);
774 set_using_visited(irg);
776 /* fix the keep alive */
777 for (i = j = 0; i < n; i++) {
778 ir_node *ka = get_End_keepalive(end, i);
780 if (irn_not_visited(ka)) {
781 ir_op *op = get_irn_op(ka);
783 if ((op == op_Block) && Block_not_block_visited(ka)) {
784 /* irg_block_walk() will increase the block visited flag, but we must visit only
785 these blocks that are not visited yet, so decrease it first. */
786 set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
787 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
788 mark_irn_visited(ka);
790 } else if (op == op_Phi) {
791 mark_irn_visited(ka);
792 /* don't keep alive dead blocks */
793 if (! is_Block_dead(get_nodes_block(ka)))
795 } else if (is_op_keep(op)) {
796 mark_irn_visited(ka);
797 if (! is_Block_dead(get_nodes_block(ka)))
803 set_End_keepalives(end, j, in);
807 clear_using_visited(irg);
809 if (env.phis_moved) {
810 /* Bad: when we moved Phi's, we might produce dead Phi nodes
812 Some other phases cannot cope with this, so we kill them.
814 n = get_End_n_keepalives(end);
817 /* Handle graph state if was changed. */
818 set_irg_outs_inconsistent(irg);
/* we need out edges to decide whether a kept Phi has real users */
820 assure_irg_outs(irg);
822 for (i = j = 0; i < n; ++i) {
823 ir_node *ka = get_End_keepalive(end, i);
828 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
829 ir_node *user = get_irn_out(ka, k);
831 if (user != ka && user != end) {
832 /* Is it a real user or just a self loop ? */
842 set_End_keepalives(end, j, in);
849 /* Handle graph state if was changed. */
850 set_irg_outs_inconsistent(irg);
851 set_irg_doms_inconsistent(irg);
852 set_irg_extblk_inconsistent(irg);
853 set_irg_loopinfo_inconsistent(irg);
857 /* the verifier doesn't work yet with floating nodes */
858 if (get_irg_pinned(irg) == op_pin_state_pinned) {
859 /* after optimize_cf(), only Bad data flow may remain. */
860 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
861 dump_ir_block_graph(irg, "-vrfy-cf");
862 dump_ir_graph(irg, "-vrfy-cf");
863 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
/* restore the caller's current_ir_graph */
867 current_ir_graph = rem;