2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
28 #include "iroptimize.h"
35 #include "irgraph_t.h"
49 #include "irbackedge_t.h"
55 #include "iropt_dbg.h"
57 /*------------------------------------------------------------------*/
58 /* Control flow optimization. */
60 /* Removes Bad control flow predecessors and empty blocks. A block */
61 /* is empty if it contains only a Jmp node. */
62 /* Blocks can only be removed if they are not needed for the */
63 /* semantics of Phi nodes. */
64 * Further, we NEVER remove labeled blocks (even if we could move the label). */
66 /*------------------------------------------------------------------*/
/* The block mark flag encodes removability during this pass:
 * mark == 1 means the block is a candidate for removal (appears to contain
 * only Jmp/Proj/Phi nodes — see collect_nodes), mark == 0 means keep it. */
68 #define set_Block_removable(block) set_Block_mark(block, 1)
69 #define set_Block_non_removable(block) set_Block_mark(block, 0)
70 #define is_Block_removable(block) (get_Block_mark(block) != 0)
73 * Replace binary Conds that jump twice into the same block
79 * ProjX True ProjX False ==> | /
84 * Such patterns are the result of if-conversion.
86 * Note that the simple case that Block has only these two
87 * predecessors are already handled in equivalent_node_Block().
 *
 * @param bl  the block whose control-flow predecessors are inspected
 * @return presumably non-zero if something was changed — the return
 *         statement is not visible in this view; TODO confirm.
89 static int remove_senseless_conds(ir_node *bl)
92 int n = get_Block_n_cfgpreds(bl);
 /* Compare every pair (i, j) of predecessors of bl. */
95 for (i = 0; i < n; ++i) {
96 ir_node *pred_i = get_Block_cfgpred(bl, i);
97 ir_node *cond_i = skip_Proj(pred_i);
 /* Only boolean (binary) Conds are of interest here; switch-Conds
  * with integer selector mode are handled elsewhere. */
100 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
102 for (j = i + 1; j < n; ++j) {
103 ir_node *pred_j = get_Block_cfgpred(bl, j);
104 ir_node *cond_j = skip_Proj(pred_j);
 /* Both predecessors come from the same Cond: both outcomes
  * jump into bl, so the Cond is senseless.  Replace one edge
  * by an unconditional Jmp and kill the other with Bad. */
106 if (cond_j == cond_i) {
107 ir_graph *irg = get_irn_irg(bl);
108 ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
109 set_irn_n(bl, i, jmp);
110 set_irn_n(bl, j, new_r_Bad(irg));
112 DBG_OPT_IFSIM2(cond_i, jmp);
122 /** An environment for merge_blocks and collect nodes. */
123 typedef struct merge_env {
124 int changed; /**< Set if the graph was changed. */
125 int phis_moved; /**< Set if Phi nodes were moved. */
126 plist_t *list; /**< Helper list for all found Switch Conds. */
130 * Removes Tuples from Block control flow predecessors.
131 * Optimizes blocks with equivalent_node(). This is tricky,
132 * as we want to avoid nodes that have as block predecessor Bads.
133 * Therefore we also optimize at control flow operations, depending
134 * how we first reach the Block.
 *
 * Used as the pre-walker of the irg_walk() in optimize_cf(); it also
 * clears the link field of every visited node for later reuse.
 *
 * @param node  the currently visited node
 * @param ctx   the merge_env (cast from void*)
136 static void merge_blocks(ir_node *node, void *ctx)
140 merge_env *env = (merge_env*)ctx;
142 /* clear the link field for ALL nodes first */
143 set_irn_link(node, NULL);
145 if (is_Block(node)) {
 /* Remove Tuple nodes from the control-flow predecessor list. */
147 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
148 ir_node *pred = get_Block_cfgpred(node, i);
149 ir_node *skipped = skip_Tuple(pred);
150 if (pred != skipped) {
151 set_Block_cfgpred(node, i, skipped);
 /* Try to replace the block by an equivalent one, but never by a
  * dead block. */
157 new_block = equivalent_node(node);
158 if (new_block != node && ! is_Block_dead(new_block)) {
159 exchange(node, new_block);
163 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
164 /* We will soon visit a block. Optimize it before visiting! */
165 ir_node *b = get_nodes_block(skip_Proj(node));
167 if (!is_Block_dead(b)) {
168 new_block = equivalent_node(b);
 /* Iterate: exchanging b may enable further simplification. */
170 while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
171 /* We would have to run gigo() if new is bad, so we
172 promote it directly below. Nevertheless, we sometimes reach a block
173 the first time through a dataflow node. In this case we optimized the
174 block as such and have to promote the Bad here. */
175 exchange(b, new_block);
178 new_block = equivalent_node(b);
181 /* normally, we would create a Bad block here, but this must be
182 * prevented, so just set its cf to Bad.
184 if (is_Block_dead(new_block)) {
185 ir_graph *irg = get_irn_irg(node);
186 exchange(node, new_r_Bad(irg));
194 * Block walker removing control flow from dead block by
195 * inspecting dominance info.
196 * Do not replace blocks by Bad. This optimization shall
197 * ensure, that all Bad control flow predecessors are
198 * removed, and no new other Bads are introduced.
199 * Further removes useless Conds and clears the mark of all blocks.
201 * Must be run in the post walker.
 *
 * @param block  the currently visited block
 * @param env    pointer to the caller's "changed" flag (cast from void*)
203 static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
206 int *changed = (int*)env;
208 /* Check block predecessors and turn control flow into bad.
209 Beware of Tuple, kill them. */
210 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
211 ir_node *pred_X = get_Block_cfgpred(block, i);
212 ir_node *skipped = skip_Tuple(pred_X);
214 if (! is_Bad(skipped)) {
215 ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
 /* A negative dominance depth marks a block that is unreachable
  * from Start; treat it like a dead block. */
217 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
218 ir_graph *irg = get_irn_irg(block);
219 set_Block_dead(pred_bl);
220 exchange(pred_X, new_r_Bad(irg));
222 } else if (skipped != pred_X) {
223 set_Block_cfgpred(block, i, skipped);
 /* Also kill boolean Conds whose branches both end here. */
229 *changed |= remove_senseless_conds(block);
231 /* clear the block mark of all non labeled blocks */
232 if (has_Block_entity(block))
233 set_Block_non_removable(block);
235 set_Block_removable(block);
239 * Collects all Phi nodes in link list of Block.
240 * Marks all blocks "non_removable" if they contain a node other
241 * than Jmp (and Proj).
242 * Links all Proj nodes to their predecessors.
243 * Collects all switch-Conds in a list.
 *
 * Used as the post-walker of the irg_walk() in optimize_cf().
 *
 * @param n    the currently visited node
 * @param ctx  the merge_env (cast from void*)
245 static void collect_nodes(ir_node *n, void *ctx)
247 unsigned code = get_irn_opcode(n);
248 merge_env *env = (merge_env*)ctx;
250 if (code == iro_Block) {
251 /* mark the block as non-removable if it is labeled */
252 if (has_Block_entity(n))
253 set_Block_non_removable(n);
255 ir_node *b = get_nodes_block(n);
257 if (code == iro_Phi && get_irn_arity(n) > 0) {
258 /* Collect Phi nodes to compact ins along with block's ins. */
259 set_irn_link(n, get_irn_link(b));
261 } else if (code != iro_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
262 set_Block_non_removable(b);
 /* Chain Projs onto their predecessor via the link field; this is
  * later consumed by handle_switch_cond(). */
264 if (code == iro_Proj) { /* link Proj nodes */
265 ir_node *pred = get_Proj_pred(n);
267 set_irn_link(n, get_irn_link(pred));
268 set_irn_link(pred, n);
269 } else if (code == iro_Cond) {
270 ir_node *sel = get_Cond_selector(n);
 /* An integer-mode selector marks a switch-Cond (boolean Conds
  * were already handled by remove_senseless_conds). */
271 if (mode_is_int(get_irn_mode(sel))) {
272 /* found a switch-Cond, collect */
273 plist_insert_back(env->list, n);
280 /** Returns true if pred is predecessor of block.
 *  NOTE(review): the comparison/return lines of the loop body are not
 *  visible in this view; presumably each b_pred is compared against pred. */
281 static int is_pred_of(ir_node *pred, ir_node *b)
 /* Walk all control-flow predecessor blocks of b. */
285 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
286 ir_node *b_pred = get_Block_cfgpred_block(b, i);
293 /** Test whether we can optimize away pred block pos of b.
295 * @param b A block node.
296 * @param pos The position of the predecessor block to judge about.
298 * @returns The number of predecessors
300 * The test is rather tricky.
302 * The situation is something like the following:
311 * b merges the control flow of an if-then-else. We may not remove
312 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
313 * node in b, even if both are empty. The destruction of this Phi
314 * requires that a copy is added before the merge. We have to
315 * keep one of the case blocks to place the copies in.
317 * To perform the test for pos, we must regard predecessors before pos
318 * as already removed.
320 static int test_whether_dispensable(ir_node *b, int pos)
 /* n_preds == 1 is the "keep the block" answer: b still contributes
  * exactly one in-edge at position pos. */
322 int i, j, n_preds = 1;
323 ir_node *pred = get_Block_cfgpred_block(b, pos);
325 /* Bad blocks will be optimized away, so we don't need space for them */
326 if (is_Block_dead(pred))
329 if (is_Block_removable(pred)) {
330 /* Seems to be empty. At least we detected this in collect_nodes. */
 /* The block's link field holds its Phi list (set by collect_nodes). */
331 if (get_irn_link(b) == NULL) {
332 /* There are no Phi nodes ==> all predecessors are dispensable. */
333 n_preds = get_Block_n_cfgpreds(pred);
335 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
336 Handle all pred blocks with preds < pos as if they were already removed. */
337 for (i = 0; i < pos; i++) {
338 ir_node *b_pred = get_Block_cfgpred_block(b, i);
339 if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
 /* b_pred will itself be merged away, so check ITS preds too. */
340 for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
341 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
342 if (is_pred_of(b_pred_pred, pred))
343 goto non_dispensable;
346 if (is_pred_of(b_pred, pred))
347 goto non_dispensable;
 /* Predecessors after pos are checked as-is (not yet removed). */
350 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
351 ir_node *b_pred = get_Block_cfgpred_block(b, i);
352 if (is_pred_of(b_pred, pred))
353 goto non_dispensable;
355 /* if we get here, the block is dispensable */
356 n_preds = get_Block_n_cfgpreds(pred);
 /* non_dispensable fallback: pred must be kept; make sure nobody
  * tries to remove it later. (Label/return lines not visible here.) */
363 set_Block_non_removable(pred);
368 * This method removes Bad cf predecessors from Blocks and Phis, and removes
369 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
371 * We first adapt Phi nodes, then Block nodes, as we need the old ins
372 * of the Block to adapt the Phi nodes. We do this by computing new
373 * in arrays, and then replacing the old ones. So far we compute new in arrays
374 * for all nodes, not regarding whether there is a possibility for optimization.
376 * For each predecessor p of a Block b there are three cases:
377 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
378 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
380 * -3. The predecessor p is a block containing useful code. Just keep p as is.
382 * For Phi nodes f we have to check the conditions at the Block of f.
383 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
385 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
386 * case we proceed as for blocks. We remove pred_f. All
387 * predecessors of pred_f now are predecessors of f.
388 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
389 * We have to replicate f for each predecessor of the removed block. Or, with
390 * other words, the removed predecessor block has exactly one predecessor.
392 * Further there is a special case for self referencing blocks:
395 * then_b else_b then_b else_b
401 * | | | === optimized to ===> \ | | |
408 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
409 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
411 * @@@ It is negotiable whether we should do this ... there might end up a copy
412 * from the Phi in the loop when removing the Phis.
414 static void optimize_blocks(ir_node *b, void *ctx)
416 int i, j, k, n, max_preds, n_preds, p_preds = -1;
417 ir_node *pred, *phi, *next;
419 merge_env *env = (merge_env*)ctx;
421 /* Count the number of predecessor if this block is merged with pred blocks
 * that are dispensable; max_preds is an upper bound for the new in arrays. */
424 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
425 max_preds += test_whether_dispensable(b, i);
427 in = XMALLOCN(ir_node*, max_preds);
429 /*- Fix the Phi nodes of the current block -*/
 /* The block's link field chains its Phi nodes (built by collect_nodes). */
430 for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
432 next = (ir_node*)get_irn_link(phi);
434 /* Find the new predecessors for the Phi */
436 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
437 pred = get_Block_cfgpred_block(b, i);
439 if (is_Block_dead(pred)) {
440 /* case Phi 1: Do nothing */
441 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
442 /* case Phi 2: It's an empty block and not yet visited. */
443 ir_node *phi_pred = get_Phi_pred(phi, i);
445 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
446 /* because of breaking loops, not all predecessors are Bad-clean,
447 * so we must check this here again */
448 if (! is_Bad(get_Block_cfgpred(pred, j))) {
449 if (get_nodes_block(phi_pred) == pred) {
 /* case Phi 2a: inline the preds of the Phi in the removed block. */
451 assert(is_Phi(phi_pred)); /* Block is empty!! */
453 in[p_preds++] = get_Phi_pred(phi_pred, j);
 /* case Phi 2b: replicate the same value for each pred of pred. */
456 in[p_preds++] = phi_pred;
 /* case Phi 3: keep the Phi predecessor as-is. */
462 in[p_preds++] = get_Phi_pred(phi, i);
465 assert(p_preds <= max_preds);
469 /* By removal of Bad ins the Phi might be degenerated. */
470 exchange(phi, in[0]);
472 set_irn_in(phi, p_preds, in);
476 /*- This happens only if merge between loop backedge and single loop entry.
477 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
478 of the Phi's in predb ... -*/
479 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
480 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
482 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
485 /* we found a predecessor block at position k that will be removed */
486 for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
488 next_phi = (ir_node*)get_irn_link(phi);
492 if (get_Block_idom(b) != predb) {
493 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
494 ir_graph *irg = get_irn_irg(b);
495 exchange(phi, new_r_Bad(irg));
497 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
498 predb in further block, so move this phi from the predecessor into the block b */
499 set_nodes_block(phi, b);
 /* Link the moved Phi into b's Phi list so later walkers see it. */
500 set_irn_link(phi, get_irn_link(b));
501 set_irn_link(b, phi);
504 /* first, copy all 0..k-1 predecessors */
505 for (i = 0; i < k; i++) {
506 pred = get_Block_cfgpred_block(b, i);
508 if (is_Block_dead(pred)) {
510 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
511 /* It's an empty block and not yet visited. */
512 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
513 if (! is_Bad(get_Block_cfgpred(pred, j)))
521 /* now we are at k, copy the phi predecessors */
522 pred = get_nodes_block(get_Block_cfgpred(b, k));
523 for (i = 0; i < get_Phi_n_preds(phi); i++) {
524 if (! is_Bad(get_Block_cfgpred(pred, i)))
525 in[q_preds++] = get_Phi_pred(phi, i);
528 /* and now all the rest */
529 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
530 pred = get_Block_cfgpred_block(b, i);
532 if (is_Block_dead(pred)) {
534 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
535 /* It's an empty block and not yet visited. */
536 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
537 if (! is_Bad(get_Block_cfgpred(pred, j)))
 /* A degenerated Phi collapses to its single remaining operand. */
547 exchange(phi, in[0]);
549 set_irn_in(phi, q_preds, in);
552 assert(q_preds <= max_preds);
553 // assert(p_preds == q_preds && "Wrong Phi Fix");
559 /*- Fix the block -*/
561 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
562 pred = get_Block_cfgpred_block(b, i);
564 if (is_Block_dead(pred)) {
565 /* case 1: Do nothing */
566 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
567 /* case 2: It's an empty block and not yet visited. */
568 assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
569 /* Else it should be optimized by equivalent_node. */
570 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
571 ir_node *pred_X = get_Block_cfgpred(pred, j);
573 /* because of breaking loops, not all predecessors are Bad-clean,
574 * so we must check this here again */
575 if (! is_Bad(pred_X))
576 in[n_preds++] = pred_X;
578 /* Remove block as it might be kept alive. */
579 exchange(pred, b/*new_r_Bad(irg)*/);
 /* case 3: keep the predecessor edge unchanged. */
582 in[n_preds++] = get_Block_cfgpred(b, i);
585 assert(n_preds <= max_preds);
587 set_irn_in(b, n_preds, in);
590 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
595 * Block walker: optimize all blocks using the default optimizations.
596 * This removes Blocks that have only a Jmp predecessor.
 *
 * @param block  the currently visited block
 * @param ctx    the merge_env (cast from void*)
598 static void remove_simple_blocks(ir_node *block, void *ctx)
600 merge_env *env = (merge_env*)ctx;
601 ir_node *new_blk = equivalent_node(block);
 /* equivalent_node() returned a simpler replacement: use it. */
603 if (new_blk != block) {
604 exchange(block, new_blk);
610 * Handle pre-optimized table switch Cond's.
611 * During iropt, all Projs from a switch-Cond are already removed except
612 * the defProj and maybe the taken one.
613 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
615 * @param cond the switch-Cond
617 * @return non-zero if a switch-Cond was optimized
619 * Expects all Proj's linked to the cond node
 * (the link chain is built by collect_nodes).
621 static int handle_switch_cond(ir_node *cond)
623 ir_node *sel = get_Cond_selector(cond);
 /* Walk the Proj chain hanging off the Cond's link field. */
625 ir_node *proj1 = (ir_node*)get_irn_link(cond);
626 ir_node *proj2 = (ir_node*)get_irn_link(proj1);
629 blk = get_nodes_block(cond);
632 /* this Cond has only one Proj: must be the defProj */
633 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
634 /* convert it into a Jmp */
635 jmp = new_r_Jmp(blk);
636 exchange(proj1, jmp);
 /* Exactly two Projs remain (proj2 has no successor in the chain). */
638 } else if (get_irn_link(proj2) == NULL) {
639 /* We have two Proj's here. Check if the Cond has
640 a constant argument */
641 ir_tarval *tv = value_of(sel);
643 if (tv != tarval_bad) {
644 /* we have a constant switch */
645 long num = get_tarval_long(tv);
646 long def_num = get_Cond_default_proj(cond);
647 ir_graph *irg = get_irn_irg(cond);
 /* In each case: the Proj matching the constant becomes a Jmp,
  * the other Proj becomes Bad. */
649 if (def_num == get_Proj_proj(proj1)) {
650 /* first one is the defProj */
651 if (num == get_Proj_proj(proj2)) {
652 jmp = new_r_Jmp(blk);
653 exchange(proj2, jmp);
654 exchange(proj1, new_r_Bad(irg));
657 } else if (def_num == get_Proj_proj(proj2)) {
658 /* second one is the defProj */
659 if (num == get_Proj_proj(proj1)) {
660 jmp = new_r_Jmp(blk);
661 exchange(proj1, jmp);
662 exchange(proj2, new_r_Bad(irg));
666 /* neither: strange, Cond was not optimized so far */
667 if (num == get_Proj_proj(proj1)) {
668 jmp = new_r_Jmp(blk);
669 exchange(proj1, jmp);
670 exchange(proj2, new_r_Bad(irg));
672 } else if (num == get_Proj_proj(proj2)) {
673 jmp = new_r_Jmp(blk);
674 exchange(proj2, jmp);
675 exchange(proj1, new_r_Bad(irg));
684 /* Optimizations of the control flow that also require changes of Phi nodes.
686 * This optimization performs two passes over the graph.
688 * The first pass collects all Phi nodes in a link list in the block
689 * nodes. Further it performs simple control flow optimizations.
690 * Finally it marks all blocks that do not contain useful
691 * computations, i.e., these blocks might be removed.
693 * The second pass performs the optimizations intended by this algorithm.
694 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
695 * which it finds in a linked list computed by the first pass.
697 * We use the mark flag to mark removable blocks in the first
 * pass.
700 void optimize_cf(ir_graph *irg)
702 int i, j, n, changed;
704 ir_node *cond, *end = get_irg_end(irg);
708 assert(get_irg_phase_state(irg) != phase_building);
710 /* if the graph is not pinned, we cannot determine empty blocks */
711 assert(get_irg_pinned(irg) != op_pin_state_floats &&
712 "Control flow optimization need a pinned graph");
714 /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
715 edges_deactivate(irg);
717 /* we use the mark flag to mark removable blocks */
718 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
723 /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
724 Use dominator info to kill blocks. Also optimize useless Conds. */
726 irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
728 /* fix the keep-alives */
730 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
731 ir_node *ka = get_End_keepalive(end, i);
734 /* do NOT keep dead blocks */
735 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
736 set_End_keepalive(end, i, new_r_Bad(irg));
 /* ka is a non-block node: judge it by the block it lives in. */
740 ir_node *block = get_nodes_block(ka);
742 if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
743 /* do NOT keep nodes in dead blocks */
744 set_End_keepalive(end, i, new_r_Bad(irg));
749 env.changed |= changed;
 /* The node link fields are used by merge_blocks/collect_nodes. */
751 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
753 env.list = plist_new();
754 irg_walk(end, merge_blocks, collect_nodes, &env);
756 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
759 /* Handle graph state if was changed. */
760 set_irg_outs_inconsistent(irg);
761 set_irg_doms_inconsistent(irg);
762 set_irg_extblk_inconsistent(irg);
763 set_irg_loopinfo_inconsistent(irg);
764 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
768 /* handle all collected switch-Conds */
769 foreach_plist(env.list, el) {
770 cond = (ir_node*)plist_element_get_value(el);
771 env.changed |= handle_switch_cond(cond);
773 plist_free(env.list);
776 /* The Cond optimization might generate unreachable code, so restart if
 * anything changed (restart logic not visible in this view). */
781 /* Optimize the standard code. */
784 irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
786 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
787 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
788 inc_irg_visited(irg);
790 /* fix the keep-alives again */
792 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
793 ir_node *ka = get_End_keepalive(end, i);
796 /* do NOT keep dead blocks */
797 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
798 set_End_keepalive(end, i, new_r_Bad(irg));
802 ir_node *block = get_nodes_block(ka);
804 if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
805 /* do NOT keep nodes in dead blocks */
806 set_End_keepalive(end, i, new_r_Bad(irg));
811 env.changed |= changed;
813 remove_End_Bads_and_doublets(end);
816 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
818 if (env.phis_moved) {
819 /* Bad: when we moved Phi's, we might produce dead Phi nodes
 * that are kept alive only by the End node.
821 Some other phases cannot cope with this, so we kill them here.
823 n = get_End_n_keepalives(end);
825 NEW_ARR_A(ir_node *, in, n);
827 /* Handle graph state if was changed. */
828 set_irg_outs_inconsistent(irg);
830 assure_irg_outs(irg);
 /* Compact the keep-alive list: drop Phis without real users. */
832 for (i = j = 0; i < n; ++i) {
833 ir_node *ka = get_End_keepalive(end, i);
838 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
839 ir_node *user = get_irn_out(ka, k);
841 if (user != ka && user != end) {
842 /* Is it a real user or just a self loop ? */
852 set_End_keepalives(end, j, in);
859 /* Handle graph state if was changed. */
860 set_irg_outs_inconsistent(irg);
861 set_irg_doms_inconsistent(irg);
862 set_irg_extblk_inconsistent(irg);
863 set_irg_loopinfo_inconsistent(irg);
864 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
868 /* the verifier doesn't work yet with floating nodes */
869 if (get_irg_pinned(irg) == op_pin_state_pinned) {
870 /* after optimize_cf(), only Bad data flow may remain. */
871 if (irg_verify_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
872 dump_ir_graph(irg, "-verify-cf");
873 fprintf(stderr, "VERIFY_BAD in optimize_cf()\n");
878 /* Creates an ir_graph pass for optimize_cf.
 *
 * @param name  the pass name, or NULL to use the default "optimize_cf"
 * @return the newly created graph pass */
879 ir_graph_pass_t *optimize_cf_pass(const char *name)
881 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
882 } /* optimize_cf_pass */