2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
26 * Removes Bad control flow predecessors and empty blocks. A block is empty
27 * if it contains only a Jmp node. Blocks can only be removed if they are not
28 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
29 * blocks (even if we could move the label).
33 #include "iroptimize.h"
40 #include "irgraph_t.h"
54 #include "irbackedge_t.h"
59 #include "irphase_t.h"
61 #include "iropt_dbg.h"
63 /** An environment for merge_blocks and collect nodes. */
64 typedef struct merge_env {
65 bool changed; /**< Set if the graph was changed. */
66 bool phis_moved; /**< Set if Phi nodes were moved. */
69 /** set or reset the removable property of a block. */
70 static void set_Block_removable(ir_node *block, bool removable)
72 set_Block_mark(block, removable);
75 /** check if a block has the removable property set. */
76 static bool is_Block_removable(ir_node *block)
78 return get_Block_mark(block);
81 /** checks if a given Cond node is a switch Cond. */
82 static bool is_switch_Cond(ir_node *cond) {
83 ir_node *sel = get_Cond_selector(cond);
84 return get_irn_mode(sel) != mode_b;
87 /** Walker: clear link fields and mark all blocks as removable. */
88 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
91 set_irn_link(node, NULL);
93 set_Block_removable(node, true);
97 * Collects all Phi nodes in link list of Block.
98 * Marks all blocks "non_removable" if they contain a node other
99 * than Jmp (and Proj).
100 * Links all Proj nodes to their predecessors.
101 * Collects all switch-Conds in a list.
103 static void collect_nodes(ir_node *n, void *ctx)
105 ir_node ***switch_conds = (ir_node***)ctx;
108 /* Collect Phi nodes to compact ins along with block's ins. */
109 ir_node *block = get_nodes_block(n);
110 set_irn_link(n, get_irn_link(block));
111 set_irn_link(block, n);
112 } else if (is_Block(n)) {
113 if (has_Block_entity(n)) {
114 /* block with a jump label attached cannot be removed. */
115 set_Block_removable(n, false);
118 } else if (!is_Jmp(n)) { /* Check for non-empty block. */
119 ir_node *block = get_nodes_block(n);
120 set_Block_removable(block, false);
123 /* link Proj nodes */
124 ir_node *pred = get_Proj_pred(n);
125 set_irn_link(n, get_irn_link(pred));
126 set_irn_link(pred, n);
127 } else if (is_Cond(n) && is_switch_Cond(n)) {
128 /* found a switch-Cond, collect */
129 ARR_APP1(ir_node*, *switch_conds, n);
134 /** Returns true if pred is predecessor of block b. */
135 static bool is_pred_of(ir_node *pred, ir_node *b)
139 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
140 ir_node *b_pred = get_Block_cfgpred_block(b, i);
147 /** Test whether we can optimize away pred block pos of b.
149 * @param b A block node.
150 * @param pos The position of the predecessor block to judge about.
152 * @returns The number of predecessors
154 * The test is rather tricky.
156 * The situation is something like the following:
165 * b merges the control flow of an if-then-else. We may not remove
166 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
167 * node in b, even if both are empty. The destruction of this Phi
168 * requires that a copy is added before the merge. We have to
169 * keep one of the case blocks to place the copies in.
171 * To perform the test for pos, we must regard predecessors before pos
172 * as already removed.
174 static unsigned test_whether_dispensable(ir_node *b, int pos)
176 ir_node *pred = get_Block_cfgpred(b, pos);
177 ir_node *predb = get_nodes_block(pred);
179 if (is_Bad(pred) || !is_Block_removable(predb))
182 /* can't remove self-loops */
184 goto non_dispensable;
185 if (is_unknown_jump(pred))
186 goto non_dispensable;
188 /* Seems to be empty. At least we detected this in collect_nodes. */
189 if (get_irn_link(b) != NULL) {
190 int n_cfgpreds = get_Block_n_cfgpreds(b);
192 /* there are Phi nodes */
194 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
195 * Handle all pred blocks with preds < pos as if they were already
197 for (i = 0; i < pos; i++) {
198 ir_node *other_pred = get_Block_cfgpred(b, i);
199 ir_node *other_predb = get_nodes_block(other_pred);
200 if (is_Bad(other_pred))
202 if (is_Block_removable(other_predb)
203 && !Block_block_visited(other_predb)) {
205 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
206 ir_node *other_predpred
207 = get_Block_cfgpred_block(other_predb, j);
208 if (is_pred_of(other_predpred, predb))
209 goto non_dispensable;
211 } else if (is_pred_of(other_predb, predb)) {
212 goto non_dispensable;
215 for (i = pos+1; i < n_cfgpreds; i++) {
216 ir_node *other_predb = get_Block_cfgpred_block(b, i);
217 if (is_pred_of(other_predb, predb))
218 goto non_dispensable;
221 /* we will not dispense already visited blocks */
222 if (Block_block_visited(predb))
224 /* if we get here, the block is dispensable, count useful preds */
225 return get_irn_arity(predb);
228 set_Block_removable(predb, false);
233 * This method removes empty blocks. A block is empty if it only contains Phi
236 * We first adapt Phi nodes, then Block nodes, as we need the old ins
237 * of the Block to adapt the Phi nodes. We do this by computing new
238 * in arrays, and then replacing the old ones. So far we compute new in arrays
239 * for all nodes, not regarding whether there is a possibility for optimization.
241 * For each predecessor p of a Block b there are three cases:
242 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
244 * - The predecessor p is empty. Remove p. All predecessors of p are now
246 * - The predecessor p is a block containing useful code. Just keep p as is.
248 * For Phi nodes f we have to check the conditions at the Block of f.
249 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
251 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
252 * In this case we proceed as for blocks. We remove pred_f. All
253 * predecessors of pred_f now are predecessors of f.
254 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
255 * too. We have to replicate f for each predecessor of the removed block.
256 * Or, with other words, the removed predecessor block has exactly one
259 * Further there is a special case for self referencing blocks:
262 * then_b else_b then_b else_b
268 * | | | === optimized to ===> \ | | |
275 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
276 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
279 static void optimize_blocks(ir_node *b, void *ctx)
281 int i, j, k, n, max_preds, n_preds, p_preds = -1;
282 ir_node *pred, *phi, *next;
284 merge_env *env = (merge_env*)ctx;
286 if (get_Block_dom_depth(b) < 0) {
287 /* ignore unreachable blocks */
291 /* Count the number of predecessor if this block is merged with pred blocks
294 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
295 max_preds += test_whether_dispensable(b, i);
297 in = XMALLOCN(ir_node*, max_preds);
299 /*- Fix the Phi nodes of the current block -*/
300 for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
302 next = (ir_node*)get_irn_link(phi);
304 /* Find the new predecessors for the Phi */
306 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
307 ir_graph *irg = get_irn_irg(b);
308 pred = get_Block_cfgpred_block(b, i);
311 /* case Phi 1: maintain Bads, as somebody else is responsible to remove them */
312 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
313 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
314 /* case Phi 2: It's an empty block and not yet visited. */
315 ir_node *phi_pred = get_Phi_pred(phi, i);
317 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
318 ir_node *pred_pred = get_Block_cfgpred(pred, j);
320 if (is_Bad(pred_pred)) {
321 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
325 if (get_nodes_block(phi_pred) == pred) {
327 assert(is_Phi(phi_pred)); /* Block is empty!! */
329 in[p_preds++] = get_Phi_pred(phi_pred, j);
332 in[p_preds++] = phi_pred;
337 in[p_preds++] = get_Phi_pred(phi, i);
340 assert(p_preds == max_preds);
344 exchange(phi, in[0]);
346 set_irn_in(phi, p_preds, in);
350 /*- This happens only if merge between loop backedge and single loop entry.
351 Moreover, it is only needed if predb is the direct dominator of b,
352 else there can be no uses of the Phi's in predb ... -*/
353 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
354 ir_node *pred = get_Block_cfgpred(b, k);
355 ir_node *predb = get_nodes_block(pred);
359 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
362 /* we found a predecessor block at position k that will be removed */
363 for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
365 next_phi = (ir_node*)get_irn_link(phi);
369 if (get_Block_idom(b) != predb) {
370 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
371 ir_graph *irg = get_irn_irg(b);
372 ir_mode *mode = get_irn_mode(phi);
373 exchange(phi, new_r_Bad(irg, mode));
375 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
376 predb in further block, so move this phi from the predecessor into the block b */
377 set_nodes_block(phi, b);
378 set_irn_link(phi, get_irn_link(b));
379 set_irn_link(b, phi);
380 env->phis_moved = true;
382 /* first, copy all 0..k-1 predecessors */
383 for (i = 0; i < k; i++) {
384 pred = get_Block_cfgpred_block(b, i);
387 ir_graph *irg = get_irn_irg(b);
388 ir_mode *mode = get_irn_mode(phi);
389 in[q_preds++] = new_r_Bad(irg, mode);
390 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
391 /* It's an empty block and not yet visited. */
392 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
393 if (! is_Bad(get_Block_cfgpred(pred, j))) {
396 ir_graph *irg = get_irn_irg(b);
397 ir_mode *mode = get_irn_mode(phi);
398 in[q_preds++] = new_r_Bad(irg, mode);
406 /* now we are at k, copy the phi predecessors */
407 pred = get_nodes_block(get_Block_cfgpred(b, k));
408 for (i = 0; i < get_Phi_n_preds(phi); i++) {
409 in[q_preds++] = get_Phi_pred(phi, i);
412 /* and now all the rest */
413 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
414 pred = get_Block_cfgpred_block(b, i);
417 ir_graph *irg = get_irn_irg(b);
418 ir_mode *mode = get_irn_mode(phi);
419 in[q_preds++] = new_r_Bad(irg, mode);
420 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
421 /* It's an empty block and not yet visited. */
422 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
423 if (! is_Bad(get_Block_cfgpred(pred, j))) {
426 ir_graph *irg = get_irn_irg(b);
427 ir_mode *mode = get_irn_mode(phi);
428 in[q_preds++] = new_r_Bad(irg, mode);
438 exchange(phi, in[0]);
440 set_irn_in(phi, q_preds, in);
443 assert(q_preds <= max_preds);
444 // assert(p_preds == q_preds && "Wrong Phi Fix");
450 /*- Fix the block -*/
452 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
453 ir_node *pred = get_Block_cfgpred(b, i);
454 ir_node *predb = get_nodes_block(pred);
455 ir_graph *irg = get_irn_irg(pred);
457 /* case 1: Bad predecessor */
459 in[n_preds++] = new_r_Bad(irg, mode_X);
462 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
463 /* case 2: It's an empty block and not yet visited. */
464 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
465 ir_node *predpred = get_Block_cfgpred(predb, j);
467 if (is_Bad(predpred)) {
468 in[n_preds++] = new_r_Bad(irg, mode_X);
472 in[n_preds++] = predpred;
474 /* Remove block+jump as it might be kept alive. */
475 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
476 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
479 in[n_preds++] = pred;
482 assert(n_preds == max_preds);
484 set_irn_in(b, n_preds, in);
487 /* see if phi-fix was correct */
488 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds));
493 * Optimize table-switch Conds.
495 * @param cond the switch-Cond
496 * @return true if the switch-Cond was optimized
498 static bool handle_switch_cond(ir_node *cond)
500 ir_node *sel = get_Cond_selector(cond);
501 ir_node *proj1 = (ir_node*)get_irn_link(cond);
502 ir_node *proj2 = (ir_node*)get_irn_link(proj1);
503 ir_node *blk = get_nodes_block(cond);
505 /* exactly 1 Proj on the Cond node: must be the defaultProj */
507 ir_node *jmp = new_r_Jmp(blk);
508 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
509 /* convert it into a Jmp */
510 exchange(proj1, jmp);
514 /* handle Cond nodes with constant argument. In this case the localopt rules
515 * should have killed all obviously impossible cases.
516 * So the only case left to handle here is 1 defaultProj + 1 case
517 * (this one case should be the one taken) */
518 if (get_irn_link(proj2) == NULL) {
519 ir_tarval *tv = value_of(sel);
521 if (tv != tarval_bad) {
522 /* we have a constant switch */
523 long num = get_tarval_long(tv);
524 long def_num = get_Cond_default_proj(cond);
525 ir_graph *irg = get_irn_irg(cond);
526 ir_node *bad = new_r_Bad(irg, mode_X);
528 if (def_num == get_Proj_proj(proj1)) {
529 /* first one is the defProj */
530 if (num == get_Proj_proj(proj2)) {
531 ir_node *jmp = new_r_Jmp(blk);
532 exchange(proj2, jmp);
533 exchange(proj1, bad);
536 } else if (def_num == get_Proj_proj(proj2)) {
537 /* second one is the defProj */
538 if (num == get_Proj_proj(proj1)) {
539 ir_node *jmp = new_r_Jmp(blk);
540 exchange(proj1, jmp);
541 exchange(proj2, bad);
545 /* neither: strange, Cond was not optimized so far */
546 if (num == get_Proj_proj(proj1)) {
547 ir_node *jmp = new_r_Jmp(blk);
548 exchange(proj1, jmp);
549 exchange(proj2, bad);
551 } else if (num == get_Proj_proj(proj2)) {
552 ir_node *jmp = new_r_Jmp(blk);
553 exchange(proj2, jmp);
554 exchange(proj1, bad);
564 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
565 * Block must contain no Phi nodes.
569 * projA projB => Jmp Bad
573 static bool optimize_pred_cond(ir_node *block, int i, int j)
575 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
578 projA = get_Block_cfgpred(block, i);
579 if (!is_Proj(projA)) return false;
580 projB = get_Block_cfgpred(block, j);
581 if (!is_Proj(projB)) return false;
582 cond = get_Proj_pred(projA);
583 if (!is_Cond(cond)) return false;
585 if (cond != get_Proj_pred(projB)) return false;
586 if (is_switch_Cond(cond)) return false;
588 /* cond should actually be a Jmp */
589 pred_block = get_nodes_block(cond);
590 jmp = new_r_Jmp(pred_block);
591 bad = new_r_Bad(get_irn_irg(block), mode_X);
593 assert(projA != projB);
594 exchange(projA, jmp);
595 exchange(projB, bad);
599 typedef enum block_flags_t {
600 BF_HAS_OPERATIONS = 1 << 0,
601 BF_HAS_PHIS = 1 << 1,
602 BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
605 static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag) {
606 return ((int)phase_get_irn_data(block_info, block)) & flag;
608 static void set_phase_flag(ir_phase *block_info, ir_node *block, block_flags_t flag) {
609 int data = (int)phase_get_irn_data(block_info, block);
611 phase_set_irn_data(block_info, block, (void*)data);
614 static bool has_operations(ir_phase *block_info, ir_node *block) {
615 return get_phase_flag(block_info, block, BF_HAS_OPERATIONS);
617 static void set_has_operations(ir_phase *block_info, ir_node *block) {
618 set_phase_flag(block_info, block, BF_HAS_OPERATIONS);
621 static bool has_phis(ir_phase *block_info, ir_node *block) {
622 return get_phase_flag(block_info, block, BF_HAS_PHIS);
624 static void set_has_phis(ir_phase *block_info, ir_node *block) {
625 set_phase_flag(block_info, block, BF_HAS_PHIS);
628 static bool is_unknown_jump_target(ir_phase *block_info, ir_node *block) {
629 return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
631 static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block) {
632 set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
636 * Walker: fill block info information.
638 static void compute_block_info(ir_node *n, void *x)
640 ir_phase *block_info = (ir_phase *)x;
643 int i, max = get_Block_n_cfgpreds(n);
644 for (i=0; i<max; i++) {
645 ir_node *pred = get_Block_cfgpred(n,i);
646 if (is_unknown_jump(pred)) {
647 set_is_unknown_jump_target(block_info, n);
650 } else if (is_Phi(n)) {
651 ir_node *block = get_nodes_block(n);
652 set_has_phis(block_info, block);
653 } else if (is_Jmp(n) || is_Cond(n) || is_Cmp(n) || is_Proj(n)) {
656 ir_node *block = get_nodes_block(n);
657 set_has_operations(block_info, block);
661 typedef struct skip_env {
667 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
668 * with same true/false target)
671 static void optimize_ifs(ir_node *block, void *x)
673 skip_env *env = (skip_env*)x;
675 int n_preds = get_Block_n_cfgpreds(block);
677 if (has_phis(env->phase, block))
680 /* optimize Cond predecessors (might produce Bad predecessors) */
681 for (i = 0; i < n_preds; ++i) {
682 for (j = i+1; j < n_preds; ++j) {
683 optimize_pred_cond(block, i, j);
689 * Pre-Block walker: remove empty blocks that are
690 * predecessors of the current block.
692 static void remove_empty_blocks(ir_node *block, void *x)
694 skip_env *env = (skip_env*)x;
696 int n_preds = get_Block_n_cfgpreds(block);
698 for (i = 0; i < n_preds; ++i) {
699 ir_node *jmp, *jmp_block, *pred, *pred_block;
701 jmp = get_Block_cfgpred(block, i);
704 jmp_block = get_nodes_block(jmp);
705 if (is_unknown_jump_target(env->phase, jmp_block))
707 if (has_operations(env->phase,jmp_block))
709 /* jmp_block is an empty block! */
711 if (get_Block_n_cfgpreds(jmp_block) != 1)
713 pred = get_Block_cfgpred(jmp_block, 0);
717 /* cleanup: jmp_block might have a Keep edge! */
718 pred_block = get_nodes_block(pred);
719 exchange(jmp_block, pred_block);
724 * Some cfg optimizations, which do not touch Phi nodes
726 static void cfgopt_ignoring_phis(ir_graph *irg) {
727 ir_phase *block_info = new_phase(irg, NULL);
728 skip_env env = { false, block_info };
730 irg_walk_graph(irg, compute_block_info, NULL, block_info);
735 /* optimize useless ifs: will not touch empty blocks */
736 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
738 /* Remove empty blocks */
739 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
741 set_irg_doms_inconsistent(irg);
742 /* Removing blocks might enable more useless-if optimizations */
749 phase_free(block_info);
752 /* Optimizations of the control flow that also require changes of Phi nodes. */
753 void optimize_cf(ir_graph *irg)
757 ir_node *end = get_irg_end(irg);
762 env.phis_moved = false;
764 assert(get_irg_phase_state(irg) != phase_building);
766 /* if the graph is not pinned, we cannot determine empty blocks */
767 assert(get_irg_pinned(irg) != op_pin_state_floats &&
768 "Control flow optimization need a pinned graph");
770 /* FIXME: control flow opt destroys block edges. So edges are deactivated
771 * here. Fix the edges! */
772 edges_deactivate(irg);
774 cfgopt_ignoring_phis(irg);
776 /* we use the mark flag to mark removable blocks */
777 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
779 /* The switch Cond optimization might expose unreachable code, so we loop */
782 ir_node **switch_conds = NULL;
783 bool changed = false;
788 * This pass collects all Phi nodes in a link list in the block
789 * nodes. Further it performs simple control flow optimizations.
790 * Finally it marks all blocks that do not contain useful
791 * computations, i.e., these blocks might be removed.
793 switch_conds = NEW_ARR_F(ir_node*, 0);
794 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds);
796 /* handle all collected switch-Conds */
797 length = ARR_LEN(switch_conds);
798 for (i = 0; i < length; ++i) {
799 ir_node *cond = switch_conds[i];
800 changed |= handle_switch_cond(cond);
802 DEL_ARR_F(switch_conds);
807 set_irg_doms_inconsistent(irg);
808 set_irg_extblk_inconsistent(irg);
809 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
812 /* assert due to collect_nodes:
813 * 1. removable blocks are now marked as such
814 * 2. phi lists are up to date
817 /* Optimize the standard code.
818 * It walks only over block nodes and adapts these and the Phi nodes in these
819 * blocks, which it finds in a linked list computed before.
822 irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
824 new_end = optimize_in_place(end);
825 if (new_end != end) {
826 set_irg_end(irg, new_end);
829 remove_End_Bads_and_doublets(end);
831 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
833 if (env.phis_moved) {
834 /* Bad: when we moved Phi's, we might produce dead Phi nodes
836 Some other phases cannot copy with this, so kill them.
838 n = get_End_n_keepalives(end);
840 NEW_ARR_A(ir_node *, in, n);
841 assure_irg_outs(irg);
843 for (i = j = 0; i < n; ++i) {
844 ir_node *ka = get_End_keepalive(end, i);
849 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
850 ir_node *user = get_irn_out(ka, k);
852 if (user != ka && user != end) {
853 /* Is it a real user or just a self loop ? */
863 set_End_keepalives(end, j, in);
870 /* Handle graph state if was changed. */
871 set_irg_doms_inconsistent(irg);
872 set_irg_extblk_inconsistent(irg);
873 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
877 /* Creates an ir_graph pass for optimize_cf. */
878 ir_graph_pass_t *optimize_cf_pass(const char *name)
880 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);