2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
26 * Removes Bad control flow predecessors and empty blocks. A block is empty
27 * if it contains only a Jmp node. Blocks can only be removed if they are not
28 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
29 * blocks (even if we could move the label).
33 #include "iroptimize.h"
40 #include "irgraph_t.h"
54 #include "irbackedge_t.h"
59 #include "irphase_t.h"
61 #include "iropt_dbg.h"
63 /** An environment for merge_blocks and collect nodes. */
64 typedef struct merge_env {
65 bool changed; /**< Set if the graph was changed. */
66 bool phis_moved; /**< Set if Phi nodes were moved. */
69 /** set or reset the removable property of a block. */
/* Stored in the generic block mark bit, which optimize_cf() reserves via
 * IR_RESOURCE_BLOCK_MARK ("we use the mark flag to mark removable blocks"). */
70 static void set_Block_removable(ir_node *block, bool removable)
72 set_Block_mark(block, removable);
75 /** check if a block has the removable property set. */
/* Reads the block mark bit set by set_Block_removable(). */
76 static bool is_Block_removable(ir_node *block)
78 return get_Block_mark(block);
81 /** checks if a given Cond node is a switch Cond. */
/* A Cond whose selector mode is not mode_b (boolean) is a multi-way
 * (table-switch) Cond; boolean Conds have exactly two Proj successors. */
82 static bool is_switch_Cond(ir_node *cond) {
83 ir_node *sel = get_Cond_selector(cond);
84 return get_irn_mode(sel) != mode_b;
87 /** Walker: clear link fields and mark all blocks as removable. */
/* Pre-walker used by optimize_cf(): resets the per-node link (later used to
 * chain Phis/Projs in collect_nodes) and optimistically marks blocks
 * removable; collect_nodes clears the flag again for non-empty blocks. */
88 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
91 set_irn_link(node, NULL);
/* NOTE(review): a line is elided here in this listing — presumably an
 * is_Block(node) guard precedes the mark below; confirm in the full file. */
93 set_Block_removable(node, true);
97 * Collects all Phi nodes in link list of Block.
98 * Marks all blocks "non_removable" if they contain a node other
99 * than Jmp (and Proj).
100 * Links all Proj nodes to their predecessors.
101 * Collects all switch-Conds in a list.
/* Walker (ctx is a pointer to the growing switch_conds flexible array).
 * NOTE(review): the branch heads for the Phi and Proj cases are elided in
 * this listing — presumably `if (is_Phi(n))` opens the first case and
 * `else if (is_Proj(n))` the Proj-linking case; confirm in the full file. */
103 static void collect_nodes(ir_node *n, void *ctx)
105 ir_node ***switch_conds = (ir_node***)ctx;
108 /* Collect Phi nodes to compact ins along with block's ins. */
/* Prepend the Phi to its block's link list (block->link heads the list). */
109 ir_node *block = get_nodes_block(n);
110 set_irn_link(n, get_irn_link(block));
111 set_irn_link(block, n);
112 } else if (is_Block(n)) {
113 if (has_Block_entity(n)) {
114 /* block with a jump label attached cannot be removed. */
115 set_Block_removable(n, false);
118 } else if (!is_Jmp(n)) { /* Check for non-empty block. */
/* Any node other than Jmp/Proj/Phi makes its block non-removable. */
119 ir_node *block = get_nodes_block(n);
120 set_Block_removable(block, false);
123 /* link Proj nodes */
/* Chain Projs onto their predecessor so handle_switch_cond() can walk
 * the Projs of a Cond via get_irn_link(). */
124 ir_node *pred = get_Proj_pred(n);
125 set_irn_link(n, get_irn_link(pred));
126 set_irn_link(pred, n);
127 } else if (is_Cond(n) && is_switch_Cond(n)) {
128 /* found a switch-Cond, collect */
129 ARR_APP1(ir_node*, *switch_conds, n);
134 /** Returns true if pred is predecessor of block b. */
/* Scans b's control-flow predecessor blocks from last to first.
 * NOTE(review): the loop body's comparison and the return statements are
 * elided in this listing — presumably `if (b_pred == pred) return true;`
 * inside the loop and `return false;` after it; confirm in the full file. */
135 static bool is_pred_of(ir_node *pred, ir_node *b)
139 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
140 ir_node *b_pred = get_Block_cfgpred_block(b, i);
147 /** Test whether we can optimize away pred block pos of b.
149 * @param b A block node.
150 * @param pos The position of the predecessor block to judge about.
152 * @returns The number of predecessors
154 * The test is rather tricky.
156 * The situation is something like the following:
165 * b merges the control flow of an if-then-else. We may not remove
166 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
167 * node in b, even if both are empty. The destruction of this Phi
168 * requires that a copy is added before the merge. We have to
169 * keep one of the case blocks to place the copies in.
171 * To perform the test for pos, we must regard predecessors before pos
172 * as already removed.
/* Returns the arity of the predecessor block when it is dispensable (it will
 * be merged into b, contributing all of its own predecessors).
 * NOTE(review): the non_dispensable exit path (label and its return) is
 * elided in this listing — presumably it returns 1 (the edge is kept as-is)
 * after marking predb non-removable below; confirm in the full file. */
174 static unsigned test_whether_dispensable(ir_node *b, int pos)
176 ir_node *pred = get_Block_cfgpred(b, pos);
177 ir_node *predb = get_nodes_block(pred);
/* Bad edges and blocks already marked non-removable are kept unchanged. */
179 if (is_Bad(pred) || !is_Block_removable(predb))
182 /* can't remove self-loops */
184 goto non_dispensable;
185 if (is_unknown_jump(pred))
186 goto non_dispensable;
188 /* Seems to be empty. At least we detected this in collect_nodes. */
189 if (get_irn_link(b) != NULL) {
190 int n_cfgpreds = get_Block_n_cfgpreds(b);
192 /* there are Phi nodes */
194 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
195 * Handle all pred blocks with preds < pos as if they were already
197 for (i = 0; i < pos; i++) {
198 ir_node *other_pred = get_Block_cfgpred(b, i)
199 ir_node *other_predb = get_nodes_block(other_pred);
200 if (is_Bad(other_pred))
/* A removable, not-yet-merged predecessor before pos will itself be
 * replaced by its own predecessors — check those for overlap instead. */
202 if (is_Block_removable(other_predb)
203 && !Block_block_visited(other_predb)) {
205 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
206 ir_node *other_predpred
207 = get_Block_cfgpred_block(other_predb, j);
208 if (is_pred_of(other_predpred, predb))
209 goto non_dispensable;
211 } else if (is_pred_of(other_predb, predb)) {
212 goto non_dispensable;
/* Predecessors after pos are still present in their original form. */
215 for (i = pos+1; i < n_cfgpreds; i++) {
216 ir_node *other_predb = get_Block_cfgpred_block(b, i);
217 if (is_pred_of(other_predb, predb))
218 goto non_dispensable;
221 /* we will not dispense already visited blocks */
222 if (Block_block_visited(predb))
224 /* if we get here, the block is dispensable, count useful preds */
225 return get_irn_arity(predb);
/* non_dispensable path: predb must stay; make the decision sticky. */
228 set_Block_removable(predb, false);
233 * This method removes empty blocks. A block is empty if it only contains Phi
236 * We first adapt Phi nodes, then Block nodes, as we need the old ins
237 * of the Block to adapt the Phi nodes. We do this by computing new
238 * in arrays, and then replacing the old ones. So far we compute new in arrays
239 * for all nodes, not regarding whether there is a possibility for optimization.
241 * For each predecessor p of a Block b there are three cases:
242 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
244 * - The predecessor p is empty. Remove p. All predecessors of p are now
246 * - The predecessor p is a block containing useful code. Just keep p as is.
248 * For Phi nodes f we have to check the conditions at the Block of f.
249 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
251 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
252 * In this case we proceed as for blocks. We remove pred_f. All
253 * predecessors of pred_f now are predecessors of f.
254 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
255 * too. We have to replicate f for each predecessor of the removed block.
256 * Or, in other words, the removed predecessor block has exactly one
259 * Further there is a special case for self referencing blocks:
262 * then_b else_b then_b else_b
268 * | | | === optimized to ===> \ | | |
275 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
276 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
/* Block walker: merges dispensable (empty, removable) predecessor blocks into
 * b. First rewrites the in-arrays of b's Phis, then handles Phis living in
 * removed predecessor blocks, finally rewrites b's own in-array. `ctx` is the
 * merge_env; env->phis_moved is set when Phis migrate from a predecessor into
 * b. See the long case analysis in the comment above this function.
 * NOTE(review): this listing elides many interior lines (branch heads such as
 * `if (is_Bad(...))`, loop-variable declarations, closing braces, and the
 * env->changed updates) — the annotations below describe only what is
 * visible; confirm details against the full file. */
279 static void optimize_blocks(ir_node *b, void *ctx)
281 int i, j, k, n, max_preds, n_preds, p_preds = -1;
282 ir_node *pred, *phi, *next;
284 merge_env *env = (merge_env*)ctx;
286 /* Count the number of predecessor if this block is merged with pred blocks
/* max_preds = sum over all cfg-preds of test_whether_dispensable(): the
 * upper bound for the new in-array sizes. */
289 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
290 max_preds += test_whether_dispensable(b, i);
292 in = XMALLOCN(ir_node*, max_preds);
294 /*- Fix the Phi nodes of the current block -*/
/* Phi list of b was chained through the link fields by collect_nodes. */
295 for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
297 next = (ir_node*)get_irn_link(phi);
299 /* Find the new predecessors for the Phi */
301 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
302 ir_graph *irg = get_irn_irg(b);
303 pred = get_Block_cfgpred_block(b, i);
306 /* case Phi 1: maintain Bads, as somebody else is responsible to remove them */
307 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
308 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
309 /* case Phi 2: It's an empty block and not yet visited. */
310 ir_node *phi_pred = get_Phi_pred(phi, i);
/* The removed pred contributes one entry per (non-Bad) predecessor. */
312 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
313 ir_node *pred_pred = get_Block_cfgpred(pred, j);
315 if (is_Bad(pred_pred)) {
316 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
/* case 2a: the Phi operand lives in the removed block — it must be a
 * Phi itself (the block is empty); pull in its j-th operand. */
320 if (get_nodes_block(phi_pred) == pred) {
322 assert(is_Phi(phi_pred)); /* Block is empty!! */
324 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: operand defined outside the removed block — replicate it. */
327 in[p_preds++] = phi_pred;
/* case Phi 3: ordinary predecessor, keep the operand unchanged. */
332 in[p_preds++] = get_Phi_pred(phi, i);
335 assert(p_preds == max_preds);
/* Degenerate Phi (single operand): replace it by that operand. */
339 exchange(phi, in[0]);
341 set_irn_in(phi, p_preds, in);
345 /*- This happens only if merge between loop backedge and single loop entry.
346 Moreover, it is only needed if predb is the direct dominator of b,
347 else there can be no uses of the Phi's in predb ... -*/
348 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
349 ir_node *pred = get_Block_cfgpred(b, k);
350 ir_node *predb = get_nodes_block(pred);
354 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
357 /* we found a predecessor block at position k that will be removed */
358 for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
360 next_phi = (ir_node*)get_irn_link(phi);
364 if (get_Block_idom(b) != predb) {
365 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
366 ir_graph *irg = get_irn_irg(b);
367 ir_mode *mode = get_irn_mode(phi);
368 exchange(phi, new_r_Bad(irg, mode));
370 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
371 predb in further block, so move this phi from the predecessor into the block b */
372 set_nodes_block(phi, b);
373 set_irn_link(phi, get_irn_link(b));
374 set_irn_link(b, phi);
375 env->phis_moved = true;
/* Rebuild the moved Phi's in-array against b's (new) predecessor list:
 * entries 0..k-1, then the Phi's own operands at position k, then the
 * rest — mirroring the three-case scheme used above. */
377 /* first, copy all 0..k-1 predecessors */
378 for (i = 0; i < k; i++) {
379 pred = get_Block_cfgpred_block(b, i);
382 ir_graph *irg = get_irn_irg(b);
383 ir_mode *mode = get_irn_mode(phi);
384 in[q_preds++] = new_r_Bad(irg, mode);
385 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
386 /* It's an empty block and not yet visited. */
387 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
388 if (! is_Bad(get_Block_cfgpred(pred, j))) {
391 ir_graph *irg = get_irn_irg(b);
392 ir_mode *mode = get_irn_mode(phi);
393 in[q_preds++] = new_r_Bad(irg, mode);
401 /* now we are at k, copy the phi predecessors */
402 pred = get_nodes_block(get_Block_cfgpred(b, k));
403 for (i = 0; i < get_Phi_n_preds(phi); i++) {
404 in[q_preds++] = get_Phi_pred(phi, i);
407 /* and now all the rest */
408 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
409 pred = get_Block_cfgpred_block(b, i);
412 ir_graph *irg = get_irn_irg(b);
413 ir_mode *mode = get_irn_mode(phi);
414 in[q_preds++] = new_r_Bad(irg, mode);
415 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
416 /* It's an empty block and not yet visited. */
417 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
418 if (! is_Bad(get_Block_cfgpred(pred, j))) {
421 ir_graph *irg = get_irn_irg(b);
422 ir_mode *mode = get_irn_mode(phi);
423 in[q_preds++] = new_r_Bad(irg, mode);
433 exchange(phi, in[0]);
435 set_irn_in(phi, q_preds, in);
438 assert(q_preds <= max_preds);
439 // assert(p_preds == q_preds && "Wrong Phi Fix");
445 /*- Fix the block -*/
/* Finally rebuild b's own cfg in-array with the same three cases. */
447 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
448 ir_node *pred = get_Block_cfgpred(b, i);
449 ir_node *predb = get_nodes_block(pred);
450 ir_graph *irg = get_irn_irg(pred);
452 /* case 1: Bad predecessor */
454 in[n_preds++] = new_r_Bad(irg, mode_X);
457 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
458 /* case 2: It's an empty block and not yet visited. */
459 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
460 ir_node *predpred = get_Block_cfgpred(predb, j);
462 if (is_Bad(predpred)) {
463 in[n_preds++] = new_r_Bad(irg, mode_X);
467 in[n_preds++] = predpred;
469 /* Remove block+jump as it might be kept alive. */
470 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
471 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
/* case 3: keep a normal predecessor edge unchanged. */
474 in[n_preds++] = pred;
477 assert(n_preds == max_preds);
479 set_irn_in(b, n_preds, in);
482 /* see if phi-fix was correct */
483 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds));
488 * Optimize table-switch Conds.
490 * @param cond the switch-Cond
491 * @return true if the switch-Cond was optimized
/* Optimizes a table-switch Cond collected by collect_nodes. The Cond's Projs
 * are reachable through the link chain (cond -> proj1 -> proj2 -> ...) built
 * by collect_nodes. Two patterns are handled: a Cond with a single Proj
 * (only the default case remains -> becomes a Jmp), and a Cond with exactly
 * two Projs whose selector is a compile-time constant.
 * NOTE(review): several lines (returns, closing braces, the `changed` result
 * bookkeeping) are elided in this listing; confirm against the full file. */
493 static bool handle_switch_cond(ir_node *cond)
495 ir_node *sel = get_Cond_selector(cond);
496 ir_node *proj1 = (ir_node*)get_irn_link(cond);
497 ir_node *proj2 = (ir_node*)get_irn_link(proj1);
498 ir_node *blk = get_nodes_block(cond);
500 /* exactly 1 Proj on the Cond node: must be the defaultProj */
502 ir_node *jmp = new_r_Jmp(blk);
503 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
504 /* convert it into a Jmp */
505 exchange(proj1, jmp);
509 /* handle Cond nodes with constant argument. In this case the localopt rules
510 * should have killed all obviously impossible cases.
511 * So the only case left to handle here is 1 defaultProj + 1 case
512 * (this one case should be the one taken) */
/* get_irn_link(proj2) == NULL means proj2 is the last Proj: exactly two. */
513 if (get_irn_link(proj2) == NULL) {
514 ir_tarval *tv = value_of(sel);
516 if (tv != tarval_bad) {
517 /* we have a constant switch */
518 long num = get_tarval_long(tv);
519 long def_num = get_Cond_default_proj(cond);
520 ir_graph *irg = get_irn_irg(cond);
521 ir_node *bad = new_r_Bad(irg, mode_X);
/* Replace the taken Proj by a Jmp and the dead one by Bad, for each of
 * the three possible default/case layouts of the two Projs. */
523 if (def_num == get_Proj_proj(proj1)) {
524 /* first one is the defProj */
525 if (num == get_Proj_proj(proj2)) {
526 ir_node *jmp = new_r_Jmp(blk);
527 exchange(proj2, jmp);
528 exchange(proj1, bad);
531 } else if (def_num == get_Proj_proj(proj2)) {
532 /* second one is the defProj */
533 if (num == get_Proj_proj(proj1)) {
534 ir_node *jmp = new_r_Jmp(blk);
535 exchange(proj1, jmp);
536 exchange(proj2, bad);
540 /* neither: strange, Cond was not optimized so far */
541 if (num == get_Proj_proj(proj1)) {
542 ir_node *jmp = new_r_Jmp(blk);
543 exchange(proj1, jmp);
544 exchange(proj2, bad);
546 } else if (num == get_Proj_proj(proj2)) {
547 ir_node *jmp = new_r_Jmp(blk);
548 exchange(proj2, jmp);
549 exchange(proj1, bad);
559 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
560 * Block must contain no Phi nodes.
564 * projA projB => Jmp Bad
/* If cfg-preds i and j of `block` are the two Projs of the same boolean
 * Cond (true and false jump to the same block), replace one Proj by a Jmp
 * and the other by Bad, effectively turning the useless if into a jump.
 * The caller guarantees the block has no Phi nodes (see optimize_ifs).
 * NOTE(review): the final `return true;` is elided in this listing. */
568 static bool optimize_pred_cond(ir_node *block, int i, int j)
570 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
/* Guard chain: both preds must be Projs of one and the same boolean Cond. */
573 projA = get_Block_cfgpred(block, i);
574 if (!is_Proj(projA)) return false;
575 projB = get_Block_cfgpred(block, j);
576 if (!is_Proj(projB)) return false;
577 cond = get_Proj_pred(projA);
578 if (!is_Cond(cond)) return false;
580 if (cond != get_Proj_pred(projB)) return false;
581 if (is_switch_Cond(cond)) return false;
583 /* cond should actually be a Jmp */
584 pred_block = get_nodes_block(cond);
585 jmp = new_r_Jmp(pred_block);
586 bad = new_r_Bad(get_irn_irg(block), mode_X);
588 assert(projA != projB);
589 exchange(projA, jmp);
590 exchange(projB, bad);
594 typedef enum block_flags_t {
595 BF_HAS_OPERATIONS = 1 << 0,
596 BF_HAS_PHIS = 1 << 1,
597 BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
/* Per-block bit flags (block_flags_t) stored as the ir_phase payload pointer.
 * get/set_phase_flag are the raw accessors; the pairs below wrap them for
 * each individual flag.
 * NOTE(review): the payload pointer is round-tripped through `int` — on LP64
 * targets this truncates the pointer value. Harmless as long as only these
 * small flag bits are ever stored, but worth converting to intptr_t. */
600 static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag) {
601 return ((int)phase_get_irn_data(block_info, block)) & flag;
/* NOTE(review): the line OR-ing `flag` into `data` is elided in this listing. */
603 static void set_phase_flag(ir_phase *block_info, ir_node *block, int flag) {
604 int data = (int)phase_get_irn_data(block_info, block);
606 phase_set_irn_data(block_info, block, (void*)data);
/* BF_HAS_OPERATIONS: block contains real computations (not just cf nodes). */
609 static bool has_operations(ir_phase *block_info, ir_node *block) {
610 return get_phase_flag(block_info, block, BF_HAS_OPERATIONS);
612 static void set_has_operations(ir_phase *block_info, ir_node *block) {
613 set_phase_flag(block_info, block, BF_HAS_OPERATIONS);
/* BF_HAS_PHIS: block is the home of at least one Phi node. */
616 static bool has_phis(ir_phase *block_info, ir_node *block) {
617 return get_phase_flag(block_info, block, BF_HAS_PHIS);
619 static void set_has_phis(ir_phase *block_info, ir_node *block) {
620 set_phase_flag(block_info, block, BF_HAS_PHIS);
/* BF_IS_UNKNOWN_JUMP_TARGET: block is entered by an unknown jump, so its
 * predecessor structure must not be rewritten. */
623 static bool is_unknown_jump_target(ir_phase *block_info, ir_node *block) {
624 return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
626 static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block) {
627 set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
631 * Walker: fill block info information.
/* Walker: classifies every node to populate the per-block flag phase —
 * marks blocks targeted by unknown jumps, blocks containing Phis, and
 * blocks with real operations (anything besides Jmp/Cond/Cmp/Proj).
 * NOTE(review): the `if (is_Block(n))` head of the first branch is elided
 * in this listing; confirm in the full file. */
633 static void compute_block_info(ir_node *n, void *x)
635 ir_phase *block_info = (ir_phase *)x;
638 int i, max = get_Block_n_cfgpreds(n);
639 for (i=0; i<max; i++) {
640 ir_node *pred = get_Block_cfgpred(n,i);
641 if (is_unknown_jump(pred)) {
642 set_is_unknown_jump_target(block_info, n);
645 } else if (is_Phi(n)) {
646 ir_node *block = get_nodes_block(n);
647 set_has_phis(block_info, block);
/* Everything that is NOT pure control flow counts as an operation. */
648 } else if (is_Jmp(n) || is_Cond(n) || is_Cmp(n) || is_Proj(n)) {
651 ir_node *block = get_nodes_block(n);
652 set_has_operations(block_info, block);
656 typedef struct skip_env {
662 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
663 * with same true/false target)
/* Post-block walker: for a Phi-free block, tries optimize_pred_cond() on
 * every pair of cfg predecessors to collapse useless boolean Conds whose
 * true and false Projs both enter this block. x is the skip_env. */
666 static void optimize_ifs(ir_node *block, void *x)
668 skip_env *env = (skip_env*)x;
670 int n_preds = get_Block_n_cfgpreds(block);
/* Phis would need operand merging — this cheap pass skips such blocks. */
672 if (has_phis(env->phase, block))
675 /* optimize Cond predecessors (might produce Bad predecessors) */
676 for (i = 0; i < n_preds; ++i) {
677 for (j = i+1; j < n_preds; ++j) {
678 optimize_pred_cond(block, i, j);
684 * Post-Block walker: remove empty blocks that are
685 * predecessors of the current block.
/* Post-block walker: short-circuits empty predecessor blocks. An empty
 * block (no operations, not an unknown-jump target, exactly one cfg pred)
 * is merged into its own predecessor block via exchange(), so the jump
 * through it disappears. x is the skip_env.
 * NOTE(review): several guards (e.g. the is_Jmp/is_Bad checks around lines
 * 697 and 709-711 of the original) are elided in this listing. */
687 static void remove_empty_blocks(ir_node *block, void *x)
689 skip_env *env = (skip_env*)x;
691 int n_preds = get_Block_n_cfgpreds(block);
693 for (i = 0; i < n_preds; ++i) {
694 ir_node *jmp, *jmp_block, *pred, *pred_block;
696 jmp = get_Block_cfgpred(block, i);
699 jmp_block = get_nodes_block(jmp);
/* Blocks entered by unknown jumps must keep their identity. */
700 if (is_unknown_jump_target(env->phase, jmp_block))
702 if (has_operations(env->phase,jmp_block))
704 /* jmp_block is an empty block! */
706 if (get_Block_n_cfgpreds(jmp_block) != 1)
708 pred = get_Block_cfgpred(jmp_block, 0);
712 /* cleanup: jmp_block might have a Keep edge! */
713 pred_block = get_nodes_block(pred);
714 exchange(jmp_block, pred_block);
719 * Some cfg optimizations, which do not touch Phi nodes
/* Driver for the cheap cfg optimizations that never touch Phi nodes:
 * computes per-block flags into an ir_phase, then runs the useless-if
 * optimization and empty-block removal over all blocks.
 * NOTE(review): the surrounding loop ("changed" bookkeeping, lines elided
 * around 724-743) that re-runs these passes is not visible in this listing. */
721 static void cfgopt_ignoring_phis(ir_graph *irg) {
722 ir_phase *block_info = new_phase(irg, NULL);
723 skip_env env = { false, block_info };
725 irg_walk_graph(irg, compute_block_info, NULL, block_info);
730 /* useless if optimization: will not touch empty blocks */
731 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
733 /* Remove empty blocks */
734 irg_block_walk_graph(irg, NULL, remove_empty_blocks, &env);
/* exchange() of blocks invalidates dominance information. */
736 set_irg_doms_inconsistent(irg);
737 /* Removing blocks might enable more Cond optimizations */
744 phase_free(block_info);
747 /* Optimizations of the control flow that also require changes of Phi nodes. */
/* Public entry point: full control-flow optimization of irg.
 * Pipeline: (1) Phi-free cleanups (cfgopt_ignoring_phis), (2) a loop that
 * collects Phis/Projs/switch-Conds, simplifies switch-Conds, and merges
 * empty blocks via optimize_blocks, (3) End-node cleanup and, if Phis were
 * moved between blocks, removal of now-dead keep-alive edges.
 * Requires a pinned graph that is past the construction phase.
 * NOTE(review): the loop head around line 775-781, the assertion block near
 * 808-812, and parts of the keep-alive filtering are elided in this listing. */
748 void optimize_cf(ir_graph *irg)
752 ir_node *end = get_irg_end(irg);
757 env.phis_moved = false;
759 assert(get_irg_phase_state(irg) != phase_building);
761 /* if the graph is not pinned, we cannot determine empty blocks */
762 assert(get_irg_pinned(irg) != op_pin_state_floats &&
763 "Control flow optimization need a pinned graph");
765 /* FIXME: control flow opt destroys block edges. So edges are deactivated
766 * here. Fix the edges! */
767 edges_deactivate(irg);
769 cfgopt_ignoring_phis(irg);
771 /* we use the mark flag to mark removable blocks */
772 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
774 /* The switch Cond optimization might expose unreachable code, so we loop */
777 ir_node **switch_conds = NULL;
778 bool changed = false;
783 * This pass collects all Phi nodes in a link list in the block
784 * nodes. Further it performs simple control flow optimizations.
785 * Finally it marks all blocks that do not contain useful
786 * computations, i.e., these blocks might be removed.
788 switch_conds = NEW_ARR_F(ir_node*, 0);
789 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds);
791 /* handle all collected switch-Conds */
792 length = ARR_LEN(switch_conds);
793 for (i = 0; i < length; ++i) {
794 ir_node *cond = switch_conds[i];
795 changed |= handle_switch_cond(cond);
797 DEL_ARR_F(switch_conds);
/* Something changed: conservatively invalidate derived graph state. */
802 set_irg_outs_inconsistent(irg);
803 set_irg_doms_inconsistent(irg);
804 set_irg_extblk_inconsistent(irg);
805 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
808 /* assert due to collect_nodes:
809 * 1. removable blocks are now marked as such
810 * 2. phi lists are up to date
813 /* Optimize the standard code.
814 * It walks only over block nodes and adapts these and the Phi nodes in these
815 * blocks, which it finds in a linked list computed before.
818 irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
/* The End node itself may have become optimizable. */
820 new_end = optimize_in_place(end);
821 if (new_end != end) {
822 set_irg_end(irg, new_end);
825 remove_End_Bads_and_doublets(end);
827 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
829 if (env.phis_moved) {
830 /* Bad: when we moved Phi's, we might produce dead Phi nodes
832 Some other phases cannot cope with this, so kill them.
/* Rebuild End's keep-alive list, dropping Phis that no longer have any
 * real user (only self-loops / the End node itself). */
834 n = get_End_n_keepalives(end);
836 NEW_ARR_A(ir_node *, in, n);
837 assure_irg_outs(irg);
839 for (i = j = 0; i < n; ++i) {
840 ir_node *ka = get_End_keepalive(end, i);
845 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
846 ir_node *user = get_irn_out(ka, k);
848 if (user != ka && user != end) {
849 /* Is it a real user or just a self loop ? */
859 set_End_keepalives(end, j, in);
866 /* Handle graph state if was changed. */
867 set_irg_outs_inconsistent(irg);
868 set_irg_doms_inconsistent(irg);
869 set_irg_extblk_inconsistent(irg);
870 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
874 /* Creates an ir_graph pass for optimize_cf. */
/* Wraps optimize_cf() in the generic pass-manager interface; `name` may be
 * NULL, in which case the pass is registered as "optimize_cf". */
875 ir_graph_pass_t *optimize_cf_pass(const char *name)
877 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);