2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
26 * Removes Bad control flow predecessors and empty blocks. A block is empty
27 * if it contains only a Jmp node. Blocks can only be removed if they are not
28 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
29 * blocks (even if we could move the label).
33 #include "iroptimize.h"
40 #include "irgraph_t.h"
50 #include "opt_manage.h"
55 #include "irbackedge_t.h"
60 #include "irnodehashmap.h"
63 #include "iropt_dbg.h"
65 /** An environment for merge_blocks and collect nodes. */
66 typedef struct merge_env {
67 bool changed; /**< Set if the graph was changed. */
68 bool phis_moved; /**< Set if Phi nodes were moved. */
/* NOTE(review): the closing "} merge_env;" line is not visible in this view. */
71 /** set or reset the removable property of a block. */
72 static void set_Block_removable(ir_node *block, bool removable)
/* The block mark flag (reserved via IR_RESOURCE_BLOCK_MARK in do_cfopt)
 * doubles as the "removable" bit for this pass. */
74 set_Block_mark(block, removable);
77 /** check if a block has the removable property set. */
78 static bool is_Block_removable(const ir_node *block)
/* Reads the same mark flag written by set_Block_removable(). */
80 return get_Block_mark(block);
83 /** checks if a given Cond node is a switch Cond. */
84 static bool is_switch_Cond(const ir_node *cond)
86 ir_node *sel = get_Cond_selector(cond);
/* A boolean Cond selects on mode_b; anything else is a switch-like Cond. */
87 return get_irn_mode(sel) != mode_b;
90 /** Walker: clear link fields and mark all blocks as removable. */
91 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
94 set_irn_link(node, NULL);
/* NOTE(review): the branch that guards the next two calls (presumably
 * "if (is_Block(node))") is elided in this view — confirm against the
 * full source. Blocks start as removable and get their Phi list reset. */
96 set_Block_removable(node, true);
97 set_Block_phis(node, NULL);
98 } else if (is_Phi(node)) {
/* Phi list links are rebuilt by collect_nodes; start from a clean slate. */
99 set_Phi_next(node, NULL);
104 * Collects all Phi nodes in link list of Block.
105 * Marks all blocks "non_removable" if they contain a node other
106 * than Jmp (and Proj).
107 * Links all Proj nodes to their predecessors.
108 * Collects all switch-Conds in a list.
110 static void collect_nodes(ir_node *n, void *ctx)
/* NOTE(review): the opening dispatch (presumably "if (is_Phi(n)) {") is
 * elided in this view. */
114 /* Collect Phi nodes to compact ins along with block's ins. */
115 ir_node *block = get_nodes_block(n);
116 add_Block_phi(block, n);
117 } else if (is_Block(n)) {
118 if (has_Block_entity(n)) {
119 /* block with a jump label attached cannot be removed. */
120 set_Block_removable(n, false);
122 } else if (is_Bad(n) || is_Jmp(n)) {
/* Bad and Jmp nodes are "free": they do not make a block non-empty.
 * NOTE(review): lines handling Cond/Proj classification appear elided here. */
126 /* Check for non-empty block. */
127 ir_node *block = get_nodes_block(n);
/* Any other node is a real computation, so its block must be kept. */
129 set_Block_removable(block, false);
132 /* link Proj nodes */
/* Chain each Proj onto its predecessor's link list (used later to find
 * all Projs of a node without out-edges). */
133 ir_node *pred = get_Proj_pred(n);
134 set_irn_link(n, get_irn_link(pred));
135 set_irn_link(pred, n);
140 /** Returns true if pred is predecessor of block b. */
141 static bool is_pred_of(const ir_node *pred, const ir_node *b)
/* Linear scan over b's control-flow predecessors, comparing blocks. */
145 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
146 ir_node *b_pred = get_Block_cfgpred_block(b, i);
/* NOTE(review): the comparison/return lines ("if (b_pred == pred) return
 * true;" and the final "return false;") are elided in this view. */
153 /** Test whether we can optimize away pred block pos of b.
155 * @param b A block node.
156 * @param pos The position of the predecessor block to judge about.
158 * @returns The number of predecessors
160 * The test is rather tricky.
162 * The situation is something like the following:
171 * b merges the control flow of an if-then-else. We may not remove
172 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
173 * node in b, even if both are empty. The destruction of this Phi
174 * requires that a copy is added before the merge. We have to
175 * keep one of the case blocks to place the copies in.
177 * To perform the test for pos, we must regard predecessors before pos
178 * as already removed.
180 static unsigned test_whether_dispensable(const ir_node *b, int pos)
182 ir_node *pred = get_Block_cfgpred(b, pos);
183 ir_node *predb = get_nodes_block(pred);
/* Bad preds and blocks already marked non-removable count as one pred.
 * NOTE(review): the "return 1;" after this test is elided in this view. */
185 if (is_Bad(pred) || !is_Block_removable(predb))
188 /* can't remove self-loops */
/* NOTE(review): the "if (predb == b)" guard line is elided here. */
190 goto non_dispensable;
191 if (is_unknown_jump(pred))
192 goto non_dispensable;
194 /* Seems to be empty. At least we detected this in collect_nodes. */
195 if (get_Block_phis(b) != NULL) {
196 int n_cfgpreds = get_Block_n_cfgpreds(b);
198 /* there are Phi nodes */
200 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
201 * Handle all pred blocks with preds < pos as if they were already
203 for (i = 0; i < pos; i++) {
204 ir_node *other_pred = get_Block_cfgpred(b, i);
205 ir_node *other_predb = get_nodes_block(other_pred);
206 if (is_Bad(other_pred))
208 if (is_Block_removable(other_predb)
209 && !Block_block_visited(other_predb)) {
/* This earlier pred will itself be merged away: its own preds become
 * preds of b, so they must not also reach predb. */
211 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
212 ir_node *other_predpred
213 = get_Block_cfgpred_block(other_predb, j);
214 if (is_pred_of(other_predpred, predb))
215 goto non_dispensable;
217 } else if (is_pred_of(other_predb, predb)) {
218 goto non_dispensable;
/* Preds after pos are treated as kept: they must not be preds of predb. */
221 for (i = pos+1; i < n_cfgpreds; i++) {
222 ir_node *other_predb = get_Block_cfgpred_block(b, i);
223 if (is_pred_of(other_predb, predb))
224 goto non_dispensable;
227 /* we will not dispense already visited blocks */
228 if (Block_block_visited(predb))
/* NOTE(review): the "return 1;" after the visited test is elided here. */
230 /* if we get here, the block is dispensable, count useful preds */
231 return get_irn_arity(predb);
/* non_dispensable: keep the block and report it as exactly one pred. */
234 set_Block_removable(predb, false);
239 * This method removes empty blocks. A block is empty if it only contains Phi
242 * We first adapt Phi nodes, then Block nodes, as we need the old ins
243 * of the Block to adapt the Phi nodes. We do this by computing new
244 * in arrays, and then replacing the old ones. So far we compute new in arrays
245 * for all nodes, not regarding whether there is a possibility for optimization.
247 * For each predecessor p of a Block b there are three cases:
248 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
250 * - The predecessor p is empty. Remove p. All predecessors of p are now
252 * - The predecessor p is a block containing useful code. Just keep p as is.
254 * For Phi nodes f we have to check the conditions at the Block of f.
255 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
257 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
258 * In this case we proceed as for blocks. We remove pred_f. All
259 * predecessors of pred_f now are predecessors of f.
260 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
261 * too. We have to replicate f for each predecessor of the removed block.
262 * Or, with other words, the removed predecessor block has exactly one
265 * Further there is a special case for self referencing blocks:
268 * then_b else_b then_b else_b
274 * | | | === optimized to ===> \ | | |
281 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
282 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
285 static void optimize_blocks(ir_node *b, void *ctx)
287 int i, j, k, n, max_preds, n_preds, p_preds = -1;
291 merge_env *env = (merge_env*)ctx;
293 if (get_Block_dom_depth(b) < 0) {
294 /* ignore unreachable blocks */
/* NOTE(review): the "return;" for this early-out is elided in this view. */
298 /* Count the number of predecessor if this block is merged with pred blocks
301 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
302 max_preds += test_whether_dispensable(b, i);
/* Worst-case in-array: every dispensable pred contributes all of its preds. */
304 in = XMALLOCN(ir_node*, max_preds);
306 /*- Fix the Phi nodes of the current block -*/
307 for (phi = get_Block_phis(b); phi != NULL; phi = next) {
308 next = get_Phi_next(phi);
310 /* Find the new predecessors for the Phi */
312 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
313 ir_graph *irg = get_irn_irg(b);
314 ir_node *predx = get_Block_cfgpred(b, i);
317 /* case Phi 1: maintain Bads, as somebody else is responsible to
320 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
324 pred = get_nodes_block(predx);
326 /* case Phi 2: It's an empty block and not yet visited. */
327 if (is_Block_removable(pred) && !Block_block_visited(pred)) {
328 ir_node *phi_pred = get_Phi_pred(phi, i);
/* The removed pred's own cfg preds each contribute one Phi input. */
330 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
331 ir_node *pred_pred = get_Block_cfgpred(pred, j);
333 if (is_Bad(pred_pred)) {
334 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
/* Case 2a: value comes from a Phi inside the removed block -> inline
 * that Phi's inputs. Case 2b: same value replicated per new pred. */
338 if (get_nodes_block(phi_pred) == pred) {
340 assert(is_Phi(phi_pred)); /* Block is empty!! */
342 in[p_preds++] = get_Phi_pred(phi_pred, j);
345 in[p_preds++] = phi_pred;
/* case Phi 3: regular (kept) predecessor — keep the Phi input as is. */
350 in[p_preds++] = get_Phi_pred(phi, i);
353 assert(p_preds == max_preds);
/* A one-input Phi degenerates to its single operand. */
357 exchange(phi, in[0]);
359 set_irn_in(phi, p_preds, in);
364 /*- This happens only if merge between loop backedge and single loop entry.
365 Moreover, it is only needed if predb is the direct dominator of b,
366 else there can be no uses of the Phi's in predb ... -*/
367 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
368 ir_node *pred = get_Block_cfgpred(b, k);
369 ir_node *predb = get_nodes_block(pred);
373 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
376 /* we found a predecessor block at position k that will be removed */
377 for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) {
379 next_phi = get_Phi_next(phi);
381 if (get_Block_idom(b) != predb) {
382 /* predb is not the dominator. There can't be uses of
383 * pred's Phi nodes, kill them. */
384 ir_graph *irg = get_irn_irg(b);
385 ir_mode *mode = get_irn_mode(phi);
386 exchange(phi, new_r_Bad(irg, mode));
388 /* predb is the direct dominator of b. There might be uses
389 * of the Phi nodes from predb in further block, so move
390 * this phi from the predecessor into the block b */
391 set_nodes_block(phi, b);
392 set_Phi_next(phi, get_Block_phis(b));
393 set_Block_phis(b, phi);
394 env->phis_moved = true;
396 /* first, copy all 0..k-1 predecessors */
397 for (i = 0; i < k; i++) {
398 ir_node *predx = get_Block_cfgpred(b, i);
402 ir_graph *irg = get_irn_irg(b);
403 ir_mode *mode = get_irn_mode(phi);
404 in[q_preds++] = new_r_Bad(irg, mode);
407 pred_block = get_nodes_block(predx);
408 if (is_Block_removable(pred_block)
409 && !Block_block_visited(pred_block)) {
410 int n_cfgpreds = get_Block_n_cfgpreds(pred_block);
411 /* It's an empty block and not yet visited. */
412 for (j = 0; j < n_cfgpreds; j++) {
413 if (!is_Bad(get_Block_cfgpred(pred_block, j))) {
/* NOTE(review): the "in[q_preds++] = phi;" branch appears elided here. */
416 ir_graph *irg = get_irn_irg(b);
417 ir_mode *mode = get_irn_mode(phi);
418 in[q_preds++] = new_r_Bad(irg, mode);
426 /* now we are at k, copy the phi predecessors */
427 pred = get_nodes_block(get_Block_cfgpred(b, k));
428 for (i = 0; i < get_Phi_n_preds(phi); i++) {
429 in[q_preds++] = get_Phi_pred(phi, i);
432 /* and now all the rest */
433 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
434 pred = get_Block_cfgpred_block(b, i);
437 ir_graph *irg = get_irn_irg(b);
438 ir_mode *mode = get_irn_mode(phi);
439 in[q_preds++] = new_r_Bad(irg, mode);
440 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
441 /* It's an empty block and not yet visited. */
442 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
443 if (! is_Bad(get_Block_cfgpred(pred, j))) {
446 ir_graph *irg = get_irn_irg(b);
447 ir_mode *mode = get_irn_mode(phi);
448 in[q_preds++] = new_r_Bad(irg, mode);
/* Same degenerate-Phi handling as above: single input -> replace. */
458 exchange(phi, in[0]);
460 set_irn_in(phi, q_preds, in);
463 assert(q_preds <= max_preds);
464 // assert(p_preds == q_preds && "Wrong Phi Fix");
470 /*- Fix the block -*/
472 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
473 ir_node *pred = get_Block_cfgpred(b, i);
474 ir_node *predb = get_nodes_block(pred);
475 ir_graph *irg = get_irn_irg(pred);
477 /* case 1: Bad predecessor */
479 in[n_preds++] = new_r_Bad(irg, mode_X);
482 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
483 /* case 2: It's an empty block and not yet visited. */
484 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
485 ir_node *predpred = get_Block_cfgpred(predb, j);
487 if (is_Bad(predpred)) {
488 in[n_preds++] = new_r_Bad(irg, mode_X);
492 in[n_preds++] = predpred;
494 /* Remove block+jump as it might be kept alive. */
495 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
496 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
/* case 3: regular predecessor — kept unchanged. */
499 in[n_preds++] = pred;
502 assert(n_preds == max_preds);
504 set_irn_in(b, n_preds, in);
507 /* see if phi-fix was correct */
508 assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds));
513 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
514 * Block must contain no Phi nodes.
518 * projA projB => Jmp Bad
522 static bool optimize_pred_cond(ir_node *block, int i, int j)
524 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
/* Both preds must be Projs of the same boolean (non-switch) Cond. */
527 projA = get_Block_cfgpred(block, i);
528 if (!is_Proj(projA)) return false;
529 projB = get_Block_cfgpred(block, j);
530 if (!is_Proj(projB)) return false;
531 cond = get_Proj_pred(projA);
532 if (!is_Cond(cond)) return false;
534 if (cond != get_Proj_pred(projB)) return false;
535 if (is_switch_Cond(cond)) return false;
537 /* cond should actually be a Jmp */
538 pred_block = get_nodes_block(cond);
539 jmp = new_r_Jmp(pred_block);
540 bad = new_r_Bad(get_irn_irg(block), mode_X);
542 assert(projA != projB);
/* Replace one Proj by an unconditional Jmp, kill the other with Bad. */
543 exchange(projA, jmp);
544 exchange(projB, bad);
/* NOTE(review): the "return true;" ending this function is elided here. */
/* Per-block property bits stored packed into the hashmap value pointer. */
548 typedef enum block_flags_t {
549 BF_HAS_OPERATIONS = 1 << 0,
550 BF_HAS_PHIS = 1 << 1,
551 BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
/* NOTE(review): the closing "} block_flags_t;" line is not visible here. */
/** Test a single block_flags_t bit for a block; absent map entry reads as 0. */
554 static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block,
557 return PTR_TO_INT(ir_nodehashmap_get(infos, block)) & flag;
/** Set a block_flags_t bit for a block in the info hashmap. */
560 static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block,
563 int data = PTR_TO_INT(ir_nodehashmap_get(infos, block));
/* NOTE(review): the "data |= flag;" line appears elided in this view. */
565 ir_nodehashmap_insert(infos, block, INT_TO_PTR(data));
/** Drop all cached flags for a block (entry removed => all bits read 0). */
568 static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block)
570 ir_nodehashmap_remove(infos, block);
/** True if the block contains nodes other than Jmp/Cond/Proj/Phi. */
573 static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block)
575 return get_block_flag(infos, block, BF_HAS_OPERATIONS);
/** Mark the block as containing real operations. */
578 static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block)
580 set_block_flag(infos, block, BF_HAS_OPERATIONS);
/** True if the block contains Phi nodes. */
583 static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block)
585 return get_block_flag(infos, block, BF_HAS_PHIS);
/** Mark the block as containing Phi nodes. */
588 static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block)
590 set_block_flag(infos, block, BF_HAS_PHIS);
/** True if some predecessor of the block is an unknown jump. */
593 static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block)
595 return get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
/** Mark the block as the target of an unknown jump. */
598 static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block)
600 set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
604 * Pre-Walker: fill block info information.
606 static void compute_block_info(ir_node *n, void *x)
608 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
/* NOTE(review): the "if (is_Block(n)) {" dispatch line is elided here. */
611 int i, max = get_Block_n_cfgpreds(n);
612 for (i=0; i<max; i++) {
613 ir_node *pred = get_Block_cfgpred(n,i);
614 if (is_unknown_jump(pred)) {
615 set_is_unknown_jump_target(infos, n);
618 } else if (is_Phi(n)) {
619 ir_node *block = get_nodes_block(n);
620 set_has_phis(infos, block);
621 } else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
/* Pure control-flow nodes do NOT count as operations; everything else does.
 * NOTE(review): the empty/"ignore" branch between 621 and 624 is elided. */
624 ir_node *block = get_nodes_block(n);
625 set_has_operations(infos, block);
/** Block walker: forget all cached flags so they can be recomputed. */
629 static void clear_block_info(ir_node *block, void *x)
631 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
632 clear_block_flag(infos, block);
/* Environment for the Phi-free cfg optimizations (cfgopt_ignoring_phis). */
635 typedef struct skip_env {
637 ir_nodehashmap_t block_infos;
/* NOTE(review): the "changed" member and closing "} skip_env;" are elided. */
641 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
642 * with same true/false target) away.
644 static void optimize_ifs(ir_node *block, void *x)
646 skip_env *env = (skip_env*)x;
648 int n_preds = get_Block_n_cfgpreds(block);
/* Blocks with Phis must keep distinct cfg preds; skip them entirely.
 * NOTE(review): the "return;" for this guard is elided in this view. */
650 if (has_phis(&env->block_infos, block))
653 /* optimize Cond predecessors (might produce Bad predecessors) */
/* Try every unordered pair (i, j) of preds for a common Cond. */
654 for (i = 0; i < n_preds; ++i) {
655 for (j = i+1; j < n_preds; ++j) {
656 optimize_pred_cond(block, i, j);
662 * Pre-Block walker: remove empty blocks (only contain a Jmp)
663 * that are control flow predecessors of the current block.
665 static void remove_empty_blocks(ir_node *block, void *x)
667 skip_env *env = (skip_env*)x;
669 int n_preds = get_Block_n_cfgpreds(block);
671 for (i = 0; i < n_preds; ++i) {
672 ir_node *jmp, *jmp_block, *pred, *pred_block;
675 jmp = get_Block_cfgpred(block, i);
/* NOTE(review): the "if (!is_Jmp(jmp)) continue;" guard appears elided. */
678 jmp_block = get_nodes_block(jmp);
679 if (jmp_block == block)
680 continue; /* this infinite loop cannot be optimized any further */
681 if (is_unknown_jump_target(&env->block_infos, jmp_block))
682 continue; /* unknown jump target must not be optimized */
683 if (has_operations(&env->block_infos,jmp_block))
684 continue; /* this block contains operations and cannot be skipped */
685 if (has_phis(&env->block_infos,jmp_block))
686 continue; /* this block contains Phis and is not skipped */
687 if (Block_block_visited(jmp_block)) {
689 /* otherwise we could break the walker,
690 * if block was reached via
691 * KeepAlive edge -> jmp_block -> A ---> block,
692 * because the walker cannot handle Id nodes.
702 /* jmp_block is an empty block and can be optimized! */
704 n_jpreds = get_Block_n_cfgpreds(jmp_block);
706 * If the jmp block has only one predecessor this is straightforward.
707 * However, if there are more predecessors, we only handle this,
708 * if block has no Phis.
/* NOTE(review): the "if (n_jpreds == 1) {" dispatch line is elided here. */
711 /* skip jmp block by rerouting its predecessor to block
719 pred = get_Block_cfgpred(jmp_block, 0);
/* NOTE(review): the exchange(jmp, pred) rerouting line appears elided. */
722 /* cleanup: jmp_block might have a Keep edge! */
723 pred_block = get_nodes_block(pred);
724 exchange(jmp_block, pred_block);
726 } else if (! has_phis(&env->block_infos, block)) {
727 /* all predecessors can skip the jmp block, so block gets some new
732 * jmp_block C => Bad C | |
736 ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds);
738 /* first copy the old predecessors, because the outer loop (i)
739 * still walks over them */
740 for (j = 0; j < n_preds; ++j) {
741 ins[j] = get_Block_cfgpred(block, j);
743 /* now append the new predecessors */
744 for (j = 0; j < n_jpreds; ++j) {
745 pred = get_Block_cfgpred(jmp_block, j);
746 ins[n_preds+j] = pred;
748 set_irn_in(block, n_preds+n_jpreds, ins);
749 /* convert the jmp_block to Bad */
750 ir_graph *irg = get_irn_irg(block);
751 exchange(jmp_block, new_r_Bad(irg, mode_BB));
752 exchange(jmp, new_r_Bad(irg, mode_X));
753 /* let the outer loop walk over the new predecessors as well */
756 // TODO What if jmp_block had a KeepAlive edge?
758 /* This would involve Phis ... */
764 * All cfg optimizations, which do not touch Phi nodes.
766 * Note that this might create critical edges.
768 static void cfgopt_ignoring_phis(ir_graph *irg)
773 ir_nodehashmap_init(&env.block_infos);
/* Fixpoint iteration: repeat while any sub-pass changed the graph.
 * NOTE(review): env.changed initialization line is elided in this view. */
775 while (env.changed) {
776 irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos);
779 /* Remove blocks, which only consist of a Jmp */
780 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
782 /* Optimize Cond->Jmp, where then- and else-block are the same. */
783 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
786 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
787 /* clear block info, because it must be recomputed */
788 irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos);
789 /* Removing blocks and Conds might enable more optimizations */
796 ir_nodehashmap_destroy(&env.block_infos);
799 /* Optimizations of the control flow that also require changes of Phi nodes. */
800 static ir_graph_state_t do_cfopt(ir_graph *irg)
804 ir_node *end = get_irg_end(irg);
809 env.phis_moved = false;
811 assert(get_irg_phase_state(irg) != phase_building);
813 /* if the graph is not pinned, we cannot determine empty blocks */
814 assert(get_irg_pinned(irg) != op_pin_state_floats &&
815 "Control flow optimization need a pinned graph");
/* Out-edges would become stale during the heavy rewiring below. */
817 edges_deactivate(irg);
819 /* First the "simple" optimizations, which do not touch Phis */
820 cfgopt_ignoring_phis(irg);
822 /* we use the mark flag to mark removable blocks */
823 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
824 | IR_RESOURCE_PHI_LIST);
826 /* The switch Cond optimization might expose unreachable code, so we loop */
828 bool changed = false;
833 * This pass collects all Phi nodes in a link list in the block
834 * nodes. Further it performs simple control flow optimizations.
835 * Finally it marks all blocks that do not contain useful
836 * computations, i.e., these blocks might be removed.
838 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL);
843 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
844 | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS
845 | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
848 /* assert due to collect_nodes:
849 * 1. removable blocks are now marked as such
850 * 2. phi lists are up to date
853 /* Optimize the standard code.
854 * It walks only over block nodes and adapts these and the Phi nodes in these
855 * blocks, which it finds in a linked list computed before.
858 irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
860 new_end = optimize_in_place(end);
861 if (new_end != end) {
862 set_irg_end(irg, new_end);
865 remove_End_Bads_and_doublets(end);
867 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
868 | IR_RESOURCE_PHI_LIST);
870 if (env.phis_moved) {
871 /* Bad: when we moved Phi's, we might produce dead Phi nodes
873 Some other phases cannot cope with this, so kill them.
875 n = get_End_n_keepalives(end);
877 NEW_ARR_A(ir_node *, in, n);
/* Need out-edges to decide whether a kept Phi has any real user. */
878 assure_irg_outs(irg);
880 for (i = j = 0; i < n; ++i) {
881 ir_node *ka = get_End_keepalive(end, i);
/* NOTE(review): the "if (is_Phi(ka))" filtering line appears elided here. */
886 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
887 ir_node *user = get_irn_out(ka, k);
889 if (user != ka && user != end) {
890 /* Is it a real user or just a self loop ? */
/* Compact the keepalive list to the j survivors found above. */
900 set_End_keepalives(end, j, in);
/* Optimization descriptor for the opt_manage framework: do_cfopt requires
 * (and the framework guarantees) no unreachable code before it runs. */
909 static optdesc_t opt_cf = {
911 IR_GRAPH_STATE_NO_UNREACHABLE_CODE,
/* NOTE(review): remaining initializer fields and "};" are elided here. */
/** Public entry point: run the control-flow optimization via opt_manage. */
915 void optimize_cf(ir_graph *irg)
917 perform_irg_optimization(irg, &opt_cf);
920 /* Creates an ir_graph pass for optimize_cf. */
921 ir_graph_pass_t *optimize_cf_pass(const char *name)
/* Wraps optimize_cf in a pass object; NULL name falls back to "optimize_cf". */
923 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);