/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Control flow optimizations.
 * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
 *
 * Removes Bad control flow predecessors and empty blocks.  A block is empty
 * if it contains only a Jmp node.  Blocks can only be removed if they are not
 * needed for the semantics of Phi nodes.  Further, we NEVER remove labeled
 * blocks (even if we could move the label).
 */
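/*
 * As a rough illustration (not taken from any particular input program),
 * the pass turns a CFG like the one on the left into the one on the right:
 *
 *        A                A
 *        |                |
 *      empty      =>      |
 *    (only Jmp)           |
 *        |                |
 *        B                B
 *
 * provided no Phi node in B and no label on the empty block forces us to
 * keep it.
 */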
#include "config.h"

#include "iroptimize.h"

#include <assert.h>
#include <stdbool.h>

#include "xmalloc.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "ircons.h"
#include "iropt_t.h"
#include "irgwalk.h"
#include "irgmod.h"
#include "array_t.h"
#include "irouts.h"
#include "irbackedge_t.h"
#include "irpass.h"
#include "irnodehashmap.h"

#include "iropt_dbg.h"
/** An environment for merge_blocks and collect nodes. */
typedef struct merge_env {
	bool changed;    /**< Set if the graph was changed. */
	bool phis_moved; /**< Set if Phi nodes were moved. */
} merge_env;
/** set or reset the removable property of a block. */
static void set_Block_removable(ir_node *block, bool removable)
{
	set_Block_mark(block, removable);
}

/** check if a block has the removable property set. */
static bool is_Block_removable(const ir_node *block)
{
	return get_Block_mark(block);
}
/** Walker: clear link fields and mark all blocks as removable. */
static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
{
	(void) ctx;
	set_irn_link(node, NULL);
	if (is_Block(node)) {
		set_Block_removable(node, true);
		set_Block_phis(node, NULL);
	} else if (is_Phi(node)) {
		set_Phi_next(node, NULL);
	}
}
/**
 * Collects all Phi nodes in the link list of their block.
 * Marks a block as non-removable if it contains a node other
 * than Phi, Jmp or Bad.
 * Links all Proj nodes to their predecessors.
 */
static void collect_nodes(ir_node *n, void *ctx)
{
	(void) ctx;
	if (is_Phi(n)) {
		/* Collect Phi nodes to compact ins along with block's ins. */
		ir_node *block = get_nodes_block(n);
		add_Block_phi(block, n);
	} else if (is_Block(n)) {
		if (get_Block_entity(n) != NULL) {
			/* block with a jump label attached cannot be removed. */
			set_Block_removable(n, false);
		}
	} else if (is_Bad(n) || is_Jmp(n)) {
		/* ignore these */
	} else {
		/* Check for non-empty block. */
		ir_node *block = get_nodes_block(n);

		set_Block_removable(block, false);

		if (is_Proj(n)) {
			/* link Proj nodes */
			ir_node *pred = get_Proj_pred(n);
			set_irn_link(n, get_irn_link(pred));
			set_irn_link(pred, n);
		}
	}
}
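/*
 * To make the classification above concrete (illustrative, not an
 * exhaustive list of opcodes): a block containing only { Jmp } or
 * { Phi, Jmp } keeps its removable mark after this walker, whereas a
 * block containing { Add, Jmp } or { Cond, Proj, Proj } is marked
 * non-removable, because Add, Cond and Proj all reach the final else
 * branch above.
 */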
/** Returns true if pred is predecessor of block b. */
static bool is_pred_of(const ir_node *pred, const ir_node *b)
{
	int i;

	for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
		ir_node *b_pred = get_Block_cfgpred_block(b, i);
		if (b_pred == pred)
			return true;
	}
	return false;
}
/** Test whether we can optimize away pred block pos of b.
 *
 *  @param  b    A block node.
 *  @param  pos  The position of the predecessor block to judge about.
 *
 *  @returns     The number of predecessors
 *
 *  The test is rather tricky.
 *
 *  The situation is something like the following:
 *  @verbatim
 *                 if-block
 *                  /   \
 *              then-b  else-b
 *                  \   /
 *                    b
 *  @endverbatim
 *
 *  b merges the control flow of an if-then-else.  We may not remove
 *  the 'then' _and_ the 'else' block of an 'if' if there is a Phi
 *  node in b, even if both are empty.  The destruction of this Phi
 *  requires that a copy is added before the merge.  We have to
 *  keep one of the case blocks to place the copies in.
 *
 *  To perform the test for pos, we must regard predecessors before pos
 *  as already removed.
 */
static unsigned test_whether_dispensable(const ir_node *b, int pos)
{
	ir_node *pred  = get_Block_cfgpred(b, pos);
	ir_node *predb = get_nodes_block(pred);

	if (is_Bad(pred) || !is_Block_removable(predb))
		return 1;

	/* can't remove self-loops */
	if (predb == b)
		goto non_dispensable;
	if (is_unknown_jump(pred))
		goto non_dispensable;

	/* Seems to be empty. At least we detected this in collect_nodes. */
	if (get_Block_phis(b) != NULL) {
		int n_cfgpreds = get_Block_n_cfgpreds(b);
		int i;
		/* there are Phi nodes */

		/* b's pred blocks and pred's pred blocks must be pairwise disjoint.
		 * Handle all pred blocks with preds < pos as if they were already
		 * removed. */
		for (i = 0; i < pos; i++) {
			ir_node *other_pred  = get_Block_cfgpred(b, i);
			ir_node *other_predb = get_nodes_block(other_pred);
			if (is_Bad(other_pred))
				continue;
			if (is_Block_removable(other_predb)
			    && !Block_block_visited(other_predb)) {
				int j;
				for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
					ir_node *other_predpred
						= get_Block_cfgpred_block(other_predb, j);
					if (is_pred_of(other_predpred, predb))
						goto non_dispensable;
				}
			} else if (is_pred_of(other_predb, predb)) {
				goto non_dispensable;
			}
		}
		for (i = pos+1; i < n_cfgpreds; i++) {
			ir_node *other_predb = get_Block_cfgpred_block(b, i);
			if (is_pred_of(other_predb, predb))
				goto non_dispensable;
		}
	}
	/* we will not dispense already visited blocks */
	if (Block_block_visited(predb))
		return 1;
	/* if we get here, the block is dispensable, count useful preds */
	return get_irn_arity(predb);

non_dispensable:
	set_Block_removable(predb, false);
	return 1;
}
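/*
 * Worked example (illustrative), using the if-then-else diagram above:
 * let b have the predecessors [then-b, else-b], both empty, and let b
 * contain a Phi.
 *
 *   test_whether_dispensable(b, 0): then-b is dispensable; it contributes
 *   get_irn_arity(then-b) == 1 predecessor (the if-block).
 *
 *   test_whether_dispensable(b, 1): then-b counts as already removed, and
 *   the if-block (then-b's predecessor) is also a predecessor of else-b.
 *   The disjointness check therefore jumps to non_dispensable: else-b is
 *   kept, so there is still a place for the Phi's copies.
 */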
/**
 * This method merges blocks. A block is a candidate for merging if it
 * has exactly one predecessor, that predecessor ends in an unconditional
 * jump to this block, and the block contains no Phi nodes.
 */
static void merge_blocks(ir_node *b, void *env)
{
	(void) env;

	if (get_Block_n_cfgpreds(b) == 1) {
		ir_node *pred = get_Block_cfgpred(b, 0);
		if (is_Jmp(pred)) {
			ir_node *pred_block = get_nodes_block(pred);
			if (get_Block_phis(b) == NULL) {
				exchange(b, pred_block);
			}
		}
	}
}
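/*
 * Sketch of the merge (illustrative): if pred_block ends in a single Jmp
 * to b and b has no Phis, exchange(b, pred_block) redirects all users of
 * b to pred_block, effectively fusing the two blocks:
 *
 *    pred_block              pred_block
 *        |          =>      (users of b now
 *       Jmp                  use pred_block)
 *        |
 *        b
 */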
/**
 * This method removes empty blocks. A block is empty if it only contains Phi
 * and Jmp nodes.
 *
 * We first adapt Phi nodes, then Block nodes, as we need the old ins
 * of the Block to adapt the Phi nodes. We do this by computing new
 * in arrays, and then replacing the old ones. So far we compute new in arrays
 * for all nodes, not regarding whether there is a possibility for optimization.
 *
 * For each predecessor p of a Block b there are three cases:
 *  - The predecessor p is a Bad node: just skip it. The in array of b shrinks
 *    by one.
 *  - The predecessor p is empty. Remove p. All predecessors of p are now
 *    predecessors of b.
 *  - The predecessor p is a block containing useful code. Just keep p as is.
 *
 * For Phi nodes f we have to check the conditions at the Block of f.
 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
 * assumptions:
 *  -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
 *       In this case we proceed as for blocks. We remove pred_f. All
 *       predecessors of pred_f now are predecessors of f.
 *  -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
 *       too. We have to replicate f for each predecessor of the removed block.
 *       Or, in other words, the removed predecessor block has exactly one
 *       predecessor.
 *
 * Further there is a special case for self referencing blocks:
 * @verbatim
 *
 *           then_b     else_b                          then_b  else_b
 *               \      /                                  \      /
 *                \    /                                    |    /
 *                pred_b                                    |   /
 *                  |   ____                                |  /  ____
 *                  |  |    |                               |  | |    |
 *                  |  |    |     === optimized to ===>     \  | |    |
 *                 loop_b   |                               loop_b    |
 *                  |  |    |                                |  |     |
 *                  |  |____|                                |  |_____|
 *                  |                                        |
 * @endverbatim
 *
 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
 * input.
 */
static void optimize_blocks(ir_node *b, void *ctx)
{
	int i, j, k, n, max_preds, n_preds, p_preds = -1;
	ir_node *phi, *next;
	ir_node **in;
	merge_env *env = (merge_env*)ctx;

	if (get_Block_dom_depth(b) < 0) {
		/* ignore unreachable blocks */
		return;
	}

	/* Count the number of predecessors if this block is merged with pred blocks
	   that are empty. */
	max_preds = 0;
	for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
		max_preds += test_whether_dispensable(b, i);
	}
	in = XMALLOCN(ir_node*, max_preds);

	/*- Fix the Phi nodes of the current block -*/
	for (phi = get_Block_phis(b); phi != NULL; phi = next) {
		next = get_Phi_next(phi);

		/* Find the new predecessors for the Phi */
		p_preds = 0;
		for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
			ir_graph *irg   = get_irn_irg(b);
			ir_node  *predx = get_Block_cfgpred(b, i);
			ir_node  *pred;

			/* case Phi 1: maintain Bads, as somebody else is responsible to
			 * remove them */
			if (is_Bad(predx)) {
				in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
				continue;
			}

			pred = get_nodes_block(predx);

			/* case Phi 2: It's an empty block and not yet visited. */
			if (is_Block_removable(pred) && !Block_block_visited(pred)) {
				ir_node *phi_pred = get_Phi_pred(phi, i);

				for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
					ir_node *pred_pred = get_Block_cfgpred(pred, j);

					if (is_Bad(pred_pred)) {
						in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
						continue;
					}

					if (get_nodes_block(phi_pred) == pred) {
						/* case Phi 2a: the Phi operand is itself a Phi in
						 * the removed block; pull its operands up. */
						in[p_preds++] = get_Phi_pred(phi_pred, j);
					} else {
						/* case Phi 2b: replicate the operand for each
						 * predecessor of the removed block. */
						in[p_preds++] = phi_pred;
					}
				}
			} else {
				/* case Phi 3: */
				in[p_preds++] = get_Phi_pred(phi, i);
			}
		}
		assert(p_preds == max_preds);

		/* Fix the node */
		if (p_preds == 1)
			exchange(phi, in[0]);
		else
			set_irn_in(phi, p_preds, in);
		env->changed = true;
	}
	/*- This happens only if merge between loop backedge and single loop entry.
	    Moreover, it is only needed if predb is the direct dominator of b,
	    else there can be no uses of the Phi's in predb ... -*/
	for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
		ir_node *pred  = get_Block_cfgpred(b, k);
		ir_node *predb = get_nodes_block(pred);
		if (is_Bad(pred))
			continue;

		if (is_Block_removable(predb) && !Block_block_visited(predb)) {
			ir_node *next_phi;

			/* we found a predecessor block at position k that will be removed */
			for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) {
				int q_preds = 0;
				next_phi = get_Phi_next(phi);

				if (get_Block_idom(b) != predb) {
					/* predb is not the dominator. There can't be uses of
					 * pred's Phi nodes, kill them. */
					ir_graph *irg  = get_irn_irg(b);
					ir_mode  *mode = get_irn_mode(phi);
					exchange(phi, new_r_Bad(irg, mode));
				} else {
					/* predb is the direct dominator of b. There might be uses
					 * of the Phi nodes from predb in further blocks, so move
					 * this phi from the predecessor into the block b */
					set_nodes_block(phi, b);
					set_Phi_next(phi, get_Block_phis(b));
					set_Block_phis(b, phi);
					env->phis_moved = true;

					/* first, copy all 0..k-1 predecessors */
					for (i = 0; i < k; i++) {
						ir_node *predx = get_Block_cfgpred(b, i);
						ir_node *pred_block;

						if (is_Bad(predx)) {
							ir_graph *irg  = get_irn_irg(b);
							ir_mode  *mode = get_irn_mode(phi);
							in[q_preds++] = new_r_Bad(irg, mode);
							continue;
						}

						pred_block = get_nodes_block(predx);
						if (is_Block_removable(pred_block)
						    && !Block_block_visited(pred_block)) {
							int n_cfgpreds = get_Block_n_cfgpreds(pred_block);
							/* It's an empty block and not yet visited. */
							for (j = 0; j < n_cfgpreds; j++) {
								if (!is_Bad(get_Block_cfgpred(pred_block, j))) {
									in[q_preds++] = phi;
								} else {
									ir_graph *irg  = get_irn_irg(b);
									ir_mode  *mode = get_irn_mode(phi);
									in[q_preds++] = new_r_Bad(irg, mode);
								}
							}
						} else {
							in[q_preds++] = phi;
						}
					}

					/* now we are at k, copy the phi predecessors */
					pred = get_nodes_block(get_Block_cfgpred(b, k));
					for (i = 0; i < get_Phi_n_preds(phi); i++) {
						in[q_preds++] = get_Phi_pred(phi, i);
					}

					/* and now all the rest */
					for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
						pred = get_Block_cfgpred_block(b, i);

						if (is_Bad(pred)) {
							ir_graph *irg  = get_irn_irg(b);
							ir_mode  *mode = get_irn_mode(phi);
							in[q_preds++] = new_r_Bad(irg, mode);
						} else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
							/* It's an empty block and not yet visited. */
							for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
								if (!is_Bad(get_Block_cfgpred(pred, j))) {
									in[q_preds++] = phi;
								} else {
									ir_graph *irg  = get_irn_irg(b);
									ir_mode  *mode = get_irn_mode(phi);
									in[q_preds++] = new_r_Bad(irg, mode);
								}
							}
						} else {
							in[q_preds++] = phi;
						}
					}

					/* Fix the node */
					if (q_preds == 1)
						exchange(phi, in[0]);
					else
						set_irn_in(phi, q_preds, in);
					env->changed = true;

					assert(q_preds <= max_preds);
					// assert(p_preds == q_preds && "Wrong Phi Fix");
				}
			}
		}
	}
	/*- Fix the block -*/
	n_preds = 0;
	for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
		ir_node  *pred  = get_Block_cfgpred(b, i);
		ir_node  *predb = get_nodes_block(pred);
		ir_graph *irg   = get_irn_irg(pred);

		/* case 1: Bad predecessor */
		if (is_Bad(pred)) {
			in[n_preds++] = new_r_Bad(irg, mode_X);
			continue;
		}
		if (is_Block_removable(predb) && !Block_block_visited(predb)) {
			/* case 2: It's an empty block and not yet visited. */
			for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
				ir_node *predpred = get_Block_cfgpred(predb, j);

				if (is_Bad(predpred)) {
					in[n_preds++] = new_r_Bad(irg, mode_X);
					continue;
				}

				in[n_preds++] = predpred;
			}
			/* Remove block+jump as it might be kept alive. */
			exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
			exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
		} else {
			/* case 3: */
			in[n_preds++] = pred;
		}
	}
	assert(n_preds == max_preds);

	set_irn_in(b, n_preds, in);
	env->changed = true;

	/* see if phi-fix was correct */
	assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds));
	xfree(in);
}
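/*
 * Worked example for the in-array sizing above (illustrative): let b have
 * the predecessors [Bad, E, N], where E is an empty (dispensable) block
 * with two predecessors and N is a normal block. test_whether_dispensable
 * returns 1, 2 and 1 respectively, so max_preds = 4: one Bad placeholder,
 * E's two predecessors, and N itself.
 */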
/**
 * Optimize boolean Conds, where true and false jump to the same block,
 * into a Jmp. Block must contain no Phi nodes.
 *
 *        Cond
 *       /    \
 *  projA      projB   =>   Jmp     Bad
 *       \    /                \   /
 *       block                 block
 */
static bool optimize_pred_cond(ir_node *block, int i, int j)
{
	ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;

	projA = get_Block_cfgpred(block, i);
	if (!is_Proj(projA)) return false;
	projB = get_Block_cfgpred(block, j);
	if (!is_Proj(projB)) return false;
	cond  = get_Proj_pred(projA);
	if (!is_Cond(cond)) return false;

	if (cond != get_Proj_pred(projB)) return false;

	/* cond should actually be a Jmp */
	pred_block = get_nodes_block(cond);
	jmp = new_r_Jmp(pred_block);
	bad = new_r_Bad(get_irn_irg(block), mode_X);

	assert(projA != projB);
	exchange(projA, jmp);
	exchange(projB, bad);
	return true;
}
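/*
 * Note: the Bad predecessor created here is intentional; it keeps the
 * arity of `block` unchanged, so no Phi or in-array bookkeeping is needed
 * at this point. As the file header states, Bad control flow predecessors
 * are removed by the other parts of this pass.
 */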
typedef enum block_flags_t {
	BF_HAS_OPERATIONS         = 1 << 0,
	BF_HAS_PHIS               = 1 << 1,
	BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
} block_flags_t;

static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block,
                           int flag)
{
	return PTR_TO_INT(ir_nodehashmap_get(void, infos, block)) & flag;
}

static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block,
                           int flag)
{
	int data = PTR_TO_INT(ir_nodehashmap_get(void, infos, block));
	data |= flag;
	ir_nodehashmap_insert(infos, block, INT_TO_PTR(data));
}

static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block)
{
	ir_nodehashmap_remove(infos, block);
}

static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block)
{
	return get_block_flag(infos, block, BF_HAS_OPERATIONS);
}

static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block)
{
	set_block_flag(infos, block, BF_HAS_OPERATIONS);
}

static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block)
{
	return get_block_flag(infos, block, BF_HAS_PHIS);
}

static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block)
{
	set_block_flag(infos, block, BF_HAS_PHIS);
}

static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block)
{
	return get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
}

static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block)
{
	set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
}
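/*
 * The helpers above use the hashmap's value pointer as a small bit set:
 * a block's flags live in the low bits of the stored pointer, converted
 * with PTR_TO_INT/INT_TO_PTR, so no extra allocation per block is needed.
 * For example (illustrative), a block containing Phis and a Load ends up
 * mapped to INT_TO_PTR(BF_HAS_PHIS | BF_HAS_OPERATIONS), since any node
 * other than a Block, Phi, Jmp, Cond or Proj sets BF_HAS_OPERATIONS (see
 * compute_block_info below).
 */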
/**
 * Pre-Walker: fill the block info map.
 */
static void compute_block_info(ir_node *n, void *x)
{
	ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;

	if (is_Block(n)) {
		int i, max = get_Block_n_cfgpreds(n);
		for (i = 0; i < max; i++) {
			ir_node *pred = get_Block_cfgpred(n, i);
			if (is_unknown_jump(pred)) {
				set_is_unknown_jump_target(infos, n);
			}
		}
	} else if (is_Phi(n)) {
		ir_node *block = get_nodes_block(n);
		set_has_phis(infos, block);
	} else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
		/* ignore */
	} else {
		ir_node *block = get_nodes_block(n);
		set_has_operations(infos, block);
	}
}

static void clear_block_info(ir_node *block, void *x)
{
	ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
	clear_block_flag(infos, block);
}
typedef struct skip_env {
	bool             changed;
	ir_nodehashmap_t block_infos;
} skip_env;
/**
 * Post-Block-walker: Optimize useless ifs (boolean Cond nodes
 * with the same true/false target) away.
 */
static void optimize_ifs(ir_node *block, void *x)
{
	skip_env *env = (skip_env*)x;
	int i, j;
	int n_preds = get_Block_n_cfgpreds(block);

	if (has_phis(&env->block_infos, block))
		return;

	/* optimize Cond predecessors (might produce Bad predecessors) */
	for (i = 0; i < n_preds; ++i) {
		for (j = i+1; j < n_preds; ++j) {
			optimize_pred_cond(block, i, j);
		}
	}
}
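/*
 * The double loop above tries optimize_pred_cond on every unordered pair
 * of predecessors exactly once (j > i), so a Cond whose two Projs both
 * enter this block is found regardless of the order of the in-edges.
 */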
/**
 * Pre-Block walker: remove empty blocks (that contain only a Jmp)
 * which are control flow predecessors of the current block.
 */
static void remove_empty_blocks(ir_node *block, void *x)
{
	skip_env *env = (skip_env*)x;
	int i;
	int n_preds = get_Block_n_cfgpreds(block);

	for (i = 0; i < n_preds; ++i) {
		ir_node *jmp, *jmp_block;
		int n_jpreds = 0;

		jmp = get_Block_cfgpred(block, i);
		if (!is_Jmp(jmp))
			continue;
		jmp_block = get_nodes_block(jmp);
		if (jmp_block == block)
			continue; /* this infinite loop cannot be optimized any further */
		if (is_unknown_jump_target(&env->block_infos, jmp_block))
			continue; /* unknown jump target must not be optimized */
		if (has_phis(&env->block_infos, jmp_block))
			continue; /* this block contains Phis and is not skipped */
		if (Block_block_visited(jmp_block)) {
			continue;
			/* otherwise we could break the walker,
			 * if block was reached via
			 *     KeepAlive edge -> jmp_block -> A ---> block,
			 * because the walker cannot handle Id nodes.
			 */
		}

		/* jmp_block is an empty block and can be optimized! */

		n_jpreds = get_Block_n_cfgpreds(jmp_block);
		/*
		 * If the jmp block has only one predecessor this is straightforward.
		 * However, if there are more predecessors, we only handle this
		 * if block has no Phis.
		 */
		if (n_jpreds == 1) {
			ir_node *pred       = get_Block_cfgpred(jmp_block, 0);
			ir_node *pred_block = get_nodes_block(pred);
			if (has_operations(&env->block_infos, jmp_block)) {
				if (!is_Jmp(pred))
					continue; /* must not create partially dead code,
					           * especially when it is mode_M */
			}

			/* skip jmp block by rerouting its predecessor to block
			 *
			 *     A              A
			 *     |              |
			 *  jmp_block   =>    |
			 *     |              |
			 *   block          block
			 */
			exchange(jmp, pred);

			/* cleanup: jmp_block might have a Keep edge! */
			exchange(jmp_block, pred_block);
			env->changed = true;
		} else if (!has_phis(&env->block_infos, block) &&
		           !has_operations(&env->block_infos, jmp_block))
		{
			/* all predecessors can skip the jmp block, so block gets some new
			 * predecessors
			 *
			 *  A     B                 A  B
			 *   \   /                  |  |
			 * jmp_block  C  =>  Bad  C |  |
			 *      \    /          \ | | /
			 *      block            block
			 */
			ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds);
			int j;
			/* first copy the old predecessors, because the outer loop (i)
			 * still walks over them */
			for (j = 0; j < n_preds; ++j) {
				ins[j] = get_Block_cfgpred(block, j);
			}
			/* now append the new predecessors */
			for (j = 0; j < n_jpreds; ++j) {
				ir_node *pred = get_Block_cfgpred(jmp_block, j);
				ins[n_preds+j] = pred;
			}
			set_irn_in(block, n_preds+n_jpreds, ins);
			/* convert the jmp_block to Bad */
			ir_graph *irg = get_irn_irg(block);
			exchange(jmp_block, new_r_Bad(irg, mode_BB));
			exchange(jmp, new_r_Bad(irg, mode_X));
			/* let the outer loop walk over the new predecessors as well */
			n_preds += n_jpreds;
			env->changed = true;
			// TODO What if jmp_block had a KeepAlive edge?
		} else {
			/* This would involve Phis ... */
		}
	}
}
/**
 * All cfg optimizations, which do not touch Phi nodes.
 *
 * Note that this might create critical edges.
 */
static void cfgopt_ignoring_phis(ir_graph *irg)
{
	skip_env env;

	env.changed = true;
	ir_nodehashmap_init(&env.block_infos);

	while (env.changed) {
		irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos);
		env.changed = false;

		/* Remove blocks, which only consist of a Jmp */
		irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);

		/* Optimize Cond->Jmp, where then- and else-block are the same. */
		irg_block_walk_graph(irg, NULL, optimize_ifs, &env);

		if (env.changed) {
			confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
			/* clear block info, because it must be recomputed */
			irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos);
			/* Removing blocks and Conds might enable more optimizations */
			continue;
		} else {
			confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
			break;
		}
	}

	ir_nodehashmap_destroy(&env.block_infos);
}
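/*
 * Note: the loop above iterates to a fixpoint. Each round recomputes the
 * block info map from scratch, because removing a block or a Cond can
 * turn a formerly non-optimizable neighbour into a candidate in the next
 * round.
 */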
/* Optimizations of the control flow that also require changes of Phi nodes. */
void optimize_cf(ir_graph *irg)
{
	int i, j, n;
	ir_node **in = NULL;
	ir_node *end = get_irg_end(irg);
	ir_node *new_end;
	merge_env env;

	env.changed    = false;
	env.phis_moved = false;

	/* if the graph is not pinned, we cannot determine empty blocks */
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
	       "Control flow optimization needs a pinned graph");

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);

	/* First the "simple" optimizations, which do not touch Phis */
	cfgopt_ignoring_phis(irg);

	/* we use the mark flag to mark removable blocks */
	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
	                     | IR_RESOURCE_PHI_LIST);

	/*
	 * This pass collects all Phi nodes in a link list in the block
	 * nodes. Further it performs simple control flow optimizations.
	 * Finally it marks all blocks that do not contain useful
	 * computations, i.e., these blocks might be removed.
	 */
	irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL);

	/* assert due to collect_nodes:
	 * 1. removable blocks are now marked as such
	 * 2. phi lists are up to date
	 */

	/* Optimize the standard code.
	 * It walks only over block nodes and adapts these and the Phi nodes in
	 * these blocks, which it finds in a linked list computed before.
	 */
	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
	irg_block_walk_graph(irg, optimize_blocks, merge_blocks, &env);

	new_end = optimize_in_place(end);
	if (new_end != end) {
		set_irg_end(irg, new_end);
		end = new_end;
	}
	remove_End_Bads_and_doublets(end);

	ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
	                  | IR_RESOURCE_PHI_LIST);

	if (env.phis_moved) {
		/* Bad: when we moved Phis, we might have produced dead Phi nodes
		 * that are kept alive. Some other phases cannot cope with this,
		 * so kill them. */
		n = get_End_n_keepalives(end);
		if (n > 0) {
			NEW_ARR_A(ir_node *, in, n);
			assure_irg_outs(irg);

			for (i = j = 0; i < n; ++i) {
				ir_node *ka = get_End_keepalive(end, i);

				if (is_Phi(ka)) {
					int k;

					for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
						ir_node *user = get_irn_out(ka, k);

						if (user != ka && user != end) {
							/* Is it a real user or just a self loop? */
							break;
						}
					}
					if (k >= 0)
						in[j++] = ka;
				} else {
					in[j++] = ka;
				}
			}
			if (j != n) {
				set_End_keepalives(end, j, in);
				env.changed = true;
			}
		}
	}

	confirm_irg_properties(irg,
		env.changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL);
}
/* Creates an ir_graph pass for optimize_cf. */
ir_graph_pass_t *optimize_cf_pass(const char *name)
{
	return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
}
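/*
 * Usage sketch (illustrative, assuming the usual libFirm driver setup):
 *
 *     ir_graph *irg = get_current_ir_graph();
 *     optimize_cf(irg);                     // run the pass directly
 *
 * or, via the pass manager:
 *
 *     ir_graph_pass_t *pass = optimize_cf_pass(NULL);  // default name
 *
 * which wraps optimize_cf with def_graph_pass as defined above.
 */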