2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
25 * Removes Bad control flow predecessors and empty blocks. A block is empty
26 * if it contains only a Jmp node. Blocks can only be removed if they are not
27 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
28 * blocks (even if we could move the label).
32 #include "iroptimize.h"
39 #include "irgraph_t.h"
53 #include "irbackedge_t.h"
58 #include "irnodehashmap.h"
61 #include "iropt_dbg.h"
63 /** An environment for merge_blocks and collect nodes. */
64 typedef struct merge_env {
65 bool changed; /**< Set if the graph was changed. */
66 bool phis_moved; /**< Set if Phi nodes were moved. */
/* NOTE(review): the closing "} merge_env;" line is not visible in this
 * excerpt — confirm against the upstream file. */
69 /** set or reset the removable property of a block. */
70 static void set_Block_removable(ir_node *block, bool removable)
72 set_Block_mark(block, removable);
75 /** check if a block has the removable property set. */
76 static bool is_Block_removable(const ir_node *block)
78 return get_Block_mark(block);
81 /** checks if a given Cond node is a switch Cond. */
82 static bool is_switch_Cond(const ir_node *cond)
84 ir_node *sel = get_Cond_selector(cond);
85 return get_irn_mode(sel) != mode_b;
88 /** Walker: clear link fields and mark all blocks as removable. */
89 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
/* Reset the generic link field on every node before the walks below use it. */
92 set_irn_link(node, NULL);
/* NOTE(review): the "if (is_Block(node)) {" guard that must precede the next
 * two lines (implied by the "} else if" below) is not visible in this
 * excerpt — confirm against upstream. */
94 set_Block_removable(node, true);
95 set_Block_phis(node, NULL);
96 } else if (is_Phi(node)) {
97 set_Phi_next(node, NULL);
102 * Collects all Phi nodes in link list of Block.
103 * Marks all blocks "non_removable" if they contain a node other
104 * than Jmp (and Proj).
105 * Links all Proj nodes to their predecessors.
106 * Collects all switch-Conds in a list.
108 static void collect_nodes(ir_node *n, void *ctx)
/* NOTE(review): the leading "if (is_Phi(n)) {" branch header is not visible
 * in this excerpt; the first statements below clearly handle Phi nodes. */
112 /* Collect Phi nodes to compact ins along with block's ins. */
113 ir_node *block = get_nodes_block(n);
114 add_Block_phi(block, n);
115 } else if (is_Block(n)) {
116 if (get_Block_entity(n) != NULL) {
117 /* block with a jump label attached cannot be removed. */
118 set_Block_removable(n, false);
120 } else if (is_Bad(n) || is_Jmp(n)) {
/* NOTE(review): the body of this Bad/Jmp branch (orig lines 121-123) is not
 * visible in this excerpt. */
124 /* Check for non-empty block. */
125 ir_node *block = get_nodes_block(n);
127 set_Block_removable(block, false);
/* NOTE(review): the branch header guarding the Proj handling below (orig
 * lines 128-129) is not visible in this excerpt. */
130 /* link Proj nodes */
131 ir_node *pred = get_Proj_pred(n);
132 set_irn_link(n, get_irn_link(pred));
133 set_irn_link(pred, n);
138 /** Returns true if pred is predecessor of block b. */
139 static bool is_pred_of(const ir_node *pred, const ir_node *b)
/* Walk b's control-flow predecessor blocks back to front. */
143 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
144 ir_node *b_pred = get_Block_cfgpred_block(b, i);
/* NOTE(review): the comparison of b_pred against pred and the return
 * statements (orig lines 145-148) are not visible in this excerpt. */
151 /** Test whether we can optimize away pred block pos of b.
153 * @param b A block node.
154 * @param pos The position of the predecessor block to judge about.
156 * @returns The number of predecessors
158 * The test is rather tricky.
160 * The situation is something like the following:
169 * b merges the control flow of an if-then-else. We may not remove
170 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
171 * node in b, even if both are empty. The destruction of this Phi
172 * requires that a copy is added before the merge. We have to
173 * keep one of the case blocks to place the copies in.
175 * To perform the test for pos, we must regard predecessors before pos
176 * as already removed.
178 static unsigned test_whether_dispensable(const ir_node *b, int pos)
180 ir_node *pred = get_Block_cfgpred(b, pos);
181 ir_node *predb = get_nodes_block(pred);
/* A Bad or explicitly non-removable predecessor block is never dispensable. */
183 if (is_Bad(pred) || !is_Block_removable(predb))
/* NOTE(review): the branch/guard preceding this goto (orig line 187,
 * presumably the self-loop test "b == predb") is not visible. */
186 /* can't remove self-loops */
188 goto non_dispensable;
189 if (is_unknown_jump(pred))
190 goto non_dispensable;
192 /* Seems to be empty. At least we detected this in collect_nodes. */
193 if (get_Block_phis(b) != NULL) {
194 int n_cfgpreds = get_Block_n_cfgpreds(b);
196 /* there are Phi nodes */
198 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
199 * Handle all pred blocks with preds < pos as if they were already
201 for (i = 0; i < pos; i++) {
202 ir_node *other_pred = get_Block_cfgpred(b, i);
203 ir_node *other_predb = get_nodes_block(other_pred);
204 if (is_Bad(other_pred))
206 if (is_Block_removable(other_predb)
207 && !Block_block_visited(other_predb)) {
209 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
210 ir_node *other_predpred
211 = get_Block_cfgpred_block(other_predb, j);
212 if (is_pred_of(other_predpred, predb))
213 goto non_dispensable;
215 } else if (is_pred_of(other_predb, predb)) {
216 goto non_dispensable;
219 for (i = pos+1; i < n_cfgpreds; i++) {
220 ir_node *other_predb = get_Block_cfgpred_block(b, i);
221 if (is_pred_of(other_predb, predb))
222 goto non_dispensable;
225 /* we will not dispense already visited blocks */
226 if (Block_block_visited(predb))
228 /* if we get here, the block is dispensable, count useful preds */
229 return get_irn_arity(predb);
/* NOTE(review): the "non_dispensable:" label and the trailing "return 1;"
 * (orig lines 230-234) are not visible; the marking below is the
 * non-dispensable path. */
232 set_Block_removable(predb, false);
237 * This method merges blocks. A block is applicable to be merged, if it
238 * has only one predecessor with an unconditional jump to this block;
239 * and if this block does not contain any phis.
241 static void merge_blocks(ir_node *b, void *env)
245 if (get_Block_n_cfgpreds(b) == 1) {
246 ir_node* pred = get_Block_cfgpred(b, 0);
/* NOTE(review): a test that pred is an unconditional Jmp (orig line 247) is
 * implied by the doc comment but not visible in this excerpt. */
248 ir_node* pred_block = get_nodes_block(pred);
249 if (get_Block_phis(b) == NULL) {
/* Merge: replace b by its single predecessor block. */
250 exchange(b, pred_block);
/* NOTE(review): the update of the merge_env "changed" flag (implied by the
 * env parameter) is not visible in this excerpt. */
257 * This method removes empty blocks. A block is empty if it only contains Phi
260 * We first adapt Phi nodes, then Block nodes, as we need the old ins
261 * of the Block to adapt the Phi nodes. We do this by computing new
262 * in arrays, and then replacing the old ones. So far we compute new in arrays
263 * for all nodes, not regarding whether there is a possibility for optimization.
265 * For each predecessor p of a Block b there are three cases:
266 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
268 * - The predecessor p is empty. Remove p. All predecessors of p are now
270 * - The predecessor p is a block containing useful code. Just keep p as is.
272 * For Phi nodes f we have to check the conditions at the Block of f.
273 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
275 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
276 * In this case we proceed as for blocks. We remove pred_f. All
277 * predecessors of pred_f now are predecessors of f.
278 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
279 * too. We have to replicate f for each predecessor of the removed block.
280 * Or, with other words, the removed predecessor block has exactly one
283 * Further there is a special case for self referencing blocks:
286 * then_b else_b then_b else_b
292 * | | | === optimized to ===> \ | | |
299 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
300 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
/* NOTE(review): this function is heavily sampled in this excerpt; several
 * declarations, branch headers and closing braces between the lines below
 * are not visible. Tokens are left untouched. */
303 static void optimize_blocks(ir_node *b, void *ctx)
305 int i, j, k, n, max_preds, n_preds, p_preds = -1;
309 merge_env *env = (merge_env*)ctx;
311 if (get_Block_dom_depth(b) < 0) {
312 /* ignore unreachable blocks */
316 /* Count the number of predecessor if this block is merged with pred blocks
/* Upper bound on the size of the new in-array: dispensable predecessors are
 * replaced by their own predecessor count (see test_whether_dispensable). */
319 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
320 max_preds += test_whether_dispensable(b, i);
322 in = XMALLOCN(ir_node*, max_preds);
324 /*- Fix the Phi nodes of the current block -*/
325 for (phi = get_Block_phis(b); phi != NULL; phi = next) {
326 next = get_Phi_next(phi);
328 /* Find the new predecessors for the Phi */
330 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
331 ir_graph *irg = get_irn_irg(b);
332 ir_node *predx = get_Block_cfgpred(b, i);
335 /* case Phi 1: maintain Bads, as somebody else is responsible to
338 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
342 pred = get_nodes_block(predx);
344 /* case Phi 2: It's an empty block and not yet visited. */
345 if (is_Block_removable(pred) && !Block_block_visited(pred)) {
346 ir_node *phi_pred = get_Phi_pred(phi, i);
348 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
349 ir_node *pred_pred = get_Block_cfgpred(pred, j);
351 if (is_Bad(pred_pred)) {
352 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
/* Case 2a: the Phi's operand lives in the removed block — inline its jth
 * operand; case 2b: replicate the operand for each predecessor. */
356 if (get_nodes_block(phi_pred) == pred) {
358 in[p_preds++] = get_Phi_pred(phi_pred, j);
361 in[p_preds++] = phi_pred;
366 in[p_preds++] = get_Phi_pred(phi, i);
369 assert(p_preds == max_preds);
373 exchange(phi, in[0]);
375 set_irn_in(phi, p_preds, in);
380 /*- This happens only if merge between loop backedge and single loop entry.
381 Moreover, it is only needed if predb is the direct dominator of b,
382 else there can be no uses of the Phi's in predb ... -*/
383 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
384 ir_node *pred = get_Block_cfgpred(b, k);
385 ir_node *predb = get_nodes_block(pred);
389 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
392 /* we found a predecessor block at position k that will be removed */
393 for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) {
395 next_phi = get_Phi_next(phi);
397 if (get_Block_idom(b) != predb) {
398 /* predb is not the dominator. There can't be uses of
399 * pred's Phi nodes, kill them .*/
400 ir_graph *irg = get_irn_irg(b);
401 ir_mode *mode = get_irn_mode(phi);
402 exchange(phi, new_r_Bad(irg, mode));
404 /* predb is the direct dominator of b. There might be uses
405 * of the Phi nodes from predb in further block, so move
406 * this phi from the predecessor into the block b */
407 set_nodes_block(phi, b);
408 set_Phi_next(phi, get_Block_phis(b));
409 set_Block_phis(b, phi);
410 env->phis_moved = true;
412 /* first, copy all 0..k-1 predecessors */
413 for (i = 0; i < k; i++) {
414 ir_node *predx = get_Block_cfgpred(b, i);
418 ir_graph *irg = get_irn_irg(b);
419 ir_mode *mode = get_irn_mode(phi);
420 in[q_preds++] = new_r_Bad(irg, mode);
423 pred_block = get_nodes_block(predx);
424 if (is_Block_removable(pred_block)
425 && !Block_block_visited(pred_block)) {
426 int n_cfgpreds = get_Block_n_cfgpreds(pred_block);
427 /* It's an empty block and not yet visited. */
428 for (j = 0; j < n_cfgpreds; j++) {
429 if (!is_Bad(get_Block_cfgpred(pred_block, j))) {
432 ir_graph *irg = get_irn_irg(b);
433 ir_mode *mode = get_irn_mode(phi);
434 in[q_preds++] = new_r_Bad(irg, mode);
442 /* now we are at k, copy the phi predecessors */
443 pred = get_nodes_block(get_Block_cfgpred(b, k));
444 for (i = 0; i < get_Phi_n_preds(phi); i++) {
445 in[q_preds++] = get_Phi_pred(phi, i);
448 /* and now all the rest */
449 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
450 pred = get_Block_cfgpred_block(b, i);
453 ir_graph *irg = get_irn_irg(b);
454 ir_mode *mode = get_irn_mode(phi);
455 in[q_preds++] = new_r_Bad(irg, mode);
456 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
457 /* It's an empty block and not yet visited. */
458 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
459 if (! is_Bad(get_Block_cfgpred(pred, j))) {
462 ir_graph *irg = get_irn_irg(b);
463 ir_mode *mode = get_irn_mode(phi);
464 in[q_preds++] = new_r_Bad(irg, mode);
474 exchange(phi, in[0]);
476 set_irn_in(phi, q_preds, in);
479 assert(q_preds <= max_preds);
480 // assert(p_preds == q_preds && "Wrong Phi Fix");
486 /*- Fix the block -*/
488 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
489 ir_node *pred = get_Block_cfgpred(b, i);
490 ir_node *predb = get_nodes_block(pred);
491 ir_graph *irg = get_irn_irg(pred);
493 /* case 1: Bad predecessor */
495 in[n_preds++] = new_r_Bad(irg, mode_X);
498 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
499 /* case 2: It's an empty block and not yet visited. */
500 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
501 ir_node *predpred = get_Block_cfgpred(predb, j);
503 if (is_Bad(predpred)) {
504 in[n_preds++] = new_r_Bad(irg, mode_X);
508 in[n_preds++] = predpred;
510 /* Remove block+jump as it might be kept alive. */
511 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
512 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
515 in[n_preds++] = pred;
518 assert(n_preds == max_preds);
520 set_irn_in(b, n_preds, in);
523 /* see if phi-fix was correct */
524 assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds));
529 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
530 * Block must contain no Phi nodes.
534 * projA projB => Jmp Bad
538 static bool optimize_pred_cond(ir_node *block, int i, int j)
540 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
/* Both predecessors at i and j must be Projs of one and the same boolean
 * Cond; otherwise nothing to do. */
543 projA = get_Block_cfgpred(block, i);
544 if (!is_Proj(projA)) return false;
545 projB = get_Block_cfgpred(block, j);
546 if (!is_Proj(projB)) return false;
547 cond = get_Proj_pred(projA);
548 if (!is_Cond(cond)) return false;
550 if (cond != get_Proj_pred(projB)) return false;
551 if (is_switch_Cond(cond)) return false;
553 /* cond should actually be a Jmp */
554 pred_block = get_nodes_block(cond);
555 jmp = new_r_Jmp(pred_block);
556 bad = new_r_Bad(get_irn_irg(block), mode_X);
558 assert(projA != projB);
559 exchange(projA, jmp);
560 exchange(projB, bad);
/* NOTE(review): the trailing "return true;" (orig line 561) is not visible
 * in this excerpt. */
/* Per-block flags cached in an ir_nodehashmap during cfgopt_ignoring_phis. */
564 typedef enum block_flags_t {
565 BF_HAS_OPERATIONS = 1 << 0,
566 BF_HAS_PHIS = 1 << 1,
567 BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
/* NOTE(review): the closing "} block_flags_t;" line is not visible in this
 * excerpt. */
/* Tests a single block_flags_t bit; the flag word is packed into the value
 * pointer stored in the hashmap. */
570 static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block,
/* NOTE(review): the parameter continuation (presumably "int flag)") and the
 * opening brace are not visible in this excerpt. */
573 return PTR_TO_INT(ir_nodehashmap_get(void, infos, block)) & flag;
/* Sets a single block_flags_t bit for block in the hashmap. */
576 static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block,
/* NOTE(review): the parameter continuation and the statement OR-ing the flag
 * into data (orig line 580, presumably "data |= flag;") are not visible in
 * this excerpt. */
579 int data = PTR_TO_INT(ir_nodehashmap_get(void, infos, block));
581 ir_nodehashmap_insert(infos, block, INT_TO_PTR(data));
584 static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block)
586 ir_nodehashmap_remove(infos, block);
589 static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block)
591 return get_block_flag(infos, block, BF_HAS_OPERATIONS);
594 static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block)
596 set_block_flag(infos, block, BF_HAS_OPERATIONS);
599 static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block)
601 return get_block_flag(infos, block, BF_HAS_PHIS);
604 static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block)
606 set_block_flag(infos, block, BF_HAS_PHIS);
609 static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block)
611 return get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
614 static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block)
616 set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
620 * Pre-Walker: fill block info information.
622 static void compute_block_info(ir_node *n, void *x)
624 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
/* NOTE(review): the "if (is_Block(n)) {" branch header guarding the loop
 * below (implied by the "} else if" chain) is not visible in this excerpt. */
627 int i, max = get_Block_n_cfgpreds(n);
628 for (i=0; i<max; i++) {
629 ir_node *pred = get_Block_cfgpred(n,i);
630 if (is_unknown_jump(pred)) {
631 set_is_unknown_jump_target(infos, n);
634 } else if (is_Phi(n)) {
635 ir_node *block = get_nodes_block(n);
636 set_has_phis(infos, block);
637 } else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
/* NOTE(review): orig lines 638-639 are not visible; presumably this branch
 * ignores pure control-flow nodes and a trailing "} else {" precedes the
 * has-operations marking below — confirm against upstream. */
640 ir_node *block = get_nodes_block(n);
641 set_has_operations(infos, block);
645 static void clear_block_info(ir_node *block, void *x)
647 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
648 clear_block_flag(infos, block);
/* Environment for the Phi-free cfg optimizations (cfgopt_ignoring_phis). */
651 typedef struct skip_env {
/* NOTE(review): a field at orig line 652 — used as "env.changed" in
 * cfgopt_ignoring_phis — and the closing "} skip_env;" are not visible in
 * this excerpt. */
653 ir_nodehashmap_t block_infos;
657 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
658 * with same true/false target) away.
660 static void optimize_ifs(ir_node *block, void *x)
662 skip_env *env = (skip_env*)x;
664 int n_preds = get_Block_n_cfgpreds(block);
/* Blocks with Phis are skipped: merging the two Proj edges would change the
 * Phi semantics. NOTE(review): the early "return;" following this guard
 * (orig line 667) is not visible in this excerpt. */
666 if (has_phis(&env->block_infos, block))
669 /* optimize Cond predecessors (might produce Bad predecessors) */
/* Check every unordered pair (i, j) of predecessor positions. */
670 for (i = 0; i < n_preds; ++i) {
671 for (j = i+1; j < n_preds; ++j) {
672 optimize_pred_cond(block, i, j);
678 * Pre-Block walker: remove empty blocks (only contain a Jmp)
679 * that are control flow predecessors of the current block.
/* NOTE(review): this function is heavily sampled in this excerpt; several
 * guards, declarations and closing braces between the lines below are not
 * visible. Tokens are left untouched. */
681 static void remove_empty_blocks(ir_node *block, void *x)
683 skip_env *env = (skip_env*)x;
685 int n_preds = get_Block_n_cfgpreds(block);
687 for (i = 0; i < n_preds; ++i) {
688 ir_node *jmp, *jmp_block;
691 jmp = get_Block_cfgpred(block, i);
694 jmp_block = get_nodes_block(jmp);
695 if (jmp_block == block)
696 continue; /* this infinite loop cannot be optimized any further */
697 if (is_unknown_jump_target(&env->block_infos, jmp_block))
698 continue; /* unknown jump target must not be optimized */
699 if (has_phis(&env->block_infos,jmp_block))
700 continue; /* this block contains Phis and is not skipped */
701 if (Block_block_visited(jmp_block)) {
703 /* otherwise we could break the walker,
704 * if block was reached via
705 * KeepAlive edge -> jmp_block -> A ---> block,
706 * because the walker cannot handle Id nodes.
716 /* jmp_block is an empty block and can be optimized! */
718 n_jpreds = get_Block_n_cfgpreds(jmp_block);
720 * If the jmp block has only one predecessor this is straightforward.
721 * However, if there are more predecessors, we only handle this,
722 * if block has no Phis.
/* Single-predecessor case: merge jmp_block into its predecessor block. */
725 ir_node *pred = get_Block_cfgpred(jmp_block, 0);
726 ir_node *pred_block = get_nodes_block(pred);
727 if (has_operations(&env->block_infos,jmp_block)) {
728 if (get_irg_start_block(get_irn_irg(pred_block)) == pred_block)
729 continue; /* must not merge operations into start block */
730 continue; /* must not create partially dead code, especially when it is mode_M */
734 /* skip jmp block by rerouting its predecessor to block
744 /* cleanup: jmp_block might have a Keep edge! */
745 exchange(jmp_block, pred_block);
747 } else if ( !has_phis(&env->block_infos, block) &&
748 !has_operations(&env->block_infos,jmp_block))
750 /* all predecessors can skip the jmp block, so block gets some new
755 * jmp_block C => Bad C | |
759 ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds);
761 /* first copy the old predecessors, because the outer loop (i)
762 * still walks over them */
763 for (j = 0; j < n_preds; ++j) {
764 ins[j] = get_Block_cfgpred(block, j);
766 /* now append the new predecessors */
767 for (j = 0; j < n_jpreds; ++j) {
768 ir_node *pred = get_Block_cfgpred(jmp_block, j);
769 ins[n_preds+j] = pred;
771 set_irn_in(block, n_preds+n_jpreds, ins);
772 /* convert the jmp_block to Bad */
773 ir_graph *irg = get_irn_irg(block);
774 exchange(jmp_block, new_r_Bad(irg, mode_BB));
775 exchange(jmp, new_r_Bad(irg, mode_X));
776 /* let the outer loop walk over the new predecessors as well */
779 // TODO What if jmp_block had a KeepAlive edge?
781 /* This would involve Phis ... */
787 * All cfg optimizations, which do not touch Phi nodes.
789 * Note that this might create critical edges.
791 static void cfgopt_ignoring_phis(ir_graph *irg)
/* NOTE(review): the declaration and initialization of the local skip_env
 * "env" (orig lines 792-795) are not visible in this excerpt. */
796 ir_nodehashmap_init(&env.block_infos);
/* Fixpoint iteration: repeat until a full round changes nothing. */
798 while (env.changed) {
799 irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos);
802 /* Remove blocks, which only consist of a Jmp */
803 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
805 /* Optimize Cond->Jmp, where then- and else-block are the same. */
806 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
809 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
810 /* clear block info, because it must be recomputed */
811 irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos);
812 /* Removing blocks and Conds might enable more optimizations */
815 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
820 ir_nodehashmap_destroy(&env.block_infos);
823 /* Optimizations of the control flow that also require changes of Phi nodes. */
/* NOTE(review): this function is heavily sampled in this excerpt; local
 * declarations (merge_env env, new_end, loop indices, the in array) and
 * several braces are not visible. Tokens are left untouched. */
824 void optimize_cf(ir_graph *irg)
828 ir_node *end = get_irg_end(irg);
833 env.phis_moved = false;
835 /* if the graph is not pinned, we cannot determine empty blocks */
836 assert(get_irg_pinned(irg) != op_pin_state_floats &&
837 "Control flow optimization need a pinned graph");
839 assure_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
841 /* First the "simple" optimizations, which do not touch Phis */
842 cfgopt_ignoring_phis(irg);
844 /* we use the mark flag to mark removable blocks */
845 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
846 | IR_RESOURCE_PHI_LIST);
849 * This pass collects all Phi nodes in a link list in the block
850 * nodes. Further it performs simple control flow optimizations.
851 * Finally it marks all blocks that do not contain useful
852 * computations, i.e., these blocks might be removed.
854 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL);
856 /* assert due to collect_nodes:
857 * 1. removable blocks are now marked as such
858 * 2. phi lists are up to date
861 /* Optimize the standard code.
862 * It walks only over block nodes and adapts these and the Phi nodes in
863 * these blocks, which it finds in a linked list computed before.
865 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
866 irg_block_walk_graph(irg, optimize_blocks, merge_blocks, &env);
868 new_end = optimize_in_place(end);
869 if (new_end != end) {
870 set_irg_end(irg, new_end);
873 remove_End_Bads_and_doublets(end);
875 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
876 | IR_RESOURCE_PHI_LIST);
878 if (env.phis_moved) {
879 /* Bad: when we moved Phi's, we might produce dead Phi nodes
881 Some other phases cannot copy with this, so kill them.
/* Rebuild the End keepalive list, dropping Phis that lost all real users. */
883 n = get_End_n_keepalives(end);
885 NEW_ARR_A(ir_node *, in, n);
886 assure_irg_outs(irg);
888 for (i = j = 0; i < n; ++i) {
889 ir_node *ka = get_End_keepalive(end, i);
894 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
895 ir_node *user = get_irn_out(ka, k);
897 if (user != ka && user != end) {
898 /* Is it a real user or just a self loop ? */
908 set_End_keepalives(end, j, in);
914 confirm_irg_properties(irg,
915 env.changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL);
918 /* Creates an ir_graph pass for optimize_cf. */
919 ir_graph_pass_t *optimize_cf_pass(const char *name)
921 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);