2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
25 * Removes Bad control flow predecessors and empty blocks. A block is empty
26 * if it contains only a Jmp node. Blocks can only be removed if they are not
27 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
28 * blocks (even if we could move the label).
32 #include "iroptimize.h"
39 #include "irgraph_t.h"
49 #include "opt_manage.h"
54 #include "irbackedge_t.h"
59 #include "irnodehashmap.h"
62 #include "iropt_dbg.h"
/** An environment passed to merge_blocks and the collect walkers. */
typedef struct merge_env {
	bool changed;    /**< Set if the graph was changed. */
	bool phis_moved; /**< Set if Phi nodes were moved from a predecessor block. */
} merge_env;
70 /** set or reset the removable property of a block. */
71 static void set_Block_removable(ir_node *block, bool removable)
73 set_Block_mark(block, removable);
76 /** check if a block has the removable property set. */
77 static bool is_Block_removable(const ir_node *block)
79 return get_Block_mark(block);
82 /** checks if a given Cond node is a switch Cond. */
83 static bool is_switch_Cond(const ir_node *cond)
85 ir_node *sel = get_Cond_selector(cond);
86 return get_irn_mode(sel) != mode_b;
89 /** Walker: clear link fields and mark all blocks as removable. */
90 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
93 set_irn_link(node, NULL);
95 set_Block_removable(node, true);
96 set_Block_phis(node, NULL);
97 } else if (is_Phi(node)) {
98 set_Phi_next(node, NULL);
103 * Collects all Phi nodes in link list of Block.
104 * Marks all blocks "non_removable" if they contain a node other
105 * than Jmp (and Proj).
106 * Links all Proj nodes to their predecessors.
107 * Collects all switch-Conds in a list.
109 static void collect_nodes(ir_node *n, void *ctx)
113 /* Collect Phi nodes to compact ins along with block's ins. */
114 ir_node *block = get_nodes_block(n);
115 add_Block_phi(block, n);
116 } else if (is_Block(n)) {
117 if (get_Block_entity(n) != NULL) {
118 /* block with a jump label attached cannot be removed. */
119 set_Block_removable(n, false);
121 } else if (is_Bad(n) || is_Jmp(n)) {
125 /* Check for non-empty block. */
126 ir_node *block = get_nodes_block(n);
128 set_Block_removable(block, false);
131 /* link Proj nodes */
132 ir_node *pred = get_Proj_pred(n);
133 set_irn_link(n, get_irn_link(pred));
134 set_irn_link(pred, n);
139 /** Returns true if pred is predecessor of block b. */
140 static bool is_pred_of(const ir_node *pred, const ir_node *b)
144 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
145 ir_node *b_pred = get_Block_cfgpred_block(b, i);
152 /** Test whether we can optimize away pred block pos of b.
154 * @param b A block node.
155 * @param pos The position of the predecessor block to judge about.
157 * @returns The number of predecessors
159 * The test is rather tricky.
161 * The situation is something like the following:
170 * b merges the control flow of an if-then-else. We may not remove
171 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
172 * node in b, even if both are empty. The destruction of this Phi
173 * requires that a copy is added before the merge. We have to
174 * keep one of the case blocks to place the copies in.
176 * To perform the test for pos, we must regard predecessors before pos
177 * as already removed.
179 static unsigned test_whether_dispensable(const ir_node *b, int pos)
181 ir_node *pred = get_Block_cfgpred(b, pos);
182 ir_node *predb = get_nodes_block(pred);
184 if (is_Bad(pred) || !is_Block_removable(predb))
187 /* can't remove self-loops */
189 goto non_dispensable;
190 if (is_unknown_jump(pred))
191 goto non_dispensable;
193 /* Seems to be empty. At least we detected this in collect_nodes. */
194 if (get_Block_phis(b) != NULL) {
195 int n_cfgpreds = get_Block_n_cfgpreds(b);
197 /* there are Phi nodes */
199 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
200 * Handle all pred blocks with preds < pos as if they were already
202 for (i = 0; i < pos; i++) {
203 ir_node *other_pred = get_Block_cfgpred(b, i);
204 ir_node *other_predb = get_nodes_block(other_pred);
205 if (is_Bad(other_pred))
207 if (is_Block_removable(other_predb)
208 && !Block_block_visited(other_predb)) {
210 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
211 ir_node *other_predpred
212 = get_Block_cfgpred_block(other_predb, j);
213 if (is_pred_of(other_predpred, predb))
214 goto non_dispensable;
216 } else if (is_pred_of(other_predb, predb)) {
217 goto non_dispensable;
220 for (i = pos+1; i < n_cfgpreds; i++) {
221 ir_node *other_predb = get_Block_cfgpred_block(b, i);
222 if (is_pred_of(other_predb, predb))
223 goto non_dispensable;
226 /* we will not dispense already visited blocks */
227 if (Block_block_visited(predb))
229 /* if we get here, the block is dispensable, count useful preds */
230 return get_irn_arity(predb);
233 set_Block_removable(predb, false);
238 * This method merges blocks. A block is applicable to be merged, if it
239 * has only one predecessor with an unconditional jump to this block;
240 * and if this block does not contain any phis.
242 static void merge_blocks(ir_node *b, void *env)
246 if (get_Block_n_cfgpreds(b) == 1) {
247 ir_node* pred = get_Block_cfgpred(b, 0);
249 ir_node* pred_block = get_nodes_block(pred);
250 if (get_Block_phis(b) == NULL) {
251 exchange(b, pred_block);
258 * This method removes empty blocks. A block is empty if it only contains Phi
261 * We first adapt Phi nodes, then Block nodes, as we need the old ins
262 * of the Block to adapt the Phi nodes. We do this by computing new
263 * in arrays, and then replacing the old ones. So far we compute new in arrays
264 * for all nodes, not regarding whether there is a possibility for optimization.
266 * For each predecessor p of a Block b there are three cases:
267 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
269 * - The predecessor p is empty. Remove p. All predecessors of p are now
271 * - The predecessor p is a block containing useful code. Just keep p as is.
273 * For Phi nodes f we have to check the conditions at the Block of f.
274 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
276 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
277 * In this case we proceed as for blocks. We remove pred_f. All
278 * predecessors of pred_f now are predecessors of f.
279 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
280 * too. We have to replicate f for each predecessor of the removed block.
281 * Or, with other words, the removed predecessor block has exactly one
284 * Further there is a special case for self referencing blocks:
287 * then_b else_b then_b else_b
293 * | | | === optimized to ===> \ | | |
300 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
301 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
304 static void optimize_blocks(ir_node *b, void *ctx)
306 int i, j, k, n, max_preds, n_preds, p_preds = -1;
310 merge_env *env = (merge_env*)ctx;
312 if (get_Block_dom_depth(b) < 0) {
313 /* ignore unreachable blocks */
317 /* Count the number of predecessor if this block is merged with pred blocks
320 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
321 max_preds += test_whether_dispensable(b, i);
323 in = XMALLOCN(ir_node*, max_preds);
325 /*- Fix the Phi nodes of the current block -*/
326 for (phi = get_Block_phis(b); phi != NULL; phi = next) {
327 next = get_Phi_next(phi);
329 /* Find the new predecessors for the Phi */
331 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
332 ir_graph *irg = get_irn_irg(b);
333 ir_node *predx = get_Block_cfgpred(b, i);
336 /* case Phi 1: maintain Bads, as somebody else is responsible to
339 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
343 pred = get_nodes_block(predx);
345 /* case Phi 2: It's an empty block and not yet visited. */
346 if (is_Block_removable(pred) && !Block_block_visited(pred)) {
347 ir_node *phi_pred = get_Phi_pred(phi, i);
349 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
350 ir_node *pred_pred = get_Block_cfgpred(pred, j);
352 if (is_Bad(pred_pred)) {
353 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
357 if (get_nodes_block(phi_pred) == pred) {
359 assert(is_Phi(phi_pred)); /* Block is empty!! */
361 in[p_preds++] = get_Phi_pred(phi_pred, j);
364 in[p_preds++] = phi_pred;
369 in[p_preds++] = get_Phi_pred(phi, i);
372 assert(p_preds == max_preds);
376 exchange(phi, in[0]);
378 set_irn_in(phi, p_preds, in);
383 /*- This happens only if merge between loop backedge and single loop entry.
384 Moreover, it is only needed if predb is the direct dominator of b,
385 else there can be no uses of the Phi's in predb ... -*/
386 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
387 ir_node *pred = get_Block_cfgpred(b, k);
388 ir_node *predb = get_nodes_block(pred);
392 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
395 /* we found a predecessor block at position k that will be removed */
396 for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) {
398 next_phi = get_Phi_next(phi);
400 if (get_Block_idom(b) != predb) {
401 /* predb is not the dominator. There can't be uses of
402 * pred's Phi nodes, kill them .*/
403 ir_graph *irg = get_irn_irg(b);
404 ir_mode *mode = get_irn_mode(phi);
405 exchange(phi, new_r_Bad(irg, mode));
407 /* predb is the direct dominator of b. There might be uses
408 * of the Phi nodes from predb in further block, so move
409 * this phi from the predecessor into the block b */
410 set_nodes_block(phi, b);
411 set_Phi_next(phi, get_Block_phis(b));
412 set_Block_phis(b, phi);
413 env->phis_moved = true;
415 /* first, copy all 0..k-1 predecessors */
416 for (i = 0; i < k; i++) {
417 ir_node *predx = get_Block_cfgpred(b, i);
421 ir_graph *irg = get_irn_irg(b);
422 ir_mode *mode = get_irn_mode(phi);
423 in[q_preds++] = new_r_Bad(irg, mode);
426 pred_block = get_nodes_block(predx);
427 if (is_Block_removable(pred_block)
428 && !Block_block_visited(pred_block)) {
429 int n_cfgpreds = get_Block_n_cfgpreds(pred_block);
430 /* It's an empty block and not yet visited. */
431 for (j = 0; j < n_cfgpreds; j++) {
432 if (!is_Bad(get_Block_cfgpred(pred_block, j))) {
435 ir_graph *irg = get_irn_irg(b);
436 ir_mode *mode = get_irn_mode(phi);
437 in[q_preds++] = new_r_Bad(irg, mode);
445 /* now we are at k, copy the phi predecessors */
446 pred = get_nodes_block(get_Block_cfgpred(b, k));
447 for (i = 0; i < get_Phi_n_preds(phi); i++) {
448 in[q_preds++] = get_Phi_pred(phi, i);
451 /* and now all the rest */
452 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
453 pred = get_Block_cfgpred_block(b, i);
456 ir_graph *irg = get_irn_irg(b);
457 ir_mode *mode = get_irn_mode(phi);
458 in[q_preds++] = new_r_Bad(irg, mode);
459 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
460 /* It's an empty block and not yet visited. */
461 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
462 if (! is_Bad(get_Block_cfgpred(pred, j))) {
465 ir_graph *irg = get_irn_irg(b);
466 ir_mode *mode = get_irn_mode(phi);
467 in[q_preds++] = new_r_Bad(irg, mode);
477 exchange(phi, in[0]);
479 set_irn_in(phi, q_preds, in);
482 assert(q_preds <= max_preds);
483 // assert(p_preds == q_preds && "Wrong Phi Fix");
489 /*- Fix the block -*/
491 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
492 ir_node *pred = get_Block_cfgpred(b, i);
493 ir_node *predb = get_nodes_block(pred);
494 ir_graph *irg = get_irn_irg(pred);
496 /* case 1: Bad predecessor */
498 in[n_preds++] = new_r_Bad(irg, mode_X);
501 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
502 /* case 2: It's an empty block and not yet visited. */
503 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
504 ir_node *predpred = get_Block_cfgpred(predb, j);
506 if (is_Bad(predpred)) {
507 in[n_preds++] = new_r_Bad(irg, mode_X);
511 in[n_preds++] = predpred;
513 /* Remove block+jump as it might be kept alive. */
514 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
515 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
518 in[n_preds++] = pred;
521 assert(n_preds == max_preds);
523 set_irn_in(b, n_preds, in);
526 /* see if phi-fix was correct */
527 assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds));
532 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
533 * Block must contain no Phi nodes.
537 * projA projB => Jmp Bad
541 static bool optimize_pred_cond(ir_node *block, int i, int j)
543 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
546 projA = get_Block_cfgpred(block, i);
547 if (!is_Proj(projA)) return false;
548 projB = get_Block_cfgpred(block, j);
549 if (!is_Proj(projB)) return false;
550 cond = get_Proj_pred(projA);
551 if (!is_Cond(cond)) return false;
553 if (cond != get_Proj_pred(projB)) return false;
554 if (is_switch_Cond(cond)) return false;
556 /* cond should actually be a Jmp */
557 pred_block = get_nodes_block(cond);
558 jmp = new_r_Jmp(pred_block);
559 bad = new_r_Bad(get_irn_irg(block), mode_X);
561 assert(projA != projB);
562 exchange(projA, jmp);
563 exchange(projB, bad);
/** Per-block flags stored as bits in the block-info hashmap value. */
typedef enum block_flags_t {
	BF_HAS_OPERATIONS         = 1 << 0, /**< block contains real operations */
	BF_HAS_PHIS               = 1 << 1, /**< block contains Phi nodes */
	BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2, /**< block is target of an unknown jump */
} block_flags_t;
573 static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block,
576 return PTR_TO_INT(ir_nodehashmap_get(infos, block)) & flag;
579 static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block,
582 int data = PTR_TO_INT(ir_nodehashmap_get(infos, block));
584 ir_nodehashmap_insert(infos, block, INT_TO_PTR(data));
587 static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block)
589 ir_nodehashmap_remove(infos, block);
592 static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block)
594 return get_block_flag(infos, block, BF_HAS_OPERATIONS);
597 static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block)
599 set_block_flag(infos, block, BF_HAS_OPERATIONS);
602 static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block)
604 return get_block_flag(infos, block, BF_HAS_PHIS);
607 static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block)
609 set_block_flag(infos, block, BF_HAS_PHIS);
612 static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block)
614 return get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
617 static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block)
619 set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
623 * Pre-Walker: fill block info information.
625 static void compute_block_info(ir_node *n, void *x)
627 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
630 int i, max = get_Block_n_cfgpreds(n);
631 for (i=0; i<max; i++) {
632 ir_node *pred = get_Block_cfgpred(n,i);
633 if (is_unknown_jump(pred)) {
634 set_is_unknown_jump_target(infos, n);
637 } else if (is_Phi(n)) {
638 ir_node *block = get_nodes_block(n);
639 set_has_phis(infos, block);
640 } else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
643 ir_node *block = get_nodes_block(n);
644 set_has_operations(infos, block);
648 static void clear_block_info(ir_node *block, void *x)
650 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
651 clear_block_flag(infos, block);
654 typedef struct skip_env {
656 ir_nodehashmap_t block_infos;
660 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
661 * with same true/false target) away.
663 static void optimize_ifs(ir_node *block, void *x)
665 skip_env *env = (skip_env*)x;
667 int n_preds = get_Block_n_cfgpreds(block);
669 if (has_phis(&env->block_infos, block))
672 /* optimize Cond predecessors (might produce Bad predecessors) */
673 for (i = 0; i < n_preds; ++i) {
674 for (j = i+1; j < n_preds; ++j) {
675 optimize_pred_cond(block, i, j);
681 * Pre-Block walker: remove empty blocks (only contain a Jmp)
682 * that are control flow predecessors of the current block.
684 static void remove_empty_blocks(ir_node *block, void *x)
686 skip_env *env = (skip_env*)x;
688 int n_preds = get_Block_n_cfgpreds(block);
690 for (i = 0; i < n_preds; ++i) {
691 ir_node *jmp, *jmp_block;
694 jmp = get_Block_cfgpred(block, i);
697 jmp_block = get_nodes_block(jmp);
698 if (jmp_block == block)
699 continue; /* this infinite loop cannot be optimized any further */
700 if (is_unknown_jump_target(&env->block_infos, jmp_block))
701 continue; /* unknown jump target must not be optimized */
702 if (has_phis(&env->block_infos,jmp_block))
703 continue; /* this block contains Phis and is not skipped */
704 if (Block_block_visited(jmp_block)) {
706 /* otherwise we could break the walker,
707 * if block was reached via
708 * KeepAlive edge -> jmp_block -> A ---> block,
709 * because the walker cannot handle Id nodes.
719 /* jmp_block is an empty block and can be optimized! */
721 n_jpreds = get_Block_n_cfgpreds(jmp_block);
723 * If the jmp block has only one predecessor this is straightforward.
724 * However, if there are more predecessors, we only handle this,
725 * if block has no Phis.
728 ir_node *pred = get_Block_cfgpred(jmp_block, 0);
729 ir_node *pred_block = get_nodes_block(pred);
730 if (has_operations(&env->block_infos,jmp_block)) {
731 if (get_irg_start_block(get_irn_irg(pred_block)) == pred_block)
732 continue; /* must not merge operations into start block */
734 continue; /* must not create partially dead code, especially when it is mode_M */
737 /* skip jmp block by rerouting its predecessor to block
747 /* cleanup: jmp_block might have a Keep edge! */
748 exchange(jmp_block, pred_block);
750 } else if ( !has_phis(&env->block_infos, block) &&
751 !has_operations(&env->block_infos,jmp_block))
753 /* all predecessors can skip the jmp block, so block gets some new
758 * jmp_block C => Bad C | |
762 ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds);
764 /* first copy the old predecessors, because the outer loop (i)
765 * still walks over them */
766 for (j = 0; j < n_preds; ++j) {
767 ins[j] = get_Block_cfgpred(block, j);
769 /* now append the new predecessors */
770 for (j = 0; j < n_jpreds; ++j) {
771 ir_node *pred = get_Block_cfgpred(jmp_block, j);
772 ins[n_preds+j] = pred;
774 set_irn_in(block, n_preds+n_jpreds, ins);
775 /* convert the jmp_block to Bad */
776 ir_graph *irg = get_irn_irg(block);
777 exchange(jmp_block, new_r_Bad(irg, mode_BB));
778 exchange(jmp, new_r_Bad(irg, mode_X));
779 /* let the outer loop walk over the new predecessors as well */
782 // TODO What if jmp_block had a KeepAlive edge?
784 /* This would involve Phis ... */
790 * All cfg optimizations, which do not touch Phi nodes.
792 * Note that this might create critical edges.
794 static void cfgopt_ignoring_phis(ir_graph *irg)
799 ir_nodehashmap_init(&env.block_infos);
801 while (env.changed) {
802 irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos);
805 /* Remove blocks, which only consist of a Jmp */
806 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
808 /* Optimize Cond->Jmp, where then- and else-block are the same. */
809 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
812 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
813 /* clear block info, because it must be recomputed */
814 irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos);
815 /* Removing blocks and Conds might enable more optimizations */
822 ir_nodehashmap_destroy(&env.block_infos);
/* Optimizations of the control flow that also require changes of Phi nodes. */
/* NOTE(review): this fragment is a lossy extraction — many lines (braces,
 * declarations, loop structure of the retry loop, keepalive filtering and the
 * return statement) were dropped and could not be reconstructed safely.
 * Restore from the original cfopt.c before compiling. */
826 static ir_graph_state_t do_cfopt(ir_graph *irg)
830 ir_node *end = get_irg_end(irg);
/* env is a merge_env; changed/phis_moved track what the walkers did */
835 env.phis_moved = false;
837 assert(get_irg_phase_state(irg) != phase_building);
839 /* if the graph is not pinned, we cannot determine empty blocks */
840 assert(get_irg_pinned(irg) != op_pin_state_floats &&
841 "Control flow optimization need a pinned graph");
842 edges_deactivate(irg);
845 /* First the "simple" optimizations, which do not touch Phis */
846 cfgopt_ignoring_phis(irg);
848 /* we use the mark flag to mark removable blocks */
849 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
850 | IR_RESOURCE_PHI_LIST);
852 /* The switch Cond optimization might expose unreachable code, so we loop */
854 bool changed = false;
859 * This pass collects all Phi nodes in a link list in the block
860 * nodes. Further it performs simple control flow optimizations.
861 * Finally it marks all blocks that do not contain useful
862 * computations, i.e., these blocks might be removed.
864 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL);
/* graph will be modified below, so invalidate cached analysis info */
869 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
870 | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS
871 | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
874 /* assert due to collect_nodes:
875 * 1. removable blocks are now marked as such
876 * 2. phi lists are up to date
879 /* Optimize the standard code.
880 * It walks only over block nodes and adapts these and the Phi nodes in these
881 * blocks, which it finds in a linked list computed before.
884 irg_block_walk_graph(irg, optimize_blocks, merge_blocks, &env);
/* the End node may have become optimizable after block removal */
886 new_end = optimize_in_place(end);
887 if (new_end != end) {
888 set_irg_end(irg, new_end);
891 remove_End_Bads_and_doublets(end);
893 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
894 | IR_RESOURCE_PHI_LIST);
895 if (env.phis_moved) {
896 /* Bad: when we moved Phi's, we might produce dead Phi nodes
897 Some other phases cannot copy with this, so kill them.
/* rebuild the keepalive list, dropping Phis without real users */
901 n = get_End_n_keepalives(end);
903 NEW_ARR_A(ir_node *, in, n);
904 assure_irg_outs(irg);
906 for (i = j = 0; i < n; ++i) {
907 ir_node *ka = get_End_keepalive(end, i);
912 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
913 ir_node *user = get_irn_out(ka, k);
915 if (user != ka && user != end) {
916 /* Is it a real user or just a self loop ? */
926 set_End_keepalives(end, j, in);
/* Optimization descriptor consumed by perform_irg_optimization() below.
 * NOTE(review): the extraction dropped initializer fields here (the
 * optimization's name string and the do_cfopt function pointer, plus the
 * closing brace) — restore them from the original source; only the
 * requirements field survived. */
935 static optdesc_t opt_cf = {
937 IR_GRAPH_STATE_NO_UNREACHABLE_CODE,
941 void optimize_cf(ir_graph *irg)
943 perform_irg_optimization(irg, &opt_cf);
946 /* Creates an ir_graph pass for optimize_cf. */
947 ir_graph_pass_t *optimize_cf_pass(const char *name)
949 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);