 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
26 * Removes Bad control flow predecessors and empty blocks. A block is empty
27 * if it contains only a Jmp node. Blocks can only be removed if they are not
28 * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
29 * blocks (even if we could move the label).
33 #include "iroptimize.h"
40 #include "irgraph_t.h"
54 #include "irbackedge_t.h"
59 #include "irphase_t.h"
61 #include "iropt_dbg.h"
63 /** An environment for merge_blocks and collect nodes. */
64 typedef struct merge_env {
65 bool changed; /**< Set if the graph was changed. */
66 bool phis_moved; /**< Set if Phi nodes were moved. */
69 /** set or reset the removable property of a block. */
70 static void set_Block_removable(ir_node *block, bool removable)
72 set_Block_mark(block, removable);
75 /** check if a block has the removable property set. */
76 static bool is_Block_removable(ir_node *block)
78 return get_Block_mark(block);
81 /** checks if a given Cond node is a switch Cond. */
82 static bool is_switch_Cond(ir_node *cond)
84 ir_node *sel = get_Cond_selector(cond);
85 return get_irn_mode(sel) != mode_b;
88 /** Walker: clear link fields and mark all blocks as removable. */
89 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
92 set_irn_link(node, NULL);
94 set_Block_removable(node, true);
98 * Collects all Phi nodes in link list of Block.
99 * Marks all blocks "non_removable" if they contain a node other
100 * than Jmp (and Proj).
101 * Links all Proj nodes to their predecessors.
102 * Collects all switch-Conds in a list.
104 static void collect_nodes(ir_node *n, void *ctx)
106 ir_node ***switch_conds = (ir_node***)ctx;
109 /* Collect Phi nodes to compact ins along with block's ins. */
110 ir_node *block = get_nodes_block(n);
111 set_irn_link(n, get_irn_link(block));
112 set_irn_link(block, n);
113 } else if (is_Block(n)) {
114 if (has_Block_entity(n)) {
115 /* block with a jump label attached cannot be removed. */
116 set_Block_removable(n, false);
119 } else if (!is_Jmp(n)) { /* Check for non-empty block. */
120 ir_node *block = get_nodes_block(n);
121 set_Block_removable(block, false);
124 /* link Proj nodes */
125 ir_node *pred = get_Proj_pred(n);
126 set_irn_link(n, get_irn_link(pred));
127 set_irn_link(pred, n);
128 } else if (is_Cond(n) && is_switch_Cond(n)) {
129 /* found a switch-Cond, collect */
130 ARR_APP1(ir_node*, *switch_conds, n);
135 /** Returns true if pred is predecessor of block b. */
136 static bool is_pred_of(ir_node *pred, ir_node *b)
140 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
141 ir_node *b_pred = get_Block_cfgpred_block(b, i);
148 /** Test whether we can optimize away pred block pos of b.
150 * @param b A block node.
151 * @param pos The position of the predecessor block to judge about.
153 * @returns The number of predecessors
155 * The test is rather tricky.
157 * The situation is something like the following:
166 * b merges the control flow of an if-then-else. We may not remove
167 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
168 * node in b, even if both are empty. The destruction of this Phi
169 * requires that a copy is added before the merge. We have to
170 * keep one of the case blocks to place the copies in.
172 * To perform the test for pos, we must regard predecessors before pos
173 * as already removed.
175 static unsigned test_whether_dispensable(ir_node *b, int pos)
177 ir_node *pred = get_Block_cfgpred(b, pos);
178 ir_node *predb = get_nodes_block(pred);
180 if (is_Bad(pred) || !is_Block_removable(predb))
183 /* can't remove self-loops */
185 goto non_dispensable;
186 if (is_unknown_jump(pred))
187 goto non_dispensable;
189 /* Seems to be empty. At least we detected this in collect_nodes. */
190 if (get_irn_link(b) != NULL) {
191 int n_cfgpreds = get_Block_n_cfgpreds(b);
193 /* there are Phi nodes */
195 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
196 * Handle all pred blocks with preds < pos as if they were already
198 for (i = 0; i < pos; i++) {
199 ir_node *other_pred = get_Block_cfgpred(b, i);
200 ir_node *other_predb = get_nodes_block(other_pred);
201 if (is_Bad(other_pred))
203 if (is_Block_removable(other_predb)
204 && !Block_block_visited(other_predb)) {
206 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
207 ir_node *other_predpred
208 = get_Block_cfgpred_block(other_predb, j);
209 if (is_pred_of(other_predpred, predb))
210 goto non_dispensable;
212 } else if (is_pred_of(other_predb, predb)) {
213 goto non_dispensable;
216 for (i = pos+1; i < n_cfgpreds; i++) {
217 ir_node *other_predb = get_Block_cfgpred_block(b, i);
218 if (is_pred_of(other_predb, predb))
219 goto non_dispensable;
222 /* we will not dispense already visited blocks */
223 if (Block_block_visited(predb))
225 /* if we get here, the block is dispensable, count useful preds */
226 return get_irn_arity(predb);
229 set_Block_removable(predb, false);
234 * This method removes empty blocks. A block is empty if it only contains Phi
237 * We first adapt Phi nodes, then Block nodes, as we need the old ins
238 * of the Block to adapt the Phi nodes. We do this by computing new
239 * in arrays, and then replacing the old ones. So far we compute new in arrays
240 * for all nodes, not regarding whether there is a possibility for optimization.
242 * For each predecessor p of a Block b there are three cases:
243 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
245 * - The predecessor p is empty. Remove p. All predecessors of p are now
247 * - The predecessor p is a block containing useful code. Just keep p as is.
249 * For Phi nodes f we have to check the conditions at the Block of f.
250 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
252 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
253 * In this case we proceed as for blocks. We remove pred_f. All
254 * predecessors of pred_f now are predecessors of f.
255 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
256 * too. We have to replicate f for each predecessor of the removed block.
257 * Or, with other words, the removed predecessor block has exactly one
260 * Further there is a special case for self referencing blocks:
263 * then_b else_b then_b else_b
269 * | | | === optimized to ===> \ | | |
276 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
277 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
280 static void optimize_blocks(ir_node *b, void *ctx)
282 int i, j, k, n, max_preds, n_preds, p_preds = -1;
283 ir_node *pred, *phi, *next;
285 merge_env *env = (merge_env*)ctx;
287 if (get_Block_dom_depth(b) < 0) {
288 /* ignore unreachable blocks */
292 /* Count the number of predecessor if this block is merged with pred blocks
295 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
296 max_preds += test_whether_dispensable(b, i);
298 in = XMALLOCN(ir_node*, max_preds);
300 /*- Fix the Phi nodes of the current block -*/
301 for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
303 next = (ir_node*)get_irn_link(phi);
305 /* Find the new predecessors for the Phi */
307 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
308 ir_graph *irg = get_irn_irg(b);
309 pred = get_Block_cfgpred_block(b, i);
312 /* case Phi 1: maintain Bads, as somebody else is responsible to remove them */
313 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
314 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
315 /* case Phi 2: It's an empty block and not yet visited. */
316 ir_node *phi_pred = get_Phi_pred(phi, i);
318 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
319 ir_node *pred_pred = get_Block_cfgpred(pred, j);
321 if (is_Bad(pred_pred)) {
322 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
326 if (get_nodes_block(phi_pred) == pred) {
328 assert(is_Phi(phi_pred)); /* Block is empty!! */
330 in[p_preds++] = get_Phi_pred(phi_pred, j);
333 in[p_preds++] = phi_pred;
338 in[p_preds++] = get_Phi_pred(phi, i);
341 assert(p_preds == max_preds);
345 exchange(phi, in[0]);
347 set_irn_in(phi, p_preds, in);
351 /*- This happens only if merge between loop backedge and single loop entry.
352 Moreover, it is only needed if predb is the direct dominator of b,
353 else there can be no uses of the Phi's in predb ... -*/
354 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
355 ir_node *pred = get_Block_cfgpred(b, k);
356 ir_node *predb = get_nodes_block(pred);
360 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
363 /* we found a predecessor block at position k that will be removed */
364 for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
366 next_phi = (ir_node*)get_irn_link(phi);
370 if (get_Block_idom(b) != predb) {
371 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
372 ir_graph *irg = get_irn_irg(b);
373 ir_mode *mode = get_irn_mode(phi);
374 exchange(phi, new_r_Bad(irg, mode));
376 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
377 predb in further block, so move this phi from the predecessor into the block b */
378 set_nodes_block(phi, b);
379 set_irn_link(phi, get_irn_link(b));
380 set_irn_link(b, phi);
381 env->phis_moved = true;
383 /* first, copy all 0..k-1 predecessors */
384 for (i = 0; i < k; i++) {
385 pred = get_Block_cfgpred_block(b, i);
388 ir_graph *irg = get_irn_irg(b);
389 ir_mode *mode = get_irn_mode(phi);
390 in[q_preds++] = new_r_Bad(irg, mode);
391 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
392 /* It's an empty block and not yet visited. */
393 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
394 if (! is_Bad(get_Block_cfgpred(pred, j))) {
397 ir_graph *irg = get_irn_irg(b);
398 ir_mode *mode = get_irn_mode(phi);
399 in[q_preds++] = new_r_Bad(irg, mode);
407 /* now we are at k, copy the phi predecessors */
408 pred = get_nodes_block(get_Block_cfgpred(b, k));
409 for (i = 0; i < get_Phi_n_preds(phi); i++) {
410 in[q_preds++] = get_Phi_pred(phi, i);
413 /* and now all the rest */
414 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
415 pred = get_Block_cfgpred_block(b, i);
418 ir_graph *irg = get_irn_irg(b);
419 ir_mode *mode = get_irn_mode(phi);
420 in[q_preds++] = new_r_Bad(irg, mode);
421 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
422 /* It's an empty block and not yet visited. */
423 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
424 if (! is_Bad(get_Block_cfgpred(pred, j))) {
427 ir_graph *irg = get_irn_irg(b);
428 ir_mode *mode = get_irn_mode(phi);
429 in[q_preds++] = new_r_Bad(irg, mode);
439 exchange(phi, in[0]);
441 set_irn_in(phi, q_preds, in);
444 assert(q_preds <= max_preds);
445 // assert(p_preds == q_preds && "Wrong Phi Fix");
451 /*- Fix the block -*/
453 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
454 ir_node *pred = get_Block_cfgpred(b, i);
455 ir_node *predb = get_nodes_block(pred);
456 ir_graph *irg = get_irn_irg(pred);
458 /* case 1: Bad predecessor */
460 in[n_preds++] = new_r_Bad(irg, mode_X);
463 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
464 /* case 2: It's an empty block and not yet visited. */
465 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
466 ir_node *predpred = get_Block_cfgpred(predb, j);
468 if (is_Bad(predpred)) {
469 in[n_preds++] = new_r_Bad(irg, mode_X);
473 in[n_preds++] = predpred;
475 /* Remove block+jump as it might be kept alive. */
476 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
477 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
480 in[n_preds++] = pred;
483 assert(n_preds == max_preds);
485 set_irn_in(b, n_preds, in);
488 /* see if phi-fix was correct */
489 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds));
494 * Optimize table-switch Conds.
496 * @param cond the switch-Cond
497 * @return true if the switch-Cond was optimized
499 static bool handle_switch_cond(ir_node *cond)
501 ir_node *sel = get_Cond_selector(cond);
502 ir_node *proj1 = (ir_node*)get_irn_link(cond);
503 ir_node *proj2 = (ir_node*)get_irn_link(proj1);
504 ir_node *blk = get_nodes_block(cond);
506 /* exactly 1 Proj on the Cond node: must be the defaultProj */
508 ir_node *jmp = new_r_Jmp(blk);
509 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
510 /* convert it into a Jmp */
511 exchange(proj1, jmp);
515 /* handle Cond nodes with constant argument. In this case the localopt rules
516 * should have killed all obviously impossible cases.
517 * So the only case left to handle here is 1 defaultProj + 1 case
518 * (this one case should be the one taken) */
519 if (get_irn_link(proj2) == NULL) {
520 ir_tarval *tv = value_of(sel);
522 if (tv != tarval_bad) {
523 /* we have a constant switch */
524 long num = get_tarval_long(tv);
525 long def_num = get_Cond_default_proj(cond);
526 ir_graph *irg = get_irn_irg(cond);
527 ir_node *bad = new_r_Bad(irg, mode_X);
529 if (def_num == get_Proj_proj(proj1)) {
530 /* first one is the defProj */
531 if (num == get_Proj_proj(proj2)) {
532 ir_node *jmp = new_r_Jmp(blk);
533 exchange(proj2, jmp);
534 exchange(proj1, bad);
537 } else if (def_num == get_Proj_proj(proj2)) {
538 /* second one is the defProj */
539 if (num == get_Proj_proj(proj1)) {
540 ir_node *jmp = new_r_Jmp(blk);
541 exchange(proj1, jmp);
542 exchange(proj2, bad);
546 /* neither: strange, Cond was not optimized so far */
547 if (num == get_Proj_proj(proj1)) {
548 ir_node *jmp = new_r_Jmp(blk);
549 exchange(proj1, jmp);
550 exchange(proj2, bad);
552 } else if (num == get_Proj_proj(proj2)) {
553 ir_node *jmp = new_r_Jmp(blk);
554 exchange(proj2, jmp);
555 exchange(proj1, bad);
565 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
566 * Block must contain no Phi nodes.
570 * projA projB => Jmp Bad
574 static bool optimize_pred_cond(ir_node *block, int i, int j)
576 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
579 projA = get_Block_cfgpred(block, i);
580 if (!is_Proj(projA)) return false;
581 projB = get_Block_cfgpred(block, j);
582 if (!is_Proj(projB)) return false;
583 cond = get_Proj_pred(projA);
584 if (!is_Cond(cond)) return false;
586 if (cond != get_Proj_pred(projB)) return false;
587 if (is_switch_Cond(cond)) return false;
589 /* cond should actually be a Jmp */
590 pred_block = get_nodes_block(cond);
591 jmp = new_r_Jmp(pred_block);
592 bad = new_r_Bad(get_irn_irg(block), mode_X);
594 assert(projA != projB);
595 exchange(projA, jmp);
596 exchange(projB, bad);
/** Per-block flags stored in the block-info phase. */
typedef enum block_flags_t {
	BF_HAS_OPERATIONS         = 1 << 0, /**< block contains "real" computations */
	BF_HAS_PHIS               = 1 << 1, /**< block contains Phi nodes */
	BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2, /**< block is target of an unknown jump */
} block_flags_t;
606 static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag)
608 return PTR_TO_INT(phase_get_irn_data(block_info, block)) & flag;
611 static void set_phase_flag(ir_phase *block_info, ir_node *block,
614 int data = PTR_TO_INT(phase_get_irn_data(block_info, block));
616 phase_set_irn_data(block_info, block, INT_TO_PTR(data));
619 static void clear_phase_flag(ir_phase *block_info, ir_node *block)
621 phase_set_irn_data(block_info, block, NULL);
624 static bool has_operations(ir_phase *block_info, ir_node *block)
626 return get_phase_flag(block_info, block, BF_HAS_OPERATIONS);
629 static void set_has_operations(ir_phase *block_info, ir_node *block)
631 set_phase_flag(block_info, block, BF_HAS_OPERATIONS);
634 static bool has_phis(ir_phase *block_info, ir_node *block)
636 return get_phase_flag(block_info, block, BF_HAS_PHIS);
639 static void set_has_phis(ir_phase *block_info, ir_node *block)
641 set_phase_flag(block_info, block, BF_HAS_PHIS);
644 static bool is_unknown_jump_target(ir_phase *block_info, ir_node *block)
646 return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
649 static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block)
651 set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
655 * Pre-Walker: fill block info information.
657 static void compute_block_info(ir_node *n, void *x)
659 ir_phase *block_info = (ir_phase *)x;
662 int i, max = get_Block_n_cfgpreds(n);
663 for (i=0; i<max; i++) {
664 ir_node *pred = get_Block_cfgpred(n,i);
665 if (is_unknown_jump(pred)) {
666 set_is_unknown_jump_target(block_info, n);
669 } else if (is_Phi(n)) {
670 ir_node *block = get_nodes_block(n);
671 set_has_phis(block_info, block);
672 } else if (is_Jmp(n) || is_Cond(n) || is_Cmp(n) || is_Proj(n)) {
675 ir_node *block = get_nodes_block(n);
676 set_has_operations(block_info, block);
680 static void clear_block_info(ir_node *block, void *x)
682 ir_phase *block_info = (ir_phase *)x;
683 clear_phase_flag(block_info, block);
686 typedef struct skip_env {
692 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
693 * with same true/false target)
696 static void optimize_ifs(ir_node *block, void *x)
698 skip_env *env = (skip_env*)x;
700 int n_preds = get_Block_n_cfgpreds(block);
702 if (has_phis(env->phase, block))
705 /* optimize Cond predecessors (might produce Bad predecessors) */
706 for (i = 0; i < n_preds; ++i) {
707 for (j = i+1; j < n_preds; ++j) {
708 optimize_pred_cond(block, i, j);
714 * Pre-Block walker: remove empty blocks (only contain a Jmp)
715 * that are control flow predecessors of the current block.
717 static void remove_empty_blocks(ir_node *block, void *x)
719 skip_env *env = (skip_env*)x;
721 int n_preds = get_Block_n_cfgpreds(block);
723 for (i = 0; i < n_preds; ++i) {
724 ir_node *jmp, *jmp_block, *pred, *pred_block;
727 jmp = get_Block_cfgpred(block, i);
730 jmp_block = get_nodes_block(jmp);
731 if (jmp_block == block)
732 continue; /* this infinite loop cannot be optimized any further */
733 if (is_unknown_jump_target(env->phase, jmp_block))
734 continue; /* unknown jump target must not be optimized */
735 if (has_operations(env->phase,jmp_block))
736 continue; /* this block contains operations and cannot be skipped */
737 if (has_phis(env->phase,jmp_block))
738 continue; /* this block contains Phis and is not skipped */
740 /* jmp_block is an empty block and can be optimized! */
742 n_jpreds = get_Block_n_cfgpreds(jmp_block);
744 * If the jmp block has only one predecessor this is straightforward.
745 * However, if there are more predecessors, we only handle this,
746 * if block has no Phis.
749 /* skip jmp block by rerouting its predecessor to block
757 pred = get_Block_cfgpred(jmp_block, 0);
760 /* cleanup: jmp_block might have a Keep edge! */
761 pred_block = get_nodes_block(pred);
762 exchange(jmp_block, pred_block);
764 } else if (! has_phis(env->phase, block)) {
765 /* all predecessors can skip the jmp block, so block gets some new predecessors
769 * jmp_block C => Bad C | |
773 ir_node **ins = NULL;
775 NEW_ARR_A(ir_node *, ins, n_preds+n_jpreds);
776 /* first copy the old predecessors, because the outer loop (i) still walks over them */
777 for (j = 0; j < n_preds; ++j) {
778 ins[j] = get_Block_cfgpred(block, j);
780 /* now append the new predecessors */
781 for (j = 0; j < n_jpreds; ++j) {
782 pred = get_Block_cfgpred(jmp_block, j);
783 ins[n_preds+j] = pred;
785 set_irn_in(block, n_preds+n_jpreds, ins);
786 /* convert the jmp_block to Bad */
787 ir_graph *irg = get_irn_irg(block);
788 exchange(jmp_block, new_r_Bad(irg, mode_BB));
789 exchange(jmp, new_r_Bad(irg, mode_X));
790 /* let the outer loop walk over the new predecessors as well */
793 // TODO What if jmp_block had a KeepAlive edge?
795 /* This would involve Phis ... */
801 * All cfg optimizations, which do not touch Phi nodes.
803 * Note that this might create critical edges.
805 static void cfgopt_ignoring_phis(ir_graph *irg)
807 ir_phase *block_info = new_phase(irg, NULL);
808 skip_env env = { true, block_info };
810 while (env.changed) {
811 irg_walk_graph(irg, compute_block_info, NULL, block_info);
814 /* Remove blocks, which only consist of a Jmp */
815 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
817 /* Optimize Cond->Jmp, where then- and else-block are the same. */
818 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
821 set_irg_doms_inconsistent(irg);
822 /* clear block info, because it must be recomputed */
823 irg_block_walk_graph(irg, clear_block_info, NULL, block_info);
824 /* Removing blocks and Conds might enable more optimizations */
831 phase_free(block_info);
834 /* Optimizations of the control flow that also require changes of Phi nodes. */
835 void optimize_cf(ir_graph *irg)
839 ir_node *end = get_irg_end(irg);
844 env.phis_moved = false;
846 assert(get_irg_phase_state(irg) != phase_building);
848 /* if the graph is not pinned, we cannot determine empty blocks */
849 assert(get_irg_pinned(irg) != op_pin_state_floats &&
850 "Control flow optimization need a pinned graph");
852 edges_deactivate(irg);
854 /* First the "simple" optimizations, which do not touch Phis */
855 cfgopt_ignoring_phis(irg);
857 /* we use the mark flag to mark removable blocks */
858 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
860 /* The switch Cond optimization might expose unreachable code, so we loop */
863 ir_node **switch_conds = NULL;
864 bool changed = false;
869 * This pass collects all Phi nodes in a link list in the block
870 * nodes. Further it performs simple control flow optimizations.
871 * Finally it marks all blocks that do not contain useful
872 * computations, i.e., these blocks might be removed.
874 switch_conds = NEW_ARR_F(ir_node*, 0);
875 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds);
877 /* handle all collected switch-Conds */
878 length = ARR_LEN(switch_conds);
879 for (i = 0; i < length; ++i) {
880 ir_node *cond = switch_conds[i];
881 changed |= handle_switch_cond(cond);
883 DEL_ARR_F(switch_conds);
888 set_irg_doms_inconsistent(irg);
889 set_irg_extblk_inconsistent(irg);
890 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
893 /* assert due to collect_nodes:
894 * 1. removable blocks are now marked as such
895 * 2. phi lists are up to date
898 /* Optimize the standard code.
899 * It walks only over block nodes and adapts these and the Phi nodes in these
900 * blocks, which it finds in a linked list computed before.
903 irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
905 new_end = optimize_in_place(end);
906 if (new_end != end) {
907 set_irg_end(irg, new_end);
910 remove_End_Bads_and_doublets(end);
912 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
914 if (env.phis_moved) {
915 /* Bad: when we moved Phi's, we might produce dead Phi nodes
917 Some other phases cannot copy with this, so kill them.
919 n = get_End_n_keepalives(end);
921 NEW_ARR_A(ir_node *, in, n);
922 assure_irg_outs(irg);
924 for (i = j = 0; i < n; ++i) {
925 ir_node *ka = get_End_keepalive(end, i);
930 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
931 ir_node *user = get_irn_out(ka, k);
933 if (user != ka && user != end) {
934 /* Is it a real user or just a self loop ? */
944 set_End_keepalives(end, j, in);
951 /* Handle graph state if was changed. */
952 set_irg_doms_inconsistent(irg);
953 set_irg_extblk_inconsistent(irg);
954 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
958 /* Creates an ir_graph pass for optimize_cf. */
959 ir_graph_pass_t *optimize_cf_pass(const char *name)
961 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);