2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * File name: ir/opt/cfopt.c
23 * Purpose: control flow optimizations
27 * Copyright: (c) 1998-2004 Universität Karlsruhe
39 #include "irgraph_t.h"
53 #include "irbackedge_t.h"
59 #include "iropt_dbg.h"
61 /*------------------------------------------------------------------*/
62 /* Control flow optimization. */
64 /* Removes Bad control flow predecessors and empty blocks. A block */
65 /* is empty if it contains only a Jmp node. */
66 /* Blocks can only be removed if they are not needed for the */
67 /* semantics of Phi nodes. */
68 /*------------------------------------------------------------------*/
71 * Replace binary Conds that jumps twice into the same block
77 * ProjX True ProjX False ==> | /
82 * Such patterns are the result of if-conversion.
84 * Note that the simple case that Block has only these two
85 * predecessors are already handled in equivalent_node_Block().
87 static void remove_senseless_conds(ir_node *bl, void *data) {
89 int n = get_Block_n_cfgpreds(bl);
93 for (i = 0; i < n; ++i) {
94 ir_node *pred_i = get_Block_cfgpred(bl, i);
95 ir_node *cond_i = skip_Proj(pred_i);
98 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
100 for (j = i + 1; j < n; ++j) {
101 ir_node *pred_j = get_Block_cfgpred(bl, j);
102 ir_node *cond_j = skip_Proj(pred_j);
104 if (cond_j == cond_i) {
105 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
106 set_irn_n(bl, i, jmp);
107 set_irn_n(bl, j, new_Bad());
109 DBG_OPT_IFSIM2(cond_i, jmp);
118 * Removes Tuples from Block control flow predecessors.
119 * Optimizes blocks with equivalent_node(). This is tricky,
120 * as we want to avoid nodes that have as block predecessor Bads.
121 * Therefore we also optimize at control flow operations, depending
122 * how we first reach the Block.
124 static void merge_blocks(ir_node *node, void *env) {
128 /* clear the link field for ALL nodes first */
129 set_irn_link(node, NULL);
131 if (is_Block(node)) {
134 /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
135 A different order of optimizations might cause problems. */
136 if (get_opt_normalize()) {
137 for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
138 set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
142 new_block = equivalent_node(node);
143 if (new_block != node && ! is_Block_dead(new_block))
144 exchange(node, new_block);
146 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
147 /* We will soon visit a block. Optimize it before visiting! */
148 ir_node *b = get_nodes_block(skip_Proj(node));
150 if (!is_Block_dead(b)) {
151 new_block = equivalent_node(b);
153 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
154 /* We would have to run gigo() if new is bad, so we
155 promote it directly below. Nevertheless, we sometimes reach a block
156 the first time through a dataflow node. In this case we optimized the
157 block as such and have to promote the Bad here. */
158 assert((get_opt_control_flow_straightening() ||
159 get_opt_control_flow_weak_simplification()) &&
160 ("strange flag setting"));
161 exchange (b, new_block);
163 new_block = equivalent_node(b);
166 /* normally, we would create a Bad block here, but this must be
167 * prevented, so just set it's cf to Bad.
169 if (is_Block_dead(new_block))
170 exchange(node, new_Bad());
177 * Remove cf from dead block by inspecting dominance info
178 * Do not replace blocks by Bad. This optimization shall
179 * ensure, that all Bad control flow predecessors are
180 * removed, and no new other Bads are introduced.
182 * Must be run in the post walker.
184 static void remove_dead_block_cf(ir_node *block, void *env) {
187 /* check block predecessors and turn control flow into bad */
188 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
189 ir_node *pred_X = get_Block_cfgpred(block, i);
191 if (! is_Bad(pred_X)) {
192 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
194 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
195 set_Block_dead(pred_bl);
196 exchange(pred_X, new_Bad());
203 * Collects all Phi nodes in link list of Block.
204 * Marks all blocks "block_visited" if they contain a node other
205 * than Jmp (and Proj).
206 * Links all Proj nodes to their predecessors.
207 * Collects all switch-Conds in a list.
209 static void collect_nodes(ir_node *n, void *env) {
210 ir_op *op = get_irn_op(n);
213 if (op != op_Block) {
214 ir_node *b = get_nodes_block(n);
217 /* Collect Phi nodes to compact ins along with block's ins. */
218 set_irn_link(n, get_irn_link(b));
220 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
221 mark_Block_block_visited(b);
223 if (op == op_Proj) { /* link Proj nodes */
224 ir_node *pred = get_Proj_pred(n);
226 set_irn_link(n, get_irn_link(pred));
227 set_irn_link(pred, n);
228 } else if (op == op_Cond) {
229 ir_node *sel = get_Cond_selector(n);
230 if (mode_is_int(get_irn_mode(sel))) {
231 /* found a switch-Cond, collect */
232 plist_insert_back(list, n);
239 /** Returns true if pred is predecessor of block. */
240 static int is_pred_of(ir_node *pred, ir_node *b) {
243 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
244 ir_node *b_pred = get_Block_cfgpred_block(b, i);
245 if (b_pred == pred) return 1;
250 /** Test whether we can optimize away pred block pos of b.
252 * @param b A block node.
253 * @param pos The position of the predecessor block to judge about.
255 * @returns The number of predecessors
257 * The test is rather tricky.
259 * The situation is something like the following:
268 * b merges the control flow of an if-then-else. We may not remove
269 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
270 * node in b, even if both are empty. The destruction of this Phi
271 * requires that a copy is added before the merge. We have to
272 * keep one of the case blocks to place the copies in.
274 * To perform the test for pos, we must regard predecessors before pos
275 * as already removed.
277 static int test_whether_dispensable(ir_node *b, int pos) {
278 int i, j, n_preds = 1;
279 ir_node *pred = get_Block_cfgpred_block(b, pos);
281 /* Bad blocks will be optimized away, so we don't need space for them */
282 if (is_Block_dead(pred))
285 if (get_Block_block_visited(pred) + 1
286 < get_irg_block_visited(current_ir_graph)) {
288 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
289 /* Mark block so that is will not be removed: optimization is turned off. */
290 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
294 /* Seems to be empty. At least we detected this in collect_nodes. */
295 if (!get_irn_link(b)) {
296 /* There are no Phi nodes ==> all predecessors are dispensable. */
297 n_preds = get_Block_n_cfgpreds(pred);
299 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
300 Handle all pred blocks with preds < pos as if they were already removed. */
301 for (i = 0; i < pos; i++) {
302 ir_node *b_pred = get_Block_cfgpred_block(b, i);
303 if (! is_Block_dead(b_pred) &&
304 get_Block_block_visited(b_pred) + 1
305 < get_irg_block_visited(current_ir_graph)) {
306 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
307 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
308 if (is_pred_of(b_pred_pred, pred))
309 goto non_dispensable;
312 if (is_pred_of(b_pred, pred))
313 goto non_dispensable;
316 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
317 ir_node *b_pred = get_Block_cfgpred_block(b, i);
318 if (is_pred_of(b_pred, pred))
319 goto non_dispensable;
321 /* if we get here, the block is dispensable */
322 n_preds = get_Block_n_cfgpreds(pred);
329 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
334 * Store used to defer the exchange of Phi nodes.
336 typedef struct _defer_ex_phi {
337 ir_node *phi_pred; /**< the previous Phi node that will be replaced */
338 ir_node *phi; /**< the new Phi node that replaces phi_pred */
342 * This method removes Bad cf predecessors from Blocks and Phis, and removes
343 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
345 * We first adapt Phi nodes, then Block nodes, as we need the old ins
346 * of the Block to adapt the Phi nodes. We do this by computing new
347 * in arrays, and then replacing the old ones. So far we compute new in arrays
348 * for all nodes, not regarding whether there is a possibility for optimization.
350 * For each predecessor p of a Block b there are three cases:
351 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
352 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
354 * -3. The predecessor p is a block containing useful code. Just keep p as is.
356 * For Phi nodes f we have to check the conditions at the Block of f.
357 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
359 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
360 * case we proceed as for blocks. We remove pred_f. All
361 * predecessors of pred_f now are predecessors of f.
362 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
363 * We have to replicate f for each predecessor of the removed block. Or, with
364 * other words, the removed predecessor block has exactly one predecessor.
366 * Further there is a special case for self referencing blocks:
369 * then_b else_b then_b else_b
375 * | | | === optimized to ===> \ | | |
382 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
383 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
385 * @@@ It is negotiable whether we should do this ... there might end up a copy
386 * from the Phi in the loop when removing the Phis.
388 static void optimize_blocks(ir_node *b, void *env) {
389 int i, j, k, n, max_preds, n_preds, p_preds = -1;
392 defer_ex_phi *defers;
394 /* Count the number of predecessor if this block is merged with pred blocks
397 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
398 max_preds += test_whether_dispensable(b, i);
400 in = xmalloc(max_preds * sizeof(*in));
402 defers = NEW_ARR_F(defer_ex_phi, 0);
405 printf(" working on "); DDMN(b);
406 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
407 pred = get_nodes_block(get_Block_cfgpred(b, i));
408 if (is_Bad(get_Block_cfgpred(b, i))) {
409 printf(" removing Bad %i\n ", i);
410 } else if (get_Block_block_visited(pred) +1
411 < get_irg_block_visited(current_ir_graph)) {
412 printf(" removing pred %i ", i); DDMN(pred);
413 } else { printf(" Nothing to do for "); DDMN(pred); }
415 * end Debug output -*/
417 /*- Fix the Phi nodes of the current block -*/
418 for (phi = get_irn_link(b); phi; ) {
419 assert(get_irn_op(phi) == op_Phi);
421 /* Find the new predecessors for the Phi */
423 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
424 pred = get_Block_cfgpred_block(b, i);
426 if (is_Bad(get_Block_cfgpred(b, i))) {
427 /* case Phi 1: Do nothing */
429 else if (get_Block_block_visited(pred) + 1
430 < get_irg_block_visited(current_ir_graph)) {
431 /* case Phi 2: It's an empty block and not yet visited. */
432 ir_node *phi_pred = get_Phi_pred(phi, i);
434 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
435 /* because of breaking loops, not all predecessors are Bad-clean,
436 * so we must check this here again */
437 if (! is_Bad(get_Block_cfgpred(pred, j))) {
438 if (get_nodes_block(phi_pred) == pred) {
440 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
442 in[p_preds++] = get_Phi_pred(phi_pred, j);
445 in[p_preds++] = phi_pred;
450 /* The Phi_pred node is replaced now if it is a Phi.
452 Somehow the removed Phi node can be used legally in loops.
453 Therefore we replace the old phi by the new one.
454 This must be done _AFTER_ all Phis are optimized, or
455 it will fail if two Phis use the same pred_Phi.
457 FIXME: Is the following true? We ALWAYS replace it by the new one.
459 Further we have to remove the old Phi node by replacing it
460 by Bad. Else it will remain in the keep alive array of End
461 and cause illegal situations. So if there is no loop, we should
464 if (get_nodes_block(phi_pred) == pred) {
466 /* remove the Phi as it might be kept alive. Further there
467 might be other users. */
468 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
469 if (defers[i].phi_pred == phi_pred)
473 /* we have a new replacement */
476 elem.phi_pred = phi_pred;
478 ARR_APP1(defer_ex_phi, defers, elem);
483 in[p_preds++] = get_Phi_pred(phi, i);
486 assert(p_preds <= max_preds);
490 /* By removal of Bad ins the Phi might be degenerated. */
491 exchange(phi, in[0]);
493 set_irn_in(phi, p_preds, in);
495 phi = get_irn_link(phi);
498 /* now, exchange all Phis */
499 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
500 exchange(defers[i].phi_pred, defers[i].phi);
504 /*- This happens only if merge between loop backedge and single loop entry.
505 See special case above. -*/
506 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
507 pred = get_nodes_block(get_Block_cfgpred(b, k));
509 if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
510 /* we found a predecessor block at position k that will be removed */
511 for (phi = get_irn_link(pred); phi;) {
513 * the previous phase may already changed the phi, and even
514 * removed it at all, so check here if this node is still a phi
516 if (get_irn_op(phi) == op_Phi) {
519 /* move this phi from the predecessor into the block b */
520 set_nodes_block(phi, b);
522 /* first, copy all 0..k-1 predecessors */
523 for (i = 0; i < k; i++) {
524 pred = get_Block_cfgpred_block(b, i);
528 } else if (get_Block_block_visited(pred) + 1
529 < get_irg_block_visited(current_ir_graph)) {
530 /* It's an empty block and not yet visited. */
531 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
532 /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
533 muss Rueckwaertskante sein! (An allen vier in[q_preds] = phi
534 Anweisungen.) Trotzdem tuts bisher!! */
535 if (! is_Bad(get_Block_cfgpred(pred, j)))
543 /* now we are at k, copy the phi predecessors */
544 pred = get_nodes_block(get_Block_cfgpred(b, k));
545 for (i = 0; i < get_Phi_n_preds(phi); i++) {
546 if (! is_Bad(get_Block_cfgpred(pred, i)))
547 in[q_preds++] = get_Phi_pred(phi, i);
550 /* and now all the rest */
551 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
552 pred = get_nodes_block(get_Block_cfgpred(b, i));
554 if (is_Bad(get_Block_cfgpred(b, i))) {
556 } else if (get_Block_block_visited(pred) +1
557 < get_irg_block_visited(current_ir_graph)) {
558 /* It's an empty block and not yet visited. */
559 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
560 if (! is_Bad(get_Block_cfgpred(pred, j)))
570 exchange(phi, in[0]);
572 set_irn_in(phi, q_preds, in);
574 assert(q_preds <= max_preds);
575 // assert(p_preds == q_preds && "Wrong Phi Fix");
577 phi = get_irn_link(phi);
582 /*- Fix the block -*/
584 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
585 pred = get_Block_cfgpred_block(b, i);
588 /* case 1: Do nothing */
589 } else if (get_Block_block_visited(pred) +1
590 < get_irg_block_visited(current_ir_graph)) {
591 /* case 2: It's an empty block and not yet visited. */
592 assert(get_Block_n_cfgpreds(b) > 1);
593 /* Else it should be optimized by equivalent_node. */
594 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
595 ir_node *pred_block = get_Block_cfgpred(pred, j);
597 /* because of breaking loops, not all predecessors are Bad-clean,
598 * so we must check this here again */
599 if (! is_Bad(pred_block))
600 in[n_preds++] = pred_block;
602 /* Remove block as it might be kept alive. */
603 exchange(pred, b/*new_Bad()*/);
606 in[n_preds++] = get_Block_cfgpred(b, i);
609 assert(n_preds <= max_preds);
611 set_irn_in(b, n_preds, in);
613 assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
618 * Walker: optimize all blocks using the default optimizations.
619 * This removes Blocks with only a Jmp predecessor.
621 static void remove_simple_blocks(ir_node *block, void *env) {
622 ir_node *new_blk = equivalent_node(block);
623 if (new_blk != block)
624 exchange(block, new_blk);
628 * Handle pre-optimized table switch Cond's.
629 * During iropt, all Projs from a switch-Cond are already removed except
630 * the defProj and maybe the taken one.
631 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
633 * @param cond the switch-Cond
635 * @return non-zero if a switch-Cond was optimized
637 * Expects all Proj's linked to the cond node
639 static int handle_switch_cond(ir_node *cond) {
640 ir_node *sel = get_Cond_selector(cond);
642 ir_node *proj1 = get_irn_link(cond);
643 ir_node *proj2 = get_irn_link(proj1);
646 blk = get_nodes_block(cond);
649 /* this Cond has only one Proj: must be the defProj */
650 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
651 /* convert it into a Jmp */
652 jmp = new_r_Jmp(current_ir_graph, blk);
653 exchange(proj1, jmp);
655 } else if (get_irn_link(proj2) == NULL) {
656 /* We have two Proj's here. Check if the Cond has
657 a constant argument */
658 tarval *tv = value_of(sel);
660 if (tv != tarval_bad) {
661 /* we have a constant switch */
662 long num = get_tarval_long(tv);
663 long def_num = get_Cond_defaultProj(cond);
665 if (def_num == get_Proj_proj(proj1)) {
666 /* first one is the defProj */
667 if (num == get_Proj_proj(proj2)) {
668 jmp = new_r_Jmp(current_ir_graph, blk);
669 exchange(proj2, jmp);
670 exchange(proj1, new_Bad());
673 } else if (def_num == get_Proj_proj(proj2)) {
674 /* second one is the defProj */
675 if (num == get_Proj_proj(proj1)) {
676 jmp = new_r_Jmp(current_ir_graph, blk);
677 exchange(proj1, jmp);
678 exchange(proj2, new_Bad());
682 /* neither: strange, Cond was not optimized so far */
683 if (num == get_Proj_proj(proj1)) {
684 jmp = new_r_Jmp(current_ir_graph, blk);
685 exchange(proj1, jmp);
686 exchange(proj2, new_Bad());
688 } else if (num == get_Proj_proj(proj2)) {
689 jmp = new_r_Jmp(current_ir_graph, blk);
690 exchange(proj2, jmp);
691 exchange(proj1, new_Bad());
700 /* Optimizations of the control flow that also require changes of Phi nodes.
702 * This optimization performs two passes over the graph.
704 * The first pass collects all Phi nodes in a link list in the block
705 * nodes. Further it performs simple control flow optimizations.
706 * Finally it marks all blocks that do not contain useful
707 * computations, i.e., these blocks might be removed.
709 * The second pass performs the optimizations intended by this algorithm.
710 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
711 * which it finds in a linked list computed by the first pass.
713 * We use the block_visited flag to mark empty blocks in the first
715 * @@@ It would be better to add a struct in the link field
716 * that keeps the Phi list and the mark. Place it on an obstack, as
717 * we will lose blocks and thereby generate memory leaks.
719 void optimize_cf(ir_graph *irg) {
722 ir_node *cond, *end = get_irg_end(irg);
723 ir_graph *rem = current_ir_graph;
724 irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
728 assert(get_irg_phase_state(irg) != phase_building);
730 /* if the graph is not pinned, we cannot determine empty blocks */
731 assert(get_irg_pinned(irg) != op_pin_state_floats &&
732 "Control flow optimization need a pinned graph");
734 current_ir_graph = irg;
736 edges_deactivate(irg);
738 /* Handle graph state */
739 set_irg_outs_inconsistent(current_ir_graph);
740 set_irg_extblk_inconsistent(current_ir_graph);
741 set_irg_loopinfo_inconsistent(current_ir_graph);
742 set_irg_doms_inconsistent(current_ir_graph);
744 if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
747 /* we have dominance info, we can kill dead block */
748 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
750 /* fix the keep-alives */
751 end = get_irg_end(irg);
752 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
753 ir_node *ka = get_End_keepalive(end, i);
756 /* do NOT keep dead blocks */
757 if (get_Block_dom_depth(ka) < 0)
758 set_End_keepalive(end, i, new_Bad());
759 } else if (is_Block_dead(get_nodes_block(ka)) ||
760 get_Block_dom_depth(get_nodes_block(ka)) < 0)
761 /* do NOT keep nodes in dead blocks */
762 set_End_keepalive(end, i, new_Bad());
765 irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);
767 /* Use block visited flag to mark non-empty blocks. */
768 inc_irg_block_visited(irg);
771 irg_walk(end, merge_blocks, collect_nodes, list);
773 /* handle all collected switch-Conds */
774 foreach_plist(list, el) {
775 cond = plist_element_get_value(el);
776 handle_switch_cond(cond);
780 /* Optimize the standard code. */
781 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);
783 /* Walk all keep alives, optimize them if block, add to new in-array
784 for end if useful. */
785 n = get_End_n_keepalives(end);
787 NEW_ARR_A(ir_node *, in, n);
788 inc_irg_visited(irg);
790 /* fix the keep alive */
791 for (i = j = 0; i < n; i++) {
792 ir_node *ka = get_End_keepalive(end, i);
794 if (irn_not_visited(ka)) {
795 ir_op *op = get_irn_op(ka);
797 if ((op == op_Block) && Block_not_block_visited(ka)) {
798 set_irg_block_visited(irg, /* Don't walk all the way to Start. */
799 get_irg_block_visited(irg)-1);
800 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
801 mark_irn_visited(ka);
803 } else if (op == op_Phi) {
804 mark_irn_visited(ka);
805 if (! is_Block_dead(get_nodes_block(ka)))
807 } else if (is_op_keep(op)) {
808 mark_irn_visited(ka);
809 if (! is_Block_dead(get_nodes_block(ka)))
815 set_End_keepalives(end, j, in);
816 /* the verifier doesn't work yet with floating nodes */
817 if (get_irg_pinned(irg) == op_pin_state_pinned) {
818 /* after optimize_cf(), only Bad data flow may remain. */
819 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
820 dump_ir_block_graph(irg, "-vrfy-cf");
821 dump_ir_graph(irg, "-vrfy-cf");
822 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
826 current_ir_graph = rem;