2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
35 #include "irgraph_t.h"
49 #include "irbackedge_t.h"
55 #include "iropt_dbg.h"
57 /*------------------------------------------------------------------*/
58 /* Control flow optimization. */
60 /* Removes Bad control flow predecessors and empty blocks. A block */
61 /* is empty if it contains only a Jmp node. */
62 /* Blocks can only be removed if they are not needed for the */
63 /* semantics of Phi nodes. */
64 /*------------------------------------------------------------------*/
67 * Replace binary Conds that jump twice into the same block
73 * ProjX True ProjX False ==> | /
78 * Such patterns are the result of if-conversion.
80 * Note that the simple case that Block has only these two
81 * predecessors are already handled in equivalent_node_Block().
/* Block walker: if a boolean (mode_b) Cond reaches this block through two
 * different Projs, both edges land in the same place, so the Cond is
 * senseless. Replace edge i by a plain Jmp in the Cond's block and kill
 * edge j with Bad.
 * NOTE(review): this listing omits several source lines (gaps in the
 * left-margin numbering); loop-variable declarations and closing braces
 * are not visible here. */
83 static void remove_senseless_conds(ir_node *bl, void *data) {
85 int n = get_Block_n_cfgpreds(bl);
89 for (i = 0; i < n; ++i) {
90 ir_node *pred_i = get_Block_cfgpred(bl, i);
91 ir_node *cond_i = skip_Proj(pred_i);
/* Only binary (boolean-selector) Conds are candidates here. */
94 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
/* Search the remaining predecessors for a Proj of the SAME Cond. */
96 for (j = i + 1; j < n; ++j) {
97 ir_node *pred_j = get_Block_cfgpred(bl, j);
98 ir_node *cond_j = skip_Proj(pred_j);
100 if (cond_j == cond_i) {
/* Both branches of cond_i enter bl: fold them into one Jmp. */
101 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
102 set_irn_n(bl, i, jmp);
103 set_irn_n(bl, j, new_Bad());
105 DBG_OPT_IFSIM2(cond_i, jmp);
114 * Removes Tuples from Block control flow predecessors.
115 * Optimizes blocks with equivalent_node(). This is tricky,
116 * as we want to avoid nodes that have as block predecessor Bads.
117 * Therefore we also optimize at control flow operations, depending
118 * how we first reach the Block.
/* Pre-walker: removes Tuples from Block control-flow predecessors and
 * simplifies blocks via equivalent_node(). Also clears the link field of
 * every visited node (the link is reused later to chain Phis per block).
 * NOTE(review): listing is elided (gaps in the left-margin numbering);
 * local declarations and some closing braces are not visible. */
120 static void merge_blocks(ir_node *node, void *env) {
124 /* clear the link field for ALL nodes first */
125 set_irn_link(node, NULL);
127 if (is_Block(node)) {
130 /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
131 A different order of optimizations might cause problems. */
132 if (get_opt_normalize()) {
/* Skip Tuple nodes in all cf predecessors of this block. */
133 for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
134 set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
/* Try to simplify the block itself, but never exchange it for a dead one. */
138 new_block = equivalent_node(node);
139 if (new_block != node && ! is_Block_dead(new_block))
140 exchange(node, new_block);
142 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
143 /* We will soon visit a block. Optimize it before visiting! */
144 ir_node *b = get_nodes_block(skip_Proj(node));
146 if (!is_Block_dead(b)) {
147 new_block = equivalent_node(b);
/* Repeatedly simplify b until it is visited, dead, or stable. */
149 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
150 /* We would have to run gigo() if new is bad, so we
151 promote it directly below. Nevertheless, we sometimes reach a block
152 the first time through a dataflow node. In this case we optimized the
153 block as such and have to promote the Bad here. */
154 assert((get_opt_control_flow_straightening() ||
155 get_opt_control_flow_weak_simplification()) &&
156 ("strange flag setting"));
157 exchange (b, new_block);
159 new_block = equivalent_node(b);
162 /* normally, we would create a Bad block here, but this must be
163 * prevented, so just set it's cf to Bad.
165 if (is_Block_dead(new_block))
166 exchange(node, new_Bad());
173 * Remove cf from dead block by inspecting dominance info
174 * Do not replace blocks by Bad. This optimization shall
175 * ensure, that all Bad control flow predecessors are
176 * removed, and no new other Bads are introduced.
178 * Must be run in the post walker.
/* Post-walker (needs consistent dominance info): for every non-Bad cf
 * predecessor whose source block is dead or unreachable (dom depth < 0),
 * mark that block dead and replace the cf edge by Bad.
 * NOTE(review): listing is elided; the loop-variable declaration and
 * closing braces are not visible here. */
180 static void remove_dead_block_cf(ir_node *block, void *env) {
183 /* check block predecessors and turn control flow into bad */
184 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
185 ir_node *pred_X = get_Block_cfgpred(block, i);
187 if (! is_Bad(pred_X)) {
188 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
/* dom depth < 0 means the block was never reached by the dom walker. */
190 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
191 set_Block_dead(pred_bl);
192 exchange(pred_X, new_Bad());
199 * Collects all Phi nodes in link list of Block.
200 * Marks all blocks "block_visited" if they contain a node other
201 * than Jmp (and Proj).
202 * Links all Proj nodes to their predecessors.
203 * Collects all switch-Conds in a list.
/* Post-walker: chains Phi nodes into their block's link field, marks
 * blocks containing anything other than Jmp/Proj as non-empty
 * (block_visited), links Projs to their predecessor, and collects
 * switch-Conds (integer selector) into the plist passed as env.
 * NOTE(review): listing is elided — the branch that handles op_Phi
 * (which the visible `else if` below pairs with) is not in view. */
205 static void collect_nodes(ir_node *n, void *env) {
206 ir_op *op = get_irn_op(n);
209 if (op != op_Block) {
210 ir_node *b = get_nodes_block(n);
213 /* Collect Phi nodes to compact ins along with block's ins. */
214 set_irn_link(n, get_irn_link(b));
216 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
217 mark_Block_block_visited(b);
219 if (op == op_Proj) { /* link Proj nodes */
220 ir_node *pred = get_Proj_pred(n);
/* Prepend n to pred's Proj chain (used by handle_switch_cond). */
222 set_irn_link(n, get_irn_link(pred));
223 set_irn_link(pred, n);
224 } else if (op == op_Cond) {
225 ir_node *sel = get_Cond_selector(n);
226 if (mode_is_int(get_irn_mode(sel))) {
227 /* found a switch-Cond, collect */
228 plist_insert_back(list, n);
235 /** Returns true if pred is predecessor of block. */
/* NOTE(review): listing is elided — the declaration of i/n, the
 * "return 0" fall-through, and the closing brace are not visible. */
236 static int is_pred_of(ir_node *pred, ir_node *b) {
/* Linear scan over b's cf predecessor blocks. */
239 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
240 ir_node *b_pred = get_Block_cfgpred_block(b, i);
241 if (b_pred == pred) return 1;
246 /** Test whether we can optimize away pred block pos of b.
248 * @param b A block node.
249 * @param pos The position of the predecessor block to judge about.
251 * @returns The number of predecessors
253 * The test is rather tricky.
255 * The situation is something like the following:
264 * b merges the control flow of an if-then-else. We may not remove
265 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
266 * node in b, even if both are empty. The destruction of this Phi
267 * requires that a copy is added before the merge. We have to
268 * keep one of the case blocks to place the copies in.
270 * To perform the test for pos, we must regard predecessors before pos
271 * as already removed.
/* Decides whether predecessor block `pos` of `b` can be merged away and
 * returns how many predecessor slots it will contribute to b's new
 * in-array: 0 for a dead block, the pred's own predecessor count when it
 * is dispensable, otherwise 1. Marks non-dispensable blocks by bumping
 * their block_visited value so later phases keep them.
 * NOTE(review): listing is elided — early returns and labels (e.g. the
 * `non_dispensable:` target and `return` statements) are not in view. */
273 static int test_whether_dispensable(ir_node *b, int pos) {
274 int i, j, n_preds = 1;
275 ir_node *pred = get_Block_cfgpred_block(b, pos);
277 /* Bad blocks will be optimized away, so we don't need space for them */
278 if (is_Block_dead(pred))
/* block_visited + 1 < irg_block_visited means: collect_nodes found the
 * block empty (it was never marked as containing real code). */
281 if (get_Block_block_visited(pred) + 1
282 < get_irg_block_visited(current_ir_graph)) {
284 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
285 /* Mark block so that is will not be removed: optimization is turned off. */
286 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
290 /* Seems to be empty. At least we detected this in collect_nodes. */
291 if (!get_irn_link(b)) {
292 /* There are no Phi nodes ==> all predecessors are dispensable. */
293 n_preds = get_Block_n_cfgpreds(pred);
295 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
296 Handle all pred blocks with preds < pos as if they were already removed. */
297 for (i = 0; i < pos; i++) {
298 ir_node *b_pred = get_Block_cfgpred_block(b, i);
299 if (! is_Block_dead(b_pred) &&
300 get_Block_block_visited(b_pred) + 1
301 < get_irg_block_visited(current_ir_graph)) {
/* b_pred is empty too and will also be merged: check ITS preds. */
302 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
303 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
304 if (is_pred_of(b_pred_pred, pred))
305 goto non_dispensable;
308 if (is_pred_of(b_pred, pred))
309 goto non_dispensable;
/* Predecessors after pos are checked as-is (not yet removed). */
312 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
313 ir_node *b_pred = get_Block_cfgpred_block(b, i);
314 if (is_pred_of(b_pred, pred))
315 goto non_dispensable;
317 /* if we get here, the block is dispensable */
318 n_preds = get_Block_n_cfgpreds(pred);
/* non-dispensable: pin the block by marking it as "visited". */
325 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
330 * Store to defer the exchange of Phi nodes.
/* NOTE(review): the closing "} defer_ex_phi;" of this typedef is elided
 * from this listing. */
332 typedef struct _defer_ex_phi {
333 ir_node *phi_pred; /**< the previous Phi node that will be replaced */
334 ir_node *phi; /**< the new Phi node that replaces phi_pred */
338 * This method removes Bad cf predecessors from Blocks and Phis, and removes
339 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
341 * We first adapt Phi nodes, then Block nodes, as we need the old ins
342 * of the Block to adapt the Phi nodes. We do this by computing new
343 * in arrays, and then replacing the old ones. So far we compute new in arrays
344 * for all nodes, not regarding whether there is a possibility for optimization.
346 * For each predecessor p of a Block b there are three cases:
347 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
348 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
350 * -3. The predecessor p is a block containing useful code. Just keep p as is.
352 * For Phi nodes f we have to check the conditions at the Block of f.
353 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
355 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
356 * case we proceed as for blocks. We remove pred_f. All
357 * predecessors of pred_f now are predecessors of f.
358 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
359 * We have to replicate f for each predecessor of the removed block. Or, with
360 * other words, the removed predecessor block has exactly one predecessor.
362 * Further there is a special case for self referencing blocks:
365 * then_b else_b then_b else_b
371 * | | | === optimized to ===> \ | | |
378 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
379 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
381 * @@@ It is negotiable whether we should do this ... there might end up a copy
382 * from the Phi in the loop when removing the Phis.
/* Block walker: the core transformation. Rebuilds the in-arrays of block
 * b and of all Phis linked into b (by collect_nodes), skipping Bad
 * predecessors and inlining the predecessors of empty ("dispensable")
 * blocks. Phi exchanges are deferred via the defers array so two Phis
 * sharing the same pred-Phi are handled correctly. A second phase moves
 * Phis out of removed loop-entry blocks (the special case documented
 * above this function).
 * NOTE(review): this listing is heavily elided (gaps in the left-margin
 * numbering) — several declarations, else-branches, increments of
 * p_preds/q_preds/n_preds and closing braces are not visible. Comments
 * below describe only what the visible lines show. */
384 static void optimize_blocks(ir_node *b, void *env) {
385 int i, j, k, n, max_preds, n_preds, p_preds = -1;
388 defer_ex_phi *defers;
390 /* Count the number of predecessor if this block is merged with pred blocks
/* max_preds = sum over all preds of how many slots each contributes. */
393 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
394 max_preds += test_whether_dispensable(b, i);
396 in = xmalloc(max_preds * sizeof(*in));
398 defers = NEW_ARR_F(defer_ex_phi, 0);
/* (Debug output block — printf/DDMN tracing, normally commented out.) */
401 printf(" working on "); DDMN(b);
402 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
403 pred = get_nodes_block(get_Block_cfgpred(b, i));
404 if (is_Bad(get_Block_cfgpred(b, i))) {
405 printf(" removing Bad %i\n ", i);
406 } else if (get_Block_block_visited(pred) +1
407 < get_irg_block_visited(current_ir_graph)) {
408 printf(" removing pred %i ", i); DDMN(pred);
409 } else { printf(" Nothing to do for "); DDMN(pred); }
411 * end Debug output -*/
413 /*- Fix the Phi nodes of the current block -*/
414 for (phi = get_irn_link(b); phi; ) {
415 assert(get_irn_op(phi) == op_Phi);
417 /* Find the new predecessors for the Phi */
419 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
420 pred = get_Block_cfgpred_block(b, i);
422 if (is_Bad(get_Block_cfgpred(b, i))) {
423 /* case Phi 1: Do nothing */
425 else if (get_Block_block_visited(pred) + 1
426 < get_irg_block_visited(current_ir_graph)) {
427 /* case Phi 2: It's an empty block and not yet visited. */
428 ir_node *phi_pred = get_Phi_pred(phi, i);
430 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
431 /* because of breaking loops, not all predecessors are Bad-clean,
432 * so we must check this here again */
433 if (! is_Bad(get_Block_cfgpred(pred, j))) {
434 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: the Phi operand lives in the removed block — pull
 * its own operands up into this Phi. */
436 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
438 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: operand is outside the removed block — replicate it. */
441 in[p_preds++] = phi_pred;
446 /* The Phi_pred node is replaced now if it is a Phi.
448 Somehow the removed Phi node can be used legally in loops.
449 Therefore we replace the old phi by the new one.
450 This must be done _AFTER_ all Phis are optimized, or
451 it will fail if two Phis use the same pred_Phi.
453 FIXME: Is the following true? We ALWAYS replace it by the new one.
455 Further we have to remove the old Phi node by replacing it
456 by Bad. Else it will remain in the keep alive array of End
457 and cause illegal situations. So if there is no loop, we should
460 if (get_nodes_block(phi_pred) == pred) {
462 /* remove the Phi as it might be kept alive. Further there
463 might be other users. */
/* Search for an existing deferred entry for phi_pred first. */
464 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
465 if (defers[i].phi_pred == phi_pred)
469 /* we have a new replacement */
472 elem.phi_pred = phi_pred;
474 ARR_APP1(defer_ex_phi, defers, elem);
/* case Phi 3: ordinary predecessor — keep the operand as-is. */
479 in[p_preds++] = get_Phi_pred(phi, i);
482 assert(p_preds <= max_preds);
486 /* By removal of Bad ins the Phi might be degenerated. */
487 exchange(phi, in[0]);
489 set_irn_in(phi, p_preds, in);
491 phi = get_irn_link(phi);
494 /* now, exchange all Phis */
495 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
496 exchange(defers[i].phi_pred, defers[i].phi);
500 /*- This happens only if merge between loop backedge and single loop entry.
501 See special case above. -*/
502 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
503 pred = get_nodes_block(get_Block_cfgpred(b, k));
505 if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
506 /* we found a predecessor block at position k that will be removed */
507 for (phi = get_irn_link(pred); phi;) {
509 * the previous phase may already have changed the phi, and even
510 * removed it entirely, so check here if this node is still a phi
512 if (get_irn_op(phi) == op_Phi) {
515 /* move this phi from the predecessor into the block b */
516 set_nodes_block(phi, b);
518 /* first, copy all 0..k-1 predecessors */
519 for (i = 0; i < k; i++) {
520 pred = get_Block_cfgpred_block(b, i);
524 } else if (get_Block_block_visited(pred) + 1
525 < get_irg_block_visited(current_ir_graph)) {
526 /* It's an empty block and not yet visited. */
527 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
528 /* @@@ Here I need loop information!!! The control flow edge
529 must be a backedge! (At all four in[q_preds] = phi
530 statements.) Still, it has worked so far!! */
531 if (! is_Bad(get_Block_cfgpred(pred, j)))
539 /* now we are at k, copy the phi predecessors */
540 pred = get_nodes_block(get_Block_cfgpred(b, k));
541 for (i = 0; i < get_Phi_n_preds(phi); i++) {
542 if (! is_Bad(get_Block_cfgpred(pred, i)))
543 in[q_preds++] = get_Phi_pred(phi, i);
546 /* and now all the rest */
547 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
548 pred = get_nodes_block(get_Block_cfgpred(b, i));
550 if (is_Bad(get_Block_cfgpred(b, i))) {
552 } else if (get_Block_block_visited(pred) +1
553 < get_irg_block_visited(current_ir_graph)) {
554 /* It's an empty block and not yet visited. */
555 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
556 if (! is_Bad(get_Block_cfgpred(pred, j)))
566 exchange(phi, in[0]);
568 set_irn_in(phi, q_preds, in);
570 assert(q_preds <= max_preds);
571 // assert(p_preds == q_preds && "Wrong Phi Fix");
573 phi = get_irn_link(phi);
578 /*- Fix the block -*/
/* Same three cases as for the Phis, but now for b's own in-array. */
580 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
581 pred = get_Block_cfgpred_block(b, i);
584 /* case 1: Do nothing */
585 } else if (get_Block_block_visited(pred) +1
586 < get_irg_block_visited(current_ir_graph)) {
587 /* case 2: It's an empty block and not yet visited. */
588 assert(get_Block_n_cfgpreds(b) > 1);
589 /* Else it should be optimized by equivalent_node. */
590 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
591 ir_node *pred_block = get_Block_cfgpred(pred, j);
593 /* because of breaking loops, not all predecessors are Bad-clean,
594 * so we must check this here again */
595 if (! is_Bad(pred_block))
596 in[n_preds++] = pred_block;
598 /* Remove block as it might be kept alive. */
599 exchange(pred, b/*new_Bad()*/);
/* case 3: keep the original cf edge. */
602 in[n_preds++] = get_Block_cfgpred(b, i);
605 assert(n_preds <= max_preds);
607 set_irn_in(b, n_preds, in);
609 assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
614 * Walker: optimize all blocks using the default optimizations.
615 * This removes Blocks that have only a Jmp predecessor.
/* Post-walker: let equivalent_node() fold trivial blocks (e.g. a block
 * reachable only via a single Jmp) and exchange them in place.
 * NOTE(review): the closing brace of this function is elided from this
 * listing. */
617 static void remove_simple_blocks(ir_node *block, void *env) {
618 ir_node *new_blk = equivalent_node(block);
619 if (new_blk != block)
620 exchange(block, new_blk);
624 * Handle pre-optimized table switch Cond's.
625 * During iropt, all Projs from a switch-Cond are already removed except
626 * the defProj and maybe the taken one.
627 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
629 * @param cond the switch-Cond
631 * @return non-zero if a switch-Cond was optimized
633 * Expects all Proj's linked to the cond node
/* Simplifies a pre-optimized switch-Cond whose Projs are reachable via
 * the link chain set up by collect_nodes. With a single remaining Proj
 * (the defProj) the Cond becomes a Jmp; with two Projs and a constant
 * selector, the taken Proj becomes a Jmp and the other becomes Bad.
 * NOTE(review): listing is elided — declarations of blk/jmp, the
 * surrounding if for the one-Proj case, and the return statements are
 * not visible here. */
635 static int handle_switch_cond(ir_node *cond) {
636 ir_node *sel = get_Cond_selector(cond);
/* proj1/proj2: first two Projs in the Cond's link chain. */
638 ir_node *proj1 = get_irn_link(cond);
639 ir_node *proj2 = get_irn_link(proj1);
642 blk = get_nodes_block(cond);
645 /* this Cond has only one Proj: must be the defProj */
646 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
647 /* convert it into a Jmp */
648 jmp = new_r_Jmp(current_ir_graph, blk);
649 exchange(proj1, jmp);
651 } else if (get_irn_link(proj2) == NULL) {
652 /* We have two Proj's here. Check if the Cond has
653 a constant argument */
654 tarval *tv = value_of(sel);
656 if (tv != tarval_bad) {
657 /* we have a constant switch */
658 long num = get_tarval_long(tv);
659 long def_num = get_Cond_defaultProj(cond);
661 if (def_num == get_Proj_proj(proj1)) {
662 /* first one is the defProj */
663 if (num == get_Proj_proj(proj2)) {
/* proj2 is taken: turn it into a Jmp, kill the default. */
664 jmp = new_r_Jmp(current_ir_graph, blk);
665 exchange(proj2, jmp);
666 exchange(proj1, new_Bad());
669 } else if (def_num == get_Proj_proj(proj2)) {
670 /* second one is the defProj */
671 if (num == get_Proj_proj(proj1)) {
672 jmp = new_r_Jmp(current_ir_graph, blk);
673 exchange(proj1, jmp);
674 exchange(proj2, new_Bad());
678 /* neither: strange, Cond was not optimized so far */
679 if (num == get_Proj_proj(proj1)) {
680 jmp = new_r_Jmp(current_ir_graph, blk);
681 exchange(proj1, jmp);
682 exchange(proj2, new_Bad());
684 } else if (num == get_Proj_proj(proj2)) {
685 jmp = new_r_Jmp(current_ir_graph, blk);
686 exchange(proj2, jmp);
687 exchange(proj1, new_Bad());
696 /* Optimizations of the control flow that also require changes of Phi nodes.
698 * This optimization performs two passes over the graph.
700 * The first pass collects all Phi nodes in a link list in the block
701 * nodes. Further it performs simple control flow optimizations.
702 * Finally it marks all blocks that do not contain useful
703 * computations, i.e., these blocks might be removed.
705 * The second pass performs the optimizations intended by this algorithm.
706 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
707 * which it finds in a linked list computed by the first pass.
709 * We use the block_visited flag to mark empty blocks in the first
711 * @@@ It would be better to add a struct in the link field
712 * that keeps the Phi list and the mark. Place it on an obstack, as
713 * we will lose blocks and thereby generate memory leaks.
715 void optimize_cf(ir_graph *irg) {
718 ir_node *cond, *end = get_irg_end(irg);
719 ir_graph *rem = current_ir_graph;
720 irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
724 assert(get_irg_phase_state(irg) != phase_building);
726 /* if the graph is not pinned, we cannot determine empty blocks */
727 assert(get_irg_pinned(irg) != op_pin_state_floats &&
728 "Control flow optimization need a pinned graph");
730 current_ir_graph = irg;
732 edges_deactivate(irg);
734 /* Handle graph state */
735 set_irg_outs_inconsistent(current_ir_graph);
736 set_irg_extblk_inconsistent(current_ir_graph);
737 set_irg_loopinfo_inconsistent(current_ir_graph);
738 set_irg_doms_inconsistent(current_ir_graph);
740 if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
743 /* we have dominance info, we can kill dead block */
744 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
746 /* fix the keep-alives */
747 end = get_irg_end(irg);
748 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
749 ir_node *ka = get_End_keepalive(end, i);
752 /* do NOT keep dead blocks */
753 if (get_Block_dom_depth(ka) < 0)
754 set_End_keepalive(end, i, new_Bad());
755 } else if (is_Block_dead(get_nodes_block(ka)) ||
756 get_Block_dom_depth(get_nodes_block(ka)) < 0)
757 /* do NOT keep nodes in dead blocks */
758 set_End_keepalive(end, i, new_Bad());
761 irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);
763 /* Use block visited flag to mark non-empty blocks. */
764 inc_irg_block_visited(irg);
767 irg_walk(end, merge_blocks, collect_nodes, list);
769 /* handle all collected switch-Conds */
770 foreach_plist(list, el) {
771 cond = plist_element_get_value(el);
772 handle_switch_cond(cond);
776 /* Optimize the standard code. */
777 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);
779 /* Walk all keep alives, optimize them if block, add to new in-array
780 for end if useful. */
781 n = get_End_n_keepalives(end);
783 NEW_ARR_A(ir_node *, in, n);
784 inc_irg_visited(irg);
786 /* fix the keep alive */
787 for (i = j = 0; i < n; i++) {
788 ir_node *ka = get_End_keepalive(end, i);
790 if (irn_not_visited(ka)) {
791 ir_op *op = get_irn_op(ka);
793 if ((op == op_Block) && Block_not_block_visited(ka)) {
794 set_irg_block_visited(irg, /* Don't walk all the way to Start. */
795 get_irg_block_visited(irg)-1);
796 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
797 mark_irn_visited(ka);
799 } else if (op == op_Phi) {
800 mark_irn_visited(ka);
801 if (! is_Block_dead(get_nodes_block(ka)))
803 } else if (is_op_keep(op)) {
804 mark_irn_visited(ka);
805 if (! is_Block_dead(get_nodes_block(ka)))
811 set_End_keepalives(end, j, in);
812 /* the verifier doesn't work yet with floating nodes */
813 if (get_irg_pinned(irg) == op_pin_state_pinned) {
814 /* after optimize_cf(), only Bad data flow may remain. */
815 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
816 dump_ir_block_graph(irg, "-vrfy-cf");
817 dump_ir_graph(irg, "-vrfy-cf");
818 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
822 current_ir_graph = rem;