3 * File name: ir/opt/cfopt.c
4 * Purpose: control flow optimizations
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
28 #include "irgraph_t.h"
42 #include "irbackedge_t.h"
48 #include "iropt_dbg.h"
50 /*------------------------------------------------------------------*/
51 /* Control flow optimization. */
53 /* Removes Bad control flow predecessors and empty blocks. A block */
54 /* is empty if it contains only a Jmp node. */
55 /* Blocks can only be removed if they are not needed for the */
56 /* semantics of Phi nodes. */
57 /*------------------------------------------------------------------*/
60 * Replace binary Conds that jumps twice into the same block
66 * ProjX True ProjX False ==> | /
71 * Such patterns are the result of if-conversion.
73 * Note that the simple case that Block has only these two
74 * predecessors are already handled in equivalent_node_Block().
76 static void remove_senseless_conds(ir_node *bl, void *data) {
/* NOTE(review): several source lines are elided in this view (loop-variable
 * declarations, closing braces); the comments below describe only the code
 * that is visible here. */
78 int n = get_Block_n_cfgpreds(bl);
82 for (i = 0; i < n; ++i) {
83 ir_node *pred_i = get_Block_cfgpred(bl, i);
84 ir_node *cond_i = skip_Proj(pred_i);
/* Only binary (mode_b selector) Conds are candidates. */
87 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
89 for (j = i + 1; j < n; ++j) {
90 ir_node *pred_j = get_Block_cfgpred(bl, j);
91 ir_node *cond_j = skip_Proj(pred_j);
/* Both Projs of the same Cond enter this block: the Cond is
 * senseless.  Replace the first edge by a plain Jmp from the
 * Cond's block and kill the second edge with Bad. */
93 if (cond_j == cond_i) {
94 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
95 set_irn_n(bl, i, jmp);
96 set_irn_n(bl, j, new_Bad());
98 DBG_OPT_IFSIM2(cond_i, jmp);
107 * Removes Tuples from Block control flow predecessors.
108 * Optimizes blocks with equivalent_node(). This is tricky,
109 * as we want to avoid nodes that have as block predecessor Bads.
110 * Therefore we also optimize at control flow operations, depending
111 * how we first reach the Block.
113 static void merge_blocks(ir_node *node, void *env) {
/* NOTE(review): declarations of i, n, new_block and several closing braces
 * are elided in this view; comments cover the visible code only. */
117 /* clear the link field for ALL nodes first */
118 set_irn_link(node, NULL);
120 if (is_Block(node)) {
/* For Blocks: strip Tuples from the cf predecessors, then try to fold
 * the block itself via equivalent_node(). */
123 /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
124 A different order of optimizations might cause problems. */
125 if (get_opt_normalize()) {
126 for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
127 set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
131 new_block = equivalent_node(node);
132 if (new_block != node && ! is_Block_dead(new_block))
133 exchange(node, new_block);
135 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
136 /* We will soon visit a block. Optimize it before visiting! */
137 ir_node *b = get_nodes_block(skip_Proj(node));
139 if (!is_Block_dead(b)) {
140 new_block = equivalent_node(b);
/* Iterate until a fixpoint: keep exchanging b for its equivalent block.
 * NOTE(review): a line between exchange() and the re-evaluation below
 * (presumably `b = new_block;`) is elided in this view — confirm against
 * the full source. */
142 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
143 /* We would have to run gigo() if new is bad, so we
144 promote it directly below. Nevertheless, we sometimes reach a block
145 the first time through a dataflow node. In this case we optimized the
146 block as such and have to promote the Bad here. */
147 assert((get_opt_control_flow_straightening() ||
148 get_opt_control_flow_weak_simplification()) &&
149 ("strange flag setting"));
150 exchange (b, new_block);
152 new_block = equivalent_node(b);
155 /* normally, we would create a Bad block here, but this must be
156 * prevented, so just set its cf to Bad.
158 if (is_Block_dead(new_block))
159 exchange(node, new_Bad());
166 * Remove cf from dead block by inspecting dominance info
167 * Do not replace blocks by Bad. This optimization shall
168 * ensure, that all Bad control flow predecessors are
169 * removed, and no new other Bads are introduced.
171 * Must be run in the post walker.
173 static void remove_dead_block_cf(ir_node *block, void *env) {
/* NOTE(review): the declaration of i and the closing braces are elided in
 * this view.  Walks the cf predecessors backwards; a predecessor block
 * that is dead or unreachable (dom depth < 0) gets its control-flow edge
 * replaced by Bad. */
176 /* check block predecessors and turn control flow into bad */
177 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
178 ir_node *pred_X = get_Block_cfgpred(block, i);
180 if (! is_Bad(pred_X)) {
181 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
/* dom depth < 0 means the block is not reachable per dominance info */
183 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
184 set_Block_dead(pred_bl);
185 exchange(pred_X, new_Bad());
192 * Collects all Phi nodes in link list of Block.
193 * Marks all blocks "block_visited" if they contain a node other
194 * than Jmp (and Proj).
195 * Links all Proj nodes to their predecessors.
196 * Collects all switch-Conds in a list.
198 static void collect_nodes(ir_node *n, void *env) {
/* NOTE(review): lines are elided in this view — in particular the branch
 * that links Phi nodes (around the first set_irn_link) and the `else`
 * structure that makes `b` visible in the second branch.  Comments below
 * describe only the visible code. */
199 ir_op *op = get_irn_op(n);
202 if (op != op_Block) {
203 ir_node *b = get_nodes_block(n);
206 /* Collect Phi nodes to compact ins along with block's ins. */
207 set_irn_link(n, get_irn_link(b));
209 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
210 mark_Block_block_visited(b);
212 if (op == op_Proj) { /* link Proj nodes */
213 ir_node *pred = get_Proj_pred(n);
215 set_irn_link(n, get_irn_link(pred));
216 set_irn_link(pred, n);
217 } else if (op == op_Cond) {
218 ir_node *sel = get_Cond_selector(n);
/* An integer-mode selector marks a table-switch Cond; collect it in
 * the plist for handle_switch_cond(). */
219 if (mode_is_int(get_irn_mode(sel))) {
220 /* found a switch-Cond, collect */
221 plist_insert_back(list, n);
228 /** Returns true if pred is predecessor of block. */
229 static int is_pred_of(ir_node *pred, ir_node *b) {
/* NOTE(review): declarations of i, n and the trailing `return 0;` are
 * elided in this view.  Linear scan over b's cf predecessor blocks. */
232 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
233 ir_node *b_pred = get_Block_cfgpred_block(b, i);
234 if (b_pred == pred) return 1;
239 /** Test whether we can optimize away pred block pos of b.
241 * @param b A block node.
242 * @param pos The position of the predecessor block to judge about.
244 * @returns The number of predecessors
246 * The test is rather tricky.
248 * The situation is something like the following:
257 * b merges the control flow of an if-then-else. We may not remove
258 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
259 * node in b, even if both are empty. The destruction of this Phi
260 * requires that a copy is added before the merge. We have to
261 * keep one of the case blocks to place the copies in.
263 * To perform the test for pos, we must regard predecessors before pos
264 * as already removed.
266 static int test_whether_dispensable(ir_node *b, int pos) {
/* NOTE(review): several lines (early returns, the non_dispensable label,
 * closing braces) are elided in this view.  Returns the number of cf
 * predecessors b gains at position pos: the pred's own pred count if the
 * pred block can be merged away, otherwise 1 (and the pred is marked
 * visited so it will be kept). */
267 int i, j, n_preds = 1;
268 ir_node *pred = get_Block_cfgpred_block(b, pos);
270 /* Bad blocks will be optimized away, so we don't need space for them */
271 if (is_Block_dead(pred))
274 if (get_Block_block_visited(pred) + 1
275 < get_irg_block_visited(current_ir_graph)) {
277 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
278 /* Mark block so that it will not be removed: optimization is turned off. */
279 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
283 /* Seems to be empty. At least we detected this in collect_nodes. */
284 if (!get_irn_link(b)) {
285 /* There are no Phi nodes ==> all predecessors are dispensable. */
286 n_preds = get_Block_n_cfgpreds(pred);
288 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
289 Handle all pred blocks with preds < pos as if they were already removed. */
290 for (i = 0; i < pos; i++) {
291 ir_node *b_pred = get_Block_cfgpred_block(b, i);
292 if (! is_Block_dead(b_pred) &&
293 get_Block_block_visited(b_pred) + 1
294 < get_irg_block_visited(current_ir_graph)) {
/* b_pred will itself be merged away: treat its preds as b's preds */
295 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
296 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
297 if (is_pred_of(b_pred_pred, pred))
298 goto non_dispensable;
301 if (is_pred_of(b_pred, pred))
302 goto non_dispensable;
/* preds after pos are regarded as still present */
305 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
306 ir_node *b_pred = get_Block_cfgpred_block(b, i);
307 if (is_pred_of(b_pred, pred))
308 goto non_dispensable;
310 /* if we get here, the block is dispensable */
311 n_preds = get_Block_n_cfgpreds(pred);
/* non-dispensable path: un-mark pred so it is treated as non-empty */
318 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
323 * Store to defer the exchange of Phi nodes.
/* NOTE(review): the closing `} defer_ex_phi;` is elided in this view.
 * The tag `_defer_ex_phi` starts with an underscore, which is in the
 * implementation-reserved namespace at file scope — consider renaming. */
325 typedef struct _defer_ex_phi {
326 ir_node *phi_pred; /**< the previous Phi node that will be replaced */
327 ir_node *phi; /**< the new Phi node that replaces phi_pred */
331 * This method removes Bad cf predecessors from Blocks and Phis, and removes
332 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
334 * We first adapt Phi nodes, then Block nodes, as we need the old ins
335 * of the Block to adapt the Phi nodes. We do this by computing new
336 * in arrays, and then replacing the old ones. So far we compute new in arrays
337 * for all nodes, not regarding whether there is a possibility for optimization.
339 * For each predecessor p of a Block b there are three cases:
340 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
341 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
343 * -3. The predecessor p is a block containing useful code. Just keep p as is.
345 * For Phi nodes f we have to check the conditions at the Block of f.
346 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
348 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
349 * case we proceed as for blocks. We remove pred_f. All
350 * predecessors of pred_f now are predecessors of f.
351 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
352 * We have to replicate f for each predecessor of the removed block. Or, with
353 * other words, the removed predecessor block has exactly one predecessor.
355 * Further there is a special case for self referencing blocks:
358 * then_b else_b then_b else_b
364 * | | | === optimized to ===> \ | | |
371 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
372 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
374 * @@@ It is negotiable whether we should do this ... there might end up a copy
375 * from the Phi in the loop when removing the Phis.
377 static void optimize_blocks(ir_node *b, void *env) {
/* NOTE(review): this view of the function is missing many lines
 * (declarations of in/pred/phi/q_preds, several closing braces, some
 * statements).  Comments below only annotate what is visible.
 *
 * Three phases over block b:
 *  1. rebuild the in-arrays of b's Phi nodes,
 *  2. pull Phis out of merged predecessor blocks into b (loop special case),
 *  3. rebuild b's own in-array. */
378 int i, j, k, n, max_preds, n_preds, p_preds = -1;
381 defer_ex_phi *defers;
383 /* Count the number of predecessor if this block is merged with pred blocks
386 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
387 max_preds += test_whether_dispensable(b, i);
389 in = xmalloc(max_preds * sizeof(*in));
391 defers = NEW_ARR_F(defer_ex_phi, 0);
/* The printf/DDMN lines below are inside a disabled debug comment block;
 * only the `* end Debug output -*` terminator is visible here. */
394 printf(" working on "); DDMN(b);
395 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
396 pred = get_nodes_block(get_Block_cfgpred(b, i));
397 if (is_Bad(get_Block_cfgpred(b, i))) {
398 printf(" removing Bad %i\n ", i);
399 } else if (get_Block_block_visited(pred) +1
400 < get_irg_block_visited(current_ir_graph)) {
401 printf(" removing pred %i ", i); DDMN(pred);
402 } else { printf(" Nothing to do for "); DDMN(pred); }
404 * end Debug output -*/
406 /*- Fix the Phi nodes of the current block -*/
407 for (phi = get_irn_link(b); phi; ) {
408 assert(get_irn_op(phi) == op_Phi);
410 /* Find the new predecessors for the Phi */
412 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
413 pred = get_Block_cfgpred_block(b, i);
415 if (is_Bad(get_Block_cfgpred(b, i))) {
416 /* case Phi 1: Do nothing */
418 else if (get_Block_block_visited(pred) + 1
419 < get_irg_block_visited(current_ir_graph)) {
420 /* case Phi 2: It's an empty block and not yet visited. */
421 ir_node *phi_pred = get_Phi_pred(phi, i);
423 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
424 /* because of breaking loops, not all predecessors are Bad-clean,
425 * so we must check this here again */
426 if (! is_Bad(get_Block_cfgpred(pred, j))) {
427 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: the Phi's operand is a Phi inside the removed block */
429 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
431 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: operand defined elsewhere — replicate it */
434 in[p_preds++] = phi_pred;
439 /* The Phi_pred node is replaced now if it is a Phi.
441 Somehow the removed Phi node can be used legally in loops.
442 Therefore we replace the old phi by the new one.
443 This must be done _AFTER_ all Phis are optimized, or
444 it will fail if two Phis use the same pred_Phi.
446 FIXME: Is the following true? We ALWAYS replace it by the new one.
448 Further we have to remove the old Phi node by replacing it
449 by Bad. Else it will remain in the keep alive array of End
450 and cause illegal situations. So if there is no loop, we should
453 if (get_nodes_block(phi_pred) == pred) {
455 /* remove the Phi as it might be kept alive. Further there
456 might be other users. */
/* already deferred?  If so, update the existing entry instead */
457 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
458 if (defers[i].phi_pred == phi_pred)
462 /* we have a new replacement */
465 elem.phi_pred = phi_pred;
467 ARR_APP1(defer_ex_phi, defers, elem);
/* case Phi 3: predecessor block is kept, Phi operand unchanged */
472 in[p_preds++] = get_Phi_pred(phi, i);
475 assert(p_preds <= max_preds);
479 /* By removal of Bad ins the Phi might be degenerated. */
480 exchange(phi, in[0]);
482 set_irn_in(phi, p_preds, in);
484 phi = get_irn_link(phi);
487 /* now, exchange all Phis */
488 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
489 exchange(defers[i].phi_pred, defers[i].phi);
493 /*- This happens only if merge between loop backedge and single loop entry.
494 See special case above. -*/
495 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
496 pred = get_nodes_block(get_Block_cfgpred(b, k));
498 if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
499 /* we found a predecessor block at position k that will be removed */
500 for (phi = get_irn_link(pred); phi;) {
/* the previous phase may have already changed the phi, and even
 * removed it entirely, so check here if this node is still a phi */
502 * the previous phase may already changed the phi, and even
503 * removed it at all, so check here if this node is still a phi
505 if (get_irn_op(phi) == op_Phi) {
508 /* move this phi from the predecessor into the block b */
509 set_nodes_block(phi, b);
511 /* first, copy all 0..k-1 predecessors */
512 for (i = 0; i < k; i++) {
513 pred = get_Block_cfgpred_block(b, i);
517 } else if (get_Block_block_visited(pred) + 1
518 < get_irg_block_visited(current_ir_graph)) {
519 /* It's an empty block and not yet visited. */
520 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
521 /* @@@ Here we would need loop information!!! The control-flow edge
522 must be a backedge! (At all four in[q_preds] = phi
523 assignments.) Still, it has worked so far!! */
524 if (! is_Bad(get_Block_cfgpred(pred, j)))
532 /* now we are at k, copy the phi predecessors */
533 pred = get_nodes_block(get_Block_cfgpred(b, k));
534 for (i = 0; i < get_Phi_n_preds(phi); i++) {
535 if (! is_Bad(get_Block_cfgpred(pred, i)))
536 in[q_preds++] = get_Phi_pred(phi, i);
539 /* and now all the rest */
540 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
541 pred = get_nodes_block(get_Block_cfgpred(b, i));
543 if (is_Bad(get_Block_cfgpred(b, i))) {
545 } else if (get_Block_block_visited(pred) +1
546 < get_irg_block_visited(current_ir_graph)) {
547 /* It's an empty block and not yet visited. */
548 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
549 if (! is_Bad(get_Block_cfgpred(pred, j)))
559 exchange(phi, in[0]);
561 set_irn_in(phi, q_preds, in);
563 assert(q_preds <= max_preds);
564 // assert(p_preds == q_preds && "Wrong Phi Fix");
566 phi = get_irn_link(phi);
571 /*- Fix the block -*/
573 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
574 pred = get_Block_cfgpred_block(b, i);
577 /* case 1: Do nothing */
578 } else if (get_Block_block_visited(pred) +1
579 < get_irg_block_visited(current_ir_graph)) {
580 /* case 2: It's an empty block and not yet visited. */
581 assert(get_Block_n_cfgpreds(b) > 1);
582 /* Else it should be optimized by equivalent_node. */
583 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
584 ir_node *pred_block = get_Block_cfgpred(pred, j);
586 /* because of breaking loops, not all predecessors are Bad-clean,
587 * so we must check this here again */
588 if (! is_Bad(pred_block))
589 in[n_preds++] = pred_block;
591 /* Remove block as it might be kept alive. */
592 exchange(pred, b/*new_Bad()*/);
/* case 3: keep the predecessor edge as is */
595 in[n_preds++] = get_Block_cfgpred(b, i);
598 assert(n_preds <= max_preds);
600 set_irn_in(b, n_preds, in);
602 assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
607 * Walker: optimize all blocks using the default optimizations.
608 * This removes Blocks with only a Jmp predecessor.
610 static void remove_simple_blocks(ir_node *block, void *env) {
/* NOTE(review): the closing brace is elided in this view.
 * equivalent_node() folds trivial blocks (e.g. a block with just a Jmp
 * predecessor); if it yields a different node, replace the block. */
611 ir_node *new_blk = equivalent_node(block);
612 if (new_blk != block)
613 exchange(block, new_blk);
617 * Handle pre-optimized table switch Cond's.
618 * During iropt, all Projs from a switch-Cond are already removed except
619 * the defProj and maybe the taken one.
620 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
622 * @param cond the switch-Cond
624 * @return non-zero if a switch-Cond was optimized
626 * Expects all Proj's linked to the cond node
628 static int handle_switch_cond(ir_node *cond) {
/* NOTE(review): declarations of jmp/blk, the one-Proj `if` condition, the
 * return statements and closing braces are elided in this view; comments
 * annotate only the visible code.  Relies on collect_nodes() having linked
 * all Projs of the Cond via the link field (proj1 → proj2 → NULL). */
629 ir_node *sel = get_Cond_selector(cond);
631 ir_node *proj1 = get_irn_link(cond);
632 ir_node *proj2 = get_irn_link(proj1);
635 blk = get_nodes_block(cond);
638 /* this Cond has only one Proj: must be the defProj */
639 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
640 /* convert it into a Jmp */
641 jmp = new_r_Jmp(current_ir_graph, blk);
642 exchange(proj1, jmp);
644 } else if (get_irn_link(proj2) == NULL) {
645 /* We have two Proj's here. Check if the Cond has
646 a constant argument */
647 tarval *tv = value_of(sel);
649 if (tv != tarval_bad) {
650 /* we have a constant switch */
651 long num = get_tarval_long(tv);
652 long def_num = get_Cond_defaultProj(cond);
/* Whichever Proj matches the constant becomes a Jmp; the other
 * (the default, or the never-taken case) becomes Bad. */
654 if (def_num == get_Proj_proj(proj1)) {
655 /* first one is the defProj */
656 if (num == get_Proj_proj(proj2)) {
657 jmp = new_r_Jmp(current_ir_graph, blk);
658 exchange(proj2, jmp);
659 exchange(proj1, new_Bad());
662 } else if (def_num == get_Proj_proj(proj2)) {
663 /* second one is the defProj */
664 if (num == get_Proj_proj(proj1)) {
665 jmp = new_r_Jmp(current_ir_graph, blk);
666 exchange(proj1, jmp);
667 exchange(proj2, new_Bad());
671 /* neither: strange, Cond was not optimized so far */
672 if (num == get_Proj_proj(proj1)) {
673 jmp = new_r_Jmp(current_ir_graph, blk);
674 exchange(proj1, jmp);
675 exchange(proj2, new_Bad());
677 } else if (num == get_Proj_proj(proj2)) {
678 jmp = new_r_Jmp(current_ir_graph, blk);
679 exchange(proj2, jmp);
680 exchange(proj1, new_Bad());
689 /* Optimizations of the control flow that also require changes of Phi nodes.
691 * This optimization performs two passes over the graph.
693 * The first pass collects all Phi nodes in a link list in the block
694 * nodes. Further it performs simple control flow optimizations.
695 * Finally it marks all blocks that do not contain useful
696 * computations, i.e., these blocks might be removed.
698 * The second pass performs the optimizations intended by this algorithm.
699 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
700 * which it finds in a linked list computed by the first pass.
702 * We use the block_visited flag to mark empty blocks in the first
704 * @@@ It would be better to add a struct in the link field
705 * that keeps the Phi list and the mark. Place it on an obstack, as
706 * we will lose blocks and thereby generate memory leaks.
708 void optimize_cf(ir_graph *irg) {
/* NOTE(review): this view is missing lines (declarations of i, j, n, in,
 * list, el; plist setup/teardown; some braces).  Driver for the whole
 * pass: optional dead-block removal (if dominance info is consistent),
 * senseless-Cond removal, the merge/collect walk, switch-Cond handling,
 * the main optimize_blocks walk, and keep-alive fixup. */
711 ir_node *cond, *end = get_irg_end(irg);
712 ir_graph *rem = current_ir_graph;
713 irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
717 assert(get_irg_phase_state(irg) != phase_building);
719 /* if the graph is not pinned, we cannot determine empty blocks */
720 assert(get_irg_pinned(irg) != op_pin_state_floats &&
721 "Control flow optimization need a pinned graph");
723 current_ir_graph = irg;
725 edges_deactivate(irg);
727 /* Handle graph state */
728 set_irg_outs_inconsistent(current_ir_graph);
729 set_irg_extblk_inconsistent(current_ir_graph);
730 set_irg_loopinfo_inconsistent(current_ir_graph);
731 set_irg_doms_inconsistent(current_ir_graph);
733 if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
736 /* we have dominance info, we can kill dead block */
737 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
739 /* fix the keep-alives */
740 end = get_irg_end(irg);
741 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
742 ir_node *ka = get_End_keepalive(end, i);
/* NOTE(review): the `if (is_Block(ka))`-style guard before this branch
 * appears to be elided in this view. */
745 /* do NOT keep dead blocks */
746 if (get_Block_dom_depth(ka) < 0)
747 set_End_keepalive(end, i, new_Bad());
748 } else if (is_Block_dead(get_nodes_block(ka)) ||
749 get_Block_dom_depth(get_nodes_block(ka)) < 0)
750 /* do NOT keep nodes in dead blocks */
751 set_End_keepalive(end, i, new_Bad());
754 irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);
756 /* Use block visited flag to mark non-empty blocks. */
757 inc_irg_block_visited(irg);
/* pre = merge_blocks (fix blocks early), post = collect_nodes */
760 irg_walk(end, merge_blocks, collect_nodes, list);
762 /* handle all collected switch-Conds */
763 foreach_plist(list, el) {
764 cond = plist_element_get_value(el);
765 handle_switch_cond(cond);
769 /* Optimize the standard code. */
770 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);
772 /* Walk all keep alives, optimize them if block, add to new in-array
773 for end if useful. */
774 n = get_End_n_keepalives(end);
776 NEW_ARR_A(ir_node *, in, n);
777 inc_irg_visited(irg);
779 /* fix the keep alive */
780 for (i = j = 0; i < n; i++) {
781 ir_node *ka = get_End_keepalive(end, i);
783 if (irn_not_visited(ka)) {
784 ir_op *op = get_irn_op(ka);
786 if ((op == op_Block) && Block_not_block_visited(ka)) {
/* an unreachable keep-alive block: optimize it separately */
787 set_irg_block_visited(irg, /* Don't walk all the way to Start. */
788 get_irg_block_visited(irg)-1);
789 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
790 mark_irn_visited(ka);
792 } else if (op == op_Phi) {
793 mark_irn_visited(ka);
794 if (! is_Block_dead(get_nodes_block(ka)))
796 } else if (is_op_keep(op)) {
797 mark_irn_visited(ka);
798 if (! is_Block_dead(get_nodes_block(ka)))
/* NOTE(review): the `in[j++] = ka;` statements after the two checks
 * above appear to be elided in this view — confirm in full source. */
804 set_End_keepalives(end, j, in);
805 /* the verifier doesn't work yet with floating nodes */
806 if (get_irg_pinned(irg) == op_pin_state_pinned) {
807 /* after optimize_cf(), only Bad data flow may remain. */
808 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
809 dump_ir_block_graph(irg, "-vrfy-cf");
810 dump_ir_graph(irg, "-vrfy-cf");
811 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
815 current_ir_graph = rem;