3 * File name: ir/opt/cfopt.c
4 * Purpose: control flow optimizations
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
21 #include "irgraph_t.h"
35 #include "irbackedge_t.h"
41 #include "iropt_dbg.h"
43 /*------------------------------------------------------------------*/
44 /* Control flow optimization. */
46 /* Removes Bad control flow predecessors and empty blocks. A block */
47 /* is empty if it contains only a Jmp node. */
48 /* Blocks can only be removed if they are not needed for the */
49 /* semantics of Phi nodes. */
50 /*------------------------------------------------------------------*/
53 * Replace binary Conds that jump twice into the same block
59 * ProjX True ProjX False ==> | /
64 * Such patterns are the result of if-conversion.
66 * Note that the simple case that Block has only these two
67 * predecessors is already handled in equivalent_node_Block().
69 static void remove_senseless_conds(ir_node *bl, void *data) {
71 int n = get_Block_n_cfgpreds(bl);
75 for (i = 0; i < n; ++i) {
76 ir_node *pred_i = get_Block_cfgpred(bl, i);
77 ir_node *cond_i = skip_Proj(pred_i);
80 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
82 for (j = i + 1; j < n; ++j) {
83 ir_node *pred_j = get_Block_cfgpred(bl, j);
84 ir_node *cond_j = skip_Proj(pred_j);
86 if (cond_j == cond_i) {
87 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
88 set_irn_n(bl, i, jmp);
89 set_irn_n(bl, j, new_Bad());
91 DBG_OPT_IFSIM2(cond_i, jmp);
100 * Removes Tuples from Block control flow predecessors.
101 * Optimizes blocks with equivalent_node(). This is tricky,
102 * as we want to avoid nodes that have as block predecessor Bads.
103 * Therefore we also optimize at control flow operations, depending
104 * on how we first reach the Block.
106 static void merge_blocks(ir_node *node, void *env) {
110 /* clear the link field for ALL nodes first */
111 set_irn_link(node, NULL);
113 if (is_Block(node)) {
/* Remove Tuple skips on all control flow predecessors of this block. */
116 /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
117 A different order of optimizations might cause problems. */
118 if (get_opt_normalize()) {
119 for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
120 set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
/* Try to simplify the block itself; never exchange it for a dead block. */
124 new_block = equivalent_node(node);
125 if (new_block != node && ! is_Block_dead(new_block))
126 exchange(node, new_block);
128 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
129 /* We will soon visit a block. Optimize it before visiting! */
130 ir_node *b = get_nodes_block(skip_Proj(node));
132 if (!is_Block_dead(b)) {
133 new_block = equivalent_node(b);
/* Repeatedly straighten the block until it stabilizes or is dead.
 * NOTE(review): loop body reassigns new_block after the exchange. */
135 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
136 /* We would have to run gigo() if new is bad, so we
137 promote it directly below. Nevertheless, we sometimes reach a block
138 the first time through a dataflow node. In this case we optimized the
139 block as such and have to promote the Bad here. */
140 assert((get_opt_control_flow_straightening() ||
141 get_opt_control_flow_weak_simplification()) &&
142 ("strange flag setting"));
143 exchange (b, new_block);
145 new_block = equivalent_node(b);
148 /* normally, we would create a Bad block here, but this must be
149 * prevented, so just set its cf to Bad.
151 if (is_Block_dead(new_block))
152 exchange(node, new_Bad());
159 * Remove cf from dead block by inspecting dominance info
160 * Do not replace blocks by Bad. This optimization shall
161 * ensure, that all Bad control flow predecessors are
162 * removed, and no new other Bads are introduced.
164 * Must be run in the post walker.
166 static void remove_dead_block_cf(ir_node *block, void *env) {
169 /* check block predecessors and turn control flow into bad */
170 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
171 ir_node *pred_X = get_Block_cfgpred(block, i);
173 if (! is_Bad(pred_X)) {
174 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
176 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
177 set_Block_dead(pred_bl);
178 exchange(pred_X, new_Bad());
185 * Collects all Phi nodes in link list of Block.
186 * Marks all blocks "block_visited" if they contain a node other
187 * than Jmp (and Proj).
188 * Links all Proj nodes to their predecessors.
189 * Collects all switch-Conds in a list.
191 static void collect_nodes(ir_node *n, void *env) {
192 ir_op *op = get_irn_op(n);
/* NOTE(review): 'list' below is presumably a plist_t* taken from env,
 * and 'b' is the node's block — declarations not visible in this extract. */
195 if (op != op_Block) {
196 ir_node *b = get_nodes_block(n);
199 /* Collect Phi nodes to compact ins along with block's ins. */
200 set_irn_link(n, get_irn_link(b));
202 } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
203 mark_Block_block_visited(b);
205 if (op == op_Proj) { /* link Proj nodes */
206 ir_node *pred = get_Proj_pred(n);
/* chain this Proj onto its predecessor's link list */
208 set_irn_link(n, get_irn_link(pred));
209 set_irn_link(pred, n);
210 } else if (op == op_Cond) {
211 ir_node *sel = get_Cond_selector(n);
212 if (mode_is_int(get_irn_mode(sel))) {
213 /* found a switch-Cond, collect */
214 plist_insert_back(list, n);
221 /** Returns true if pred is predecessor of block. */
222 static int is_pred_of(ir_node *pred, ir_node *b) {
225 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
226 ir_node *b_pred = get_Block_cfgpred_block(b, i);
227 if (b_pred == pred) return 1;
232 /** Test whether we can optimize away pred block pos of b.
234 * @param b A block node.
235 * @param pos The position of the predecessor block to judge about.
237 * @returns The number of predecessors
239 * The test is rather tricky.
241 * The situation is something like the following:
250 * b merges the control flow of an if-then-else. We may not remove
251 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
252 * node in b, even if both are empty. The destruction of this Phi
253 * requires that a copy is added before the merge. We have to
254 * keep one of the case blocks to place the copies in.
256 * To perform the test for pos, we must regard predecessors before pos
257 * as already removed.
259 static int test_whether_dispensable(ir_node *b, int pos) {
260 int i, j, n_preds = 1;
261 ir_node *pred = get_Block_cfgpred_block(b, pos);
263 /* Bad blocks will be optimized away, so we don't need space for them */
264 if (is_Block_dead(pred))
/* Not-yet-visited (== empty, see collect_nodes) blocks are removal candidates. */
267 if (get_Block_block_visited(pred) + 1
268 < get_irg_block_visited(current_ir_graph)) {
270 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
271 /* Mark block so that it will not be removed: optimization is turned off. */
272 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
276 /* Seems to be empty. At least we detected this in collect_nodes. */
277 if (!get_irn_link(b)) {
278 /* There are no Phi nodes ==> all predecessors are dispensable. */
279 n_preds = get_Block_n_cfgpreds(pred);
281 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
282 Handle all pred blocks with preds < pos as if they were already removed. */
283 for (i = 0; i < pos; i++) {
284 ir_node *b_pred = get_Block_cfgpred_block(b, i);
285 if (! is_Block_dead(b_pred) &&
286 get_Block_block_visited(b_pred) + 1
287 < get_irg_block_visited(current_ir_graph)) {
288 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
289 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
290 if (is_pred_of(b_pred_pred, pred))
291 goto non_dispensable;
294 if (is_pred_of(b_pred, pred))
295 goto non_dispensable;
/* Predecessors after pos are checked directly (not yet removed). */
298 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
299 ir_node *b_pred = get_Block_cfgpred_block(b, i);
300 if (is_pred_of(b_pred, pred))
301 goto non_dispensable;
303 /* if we get here, the block is dispensable */
304 n_preds = get_Block_n_cfgpreds(pred);
/* non_dispensable path: keep the block by marking it visited. */
311 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
316 * Store to defer the exchange of Phi nodes.
318 typedef struct _defer_ex_phi {
319 ir_node *phi_pred; /**< the previous Phi node that will be replaced */
320 ir_node *phi; /**< the new Phi node that replaces phi_pred */
324 * This method removes Bad cf predecessors from Blocks and Phis, and removes
325 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
327 * We first adapt Phi nodes, then Block nodes, as we need the old ins
328 * of the Block to adapt the Phi nodes. We do this by computing new
329 * in arrays, and then replacing the old ones. So far we compute new in arrays
330 * for all nodes, not regarding whether there is a possibility for optimization.
332 * For each predecessor p of a Block b there are three cases:
333 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
334 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
336 * -3. The predecessor p is a block containing useful code. Just keep p as is.
338 * For Phi nodes f we have to check the conditions at the Block of f.
339 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
341 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
342 * case we proceed as for blocks. We remove pred_f. All
343 * predecessors of pred_f now are predecessors of f.
344 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
345 * We have to replicate f for each predecessor of the removed block. Or, with
346 * other words, the removed predecessor block has exactly one predecessor.
348 * Further there is a special case for self referencing blocks:
351 * then_b else_b then_b else_b
357 * | | | === optimized to ===> \ | | |
364 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
365 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
367 * @@@ It is negotiable whether we should do this ... there might end up a copy
368 * from the Phi in the loop when removing the Phis.
370 static void optimize_blocks(ir_node *b, void *env) {
371 int i, j, k, n, max_preds, n_preds, p_preds = -1;
374 defer_ex_phi *defers;
376 /* Count the number of predecessor if this block is merged with pred blocks
379 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
380 max_preds += test_whether_dispensable(b, i);
/* in[] is sized for the worst case computed above */
382 in = xmalloc(max_preds * sizeof(*in));
384 defers = NEW_ARR_F(defer_ex_phi, 0);
/*- Debug output (disabled):
387 printf(" working on "); DDMN(b);
388 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
389 pred = get_nodes_block(get_Block_cfgpred(b, i));
390 if (is_Bad(get_Block_cfgpred(b, i))) {
391 printf(" removing Bad %i\n ", i);
392 } else if (get_Block_block_visited(pred) +1
393 < get_irg_block_visited(current_ir_graph)) {
394 printf(" removing pred %i ", i); DDMN(pred);
395 } else { printf(" Nothing to do for "); DDMN(pred); }
397 * end Debug output -*/
399 /*- Fix the Phi nodes of the current block -*/
400 for (phi = get_irn_link(b); phi; ) {
401 assert(get_irn_op(phi) == op_Phi);
403 /* Find the new predecessors for the Phi */
405 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
406 pred = get_Block_cfgpred_block(b, i);
408 if (is_Bad(get_Block_cfgpred(b, i))) {
409 /* case Phi 1: Do nothing */
411 else if (get_Block_block_visited(pred) + 1
412 < get_irg_block_visited(current_ir_graph)) {
413 /* case Phi 2: It's an empty block and not yet visited. */
414 ir_node *phi_pred = get_Phi_pred(phi, i);
416 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
417 /* because of breaking loops, not all predecessors are Bad-clean,
418 * so we must check this here again */
419 if (! is_Bad(get_Block_cfgpred(pred, j))) {
420 if (get_nodes_block(phi_pred) == pred) {
/* case 2a: pull the removed block's Phi ins up into this Phi */
422 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
424 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: replicate the single value for each remaining pred */
427 in[p_preds++] = phi_pred;
432 /* The Phi_pred node is replaced now if it is a Phi.
434 Somehow the removed Phi node can be used legally in loops.
435 Therefore we replace the old phi by the new one.
436 This must be done _AFTER_ all Phis are optimized, or
437 it will fail if two Phis use the same pred_Phi.
439 FIXME: Is the following true? We ALWAYS replace it by the new one.
441 Further we have to remove the old Phi node by replacing it
442 by Bad. Else it will remain in the keep alive array of End
443 and cause illegal situations. So if there is no loop, we should
446 if (get_nodes_block(phi_pred) == pred) {
448 /* remove the Phi as it might be kept alive. Further there
449 might be other users. */
450 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
451 if (defers[i].phi_pred == phi_pred)
455 /* we have a new replacement */
458 elem.phi_pred = phi_pred;
460 ARR_APP1(defer_ex_phi, defers, elem);
/* case Phi 3: ordinary predecessor, keep the Phi in as is */
465 in[p_preds++] = get_Phi_pred(phi, i);
468 assert(p_preds <= max_preds);
472 /* By removal of Bad ins the Phi might be degenerated. */
473 exchange(phi, in[0]);
475 set_irn_in(phi, p_preds, in);
477 phi = get_irn_link(phi);
480 /* now, exchange all Phis */
481 for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
482 exchange(defers[i].phi_pred, defers[i].phi);
486 /*- This happens only if merge between loop backedge and single loop entry.
487 See special case above. -*/
488 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
489 pred = get_nodes_block(get_Block_cfgpred(b, k));
491 if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
492 /* we found a predecessor block at position k that will be removed */
493 for (phi = get_irn_link(pred); phi;) {
495 * the previous phase may already changed the phi, and even
496 * removed it at all, so check here if this node is still a phi
498 if (get_irn_op(phi) == op_Phi) {
501 /* move this phi from the predecessor into the block b */
502 set_nodes_block(phi, b);
504 /* first, copy all 0..k-1 predecessors */
505 for (i = 0; i < k; i++) {
506 pred = get_Block_cfgpred_block(b, i);
510 } else if (get_Block_block_visited(pred) + 1
511 < get_irg_block_visited(current_ir_graph)) {
512 /* It's an empty block and not yet visited. */
513 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
514 /* @@@ Loop information is needed here! The control flow edge
515 must be a backedge! (At all four in[q_preds] = phi
516 statements.) Still it works so far!! */
517 if (! is_Bad(get_Block_cfgpred(pred, j)))
525 /* now we are at k, copy the phi predecessors */
526 pred = get_nodes_block(get_Block_cfgpred(b, k));
527 for (i = 0; i < get_Phi_n_preds(phi); i++) {
528 if (! is_Bad(get_Block_cfgpred(pred, i)))
529 in[q_preds++] = get_Phi_pred(phi, i);
532 /* and now all the rest */
533 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
534 pred = get_nodes_block(get_Block_cfgpred(b, i));
536 if (is_Bad(get_Block_cfgpred(b, i))) {
538 } else if (get_Block_block_visited(pred) +1
539 < get_irg_block_visited(current_ir_graph)) {
540 /* It's an empty block and not yet visited. */
541 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
542 if (! is_Bad(get_Block_cfgpred(pred, j)))
552 exchange(phi, in[0]);
554 set_irn_in(phi, q_preds, in);
556 assert(q_preds <= max_preds);
557 // assert(p_preds == q_preds && "Wrong Phi Fix");
559 phi = get_irn_link(phi);
564 /*- Fix the block -*/
566 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
567 pred = get_Block_cfgpred_block(b, i);
570 /* case 1: Do nothing */
571 } else if (get_Block_block_visited(pred) +1
572 < get_irg_block_visited(current_ir_graph)) {
573 /* case 2: It's an empty block and not yet visited. */
574 assert(get_Block_n_cfgpreds(b) > 1);
575 /* Else it should be optimized by equivalent_node. */
576 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
577 ir_node *pred_block = get_Block_cfgpred(pred, j);
579 /* because of breaking loops, not all predecessors are Bad-clean,
580 * so we must check this here again */
581 if (! is_Bad(pred_block))
582 in[n_preds++] = pred_block;
584 /* Remove block as it might be kept alive. */
585 exchange(pred, b/*new_Bad()*/);
/* case 3: ordinary predecessor, keep the cf edge */
588 in[n_preds++] = get_Block_cfgpred(b, i);
591 assert(n_preds <= max_preds);
593 set_irn_in(b, n_preds, in);
595 assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
600 * Walker: optimize all blocks using the default optimizations.
601 * This removes Blocks with only a Jmp predecessor.
603 static void remove_simple_blocks(ir_node *block, void *env) {
604 ir_node *new_blk = equivalent_node(block);
605 if (new_blk != block)
606 exchange(block, new_blk);
610 * Handle pre-optimized table switch Cond's.
611 * During iropt, all Projs from a switch-Cond are already removed except
612 * the defProj and maybe the taken one.
613 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
615 * @param cond the switch-Cond
617 * @return non-zero if a switch-Cond was optimized
619 * Expects all Proj's linked to the cond node
621 static int handle_switch_cond(ir_node *cond) {
622 ir_node *sel = get_Cond_selector(cond);
/* proj1/proj2: first two Projs from the link list built in collect_nodes */
624 ir_node *proj1 = get_irn_link(cond);
625 ir_node *proj2 = get_irn_link(proj1);
628 blk = get_nodes_block(cond);
631 /* this Cond has only one Proj: must be the defProj */
632 assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
633 /* convert it into a Jmp */
634 jmp = new_r_Jmp(current_ir_graph, blk);
635 exchange(proj1, jmp);
637 } else if (get_irn_link(proj2) == NULL) {
638 /* We have two Proj's here. Check if the Cond has
639 a constant argument */
640 tarval *tv = value_of(sel);
642 if (tv != tarval_bad) {
643 /* we have a constant switch */
644 long num = get_tarval_long(tv);
645 long def_num = get_Cond_defaultProj(cond);
647 if (def_num == get_Proj_proj(proj1)) {
648 /* first one is the defProj */
649 if (num == get_Proj_proj(proj2)) {
/* taken case wins: second Proj becomes Jmp, defProj dies */
650 jmp = new_r_Jmp(current_ir_graph, blk);
651 exchange(proj2, jmp);
652 exchange(proj1, new_Bad());
655 } else if (def_num == get_Proj_proj(proj2)) {
656 /* second one is the defProj */
657 if (num == get_Proj_proj(proj1)) {
658 jmp = new_r_Jmp(current_ir_graph, blk);
659 exchange(proj1, jmp);
660 exchange(proj2, new_Bad());
664 /* neither: strange, Cond was not optimized so far */
665 if (num == get_Proj_proj(proj1)) {
666 jmp = new_r_Jmp(current_ir_graph, blk);
667 exchange(proj1, jmp);
668 exchange(proj2, new_Bad());
670 } else if (num == get_Proj_proj(proj2)) {
671 jmp = new_r_Jmp(current_ir_graph, blk);
672 exchange(proj2, jmp);
673 exchange(proj1, new_Bad());
682 /* Optimizations of the control flow that also require changes of Phi nodes.
684 * This optimization performs two passes over the graph.
686 * The first pass collects all Phi nodes in a link list in the block
687 * nodes. Further it performs simple control flow optimizations.
688 * Finally it marks all blocks that do not contain useful
689 * computations, i.e., these blocks might be removed.
691 * The second pass performs the optimizations intended by this algorithm.
692 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
693 * which it finds in a linked list computed by the first pass.
695 * We use the block_visited flag to mark empty blocks in the first
697 * @@@ It would be better to add a struct in the link field
698 * that keeps the Phi list and the mark. Place it on an obstack, as
699 * we will lose blocks and thereby generate memory leaks.
701 void optimize_cf(ir_graph *irg) {
704 ir_node *cond, *end = get_irg_end(irg);
705 ir_graph *rem = current_ir_graph;
706 irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
710 assert(get_irg_phase_state(irg) != phase_building);
712 /* if the graph is not pinned, we cannot determine empty blocks */
713 assert(get_irg_pinned(irg) != op_pin_state_floats &&
714 "Control flow optimization need a pinned graph");
716 current_ir_graph = irg;
/* edges will be invalidated by the transformations below */
718 edges_deactivate(irg);
720 /* Handle graph state */
721 set_irg_outs_inconsistent(current_ir_graph);
722 set_irg_extblk_inconsistent(current_ir_graph);
723 set_irg_loopinfo_inconsistent(current_ir_graph);
724 set_irg_doms_inconsistent(current_ir_graph);
726 if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
729 /* we have dominance info, we can kill dead block */
730 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
732 /* fix the keep-alives */
733 end = get_irg_end(irg);
734 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
735 ir_node *ka = get_End_keepalive(end, i);
738 /* do NOT keep dead blocks */
739 if (get_Block_dom_depth(ka) < 0)
740 set_End_keepalive(end, i, new_Bad());
741 } else if (is_Block_dead(get_nodes_block(ka)) ||
742 get_Block_dom_depth(get_nodes_block(ka)) < 0)
743 /* do NOT keep nodes in dead blocks */
744 set_End_keepalive(end, i, new_Bad());
/* fold binary Conds whose two Projs target the same block */
747 irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);
749 /* Use block visited flag to mark non-empty blocks. */
750 inc_irg_block_visited(irg);
/* first pass: merge blocks (pre) and collect Phis/switch-Conds (post) */
753 irg_walk(end, merge_blocks, collect_nodes, list);
755 /* handle all collected switch-Conds */
756 foreach_plist(list, el) {
757 cond = plist_element_get_value(el);
758 handle_switch_cond(cond);
762 /* Optimize the standard code. */
763 irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);
765 /* Walk all keep alives, optimize them if block, add to new in-array
766 for end if useful. */
767 n = get_End_n_keepalives(end);
769 NEW_ARR_A(ir_node *, in, n);
770 inc_irg_visited(irg);
772 /* fix the keep alive */
773 for (i = j = 0; i < n; i++) {
774 ir_node *ka = get_End_keepalive(end, i);
776 if (irn_not_visited(ka)) {
777 ir_op *op = get_irn_op(ka);
779 if ((op == op_Block) && Block_not_block_visited(ka)) {
/* re-run block optimization from this kept block only */
780 set_irg_block_visited(irg, /* Don't walk all the way to Start. */
781 get_irg_block_visited(irg)-1);
782 irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
783 mark_irn_visited(ka);
785 } else if (op == op_Phi) {
786 mark_irn_visited(ka);
787 if (! is_Block_dead(get_nodes_block(ka)))
789 } else if (is_op_keep(op)) {
790 mark_irn_visited(ka);
791 if (! is_Block_dead(get_nodes_block(ka)))
/* j survivors collected in in[] become the new keep-alive set */
797 set_End_keepalives(end, j, in);
798 /* the verifier doesn't work yet with floating nodes */
799 if (get_irg_pinned(irg) == op_pin_state_pinned) {
800 /* after optimize_cf(), only Bad data flow may remain. */
801 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
802 dump_ir_block_graph(irg, "-vrfy-cf");
803 dump_ir_graph(irg, "-vrfy-cf");
804 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
808 current_ir_graph = rem;