2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Control flow optimizations.
23 * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
28 #include "iroptimize.h"
35 #include "irgraph_t.h"
49 #include "irbackedge_t.h"
55 #include "iropt_dbg.h"
57 /*------------------------------------------------------------------*/
58 /* Control flow optimization. */
60 /* Removes Bad control flow predecessors and empty blocks. A block */
61 /* is empty if it contains only a Jmp node. */
62 /* Blocks can only be removed if they are not needed for the */
63 /* semantics of Phi nodes. */
64 /* Further, we NEVER remove labeled blocks (even if we could move */
66 /*------------------------------------------------------------------*/
/* The Block mark flag is reused by this pass to record whether a block is a
 * candidate for removal (mark == 1) or must be kept (mark == 0). */
68 #define set_Block_removable(block) set_Block_mark(block, 1)
69 #define set_Block_non_removable(block) set_Block_mark(block, 0)
70 #define is_Block_removable(block) (get_Block_mark(block) != 0)
73 * Replace binary Conds that jump twice into the same block
79 * ProjX True ProjX False ==> | /
84 * Such patterns are the result of if-conversion.
86 * Note that the simple case that Block has only these two
87 * predecessors is already handled in equivalent_node_Block().
89 static int remove_senseless_conds(ir_node *bl)
92 int n = get_Block_n_cfgpreds(bl);
95 for (i = 0; i < n; ++i) {
96 ir_node *pred_i = get_Block_cfgpred(bl, i);
97 ir_node *cond_i = skip_Proj(pred_i);
/* only binary Conds (boolean selector, mode_b) can jump twice into bl */
100 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
102 for (j = i + 1; j < n; ++j) {
103 ir_node *pred_j = get_Block_cfgpred(bl, j);
104 ir_node *cond_j = skip_Proj(pred_j);
/* in-edges i and j originate from the same Cond: the branch is senseless,
 * so replace edge i by an unconditional Jmp and kill the duplicate edge j */
106 if (cond_j == cond_i) {
107 ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
108 set_irn_n(bl, i, jmp);
109 set_irn_n(bl, j, new_Bad());
111 DBG_OPT_IFSIM2(cond_i, jmp);
121 /** An environment for merge_blocks and collect_nodes. */
122 typedef struct merge_env {
123 int changed; /**< Non-zero if the graph was changed. */
124 int phis_moved; /**< Non-zero if Phi nodes were moved. */
125 plist_t *list; /**< Helper list for all found switch-Conds. */
129 * Removes Tuples from Block control flow predecessors.
130 * Optimizes blocks with equivalent_node(). This is tricky,
131 * as we want to avoid nodes that have as block predecessor Bads.
132 * Therefore we also optimize at control flow operations, depending
133 * how we first reach the Block.
135 static void merge_blocks(ir_node *node, void *ctx)
139 merge_env *env = ctx;
141 /* clear the link field for ALL nodes first */
142 set_irn_link(node, NULL);
/* Blocks: drop Tuples from the cfg predecessors, then try to fold the
 * block itself via equivalent_node(). */
144 if (is_Block(node)) {
146 for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
147 ir_node *pred = get_Block_cfgpred(node, i);
148 ir_node *skipped = skip_Tuple(pred);
149 if (pred != skipped) {
150 set_Block_cfgpred(node, i, skipped);
156 new_block = equivalent_node(node);
157 if (new_block != node && ! is_Block_dead(new_block)) {
158 exchange(node, new_block);
162 } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
163 /* We will soon visit a block. Optimize it before visiting! */
164 ir_node *b = get_nodes_block(skip_Proj(node));
166 if (!is_Block_dead(b)) {
167 new_block = equivalent_node(b);
169 while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
170 /* We would have to run gigo() if new is bad, so we
171 promote it directly below. Nevertheless, we sometimes reach a block
172 the first time through a dataflow node. In this case we optimized the
173 block as such and have to promote the Bad here. */
174 exchange(b, new_block);
177 new_block = equivalent_node(b);
180 /* normally, we would create a Bad block here, but this must be
181 * prevented, so just set its cf to Bad.
183 if (is_Block_dead(new_block)) {
184 exchange(node, new_Bad());
192 * Block walker removing control flow from dead block by
193 * inspecting dominance info.
194 * Do not replace blocks by Bad. This optimization shall
195 * ensure, that all Bad control flow predecessors are
196 * removed, and no new other Bads are introduced.
197 * Further removed useless Conds and clear the mark of all blocks.
199 * Must be run in the post walker.
201 static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
206 /* Check block predecessors and turn control flow into bad.
207 Beware of Tuple, kill them. */
208 for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
209 ir_node *pred_X = get_Block_cfgpred(block, i);
210 ir_node *skipped = skip_Tuple(pred_X);
212 if (! is_Bad(skipped)) {
213 ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
/* a predecessor block that is dead or outside the dominance tree is
 * unreachable: mark it dead and cut the control flow edge */
215 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
216 set_Block_dead(pred_bl);
217 exchange(pred_X, new_Bad());
219 } else if (skipped != pred_X) {
220 set_Block_cfgpred(block, i, skipped);
/* env points at the caller's 'changed' flag (optimize_cf passes &env.changed) */
226 *changed |= remove_senseless_conds(block);
228 /* clear the block mark of all non labeled blocks */
/* labeled blocks must never be removed; everything else starts out removable */
229 if (has_Block_entity(block))
230 set_Block_non_removable(block);
232 set_Block_removable(block);
236 * Collects all Phi nodes in link list of Block.
237 * Marks all blocks "non_removable" if they contain a node other
238 * than Jmp (and Proj).
239 * Links all Proj nodes to their predecessors.
240 * Collects all switch-Conds in a list.
242 static void collect_nodes(ir_node *n, void *ctx)
244 ir_opcode code = get_irn_opcode(n);
245 merge_env *env = ctx;
247 if (code == iro_Block) {
248 /* mark the block as non-removable if it is labeled */
249 if (has_Block_entity(n))
250 set_Block_non_removable(n);
252 ir_node *b = get_nodes_block(n);
254 if (code == iro_Phi && get_irn_arity(n) > 0) {
255 /* Collect Phi nodes to compact ins along with block's ins. */
256 set_irn_link(n, get_irn_link(b));
/* any node besides Jmp makes its block non-empty, hence non-removable */
258 } else if (code != iro_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
259 set_Block_non_removable(b);
261 if (code == iro_Proj) { /* link Proj nodes */
/* prepend n to the Proj list hanging off its predecessor; used by
 * handle_switch_cond() to enumerate the remaining Projs of a Cond */
262 ir_node *pred = get_Proj_pred(n);
264 set_irn_link(n, get_irn_link(pred));
265 set_irn_link(pred, n);
266 } else if (code == iro_Cond) {
267 ir_node *sel = get_Cond_selector(n);
268 if (mode_is_int(get_irn_mode(sel))) {
269 /* found a switch-Cond, collect */
270 plist_insert_back(env->list, n);
277 /** Returns true if pred is a predecessor block of b. */
278 static int is_pred_of(ir_node *pred, ir_node *b)
/* scan all control flow predecessor blocks of b for pred */
282 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
283 ir_node *b_pred = get_Block_cfgpred_block(b, i);
290 /** Test whether we can optimize away pred block pos of b.
292 * @param b A block node.
293 * @param pos The position of the predecessor block to judge about.
295 * @returns The number of predecessors
297 * The test is rather tricky.
299 * The situation is something like the following:
308 * b merges the control flow of an if-then-else. We may not remove
309 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
310 * node in b, even if both are empty. The destruction of this Phi
311 * requires that a copy is added before the merge. We have to
312 * keep one of the case blocks to place the copies in.
314 * To perform the test for pos, we must regard predecessors before pos
315 * as already removed.
317 static int test_whether_dispensable(ir_node *b, int pos)
/* n_preds defaults to 1: the predecessor is kept and contributes one in-edge */
319 int i, j, n_preds = 1;
320 ir_node *pred = get_Block_cfgpred_block(b, pos);
322 /* Bad blocks will be optimized away, so we don't need space for them */
323 if (is_Block_dead(pred))
326 if (is_Block_removable(pred)) {
327 /* Seems to be empty. At least we detected this in collect_nodes. */
328 if (get_irn_link(b) == NULL) {
329 /* There are no Phi nodes ==> all predecessors are dispensable. */
330 n_preds = get_Block_n_cfgpreds(pred);
332 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
333 Handle all pred blocks with preds < pos as if they were already removed. */
334 for (i = 0; i < pos; i++) {
335 ir_node *b_pred = get_Block_cfgpred_block(b, i);
336 if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
337 for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
338 ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
339 if (is_pred_of(b_pred_pred, pred))
340 goto non_dispensable;
343 if (is_pred_of(b_pred, pred))
344 goto non_dispensable;
/* predecessors after pos are tested as if they were still present */
347 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
348 ir_node *b_pred = get_Block_cfgpred_block(b, i);
349 if (is_pred_of(b_pred, pred))
350 goto non_dispensable;
352 /* if we get here, the block is dispensable */
353 n_preds = get_Block_n_cfgpreds(pred);
/* reached via 'goto non_dispensable': keep pred, it must not be merged away */
360 set_Block_non_removable(pred);
365 * This method removes Bad cf predecessors from Blocks and Phis, and removes
366 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
368 * We first adapt Phi nodes, then Block nodes, as we need the old ins
369 * of the Block to adapt the Phi nodes. We do this by computing new
370 * in arrays, and then replacing the old ones. So far we compute new in arrays
371 * for all nodes, not regarding whether there is a possibility for optimization.
373 * For each predecessor p of a Block b there are three cases:
374 * -1. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
375 * -2. The predecessor p is empty. Remove p. All predecessors of p are now
377 * -3. The predecessor p is a block containing useful code. Just keep p as is.
379 * For Phi nodes f we have to check the conditions at the Block of f.
380 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
382 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
383 * case we proceed as for blocks. We remove pred_f. All
384 * predecessors of pred_f now are predecessors of f.
385 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
386 * We have to replicate f for each predecessor of the removed block. Or, with
387 * other words, the removed predecessor block has exactly one predecessor.
389 * Further there is a special case for self referencing blocks:
392 * then_b else_b then_b else_b
398 * | | | === optimized to ===> \ | | |
405 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
406 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
408 * @@@ It is negotiable whether we should do this ... there might end up a copy
409 * from the Phi in the loop when removing the Phis.
411 static void optimize_blocks(ir_node *b, void *ctx)
/* p_preds stays -1 when b has no Phi list; checked by the final assert */
413 int i, j, k, n, max_preds, n_preds, p_preds = -1;
414 ir_node *pred, *phi, *next;
416 merge_env *env = ctx;
418 /* Count the number of predecessor if this block is merged with pred blocks
421 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
422 max_preds += test_whether_dispensable(b, i);
/* in[] is reused as the new-ins scratch array for every Phi and for b itself */
424 in = XMALLOCN(ir_node*, max_preds);
426 /*- Fix the Phi nodes of the current block -*/
427 for (phi = get_irn_link(b); phi != NULL; phi = next) {
429 next = get_irn_link(phi);
431 /* Find the new predecessors for the Phi */
433 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
434 pred = get_Block_cfgpred_block(b, i);
436 if (is_Block_dead(pred)) {
437 /* case Phi 1: Do nothing */
438 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
439 /* case Phi 2: It's an empty block and not yet visited. */
440 ir_node *phi_pred = get_Phi_pred(phi, i);
442 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
443 /* because of breaking loops, not all predecessors are Bad-clean,
444 * so we must check this here again */
445 if (! is_Bad(get_Block_cfgpred(pred, j))) {
/* case 2a: the Phi's operand lives in the removed block, pull its operands up */
446 if (get_nodes_block(phi_pred) == pred) {
448 assert(is_Phi(phi_pred)); /* Block is empty!! */
450 in[p_preds++] = get_Phi_pred(phi_pred, j);
/* case 2b: operand comes from elsewhere, replicate it per remaining edge */
453 in[p_preds++] = phi_pred;
459 in[p_preds++] = get_Phi_pred(phi, i);
462 assert(p_preds <= max_preds);
466 /* By removal of Bad ins the Phi might be degenerated. */
467 exchange(phi, in[0]);
469 set_irn_in(phi, p_preds, in);
473 /*- This happens only if merge between loop backedge and single loop entry.
474 Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
475 of the Phi's in predb ... -*/
476 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
477 ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
479 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
482 /* we found a predecessor block at position k that will be removed */
483 for (phi = get_irn_link(predb); phi; phi = next_phi) {
485 next_phi = get_irn_link(phi);
489 if (get_Block_idom(b) != predb) {
490 /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
491 exchange(phi, new_Bad());
493 /* predb is the direct dominator of b. There might be uses of the Phi nodes from
494 predb in further block, so move this phi from the predecessor into the block b */
495 set_nodes_block(phi, b);
496 set_irn_link(phi, get_irn_link(b));
497 set_irn_link(b, phi);
500 /* first, copy all 0..k-1 predecessors */
501 for (i = 0; i < k; i++) {
502 pred = get_Block_cfgpred_block(b, i);
504 if (is_Block_dead(pred)) {
506 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
507 /* It's an empty block and not yet visited. */
508 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
509 if (! is_Bad(get_Block_cfgpred(pred, j)))
517 /* now we are at k, copy the phi predecessors */
518 pred = get_nodes_block(get_Block_cfgpred(b, k));
519 for (i = 0; i < get_Phi_n_preds(phi); i++) {
520 if (! is_Bad(get_Block_cfgpred(pred, i)))
521 in[q_preds++] = get_Phi_pred(phi, i);
524 /* and now all the rest */
525 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
526 pred = get_Block_cfgpred_block(b, i);
528 if (is_Block_dead(pred)) {
530 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
531 /* It's an empty block and not yet visited. */
532 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
533 if (! is_Bad(get_Block_cfgpred(pred, j)))
543 exchange(phi, in[0]);
545 set_irn_in(phi, q_preds, in);
548 assert(q_preds <= max_preds);
549 // assert(p_preds == q_preds && "Wrong Phi Fix");
555 /*- Fix the block -*/
557 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
558 pred = get_Block_cfgpred_block(b, i);
560 if (is_Block_dead(pred)) {
561 /* case 1: Do nothing */
562 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
563 /* case 2: It's an empty block and not yet visited. */
564 assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
565 /* Else it should be optimized by equivalent_node. */
566 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
567 ir_node *pred_X = get_Block_cfgpred(pred, j);
569 /* because of breaking loops, not all predecessors are Bad-clean,
570 * so we must check this here again */
571 if (! is_Bad(pred_X))
572 in[n_preds++] = pred_X;
574 /* Remove block as it might be kept alive. */
575 exchange(pred, b/*new_Bad()*/);
578 in[n_preds++] = get_Block_cfgpred(b, i);
581 assert(n_preds <= max_preds);
583 set_irn_in(b, n_preds, in);
586 assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
591 * Block walker: optimize all blocks using the default optimizations.
592 * This removes Blocks that have only a Jmp predecessor.
594 static void remove_simple_blocks(ir_node *block, void *ctx)
596 ir_node *new_blk = equivalent_node(block);
597 merge_env *env = ctx;
/* equivalent_node() found a different representative for the block:
 * replace the block by it */
599 if (new_blk != block) {
600 exchange(block, new_blk);
606 * Handle pre-optimized table switch Cond's.
607 * During iropt, all Projs from a switch-Cond are already removed except
608 * the defProj and maybe the taken one.
609 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
611 * @param cond the switch-Cond
613 * @return non-zero if a switch-Cond was optimized
615 * Expects all Proj's linked to the cond node
617 static int handle_switch_cond(ir_node *cond)
619 ir_node *sel = get_Cond_selector(cond);
/* collect_nodes() linked the Cond's remaining Projs into a list starting at
 * the Cond's link field; proj2 is NULL when only one Proj is left */
621 ir_node *proj1 = get_irn_link(cond);
622 ir_node *proj2 = get_irn_link(proj1);
625 blk = get_nodes_block(cond);
628 /* this Cond has only one Proj: must be the defProj */
629 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
630 /* convert it into a Jmp */
631 jmp = new_r_Jmp(blk);
632 exchange(proj1, jmp);
/* exactly two Projs remain on the list */
634 } else if (get_irn_link(proj2) == NULL) {
635 /* We have two Proj's here. Check if the Cond has
636 a constant argument */
637 tarval *tv = value_of(sel);
639 if (tv != tarval_bad) {
640 /* we have a constant switch */
641 long num = get_tarval_long(tv);
642 long def_num = get_Cond_default_proj(cond);
644 if (def_num == get_Proj_proj(proj1)) {
645 /* first one is the defProj */
646 if (num == get_Proj_proj(proj2)) {
647 jmp = new_r_Jmp(blk);
648 exchange(proj2, jmp);
649 exchange(proj1, new_Bad());
652 } else if (def_num == get_Proj_proj(proj2)) {
653 /* second one is the defProj */
654 if (num == get_Proj_proj(proj1)) {
655 jmp = new_r_Jmp(blk);
656 exchange(proj1, jmp);
657 exchange(proj2, new_Bad());
661 /* neither: strange, Cond was not optimized so far */
662 if (num == get_Proj_proj(proj1)) {
663 jmp = new_r_Jmp(blk);
664 exchange(proj1, jmp);
665 exchange(proj2, new_Bad());
667 } else if (num == get_Proj_proj(proj2)) {
668 jmp = new_r_Jmp(blk);
669 exchange(proj2, jmp);
670 exchange(proj1, new_Bad());
679 /* Optimizations of the control flow that also require changes of Phi nodes.
681 * This optimization performs two passes over the graph.
683 * The first pass collects all Phi nodes in a link list in the block
684 * nodes. Further it performs simple control flow optimizations.
685 * Finally it marks all blocks that do not contain useful
686 * computations, i.e., these blocks might be removed.
688 * The second pass performs the optimizations intended by this algorithm.
689 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
690 * which it finds in a linked list computed by the first pass.
692 * We use the mark flag to mark removable blocks in the first
695 void optimize_cf(ir_graph *irg)
697 int i, j, n, changed;
699 ir_node *cond, *end = get_irg_end(irg);
700 ir_graph *rem = current_ir_graph;
704 assert(get_irg_phase_state(irg) != phase_building);
706 /* if the graph is not pinned, we cannot determine empty blocks */
707 assert(get_irg_pinned(irg) != op_pin_state_floats &&
708 "Control flow optimization need a pinned graph");
710 current_ir_graph = irg;
712 /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
713 edges_deactivate(irg);
715 /* we use the mark flag to mark removable blocks */
716 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
721 /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
722 Use dominator info to kill blocks. Also optimize useless Conds. */
724 irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
726 /* fix the keep-alives */
728 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
729 ir_node *ka = get_End_keepalive(end, i);
732 /* do NOT keep dead blocks */
733 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
734 set_End_keepalive(end, i, new_Bad());
738 ir_node *block = get_nodes_block(ka);
740 if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
741 /* do NOT keep nodes in dead blocks */
742 set_End_keepalive(end, i, new_Bad());
747 env.changed |= changed;
749 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
751 env.list = plist_new();
752 irg_walk(end, merge_blocks, collect_nodes, &env);
754 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
757 /* Handle graph state if was changed. */
758 set_irg_outs_inconsistent(irg);
759 set_irg_doms_inconsistent(irg);
760 set_irg_extblk_inconsistent(irg);
761 set_irg_loopinfo_inconsistent(irg);
762 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
766 /* handle all collected switch-Conds */
767 foreach_plist(env.list, el) {
768 cond = plist_element_get_value(el);
769 env.changed |= handle_switch_cond(cond);
771 plist_free(env.list);
774 /* The Cond optimization might generate unreachable code, so restart if
779 /* Optimize the standard code. */
782 irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
784 /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
785 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
786 inc_irg_visited(irg);
788 /* fix the keep-alives again */
790 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
791 ir_node *ka = get_End_keepalive(end, i);
794 /* do NOT keep dead blocks */
795 if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
796 set_End_keepalive(end, i, new_Bad());
800 ir_node *block = get_nodes_block(ka);
802 if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
803 /* do NOT keep nodes in dead blocks */
804 set_End_keepalive(end, i, new_Bad());
809 env.changed |= changed;
811 remove_End_Bads_and_doublets(end);
814 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
816 if (env.phis_moved) {
817 /* Bad: when we moved Phi's, we might produce dead Phi nodes
819 Some other phases cannot cope with this, so kill them.
821 n = get_End_n_keepalives(end);
823 NEW_ARR_A(ir_node *, in, n);
825 /* Handle graph state if was changed. */
826 set_irg_outs_inconsistent(irg);
828 assure_irg_outs(irg);
/* keep only nodes that have a real user besides the End node itself */
830 for (i = j = 0; i < n; ++i) {
831 ir_node *ka = get_End_keepalive(end, i);
836 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
837 ir_node *user = get_irn_out(ka, k);
839 if (user != ka && user != end) {
840 /* Is it a real user or just a self loop ? */
850 set_End_keepalives(end, j, in);
857 /* Handle graph state if was changed. */
858 set_irg_outs_inconsistent(irg);
859 set_irg_doms_inconsistent(irg);
860 set_irg_extblk_inconsistent(irg);
861 set_irg_loopinfo_inconsistent(irg);
862 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
866 /* the verifier doesn't work yet with floating nodes */
867 if (get_irg_pinned(irg) == op_pin_state_pinned) {
868 /* after optimize_cf(), only Bad data flow may remain. */
869 if (irg_verify_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
870 dump_ir_graph(irg, "-verify-cf");
871 fprintf(stderr, "VERIFY_BAD in optimize_cf()\n");
875 current_ir_graph = rem;
878 /** Creates an ir_graph pass for optimize_cf(); the pass name defaults to "optimize_cf". */
879 ir_graph_pass_t *optimize_cf_pass(const char *name)
881 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
882 } /* optimize_cf_pass */