/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Control flow optimizations.
 * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
 *
 * Removes Bad control flow predecessors and empty blocks.  A block is empty
 * if it contains only a Jmp node.  Blocks can only be removed if they are not
 * needed for the semantics of Phi nodes.  Further, we NEVER remove labeled
 * blocks (even if we could move the label).
 */
18 #include "iroptimize.h"
25 #include "irgraph_t.h"
39 #include "irbackedge_t.h"
44 #include "irnodehashmap.h"
47 #include "iropt_dbg.h"
/** An environment for merge_blocks and collect nodes. */
typedef struct merge_env {
	bool changed;    /**< Set if the graph was changed. */
	bool phis_moved; /**< Set if Phi nodes were moved. */
} merge_env;
55 /** set or reset the removable property of a block. */
56 static void set_Block_removable(ir_node *block, bool removable)
58 set_Block_mark(block, removable);
61 /** check if a block has the removable property set. */
62 static bool is_Block_removable(const ir_node *block)
64 return get_Block_mark(block);
67 /** Walker: clear link fields and mark all blocks as removable. */
68 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
71 set_irn_link(node, NULL);
73 set_Block_removable(node, true);
74 set_Block_phis(node, NULL);
75 } else if (is_Phi(node)) {
76 set_Phi_next(node, NULL);
81 * Collects all Phi nodes in link list of Block.
82 * Marks all blocks "non_removable" if they contain a node other
83 * than Jmp (and Proj).
84 * Links all Proj nodes to their predecessors.
85 * Collects all switch-Conds in a list.
87 static void collect_nodes(ir_node *n, void *ctx)
91 /* Collect Phi nodes to compact ins along with block's ins. */
92 ir_node *block = get_nodes_block(n);
93 add_Block_phi(block, n);
94 } else if (is_Block(n)) {
95 if (get_Block_entity(n) != NULL) {
96 /* block with a jump label attached cannot be removed. */
97 set_Block_removable(n, false);
99 } else if (is_Bad(n) || is_Jmp(n)) {
103 /* Check for non-empty block. */
104 ir_node *block = get_nodes_block(n);
106 set_Block_removable(block, false);
109 /* link Proj nodes */
110 ir_node *pred = get_Proj_pred(n);
111 set_irn_link(n, get_irn_link(pred));
112 set_irn_link(pred, n);
117 /** Returns true if pred is predecessor of block b. */
118 static bool is_pred_of(const ir_node *pred, const ir_node *b)
122 for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
123 ir_node *b_pred = get_Block_cfgpred_block(b, i);
130 /** Test whether we can optimize away pred block pos of b.
132 * @param b A block node.
133 * @param pos The position of the predecessor block to judge about.
135 * @returns The number of predecessors
137 * The test is rather tricky.
139 * The situation is something like the following:
148 * b merges the control flow of an if-then-else. We may not remove
149 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
150 * node in b, even if both are empty. The destruction of this Phi
151 * requires that a copy is added before the merge. We have to
152 * keep one of the case blocks to place the copies in.
154 * To perform the test for pos, we must regard predecessors before pos
155 * as already removed.
157 static unsigned test_whether_dispensable(const ir_node *b, int pos)
159 ir_node *pred = get_Block_cfgpred(b, pos);
160 ir_node *predb = get_nodes_block(pred);
162 if (is_Bad(pred) || !is_Block_removable(predb))
165 /* can't remove self-loops */
167 goto non_dispensable;
168 if (is_unknown_jump(pred))
169 goto non_dispensable;
171 /* Seems to be empty. At least we detected this in collect_nodes. */
172 if (get_Block_phis(b) != NULL) {
173 int n_cfgpreds = get_Block_n_cfgpreds(b);
175 /* there are Phi nodes */
177 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
178 * Handle all pred blocks with preds < pos as if they were already
180 for (i = 0; i < pos; i++) {
181 ir_node *other_pred = get_Block_cfgpred(b, i);
182 ir_node *other_predb = get_nodes_block(other_pred);
183 if (is_Bad(other_pred))
185 if (is_Block_removable(other_predb)
186 && !Block_block_visited(other_predb)) {
188 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
189 ir_node *other_predpred
190 = get_Block_cfgpred_block(other_predb, j);
191 if (is_pred_of(other_predpred, predb))
192 goto non_dispensable;
194 } else if (is_pred_of(other_predb, predb)) {
195 goto non_dispensable;
198 for (i = pos+1; i < n_cfgpreds; i++) {
199 ir_node *other_predb = get_Block_cfgpred_block(b, i);
200 if (is_pred_of(other_predb, predb))
201 goto non_dispensable;
204 /* we will not dispense already visited blocks */
205 if (Block_block_visited(predb))
207 /* if we get here, the block is dispensable, count useful preds */
208 return get_irn_arity(predb);
211 set_Block_removable(predb, false);
216 * This method merges blocks. A block is applicable to be merged, if it
217 * has only one predecessor with an unconditional jump to this block;
218 * and if this block does not contain any phis.
220 static void merge_blocks(ir_node *b, void *env)
224 if (get_Block_n_cfgpreds(b) == 1) {
225 ir_node* pred = get_Block_cfgpred(b, 0);
227 ir_node* pred_block = get_nodes_block(pred);
228 if (get_Block_phis(b) == NULL) {
229 exchange(b, pred_block);
236 * This method removes empty blocks. A block is empty if it only contains Phi
239 * We first adapt Phi nodes, then Block nodes, as we need the old ins
240 * of the Block to adapt the Phi nodes. We do this by computing new
241 * in arrays, and then replacing the old ones. So far we compute new in arrays
242 * for all nodes, not regarding whether there is a possibility for optimization.
244 * For each predecessor p of a Block b there are three cases:
245 * - The predecessor p is a Bad node: just skip it. The in array of b shrinks
247 * - The predecessor p is empty. Remove p. All predecessors of p are now
249 * - The predecessor p is a block containing useful code. Just keep p as is.
251 * For Phi nodes f we have to check the conditions at the Block of f.
252 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
254 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
255 * In this case we proceed as for blocks. We remove pred_f. All
256 * predecessors of pred_f now are predecessors of f.
257 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
258 * too. We have to replicate f for each predecessor of the removed block.
259 * Or, with other words, the removed predecessor block has exactly one
262 * Further there is a special case for self referencing blocks:
265 * then_b else_b then_b else_b
271 * | | | === optimized to ===> \ | | |
278 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
279 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
282 static void optimize_blocks(ir_node *b, void *ctx)
284 int i, j, k, n, max_preds, n_preds, p_preds = -1;
288 merge_env *env = (merge_env*)ctx;
290 if (get_Block_dom_depth(b) < 0) {
291 /* ignore unreachable blocks */
295 /* Count the number of predecessor if this block is merged with pred blocks
298 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
299 max_preds += test_whether_dispensable(b, i);
301 in = XMALLOCN(ir_node*, max_preds);
303 /*- Fix the Phi nodes of the current block -*/
304 for (phi = get_Block_phis(b); phi != NULL; phi = next) {
305 next = get_Phi_next(phi);
307 /* Find the new predecessors for the Phi */
309 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
310 ir_graph *irg = get_irn_irg(b);
311 ir_node *predx = get_Block_cfgpred(b, i);
314 /* case Phi 1: maintain Bads, as somebody else is responsible to
317 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
321 pred = get_nodes_block(predx);
323 /* case Phi 2: It's an empty block and not yet visited. */
324 if (is_Block_removable(pred) && !Block_block_visited(pred)) {
325 ir_node *phi_pred = get_Phi_pred(phi, i);
327 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
328 ir_node *pred_pred = get_Block_cfgpred(pred, j);
330 if (is_Bad(pred_pred)) {
331 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
335 if (get_nodes_block(phi_pred) == pred) {
337 in[p_preds++] = get_Phi_pred(phi_pred, j);
340 in[p_preds++] = phi_pred;
345 in[p_preds++] = get_Phi_pred(phi, i);
348 assert(p_preds == max_preds);
352 exchange(phi, in[0]);
354 set_irn_in(phi, p_preds, in);
359 /*- This happens only if merge between loop backedge and single loop entry.
360 Moreover, it is only needed if predb is the direct dominator of b,
361 else there can be no uses of the Phi's in predb ... -*/
362 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
363 ir_node *pred = get_Block_cfgpred(b, k);
364 ir_node *predb = get_nodes_block(pred);
368 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
371 /* we found a predecessor block at position k that will be removed */
372 for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) {
374 next_phi = get_Phi_next(phi);
376 if (get_Block_idom(b) != predb) {
377 /* predb is not the dominator. There can't be uses of
378 * pred's Phi nodes, kill them .*/
379 ir_graph *irg = get_irn_irg(b);
380 ir_mode *mode = get_irn_mode(phi);
381 exchange(phi, new_r_Bad(irg, mode));
383 /* predb is the direct dominator of b. There might be uses
384 * of the Phi nodes from predb in further block, so move
385 * this phi from the predecessor into the block b */
386 set_nodes_block(phi, b);
387 set_Phi_next(phi, get_Block_phis(b));
388 set_Block_phis(b, phi);
389 env->phis_moved = true;
391 /* first, copy all 0..k-1 predecessors */
392 for (i = 0; i < k; i++) {
393 ir_node *predx = get_Block_cfgpred(b, i);
397 ir_graph *irg = get_irn_irg(b);
398 ir_mode *mode = get_irn_mode(phi);
399 in[q_preds++] = new_r_Bad(irg, mode);
402 pred_block = get_nodes_block(predx);
403 if (is_Block_removable(pred_block)
404 && !Block_block_visited(pred_block)) {
405 int n_cfgpreds = get_Block_n_cfgpreds(pred_block);
406 /* It's an empty block and not yet visited. */
407 for (j = 0; j < n_cfgpreds; j++) {
408 if (!is_Bad(get_Block_cfgpred(pred_block, j))) {
411 ir_graph *irg = get_irn_irg(b);
412 ir_mode *mode = get_irn_mode(phi);
413 in[q_preds++] = new_r_Bad(irg, mode);
421 /* now we are at k, copy the phi predecessors */
422 pred = get_nodes_block(get_Block_cfgpred(b, k));
423 for (i = 0; i < get_Phi_n_preds(phi); i++) {
424 in[q_preds++] = get_Phi_pred(phi, i);
427 /* and now all the rest */
428 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
429 pred = get_Block_cfgpred_block(b, i);
432 ir_graph *irg = get_irn_irg(b);
433 ir_mode *mode = get_irn_mode(phi);
434 in[q_preds++] = new_r_Bad(irg, mode);
435 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
436 /* It's an empty block and not yet visited. */
437 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
438 if (! is_Bad(get_Block_cfgpred(pred, j))) {
441 ir_graph *irg = get_irn_irg(b);
442 ir_mode *mode = get_irn_mode(phi);
443 in[q_preds++] = new_r_Bad(irg, mode);
453 exchange(phi, in[0]);
455 set_irn_in(phi, q_preds, in);
458 assert(q_preds <= max_preds);
459 // assert(p_preds == q_preds && "Wrong Phi Fix");
465 /*- Fix the block -*/
467 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
468 ir_node *pred = get_Block_cfgpred(b, i);
469 ir_node *predb = get_nodes_block(pred);
470 ir_graph *irg = get_irn_irg(pred);
472 /* case 1: Bad predecessor */
474 in[n_preds++] = new_r_Bad(irg, mode_X);
477 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
478 /* case 2: It's an empty block and not yet visited. */
479 for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
480 ir_node *predpred = get_Block_cfgpred(predb, j);
482 if (is_Bad(predpred)) {
483 in[n_preds++] = new_r_Bad(irg, mode_X);
487 in[n_preds++] = predpred;
489 /* Remove block+jump as it might be kept alive. */
490 exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
491 exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
494 in[n_preds++] = pred;
497 assert(n_preds == max_preds);
499 set_irn_in(b, n_preds, in);
502 /* see if phi-fix was correct */
503 assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds));
508 * Optimize boolean Conds, where true and false jump to the same block into a Jmp
509 * Block must contain no Phi nodes.
513 * projA projB => Jmp Bad
517 static bool optimize_pred_cond(ir_node *block, int i, int j)
519 ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
522 projA = get_Block_cfgpred(block, i);
523 if (!is_Proj(projA)) return false;
524 projB = get_Block_cfgpred(block, j);
525 if (!is_Proj(projB)) return false;
526 cond = get_Proj_pred(projA);
527 if (!is_Cond(cond)) return false;
529 if (cond != get_Proj_pred(projB)) return false;
531 /* cond should actually be a Jmp */
532 pred_block = get_nodes_block(cond);
533 jmp = new_r_Jmp(pred_block);
534 bad = new_r_Bad(get_irn_irg(block), mode_X);
536 assert(projA != projB);
537 exchange(projA, jmp);
538 exchange(projB, bad);
/** Per-block property bits cached in the block-info hashmap. */
typedef enum block_flags_t {
	BF_HAS_OPERATIONS         = 1 << 0, /**< block contains "real" computations */
	BF_HAS_PHIS               = 1 << 1, /**< block contains Phi nodes */
	BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2, /**< block is reached by an unknown jump */
} block_flags_t;
548 static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block,
551 return PTR_TO_INT(ir_nodehashmap_get(void, infos, block)) & flag;
554 static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block,
557 int data = PTR_TO_INT(ir_nodehashmap_get(void, infos, block));
559 ir_nodehashmap_insert(infos, block, INT_TO_PTR(data));
562 static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block)
564 ir_nodehashmap_remove(infos, block);
567 static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block)
569 return get_block_flag(infos, block, BF_HAS_OPERATIONS);
572 static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block)
574 set_block_flag(infos, block, BF_HAS_OPERATIONS);
577 static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block)
579 return get_block_flag(infos, block, BF_HAS_PHIS);
582 static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block)
584 set_block_flag(infos, block, BF_HAS_PHIS);
587 static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block)
589 return get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
592 static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block)
594 set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET);
598 * Pre-Walker: fill block info information.
600 static void compute_block_info(ir_node *n, void *x)
602 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
605 int i, max = get_Block_n_cfgpreds(n);
606 for (i=0; i<max; i++) {
607 ir_node *pred = get_Block_cfgpred(n,i);
608 if (is_unknown_jump(pred)) {
609 set_is_unknown_jump_target(infos, n);
612 } else if (is_Phi(n)) {
613 ir_node *block = get_nodes_block(n);
614 set_has_phis(infos, block);
615 } else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
618 ir_node *block = get_nodes_block(n);
619 set_has_operations(infos, block);
623 static void clear_block_info(ir_node *block, void *x)
625 ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x;
626 clear_block_flag(infos, block);
629 typedef struct skip_env {
631 ir_nodehashmap_t block_infos;
635 * Post-Block-walker: Optimize useless if's (boolean Cond nodes
636 * with same true/false target) away.
638 static void optimize_ifs(ir_node *block, void *x)
640 skip_env *env = (skip_env*)x;
642 int n_preds = get_Block_n_cfgpreds(block);
644 if (has_phis(&env->block_infos, block))
647 /* optimize Cond predecessors (might produce Bad predecessors) */
648 for (i = 0; i < n_preds; ++i) {
649 for (j = i+1; j < n_preds; ++j) {
650 optimize_pred_cond(block, i, j);
656 * Pre-Block walker: remove empty blocks (only contain a Jmp)
657 * that are control flow predecessors of the current block.
659 static void remove_empty_blocks(ir_node *block, void *x)
661 skip_env *env = (skip_env*)x;
663 int n_preds = get_Block_n_cfgpreds(block);
665 for (i = 0; i < n_preds; ++i) {
666 ir_node *jmp, *jmp_block;
669 jmp = get_Block_cfgpred(block, i);
672 jmp_block = get_nodes_block(jmp);
673 if (jmp_block == block)
674 continue; /* this infinite loop cannot be optimized any further */
675 if (is_unknown_jump_target(&env->block_infos, jmp_block))
676 continue; /* unknown jump target must not be optimized */
677 if (has_phis(&env->block_infos,jmp_block))
678 continue; /* this block contains Phis and is not skipped */
679 if (Block_block_visited(jmp_block)) {
681 /* otherwise we could break the walker,
682 * if block was reached via
683 * KeepAlive edge -> jmp_block -> A ---> block,
684 * because the walker cannot handle Id nodes.
694 /* jmp_block is an empty block and can be optimized! */
696 n_jpreds = get_Block_n_cfgpreds(jmp_block);
698 * If the jmp block has only one predecessor this is straightforward.
699 * However, if there are more predecessors, we only handle this,
700 * if block has no Phis.
703 ir_node *pred = get_Block_cfgpred(jmp_block, 0);
704 ir_node *pred_block = get_nodes_block(pred);
705 if (has_operations(&env->block_infos,jmp_block)) {
707 continue; /* must not create partially dead code, especially when it is mode_M */
710 /* skip jmp block by rerouting its predecessor to block
720 /* cleanup: jmp_block might have a Keep edge! */
721 exchange(jmp_block, pred_block);
723 } else if ( !has_phis(&env->block_infos, block) &&
724 !has_operations(&env->block_infos,jmp_block))
726 /* all predecessors can skip the jmp block, so block gets some new
731 * jmp_block C => Bad C | |
735 ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds);
737 /* first copy the old predecessors, because the outer loop (i)
738 * still walks over them */
739 for (j = 0; j < n_preds; ++j) {
740 ins[j] = get_Block_cfgpred(block, j);
742 /* now append the new predecessors */
743 for (j = 0; j < n_jpreds; ++j) {
744 ir_node *pred = get_Block_cfgpred(jmp_block, j);
745 ins[n_preds+j] = pred;
747 set_irn_in(block, n_preds+n_jpreds, ins);
748 /* convert the jmp_block to Bad */
749 ir_graph *irg = get_irn_irg(block);
750 exchange(jmp_block, new_r_Bad(irg, mode_BB));
751 exchange(jmp, new_r_Bad(irg, mode_X));
752 /* let the outer loop walk over the new predecessors as well */
755 // TODO What if jmp_block had a KeepAlive edge?
757 /* This would involve Phis ... */
763 * All cfg optimizations, which do not touch Phi nodes.
765 * Note that this might create critical edges.
767 static void cfgopt_ignoring_phis(ir_graph *irg)
772 ir_nodehashmap_init(&env.block_infos);
774 while (env.changed) {
775 irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos);
778 /* Remove blocks, which only consist of a Jmp */
779 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
781 /* Optimize Cond->Jmp, where then- and else-block are the same. */
782 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
785 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
786 /* clear block info, because it must be recomputed */
787 irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos);
788 /* Removing blocks and Conds might enable more optimizations */
791 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
796 ir_nodehashmap_destroy(&env.block_infos);
799 /* Optimizations of the control flow that also require changes of Phi nodes. */
800 void optimize_cf(ir_graph *irg)
804 ir_node *end = get_irg_end(irg);
809 env.phis_moved = false;
811 /* if the graph is not pinned, we cannot determine empty blocks */
812 assert(get_irg_pinned(irg) != op_pin_state_floats &&
813 "Control flow optimization need a pinned graph");
815 assure_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
817 /* First the "simple" optimizations, which do not touch Phis */
818 cfgopt_ignoring_phis(irg);
820 /* we use the mark flag to mark removable blocks */
821 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
822 | IR_RESOURCE_PHI_LIST);
825 * This pass collects all Phi nodes in a link list in the block
826 * nodes. Further it performs simple control flow optimizations.
827 * Finally it marks all blocks that do not contain useful
828 * computations, i.e., these blocks might be removed.
830 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL);
832 /* assert due to collect_nodes:
833 * 1. removable blocks are now marked as such
834 * 2. phi lists are up to date
837 /* Optimize the standard code.
838 * It walks only over block nodes and adapts these and the Phi nodes in
839 * these blocks, which it finds in a linked list computed before.
841 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
842 irg_block_walk_graph(irg, optimize_blocks, merge_blocks, &env);
844 new_end = optimize_in_place(end);
845 if (new_end != end) {
846 set_irg_end(irg, new_end);
849 remove_End_Bads_and_doublets(end);
851 ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK
852 | IR_RESOURCE_PHI_LIST);
854 if (env.phis_moved) {
855 /* Bad: when we moved Phi's, we might produce dead Phi nodes
857 Some other phases cannot copy with this, so kill them.
859 n = get_End_n_keepalives(end);
861 NEW_ARR_A(ir_node *, in, n);
862 assure_irg_outs(irg);
864 for (i = j = 0; i < n; ++i) {
865 ir_node *ka = get_End_keepalive(end, i);
870 for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
871 ir_node *user = get_irn_out(ka, k);
873 if (user != ka && user != end) {
874 /* Is it a real user or just a self loop ? */
884 set_End_keepalives(end, j, in);
890 confirm_irg_properties(irg,
891 env.changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL);
894 /* Creates an ir_graph pass for optimize_cf. */
895 ir_graph_pass_t *optimize_cf_pass(const char *name)
897 return def_graph_pass(name ? name : "optimize_cf", optimize_cf);