3 * Make Mux nodes from Conds where it is possible.
4 * @author Sebastian Hack
25 #include "irgraph_t.h"
41 #include "bitfiddle.h"
47 * Mux optimization routines.
51 static ir_node *local_optimize_mux(ir_node *mux)
55 ir_node *sel = get_Mux_sel(mux);
56 ir_node *cmp = skip_Proj(sel);
58 /* Optimize the children */
59 for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
60 ir_node *operand = get_irn_n(mux, i);
61 if(get_irn_op(operand) == op_Mux)
62 optimize_mux(operand);
65 /* If we have no cmp above the mux, get out. */
66 if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
68 pn_Cmp cc = get_Proj_proj(sel);
69 ir_mode *mode = get_irn_mode(mux);
70 ir_node *block = get_nodes_block(n);
71 ir_node *cmp_left = get_Cmp_left(cmp);
72 ir_node *cmp_right = get_Cmp_right(cmp);
73 ir_node *mux_true = get_Mux_true(mux);
74 ir_node *mux_false = get_Mux_false(mux);
77 * Check for comparisons with signed integers.
79 if(mode_is_int(mode) /* We need an integral mode */
80 && mode_is_signed(mode) /* which is signed */
81 && cc == Lt) { /* and have to compare for < */
84 * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
88 if(classify_Const(cmp_right) == CNST_NULL
89 && classify_Const(mux_true) == CNST_ALL_ONE
90 && classify_Const(mux_false) == CNST_NULL) {
92 ir_mode *u_mode = find_unsigned_mode(mode);
94 res = new_r_Shrs(current_ir_graph, block, cmp_left,
95 new_r_Const_long(current_ir_graph, block, u_mode,
96 get_mode_size_bits(mode) - 1),
101 * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
105 else if(classify_Const(cmp_left) == CNST_NULL
106 && classify_Const(mux_true) == CNST_ONE
107 && classify_Const(mux_false) == CNST_NULL) {
109 ir_mode *u_mode = find_unsigned_mode(mode);
111 res = new_r_Shr(current_ir_graph, block,
113 /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
114 new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
116 /* This is sizeof_bits(T) - 1 */
117 new_r_Const_long(current_ir_graph, block, u_mode,
118 get_mode_size_bits(mode) - 1),
129 * check, if a node is const and return its tarval or
130 * return a default tarval.
131 * @param cnst The node whose tarval to get.
132 * @param or The alternative tarval, if the node is no Const.
133 * @return The tarval of @p cnst, if the node is Const, @p or otherwise.
135 static tarval *get_value_or(ir_node *cnst, tarval *or)
137 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
142 * Try to optimize nested muxes into a dis- or conjunction
144 * @param mux The parent mux, which has muxes as operands.
145 * @return The replacement node for this mux. If the optimization is
146 * successful, this might be an And or Or node, if not, it's the mux
149 static ir_node *optimize_mux_chain(ir_node *mux)
154 ir_mode *mode = get_irn_mode(mux);
159 * If we have no mux, or its mode is not integer, we
162 if(get_irn_op(mux) != op_Mux || !mode_is_int(mode))
166 null = get_tarval_null(mode);
/* minus_one = 0 - 1 in this mode, i.e. the all-ones bit pattern. */
167 minus_one = tarval_sub(null, get_tarval_one(mode));
/* ops[0]/ops[1] mirror the Mux input layout: false then true operand. */
169 ops[0] = get_Mux_false(mux);
170 ops[1] = get_Mux_true(mux);
172 for(i = 0; i < 2; ++i) {
174 tarval *tva, *tvb, *tvd;
178 * A mux operand at the first position can be factored
179 * out, if the operands fulfill several conditions:
181 * mux(c1, mux(c2, a, b), d)
183 * This can be made into:
184 * 1) mux(c1, 0, d) | mux(c2, a, b)
185 * if a | d == d and b | d == d
187 * 2) mux(c1, -1, d) & mux(c2, a, b)
188 * if a & d == d and a & b == b
190 if(get_irn_op(ops[i]) == op_Mux) {
193 a = get_Mux_false(child_mux);
194 b = get_Mux_true(child_mux);
197 /* Try the or stuff */
/* Non-Const operands default to the neutral value for the check,
 * so only actual constants can satisfy the equations below. */
198 tva = get_value_or(a, minus_one);
199 tvb = get_value_or(b, minus_one);
200 tvd = get_value_or(d, null);
202 if(tarval_cmp(tarval_or(tva, tvd), tvd) == pn_Cmp_Eq
203 && tarval_cmp(tarval_or(tvb, tvd), tvd) == pn_Cmp_Eq) {
205 ops[i] = new_Const(mode, null);
206 res = new_r_Or(current_ir_graph, get_nodes_block(mux),
207 mux, child_mux, mode);
211 /* If the or didn't go, try the and stuff */
212 tva = get_value_or(a, null);
213 tvb = get_value_or(b, null);
214 tvd = get_value_or(d, minus_one);
216 if(tarval_cmp(tarval_and(tva, tvd), tvd) == pn_Cmp_Eq
217 && tarval_cmp(tarval_and(tvb, tvd), tvd) == pn_Cmp_Eq) {
219 ops[i] = new_Const(mode, minus_one);
220 res = new_r_And(current_ir_graph, get_nodes_block(mux),
221 mux, child_mux, mode);
227 /* recursively optimize nested muxes. */
228 set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
229 set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
235 /***********************************************************
236 * The If conversion itself.
237 ***********************************************************/
242 static opt_if_conv_info_t default_info = {
246 /** The debugging module. */
247 static firm_dbg_module_t *dbg;
250 * A simple check for side effects up to the opcode of an ir node.
251 * @param irn The ir node to check.
252 * @return 1 if the opcode itself may produce side effects, 0 if not.
254 static INLINE int has_side_effects(const ir_node *irn)
/* NOTE(review): 'op' is unread in the visible lines; the elided lines
 * presumably test it -- confirm before removing. */
256 ir_op *op = get_irn_op(irn);
/* Nodes without a data/memory-free mode are treated as side-effecting. */
261 return !mode_is_datab(get_irn_mode(irn));
/* Short aliases for the hook result codes used as failure reasons
 * below. (TO_DEEP keeps its historical spelling; renaming the
 * identifier would change code, not just comments.) */
264 enum failure_reason_t {
265 SUCCESS = IF_RESULT_SUCCESS,
266 TO_DEEP = IF_RESULT_TOO_DEEP,
267 SIDE_EFFECTS = IF_RESULT_SIDE_EFFECT,
268 PHI_FOUND = IF_RESULT_SIDE_EFFECT_PHI
272 * Decides, if a given expression and its subexpressions
273 * (to a certain, also given, extent) can be moved to a block.
275 * @param expr The expression to examine.
276 * @param block The block where the expression should go.
277 * @param depth The current depth, passed recursively. Use 0 for
278 * non-recursive calls.
279 * @param max_depth The maximum depth to which the expression should be
282 * @return a failure reason
284 static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
288 ir_node *expr_block = get_nodes_block(expr);
291 * If we are forced to look too deep into the expression,
292 * treat it like it could not be moved.
294 if(depth >= max_depth) {
300 * If the block of the expression dominates the specified
301 * destination block, it does not matter if the expression
302 * has side effects or anything else. It is executed on each
303 * path the destination block is reached.
305 if (block_dominates(expr_block, dest_block))
309 * We cannot move phis!
317 * This should be superfluous and could be converted into an assertion.
318 * The destination block _must_ dominate the block of the expression,
319 * else the expression could be used without its definition.
/* NOTE(review): raw IF_RESULT_SIDE_EFFECT is used here instead of the
 * SIDE_EFFECTS alias from failure_reason_t -- same value, but
 * inconsistent with the enum introduced above. */
321 if (! block_dominates(dest_block, expr_block)) {
322 res = IF_RESULT_SIDE_EFFECT;
327 * Surely, if the expression does not have a data mode, it is not
328 * movable. Perhaps one should also test the floating property of
331 if (has_side_effects(expr)) {
332 res = IF_RESULT_SIDE_EFFECT;
337 * If the node looks alright so far, look at its operands and
338 * check them out. If one of them cannot be moved, this one
339 * cannot be moved either.
341 for (i = 0, n = get_irn_arity(expr); i < n; ++i) {
342 ir_node *op = get_irn_n(expr, i);
/* Projs do not increase the depth: they are mere tuple selectors. */
343 int new_depth = is_Proj(op) ? depth : depth + 1;
345 res = _can_move_to(op, dest_block, new_depth, max_depth);
352 DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
358 * Convenience function for _can_move_to.
359 * Checks, if an expression can be moved to another block. The check can
360 * be limited to an expression depth meaning if we need to crawl
361 * deeper into an expression than a given threshold to examine if
362 * it can be moved, the expression is rejected and the test returns
365 * @param expr The expression to check for.
366 * @param dest_block The destination block you want @p expr to be.
367 * @param max_depth The maximum depth @p expr should be investigated.
369 * @return return a failure reason
371 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
373 return _can_move_to(expr, dest_block, 0, max_depth);
377 * move a DAG given by a root node expr into a new block
379 * @param expr the root of a dag
380 * @param dest_block the destination block
382 static void move_to(ir_node *expr, ir_node *dest_block)
385 ir_node *expr_block = get_nodes_block(expr);
388 * If we reached the dominator, we are done.
389 * We will never put code through the dominator
391 if (block_dominates(expr_block, dest_block))
/* Move operands first, then the node itself (post-order). */
394 for (i = 0, n = get_irn_arity(expr); i < n; ++i)
395 move_to(get_irn_n(expr, i), dest_block);
397 set_nodes_block(expr, dest_block);
401 * return the common dominator of two blocks
403 static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
/* Fast paths: if one block dominates the other, it is the answer
 * (return statements elided in this view). */
405 if(block_dominates(b1, b2))
407 else if(block_dominates(b2, b1))
/* Otherwise walk up b1's idom chain until a dominator of b2 is found. */
412 for (p = get_Block_idom(b1); !block_dominates(p, b2); p = get_Block_idom(p));
418 * Information about a cond node.
420 typedef struct _cond_t {
421 ir_node *cond; /**< The cond node. */
422 struct list_head list; /**< List head which is used for queuing this cond
423 into the cond bunch it belongs to. */
/* Flag: set when this cond completely covers the phi's predecessors. */
425 unsigned totally_covers : 1;
426 struct _cond_t *link;
430 * Information about both 'branches'
431 * (true and false), the cond creates.
434 int pos; /**< Number of the predecessor of the
435 phi block by which this branch is
436 reached. It is -1, if this branch is
437 only reached through another cond. */
439 struct _cond_t *masked_by; /**< If this cond's branch is only reached
440 through another cond, we store this
441 cond ir_node here. */
446 * retrieve the conditional information from a Cond node
448 static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
/* Lookup by a template keyed on the cond pointer; NULL if not found. */
453 return set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
/* Callback type for pre/post visitors over a cond tree. */
457 typedef void (cond_walker_t)(cond_t *cond, void *env);
/* Depth-first walk over a cond and the conds masking its branches,
 * guarded by a visit number to terminate on shared subtrees. */
459 static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
460 long visited_nr, void *env)
464 if(cond->visited_nr >= visited_nr)
467 cond->visited_nr = visited_nr;
472 for(i = 0; i < 2; ++i) {
473 cond_t *c = cond->cases[i].masked_by;
476 _walk_conds(c, pre, post, visited_nr, env);
/* Monotonically increasing visit counter shared by all cond walks. */
483 static long cond_visited_nr = 0;
/* Entry point for a cond-tree walk; bumps the visit number first. */
485 static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
487 _walk_conds(cond, pre, post, ++cond_visited_nr, env);
/* Walker callback: threads each visited cond onto the singly linked
 * list whose head pointer is passed via env (elided lines do the link). */
490 static void link_conds(cond_t *cond, void *env)
492 cond_t **ptr = (cond_t **) env;
499 * Compare two conds for use in a firm set.
500 * Two cond_t's are equal, if they designate the same cond node.
/* @param a One cond. (missing from the original doc) */
502 * @param b Another one.
503 * @param size Not used.
504 * @return 0 (!) if they are equal, != 0 otherwise.
506 static int cond_cmp(const void *a, const void *b, size_t size)
510 return x->cond != y->cond;
514 * Information about conds which can be made to muxes.
515 * Instances of this struct are attached to the link field of
516 * blocks in which phis are located.
518 typedef struct _cond_info_t {
519 struct list_head list; /**< Used to list all of these structs per class. */
521 struct list_head roots; /**< A list of non-depending Conds. Two Conds are
522 independent, if it is not possible to reach one from the
523 other (all Conds in this list have to dominate the
524 block this struct is attached to). */
526 ir_node *first_phi; /**< The first phi node this cond info was made for. */
527 set *cond_set; /**< A set of all dominating reachable Conds. */
/*
 * Recursively search backwards from irn for Conds that dominate the phi
 * block, recording them (and which cond masks which branch) in ci.
 * NOTE(review): interior lines are elided in this view; code kept
 * byte-identical, comments only.
 */
533 static void _find_conds(ir_node *irn, long visited_nr,
534 ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
537 int saw_select_cond = 0;
539 block = get_nodes_block(irn);
542 * Only check this block if it is dominated by the specified
543 * dominator or it has not been visited yet.
545 if (block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
546 cond_t *res = masked_by;
549 /* check, if we're on a ProjX
551 * Further, the ProjX/Cond block must dominate the base block
552 * (the block with the phi in it), otherwise, the Cond
553 * is not affecting the phi so that a mux can be inserted.
555 if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
557 int proj = get_Proj_proj(irn);
558 ir_node *cond = get_Proj_pred(irn);
560 /* true, if the mode is a mode_b cond _NO_ switch cond */
561 int is_modeb_cond = get_irn_opcode(cond) == iro_Cond
562 && get_irn_mode(get_Cond_selector(cond)) == mode_b;
/* A switch Cond poisons this path: remember it to stop recursion. */
564 saw_select_cond = !is_modeb_cond;
566 /* Check, if the pred of the proj is a Cond
567 * with a Projb as selector.
572 memset(&c, 0, sizeof(c));
578 /* get or insert the cond info into the set. */
579 res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
582 * If this cond is already masked by the masked_by cond
583 * return immediately, since we don't have anything to add.
585 if(masked_by && res->cases[proj].masked_by == masked_by)
/* Newly seen conds start out as roots; demoted below when masked. */
590 list_add(&res->list, &ci->roots);
594 * Set masked by (either NULL or another cond node.
595 * If this cond is truly masked by another one, set
596 * the position of the actually investigated branch
597 * to -1. Since the cond is masked by another one,
598 * there could be more ways from the start block
599 * to this branch, so we choose -1.
601 res->cases[proj].masked_by = masked_by;
604 res->cases[proj].pos = pos;
607 * Since the masked_by nodes masks a cond, remove it from the
608 * root list of the conf trees.
611 assert(res->cases[proj].pos < 0);
612 list_del_init(&masked_by->list);
615 DBG((dbg, LEVEL_2, "%D%n (%s branch) "
616 "for pos %d in block %n reached by %n\n",
617 depth, cond, proj ? "true" : "false", pos,
618 block, masked_by ? masked_by->cond : NULL));
622 if(get_Block_block_visited(block) < visited_nr && !saw_select_cond) {
624 set_Block_block_visited(block, visited_nr);
626 /* Search recursively from this cond. */
627 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
628 ir_node *pred = get_irn_n(block, i);
631 * If the depth is 0 (the first recursion), we set the pos to
632 * the current viewed predecessor, else we adopt the position
633 * as given by the caller. We also increase the depth for the
634 * recursively called functions.
636 _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
644 * A convenience function for _find_conds.
645 * It sets some parameters needed for recursion to appropriate start
646 * values. Always use this function.
648 * @param irn The node to start looking for Conds from. This might
649 * be the phi node we are investigating.
650 * @param ci The cond info recording the found Conds (the original
doc said "conds"/"set"; the actual parameter is ci).
652 static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
656 ir_node *block = get_nodes_block(irn);
657 ir_node *dom = get_Block_idom(block);
/* Start one search per predecessor; each gets a fresh visit number
 * so visited marks from one predecessor do not block another. */
659 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
660 ir_node *pred = get_irn_n(block, i);
662 inc_irg_block_visited(current_ir_graph);
663 visited_nr = get_irg_block_visited(current_ir_graph);
664 set_Block_block_visited(block, visited_nr);
666 DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
667 _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
672 * Make the mux for a given cond.
673 * @param phi The phi node which shall be replaced by a mux.
674 * @param dom The block where the muxes shall be placed.
675 * @param cond The cond information.
676 * @return The mux node made for this cond.
678 static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
679 int max_depth, ir_node **mux, bitset_t *positions, int *muxes_made, long visited_nr)
682 ir_node *projb = get_Cond_selector(cond->cond);
683 ir_node *bl = get_nodes_block(cond->cond);
684 ir_node *operands[2];
687 cond->visited_nr = visited_nr;
688 DBG((dbg, LEVEL_2, "%n\n", cond->cond));
689 for(i = 0; i < 2; ++i) {
690 cond_t *masked_by = cond->cases[i].masked_by;
691 int pos = cond->cases[i].pos;
697 * If this Cond branch is masked by another cond, make the mux
698 * for that Cond first, since the Mux for this cond takes
703 DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
704 if(masked_by->visited_nr < visited_nr)
705 operands[i] = make_mux_on_demand(phi, dom, masked_by, max_depth, mux, positions, muxes_made, visited_nr);
709 * If this cond branch is not masked by another cond, take
710 * the corresponding phi operand as an operand to the mux.
713 operands[i] = get_irn_n(phi, pos);
719 * Move the operands to the dominator block if the cond
720 * made sense. Some Conds found are not suitable for making a mux
721 * out of them, since one of their branches cannot be reached from
722 * the phi block. In that case we do not make a mux and return NULL.
724 if(operands[0] && operands[1]) {
725 if (operands[0] == operands[1]) {
726 /* there is no gain in using mux in this case, as
727 it will be optimized away. We will NOT move the
728 content of the blocks either
730 for (i = 0; i < 2; ++i)
732 bitset_set(positions, set[i]);
738 can_move[0] = can_move_to(operands[0], bl, max_depth);
739 can_move[1] = can_move_to(operands[1], bl, max_depth);
741 if (can_move[0] == SUCCESS && can_move[1] == SUCCESS) {
742 move_to(operands[0], bl);
743 move_to(operands[1], bl);
/* Mux input order: (sel, false, true) = (projb, operands[0], operands[1]). */
746 *mux = new_r_Mux(current_ir_graph, bl, projb,
747 operands[0], operands[1], get_irn_mode(operands[0]));
751 DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
752 *mux, projb, operands[0], operands[1], set[0], set[1]));
754 for(i = 0; i < 2; ++i)
756 bitset_set(positions, set[i]);
758 /* we have done one */
/* NOTE(review): 'set[i]' here -- if this statement sits after the
 * loop above (elided braces), i == 2 and set[i] is out of range;
 * confirm against the full source. */
759 hook_if_conversion(current_ir_graph, phi, set[i], *mux, IF_RESULT_SUCCESS);
763 if(can_move[0] != SUCCESS)
764 hook_if_conversion(current_ir_graph, phi, set[0], NULL, can_move[0]);
765 if(can_move[1] != SUCCESS)
766 hook_if_conversion(current_ir_graph, phi, set[1], NULL, can_move[1]);
/* NOTE(review): operands[] are ir_node pointers but are compared to
 * the enum value SUCCESS here -- presumably '!operands[x]' (branch
 * unreachable) was intended; verify. */
770 if(operands[0] != SUCCESS)
771 hook_if_conversion(current_ir_graph, phi, set[0], NULL, IF_RESULT_BAD_CF)
772 if(operands[1] != SUCCESS)
773 hook_if_conversion(current_ir_graph, phi, set[1], NULL, IF_RESULT_BAD_CF);
/* Per-phi record: links the phi into the work list and points at the
 * cond info of its block (further members elided in this view). */
779 typedef struct _phi_info_t {
780 struct list_head list;
781 cond_info_t *cond_info;
787 * Examine a phi node if it can be replaced by some muxes.
788 * @param irn A phi node.
789 * @param info Parameters for the if conversion algorithm.
791 static int check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
793 int max_depth = info->max_depth;
794 ir_node *irn = phi_info->irn;
796 cond_info_t *cond_info = phi_info->cond_info;
802 block = get_nodes_block(irn);
803 arity = get_irn_arity(irn);
/* One bit per phi predecessor; set for positions covered by a mux. */
804 positions = bitset_alloca(arity);
807 assert(get_irn_arity(irn) == get_irn_arity(block));
810 DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
812 list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
813 ir_node *cidom = block;
815 cond_t *p, *head = NULL;
818 bitset_clear_all(positions);
820 DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
822 * Link all conds which are in the subtree of
823 * the current cond in the list together.
825 walk_conds(cond, link_conds, NULL, &head);
/* Narrow cidom to the common idom of all blocks feeding the phi
 * through this cond subtree. */
828 for(p = head; p; p = p->link) {
829 for(i = 0; i < 2; ++i) {
830 int pos = p->cases[i].pos;
832 cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
836 DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
837 make_mux_on_demand(irn, cidom, cond, max_depth, &mux, positions, &muxes_made, ++cond_visited_nr);
/* Rewire every covered phi input to the freshly built mux. */
840 bitset_foreach(positions, pos)
841 set_irn_n(irn, (int) pos, mux);
846 * optimize the phi away. This can enable further runs of this
847 * function. Look at _can_move. phis cannot be moved there.
849 nw = optimize_in_place_2(irn);
/* Walker environment: obstack for allocations plus the collected
 * cond-info and phi work lists. */
856 typedef struct _cond_walk_info_t {
857 struct obstack *obst;
858 struct list_head cond_info_head;
859 struct list_head phi_head;
/* Pre-walker: clear every node's link field before the post pass
 * attaches cond infos to block links. */
863 static void annotate_cond_info_pre(ir_node *irn, void *data)
865 set_irn_link(irn, NULL);
868 static void annotate_cond_info_post(ir_node *irn, void *data)
870 cond_walk_info_t *cwi = data;
873 * Check, if the node is a phi
874 * we then compute a set of conds which are reachable from this
875 * phi's block up to its dominator.
876 * The set is attached to the blocks link field.
878 if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
879 ir_node *block = get_nodes_block(irn);
881 cond_info_t *ci = get_irn_link(block);
883 /* If the set is not yet computed, do it now. */
885 ci = obstack_alloc(cwi->obst, sizeof(*ci));
886 ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
889 INIT_LIST_HEAD(&ci->roots);
890 INIT_LIST_HEAD(&ci->list);
893 * Add this cond info to the list of all cond infos
894 * in this graph. This is just done to free the
895 * set easier afterwards (we save an irg_walk_graph).
/* NOTE(review): list_add(new, head) is the usual convention; here the
 * head (&cwi->cond_info_head) is passed first -- confirm the argument
 * order against this project's list implementation. */
897 list_add(&cwi->cond_info_head, &ci->list);
899 DBG((dbg, LEVEL_2, "searching conds at %n\n", irn));
902 * Fill the set with conds we find on the way from
903 * the block to its dominator.
908 * If there where no suitable conds, delete the set
909 * immediately and reset the set pointer to NULL
911 if(set_count(ci->cond_set) == 0) {
912 del_set(ci->cond_set);
914 obstack_free(cwi->obst, ci);
920 DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
922 set_irn_link(block, ci);
/* Record the phi itself so opt_if_conv can process it later. */
925 phi_info_t *pi = obstack_alloc(cwi->obst, sizeof(*pi));
928 INIT_LIST_HEAD(&pi->list);
929 list_add(&pi->list, &cwi->phi_head);
/* Walker callback: emit one VCG node per cond and an edge to each
 * cond masking one of its branches (env is the output FILE*). */
935 static void dump_conds(cond_t *cond, void *env)
940 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
941 cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
942 get_nodes_block(cond->cond));
944 for(i = 0; i < 2; ++i)
945 if(cond->cases[i].masked_by)
946 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
947 cond, cond->cases[i].masked_by, i);
/* Debug helper: write all collected cond trees and phis of the graph
 * to "<entity>-conds.vcg" for visualization. */
950 static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
955 snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
957 if((f = fopen(buf, "wt")) != NULL) {
962 ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
963 list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
964 ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
965 list_for_each_entry(cond_t, cond, &ci->roots, list) {
966 walk_conds(cond, NULL, dump_conds, f);
967 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
971 list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
972 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
973 phi->irn, phi->irn, get_nodes_block(phi->irn));
974 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
/*
 * Driver of the if-conversion pass: collect cond/phi info with a graph
 * walk, then try to turn each suitable phi into Mux nodes.
 */
980 void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
984 phi_info_t *phi_info;
985 cond_info_t *cond_info;
986 cond_walk_info_t cwi;
/* Fall back to the static defaults when no parameters are given. */
988 opt_if_conv_info_t *p = params ? params : &default_info;
990 if(!get_opt_if_conversion())
996 INIT_LIST_HEAD(&cwi.cond_info_head);
997 INIT_LIST_HEAD(&cwi.phi_head);
999 /* Init the debug stuff. */
1000 dbg = firm_dbg_register("firm.opt.ifconv");
1002 firm_dbg_set_mask(dbg, LEVEL_1);
1005 /* Ensure, that the dominators are computed. */
1008 DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
1009 get_entity_name(get_irg_entity(irg)), irg));
1012 * Collect information about the conds, put the phis on an obstack.
1013 * It is important that phi nodes which are 'higher' (with a
1014 * lower dfs pre order) are in front of the obstack. Since they are
1015 * possibly turned in to muxes this can enable the optimization
1018 irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
1021 vcg_dump_conds(irg, &cwi);
1024 /* Process each suitable phi found. */
1025 list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
1026 DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
1027 muxes_made += check_out_phi(phi_info, p);
1030 list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
1031 del_set(cond_info->cond_set);
1034 DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
/* 'obst' is declared in lines elided from this view. */
1036 obstack_free(&obst, NULL);