3 * Make Mux nodes from Conds where it is possible.
4 * @author Sebastian Hack
13 #include "irgraph_t.h"
29 #include "bitfiddle.h"
35 * Mux optimization routines.
39 static ir_node *local_optimize_mux(ir_node *mux)
43 ir_node *sel = get_Mux_sel(mux);
44 ir_node *cmp = skip_Proj(sel);
46 /* Optimize the children */
47 for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
48 ir_node *operand = get_irn_n(mux, i);
49 if(get_irn_op(operand) == op_Mux)
50 optimize_mux(operand);
53 /* If we have no cmp above the mux, get out. */
54 if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
56 pn_Cmp cc = get_Proj_proj(sel);
57 ir_mode *mode = get_irn_mode(mux);
58 ir_node *block = get_nodes_block(n);
59 ir_node *cmp_left = get_Cmp_left(cmp);
60 ir_node *cmp_right = get_Cmp_right(cmp);
61 ir_node *mux_true = get_Mux_true(mux);
62 ir_node *mux_false = get_Mux_false(mux);
65 * Check for comparisons with signed integers.
67 if(mode_is_int(mode) /* We need an integral mode */
68 && mode_is_signed(mode) /* which is signed */
69 && cc == Lt) { /* and have to compare for < */
72 * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
76 if(classify_Const(cmp_right) == CNST_NULL
77 && classify_Const(mux_true) == CNST_ALL_ONE
78 && classify_Const(mux_false) == CNST_NULL) {
80 ir_mode *u_mode = find_unsigned_mode(mode);
82 res = new_r_Shrs(current_ir_graph, block, cmp_left,
83 new_r_Const_long(current_ir_graph, block, u_mode,
84 get_mode_size_bits(mode) - 1),
89 * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
93 else if(classify_Const(cmp_left) == CNST_NULL
94 && classify_Const(mux_true) == CNST_ONE
95 && classify_Const(mux_false) == CNST_NULL) {
97 ir_mode *u_mode = find_unsigned_mode(mode);
99 res = new_r_Shr(current_ir_graph, block,
101 /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
102 new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
104 /* This is sizeof_bits(T) - 1 */
105 new_r_Const_long(current_ir_graph, block, u_mode,
106 get_mode_size_bits(mode) - 1),
117 * check, if a node is const and return its tarval or
118 * return a default tarval.
119 * @param cnst The node whose tarval to get.
120 * @param or The alternative tarval, if the node is no Const.
121 * @return The tarval of @p cnst, if the node is Const, @p or otherwise.
123 static tarval *get_value_or(ir_node *cnst, tarval *or)
125 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
130 * Try to optimize nested muxes into a dis- or conjunction
132 * @param mux The parent mux, which has muxes as operands.
133 * @return The replacement node for this mux. If the optimization is
134 * successful, this might be an And or Or node, if not, it's the mux
137 static ir_node *optimize_mux_chain(ir_node *mux)
142 ir_mode *mode = get_irn_mode(mux);
147 * If we have no mux, or its mode is not integer, we
150 if(get_irn_op(mux) != op_Mux || !mode_is_int(mode))
/* null is the zero tarval of this mode; minus_one is all-ones (0 - 1). */
154 null = get_tarval_null(mode);
155 minus_one = tarval_sub(null, get_tarval_one(mode));
/* ops[0]/ops[1] are the false/true data operands of the Mux. */
157 ops[0] = get_Mux_false(mux);
158 ops[1] = get_Mux_true(mux);
160 for(i = 0; i < 2; ++i) {
162 tarval *tva, *tvb, *tvd;
166 * A mux operand at the first position can be factored
167 * out, if the operands fulfill several conditions:
169 * mux(c1, mux(c2, a, b), d)
171 * This can be made into:
172 * 1) mux(c1, 0, d) | mux(c2, a, b)
173 * if a | d == d and b | d == d
175 * 2) mux(c1, -1, d) & mux(c2, a, b)
176 * if a & d == d and b & d == d
178 if(get_irn_op(ops[i]) == op_Mux) {
181 a = get_Mux_false(child_mux);
182 b = get_Mux_true(child_mux);
185 /* Try the or stuff */
/* Non-Const operands default to minus_one/null so the test below fails safely. */
186 tva = get_value_or(a, minus_one);
187 tvb = get_value_or(b, minus_one);
188 tvd = get_value_or(d, null);
190 if(tarval_cmp(tarval_or(tva, tvd), tvd) == pn_Cmp_Eq
191 && tarval_cmp(tarval_or(tvb, tvd), tvd) == pn_Cmp_Eq) {
/* Replace the child mux operand by 0 and OR the child mux in afterwards. */
193 ops[i] = new_Const(mode, null);
194 res = new_r_Or(current_ir_graph, get_nodes_block(mux),
195 mux, child_mux, mode);
199 /* If the or didn't go, try the and stuff */
200 tva = get_value_or(a, null);
201 tvb = get_value_or(b, null);
202 tvd = get_value_or(d, minus_one);
204 if(tarval_cmp(tarval_and(tva, tvd), tvd) == pn_Cmp_Eq
205 && tarval_cmp(tarval_and(tvb, tvd), tvd) == pn_Cmp_Eq) {
/* Replace the child mux operand by -1 and AND the child mux in afterwards. */
207 ops[i] = new_Const(mode, minus_one);
208 res = new_r_And(current_ir_graph, get_nodes_block(mux),
209 mux, child_mux, mode);
215 /* recursively optimize nested muxes. */
/* Mux inputs: 0 = sel, 1 = false operand, 2 = true operand. */
216 set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
217 set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
223 /***********************************************************
224 * The If conversion itself.
225 ***********************************************************/
/** Default parameters, used when opt_if_conv() is called with params == NULL. */
230 static opt_if_conv_info_t default_info = {
234 /** The debugging module. */
235 static firm_dbg_module_t *dbg;
238 * A simple check for side effects up to the opcode of an ir node.
239 * @param irn The ir node to check,
240 * @return 1 if the opcode itself may produce side effects, 0 if not.
242 static INLINE int has_side_effects(const ir_node *irn)
244 ir_op *op = get_irn_op(irn);
/* A node whose mode is not a data/mode_b mode (e.g. memory, X) counts as side-effecting. */
249 return !mode_is_datab(get_irn_mode(irn));
/* Reasons why an expression cannot be moved, aliased to the public
 * IF_RESULT_* hook codes. NOTE(review): TO_DEEP is a misspelling of
 * TOO_DEEP but may be referenced elsewhere in the file, so it is kept. */
252 enum failure_reason_t {
253 SUCCESS = IF_RESULT_SUCCESS,
254 TO_DEEP = IF_RESULT_TOO_DEEP,
255 SIDE_EFFECTS = IF_RESULT_SIDE_EFFECT,
256 PHI_FOUND = IF_RESULT_SIDE_EFFECT_PHI
260 * Decides, if a given expression and its subexpressions
261 * (to a certain, also given extent) can be moved to a block.
263 * @param expr The expression to examine.
264 * @param dest_block The block where the expression should go.
265 * @param depth The current depth, passed recursively. Use 0 for
266 * non-recursive calls.
267 * @param max_depth The maximum depth to which the expression should be
270 * @return a failure reason
272 static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
276 ir_node *expr_block = get_nodes_block(expr);
279 * If we are forced to look too deep into the expression,
280 * treat it like it could not be moved.
282 if(depth >= max_depth) {
288 * If the block of the expression dominates the specified
289 * destination block, it does not matter if the expression
290 * has side effects or anything else. It is executed on each
291 * path the destination block is reached.
293 if (block_dominates(expr_block, dest_block))
297 * We cannot move phis!
305 * This should be superfluous and could be converted into an assertion.
306 * The destination block _must_ dominate the block of the expression,
307 * else the expression could be used without its definition.
309 if (! block_dominates(dest_block, expr_block)) {
310 res = IF_RESULT_SIDE_EFFECT;
315 * Surely, if the expression does not have a data mode, it is not
316 * movable. Perhaps one should also test the floating property of
319 if (has_side_effects(expr)) {
320 res = IF_RESULT_SIDE_EFFECT;
325 * If the node looks alright so far, look at its operands and
326 * check them out. If one of them cannot be moved, this one
327 * cannot be moved either.
329 for (i = 0, n = get_irn_arity(expr); i < n; ++i) {
330 ir_node *op = get_irn_n(expr, i);
/* Projs are "free": following a Proj does not count towards the depth limit. */
331 int new_depth = is_Proj(op) ? depth : depth + 1;
333 res = _can_move_to(op, dest_block, new_depth, max_depth);
340 DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
346 * Convenience function for _can_move_to.
347 * Checks, if an expression can be moved to another block. The check can
348 * be limited to an expression depth meaning if we need to crawl
349 * deeper into an expression than a given threshold to examine if
350 * it can be moved, the expression is rejected and the test returns
353 * @param expr The expression to check for.
354 * @param dest_block The destination block you want @p expr to be.
355 * @param max_depth The maximum depth @p expr should be investigated.
357 * @return return a failure reason
359 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
361 return _can_move_to(expr, dest_block, 0, max_depth);
365 * move a DAG given by a root node expr into a new block
367 * @param expr the root of a dag
368 * @param dest_block the destination block
370 static void move_to(ir_node *expr, ir_node *dest_block)
373 ir_node *expr_block = get_nodes_block(expr);
376 * If we reached the dominator, we are done.
377 * We will never put code through the dominator
379 if (block_dominates(expr_block, dest_block))
/* Move the operands first (post-order), then the node itself. */
382 for (i = 0, n = get_irn_arity(expr); i < n; ++i)
383 move_to(get_irn_n(expr, i), dest_block);
385 set_nodes_block(expr, dest_block);
389 * return the common dominator of two blocks
/* Fast paths: if one block dominates the other, it is the common dominator.
 * Otherwise walk up b1's immediate-dominator chain until a dominator of b2
 * is found. */
391 static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
393 if(block_dominates(b1, b2))
395 else if(block_dominates(b2, b1))
400 for (p = get_Block_idom(b1); !block_dominates(p, b2); p = get_Block_idom(p));
406 * Information about a cond node.
408 typedef struct _cond_t {
409 ir_node *cond; /**< The cond node. */
410 struct list_head list; /**< List head which is used for queuing this cond
411 into the cond bunch it belongs to. */
/* NOTE(review): flag semantics are not visible in this chunk — presumably
 * set when the cond's branches cover all phi predecessors; confirm. */
413 unsigned totally_covers : 1;
/* Scratch pointer used by link_conds()/check_out_phi() to thread all conds
 * of one subtree into a singly linked list. */
414 struct _cond_t *link;
418 * Information about both 'branches'
419 * (true and false), the cond creates.
422 int pos; /**< Number of the predecessor of the
423 phi block by which this branch is
424 reached. It is -1, if this branch is
425 only reached through another cond. */
427 struct _cond_t *masked_by; /**< If this cond's branch is only reached
428 through another cond, we store this
429 cond ir_node here. */
434 * retrieve the conditional information from a Cond node
/* Looks the node up in the cond set via a template keyed on the node pointer
 * (templ setup is in lines not shown here); returns NULL if not present. */
436 static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
441 return set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
/* Visitor type for walking a tree of cond_t records. */
445 typedef void (cond_walker_t)(cond_t *cond, void *env);
/* Depth-first walk over the cond tree following the masked_by edges of both
 * branches; pre/post callbacks may be NULL. The visited_nr guard makes the
 * walk terminate on shared/cyclic subtrees. */
447 static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
448 long visited_nr, void *env)
452 if(cond->visited_nr >= visited_nr)
455 cond->visited_nr = visited_nr;
460 for(i = 0; i < 2; ++i) {
461 cond_t *c = cond->cases[i].masked_by;
464 _walk_conds(c, pre, post, visited_nr, env);
/* Global generation counter for cond walks; incremented per walk so each
 * walk sees all conds as unvisited. */
471 static long cond_visited_nr = 0;
/* Convenience wrapper around _walk_conds using a fresh visited number. */
473 static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
475 _walk_conds(cond, pre, post, ++cond_visited_nr, env);
/* walk_conds() callback: threads each visited cond onto the singly linked
 * 'link' list whose head pointer is passed via env (the list insertion
 * itself is in lines not shown here). */
478 static void link_conds(cond_t *cond, void *env)
480 cond_t **ptr = (cond_t **) env;
487 * Compare two conds for use in a firm set.
488 * Two cond_t's are equal, if they designate the same cond node.
489 * @param a One cond.
490 * @param b Another one.
491 * @param size Not used.
492 * @return 0 (!) if they are equal, != 0 otherwise.
494 static int cond_cmp(const void *a, const void *b, size_t size)
/* Identity comparison on the wrapped ir_node pointer. */
498 return x->cond != y->cond;
502 * Information about conds which can be made to muxes.
503 * Instances of this struct are attached to the link field of
504 * blocks in which phis are located.
506 typedef struct _cond_info_t {
507 struct list_head list; /**< Used to list all of these structs per class. */
509 struct list_head roots; /**< A list of non-depending Conds. Two Conds are
510 independent, if it's not possible to reach one from the
511 other (all Conds in this list have to dominate the
512 block this struct is attached to). */
514 ir_node *first_phi; /**< The first phi node this cond info was made for. */
515 set *cond_set; /**< A set of all dominating reachable Conds. */
/* Recursive search for Conds that control how the phi block is reached.
 * Walks backwards from irn through control flow, recording each mode_b Cond
 * (via its ProjX) in ci->cond_set and building the masked_by tree. */
521 static void _find_conds(ir_node *irn, long visited_nr,
522 ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
525 int saw_select_cond = 0;
527 block = get_nodes_block(irn);
530 * Only check this block if it is dominated by the specified
531 * dominator and has not been visited yet.
533 if (block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
534 cond_t *res = masked_by;
537 /* check, if we're on a ProjX
539 * Further, the ProjX/Cond block must dominate the base block
540 * (the block with the phi in it), otherwise, the Cond
541 * is not affecting the phi so that a mux can be inserted.
543 if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
545 int proj = get_Proj_proj(irn);
546 ir_node *cond = get_Proj_pred(irn);
548 /* true, if the mode is a mode_b cond _NO_ switch cond */
549 int is_modeb_cond = get_irn_opcode(cond) == iro_Cond
550 && get_irn_mode(get_Cond_selector(cond)) == mode_b;
/* A switch Cond (non-mode_b selector) blocks further recursion below. */
552 saw_select_cond = !is_modeb_cond;
554 /* Check, if the pred of the proj is a Cond
555 * with a Projb as selector.
560 memset(&c, 0, sizeof(c));
566 /* get or insert the cond info into the set. */
567 res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
570 * If this cond is already masked by the masked_by cond
571 * return immediately, since we don't have anything to add.
573 if(masked_by && res->cases[proj].masked_by == masked_by)
578 list_add(&res->list, &ci->roots);
582 * Set masked by (either NULL or another cond node).
583 * If this cond is truly masked by another one, set
584 * the position of the actually investigated branch
585 * to -1. Since the cond is masked by another one,
586 * there could be more ways from the start block
587 * to this branch, so we choose -1.
589 res->cases[proj].masked_by = masked_by;
592 res->cases[proj].pos = pos;
595 * Since the masked_by node masks a cond, remove it from the
596 * root list of the cond trees.
599 assert(res->cases[proj].pos < 0);
600 list_del_init(&masked_by->list);
603 DBG((dbg, LEVEL_2, "%D%n (%s branch) "
604 "for pos %d in block %n reached by %n\n",
605 depth, cond, proj ? "true" : "false", pos,
606 block, masked_by ? masked_by->cond : NULL));
610 if(get_Block_block_visited(block) < visited_nr && !saw_select_cond) {
612 set_Block_block_visited(block, visited_nr);
614 /* Search recursively from this cond. */
615 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
616 ir_node *pred = get_irn_n(block, i);
619 * If the depth is 0 (the first recursion), we set the pos to
620 * the current viewed predecessor, else we adopt the position
621 * as given by the caller. We also increase the depth for the
622 * recursively called functions.
624 _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
632 * A convenience function for _find_conds.
633 * It sets some parameters needed for recursion to appropriate start
634 * values. Always use this function.
636 * @param irn The node to start looking for Conds from. This might
637 * be the phi node we are investigating.
638 * @param ci The cond info to record the found Conds in.
640 static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
644 ir_node *block = get_nodes_block(irn);
645 ir_node *dom = get_Block_idom(block);
/* Start one search per predecessor of the phi block, each with a fresh
 * block-visited generation so the searches do not interfere. */
647 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
648 ir_node *pred = get_irn_n(block, i);
650 inc_irg_block_visited(current_ir_graph);
651 visited_nr = get_irg_block_visited(current_ir_graph);
652 set_Block_block_visited(block, visited_nr);
654 DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
655 _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
660 * Make the mux for a given cond.
661 * @param phi The phi node which shall be replaced by a mux.
662 * @param dom The block where the muxes shall be placed.
663 * @param cond The cond information.
664 * @return The mux node made for this cond.
666 static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
667 int max_depth, ir_node **mux, bitset_t *positions, int *muxes_made, long visited_nr)
670 ir_node *projb = get_Cond_selector(cond->cond);
671 ir_node *bl = get_nodes_block(cond->cond);
672 ir_node *operands[2];
675 cond->visited_nr = visited_nr;
676 DBG((dbg, LEVEL_2, "%n\n", cond->cond));
/* Determine the two data operands, one per branch of the cond. */
677 for(i = 0; i < 2; ++i) {
678 cond_t *masked_by = cond->cases[i].masked_by;
679 int pos = cond->cases[i].pos;
685 * If this Cond branch is masked by another cond, make the mux
686 * for that Cond first, since the Mux for this cond takes
691 DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
692 if(masked_by->visited_nr < visited_nr)
693 operands[i] = make_mux_on_demand(phi, dom, masked_by, max_depth, mux, positions, muxes_made, visited_nr);
697 * If this cond branch is not masked by another cond, take
698 * the corresponding phi operand as an operand to the mux.
701 operands[i] = get_irn_n(phi, pos);
707 * Move the operands to the dominator block if the cond
708 * made sense. Some Conds found are not suitable for making a mux
709 * out of them, since one of their branches cannot be reached from
710 * the phi block. In that case we do not make a mux and return NULL.
712 if(operands[0] && operands[1]) {
713 if (operands[0] == operands[1]) {
714 /* there is no gain in using mux in this case, as
715 it will be optimized away. We will NOT move the
716 content of the blocks either
718 for (i = 0; i < 2; ++i)
720 bitset_set(positions, set[i]);
726 can_move[0] = can_move_to(operands[0], bl, max_depth);
727 can_move[1] = can_move_to(operands[1], bl, max_depth);
729 if (can_move[0] == SUCCESS && can_move[1] == SUCCESS) {
730 move_to(operands[0], bl);
731 move_to(operands[1], bl);
/* Mux(sel, false_op, true_op) in the Cond's block replaces the phi paths. */
734 *mux = new_r_Mux(current_ir_graph, bl, projb,
735 operands[0], operands[1], get_irn_mode(operands[0]));
739 DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
740 *mux, projb, operands[0], operands[1], set[0], set[1]));
742 for(i = 0; i < 2; ++i)
744 bitset_set(positions, set[i]);
746 /* we have done one */
747 hook_if_conversion(current_ir_graph, phi, set[i], *mux, IF_RESULT_SUCCESS);
/* Report the reason when one of the operands could not be moved. */
751 if(can_move[0] != SUCCESS)
752 hook_if_conversion(current_ir_graph, phi, set[0], NULL, can_move[0]);
753 if(can_move[1] != SUCCESS)
754 hook_if_conversion(current_ir_graph, phi, set[1], NULL, can_move[1]);
/* NOTE(review): comparing an ir_node* against SUCCESS (an int enum) looks
 * wrong; presumably these should test operands[i] == NULL (branch not
 * reachable, IF_RESULT_BAD_CF) — TODO confirm against the full file. */
758 if(operands[0] != SUCCESS)
759 hook_if_conversion(current_ir_graph, phi, set[0], NULL, IF_RESULT_BAD_CF);
760 if(operands[1] != SUCCESS)
761 hook_if_conversion(current_ir_graph, phi, set[1], NULL, IF_RESULT_BAD_CF);
/* Per-phi record queued on cond_walk_info_t::phi_head; pairs a phi node
 * with the cond info of its block. */
767 typedef struct _phi_info_t {
768 struct list_head list;
769 cond_info_t *cond_info;
775 * Examine a phi node if it can be replaced by some muxes.
776 * @param phi_info The phi record (the phi node plus its cond info).
777 * @param info Parameters for the if conversion algorithm.
779 static int check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
781 int max_depth = info->max_depth;
782 ir_node *irn = phi_info->irn;
784 cond_info_t *cond_info = phi_info->cond_info;
790 block = get_nodes_block(irn);
791 arity = get_irn_arity(irn);
/* One bit per phi predecessor: set when that input was folded into a mux. */
792 positions = bitset_alloca(arity);
795 assert(get_irn_arity(irn) == get_irn_arity(block));
798 DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
800 list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
801 ir_node *cidom = block;
803 cond_t *p, *head = NULL;
806 bitset_clear_all(positions);
808 DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
810 * Link all conds which are in the subtree of
811 * the current cond in the list together.
813 walk_conds(cond, link_conds, NULL, &head);
/* Compute the common immediate dominator of all blocks feeding the phi
 * through this cond subtree. */
816 for(p = head; p; p = p->link) {
817 for(i = 0; i < 2; ++i) {
818 int pos = p->cases[i].pos;
820 cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
824 DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
825 make_mux_on_demand(irn, cidom, cond, max_depth, &mux, positions, &muxes_made, ++cond_visited_nr);
/* Rewire each covered phi input to the freshly built mux. */
828 bitset_foreach(positions, pos)
829 set_irn_n(irn, (int) pos, mux);
834 * optimize the phi away. This can enable further runs of this
835 * function. Look at _can_move_to. phis cannot be moved there.
837 nw = optimize_in_place_2(irn);
/* Environment for the annotate_cond_info walk: an obstack for all
 * allocations plus the lists collecting cond infos and phi records. */
844 typedef struct _cond_walk_info_t {
845 struct obstack *obst;
846 struct list_head cond_info_head;
847 struct list_head phi_head;
/* Pre-walker: clear every node's link field so the post-walker can use
 * block links to cache cond_info_t pointers. */
851 static void annotate_cond_info_pre(ir_node *irn, void *data)
853 set_irn_link(irn, NULL);
/* Post-walker: for each data phi, compute (once per block) the set of Conds
 * reachable between the phi's block and its dominator, cache it on the
 * block's link field, and queue a phi_info_t record for later processing. */
856 static void annotate_cond_info_post(ir_node *irn, void *data)
858 cond_walk_info_t *cwi = data;
861 * Check, if the node is a phi
862 * we then compute a set of conds which are reachable from this
863 * phi's block up to its dominator.
864 * The set is attached to the blocks link field.
866 if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
867 ir_node *block = get_nodes_block(irn);
869 cond_info_t *ci = get_irn_link(block);
871 /* If the set is not yet computed, do it now. */
873 ci = obstack_alloc(cwi->obst, sizeof(*ci));
874 ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
877 INIT_LIST_HEAD(&ci->roots);
878 INIT_LIST_HEAD(&ci->list);
881 * Add this cond info to the list of all cond infos
882 * in this graph. This is just done to free the
883 * set easier afterwards (we save an irg_walk_graph).
/* BUGFIX: list_add takes (new, head); the arguments were swapped here,
 * which would have spliced the list head into itself instead of queuing
 * ci. Compare the correct call list_add(&pi->list, &cwi->phi_head) below. */
885 list_add(&ci->list, &cwi->cond_info_head);
887 DBG((dbg, LEVEL_2, "searching conds at %n\n", irn));
890 * Fill the set with conds we find on the way from
891 * the block to its dominator.
896 * If there were no suitable conds, delete the set
897 * immediately and reset the set pointer to NULL
899 if(set_count(ci->cond_set) == 0) {
900 del_set(ci->cond_set);
902 obstack_free(cwi->obst, ci);
908 DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
910 set_irn_link(block, ci);
913 phi_info_t *pi = obstack_alloc(cwi->obst, sizeof(*pi));
916 INIT_LIST_HEAD(&pi->list);
917 list_add(&pi->list, &cwi->phi_head);
/* walk_conds() callback: emit one VCG node per cond and one labelled edge
 * per masked_by relation into the FILE* passed via env. */
923 static void dump_conds(cond_t *cond, void *env)
928 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
929 cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
930 get_nodes_block(cond->cond));
932 for(i = 0; i < 2; ++i)
933 if(cond->cases[i].masked_by)
934 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
935 cond, cond->cases[i].masked_by, i);
/* Debug helper: dump all collected cond infos, their cond trees and the
 * phi records into "<entity>-conds.vcg" for visualization. */
938 static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
943 snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
945 if((f = fopen(buf, "wt")) != NULL) {
950 ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
951 list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
952 ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
953 list_for_each_entry(cond_t, cond, &ci->roots, list) {
954 walk_conds(cond, NULL, dump_conds, f);
955 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
959 list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
960 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
961 phi->irn, phi->irn, get_nodes_block(phi->irn));
962 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
/* Public entry point of the if-conversion pass: collect cond/phi info over
 * the graph, then try to replace each suitable phi by Mux nodes. */
968 void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
972 phi_info_t *phi_info;
973 cond_info_t *cond_info;
974 cond_walk_info_t cwi;
/* Fall back to the built-in defaults when the caller passes no parameters. */
976 opt_if_conv_info_t *p = params ? params : &default_info;
978 if(!get_opt_if_conversion())
984 INIT_LIST_HEAD(&cwi.cond_info_head);
985 INIT_LIST_HEAD(&cwi.phi_head);
987 /* Init the debug stuff. */
988 dbg = firm_dbg_register("firm.opt.ifconv");
990 firm_dbg_set_mask(dbg, LEVEL_1);
993 /* Ensure, that the dominators are computed. */
996 DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
997 get_entity_name(get_irg_entity(irg)), irg));
1000 * Collect information about the conds and put the phis on an obstack.
1001 * It is important that phi nodes which are 'higher' (with a
1002 * lower dfs pre order) are in front of the obstack. Since they are
1003 * possibly turned in to muxes this can enable the optimization
1006 irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
1009 vcg_dump_conds(irg, &cwi);
1012 /* Process each suitable phi found. */
1013 list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
1014 DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
1015 muxes_made += check_out_phi(phi_info, p);
/* Free the cond sets allocated during annotation. */
1018 list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
1019 del_set(cond_info->cond_set);
1022 DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
1024 obstack_free(&obst, NULL);