3 * Make Mux nodes from Conds where it is possible.
4 * @author Sebastian Hack
13 #include "irgraph_t.h"
29 #include "bitfiddle.h"
34 * Mux optimization routines.
38 static ir_node *local_optimize_mux(ir_node *mux)
42 ir_node *sel = get_Mux_sel(mux);
43 ir_node *cmp = skip_Proj(sel);
45 /* Optimize the children */
46 for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
47 ir_node *operand = get_irn_n(mux, i);
48 if(get_irn_op(operand) == op_Mux)
49 optimize_mux(operand);
52 /* If we have no cmp above the mux, get out. */
53 if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
55 pn_Cmp cc = get_Proj_proj(sel);
56 ir_mode *mode = get_irn_mode(mux);
57 ir_node *block = get_nodes_block(n);
58 ir_node *cmp_left = get_Cmp_left(cmp);
59 ir_node *cmp_right = get_Cmp_right(cmp);
60 ir_node *mux_true = get_Mux_true(mux);
61 ir_node *mux_false = get_Mux_false(mux);
64 * Check for comparisons with signed integers.
66 if(mode_is_int(mode) /* We need an integral mode */
67 && mode_is_signed(mode) /* which is signed */
68 && cc == Lt) { /* and have to compare for < */
71 * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
75 if(classify_Const(cmp_right) == CNST_NULL
76 && classify_Const(mux_true) == CNST_ALL_ONE
77 && classify_Const(mux_false) == CNST_NULL) {
79 ir_mode *u_mode = find_unsigned_mode(mode);
81 res = new_r_Shrs(current_ir_graph, block, cmp_left,
82 new_r_Const_long(current_ir_graph, block, u_mode,
83 get_mode_size_bits(mode) - 1),
88 * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
92 else if(classify_Const(cmp_left) == CNST_NULL
93 && classify_Const(mux_true) == CNST_ONE
94 && classify_Const(mux_false) == CNST_NULL) {
96 ir_mode *u_mode = find_unsigned_mode(mode);
98 res = new_r_Shr(current_ir_graph, block,
100 /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
101 new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
103 /* This is sizeof_bits(T) - 1 */
104 new_r_Const_long(current_ir_graph, block, u_mode,
105 get_mode_size_bits(mode) - 1),
116 * check, if a node is const and return its tarval or
117 * return a default tarval.
118 * @param cnst The node whose tarval to get.
119 * @param or The alternative tarval, if the node is no Const.
120 * @return The tarval of @p cnst, if the node is Const, @p otherwise.
122 static tarval *get_value_or(ir_node *cnst, tarval *or)
124 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
129 * Try to optimize nested muxes into a dis- or conjunction
131 * @param mux The parent mux, which has muxes as operands.
132 * @return The replacement node for this mux. If the optimization is
133 * successful, this might be an And or Or node, if not, its the mux
136 static ir_node *optimize_mux_chain(ir_node *mux)
141 ir_mode *mode = get_irn_mode(mux);
146 * If we have no mux, or its mode is not integer, we
149 if(get_irn_op(mux) != op_Mux || !mode_is_int(mode))
153 null = get_tarval_null(mode);
154 minus_one = tarval_sub(null, get_tarval_one(mode));
156 ops[0] = get_Mux_false(mux);
157 ops[1] = get_Mux_true(mux);
159 for(i = 0; i < 2; ++i) {
161 tarval *tva, *tvb, *tvd;
165 * A mux operand at the first position can be factored
166 * out, if the operands fulfill several conditions:
168 * mux(c1, mux(c2, a, b), d)
170 * This can be made into:
171 * 1) mux(c1, 0, d) | mux(c2, a, b)
172 * if a | d == d and b | d == d
174 * 2) mux(c1, -1, d) & mux(c2, a, b)
175 * if a & d == d and a & b == b
177 if(get_irn_op(ops[i]) == op_Mux) {
180 a = get_Mux_false(child_mux);
181 b = get_Mux_true(child_mux);
184 /* Try the or stuff */
185 tva = get_value_or(a, minus_one);
186 tvb = get_value_or(b, minus_one);
187 tvd = get_value_or(d, null);
189 if(tarval_cmp(tarval_or(tva, tvd), tvd) == pn_Cmp_Eq
190 && tarval_cmp(tarval_or(tvb, tvd), tvd) == pn_Cmp_Eq) {
192 ops[i] = new_Const(mode, null);
193 res = new_r_Or(current_ir_graph, get_nodes_block(mux),
194 mux, child_mux, mode);
198 /* If the or didn't go, try the and stuff */
199 tva = get_value_or(a, null);
200 tvb = get_value_or(b, null);
201 tvd = get_value_or(d, minus_one);
203 if(tarval_cmp(tarval_and(tva, tvd), tvd) == pn_Cmp_Eq
204 && tarval_cmp(tarval_and(tvb, tvd), tvd) == pn_Cmp_Eq) {
206 ops[i] = new_Const(mode, minus_one);
207 res = new_r_And(current_ir_graph, get_nodes_block(mux),
208 mux, child_mux, mode);
214 /* recursively optimize nested muxes. */
215 set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
216 set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
222 /***********************************************************
223 * The If conversion itself.
224 ***********************************************************/
229 static opt_if_conv_info_t default_info = {
233 /** The debugging module. */
234 static firm_dbg_module_t *dbg;
237 * A simple check for sde effects upton an opcode of a ir node.
238 * @param irn The ir node to check,
239 * @return 1 if the opcode itself may produce side effects, 0 if not.
241 static INLINE int has_side_effects(const ir_node *irn)
243 opcode opc = get_irn_opcode(irn);
248 return !mode_is_datab(get_irn_mode(irn));
252 * Decdies, if a given expression and its subexpressions
253 * (to certain, also given extent) can be moved to a block.
254 * @param expr The expression to examine.
255 * @param block The block where the expression should go.
256 * @param depth The current depth, passed recursively. Use 0 for
257 * non-recursive calls.
258 * @param max_depth The maximum depth to which the expression should be
261 static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
265 ir_node *expr_block = get_nodes_block(expr);
269 * If we are forced to look too deep into the expression,
270 * treat it like it could not be moved.
272 if(depth >= max_depth) {
278 * If the block of the expression dominates the specified
279 * destination block, it does not matter if the expression
280 * has side effects or anything else. It is executed on each
281 * path the destination block is reached.
283 if(block_dominates(expr_block, dest_block))
287 * We cannot move phis!
295 * This should be superflous and could be converted into a assertion.
296 * The destination block _must_ dominate the block of the expression,
297 * else the expression could be used without its definition.
299 if(!block_dominates(dest_block, expr_block)) {
305 * Surely, if the expression does not have a data mode, it is not
306 * movable. Perhaps onw should also test the floating property of
309 if(has_side_effects(expr)) {
315 * If the node looks alright so far, look at its operands and
316 * check them out. If one of them cannot be moved, this one
317 * cannot be moved either.
319 for(i = 0, n = get_irn_arity(expr); i < n; ++i) {
320 ir_node *op = get_irn_n(expr, i);
321 int new_depth = is_Proj(op) ? depth : depth + 1;
322 if(!_can_move_to(op, dest_block, new_depth, max_depth)) {
329 DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
335 * Convenience function for _can_move_to.
336 * Checks, if an expression can be moved to another block. The check can
337 * be limited to a expression depth meaning if we need to crawl in
338 * deeper into an expression than a given threshold to examine if
339 * it can be moved, the expression is rejected and the test returns
341 * @param expr The expression to check for.
342 * @param dest_block The destination block you want @p expr to be.
343 * @param max_depth The maximum depth @p expr should be investigated.
344 * @return 1, if the expression can be moved to the destination block,
347 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
349 return _can_move_to(expr, dest_block, 0, max_depth);
352 static void move_to(ir_node *expr, ir_node *dest_block)
355 ir_node *expr_block = get_nodes_block(expr);
358 * If we reached the dominator, we are done.
359 * We will never put code through the dominator
361 if(block_dominates(expr_block, dest_block))
364 for(i = 0, n = get_irn_arity(expr); i < n; ++i)
365 move_to(get_irn_n(expr, i), dest_block);
367 set_nodes_block(expr, dest_block);
370 static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
372 if(block_dominates(b1, b2))
374 else if(block_dominates(b2, b1))
379 for(p = b1; !block_dominates(p, b2); p = get_Block_idom(p));
386 * Information about a cond node.
388 typedef struct _cond_t {
389 ir_node *cond; /**< The cond node. */
390 struct list_head list; /**< List head which is used for queueing this cond
391 into the cond bunch it belongs to. */
393 unsigned totally_covers : 1;
394 struct _cond_t *link;
398 * Information about the both 'branches'
399 * (true and false), the cond creates.
402 int pos; /**< Number of the predecessor of the
403 phi block by which this branch is
404 reached. It is -1, if this branch is
405 only reached through another cond. */
407 struct _cond_t *masked_by; /**< If this cond's branch is only reached
408 through another cond, we store this
409 cond ir_node here. */
413 static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
418 return set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
422 typedef void (cond_walker_t)(cond_t *cond, void *env);
424 static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
425 long visited_nr, void *env)
429 if(cond->visited_nr >= visited_nr)
432 cond->visited_nr = visited_nr;
437 for(i = 0; i < 2; ++i) {
438 cond_t *c = cond->cases[i].masked_by;
441 _walk_conds(c, pre, post, visited_nr, env);
448 static long cond_visited_nr = 0;
450 static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
452 _walk_conds(cond, pre, post, ++cond_visited_nr, env);
455 static void link_conds(cond_t *cond, void *env)
457 cond_t **ptr = (cond_t **) env;
464 * Compare two conds for use in a firm set.
465 * Two cond_t's are equal, if they designate the same cond node.
467 * @param b Another one.
468 * @param size Not used.
469 * @return 0 (!) if they are equal, != 0 otherwise.
471 static int cond_cmp(const void *a, const void *b, size_t size)
475 return x->cond != y->cond;
479 * Information about conds which can be made to muxes.
480 * Instances of this struct are attached to the link field of
481 * blocks in which phis are located.
483 typedef struct _cond_info_t {
484 struct list_head list; /**< Used to list all of these structs per method. */
486 struct list_head roots; /**< A list of non-depending conds. Two conds are
487 independent, if yot can not reach the one from the
488 other (all conds in this list have to dominate the
489 block this struct is attached to. */
491 ir_node *first_phi; /**< The first phi node this cond info was made for. */
492 set *cond_set; /**< A set of all dominating reachable conds. */
498 static void _find_conds(ir_node *irn, long visited_nr,
499 ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
502 int saw_switch_cond = 0;
504 block = get_nodes_block(irn);
507 * Only check this block if it is dominated by the specified
508 * dominator or it has not been visited yet.
510 if(block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
511 cond_t *res = masked_by;
514 /* check, if we're on a ProjX
516 * Further, the ProjX/Cond block must dominate the base block
517 * (the block with the phi in it), otherwise, the Cond
518 * is not affecting the phi so that a mux can be inserted.
520 if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
522 int proj = get_Proj_proj(irn);
523 ir_node *cond = get_Proj_pred(irn);
525 /* true, if the mode is a mode_b cond _NO_ switch cond */
526 int is_modeb_cond = get_irn_opcode(cond) == iro_Cond
527 && get_irn_mode(get_Cond_selector(cond)) == mode_b;
529 saw_switch_cond = !is_modeb_cond;
531 /* Check, if the pred of the proj is a Cond
532 * with a Projb as selector.
537 memset(&c, 0, sizeof(c));
543 /* get or insert the cond info into the set. */
544 res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
547 * If this cond is already masked by the masked_by cond
548 * return immediately, since we don't have anything to add.
550 if(masked_by && res->cases[proj].masked_by == masked_by)
555 list_add(&res->list, &ci->roots);
559 * Set masked by (either NULL or another cond node.
560 * If this cond is truly masked by another one, set
561 * the position of the actually investigated branch
562 * to -1. Since the cond is masked by another one,
563 * there could be more ways from the start block
564 * to this branch, so we choose -1.
566 res->cases[proj].masked_by = masked_by;
569 res->cases[proj].pos = pos;
572 * Since the masked_by nodes masks a cond, remove it from the
573 * root list of the conf trees.
576 DBG((dbg, LEVEL_2, "%Ddeleting from list: %n, res: %n\n", depth, masked_by->cond, res->cond));
577 assert(res->cases[proj].pos < 0);
578 list_del_init(&masked_by->list);
581 DBG((dbg, LEVEL_2, "%D%n (%s branch) "
582 "for pos %d in block %n reached by %n\n",
583 depth, cond, proj ? "true" : "false", pos,
584 block, masked_by ? masked_by->cond : NULL));
588 if(get_Block_block_visited(block) < visited_nr && !saw_switch_cond) {
590 set_Block_block_visited(block, visited_nr);
592 /* Search recursively from this cond. */
593 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
594 ir_node *pred = get_irn_n(block, i);
597 * If the depth is 0 (the first recursion), we set the pos to
598 * the current viewed predecessor, else we adopt the position
599 * as given by the caller. We also increase the depth for the
600 * recursively called functions.
602 _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
610 * A convenience function for _find_conds.
611 * It sets some parameters needed for recursion to appropriate start
612 * values. Always use this function.
613 * @param irn The node to start looking for conds from. This might
614 * be the phi node we are investigating.
615 * @param conds The set to record the found conds in.
617 static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
621 ir_node *block = get_nodes_block(irn);
622 ir_node *dom = get_Block_idom(block);
625 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
626 ir_node *pred = get_irn_n(block, i);
628 inc_irg_block_visited(current_ir_graph);
629 visited_nr = get_irg_block_visited(current_ir_graph);
630 set_Block_block_visited(block, visited_nr);
632 DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
633 _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
638 * Make the mux for a given cond.
639 * @param phi The phi node which shall be replaced by a mux.
640 * @param dom The block where the muxes shall be placed.
641 * @param cond The cond information.
642 * @return The mux node made for this cond.
644 static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
645 int max_depth, ir_node **mux, bitset_t *positions, int *muxes_made, long visited_nr)
648 ir_node *projb = get_Cond_selector(cond->cond);
649 ir_node *bl = get_nodes_block(cond->cond);
650 ir_node *operands[2];
653 cond->visited_nr = visited_nr;
654 DBG((dbg, LEVEL_2, "%n\n", cond->cond));
655 for(i = 0; i < 2; ++i) {
656 cond_t *masked_by = cond->cases[i].masked_by;
657 int pos = cond->cases[i].pos;
663 * If this cond branch is masked by another cond, make the mux
664 * for that cond first, since the mux for this cond takes
669 DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
670 if(masked_by->visited_nr < visited_nr)
671 operands[i] = make_mux_on_demand(phi, dom, masked_by, max_depth, mux, positions, muxes_made, visited_nr);
675 * If this cond branch is not masked by another cond, take
676 * the corresponding phi operand as an operand to the mux.
679 operands[i] = get_irn_n(phi, pos);
685 * Move the operands to the dominator block if the cond
686 * made sense. Some conds found are not suitable for making a mux
687 * out of them, since one of their branches cannot be reached from
688 * the phi block. In that case we do not make a mux and return NULL.
690 if(operands[0] && operands[1]
691 && can_move_to(operands[0], bl, max_depth)
692 && can_move_to(operands[1], bl, max_depth)) {
694 move_to(operands[0], bl);
695 move_to(operands[1], bl);
698 *mux = new_r_Mux(current_ir_graph, bl, projb,
699 operands[0], operands[1], get_irn_mode(operands[0]));
703 DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
704 *mux, projb, operands[0], operands[1], set[0], set[1]));
706 for(i = 0; i < 2; ++i)
708 bitset_set(positions, set[i]);
714 typedef struct _phi_info_t {
715 struct list_head list;
716 cond_info_t *cond_info;
722 * Examine a phi node if it can be replaced by some muxes.
723 * @param irn A phi node.
724 * @param info Parameters for the if conversion algorithm.
726 static int check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
728 int max_depth = info->max_depth;
729 ir_node *irn = phi_info->irn;
731 cond_info_t *cond_info = phi_info->cond_info;
737 block = get_nodes_block(irn);
738 arity = get_irn_arity(irn);
739 positions = bitset_alloca(arity);
742 assert(get_irn_arity(irn) == get_irn_arity(block));
745 DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
747 list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
748 ir_node *cidom = block;
750 cond_t *p, *head = NULL;
753 bitset_clear_all(positions);
755 DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
757 * Link all conds which are in the subtree of
758 * the current cond in the list together.
760 walk_conds(cond, link_conds, NULL, &head);
763 for(p = head; p; p = p->link) {
764 for(i = 0; i < 2; ++i) {
765 int pos = p->cases[i].pos;
767 cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
771 DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
772 make_mux_on_demand(irn, cidom, cond, max_depth, &mux, positions, &muxes_made, ++cond_visited_nr);
775 bitset_foreach(positions, pos)
776 set_irn_n(irn, (int) pos, mux);
781 * optimize the phi away. This can anable further runs of this
782 * function. Look at _can_move. phis cannot be moved there.
784 nw = optimize_in_place_2(irn);
791 typedef struct _cond_walk_info_t {
792 struct obstack *obst;
793 struct list_head cond_info_head;
794 struct list_head phi_head;
798 static void annotate_cond_info_pre(ir_node *irn, void *data)
800 set_irn_link(irn, NULL);
803 static void annotate_cond_info_post(ir_node *irn, void *data)
805 cond_walk_info_t *cwi = data;
808 * Check, if the node is a phi
809 * we then compute a set of conds which are reachable from this
810 * phi's block up to its dominator.
811 * The set is attached to the blocks link field.
813 if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
814 ir_node *block = get_nodes_block(irn);
816 cond_info_t *ci = get_irn_link(block);
818 /* If the set is not yet computed, do it now. */
820 ci = obstack_alloc(cwi->obst, sizeof(*ci));
821 ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
824 INIT_LIST_HEAD(&ci->roots);
825 INIT_LIST_HEAD(&ci->list);
828 * Add this cond info to the list of all cond infos
829 * in this graph. This is just done to free the
830 * set easier afterwards (we save an irg_walk_graph).
832 list_add(&cwi->cond_info_head, &ci->list);
834 DBG((dbg, LEVEL_2, "searching conds at %n(%n)\n", irn, block));
837 * Fill the set with conds we find on the way from
838 * the block to its dominator.
843 * If there where no suitable conds, delete the set
844 * immediately and reset the set pointer to NULL
846 if(set_count(ci->cond_set) == 0) {
847 del_set(ci->cond_set);
849 obstack_free(cwi->obst, ci);
855 DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
857 set_irn_link(block, ci);
860 phi_info_t *pi = obstack_alloc(cwi->obst, sizeof(*pi));
863 INIT_LIST_HEAD(&pi->list);
864 list_add(&pi->list, &cwi->phi_head);
870 static void dump_conds(cond_t *cond, void *env)
875 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
876 cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
877 get_nodes_block(cond->cond));
879 for(i = 0; i < 2; ++i)
880 if(cond->cases[i].masked_by)
881 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
882 cond, cond->cases[i].masked_by, i);
885 static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
890 snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
892 if((f = fopen(buf, "wt")) != NULL) {
897 ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
898 list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
899 ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
900 list_for_each_entry(cond_t, cond, &ci->roots, list) {
901 walk_conds(cond, NULL, dump_conds, f);
902 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
906 list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
907 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
908 phi->irn, phi->irn, get_nodes_block(phi->irn));
909 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
915 void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
919 phi_info_t *phi_info;
920 cond_info_t *cond_info;
921 cond_walk_info_t cwi;
923 opt_if_conv_info_t *p = params ? params : &default_info;
925 if(!get_opt_if_conversion())
931 INIT_LIST_HEAD(&cwi.cond_info_head);
932 INIT_LIST_HEAD(&cwi.phi_head);
934 /* Init the debug stuff. */
935 dbg = firm_dbg_register("firm.opt.ifconv");
937 firm_dbg_set_mask(dbg, LEVEL_1 | LEVEL_2 | LEVEL_3);
940 /* Ensure, that the dominators are computed. */
943 DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
944 get_entity_name(get_irg_entity(irg)), irg));
947 * Collect information about the conds pu the phis on an obstack.
948 * It is important that phi nodes which are 'higher' (with a
949 * lower dfs pre order) are in front of the obstack. Since they are
950 * possibly turned in to muxes this can enable the optimization
953 irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
956 vcg_dump_conds(irg, &cwi);
959 /* Process each suitable phi found. */
960 list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
961 DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
962 muxes_made += check_out_phi(phi_info, p);
965 list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
966 del_set(cond_info->cond_set);
969 DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
971 obstack_free(&obst, NULL);