3 * File name: ir/opt/ifconv.c
4 * Purpose: If conversion
5 * Author: Sebastian Hack.
8 * Copyright: (c) 1998-2005 Universität Karlsruhe
9 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
15 * Make Mux nodes from Conds where it is possible.
16 * @author Sebastian Hack
36 #include "irgraph_t.h"
52 #include "bitfiddle.h"
59 * check, if a node is const and return its tarval or
60 * return a default tarval.
61 * @param cnst The node whose tarval to get.
62 * @param or The alternative tarval, if the node is no Const.
63 * @return The tarval of @p cnst, if the node is Const, @p or otherwise.
65 static tarval *get_value_or(ir_node *cnst, tarval *or)
67 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
72 * Try to optimize nested muxes into a dis- or conjunction
74 * @param mux The parent mux, which has muxes as operands.
75 * @return The replacement node for this mux. If the optimization is
76 * successful, this might be an And or Or node, if not, it's the mux
79 static ir_node *optimize_mux_chain(ir_node *mux)
84 ir_mode *mode = get_irn_mode(mux);
/* The transformation below only applies to integer-mode Muxes. */
89 * If we have no mux, or its mode is not integer, we
92 if(get_irn_op(mux) != op_Mux || !mode_is_int(mode))
/* null == 0 and minus_one == all-bits-set in the Mux's mode; these are the
 * neutral elements used when factoring out a child Mux via Or resp. And. */
96 null = get_tarval_null(mode);
97 minus_one = tarval_sub(null, get_tarval_one(mode));
99 ops[0] = get_Mux_false(mux);
100 ops[1] = get_Mux_true(mux);
/* Inspect both operands of the parent Mux for a nested Mux to factor out. */
102 for(i = 0; i < 2; ++i) {
104 tarval *tva, *tvb, *tvd;
108 * A mux operand at the first position can be factored
109 * out, if the operands fulfill several conditions:
111 * mux(c1, mux(c2, a, b), d)
113 * This can be made into:
114 * 1) mux(c1, 0, d) | mux(c2, a, b)
115 * if a | d == d and b | d == d
117 * 2) mux(c1, -1, d) & mux(c2, a, b)
118 * if a & d == d and b & d == d
120 if(get_irn_op(ops[i]) == op_Mux) {
123 a = get_Mux_false(child_mux);
124 b = get_Mux_true(child_mux);
/* Defaults chosen so non-Const operands make the test fail conservatively:
 * -1 for a/b (forces the Or to differ from d) and 0 for d. */
127 /* Try the or stuff */
128 tva = get_value_or(a, minus_one);
129 tvb = get_value_or(b, minus_one);
130 tvd = get_value_or(d, null);
132 if(tarval_cmp(tarval_or(tva, tvd), tvd) == pn_Cmp_Eq
133 && tarval_cmp(tarval_or(tvb, tvd), tvd) == pn_Cmp_Eq) {
/* Replace the child Mux by the Or-neutral constant 0 and Or the two Muxes. */
135 ops[i] = new_Const(mode, null);
136 res = new_r_Or(current_ir_graph, get_nodes_block(mux),
137 mux, child_mux, mode);
141 /* If the or didn't go, try the and stuff */
142 tva = get_value_or(a, null);
143 tvb = get_value_or(b, null);
144 tvd = get_value_or(d, minus_one);
146 if(tarval_cmp(tarval_and(tva, tvd), tvd) == pn_Cmp_Eq
147 && tarval_cmp(tarval_and(tvb, tvd), tvd) == pn_Cmp_Eq) {
/* Replace the child Mux by the And-neutral constant -1 and And the two Muxes. */
149 ops[i] = new_Const(mode, minus_one);
150 res = new_r_And(current_ir_graph, get_nodes_block(mux),
151 mux, child_mux, mode);
157 /* recursively optimize nested muxes. */
/* NOTE(review): inputs 1 and 2 appear to be the Mux's false/true operands
 * (input 0 being the selector) -- confirm against the Mux node layout. */
158 set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
159 set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
165 /***********************************************************
166 * The If conversion itself.
167 ***********************************************************/
169 /** allow every Mux to be created. */
170 static int default_allow_mux(ir_node *sel, ir_node *false_res, ir_node *true_res) {
/** Default options used when the caller passes no opt_if_conv_info_t. */
177 static const opt_if_conv_info_t default_info = {
182 /** The debugging module. */
183 static firm_dbg_module_t *dbg;
186 * A simple check for side effects up to an opcode of an ir node.
187 * @param irn The ir node to check,
188 * @return 1 if the opcode itself may produce side effects, 0 if not.
190 static INLINE int has_side_effects(const ir_node *irn)
192 ir_op *op = get_irn_op(irn);
/* Nodes without a data/datab mode (e.g. memory or control flow results)
 * are conservatively treated as having side effects. */
197 return !mode_is_datab(get_irn_mode(irn));
201 * Possible failure reasons
/* The values alias the public IF_RESULT_* codes so they can be passed
 * straight to hook_if_conversion().
 * NOTE(review): TO_DEEP is misspelled (should be TOO_DEEP). */
203 enum failure_reason_t {
204 SUCCESS = IF_RESULT_SUCCESS,
205 TO_DEEP = IF_RESULT_TOO_DEEP,
206 SIDE_EFFECTS = IF_RESULT_SIDE_EFFECT,
207 PHI_FOUND = IF_RESULT_SIDE_EFFECT_PHI,
208 DENIED = IF_RESULT_DENIED
212 * Decides, if a given expression and its subexpressions
213 * (to a certain, also given extent) can be moved to a block.
215 * @param expr The expression to examine.
216 * @param dest_block The block where the expression should go.
217 * @param depth The current depth, passed recursively. Use 0 for
218 * non-recursive calls.
219 * @param info The options for creating Mux nodes.
222 * @return a failure reason
224 static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, const opt_if_conv_info_t *info)
228 ir_node *expr_block = get_nodes_block(expr);
231 * If we are forced to look too deep into the expression,
232 * treat it like it could not be moved.
234 if(depth >= info->max_depth) {
240 * If the block of the expression dominates the specified
241 * destination block, it does not matter if the expression
242 * has side effects or anything else. It is executed on each
243 * path the destination block is reached.
245 if (block_dominates(expr_block, dest_block))
249 * We cannot move phis!
257 * This should be superfluous and could be converted into an assertion.
258 * The destination block _must_ dominate the block of the expression,
259 * else the expression could be used without its definition.
261 if (! block_dominates(dest_block, expr_block)) {
262 res = IF_RESULT_SIDE_EFFECT;
267 * Surely, if the expression does not have a data mode, it is not
268 * movable. Perhaps one should also test the floating property of
271 if (has_side_effects(expr)) {
272 res = IF_RESULT_SIDE_EFFECT;
277 * If the node looks alright so far, look at its operands and
278 * check them out. If one of them cannot be moved, this one
279 * cannot be moved either.
281 for (i = 0, n = get_irn_arity(expr); i < n; ++i) {
282 ir_node *op = get_irn_n(expr, i);
/* Proj nodes are "free" -- they do not count towards the depth limit. */
283 int new_depth = is_Proj(op) ? depth : depth + 1;
285 res = _can_move_to(op, dest_block, new_depth, info);
292 DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
298 * Convenience function for _can_move_to.
299 * Checks, if an expression can be moved to another block. The check can
300 * be limited to an expression depth meaning if we need to crawl
301 * deeper into an expression than a given threshold to examine if
302 * it can be moved, the expression is rejected and the test returns
305 * @param expr The expression to check for.
306 * @param dest_block The destination block you want @p expr to be.
307 * @param info The options for creating Mux nodes.
309 * @return a failure reason
311 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, const opt_if_conv_info_t *info)
313 return _can_move_to(expr, dest_block, 0, info);
317 * move a DAG given by a root node expr into a new block
319 * @param expr the root of a dag
320 * @param dest_block the destination block
322 static void move_to(ir_node *expr, ir_node *dest_block)
325 ir_node *expr_block = get_nodes_block(expr);
328 * If we reached the dominator, we are done.
329 * We will never put code through the dominator
331 if (block_dominates(expr_block, dest_block))
/* Post-order: move all operands first, then the node itself. */
334 for (i = 0, n = get_irn_arity(expr); i < n; ++i)
335 move_to(get_irn_n(expr, i), dest_block);
337 set_nodes_block(expr, dest_block);
341 * return the common dominator of two blocks
/* Fast paths: if one block dominates the other, it is the common dominator. */
343 static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
345 if(block_dominates(b1, b2))
347 else if(block_dominates(b2, b1))
/* Otherwise walk up b1's immediate-dominator chain until a block is
 * found that also dominates b2. */
352 for (p = get_Block_idom(b1); !block_dominates(p, b2); p = get_Block_idom(p));
358 * Information about a cond node.
360 typedef struct _cond_t {
361 ir_node *cond; /**< The cond node. */
362 struct list_head list; /**< List head which is used for queuing this cond
363 into the cond bunch it belongs to. */
/* NOTE(review): semantics of totally_covers are not visible in this
 * excerpt -- presumably set when the cond's branches cover all phi
 * predecessors; verify against the elided code. */
365 unsigned totally_covers : 1;
366 struct _cond_t *link; /**< Used by link_conds() to chain all conds of one subtree. */
370 * Information about the both 'branches'
371 * (true and false), the cond creates.
374 int pos; /**< Number of the predecessor of the
375 phi block by which this branch is
376 reached. It is -1, if this branch is
377 only reached through another cond. */
379 struct _cond_t *masked_by; /**< If this cond's branch is only reached
380 through another cond, we store this
381 cond ir_node here. */
386 * retrieve the conditional information from a Cond node
/* Looks up the cond_t keyed by the Cond node pointer; templ.cond is
 * presumably set to irn on an elided line -- cond_cmp() compares only
 * the cond pointer, so the other template fields do not matter. */
388 static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
393 return set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
/** Callback type for walking a cond tree (see _walk_conds/walk_conds). */
397 typedef void (cond_walker_t)(cond_t *cond, void *env);
/* Depth-first walk over a cond tree (following the masked_by edges of
 * both branches), calling @p pre before and @p post after the recursion.
 * visited_nr guards against visiting a cond twice. */
399 static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
400 long visited_nr, void *env)
404 if(cond->visited_nr >= visited_nr)
407 cond->visited_nr = visited_nr;
412 for(i = 0; i < 2; ++i) {
413 cond_t *c = cond->cases[i].masked_by;
416 _walk_conds(c, pre, post, visited_nr, env);
/** Global visit counter for cond tree walks; bumped on every walk. */
423 static long cond_visited_nr = 0;
/** Convenience wrapper around _walk_conds using a fresh visited number. */
425 static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
427 _walk_conds(cond, pre, post, ++cond_visited_nr, env);
/* Walker callback: chains the visited conds into a singly linked list via
 * cond->link; env points to the list head pointer (see check_out_phi).
 * The actual link assignment is on lines elided from this excerpt. */
430 static void link_conds(cond_t *cond, void *env)
432 cond_t **ptr = (cond_t **) env;
439 * Compare two conds for use in a firm set.
440 * Two cond_t's are equal, if they designate the same cond node.
441 * @param a One cond_t.
442 * @param b Another one.
443 * @param size Not used.
444 * @return 0 (!) if they are equal, != 0 otherwise.
/* Identity on the cond node pointer only; all other fields are ignored. */
446 static int cond_cmp(const void *a, const void *b, size_t size)
450 return x->cond != y->cond;
454 * Information about conds which can be made to muxes.
455 * Instances of this struct are attached to the link field of
456 * blocks in which phis are located.
458 typedef struct _cond_info_t {
459 struct list_head list; /**< Used to list all of these structs per class. */
461 struct list_head roots; /**< A list of non-depending Conds. Two Conds are
462 independent, if it's not possible to reach one from the
463 other (all Conds in this list have to dominate the
464 block this struct is attached to). */
466 ir_node *first_phi; /**< The first phi node this cond info was made for. */
467 set *cond_set; /**< A set of all dominating reachable Conds. */
/* Recursively search, starting at irn's block predecessors, for mode_b
 * Conds that dominate the phi block and record them (with masking
 * relations between nested conds) in ci->cond_set / ci->roots. */
473 static void _find_conds(ir_node *irn, unsigned long visited_nr,
474 ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
477 int saw_select_cond = 0;
479 block = get_nodes_block(irn);
482 * Only check this block if it is dominated by the specified
483 * dominator or it has not been visited yet.
485 if (block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
486 cond_t *res = masked_by;
489 /* check, if we're on a ProjX
491 * Further, the ProjX/Cond block must dominate the base block
492 * (the block with the phi in it), otherwise, the Cond
493 * is not affecting the phi so that a mux can be inserted.
495 if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
497 int proj = get_Proj_proj(irn);
498 ir_node *cond = get_Proj_pred(irn);
500 /* true, if the mode is a mode_b cond _NO_ switch cond */
501 int is_modeb_cond = get_irn_opcode(cond) == iro_Cond
502 && get_irn_mode(get_Cond_selector(cond)) == mode_b;
/* A switch Cond (non-mode_b selector) blocks the search below. */
504 saw_select_cond = !is_modeb_cond;
506 /* Check, if the pred of the proj is a Cond
507 * with a Projb as selector.
/* c is a template keyed by the cond pointer; c.cond is presumably set
 * on an elided line before the insert. */
512 memset(&c, 0, sizeof(c));
518 /* get or insert the cond info into the set. */
519 res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
522 * If this cond is already masked by the masked_by cond
523 * return immediately, since we don't have anything to add.
525 if(masked_by && res->cases[proj].masked_by == masked_by)
/* A cond not masked by another one is a root of a cond tree. */
530 list_add(&res->list, &ci->roots);
534 * Set masked by (either NULL or another cond node).
535 * If this cond is truly masked by another one, set
536 * the position of the actually investigated branch
537 * to -1. Since the cond is masked by another one,
538 * there could be more ways from the start block
539 * to this branch, so we choose -1.
541 res->cases[proj].masked_by = masked_by;
544 res->cases[proj].pos = pos;
547 * Since the masked_by node masks a cond, remove it from the
548 * root list of the cond trees.
551 assert(res->cases[proj].pos < 0);
552 list_del_init(&masked_by->list);
555 DBG((dbg, LEVEL_2, "%D%n (%s branch) "
556 "for pos %d in block %n reached by %n\n",
557 depth, cond, proj ? "true" : "false", pos,
558 block, masked_by ? masked_by->cond : NULL));
562 if(get_Block_block_visited(block) < visited_nr && !saw_select_cond) {
564 set_Block_block_visited(block, visited_nr);
566 /* Search recursively from this cond. */
567 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
568 ir_node *pred = get_irn_n(block, i);
571 * If the depth is 0 (the first recursion), we set the pos to
572 * the current viewed predecessor, else we adopt the position
573 * as given by the caller. We also increase the depth for the
574 * recursively called functions.
576 _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
584 * A convenience function for _find_conds.
585 * It sets some parameters needed for recursion to appropriate start
586 * values. Always use this function.
588 * @param irn The node to start looking for Conds from. This might
589 * be the phi node we are investigating.
590 * @param conds The set to record the found Conds in.
592 static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
595 unsigned long visited_nr;
596 ir_node *block = get_nodes_block(irn);
597 ir_node *dom = get_Block_idom(block);
/* Start one search per predecessor of the phi block; each search gets a
 * fresh block-visited number so the predecessors are explored independently. */
599 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
600 ir_node *pred = get_irn_n(block, i);
602 inc_irg_block_visited(current_ir_graph);
603 visited_nr = get_irg_block_visited(current_ir_graph);
604 set_Block_block_visited(block, visited_nr);
606 DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
607 _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
612 * Make the mux for a given cond.
614 * @param phi The phi node which shall be replaced by a mux.
615 * @param dom The block where the muxes shall be placed.
616 * @param cond The cond information.
617 * @param info The options for creating Mux nodes.
618 * @return The mux node made for this cond.
620 static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
621 const opt_if_conv_info_t *info, ir_node **mux, bitset_t *positions,
622 int *muxes_made, long visited_nr)
625 ir_node *projb = get_Cond_selector(cond->cond);
626 ir_node *bl = get_nodes_block(cond->cond);
627 ir_node *operands[2];
630 cond->visited_nr = visited_nr;
631 DBG((dbg, LEVEL_2, "%n\n", cond->cond));
/* Determine the two Mux operands: either the result of a nested Mux
 * (for a branch masked by another cond) or a phi operand. */
632 for(i = 0; i < 2; ++i) {
633 cond_t *masked_by = cond->cases[i].masked_by;
634 int pos = cond->cases[i].pos;
640 * If this Cond branch is masked by another cond, make the mux
641 * for that Cond first, since the Mux for this cond takes
646 DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
647 if(masked_by->visited_nr < visited_nr)
648 operands[i] = make_mux_on_demand(phi, dom, masked_by, info, mux, positions, muxes_made, visited_nr);
652 * If this cond branch is not masked by another cond, take
653 * the corresponding phi operand as an operand to the mux.
656 operands[i] = get_irn_n(phi, pos);
662 * Move the operands to the dominator block if the cond
663 * made sense. Some Conds found are not suitable for making a mux
664 * out of them, since one of their branches cannot be reached from
665 * the phi block. In that case we do not make a mux and return NULL.
667 if(operands[0] && operands[1]) {
668 if (operands[0] == operands[1]) {
669 /* there is no gain in using mux in this case, as
670 it will be optimized away. We will NOT move the
671 content of the blocks either
/* NOTE(review): set[] appears to hold the phi-predecessor positions
 * covered by this cond's branches; its assignment is on elided lines. */
673 for (i = 0; i < 2; ++i)
675 bitset_set(positions, set[i]);
/* Both operands must be legally movable into the Cond's block. */
681 can_move[0] = can_move_to(operands[0], bl, info);
682 can_move[1] = can_move_to(operands[1], bl, info);
684 if (can_move[0] == SUCCESS && can_move[1] == SUCCESS) {
/* Give the (user-supplied) callback a veto before building the Mux. */
685 if (info->allow_mux(projb, operands[0], operands[1])) {
686 move_to(operands[0], bl);
687 move_to(operands[1], bl);
690 *mux = new_r_Mux(current_ir_graph, bl, projb,
691 operands[0], operands[1], get_irn_mode(operands[0]));
695 DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
696 *mux, projb, operands[0], operands[1], set[0], set[1]));
698 for(i = 0; i < 2; ++i)
700 bitset_set(positions, set[i]);
702 /* we have done one */
703 hook_if_conversion(current_ir_graph, phi, set[i], *mux, IF_RESULT_SUCCESS);
707 hook_if_conversion(current_ir_graph, phi, set[i], *mux, IF_RESULT_DENIED);
/* Report the precise failure reason(s) to the statistics hook. */
711 if(can_move[0] != SUCCESS)
712 hook_if_conversion(current_ir_graph, phi, set[0], NULL, can_move[0]);
713 if(can_move[1] != SUCCESS)
714 hook_if_conversion(current_ir_graph, phi, set[1], NULL, can_move[1]);
719 hook_if_conversion(current_ir_graph, phi, set[0], NULL, IF_RESULT_BAD_CF);
721 hook_if_conversion(current_ir_graph, phi, set[1], NULL, IF_RESULT_BAD_CF);
/** Per-phi work item, queued on cond_walk_info_t::phi_head. */
727 typedef struct _phi_info_t {
728 struct list_head list; /**< Queues this record in the phi list. */
729 cond_info_t *cond_info; /**< The cond information of the phi's block. */
735 * Examine a phi node if it can be replaced by some muxes.
736 * @param irn A phi node.
737 * @param info Parameters for the if conversion algorithm.
739 static int check_out_phi(phi_info_t *phi_info, const opt_if_conv_info_t *info)
741 ir_node *irn = phi_info->irn;
743 cond_info_t *cond_info = phi_info->cond_info;
749 block = get_nodes_block(irn);
750 arity = get_irn_arity(irn);
/* One bit per phi operand; set when that operand got replaced by a mux. */
751 positions = bitset_alloca(arity);
754 assert(get_irn_arity(irn) == get_irn_arity(block));
757 DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
/* Handle each independent cond tree rooted at this phi's block. */
759 list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
760 ir_node *cidom = block;
762 cond_t *p, *head = NULL;
765 bitset_clear_all(positions);
767 DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
769 * Link all conds which are in the subtree of
770 * the current cond in the list together.
772 walk_conds(cond, link_conds, NULL, &head);
/* Compute the common dominator of all blocks feeding this cond tree. */
775 for(p = head; p; p = p->link) {
776 for(i = 0; i < 2; ++i) {
777 int pos = p->cases[i].pos;
779 cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
783 DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
784 make_mux_on_demand(irn, cidom, cond, info, &mux, positions, &muxes_made, ++cond_visited_nr);
/* Rewire every covered phi operand to the freshly built mux. */
787 bitset_foreach(positions, pos)
788 set_irn_n(irn, (int) pos, mux);
793 * optimize the phi away. This can enable further runs of this
794 * function. Look at _can_move. phis cannot be moved there.
796 nw = optimize_in_place_2(irn);
/** Shared state for the annotate_cond_info_* graph walk. */
803 typedef struct _cond_walk_info_t {
804 struct obstack *obst; /**< Obstack all cond_info_t/phi_info_t records live on. */
805 struct list_head cond_info_head; /**< All cond infos made during the walk. */
806 struct list_head phi_head; /**< All phi candidates found during the walk. */
/* Pre-walker: clear the link field of every node so the post-walker can
 * use it to cache a block's cond_info_t. */
810 static void annotate_cond_info_pre(ir_node *irn, void *data)
812 set_irn_link(irn, NULL);
/* Post-walker: for each datab-mode phi, compute (once per block) the set
 * of Conds reachable between the phi's block and its dominator, cache the
 * result on the block's link field, and queue the phi for processing. */
815 static void annotate_cond_info_post(ir_node *irn, void *data)
817 cond_walk_info_t *cwi = data;
820 * Check, if the node is a phi
821 * we then compute a set of conds which are reachable from this
822 * phi's block up to its dominator.
823 * The set is attached to the blocks link field.
825 if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
826 ir_node *block = get_nodes_block(irn);
828 cond_info_t *ci = get_irn_link(block);
830 /* If the set is not yet computed, do it now. */
832 ci = obstack_alloc(cwi->obst, sizeof(*ci));
833 ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
836 INIT_LIST_HEAD(&ci->roots);
837 INIT_LIST_HEAD(&ci->list);
840 * Add this cond info to the list of all cond infos
841 * in this graph. This is just done to free the
842 * set easier afterwards (we save an irg_walk_graph).
/* NOTE(review): argument order looks reversed relative to the
 * list_add(new, head) convention used elsewhere in this file (see the
 * ci->roots and cwi->phi_head insertions). Re-inserting the already
 * linked cond_info_head node for every ci would corrupt the list --
 * verify against the project's list.h and fix if confirmed. */
844 list_add(&cwi->cond_info_head, &ci->list);
846 DBG((dbg, LEVEL_2, "searching conds at %n\n", irn));
849 * Fill the set with conds we find on the way from
850 * the block to its dominator.
855 * If there were no suitable conds, delete the set
856 * immediately and reset the set pointer to NULL
858 if(set_count(ci->cond_set) == 0) {
859 del_set(ci->cond_set);
861 obstack_free(cwi->obst, ci);
867 DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
/* Cache the cond info on the block so sibling phis reuse it. */
869 set_irn_link(block, ci);
872 phi_info_t *pi = obstack_alloc(cwi->obst, sizeof(*pi));
875 INIT_LIST_HEAD(&pi->list);
876 list_add(&pi->list, &cwi->phi_head);
/* walk_conds() post-callback: emit one VCG node for the cond and one edge
 * per masked_by relation. env is the output FILE*. */
882 static void dump_conds(cond_t *cond, void *env)
887 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
888 cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
889 get_nodes_block(cond->cond));
891 for(i = 0; i < 2; ++i)
892 if(cond->cases[i].masked_by)
893 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
894 cond, cond->cases[i].masked_by, i);
/* Debug helper: dump all collected cond infos, cond trees and phi
 * candidates of the graph as a VCG file named "<entity>-conds.vcg". */
897 static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
902 snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
904 if((f = fopen(buf, "wt")) != NULL) {
909 ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
910 list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
911 ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
912 list_for_each_entry(cond_t, cond, &ci->roots, list) {
913 walk_conds(cond, NULL, dump_conds, f);
914 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
918 list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
919 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
920 phi->irn, phi->irn, get_nodes_block(phi->irn));
921 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
/* Public entry point of the if-conversion pass: collect Conds and phi
 * candidates over the graph, then try to turn each phi into Mux nodes. */
927 void opt_if_conv(ir_graph *irg, const opt_if_conv_info_t *params)
931 phi_info_t *phi_info;
932 cond_info_t *cond_info;
933 cond_walk_info_t cwi;
935 opt_if_conv_info_t p;
937 if(!get_opt_if_conversion())
940 /* get the parameters */
942 memcpy(&p, params, sizeof(p));
944 memcpy(&p, &default_info, sizeof(p));
/* Fall back to the default callback if the caller supplied none. */
947 p.allow_mux = default_info.allow_mux;
952 INIT_LIST_HEAD(&cwi.cond_info_head);
953 INIT_LIST_HEAD(&cwi.phi_head);
955 /* Init the debug stuff. */
956 dbg = firm_dbg_register("firm.opt.ifconv");
958 firm_dbg_set_mask(dbg, LEVEL_1);
961 /* if-conversion works better with normalized returns */
962 normalize_one_return(irg);
964 /* Ensure, that the dominators are computed. */
967 DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
968 get_entity_name(get_irg_entity(irg)), irg));
971 * Collect information about the conds and put the phis on an obstack.
972 * It is important that phi nodes which are 'higher' (with a
973 * lower dfs pre order) are in front of the obstack. Since they are
974 * possibly turned into muxes this can enable the optimization
977 irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
980 vcg_dump_conds(irg, &cwi);
983 /* Process each suitable phi found. */
984 list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
985 DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
986 muxes_made += check_out_phi(phi_info, &p);
/* Free all cond sets collected during the walk. */
989 list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
990 del_set(cond_info->cond_set);
993 DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
995 obstack_free(&obst, NULL);