3 * Make Mux nodes from Conds where it is possible.
4 * @author Sebastian Hack
13 #include "irgraph_t.h"
29 #include "bitfiddle.h"
34 * Mux optimization routines.
38 static ir_node *local_optimize_mux(ir_node *mux)
/*
 * Locally rewrite a Mux whose selector is a Proj of a Cmp against 0 into a
 * shift sequence (see the per-case comments below). Child Mux operands are
 * optimized first.
 */
42 ir_node *sel = get_Mux_sel(mux);
43 ir_node *cmp = skip_Proj(sel);
45 /* Optimize the children */
46 for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
47 ir_node *operand = get_irn_n(mux, i);
48 if(get_irn_op(operand) == op_Mux)
/* NOTE(review): recursion goes through optimize_mux, not this function —
 * presumably a wrapper defined elsewhere in the file; verify it exists. */
49 optimize_mux(operand);
52 /* If we have no cmp above the mux, get out. */
53 if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
55 pn_Cmp cc = get_Proj_proj(sel);
56 ir_mode *mode = get_irn_mode(mux);
/* FIX: was get_nodes_block(n) — n is the int arity from the loop above,
 * not a node; the block of the Mux itself is what is needed here. */
57 ir_node *block = get_nodes_block(mux);
58 ir_node *cmp_left = get_Cmp_left(cmp);
59 ir_node *cmp_right = get_Cmp_right(cmp);
60 ir_node *mux_true = get_Mux_true(mux);
61 ir_node *mux_false = get_Mux_false(mux);
64 * Check for comparisons with signed integers.
66 if(mode_is_int(mode) /* We need an integral mode */
67 && mode_is_signed(mode) /* which is signed */
/* FIX: was `cc == Lt` — cc is a pn_Cmp, and the rest of this file uses the
 * pn_Cmp_* enumerators (cf. pn_Cmp_Eq in optimize_mux_chain). */
68 && cc == pn_Cmp_Lt) { /* and have to compare for < */
71 * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
75 if(classify_Const(cmp_right) == CNST_NULL
76 && classify_Const(mux_true) == CNST_ALL_ONE
77 && classify_Const(mux_false) == CNST_NULL) {
79 ir_mode *u_mode = find_unsigned_mode(mode);
81 res = new_r_Shrs(current_ir_graph, block, cmp_left,
82 new_r_Const_long(current_ir_graph, block, u_mode,
83 get_mode_size_bits(mode) - 1),
88 * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
92 else if(classify_Const(cmp_left) == CNST_NULL
93 && classify_Const(mux_true) == CNST_ONE
94 && classify_Const(mux_false) == CNST_NULL) {
96 ir_mode *u_mode = find_unsigned_mode(mode);
98 res = new_r_Shr(current_ir_graph, block,
100 /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
101 new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
103 /* This is sizeof_bits(T) - 1 */
104 new_r_Const_long(current_ir_graph, block, u_mode,
105 get_mode_size_bits(mode) - 1),
116 * check, if a node is const and return its tarval or
117 * return a default tarval.
118 * @param cnst The node whose tarval to get.
119 * @param or The alternative tarval, if the node is no Const.
120 * @return The tarval of @p cnst, if the node is Const, @p or otherwise.
122 static tarval *get_value_or(ir_node *cnst, tarval *or)
124 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
129 * Try to optimize nested muxes into a dis- or conjunction
131 * @param mux The parent mux, which has muxes as operands.
132 * @return The replacement node for this mux. If the optimization is
133 * successful, this might be an And or Or node, if not, it's the mux
136 static ir_node *optimize_mux_chain(ir_node *mux)
141 ir_mode *mode = get_irn_mode(mux);
146 * If we have no mux, or its mode is not integer, we
149 if(get_irn_op(mux) != op_Mux || !mode_is_int(mode))
153 null = get_tarval_null(mode);
/* -1 (all bits set) in the Mux's mode, used as the neutral element of And. */
154 minus_one = tarval_sub(null, get_tarval_one(mode));
156 ops[0] = get_Mux_false(mux);
157 ops[1] = get_Mux_true(mux);
159 for(i = 0; i < 2; ++i) {
161 tarval *tva, *tvb, *tvd;
165 * A mux operand at the first position can be factored
166 * out, if the operands fulfill several conditions:
168 * mux(c1, mux(c2, a, b), d)
170 * This can be made into:
171 * 1) mux(c1, 0, d) | mux(c2, a, b)
172 * if a | d == d and b | d == d
174 * 2) mux(c1, -1, d) & mux(c2, a, b)
175 * if a & d == d and b & d == d
177 if(get_irn_op(ops[i]) == op_Mux) {
180 a = get_Mux_false(child_mux);
181 b = get_Mux_true(child_mux);
184 /* Try the or stuff */
/* Non-Const operands default to -1/-1/0 so the checks below fail safely. */
185 tva = get_value_or(a, minus_one);
186 tvb = get_value_or(b, minus_one);
187 tvd = get_value_or(d, null);
189 if(tarval_cmp(tarval_or(tva, tvd), tvd) == pn_Cmp_Eq
190 && tarval_cmp(tarval_or(tvb, tvd), tvd) == pn_Cmp_Eq) {
192 ops[i] = new_Const(mode, null);
193 res = new_r_Or(current_ir_graph, get_nodes_block(mux),
194 mux, child_mux, mode);
198 /* If the or didn't go, try the and stuff */
199 tva = get_value_or(a, null);
200 tvb = get_value_or(b, null);
201 tvd = get_value_or(d, minus_one);
203 if(tarval_cmp(tarval_and(tva, tvd), tvd) == pn_Cmp_Eq
204 && tarval_cmp(tarval_and(tvb, tvd), tvd) == pn_Cmp_Eq) {
206 ops[i] = new_Const(mode, minus_one);
207 res = new_r_And(current_ir_graph, get_nodes_block(mux),
208 mux, child_mux, mode);
214 /* recursively optimize nested muxes. */
/* Mux inputs: 0 = sel, 1 = false operand, 2 = true operand. */
215 set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
216 set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
222 /***********************************************************
223 * The If conversion itself.
224 ***********************************************************/
/** Default parameters, used when NULL is passed to opt_if_conv(). */
229 static opt_if_conv_info_t default_info = {
233 /** The debugging module. */
234 static firm_dbg_module_t *dbg;
237 * A simple check for side effects upon the opcode of an ir node.
238 * @param irn The ir node to check.
239 * @return 1 if the opcode itself may produce side effects, 0 if not.
241 static INLINE int has_side_effects(const ir_node *irn)
/* NOTE(review): opc is unused in the visible lines — presumably examined
 * in code not shown here; verify against the full source. */
243 opcode opc = get_irn_opcode(irn);
/* A node whose mode is not data/mode_b is treated as having side effects. */
248 return !mode_is_datab(get_irn_mode(irn));
252 * Decides, if a given expression and its subexpressions
253 * (to certain, also given extent) can be moved to a block.
254 * @param expr The expression to examine.
255 * @param block The block where the expression should go.
256 * @param depth The current depth, passed recursively. Use 0 for
257 * non-recursive calls.
258 * @param max_depth The maximum depth to which the expression should be
261 static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
265 ir_node *expr_block = get_nodes_block(expr);
269 * If we are forced to look too deep into the expression,
270 * treat it like it could not be moved.
272 if(depth >= max_depth) {
278 * If the block of the expression dominates the specified
279 * destination block, it does not matter if the expression
280 * has side effects or anything else. It is executed on each
281 * path the destination block is reached.
283 if(block_dominates(expr_block, dest_block))
287 * We cannot move phis!
295 * This should be superfluous and could be converted into an assertion.
296 * The destination block _must_ dominate the block of the expression,
297 * else the expression could be used without its definition.
299 if(!block_dominates(dest_block, expr_block)) {
305 * Surely, if the expression does not have a data mode, it is not
306 * movable. Perhaps one should also test the floating property of
309 if(has_side_effects(expr)) {
315 * If the node looks alright so far, look at its operands and
316 * check them out. If one of them cannot be moved, this one
317 * cannot be moved either.
319 for(i = 0, n = get_irn_arity(expr); i < n; ++i) {
320 ir_node *op = get_irn_n(expr, i);
/* Projs are "free": they do not count towards the depth limit. */
321 int new_depth = is_Proj(op) ? depth : depth + 1;
322 if(!_can_move_to(op, dest_block, new_depth, max_depth)) {
329 DBG((dbg, LEVEL_3, "\t\t\t%Dcan move to %n: %d\n", depth, expr, res));
335 * Convenience function for _can_move_to.
336 * Checks, if an expression can be moved to another block. The check can
337 * be limited to an expression depth meaning if we need to crawl in
338 * deeper into an expression than a given threshold to examine if
339 * it can be moved, the expression is rejected and the test returns
341 * @param expr The expression to check for.
342 * @param dest_block The destination block you want @p expr to be.
343 * @param max_depth The maximum depth @p expr should be investigated.
344 * @return 1, if the expression can be moved to the destination block,
347 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
349 return _can_move_to(expr, dest_block, 0, max_depth);
/**
 * Move an expression and (recursively) its operands into a destination
 * block. Recursion stops at nodes whose block already dominates the
 * destination, so code above the dominator is never touched.
 */
352 static void move_to(ir_node *expr, ir_node *dest_block)
355 ir_node *expr_block = get_nodes_block(expr);
358 * If we reached the dominator, we are done.
359 * We will never put code through the dominator
361 if(block_dominates(expr_block, dest_block))
364 for(i = 0, n = get_irn_arity(expr); i < n; ++i)
365 move_to(get_irn_n(expr, i), dest_block);
367 set_nodes_block(expr, dest_block);
/**
 * Compute the nearest common dominator block of two blocks by walking
 * the immediate-dominator chain of b1 until it dominates b2.
 */
370 static INLINE ir_node *common_idom(ir_node *b1, ir_node *b2)
372 if(block_dominates(b1, b2))
374 else if(block_dominates(b2, b1))
379 for(p = b1; !block_dominates(p, b2); p = get_Block_idom(p));
386 * Information about a cond node.
388 typedef struct _cond_t {
389 ir_node *cond; /**< The cond node. */
390 struct list_head list; /**< List head which is used for queueing this cond
391 into the cond bunch it belongs to. */
393 unsigned totally_covers : 1;
394 struct _cond_t *link; /**< Chains conds visited together, see link_conds(). */
398 * Information about both branches
399 * (true and false) the cond creates.
402 int pos; /**< Number of the predecessor of the
403 phi block by which this branch is
404 reached. It is -1, if this branch is
405 only reached through another cond. */
407 struct _cond_t *masked_by; /**< If this cond's branch is only reached
408 through another cond, we store this
409 cond ir_node here. */
/** Look up the cond_t record for a cond node @p irn in @p cond_set. */
413 static INLINE cond_t *get_cond(ir_node *irn, set *cond_set)
418 return set_find(cond_set, &templ, sizeof(templ), HASH_PTR(templ.cond));
/** Callback type for walking cond trees (see walk_conds()). */
422 typedef void (cond_walker_t)(cond_t *cond, void *env);
/**
 * Depth-first walk over a cond tree, following the masked_by edges of both
 * branches. Revisits are prevented via the per-cond visited number.
 */
424 static void _walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post,
425 long visited_nr, void *env)
429 if(cond->visited_nr >= visited_nr)
432 cond->visited_nr = visited_nr;
437 for(i = 0; i < 2; ++i) {
438 cond_t *c = cond->cases[i].masked_by;
441 _walk_conds(c, pre, post, visited_nr, env);
/** Global visit counter for cond walks; incremented per walk. */
448 static long cond_visited_nr = 0;
/** Convenience wrapper for _walk_conds() with a fresh visit number. */
450 static void walk_conds(cond_t *cond, cond_walker_t *pre, cond_walker_t *post, void *env)
452 _walk_conds(cond, pre, post, ++cond_visited_nr, env);
/**
 * Cond walker callback: chains visited conds into a singly linked list
 * through their link field. @p env is a cond_t** holding the list head.
 */
455 static void link_conds(cond_t *cond, void *env)
457 cond_t **ptr = (cond_t **) env;
464 * Compare two conds for use in a firm set.
465 * Two cond_t's are equal, if they designate the same cond node.
 * @param a One cond_t.
467 * @param b Another one.
468 * @param size Not used.
469 * @return 0 (!) if they are equal, != 0 otherwise.
471 static int cond_cmp(const void *a, const void *b, size_t size)
475 return x->cond != y->cond;
479 * Information about conds which can be made to muxes.
480 * Instances of this struct are attached to the link field of
481 * blocks in which phis are located.
483 typedef struct _cond_info_t {
484 struct list_head list; /**< Used to list all of these structs per class. */
486 struct list_head roots; /**< A list of non-depending conds. Two conds are
487 independent, if you can not reach the one from the
488 other (all conds in this list have to dominate the
489 block this struct is attached to). */
491 ir_node *first_phi; /**< The first phi node this cond info was made for. */
492 set *cond_set; /**< A set of all dominating reachable conds. */
/**
 * Recursively search, starting at @p irn, for Cond nodes (via their ProjX)
 * that lie between a phi's block and its dominator, recording them in
 * @p ci->cond_set and linking their masking relationships.
 */
498 static void _find_conds(ir_node *irn, long visited_nr,
499 ir_node *dominator, cond_t *masked_by, int pos, int depth, cond_info_t *ci)
502 int is_modeb_cond = 0;
504 block = get_nodes_block(irn);
507 * Only check this block if it is dominated by the specified
508 * dominator or it has not been visited yet.
510 if(block_dominates(dominator, block) && get_Block_block_visited(block) < visited_nr) {
511 cond_t *res = masked_by;
514 /* check, if we're on a ProjX
516 * Further, the ProjX/Cond block must dominate the base block
517 * (the block with the phi in it), otherwise, the Cond
518 * is not affecting the phi so that a mux can be inserted.
520 if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {
522 int proj = get_Proj_proj(irn);
523 ir_node *cond = get_Proj_pred(irn);
525 /* true, if the mode is a mode_b cond _NO_ switch cond */
526 is_modeb_cond = get_irn_opcode(cond) == iro_Cond
527 && get_irn_mode(get_Cond_selector(cond)) == mode_b;
529 /* Check, if the pred of the proj is a Cond
530 * with a Projb as selector.
535 memset(&c, 0, sizeof(c));
541 /* get or insert the cond info into the set. */
542 res = set_insert(ci->cond_set, &c, sizeof(c), HASH_PTR(cond));
545 * If this cond is already masked by the masked_by cond
546 * return immediately, since we don't have anything to add.
548 if(masked_by && res->cases[proj].masked_by == masked_by)
553 list_add(&res->list, &ci->roots);
557 * Set masked by (either NULL or another cond node).
558 * If this cond is truly masked by another one, set
559 * the position of the actually investigated branch
560 * to -1. Since the cond is masked by another one,
561 * there could be more ways from the start block
562 * to this branch, so we choose -1.
564 res->cases[proj].masked_by = masked_by;
567 res->cases[proj].pos = pos;
570 * Since the masked_by nodes masks a cond, remove it from the
571 * root list of the cond trees.
574 assert(res->cases[proj].pos < 0);
575 list_del_init(&masked_by->list);
578 DBG((dbg, LEVEL_2, "%D%n (%s branch) "
579 "for pos %d in block %n reached by %n\n",
580 depth, cond, proj ? "true" : "false", pos,
581 block, masked_by ? masked_by->cond : NULL));
585 if(get_Block_block_visited(block) < visited_nr) {
587 set_Block_block_visited(block, visited_nr);
589 /* Search recursively from this cond. */
590 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
591 ir_node *pred = get_irn_n(block, i);
594 * If the depth is 0 (the first recursion), we set the pos to
595 * the current viewed predecessor, else we adopt the position
596 * as given by the caller. We also increase the depth for the
597 * recursively called functions.
599 _find_conds(pred, visited_nr, dominator, res, pos, depth + (res != masked_by), ci);
607 * A convenience function for _find_conds.
608 * It sets some parameters needed for recursion to appropriate start
609 * values. Always use this function.
610 * @param irn The node to start looking for conds from. This might
611 * be the phi node we are investigating.
612 * @param ci The cond info in which to record the found conds.
614 static INLINE void find_conds(ir_node *irn, cond_info_t *ci)
618 ir_node *block = get_nodes_block(irn);
619 ir_node *dom = get_Block_idom(block);
/* Start one search per predecessor of the phi's block, each with a fresh
 * block-visited number so the searches do not interfere. */
622 for(i = 0, n = get_irn_arity(block); i < n; ++i) {
623 ir_node *pred = get_irn_n(block, i);
625 inc_irg_block_visited(current_ir_graph);
626 visited_nr = get_irg_block_visited(current_ir_graph);
627 set_Block_block_visited(block, visited_nr);
629 DBG((dbg, LEVEL_2, "find conds at pred %d (%n) and idom %n\n", i, pred, dom));
630 _find_conds(pred, visited_nr, dom, NULL, i, 0, ci);
635 * Make the mux for a given cond.
636 * @param phi The phi node which shall be replaced by a mux.
637 * @param dom The block where the muxes shall be placed.
638 * @param cond The cond information.
639 * @return The mux node made for this cond.
641 static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond,
642 int max_depth, ir_node **mux, bitset_t *positions, int *muxes_made, long visited_nr)
645 ir_node *projb = get_Cond_selector(cond->cond);
646 ir_node *bl = get_nodes_block(cond->cond);
647 ir_node *operands[2];
650 cond->visited_nr = visited_nr;
651 DBG((dbg, LEVEL_2, "%n\n", cond->cond));
652 for(i = 0; i < 2; ++i) {
653 cond_t *masked_by = cond->cases[i].masked_by;
654 int pos = cond->cases[i].pos;
660 * If this cond branch is masked by another cond, make the mux
661 * for that cond first, since the mux for this cond takes
666 DBG((dbg, LEVEL_2, "\tmasked by: %n\n", masked_by->cond));
667 if(masked_by->visited_nr < visited_nr)
668 operands[i] = make_mux_on_demand(phi, dom, masked_by, max_depth, mux, positions, muxes_made, visited_nr);
672 * If this cond branch is not masked by another cond, take
673 * the corresponding phi operand as an operand to the mux.
676 operands[i] = get_irn_n(phi, pos);
682 * Move the operands to the dominator block if the cond
683 * made sense. Some conds found are not suitable for making a mux
684 * out of them, since one of their branches cannot be reached from
685 * the phi block. In that case we do not make a mux and return NULL.
687 if(operands[0] && operands[1]
688 && can_move_to(operands[0], bl, max_depth)
689 && can_move_to(operands[1], bl, max_depth)) {
691 move_to(operands[0], bl);
692 move_to(operands[1], bl);
/* Mux operand order: (sel, false, true). */
695 *mux = new_r_Mux(current_ir_graph, bl, projb,
696 operands[0], operands[1], get_irn_mode(operands[0]));
/* NOTE(review): `set` here is a local array (declared in lines not visible
 * in this excerpt), shadowing the firm `set` type — confirm in full source. */
700 DBG((dbg, LEVEL_2, "\t%n(%n, %n, %n)[%d, %d]\n",
701 *mux, projb, operands[0], operands[1], set[0], set[1]));
703 for(i = 0; i < 2; ++i)
705 bitset_set(positions, set[i]);
/** Per-phi record queued on cond_walk_info_t::phi_head. */
711 typedef struct _phi_info_t {
712 struct list_head list; /**< List head for the per-graph phi list. */
713 cond_info_t *cond_info; /**< The cond info of this phi's block. */
719 * Examine a phi node if it can be replaced by some muxes.
720 * @param phi_info The phi info record (carries the phi node and the
721 *                 cond info of its block).
722 * @param info Parameters for the if conversion algorithm.
723 static int check_out_phi(phi_info_t *phi_info, opt_if_conv_info_t *info)
725 int max_depth = info->max_depth;
726 ir_node *irn = phi_info->irn;
728 cond_info_t *cond_info = phi_info->cond_info;
734 block = get_nodes_block(irn);
735 arity = get_irn_arity(irn);
736 positions = bitset_alloca(arity);
739 assert(get_irn_arity(irn) == get_irn_arity(block));
742 DBG((dbg, LEVEL_2, "phi candidate: %n\n", irn));
744 list_for_each_entry(cond_t, cond, &cond_info->roots, list) {
745 ir_node *cidom = block;
747 cond_t *p, *head = NULL;
750 bitset_clear_all(positions);
752 DBG((dbg, LEVEL_2, "\tcond root: %n\n", cond->cond));
754 * Link all conds which are in the subtree of
755 * the current cond in the list together.
757 walk_conds(cond, link_conds, NULL, &head);
/* Narrow cidom to the common dominator of all blocks feeding the phi
 * through this cond tree. */
760 for(p = head; p; p = p->link) {
761 for(i = 0; i < 2; ++i) {
762 int pos = p->cases[i].pos;
764 cidom = common_idom(cidom, get_nodes_block(get_irn_n(block, pos)));
768 DBG((dbg, LEVEL_2, "\tcommon idom: %n\n", cidom));
769 make_mux_on_demand(irn, cidom, cond, max_depth, &mux, positions, &muxes_made, ++cond_visited_nr);
/* Redirect all phi inputs covered by the new mux to the mux itself. */
772 bitset_foreach(positions, pos)
773 set_irn_n(irn, (int) pos, mux);
778 * optimize the phi away. This can enable further runs of this
779 * function. Look at _can_move_to. phis cannot be moved there.
781 nw = optimize_in_place_2(irn);
/** Environment for the annotate_cond_info_* graph walkers. */
788 typedef struct _cond_walk_info_t {
789 struct obstack *obst; /**< Obstack all cond/phi infos are allocated on. */
790 struct list_head cond_info_head; /**< All cond infos of the graph. */
791 struct list_head phi_head; /**< All candidate phi infos of the graph. */
/** Pre-walker: clear every node's link field before annotation. */
795 static void annotate_cond_info_pre(ir_node *irn, void *data)
797 set_irn_link(irn, NULL);
/**
 * Post-walker: for each data phi, compute (once per block) the set of conds
 * reachable between the phi's block and its dominator, attach it to the
 * block's link field, and queue a phi_info_t for later processing.
 */
800 static void annotate_cond_info_post(ir_node *irn, void *data)
802 cond_walk_info_t *cwi = data;
805 * Check, if the node is a phi
806 * we then compute a set of conds which are reachable from this
807 * phi's block up to its dominator.
808 * The set is attached to the blocks link field.
810 if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
811 ir_node *block = get_nodes_block(irn);
813 cond_info_t *ci = get_irn_link(block);
815 /* If the set is not yet computed, do it now. */
817 ci = obstack_alloc(cwi->obst, sizeof(*ci));
818 ci->cond_set = new_set(cond_cmp, log2_ceil(get_irn_arity(block)));
821 INIT_LIST_HEAD(&ci->roots);
822 INIT_LIST_HEAD(&ci->list);
825 * Add this cond info to the list of all cond infos
826 * in this graph. This is just done to free the
827 * set easier afterwards (we save an irg_walk_graph).
/* FIX: arguments were swapped. list_add(new, head) — cf. the other call
 * sites (&res->list into &ci->roots, &pi->list into &cwi->phi_head). The
 * swapped form spliced the list head in as an element, corrupting the
 * cleanup traversal in opt_if_conv(). */
829 list_add(&ci->list, &cwi->cond_info_head);
831 DBG((dbg, LEVEL_2, "searching conds at %n\n", irn));
834 * Fill the set with conds we find on the way from
835 * the block to its dominator.
840 * If there where no suitable conds, delete the set
841 * immediately and reset the set pointer to NULL
843 if(set_count(ci->cond_set) == 0) {
844 del_set(ci->cond_set);
846 obstack_free(cwi->obst, ci);
852 DBG((dbg, LEVEL_2, "conds already computed for %n (look at %n)\n", irn, ci->first_phi));
854 set_irn_link(block, ci);
857 phi_info_t *pi = obstack_alloc(cwi->obst, sizeof(*pi));
860 INIT_LIST_HEAD(&pi->list);
861 list_add(&pi->list, &cwi->phi_head);
/**
 * Cond walker callback that emits one cond as a VCG node, plus edges to the
 * conds masking its branches. @p env carries the output FILE*.
 */
867 static void dump_conds(cond_t *cond, void *env)
872 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n(%d, %d)\n%n\"}\n",
873 cond, cond->cond, cond->cases[0].pos, cond->cases[1].pos,
874 get_nodes_block(cond->cond));
876 for(i = 0; i < 2; ++i)
877 if(cond->cases[i].masked_by)
878 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\" label:\"%d\"}\n",
879 cond, cond->cases[i].masked_by, i);
/** Debug helper: dump all cond trees of @p irg to "<entity>-conds.vcg". */
882 static void vcg_dump_conds(ir_graph *irg, cond_walk_info_t *cwi)
887 snprintf(buf, sizeof(buf), "%s-conds.vcg", get_entity_name(get_irg_entity(irg)));
889 if((f = fopen(buf, "wt")) != NULL) {
894 ir_fprintf(f, "graph:{\ndisplay_edge_labels:yes\n");
895 list_for_each_entry(cond_info_t, ci, &cwi->cond_info_head, list) {
896 ir_fprintf(f, "node:{title:\"n%p\" label:\"cond info\"}\n", ci);
897 list_for_each_entry(cond_t, cond, &ci->roots, list) {
898 walk_conds(cond, NULL, dump_conds, f);
899 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", ci, cond);
903 list_for_each_entry(phi_info_t, phi, &cwi->phi_head, list) {
904 ir_fprintf(f, "node:{title:\"n%p\" label:\"%n\n%n\"}\n",
905 phi->irn, phi->irn, get_nodes_block(phi->irn));
906 ir_fprintf(f, "edge:{sourcename:\"n%p\" targetname:\"n%p\"}\n", phi->irn, phi->cond_info);
/**
 * Entry point of the if conversion: collects conds/phis via a graph walk,
 * turns suitable phis into Mux nodes, then frees the collected data.
 * @param irg The graph to transform.
 * @param params Algorithm parameters; NULL selects default_info.
 */
912 void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
916 phi_info_t *phi_info;
917 cond_info_t *cond_info;
918 cond_walk_info_t cwi;
920 opt_if_conv_info_t *p = params ? params : &default_info;
922 if(!get_opt_if_conversion())
928 INIT_LIST_HEAD(&cwi.cond_info_head);
929 INIT_LIST_HEAD(&cwi.phi_head);
931 /* Init the debug stuff. */
932 dbg = firm_dbg_register("firm.opt.ifconv");
934 firm_dbg_set_mask(dbg, LEVEL_1);
937 /* Ensure, that the dominators are computed. */
940 DBG((dbg, LEVEL_1, "if conversion for irg %s(%p)\n",
941 get_entity_name(get_irg_entity(irg)), irg));
944 * Collect information about the conds and put the phis on an obstack.
945 * It is important that phi nodes which are 'higher' (with a
946 * lower dfs pre order) are in front of the obstack. Since they are
947 * possibly turned in to muxes this can enable the optimization
950 irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &cwi);
953 vcg_dump_conds(irg, &cwi);
956 /* Process each suitable phi found. */
957 list_for_each_entry(phi_info_t, phi_info, &cwi.phi_head, list) {
958 DBG((dbg, LEVEL_2, "phi node %n\n", phi_info->irn));
959 muxes_made += check_out_phi(phi_info, p);
962 list_for_each_entry(cond_info_t, cond_info, &cwi.cond_info_head, list) {
963 del_set(cond_info->cond_set);
966 DBG((dbg, LEVEL_1, "muxes made: %d\n", muxes_made));
968 obstack_free(&obst, NULL);