* Make Mux nodes from Conds where it is possible.
4 * @author Sebastian Hack
11 #include "irgraph_t.h"
26 * Mux optimization routines.
30 static ir_node *local_optimize_mux(ir_node *mux)
34 ir_node *sel = get_Mux_sel(mux);
35 ir_node *cmp = skip_Proj(sel);
37 /* Optimize the children */
38 for(i = 1, n = get_irn_arity(mux); i < n; ++i) {
39 ir_node *operand = get_irn_n(mux, i);
40 if(get_irn_op(operand) == op_Mux)
41 optimize_mux(operand);
44 /* If we have no cmp above the mux, get out. */
45 if(is_Proj(sel) && get_irn_mode(sel) == mode_b && get_irn_opcode(cmp) == iro_Cmp) {
47 pnc_number cc = get_Proj_proj(sel);
48 ir_mode *mode = get_irn_mode(mux);
49 ir_node *block = get_nodes_block(n);
50 ir_node *cmp_left = get_Cmp_left(cmp);
51 ir_node *cmp_right = get_Cmp_right(cmp);
52 ir_node *mux_true = get_Mux_true(mux);
53 ir_node *mux_false = get_Mux_false(mux);
56 * Check for comparisons with signed integers.
58 if(mode_is_int(mode) /* We need an integral mode */
59 && mode_is_signed(mode) /* which is signed */
60 && cc == Lt) { /* and have to compare for < */
63 * Mux(x:T < 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
67 if(classify_Const(cmp_right) == CNST_NULL
68 && classify_Const(mux_true) == CNST_ALL_ONE
69 && classify_Const(mux_false) == CNST_NULL) {
71 ir_mode *u_mode = find_unsigned_mode(mode);
73 res = new_r_Shrs(current_ir_graph, block, cmp_left,
74 new_r_Const_long(current_ir_graph, block, u_mode,
75 get_mode_size_bits(mode) - 1),
80 * Mux(0 < x:T, 1, 0) -> Shr(-x, sizeof_bits(T) - 1)
84 else if(classify_Const(cmp_left) == CNST_NULL
85 && classify_Const(mux_true) == CNST_ONE
86 && classify_Const(mux_false) == CNST_NULL) {
88 ir_mode *u_mode = find_unsigned_mode(mode);
90 res = new_r_Shr(current_ir_graph, block,
92 /* -x goes to 0 - x in Firm (cmp_left is 0, see the if) */
93 new_r_Sub(current_ir_graph, block, cmp_left, cmp_right, mode),
95 /* This is sizeof_bits(T) - 1 */
96 new_r_Const_long(current_ir_graph, block, u_mode,
97 get_mode_size_bits(mode) - 1),
107 static tarval *get_value_or(ir_node *cnst, tarval *or)
109 return get_irn_op(cnst) == op_Const ? get_Const_tarval(cnst) : or;
/**
 * Try to fold a chain of Mux nodes: if an operand of a Mux is itself a
 * Mux, the two can sometimes be combined with an Or or And, provided
 * the constant operands satisfy the conditions below. The function
 * recurses into both operands of the Mux.
 *
 * @param mux The node to process (ignored unless it is a Mux).
 * @return The possibly optimized node.
 */
static ir_node *optimize_mux_chain(ir_node *mux)
{
	/* Only Mux nodes are interesting. */
	if(get_irn_op(mux) != op_Mux)

	mode = get_irn_mode(mux);
	null = get_tarval_null(mode);
	/* minus_one is computed as 0 - 1 in the Mux's mode. */
	minus_one = tarval_sub(null, get_tarval_one(mode));

	/* ops[0]/ops[1]: the false/true operand of the Mux. */
	ops[0] = get_Mux_false(mux);
	ops[1] = get_Mux_true(mux);

	/* Look at both operands for a child Mux. */
	for(i = 0; i < 2; ++i) {
		tarval *tva, *tvb, *tvd;

		/*
		 * This is the or case, the child mux is the false operand:
		 *
		 *   mux(c1, mux(c2, a, b), d)
		 *
		 * This can be made into:
		 * 1) mux(c1, 0, d) | mux(c2, a, b)
		 *    if a | d == d and b | d == d
		 *
		 * 2) mux(c1, -1, d) & mux(c2, a, b)
		 *    if a & d == d and a & b == b
		 *
		 * NOTE(review): the code for case 2 below actually checks
		 * (a & d) == d and (b & d) == d -- verify which condition
		 * was intended; comment and code disagree.
		 */
		if(get_irn_op(ops[i]) == op_Mux) {

			a = get_Mux_false(child_mux);
			b = get_Mux_true(child_mux);

			/* Try the or stuff.
			 * Non-Const operands fall back to -1 (for a, b) resp. 0
			 * (for d), which can never satisfy the conditions. */
			tva = get_value_or(a, minus_one);
			tvb = get_value_or(b, minus_one);
			tvd = get_value_or(d, null);

			if(tarval_cmp(tarval_or(tva, tvd), tvd) == Eq
				&& tarval_cmp(tarval_or(tvb, tvd), tvd) == Eq) {

				/* Replace this operand by 0 and Or with the child Mux. */
				ops[i] = new_Const(mode, null);
				res = new_r_Or(current_ir_graph, get_nodes_block(mux),
					mux, child_mux, mode);

			/* If the or didn't go, try the and stuff */
			tva = get_value_or(a, null);
			tvb = get_value_or(b, null);
			tvd = get_value_or(d, minus_one);

			if(tarval_cmp(tarval_and(tva, tvd), tvd) == Eq
				&& tarval_cmp(tarval_and(tvb, tvd), tvd) == Eq) {

				/* Replace this operand by -1 and And with the child Mux. */
				ops[i] = new_Const(mode, minus_one);
				res = new_r_And(current_ir_graph, get_nodes_block(mux),
					mux, child_mux, mode);

	/* Recurse into the (possibly replaced) false/true operands. */
	set_irn_n(mux, 1, optimize_mux_chain(ops[0]));
	set_irn_n(mux, 2, optimize_mux_chain(ops[1]));
195 /***********************************************************
196 * The If conversion itself.
197 ***********************************************************/
/** Default parameters used when the caller of opt_if_conv passes NULL. */
static opt_if_conv_info_t default_info = {

/** The debugging module. */
static firm_dbg_module_t *dbg;
/**
 * A simple check for side effects based upon the opcode of an ir node.
 * @param irn The ir node to check.
 * @return 1 if the opcode itself may produce side effects, 0 if not.
 */
static INLINE int has_side_effects(const ir_node *irn)
{
	opcode opc = get_irn_opcode(irn);

	/* Nodes without a datab mode (memory, control flow, ...) are
	 * considered to have side effects. */
	return !mode_is_datab(get_irn_mode(irn));
/**
 * Decides, if a given expression and its subexpressions
 * (to a certain, also given extent) can be moved to a block.
 * @param expr The expression to examine.
 * @param dest_block The block where the expression should go.
 * @param depth The current depth, passed recursively. Use 0 for
 *              non-recursive calls.
 * @param max_depth The maximum depth to which the expression should be
 *                  examined before giving up.
 */
static int _can_move_to(ir_node *expr, ir_node *dest_block, int depth, int max_depth)
{
	ir_node *expr_block = get_nodes_block(expr);

	/*
	 * If we are forced to look too deep into the expression,
	 * treat it like it could not be moved.
	 */
	if(depth >= max_depth) {

	/*
	 * If the block of the expression dominates the specified
	 * destination block, it does not matter if the expression
	 * has side effects or anything else. It is executed on each
	 * path the destination block is reached.
	 */
	if(block_dominates(expr_block, dest_block))

	/*
	 * This should be superfluous and could be converted into an assertion.
	 * The destination block _must_ dominate the block of the expression,
	 * else the expression could be used without its definition.
	 */
	if(!block_dominates(dest_block, expr_block)) {

	/*
	 * Surely, if the expression does not have a data mode, it is not
	 * movable. Perhaps one should also test the floating property of
	 * the node.
	 */
	if(has_side_effects(expr)) {

	/*
	 * If the node looks alright so far, look at its operands and
	 * check them out. If one of them cannot be moved, this one
	 * cannot be moved either.
	 */
	for(i = 0, n = get_irn_arity(expr); i < n; ++i) {
		ir_node *op = get_irn_n(expr, i);
		/* Projs do not count towards the depth limit. */
		int new_depth = is_Proj(op) ? depth : depth + 1;
		if(!_can_move_to(op, dest_block, new_depth, max_depth)) {

	DBG((dbg, LEVEL_5, "\t\t\tcan move to(%d) %n: %d\n", depth, expr, res));
300 * Convenience function for _can_move_to.
301 * Checks, if an expression can be moved to another block. The check can
302 * be limited to a expression depth meaning if we need to crawl in
303 * deeper into an expression than a given threshold to examine if
304 * it can be moved, the expression is rejected and the test returns
306 * @param expr The expression to check for.
307 * @param dest_block The destination block you want @p expr to be.
308 * @param max_depth The maximum depth @p expr should be investigated.
309 * @return 1, if the expression can be moved to the destination block,
312 static INLINE int can_move_to(ir_node *expr, ir_node *dest_block, int max_depth)
314 return _can_move_to(expr, dest_block, 0, max_depth);
317 static void move_to(ir_node *expr, ir_node *dest_block)
320 ir_node *expr_block = get_nodes_block(expr);
323 * If we reached the dominator, we are done.
324 * We will never put code through the dominator
326 if(block_dominates(expr_block, dest_block))
329 for(i = 0, n = get_irn_arity(expr); i < n; ++i)
330 move_to(get_irn_n(expr, i), dest_block);
332 set_nodes_block(expr, dest_block);
/**
 * Information about a cond node, collected per control flow branch.
 */
typedef struct _cond_t {
	ir_node *cond;          /**< The cond node. */
	ir_node *mux;           /**< The mux node, that will be generated for this cond. */

	/**
	 * Information about the both 'branches'
	 * (true and false), the cond creates.
	 */
		int pos;            /**< Number of the predecessor of the
		                      phi block by which this branch is
		                      reached. It is -1, if this branch is
		                      only reached through another cond. */

		ir_node *masked_by; /**< If this cond's branch is only reached
		                      through another cond, we store this
		                      cond ir_node here. */
/**
 * Compare two conds for use in a firm set.
 * Two cond_t's are equal, if they designate the same cond node.
 * @param a One cond_t (passed as void pointer by the set API).
 * @param b Another one.
 * @param size Not used.
 * @return 0 (!) if they are equal, != 0 otherwise.
 */
static int cond_cmp(const void *a, const void *b, size_t size)
{
	/* NOTE(review): x and y are presumably a and b viewed as
	 * const cond_t * -- their declarations are not visible here. */
	return x->cond != y->cond;
/**
 * Walk control flow upwards from @p irn and record every Cond node
 * (with a boolean selector) found inside the region dominated by
 * @p dominator into the set @p conds. For each cond, the branch
 * (true/false Proj) by which it was reached is annotated with the
 * phi-predecessor position and the masking cond, if any.
 *
 * @param irn        The node to start from.
 * @param visited_nr The block-visited number used to mark walked blocks.
 * @param dominator  The dominator limiting the upward walk.
 * @param masked_by  The cond through which this branch was reached,
 *                   or NULL on the first level.
 * @param pos        The phi-block predecessor position for this branch.
 * @param depth      The recursion depth, 0 on the initial call.
 * @param conds      The set collecting cond_t records.
 */
static void _find_conds(ir_node *irn, unsigned long visited_nr,
		ir_node *dominator, ir_node *masked_by, int pos, int depth, set *conds)
{
	block = get_nodes_block(irn);

	/* Only look at blocks inside the dominated region. */
	if(block_dominates(dominator, block)) {
		ir_node *cond = NULL;

		/* check, if we're on a ProjX */
		if(is_Proj(irn) && get_irn_mode(irn) == mode_X) {

			int proj = get_Proj_proj(irn);
			cond = get_Proj_pred(irn);

			/* Check, if the pred of the proj is a Cond
			 * with a Projb as selector. */
			if(get_irn_opcode(cond) == iro_Cond
				&& get_irn_mode(get_Cond_selector(cond)) == mode_b) {

		/* get or insert the cond info into the set. */
		res = set_insert(conds, &c, sizeof(c), HASH_PTR(cond));

		/*
		 * Link it to the cond ir_node. We need that later, since when
		 * one cond masks the other we want to retrieve the cond_t
		 * data from the masking cond ir_node.
		 */
		set_irn_link(cond, res);

		/*
		 * Set masked_by (either NULL or another cond node).
		 * If this cond is truly masked by another one, set
		 * the position of the actually investigated branch
		 * to -1. Since the cond is masked by another one,
		 * there could be more ways from the start block
		 * to this branch, so we choose -1.
		 */
		res->cases[proj].masked_by = masked_by;

		res->cases[proj].pos = pos;

		DBG((dbg, LEVEL_5, "found cond %n (%s branch) for pos %d in block %n reached by %n\n",
			cond, get_Proj_proj(irn) ? "true" : "false", pos, block, masked_by));

	/*
	 * If this block has already been visited, don't recurse to its
	 * predecessors again.
	 */
	if(get_Block_block_visited(block) < visited_nr) {

		/* Mark the block visited. */
		set_Block_block_visited(block, visited_nr);

		/* Search recursively from this cond. */
		for(i = 0, n = get_irn_arity(block); i < n; ++i) {
			ir_node *pred = get_irn_n(block, i);

			/*
			 * If the depth is 0 (the first recursion), we set the pos to
			 * the current viewed predecessor, else we adopt the position
			 * as given by the caller. We also increase the depth for the
			 * recursively called functions.
			 */
			_find_conds(pred, visited_nr, dominator, cond, depth == 0 ? i : pos, depth + 1, conds);
458 * A convenience function for _find_conds.
459 * It sets some parameters needed for recursion to appropriate start
460 * values. Always use this function.
461 * @param irn The node to start looking for conds from. This might
462 * be the phi node we are investigating.
463 * @param dominator The dominator up to which we want to look for conds.
464 * @param conds The set to record the found conds in.
466 static INLINE void find_conds(ir_node *irn, ir_node *dominator, set *conds)
468 inc_irg_block_visited(current_ir_graph);
469 _find_conds(irn, get_irg_block_visited(current_ir_graph), dominator, NULL, 0, 0, conds);
/**
 * Make the mux for a given cond. Muxes for masking conds are created
 * recursively and become operands of this cond's mux.
 * @param phi The phi node which shall be replaced by a mux.
 * @param dom The block where the muxes shall be placed.
 * @param cond The cond information.
 * @return The mux node made for this cond.
 */
static ir_node *make_mux_on_demand(ir_node *phi, ir_node *dom, cond_t *cond)
{
	ir_node *projb = get_Cond_selector(cond->cond);
	ir_node *operands[2];

	/* Gather the two mux operands: index 0 = false case, 1 = true case. */
	for(i = 0; i < 2; ++i) {

		/*
		 * If this cond branch is masked by another cond, make the mux
		 * for that cond first, since the mux for this cond takes
		 * it as an operand.
		 */
		if(cond->cases[i].masked_by) {
			/* The masking cond's cond_t was stored in its link field
			 * by _find_conds. */
			cond_t *masking_cond = get_irn_link(cond->cases[i].masked_by);
			operands[i] = make_mux_on_demand(phi, dom, masking_cond);

		/*
		 * If this cond branch is not masked by another cond, take
		 * the corresponding phi operand as an operand to the mux.
		 */
		assert(cond->cases[i].pos >= 0);
		operands[i] = get_irn_n(phi, cond->cases[i].pos);

		/* Move the selected operand to the dominator block. */
		move_to(operands[i], dom);

	/* Move the comparison expression of the cond to the dominator. */

	/* Remember the created mux in the cond info. */
	cond->mux = new_r_Mux(current_ir_graph, dom, projb,
		operands[0], operands[1], get_irn_mode(operands[0]));
/**
 * Examine a phi node if it can be replaced by some muxes.
 * All phi operands and the compare nodes of the involved conds must be
 * movable to the immediate dominator, else the phi is left alone.
 * @param irn A phi node.
 * @param info Parameters for the if conversion algorithm.
 */
static void check_out_phi(ir_node *irn, opt_if_conv_info_t *info)
{
	int max_depth = info->max_depth;

	cond_t *largest_cond;

	block = get_nodes_block(irn);
	arity = get_irn_arity(irn);
	idom = get_Block_idom(block);

	/* A phi has exactly one operand per block predecessor. */
	assert(get_irn_arity(irn) == get_irn_arity(block));

	/* The cond set was attached to the block's link field by
	 * annotate_cond_info_post. */
	cond_set = get_irn_link(block);
	/* NOTE(review): this asserts 'conds', which is only assigned further
	 * below; 'cond_set' is presumably what was meant here -- verify. */
	assert(conds && "no cond set for this phi");

	DBG((dbg, LEVEL_5, "phi candidate: %n\n", irn));

	/*
	 * Check, if we can move all operands of the
	 * phi node to the dominator. Else exit.
	 */
	for(i = 0; i < arity; ++i) {
		if(!can_move_to(get_irn_n(irn, i), idom, max_depth)) {
			DBG((dbg, LEVEL_5, "cannot move operand %d of %n to %n\n", i, irn, idom));

	n_conds = set_count(cond_set);

	/* This should never happen and can be turned into an assertion */
		DBG((dbg, LEVEL_5, "no conds found. how can this be?"));

	/*
	 * Put all cond information structures into an array.
	 * This is just done for convenience. It's not necessary.
	 */
	conds = alloca(n_conds * sizeof(conds[0]));
	for(i = 0, cond = set_first(cond_set); cond; cond = set_next(cond_set))

	/*
	 * Check, if we can move the compare nodes of the conds to
	 * the dominator as well.
	 */
	for(i = 0; i < n_conds; ++i) {
		ir_node *projb = get_Cond_selector(conds[i]->cond);
		if(!can_move_to(projb, idom, max_depth)) {
			DBG((dbg, LEVEL_5, "cannot move Projb %d of %n to %n\n", i, projb, idom));

	/*
	 * Find the largest cond (the one that dominates all others)
	 * and start the mux generation from there.
	 */
	largest_cond = conds[0];
	DBG((dbg, LEVEL_5, "\tlargest cond %n\n", largest_cond->cond));
	for(i = 1; i < n_conds; ++i) {
		ir_node *curr_largest_block = get_nodes_block(largest_cond->cond);
		ir_node *bl = get_nodes_block(conds[i]->cond);

		/* A cond whose block dominates the current largest is "larger". */
		if(block_dominates(bl, curr_largest_block)) {
			/* NOTE(review): this logs the OLD largest cond; the new one is
			 * assigned only on the next line. conds[i]->cond was probably
			 * meant -- harmless, but misleading debug output. */
			DBG((dbg, LEVEL_5, "\tnew largest cond %n\n", largest_cond->cond));
			largest_cond = conds[i];

	/* Dump all collected cond records for debugging. */
	for(i = 0; i < n_conds; ++i) {
		cond_t *c = conds[i];
		DBG((dbg, LEVEL_5, "\tcond %n (t: (%d,%n), f: (%d,%n))\n", c->cond,
			c->cases[1].pos, c->cases[1].masked_by,
			c->cases[0].pos, c->cases[0].masked_by));

	/*
	 * Make the mux for the 'largest' cond. This will also
	 * produce all other muxes.
	 * @see make_mux_on_demand.
	 */
	mux = make_mux_on_demand(irn, idom, largest_cond);

	/*
	 * Try to optimize mux chains.
	 */
	mux = optimize_mux_chain(mux);

	/*
	 * Set all preds of the phi node to the mux
	 * for the 'largest' cond.
	 */
	for(i = 0; i < arity; ++i)
		set_irn_n(irn, i, mux);
641 static void annotate_cond_info_pre(ir_node *irn, void *data)
643 set_irn_link(irn, NULL);
/**
 * Post-walker: for every datab-mode Phi, compute the set of conds
 * reachable from the Phi's block up to its immediate dominator, attach
 * it to the block's link field and thread the Phi into a list via the
 * Phis' link fields (list head passed through @p data).
 * @param irn  The currently visited node.
 * @param data Pointer to the phi list head (ir_node **).
 */
static void annotate_cond_info_post(ir_node *irn, void *data)
{
	/*
	 * Check, if the node is a phi.
	 * We then compute a set of conds which are reachable from this
	 * phi's block up to its dominator.
	 * The set is attached to the block's link field.
	 */
	if(is_Phi(irn) && mode_is_datab(get_irn_mode(irn))) {
		ir_node *block = get_nodes_block(irn);
		ir_node **phi_list_head = (ir_node **) data;

		set *conds = get_irn_link(block);

		/* If the set is not yet computed, do it now. */
			ir_node *idom = get_Block_idom(block);
			conds = new_set(cond_cmp, 8);

			/*
			 * Fill the set with conds we find on the way from
			 * the block to its dominator.
			 */
			find_conds(irn, idom, conds);

			/*
			 * If there were no suitable conds, delete the set
			 * immediately and reset the set pointer to NULL.
			 */
			if(set_count(conds) == 0) {

		set_irn_link(block, conds);

		/*
		 * If this phi node has a set of conds reachable, enqueue
		 * the phi node in a list with its link field.
		 * Then, we do not have to walk the graph again. We can
		 * use the list to reach all phi nodes for which if conversion
		 * can be tried.
		 */
			ir_node *old = *phi_list_head;
			set_irn_link(irn, old);
			*phi_list_head = irn;
/**
 * Walker: free the cond sets that annotate_cond_info_post attached to
 * the blocks' link fields.
 * @param irn  The currently visited node.
 * @param data Unused walker environment.
 */
static void free_sets(ir_node *irn, void *data)
{
	if(is_Block(irn) && get_irn_link(irn)) {
		set *conds = get_irn_link(irn);
/**
 * Perform if conversion on a graph: try to replace Phi nodes whose
 * operands can be moved to the dominator by Mux nodes.
 * @param irg    The graph to transform.
 * @param params Parameters for the algorithm; if NULL, default_info
 *               is used.
 */
void opt_if_conv(ir_graph *irg, opt_if_conv_info_t *params)
{
	opt_if_conv_info_t *p = params ? params : &default_info;
	ir_node *list_head = NULL;

	/* Nothing to do if if conversion is disabled globally. */
	if(!get_opt_if_conversion())

	dbg = firm_dbg_register("firm.opt.ifconv");
	/* NOTE(review): mask -1 enables ALL debug levels -- looks like a
	 * debugging leftover; consider removing or lowering. */
	firm_dbg_set_mask(dbg, -1);

	DBG((dbg, LEVEL_4, "if conversion for irg %s(%p)\n",
		get_entity_name(get_irg_entity(irg)), irg));

	/* Collect per-block cond sets and thread candidate phis into
	 * list_head (see annotate_cond_info_post). */
	irg_walk_graph(irg, annotate_cond_info_pre, annotate_cond_info_post, &list_head);

	/* traverse the list of linked phis */
		check_out_phi(list_head, p);
		list_head = get_irn_link(list_head);

	/* Free the per-block cond sets again. */
	irg_walk_graph(irg, free_sets, NULL, NULL);