/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Reassociation
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
#include "iropt_dbg.h"
#include "reassoc_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef enum {
    NO_CONSTANT   = 0,  /**< node is not constant */
    REAL_CONSTANT = 1,  /**< node is a Const that is suitable for constant folding */
    REGION_CONST  = 4   /**< node is a constant expression in the current context,
                             use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
/**
 * Returns whether a node is constant, i.e. is a Const node or
 * is loop invariant (called a region constant).
 *
 * @param n      the node to be checked for constness
 * @param block  a block that might be in a loop
 */
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
{
    if (is_Const(n))
        return REAL_CONSTANT;
    /* constant nodes which can't be folded are region constants */
    if (is_irn_constlike(n))
        return REGION_CONST;

    /*
     * Beware: Bad nodes are always loop-invariant, but
     * cannot be handled in later code, so filter them here.
     */
    if (! is_Bad(n) && is_loop_invariant(n, block))
        return REGION_CONST;

    return NO_CONSTANT;
}  /* get_const_class */
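
/* Illustration (hypothetical nodes, not from the original sources): for the
 * expression x + 4 evaluated in a loop block, where x is defined outside the
 * loop, the Const 4 is a REAL_CONSTANT (it can be folded), x is a
 * REGION_CONST (loop invariant in this context), and any value computed
 * inside the loop itself is NO_CONSTANT. */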
/**
 * Returns the operands of a commutative binop: if one operand is
 * a region constant, it is returned as the second one.
 *
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
 */
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
    ir_node *op_a  = get_binop_left(binop);
    ir_node *op_b  = get_binop_right(binop);
    ir_node *block = get_nodes_block(binop);
    int class_a = get_const_class(op_a, block);
    int class_b = get_const_class(op_b, block);

    assert(is_op_commutative(get_irn_op(binop)));
    switch (class_a + 2*class_b) {
    case REAL_CONSTANT + 2*REAL_CONSTANT:
        /* if both are constants, one might be a
         * pointer constant like NULL, return the other
         */
        if (mode_is_reference(get_irn_mode(op_a))) {
            *a = op_a;
            *c = op_b;
        } else {
            *a = op_b;
            *c = op_a;
        }
        break;
    case REAL_CONSTANT + 2*NO_CONSTANT:
    case REAL_CONSTANT + 2*REGION_CONST:
    case REGION_CONST  + 2*NO_CONSTANT:
        *a = op_b;
        *c = op_a;
        break;
    default:
        *a = op_a;
        *c = op_b;
        break;
    }
}  /* get_comm_Binop_ops */
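
/* Illustration of the encoding used above: with NO_CONSTANT = 0,
 * REAL_CONSTANT = 1 and REGION_CONST = 4, the value class_a + 2*class_b is
 * distinct for every pair, e.g. (REAL, REAL) = 3, (REAL, REGION) = 9 and
 * (REGION, NO) = 4, so a single switch dispatches all combinations. */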
/**
 * Reassociate a Sub: x - c = x + (-c)
 */
static int reassoc_Sub(ir_node **in)
{
    ir_node *n     = *in;
    ir_node *right = get_Sub_right(n);
    ir_mode *rmode = get_irn_mode(right);
    ir_node *block;

    /* cannot handle SubIs(P, P) */
    if (mode_is_reference(rmode))
        return 0;

    block = get_nodes_block(n);
    /*
     * convert x - c => x + (-c)
     */
    if (get_const_class(right, block) == REAL_CONSTANT) {
        ir_node *left = get_Sub_left(n);
        ir_node *irn;
        ir_mode *mode;
        dbg_info *dbi;

        switch (get_const_class(left, block)) {
        case REAL_CONSTANT:
            irn = optimize_in_place(n);
            if (irn != n) {
                exchange(n, irn);
                *in = irn;
                return 1;
            }
            return 0;
        case NO_CONSTANT:
            break;
        default:
            /* already constant, nothing to do */
            return 0;
        }

        mode = get_irn_mode(n);
        dbi  = get_irn_dbg_info(n);

        /* Beware of SubP(P, Is) */
        irn = new_rd_Minus(dbi, block, right, rmode);
        irn = new_rd_Add(dbi, block, left, irn, mode);

        DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
             get_Sub_left(n), right, get_Sub_left(n), right));

        exchange(n, irn);
        *in = irn;
        return 1;
    }
    return 0;
}  /* reassoc_Sub */
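
/* Illustration of the rule above (hypothetical nodes): Sub(x, Const 3) in an
 * integer mode becomes Add(x, Minus(Const 3)), i.e. x + (-3), which the
 * commutative Add rules can then reassociate further. For SubP(P, Is) the
 * Minus is built in the integer mode of the right operand (rmode) while the
 * Add keeps the mode of the original Sub. */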
/** Retrieve a mode from the operands. We need this, because
 * Add and Sub are allowed to operate on (P, Is).
 */
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
{
    ir_mode *m1, *m2;

    m1 = get_irn_mode(op1);
    if (mode_is_reference(m1))
        return m1;

    m2 = get_irn_mode(op2);
    if (mode_is_reference(m2))
        return m2;

    return m1;
}  /* get_mode_from_ops */
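
/* Example: for operands p (reference mode P) and i (integer mode Is), as in
 * a pointer Add(P, Is), this returns P, so a rebuilt node keeps the pointer
 * mode; for two integer operands the mode of the first one is returned. */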
/**
 * Reassociate a commutative binop.
 *
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
 */
static int reassoc_commutative(ir_node **node)
{
    ir_node *n     = *node;
    ir_op   *op    = get_irn_op(n);
    ir_node *block = get_nodes_block(n);
    ir_node *t1, *c1;

    get_comm_Binop_ops(n, &t1, &c1);

    if (get_irn_op(t1) == op) {
        ir_node *t2, *c2;
        const_class_t c_c1, c_c2, c_t2;

        get_comm_Binop_ops(t1, &t2, &c2);

        /* do not optimize Bad nodes, will fail later */
        if (is_Bad(t2) || is_Bad(c2))
            return 0;

        c_c1 = get_const_class(c1, block);
        c_c2 = get_const_class(c2, block);
        c_t2 = get_const_class(t2, block);
        if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
             ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
            /* All three are constant and either all are constant expressions
             * or two of them are:
             * then applying this rule would lead into a cycle.
             *
             * Note that if t2 is a constant, so is c2; hence we save one test.
             */
            return 0;
        }
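
        /* Why the bit trick above works: the classes are 0, 1 and 4, so the
         * REGION_CONST bit of c_c1 ^ c_c2 ^ c_t2 is clear iff an even number
         * of the three operands are region constants. Example: c_c1 =
         * REAL_CONSTANT, c_c2 = c_t2 = REGION_CONST gives 1 ^ 4 ^ 4 = 1, the
         * bit is clear and the rule is skipped, avoiding the cycle. */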
        if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
            /* handles rules R7, R8, R9, R10:
             * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
             */
            ir_node *irn, *in[2];
            ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
            ir_graph *irg = get_irn_irg(c1);

            /* It might happen that c1 and c2 have different modes, for
             * instance Is and Iu.
             * Handle this here.
             */
            if (mode_c1 != mode_c2) {
                if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
                    /* get the bigger one */
                    if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
                        c2 = new_r_Conv(block, c2, mode_c1);
                    else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
                        c1 = new_r_Conv(block, c1, mode_c2);
                } else {
                    /* Try to cast the real const */
                    if (c_c1 == REAL_CONSTANT)
                        c1 = new_r_Conv(block, c1, mode_c2);
                    else
                        c2 = new_r_Conv(block, c2, mode_c1);
                }
            }

            in[0] = c1;
            in[1] = c2;
            mode  = get_mode_from_ops(in[0], in[1]);
            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

            in[0] = t2;
            mode  = get_mode_from_ops(in[0], in[1]);
            irn   = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                 c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
                 t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
            /* In some rare cases it can really happen that we get the same
             * node back. This might happen in dead loops, where the Phi
             * nodes have already gone away. So check this.
             */
            if (n != irn) {
                exchange(n, irn);
                *node = irn;
                return 1;
            }
        }
    }
    if (get_irn_op(c1) == op) {
        ir_node *t = c1;
        c1 = t1;
        t1 = t;
    }
    if (get_irn_op(t1) == op) {
        ir_node *l = get_binop_left(t1);
        ir_node *r = get_binop_right(t1);
        const_class_t c_r;

        if (r == c1) {
            ir_node *t = l;
            l = r;
            r = t;
        }
        c_r = get_const_class(r, block);
        if (c_r != NO_CONSTANT) {
            /*
             * Beware: don't do the following op if a constant was
             * placed below, else we will fall into a loop.
             */
            return 0;
        }
        if (l == c1) {
            /* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
            ir_mode *mode_res = get_irn_mode(n);
            ir_mode *mode_c1  = get_irn_mode(c1);
            ir_graph *irg     = get_irn_irg(c1);
            ir_node *irn, *in[2];

            in[0] = c1;
            in[1] = c1;
            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode_c1, 2, in));

            in[0] = r;
            irn   = optimize_node(new_ir_node(NULL, irg, block, op, mode_res, 2, in));

            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                 c1, get_irn_opname(n), l, get_irn_opname(n), r,
                 r, get_irn_opname(n), c1, get_irn_opname(n), c1));

            if (n != irn) {
                exchange(n, irn);
                *node = irn;
                return 1;
            }
        }
    }
    return 0;
}  /* reassoc_commutative */
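
/* Illustration of rules R7-R10 (hypothetical nodes): Add(Const 3,
 * Add(Const 5, x)) is rebuilt as Add(x, Add(Const 3, Const 5));
 * optimize_node() then folds the inner Add, leaving Add(x, Const 8). */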
#define reassoc_Add  reassoc_commutative
#define reassoc_And  reassoc_commutative
#define reassoc_Or   reassoc_commutative
#define reassoc_Eor  reassoc_commutative
/**
 * Reassociate using the commutative law for Mul and the distributive law for Mul and Add/Sub:
 */
static int reassoc_Mul(ir_node **node)
{
    ir_node *n = *node;
    ir_node *add_sub, *c;
    ir_op *op;

    if (reassoc_commutative(&n))
        return 1;

    get_comm_Binop_ops(n, &add_sub, &c);
    op = get_irn_op(add_sub);

    /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
    if (op == op_Add || op == op_Sub) {
        ir_mode *mode = get_irn_mode(n);
        ir_node *irn, *block, *t1, *t2, *in[2];

        block = get_nodes_block(n);
        t1 = get_binop_left(add_sub);
        t2 = get_binop_right(add_sub);

        /* we can only apply the multiplication rules on integer arithmetic */
        if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
            ir_graph *irg = get_irn_irg(t1);
            in[0] = new_rd_Mul(NULL, block, c, t1, mode);
            in[1] = new_rd_Mul(NULL, block, c, t2, mode);

            irn = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
            /* In some cases it might happen that the new irn equals the old one, for
             * instance in:
             * (x - 1) * y == x * y - y
             * will be transformed back by a simpler optimization.
             * We could switch simple optimizations off, but this only happens if y
             * is a loop-invariant expression and it is not clear if the new form
             * is better.
             * So, we keep the old one.
             */
            if (irn != n) {
                DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
                     t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));

                exchange(n, irn);
                *node = irn;
                return 1;
            }
        }
    }
    return 0;
}  /* reassoc_Mul */
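
/* Illustration of the distributive rules (hypothetical nodes):
 * Mul(Const 3, Add(x, Const 1)) is rewritten above to
 * Add(Mul(Const 3, x), Mul(Const 3, Const 1)); the second Mul folds to
 * Const 3, so the result is Add(Mul(Const 3, x), Const 3). */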
/**
 * Reassociate Shl. We transform Shl(x, const) into Muls, if possible.
 */
static int reassoc_Shl(ir_node **node)
{
    ir_node *n = *node;
    ir_node *c = get_Shl_right(n);
    ir_node *x, *blk, *irn;
    ir_graph *irg;
    ir_mode *mode;
    ir_tarval *tv;

    if (! is_Const(c))
        return 0;

    x = get_Shl_left(n);
    mode = get_irn_mode(x);

    tv = get_mode_one(mode);
    tv = tarval_shl(tv, get_Const_tarval(c));

    if (tv == tarval_bad)
        return 0;

    blk = get_nodes_block(n);
    irg = get_irn_irg(blk);
    c   = new_r_Const(irg, tv);
    irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);

    exchange(n, irn);
    *node = irn;
    return 1;
}  /* reassoc_Shl */
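
/* Illustration (hypothetical nodes): for Shl(x, Const 3) in an integer mode,
 * tv becomes 1 << 3 = 8 and the node is rewritten to Mul(x, Const 8),
 * exposing the shift to the Mul reassociation rules above. */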
/**
 * The walker for the reassociation: collects all interesting nodes on the wait queue.
 */
static void wq_walker(ir_node *n, void *env)
{
    waitq *const wq = (waitq*)env;

    set_irn_link(n, NULL);
    if (!is_Block(n)) {
        waitq_put(wq, n);
        set_irn_link(n, wq);
    }
}  /* wq_walker */
/**
 * Performs the reassociation on all nodes in the wait queue.
 */
static void do_reassociation(waitq *const wq)
{
    int i, res, changed;
    ir_node *n;

    while (! waitq_empty(wq)) {
        n = (ir_node*)waitq_get(wq);
        set_irn_link(n, NULL);

        hook_reassociate(1);

        /* reassociation must run until a fixpoint is reached. */
        changed = 0;
        do {
            ir_op   *op   = get_irn_op(n);
            ir_mode *mode = get_irn_mode(n);

            res = 0;

            /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
            if (mode_is_float(mode) && get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic)
                break;

            if (op->ops.reassociate) {
                res = op->ops.reassociate(&n);

                changed |= res;
            }
        } while (res == 1);
        hook_reassociate(0);

        if (changed) {
            for (i = get_irn_arity(n) - 1; i >= 0; --i) {
                ir_node *pred = get_irn_n(n, i);

                if (get_irn_link(pred) != wq) {
                    waitq_put(wq, pred);
                    set_irn_link(pred, wq);
                }
            }
        }
    }
}  /* do_reassociation */
/**
 * Returns the earliest block where a and b are available.
 * Note that we know that a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead.
 */
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
{
    ir_node *blk_a = get_nodes_block(a);
    ir_node *blk_b = get_nodes_block(b);
    ir_node *res;

    /* if blk_a != blk_b, one must dominate the other */
    if (block_dominates(blk_a, blk_b))
        res = blk_b;
    else
        res = blk_a;

    if (res == get_irg_start_block(get_irn_irg(curr_blk)))
        return curr_blk;
    return res;
}  /* earliest_block */
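
/* Example: if the block of a dominates the block of b, both values are first
 * available in b's block, so that block is returned; the start block is
 * avoided (curr_blk is returned instead) so that newly created nodes are not
 * placed into it. */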
/**
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 *
 * Const, SymConst, Const + SymConst
 *
 * Handling SymConsts as constants might not be a good idea for all
 * architectures ...
 */
static int is_constant_expr(ir_node *irn)
{
    switch (get_irn_opcode(irn)) {
    case iro_Const:
    case iro_SymConst:
        return 1;

    case iro_Add: {
        ir_node *const l = get_Add_left(irn);
        if (!is_Const(l) && !is_SymConst(l))
            return 0;
        ir_node *const r = get_Add_right(irn);
        if (!is_Const(r) && !is_SymConst(r))
            return 0;
        return 1;
    }

    default:
        return 0;
    }
}  /* is_constant_expr */
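
/* Examples: Const 4, SymConst &glob and Add(SymConst &glob, Const 4), a
 * typical address computation (glob being a hypothetical global entity),
 * are constant expressions; Add(x, Const 4) with a non-constant x is not. */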
/**
 * Apply the distributive law for Mul and Add/Sub.
 */
static int reverse_rule_distributive(ir_node **node)
{
    ir_node *n     = *node;
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    ir_node *x, *blk, *curr_blk;
    ir_node *a, *b, *irn;
    ir_op *op;
    ir_mode *mode;
    dbg_info *dbg;

    op = get_irn_op(left);
    if (op != get_irn_op(right))
        return 0;
    if (op == op_Shl) {
        x = get_Shl_right(left);

        if (x == get_Shl_right(right)) {
            /* (a << x) +/- (b << x) ==> (a +/- b) << x */
            a = get_Shl_left(left);
            b = get_Shl_left(right);
            goto transform;
        }
    } else if (op == op_Mul) {
        x = get_Mul_left(left);

        if (x == get_Mul_left(right)) {
            /* (x * a) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_right(right);
            goto transform;
        } else if (x == get_Mul_right(right)) {
            /* (x * a) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_left(right);
            goto transform;
        }

        x = get_Mul_right(left);

        if (x == get_Mul_right(right)) {
            /* (a * x) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_left(right);
            goto transform;
        } else if (x == get_Mul_left(right)) {
            /* (a * x) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_right(right);
            goto transform;
        }
    }
    return 0;
transform:
    curr_blk = get_nodes_block(n);

    blk = earliest_block(a, b, curr_blk);

    dbg  = get_irn_dbg_info(n);
    mode = get_irn_mode(n);

    if (is_Add(n))
        irn = new_rd_Add(dbg, blk, a, b, mode);
    else
        irn = new_rd_Sub(dbg, blk, a, b, mode);

    blk = earliest_block(irn, x, curr_blk);

    if (op == op_Mul)
        irn = new_rd_Mul(dbg, blk, irn, x, mode);
    else
        irn = new_rd_Shl(dbg, blk, irn, x, mode);

    exchange(n, irn);
    *node = irn;
    return 1;
}  /* reverse_rule_distributive */
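
/* Illustration (hypothetical nodes): Add(Mul(a, x), Mul(b, x)) is rewritten
 * to Mul(Add(a, b), x), trading two multiplications for one; likewise
 * Sub(Shl(a, x), Shl(b, x)) becomes Shl(Sub(a, b), x). earliest_block()
 * places the new nodes where both operands are already available. */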
/**
 * Move constants towards the root.
 */
static int move_consts_up(ir_node **node)
{
    ir_node *n = *node;
    ir_op *op;
    ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
    ir_mode *mode, *ma, *mb;
    dbg_info *dbg;
    ir_graph *irg;

    l = get_binop_left(n);
    r = get_binop_right(n);

    /* check if one is already a constant expression */
    if (is_constant_expr(l) || is_constant_expr(r))
        return 0;

    dbg = get_irn_dbg_info(n);
    op  = get_irn_op(n);
    if (get_irn_op(l) == op) {
        /* (a .op. b) .op. r */
        a = get_binop_left(l);
        b = get_binop_right(l);

        if (is_constant_expr(a)) {
            /* (C .op. b) .op. r ==> (r .op. b) .op. C */
            c = a;
            a = r;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
            /* (a .op. C) .op. r ==> (a .op. r) .op. C */
            c = b;
            b = r;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        }
    }
    if (get_irn_op(r) == op) {
        /* l .op. (a .op. b) */
        a = get_binop_left(r);
        b = get_binop_right(r);

        if (is_constant_expr(a)) {
            /* l .op. (C .op. b) ==> (l .op. b) .op. C */
            c = a;
            a = l;
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
            /* l .op. (a .op. C) ==> (a .op. l) .op. C */
            c = b;
            b = l;
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
            goto transform;
        }
    }
    return 0;

transform:
    /* In some cases a and b might both be of different integer modes, and c a SymConst.
     * In that case we could either
     * 1.) cast into unsigned mode
     * 2.) ignore it
     * we implement the second here
     */
    ma = get_irn_mode(a);
    mb = get_irn_mode(b);
    if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
        return 0;

    /* check if (a .op. b) can be calculated in the same block as the old instruction */
    if (! block_dominates(get_nodes_block(a), blk))
        return 0;
    if (! block_dominates(get_nodes_block(b), blk))
        return 0;

    in[0] = a;
    in[1] = b;

    mode  = get_mode_from_ops(a, b);
    irg   = get_irn_irg(blk);
    in[0] = irn = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

    /* beware: optimize_node might have changed the opcode, check again */
    if (is_Add(irn) || is_Sub(irn)) {
        reverse_rule_distributive(&in[0]);
    }
    in[1] = c;

    mode = get_mode_from_ops(in[0], in[1]);
    irn  = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

    exchange(n, irn);
    *node = irn;
    return 1;
}  /* move_consts_up */
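
/* Illustration (hypothetical nodes): Add(Add(x, Const 3), y) is rebuilt as
 * Add(Add(x, y), Const 3). If the parent of this node is again an Add with a
 * constant operand, the two constants meet at the root and can be folded by
 * optimize_node(). */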
/**
 * Apply the rules in reverse order, removing code that was not collapsed.
 */
static void reverse_rules(ir_node *node, void *env)
{
    (void)env;

    ir_graph *irg  = get_irn_irg(node);
    ir_mode  *mode = get_irn_mode(node);
    int res;

    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_strict_algebraic)
        return;

    do {
        ir_op *op = get_irn_op(node);

        res = 0;
        if (is_op_commutative(op)) {
            res = move_consts_up(&node);
        }
        /* beware: move_consts_up might have changed the opcode, check again */
        if (is_Add(node) || is_Sub(node)) {
            res = reverse_rule_distributive(&node);
        }
    } while (res);
}  /* reverse_rules */
/*
 * do the reassociation
 */
void optimize_reassociation(ir_graph *irg)
{
    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "Reassociation needs pinned graph to work properly");

    assure_irg_properties(irg,
        IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
        | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);

    waitq *const wq = new_waitq();

    /* disable some optimizations while reassoc is running to prevent endless loops */
    set_reassoc_running(1);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, wq_walker, wq);
    do_reassociation(wq);

    /* reverse those rules that do not result in collapsed constants */
    irg_walk_graph(irg, NULL, reverse_rules, NULL);

    set_reassoc_running(0);

    del_waitq(wq);

    confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
}  /* optimize_reassociation */
/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
{
    return def_graph_pass(name ? name : "reassoc", optimize_reassociation);
}  /* optimize_reassociation_pass */
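
/* Usage sketch (illustrative, not part of the original sources): given a
 * fully constructed, pinned ir_graph, the optimization can be invoked
 * directly or wrapped into a pass:
 *
 *     optimize_reassociation(irg);
 *
 *     ir_graph_pass_t *pass = optimize_reassociation_pass(NULL);
 *     // then run the pass via a pass manager of your choice
 */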
static void register_node_reassoc_func(ir_op *op, reassociate_func func)
{
    op->ops.reassociate = func;
}

void ir_register_reassoc_node_ops(void)
{
    register_node_reassoc_func(op_Mul, reassoc_Mul);
    register_node_reassoc_func(op_Add, reassoc_Add);
    register_node_reassoc_func(op_Sub, reassoc_Sub);
    register_node_reassoc_func(op_And, reassoc_And);
    register_node_reassoc_func(op_Or,  reassoc_Or);
    register_node_reassoc_func(op_Eor, reassoc_Eor);
    register_node_reassoc_func(op_Shl, reassoc_Shl);
}
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
}  /* firm_init_reassociation */