/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Reassociation
 * @author  Michael Beck
 */
#include "config.h"

#include "iroptimize.h"
#include "iropt_t.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "irgwalk.h"
#include "irouts.h"
#include "reassoc_t.h"
#include "irloop.h"
#include "pdeq.h"
#include "obst.h"
#include "debug.h"
#include "irpass.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

typedef struct walker_t {
	int       changes; /**< set, if a reassociation took place */
	ir_graph *irg;     /**< the graph being processed */
	waitq    *wq;      /**< a wait queue */
} walker_t;
typedef enum {
	NO_CONSTANT   = 0, /**< node is not constant */
	REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
	REGION_CONST  = 4  /**< node is a constant expression in the current context,
	                        use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
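/*
 * With these values, class_a + 2*class_b yields a distinct case label for
 * every (class_a, class_b) pair in get_comm_Binop_ops(), e.g.
 *   REAL_CONSTANT + 2*REAL_CONSTANT = 3,
 *   REGION_CONST  + 2*NO_CONSTANT   = 4,
 *   REAL_CONSTANT + 2*REGION_CONST  = 9.
 * With REGION_CONST = 2 instead, REGION_CONST + 2*NO_CONSTANT and
 * NO_CONSTANT + 2*REAL_CONSTANT would both evaluate to 2.
 */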
/**
 * Returns whether a node is constant, i.e. is a Const node or
 * is loop invariant (called a region constant).
 *
 * @param n      the node to be checked for constant
 * @param block  a block that might be in a loop
 */
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
{
	if (is_Const(n))
		return REAL_CONSTANT;

	/* constant nodes which can't be folded are region constants */
	if (is_irn_constlike(n))
		return REGION_CONST;

	/*
	 * Beware: Bad nodes are always loop-invariant, but
	 * cannot be handled in later code, so filter them here.
	 */
	if (! is_Bad(n) && is_loop_invariant(n, block))
		return REGION_CONST;

	return NO_CONSTANT;
}  /* get_const_class */
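/*
 * Example: a Const 5 is a REAL_CONSTANT; an address constant such as a
 * SymConst is constlike but not foldable, hence a REGION_CONST; and a value
 * computed outside the loop containing block is a REGION_CONST as well.
 */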
/**
 * Returns the operands of a commutative binop; if one operand is
 * a region constant, it is returned as the second one.
 *
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
 */
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
	ir_node *op_a  = get_binop_left(binop);
	ir_node *op_b  = get_binop_right(binop);
	ir_node *block = get_nodes_block(binop);
	int class_a = get_const_class(op_a, block);
	int class_b = get_const_class(op_b, block);

	assert(is_op_commutative(get_irn_op(binop)));

	switch (class_a + 2*class_b) {
	case REAL_CONSTANT + 2*REAL_CONSTANT:
		/* if both are constants, one might be a
		 * pointer constant like NULL, return the other
		 */
		if (mode_is_reference(get_irn_mode(op_a))) {
			*a = op_a;
			*c = op_b;
		} else {
			*a = op_b;
			*c = op_a;
		}
		break;
	case REAL_CONSTANT + 2*NO_CONSTANT:
	case REAL_CONSTANT + 2*REGION_CONST:
	case REGION_CONST  + 2*NO_CONSTANT:
		*a = op_b;
		*c = op_a;
		break;
	default:
		*a = op_a;
		*c = op_b;
		break;
	}
}  /* get_comm_Binop_ops */
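/*
 * Example: for Add(x, Const 3) this returns *a = x and *c = Const 3.
 * For Add(SymConst, Const 3), where both operands are constlike, the real
 * Const is preferred as *c, so that it can still be folded later.
 */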
/**
 * Reassociate a Sub: x - c = x + (-c)
 */
static int reassoc_Sub(ir_node **in)
{
	ir_node *n     = *in;
	ir_node *right = get_Sub_right(n);
	ir_mode *rmode = get_irn_mode(right);
	ir_node *block;

	/* cannot handle SubIs(P, P) */
	if (mode_is_reference(rmode))
		return 0;

	block = get_nodes_block(n);

	/*
	 * convert x - c => x + (-c)
	 */
	if (get_const_class(right, block) == REAL_CONSTANT) {
		ir_node *left = get_Sub_left(n);
		ir_mode *mode;
		dbg_info *dbi;
		ir_node *irn;

		switch (get_const_class(left, block)) {
		case REAL_CONSTANT:
			irn = optimize_in_place(n);
			if (irn != n) {
				exchange(n, irn);
				*in = irn;
				return 1;
			}
			return 0;
		case NO_CONSTANT:
			break;
		default:
			/* already constant, nothing to do */
			return 0;
		}

		mode = get_irn_mode(n);
		dbi  = get_irn_dbg_info(n);

		/* Beware of SubP(P, Is) */
		irn = new_rd_Minus(dbi, block, right, rmode);
		irn = new_rd_Add(dbi, block, left, irn, mode);

		DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
			get_Sub_left(n), right, get_Sub_left(n), right));

		exchange(n, irn);
		*in = irn;
		return 1;
	}
	return 0;
}  /* reassoc_Sub */
/** Retrieve a mode from the operands. We need this, because
 * Add and Sub are allowed to operate on (P, Is).
 */
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
{
	ir_mode *m1, *m2;

	m1 = get_irn_mode(op1);
	if (mode_is_reference(m1))
		return m1;

	m2 = get_irn_mode(op2);
	if (mode_is_reference(m2))
		return m2;

	assert(m1 == m2);
	return m1;
}  /* get_mode_from_ops */
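/*
 * Example: for an AddP(p, off) with p of mode P and off of mode Is, this
 * returns P, the mode the resulting node must have.
 */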
#ifndef NEW_REASSOC

/**
 * Reassociate a commutative Binop.
 *
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
 */
static int reassoc_commutative(ir_node **node)
{
	ir_node *n     = *node;
	ir_op   *op    = get_irn_op(n);
	ir_node *block = get_nodes_block(n);
	ir_node *t1, *c1;

	get_comm_Binop_ops(n, &t1, &c1);

	if (get_irn_op(t1) == op) {
		ir_node *t2, *c2;
		const_class_t c_c1, c_c2, c_t2;

		get_comm_Binop_ops(t1, &t2, &c2);

		/* do not optimize Bad nodes, will fail later */
		if (is_Bad(t2))
			return 0;

		c_c1 = get_const_class(c1, block);
		c_c2 = get_const_class(c2, block);
		c_t2 = get_const_class(t2, block);

		if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
		     ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
			/* All three are constant and either all are constant expressions
			 * or two of them are:
			 * then applying this rule would lead into a cycle.
			 *
			 * Note that if t2 is a constant, so is c2, hence we save one test.
			 */
			return 0;
		}

		if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
			/* handles rules R7, R8, R9, R10:
			 * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
			 */
			ir_node *irn, *in[2];
			ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
			ir_graph *irg = get_irn_irg(c1);

			/* It might happen that c1 and c2 have different modes, for
			 * instance Is and Iu.
			 * Handle this here.
			 */
			if (mode_c1 != mode_c2) {
				if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
					/* get the bigger one */
					if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
						c2 = new_r_Conv(block, c2, mode_c1);
					else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
						c1 = new_r_Conv(block, c1, mode_c2);
					else {
						/* Try to cast the real const */
						if (c_c1 == REAL_CONSTANT)
							c1 = new_r_Conv(block, c1, mode_c2);
						else
							c2 = new_r_Conv(block, c2, mode_c1);
					}
				}
			}

			in[0] = c1;
			in[1] = c2;

			mode  = get_mode_from_ops(in[0], in[1]);
			in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
			in[0] = t2;

			mode = get_mode_from_ops(in[0], in[1]);
			irn  = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
			     c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
			     t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
			/*
			 * In some rare cases it can really happen that we get the same
			 * node back. This might happen in dead loops, where the Phi
			 * nodes are already gone. So check this.
			 */
			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	if (get_irn_op(c1) == op) {
		ir_node *t = c1;
		c1 = t1;
		t1 = t;
	}
	if (get_irn_op(t1) == op) {
		ir_node *l = get_binop_left(t1);
		ir_node *r = get_binop_right(t1);
		const_class_t c_r;

		if (r == c1) {
			ir_node *t = r;
			r = l;
			l = t;
		}
		c_r = get_const_class(r, block);
		if (c_r != NO_CONSTANT) {
			/*
			 * Beware: don't do the following op if a constant was
			 * placed below, else we will fall into a loop.
			 */
			return 0;
		}

		if (l == c1) {
			/* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
			ir_mode *mode_res = get_irn_mode(n);
			ir_mode *mode_c1  = get_irn_mode(c1);
			ir_graph *irg     = get_irn_irg(c1);
			ir_node *irn, *in[2];

			in[0] = c1;
			in[1] = c1;

			in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode_c1, 2, in));
			in[0] = r;

			irn = optimize_node(new_ir_node(NULL, irg, block, op, mode_res, 2, in));

			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
				c1, get_irn_opname(n), l, get_irn_opname(n), r,
				r, get_irn_opname(n), c1, get_irn_opname(n), c1));

			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	return 0;
}  /* reassoc_commutative */
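/*
 * Example for rules R7-R10: 3 + (5 + x) is rebuilt as x + (3 + 5), which
 * optimize_node() folds to x + 8.  The check above rejects the case where
 * two of the three operands are region constants and the third is a Const,
 * because rotating such a tree never terminates.
 */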
#else  /* NEW_REASSOC */

static ir_op   *commutative_op;
static ir_node *commutative_block;
static struct obstack commutative_args;

static void collect_args(ir_node *node)
{
	ir_node *left  = get_binop_left(node);
	ir_node *right = get_binop_right(node);

	if (get_irn_op(left) == commutative_op
			&& (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
		collect_args(left);
	} else {
		obstack_ptr_grow(&commutative_args, left);
	}

	if (get_irn_op(right) == commutative_op
			&& (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
		collect_args(right);
	} else {
		obstack_ptr_grow(&commutative_args, right);
	}

#ifndef NDEBUG
	{
		ir_mode *mode = get_irn_mode(node);
		if (is_Add(node) && mode_is_reference(mode)) {
			assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
		} else {
			assert(get_irn_mode(left) == mode);
			assert(get_irn_mode(right) == mode);
		}
	}
#endif
}

static int compare_nodes(const ir_node *node1, const ir_node *node2)
{
	const_class_t class1 = get_const_class(node1, commutative_block);
	const_class_t class2 = get_const_class(node2, commutative_block);

	if (class1 == class2)
		return 0;
	// return get_irn_idx(node1) - get_irn_idx(node2);

	if (class1 < class2)
		return -1;

	assert(class1 > class2);
	return 1;
}

static int compare_node_ptr(const void *e1, const void *e2)
{
	const ir_node *node1 = *((const ir_node *const*) e1);
	const ir_node *node2 = *((const ir_node *const*) e2);
	return compare_nodes(node1, node2);
}

static int reassoc_commutative(ir_node **n)
{
	ir_node  *node = *n;
	int       n_args;
	ir_node **args;
	ir_mode  *mode;
	ir_node  *last;
	int       i;

	commutative_op    = get_irn_op(node);
	commutative_block = get_nodes_block(node);

	/* collect all nodes with same op type */
	collect_args(node);

	n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
	args   = obstack_finish(&commutative_args);

	/* shortcut: in most cases there's nothing to do */
	if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
		obstack_free(&commutative_args, args);
		return 0;
	}

	/* sort the arguments */
	qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);

	/* build a right-deep tree from the sorted arguments */
	last = args[n_args-1];
	mode = get_irn_mode(last);
	for (i = n_args-2; i >= 0; --i) {
		ir_mode  *mode_right;
		ir_node  *new_node;
		ir_node  *in[2];
		ir_graph *irg = get_irn_irg(last);

		in[0] = last;
		in[1] = args[i];

		/* AddP violates the assumption that all modes in args are equal...
		 * we need some hacks to cope with this */
		mode_right = get_irn_mode(in[1]);
		if (mode_is_reference(mode_right)) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			mode = get_irn_mode(in[1]);
		}
		if (mode_right != mode) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			in[1] = new_r_Conv(commutative_block, in[1], mode);
		}

		/* TODO: produce useful debug info! */
		new_node = new_ir_node(NULL, irg, commutative_block,
		                       commutative_op, mode, 2, in);
		new_node = optimize_node(new_node);
		last     = new_node;
	}

	/* CSE often returns the old node again, only exchange if needed */
	obstack_free(&commutative_args, args);
	if (last != node) {
		exchange(node, last);
		*n = last;
		return 1;
	}
	return 0;
}

#endif  /* NEW_REASSOC */
#define reassoc_Add  reassoc_commutative
#define reassoc_And  reassoc_commutative
#define reassoc_Or   reassoc_commutative
#define reassoc_Eor  reassoc_commutative
/**
 * Reassociate using the commutative law for Mul and the distributive law for Mul and Add/Sub:
 */
static int reassoc_Mul(ir_node **node)
{
	ir_node *n = *node;
	ir_node *add_sub, *c;
	ir_op *op;

	if (reassoc_commutative(&n))
		return 1;

	get_comm_Binop_ops(n, &add_sub, &c);
	op = get_irn_op(add_sub);

	/* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
	if (op == op_Add || op == op_Sub) {
		ir_mode *mode = get_irn_mode(n);
		ir_node *irn, *block, *t1, *t2, *in[2];

		block = get_nodes_block(n);
		t1 = get_binop_left(add_sub);
		t2 = get_binop_right(add_sub);

		/* we can apply the multiplication rules on integer arithmetic only */
		if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
			ir_graph *irg = get_irn_irg(t1);
			in[0] = new_rd_Mul(NULL, block, c, t1, mode);
			in[1] = new_rd_Mul(NULL, block, c, t2, mode);

			irn = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

			/* In some cases it might happen that the new irn is equal to the old
			 * one, for instance because
			 * (x - 1) * y == x * y - y
			 * will be transformed back by simpler optimizations.
			 * We could switch those simple optimizations off, but this only
			 * happens if y is a loop-invariant expression, and then it is not
			 * clear if the new form is better.
			 * So, we keep the old one in that case.
			 */
			DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
				t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));

			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	return 0;
}  /* reassoc_Mul */
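/*
 * Example: (t1 + t2) * c is rewritten to (c * t1) + (c * t2); if c and one
 * of the operands are constant, the new Mul collapses by constant folding.
 */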
/**
 * Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
 */
static int reassoc_Shl(ir_node **node)
{
	ir_node *n = *node;
	ir_node *c = get_Shl_right(n);
	ir_node *x, *blk, *irn;
	ir_graph *irg;
	ir_mode *mode;
	ir_tarval *tv;

	if (! is_Const(c))
		return 0;

	x = get_Shl_left(n);
	mode = get_irn_mode(x);

	tv = get_mode_one(mode);
	tv = tarval_shl(tv, get_Const_tarval(c));

	if (tv == tarval_bad)
		return 0;

	blk = get_nodes_block(n);
	irg = get_irn_irg(blk);
	c   = new_r_Const(irg, tv);
	irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);

	if (irn != n) {
		exchange(n, irn);
		*node = irn;
		return 1;
	}
	return 0;
}  /* reassoc_Shl */
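/*
 * Example: x << 3 becomes x * 8, which exposes the multiplication to the
 * commutative and distributive rules above.
 */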
/**
 * The walker for the reassociation: puts all non-Block nodes into the wait queue.
 */
static void wq_walker(ir_node *n, void *env)
{
	walker_t *wenv = (walker_t*)env;

	set_irn_link(n, NULL);
	if (!is_Block(n)) {
		waitq_put(wenv->wq, n);
		set_irn_link(n, wenv->wq);
	}
}  /* wq_walker */
/**
 * Processes the wait queue, reassociating every node until a fixpoint is reached.
 */
static void do_reassociation(walker_t *wenv)
{
	int i, res, changed;
	ir_node *n;

	while (! waitq_empty(wenv->wq)) {
		n = (ir_node*)waitq_get(wenv->wq);
		set_irn_link(n, NULL);

		/* reassociation must run until a fixpoint is reached. */
		changed = 0;
		do {
			ir_op   *op   = get_irn_op(n);
			ir_mode *mode = get_irn_mode(n);

			res = 0;

			/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
			if (mode_is_float(mode) && get_irg_fp_model(wenv->irg) & fp_strict_algebraic)
				break;

			if (op->ops.reassociate) {
				res = op->ops.reassociate(&n);

				changed |= res;
			}
		} while (res == 1);

		wenv->changes |= changed;

		if (changed) {
			for (i = get_irn_arity(n) - 1; i >= 0; --i) {
				ir_node *pred = get_irn_n(n, i);

				if (get_irn_link(pred) != wenv->wq) {
					waitq_put(wenv->wq, pred);
					set_irn_link(pred, wenv->wq);
				}
			}
		}
	}
}  /* do_reassociation */
/**
 * Returns the earliest block where both a and b are available.
 * Note that we know that a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead.
 */
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
{
	ir_node *blk_a = get_nodes_block(a);
	ir_node *blk_b = get_nodes_block(b);
	ir_node *res;

	/* if blk_a != blk_b, one must dominate the other */
	if (block_dominates(blk_a, blk_b))
		res = blk_b;
	else
		res = blk_a;
	if (res == get_irg_start_block(get_irn_irg(curr_blk)))
		return curr_blk;
	return res;
}  /* earliest_block */
/**
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 *
 * Const, SymConst, Const + SymConst
 *
 * Handling SymConsts as constant might not be a good idea for all
 * architectures ...
 */
static int is_constant_expr(ir_node *irn)
{
	ir_op *op;

	switch (get_irn_opcode(irn)) {
	case iro_Const:
	case iro_SymConst:
		return 1;
	case iro_Add:
		op = get_irn_op(get_Add_left(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		op = get_irn_op(get_Add_right(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		return 1;
	default:
		return 0;
	}
}  /* is_constant_expr */
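/*
 * Example: Const 4, a SymConst address and Add(SymConst, Const 4) are all
 * constant expressions; Add(x, Const 4) is not.
 */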
/**
 * Apply the distributive law for Mul and Add/Sub.
 */
static int reverse_rule_distributive(ir_node **node)
{
	ir_node *n     = *node;
	ir_node *left  = get_binop_left(n);
	ir_node *right = get_binop_right(n);
	ir_node *x, *blk, *curr_blk;
	ir_node *a, *b, *irn;
	ir_op *op;
	ir_mode *mode;
	dbg_info *dbg;

	op = get_irn_op(left);
	if (op != get_irn_op(right))
		return 0;

	if (op == op_Shl) {
		x = get_Shl_right(left);

		if (x == get_Shl_right(right)) {
			/* (a << x) +/- (b << x) ==> (a +/- b) << x */
			a = get_Shl_left(left);
			b = get_Shl_left(right);
			goto transform;
		}
	} else if (op == op_Mul) {
		x = get_Mul_left(left);

		if (x == get_Mul_left(right)) {
			/* (x * a) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_right(right);
			goto transform;
		} else if (x == get_Mul_right(right)) {
			/* (x * a) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_left(right);
			goto transform;
		}

		x = get_Mul_right(left);

		if (x == get_Mul_right(right)) {
			/* (a * x) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_left(right);
			goto transform;
		} else if (x == get_Mul_left(right)) {
			/* (a * x) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_right(right);
			goto transform;
		}
	}
	return 0;

transform:
	curr_blk = get_nodes_block(n);

	blk = earliest_block(a, b, curr_blk);

	dbg  = get_irn_dbg_info(n);
	mode = get_irn_mode(n);

	if (is_Add(n))
		irn = new_rd_Add(dbg, blk, a, b, mode);
	else
		irn = new_rd_Sub(dbg, blk, a, b, mode);

	blk = earliest_block(irn, x, curr_blk);

	if (op == op_Mul)
		irn = new_rd_Mul(dbg, blk, irn, x, mode);
	else
		irn = new_rd_Shl(dbg, blk, irn, x, mode);

	exchange(n, irn);
	*node = irn;
	return 1;
}  /* reverse_rule_distributive */
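/*
 * Example: (a * x) + (b * x) becomes (a + b) * x, saving one multiplication;
 * earliest_block() hoists the new nodes as high as dominance allows.
 */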
/**
 * Move constants towards the root.
 */
static int move_consts_up(ir_node **node)
{
	ir_node *n = *node;
	ir_op *op;
	ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
	ir_mode *mode, *ma, *mb;
	dbg_info *dbg;
	ir_graph *irg;

	l = get_binop_left(n);
	r = get_binop_right(n);

	/* check if one is already a constant expression */
	if (is_constant_expr(l) || is_constant_expr(r))
		return 0;

	dbg = get_irn_dbg_info(n);
	op  = get_irn_op(n);
	if (get_irn_op(l) == op) {
		/* (a .op. b) .op. r */
		a = get_binop_left(l);
		b = get_binop_right(l);

		if (is_constant_expr(a)) {
			/* (C .op. b) .op. r ==> (r .op. b) .op. C */
			c = a;
			a = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* (a .op. C) .op. r ==> (a .op. r) .op. C */
			c = b;
			b = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		}
	}
	if (get_irn_op(r) == op) {
		/* l .op. (a .op. b) */
		a = get_binop_left(r);
		b = get_binop_right(r);

		if (is_constant_expr(a)) {
			/* l .op. (C .op. b) ==> (l .op. b) .op. C */
			c = a;
			a = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* l .op. (a .op. C) ==> (a .op. l) .op. C */
			c = b;
			b = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		}
	}
	return 0;

transform:
	/* In some cases a and b might have different integer modes, and c might
	 * be a SymConst. In that case we could either
	 * 1.) cast into unsigned mode
	 * 2.) ignore it
	 * We implement the second here.
	 */
	ma = get_irn_mode(a);
	mb = get_irn_mode(b);
	if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
		return 0;

	/* check if (a .op. b) can be calculated in the same block as the old instruction */
	if (! block_dominates(get_nodes_block(a), blk))
		return 0;
	if (! block_dominates(get_nodes_block(b), blk))
		return 0;

	in[0] = a;
	in[1] = b;

	mode = get_mode_from_ops(a, b);
	irg  = get_irn_irg(blk);
	in[0] = irn = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

	/* beware: optimize_node might have changed the opcode, check again */
	if (is_Add(irn) || is_Sub(irn)) {
		reverse_rule_distributive(&in[0]);
	}
	in[1] = c;

	mode = get_mode_from_ops(in[0], in[1]);
	irn  = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

	exchange(n, irn);
	*node = irn;
	return 1;
}  /* move_consts_up */
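/*
 * Example: (x + 1) + y becomes (x + y) + 1.  Applied repeatedly by
 * reverse_rules(), the constants of a whole Add tree collect at the root,
 * where optimize_node() can fold them.
 */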
/**
 * Apply the rules in reverse order, removing code that was not collapsed.
 */
static void reverse_rules(ir_node *node, void *env)
{
	walker_t *wenv = (walker_t*)env;
	ir_graph *irg  = get_irn_irg(node);
	ir_mode  *mode = get_irn_mode(node);
	int res;

	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
	if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_strict_algebraic)
		return;

	do {
		ir_op *op = get_irn_op(node);

		res = 0;
		if (is_op_commutative(op)) {
			wenv->changes |= res = move_consts_up(&node);
		}
		/* beware: move_consts_up might have changed the opcode, check again */
		if (is_Add(node) || is_Sub(node)) {
			wenv->changes |= res = reverse_rule_distributive(&node);
		}
	} while (res);
}  /* reverse_rules */
/*
 * do the reassociation
 */
void optimize_reassociation(ir_graph *irg)
{
	walker_t env;

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
		"Reassociation needs pinned graph to work properly");

	/* we use dominance to detect dead blocks */
	assure_doms(irg);

#ifdef NEW_REASSOC
	assure_irg_outs(irg);
	obstack_init(&commutative_args);
#endif

	/*
	 * Calculate loop info, so we could identify loop-invariant
	 * code and treat it like a constant.
	 */
	assure_loopinfo(irg);

	env.changes = 0;
	env.irg     = irg;
	env.wq      = new_waitq();

	/* disable some optimizations while reassoc is running to prevent endless loops */
	set_reassoc_running(1);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, wq_walker, &env);
	do_reassociation(&env);

	/* reverse those rules that do not result in collapsed constants */
	irg_walk_graph(irg, NULL, reverse_rules, &env);

	set_reassoc_running(0);

#ifdef NEW_REASSOC
	obstack_free(&commutative_args, NULL);
#endif

	del_waitq(env.wq);
}  /* optimize_reassociation */
/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
{
	return def_graph_pass(name ? name : "reassoc", optimize_reassociation);
}  /* optimize_reassociation_pass */
/* Sets the default reassociation operation for an ir_op_ops. */
ir_op_ops *firm_set_default_reassoc(unsigned code, ir_op_ops *ops)
{
#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break

	switch (code) {
	CASE(Mul);
	CASE(Add);
	CASE(Sub);
	CASE(And);
	CASE(Or);
	CASE(Eor);
	CASE(Shl);
	default:
		break;
	}

	return ops;
#undef CASE
}  /* firm_set_default_reassoc */
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
}  /* firm_init_reassociation */