/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   Reassociation
 * @author  Michael Beck
 */
#include "iroptimize.h"
#include "irgraph_t.h"
#include "iropt_dbg.h"
#include "reassoc_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef enum {
    NO_CONSTANT   = 0,  /**< node is not constant */
    REAL_CONSTANT = 1,  /**< node is a Const that is suitable for constant folding */
    REGION_CONST  = 4   /**< node is a constant expression in the current context,
                             use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
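/* A note on the encoding (illustrative, not from the original source):
 * get_comm_Binop_ops() below dispatches on class_a + 2*class_b.  With the
 * values {0, 1, 4} every pair of classes yields a distinct sum
 * (0, 1, 2, 3, 4, 6, 8, 9, 12), e.g. REAL_CONSTANT + 2*REGION_CONST = 9.
 * Had REGION_CONST been 2, the pairs (REGION_CONST, NO_CONSTANT) and
 * (NO_CONSTANT, REAL_CONSTANT) would both encode to 2. */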
/**
 * Returns the constant class of a node, i.e. whether it is a real constant,
 * loop invariant (called a region constant), or not constant at all.
 *
 * @param n      the node to be checked
 * @param block  a block that might be in a loop
 */
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
{
    if (is_Const(n))
        return REAL_CONSTANT;

    /* constant nodes which can't be folded are region constants */
    if (is_irn_constlike(n))
        return REGION_CONST;

    /*
     * Beware: Bad nodes are always loop-invariant, but
     * cannot be handled in later code, so filter them here.
     */
    if (! is_Bad(n) && is_loop_invariant(n, block))
        return REGION_CONST;

    return NO_CONSTANT;
} /* get_const_class */
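/* Illustrative example (assumed, not from the original source): in a loop
 * body computing  a[i] = x * 2,  the Const 2 is a REAL_CONSTANT, while x,
 * if defined outside the loop, is a REGION_CONST within the loop's blocks:
 * it cannot be folded, but it may still be reassociated with other
 * loop-invariant values. */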
/**
 * Returns the operands of a commutative bin-op. If one operand is
 * a region constant, it is returned as the second one (*c).
 *
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
 */
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
    ir_node *op_a    = get_binop_left(binop);
    ir_node *op_b    = get_binop_right(binop);
    ir_node *block   = get_nodes_block(binop);
    int      class_a = get_const_class(op_a, block);
    int      class_b = get_const_class(op_b, block);

    assert(is_op_commutative(get_irn_op(binop)));
    switch (class_a + 2*class_b) {
    case REAL_CONSTANT + 2*REAL_CONSTANT:
        /* if both are constants, one might be a
         * pointer constant like NULL; return the other
         */
        if (mode_is_reference(get_irn_mode(op_a))) {
            *a = op_a; *c = op_b;
        } else {
            *a = op_b; *c = op_a;
        }
        break;
    case REAL_CONSTANT + 2*NO_CONSTANT:
    case REAL_CONSTANT + 2*REGION_CONST:
    case REGION_CONST  + 2*NO_CONSTANT:
        *a = op_b; *c = op_a;
        break;
    default:
        *a = op_a; *c = op_b;
        break;
    }
} /* get_comm_Binop_ops */
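/* Illustrative example (assumed): for Add(SymConst &g, Const 3) both
 * operands are constant, but the real constant has priority for the *c
 * slot, so *a = SymConst &g and *c = Const 3; a later combination with
 * another Const can then be folded. */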
/**
 * Reassociate a Sub: x - c = x + (-c)
 */
static int reassoc_Sub(ir_node **in)
{
    ir_node *n     = *in;
    ir_node *right = get_Sub_right(n);
    ir_mode *rmode = get_irn_mode(right);
    ir_node *block;
    /* cannot handle SubIs(P, P) */
    if (mode_is_reference(rmode))
        return 0;

    block = get_nodes_block(n);

    /*
     * convert x - c => x + (-c)
     */
    if (get_const_class(right, block) == REAL_CONSTANT) {
        ir_node  *left = get_Sub_left(n);
        ir_node  *irn;
        ir_mode  *mode;
        dbg_info *dbi;

        switch (get_const_class(left, block)) {
        case REAL_CONSTANT:
            /* both operands are constants: let constant folding do the work */
            irn = optimize_in_place(n);
            if (irn != n) {
                exchange(n, irn);
                *in = irn;
                return 1;
            }
            return 0;

        case REGION_CONST:
            /* already constant, nothing to do */
            return 0;

        case NO_CONSTANT:
            break;
        }
        mode = get_irn_mode(n);
        dbi  = get_irn_dbg_info(n);

        /* Beware of SubP(P, Is) */
        irn = new_rd_Minus(dbi, block, right, rmode);
        irn = new_rd_Add(dbi, block, left, irn, mode);

        DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
            get_Sub_left(n), right, get_Sub_left(n), right));

        exchange(n, irn);
        *in = irn;
        return 1;
    }
    return 0;
} /* reassoc_Sub */
/** Retrieve a mode from the operands. We need this, because
 * Add and Sub are allowed to operate on (P, Is).
 */
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
{
    ir_mode *m1, *m2;
    m1 = get_irn_mode(op1);
    if (mode_is_reference(m1))
        return m1;

    m2 = get_irn_mode(op2);
    if (mode_is_reference(m2))
        return m2;

    return m1;
} /* get_mode_from_ops */
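/* Illustrative example (assumed): for an Add(p, four) with p of a reference
 * mode P and four of integer mode Is, get_mode_from_ops() yields P, which
 * matches the mode such a pointer-arithmetic Add produces. */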
/**
 * Reassociate a commutative Binop.
 *
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
 */
static int reassoc_commutative(ir_node **node)
{
    ir_node *n     = *node;
    ir_op   *op    = get_irn_op(n);
    ir_node *block = get_nodes_block(n);
    ir_node *t1, *c1;

    get_comm_Binop_ops(n, &t1, &c1);
    if (get_irn_op(t1) == op) {
        ir_node *t2, *c2;
        const_class_t c_c1, c_c2, c_t2;

        get_comm_Binop_ops(t1, &t2, &c2);
        /* do not optimize Bad nodes, will fail later */
        if (is_Bad(t2))
            return 0;

        c_c1 = get_const_class(c1, block);
        c_c2 = get_const_class(c2, block);
        c_t2 = get_const_class(t2, block);
        if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
             ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
            /* All three are constant and either all are constant expressions
             * or two of them are:
             * then applying this rule would lead into a cycle.
             *
             * Note that if t2 is a constant so is c2, hence we save one test.
             */
            return 0;
        }
        if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
            /* handles rules R7, R8, R9, R10:
             * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
             */
            ir_node *irn, *in[2];
            ir_mode  *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
            ir_graph *irg = get_irn_irg(c1);

            /* It might happen that c1 and c2 have different modes, for
             * instance Is and Iu.
             */
            if (mode_c1 != mode_c2) {
                if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
                    /* get the bigger one */
                    if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
                        c2 = new_r_Conv(block, c2, mode_c1);
                    else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
                        c1 = new_r_Conv(block, c1, mode_c2);
                } else {
                    /* Try to cast the real const */
                    if (c_c1 == REAL_CONSTANT)
                        c1 = new_r_Conv(block, c1, mode_c2);
                    else
                        c2 = new_r_Conv(block, c2, mode_c1);
                }
            }
            in[0] = c1;
            in[1] = c2;

            mode  = get_mode_from_ops(in[0], in[1]);
            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

            in[0] = t2;
            mode  = get_mode_from_ops(in[0], in[1]);
            irn   = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
                t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
            /*
             * In some rare cases it can really happen that we get the same
             * node back. This might happen in dead loops, where the Phi
             * nodes have already gone away. So check this.
             */
            if (irn != n) {
                exchange(n, irn);
                *node = irn;
                return 1;
            }
        }
    }
    if (get_irn_op(c1) == op) {
        /* swap c1 and t1 */
        ir_node *t = c1;
        c1 = t1;
        t1 = t;
    }
    if (get_irn_op(t1) == op) {
        ir_node *l = get_binop_left(t1);
        ir_node *r = get_binop_right(t1);
        const_class_t c_r;

        if (r == c1) {
            ir_node *t = l;
            l = r;
            r = t;
        }
        c_r = get_const_class(r, block);
        if (c_r != NO_CONSTANT) {
            /*
             * Beware: don't do the following op if a constant was
             * placed below, else we will fall into a loop.
             */
            return 0;
        }
        if (l == c1) {
            /* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
            ir_mode  *mode_res = get_irn_mode(n);
            ir_mode  *mode_c1  = get_irn_mode(c1);
            ir_graph *irg      = get_irn_irg(c1);
            ir_node  *irn, *in[2];

            in[0] = c1;
            in[1] = c1;
            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode_c1, 2, in));

            in[0] = r;
            irn   = optimize_node(new_ir_node(NULL, irg, block, op, mode_res, 2, in));

            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                c1, get_irn_opname(n), l, get_irn_opname(n), r,
                r, get_irn_opname(n), c1, get_irn_opname(n), c1));

            exchange(n, irn);
            *node = irn;
            return 1;
        }
    }
    return 0;
} /* reassoc_commutative */
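/* Worked example for rules R7-R10 (illustrative, not from the original
 * source): for n = Add(Const 3, Add(Const 5, x)) we get c1 = 3,
 * t1 = Add(5, x), c2 = 5 and t2 = x.  The rewrite yields
 * Add(x, Add(3, 5)), and optimize_node() folds the inner Add to Const 8,
 * giving Add(x, 8). */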
#ifdef NEW_REASSOC

static ir_op   *commutative_op;
static ir_node *commutative_block;
static struct obstack commutative_args;
static void collect_args(ir_node *node)
{
    ir_node *left  = get_binop_left(node);
    ir_node *right = get_binop_right(node);

    if (get_irn_op(left) == commutative_op
            && (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
        collect_args(left);
    } else {
        obstack_ptr_grow(&commutative_args, left);
    }

    if (get_irn_op(right) == commutative_op
            && (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
        collect_args(right);
    } else {
        obstack_ptr_grow(&commutative_args, right);
    }

    ir_mode *mode = get_irn_mode(node);
    if (is_Add(node) && mode_is_reference(mode)) {
        assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
    } else {
        assert(get_irn_mode(left) == mode);
        assert(get_irn_mode(right) == mode);
    }
}
static int compare_nodes(const ir_node *node1, const ir_node *node2)
{
    const_class_t class1 = get_const_class(node1, commutative_block);
    const_class_t class2 = get_const_class(node2, commutative_block);

    if (class1 == class2)
        return 0;
    // return get_irn_idx(node1) - get_irn_idx(node2);

    if (class1 < class2)
        return -1;

    assert(class1 > class2);
    return 1;
}
static int compare_node_ptr(const void *e1, const void *e2)
{
    const ir_node *node1 = *((const ir_node *const*) e1);
    const ir_node *node2 = *((const ir_node *const*) e2);
    return compare_nodes(node1, node2);
}
static int reassoc_commutative(ir_node **n)
{
    ir_node  *node = *n;
    ir_node **args;
    ir_mode  *mode;
    ir_node  *last;
    int       n_args;
    int       i;

    commutative_op    = get_irn_op(node);
    commutative_block = get_nodes_block(node);

    /* collect all nodes with same op type */
    collect_args(node);

    n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
    args   = obstack_finish(&commutative_args);
    /* shortcut: in most cases there's nothing to do */
    if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
        obstack_free(&commutative_args, args);
        return 0;
    }
    /* sort the arguments */
    qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);

    last = args[n_args-1];
    mode = get_irn_mode(last);
    for (i = n_args-2; i >= 0; --i) {
        ir_mode  *mode_right;
        ir_node  *new_node;
        ir_node  *in[2];
        ir_graph *irg = get_irn_irg(last);

        in[0] = last;
        in[1] = args[i];

        /* AddP violates the assumption that all modes in args are equal...
         * we need some hacks to cope with this */
        mode_right = get_irn_mode(in[1]);
        if (mode_is_reference(mode_right)) {
            assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
            mode = get_irn_mode(in[1]);
        }
        if (mode_right != mode) {
            assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
            in[1] = new_r_Conv(commutative_block, in[1], mode);
        }

        /* TODO: produce useful debug info! */
        new_node = new_ir_node(NULL, irg, commutative_block,
                               commutative_op, mode, 2, in);
        new_node = optimize_node(new_node);
        last     = new_node;
    }
    obstack_free(&commutative_args, args);

    /* CSE often returns the old node again, only exchange if needed */
    if (last != node) {
        exchange(node, last);
        *n = last;
        return 1;
    }
    return 0;
}

#endif
#define reassoc_Add  reassoc_commutative
#define reassoc_And  reassoc_commutative
#define reassoc_Or   reassoc_commutative
#define reassoc_Eor  reassoc_commutative
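/* Worked example for the distributive rules below (illustrative, not from
 * the original source): n = Mul(Add(x, Const 1), Const 3) gives
 * add_sub = Add(x, 1) and c = 3; the rewrite builds
 * Add(Mul(3, x), Mul(3, 1)), and constant folding reduces Mul(3, 1) to 3,
 * yielding Add(Mul(3, x), 3). */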
/**
 * Reassociate using the commutative law for Mul and the distributive law for
 * Mul and Add/Sub.
 */
static int reassoc_Mul(ir_node **node)
{
    ir_node *n = *node;
    ir_node *add_sub, *c;
    ir_op   *op;

    if (reassoc_commutative(&n))
        return 1;

    get_comm_Binop_ops(n, &add_sub, &c);
    op = get_irn_op(add_sub);
    /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
    if (op == op_Add || op == op_Sub) {
        ir_mode *mode = get_irn_mode(n);
        ir_node *irn, *block, *t1, *t2, *in[2];

        block = get_nodes_block(n);
        t1 = get_binop_left(add_sub);
        t2 = get_binop_right(add_sub);

        /* we can only apply the multiplication rules on integer arithmetic */
        if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
            ir_graph *irg = get_irn_irg(t1);
            in[0] = new_rd_Mul(NULL, block, c, t1, mode);
            in[1] = new_rd_Mul(NULL, block, c, t2, mode);

            irn = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
            /* In some cases it might happen that the new irn is equal to the old
             * one, for example because
             *   (x - 1) * y == x * y - y
             * will be transformed back by simpler optimization.
             * We could switch simple optimizations off, but this only happens iff y
             * is a loop-invariant expression and it is not clear if the new form
             * is better.
             * So, we keep the old one. */
            if (irn != n) {
                DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
                    t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
                exchange(n, irn);
                *node = irn;
                return 1;
            }
        }
    }
    return 0;
} /* reassoc_Mul */
/**
 * Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
 */
static int reassoc_Shl(ir_node **node)
{
    ir_node   *n = *node;
    ir_node   *c = get_Shl_right(n);
    ir_node   *x, *blk, *irn;
    ir_graph  *irg;
    ir_mode   *mode;
    ir_tarval *tv;

    if (! is_Const(c))
        return 0;

    x    = get_Shl_left(n);
    mode = get_irn_mode(x);

    tv = get_mode_one(mode);
    tv = tarval_shl(tv, get_Const_tarval(c));

    if (tv == tarval_bad)
        return 0;

    blk = get_nodes_block(n);
    irg = get_irn_irg(blk);
    c   = new_r_Const(irg, tv);
    irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);

    if (irn != n) {
        exchange(n, irn);
        *node = irn;
        return 1;
    }
    return 0;
} /* reassoc_Shl */
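/* Illustrative example (assumed): for Shl(x, Const 3) in mode Is,
 * tarval_shl computes 1 << 3 = 8, so the node is rewritten to
 * Mul(x, Const 8); the Mul form can then take part in the distributive
 * rules above. */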
/**
 * The walker for the reassociation.
 */
static void wq_walker(ir_node *n, void *env)
{
    waitq *const wq = (waitq*)env;

    set_irn_link(n, NULL);
    if (!is_Block(n)) {
        waitq_put(wq, n);
        set_irn_link(n, wq);
    }
} /* wq_walker */
/**
 * Performs the reassociation; runs until a fixpoint is reached.
 */
static void do_reassociation(waitq *const wq)
{
    ir_node *n;
    int i, res;

    while (! waitq_empty(wq)) {
        n = (ir_node*)waitq_get(wq);
        set_irn_link(n, NULL);
        /* reassociation must run until a fixpoint is reached. */
        do {
            ir_op   *op   = get_irn_op(n);
            ir_mode *mode = get_irn_mode(n);

            res = 0;

            /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
            if (mode_is_float(mode) && get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic)
                break;

            if (op->ops.reassociate) {
                res = op->ops.reassociate(&n);
            }
        } while (res == 1);
        for (i = get_irn_arity(n) - 1; i >= 0; --i) {
            ir_node *pred = get_irn_n(n, i);

            if (get_irn_link(pred) != wq) {
                waitq_put(wq, pred);
                set_irn_link(pred, wq);
            }
        }
    }
} /* do_reassociation */
/**
 * Returns the earliest block where a and b are available.
 * Note that we know that a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead.
 */
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
{
    ir_node *blk_a = get_nodes_block(a);
    ir_node *blk_b = get_nodes_block(b);
    ir_node *res;

    /* if blk_a != blk_b, one must dominate the other */
    if (block_dominates(blk_a, blk_b))
        res = blk_b;
    else
        res = blk_a;

    if (res == get_irg_start_block(get_irn_irg(curr_blk)))
        return curr_blk;
    return res;
} /* earliest_block */
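/* Illustrative example (assumed): if a is computed in the start block and b
 * in a loop header dominated by it, the earliest common block is the loop
 * header; if both live in the start block, curr_blk is returned so that new
 * nodes are not placed into the start block. */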
/**
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 *
 * Const, SymConst, Const + SymConst
 *
 * Handling SymConsts as const might not be a good idea for all
 * architectures ...
 */
static int is_constant_expr(ir_node *irn)
{
    switch (get_irn_opcode(irn)) {
    case iro_Const:
    case iro_SymConst:
        return 1;
    case iro_Add: {
        ir_node *const l = get_Add_left(irn);
        if (!is_Const(l) && !is_SymConst(l))
            return 0;
        ir_node *const r = get_Add_right(irn);
        if (!is_Const(r) && !is_SymConst(r))
            return 0;
        return 1;
    }
    default:
        return 0;
    }
} /* is_constant_expr */
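/* Illustrative example (assumed): Add(SymConst &arr, Const 16), i.e. the
 * address &arr[4] for 4-byte elements, counts as a constant expression and
 * may therefore be moved towards the root by move_consts_up(). */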
/**
 * Apply the distributive law in reverse for Mul and Add/Sub.
 */
static int reverse_rule_distributive(ir_node **node)
{
    ir_node *n     = *node;
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    ir_node *x, *blk, *curr_blk;
    ir_node *a, *b, *irn;
    ir_op   *op;
    ir_mode *mode;
    dbg_info *dbg;

    op = get_irn_op(left);
    if (op != get_irn_op(right))
        return 0;
    if (op == op_Shl) {
        x = get_Shl_right(left);

        if (x == get_Shl_right(right)) {
            /* (a << x) +/- (b << x) ==> (a +/- b) << x */
            a = get_Shl_left(left);
            b = get_Shl_left(right);
            goto transform;
        }
    } else if (op == op_Mul) {
        x = get_Mul_left(left);

        if (x == get_Mul_left(right)) {
            /* (x * a) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_right(right);
            goto transform;
        } else if (x == get_Mul_right(right)) {
            /* (x * a) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_left(right);
            goto transform;
        }

        x = get_Mul_right(left);

        if (x == get_Mul_right(right)) {
            /* (a * x) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_left(right);
            goto transform;
        } else if (x == get_Mul_left(right)) {
            /* (a * x) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_right(right);
            goto transform;
        }
    }
    return 0;

transform:
    curr_blk = get_nodes_block(n);

    blk = earliest_block(a, b, curr_blk);

    dbg  = get_irn_dbg_info(n);
    mode = get_irn_mode(n);

    if (is_Add(n))
        irn = new_rd_Add(dbg, blk, a, b, mode);
    else
        irn = new_rd_Sub(dbg, blk, a, b, mode);

    blk = earliest_block(irn, x, curr_blk);

    if (op == op_Mul)
        irn = new_rd_Mul(dbg, blk, irn, x, mode);
    else
        irn = new_rd_Shl(dbg, blk, irn, x, mode);

    exchange(n, irn);
    *node = irn;
    return 1;
} /* reverse_rule_distributive */
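/* Worked example (illustrative, not from the original source): for
 * n = Add(Mul(x, Const 3), Mul(x, Const 5)) the matcher binds a = 3, b = 5
 * and the common factor x, so the node is rewritten to Mul(Add(3, 5), x),
 * which constant folding collapses to Mul(8, x). */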
/**
 * Move constants towards the root.
 */
static int move_consts_up(ir_node **node)
{
    ir_node *n = *node;
    ir_op   *op;
    ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
    ir_mode *mode, *ma, *mb;
    dbg_info *dbg;
    ir_graph *irg;

    l = get_binop_left(n);
    r = get_binop_right(n);

    /* check if one is already a constant expression */
    if (is_constant_expr(l) || is_constant_expr(r))
        return 0;

    dbg = get_irn_dbg_info(n);
    op  = get_irn_op(n);
    if (get_irn_op(l) == op) {
        /* (a .op. b) .op. r */
        a = get_binop_left(l);
        b = get_binop_right(l);

        if (is_constant_expr(a)) {
            /* (C .op. b) .op. r ==> (r .op. b) .op. C */
            c     = a;
            in[0] = r;
            in[1] = b;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
            /* (a .op. C) .op. r ==> (a .op. r) .op. C */
            c     = b;
            in[0] = a;
            in[1] = r;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        }
    }
    if (get_irn_op(r) == op) {
        /* l .op. (a .op. b) */
        a = get_binop_left(r);
        b = get_binop_right(r);

        if (is_constant_expr(a)) {
            /* l .op. (C .op. b) ==> (l .op. b) .op. C */
            c     = a;
            in[0] = l;
            in[1] = b;
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
            /* l .op. (a .op. C) ==> (a .op. l) .op. C */
            c     = b;
            in[0] = a;
            in[1] = l;
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
            goto transform;
        }
    }
    return 0;

transform:
    /* In some cases a and b might have different integer modes, and c might
     * be a SymConst. In that case we could either
     * 1.) cast into unsigned mode
     * 2.) ignore the case
     * We implement the second here.
     */
    ma = get_irn_mode(a);
    mb = get_irn_mode(b);
    if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
        return 0;

    /* check if (a .op. b) can be calculated in the same block as the old instruction */
    if (! block_dominates(get_nodes_block(a), blk))
        return 0;
    if (! block_dominates(get_nodes_block(b), blk))
        return 0;
    mode = get_mode_from_ops(a, b);
    irg  = get_irn_irg(blk);
    in[0] = irn = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

    /* beware: optimize_node might have changed the opcode, check again */
    if (is_Add(irn) || is_Sub(irn)) {
        reverse_rule_distributive(&in[0]);
    }
    in[1] = c;

    mode = get_mode_from_ops(in[0], in[1]);
    irn  = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

    exchange(n, irn);
    *node = irn;
    return 1;
} /* move_consts_up */
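/* Worked example (illustrative, not from the original source): for
 * n = Add(Add(x, Const 3), y) the constant is moved up, giving
 * Add(Add(x, y), Const 3); applied across the whole walk, constants end up
 * near the roots of expression trees, where constant folding can combine
 * them. */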
/**
 * Apply the rules in reverse order, removing code that was not collapsed.
 */
static void reverse_rules(ir_node *node, void *env)
{
    ir_graph *irg  = get_irn_irg(node);
    ir_mode  *mode = get_irn_mode(node);
    int res;
    (void)env;

    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_strict_algebraic)
        return;

    do {
        ir_op *op = get_irn_op(node);

        res = 0;
        if (is_op_commutative(op)) {
            res = move_consts_up(&node);
        }
        /* beware: move_consts_up might have changed the opcode, check again */
        if (is_Add(node) || is_Sub(node)) {
            res = reverse_rule_distributive(&node);
        }
    } while (res);
}
/*
 * do the reassociation
 */
void optimize_reassociation(ir_graph *irg)
{
    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
        "Reassociation needs pinned graph to work properly");

    assure_irg_properties(irg,
        IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
        | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);

#ifdef NEW_REASSOC
    assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
    obstack_init(&commutative_args);
#endif

    waitq *const wq = new_waitq();

    /* disable some optimizations while reassoc is running to prevent endless loops */
    set_reassoc_running(1);

    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, wq_walker, wq);
    do_reassociation(wq);

    /* reverse those rules that do not result in collapsed constants */
    irg_walk_graph(irg, NULL, reverse_rules, NULL);

    set_reassoc_running(0);

#ifdef NEW_REASSOC
    obstack_free(&commutative_args, NULL);
#endif

    del_waitq(wq);

    confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
} /* optimize_reassociation */
/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
{
    return def_graph_pass(name ? name : "reassoc", optimize_reassociation);
} /* optimize_reassociation_pass */
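/* Usage sketch (illustrative, not part of the original file): a driver can
 * either call optimize_reassociation(irg) directly on each ir_graph, or
 * create a named pass object via optimize_reassociation_pass(NULL) and hand
 * it to libFirm's pass management to be run per graph. */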
static void register_node_reassoc_func(ir_op *op, reassociate_func func)
{
    op->ops.reassociate = func;
}
void ir_register_reassoc_node_ops(void)
{
    register_node_reassoc_func(op_Mul, reassoc_Mul);
    register_node_reassoc_func(op_Add, reassoc_Add);
    register_node_reassoc_func(op_Sub, reassoc_Sub);
    register_node_reassoc_func(op_And, reassoc_And);
    register_node_reassoc_func(op_Or,  reassoc_Or);
    register_node_reassoc_func(op_Eor, reassoc_Eor);
    register_node_reassoc_func(op_Shl, reassoc_Shl);
}
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
} /* firm_init_reassociation */