/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Reassociation
 * @author  Michael Beck
 */
#include <stdlib.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "iropt_dbg.h"
#include "irgwalk.h"
#include "reassoc_t.h"
#include "irpass_t.h"
#include "pdeq.h"
#include "debug.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

typedef struct walker_t {
	int       changes;  /**< set if a reassociation takes place */
	ir_graph *irg;      /**< the graph that is being processed */
	waitq    *wq;       /**< a wait queue */
} walker_t;

typedef enum {
	NO_CONSTANT   = 0,  /**< node is not constant */
	REAL_CONSTANT = 1,  /**< node is a Const that is suitable for constant folding */
	REGION_CONST  = 4   /**< node is a constant expression in the current context,
	                         use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
/**
 * Returns whether a node is constant, i.e. a Const node or
 * loop-invariant (called a region constant).
 *
 * @param n      the node to be checked for constant
 * @param block  a block that might be in a loop
 */
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
{
	if (is_Const(n))
		return REAL_CONSTANT;

	/* constant nodes which can't be folded are region constants */
	if (is_irn_constlike(n))
		return REGION_CONST;

	/*
	 * Beware: Bad nodes are always loop-invariant, but
	 * cannot be handled in later code, so filter them here.
	 */
	if (! is_Bad(n) && is_loop_invariant(n, block))
		return REGION_CONST;

	return NO_CONSTANT;
}  /* get_const_class */
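/* Illustration (not from the original source): inside
 *
 *     for (i = 0; i < n; ++i)
 *         a[i] = x * 2;
 *
 * the Const 2 is a REAL_CONSTANT, while x, being defined outside the
 * loop, is loop-invariant with respect to the loop body and is
 * classified as a REGION_CONST.
 */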
/**
 * Returns the operands of a commutative bin-op; if one operand is
 * a region constant, it is returned as the second one.
 *
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
 */
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
	ir_node *op_a  = get_binop_left(binop);
	ir_node *op_b  = get_binop_right(binop);
	ir_node *block = get_nodes_block(binop);
	int class_a = get_const_class(op_a, block);
	int class_b = get_const_class(op_b, block);

	assert(is_op_commutative(get_irn_op(binop)));

	switch (class_a + 2*class_b) {
	case REAL_CONSTANT + 2*REAL_CONSTANT:
		/* if both are constants, one might be a
		 * pointer constant like NULL: return the other
		 */
		if (mode_is_reference(get_irn_mode(op_a))) {
			*a = op_a;
			*c = op_b;
		} else {
			*a = op_b;
			*c = op_a;
		}
		break;
	case REAL_CONSTANT + 2*NO_CONSTANT:
	case REAL_CONSTANT + 2*REGION_CONST:
	case REGION_CONST + 2*NO_CONSTANT:
		/* the left operand is the "more constant" one: swap */
		*a = op_b;
		*c = op_a;
		break;
	default:
		*a = op_a;
		*c = op_b;
		break;
	}
}  /* get_comm_Binop_ops */
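/* Example (illustrative only): for Add(Const 3, x) the helper yields
 * *a = x, *c = Const 3; for Add(x_invariant, Const 3) it also yields
 * *c = Const 3, because a REAL_CONSTANT outranks a REGION_CONST.
 * The value 4 for REGION_CONST keeps every combination of
 * class_a + 2*class_b distinct in the switch above.
 */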
/**
 * Reassociate a Sub: x - c = x + (-c)
 */
static int reassoc_Sub(ir_node **in)
{
	ir_node *n     = *in;
	ir_node *right = get_Sub_right(n);
	ir_mode *rmode = get_irn_mode(right);
	ir_node *block;

	/* cannot handle SubIs(P, P) */
	if (mode_is_reference(rmode))
		return 0;

	block = get_nodes_block(n);

	/* convert x - c => x + (-c) */
	if (get_const_class(right, block) == REAL_CONSTANT) {
		ir_node *left = get_Sub_left(n);
		ir_node *irn;
		dbg_info *dbi;
		ir_mode *mode;

		switch (get_const_class(left, block)) {
		case REAL_CONSTANT:
			/* both are constants: try to fold in place */
			irn = optimize_in_place(n);
			if (irn != n) {
				exchange(n, irn);
				*in = irn;
				return 1;
			}
			return 0;
		case NO_CONSTANT:
			break;
		default:
			/* already constant, nothing to do */
			return 0;
		}

		mode = get_irn_mode(n);
		dbi  = get_irn_dbg_info(n);

		/* Beware of SubP(P, Is) */
		irn = new_rd_Minus(dbi, block, right, rmode);
		irn = new_rd_Add(dbi, block, left, irn, mode);

		DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
			get_Sub_left(n), right, get_Sub_left(n), right));
		exchange(n, irn);
		*in = irn;
		return 1;
	}
	return 0;
}  /* reassoc_Sub */
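/* At the source level this rule rewrites, for example,
 *     y = x - 4;
 * into
 *     y = x + (-4);
 * so the constant becomes an addend that the commutative Add rules
 * below can merge with other constants.
 */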
/** Retrieve a mode from the operands. We need this, because
 * Add and Sub are allowed to operate on (P, Is).
 */
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
{
	ir_mode *m1, *m2;

	m1 = get_irn_mode(op1);
	if (mode_is_reference(m1))
		return m1;

	m2 = get_irn_mode(op2);
	if (mode_is_reference(m2))
		return m2;

	return m1;
}  /* get_mode_from_ops */
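/* Example: for an Add of a pointer p (mode P) and an integer offset
 * (mode Is), the result must have the reference mode; both
 * get_mode_from_ops(p, offset) and get_mode_from_ops(offset, p)
 * return P.
 */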
#ifndef NEW_REASSOC

/**
 * Reassociate a commutative Binop.
 *
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
 */
static int reassoc_commutative(ir_node **node)
{
	ir_node *n     = *node;
	ir_op   *op    = get_irn_op(n);
	ir_node *block = get_nodes_block(n);
	ir_node *t1, *c1;

	get_comm_Binop_ops(n, &t1, &c1);

	if (get_irn_op(t1) == op) {
		ir_node *t2, *c2;
		const_class_t c_c1, c_c2, c_t2;

		get_comm_Binop_ops(t1, &t2, &c2);

		/* do not optimize Bad nodes, will fail later */
		if (is_Bad(t2) || is_Bad(c2))
			return 0;

		c_c1 = get_const_class(c1, block);
		c_c2 = get_const_class(c2, block);
		c_t2 = get_const_class(t2, block);

		if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
		     ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
			/* All three are constant and either all are constant expressions
			 * or two of them are:
			 * then applying this rule would lead into a cycle.
			 *
			 * Note that if t2 is a constant so is c2, hence we save one test.
			 */
			return 0;
		}

		if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
			/* handles rules R7, R8, R9, R10:
			 * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
			 */
			ir_node *irn, *in[2];
			ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
			ir_graph *irg = get_irn_irg(c1);

			/* It might happen that c1 and c2 have different modes, for
			 * instance Is and Iu. Handle this here.
			 */
			if (mode_c1 != mode_c2) {
				if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
					/* get the bigger one */
					if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
						c2 = new_r_Conv(block, c2, mode_c1);
					else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
						c1 = new_r_Conv(block, c1, mode_c2);
					else {
						/* Try to cast the real const */
						if (c_c1 == REAL_CONSTANT)
							c1 = new_r_Conv(block, c1, mode_c2);
						else
							c2 = new_r_Conv(block, c2, mode_c1);
					}
				}
			}

			in[0] = c1;
			in[1] = c2;

			mode  = get_mode_from_ops(in[0], in[1]);
			in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
			in[0] = t2;

			mode = get_mode_from_ops(in[0], in[1]);
			irn  = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
				c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
				t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
			/*
			 * In some rare cases it can really happen that we get the same
			 * node back. This can happen in dead loops where the Phi nodes
			 * have already gone away. So check for this.
			 */
			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	if (get_irn_op(c1) == op) {
		/* make sure that t1 is the nested binop */
		ir_node *t = t1;
		t1 = c1;
		c1 = t;
	}
	if (get_irn_op(t1) == op) {
		ir_node *l = get_binop_left(t1);
		ir_node *r = get_binop_right(t1);
		const_class_t c_r;

		if (r == c1) {
			ir_node *t = l;
			l = r;
			r = t;
		}
		c_r = get_const_class(r, block);
		if (c_r != NO_CONSTANT) {
			/*
			 * Beware: don't do the following op if a constant was
			 * placed below, else we will fall into a loop.
			 */
			return 0;
		}

		if (l == c1) {
			/* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
			ir_mode *mode_res = get_irn_mode(n);
			ir_mode *mode_c1  = get_irn_mode(c1);
			ir_graph *irg     = get_irn_irg(c1);
			ir_node *irn, *in[2];

			in[0] = c1;
			in[1] = c1;

			in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode_c1, 2, in));
			in[0] = r;

			irn = optimize_node(new_ir_node(NULL, irg, block, op, mode_res, 2, in));

			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
				c1, get_irn_opname(n), l, get_irn_opname(n), r,
				r, get_irn_opname(n), c1, get_irn_opname(n), c1));

			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	return 0;
}  /* reassoc_commutative */
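/* Worked example for rules R7-R10: with c1 = 3, c2 = 5 and t2 = x, the
 * tree 3 + (5 + x) is rebuilt as x + (3 + 5), and optimize_node() folds
 * the constant subtree so that only x + 8 remains.
 */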
#else

static ir_op   *commutative_op;
static ir_node *commutative_block;
static struct obstack commutative_args;

static void collect_args(ir_node *node)
{
	ir_node *left  = get_binop_left(node);
	ir_node *right = get_binop_right(node);

	if (get_irn_op(left) == commutative_op
			&& (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
		collect_args(left);
	} else {
		obstack_ptr_grow(&commutative_args, left);
	}

	if (get_irn_op(right) == commutative_op
			&& (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
		collect_args(right);
	} else {
		obstack_ptr_grow(&commutative_args, right);
	}

#ifndef NDEBUG
	{
		ir_mode *mode = get_irn_mode(node);
		if (is_Add(node) && mode_is_reference(mode)) {
			assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
		} else {
			assert(get_irn_mode(left) == mode);
			assert(get_irn_mode(right) == mode);
		}
	}
#endif
}

static int compare_nodes(const ir_node *node1, const ir_node *node2)
{
	const_class_t class1 = get_const_class(node1, commutative_block);
	const_class_t class2 = get_const_class(node2, commutative_block);

	if (class1 == class2)
		return 0;
	// return get_irn_idx(node1) - get_irn_idx(node2);

	if (class1 < class2)
		return -1;

	assert(class1 > class2);
	return 1;
}

static int compare_node_ptr(const void *e1, const void *e2)
{
	const ir_node *node1 = *((const ir_node *const*) e1);
	const ir_node *node2 = *((const ir_node *const*) e2);
	return compare_nodes(node1, node2);
}
static int reassoc_commutative(ir_node **n)
{
	ir_node  *node = *n;
	ir_node **args;
	ir_node  *last;
	ir_mode  *mode;
	int       n_args;
	int       i;

	commutative_op    = get_irn_op(node);
	commutative_block = get_nodes_block(node);

	/* collect all nodes with the same op type */
	collect_args(node);

	n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
	args   = obstack_finish(&commutative_args);

	/* shortcut: in most cases there's nothing to do */
	if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
		obstack_free(&commutative_args, args);
		return 0;
	}

	/* sort the arguments */
	qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);

	/* rebuild the chain as a right-leaning tree */
	last = args[n_args-1];
	mode = get_irn_mode(last);
	for (i = n_args-2; i >= 0; --i) {
		ir_mode  *mode_right;
		ir_node  *new_node;
		ir_node  *in[2];
		ir_graph *irg = get_irn_irg(last);

		in[0] = last;
		in[1] = args[i];

		/* AddP violates the assumption that all modes in args are equal...
		 * we need some hacks to cope with this */
		mode_right = get_irn_mode(in[1]);
		if (mode_is_reference(mode_right)) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			mode = get_irn_mode(in[1]);
		}
		if (mode_right != mode) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			in[1] = new_r_Conv(commutative_block, in[1], mode);
		}

		/* TODO: produce useful debug info! */
		new_node = new_ir_node(NULL, irg, commutative_block,
		                       commutative_op, mode, 2, in);
		new_node = optimize_node(new_node);
		last     = new_node;
	}

	/* CSE often returns the old node again, only exchange if needed */
	if (last != node) {
		exchange(node, last);
		*n = last;
		obstack_free(&commutative_args, args);
		return 1;
	}

	obstack_free(&commutative_args, args);
	return 0;
}

#endif
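/* Sketch of the NEW_REASSOC variant's effect: for ((x + 3) + y) + 5 it
 * collects the leaves x, 3, y and 5, sorts them so that the constants
 * end up adjacent, and rebuilds the chain; constant folding during
 * optimize_node() then combines 3 and 5.
 */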
#define reassoc_Add  reassoc_commutative
#define reassoc_And  reassoc_commutative
#define reassoc_Or   reassoc_commutative
#define reassoc_Eor  reassoc_commutative
/**
 * Reassociate using commutative law for Mul and distributive law for Mul and Add/Sub.
 */
static int reassoc_Mul(ir_node **node)
{
	ir_node *n = *node;
	ir_node *add_sub, *c;
	ir_op *op;

	if (reassoc_commutative(&n)) {
		*node = n;
		return 1;
	}

	get_comm_Binop_ops(n, &add_sub, &c);
	op = get_irn_op(add_sub);

	/* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
	if (op == op_Add || op == op_Sub) {
		ir_mode *mode = get_irn_mode(n);
		ir_node *irn, *block, *t1, *t2, *in[2];

		block = get_nodes_block(n);
		t1 = get_binop_left(add_sub);
		t2 = get_binop_right(add_sub);

		/* we can only apply the multiplication rules on integer arithmetic */
		if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
			ir_graph *irg = get_irn_irg(t1);
			in[0] = new_rd_Mul(NULL, block, c, t1, mode);
			in[1] = new_rd_Mul(NULL, block, c, t2, mode);

			irn = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));

			/* In some cases it might happen that the new irn is equal to the old
			 * one, for example because
			 *     (x - 1) * y == x * y - y
			 * will be transformed back by simpler optimizations.
			 * We could switch simple optimizations off, but this only happens if y
			 * is a loop-invariant expression, and it is not clear that the new form
			 * is better.
			 * So we keep the old one.
			 */
			if (irn != n) {
				DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
					t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	return 0;
}  /* reassoc_Mul */
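/* Example: c * (t1 + t2) with c = 3 becomes 3*t1 + 3*t2.  This looks
 * like more work, but it moves the constant multiplication next to each
 * operand, where the other rules and constant folding can often
 * simplify further.
 */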
/**
 * Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
 */
static int reassoc_Shl(ir_node **node)
{
	ir_node *n = *node;
	ir_node *c = get_Shl_right(n);
	ir_node *x, *blk, *irn;
	ir_graph *irg;
	ir_mode *mode;
	ir_tarval *tv;

	if (! is_Const(c))
		return 0;

	x = get_Shl_left(n);
	mode = get_irn_mode(x);

	tv = get_mode_one(mode);
	tv = tarval_shl(tv, get_Const_tarval(c));

	if (tv == tarval_bad)
		return 0;

	blk = get_nodes_block(n);
	irg = get_irn_irg(blk);
	c   = new_r_Const(irg, tv);
	irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);

	if (irn != n) {
		exchange(n, irn);
		*node = irn;
		return 1;
	}
	return 0;
}  /* reassoc_Shl */
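/* Example: Shl(x, 3) computes x << 3; since 1 << 3 == 8, the node is
 * rewritten as Mul(x, 8), which makes the Mul-based rules above
 * applicable to shifts as well.
 */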
/**
 * The walker for the reassociation.
 */
static void wq_walker(ir_node *n, void *env)
{
	walker_t *wenv = (walker_t*)env;

	set_irn_link(n, NULL);
	if (! is_Block(n)) {
		ir_node *blk = get_nodes_block(n);

		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
			/* We are in a dead block: do not optimize, or we may fall into an
			 * endless loop. We check this here instead of requiring that all
			 * dead blocks are removed, which passes such as cf_opt do not
			 * guarantee yet. */
			return;
		}
		waitq_put(wenv->wq, n);
		set_irn_link(n, wenv->wq);
	}
}  /* wq_walker */
/**
 * Run the reassociation on all enqueued nodes.
 */
static void do_reassociation(walker_t *wenv)
{
	int i, res, changed;
	ir_node *n, *blk;

	while (! waitq_empty(wenv->wq)) {
		n = (ir_node*)waitq_get(wenv->wq);
		set_irn_link(n, NULL);

		blk = get_nodes_block(n);
		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
			/* We are in a dead block: do not optimize, or we may fall into an
			 * endless loop. We check this here instead of requiring that all
			 * dead blocks are removed, which passes such as cf_opt do not
			 * guarantee yet. */
			continue;
		}

		/* reassociation must run until a fixpoint is reached. */
		changed = 0;
		do {
			ir_op   *op   = get_irn_op(n);
			ir_mode *mode = get_irn_mode(n);

			res = 0;

			/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
			if (mode_is_float(mode) && get_irg_fp_model(wenv->irg) & fp_strict_algebraic)
				break;

			if (op->ops.reassociate) {
				res = op->ops.reassociate(&n);

				changed |= res;
			}
		} while (res == 1);

		wenv->changes |= changed;

		if (changed) {
			for (i = get_irn_arity(n) - 1; i >= 0; --i) {
				ir_node *pred = get_irn_n(n, i);

				if (get_irn_link(pred) != wenv->wq) {
					waitq_put(wenv->wq, pred);
					set_irn_link(pred, wenv->wq);
				}
			}
		}
	}
}  /* do_reassociation */
/**
 * Returns the earliest block where a and b are available.
 * Note that we know that a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead.
 */
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
{
	ir_node *blk_a = get_nodes_block(a);
	ir_node *blk_b = get_nodes_block(b);
	ir_node *res;

	/* if blk_a != blk_b, one must dominate the other */
	if (block_dominates(blk_a, blk_b))
		res = blk_b;
	else
		res = blk_a;

	if (res == get_irg_start_block(get_irn_irg(curr_blk)))
		return curr_blk;
	return res;
}  /* earliest_block */
/**
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 *
 * Const, SymConst, Const + SymConst
 *
 * Handling SymConsts as constants might not be a good idea for all
 * architectures.
 */
static int is_constant_expr(ir_node *irn)
{
	ir_op *op;

	switch (get_irn_opcode(irn)) {
	case iro_Const:
	case iro_SymConst:
		return 1;
	case iro_Add:
		op = get_irn_op(get_Add_left(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		op = get_irn_op(get_Add_right(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		return 1;
	default:
		return 0;
	}
}  /* is_constant_expr */
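/* Example: an Add of a SymConst (say, the address of a global) and a
 * Const offset is a constant expression in this sense, so
 * move_consts_up() below may float it towards the root of the
 * expression tree.
 */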
/**
 * Apply the distributive law in reverse for Mul/Shl and Add/Sub.
 */
static int reverse_rule_distributive(ir_node **node)
{
	ir_node *n     = *node;
	ir_node *left  = get_binop_left(n);
	ir_node *right = get_binop_right(n);
	ir_node *x, *blk, *curr_blk;
	ir_node *a, *b, *irn;
	ir_op *op;
	ir_mode *mode;
	dbg_info *dbg;

	op = get_irn_op(left);
	if (op != get_irn_op(right))
		return 0;

	if (op == op_Shl) {
		x = get_Shl_right(left);

		if (x == get_Shl_right(right)) {
			/* (a << x) +/- (b << x) ==> (a +/- b) << x */
			a = get_Shl_left(left);
			b = get_Shl_left(right);
			goto transform;
		}
	} else if (op == op_Mul) {
		x = get_Mul_left(left);

		if (x == get_Mul_left(right)) {
			/* (x * a) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_right(right);
			goto transform;
		} else if (x == get_Mul_right(right)) {
			/* (x * a) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_left(right);
			goto transform;
		}

		x = get_Mul_right(left);

		if (x == get_Mul_right(right)) {
			/* (a * x) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_left(right);
			goto transform;
		} else if (x == get_Mul_left(right)) {
			/* (a * x) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_right(right);
			goto transform;
		}
	}
	return 0;

transform:
	curr_blk = get_nodes_block(n);

	blk = earliest_block(a, b, curr_blk);

	dbg  = get_irn_dbg_info(n);
	mode = get_irn_mode(n);

	if (is_Add(n))
		irn = new_rd_Add(dbg, blk, a, b, mode);
	else
		irn = new_rd_Sub(dbg, blk, a, b, mode);

	blk = earliest_block(irn, x, curr_blk);

	if (op == op_Mul)
		irn = new_rd_Mul(dbg, blk, irn, x, mode);
	else
		irn = new_rd_Shl(dbg, blk, irn, x, mode);

	exchange(n, irn);
	*node = irn;
	return 1;
}  /* reverse_rule_distributive */
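/* Example: x*a + x*b is rewritten as (a + b)*x, trading two
 * multiplications for one; likewise (a << x) - (b << x) becomes
 * (a - b) << x.  earliest_block() hoists the new Add/Sub as far as its
 * operands allow, so the result is available wherever n was used.
 */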
/**
 * Move constants towards the root.
 */
static int move_consts_up(ir_node **node)
{
	ir_node *n = *node;
	ir_op *op;
	ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
	ir_mode *mode, *ma, *mb;
	dbg_info *dbg;
	ir_graph *irg;

	l = get_binop_left(n);
	r = get_binop_right(n);

	/* check if one is already a constant expression */
	if (is_constant_expr(l) || is_constant_expr(r))
		return 0;

	dbg = get_irn_dbg_info(n);
	op  = get_irn_op(n);
	if (get_irn_op(l) == op) {
		/* (a .op. b) .op. r */
		a = get_binop_left(l);
		b = get_binop_right(l);

		if (is_constant_expr(a)) {
			/* (C .op. b) .op. r ==> (r .op. b) .op. C */
			c = a;
			a = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* (a .op. C) .op. r ==> (a .op. r) .op. C */
			c = b;
			b = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		}
	}
	if (get_irn_op(r) == op) {
		/* l .op. (a .op. b) */
		a = get_binop_left(r);
		b = get_binop_right(r);

		if (is_constant_expr(a)) {
			/* l .op. (C .op. b) ==> (l .op. b) .op. C */
			c = a;
			a = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* l .op. (a .op. C) ==> (a .op. l) .op. C */
			c = b;
			b = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		}
	}
	return 0;

transform:
	/* In some cases a and b might both be of different integer modes, and c a SymConst.
	 * In that case we could either
	 * 1.) cast into unsigned mode
	 * 2.) ignore this case
	 * We implement the second here.
	 */
	ma = get_irn_mode(a);
	mb = get_irn_mode(b);
	if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
		return 0;

	/* check if (a .op. b) can be calculated in the same block as the old instruction */
	if (! block_dominates(get_nodes_block(a), blk))
		return 0;
	if (! block_dominates(get_nodes_block(b), blk))
		return 0;

	in[0] = a;
	in[1] = b;

	mode = get_mode_from_ops(a, b);
	irg  = get_irn_irg(blk);
	in[0] = irn = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

	/* beware: optimize_node might have changed the opcode, check again */
	if (is_Add(irn) || is_Sub(irn)) {
		reverse_rule_distributive(&in[0]);
	}
	in[1] = c;

	mode = get_mode_from_ops(in[0], in[1]);
	irn  = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));

	exchange(n, irn);
	*node = irn;
	return 1;
}  /* move_consts_up */
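/* Example: (x + 1) + y is rewritten as (x + y) + 1.  The constant moves
 * towards the root, where repeated application can collect the constants
 * of a whole expression tree next to each other.
 */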
/**
 * Apply the rules in reverse order, removing code that was not collapsed.
 */
static void reverse_rules(ir_node *node, void *env)
{
	walker_t *wenv = (walker_t*)env;
	ir_graph *irg  = get_irn_irg(node);
	ir_mode  *mode = get_irn_mode(node);
	int res;

	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
	if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_strict_algebraic)
		return;

	do {
		ir_op *op = get_irn_op(node);

		res = 0;
		if (is_op_commutative(op)) {
			wenv->changes |= res = move_consts_up(&node);
		}
		/* beware: move_consts_up might have changed the opcode, check again */
		if (is_Add(node) || is_Sub(node)) {
			wenv->changes |= res = reverse_rule_distributive(&node);
		}
	} while (res);
}  /* reverse_rules */
/*
 * do the reassociation
 */
int optimize_reassociation(ir_graph *irg)
{
	walker_t env;
	irg_loopinfo_state state;

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
		"Reassociation needs pinned graph to work properly");

	/* we use dominance to detect dead blocks */
	assure_doms(irg);

#ifdef NEW_REASSOC
	assure_irg_outs(irg);
	obstack_init(&commutative_args);
#endif

	/*
	 * Calculate loop info, so we can identify loop-invariant
	 * code and treat it like a constant.
	 * We only need control flow loops here but can handle generic
	 * INTRA info as well.
	 */
	state = get_irg_loopinfo_state(irg);
	if ((state & loopinfo_inter) ||
	    (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
		construct_cf_backedges(irg);

	env.changes = 0;
	env.irg     = irg;
	env.wq      = new_waitq();

	/* disable some optimizations while reassoc is running to prevent endless loops */
	set_reassoc_running(1);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, wq_walker, &env);
	do_reassociation(&env);

	/* reverse those rules that do not result in collapsed constants */
	irg_walk_graph(irg, NULL, reverse_rules, &env);

	set_reassoc_running(0);

	/* Handle graph state */
	if (env.changes) {
		set_irg_outs_inconsistent(irg);
		set_irg_loopinfo_inconsistent(irg);
	}

#ifdef NEW_REASSOC
	obstack_free(&commutative_args, NULL);
#endif

	del_waitq(env.wq);
	return env.changes;
}  /* optimize_reassociation */
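/* Typical use (a sketch only; driver details vary):
 *
 *     ir_graph *irg = get_irp_irg(i);
 *     if (optimize_reassociation(irg))
 *         local_optimize_graph(irg);  // fold what the rewrite exposed
 *
 * local_optimize_graph() is libFirm's local optimizer; running it after
 * reassociation collapses the constant subtrees created above.
 */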
/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
{
	return def_graph_pass_ret(name ? name : "reassoc", optimize_reassociation);
}  /* optimize_reassociation_pass */

/* Sets the default reassociation operation for an ir_op_ops. */
ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
{
#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break

	switch (code) {
	CASE(Mul);
	CASE(Add);
	CASE(Sub);
	CASE(And);
	CASE(Or);
	CASE(Eor);
	CASE(Shl);
	default:
		/* leave NULL */
		break;
	}

	return ops;
#undef CASE
}  /* firm_set_default_reassoc */
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
}  /* firm_init_reassociation */