/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Reassociation
 * @author  Michael Beck
 */

#include "irgraph_t.h"
#include "iropt_dbg.h"
#include "reassoc_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)

typedef struct _walker_t {
	int   changes; /**< set if a reassociation took place */
	waitq *wq;     /**< a wait queue */
} walker_t;

typedef enum {
	NO_CONSTANT   = 0, /**< node is not constant */
	REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
	REGION_CONST  = 4  /**< node is a constant expression in the current context,
	                        use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
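
/* With these values, class_a + 2*class_b in get_comm_Binop_ops() below is
 * unambiguous: the classes 0, 1 and 4 yield the pairwise distinct sums
 * 0, 1, 2, 3, 4, 6, 8, 9 and 12, so a single switch can dispatch on both
 * operand classes at once. */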

/**
 * Returns the constant class of a node, i.e. whether it is a real constant
 * or loop invariant (called a region constant).
 *
 * @param n      the node to be checked for constant
 * @param block  a block that might be in a loop
 */
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
{
	if (is_Const(n))
		return REAL_CONSTANT;

	/* constant nodes which cannot be folded are region constants */
	if (is_irn_constlike(n))
		return REGION_CONST;

	/*
	 * Beware: Bad nodes are always loop-invariant, but
	 * cannot be handled in later code, so filter them here.
	 */
	if (! is_Bad(n) && is_loop_invariant(n, block))
		return REGION_CONST;

	return NO_CONSTANT;
} /* get_const_class */

/**
 * Returns the operands of a commutative bin-op. If one operand is
 * a region constant, it is returned as the second one.
 *
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
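 *
 * E.g. for Add(Const 3, x) this sets *a = x and *c = Const 3.
 */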
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
	ir_node *op_a   = get_binop_left(binop);
	ir_node *op_b   = get_binop_right(binop);
	ir_node *block  = get_nodes_block(binop);
	int     class_a = get_const_class(op_a, block);
	int     class_b = get_const_class(op_b, block);

	assert(is_op_commutative(get_irn_op(binop)));

	switch (class_a + 2*class_b) {
	case REAL_CONSTANT + 2*REAL_CONSTANT:
		/* if both are constants, one might be a
		 * pointer constant like NULL; return the other one
		 */
		if (mode_is_reference(get_irn_mode(op_a))) {
			*a = op_a;
			*c = op_b;
		} else {
			*a = op_b;
			*c = op_a;
		}
		break;
	case REAL_CONSTANT + 2*NO_CONSTANT:
	case REAL_CONSTANT + 2*REGION_CONST:
	case REGION_CONST  + 2*NO_CONSTANT:
		/* the constant goes to the second position */
		*a = op_b;
		*c = op_a;
		break;
	default:
		*a = op_a;
		*c = op_b;
		break;
	}
} /* get_comm_Binop_ops */

/**
 * Reassociate a Sub: x - c = x + (-c)
 */
static int reassoc_Sub(ir_node **in)
{
	ir_node *n     = *in;
	ir_node *right = get_Sub_right(n);
	ir_mode *rmode = get_irn_mode(right);
	ir_node *block;

	/* cannot handle SubIs(P, P) */
	if (mode_is_reference(rmode))
		return 0;

	block = get_nodes_block(n);

	/*
	 * convert x - c => x + (-c)
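	 * e.g. Sub(x, Const 7) becomes Add(x, Const -7), so a later pass can
	 * fold the -7 with other constants
	 */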
	if (get_const_class(right, block) == REAL_CONSTANT) {
		ir_node *left = get_Sub_left(n);
		ir_mode *mode;
		dbg_info *dbi;
		ir_node *irn;

		switch (get_const_class(left, block)) {
		case REAL_CONSTANT:
			irn = optimize_in_place(n);
			if (irn != n) {
				exchange(n, irn);
				*in = irn;
				return 1;
			}
			return 0;
		case NO_CONSTANT:
			break;
		default:
			/* already constant, nothing to do */
			return 0;
		}

		mode = get_irn_mode(n);
		dbi  = get_irn_dbg_info(n);

		/* Beware of SubP(P, Is) */
		irn = new_rd_Minus(dbi, block, right, rmode);
		irn = new_rd_Add(dbi, block, left, irn, mode);

		DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
		     get_Sub_left(n), right, get_Sub_left(n), right));

		exchange(n, irn);
		*in = irn;
		return 1;
	}
	return 0;
} /* reassoc_Sub */

/** Retrieve a mode from the operands. We need this because
 * Add and Sub are allowed to operate on (P, Is).
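 * E.g. for the operand modes (P, Is), the reference mode P is returned.
 */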
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
{
	ir_mode *m1, *m2;

	m1 = get_irn_mode(op1);
	if (mode_is_reference(m1))
		return m1;

	m2 = get_irn_mode(op2);
	if (mode_is_reference(m2))
		return m2;

	return m1;
} /* get_mode_from_ops */

/**
 * Reassociate a commutative Binop.
 *
 * BEWARE: this rule leads to a potential loop if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
 */
static int reassoc_commutative(ir_node **node)
{
	ir_node *n     = *node;
	ir_op   *op    = get_irn_op(n);
	ir_node *block = get_nodes_block(n);
	ir_node *t1, *c1;

	get_comm_Binop_ops(n, &t1, &c1);

	if (get_irn_op(t1) == op) {
		ir_node *t2, *c2;
		const_class_t c_c1, c_c2, c_t2;

		get_comm_Binop_ops(t1, &t2, &c2);

		/* do not optimize Bad nodes, will fail later */
		if (is_Bad(t2))
			return 0;

		c_c1 = get_const_class(c1, block);
		c_c2 = get_const_class(c2, block);
		c_t2 = get_const_class(t2, block);

		if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
		     ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
			/* All three are constant and either all are constant expressions
			 * or two of them are:
			 * then applying this rule would lead to a cycle.
			 *
			 * Note that if t2 is a constant, so is c2; hence we save one test.
			 */
			return 0;
		}

		if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
			/* handles rules R7, R8, R9, R10:
			 * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
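			 * e.g. 3 + (5 + x) => x + (3 + 5), which constant folding
			 * then collapses to x + 8
			 */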
			ir_node *irn, *in[2];
			ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);

			/* It might happen that c1 and c2 have different modes, for
			 * instance Is and Iu.
			 */
			if (mode_c1 != mode_c2) {
				if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
					/* get the bigger one */
					if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
						c2 = new_r_Conv(block, c2, mode_c1);
					else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
						c1 = new_r_Conv(block, c1, mode_c2);
					else {
						/* Try to cast the real const */
						if (c_c1 == REAL_CONSTANT)
							c1 = new_r_Conv(block, c1, mode_c2);
						else
							c2 = new_r_Conv(block, c2, mode_c1);
					}
				}
			}

			in[0] = c1;
			in[1] = c2;

			mode  = get_mode_from_ops(in[0], in[1]);
			in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));

			in[0] = t2;

			mode = get_mode_from_ops(in[0], in[1]);
			irn  = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));

			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
			     c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
			     t2, get_irn_opname(n), c1, get_irn_opname(n), c2));

			/*
			 * In some rare cases it can happen that we get the same
			 * node back. This might happen in dead loops, where the Phi
			 * nodes have already gone away. So check for this.
			 */
			if (n != irn) {
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}

	if (get_irn_op(c1) == op) {
		/* normalize so that the nested binop is t1 */
		ir_node *t = c1;
		c1 = t1;
		t1 = t;
	}
	if (get_irn_op(t1) == op) {
		ir_node *l = get_binop_left(t1);
		ir_node *r = get_binop_right(t1);
		const_class_t c_r;

		if (r == c1) {
			ir_node *t = r;
			r = l;
			l = t;
		}
		c_r = get_const_class(r, block);
		if (c_r != NO_CONSTANT) {
			/*
			 * Beware: don't do the following op if a constant was
			 * placed below, else we will fall into a loop.
			 */
			return 0;
		}
		if (l != c1)
			return 0;

		/* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
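		/* e.g. x + (x + y) => y + (x + x); grouping the duplicated operand
		 * lets later local optimizations simplify x .OP. x (x + x, x & x, x ^ x, ...) */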
		ir_mode *mode_res = get_irn_mode(n);
		ir_mode *mode_c1  = get_irn_mode(c1);
		ir_node *irn, *in[2];

		in[0] = c1;
		in[1] = c1;

		in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode_c1, 2, in));

		in[0] = r;

		irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode_res, 2, in));

		DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
		     c1, get_irn_opname(n), l, get_irn_opname(n), r,
		     r, get_irn_opname(n), c1, get_irn_opname(n), c1));

		if (n != irn) {
			exchange(n, irn);
			*node = irn;
			return 1;
		}
	}
	return 0;
} /* reassoc_commutative */

static ir_op   *commutative_op;
static ir_node *commutative_block;
static struct obstack commutative_args;

static void collect_args(ir_node *node)
{
	ir_node *left  = get_binop_left(node);
	ir_node *right = get_binop_right(node);

	if (get_irn_op(left) == commutative_op
			&& (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
		collect_args(left);
	} else {
		obstack_ptr_grow(&commutative_args, left);
	}

	if (get_irn_op(right) == commutative_op
			&& (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
		collect_args(right);
	} else {
		obstack_ptr_grow(&commutative_args, right);
	}

	{
		/* AddP is the only case where the operand modes may differ */
		ir_mode *mode = get_irn_mode(node);
		if (is_Add(node) && mode_is_reference(mode)) {
			assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
		} else {
			assert(get_irn_mode(left) == mode);
			assert(get_irn_mode(right) == mode);
		}
	}
} /* collect_args */

static int compare_nodes(const ir_node *node1, const ir_node *node2)
{
	const_class_t class1 = get_const_class(node1, commutative_block);
	const_class_t class2 = get_const_class(node2, commutative_block);

	if (class1 == class2)
		return 0;
	// return get_irn_idx(node1) - get_irn_idx(node2);

	if (class1 < class2)
		return -1;

	assert(class1 > class2);
	return 1;
} /* compare_nodes */

static int compare_node_ptr(const void *e1, const void *e2)
{
	const ir_node *node1 = *((const ir_node *const*) e1);
	const ir_node *node2 = *((const ir_node *const*) e2);
	return compare_nodes(node1, node2);
} /* compare_node_ptr */

static int reassoc_commutative(ir_node **n)
{
	ir_node *node = *n;
	ir_node **args;
	ir_node *last;
	ir_mode *mode;
	int     n_args;
	int     i;

	commutative_op    = get_irn_op(node);
	commutative_block = get_nodes_block(node);

	/* collect all nodes with the same op type */
	collect_args(node);

	n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
	args   = obstack_finish(&commutative_args);

	/* shortcut: in most cases there's nothing to do */
	if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
		obstack_free(&commutative_args, args);
		return 0;
	}

	/* sort the arguments */
	qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);

	/* rebuild the expression from the sorted arguments */
	last = args[n_args-1];
	mode = get_irn_mode(last);
	for (i = n_args-2; i >= 0; --i) {
		ir_mode *mode_right;
		ir_node *new_node;
		ir_node *in[2];

		in[0] = last;
		in[1] = args[i];

		/* AddP violates the assumption that all modes in args are equal...
		 * we need some hacks to cope with this */
		mode_right = get_irn_mode(in[1]);
		if (mode_is_reference(mode_right)) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			mode = get_irn_mode(in[1]);
		}
		if (mode_right != mode) {
			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
			in[1] = new_r_Conv(current_ir_graph, commutative_block, in[1], mode);
		}

		/* TODO: produce useful debug info! */
		new_node = new_ir_node(NULL, current_ir_graph, commutative_block,
		                       commutative_op, mode, 2, in);
		new_node = optimize_node(new_node);
		last     = new_node;
	}

	/* CSE often returns the old node again, only exchange if needed */
	if (last != node) {
		exchange(node, last);
		*n = last;
		obstack_free(&commutative_args, args);
		return 1;
	}
	obstack_free(&commutative_args, args);
	return 0;
} /* reassoc_commutative */

#define reassoc_Add  reassoc_commutative
#define reassoc_And  reassoc_commutative
#define reassoc_Or   reassoc_commutative
#define reassoc_Eor  reassoc_commutative

/**
 * Reassociate using the commutative law for Mul and the distributive law for Mul and Add/Sub:
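 * e.g. (t1 + t2) * c => (c * t1) + (c * t2)
 */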
static int reassoc_Mul(ir_node **node)
{
	ir_node *n = *node;
	ir_node *add_sub, *c;
	ir_op   *op;

	if (reassoc_commutative(&n))
		return 1;

	get_comm_Binop_ops(n, &add_sub, &c);
	op = get_irn_op(add_sub);

	/* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
	if (op == op_Add || op == op_Sub) {
		ir_mode *mode = get_irn_mode(n);
		ir_node *irn, *block, *t1, *t2, *in[2];

		block = get_nodes_block(n);
		t1 = get_binop_left(add_sub);
		t2 = get_binop_right(add_sub);

		/* we can only apply the multiplication rules on integer arithmetic */
		if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
			in[0] = new_rd_Mul(NULL, block, c, t1, mode);
			in[1] = new_rd_Mul(NULL, block, c, t2, mode);

			irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));

			/* In some cases it might happen that the new irn is equal to the old one, for
			 * instance:
			 *   (x - 1) * y == x * y - y
			 * will be transformed back by simpler optimizations.
			 * We could switch simple optimizations off, but this only happens iff y
			 * is a loop-invariant expression and it is not clear if the new form
			 * is better.
			 * So, we keep the old one in that case. */
			if (n != irn) {
				DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
				     t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
				exchange(n, irn);
				*node = irn;
				return 1;
			}
		}
	}
	return 0;
} /* reassoc_Mul */

/**
 * Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
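 * e.g. Shl(x, Const 3) becomes Mul(x, Const 8).
 */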
static int reassoc_Shl(ir_node **node)
{
	ir_node *n = *node;
	ir_node *c = get_Shl_right(n);
	ir_node *x, *blk, *irn;
	ir_mode *mode;
	tarval  *tv;

	if (! is_Const(c))
		return 0;

	x = get_Shl_left(n);
	mode = get_irn_mode(x);

	tv = get_mode_one(mode);
	tv = tarval_shl(tv, get_Const_tarval(c));

	if (tv == tarval_bad)
		return 0;

	blk = get_nodes_block(n);
	c   = new_Const(tv);
	irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);

	exchange(n, irn);
	*node = irn;
	return 1;
} /* reassoc_Shl */

/**
 * The walker for the reassociation.
 */
static void wq_walker(ir_node *n, void *env)
{
	walker_t *wenv = env;

	set_irn_link(n, NULL);
	if (is_no_Block(n)) {
		ir_node *blk = get_nodes_block(n);

		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
			/* We are in a dead block: do not optimize or we may fall into an endless
			   loop. We check this here instead of requiring that all dead blocks are
			   removed, which cf_opt does not guarantee yet. */
			return;
		}
		waitq_put(wenv->wq, n);
		set_irn_link(n, wenv->wq);
	}
} /* wq_walker */

/**
 * Performs the reassociation on all nodes in the wait queue.
 */
static void do_reassociation(walker_t *wenv)
{
	int i, res, changed;
	ir_node *n, *blk;

	while (! waitq_empty(wenv->wq)) {
		n = waitq_get(wenv->wq);
		set_irn_link(n, NULL);

		blk = get_nodes_block(n);
		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
			/* We are in a dead block: do not optimize or we may fall into an endless
			   loop. We check this here instead of requiring that all dead blocks are
			   removed, which cf_opt does not guarantee yet. */
			continue;
		}

		/* reassociation must run until a fixpoint is reached. */
		changed = 0;
		do {
			ir_op   *op   = get_irn_op(n);
			ir_mode *mode = get_irn_mode(n);

			res = 0;

			/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
			if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
				break;

			if (op->ops.reassociate) {
				res = op->ops.reassociate(&n);

				changed |= res;
			}
		} while (res == 1);

		wenv->changes |= changed;

		for (i = get_irn_arity(n) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(n, i);

			if (get_irn_link(pred) != wenv->wq) {
				waitq_put(wenv->wq, pred);
				set_irn_link(pred, wenv->wq);
			}
		}
	}
} /* do_reassociation */

/**
 * Returns the earliest block where both a and b are available.
 * Note that we know that the blocks of a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead.
 */
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
{
	ir_node *blk_a = get_nodes_block(a);
	ir_node *blk_b = get_nodes_block(b);
	ir_node *res;

	/* if blk_a != blk_b, one must dominate the other */
	if (block_dominates(blk_a, blk_b))
		res = blk_b;
	else
		res = blk_a;

	if (res == get_irg_start_block(current_ir_graph))
		return curr_blk;
	return res;
} /* earliest_block */

/**
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 *
 * Const, SymConst, Const + SymConst
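 *
 * e.g. Add(SymConst(&g), Const 8) counts as a constant expression here.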
 *
 * Handling SymConsts as const might not be a good idea for all
 * architectures ...
 */
static int is_constant_expr(ir_node *irn)
{
	ir_op *op;

	switch (get_irn_opcode(irn)) {
	case iro_Const:
	case iro_SymConst:
		return 1;
	case iro_Add:
		op = get_irn_op(get_Add_left(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		op = get_irn_op(get_Add_right(irn));
		if (op != op_Const && op != op_SymConst)
			return 0;
		return 1;
	default:
		return 0;
	}
} /* is_constant_expr */

/**
 * Apply the distributive law in reverse for Mul and Add/Sub:
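 * e.g. (a * x) + (b * x) => (a + b) * x
 */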
static int reverse_rule_distributive(ir_node **node)
{
	ir_node *n     = *node;
	ir_node *left  = get_binop_left(n);
	ir_node *right = get_binop_right(n);
	ir_node *x, *blk, *curr_blk;
	ir_node *a, *b, *irn;
	ir_op   *op;
	ir_mode *mode;
	dbg_info *dbg;

	op = get_irn_op(left);
	if (op != get_irn_op(right))
		return 0;

	if (op == op_Shl) {
		x = get_Shl_right(left);

		if (x == get_Shl_right(right)) {
			/* (a << x) +/- (b << x) ==> (a +/- b) << x */
			a = get_Shl_left(left);
			b = get_Shl_left(right);
			goto transform;
		}
	} else if (op == op_Mul) {
		x = get_Mul_left(left);

		if (x == get_Mul_left(right)) {
			/* (x * a) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_right(right);
			goto transform;
		} else if (x == get_Mul_right(right)) {
			/* (x * a) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_right(left);
			b = get_Mul_left(right);
			goto transform;
		}

		x = get_Mul_right(left);

		if (x == get_Mul_right(right)) {
			/* (a * x) +/- (b * x) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_left(right);
			goto transform;
		} else if (x == get_Mul_left(right)) {
			/* (a * x) +/- (x * b) ==> (a +/- b) * x */
			a = get_Mul_left(left);
			b = get_Mul_right(right);
			goto transform;
		}
	}
	return 0;

transform:
	curr_blk = get_nodes_block(n);

	blk = earliest_block(a, b, curr_blk);

	dbg  = get_irn_dbg_info(n);
	mode = get_irn_mode(n);

	if (is_Add(n))
		irn = new_rd_Add(dbg, blk, a, b, mode);
	else
		irn = new_rd_Sub(dbg, blk, a, b, mode);

	blk = earliest_block(irn, x, curr_blk);

	if (op == op_Mul)
		irn = new_rd_Mul(dbg, blk, irn, x, mode);
	else
		irn = new_rd_Shl(dbg, blk, irn, x, mode);

	exchange(n, irn);
	*node = irn;
	return 1;
} /* reverse_rule_distributive */

/**
 * Move constants towards the root.
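 * e.g. (a + C) + r => (a + r) + C, so that constants from different
 * subtrees can meet at the root and collapse
 */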
static int move_consts_up(ir_node **node)
{
	ir_node *n = *node;
	ir_op   *op;
	ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
	ir_mode *mode, *ma, *mb;
	dbg_info *dbg;

	l = get_binop_left(n);
	r = get_binop_right(n);

	/* check if one is already a constant expression */
	if (is_constant_expr(l) || is_constant_expr(r))
		return 0;

	dbg = get_irn_dbg_info(n);
	op  = get_irn_op(n);

	if (get_irn_op(l) == op) {
		/* (a .op. b) .op. r */
		a = get_binop_left(l);
		b = get_binop_right(l);

		if (is_constant_expr(a)) {
			/* (C .op. b) .op. r ==> (r .op. b) .op. C */
			c = a;
			a = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* (a .op. C) .op. r ==> (a .op. r) .op. C */
			c = b;
			b = r;
			blk = get_nodes_block(l);
			dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
			goto transform;
		}
	}

	if (get_irn_op(r) == op) {
		/* l .op. (a .op. b) */
		a = get_binop_left(r);
		b = get_binop_right(r);

		if (is_constant_expr(a)) {
			/* l .op. (C .op. b) ==> (l .op. b) .op. C */
			c = a;
			a = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		} else if (is_constant_expr(b)) {
			/* l .op. (a .op. C) ==> (a .op. l) .op. C */
			c = b;
			b = l;
			blk = get_nodes_block(r);
			dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
			goto transform;
		}
	}
	return 0;

transform:
	/* In some cases a and b might have different integer modes, and c might be
	 * a SymConst. In that case we could either
	 * 1.) cast into unsigned mode
	 * 2.) skip the transformation;
	 * we implement the second one here.
	 */
	ma = get_irn_mode(a);
	mb = get_irn_mode(b);
	if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
		return 0;

	/* check if (a .op. b) can be calculated in the same block as the old instruction */
	if (! block_dominates(get_nodes_block(a), blk))
		return 0;
	if (! block_dominates(get_nodes_block(b), blk))
		return 0;

	in[0] = a;
	in[1] = b;

	mode  = get_mode_from_ops(a, b);
	in[0] = irn = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));

	/* beware: optimize_node might have changed the opcode, check again */
	if (is_Add(irn) || is_Sub(irn)) {
		reverse_rule_distributive(&in[0]);
	}

	in[1] = c;

	mode = get_mode_from_ops(in[0], in[1]);
	irn  = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));

	exchange(n, irn);
	*node = irn;
	return 1;
} /* move_consts_up */

/**
 * Apply the rules in reverse order, removing code that was not collapsed.
 */
static void reverse_rules(ir_node *node, void *env)
{
	walker_t *wenv = env;
	ir_mode *mode = get_irn_mode(node);
	int res;

	/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
	if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
		return;

	do {
		ir_op *op = get_irn_op(node);

		res = 0;
		if (is_op_commutative(op)) {
			wenv->changes |= res = move_consts_up(&node);
		}
		/* beware: move_consts_up might have changed the opcode, check again */
		if (is_Add(node) || is_Sub(node)) {
			wenv->changes |= res = reverse_rule_distributive(&node);
		}
	} while (res);
} /* reverse_rules */

/*
 * Do the reassociation.
 */
int optimize_reassociation(ir_graph *irg)
{
	walker_t env;
	irg_loopinfo_state state;
	ir_graph *rem;

	assert(get_irg_phase_state(irg) != phase_building);
	assert(get_irg_pinned(irg) != op_pin_state_floats &&
	       "Reassociation needs pinned graph to work properly");

	rem = current_ir_graph;
	current_ir_graph = irg;

	/* we use dominance to detect dead blocks */
	assure_doms(irg);

	assure_irg_outs(irg);
	obstack_init(&commutative_args);

	/*
	 * Calculate loop info, so we can identify loop-invariant
	 * code and treat it like a constant.
	 * We only need control flow loops here but can handle generic
	 * INTRA info as well.
	 */
	state = get_irg_loopinfo_state(irg);
	if ((state & loopinfo_inter) ||
	    (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
		construct_cf_backedges(irg);

	env.changes = 0;
	env.wq      = new_waitq();

	/* disable some optimizations while reassoc is running to prevent endless loops */
	set_reassoc_running(1);

	/* now we have collected enough information, optimize */
	irg_walk_graph(irg, NULL, wq_walker, &env);
	do_reassociation(&env);

	/* reverse those rules that do not result in collapsed constants */
	irg_walk_graph(irg, NULL, reverse_rules, &env);

	set_reassoc_running(0);

	/* Handle graph state */
	if (env.changes) {
		set_irg_outs_inconsistent(irg);
		set_irg_loopinfo_inconsistent(irg);
	}

	obstack_free(&commutative_args, NULL);

	del_waitq(env.wq);
	current_ir_graph = rem;
	return env.changes;
} /* optimize_reassociation */

/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
{
	return def_graph_pass_ret(name ? name : "reassoc", optimize_reassociation);
} /* optimize_reassociation_pass */

/* Sets the default reassociation operation for an ir_op_ops. */
ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
{
#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
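	/* e.g. CASE(Mul) expands to: case iro_Mul: ops->reassociate = reassoc_Mul; break */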
	switch (code) {
	CASE(Mul);
	CASE(Add);
	CASE(Sub);
	CASE(And);
	CASE(Or);
	CASE(Eor);
	CASE(Shl);
	default:
		/* leave NULL */
		break;
	}

	return ops;
#undef CASE
} /* firm_set_default_reassoc */

/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
} /* firm_init_reassociation */